index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,400 | 0563408648ff3194c8f80895855396eb80a3d813 | from copy import deepcopy
from .BaseNews import BaseNews
from .MessageHandlers import Reader, Writer
from Model import Cell as ModelCell
class ViewOppBase(BaseNews):
    """News message reporting an opponent base seen at a map cell.

    Initialize with the cell to report; ``get_cell()`` returns a
    ``Model.Cell`` in which only ``x`` and ``y`` are meaningful.
    """

    huffman_prefix = "00010"  # message-type prefix written to the bit stream

    def __init__(self, cell: ModelCell):
        super().__init__()
        # Keep a private copy so later mutations of the caller's cell
        # do not leak into this message.
        self.cell: ModelCell = deepcopy(cell)

    def __str__(self):
        return "BS{}{}".format(self.cell.x, self.cell.y)

    def get_cell(self) -> ModelCell:
        return self.cell

    def message_size(self) -> int:
        # prefix + 6-bit x + 6-bit y
        return len(self.huffman_prefix) + 12

    def get_priority(self):
        return 3000000

    def encode(self, writer: Writer):
        prefix = self.huffman_prefix
        writer.write(int(prefix, 2), len(prefix))
        for coordinate in (self.cell.x, self.cell.y):
            writer.write(coordinate, 6)

    @staticmethod
    def decode(reader: Reader) -> BaseNews:
        col, row = reader.read(6), reader.read(6)
        return ViewOppBase(ModelCell(col, row, None, None, None))
"""
initialize it with cell that you want to report
use get_cell() to get Model.Cell of that cell (only x,y is known other data is None)
don't use another functions
""" |
15,401 | 5cb001730af3251a6fcbcf491490eb563b3db5cd | from collections import Counter
# Advent of Code 2020 day 6: questions answered within each group.
with open('06_input') as f:  # close the file deterministically (was left open)
    raw_data = f.read()
group_answers = [group.split('\n') for group in raw_data.split('\n\n')]
# Part 1: number of distinct questions answered by anyone in each group.
print("Part 1: ", sum(map(lambda gr: len(Counter(''.join(gr))), group_answers)))
# Part 2: number of questions answered by *everyone* in the group.
unanimous_count = 0
for group in group_answers:
    # Counter gives the per-question totals directly (replaces the
    # hand-rolled dict-increment loop; Counter was already imported).
    answer_counts = Counter(''.join(group))
    unanimous_count += sum(1 for count in answer_counts.values() if count == len(group))
print('Part 2: ', unanimous_count)
|
15,402 | 9d01f41266b14991d40f79896a59df8eb690b2f4 |
import os
import numpy as np
from matplotlib import pyplot as plt
import mwarp1d
#load data:
# Demonstration script: landmark-based time warping of one gait trial onto
# another using mwarp1d, then a two-panel before/after comparison plot.
dir0 = os.path.dirname(__file__)
fnameCSV = os.path.join(dir0, 'data', 'Dorn2012.csv')
y = np.loadtxt(fnameCSV, delimiter=',')  # rows = trials, columns = time samples
y0,y1 = y[0], y[-1] #two trials for demonstration
#define landmarks and apply warp:
# lm0/lm1 are sample indices of corresponding features in y0/y1
# (presumably matching gait events -- TODO confirm against mwarp1d docs)
lm0 = [9, 14, 24, 70]
lm1 = [11, 22, 33, 73]
y1w = mwarp1d.warp_landmark(y1, lm1, lm0)  # warp y1 so its landmarks align with y0's
#plot:
plt.close('all')
plt.figure( figsize=(10,4) )
#create axes:
ax0 = plt.axes([0.08,0.12,0.42,0.84])  # left panel: original trials + landmarks
ax1 = plt.axes([0.57,0.12,0.42,0.84])  # right panel: warped result
c0 = (0.3, 0.3, 0.98)   # template landmark colour
c1 = (0.98, 0.7, 0.3)   # source landmark colour
#plot data and landmarks:
h0 = ax0.plot(y0, color='0.0', lw=3)[0]
h1 = ax0.plot(y1, color='0.7', lw=1)[0]
h2 = ax0.plot(lm0, y0[lm0], 'o', color=c0, ms=7)[0]
h3 = ax0.plot(lm1, y1[lm1], 'o', color=c1, ms=7)[0]
ax0.legend([h0,h1,h2,h3], ['Template', 'Source', 'Template landmarks', 'Source landmarks'], loc='lower right')
# for x in lm0[1:-1]:
# ax0.plot([x,x],[y0[x],y1[x]], color=c0, ls=':', lw=1)
#plot warped data:
h0 = ax1.plot(y0, color='0.0', lw=3)[0]
h1 = ax1.plot(y1, color='0.7', lw=1)[0]
h2 = ax1.plot(y1w, color=c1, lw=2)[0]
ax1.legend([h0,h1,h2], ['Template', 'Source', 'Warped source'], loc='lower right')
# for x in lm0[1:-1]:
# ax1.plot([x,x],[y0[x],y1w[x]], color=c0, ls=':', lw=1)
#annotate:
for ax in [ax0,ax1]:
    ax.axhline(0, color='k', ls=':')
    ax.text(70, 40, 'Medial', va='center')
    ax.text(70,-40, 'Lateral', va='center')
    ax.set_xlabel('Time (%)', size=13)
ax0.set_ylabel('Mediolateral ground reaction force (N)', size=13)
#add panel labels:
ax0.text(-3, 520, '(a)', size=14)
ax1.text(-3, 520, '(b)', size=14)
plt.show()
# #save figure:
# fnamePDF = os.path.join(dir0, 'figs', 'fig_landmarks.pdf')
# plt.savefig(fnamePDF)
15,403 | 117035afc3f3bd5bc923bffbbe99b3484680cdc6 | from erode import erode
def dilate(imgname, outfile=None):
    """Dilate *imgname* by delegating to erode() with its invert flag set."""
    result = erode(imgname, True, outfile)
    return result
|
15,404 | 3950d1dd7327ca89a0b53b84de885269631740de | """
Quiz for Data Structure
Q1: Which of the following sets of properties is true for a list?
Ans: Ordered
Mutable
Indexed
Q2: For a given data structure, ds, what is the correct way of calculating its length?
Ans: len(ds)
Q3: In a dictionary, key-value pairs are indexed by _____.
Ans: Keys
Q4: A set can contain a tuple, but not a list.
Ans: True
Q5: What will be the value of entry at the end of this code?
traffic_light = {"Green": "Go", "Yellow": "Wait", "Red": "Stop"}
entry = traffic_light.popitem()
Ans: ("Red", "Stop")
Q6: An empty set can be made using ______.
Ans: set()
Q7: What is the correct list comprehension for the following code?
string_list = ["Anakin", "Luke", "Rey", "Leia", "Vader"]
result = []
for s in string_list:
if len(s) < 5:
result.append(len(s))
Ans: string_list = ["Anakin", "Luke", "Rey", "Leia", "Vader"]
result = [len(s) for s in string_list if len(s) < 5]
""" |
15,405 | 7221e063c276b4e3bde593227d48303999911fef | """added upvoted
Revision ID: 6dca8c020d2d
Revises:
Create Date: 2021-02-17 18:03:15.218694
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6dca8c020d2d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the `upvotes` table and add
    `questions.correct_answer_id` pointing at the accepted answer."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('upvotes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('question_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column('questions', sa.Column('correct_answer_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'questions', 'answers', ['correct_answer_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision.

    NOTE(review): drop_constraint(None, ...) requires the backend to
    resolve the auto-generated FK name created in upgrade(); this can
    fail on backends without named-constraint support -- TODO confirm.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'questions', type_='foreignkey')
    op.drop_column('questions', 'correct_answer_id')
    op.drop_table('upvotes')
    # ### end Alembic commands ###
|
15,406 | 30b4dfe1b9b7333939e04c37933bc2f04505fcaa |
#Get website url
#Crawler function
#inits the crawl
#makes a thread for each page being crawled
#tracks pages that have been crawled already
#Threading function
#create queues
#Crawled data function
#stores & handles all data from crawled pages
from bs4 import BeautifulSoup
import requests, argparse, pprint
all_links = []
ok_links = []
problem_links = []
def check_response():
print 'Checking URL responses'
for al in all_links:
li = requests.get(al)
if li.status_code == 200:
ok_links.append(al)
else:
problem_links.append(al)
def get_data():
    """Parse the website URL from the command line and start the crawl."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('website_url', help='Enter the full url of your website')
    parsed = arg_parser.parse_args()
    crawler(parsed.website_url)
def crawler(user_url):
r = requests.get(user_url)
print 'Fetching ', user_url, '. Returns: ', r.status_code
soup = BeautifulSoup(r.text, 'html.parser')
for l in soup.find_all('a'):
all_links.append(l.get('href'))
for al in all_links:
if al[0:3] != 'http':
all_links.remove(al)
pp = pprint.PrettyPrinter()
pp.pprint(all_links)
funcs = check_response()
try:
funcs()
except Exception:
print 'Error, stopping'
print problem_links
if __name__ == '__main__':
    # Entry point: parse CLI arguments and start the crawl.
    get_data()
|
15,407 | 4f9e1333bd38949ab147ca3cbe381a6c276524a8 | #
# @lc app=leetcode.cn id=788 lang=python3
#
# [788] 旋转数字
#
class Solution:
    def rotatedDigits(self, N: int) -> int:
        """Count "good" numbers in [0, N].

        A number is good when every digit survives a 180-degree
        rotation (0, 1, 8 map to themselves; 2<->5, 6<->9; 3, 4, 7 are
        invalid) and the rotated number differs from the original.
        """
        rotation = {'0': '0', '1': '1', '2': '5',
                    '5': '2', '6': '9', '8': '8', '9': '6'}
        good = 0
        for value in range(N + 1):
            digits = str(value)
            # any digit without a rotation disqualifies the number
            if any(d not in rotation for d in digits):
                continue
            rotated = ''.join(rotation[d] for d in digits)
            if rotated != digits:
                good += 1
        return good
|
15,408 | 11d3251966e31062663d51b4a378e7ed1fb58152 | f = open('./input.txt','r')
s = f.readlines()
f.close()
# AoC 2017 day 2 part 1: checksum = sum over rows of (max - min).
to_sum = []
for line in s:
    linearray = line.split()
    print (linearray)
    # BUG FIX: the old sentinel `mini == 0 and maxi == 0` re-seeded BOTH
    # bounds whenever the running min/max happened to be 0 (e.g. a row
    # starting with "0 5 2" yielded diff 3 instead of 5).  Use the
    # builtins; guard the empty row (trailing newline), which the old
    # loop silently treated as diff 0.
    values = [int(num) for num in linearray]
    mini = min(values) if values else 0
    maxi = max(values) if values else 0
    print('Min: '+str(mini)+' - Max: '+str(maxi)+' - Diff: '+str(maxi-mini))
    to_sum.append(maxi-mini)
print(to_sum)
# total checksum over all rows
total = sum(to_sum)
print(total)
15,409 | 104e8b2e87e71dca58566d850357a13867d08f01 | import csv
# Load the forest-fire weather features (temp, wind, rain, RH) as floats,
# one row per CSV record, in file order.
_FEATURE_COLUMNS = ('temp', 'wind', 'rain', 'RH')
with open('./forestfires.csv') as csv_data:
    data = [[float(row[column]) for column in _FEATURE_COLUMNS]
            for row in csv.DictReader(csv_data)]
# Example datasets keyed by name, each with candidate cluster counts k.
Examples = {
    'ForestFires': {
        'data': data,
        'k': [2, 3, 4],
    },
}
# DataFrame stuff....KMeans didn't like....
# d = {'Temperatures': temps, 'Winds': winds, 'Rains': rain, 'Relative Humidity': humidity}
# df = pd.DataFrame(data=d)
#
# temp_data = df['Temperatures'].values
# wind_data = df['Winds'].values
# rain_data = df['Rains'].values
# humidity_data = df['Relative Humidity'].values
#
# X = np.matrix(zip(temp_data, wind_data, rain_data, humidity_data))
|
15,410 | 72fe9af1883ffce6f17eeaa507524220e3ebf865 | import pytest
from sklearn.datasets import load_iris
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing.label import label_binarize
def roc_auc_avg_score(y_true, y_score):
    """Multiclass ROC-AUC: binarize the labels, then score against them."""
    classes = sorted(set(y_true))
    binarized = label_binarize(y_true, classes=classes)
    return roc_auc_score(binarized, y_score)
def test_h2o_skearn():
    """Smoke-test H2ODecorator as a sklearn estimator via cross_val_score.

    Skipped entirely when the optional h2o package is not installed.
    """
    pytest.importorskip('h2o')
    from dstools.ml.h2o import H2ODecorator
    iris = load_iris()
    est = H2ODecorator('glm', {})
    # needs_proba: the scorer consumes predicted class probabilities
    scorer = make_scorer(roc_auc_avg_score, needs_proba=True)
    scores = cross_val_score(estimator=est, X=iris.data, y=iris.target, cv=3, scoring=scorer)
    print(scores.mean(), scores.std())
|
15,411 | bfe9db2a9588859aee9ea36e158a5a0c88f003f4 | #_*_encoding:utf-8
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Course(models.Model):
    """A course topic; Lesson rows attach to it via a ForeignKey."""

    name = models.CharField(max_length=50,verbose_name=u'专题')

    class Meta:
        verbose_name = u'专题'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.name

    def to_obj(self):
        """Serialize to a plain dict, without the lesson list."""
        return {'id': self.id, 'name': self.name}

    def to_obj2(self):
        """Serialize to a plain dict, including each lesson's dict."""
        lessons = [lesson.to_obj() for lesson in self.lesson_set.all()]
        return {'id': self.id, 'name': self.name, 'lesson': lessons}
class Lesson(models.Model):
    """A lesson belonging to a Course, with an optional link URL."""

    course = models.ForeignKey(Course,verbose_name=u'所属专题')
    name = models.CharField(max_length=50, verbose_name=u'课程')
    url = models.CharField(max_length=512, verbose_name=u'链接',null=True,blank=True)

    class Meta:
        verbose_name = u'课程'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.name

    def to_obj(self):
        """Serialize to a plain dict for JSON responses."""
        return {'id': self.id, 'name': self.name, 'url': self.url}
|
15,412 | c1b12241d9af12e7a95a86a7d9774f7b9ea5a11f | # -*- coding: utf-8 -*-
# Copyright (C) Cardiff University (2018-2021)
# SPDX-License-Identifier: MIT
"""Tests for :mod:`gwosc.datasets`
"""
import re
from unittest import mock
import pytest
from .. import datasets
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
# Mocked payload for gwosc.api.fetch_dataset_json: runs + events.
DATASET_JSON = {
    'events': {
        'GW150914': {'GPStime': 12345, 'detectors': ['H1', 'L1']},
        'GW151226': {'GPStime': 12347, 'detectors': ['H1', 'L1']},
    },
    'runs': {
        'S1': {'GPSstart': 0, 'GPSend': 1, 'detectors': ['H1', 'L1', 'V1']},
        'tenyear': None,  # internal dataset that must be filtered out
    },
}
# Mocked catalogue payload (per-event file listing).
CATALOG_JSON = {
    'data': {
        'GW150914': {
            'files': {
                'DataRevisionNum': 'R1',
                'OperatingIFOs': "H1 L1",
                'H1': {},
                'L1': {},
            },
        },
    }
}
# Mocked payload for gwosc.api.fetch_filtered_events_json (query_events).
EVENT_JSON = {
    'events': {
        'mock-event-1': {
            'GPS': 1240215503.0,
            'luminosity_distance': 159.0,
            'mass_1_source': 1.74,
        },
        'mock-event-2': {
            'GPS': 1240215503.0,
            'luminosity_distance': 160.0,
            'mass_1_source': 2.0,
        },
        'mock-event-3': {
            'GPS': 1240215503.0,
            'luminosity_distance': 150.0,
            'mass_1_source': 2.1,
        }
    }
}
@pytest.mark.remote
def test_find_datasets():
    """find_datasets() lists runs and events, hiding internal datasets."""
    sets = datasets.find_datasets()
    for dset in ('S6', 'O1', 'GW150914-v1', 'GW170817-v3'):
        assert dset in sets
    assert 'tenyear' not in sets
    assert 'history' not in sets
@pytest.mark.remote
def test_find_datasets_detector():
    """Filtering by detector keeps only datasets that include it."""
    v1sets = datasets.find_datasets('V1')
    assert 'GW170817-v3' in v1sets
    assert 'GW150914-v1' not in v1sets
    assert datasets.find_datasets('X1', type="run") == []
@pytest.mark.remote
def test_find_datasets_type():
    """type='run' yields names matching the run naming scheme."""
    runsets = datasets.find_datasets(type='run')
    assert 'O1' in runsets
    run_regex = re.compile(
        r'\A([OS]\d+([a-z]|[A-Z]+)?|BKGW\d{6})(_\d+KHZ)?(_[RV]\d+)?\Z',
    )
    for dset in runsets:
        assert run_regex.match(dset)
    # unknown types return an empty list rather than raising
    assert datasets.find_datasets(type='badtype') == []
@pytest.mark.remote
def test_find_datasets_segment():
    """segment= keeps only datasets overlapping the GPS interval."""
    sets = datasets.find_datasets(segment=(1126051217, 1137254417))
    assert "GW150914-v1" in sets
    assert "GW170817" not in sets
@pytest.mark.remote
def test_find_datasets_match():
    """match= filters dataset names by pattern."""
    assert "O1" not in datasets.find_datasets(match="GW")
@pytest.mark.remote
def test_find_datasets_event_version_detector():
    """Regression: combined type/version/detector filters must coexist."""
    # this raises a ValueError with gwosc-0.5.0
    sets = datasets.find_datasets(type='event', version=1, detector='L1')
    assert "GW150914-v1" in sets
    assert "GW150914-v3" not in sets  # v3
@mock.patch("gwosc.datasets._run_datasets", return_value=[])
def test_find_datasets_warning(_):
    """Requesting run *versions* should emit a UserWarning."""
    with pytest.warns(UserWarning):
        datasets.find_datasets(type='run', version=1)
@pytest.mark.remote
def test_event_gps():
    """event_gps() returns the catalogued GPS time for an event."""
    assert datasets.event_gps('GW170817') == 1187008882.4
@mock.patch(
    'gwosc.api._fetch_allevents_event_json',
    return_value={"events": {"GW150914": {
        'GPS': 12345,
        'something else': None,
    }}},
)
def test_event_gps_local(fetch):
    """event_gps() reads the 'GPS' field from the (mocked) event JSON."""
    assert datasets.event_gps('GW150914') == 12345
@pytest.mark.remote
def test_event_segment():
    """event_segment() returns the (start, end) GPS span of the data."""
    assert datasets.event_segment("GW170817") == (1187006835, 1187010931)
@mock.patch(
    'gwosc.api._fetch_allevents_event_json',
    mock.MagicMock(return_value={"events": {"GW150914": {
        "GPS": 12345,
        "something else": None,
        "strain": [
            {
                "GPSstart": 0,
                "duration": 32,
                "detector": "X1",
            },
            {
                "GPSstart": 10,
                "duration": 32,
                "detector": "Y1",
            },
        ],
    }}}),
)
def test_event_segment_local():
    # segment spans min(GPSstart) .. max(GPSstart + duration) of the
    # strain entries, optionally restricted to one detector
    assert datasets.event_segment("GW170817") == (0, 42)
    assert datasets.event_segment("GW170817", detector="Y1") == (10, 42)
@pytest.mark.remote
def test_event_at_gps():
    """event_at_gps() resolves a GPS time to the nearest event in tol."""
    assert datasets.event_at_gps(1187008882) == 'GW170817'
    with pytest.raises(ValueError) as exc:
        datasets.event_at_gps(1187008882, tol=.1)
    assert str(exc.value) == 'no event found within 0.1 seconds of 1187008882'
@mock.patch(
    'gwosc.api.fetch_allevents_json',
    mock.MagicMock(return_value={"events": {
        "GW150914": {"GPS": 12345.5, "commonName": "GW150914"},
        "GW150915": {"GPS": 12346.5, "commonName": "GW150915"},
    }}),
)
def test_event_at_gps_local():
    # 12345 is within the default tolerance of GW150914 only
    assert datasets.event_at_gps(12345) == 'GW150914'
    with pytest.raises(ValueError):
        datasets.event_at_gps(12349)
@pytest.mark.remote
def test_event_detectors():
    """event_detectors() returns the set of contributing interferometers."""
    assert datasets.event_detectors("GW150914") == {"H1", "L1"}
    assert datasets.event_detectors("GW170814") == {"H1", "L1", "V1"}
@mock.patch(
    "gwosc.api._fetch_allevents_event_json",
    mock.MagicMock(return_value={
        "events": {"test": {"strain": [
            {"detector": "A1"},
            {"detector": "B1"},
        ]}},
    }),
)
def test_event_detectors_local():
    # detectors are collected from the event's strain-file entries
    assert datasets.event_detectors("test") == {"A1", "B1"}
@pytest.mark.remote
def test_run_segment():
    """run_segment() returns the GPS interval of an observing run."""
    assert datasets.run_segment('O1') == (1126051217, 1137254417)
    with pytest.raises(ValueError) as exc:
        datasets.run_segment('S7')
    assert str(exc.value) == 'no run dataset found for \'S7\''
@mock.patch(
    'gwosc.api.fetch_dataset_json',
    mock.MagicMock(return_value=DATASET_JSON),
)
def test_run_segment_local():
    assert datasets.run_segment('S1') == (0, 1)
    with pytest.raises(ValueError):
        datasets.run_segment('S2')
@pytest.mark.remote
def test_run_at_gps():
    """run_at_gps() maps a GPS time to the observing run containing it."""
    assert datasets.run_at_gps(1135136350) in {'O1', 'O1_16KHZ'}
    with pytest.raises(ValueError) as exc:
        datasets.run_at_gps(0)
    assert str(exc.value) == 'no run dataset found containing GPS 0'
@mock.patch(
    'gwosc.api.fetch_dataset_json',
    mock.MagicMock(return_value=DATASET_JSON),
)
def test_run_at_gps_local():
    assert datasets.run_at_gps(0) == 'S1'
    with pytest.raises(ValueError):
        datasets.run_at_gps(10)
@pytest.mark.remote
def test_dataset_type():
    """dataset_type() classifies names as run / event / catalog."""
    assert datasets.dataset_type("O1") == "run"
    assert datasets.dataset_type("GW150914-v1") == "event"
    assert datasets.dataset_type("GWTC-1-confident") == "catalog"
    with pytest.raises(ValueError):
        datasets.dataset_type("invalid")
@mock.patch(
    'gwosc.datasets.find_datasets',
    mock.MagicMock(side_effect=[["testrun"], [], ["testevent"], [], [], []]),
)
def test_dataset_type_local():
    # side_effect order mirrors the type-probing order inside dataset_type
    assert datasets.dataset_type("testevent") == "event"
    with pytest.raises(ValueError):
        datasets.dataset_type("invalid")
@pytest.mark.remote
def test_query_events():
    """query_events() filters the event catalogue by parameter ranges."""
    events = datasets.query_events(
        select=["10 <= luminosity-distance <= 200"]
    )
    # all versions of the matching event should be returned
    assert 'GW190425-v1' in events
    assert 'GW190425-v2' in events
    assert 'GW190425_081805-v3' in events
@mock.patch(
    'gwosc.api.fetch_filtered_events_json',
    mock.MagicMock(return_value=EVENT_JSON),
)
def test_query_events_local():
    """query_events() returns every event name from the (mocked) response."""
    events = datasets.query_events(
        select=["mass-1-source >= 1.4", "10 <= luminosity-distance <= 200"]
    )
    assert 'mock-event-1' in events
    assert 'mock-event-2' in events
    # was a duplicated 'mock-event-2' assertion; the third mocked event
    # is also returned and was never actually checked
    assert 'mock-event-3' in events
|
15,413 | 810e7fc3c7fead9bee6d9f20ffd3bde32a774940 | n = int(input())
scores = [int(x) for x in input().split()]
# Scale every score against the best one.
# PERF FIX: max(scores) was recomputed inside the loop, making the
# whole thing O(n^2); hoist it out once.
best = max(scores) if scores else 0
fix_scores = [scores[i] / best * 100 for i in range(n)]
# average of the rescaled scores
fix_average = sum(fix_scores) / n
print(float(fix_average))
15,414 | 23c95538ac9438eb6ff9e7381cccfeeac40c3d52 | import numpy as np
import cv2
# Loading image
image = cv2.imread("coins.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Smooth first so the thresholding is less sensitive to pixel noise.
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
cv2.imshow("Blurred Image", blurred)
# Applying adaptive thresholding
# Mean: threshold = mean of the 11x11 neighbourhood minus constant 4.
meanThresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 4)
cv2.imshow("Mean Thresh", meanThresh)
# Gaussian: threshold = Gaussian-weighted mean of 15x15 minus constant 3.
gaussianThresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 3)
cv2.imshow("Gaussian Thresh", gaussianThresh)
# Side-by-side comparison: input, mean result, Gaussian result.
cv2.imshow("Comparison", np.hstack([blurred, meanThresh, gaussianThresh]))
cv2.waitKey(0)
15,415 | bf5a9016fbf437a22afd629f2c94b16470363521 | # python conv_reddit.py stopword_file reddit_pickle_files
# converting reddit pickle files to input files required for further processing
# files created :
# vocab1.txt : question vocab
# vocab2.txt : word vocab
# input_sc.txt : question word count data; (q_id word_id count)
# aux_data : question aux data; (link_Id name reply)
import operator
import sys
import pickle
import re
from sets import Set
from collections import Counter
from wordnet_modules import *
import multiprocessing
from joblib import Parallel, delayed
stopwords=Set()  # words excluded from the vocabulary
vocab=Set()      # question vocabulary (after stopword removal)
vocab_map={}     # word -> integer id
# expanding question vocab by adding disambiguated synonyms
def processQuestion(q):
    """Map one question string to a {word_id: count} dict.

    Lowercases and strips punctuation, drops stopwords, then adds the
    WSD-disambiguated synonym lemmas (via get_disambiguated_synonym)
    that already appear in the global vocab.
    """
    output_dict={}
    q_proccessed=re.sub(r'[^a-zA-Z0-9 ]',r'',q.lower())
    l=Counter(q_proccessed.strip().split())
    for word,count in l.iteritems():
        if(word in stopwords):
            continue
        output_dict[vocab_map[word]]=count
    # Finding wsd sysnset for words in sentence and adding their lemmas
    for word,lemma in get_disambiguated_synonym(q_proccessed):
        if(lemma in vocab):
            # a synonym inherits the count of the word it disambiguates
            output_dict[vocab_map[lemma]]=l[word]
    print "Done"
    return output_dict
def main():
    """Read stopwords + reddit pickle files, build the vocabulary, and
    write the vocab/aux/word-count files used by later processing."""
    global stopwords,vocab,vocab_map
    slist=open(sys.argv[1],"r")
    for line in slist:
        stopwords.add(line.strip().lower())
    print "Number of stopwords : ",len(stopwords)
    questions=[]
    questions_aux_data=[]
    # every remaining CLI argument is a pickled list of reddit comments
    for i in range(2,len(sys.argv)):
        obj=pickle.load( open( sys.argv[i], "rb" ) )
        for j in range(len(obj)):
            # aux line: link_id name reply-flag (1 if it got a reply)
            questions_aux_data.append( obj[j]["link_id"] + " " + obj[j]["name"] + " " + str(1 if obj[j]['reply'] != 'None' else 0 ) )
            questions.append(obj[j]['body'].replace("\n"," "))
            vocab|=Set(re.sub(r'[^a-zA-Z0-9 ]',r'',obj[j]['body'].replace("\n"," ").lower()).strip().split())
    print "vocab size : ",len(vocab)
    vocab=vocab.difference(stopwords)
    print "vocab size after removing stopwords : ",len(vocab)
    print "Number of questions : ",len(questions)
    vocab_list=list(vocab)
    counter=0
    # assign each word a stable integer id
    for word in vocab_list:
        vocab_map[word]=counter
        counter+=1
    output_dict={}
    num_cores = multiprocessing.cpu_count()
    outupt_q_map = Parallel(n_jobs=num_cores, verbose=2)(delayed(processQuestion)(q) for q in questions)
    q_count=0
    # re-key the parallel results by question index
    for q_map in outupt_q_map:
        output_dict[q_count]=q_map
        q_count+=1
    v1=open("vocab1.txt","w") # question vocab
    v2=open("vocab2.txt","w") # word vocab
    v3=open("aux_data.txt","w") # question aux data
    output_file=open("input_sc.txt","w") # question word count data
    for word in vocab_list:
        v2.write(word+"\n")
    for q in questions:
        v1.write(q.encode("UTF-8")+"\n")
    for aux_info in questions_aux_data:
        v3.write(aux_info+"\n")
    # one "(question_id word_id count)" triple per line
    for key,value in output_dict.iteritems():
        for word,count in value.iteritems():
            output_file.write(str(key)+" "+str(word)+" "+str(count)+"\n")
    v1.close()
    v2.close()
    v3.close()
    output_file.close()
if __name__=="__main__":
    main()
|
15,416 | 8df47f93c2c2c6fcf6c2b44b5a93f88e33249806 | import sys
s_in = sys.stdin
# Echo stdin lines until the sentinel 'exitc' is read.
# BUG FIX: readline() returns '' at EOF, which never equals 'exitc',
# so the original spun forever printing empty lines once input ended.
while True:
    content = s_in.readline()
    if not content:  # EOF
        break
    content = content.rstrip('\n')
    if content == 'exitc':
        break
    print(content)
|
15,417 | a7779ec3f0964d0538a3d130d0df53dc6b33ffa2 | num = int(input("Enter a number: "))
# An even number prints "True", an odd one prints "False"
# (bools render exactly like the original string literals).
print(num, num % 2 == 0)
|
15,418 | 1bc959a7c01bac940c8725126fa77c80326f5a45 | # coding = utf-8
'''
@author = super_fazai
@File : use_bs4_css选择器.py
@connect : superonesfazai@gmail.com
'''
from bs4 import BeautifulSoup
html = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
# 创建 Beautiful Soup 对象
soup = BeautifulSoup(html, 'lxml')
# 1. 通过标签名查找
print(soup.select('title')) # [<title>The Dormouse's story</title>]
print(soup.select('a'))
print(soup.select('b'))
# 2. 通过类名查找
print(soup.select('.sister'))
# 3.通过id名查找
print(soup.select('#link1'))
# 4. 组合查找
print(soup.select('p #link1'))
print(soup.select('head > title'))
# 5. 属性查找
print(soup.select('a[class="sister"]'))
print(soup.select('a[href="http://example.com/elsie"]'))
print(soup.select('p a[href="http://example.com/elsie"]'))
# 6. 获取内容
# 以上的 select 方法返回的结果都是列表形式,可以遍历形式输出,然后用 get_text() 方法来获取它的内容
print(type(soup.select('title')))
print(soup.select('title')[0].get_text())
for title in soup.select('title'):
print(title.get_text()) |
15,419 | 2af98fb79655a9fce9dadecb1ba7962e53b280cd | N = int(input())
cnt = list(map(int, input().split()))
# Reconstruct an ordering from "empty slots to skip" counts: person i+1
# must have exactly cnt[i] free slots before their position (looks like
# the classic "line them up by taller-people-in-front" problem --
# TODO confirm against the original problem statement).
answer = [-1] * N  # -1 marks a still-empty slot
for i, num in enumerate(cnt):
    curr_num = num
    idx = 0
    # advance past `num` empty slots, skipping already-filled ones
    while curr_num or answer[idx] != -1:
        if answer[idx] == -1:
            curr_num -= 1
        idx += 1
    answer[idx] = i+1
print(" ".join(list(map(str, answer))))
|
15,420 | 4b3e5b3a15c638256323dfa7d2a1bab0bb6bed74 | #TASK - 2
#Program for 7 different methods of lists
#To initialize the list
random = [5, 7, 29, 37, 18]
#To print this list
print ("The list of these random numbers: ", random)
#The index of the elements
random.index(37)
print ("The index of the element 37 is: ", random.index(37))
#To Change the element at a particular index
random[4] = 10
print ("The updated list is: ", random)
#Sorting the list
random.sort()
print ("The sorted list is: ", random)
#To add elements in the list
random.append(33)
print ("The list after adding the element at the end of the list: ", random)
#Remove elements from a list
random[0:2] = []
print ("The list after removing the elements: ", random)
|
15,421 | 48ff85b2277fc8c85c2a523a086b44b8ec697602 |
#import the argv method from the sys module
from sys import argv
#declare the expected command line arguments
script,first_var,second_var,third_var = argv
print("The script using which the program was run" , script)
print("The first argument passed was",first_var)
#print the length of the number of arguments
print(f"The total number of arguments is {len(argv)}")
#print the array of script and arguments received on the command line
print(argv)
#another way to write the above is to just import sys and use sys.argv in the code to call the argv method. Clearly this is inefficient
import sys
#declare the expected command line arguments
script,first_var,second_var,third_var = sys.argv
print("The script using which the program was run" , script)
print("The first argument passed was",first_var)
#print the length of the number of arguments
print(f"The total number of arguments is {len(sys.argv)}")
#print the array of script and arguments received on the command line
print(sys.argv)
|
15,422 | 49ea7f3eee8aab1bd4c132e392cb7b0d20ed3cdd | #For a given array A of N integers and a sequence S of N integers from the set {−1, 1}, we define val(A, S) as follows:
#
#val(A, S) = |sum{ A[i]*S[i] for i = 0..N−1 }|
#
#(Assume that the sum of zero elements equals zero.)
#
#For a given array A, we are looking for such a sequence S that minimizes val(A,S).
#
#Write a function:
#
#def solution(A)
#
#that, given an array A of N integers, computes the minimum value of val(A,S) from all possible values of val(A,S) for all possible sequences S of N integers from the set {−1, 1}.
#
#For example, given array:
#
# A[0] = 1
# A[1] = 5
# A[2] = 2
# A[3] = -2
#your function should return 0, since for S = [−1, 1, −1, 1], val(A, S) = 0, which is the minimum possible value.
#
#Write an efficient algorithm for the following assumptions:
#
#N is an integer within the range [0..20,000];
#each element of array A is an integer within the range [−100..100].
def solution(A):
    """Codility MinAbsSum: minimum of |sum(A[i]*S[i])| over sign choices S.

    Equivalent to splitting abs(A) into two stacks with minimal
    difference.  BUG FIX: the previous greedy (largest value into the
    lighter stack) is not optimal -- e.g. [8, 7, 6, 5, 4] has a perfect
    split 8+7 = 6+5+4 but greedy returned 4.  Since |A[i]| <= 100, use
    the classic counting subset-sum DP: for each reachable sum track how
    many copies of the current value remain usable.
    """
    values = [abs(x) for x in A]
    total = sum(values)
    if total == 0:
        return 0  # empty input or all zeros
    # occurrence count of each distinct absolute value
    counts = {}
    for v in values:
        counts[v] = counts.get(v, 0) + 1
    half = total // 2
    # dp[j] = copies of the current value still usable after reaching
    # sum j, or -1 if sum j is unreachable so far
    dp = [-1] * (half + 1)
    dp[0] = 0
    for value, available in counts.items():
        if value == 0:
            continue
        for j in range(half + 1):
            if dp[j] >= 0:
                dp[j] = available          # reachable without this value
            elif j >= value and dp[j - value] > 0:
                dp[j] = dp[j - value] - 1  # spend one more copy
    best = max(j for j in range(half + 1) if dp[j] >= 0)
    # one stack holds `best`, the other `total - best`
    return total - 2 * best
|
15,423 | 8326e7b51751ec0d5d2bd8d85f39bff87e644695 | # -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request, render_template
author_blueprint = Blueprint("author", __name__)
from models import Author
# @author_blueprint.route("/", methods=["GET"])
# def index():
# "首页入口"
# return render_template("author2.html")
@author_blueprint.route("/author", methods=["GET", "POST"])
def author_list():
    "View that lists all authors (GET) or adds a new one (POST)."
    if request.method == "GET":
        # serialize every author row to a plain dict
        alist = Author.query.all()
        alist2 = [author.to_dict() for author in alist]
        return jsonify(alist = alist2)
    if request.method == "POST":
        # create an author from the posted form field
        author_name = request.form.get("author_name")
        author = Author()
        author.name = author_name
        author.save()
        return jsonify(msg="ok")
@author_blueprint.route("/author/<id>", methods=["GET", "PUT", "DELETE"])
def author_one(id):
    "View operating on a single author selected by id."
    if request.method == "GET":
        # look up the author by id
        author = Author.query.get(id)
        print(id, author)
        return jsonify(author=author.to_dict())
    if request.method == "DELETE":
        # hard-delete the author by id
        author = Author.query.get(id)
        author.delete()
        return jsonify(msg="ok")
    if request.method == "PUT":
        # rename the author
        author = Author.query.get(id)
        name = request.form.get("name")
        print(name, author.name)
        author.name = name
        author.save()
        return jsonify(msg="ok")
|
15,424 | 25d04485b006db479a0e7355cc62f4070e02f09e | """
Check if the given string is a correct time representation of the 24-hour clock.
Example
For time = "13:58", the output should be
validTime(time) = true;
For time = "25:51", the output should be
validTime(time) = false;
For time = "02:76", the output should be
validTime(time) = false.
"""
def validTime(time):
    """Return True iff *time* is a valid 24-hour clock string "HH:MM".

    Hardened against malformed input: the original indexed
    time.split(':') blindly, so strings without exactly one ':' raised
    IndexError (or ValueError on non-numeric parts) and "1:2:3" was
    wrongly accepted; per the contract above they should just be False.
    """
    parts = time.split(':')
    if len(parts) != 2 or not all(part.isdigit() for part in parts):
        return False
    hours, minutes = int(parts[0]), int(parts[1])
    return 0 <= hours <= 23 and 0 <= minutes <= 59
time = "13:58"
time = "25:51"
time = "02:76"
print(validTime(time)) |
15,425 | a7668d53942ac2cf33cf026a90c97bdc0931ae09 | def sum(seq):
def add(x,y): return x+y
return reduce(add, seq, 0)
# Sum 1..5 via the local sum() defined above (which shadows the builtin).
result=sum(range(1,6))
print result
15,426 | a63134c940d6d660aaba64a689a6674aed56e73b | class Employee:
company="camel"
salary=100
location="kolkata"
    @classmethod
    def salaryChange(cls,sal):
        # Rebinds the *class* attribute, so the change is visible through
        # every instance that has not shadowed `salary` on itself.
        cls.salary= sal
e = Employee()
print(e.salary)         # 100: attribute lookup falls back to the class
e.salaryChange(400)
print(e.salary)         # 400: the class attribute was rebound
print(Employee.salary)  # 400 on the class itself as well
15,427 | 49498b9428eb1d478f2ff8f991214122d2b6395b | #AI_bot
import config
import euclid
def norm(A):
    """Sign of A: A/|A| for nonzero A, otherwise 0."""
    if A == 0:
        return 0
    return A/abs(A)
def AI_commands(state):
    """Compute the AI (player 2) ball-target commands for one tick.

    Reads the two player-2 balls and the puck from *state* and returns a
    serializable input dict {'letters2': {x, y}, 'arrows2': {x, y},
    'seq', 'type'} steering the balls toward the puck (each target is
    the vector to the puck plus a one-unit overshoot in that direction).
    config.difficulty selects the strategy:
      0 = both balls chase the puck directly;
      1 = as 0, but a ball drops below the puck first (+150 y bias);
      2 = ball 1 holds a defensive midpoint between puck and own goal,
          ball 2 chases as in 1.
    """
    if (config.difficulty==0):
        # ball1_1, ball1_2, ball2_1, ball2_2, puck, walls_left, walls_right, stopPower, wall_player1_goal, wall_player2_goal, goal_player1, goal_player2 = state
        ball2_1 = state.getByIdent('letters2')
        ball2_2 = state.getByIdent('arrows2')
        puck = state.getByIdent('puck')
        #
        #
        # Somehow get following variables
        #
        #
        #
        pos_ball1x=ball2_1.pos.x
        pos_ball1y=ball2_1.pos.y
        pos_ball2x=ball2_2.pos.x
        pos_ball2y=ball2_2.pos.y
        pos_bigballx=puck.pos.x
        pos_bigbally=puck.pos.y
        num="2"
        pos_goalx=config.field_width/2
        pos_goaly=0
        # In theory powerup locations also.
        # And opponent's balls also
        #AI brain starts:
        #Point where balls want is pos_bigball + norm(pos_bigball)
        #2 possibility: by keys or straightly by updating state. Last one might cause problems, so:
        #ball1
        # NOTE(review): (d)/abs(d) adds a unit overshoot toward the puck;
        # raises ZeroDivisionError if a ball shares a coordinate with it.
        ball1x=pos_bigballx-pos_ball1x + (pos_bigballx-pos_ball1x)/abs(pos_bigballx-pos_ball1x)
        ball1y=pos_bigbally-pos_ball1y + (pos_bigbally-pos_ball1y)/abs(pos_bigbally-pos_ball1y)
        ball2x=pos_bigballx-pos_ball2x + (pos_bigballx-pos_ball2x)/abs(pos_bigballx-pos_ball2x)
        ball2y=pos_bigbally-pos_ball2y + (pos_bigbally-pos_ball2y)/abs(pos_bigbally-pos_ball2y)
        serial= { 'letters' + num: {'x': int(ball1x), 'y': int(ball1y)}, 'arrows' + num: {'x': ball2x, 'y': ball2y}, 'seq':'0', 'type':'input'}
        # print serial
    elif(config.difficulty==1):
        # ball1_1, ball1_2, ball2_1, ball2_2, puck, walls_left, walls_right, stopPower, wall_player1_goal, wall_player2_goal, goal_player1, goal_player2 = state
        ball2_1 = state.getByIdent('letters2')
        ball2_2 = state.getByIdent('arrows2')
        puck = state.getByIdent('puck')
        #
        #
        # Somehow get following variables
        #
        #
        #
        pos_ball1x=ball2_1.pos.x
        pos_ball1y=ball2_1.pos.y
        pos_ball2x=ball2_2.pos.x
        pos_ball2y=ball2_2.pos.y
        pos_bigballx=puck.pos.x
        pos_bigbally=puck.pos.y
        num="2"
        pos_goalx=config.field_width/2
        pos_goaly=0
        # In theory powerup locations also.
        # And opponent's balls also
        #AI brain starts:
        #Point where balls want is pos_bigball + norm(pos_bigball)
        #2 possibility: by keys or straightly by updating state. Last one might cause problems, so:
        #ball1
        ball1x=pos_bigballx-pos_ball1x + (pos_bigballx-pos_ball1x)/abs(pos_bigballx-pos_ball1x)
        ball1y=pos_bigbally-pos_ball1y + (pos_bigbally-pos_ball1y)/abs(pos_bigbally-pos_ball1y)
        ball2x=pos_bigballx-pos_ball2x + (pos_bigballx-pos_ball2x)/abs(pos_bigballx-pos_ball2x)
        ball2y=pos_bigbally-pos_ball2y + (pos_bigbally-pos_ball2y)/abs(pos_bigbally-pos_ball2y)
        # if a ball is (nearly) above the puck, bias it below first so it
        # pushes the puck toward the opponent rather than into own half
        if (pos_bigbally-pos_ball1y+10)>0:
            ball1y=ball1y+150
        if (pos_bigbally-pos_ball2y+10)>0:
            ball2y=ball2y+150
        # if (pos_bigballx-pos_ball1x)
        # ball2x=pos_ball2x;
        # ball2y=pos_ball2y;
        # ball2x=pos_bigballx-pos_ball2x + (pos_bigballx-pos_ball2x)/abs(pos_bigballx-pos_ball2x)
        # ball2y=pos_bigbally-pos_ball2y + (pos_bigbally-pos_ball2y)/abs(pos_bigbally-pos_ball2y)
        serial= { 'letters' + num: {'x': int(ball1x), 'y': int(ball1y)}, 'arrows' + num: {'x': ball2x, 'y': ball2y}, 'seq':'0', 'type':'input'}
    elif(config.difficulty==2):
        # ball1_1, ball1_2, ball2_1, ball2_2, puck, walls_left, walls_right, stopPower, wall_player1_goal, wall_player2_goal, goal_player1, goal_player2 = state
        ball2_1 = state.getByIdent('letters2')
        ball2_2 = state.getByIdent('arrows2')
        puck = state.getByIdent('puck')
        #
        #
        # Somehow get following variables
        #
        #
        #
        pos_ball1x=ball2_1.pos.x
        pos_ball1y=ball2_1.pos.y
        pos_ball2x=ball2_2.pos.x
        pos_ball2y=ball2_2.pos.y
        pos_bigballx=puck.pos.x
        pos_bigbally=puck.pos.y
        num="2"
        pos_goalx=config.field_width/2
        pos_goaly=0
        pos_ourgoal_x=config.field_width/2
        pos_ourgoal_y=config.field_height
        # In theory powerup locations also.
        # And opponent's balls also
        #AI brain starts:
        #Point where balls want is pos_bigball + norm(pos_bigball)
        #2 possibility: by keys or straightly by updating state. Last one might cause problems, so:
        #ball1
        # ball 1 plays defense: target the puck/own-goal midpoint with a
        # two-unit overshoot; ball 2 chases the puck as before
        ball1x=(pos_bigballx + pos_ourgoal_x)/2-pos_ball1x + 2*((pos_bigballx+pos_ourgoal_x)/2-pos_ball1x)/abs((pos_bigballx+pos_ourgoal_x)/2-pos_ball1x)
        ball1y=(pos_bigbally + pos_ourgoal_y)/2-pos_ball1y + 2*((pos_bigbally +pos_ourgoal_y)/2-pos_ball1y)/abs((pos_bigbally +pos_ourgoal_y)/2-pos_ball1y)
        ball2x=pos_bigballx-pos_ball2x + (pos_bigballx-pos_ball2x)/abs(pos_bigballx-pos_ball2x)
        ball2y=pos_bigbally-pos_ball2y + (pos_bigbally-pos_ball2y)/abs(pos_bigbally-pos_ball2y)
        # keep the defender from drifting too far up-field
        if (pos_ball1y)<pos_ourgoal_y*0.8:
            ball1y=pos_ball1y+50
        if (pos_bigbally-pos_ball2y+10)>0:
            ball2y=ball2y+150
        # if (pos_bigballx-pos_ball1x)
        # ball2x=pos_ball2x;
        # ball2y=pos_ball2y;
        # ball2x=pos_bigballx-pos_ball2x + (pos_bigballx-pos_ball2x)/abs(pos_bigballx-pos_ball2x)
        # ball2y=pos_bigbally-pos_ball2y + (pos_bigbally-pos_ball2y)/abs(pos_bigbally-pos_ball2y)
        serial= { 'letters' + num: {'x': int(ball1x), 'y': int(ball1y)}, 'arrows' + num: {'x': ball2x, 'y': ball2y}, 'seq':'0', 'type':'input'}
    return serial
15,428 | edeee2e685e3a43e3c3b82b842dced515269224b | #!/usr/bin/env python
import rospy
import time
from demo_test.srv import *
def test_server_fun():
# init service
rospy.wait_for_service('teleop_ctrl_service2')
rospy.wait_for_service('teleop_ctrl_service3')
rospy.wait_for_service('teleop_ctrl_service4')
teleop_srv_init2 = rospy.ServiceProxy('teleop_ctrl_service2',teleop_ctrl)
teleop_srv_init3 = rospy.ServiceProxy('teleop_ctrl_service3',teleop_ctrl)
teleop_srv_init4 = rospy.ServiceProxy('teleop_ctrl_service4',teleop_ctrl)
# takeoff
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.ARM_TAKEOFF)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.ARM_TAKEOFF)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.ARM_TAKEOFF)
print resp
time.sleep(10)
# fly to point 1
print 'stage 1'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 1.0,
hover_pos_y = 1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = -1.0,
hover_pos_y = 1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = -1.414,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
time.sleep(5)
# fly to point 2
print 'stage 2'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = -1.0,
hover_pos_y = -1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 1.0,
hover_pos_y = -1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = 1.414,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
time.sleep(20)
# fly to point 3
print 'stage 3'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = -1.0,
hover_pos_y = -1.0,
hover_pos_z = -1.3,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 1.0,
hover_pos_y = -1.0,
hover_pos_z = -1.3,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = 1.414,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
time.sleep(3)
# fly to point 4
print 'stage 4'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 1.0,
hover_pos_y = 1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = -1.0,
hover_pos_y = 1.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = -1.414,
hover_pos_z = -1.3,
hover_pos_yaw = -1.57)
print resp
time.sleep(20)
# fly to point 5
print 'stage 5'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = -1.2,
hover_pos_y = 0.0,
hover_pos_z = -1.2,
hover_pos_yaw = 0.0)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = 0.0,
hover_pos_z = -1.3,
hover_pos_yaw = 0.0)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 1.2,
hover_pos_y = 0.0,
hover_pos_z = -1.2,
hover_pos_yaw = 0.0)
print resp
time.sleep(13)
# fly to point 6
print 'stage 6'
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = 0.0,
hover_pos_z = -1.2,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = 1.2,
hover_pos_z = -1.3,
hover_pos_yaw = -1.57)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_HOVER_POS,
hover_pos_x = 0.0,
hover_pos_y = -1.2,
hover_pos_z = -1.3,
hover_pos_yaw = -1.57)
print resp
time.sleep(10)
# land and disarm
resp = teleop_srv_init2(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.LAND_DISARM)
print resp
resp = teleop_srv_init3(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.LAND_DISARM)
print resp
resp = teleop_srv_init4(teleop_ctrl_mask = teleop_ctrlRequest.MASK_ARM_DISARM,
base_contrl = teleop_ctrlRequest.LAND_DISARM)
print resp
print 'task done!'
time.sleep(2)
return resp
if __name__ == "__main__":
    # Entry point: run the full multi-vehicle demo flight once, then exit.
    print "start test teleop control service"
    test_server_fun()
    print 'exit!'
|
15,429 | 7fc259fdf9bcfe0d858c0f2baeb5e46a630a1dec | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the UCI bike-rental dataset (hourly rental records).
df = pd.read_csv('../data/azureml/Bike_Rental_UCI_dataset.csv')
def day_of_week():
    """Return a lookup table mapping the numeric ``weekday`` code (0-6)
    to a short day name.

    In this dataset weekday code 0 is Sunday (the first record in the
    file falls on a Saturday, code 6).
    """
    names = ["Sun", "Mon", "Tue", "Wed", "Thr", "Fri", "Sat"]
    # Build the columns directly so 'weekday' keeps an integer dtype;
    # the previous transpose-of-mixed-rows construction produced object
    # columns, making the later merge on 'weekday' type-inconsistent.
    return pd.DataFrame({'weekday': range(7), 'dayOfWeek': names},
                        columns=['weekday', 'dayOfWeek'])
# Attach the readable day name to every row via the weekday code.
# (.head() calls are notebook leftovers; their results are discarded.)
days_df = day_of_week()
days_df.head()
df = pd.merge(df, days_df, on='weekday', how='outer')
df.head()
def set_days(df):
    """Add an integer ``days`` column counting days since the first record.

    Rows are hourly, so the day number is row position // 24.  A plain
    list is assigned positionally, avoiding the index-alignment pitfall
    of assigning a fresh ``pd.Series`` to a frame whose index is not
    0..n-1 (which silently yields NaNs).  The frame is modified in place
    and also returned for convenience.
    """
    df['days'] = [i // 24 for i in range(df.shape[0])]
    return df
# Tag each hourly row with its day number (mutates df in place).
set_days(df)
print("Done...")
15,430 | ea206c515bdd2c30badfc84c3f4b20630100052b | import glob
import numpy as np
import astropy.io.fits as pyfits
import commands
import sys
from drizzlepac import tweakreg, astrodrizzle
import collections
import copy
set_num = sys.argv[1]
def do_it(cmd):
    # Echo a shell command, run it, and echo its combined stdout/stderr.
    print cmd
    print commands.getoutput(cmd)
def get_filter(the_header):
    """Return the active filter name from a FITS primary header.

    WFC3-style headers carry a single FILTER keyword; ACS-style headers
    carry FILTER1/FILTER2 where one of the two is a CLEAR element.
    """
    try:
        return the_header["FILTER"]
    except KeyError:
        # Narrowed from a bare except: only a missing FILTER keyword
        # should fall through to the ACS two-wheel lookup.
        filt = the_header["FILTER1"]
        if filt.find("CLEAR") == -1:
            return filt
        return the_header["FILTER2"]
def find_filter(flt_list, the_filters):
    """Return the subset of *flt_list* whose FITS filter is in *the_filters*.

    *the_filters* may be a single filter name or a list of names.
    """
    if isinstance(the_filters, str):
        filt_list = [the_filters]
    else:
        filt_list = list(the_filters)
    matched = []
    for item in flt_list:
        f = pyfits.open(item)
        try:
            if filt_list.count(get_filter(f[0].header)):
                matched.append(item)
        finally:
            # Previously the handle was never closed, leaking one open
            # file per exposure scanned.
            f.close()
    return matched
def find_best_ref(all_flc_list, filt_priority=["F110W", "F105W",
                                               "F140W", "F125W",
                                               "F814W", "F775W",
                                               "F606W", "F160W"]):
    # From the highest-priority filter that has any exposures, pick the
    # exposure whose pointing minimizes the maximum distance to all the
    # others (the most "central" frame), to use as alignment reference.
    # Returns (filename, index into the per-filter list).
    # NOTE(review): mutable default argument -- benign here because the
    # list is only read, never mutated.
    flc_list = []
    for filt in filt_priority:
        if flc_list == []:
            flc_list = find_filter(all_flc_list, filt)
            print "flc_list for ", filt, flc_list
    print "Find ref with least maximum disagreement."
    print "In princple, this should take rotation into account."
    # Project each exposure's target RA/Dec onto a flat, scaled x/y plane:
    # cos(dec) corrects RA compression; 3600*20 converts degrees to
    # 0.05-arcsec units.
    xlist = np.array([], dtype=np.float64)
    ylist = np.array([], dtype=np.float64)
    for fl in flc_list:
        f = pyfits.open(fl)
        ra = f[0].header["RA_TARG"]
        dec = f[0].header["DEC_TARG"]
        f.close()
        x = ra*np.cos(dec/57.3)*3600*20
        y = dec*3600*20
        xlist = np.append(xlist, x)
        ylist = np.append(ylist, y)
    # Minimax search: the best reference has the smallest worst-case
    # separation from every other exposure.
    besttotal = 1.e10
    for i in range(len(xlist)):
        new = np.sqrt((xlist - xlist[i])**2. + (ylist - ylist[i])**2.)
        if max(new) < besttotal:
            besttotal = max(new)
            besti = i
    print "Ref to use ", flc_list[besti], besti
    return flc_list[besti], besti
def transfer_header(infl, outfl):
    """Copy every header keyword of *infl* into *outfl* in place, so the
    tweaked WCS solution is propagated back to the original file.
    Bookkeeping and comment cards are skipped, and only keys whose value
    actually differs (or is new) are written."""
    print "Transfer", infl, "to", outfl
    fin = pyfits.open(infl)
    fout = pyfits.open(outfl, 'update')
    # Keys that must not be copied: pipeline bookkeeping and comment cards.
    dont_transfer = ["HSTSLAC", "MDRIZSKY", "LACOSMIC", "HISTORY", "COMMENT", ""]
    print "Transferring: ",
    for i in range(len(fin)):
        for key in fin[i].header:
            if dont_transfer.count(key) == 0:
                # .get(default=None) also catches keys absent from the output.
                if fin[i].header[key] != fout[i].header.get(key, default = None):
                    print key,
                    fout[i].header[key] = fin[i].header[key]
    fout.flush()
    fout.close()
    fin.close()
    print
def do_tweak(flt_list, besti, lowthreshold = 0):
    """Align every frame in *flt_list* to the frame at index *besti*
    with drizzlepac's TweakReg, after patching known WFC3 bad pixels
    with a local median.  If the shift solution contains NaNs, retries
    once with a lower detection threshold; finally copies the updated
    WCS headers back onto the original files."""
    # Load the (x, y) list of known bad WFC3 pixels.
    f = open(bad_pix_list_wfc3)
    lines = f.read().split('\n')
    f.close()
    lines = [item.split(None) for item in lines]
    lines = [item for item in lines if item != []]
    bad_pix = [(int(item[0]), int(item[1])) for item in lines]
    tmp_ims = []
    for i in range(len(flt_list)):
        f = pyfits.open(flt_list[i])
        # ACS frames get a "_lac" working copy, everything else "_filter".
        if f[0].header["INSTRUME"] == "ACS":
            tmp_ims.append(flt_list[i].replace(".fits", "_lac.fits"))
            acs = True
        else:
            tmp_ims.append(flt_list[i].replace(".fits", "_filter.fits"))
        if flt_list[i] == tmp_ims[i]:
            print "Error with ", flt_list[i]
            sys.exit(1)
        print "Median Filtering ", flt_list[i]
        f = pyfits.open(flt_list[i])
        tmpdata = copy.deepcopy(f["SCI"].data)
        # LTV1/LTV2 shift full-detector pixel coords into this subarray.
        LTV1 = f["SCI"].header["LTV1"]
        LTV2 = f["SCI"].header["LTV2"]
        for this_x, this_y in bad_pix:
            this_x += LTV1
            this_y += LTV2
            if this_x > 1 and this_x < len(tmpdata[0]) and this_y > 1 and this_y < len(tmpdata):
                # Replace the bad pixel with the median of its 3x3 neighborhood
                # (FITS pixel coordinates are 1-based, hence the -1 offsets).
                f["SCI"].data[int(np.around(this_y - 1)), int(np.around(this_x - 1))] = np.median(tmpdata[int(np.around(this_y - 2)): int(np.around(this_y + 1)), int(np.around(this_x - 2)): int(np.around(this_x + 1))])
        f.writeto(tmp_ims[i], clobber = True)
        f.close()
        # NOTE(review): acs is unconditionally reset to False here, so the
        # ACS-specific threshold/conv_width values below never apply --
        # looks unintentional; confirm.
        acs = False
        do_it("cp -f " + tmp_ims[i] + " " + tmp_ims[i].replace("/orig_files/", "/"))
        tmp_ims[i] = tmp_ims[i].replace("/orig_files/", "/")
    print "tmp_ims ", tmp_ims
    tweakref = tmp_ims[besti]
    tweakreg.TweakReg(','.join(tmp_ims),
                      updatehdr=True,
                      shiftfile=True, # This is just for show
                      ############ Change This Between Iterations: ##########
                      refimage=tweakref,
                      updatewcs=False, # I think this should always be false.
                      searchrad=4,
                      searchunits='arcseconds',
                      threshold=(1. + 7.*acs)/(lowthreshold + 1.),
                      conv_width=(2.5 + 1.*acs), # 3.5 for optical, 2.5 for IR
                      ######### Change This Between Iterations: ##############
                      wcsname="TWEAK_rough",
                      residplot='No plot',
                      see2dplot=False,
                      fitgeometry='shift') # Have to change this for that one epoch, G cluster?
    # A NaN in the shift file means TweakReg failed to match sources.
    f = open("shifts.txt")
    lines = f.read()
    f.close()
    if lines.find(" nan ") != -1:
        print "Couldn't match!"
        if lowthreshold == 0: # First iteration
            print "Trying lower threshold..."
            # NOTE(review): after this recursive retry returns, execution
            # falls through and transfers headers a second time -- confirm
            # that is intended.
            do_tweak(flt_list, besti, lowthreshold = 1)
        else:
            print "...even though lowthreshold is ", lowthreshold
            sys.exit(1)
    # Propagate the tweaked WCS back onto the original files.
    for i in range(len(flt_list)):
        print "Transferring from ", tmp_ims[i], flt_list[i]
        transfer_header(tmp_ims[i], flt_list[i])
def do_drizzle(flc_list, outputname, clean = True, refimage = "", build = True, cr_sensitive = False, outputscale = 0.05):
    """Drizzle-combine *flc_list* into a single stack named *outputname*,
    with cosmic-ray rejection when more than one frame is available.
    Handles NICMOS/WFC3-specific bad-pixel bit masks and applies the
    post-MJD-51544 NICMOS 0.7% photometric correction."""
    # NOTE(review): cr_sensitive is unconditionally forced to True below,
    # so the caller's value is ignored -- confirm this override is wanted.
    print "overriding cr_sensitive", cr_sensitive
    cr_sensitive = True
    n_img = len(flc_list)
    # minmed rejection for small stacks, a true median for 5+ frames.
    combine_type = "minmed"*(n_img <= 4.) + "median"*(n_img > 4)
    print "Number of images ", n_img, combine_type
    if refimage != "":
        print "Using refimage", refimage
    # Instrument is inferred from the HST filename prefix: n* = NICMOS, i* = WFC3.
    nicmos = (flc_list[0].split("/")[-1][0] == "n")
    if nicmos:
        combine_type = "minmed"
    wfc3 = (flc_list[0].split("/")[-1][0] == "i")
    print "flc_list, nicmos, wfc3 ", flc_list, nicmos, wfc3
    astrodrizzle.AstroDrizzle(','.join(flc_list),
                              preserve=False,
                              build=build,
                              output=outputname,
                              clean=clean*0, # NOTE(review): always False, so tmp files are kept regardless of *clean*
                              updatewcs=nicmos, # This is right
                              proc_unit='native',
                              driz_sep_kernel='square',
                              driz_sep_pixfrac=1.0,
                              driz_sep_scale=0.128,
                              driz_sep_bits=(0 + (512+1024+2048)*nicmos
                                             + (2048+8192)*wfc3),
                              combine_type=combine_type,
                              driz_cr=(n_img > 1),
                              median=(n_img > 1),
                              blot=(n_img > 1),
                              static=(n_img > 1),
                              #driz_cr_snr = "3.5 3.0",
                              driz_cr_scale=("3 2"*(1 - cr_sensitive)
                                             + "2 1.5"*cr_sensitive), # Up from default 1.2, 0.7
                              #driz_cr_scale = "2. 1.5",
                              #final_wht_type = "ERR", # This is very wrong! Why do they even include it?
                              final_wht_type="EXP", # This one works!
                              final_kernel="gaussian",
                              final_pixfrac=1.0, # Should be default.
                              final_wcs=True,
                              final_rot=0.,
                              final_bits=(0 + (512+1024+2048)*nicmos
                                          + (2048+8192)*wfc3),
                              final_scale=outputscale,
                              final_refimage=refimage)
    if nicmos:
        # NICMOS photometric correction for data taken after MJD 51544
        # (2000-01-01): scale the science array up by 0.7%.
        f = pyfits.open(outputname + "_drz.fits", 'update')
        expend = f[0].header["EXPEND"]
        print outputname, "EXPEND", expend
        if expend > 51544:
            print "Multiplying by 1.007!"
            f["SCI"].data *= 1.007
        f.flush()
        f.close()
def get_fls_by_filter_date(globpath = ""):
    """Group exposure files into visits.

    Returns an OrderedDict keyed by (FILTER, EXPEND): files taken with
    the same filter within one day of an existing key's EXPEND share
    that key.  Files are visited in ascending EXPEND order, and files
    also present in *simfls* are dropped from the original list first.
    """
    files_by_filter_date = collections.OrderedDict()
    if globpath == "":
        origfls = glob.glob(data_path
                            + "set_%s/orig_files/*flt.fits" % set_num)
        simfls = [] #glob.glob("simulated_ims/*flt.fits")
    else:
        origfls = glob.glob(globpath)
        simfls = []
    # Drop any original that has a simulated counterpart (same basename);
    # reverse iteration keeps indices valid while deleting.
    for i in range(len(origfls))[::-1]:
        foundsim = 0
        for simfl in simfls:
            if origfls[i].split("/")[-1] == simfl.split("/")[-1]:
                foundsim = 1
        if foundsim:
            del origfls[i]
    # Sort all files by exposure-end time (EXPEND, MJD).
    fls_sorted_by_date = []
    for fl in origfls + simfls:
        f = pyfits.open(fl)
        EXPEND = f[0].header["EXPEND"]
        f.close()
        fls_sorted_by_date.append((EXPEND, fl))
    fls_sorted_by_date.sort()
    # print fls_sorted_by_date
    fls_sorted_by_date = [item[1] for item in fls_sorted_by_date]
    # Bucket each file into an existing (FILTER, EXPEND) visit if its
    # filter matches and its EXPEND is within 1 day; otherwise start a
    # new visit.  The assert guards against a file matching two visits.
    for fl in fls_sorted_by_date:
        f = pyfits.open(fl)
        EXPEND = f[0].header["EXPEND"]
        FILTER = f[0].header["FILTER"]
        f.close()
        found = 0
        for key in files_by_filter_date:
            if (key[0] == FILTER) and (abs(EXPEND - key[1]) < 1.):
                files_by_filter_date[key].append(fl)
                found += 1
        assert found < 2
        if found == 0:
            files_by_filter_date[(FILTER, EXPEND)] = [fl]
    # for key in files_by_filter_date:
    #     print key, files_by_filter_date[key]
    return files_by_filter_date
def sort_ims(ims_path):
    """Split the _flt images under *ims_path* into two filters x two
    epochs (earliest vs. latest integer EXPEND) and return
    (filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls,
     filt2_epoch1_fls, filt2_epoch2_fls).

    NOTE(review): assumes exactly two distinct filters are present, and
    relies on Python 2 semantics where dict.keys() returns a list
    (tuple unpacking of ims_dict.keys(), np.min/np.max over keys()).
    """
    origfls = glob.glob(ims_path+'/*flt.fits')
    print origfls
    ims_dict = {}
    # Build {FILTER: {EXPEND: [basenames]}} from each file's header.
    for fl in origfls:
        f = pyfits.open(fl)
        EXPEND = int(f[0].header["EXPEND"])
        FILTER = f[0].header["FILTER"]
        f.close()
        just_fl = fl.split('/')[-1]
        print just_fl, FILTER, EXPEND
        try:
            ims_dict[FILTER]
        except:
            ims_dict[FILTER] = {}
        try:
            ims_dict[FILTER][EXPEND].append(just_fl)
        except:
            ims_dict[FILTER][EXPEND] = []
            ims_dict[FILTER][EXPEND].append(just_fl)
    filt1, filt2 = ims_dict.keys()
    # Epoch 1 = earliest EXPEND, epoch 2 = latest, per filter.
    filt1_e1 = np.min(ims_dict[filt1].keys())
    filt1_e2 = np.max(ims_dict[filt1].keys())
    filt2_e1 = np.min(ims_dict[filt2].keys())
    filt2_e2 = np.max(ims_dict[filt2].keys())
    filt1_epoch1_fls = ims_dict[filt1][filt1_e1]
    filt1_epoch2_fls = ims_dict[filt1][filt1_e2]
    filt2_epoch1_fls = ims_dict[filt2][filt2_e1]
    filt2_epoch2_fls = ims_dict[filt2][filt2_e2]
    return filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls, \
           filt2_epoch1_fls, filt2_epoch2_fls
def get_filters(ims_path):
    """Return the sorted unique FILTER names of the _flt images in *ims_path*."""
    origfls = glob.glob(ims_path+'/*flt.fits')
    print ims_path
    print origfls
    filts = []
    for fl in origfls:
        f = pyfits.open(fl)
        FILTER = f[0].header["FILTER"]
        f.close()
        filts.append(FILTER)
    unique_filters = np.unique(filts)
    return unique_filters
#path = '/Users/mcurrie/Projects/TransiNet/data/set_%s/orig_files' % set_num
#data_path = '/Users/mcurrie/Projects/TransiNet/data/'
#path = '/Volumes/My_book/TransiNet/data/set_%s/orig_files' % set_num
#data_path = '/Volumes/My_book/TransiNet/data/'
data_path = '/Volumes/My_Book/TransiNet/data/sets_newbadpix/'
# step 0: stack images
# Record the (placeholder) object coordinates for this set.
with open('obj_coords.dat', 'wb') as f:
    f.write('set_%s 0 0' % set_num)
# Final drizzled pixel scale in arcsec/pixel.
outputscale = 0.09
sky_nlc_order = 'nlcsky'
bad_pix_list_wfc3 = data_path + 'bad_pix_list_wfc3.txt'
# NOTE(review): set_num was already read from sys.argv[1] at the top of
# the file; this reassignment is redundant but harmless.
set_num = sys.argv[1]
set_dir = 'set_' + set_num
userrefimage = ''
# Move the raw FITS frames into an orig_files/ subdirectory.
do_it("mkdir %s/%s/orig_files" % (data_path, set_dir))
do_it("mv %s/*fits %s/orig_files" % (data_path + set_dir,
                                     data_path + set_dir))
print "Aligning Images..."
# Align the optical (ACS) and IR (WFC3) filter groups separately.
# NOTE(review): the loop variable "filter" shadows the builtin.
for filter in [["F606W", "F775W", "F814W"],
               ["F105W", "F110W", "F125W", "F140W", "F160W"]]:
    # i* = WFC3 _flt frames, j* = ACS _flc frames.
    flt_list = glob.glob(data_path + set_dir + "/orig_files/i*flt.fits") + \
               glob.glob(data_path + set_dir + "/orig_files/j*flc.fits")
    flt_list.sort()
    flt_list = find_filter(flt_list, filter)
    if flt_list != []:
        best_ref, besti = find_best_ref(flt_list)
        do_tweak(flt_list, besti)
        # Clean up TweakReg scratch files; archive the shift solution.
        do_it("rm -f %s/*.coo %s/*.match %s/*catfile.list"
              % (data_path + set_dir,
                 data_path + set_dir,
                 data_path + set_dir))
        do_it("mv shifts.txt "
              + data_path + set_dir
              + "/shifts_%s.txt" % "_".join(filter))
print 'Finished alignment'
print "Drizzling WFC3..."
for filter in ["F105W", "F110W", "F125W", "F140W", "F160W"]:
files = find_filter(glob.glob(data_path + set_dir
+ "/orig_files/i*flt.fits"), filter)
print "filter, files", filter, files
if len(files) > 0:
for cr_sensitive in [0]:
new_files = [item.replace("/orig_files", "") for item in files]
for file, new_file in zip(files, new_files):
if new_file == file:
print "Error,", new_file, "is the same!"
sys.exit(1)
do_it("cp -vf " + file + " " + new_file)
driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive
do_drizzle(new_files,
driz_filename,
clean=True,
refimage=(userrefimage != "None")*userrefimage,
build = True,
cr_sensitive=cr_sensitive,
outputscale=outputscale)
do_it("mv " + driz_filename + "_drz.fits "
+ data_path + set_dir)
do_it("rm -fv " + " ".join(new_files))
print "Drizzling ACS..."
for filter in ["F775W", "F814W", "F606W", "F850LP"]:
files = find_filter(glob.glob(data_path + set_dir
+ "/orig_files/j*flc.fits"), filter)
print "filter, files", filter, files
if len(files) > 0:
for cr_sensitive in [0]:
new_files = [item.replace("/orig_files", "") for item in files]
for file, new_file in zip(files, new_files):
if new_file == file:
print "Error,", new_file, "is the same!"
sys.exit(1)
do_it("cp -vf " + file + " " + new_file)
driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive
do_drizzle(new_files,
driz_filename,
clean=True,
refimage=(userrefimage != "None")*userrefimage,
build=True,
cr_sensitive=cr_sensitive,
outputscale=outputscale)
do_it("mv " + driz_filename + "_drc.fits "
+ data_path + set_dir)
do_it("rm -fv " + " ".join(new_files))
# Write the downstream parameter file: reference drizzle, aligned frame
# list, zero points and magnitude grid for the fake-source injection.
unique_filters = get_filters(data_path+set_dir+'/orig_files/')
# NOTE(review): this glob path has no set_%s component (unlike the one in
# get_filters just above), so origfls is likely always empty -- confirm.
origfls = glob.glob(data_path+'/orig_files/*flt.fits')
with open(data_path + 'paramfile_%s.txt' % set_num, 'wb') as paramfl:
    paramfl.write('drz\t%s/set_%s/%s_stack_drz.fits\n' % (data_path,
                                                          set_num,
                                                          unique_filters[0]))
    paramfl.write('aligned\t%s\n' % ' '.join(origfls))
    # WFC3 IR AB zero points per filter.
    paramfl.write('F125W_zp\t26.23\n')
    paramfl.write('F105W_zp\t26.24\n')
    paramfl.write('F140W_zp\t26.44\n')
    paramfl.write('F160W_zp\t25.92\n')
    # Injection magnitude grid and detection parameters.
    paramfl.write('min_mag\t25.0\n')
    paramfl.write('max_mag\t27.0\n')
    paramfl.write('step_mag\t0.2\n')
    paramfl.write('gauss_r\t4\n')
    paramfl.write('frac_real\t0.5\n')
    paramfl.write('F125W_highz\t26.8\n')
    paramfl.write('F105W_highz\t26.8\n')
    paramfl.write('F140W_highz\t26.0\n')
    paramfl.write('F160W_highz\t25.9\n')
    paramfl.write('frac_highz\t0.003\n')
# stack epochs: drizzle each (filter, visit) group into its own epoch
# image, registered against the reference stack from the paramfile.
fls_by_filter_date = get_fls_by_filter_date()
commands.getoutput("rm -f %s/set_%s_epochs/*" % (data_path, set_num))
commands.getoutput("mkdir %s/set_%s_epochs" % (data_path, set_num))
filter_counter = []
for item in fls_by_filter_date:
    print item
    for im in fls_by_filter_date[item]:
        commands.getoutput("cp " + im + " %s/set_%s_epochs" % (data_path,
                                                               set_num))
    # Epoch number = how many visits of this filter we have seen so far.
    filter_counter.append(item[0])
    # Pull the reference drizzle path from the paramfile's "drz" line.
    refimage = commands.getoutput("grep drz "+data_path+"paramfile_%s.txt" %
                                  set_num).split(None)[1] + "[SCI]"
    print "refimage", refimage
    do_drizzle([data_path + "set_"+set_num+"_epochs/" + subitem.split("/")[-1] for subitem in fls_by_filter_date[item]],
               outputname = data_path + "set_"+set_num+"_epochs/" + item[0] + "_epoch%02i" % (filter_counter.count(filter_counter[-1])),
               refimage=refimage,
               outputscale=outputscale)
15,431 | b728ad737fea657e56d0033daa1e4c82d37b2ecf | # -*- coding: utf-8 -*-
import os
import sys
sys.path.append('../Compiladores/')
#import uuid
#from typing import *
from afnd.automata import AFNDmV
'''
Conversion class from an AFND-e (NFA with epsilon moves) to an AFD (DFA)
via the subset-construction method: it receives an AFND, computes the
epsilon-closures, and returns the equivalent AFD.
'''
class AFD(AFNDmV):
    """Deterministic finite automaton built from an AFND-e (an NFA with
    epsilon moves) by the subset-construction method."""
    # inheriting the parent's constructor behavior
    def __init__(self, afnd):
        # copy the AFND alphabet, dropping the empty (epsilon) transition;
        # since '&' is always the last symbol, just take the slice 0 ~ -1
        self.alfabeto = afnd.alfabeto[:-1]
        self.fecho_E = []          # epsilon-closure of each state
        self.matrizTransicao = {}  # (state-set, symbol[, 'final']) -> state-set
    def rename_state(self, afd: object) -> object:
        '''
        Rename the composite states to small integers for easier reading.
        Returns (afd, chaves) where chaves maps new index -> original
        state set.
        '''
        chaves = []
        new_dict = {}
        # Collect the distinct source state-sets in first-seen order.
        for keys in afd.matrizTransicao.keys():
            if keys[0] not in chaves:
                chaves.append(keys[0])
        # rename them both in the key and in the value
        for keys in afd.matrizTransicao.keys():
            # keep the 'final' marker in the new key
            if 'final' in keys:
                new_dict[ ( chaves.index(tuple(keys[0])), keys[1], keys[2] ) ] = chaves.index(tuple(afd.matrizTransicao.get(keys)))
            else:
                # new non-final key
                new_dict[ ( chaves.index(tuple(keys[0])), keys[1] ) ] = chaves.index(tuple(afd.matrizTransicao.get(keys)))
        afd.matrizTransicao = new_dict
        print('novas chaves', new_dict.keys())
        return (afd, chaves)
    def gerar_AFD(self, afnd: object, fecho_E: list, matrizTransicao: dict ) -> object:
        '''
        Receives the first set of states (the epsilon-closure of the
        initial state) and computes the remaining DFA states by subset
        construction.
        '''
        afd = AFD(afnd)
        estados = []
        estados.append(fecho_E[0])
        new_state = []
        fechos = []
        transicoes = []
        afd_Transicoes = {}
        # print(estados)
        # print('Estado final', afnd.estado_final)
        # print('Alfabeto:', afnd.alfabeto)
        # Walk the (growing) list of DFA states, starting from fecho_E[0].
        for itens in estados:
            for j in afd.alfabeto:
                for i in itens:
                    # print('Estado[i]:',i,'Simbolo:',j)
                    # print('Transicao: ',matrizTransicao.get((i,j)))
                    # collect the reached states so their closures can be
                    # computed afterwards
                    if matrizTransicao.get((i,j)) != None:
                        #new_state.append(fecho_E[matrizTransicao.get((i,j))])
                        fechos.append(matrizTransicao.get((i,j)))
                # once the reached states are gathered, take their closures
                # print(fechos)
                for i in fechos:
                    print(i)
                    transicoes += fecho_E[i]
                new_state += (list(set(transicoes)))
                # print('new_state',new_state)
                # an empty new_state represents the error state -> '$' = ERROR
                if not new_state:
                    new_state = ['$']
                    # print('Novo Estado Vazio',new_state)
                    # add the error state to the state list if new
                    if new_state not in estados:
                        # print('novo estado ta na lista de estados')
                        estados.append(['$'])
                    # if a final state transitions to the error state,
                    # record it with the 'final' marker
                    if afnd.estado_final in itens:
                        # print("Final -> adiciona index 'final' na tupla do afd")
                        afd_Transicoes[ (tuple(itens), j,'final') ] = new_state.copy()
                    else:
                        afd_Transicoes[ (tuple(itens), j) ] = new_state.copy()
                # new state is non-empty and not yet in the state list
                elif new_state not in estados:
                    # the source set contains the final state
                    if afnd.estado_final in itens:
                        afd_Transicoes[ (tuple(itens), j,'final') ] = new_state.copy()
                    else:
                        afd_Transicoes[ (tuple(itens), j) ] = new_state.copy()
                    estados.append(new_state.copy())
                # state already in the state list
                else:
                    if afnd.estado_final in itens:
                        afd_Transicoes[ (tuple(itens), j,'final') ] = new_state.copy()
                    else:
                        afd_Transicoes[ (tuple(itens), j) ] = new_state.copy()
                # clear the scratch lists for the next symbol
                new_state.clear()
                transicoes.clear()
                fechos.clear()
        # assign the results to the object representing the DFA
        afd.matrizTransicao = afd_Transicoes
        afd.fecho_E = fecho_E
        print(estados)
        print(afd.matrizTransicao)
        #print(afd.matrizTransicao)
        #print(afd.fecho_E)
        return afd
    # add the new subsets
    def minimize_afd(self, automata: object) -> object:
        '''
        Minimization of the DFA using the final-state table.
        (Not implemented yet.)
        '''
        pass
    def calcular_fechoE(self, automato: object) -> list:
        '''
        Receives the automaton's transitions and computes the
        epsilon-closure of every state.
        TODO: try dynamic programming, passing the list of states already
        visited and computed.
        '''
        for keys in automato.matrizTransicao:
            self.fecho_E.append(self.fechoE(automato.matrizTransicao, keys[0]))
        # remove duplicates within each closure
        for i in range(len(self.fecho_E)):
            self.fecho_E[i] = list(set(self.fecho_E[i]))
        # drop the tuples left inside the closures
        for i in range(len(self.fecho_E)):
            for j in (self.fecho_E[i]):
                if type(j) is tuple:
                    self.fecho_E[i].remove(j)
        return self.fecho_E
    def fechoE(self, transicoes: dict, estado_atual: int) -> list:
        # Recursively collect every state reachable from estado_atual via
        # '&' (epsilon) transitions, including estado_atual itself.
        fecho_E = []
        fecho_E.append(estado_atual)
        #print("estado atual: ", estado_atual)
        if (estado_atual,'&') in transicoes.keys():
            # list of states reachable through the epsilon transition
            try:
                for i in transicoes.get((estado_atual,'&')):
                    # NOTE(review): this guard checks self.fecho_E (the
                    # instance accumulator), not the local fecho_E --
                    # confirm that is intentional.
                    if not (i in self.fecho_E):
                        fecho_E += self.fechoE(transicoes, i)
            # only a single state reachable through the epsilon transition
            # (value not iterable as a list)
            except:
                fecho_E += self.fechoE(transicoes, transicoes.get((estado_atual,'&')))
        return fecho_E
if __name__ == '__main__':
    # Build the AFND from the input file named on the command line, then
    # convert it to a DFA via subset construction.
    automato = AFNDmV()
    automato = automato.gerar_AFND(automato.validacao_input(sys.argv[1]))
    print("afnd alfabeto", automato.alfabeto)
    afd = AFD(automato)
    print("afd alfabeto", afd.alfabeto)
    afd.fecho_E = afd.calcular_fechoE(automato)
    afd = afd.gerar_AFD(automato, afd.fecho_E, automato.matrizTransicao)
    # The string below is a leftover author note ("future function to
    # render this as an HTML table"); it is a no-op statement.
    '''
    Futura funcao pra transformar em uma tabela html
    '''
    print("afnd alfabeto",automato.alfabeto)
    # map the AFD's composite states to small integer names
    afd = afd.rename_state(afd)
    print(afd[0].matrizTransicao)
15,432 | 69190e945486e38231286ff09f4c2d71f5d8973d | from django.core.exceptions import ValidationError
from rest_framework import status
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rareserverapi.models import Category
from rareserverapi.serializers import CategorySerializer
class CategoriesViewSet(ViewSet):
    """CRUD endpoints for post categories."""

    def retrieve(self, request, pk=None):
        """GET a single category by primary key."""
        try:
            category = Category.objects.get(pk=pk)
            serializer = CategorySerializer(
                category, context={'request': request})
            return Response(serializer.data)
        except Category.DoesNotExist as ex:
            # A missing row is a client error (404), not a server fault;
            # consistent with destroy() below.
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        except Exception as ex:
            return HttpResponseServerError(ex)

    def list(self, request):
        """GET all categories, ordered alphabetically by label."""
        categories = Category.objects.all().order_by("label")
        serializer = CategorySerializer(
            categories, many=True, context={'request': request})
        return Response(serializer.data)

    def create(self, request):
        """POST a new category; 400 with a reason on validation failure."""
        category = Category()
        category.label = request.data["label"]
        try:
            category.save()
            serializer = CategorySerializer(category, context={'request': request})
            return Response(serializer.data)
        except ValidationError as ex:
            return Response({'reason': ex.message}, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, pk=None):
        """DELETE a category; 404 when it does not exist."""
        try:
            category = Category.objects.get(pk=pk)
            category.delete()
            return Response({}, status=status.HTTP_204_NO_CONTENT)
        except Category.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        except Exception as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    def update(self, request, pk=None):
        """PUT an updated label onto a category; 404 when it does not exist."""
        try:
            category = Category.objects.get(pk=pk)
        except Category.DoesNotExist as ex:
            # Previously an unknown pk raised an unhandled DoesNotExist
            # (a 500); return 404 like the other detail routes.
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        category.label = request.data["label"]
        category.save()
        return Response({}, status=status.HTTP_204_NO_CONTENT)
|
15,433 | 1d62ad79683208cec52f5b8aca9be4bfe26f4e89 | # Given an array, rotate the array to the right by k steps, where k is non-negat
# ive.
#
# Follow up:
#
#
# Try to come up as many solutions as you can, there are at least 3 different w
# ays to solve this problem.
# Could you do it in-place with O(1) extra space?
#
#
#
# Example 1:
#
#
# Input: nums = [1,2,3,4,5,6,7], k = 3
# Output: [5,6,7,1,2,3,4]
# Explanation:
# rotate 1 steps to the right: [7,1,2,3,4,5,6]
# rotate 2 steps to the right: [6,7,1,2,3,4,5]
# rotate 3 steps to the right: [5,6,7,1,2,3,4]
#
#
# Example 2:
#
#
# Input: nums = [-1,-100,3,99], k = 2
# Output: [3,99,-1,-100]
# Explanation:
# rotate 1 steps to the right: [99,-1,-100,3]
# rotate 2 steps to the right: [3,99,-1,-100]
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 2 * 10^4
# It's guaranteed that nums[i] fits in a 32 bit-signed integer.
# k >= 0
#
# Related Topics 数组
# 👍 629 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
    """Rotate an array right by k steps, in place — four approaches."""

    def rotate_insert(self, nums, k):
        """Pop from the tail and re-insert at the head, k times (O(n*k)).

        :type nums: List[int]
        :type k: int
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        k %= len(nums)
        for _ in range(k):
            nums.insert(0, nums.pop())

    def rotate_slice(self, nums, k):
        """Rebuild via slice concatenation (O(n) extra space for the slices)."""
        k %= len(nums)
        split = len(nums) - k
        nums[:] = nums[split:] + nums[:split]

    def rotate_flip(self, nums, k):
        """Three reversals: the whole array, then each of the two halves.

        In-place with O(1) extra space.
        """
        n = len(nums)
        k %= n

        def reverse_span(lo, hi):
            # Two-pointer reversal of nums[lo..hi] inclusive.
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo, hi = lo + 1, hi - 1

        reverse_span(0, n - 1)
        reverse_span(0, k - 1)
        reverse_span(k, n - 1)

    def rotate_cycle(self, nums, k):
        """Cyclic replacement: follow index cycles, moving each element once."""
        n = len(nums)
        k %= n
        moved = 0
        cycle_start = 0
        while moved < n:
            idx = cycle_start
            carried = nums[idx]
            while True:
                nxt = (idx + k) % n
                nums[nxt], carried = carried, nums[nxt]
                idx = nxt
                moved += 1
                if idx == cycle_start:
                    break
            cycle_start += 1
# leetcode submit region end(Prohibit modification and deletion)
|
15,434 | 8b1d394e3311620bd8e71f863c6255dd3d3e8d8c | #-*-coding:utf8-*-
#user:brian
#created_at:2018/6/9 10:18
# file: MultionormialNB.py
#location: china chengdu 610000
from Bayesmodel.Bacic import *
class MultinormialNB(NativaBsyes):
    """Multinomial naive Bayes over categorical features with Laplace
    smoothing, built on the NativaBsyes base class."""
    def feed_data(self,x,y,sample_weight=None):
        # Ingest raw samples: encode every categorical feature value and
        # every label as a small integer, then cache per-class slices.
        if isinstance(x,list):
            features=map(list,zip(*x))
        else:
            features=x.T
        # use bincount for the counting
        features=[set(feat) for feat in features] # distinct values of each feature
        feat_dics=[{_l:i for i,_l in enumerate(feats)}for feats in features] # one value->index dict per feature
        label_dics={_l:i for i,_l in enumerate(set(y))} # single label->index dict
        x=np.array([[feat_dics[i][_l]for i ,_l in enumerate(sample) ]for sample in x])
        y= np.array([label_dics[yy] for yy in y])
        cat_counter = np.bincount(y)                      # samples per class
        n_possibilities = [len(feats) for feats in features]
        labels = [y == value for value in range(len(cat_counter))]
        labelled_x = [x[ci].T for ci in labels]           # per-class feature columns
        # update the model's cached state
        self._x, self._y = x, y
        self._labelled_x, self._label_zip = labelled_x, list(zip(labels, labelled_x))
        self._cat_counter, self._feat_dics, self._n_possibilities = cat_counter, feat_dics, n_possibilities
        self.label_dict = {i:_l for _l,i in label_dics.items()}
        self.feed_sample_weight(sample_weight)
    def feed_sample_weight(self, sample_weight=None):
        # Per-feature, per-class value counts, optionally sample-weighted.
        self._con_counter = []
        for dim, p in enumerate(self._n_possibilities):
            if sample_weight is None:
                self._con_counter.append([
                    np.bincount(xx[dim], minlength=p) for xx in self._labelled_x])
            else:
                local_weights = sample_weight * len(sample_weight)
                self._con_counter.append([
                    np.bincount(xx[dim], weights=local_weights[label], minlength=p)
                    for label, xx in self._label_zip])
    def _fit(self, lb):
        # Laplace-smoothed conditional probabilities (smoothing factor lb);
        # returns the decision function used by the base class.
        n_dim = len(self._n_possibilities)
        n_category = len(self._cat_counter)
        p_category = self.get_prior_probablity(lb)
        data = [[] for _ in range(n_dim)]
        for dim, n_possibilities in enumerate(self._n_possibilities):
            data[dim] = [
                [(self._con_counter[dim][c][p] + lb) / (self._cat_counter[c] + lb * n_possibilities)
                 for p in range(n_possibilities)] for c in range(n_category)]
        self._data = [np.asarray(dim_info) for dim_info in data]
        def func(input_x, tar_category):
            # Naive-Bayes score: product of per-feature conditionals
            # times the class prior.
            rs = 1
            for d, xx in enumerate(input_x):
                rs *= data[d][tar_category][xx]
            return rs * p_category[tar_category]
        return func
    def _transfer(self, x):
        # Map a raw sample's categorical values to their integer codes
        # (mutates x in place and returns it).
        for j, char in enumerate(x):
            x[j] = self._feat_dics[j][char]
        return x
import matplotlib.pyplot as plt
from pylab import mpl
# Use a CJK-capable font so Chinese class labels render in plots, and
# keep the minus sign displayable under that font.
mpl.rcParams["font.sans-serif"]=['Fangsong']
mpl.rcParams["axes.unicode_minus"]=False
# def plot_all(nb,dataset):
# data=nb._data
# colors={"不爆炸":"blue","爆炸":"red"}
# #反字典化
# _rev_feat_dics=[{_val:key for key ,_val in item.items()} for item in nb._feat_dics]
# for _j in range(nb._x.shape[1]):
# sj=nb._n_possibilities[_j]
# temp_x=np.arange(1,sj+1)
# tittle="$j= {};s_j={}$".format(_j+1,sj)
# plt.figure()
# plt.title(tittle)
# for _c in range(len(nb.label_dict)):
# plt.bar(temp_x-0.35*_c,data[_j][_c,:],width=0.35, facecolor=colors[nb.label_dict[_c]],edgecolor="white"
# ,label="class :{}".format(nb.label_dict[_c]))
# plt.xticks([i for i in range(sj)], [""] + [_rev_feat_dics[_j]] + [""])
# plt.ylim(0, 1.0)
# plt.legend()
# # 保存画好的图像
# plt.savefig("../result/d{0},{1}.png".format(dataset,_j + 1))
if __name__ == '__main__':
    import time
    from Util.util import DataUtil
    # Train and evaluate on the two balloon toy datasets, timing the
    # model-building and evaluation phases separately.
    for dataset in ["balloon1.0","balloon1.5"]:
        print("="*20)
        print(dataset)
        print("-"*20)
        _X,_Y=DataUtil.get_dataset(dataset,"../data/{}.txt".format(dataset))
        learinning_time=time.time()
        nb=MultinormialNB()
        nb.fit(_X,_Y)
        learinning_time=time.time()-learinning_time
        estiamtime=time.time()
        nb.evaluate(_X,_Y)
        estiamtime=time.time()-estiamtime
        # print the timing report (string typos kept as-is: runtime output)
        print(
            "model bulding: {:12.6}s\n"
            "Estimation: {:12.6}s\n"
            "Toatl :{:12.6}".format(
                learinning_time,estiamtime,learinning_time+estiamtime
            )
        )
        print(" "*20)
        # plot_all(nb,dataset)
|
15,435 | 268c394de7cad11353322e5d98230d1414c16e34 | '''
DESCRIPTION
This script reads HDF5 output files from The FLASH code and
extract its fields, data and computes the extent of the
mixing layer.
AUTHOR
Erik S. Proano
Embry-Riddle Aeronautical University
'''
import yt
from yt.funcs import mylog
import h5py
import numpy as np
import scipy.interpolate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from matplotlib import cm, ticker
from matplotlib.ticker import MaxNLocator
from mpi4py import MPI
import os
#fname = 'cylindrical_rmi_2d_hdf5_chk_'
fname = "spherical_rmi_3d_hdf5_chk_"  # checkpoint-file basename prefix
location = os.getcwd()
# Initialize communicators
#comm = MPI.COMM.WORLD()
#Nproc = int(comm.Get_size())
#Pid = int(comm.Get_rank())
initfile = 0     # first checkpoint index to read
finalfile = 150  # one past the last checkpoint index
r_max = (3.90, "cm") # Max. Radius as a tuple with (Ma. Radius, "units")
p_res = 2*1024 # Number of desired radial bins
r_tar = 2.5    # radius used to normalize the radial axis (r* = r / r_tar)
# Divide equal amount of work to each worker (Done by master thread)
start = initfile
end = finalfile
nfiles = finalfile-initfile
#local_start = int(start + Pid*(finalfile-start)/Nproc)
#local_end = int(local_start + (finalfile-start)/Nproc)
#local_nfiles = local_end - local_start
#print('Processor ', Pid, 'will read from', local_start, 'to', local_end)
#comm.Barrier()
#t_start = MPI.Wtime()
# Read every checkpoint, build a radial pressure profile per file, and
# collect simulation times.  rad_dens is (radial bin, checkpoint index).
time = np.array([])
rad_dens = np.zeros([p_res, nfiles])
#center = [0.0, 0.0, 0.5]
for i in range(start, end):
    # Checkpoint files carry a fixed-width, zero-padded 4-digit suffix.
    # str.zfill replaces the original if/elif padding ladder and produces
    # the same name for every index the ladder handled.
    file = fname + str(i).zfill(4)
    mylog.setLevel(40)  # 40 == ERROR: silence yt's INFO/WARNING chatter
    ds = yt.load(file)
    print("Reading", file)
    # Create a 4cm-radius sphere
    center = ds.domain_left_edge#-(0.45,0.45,0.)
    sp = ds.sphere(center, r_max)
    profile = yt.create_profile(sp, 'radius', ['pressure'], n_bins=p_res,
                                units = {'radius': 'cm', "pressure": "dyn/cm**2"},
                                logs = {'radius': False, "pressure": True})
    # Transform the profile from a dictionary to a numpy array
    profVal = list(profile.field_data.values())
    for k in profVal:
        d = k  # single requested field, so d ends up as the pressure bins
    rad_dens[:,i] = d
    time = np.append(time, float(ds.current_time))
    rad = np.array(profile.x)/r_tar  # normalized radial bin centers
# Build an r-t (radius vs time) filled-contour diagram of the profiles.
X, Y = np.meshgrid(time*1.E06, rad)  # time in microseconds vs normalized radius
levels=MaxNLocator(nbins=512).tick_values(rad_dens.min(),
                                          rad_dens.max())
#rbf = scipy.interpolate.Rbf(time, rad, rad_dens,function='linear')
#zi = rbf(X,Y)
#plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
#           extent=[x.min(), x.max(), y.min(), y.max()])
#plt.scatter()
fig = plt.figure()
ax = fig.add_subplot(111)
cf = ax.contourf(X,Y,rad_dens,
                 levels=levels,
                 locator=ticker.LogLocator(),
                 cmap=cm.binary)
#ax = fig.add_subplot(111, projection='3d')
#ax.plot_surface(X, Y, rad_dens, cmap=cm.binary,
#                linewidth=0, antialiased=False)
ax.set_ylabel(r'$r^{*}$',fontsize=16)
ax.set_xlabel(r'Time ($\mu s$)',fontsize=16)
cbar = fig.colorbar(cf, ax=ax)
fig.tight_layout()
fig.savefig("rt.png")
|
15,436 | 1e7f95853e36bbc1a47b8911c239736e7f68128b | if __name__ == "__main__":
# Delegate to the project entry point when executed as a script.
from main_xml_creator import main
main()
|
15,437 | e7ed2dc0ce5d30b4bb8fdfa8ae2abdae16ca9eb3 | class Utwory_muzyczne():
def __init__(self,wykonawca,tytul,album,rok):
    """Store track metadata (Polish field names)."""
    self.wykonawca = wykonawca  # artist
    self.tytul = tytul          # track title
    self.album = album          # album name
    self.rok = rok              # release year
def __str__(self):
    """Return a multi-line, human-readable Polish description of the track."""
    return f'Wykonawca: {self.wykonawca}\nUtwór: {self.tytul}\nAlbum: {self.album}\nRok: {self.rok}'
# Example: build one track and print its formatted description.
utwory = Utwory_muzyczne('Dawid Podsiadło','Nie ma fal','Małomiasteczkowy','2018')
print(utwory)
15,438 | c3380c9fa633c0253a66ffb739b5013fb9e86d17 | import logging
import time
from functools import wraps
from threading import Lock
from googleplay_api.googleplay import GooglePlayAPI, LoginError, DecodeError
# Module-wide logging: timestamped single-line records, INFO level.
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(module)s.%(funcName)s - %(message)s')
logger = logging.getLogger('googleplay-proxy')
logger.setLevel(logging.INFO)
def _with_login(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
if not self.is_logged_in():
self.login()
try:
return method(self, *args, **kwargs)
except DecodeError as err:
logger.warn('Failed to decode the response, possible authentication token issue: %s', err)
self.login()
return method(self, *args, **kwargs)
return wrapper
class ApiLoginException(BaseException):
    """Raised when logging in to Google Play fails after all retries.

    Note: derives from BaseException (not Exception), so a plain
    ``except Exception`` will NOT catch it.
    """
    def __init__(self, cause):
        # *cause* is the last LoginError seen while retrying.
        super(ApiLoginException, self).__init__(cause)
class ApiItem(dict):
    """dict that also accepts attribute-style assignment (item.x = 1).

    Only __setattr__ is overridden: values must be read back through the
    mapping interface (item['x']); attribute reads are not redirected.
    """
    def __setattr__(self, key, value):
        self[key] = value
class ApiClient(object):
    """Thin wrapper around GooglePlayAPI with lazy, retried login.

    Python 2 code (note ``xrange`` below).  Credentials are held and a
    lock serializes login attempts across threads.
    """
    def __init__(self, android_id=None, username=None, password=None,
                 auth_token=None, proxy=None, max_login_retries=10, language=None, debug=False):
        self._api = GooglePlayAPI(android_id, language, debug)
        self._username = username
        self._password = password
        self._auth_token = auth_token
        self._proxy = proxy
        self._max_login_retries = max_login_retries
        # Serializes login attempts across threads.
        self._login_lock = Lock()
        self._logged_in = False
    def is_logged_in(self):
        """Return True once a login attempt has succeeded."""
        return self._logged_in
    def login(self):
        """(Re-)authenticate, retrying up to _max_login_retries times.

        Raises ApiLoginException wrapping the last LoginError when every
        attempt fails.
        """
        self._logged_in = False
        with self._login_lock:
            logger.info('Executing login')
            login_error = None
            for _ in xrange(self._max_login_retries):
                try:
                    self._api.login(self._username, self._password, self._auth_token, self._proxy)
                    self._logged_in = True
                    break
                except LoginError as err:
                    login_error = err
                    time.sleep(0.2)  # brief back-off between attempts
            else:
                # for/else: only reached when no attempt broke out (all failed).
                logger.error('Failed to log in: %s', login_error)
                raise ApiLoginException(login_error)
    @_with_login
    def search(self, package_prefix):
        """Search the store, keeping only packages that start with *package_prefix*."""
        logger.info('Searching for %s', package_prefix)
        results = list()
        response = self._api.search(package_prefix)
        if len(response.doc):
            document = response.doc[0]
            for child in document.child:
                package_name = child.details.appDetails.packageName
                if not package_name.startswith(package_prefix):
                    continue
                item = self._extract_api_item(child, simple=True)
                results.append(item)
        return results
    def developer(self, developer_name):
        """Not supported by this client."""
        raise NotImplementedError('Searching by developer is not supported')
    @_with_login
    def get_details(self, package_name):
        """Fetch the full store listing for one package as an ApiItem."""
        logger.info('Fetching details for %s', package_name)
        details = self._api.details(package_name)
        return self._extract_api_item(details.docV2, simple=False)
    @staticmethod
    def _extract_api_item(api_object, simple):
        """Convert a store document (protobuf-style object) into an ApiItem.

        :param simple: when True, omit the heavyweight fields (description,
            developer info, image dimensions/positions).
        """
        details = api_object.details.appDetails
        item = ApiItem()
        item.package_name = details.packageName
        item.title = api_object.title
        item.creator = api_object.creator
        item.upload_date = details.uploadDate
        item.num_downloads = details.numDownloads
        item.version_code = details.versionCode
        item.share_url = api_object.shareUrl
        if not simple:
            item.description_html = api_object.descriptionHtml
            item.developer_name = details.developerName
            item.developer_website = details.developerWebsite
            item.version_string = details.versionString
            item.recent_changes_html = details.recentChangesHtml
        images = list()
        for image_object in api_object.image:
            image = ApiItem()
            image.type = image_object.imageType
            image.url = image_object.imageUrl
            if not simple:
                image.width = image_object.dimension.width
                image.height = image_object.dimension.height
                image.position = image_object.positionInSequence
            images.append(image)
        item.images = images
        # Aggregate star-rating summary plus per-star counts.
        item.ratings = {
            'stars': api_object.aggregateRating.starRating,
            'total': api_object.aggregateRating.ratingsCount,
            'comments': api_object.aggregateRating.commentCount,
            'count': {
                1: api_object.aggregateRating.oneStarRatings,
                2: api_object.aggregateRating.twoStarRatings,
                3: api_object.aggregateRating.threeStarRatings,
                4: api_object.aggregateRating.fourStarRatings,
                5: api_object.aggregateRating.fiveStarRatings
            }
        }
        return item
|
15,439 | 4fad3aa05fc6c9842fc98a0082a9e1672ccf6ffc |
"""
function provided by Riccardo Manzoni for scaling
double tau trigger MC to data
22 Feb 2016
Updated 30 Jan 2017 to include data/MC scale factors
"""
import math
import ROOT
import json
from helpers import getTH1FfromTGraphAsymmErrors
class DoubleTau35Efficiencies :
    """A class to provide trigger efficiencies
    for HLT DoubleTau35 trigger"""
    def __init__( self, channel ):
        # Fit parameters are only loaded for the fully-hadronic ('tt')
        # channel; any other channel gets an empty placeholder.
        if channel == 'tt' :
            #print "Initializing LepWeight class for channel ",channel
            #effType = 'binned'
            #effType = 'cumulative'
            #with open('data/triggerSF/di-tau/high_mt_%s.json' % effType) as f1 :
            #    self.high_mt_json = json.load(f1)
            with open('data/triggerSF/di-tau/fitresults_tt_moriond2017.json') as f2 :
                self.real_taus_json = json.load(f2)
            #with open('data/triggerSF/di-tau/same_sign_%s.json' % effType) as f3 :
            #    self.same_sign_json = json.load(f3)
        else :
            #self.high_mt_json = ''
            self.real_taus_json = ''
            #self.high_mt_json = ''
    # Directly from Riccardo
    def CBeff(self, x, m0, sigma, alpha, n, norm):
        """Evaluate the fitted efficiency turn-on curve at *x*.

        The parameters (m0, sigma, alpha, n, norm) follow a
        Crystal-Ball-style parametrization -- presumably the integrated
        Crystal Ball shape; confirm against the original fit code.
        """
        sqrtPiOver2 = math.sqrt(ROOT.TMath.PiOver2())
        sqrt2 = math.sqrt(2.)
        sig = abs(sigma)
        t = (x - m0)/sig * alpha / abs(alpha)
        absAlpha = abs(alpha/sig)
        a = ROOT.TMath.Power(n/absAlpha, n) * ROOT.TMath.Exp(-0.5 * absAlpha * absAlpha)
        b = absAlpha - n/absAlpha
        arg = absAlpha / sqrt2;
        # Clamp Erf outside +-5 where it is numerically +-1.
        if arg > 5.: ApproxErf = 1.
        elif arg < -5.: ApproxErf = -1.
        else : ApproxErf = ROOT.TMath.Erf(arg)
        leftArea = (1. + ApproxErf) * sqrtPiOver2
        rightArea = ( a * 1./ROOT.TMath.Power(absAlpha-b, n-1) ) / (n-1)
        area = leftArea + rightArea
        if t <= absAlpha:
            arg = t / sqrt2
            if arg > 5.: ApproxErf = 1.
            elif arg < -5.: ApproxErf = -1.
            else : ApproxErf = ROOT.TMath.Erf(arg)
            return norm * (1 + ApproxErf) * sqrtPiOver2 / area
        else:
            return norm * (leftArea + a * (1/ROOT.TMath.Power(t-b,n-1) - \
                           1/ROOT.TMath.Power(absAlpha - b,n-1)) / (1-n)) / area
    def doubleTauTriggerEff(self, pt, iso, genCode, decayMode ) :
        """Return the per-leg data/MC trigger scale factor.

        NOTE(review): *iso* and *genCode* are accepted but never used --
        the SF depends only on pt and decayMode.
        """
        # Check that there are no 2 prong taus
        assert( decayMode in [0,1,10]), "You have not cleaned your decay \
            modes of your taus!"
        """ 2016 Moriond17 set up has differing efficiencies per decay mode
        remove the lumi weighted approach. Calculate Data/MC
        SF as final output """
        m0 = self.real_taus_json['data_genuine_TightIso_dm%i' % int(decayMode)]['m_{0}']
        sigma = self.real_taus_json['data_genuine_TightIso_dm%i' % int(decayMode)]['sigma']
        alpha = self.real_taus_json['data_genuine_TightIso_dm%i' % int(decayMode)]['alpha']
        n = self.real_taus_json['data_genuine_TightIso_dm%i' % int(decayMode)]['n']
        norm = self.real_taus_json['data_genuine_TightIso_dm%i' % int(decayMode)]['norm']
        dataW = self.CBeff( pt, m0, sigma, alpha, n, norm )
        m0 = self.real_taus_json['mc_genuine_TightIso_dm%i' % int(decayMode)]['m_{0}']
        sigma = self.real_taus_json['mc_genuine_TightIso_dm%i' % int(decayMode)]['sigma']
        alpha = self.real_taus_json['mc_genuine_TightIso_dm%i' % int(decayMode)]['alpha']
        n = self.real_taus_json['mc_genuine_TightIso_dm%i' % int(decayMode)]['n']
        norm = self.real_taus_json['mc_genuine_TightIso_dm%i' % int(decayMode)]['norm']
        mcW = self.CBeff( pt, m0, sigma, alpha, n, norm )
        # data/MC ratio; 1.* keeps the division in floating point (py2).
        return 1.*dataW / mcW
if __name__ == '__main__' :
    # Smoke test (Python 2 print syntax): sample SFs for a few taus.
    c = DoubleTau35Efficiencies('tt')
    print c.doubleTauTriggerEff(68., 'VTightIso', 5, 1 ) # 5 = gen_match real tau
    print c.doubleTauTriggerEff(68., 'VTightIso', 3, 0 ) # 3 = gen_match NOT real tau
    print c.doubleTauTriggerEff(68., 'TightIso', 5 , 10 ) # 5 = gen_match real tau
    print c.doubleTauTriggerEff(68., 'TightIso', 3 , 6 ) # 3 = gen_match NOT real tau
|
15,440 | 1751798fa6bc81445979bcbf8dd35a7364c54049 | import re
# Patterns for all regular expressions (UK postcode outward/inward parts)
GENERAL_PATTERN = r'^([A-Z0-9]{3}|[A-Z0-9]{4})\ [A-Z0-9]{3}$'
AA9A_PATTERN = r'^(^(WC[12]|EC[1-4]|SW1)[ABEHMNPRVWXY]$|SE1P|NW1W)$'
A9A_PATTERN = r'^(E1W|N1[CP]|W1[ABCDEFGHJKPSTUW])$'
A9_PATTERN = r'^([BEGLMNSW][1-9])$'
A99_PATTERN = r'^([BEGLMNSW][1-9]\d)$'
AA9_PATTERN = r'^(((?!AB|LL|SO)[A-PR-UWYZ][A-HK-Y][1-9])|((BL|BS|CM|CR|FY|HA|PR|SL|SS)\d))$'
AA99_PATTERN = r'^(((?!BR|FY|HA|HD|HG|HR|HS|HX|JE|LD|SM|SR|WC|WN|ZE)[A-PR-UWYZ][A-HK-Y][1-9]\d))$'
OUTWARDCODE_PATTERN = '|'.join((AA9A_PATTERN,A9A_PATTERN,A9_PATTERN,A99_PATTERN,AA9_PATTERN,AA99_PATTERN))
INWARDCODE_PATTERN = r'^ \d[A-BD-HJLNPQ-UW-Z]{2}$'

def isValid(postcode):
    """Return True when *postcode* is a well-formed UK postcode string.

    The string must match the overall "XXX XXX" / "XXXX XXX" shape, and
    its outward part (everything before the final four characters) and
    inward part (the trailing " NXX") must each match their pattern.
    """
    # bool(...) over short-circuited matches replaces the original
    # nested if/else that returned True/False explicitly.
    return bool(
        re.match(GENERAL_PATTERN, postcode)
        and re.match(OUTWARDCODE_PATTERN, postcode[:-4])
        and re.match(INWARDCODE_PATTERN, postcode[-4:])
    )
|
15,441 | 18702b23c16c1fbb8cbaf7154c04d1b5384c1aea | ## Write a function that accepts an array of 10 integers (between 0 and 9), that returns a string of those numbers in the form of a phone number.
## create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) # => returns "(123) 456-7890"
def create_phone_number(n):
    """Format a list of 10 digits (0-9) as a US-style phone number.

    create_phone_number([1,2,3,4,5,6,7,8,9,0]) -> "(123) 456-7890"

    Fixes vs. the original: the formatted number is actually returned
    (``return print(...)`` always returned None), the caller's list is no
    longer mutated by insert(), and too-short lists are rejected as well
    (only ``len(n) > 10`` was guarded before).
    """
    if len(n) != 10:
        print('Número de telefone incorreto!')
        return None
    digits = ''.join(map(str, n))
    return '({}) {}-{}'.format(digits[:3], digits[3:6], digits[6:])
15,442 | 273219d52422dcbcb3ad064b101cfcba185c5a17 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import httplib, urllib
import sys
import random, string
first_player_authenticated = False #false while the first player has not registered
second_player_authenticated = False
p1score = 10
p2score = 10
first_player_id = 0
second_player_id = 0
payment_amount = .01
def get_id(auth_id):
    #Gets the 'id' number of a user based on the 'authentication id'
    # Bug fix: HTTPSConnection() takes only a host name; the original
    # passed "api.venmo.com/v1/me" as the host, which cannot resolve.
    # The path belongs in the request() call instead.
    first_conn = httplib.HTTPSConnection("api.venmo.com")
    first_conn.request("GET", "/v1/me?access_token=" + str(auth_id))
    returned_json = str(first_conn.getresponse().read())
    # NOTE(review): fragile string-split JSON parse kept as-is; consider
    # json.loads() once the exact payload shape is confirmed.
    receiver_id = returned_json.split("id\":")[1].split(",")[0][2:-1]
    print >>sys.stderr, receiver_id
    return receiver_id
def pay(sender, receiver, amount, note):
    #Makes a payment, given two authentication ids
    # NOTE(review): the *note* argument is ignored -- a random 5-char
    # string is sent instead (behaviour kept unchanged here).
    print >>sys.stderr, sender
    random_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))
    params = urllib.urlencode({'access_token':sender, 'user_id':get_id(receiver), 'amount':amount, 'note':random_string})
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    # Bug fixes: host only in the constructor with the path in request(),
    # and the response body is read BEFORE closing the connection
    # (the original closed first, risking a truncated/failed read).
    conn = httplib.HTTPSConnection("api.venmo.com")
    conn.request("POST", "/v1/payments", params, headers)
    response = conn.getresponse()
    body = response.read()
    conn.close()
    return body
class MainPage(webapp2.RequestHandler):
    #Generate the page based on the variables
    # NOTE(review): a `global` statement at class scope is a no-op; get()
    # reads the module-level globals directly anyway.
    global first_player_authenticated, second_player_authenticated
    def get(self):
        """Render an auth button per unregistered player; once both are
        registered, render a New Game (reset) button."""
        if (first_player_authenticated):
            self.response.write("First Player Registered<br>")
        else:
            self.response.write('''<form method="get" action="/request-authentication1">
                <button type="submit">Authenticate Player 1</button>
                </form>''')
        if (second_player_authenticated):
            self.response.write("Second Player Registered<br>")
        else:
            self.response.write('''<form method="get" action="/request-authentication2">
                <button type="submit">Authenticate Player 2</button>
                </form>''')
        if (first_player_authenticated and second_player_authenticated):
            self.response.write('''<form method="get" action="/reset">
                <button type="submit">New Game</button>
                </form>''')
class RequestAuthentication1(webapp2.RequestHandler):
    """Redirect player 1 to Venmo's OAuth consent page (the redirect_uri
    carries player=1 so the callback knows whose token it receives)."""
    def get(self):
        self.redirect("https://api.venmo.com/v1/oauth/authorize?client_id=1577&scope=make_payments%20access_profile&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Fvenmo_oauth%3Fplayer%3D1")
class RequestAuthentication2(webapp2.RequestHandler):
    """Redirect player 2 to Venmo's OAuth consent page (redirect_uri
    carries player=2)."""
    def get(self):
        self.redirect("https://api.venmo.com/v1/oauth/authorize?client_id=1577&scope=make_payments%20access_profile&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Fvenmo_oauth%3Fplayer%3D2")
class Authentication(webapp2.RequestHandler):
    """OAuth callback: store the access token for the given player and
    mark that player as registered."""
    def get(self):
        #self.response.write(get_id(self.request.get("access_token")))
        global first_player_authenticated, second_player_authenticated, first_player_id, second_player_id
        player = int(self.request.get("player"))
        if player == 1:
            # NOTE(review): despite the name, *_player_id holds the token.
            first_player_id = str(self.request.get("access_token"))
            first_player_authenticated = True
        elif player == 2:
            second_player_id = str(self.request.get("access_token"))
            second_player_authenticated = True
        self.redirect("http://localhost:8080")
class MakePayment(webapp2.RequestHandler):
    """Transfer payment_amount from the loser's account to the winner's."""
    def get(self):
        global first_player_id, second_player_id
        winner = int(self.request.get("winner"))
        if winner == 1:
            pay(second_player_id, first_player_id, payment_amount, "blablabla")
        elif winner == 2:
            pay(first_player_id, second_player_id, payment_amount, "blablabla")
class Reset(webapp2.RequestHandler):
    """Clear both registration flags (tokens are kept) and go home."""
    def get(self):
        global first_player_authenticated, second_player_authenticated
        first_player_authenticated = False
        second_player_authenticated = False
        self.redirect("http://localhost:8080")
class GetScore(webapp2.RequestHandler):
    #Gets the current score for the user
    def get(self):
        # Plain-text "p1&p2" payload consumed by the game client.
        self.response.write(str(p1score)+"&"+str(p2score))
class UpdateScore(webapp2.RequestHandler):
    #Updates the score given a post request from Processing
    # NOTE(review): request.get() returns strings, so after the first
    # update p1score/p2score hold str, not int; GetScore re-stringifies
    # them so the round-trip still works.
    def get(self):
        global p1score, p2score
        p1score = self.request.get("p1score")
        p2score = self.request.get("p2score")
# URL routing table for the WSGI application (debug mode enabled).
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/venmo_oauth',Authentication),
    ('/payment',MakePayment),
    ('/reset',Reset),
    ('/request-authentication1', RequestAuthentication1),
    ('/request-authentication2', RequestAuthentication2),
    ('/get-score',GetScore),
    ('/update-score', UpdateScore)
], debug=True)
15,443 | 7f5d6eb9f09b352052404cba431eb82324268153 | class Solution:
# @return a list of lists of integers
def generate(self, numRows):
    """Return the first *numRows* rows of Pascal's triangle.

    Modernization: Python 2-only ``xrange`` replaced with ``range``
    (identical iteration behaviour, Python 3 compatible), and the
    special-cased first two rows folded into one uniform construction.
    """
    triangle = []
    for i in range(numRows):
        row = [1] * (i + 1)
        # Interior entries are the sum of the two entries above them.
        for j in range(1, i):
            row[j] = triangle[i - 1][j - 1] + triangle[i - 1][j]
        triangle.append(row)
    return triangle
15,444 | dde69318f001346ed53108cd97310ac137d40590 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/9 11:53
# @Author : maxu
# dict
# Python has built-in dictionary support: dict (a "map" in other
# languages) stores key-value pairs with very fast lookup.
d ={'A':90,'B':55,'C':27}
print(d['B'])
# Besides values given at initialization, entries can be added by key.
d['D']=666
print(d['D'])
# A key maps to one value; assigning to it again overwrites the old value.
d['E'] = 888
print(d['E'])
d['E'] = 123
print(d['E'])
d = {
    'michael':95,
    'tom':90,
    'bruce':85,
}
print('d[\'michael\']=', d['michael'])
print('d[\'tom\']=', d['tom'])
print('d[\'bruce\']=',d['bruce'])
print('d.get(\'mark\',-1)=',d.get('mark',-1))
# Note: internal storage order is unrelated to insertion order.
# Compared with list: dict lookup/insert stays fast as it grows but uses
# more memory; list is compact but lookup/insert cost grows with size --
# dict trades space for time (hashing computes each value's slot from its
# key).  Keys must therefore be immutable (hashable): strings and numbers
# are fine, lists are not, because a key's hash must never change.
# set: like dict, a set is a collection of keys, but without values.
# Keys cannot repeat, so a set contains no duplicates.
# Create a set by passing a list to set():
s = set([1,2,3])
print(s)
s.add(4)
print(s)
s = set([1,1,2,2,3,3,4,4,5])  # duplicates are dropped
print(s)
s.remove(4)
print(s)
s1 =set([1,3,5])
s2 =set([1,2,3])
print(s1 & s2) # intersection
print(s1 | s2) # union
# Strings are immutable: replace() returns a NEW string and leaves the
# original unchanged.
a ='abc'
a.replace('a','A')
print(a)
b = a.replace('a','A')
print(b)
print(a)
|
15,445 | 5c4a20dcf966d3e8dbeb4c153a2de66e78d9b580 | """ https://leetcode.com/problems/reducing-dishes/
1. sort the dishes
2. define the knapsack problem
choose dish: (i+1-dis)*A[i]+dp(i+1, dis)
discard dish: dp(i+1, dis+1)
"""
from header import *
class Solution:
    def maxSatisfaction(self, A: List[int]) -> int:
        """Maximum total "like-time coefficient" over any subset and order
        of dishes (LeetCode 1402).

        Replaces the memoized O(n^2)-state DP with the standard greedy:
        after sorting, the optimal subset is always a suffix of the sorted
        array, so walk from the best dish downward and keep adding dishes
        while each new suffix sum still increases the total.

        Note: sorts A in place (ascending), exactly as the original did.
        """
        A.sort()
        best = 0
        running = 0  # suffix sum of the dishes taken so far
        for value in reversed(A):
            running += value
            if running <= 0:
                # This dish (and every worse one) can no longer help.
                break
            best += running
        return best
|
15,446 | c09626405cd0c661e1d840078a00fa6588d91ea3 | from typing import Dict, Any, List, Optional
import yaml
import json
from json import JSONEncoder
import os
import re
import datetime as dt
import pandas as pd
from pprint import pprint
from pathlib import Path
from bs4 import BeautifulSoup
from bs4.element import Tag
import numpy as np
HOME = Path( os.getenv('HOME') )
# TODO:
# Logros
# Destacado: cristian-david-montoya-saldarriaga-09638514a
# Herramientas y tecnologías
# TODO: features to extract
# Whether has resume available
# extract english level
# https://www.linkedin.com/in/luis-mario-urrea-murillo/
MY_PATH = HOME / '_data/talent'
SECS_IN_YEAR = 365.25 * 24 * 3600
COMMON_ENGLISH = {'the', 'with', 'on', 'and', 'I', 'am', 'is', 'my'}
COMMON_SPANISH = {'y', 'el', 'la', 'de', 'los', 'las'}
class _Config:
    # Input/output locations for scraped LinkedIn profiles.
    raw_profiles_path = MY_PATH / 'linkedin_raw_profiles'    # scraped HTML files
    profiles_yamls_path = MY_PATH / 'linkedin_yaml_profiles' # parsed JSON/YAML output
CFG = _Config  # module-wide alias used throughout this file
class DateTimeEncoder(JSONEncoder):
    """JSON encoder that serializes dates/datetimes as ISO-8601 strings."""
    def default(self, obj):
        """Return an ISO string for date/datetime; defer to the base
        implementation (which raises TypeError) for anything else."""
        if isinstance(obj, (dt.date, dt.datetime)):
            return obj.isoformat()
        # Bug fix: the original fell through returning None, which made
        # json.dumps silently encode unsupported objects as JSON null.
        return super().default(obj)
# Fallback representer: make yaml.safe_dump stringify any type it has no
# registered representer for, instead of raising RepresenterError.
yaml.SafeDumper.yaml_representers[None] = lambda self, data: \
    yaml.representer.SafeRepresenter.represent_str( self, str(data) )
# %%
def main():
    """Read scraped profiles parse them and write to json and yamls"""
    # %%
    CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True)
    fpaths = list( _Config.raw_profiles_path.glob('*.html') )
    print( f'{len(fpaths)} htmls found' )
    # %%
    # NOTE(review): the next cells are interactive leftovers -- each one
    # reassigns fpath/fpaths, so when run top to bottom only the LAST
    # assignment (the single israellaguan.html path) takes effect.
    fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html'
    # %%
    fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html'
    # %%
    fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ]
    # %%
    fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')]
    # %%
    dics = {}
    # %%
    for i, fpath in enumerate(fpaths):
        if fpath in dics:
            continue
        with fpath.open('rt') as f_in:
            html = f_in.read()
        print( f'\n***{i+1}/{len(fpaths)} {fpath.name}:')
        dic = extract_one( html, fpath )
        # Derive the public URL and scrape timestamp from the file itself.
        dic['linkedin_url'] = f"https://www.linkedin.com/in/{fpath.name.split('.')[0]}"
        dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime )
        # pprint(dic['work_stats'])
        dics[fpath] = dic
    dics_arr = list(dics.values())
    # %%
    del dics
    # %%
    # Persist all parsed profiles as JSON, YAML, and an Excel summary.
    with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out:
        json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 )
    # %%
    with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out:
        yaml.safe_dump( dics_arr, f_out )
    # %%
    df = produce_summary_table( dics_arr )
    df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx',
                 index=False)
# %%
def _interactive_testing( dics_arr, fpaths, html: str ):
    """IDE/REPL scratch cells; never called programmatically."""
    # %%
    # noinspection PyUnresolvedReferences
    runfile('talent-miner/extractor.py')
    # %%
    pprint( dics_arr[4] )
    # %%
    fpath = [ f for f in fpaths if str(f).find('israellaguan') >= 0 ][0]
    # %%
    doc = BeautifulSoup( html, features='html.parser' )
    # %%
    _extract_accomplishments(doc)
# %%
def _extract_accomplishments( doc: BeautifulSoup ) -> Dict[str, List[str]]:
    """Map each accomplishment block's title to its list of entry texts."""
    accomps = doc.find_all('section', {'class': 'pv-accomplishments-block'})
    # accomp0 = accomps[2]
    ret = {}
    for accomp in accomps:
        accomp_header = accomp.find_all('h3', {'class': 'pv-accomplishments-block__title'})[0].text
        accomp_vals = [ li_elem.text for li_elem in accomp.find_all('li') ]
        ret[accomp_header] = accomp_vals
    return ret
# %%
def produce_summary_table( dics: List[Dict[str, Any]]) -> pd.DataFrame:
    """Flatten the parsed profile dicts into one summary row per candidate."""
    # %%
    recs = []
    for dic in dics:
        try:
            w_stats = dic['work_stats']
            edu_stats = dic['education_stats']
            skills = dic['skills']
            rec = dict( name=dic['name'],
                        total_experience_yrs=w_stats['total_experience_yrs'],
                        n_work_positions=w_stats['n_work_positions'],
                        pos_lt1_year=w_stats['poss_lt1.2_years'],
                        pos_lt2_year=w_stats['poss_lt2_years'],
                        about=dic['about'],
                        about_eng_ratio=dic['about_stats']['about_eng_ratio'],
                        current_position=dic['current_position'],
                        has_worked_abroad=w_stats['has_worked_abroad'],
                        max_degree=edu_stats['max_degree'],
                        studied_abroad=edu_stats['has_studied_abroad'],
                        # (-1) + 1 maps "skill absent" to 0 while keeping
                        # the stored per-skill value + 1 when present.
                        ruby=(skills.get('Ruby', -1) + 1) + (skills.get('Ruby on Rails', -1) + 1),
                        python=skills.get('Python (Programming Language)', -1) + 1,
                        java=skills.get('Java', -1) + 1,
                        javascript=skills.get('JavaScript', -1) + 1,
                        cpp=skills.get('C++', -1) + 1,
                        csharp=skills.get('C#', -1) + 1,
                        skills=skills,
                        profile_text_length=dic['profile_text_stats']['length'],
                        profile_eng_ratio=dic['profile_text_stats']['eng_ratio'] * 10.0,
                        languages=",".join ( dic.get('accomplishments', {}).get('idiomas', []) ),
                        num_contacts=dic['num_contacts'],
                        location=dic['location'],
                        linkedin_url=dic['linkedin_url'],
                        scraped_at=dic['scraped_at'])
        except Exception as exc:
            # Dump the offending profile for debugging before failing.
            pprint( dic )
            raise exc
        recs.append(rec)
    df = pd.DataFrame( recs )
    # %%
    return df
# %%
def extract_one( html: str, fpath: Path ):
    """Extract data from one scraped html"""
    # %%
    doc = BeautifulSoup( html, features='html.parser')
    # The LinkedIn handle is the file name without its extension.
    ret = { 'linkedin_handle': fpath.name.split('.')[0] }
    _parse_top_card( ret, doc )
    # %%
    ret['about'] = _extract_about( doc )
    # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0:
    #     print( f"\nVer más detected: \nabout:{ret['about']} fpath={fpath}" )
    ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])}
    # %%
    ret['work_experience'] = _parse_experiences( doc )
    ret['work_stats'] = calc_work_stats( ret['work_experience'])
    # %%
    # NOTE(review): proc_skills_section is defined elsewhere in this module.
    ret['skills'] = proc_skills_section( doc )
    ret['education'] = _parse_education( doc )
    ret['education_stats'] = _education_stats( ret['education'])
    ret['accomplishments'] = _extract_accomplishments(doc)
    ret['profile_text_stats'] = profile_text_stats( doc )
    # %%
    return ret
# %%
def calc_work_stats( work_xps: List[Dict[str, Any]] ):
    """Calculate total_experience_yrs and other stats"""
    durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ]
    # None (not 0) when no durations could be parsed at all.
    total_years = sum( durations ) if durations else None
    avg_years = np.round( total_years / len(durations), 2) if durations else None
    # Counts of short stints (job-hopping signals).
    poss_lt2_years = sum( 1 for dur in durations if dur < 2.0 )
    poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 )
    has_worked_abroad = any( rec for rec in work_xps
                             if _is_location_abroad( rec.get('location_raw') ))
    return { "total_experience_yrs": total_years,
             'avg_years': avg_years,
             'n_work_positions': len(durations),
             'poss_lt2_years': poss_lt2_years,
             'poss_lt1.2_years': poss_lt1_2_years,
             'has_worked_abroad': has_worked_abroad }
# %%
def _is_location_abroad( location: Optional[str] ):
if location is None or location.strip() == '':
return False
else:
ret = not re.search( 'Colombia|Medell.n|Bogot.|Barranquilla|Cali|Pereira'
'|Caldas|Cucuta|Dosquebradas|Antioquia|Remot[eo]',
location, re.IGNORECASE)
if ret:
print( f'abroad location: {location}')
return ret
def _is_abroad_school( school: Optional[str] ):
ret = re.search(r"(University|College|\bof\b)", school)
if ret:
print( f'abroad school: {school}')
return ret
def profile_text_stats( doc: BeautifulSoup ):
    """some metrics on the whole profile text"""
    text = doc.find('main', {'class': 'core-rail'}).text.strip()
    words = text.split()
    # Crude language signal: share of very common English words, scaled x10;
    # +0.001 avoids division by zero on an empty profile.
    eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001)
    return { 'length': len( text ),
             'eng_ratio': np.round( eng_ratio, 2)}
# %%
def _extract_about( doc ) -> Optional[str]:
    """Return the 'About' section joined into one line, or None when absent.

    Strips LinkedIn's '... ver más' truncation marker from the text.
    """
    about_section = doc.find('section', {'class': 'pv-about-section'})
    if about_section is None:
        return None
    parts = about_section.find_all("p")
    return (" ".join( part.text.replace('\n', ' ').strip() for part in parts )
            .replace( '... ver más', '') )
# %%
def _parse_top_card( ret: Dict[ str, Any], doc: BeautifulSoup ):
    """Extract name, headline, location, contact count and main school
    from the profile's top card, updating *ret* in place."""
    top_card_els = doc.find_all( "ul", {"class": "pv-top-card--list"} )
    name_elem = top_card_els[0].find_all("li")[0]
    name = name_elem.text.strip()
    current_position = doc.find_all("h2", {"class": "mt1"})[0].text.strip()
    location = top_card_els[1].find_all( "li" )[0].text.strip()
    # %%
    num_contacts = _extract_num_contacts( top_card_els[1] )
    # %%
    top_card_xp = doc.find_all('a', {"class": "pv-top-card--experience-list-item"})
    main_school = top_card_xp[0].text.strip() if top_card_xp else None
    data = dict(name=name,
                current_position=current_position,
                location=location,
                num_contacts=num_contacts,
                main_school=main_school)
    ret.update(data)
# %%
def _extract_num_contacts( elem: Tag ):
    """Parse the contact count from the top card.

    Returns the exact number, 501 as a sentinel for 'Más de 500
    contactos', or implicitly None when neither pattern matches.
    """
    num_contacts_text = elem.find_all("li")[1].text.strip()
    mch = re.match(r'(\d+) contactos', num_contacts_text)
    if mch:
        return int(mch.group(1))
    mch2 = re.search(r'Más de 500 contactos', num_contacts_text)
    if mch2:
        return 501
def _parse_experiences(doc: BeautifulSoup) -> List[Dict]:
    """Parse every work-experience summary block; empty list when the
    experience section is missing from the profile."""
    # %%
    xp_section = doc.find( 'section', {'id': 'experience-section'} )
    if xp_section is None:
        return []
    # %%
    summaries = xp_section.find_all('div', {'class': 'pv-entity__summary-info'})
    ret = [ proc_employment_summary(summary) for summary in summaries ]
    return ret
# %%
def proc_employment_summary(summary: Tag) -> Dict:
    """process one employment summary and extract info from it"""
    xp_record = dict()
    xp_record['position'] = summary.find('h3').text.strip()
    company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0]
    # Collapse the multi-line company cell into a single ";"-joined string.
    xp_record['company'] = "; ".join( [ line.strip() for line in company.text.split('\n')
                                        if line.strip() != ''] )
    # %%
    # Each h4 holds a (label, value) span pair; dispatch on the Spanish
    # label text that LinkedIn renders.
    for xp_line in summary.find_all('h4'):
        fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ]
        if fld_name == 'Fechas de empleo':
            xp_record['period_raw'] = value
            period = _extract_period( value )
            xp_record['period'] = period
            # print( period )
            # Duration in (fractional) years from the parsed date pair.
            xp_record['duration'] = np.round( (period[1] - period[0]).total_seconds()
                                              / SECS_IN_YEAR, 2)
        elif fld_name == 'Duración del empleo':
            xp_record['duration_raw'] = value
        elif fld_name == 'Ubicación':
            xp_record['location_raw'] = value
            # print( f'location: {value}')
        elif fld_name.startswith('LinkedIn me ayud'):
            continue
        else:
            # Unrecognized label: log it so the parser can be extended.
            print( "proc_employment_summary: ", fld_name, value )
    # %%
    # pprint( xp_record )
    # %%
    return xp_record
# %%
def _extract_period( period_raw: str ):
mch2 = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . '
r'(?P<mes2>[a-z]+)\. de (?P<year2>[0-9]+)', period_raw)
if mch2:
# print('mch2', mch2, mch2.group("year1"), mch2.group("year2"))
mes1, mes2 = _translate_mes(mch2.group("mes1")), _translate_mes(mch2.group("mes2"))
return ( dt.date(int(mch2.group("year1")), int( mes1 ), 1),
dt.date(int(mch2.group("year2")), int( mes2 ), 1) )
mch1 = re.match(r'(?P<mes>[a-z]+)\. de (?P<year>[0-9]+)( . actualidad)?', period_raw)
if mch1:
# print('mch1')
mes = _translate_mes(mch1.group("mes"))
return dt.date(int(mch1.group("year")), mes, 1), dt.date.today()
mch2b = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . (?P<year2>[0-9]{4})', period_raw)
if mch2b:
mes1 = _translate_mes(mch2b.group("mes1"))
return ( dt.date(int(mch2b.group("year1")), int(mes1), 1),
dt.date(int(mch2b.group("year2")), 1, 1) )
mch3 = re.match(r'(?P<year1>[0-9]{4}) . (?P<year2>[0-9]{4})', period_raw)
if mch3:
return (dt.date(int(mch3.group("year1")), 1, 1),
dt.date(int(mch3.group("year2")), 1, 1))
mch4 = re.match(r'(?P<year1>[0-9]{4})', period_raw)
if mch4:
return (dt.date(int(mch4.group("year1")), 1, 1),
dt.date(int(mch4.group("year1")) + 1, 1, 1))
assert False, period_raw
# %%
def _interactive_test():
    """REPL scratch cells exercising _extract_period; not called
    programmatically."""
    # %%
    period_raw = 'ene. de 2015 – actualidad'
    # %%
    period_raw = 'ene. de 2015 – may. de 2015'
    print( _extract_period( period_raw ) )
    # %%
    period_raw = 'ene. de 2012 – may. de 2013'
    print(_extract_period(period_raw))
# %%
def _translate_mes( mes: str) -> int:
return {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}[mes]
def _common_english_ratio( a_text: str ) -> Optional[float]:
    """Share of very common English words in *a_text*, scaled to 0-10.

    Returns None for None input.  (Annotation corrected: the function
    returns a rounded float via np.round, never an int.)
    """
    if a_text is None:
        return None
    words = a_text.split()
    cnt_english = sum( 1 for word in words if word in COMMON_ENGLISH )
    # +0.001 guards against division by zero on empty text.
    return np.round( cnt_english / (len(words) + 0.001) * 10, 2)
def _parse_education(doc: BeautifulSoup) -> List[Dict]:
    """Extract one record per education entry from a parsed profile page.

    Returns an empty list when the page has no education section.
    """
    section = doc.find('section', {'id': 'education-section'})
    if section is None:
        return []
    entries = section.find_all('li', {'class': 'pv-education-entity'})
    records = []
    for entry in entries:
        records.append(proc_education_summary(entry))
    return records
# %%
def _education_stats( edu_records: List[Dict[str, str]]):
    """Aggregate per-profile education indicators from individual records."""
    studied_abroad = False
    for rec in edu_records:
        if rec['is_abroad_school']:
            studied_abroad = True
            break
    return {'has_studied_abroad': studied_abroad,
            'max_degree': _max_degree(edu_records)}
def proc_education_summary( summary: Tag ) -> Dict[str, str]:
    """Process one education summary and generate a record.

    Extracts the school name plus the labelled <span> pairs (degree, field,
    grade, study period, activities) that the page renders inside each
    education <li>.  Unrecognised labels are printed and skipped.
    """
    edu_record = dict()
    # The first <h3> of the entry holds the school name.
    edu_record['school'] = summary.find('h3').text.strip()
    edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] )
    for parag in summary.find_all('p'):
        spans = [span.text.strip() for span in parag.find_all('span')]
        if len( spans ) == 2:
            # Labelled field: first span is the (Spanish) label, second the value.
            fld_name, value = spans
            value = value.strip()
        elif len(spans) == 0:
            # A <p> without spans is the free-form description block.
            # print( 'education parag: ', parag )
            edu_record['description'] = parag.text.strip()
            continue
        else:
            # Unexpected span layout: log it and move on.
            print( 'education spans: ', spans )
            continue
        if fld_name == 'Nombre de la titulación':
            edu_record['degree_raw'] = value
            edu_record['degree'] = _classify_degree( value )
            # print( 'degree: ', value, _classify_degree(value) )
        elif fld_name == 'Disciplina académica':
            edu_record['field_raw'] = value
        elif fld_name == 'Nota':
            edu_record['grade_raw'] = value
        elif fld_name.startswith('Fechas de estudios'):
            edu_record['period_raw'] = value
        elif fld_name.startswith('Actividades y asociaciones'):
            edu_record['activities_raw'] = value
        else:
            # Unknown label: log it so new page layouts get noticed.
            print("proc_education_summary: ", fld_name, ' :: ', value)
    # Fallback: an engineering field implies a university-level degree.
    if edu_record.get('degree', 'Unknown') == 'Unknown':
        if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ):
            edu_record['degree'] = 'University'
    return edu_record
# %%
def _classify_degree( degree: str ) -> str:
if re.search('Ingenier|Engineer', degree):
return 'University'
elif re.search('^Tecn.log', degree):
return 'Tecnología'
elif re.search('^Mae?ste?r', degree):
return 'Master''s'
elif re.search('^Dimplom', degree):
return 'Diploma'
elif re.search('^(Esp\.|Especializ)', degree):
return 'Specialization'
elif re.search('^Phd', degree, re.IGNORECASE):
return 'PhD'
else:
return 'Unknown'
DEGREE_LEVELS = {'Tecnología': 1, 'University': 2, 'Diploma': 3,
'Specialization': 4, 'Master''s': 5, 'PhD': 5, 'Unknown': -1}
def _max_degree(edu_records: List[Dict[str, str]]) -> Optional[str] :
levels = DEGREE_LEVELS
if len(edu_records) > 0:
return max( [rec.get('degree', 'Unknown') for rec in edu_records ],
key=lambda x: levels[x])
else:
return None
def proc_skills_section( doc: BeautifulSoup ):
    """Extract ``{skill name: endorsement count}`` from the skills section.

    A skill row with >= 3 non-empty spans carries the endorsement count at the
    start of the third span; a row with a single span is a skill with zero
    endorsements.  Returns ``{}`` when the page has no skills section.
    """
    skills_section = doc.find('section', {'class': 'pv-skill-categories-section'})
    if skills_section is None:
        return {}
    divs = skills_section.find_all('div', {'class': 'pv-skill-category-entity__skill-wrapper'})
    ret = {}
    for div in divs:
        texts = [ span.text.strip() for span in div.find_all('span') if span.text.strip() != '' ]
        if len(texts) >= 3:
            key = texts[0]
            mch = re.match(r'(\d+)', texts[2])
            if mch:
                ret[key] = int( mch.group(1))
            else:
                print( f"skills {len(texts)} spans: {texts}")
                ret[key] = None
        elif len(texts) == 1:
            # BUGFIX: this branch used to hang off an outer `if len(texts) >= 1`,
            # which already swallowed the len == 1 case, so single-span skills
            # were silently dropped instead of being recorded with 0 endorsements.
            ret[texts[0]] = 0
        else:
            print(f"skills {len(texts)} spans: {texts}")
    return ret
15,447 | 8b614955cfbf5bebfbf86b438adfbad5665c6681 | import os
import paho.mqtt.client as mqtt
from flask import Flask, render_template, send_from_directory
# MQTT client used by the '/' route to publish test messages.
# NOTE(review): connect() runs at import time and will raise if the broker is
# unreachable — confirm that is acceptable for this app's deployment.
client = mqtt.Client()
client.connect("moorhouseassociates.com", 1883, 60)
app = Flask(__name__)
@app.route('/css/<path:path>')
def send_css(path):
    """Serve a static stylesheet from the local ``css`` directory."""
    stylesheet_dir = 'css'
    return send_from_directory(stylesheet_dir, path)
@app.route('/')
def btn():
    """Publish a fixed test message to the MQTT broker when '/' is hit.

    NOTE(review): ``index`` further down also registers the '/' rule; only one
    handler can actually serve the site root — confirm which one is intended.
    """
    print("button clicked")
    client.publish("test/all", "hello guyz.......@Gomah")
    return ""
@app.route('/img/<path:path>')
def send_js(path):
    """Serve a static image from the local ``img`` directory.

    NOTE(review): the function is named ``send_js`` but serves images —
    presumably a copy-paste leftover; renaming it would change the Flask
    endpoint name, so it is only flagged here.
    """
    return send_from_directory('img', path)
@app.route('/linux')
def linux():
    """Render the Linux page."""
    template_name = 'linux.html'
    return render_template(template_name)
@app.route('/python')
def python():
    """Render the Python page."""
    template_name = 'python.html'
    return render_template(template_name)
@app.route('/')
def index():
    """Render the landing page.

    NOTE(review): the '/' rule is also registered by ``btn`` above, so this
    handler may never be reached — confirm which one should own the root URL.
    """
    return render_template("index.html")
@app.route('/hello/<name>')
def foo(name):
    """Render the index template greeting ``name`` (exposed as ``to``)."""
    context = {'to': name}
    return render_template('index.html', **context)
@app.route('/whereami')
def whereami():
    """Return the hard-coded server location string."""
    location = "Koforidua"
    return location
|
15,448 | 6bd102d64c86860d0718e0f634a7da07b09a8d32 | import re
class Container(object):
    """Abstract base that gives subclasses list-like behaviour.

    Elements live in ``self.items``; the class supports ``append``,
    indexing, iteration and ``len``.
    """

    def __init__(self, items=None):
        """Create the container, optionally pre-filled with *items*.

        :param items: Items to preset the container with
        """
        self.items = [] if items is None else items

    def append(self, item):
        """Add *item* to the end of the container.

        :param item: Item to add
        """
        self.items.append(item)

    def __getitem__(self, idx):
        """Support ``container[idx]`` access.

        :param idx: Index which should be accessed
        :return: Item stored at *idx*
        """
        return self.items[idx]

    def __iter__(self):
        """Iterate over the stored items in order."""
        return iter(self.items)

    def __len__(self):
        """Number of items in this container."""
        return len(self.items)
class Tok(object):
    """
    Class to represent a token defined by a token kind and the token value. This class is used for the input tokens,
    as well as for the rules in the grammar defining the expected tokens.
    """
    # NOTE(review): __eq__ is defined without __hash__, which makes Tok
    # unhashable in Python 3 — confirm tokens are never stored in sets/dicts.
    def __init__(self, kind, value=None, neg_kind=False, neg_value=False):
        """
        Create a new Token representation.
        :param kind: The token kind (the token kinds need to be defined from the outside).
                     For tokens from the input stream, this needs to be a defined value.
                     For matching tokens in a rule (by using == or __eq__), this could be:
                     - None (default): this matches every kind of the input token
                     - Kind: the kind of the input token must match the output token
                     - List of kinds: the input token kind must be in this list
        :param value: For tokens from the input stream, this is the value found.
                      For matching tokens in a rule (by using == or __eq__), this could be:
                      - None (default): this matches every value of the input token
                      - Value: the value of the input token must match the output token
                      - List of values: the input token value must be in this list
        :param neg_kind: If this is True, matching for the kind is negated
        :param neg_value: If this is True, matching for the value is negated
        """
        self._kind = kind
        self._value = value
        self.neg_kind = neg_kind
        self.neg_value = neg_value
    @property
    def kind(self):
        """
        Get the kind of this token.
        :return: The kind of this token
        """
        return self._kind
    @property
    def value(self):
        """
        Get the value of this token.
        :return: The value of this token
        """
        return self._value
    def __eq__(self, other):
        """
        Compare two tokens (used for matching).
        :param other: Other token to compare this one with
        :return: True if tokens equal, False otherwise
        """
        # Normalise kind and value on both sides to lists so that matching
        # becomes a set-intersection test ("any of these kinds/values").
        if not isinstance(other.kind, list):
            k0 = [other.kind]
        else:
            k0 = other.kind
        if not isinstance(other.value, list):
            v0 = [other.value]
        else:
            v0 = other.value
        if not isinstance(self.kind, list):
            k1 = [self.kind]
        else:
            k1 = self.kind
        if not isinstance(self.value, list):
            v1 = [self.value]
        else:
            v1 = self.value
        # A negation requested on either side negates that part of the match.
        neg_kind = self.neg_kind or other.neg_kind
        neg_value = self.neg_value or other.neg_value
        if v0 == [None] or v1 == [None]:
            # A None value on either side is a wildcard: only kinds are compared.
            # NOTE(review): neg_value is ignored on this wildcard path — confirm intended.
            result = (len(list(set(k0) & set(k1))) and not neg_kind)
        else:
            # Non-empty intersection of kinds AND of values (each optionally negated).
            k = len(list(set(k0) & set(k1)))
            v = len(list(set(v0) & set(v1)))
            if neg_kind:
                k = not k
            if neg_value:
                v = not v
            result = k and v
        return bool(result)
    def __repr__(self):
        """
        Represent this token.
        :return: String to represent this token
        """
        # Quote string kinds/values so the repr distinguishes 'x' from x.
        if isinstance(self.kind, str):
            k = "'%s'" % self.kind
        else:
            k = str(self.kind)
        if isinstance(self.value, str):
            v = "'%s'" % self.value
        else:
            v = str(self.value)
        return "Token(%s, %s, neg_kind=%s, neg_value=%s)" % (k, v, self.neg_kind, self.neg_value)
class Consumer(Container):
    """Abstract base class all concrete consumers (AND/OR/MULT) inherit from."""

    def __init__(self, *args, **kwargs):
        """Build a consumer from tokens and/or nested consumers.

        :param args: Tokens (Tok) and consumers (AND, OR, MULT)
        :param kwargs: ``action`` — callback invoked with the consumed tokens
                       after this consumer matched successfully
        """
        Container.__init__(self, list(args))
        self.action = kwargs.get("action")

    def __repr__(self):
        """Represent the consumer, its sub-items and its action, as a string."""
        if isinstance(self.items, list):
            params = ", ".join(entry.__repr__() for entry in self.items)
        else:
            params = ""
        action_name = self.action.__name__ if self.action is not None else None
        return "%s(%s, action=%s)" % (self.__class__.__name__, params, action_name)

    def match(self, inp):
        """Base implementation: consume nothing.

        :param inp: List of input tokens
        :return: Number of tokens consumed from the input (always 0 here)
        """
        return 0
class AND(Consumer):
    """
    AND consumer: every token or nested consumer must match, in order, for
    anything to be consumed from the input token list.
    """

    def match(self, inp):
        """
        Try to match all sub-items against the input; consume only when every
        sub-item matched (AND semantics).  Invokes ``action`` on success.

        :param inp: List of input tokens
        :return: Number of tokens consumed from the input
        """
        pending = len(self.items)   # sub-items still unmatched
        matches = 0                 # total tokens consumed so far
        work = inp                  # not-yet-consumed tail of the input
        for t in self.items:
            if isinstance(t, Consumer) and len(work):
                r = t.match(work)
                if r:
                    pending -= 1
                    matches += r
                    # BUGFIX: advance by what *this* sub-consumer consumed (r),
                    # not by the running total; the old ``work = work[matches:]``
                    # skipped too many tokens whenever a second nested consumer
                    # matched after the first one.
                    work = work[r:]
            elif len(work):
                if work[0] == t:
                    matches += 1
                    work = work[1:]
                    pending -= 1
                else:
                    return 0
        if pending > 0:
            matches = 0
        elif self.action is not None:
            self.action(inp[:matches])
        return matches
class OR(Consumer):
    """
    OR consumer: consumes as soon as any one of its sub-items matches.
    """

    def match(self, inp):
        """
        Try the sub-items in order and consume the tokens of the first one
        that matches (OR semantics).  Invokes ``action`` on success.

        :param inp: List of input tokens
        :return: Number of tokens consumed from the input
        """
        matches = 0
        work = inp
        for i in self.items:
            if isinstance(i, Consumer) and len(work):
                matches += i.match(work)
                if matches:
                    # BUGFIX: break (instead of returning here) so the
                    # ``action`` callback below also fires when the winning
                    # alternative is a nested consumer — the old early return
                    # skipped it, unlike the plain-token branch.
                    break
            elif len(work):
                if work[0] == i:
                    matches += 1
                    break
        if matches and self.action is not None:
            self.action(inp[:matches])
        return matches
class MULT(Consumer):
    """
    Repeats its single sub-consumer until no more tokens can be consumed.
    """

    def match(self, inp):
        """
        Apply the wrapped consumer as often as it keeps matching.  Invokes
        ``action`` with all tokens consumed across the repetitions.

        :param inp: List of input tokens
        :return: Number of tokens consumed from the input
        """
        matches = 0
        rest = inp
        while True:
            m = self.items[0].match(rest)
            if m > 0:
                matches += m
                rest = rest[m:]
            else:
                break
        if matches and self.action is not None:
            # BUGFIX: slice the *original* input.  The old code rebound ``inp``
            # while looping, so the action received the unconsumed remainder
            # instead of the tokens that were actually matched.
            self.action(inp[:matches])
        return matches
class Rule(object):
    """
    A single rule of the grammar, wrapping one root consumer.
    """

    def __init__(self, root_cons):
        """Create a rule driven by *root_cons* (must be a Consumer)."""
        assert isinstance(root_cons, Consumer)
        self.root_cons = root_cons

    def match(self, inp):
        """Apply the rule to *inp* and return how many tokens it consumed.

        :param inp: Input tokens
        :return: Number of tokens that could be consumed from the input
        """
        return self.root_cons.match(inp)

    def __repr__(self):
        """Represent the rule and its root consumer as a string."""
        return "%s(%s)" % (self.__class__.__name__, self.root_cons.__repr__())
class Grammar(Container):
    """
    The whole grammar: simply a container of Rule objects.
    """

    def __init__(self, rules=None):
        """Create a grammar from an optional list of rules."""
        super().__init__(rules)
class Scanner:
    """
    Regex scanner used by the tokenizer.

    Delegates to ``re.Scanner``, the maintained stdlib version of the exact
    sre_parse/sre_compile recipe the original code hand-rolled: the old code
    used ``sre_parse.Pattern`` (renamed to ``State`` in Python 3.8) and the
    pre-3.6 two-element SUBPATTERN layout, so it no longer runs on modern
    Python versions.
    """

    def __init__(self, lexicon, flags=0):
        """
        Create a scanner from a lexicon.

        :param lexicon: List of ``(<regex>, action)`` pairs, where ``action``
                        is ``None`` (skip the match) or a callable
                        ``action(scanner, matched_text)``.
        :param flags: Extra regex flags.  ``re.MULTILINE`` is always added,
                      matching the original implementation's compile call.
        """
        self.lexicon = lexicon
        # NOTE: actions now receive the underlying re.Scanner as their first
        # argument; it exposes ``.match`` exactly like the old code did.
        self.scanner = re.Scanner(lexicon, re.MULTILINE | flags)

    def scan(self, string):
        """
        Scan the input string.

        :param string: Input string to scan
        :return: Tuple ``(tokens, remainder)`` — the non-None action results
                 and the unmatched tail of the input
        """
        return self.scanner.scan(string)
class Tokenizer(Container):
    """
    A tokenizer: a container of ``(<regex>, action)`` patterns.
    """

    def __init__(self, patterns=None):
        """Create a tokenizer from an optional list of patterns.

        :param patterns: List of ``(<regex>, lambda scanner, token: Tok(...))``
        """
        Container.__init__(self, patterns)

    def tokenize(self, inp):
        """Run the patterns over *inp* and return the resulting token list.

        :param inp: Input string
        :return: List of tokens (Tok)
        """
        return Scanner(self.items).scan(inp)[0]

    def __repr__(self):
        """String representation of this Tokenizer."""
        return "%s()" % self.__class__.__name__
class Parser(object):
    """
    Complete parser: a Tokenizer produces tokens, a Grammar consumes them.
    """

    def __init__(self, tokenizer, grammar):
        """
        Create a parser.

        :param tokenizer: The tokenizer to use (Tokenizer)
        :param grammar: The grammar to use (Grammar)
        """
        assert isinstance(tokenizer, Tokenizer)
        assert isinstance(grammar, Grammar)
        self.tokenizer = tokenizer
        self.grammar = grammar

    def parse(self, inp):
        """
        Tokenize *inp*, then repeatedly apply every grammar rule until no rule
        makes progress any more.

        :param inp: Input string
        :return: Tuple: True/False on success/failure, tokens not parsed
        """
        tokens = self.tokenizer.tokenize(inp)
        remaining = len(tokens)
        while remaining:
            for rule in self.grammar:
                consumed = rule.match(tokens)
                tokens = tokens[consumed:]
            if len(tokens) < remaining:
                remaining = len(tokens)
            else:
                break  # nothing is matching any more — stop
        return len(tokens) == 0, tokens

    def __repr__(self):
        """String representation of this Parser."""
        tok = self.tokenizer.__repr__() if self.tokenizer is not None else None
        gr = self.grammar.__repr__() if self.grammar is not None else None
        return "%s(%s, %s)" % (self.__class__.__name__, tok, gr)
|
15,449 | 54109345febac6475126f30deda05578acb174d9 | from torchvision import transforms
from torchvision.datasets import MNIST
import torch
from PIL import Image
import numpy as np
from tqdm import tqdm
class MNISTInvase(MNIST):
    """MNIST variant for INVASE: flattens each image and returns a third slot.

    ``__getitem__`` yields ``(flat_image, target, -1)``; the trailing ``-1``
    is a placeholder because the ground-truth feature mask ``G`` is undefined
    for MNIST.

    NOTE: the original class defined an ``__init__`` that only forwarded
    *args/**kwargs to ``MNIST.__init__`` unchanged; the redundant override
    has been removed.
    """

    def __getitem__(self, index):
        """Return ``(flattened image tensor, target, -1)`` for *index*."""
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets:
        # go through a PIL Image so the usual transforms apply
        img = Image.fromarray(img.numpy(), mode='L')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        img = img.view(-1)  # flatten 28x28 -> 784 vector
        # Below -1 is due to G (feature-importance ground truth) being undefined
        return img, target, -1
def one_hot(arr, num_classes=None):
    """One-hot encode a 1-D tensor of integer class labels.

    :param arr: 1-D integer tensor of class labels.
    :param num_classes: Width of the encoding.  Defaults to ``arr.max() + 1``
                        (the original behaviour); pass it explicitly when a
                        batch might not contain the highest class.
    :return: Float tensor of shape ``(len(arr), num_classes)``.
    """
    if num_classes is None:
        num_classes = int(arr.max()) + 1
    temp = torch.zeros((arr.shape[0], num_classes))
    temp[torch.arange(arr.shape[0]), arr] = 1
    return temp
def get_mnist(args):
    """Build train/test DataLoaders for the INVASE-style MNIST dataset.

    :param args: namespace with optional ``batch_size`` and ``workers`` fields
                 (falsy values fall back to the defaults below).
    :return: (train_loader, test_loader)
    """
    base_path = "./data-dir"
    batch_size = args.batch_size if args.batch_size else 256
    # NOTE(review): the test loader also falls back to args.batch_size —
    # presumably this was meant to read args.test_batch_size; confirm.
    test_batch_size = args.batch_size if args.batch_size else 512
    num_workers = args.workers if args.workers else 4
    # Standard MNIST normalisation constants (mean/std of the training set).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_data = MNISTInvase(base_path, train=True, download=True,
                             transform=transform)
    # Metadata consumed by downstream code; only attached to the train split.
    # NOTE(review): test_data does not receive these attributes — confirm intended.
    train_data.means = (0.1307,)
    train_data.stds = (0.3081,)
    train_data.bounds = [0, 1]
    train_data.input_size = 784
    train_data.output_size = 10
    # Targets are stored one-hot for both splits.
    train_data.targets = one_hot(train_data.targets)
    test_data = MNISTInvase(base_path, train=False,
                            transform=transform)
    test_data.targets = one_hot(test_data.targets)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                               shuffle=True, num_workers=num_workers)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=test_batch_size,
                                              shuffle=False, num_workers=num_workers)
    return train_loader, test_loader
|
15,450 | 2a3e02a24dc30cf064ce08ee2dcde1db746443b7 | from flask import Flask
app = Flask(__name__)
# Imported *after* the app object exists — presumably because flaskr.main and
# flaskr.db import ``app`` from this module (circular-import pattern); confirm.
import flaskr.main
from flaskr import db
# Ensure the books table exists before the first request is served.
db.create_books_table()
15,451 | 985b3c2c2443b32b3db3ce86bb89ef11e913a555 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'add_vin_regiune.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Auto-generated (pyuic5) form class for the 'add wine by region' dialog.

    This file is produced from 'add_vin_regiune.ui' by PyQt5 5.11.3 —
    regenerate it from the .ui file instead of editing by hand.
    """
    def setupUi(self, Dialog):
        """Create and lay out all widgets on *Dialog* (no signals wired here)."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(515, 312)
        self.layoutWidget = QtWidgets.QWidget(Dialog)
        self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 492, 285))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        # Columns 0/1: labels + line edits for the wine identity fields.
        self.label_65 = QtWidgets.QLabel(self.layoutWidget)
        self.label_65.setObjectName("label_65")
        self.gridLayout.addWidget(self.label_65, 0, 0, 1, 1)
        self.denumire_generica_2 = QtWidgets.QLineEdit(self.layoutWidget)
        self.denumire_generica_2.setObjectName("denumire_generica_2")
        self.gridLayout.addWidget(self.denumire_generica_2, 0, 1, 1, 1)
        self.label_73 = QtWidgets.QLabel(self.layoutWidget)
        self.label_73.setObjectName("label_73")
        self.gridLayout.addWidget(self.label_73, 1, 0, 1, 1)
        self.soi_struguri_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.soi_struguri_4.setObjectName("soi_struguri_4")
        self.gridLayout.addWidget(self.soi_struguri_4, 1, 1, 1, 1)
        self.label_68 = QtWidgets.QLabel(self.layoutWidget)
        self.label_68.setObjectName("label_68")
        self.gridLayout.addWidget(self.label_68, 2, 0, 1, 1)
        self.tara_origine_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.tara_origine_4.setObjectName("tara_origine_4")
        self.gridLayout.addWidget(self.tara_origine_4, 2, 1, 1, 1)
        self.label_69 = QtWidgets.QLabel(self.layoutWidget)
        self.label_69.setObjectName("label_69")
        self.gridLayout.addWidget(self.label_69, 3, 0, 1, 1)
        self.zona_geografica_2 = QtWidgets.QLineEdit(self.layoutWidget)
        self.zona_geografica_2.setObjectName("zona_geografica_2")
        self.gridLayout.addWidget(self.zona_geografica_2, 3, 1, 1, 1)
        # Bottom-right action buttons (Clear / Insert in DB).
        self.pushButton_14 = QtWidgets.QPushButton(self.layoutWidget)
        self.pushButton_14.setObjectName("pushButton_14")
        self.gridLayout.addWidget(self.pushButton_14, 10, 2, 1, 1)
        self.pushButton_15 = QtWidgets.QPushButton(self.layoutWidget)
        self.pushButton_15.setObjectName("pushButton_15")
        self.gridLayout.addWidget(self.pushButton_15, 10, 3, 1, 1)
        self.label_67 = QtWidgets.QLabel(self.layoutWidget)
        self.label_67.setObjectName("label_67")
        self.gridLayout.addWidget(self.label_67, 4, 0, 1, 1)
        self.label_66 = QtWidgets.QLabel(self.layoutWidget)
        self.label_66.setObjectName("label_66")
        self.gridLayout.addWidget(self.label_66, 5, 0, 1, 1)
        self.label_61 = QtWidgets.QLabel(self.layoutWidget)
        self.label_61.setObjectName("label_61")
        self.gridLayout.addWidget(self.label_61, 6, 0, 1, 1)
        self.label_62 = QtWidgets.QLabel(self.layoutWidget)
        self.label_62.setObjectName("label_62")
        self.gridLayout.addWidget(self.label_62, 7, 0, 1, 1)
        self.label_58 = QtWidgets.QLabel(self.layoutWidget)
        self.label_58.setObjectName("label_58")
        self.gridLayout.addWidget(self.label_58, 8, 0, 1, 1)
        self.producator_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.producator_4.setObjectName("producator_4")
        self.gridLayout.addWidget(self.producator_4, 4, 1, 1, 1)
        self.procent_alcool_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.procent_alcool_4.setObjectName("procent_alcool_4")
        self.gridLayout.addWidget(self.procent_alcool_4, 5, 1, 1, 1)
        self.cantitate_zahar_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.cantitate_zahar_4.setObjectName("cantitate_zahar_4")
        self.gridLayout.addWidget(self.cantitate_zahar_4, 6, 1, 1, 1)
        self.culoare_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.culoare_4.setObjectName("culoare_4")
        self.gridLayout.addWidget(self.culoare_4, 7, 1, 1, 1)
        self.recipient_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.recipient_4.setObjectName("recipient_4")
        self.gridLayout.addWidget(self.recipient_4, 8, 1, 1, 1)
        self.label_72 = QtWidgets.QLabel(self.layoutWidget)
        self.label_72.setObjectName("label_72")
        self.gridLayout.addWidget(self.label_72, 9, 0, 1, 1)
        self.volum_2 = QtWidgets.QLineEdit(self.layoutWidget)
        self.volum_2.setObjectName("volum_2")
        self.gridLayout.addWidget(self.volum_2, 9, 1, 1, 1)
        # Columns 2/3: stock, price, vintage, shelf-life and description.
        self.label_64 = QtWidgets.QLabel(self.layoutWidget)
        self.label_64.setObjectName("label_64")
        self.gridLayout.addWidget(self.label_64, 0, 2, 1, 1)
        self.label_60 = QtWidgets.QLabel(self.layoutWidget)
        self.label_60.setObjectName("label_60")
        self.gridLayout.addWidget(self.label_60, 1, 2, 1, 1)
        self.label_70 = QtWidgets.QLabel(self.layoutWidget)
        self.label_70.setObjectName("label_70")
        self.gridLayout.addWidget(self.label_70, 2, 2, 1, 1)
        self.label_63 = QtWidgets.QLabel(self.layoutWidget)
        self.label_63.setObjectName("label_63")
        self.gridLayout.addWidget(self.label_63, 3, 2, 1, 1)
        self.label_59 = QtWidgets.QLabel(self.layoutWidget)
        self.label_59.setObjectName("label_59")
        self.gridLayout.addWidget(self.label_59, 4, 2, 1, 1)
        self.numar_unitati_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.numar_unitati_4.setObjectName("numar_unitati_4")
        self.gridLayout.addWidget(self.numar_unitati_4, 0, 3, 1, 1)
        self.pret_4 = QtWidgets.QLineEdit(self.layoutWidget)
        self.pret_4.setObjectName("pret_4")
        self.gridLayout.addWidget(self.pret_4, 1, 3, 1, 1)
        self.an_productie = QtWidgets.QLineEdit(self.layoutWidget)
        self.an_productie.setObjectName("an_productie")
        self.gridLayout.addWidget(self.an_productie, 2, 3, 1, 1)
        self.timp_pastrare = QtWidgets.QLineEdit(self.layoutWidget)
        self.timp_pastrare.setObjectName("timp_pastrare")
        self.gridLayout.addWidget(self.timp_pastrare, 3, 3, 1, 1)
        self.descriere_4 = QtWidgets.QTextEdit(self.layoutWidget)
        self.descriere_4.setObjectName("descriere_4")
        self.gridLayout.addWidget(self.descriere_4, 5, 2, 5, 2)
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Assign the (Romanian) display texts to all labels and buttons."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label_65.setText(_translate("Dialog", "Denumire generica"))
        self.label_73.setText(_translate("Dialog", "Soi struguri"))
        self.label_68.setText(_translate("Dialog", "Tara origine"))
        self.label_69.setText(_translate("Dialog", "Zona geografica"))
        self.pushButton_14.setText(_translate("Dialog", "Clear"))
        self.pushButton_15.setText(_translate("Dialog", "Insert in DB"))
        self.label_67.setText(_translate("Dialog", "Producator"))
        self.label_66.setText(_translate("Dialog", "Procent alcool"))
        self.label_61.setText(_translate("Dialog", "Cantitate zahar"))
        self.label_62.setText(_translate("Dialog", "Culoare"))
        self.label_58.setText(_translate("Dialog", "Recipient"))
        self.label_72.setText(_translate("Dialog", "Volum"))
        self.label_64.setText(_translate("Dialog", "Numar unitati"))
        self.label_60.setText(_translate("Dialog", "Pret"))
        self.label_70.setText(_translate("Dialog", "An productie"))
        self.label_63.setText(_translate("Dialog", "Timp pastrare"))
        self.label_59.setText(_translate("Dialog", "Descriere"))
|
15,452 | 5685ad5c83e1c2a7f8c52bfd237d24268aab57dd | #!/usr/bin/env python
from boxoffice.models import *
from datetime import date
from dateutil.relativedelta import relativedelta
def init_data():
    """Drop and recreate all tables, then seed the Rootconf 2016 demo data.

    Creates one user/organization, an item collection with categories,
    tickets and merchandise with prices, and a set of discount policies and
    coupons used by the test suite.
    """
    db.drop_all()
    db.create_all()
    user = User(userid="U3_JesHfQ2OUmdihAXaAGQ", email="test@hasgeek.com")
    db.session.add(user)
    db.session.commit()
    one_month_from_now = date.today() + relativedelta(months=+1)
    rootconf = Organization(title='Rootconf', userid="U3_JesHfQ2OUmdihAXaAGQ",
        status=0, contact_email=u'test@gmail.com',
        details={'service_tax_no': 'xx', 'address': u'<h2 class="company-name">XYZ</h2> <p>Bangalore - 560034</p> <p>India</p>', 'cin': u'1234', 'pan': u'abc', 'website': u'https://www.test.com'})
    db.session.add(rootconf)
    db.session.commit()
    rc2016 = ItemCollection(title='2016', organization=rootconf)
    db.session.add(rc2016)
    db.session.commit()
    category_conference = Category(title='Conference', item_collection=rc2016, seq=1)
    db.session.add(category_conference)
    category_workshop = Category(title='Workshop', item_collection=rc2016, seq=2)
    db.session.add(category_workshop)
    category_merch = Category(title='Merchandise', item_collection=rc2016, seq=3)
    db.session.add(category_merch)
    db.session.commit()
    # NOTE(review): categories are created with title='Conference' etc. but
    # queried below via filter_by(name='conference') — this relies on the
    # model auto-deriving a lowercase `name` from the title; confirm, else
    # these lookups return None.
    conf_ticket = Item(title='Conference ticket', description='<p><i class="fa fa-calendar"></i>14 - 15 April 2016</p><p><i class="fa fa-map-marker ticket-venue"></i>MLR Convention Center, JP Nagar</p><p>This ticket gets you access to rootconf conference on 14th and 15th April 2016.</p>', item_collection=rc2016, category=Category.query.filter_by(name='conference').first(), quantity_total=1000)
    db.session.add(conf_ticket)
    db.session.commit()
    price = Price(item=conf_ticket, title='Super Early Geek', start_at=date.today(), end_at=one_month_from_now, amount=3500)
    db.session.add(price)
    db.session.commit()
    single_day_conf_ticket = Item(title='Single Day', description='<p><i class="fa fa-calendar"></i>14 April 2016</p><p><i class="fa fa-map-marker ticket-venue"></i>MLR Convention Center, JP Nagar</p><p>This ticket gets you access to rootconf conference on 14th April 2016.</p>', item_collection=rc2016, category=Category.query.filter_by(name='conference').first(), quantity_total=1000)
    db.session.add(single_day_conf_ticket)
    db.session.commit()
    single_day_price = Price(item=single_day_conf_ticket, title='Single Day', start_at=date.today(), end_at=one_month_from_now, amount=2500)
    db.session.add(single_day_price)
    db.session.commit()
    tshirt = Item(title='T-shirt', description='Rootconf', item_collection=rc2016, category=Category.query.filter_by(name='merchandise').first(), quantity_total=1000)
    db.session.add(tshirt)
    db.session.commit()
    tshirt_price = Price(item=tshirt, title='T-shirt', start_at=date.today(), end_at=one_month_from_now, amount=500)
    db.session.add(tshirt_price)
    db.session.commit()
    dns_workshop = Item(title='DNSSEC workshop', description='<p><i class="fa fa-calendar"></i>12 April 2016</p><p><i class="fa fa-map-marker ticket-venue"></i>TERI, Domlur</p><p>This ticket gets you access to DNSSEC workshop 12th April 2016.</p>', item_collection=rc2016, category=Category.query.filter_by(name='workshop').first(), quantity_total=1000)
    db.session.add(dns_workshop)
    db.session.commit()
    dns_workshop_price = Price(item=dns_workshop, title='DNSSEC workshop early', start_at=date.today(), end_at=one_month_from_now, amount=2500)
    db.session.add(dns_workshop_price)
    db.session.commit()
    # Automatic (bulk-quantity) discounts.
    policy = DiscountPolicy(title='10% discount on rootconf', item_quantity_min=10, percentage=10, organization=rootconf)
    policy.items.append(conf_ticket)
    db.session.add(policy)
    db.session.commit()
    tshirt_policy = DiscountPolicy(title='5% discount on 5 t-shirts', item_quantity_min=5, percentage=5, organization=rootconf)
    tshirt_policy.items.append(tshirt)
    db.session.add(tshirt_policy)
    db.session.commit()
    # Coupon-based discounts.
    discount_coupon1 = DiscountPolicy(title='15% discount for coupon code with STU', item_quantity_min=1, percentage=15, organization=rootconf, discount_type=DISCOUNT_TYPE.COUPON)
    discount_coupon1.items.append(conf_ticket)
    db.session.add(discount_coupon1)
    db.session.commit()
    coupon1 = DiscountCoupon(code='coupon1', discount_policy=discount_coupon1)
    db.session.add(coupon1)
    db.session.commit()
    discount_coupon2 = DiscountPolicy(title='100% discount', item_quantity_min=1, percentage=100, organization=rootconf, discount_type=DISCOUNT_TYPE.COUPON)
    discount_coupon2.items.append(conf_ticket)
    # BUGFIX: the original re-added discount_coupon1 here (copy-paste error);
    # the freshly built discount_coupon2 is what belongs in the session.
    db.session.add(discount_coupon2)
    db.session.commit()
    coupon2 = DiscountCoupon(code='coupon2', discount_policy=discount_coupon2)
    db.session.add(coupon2)
    db.session.commit()
    coupon3 = DiscountCoupon(code='coupon3', discount_policy=discount_coupon2)
    db.session.add(coupon3)
    db.session.commit()
    # Price-based ("special price") coupon policies.
    forever_early_geek = DiscountPolicy(title='Forever Early Geek',
        item_quantity_min=1,
        is_price_based=True,
        discount_type=DISCOUNT_TYPE.COUPON,
        organization=rootconf)
    forever_early_geek.items.append(conf_ticket)
    db.session.add(forever_early_geek)
    db.session.commit()
    forever_coupon = DiscountCoupon(code='forever', discount_policy=forever_early_geek)
    db.session.add(forever_coupon)
    db.session.commit()
    forever_unlimited_coupon = DiscountCoupon(code='unlimited', discount_policy=forever_early_geek,
        usage_limit=500)
    db.session.add(forever_unlimited_coupon)
    db.session.commit()
    discount_price = Price(item=conf_ticket,
        discount_policy=forever_early_geek, title='Forever Early Geek',
        start_at=date.today(), end_at=one_month_from_now, amount=3400)
    db.session.add(discount_price)
    db.session.commit()
    zero_discount = DiscountPolicy(title='Zero Discount',
        item_quantity_min=1,
        is_price_based=True,
        discount_type=DISCOUNT_TYPE.COUPON,
        organization=rootconf)
    zero_discount.items.append(conf_ticket)
    db.session.add(zero_discount)
    db.session.commit()
    zero_coupon = DiscountCoupon(code='zerodi', discount_policy=zero_discount)
    db.session.add(zero_coupon)
    db.session.commit()
    # "Discount" price that is actually higher than the base price (3600 > 3500),
    # used to test that non-beneficial discounts are not applied.
    zero_discount_price = Price(item=conf_ticket,
        discount_policy=zero_discount, title='Zero Discount',
        start_at=date.today(), end_at=one_month_from_now, amount=3600)
    db.session.add(zero_discount_price)
    db.session.commit()
|
15,453 | f784bf7fd25f7d7de5b4449cf06cca4676e7726a | count = 0
total = 0
largest_so_far = float('-Inf')
smallest_so_far = float('Inf')
# Running count/total/min/max over numbers typed at the prompt; 'done' ends input.
print('Before', total, count, largest_so_far, smallest_so_far)
while True:
    line = input('> ')
    if line == 'done':
        break
    try:
        line = float(line)
        count = count + 1
        total = total + line
        if line > largest_so_far:
            largest_so_far = line
        if line < smallest_so_far:
            smallest_so_far = line
        print(line, total, count, largest_so_far, smallest_so_far)
    except ValueError:
        # BUGFIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # only float() parse failures should be reported as invalid input.
        print('invalid input')
if count != 0:
    print('After', total, count, largest_so_far, smallest_so_far)
else:
    print('count = 0')
|
15,454 | 04c74385c5d78841de914b54031d1d243721d5b5 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'kgy'
import tesserocr
from PIL import Image
def hello_world():
    """OCR the sample captcha, print the raw text, then preview the
    greyscale and 1-bit versions of the image."""
    captcha = Image.open('CheckCode.jpg')
    print(tesserocr.image_to_text(captcha))
    grey = captcha.convert('L')
    grey.show()
    mono = grey.convert('1')
    mono.show()
def image_test():
    """OCR the sample captcha after greyscale + fixed-threshold binarisation.

    Pixels with greyscale value below ``threshold`` map to 0 (black), the rest
    to 1 (white), which strips light background noise before running tesseract.
    """
    image = Image.open('CheckCode.jpg')
    image = image.convert('L')
    threshold = 140
    # Idiomatic build of the 256-entry binarisation lookup table
    # (was a manual append loop).
    table = [0 if i < threshold else 1 for i in range(256)]
    image = image.point(table, '1')
    image.show()
    result = tesserocr.image_to_text(image)
    print(result)
if __name__ == '__main__':
    # hello_world()
    image_test()  # run the thresholded-OCR variant by default
15,455 | 04e2a9f7f9f308702b19d3f623d8aff47ea80831 | import time
import sys
import random;
# Micro-benchmark: time raw write() calls, routing a configurable share of
# 1M writes to the "config" file and the remainder to the "vanilla" file.
path_to_config = "/home/provconf/adil/conf.txt"
path_to_non = "/home/provconf/adil/van.txt"
N = 1000000
config_perc = int(sys.argv[1])        # percentage of writes routed to the config file
config_percent = config_perc / 100.0
L = list(range(1, N + 1))
random.shuffle(L)  # shuffles in-place
config_numbers = config_percent * N   # labels <= this go to the config file
diff = 0  # accumulated write() wall time, in ns
# ROBUSTNESS: context managers guarantee both files are closed even if the
# benchmark is interrupted part-way through (the old code leaked on error).
with open(path_to_config, "a") as f_conf, open(path_to_non, "a") as f_van:
    for number in L:
        if number <= config_numbers:
            start = time.time_ns()
            f_conf.write("test-data\n")
            end = time.time_ns()
            diff += (end - start)
            f_conf.flush()
        else:
            start = time.time_ns()
            f_van.write("test-data\n")
            end = time.time_ns()
            diff += (end - start)
            f_van.flush()
        # brief pause between writes so the disk queue is not saturated
        time.sleep(0.00002)
time_per = diff / N
print(int(time_per), "ns/op")
f_van.close() |
15,456 | 9053b41a8d7ad8a9349533a3e303112ae276ea59 | """ Data Discovery '/data' """
from flask.views import MethodView
from .. import gateway as gw
class ProductApi(MethodView):
    """Product discovery endpoint: GET the catalogue or a single record."""

    # Request validation runs first, then authentication.
    decorators = [gw.get_decorator(name) for name in ("validate", "auth")]

    def get(self, user_id: str = None, data_id: str = None):
        """Fetch all products, or one product when *data_id* is supplied."""
        query = {"qtype": "products", "qname": data_id}
        return gw.send_rpc("get_records", user_id, query)
# class ProductDetailApi
# def post(self, user_id, body_1, body_2=None):
# return gw.res.data(200, body_1)
# from flask_apispec.views import MethodResource
# from flask_apispec import doc, use_kwargs
# from flask_restful.utils import cors
# from marshmallow import fields, Schema, validate
# from . import gateway
# from .src.auth import auth
# # class ProductDetailSchema(Schema):
# # data_id = fields.Str(required=True)
# # # qgeom = fields.Str()
# # # qstartdate = fields.Str()
# # # qenddate = fields.Str()
# # class ProductAPI(MethodResource):
# # # TODO: Exclude Aliases?
# # @cors.crossdomain(["*"], ["GET"],["Authorization", "Content-Type"], credentials=True)
# # @auth()
# # @doc(**gateway.spec.get_spec("/data", "get"))
# # def get(self, user_id):
# # try:
# # rpc_response = gateway.rpc.data.get_records(
# # qtype="products")
# # if rpc_response["status"] == "error":
# # raise gateway.res.map_exceptions(rpc_response, user_id)
# # return gateway.res.data(200, rpc_response["data"])
# # except Exception as exc:
# # return gateway.res.error(exc)
# # class ProductDetailAPI(MethodResource):
# # # TODO: Asked Matthias why data_id is body and url parameter?
# # @use_kwargs(RecordRequestSchema)
# # @cors.crossdomain(["*"], ["GET"],["Authorization", "Content-Type"], credentials=True)
# # @auth()
# # @doc(**gateway.spec.get_spec("/data/{data_id}", "get"))
# # def get(self, user_id, **kwargs):
# # try:
# # for a in kwargs:
# # stop = 1
# # rpc_response = gateway.rpc.data.get_records(
# # qtype="product_details",
# # qname=data_id)
# # if rpc_response["status"] == "error":
# # raise gateway.res.map_exceptions(rpc_response, user_id)
# # return gateway.res.data(200, rpc_response["data"])
# # except Exception as exc:
# # return gateway.res.error(exc)
# class RecordRequestSchema(Schema):
# type = fields.Str(required=True)
# data_id = fields.Str(required=True)
# bbox = fields.Str(required=True)
# start = fields.Str(required=True)
# end = fields.Str(required=True)
# class RecordsAPI(MethodResource):
# @use_kwargs({
# 'type': fields.Str(description="The detail level (full, short, file_paths).", required=True, validate=validate.Regexp(r"^(full|short|file_path)$")),
# 'data_id': fields.Str(description="String expression to search available datasets by name."),
# 'bbox': fields.Str(description="WKT polygon or bbox to search for available datasets that spatially intersect with the polygon."),
# 'start': fields.Str(description="ISO 8601 date/time string to find datasets with any data acquired after the given date/time."),
# 'end': fields.Str(description="ISO 8601 date/time string to find datasets with any data acquired before the given date/time."),
# },locations=['query'])
# @cors.crossdomain(["*"], ["GET"],["Authorization", "Content-Type"], credentials=True)
# @auth()
# @doc(**gateway.spec.get_spec("/records", "get"))
# def get(self, user_id, **kwargs):
# try:
# rpc_response = gateway.rpc.data.get_records(
# qtype=kwargs["type"],
# qname=kwargs["data_id"] if "data_id" in kwargs else None,
# qgeom=kwargs["bbox"] if "bbox" in kwargs else None,
# qstartdate=kwargs["start"] if "start" in kwargs else None,
# qenddate=kwargs["end"] if "end" in kwargs else None)
# if rpc_response["status"] == "error":
# raise gateway.res.map_exceptions(rpc_response, user_id)
# return gateway.res.data(200, rpc_response["data"])
# except Exception as exc:
# return gateway.res.error(exc)
# # "products": self.get_products,
# # "product_details": self.get_product_details,
# # "full": self.get_records_full,
# # "short": self.get_records_shorts,
# # "file_paths": self.get_file_paths
# # ''' /data '''
# # from flask_restful_swagger_2 import Resource, swagger
# # from flask_restful.reqparse import RequestParser
# # from flask_restful.utils import cors
# # from . import rpc
# # from .src.response import *
# # from .src.request import ModelRequestParser
# # from .src.cors import CORS
# # from .src.parameters import qtype, qname, qgeom, qstartdate, qenddate, product_id
# # class RecordsApi(Resource):
# # __res_parser = ResponseParser()
# # __req_parser = ModelRequestParser([qtype, qname, qgeom, qstartdate, qenddate], location="args")
# # @cors.crossdomain(["*"], ["GET"],["Authorization", "Content-Type"], credentials=True)
# # @auth()
# # @doc(**gateway.spec_parser.get_spec("openeo", "/data", "get"))
# # def get(self, user_id):
# # try:
# # args = self.__req_parser.parse_args()
# # rpc_response = rpc.data.get_records(
# # args["qtype"],
# # args["qname"],
# # args["qgeom"],
# # args["qstartdate"],
# # args["qenddate"])
# # if rpc_response["status"] == "error":
# # raise self.__res_parser.map_exceptions(rpc_response, user_id)
# # return self.__res_parser.data(200, rpc_response["data"])
# # except Exception as exc:
# # return self.__res_parser.error(exc)
# # class ProductDetailApi(Resource):
# # __res_parser = ResponseParser()
# # @cors.crossdomain(
# # origin=["*"],
# # methods=["GET"],
# # headers=["Authorization", "Content-Type"],
# # credentials=True)
# # @swagger.doc(CORS().__parse__([product_id]))
# # def options(self):
# # return self.__res_parser.code(200)
# # @cors.crossdomain(
# # origin=["*"],
# # methods=["GET"],
# # headers=["Authorization", "Content-Type"],
# # credentials=True)
# # @auth()
# # @swagger.doc({
# # "tags": ["EO Data Discovery"],
# # "description": "Returns basic information about EO datasets that are available at the back-end.",
# # "parameters": [product_id],
# # "security": [{"Bearer": []}],
# # "responses": {
# # "200": OK("Returns further information on a given EO product available at the back-end.").__parse__(),
# # "400": BadRequest().__parse__(),
# # "401": Unauthorized().__parse__(),
# # "403": Forbidden().__parse__(),
# # "500": InternalServerError().__parse__(),
# # "501": NotImplemented().__parse__(),
# # "503": ServiceUnavailable().__parse__()
# # }
# # })
# # def get(self, user_id, product_id):
# # try:
# # rpc_response = rpc.data.get_records(
# # qtype="product_details",
# # qname=product_id)
# # if rpc_response["status"] == "error":
# # raise self.__res_parser.map_exceptions(rpc_response, user_id)
# # return self.__res_parser.data(200, rpc_response["data"])
# # except Exception as exc:
# # return self.__res_parser.error(exc)
|
15,457 | 0187bdeceb131e7e30b2a625e165932d10af83b7 | #!/usr/bin/env python3
#
# Copyright (c) 2021 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
from base_generator import write
from dx12_base_generator import Dx12BaseGenerator
class Dx12EnumToStringBodyGenerator(Dx12BaseGenerator):
    """Generates C++ ToString() function bodies for DX12 enums, bitmasks and IIDs."""

    # Substrings that identify an enum as a bitmask type; matching enums also
    # get a BitmaskToString() overload emitted in generate_feature().
    BITS_LIST = [
        '_FLAGS', '_STATES', '_STATUS', 'D3D12_SHADER_MIN_PRECISION_SUPPORT',
        'D3D12_FORMAT_SUPPORT1', 'D3D12_FORMAT_SUPPORT2'
    ]

    def __init__(
        self,
        source_dict,
        dx12_prefix_strings,
        err_file=sys.stderr,
        warn_file=sys.stderr,
        diag_file=sys.stdout
    ):
        Dx12BaseGenerator.__init__(
            self, source_dict, dx12_prefix_strings, err_file, warn_file,
            diag_file
        )

    def beginFile(self, gen_opts):
        """Method override: emit the include and open the gfxrecon/util namespaces."""
        Dx12BaseGenerator.beginFile(self, gen_opts)
        code = '#include "generated_dx12_enum_to_string.h"\n'
        write(code, file=self.outFile)
        write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
        write('GFXRECON_BEGIN_NAMESPACE(util)', file=self.outFile)
        self.newline()

    def generate_feature(self):
        """Emit one ToString specialization per enum, plus flag and IID handlers."""
        for k, v in self.source_dict['enum_dict'].items():
            # Generate enum handler for all enums
            body = 'template <> std::string ToString<{0}>(const {0}& value, ToStringFlags, uint32_t, uint32_t)\n'
            body += '{{\n'
            body += ' switch (value) {{\n'
            # Track both names and values so aliased enumerators (two names
            # sharing one numeric value) produce only a single case label.
            processed_values = set()
            for value in v['values']:
                if not value['value'] in processed_values:
                    body += ' case {0}: return "{0}";\n'.format(value['name'])
                    processed_values.add(value['name'])
                    processed_values.add(value['value'])
            body += ' default: break;\n'
            body += ' }}\n'
            body += ' return "Unhandled {0}";\n'
            body += '}}\n'
            # Generate flags handler for enums identified as bitmasks
            for bits in self.BITS_LIST:
                if k.find(bits) >= 0:
                    body += '\ntemplate <> std::string ToString<{0}>(uint32_t flags, ToStringFlags, uint32_t, uint32_t)\n'
                    body += '{{\n'
                    body += ' return BitmaskToString<{0}>(flags);\n'
                    body += '}}\n'
            write(body.format(k), file=self.outFile)
        # Generate REFIID handler
        # Collect every interface GUID declared via DEFINE_GUID in the headers.
        iids = list()
        for k, v in self.source_dict['header_dict'].items():
            if hasattr(v, 'variables'):
                for m in v.variables:
                    if 'DEFINE_GUID' in m['type']:
                        index = m['type'].find(',')
                        iids.append(m['type'][len('DEFINE_GUID ( '):index])
        body = 'template <> std::string ToString<IID>(const IID& iid, ToStringFlags toStringFlags, uint32_t tabCount, uint32_t tabSize)\n'
        body += '{\n'
        # Guarantee the base interface is always handled.
        if not "IID_IUnknown" in iids:
            iids.append("IID_IUnknown")
        for iid in iids:
            body += ' if (iid == {0}) return "\\\"{0}\\\"";\n'.format(iid)
        body += ' return "\\\"Invalid IID\\\"";\n'
        body += '}\n'
        write(body, file=self.outFile)

    def endFile(self):
        """Method override: close the namespaces, then finish in the superclass."""
        self.newline()
        write('GFXRECON_END_NAMESPACE(util)', file=self.outFile)
        write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)
        # Finish processing in superclass
        Dx12BaseGenerator.endFile(self)
|
15,458 | d1f979a192eba53872c63ed56e9d39f5e5da7fa7 | from __future__ import unicode_literals
import six
import copy
from itertools import chain
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.serializers import ListSerializer
from drf_sideloading.renderers import BrowsableAPIRendererWithoutForms
from drf_sideloading.serializers import SideLoadableSerializer
class SideloadableRelationsMixin(object):
    """ViewSet mixin that lets GET list requests sideload related objects.

    Clients request sideloading with ``?sideload=rel1,rel2``; the response is
    then rendered through ``sideloading_serializer_class``, which must be a
    SideLoadableSerializer subclass with a ``Meta.primary`` field.
    """

    query_param_name = "sideload"
    sideloading_serializer_class = None
    _primary_field_name = None
    _sideloadable_fields = None
    relations_to_sideload = None

    def __init__(self, **kwargs):
        # Validate the configuration eagerly so misconfiguration fails at startup.
        self.check_sideloading_serializer_class()
        self._primary_field_name = self.get_primary_field_name()
        self._sideloadable_fields = self.get_sideloadable_fields()
        self._prefetches = self.get_sideloading_prefetches()
        super(SideloadableRelationsMixin, self).__init__(**kwargs)

    def check_sideloading_serializer_class(self):
        """Assert that ``sideloading_serializer_class`` is correctly configured."""
        assert (
            self.sideloading_serializer_class is not None
        ), "'{}' should either include a `sideloading_serializer_class` attribute, ".format(
            self.__class__.__name__
        )
        assert issubclass(
            self.sideloading_serializer_class, SideLoadableSerializer
        ), "'{}' `sideloading_serializer_class` must be a SideLoadableSerializer subclass".format(
            self.__class__.__name__
        )
        assert not getattr(
            self.sideloading_serializer_class, "many", None
        ), "Sideloadable serializer can not be 'many=True'!"
        # Check Meta class
        assert hasattr(
            self.sideloading_serializer_class, "Meta"
        ), "Sideloadable serializer must have a Meta class defined with the 'primary' field name!"
        assert getattr(
            self.sideloading_serializer_class.Meta, "primary", None
        ), "Sideloadable serializer must have a Meta attribute called primary!"
        assert (
            self.sideloading_serializer_class.Meta.primary
            in self.sideloading_serializer_class._declared_fields
        ), "Sideloadable serializer Meta.primary must point to a field in the serializer!"
        if (
            getattr(self.sideloading_serializer_class.Meta, "prefetches", None)
            is not None
        ):
            assert isinstance(
                self.sideloading_serializer_class.Meta.prefetches, dict
            ), "Sideloadable serializer Meta attribute 'prefetches' must be a dict."
        # Check serializer fields (BUGFIX: this loop was duplicated verbatim).
        for name, field in self.sideloading_serializer_class._declared_fields.items():
            assert getattr(
                field, "many", None
            ), "SideLoadable field '{}' must be set as many=True".format(name)

    def get_primary_field_name(self):
        """Name of the serializer field that carries the primary objects."""
        return self.sideloading_serializer_class.Meta.primary

    def get_sideloadable_fields(self):
        """All declared serializer fields except the primary one."""
        sideloadable_fields = copy.deepcopy(
            self.sideloading_serializer_class._declared_fields
        )
        sideloadable_fields.pop(self._primary_field_name, None)
        return sideloadable_fields

    def get_sideloading_prefetches(self):
        """Normalise ``Meta.prefetches`` values into lists of lookup strings."""
        prefetches = getattr(self.sideloading_serializer_class.Meta, "prefetches", {})
        if not prefetches:
            return None
        cleaned_prefetches = {}
        for k, v in prefetches.items():
            if v is not None:
                if isinstance(v, list):
                    cleaned_prefetches[k] = v
                elif isinstance(v, six.string_types):
                    cleaned_prefetches[k] = [v]
                else:
                    raise RuntimeError(
                        "Sideloadable prefetch values must be presented either as a list or a string"
                    )
        return cleaned_prefetches

    def initialize_request(self, request, *args, **kwargs):
        """Swap in a form-less browsable renderer when a GET request sideloads."""
        request = super(SideloadableRelationsMixin, self).initialize_request(
            request=request, *args, **kwargs
        )
        sideload_params = self.parse_query_param(
            sideload_parameter=request.query_params.get(self.query_param_name, "")
        )
        if request.method == "GET" and sideload_params:
            # When sideloading disable BrowsableAPIForms
            if BrowsableAPIRenderer in self.renderer_classes:
                renderer_classes = (
                    list(self.renderer_classes)
                    if isinstance(self.renderer_classes, tuple)
                    else self.renderer_classes
                )
                renderer_classes = [
                    BrowsableAPIRendererWithoutForms if r == BrowsableAPIRenderer else r
                    for r in renderer_classes
                ]
                self.renderer_classes = renderer_classes
        return request

    def list(self, request, *args, **kwargs):
        """List endpoint; renders a sideloaded payload when requested."""
        sideload_params = self.parse_query_param(
            sideload_parameter=request.query_params.get(self.query_param_name, "")
        )
        # Do not sideload unless params and GET method
        if request.method != "GET" or not sideload_params:
            return super(SideloadableRelationsMixin, self).list(
                request, *args, **kwargs
            )
        # After this `relations_to_sideload` is safe to use
        queryset = self.get_queryset()
        # Add prefetches if applicable
        prefetch_relations = self.get_relevant_prefetches()
        if prefetch_relations:
            queryset = queryset.prefetch_related(*prefetch_relations)
        queryset = self.filter_queryset(queryset)
        # Create page
        page = self.paginate_queryset(queryset)
        if page is not None:
            sideloadable_page = self.get_sideloadable_page(page)
            serializer = self.sideloading_serializer_class(
                instance=sideloadable_page,
                fields_to_load=[self._primary_field_name]
                + list(self.relations_to_sideload),
                context={"request": request},
            )
            return self.get_paginated_response(serializer.data)
        else:
            sideloadable_page = self.get_sideloadable_page_from_queryset(queryset)
            serializer = self.sideloading_serializer_class(
                instance=sideloadable_page,
                fields_to_load=[self._primary_field_name]
                + list(self.relations_to_sideload),
                context={"request": request},
            )
            return Response(serializer.data)

    def parse_query_param(self, sideload_parameter):
        """
        Parse query param and take validated names

        :param sideload_parameter string
        :return valid relation names list

        comma separated relation names may contain invalid or unusable characters.
        This function finds string match between requested names and defined relation in view
        """
        self.relations_to_sideload = set(sideload_parameter.split(",")) & set(
            self._sideloadable_fields.keys()
        )
        return self.relations_to_sideload

    def get_relevant_prefetches(self):
        """Prefetch lookups needed for the currently requested relations."""
        if not self._prefetches:
            return set()
        return set(
            pf
            for relation in self.relations_to_sideload
            for pf in self._prefetches.get(relation, [])
        )

    def get_sideloadable_page_from_queryset(self, queryset):
        """Build the sideloadable payload straight from an (unpaginated) queryset."""
        # this works wonders, but can't be used when page is paginated...
        sideloadable_page = {self._primary_field_name: queryset}
        for relation in self.relations_to_sideload:
            if not isinstance(self._sideloadable_fields[relation], ListSerializer):
                raise RuntimeError(
                    "SideLoadable field '{}' must be set as many=True".format(relation)
                )
            source = self._sideloadable_fields[relation].source or relation
            rel_model = self._sideloadable_fields[relation].child.Meta.model
            rel_qs = rel_model.objects.filter(
                pk__in=queryset.values_list(source, flat=True)
            )
            sideloadable_page[source] = rel_qs
        return sideloadable_page

    def get_sideloadable_page(self, page):
        """Build the sideloadable payload from an already-paginated object list."""
        sideloadable_page = {self._primary_field_name: page}
        for relation in self.relations_to_sideload:
            if not isinstance(self._sideloadable_fields[relation], ListSerializer):
                raise RuntimeError(
                    "SideLoadable field '{}' must be set as many=True".format(relation)
                )
            source = self._sideloadable_fields[relation].source or relation
            sideloadable_page[source] = self.filter_related_objects(
                related_objects=page, lookup=source
            )
        return sideloadable_page

    def filter_related_objects(self, related_objects, lookup):
        """Recursively collect the distinct objects reachable via a ``__`` lookup path."""
        current_lookup, remaining_lookup = (
            lookup.split("__", 1) if "__" in lookup else (lookup, None)
        )
        related_objects_set = {getattr(r, current_lookup) for r in related_objects} - {None}
        # Expand *-to-many managers into their member objects.
        if related_objects_set and next(
            iter(related_objects_set)
        ).__class__.__name__ in ["ManyRelatedManager", "RelatedManager"]:
            related_objects_set = set(
                chain(
                    *[
                        related_queryset.all()
                        for related_queryset in related_objects_set
                    ]
                )
            )
        if remaining_lookup:
            return self.filter_related_objects(related_objects_set, remaining_lookup)
        return set(related_objects_set) - {"", None}
|
15,459 | f4788375b7d0920a3ff4208aa6203fd79823336e | # sub-parts of the U-Net model
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import params
class double_conv(nn.Module):
    '''Two (conv => optional norm => activation) stages with equal settings.'''

    def __init__(self, in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose):
        super(double_conv, self).__init__()
        self.padding = padding
        self.doubleConvTranspose = doubleConvTranspose
        self.padding_mode = padding_mode
        self.up_mode = up_mode

        def build_norm(channels):
            # Normalisation layer chosen by name; anything else means "no norm".
            if unet_norm == 'batch_norm':
                return nn.BatchNorm2d(channels)
            if unet_norm == 'instance_norm':
                return nn.InstanceNorm2d(channels)
            return None

        def build_act():
            if activation == "relu":
                return nn.ReLU(inplace=True)
            if activation == "leakyrelu":
                return nn.LeakyReLU(0.2, inplace=True)
            assert 0, "Unsupported activation: {%s}" % (activation)

        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=padding, padding_mode=padding_mode)
        self.norm = build_norm(out_ch)
        self.activation = build_act()
        self.conv1 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=padding, padding_mode=padding_mode)
        self.norm1 = build_norm(out_ch)
        self.activation1 = build_act()

    def forward(self, x):
        stages = ((self.conv, self.norm, self.activation),
                  (self.conv1, self.norm1, self.activation1))
        for conv, norm, act in stages:
            x = conv(x)
            if self.up_mode and not self.doubleConvTranspose:
                # Decoder path without transposed convs: restore the border
                # lost by the unpadded 3x3 convolution.
                x = F.pad(x, (1, 1, 1, 1), mode='replicate')
            if norm:
                x = norm(x)
            x = act(x)
        return x
class double_last_conv(nn.Module):
    '''Bottleneck stage: conv => norm => act, optionally followed by a
    transposed conv => norm => act when doubleConvTranspose is set.'''

    def __init__(self, in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose):
        super(double_last_conv, self).__init__()
        self.padding_mode = padding_mode
        self.padding = padding
        self.up_mode = up_mode
        self.doubleConvTranspose = doubleConvTranspose
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=padding, padding_mode=padding_mode)
        if unet_norm == 'batch_norm':
            self.norm = nn.BatchNorm2d(out_ch)
        elif unet_norm == 'instance_norm':
            self.norm = nn.InstanceNorm2d(out_ch)
        else:
            self.norm = None
        if activation == "relu":
            self.activation = nn.ReLU(inplace=True)
        elif activation == "leakyrelu":
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        else:
            assert 0, "Unsupported activation: {%s}" % (activation)
        # BUGFIX: conv1 consumes the output of self.conv, which has out_ch
        # channels — its input channels were in_ch, crashing whenever
        # in_ch != out_ch (identical parameter shapes when in_ch == out_ch).
        self.conv1 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=3, stride=1, padding=padding)
        if unet_norm == 'batch_norm':
            self.norm1 = nn.BatchNorm2d(out_ch)
        elif unet_norm == 'instance_norm':
            self.norm1 = nn.InstanceNorm2d(out_ch)
        else:
            self.norm1 = None
        if activation == "relu":
            self.activation1 = nn.ReLU(inplace=True)
        elif activation == "leakyrelu":
            self.activation1 = nn.LeakyReLU(0.2, inplace=True)
        else:
            assert 0, "Unsupported activation: {%s}" % (activation)

    def forward(self, x):
        x = self.conv(x)
        if self.up_mode and not self.doubleConvTranspose:
            # Restore the border lost by the unpadded convolution.
            expanded_padding = (1, 1,
                                1, 1)
            x = F.pad(x, expanded_padding, mode='replicate')
        if self.norm:
            x = self.norm(x)
        x = self.activation(x)
        if self.doubleConvTranspose:
            x = self.conv1(x)
            if self.norm1:
                x = self.norm1(x)
            x = self.activation1(x)
        return x
class double_conv_traspose(nn.Module):
    '''Two (transposed conv => optional norm => activation) stages.'''

    def __init__(self, in_ch, out_ch, unet_norm, activation, kernel_size=3):
        super(double_conv_traspose, self).__init__()

        def build_norm(channels):
            # Normalisation layer chosen by name; anything else means "no norm".
            if unet_norm == 'batch_norm':
                return nn.BatchNorm2d(channels)
            if unet_norm == 'instance_norm':
                return nn.InstanceNorm2d(channels)
            return None

        def build_act():
            if activation == "relu":
                return nn.ReLU(inplace=True)
            if activation == "leakyrelu":
                return nn.LeakyReLU(0.2, inplace=True)
            assert 0, "Unsupported activation: {%s}" % (activation)

        self.conv = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=kernel_size, stride=1, padding=0)
        self.norm = build_norm(out_ch)
        self.activation = build_act()
        self.conv1 = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=kernel_size, stride=1, padding=0)
        self.norm1 = build_norm(out_ch)
        self.activation1 = build_act()

    def forward(self, x):
        stages = ((self.conv, self.norm, self.activation),
                  (self.conv1, self.norm1, self.activation1))
        for t_conv, norm, act in stages:
            x = t_conv(x)
            if norm:
                x = norm(x)
            x = act(x)
        return x
class inconv(nn.Module):
    """Input stage: a single double_conv applied to the raw network input."""

    def __init__(self, in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose):
        super(inconv, self).__init__()
        self.conv = double_conv(in_ch, out_ch, unet_norm, activation, padding,
                                padding_mode, up_mode, doubleConvTranspose)

    def forward(self, x):
        return self.conv(x)
class down(nn.Module):
    """Encoder stage: downsample (max-pool or dilated conv) then double_conv."""

    def __init__(self, in_ch, out_ch, network, dilation, unet_norm, activation, padding,
                 padding_mode, up_mode, doubleConvTranspose):
        super(down, self).__init__()
        if network == params.unet_network:
            # Classic U-Net: halve the resolution with max-pooling.
            self.mpconv = nn.Sequential(
                nn.MaxPool2d(2),
                double_conv(in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose)
            )
        elif network == params.torus_network:  # for torus
            # BUGFIX: double_conv requires the doubleConvTranspose argument;
            # it was missing here, raising TypeError on construction.
            self.mpconv = nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, stride=1, padding=0, dilation=dilation),
                double_conv(in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose)
            )
        else:
            # BUGFIX: message used self.network, which is never assigned and
            # would raise AttributeError instead of the intended assertion.
            assert 0, "Unsupported network request: {}".format(network)

    def forward(self, x):
        x = self.mpconv(x)
        return x
class last_down(nn.Module):
    """Deepest encoder stage: max-pool followed by double_last_conv."""

    def __init__(self, in_ch, out_ch, network, dilation, unet_norm, activation,
                 padding, padding_mode, up_mode, doubleConvTranspose):
        super(last_down, self).__init__()
        if network == params.unet_network:
            self.mpconv = nn.Sequential(
                nn.MaxPool2d(2),
                double_last_conv(in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose)
            )
        else:
            # BUGFIX: message used self.network, which is never assigned and
            # would raise AttributeError instead of the intended assertion.
            assert 0, "Unsupported network request: {}".format(network)

    def forward(self, x):
        x = self.mpconv(x)
        return x
class up(nn.Module):
    """Decoder stage: upsample x1, size-match it to the skip tensor x2,
    concatenate them (per con_operator), then run a double conv block."""

    def __init__(self, in_ch, out_ch, bilinear, layer_factor, network, dilation, unet_norm, activation,
                 doubleConvTranspose, padding, padding_mode, convtranspose_kernel, up_mode, output_padding1=0):
        super(up, self).__init__()
        self.padding_mode = padding_mode
        self.up_mode = up_mode
        # print("padding",padding)
        # if not doubleConvTranspose:
        # padding = 1
        # would be a nice idea if the upsampling could be learned too,
        # but my machine do not have enough memory to handle all those weights
        if network == params.unet_network:
            # When up_mode is truthy no self.up is created here; forward()'s
            # up_mode branch upsamples with F.conv_transpose2d instead.
            if not up_mode:
                if bilinear:
                    # Nearest-type Upsample followed by a 1x1 conv, channel count unchanged.
                    self.up = nn.Sequential(nn.Upsample(scale_factor=2),
                                            nn.Conv2d(in_ch // layer_factor, in_ch // layer_factor, kernel_size=1))
                # elif up_mode:
                # self.up =
                else:
                    # Learned stride-2 transposed conv; padding/output_padding are
                    # adjusted so kernel sizes 4 and 5 still exactly double the size.
                    cur_padding = 0
                    output_padding = output_padding1
                    if convtranspose_kernel == 5:
                        cur_padding = convtranspose_kernel // 2
                        output_padding = 1
                    if convtranspose_kernel == 4:
                        cur_padding = 1
                    self.up = nn.ConvTranspose2d(in_ch // layer_factor, in_ch // layer_factor,
                                                 convtranspose_kernel, stride=2, padding=cur_padding,
                                                 output_padding=output_padding)
        elif network == params.torus_network:  # for torus
            self.up = nn.ConvTranspose2d(in_ch // layer_factor, in_ch // layer_factor, 3, stride=1, padding=0,
                                         dilation=dilation)
        else:
            assert 0, "Unsupported network request: {}".format(network)
        if doubleConvTranspose:
            self.conv = double_conv_traspose(in_ch, out_ch, unet_norm, activation)
        else:
            self.conv = double_conv(in_ch, out_ch, unet_norm, activation, padding, padding_mode, up_mode, doubleConvTranspose)

    def forward(self, x1, x2, con_operator, network, d_weight_mul):
        # `network` is accepted here but not used in this method.
        if self.up_mode:
            # Upsample by inserting zeros: a fixed 2x2 kernel with a single 1
            # in the top-left corner, applied per-channel (groups = channels).
            stride = 2
            w = x1.new_zeros(stride, stride)
            w[0, 0] = 1
            x1 = F.conv_transpose2d(x1, w.expand(x1.size(1), 1, stride, stride), stride=stride, groups=x1.size(1))
        else:
            x1 = self.up(x1)
        # input is CHW
        # Pad x1 so its spatial size matches the skip tensor x2 before concat.
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        if diffX or diffY:
            print("diffX", diffX, x1.size())
            print("diffY", diffY, x2.size())
            x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                            diffY // 2, diffY - diffY // 2), mode=self.padding_mode)
            print("new size", x1.size())
        # if diffY < 0 or diffX < 0:
        # diffY = abs(diffY)
        # diffX = abs(diffX)
        # x1 = x1[:, :, diffY // 2:x1.shape[2] - (diffY - diffY // 2),
        # diffX // 2:x1.shape[3] - (diffX - diffX // 2)]
        # print("new size", x1.size())
        # else:
        # x2 = x2[:, :, diffY // 2:x2.shape[2] - (diffY - diffY // 2), diffX // 2:x2.shape[3] - (diffX - diffX // 2)]
        # for padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        # Concatenate skip + upsampled features, optionally augmented with
        # element-wise powers of the skip tensor (chosen by con_operator).
        if con_operator == params.original_unet:
            x = torch.cat([x2, x1], dim=1)
        elif con_operator == params.square:
            square_x = torch.pow(x2, 2)
            x = torch.cat([x2, x1, square_x], dim=1)
        elif con_operator == params.square_root:
            square_root_x = torch.pow(x2 + params.epsilon, 0.5)
            x = torch.cat([x2, x1, square_root_x], dim=1)
        elif con_operator == params.square_and_square_root:
            square_x = torch.pow(x2, 2)
            square_root_x = torch.pow(x2 + params.epsilon, 0.5)
            x = torch.cat([x2, x1, square_x, square_root_x], dim=1)
        elif con_operator == params.gamma:
            square_root_x = torch.pow(x2 + params.epsilon, 0.02)
            x = torch.cat([x2, x1, square_root_x], dim=1)
        elif con_operator == params.square_and_square_root_manual_d:
            square_x = torch.pow(x2, 2)
            square_root_x = torch.pow(x2 + params.epsilon, 0.5)
            # Extra constant channel broadcasting the manual d weight.
            weight_channel = torch.full((x2.shape[0], 1, x2.shape[2], x2.shape[3]), d_weight_mul).type_as(x2)
            x = torch.cat([weight_channel, x2, x1, square_x, square_root_x], dim=1)
        else:
            assert 0, "Unsupported con_operator request: {}".format(con_operator)
        x = self.conv(x)
        return x
class outconv(nn.Module):
    """Final 1x1 convolution mapping features to the desired output channels."""

    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        return self.conv(x)
|
15,460 | acffdf17d1086689281d9d346fe24a1627151e9d | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Path of the TFRecord file to read with a TF1 queue-based input pipeline.
data_path = '../tfrecord/many2one.tfrecords'  # path of the TFRecord file to read

with tf.Session() as sess:
    # Schema of each serialized example: raw image byte-string + int64 label.
    feature = {'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)}
    # create a list of filenames and pass it to a queue
    filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
    # define a reader and read the next record
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # decode the record read by the reader
    features = tf.parse_single_example(serialized_example, features=feature)
    # convert the image data from string back to the numbers
    images = tf.decode_raw(features['image'], tf.float32)
    # cast label data into int32
    labels = tf.cast(features['label'], tf.int32)
    # Reshape image data into the original shape
    images = tf.reshape(images, [224, 224, 3])
    # Initialize all global and local variables
    # (local variables are included because the pipeline above uses them)
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init_op)
    # create a coordinator and run all QueueRunner objects
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Pull 12 examples off the queue and display each with its class name.
    for i in range(12):
        img, lbl = sess.run([images, labels])
        img = img.astype(np.uint8)
        #plt.plot(224,224)
        plt.imshow(img)
        # label 0 is rendered as 'bollard', anything else as 'bench'
        plt.title('bollard' if lbl==0 else 'bench')
        plt.show()
    # stop the threads
    coord.request_stop()
    # wait for threads to stop
    coord.join(threads)
    # NOTE(review): redundant — the enclosing `with` already closes the session.
    sess.close()
|
15,461 | 284a092bd9ed1c037e354fc12e91df03bcab8b8a | #!/usr/bin/env python
# coding=utf-8
import rospy
from std_msgs.msg import Float32
from std_msgs.msg import Int64
import numpy as np
import requests
import threading
# Data format definition: time, car_speed, reel_speed, cb_speed, reel_current, cm7290_current, cb_current
class Interpreter:
    """Collects speed/current telemetry from ROS topics and dumps it to .npy files."""

    def __init__(self):
        self.init_time = rospy.get_rostime()
        # Seconds elapsed since startup; refreshed continuously by time_get_job.
        self.time = 0
        # Each list accumulates [elapsed_seconds, value] samples for one topic.
        self.car_speed = []
        self.reel_speed = []
        self.cb_speed = []
        self.pf_speed = []
        self.reel_current = []
        self.cm7290_current = []
        self.cb_current = []
        self.pf_current = []
        # Define the subs/pubs that may be needed later here in the initializer.
        # Data fields: time, car_speed, reel_speed, cb_speed, reel_current, cm7290_current, cb_current
        rospy.Subscriber('/car_speed', Float32, self.callback_car_speed)
        rospy.Subscriber('/REEL_speed', Float32, self.callback_reel_speed)
        rospy.Subscriber('/CB_speed', Float32, self.callback_cb_speed)
        rospy.Subscriber('/FH_speed', Float32, self.callback_pf_speed)  # note: temporarily switched to the FH topic here
        rospy.Subscriber('/REEL_current', Float32, self.callback_reel_current)
        rospy.Subscriber('/current_cm7290', Float32, self.callback_cm7290_current)
        rospy.Subscriber('/CB_current', Float32, self.callback_cb_current)
        rospy.Subscriber('/PF_current', Float32, self.callback_pf_current)
        self.time_thread = threading.Thread(target=self.time_get_job)
        self.time_thread.start()
        self.callback_thread = threading.Thread(target=self.call_back_jobs)
        self.callback_thread.start()

    ## callback functions ##
    # Each callback stamps the incoming value with the current elapsed time.
    def callback_car_speed(self, data):
        msg = data.data
        self.car_speed.append(np.array([self.time, msg]))

    def callback_reel_speed(self, data):
        msg = data.data
        self.reel_speed.append(np.array([self.time, msg]))

    def callback_cb_speed(self, data):
        msg = data.data
        self.cb_speed.append(np.array([self.time, msg]))

    def callback_pf_speed(self, data):
        msg = data.data
        self.pf_speed.append(np.array([self.time, msg]))

    def callback_reel_current(self, data):
        msg = data.data
        self.reel_current.append(np.array([self.time, msg]))

    def callback_cm7290_current(self, data):
        msg = data.data
        self.cm7290_current.append(np.array([self.time, msg]))

    def callback_cb_current(self, data):
        msg = data.data
        self.cb_current.append(np.array([self.time, msg]))

    def callback_pf_current(self, data):
        msg = data.data
        self.pf_current.append(np.array([self.time, msg]))

    ## thread functions ##
    def call_back_jobs(self):
        # Blocks this thread, dispatching subscriber callbacks until shutdown.
        rospy.spin()

    def time_get_job(self):
        # Continuously refresh self.time with seconds elapsed since startup.
        # NOTE(review): tight loop with no sleep — this busy-waits a full CPU
        # core; consider adding a short rospy sleep here.
        while not rospy.is_shutdown():
            time_duration = rospy.get_rostime() - self.init_time
            self.time = time_duration.to_sec()
            # print 'time:' + str(self.time)

    ## save data ##
    def save_data_to_npy(self):
        """Stack each sample list into an (n, 2) array and write it to a .npy file."""
        # Data fields: time, car_speed, reel_speed, cb_speed, reel_current, cm7290_current, cb_current
        car_speed_npy = np.stack(self.car_speed)
        np.save("m234_car_speed.npy", car_speed_npy)
        reel_speed_npy = np.stack(self.reel_speed)
        np.save("m234_reel_speed.npy", reel_speed_npy)
        cb_speed_npy = np.stack(self.cb_speed)
        np.save("m234_cb_speed.npy", cb_speed_npy)
        pf_speed_npy = np.stack(self.pf_speed)
        np.save("m234_pf_speed.npy", pf_speed_npy)
        reel_current_npy = np.stack(self.reel_current)
        np.save("m234_reel_current.npy", reel_current_npy)
        cm7290_current_npy = np.stack(self.cm7290_current)
        np.save("m234_cm7290_current.npy", cm7290_current_npy)
        cb_current_npy = np.stack(self.cb_current)
        np.save("m234_cb_current.npy", cb_current_npy)
        pf_current_npy = np.stack(self.pf_current)
        np.save("m234_pf_current.npy", pf_current_npy)
if __name__ == '__main__':
    rospy.init_node('interpreter')
    try:
        # Interpreter starts its own subscriber/clock threads on construction.
        convertor = Interpreter()
        # Keep the main thread alive until shutdown (Ctrl-C / rosnode kill).
        while not rospy.is_shutdown():
            print 'Converting...'
            rospy.sleep(1)
        # Dump everything collected during the run to .npy files.
        convertor.save_data_to_npy()
        print 'All data have been save as npy files.'
        convertor.callback_thread.join()
        convertor.time_thread.join()
        print 'Convertor exited.'
    except rospy.ROSInterruptException:
        # rospy.sleep() raises this on shutdown; nothing left to clean up.
        pass
|
15,462 | 37e3d3edf99f7be7d794c12fd485f37b92ea59fc | # -*- coding: utf-8 -*-
import os.path
from fabric.api import env, task, run, put, cd, sudo
from fabric.contrib.files import exists, append, contains, sed
from .utils import mkdir
from . import conf_file
env.use_ssh_config = True
BASE_PATH = '$HOME/tmp-fabric-toolkit'
@task
def test():
"""执行uname -a命令"""
run('uname -a')
@task
def update():
sudo('apt-get -q update')
@task
def upgrade():
sudo('apt-get -y -q upgrade')
@task
def lc_all():
path = '/etc/default/locale'
if not contains(path, 'LC_ALL'):
append(path, 'LC_ALL="en_US.UTF-8"', use_sudo=True)
@task
def cn_source():
with cd('/etc/apt'):
bak = ''
if exists('sources.list.bak'):
bak = '2'
sudo('cp sources.list sources.list.bak' + bak)
sed('/etc/apt/sources.list', '//us.', '//cn.',
use_sudo=True, backup='')
@task
def sudo_nopassword():
"""
sudo命令无需密码
http://stackoverflow.com/questions/323957/how-do-i-edit-etc-sudoers-from-a-script
"""
mkdir(BASE_PATH)
user = run('whoami')
add_content = '{}\tALL=(ALL) NOPASSWD:ALL'.format(user)
with cd(BASE_PATH):
tmp = os.path.join(run('pwd'), 'sudoers.tmp')
run('sudo cat /etc/sudoers > {}'.format(tmp))
if contains(tmp, add_content):
return
append(tmp, add_content)
sudo('EDITOR="cp {0}" visudo'.format(tmp))
@task
def install_vim_gtk():
"""安装vim-gtk"""
if not exists('/usr/bin/vim.gtk'):
sudo('apt-get -y -q install vim-gtk')
@task
def install_git():
if not exists('/usr/bin/git'):
sudo('apt-get -y -q install git')
@task
def default_editor():
"""更改默认编辑器"""
sudo('update-alternatives --config editor')
@task
def dotfiles():
"""设置git,vim等的默认配置"""
mkdir(BASE_PATH)
with cd(BASE_PATH):
if not exists('dotfiles'):
run('git clone https://github.com/codeif/dotfiles.git')
with cd('dotfiles'):
run('git pull -q')
run('./bootstrap.sh -f')
sudo('apt-get -y -q install exuberant-ctags')
@task
def install_pip():
if not exists('/usr/bin/python'):
sudo('apt-get -y -q install python')
if not exists('/usr/bin/python3'):
sudo('apt-get -y -q install python3-dev')
if not exists('/usr/local/bin/pip'):
run('curl --silent --show-error --retry 3 '
'https://bootstrap.pypa.io/get-pip.py | '
'sudo -H python')
run('sudo -H pip install -U pip')
@task
def pip_conf():
"""使用豆瓣的pip源"""
if not exists('~/.pip/'):
run('mkdir ~/.pip')
path = conf_file.get_path('pip.conf')
put(path, '~/.pip/')
@task
def install_nginx():
if not exists('/usr/sbin/nginx'):
sudo('apt-get -y -q install nginx')
@task
def install_supervisor():
if not exists('/usr/bin/supervisorctl'):
sudo('apt-get -y -q install supervisor')
sudo('service supervisor start')
# 设置开机启动
sudo('update-rc.d supervisor defaults')
# in ubuntu 16.04
sudo('systemctl enable supervisor.service')
@task
def install_virtualenv():
run('sudo -H pip install virtualenv')
run('sudo -H pip install virtualenvwrapper')
mkdir('~/.virtualenvs')
contents = [
'',
'export WORKON_HOME=$HOME/.virtualenvs',
'source /usr/local/bin/virtualenvwrapper.sh',
]
if not contains('~/.bashrc', 'export WORKON_HOME'):
append('~/.bashrc', '\n'.join(contents))
@task(alias='git-aware-prompt')
def git_aware_prompt():
"""git显示分支名
https://github.com/jimeh/git-aware-prompt
"""
mkdir('~/.bash')
with cd('~/.bash'):
if not exists('git-aware-prompt'):
run('git clone git://github.com/jimeh/git-aware-prompt.git')
else:
with cd('git-aware-prompt'):
run('git pull')
if contains('~/.bashrc', 'export GITAWAREPROMPT'):
return
mkdir(BASE_PATH)
with cd(BASE_PATH):
tmp = os.path.join(run('pwd'), 'git-aware-prompt.tmp')
path = conf_file.get_path('git-aware-prompt.bashrc')
put(path, tmp)
run('cat {} >> ~/.bashrc'.format(tmp))
@task
def ntpdate():
"""同步时间"""
if not exists('/usr/sbin/ntpdate'):
sudo('apt-get -y -q install ntpdate')
sudo('ntpdate cn.pool.ntp.org')
@task(default=True)
def all_task():
sudo_nopassword()
cn_source()
update()
lc_all()
install_git()
install_vim_gtk()
# set default editor
mkdir(BASE_PATH)
with cd(BASE_PATH):
tmp = os.path.join(run('pwd'), 'default-editor.tmp')
run('update-alternatives --query editor | grep \'Best:\' > {}'.format(tmp))
if not contains(tmp, 'vim'):
default_editor()
dotfiles()
install_pip()
pip_conf()
install_nginx()
install_supervisor()
install_virtualenv()
git_aware_prompt()
ntpdate()
|
15,463 | c9864bc74f80fe08cc36bba9b7cb2ff2779cd955 | import os
import pytest
import glob
from VCF.VCFfilter.BCFTools import BCFTools
# test_vcfFilter_BCFTools.py
@pytest.fixture
def vcf_object():
'''Returns an object'''
vcf_file = pytest.config.getoption("--vcf")
bcftools_folder = pytest.config.getoption("--bcftools_folder")
vcf_object=BCFTools(vcf=vcf_file,bcftools_folder=bcftools_folder)
return vcf_object
@pytest.fixture
def clean_tmp():
yield
print("Cleanup files")
files = glob.glob('data/outdir/*')
for f in files:
os.remove(f)
def test_filter_by_variant_type(vcf_object):
'''
Test method filter_by_variant_type
Will select SNPs from the VCF file
'''
outfile=vcf_object.filter_by_variant_type(outprefix='data/outdir/test', verbose=True)
assert os.path.isfile(outfile) is True
def test_filter_by_variant_type_biallelic(vcf_object):
'''
Test method filter_by_variant_type
using the biallelic option
'''
outfile=vcf_object.filter_by_variant_type(outprefix='data/outdir/test', biallelic=True, verbose=True)
assert os.path.isfile(outfile) is True
def test_filter_by_variant_type_biallelic_compressed(vcf_object):
'''
Test method filter_by_variant_type
using the biallelic option
'''
outfile=vcf_object.filter_by_variant_type(outprefix='data/outdir/test', biallelic=True, compress=False,
verbose=True)
assert os.path.isfile(outfile) is True
def test_subset_vcf(vcf_object):
'''
Test method subset_vcf to subset a VCF by using a BED file/region
'''
outfile=vcf_object.subset_vcf(outprefix='data/outdir/test.vcf.gz', region="chr1", apply_filters="PASS",verbose=True)
assert os.path.isfile(outfile) is True
def test_subset_vcf_and_throwerror(vcf_object):
'''
Test method subset_vcf to subset a VCF by using a BED file/region and using an invalid 'action'
parameter to throw an exception
'''
with pytest.raises(Exception):
outfile=vcf_object.subset_vcf(outprefix='data/outdir/test.vcf.gz', region="chr1", action='test', apply_filters="PASS",verbose=True)
def test_select_variants(vcf_object):
'''
Test method to select only the variants (exclude the 0|0 genotypes) from a VCF file
'''
outfile=vcf_object.select_variants(outprefix='data/outdir/test')
assert os.path.isfile(outfile) is True
def test_select_variants_exclude_uncalled(vcf_object):
'''
Test method to select only the variants (exclude the 0|0 genotypes) and also exclude the sites with uncalled genoytpes
from a VCF file
'''
outfile=vcf_object.select_variants(outprefix='data/outdir/test', uncalled='exclude', verbose=True)
assert os.path.isfile(outfile) is True
def test_filter(vcf_object,clean_tmp):
'''
Test method to filter variants from a VCF file by running bcftools filter
'''
outfile=vcf_object.filter(name='TESTFILTER',expression="'INFO/DP>24304'")
assert os.path.isfile(outfile) is True
|
15,464 | d21e6f5bdab2767c81ac29223d343fa7fcbcc4a0 | from .utils import expire_page
class ExpireCacheMiddleware(object):
    """Old-style Django middleware that expires the cached page for the
    current path when ``__placeholder_expire_page`` appears in the query
    string.

    Expiring both before and after the view guarantees the response served
    for this request is freshly generated and any just-stored copy is
    dropped as well.
    """
    def process_request(self, request):
        # Drop the cached copy before the view runs.
        if '__placeholder_expire_page' in request.GET:
            expire_page(request.path)

    def process_response(self, request, response):
        # Drop the copy the cache middleware may have just stored.
        if '__placeholder_expire_page' in request.GET:
            expire_page(request.path)
        # Bug fix: old-style middleware must return the response object;
        # the original returned None, which breaks the response chain.
        return response
|
15,465 | 0c3e226138d44858122d9846ed7085e7b2fe6c49 | """Scrape property sales information from QV.co.nz."""
import requests
from datetime import datetime
from settings import settings
SALES_SHEET = "Sales"
COLUMNS = ["Property", "Sale price", "Sale date", "Rates value"]
ID = "Property"
REQUEST_DATA = {
"MIME Type": "application/x-www-form-urlencoded; charset=UTF-8",
"op": "qv_widgets.rspRecentlySold.rspRecentlySold",
"subop": "lazyLoadData",
"maxSearch": "30",
"propertyDetailsNavpoint": "phoenix-656",
"areaType": "ta",
}
# Translation table that deletes the currency symbol and thousands separators.
_PRICE_JUNK = str.maketrans("", "", "$,")


def convert_prices(price):
    """Convert a price string such as ``"$1,234"`` to the integer 1234."""
    return int(price.translate(_PRICE_JUNK))
def convert_date(date_str):
    """Parse a ``DD/MM/YYYY`` string into a :class:`datetime.datetime`."""
    day, month, year = (int(part) for part in date_str.split("/"))
    return datetime(year, month, day)
def process_property(prop):
    """Map one raw QV sales record onto the report's column layout.

    The sale date and both money fields are converted from their string
    form; the address is passed through unchanged.
    """
    return {
        'Property': prop['PropertyAddress'],
        'Sale date': convert_date(prop['DateSold']),
        'Sale price': convert_prices(prop['SalePrice']),
        'Rates value': convert_prices(prop['CapitalValue']),
    }
def get_sale_prices():
    """Scrape the most recent sales from the specified QV URL (region).

    POSTs the widget's lazy-load form (REQUEST_DATA) to ``settings.qv_url``
    and returns a list of dicts, one per sale, shaped by process_property().
    """
    r = requests.post(settings.qv_url, data=REQUEST_DATA)
    response = r.json()
    # Normalise each raw record under 'LocalAreaSales' into report columns.
    data_processed = [process_property(prop) for prop in response['LocalAreaSales']]
    return data_processed
|
15,466 | 69ad01852b9bb55254cd551ba85b973991d9ba43 | class Person(object):
def talk(self):
print('talk')
def run(self):
print('run person')
class Car(object):
    """A car; it can only run."""

    def run(self):
        """Announce running as a car."""
        print('run')
# The left-most base class wins MRO lookups, so run() resolves to Car.run.
class PersonCarRobot(Car, Person):
    """A hybrid that talks (from Person), runs (from Car via MRO) and flies."""

    def fly(self):
        """Announce flying."""
        print('fly')
person_car_robot = PersonCarRobot()
person_car_robot.talk()
person_car_robot.run()
person_car_robot.fly() |
15,467 | ba187610427618a4e3026f14191523c00ae5a111 | import pygame
from random import randint
main_display = pygame.display.set_mode((800, 600))
bg = pygame.image.load('forest.jpg')
mosquito_raw = pygame.image.load('mosquito.png')
bg = pygame.transform.scale(bg, (800, 600))
mosquito = pygame.transform.scale(mosquito_raw, (150, 150))
mosquito_rect = mosquito.get_rect(x = randint(0, 700), y = randint(0, 500))
game = True
clock = pygame.time.Clock()
FPS = 20
speed_x = 0
speed_y = 0
while game:
now = 0
clock.tick(FPS)
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
game = False
if e.type == pygame.MOUSEBUTTONDOWN:
click_time = pygame.time.get_ticks()
if mosquito_rect.collidepoint(e.pos):
pygame.draw.circle(bg, 'red', e.pos, 15)
mosquito = pygame.transform.scale(mosquito_raw, (1, 1))
main_display.blit(mosquito, mosquito_rect)
now = pygame.time.get_ticks()
while now <= 1000 + click_time:
now = pygame.time.get_ticks()
mosquito = pygame.transform.scale(mosquito_raw, (150, 150))
main_display.blit(mosquito, mosquito_rect)
mosquito_rect.x += speed_x
mosquito_rect.y += speed_y
speed_x += randint(-2, 2)
speed_y += randint(-2, 2)
if speed_x > 5:
speed_x -= randint(1, 3)
if speed_y > 5:
speed_y -= randint(1, 3)
if mosquito_rect.x > 650 or mosquito_rect.x < 0:
speed_x *= -1
if mosquito_rect.y > 450 or mosquito_rect.y < 0:
speed_y *= -1
main_display.blit(bg, (0, 0))
main_display.blit(mosquito, mosquito_rect)
pygame.display.update() |
15,468 | 9dc646253470bca38c641a15482a16cd4349135b | # -*- coding: utf-8 -*-
from flask import Flask, Response, make_response, render_template, request
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file
from bokeh.embed import components
from bokeh.charts import Histogram
app = Flask(__name__)
# Import dataset
data = pd.read_csv('data/gapminder.csv')
data = data[(data.Year >= 1950)]
country_names = sorted(list(set(data.Country)))
attribute_names = data.columns[2:-1].values.tolist()
# Load the Iris Data Set
iris_df = pd.read_csv("data/iris.data",
names=["Sepal Length", "Sepal Width", "Petal Length", "Petal Width", "Species"])
feature_names = iris_df.columns[0:-1].values.tolist()
# Index page
@app.route('/')
def index():
return render_template('index.html')
# Create the main plot
def create_gapminder_figure(first_country='China',
second_country='Singapore',
selected_attribute='income'):
# filter datasets according to country
first_country_data = data[(data.Country == first_country)]
second_country_data = data[(data.Country == second_country)]
first_country_data_attribute = list(first_country_data[selected_attribute])
second_country_data_attribute = list(second_country_data[selected_attribute])
years = list(first_country_data["Year"])
# output to static HTML file
output_file("gapminder.html")
# create a new plot
p = figure(title="Country Data Analysis", x_axis_label='Years',
width=1280, height=720)
p.line(years, first_country_data_attribute, legend=first_country,
line_color="blue", line_width=3)
p.line(years, second_country_data_attribute, legend=second_country,
line_color="green", line_width=3)
return p
@app.route('/gapminder', methods=['GET', 'POST'])
def gapminder_plot():
first_country = "China"
second_country = "Singapore"
selected_attribute = "income"
if request.method == 'POST':
first_country = request.form["first_country"]
second_country = request.form["second_country"]
selected_attribute = request.form["selected_attribute"]
# Create the plot
plot = create_gapminder_figure(first_country, second_country, selected_attribute)
# Embed plot into HTML via Flask Render
script, div = components(plot)
return render_template("gapminder.html",
script=script,
div=div,
country_names=country_names,
attribute_names=attribute_names,
selected_attribute=selected_attribute,
first_country=first_country,
second_country=second_country)
# Create the iris plot
def create_iris_figure(current_feature_name, bins):
p = Histogram(iris_df, current_feature_name,
title=current_feature_name, color='Species',
bins=bins, legend='top_right', width=600, height=400)
# Set the x axis label
p.xaxis.axis_label = current_feature_name
# Set the y axis label
p.yaxis.axis_label = 'Count'
return p
@app.route('/iris', methods=['GET', 'POST'])
def iris_plot():
    """Render the iris histogram page for the selected feature.

    Falls back to "Sepal Length" when no ``feature_name`` query parameter
    is supplied (fix: the original compared the value with ``== None``).
    """
    current_feature_name = request.args.get("feature_name", "Sepal Length")
    # Create the plot with a fixed bin count of 10.
    plot = create_iris_figure(current_feature_name, 10)
    script, div = components(plot)
    return render_template("iris.html",
                           script=script,
                           div=div,
                           feature_names=feature_names,
                           current_feature_name=current_feature_name)
# With debug=True, Flask server will auto-reload
# when there are code changes
if __name__ == '__main__':
app.run(port=5000, debug=True)
|
15,469 | c4042d835cf2257ca1835c7471940370a7c8e2f9 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the training data: square-metre area ("metrekare") vs. price ("fiyat").
data=pd.read_csv("linear.csv")
x=data["metrekare"]
y=data["fiyat"]
x=np.array(x)
y=np.array(y)
#plt.scatter(x,y)
#plt.show()
# Fit a degree-1 polynomial (simple linear regression): y = m*x + b.
m,b=np.polyfit(x,y,1)
print("en uygun eğim" ,m)
print("en uygın b değeri",b)
# Plot the fitted line over 0..199 square metres together with the raw data.
uzunluk=np.arange(200)
plt.scatter(x,y)
plt.plot(m*uzunluk+b)
plt.show()
# Ask the user for an area and mark the predicted price on the chart.
z=int(input("Kaç metrekare Tahmin etmek istersiniz?"))
print("Tahmininiz:{}".format(z))
tahmin=m*z+b
plt.scatter(x,y)
plt.plot(m*uzunluk+b)
plt.scatter(z,tahmin,c="red",marker="v")
plt.show()
15,470 | 664c698a04b6046666f0171b26b5aadff343d550 | from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Ingredient(models.Model):
name = models.TextField(verbose_name='ingredient_name')
units = models.TextField(verbose_name='units')
def __str__(self):
return self.name
class Meta:
constraints = [
models.UniqueConstraint(
fields=[
'name',
'units'],
name='unique ingredient'),
]
class Tag(models.Model):
name = models.CharField(max_length=255, verbose_name='tagname')
color = models.CharField(max_length=100, blank=True,
verbose_name='tagcolor', default='')
def __str__(self):
return self.name
class Recipe(models.Model):
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='recipes',
verbose_name='author')
title = models.CharField(max_length=64, verbose_name='title')
image = models.ImageField(blank=True, null=False, verbose_name='image')
description = models.TextField(verbose_name='description')
ingredients = models.ManyToManyField(
Ingredient,
through='IngredientRecipe',
verbose_name='ingredientrecipe')
tags = models.ManyToManyField(
Tag, related_name='recipes', verbose_name='tags')
cooktime = models.PositiveIntegerField(verbose_name='cooktime')
slug = models.SlugField(verbose_name='slug')
pub_date = models.DateTimeField(
auto_now_add=True,
db_index=True,
verbose_name='date'
)
def __str__(self):
return self.title
class IngredientRecipe(models.Model):
ingredient = models.ForeignKey(
Ingredient,
on_delete=models.CASCADE,
related_name='ingredients',
verbose_name='ingredient')
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='ingredient_recipe',
verbose_name='recipe')
value = models.PositiveIntegerField(verbose_name='value', null=True)
class Follow(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='follower',
verbose_name='follower')
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='following',
verbose_name='following')
class Meta:
constraints = [
models.UniqueConstraint(
fields=[
'user',
'author'],
name='unique follow'),
]
class Favorite(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='my_user',
verbose_name='user')
recipe = models.ForeignKey(Recipe, blank=True,
on_delete=models.CASCADE,
related_name='favorite_recipes',
default='',
verbose_name='favorites')
class Meta:
constraints = [
models.UniqueConstraint(
fields=[
'user',
'recipe'],
name='unique favorite'),
]
class Cart(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='user_purchases',
verbose_name='user')
recipe = models.ForeignKey(
Recipe, blank=True,
on_delete=models.CASCADE,
related_name='listed_recipes',
default='',
verbose_name='listed_recipes')
class Meta:
constraints = [
models.UniqueConstraint(
fields=[
'user',
'recipe'],
name='unique cart'),
]
|
15,471 | 5a6c14c4cefbcb00ad403c8d4cb445680f784c1a | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Return a plain-text greeting for the app root."""
    greeting = HttpResponse('Hello world!')
    return greeting
def hello(request):
    """Render the hello template with a small test context."""
    context = {'test': 'aa'}
    return render(request, 'hello.html', context)
15,472 | 92d6756b1946940fd08ab49577f4701d965f9e94 | """
starter.py
All text enclosed in triple quotes (regardless of line breaks inside) will turn into a comment.
Comments will be ignored when the code, which means we can use this to document what our code does.
"""
# using a hashtag is another way to comment. you need another hashtag if you are going to use multiple lines.
# Remember those packages we installed into the conda environment? This is how we import and use them.
import numpy as np # here, the syntax is import <package name> with an optional mechanism to rename the package. (as np)
def main():
    """Walk through Python basics: printing, variables, arithmetic,
    strings and lists."""
    # We can print things to the command line, which will show up when we run the file.
    print("Hello! My name is Alex")
    # we can directly define variables and assign them values:
    a = 1
    b = 2.0
    # we can print out variables:
    print(a)
    print(b)
    # we can do mathematical operations:
    d = a + b
    # note that we are putting d on the left hand.
    # you can think of the equals sign as saying "d receives the value of a + b"
    print(d)
    # We can also define a variable to be some text. This is called a String, bc it's a string of characters.
    c = "Hi! Nice to meet you."
    # It can be printed.
    print(c)
    # This is nice, but it also makes sense to have things that are not just single numbers or strings.
    # We can define data structures like Lists. This is done by using brackets.
    empty_list = []
    full_list = ["apple", "pear", "COVID"]
    print(empty_list)
    print(full_list)


# Bug fix: the original placed the ``__name__`` guard *inside* main() and then
# called main() unconditionally at import time. The guard belongs at module
# level: run the demo only when this file is executed directly.
if __name__ == "__main__":
    main()
15,473 | 8a7899012bd32a7fd13f78da0232c3309e127687 | import csv
import utility
def convert_to_float(string_vector):
vector = []
for item in string_vector:
vector.append(float(item))
return vector
def get_features_list(csv_file):
    """Load feature vectors (columns 3 onward) from ``csv_file``.

    The first line is treated as a header and skipped; every other row is
    converted to a list of floats. A progress bar is printed while reading.

    Returns a list of float vectors, one per data row.
    """
    data_set = []
    # First pass: count lines (minus the header) so the progress bar has a total.
    with open(csv_file) as f:
        data_set_count = sum(1 for line in f) - 1
    with open(csv_file, 'r') as csvFile:
        reader = csv.reader(csvFile)
        iteration = 0
        print("")
        print("preparing data...")
        for row in reader:
            if iteration > 0:
                # BUG FIX: the original built a shot_metadata list here and
                # appended it to itself — a discarded, self-referential list
                # that was never used. That dead code has been removed.
                data_set.append(convert_to_float(row[3:len(row)]))
                utility.print_progress_bar(iteration, data_set_count)
            else:
                # Header row.
                # NOTE(review): combined with the "- 1" above this decrements
                # the total twice; it only affects the progress display.
                data_set_count -= 1
            iteration += 1
        # (the redundant csvFile.close() inside the with-block was removed)
    print("")
    return data_set
def generate_average_distance_csv(features_csv, distance_output):
    """Write a one-row CSV with the mean pairwise distance between all
    feature vectors in ``features_csv``.

    Python 2 script: note the print statement and the 'wb' csv-writer mode.
    """
    feature_vectors = get_features_list(features_csv)
    feature_vector_count = len(feature_vectors)
    print "generating distance csv..."
    distance_count = 0
    distance_sum = 0
    iteration = 1
    # Sum the distance over every unordered pair (i, j), i < j — O(n^2).
    for i in range(feature_vector_count):
        # NOTE(review): this guard can never trigger (i < count always).
        if i + 1 > feature_vector_count:
            break
        for j in range(i + 1, feature_vector_count):
            v1 = feature_vectors[i]
            v2 = feature_vectors[j]
            distance_sum += utility.calculate_distance(v1, v2)
            distance_count += 1
        utility.print_progress_bar(iteration, feature_vector_count)
        iteration += 1
    with open(distance_output, 'wb') as f:
        the_writer = csv.writer(f)
        headers = [
            "file",
            "average_distance"
        ]
        the_writer.writerow(headers)
        # Mean over all pairs; float() avoids Python 2 integer division.
        vector = [features_csv, float(distance_sum) / float(distance_count)]
        the_writer.writerow(vector)
        f.close()
def main():
    """Produce an average-distance CSV for each known feature file."""
    input_files = (
        "./input/complete_video_features.csv",
        "./input/normalized_complete_video_features.csv",
        "./input/video_features.csv",
        "./input/normalized_video_features.csv",
    )
    for input_path in input_files:
        output_path = input_path.replace(".csv", "") + "_distance.csv"
        generate_average_distance_csv(input_path, output_path)
main()
|
15,474 | 5c52a1c8752c66c349175566d0051d1508c6ad46 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 2021
@author: George Yiasemis
"""
import numpy as np
# The Agent class allows the agent to interact with the environment.
class Agent():
    """Lets an agent interact with an environment using discrete actions.

    Tracks the current state, accumulates the per-episode reward and maps
    discrete action ids onto fixed continuous displacements.
    """
    # The class initialisation function.
    def __init__(self, environment, gamma=0.9, reward_fun=None):
        """Create an agent bound to ``environment``.

        gamma: discount factor for the Bellman equation.
        reward_fun: maps distance-to-goal -> reward; defaults to
            reward_fun_a(a=1) when not supplied.
        """
        # Set the agent's environment.
        self.environment = environment
        # Set gamma for the Bellman Eq.
        self.gamma = gamma
        # The agent's current state (populated by reset()).
        self.state = None
        # Fix: identity comparison with None (was ``== None``).
        if reward_fun is None:
            self.reward_fun = reward_fun_a(a=1)
        else:
            self.reward_fun = reward_fun
        # Total reward accumulated over the current episode.
        self.total_reward = None
        # Reset the agent (starts the first episode).
        self.reset()

    def reset(self):
        """Start a new episode: reset the environment, zero the reward.

        Call this at the start of every episode.
        """
        self.state = self.environment.reset()
        self.total_reward = 0.0

    def step(self, discrete_action):
        """Take one step in the environment with ``discrete_action``.

        Returns ``(transition, has_reached_goal)`` where transition is
        ``(state, action, reward, next_state)``.
        """
        assert discrete_action in range(self.environment.num_actions)
        # Convert the discrete action into a continuous action.
        continuous_action = self._discrete_action_to_continuous(discrete_action)
        # Step the environment; it returns the next state and the new
        # distance to the goal (and draws itself if display was enabled).
        next_state, self.distance_to_goal = self.environment.step(self.state, continuous_action)
        # Compute the reward for this action.
        reward = self._compute_reward(self.distance_to_goal)
        # Create a transition tuple for this step.
        transition = (self.state, discrete_action, reward, next_state)
        # Advance the agent's state and accumulate the episode reward.
        self.state = next_state
        self.total_reward += reward
        # Goal test: compare states rounded to 2 decimal places.
        has_reached_goal = np.all(self.state.round(2) == self.environment.goal_state.round(2))
        return transition, has_reached_goal

    def _compute_reward(self, distance_to_goal):
        """Reward for ending a step at ``distance_to_goal`` from the goal."""
        return self.reward_fun(distance_to_goal)

    def _discrete_action_to_continuous(self, discrete_action):
        """Map a discrete action id (as used by a DQN) to a continuous move.

        0: up, 1: right, 2: down, 3: left — each a 0.1 step, float32.
        """
        actions = {0: np.array([0.1, 0]), 1: np.array([0, 0.1]),
                   2: np.array([-0.1, 0]), 3: np.array([0, -0.1])}
        return actions[discrete_action].astype('float32')
def reward_fun_a(a=1):
    """Return a reward function mapping distance d to ``(1 - d) ** a``."""
    def _reward(dist):
        return np.power(1 - dist, a)
    return _reward
def step_reward_fun(goal_reward=1):
    """Return a sparse reward function: ``goal_reward`` when the distance
    to the goal is at most 0.05, otherwise 0."""
    def _reward(dist):
        if dist <= 0.05:
            return goal_reward
        return 0
    return _reward
|
15,475 | edcdd247adc037930ba1d18ec9ab9b89711bc1ca | #!/usr/bin/env python
from __future__ import print_function
from base64 import b64decode, b64encode
from hashlib import md5, sha1, sha256
from os.path import join
from subprocess import Popen, check_output
import binascii, hmac, os, platform, tarfile
import Queue, random, re, shutil, signal, sys, time
import SimpleHTTPServer, socket, threading, zipfile
import httplib
try: import wingdbstub
except: pass
#file system setup.
data_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(data_dir))
install_dir = os.path.dirname(os.path.dirname(data_dir))
sessions_dir = join(data_dir, 'sessions')
time_str = time.strftime('%d-%b-%Y-%H-%M-%S', time.gmtime())
current_session_dir = join(sessions_dir, time_str)
os.makedirs(current_session_dir)
#OS detection
m_platform = platform.system()
if m_platform == 'Windows': OS = 'mswin'
elif m_platform == 'Linux': OS = 'linux'
elif m_platform == 'Darwin': OS = 'macos'
#Globals
my_id = binascii.hexlify(os.urandom(3))
firefox_pid = selftest_pid = 0
audit_no = 0 #we may be auditing multiple URLs. This var keeps track of how many
#successful audits there were so far and is used to index html files audited.
suspended_session = None #while FF validates the certificate
#Default values from the config file. Will be overridden after configfile is parsed
global_tlsver = bytearray('\x03\x02')
global_use_gzip = True
global_use_slowaes = False
global_use_paillier = False
hcts = None #an http connection to notary
#Receive AES cleartext and send ciphertext to browser
class HandlerClass_aes(SimpleHTTPServer.SimpleHTTPRequestHandler):
#Using HTTP/1.0 instead of HTTP/1.1 is crucial, otherwise the minihttpd just keep hanging
#https://mail.python.org/pipermail/python-list/2013-April/645128.html
protocol_version = "HTTP/1.0"
def do_HEAD(self):
print ('aes_http received ' + self.path[:80] + ' request',end='\r\n')
# example HEAD string "/command?parameter=124value1¶2=123value2"
# we need to adhere to CORS and add extra Access-Control-* headers in server replies
if self.path.startswith('/ready_to_decrypt'):
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Expose-Headers", "status, response, ciphertext, key, iv")
self.send_header("response", "ready_to_decrypt")
self.send_header("status", "success")
#wait for sth to appear in the queue
ciphertext, key, iv = aes_ciphertext_queue.get()
self.send_header("ciphertext", b64encode(ciphertext))
self.send_header("key", b64encode(key))
self.send_header("iv", b64encode(iv))
global b_awaiting_cleartext
b_awaiting_cleartext = True
self.end_headers()
return
if self.path.startswith('/cleartext'):
if not b_awaiting_cleartext:
print ('OUT OF ORDER:' + self.path)
raise Exception ('received a cleartext request out of order')
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Expose-Headers", "status, response")
self.send_header("response", "cleartext")
self.send_header("status", "success")
cleartext = b64decode(self.path[len('/cleartext?b64cleartext='):])
aes_cleartext_queue.put(cleartext)
b_awaiting_cleartext = False
self.end_headers()
return
#overriding BaseHTTPServer.py's method to cap the output
def log_message(self, fmt, *args):
sys.stderr.write("%s - - [%s] %s\n" %
(self.client_address[0],
self.log_date_time_string(),
(fmt%args)[:80]))
#Receive HTTP HEAD requests from FF addon
class HandleBrowserRequestsClass(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Handler for the HEAD-based RPC protocol spoken by the Firefox addon.

    The addon drives the audit by issuing HTTP HEAD requests whose path
    encodes a command plus base64 arguments; all replies are carried in
    custom response headers (see respond()). State between commands is kept
    in the module-global `suspended_session`.
    """
    #HTTP/1.0 instead of HTTP/1.1 is crucial, otherwise the http server just keep hanging
    #https://mail.python.org/pipermail/python-list/2013-April/645128.html
    protocol_version = 'HTTP/1.0'
    def respond(self, headers):
        """Send a 200 reply whose entire payload is the given header dict."""
        # we need to adhere to CORS and add extra Access-Control-* headers in server replies
        keys = [k for k in headers]
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Expose-Headers', ','.join(keys))
        for key in headers:
            self.send_header(key, headers[key])
        self.end_headers()
    def get_certificate(self, args):
        """Probe the target server and report its certificate to the addon.

        args -- 'b64headers=<base64 of the raw HTTP request headers>'.
        Returns [server_name, modified_headers, certhash] (stored by the
        caller in suspended_session) on success, None on a bad parameter.
        """
        if not args.startswith('b64headers='):
            self.respond({'response':'get_certificate', 'status':'wrong HEAD parameter'})
            return
        b64headers = args[len('b64headers='):]
        headers = b64decode(b64headers)
        server_name, modified_headers = parse_headers(headers)
        print('Probing server to get its certificate')
        try:
            probe_session = shared.TLSNClientSession(server_name, tlsver=global_tlsver)
            probe_sock = shared.create_sock(probe_session.server_name,probe_session.ssl_port)
            probe_session.start_handshake(probe_sock)
        except shared.TLSNSSLError:
            shared.ssl_dump(probe_session)
            raise
        probe_sock.close()
        certBase64 = b64encode(probe_session.server_certificate.asn1cert)
        # the hash is compared against the audit connection's cert later, to
        # detect a different certificate being served for the real audit
        certhash = sha256(probe_session.server_certificate.asn1cert).hexdigest()
        self.respond({'response':'get_certificate', 'status':'success','certBase64':certBase64})
        return [server_name, modified_headers, certhash]
    def start_audit(self, args):
        """Run the main audit: handshake, fetch data, commit to the notary.

        args -- 'server_modulus=<hex>&ciphersuite=<cs>'. Uses the
        server_name/headers/certhash stashed by get_certificate() in
        suspended_session. On success either asks the browser to decrypt
        (AES path) or reports the finished audit with the html path.
        """
        global global_tlsver
        global global_use_gzip
        global global_use_slowaes
        global suspended_session
        arg1, arg2 = args.split('&')
        if not arg1.startswith('server_modulus=') or not arg2.startswith('ciphersuite='):
            self.respond({'response':'start_audit', 'status':'wrong HEAD parameter'})
            return
        server_modulus_hex = arg1[len('server_modulus='):]
        #modulus is lowercase hexdigest
        server_modulus = bytearray(server_modulus_hex.decode("hex"))
        cs = arg2[len('ciphersuite='):] #used for testing, empty otherwise
        server_name, modified_headers, certhash = suspended_session
        tlsn_session = shared.TLSNClientSession(server_name, tlsver=global_tlsver)
        tlsn_session.server_modulus = shared.ba2int(server_modulus)
        tlsn_session.server_mod_length = shared.bi2ba(len(server_modulus))
        print ('Preparing encrypted pre-master secret')
        prepare_pms(tlsn_session)
        # retry the whole handshake+request up to 10 times; transient
        # failures are common, SSL-layer failures are fatal immediately
        for i in range(10):
            try:
                print ('Performing handshake with server')
                tls_sock = shared.create_sock(tlsn_session.server_name,tlsn_session.ssl_port)
                tlsn_session.start_handshake(tls_sock)
                retval = negotiate_crippled_secrets(tlsn_session, tls_sock)
                if not retval == 'success':
                    raise shared.TLSNSSLError('Failed to negotiate secrets: '+retval)
                #before sending any data to server compare this connection's cert to the
                #one which FF already validated earlier
                if sha256(tlsn_session.server_certificate.asn1cert).hexdigest() != certhash:
                    raise Exception('Certificate mismatch')
                print ('Getting data from server')
                response = make_tlsn_request(modified_headers,tlsn_session,tls_sock)
                #prefix response with number of to-be-ignored records,
                #note: more than 256 unexpected records will cause a failure of audit. Just as well!
                response = shared.bi2ba(tlsn_session.unexpected_server_app_data_count,fixed=1) + response
                break
            except shared.TLSNSSLError:
                shared.ssl_dump(tlsn_session)
                raise
            except Exception as e:
                print ('Exception caught while getting data from server, retrying...', e)
                if i == 9:
                    raise Exception('Audit failed')
                continue
        global audit_no
        audit_no += 1 #we want to increase only after server responded with data
        sf = str(audit_no)
        commit_hash, pms2, signature = commit_session(tlsn_session, response,sf)
        with open(join(current_session_dir,'sigfile'+sf),'wb') as f:
            f.write(signature)
        with open(join(current_session_dir,'commit_hash_pms2_servermod'+sf),'wb') as f:
            f.write(commit_hash+pms2+shared.bi2ba(tlsn_session.server_modulus))
        #create temporary file containing hash of message (called the 'digest')
        with open(join(current_session_dir,'commit_digest'+sf),'wb') as f:
            f.write(sha256(commit_hash+pms2+shared.bi2ba(tlsn_session.server_modulus)).digest())
        print ('Verifying against notary server pubkey...')
        if not shared.verify_data(join(current_session_dir,'commit_digest'+sf),
                join(current_session_dir,'sigfile'+sf), join(install_dir,'public.pem')):
            raise Exception("Audit FAILED, notary signature invalid.")
        print ('Verified OK')
        #another option would be a fixed binary format for a *.audit file:
        #cs|cr|sr|pms1|pms2|n|e|domain|tlsver|origtlsver|response|signature|notary_pubkey
        audit_data = 'tlsnotary audit file\n\n'
        audit_data += '\x00\x01' #2 version bytes
        audit_data += shared.bi2ba(tlsn_session.chosen_cipher_suite,fixed=2) # 2 bytes
        audit_data += tlsn_session.client_random + tlsn_session.server_random # 64 bytes
        audit_data += tlsn_session.pms1 + pms2 #48 bytes
        audit_data += tlsn_session.server_mod_length #2 bytes
        audit_data += shared.bi2ba(tlsn_session.server_modulus) #256 bytes usually
        audit_data += shared.bi2ba(tlsn_session.server_exponent, fixed=8) #8 bytes
        audit_data += shared.bi2ba(len(tlsn_session.server_name),fixed=2)
        audit_data += tlsn_session.server_name #variable; around 10 bytes
        audit_data += tlsn_session.tlsver #2 bytes
        audit_data += tlsn_session.initial_tlsver #2 bytes
        audit_data += shared.bi2ba(len(response),fixed=8) #8 bytes
        audit_data += response #note that it includes unexpected pre-request app data, 10s of kB
        # CBC suites (47, 53) store a raw IV; RC4 suites store the whole state
        IV = tlsn_session.IV_after_finished if tlsn_session.chosen_cipher_suite in [47,53] \
            else shared.rc4_state_to_bytearray(tlsn_session.IV_after_finished)
        audit_data += shared.bi2ba(len(IV),fixed=2) #2 bytes
        audit_data += IV #16 bytes or 258 bytes for RC4.
        audit_data += signature #512 bytes RSA PKCS 1 v1.5 padding
        audit_data += commit_hash #32 bytes sha256 hash
        with open(join(install_dir,"public.pem"),"rb") as f:
            audit_data += f.read()
        with open(join(current_session_dir,sf+".audit"),"wb") as f:
            f.write(audit_data)
        print ("\n\n AUDIT SUCCEEDED. \n ",
               "You can pass the file(s) " , join(current_session_dir, "1.audit (and 2.audit etc. if they exist)"),
               " to an auditor for verification.")
        rv = decrypt_html(pms2, tlsn_session, sf)
        if rv[0] == 'decrypt':
            # AES path: browser must decrypt; send it the first ciphertext
            # and suspend state for the follow-up process_cleartext() calls
            ciphertexts = rv[1]
            ciphertext, key, iv = ciphertexts[0]
            b64blob = b64encode(iv)+';'+b64encode(key)+';'+b64encode(ciphertext)
            suspended_session = [tlsn_session, ciphertexts, [], 0, sf]
            self.respond({'response':'start_audit', 'status':'success',
                          'next_action':'decrypt', 'argument':b64blob})
            return
        #else no browser decryption necessary
        html_paths = b64encode(rv[1])
        self.respond({'response':'start_audit', 'status':'success', 'next_action':'audit_finished', 'argument':html_paths})
    def process_cleartext(self, args):
        """Collect one browser-decrypted AES plaintext chunk.

        Either requests decryption of the next ciphertext or, once all
        chunks are in, MAC-checks the plaintexts and finishes the audit.
        """
        global suspended_session
        tlsn_session, ciphertexts, plaintexts, index, sf = suspended_session
        raw_cleartext = b64decode(args[len('b64cleartext='):])
        #crypto-js removes pkcs7 padding. There is still an extra byte which we remove it manually
        plaintexts.append(raw_cleartext[:-1])
        if (index+1) < len(ciphertexts):
            # more ciphertexts pending: hand the next one to the browser
            index = index + 1
            ciphertext, key, iv = ciphertexts[index]
            b64blob = b64encode(iv)+';'+b64encode(key)+';'+b64encode(ciphertext)
            suspended_session = [tlsn_session, ciphertexts, plaintexts, index, sf]
            self.respond({'response':'cleartext', 'next_action':'decrypt',
                          'argument':b64blob, 'status':'success'})
            return
        #else this was the last decrypted ciphertext
        plaintext = tlsn_session.mac_check_plaintexts(plaintexts)
        rv = decrypt_html_stage2(plaintext, tlsn_session, sf)
        self.respond({'response':'cleartext', 'status':'success', 'next_action':'audit_finished', 'argument':b64encode(rv[1])})
    def do_HEAD(self):
        """Dispatch an incoming HEAD request to the matching command handler."""
        request = self.path
        print ('browser sent ' + request[:80] + '... request',end='\r\n')
        # example HEAD string "/command?parameter=124value1&para2=123value2"
        if request.startswith('/get_certificate'):
            global suspended_session
            suspended_session = self.get_certificate(request.split('?', 1)[1])
        elif request.startswith('/start_audit'):
            self.start_audit(request.split('?', 1)[1])
        elif request.startswith('/cleartext'):
            self.process_cleartext(request.split('?', 1)[1])
        else:
            self.respond({'response':'unknown command'})
    #overriding BaseHTTPRequestHandler's method to cap the output
    def log_message(self, fmt, *args):
        sys.stderr.write("%s - - [%s] %s\n" %
                         (self.client_address[0],
                          self.log_date_time_string(),
                          (fmt%args)[:80]))
#Because there is a 1 in ? chance that the encrypted PMS will contain zero bytes in its
#padding, we first try the encrypted PMS with a reliable site and see if it gets rejected.
#TODO the probability seems to have increased too much w.r.t. random padding, investigate
def prepare_pms(tlsn_session):
    """Prepare the jointly-built encrypted pre-master secret for the audit.

    Runs a trial handshake against a randomly chosen 'reliable site' so the
    notary can contribute its PMS half; retries up to 10 times because the
    combined encrypted PMS occasionally contains zero bytes in its RSA
    padding and is rejected. On success, stores the auditee secrets and the
    encrypted PMS on tlsn_session; raises after 10 failed attempts.
    """
    n = shared.bi2ba(tlsn_session.server_modulus)
    rs_choice = random.choice(shared.reliable_sites.keys())
    for i in range(10): #keep trying until reliable site check succeeds
        try:
            pms_session = shared.TLSNClientSession(rs_choice,shared.reliable_sites[rs_choice][0], ccs=53, tlsver=global_tlsver)
            if not pms_session:
                raise Exception("Client session construction failed in prepare_pms")
            tls_sock = shared.create_sock(pms_session.server_name,pms_session.ssl_port)
            pms_session.start_handshake(tls_sock)
            # send both randoms + site tag + server modulus to the notary
            reply = send_and_recv('rcr_rsr_rsname_n',
                                  pms_session.client_random+pms_session.server_random+rs_choice[:5]+n)
            if reply[0] != 'success':
                raise Exception ('Failed to receive a reply for rcr_rsr_rsname_n:')
            if not reply[1].startswith('rrsapms_rhmac_rsapms'):
                raise Exception ('bad reply. Expected rrsapms_rhmac_rsapms:')
            reply_data = reply[1][len('rrsapms_rhmac_rsapms:'):]
            # reply layout: 256B encrypted reliable-site PMS half,
            # 48B auditor hmac contribution, rest = encrypted real-site PMS half
            rrsapms2 = reply_data[:256]
            pms_session.p_auditor = reply_data[256:304]
            rsapms2 = reply_data[304:]
            response = pms_session.complete_handshake(tls_sock,rrsapms2)
            tls_sock.close()
            if not response:
                print ("PMS trial failed")
                continue
            #judge success/fail based on whether a properly encoded
            #Change Cipher Spec record is returned by the server (we could
            #also check the server finished, but it isn't necessary)
            if not response.count(shared.TLSRecord(shared.chcis,f='\x01', tlsver=global_tlsver).serialized):
                print ("PMS trial failed, retrying. (",binascii.hexlify(response),")")
                continue
            # trial succeeded: copy the secrets over to the real audit session
            tlsn_session.auditee_secret = pms_session.auditee_secret
            tlsn_session.auditee_padding_secret = pms_session.auditee_padding_secret
            tlsn_session.enc_second_half_pms = shared.ba2int(rsapms2)
            tlsn_session.set_enc_first_half_pms()
            tlsn_session.set_encrypted_pms()
            return
        except shared.TLSNSSLError:
            shared.ssl_dump(pms_session,fn='preparepms_ssldump')
            shared.ssl_dump(tlsn_session)
            raise
        #except Exception,e:
        #    print ('Exception caught in prepare_pms, retrying...', e)
        #    continue
    raise Exception ('Could not prepare PMS with ', rs_choice, ' after 10 tries. Please '+\
                     'double check that you are using a valid public key modulus for this site; '+\
                     'it may have expired.')
def send_and_recv (hdr, dat,timeout=5):
    """Send one command to the notary server and return its reply.

    The payload travels base64-encoded in a HEAD request path; the answer
    comes back in the 'response' and 'data' headers. Returns
    ('success', header + decoded data). Raises if the notary reports it is
    busy. (The timeout parameter is currently unused.)
    """
    request_path = '/' + my_id + hdr + ':' + b64encode(dat)
    hcts.request("HEAD", request_path)
    reply = hcts.getresponse()
    reply_header = reply.getheader('response')
    reply_payload = reply.getheader('data')
    if 'busy' in reply_header:
        raise Exception("Notary server is busy, quitting. Try again later.")
    return ('success', reply_header + b64decode(reply_payload))
#reconstruct correct http headers
#for passing to TLSNotary custom ssl session
def parse_headers(headers):
    """Extract the target server name and a (possibly gzip-stripped) header set.

    Returns (server, modified_headers). When gzip support is globally
    disabled, every header line mentioning gzip is dropped so the server
    replies with uncompressed data.
    """
    header_lines = headers.split('\r\n') #no new line issues; it was constructed like that
    server = header_lines[1].split(':')[1].strip()
    if global_use_gzip:
        modified_headers = '\r\n'.join(header_lines)
    else:
        modified_headers = '\r\n'.join(line for line in header_lines if 'gzip' not in line)
    return (server, modified_headers)
def negotiate_crippled_secrets(tlsn_session, tls_sock):
    '''Negotiate with auditor in order to create valid session keys
    (except server mac is garbage as auditor withholds it)'''
    assert tlsn_session.handshake_hash_md5
    assert tlsn_session.handshake_hash_sha
    tlsn_session.set_auditee_secret()
    # send: chosen ciphersuite byte, both randoms, our 24-byte master-secret
    # half, and the md5+sha handshake hashes
    cs_cr_sr_hmacms_verifymd5sha = chr(tlsn_session.chosen_cipher_suite) + tlsn_session.client_random + \
        tlsn_session.server_random + tlsn_session.p_auditee[:24] + tlsn_session.handshake_hash_md5 + \
        tlsn_session.handshake_hash_sha
    reply = send_and_recv('cs_cr_sr_hmacms_verifymd5sha',cs_cr_sr_hmacms_verifymd5sha)
    if reply[0] != 'success':
        raise Exception ('Failed to receive a reply for cs_cr_sr_hmacms_verifymd5sha:')
    if not reply[1].startswith('hmacms_hmacek_hmacverify:'):
        raise Exception ('bad reply. Expected hmacms_hmacek_hmacverify but got', reply[1])
    reply_data = reply[1][len('hmacms_hmacek_hmacverify:'):]
    expanded_key_len = shared.tlsn_cipher_suites[tlsn_session.chosen_cipher_suite][-1]
    if len(reply_data) != 24+expanded_key_len+12:
        raise Exception('unexpected reply length in negotiate_crippled_secrets')
    # reply layout: 24B master-secret half | expanded key material | 12B verify hmac
    hmacms = reply_data[:24]
    hmacek = reply_data[24:24 + expanded_key_len]
    hmacverify = reply_data[24 + expanded_key_len:24 + expanded_key_len+12]
    tlsn_session.set_master_secret_half(half=2,provided_p_value = hmacms)
    tlsn_session.p_master_secret_auditor = hmacek
    tlsn_session.do_key_expansion()
    tlsn_session.send_client_finished(tls_sock,provided_p_value=hmacverify)
    # second round: verify the server's Finished with the auditor's help
    sha_digest2,md5_digest2 = tlsn_session.set_handshake_hashes(server=True)
    reply = send_and_recv('verify_md5sha2',md5_digest2+sha_digest2)
    if reply[0] != 'success':
        raise Exception("Failed to receive a reply for verify_md5sha2")
    if not reply[1].startswith('verify_hmac2:'):
        raise Exception("bad reply. Expected verify_hmac2:")
    if not tlsn_session.check_server_ccs_finished(provided_p_value = reply[1][len('verify_hmac2:'):]):
        raise Exception ("Could not finish handshake with server successfully. Audit aborted")
    return 'success'
def make_tlsn_request(headers,tlsn_session,tls_sock):
    '''Send TLS request including http headers and receive server response.'''
    try:
        tlsn_session.build_request(tls_sock,headers)
        response = shared.recv_socket(tls_sock) #not handshake flag means we wait on timeout
        if not response:
            raise Exception ("Received no response to request, cannot continue audit.")
        tlsn_session.store_server_app_data_records(response)
    except shared.TLSNSSLError:
        shared.ssl_dump(tlsn_session)
        raise
    tls_sock.close()
    #we return the full record set, not only the response to our request
    return tlsn_session.unexpected_server_app_data_raw + response
def commit_session(tlsn_session,response,sf):
    '''Commit the encrypted server response and other data to auditor'''
    commit_dir = join(current_session_dir, 'commit')
    if not os.path.exists(commit_dir): os.makedirs(commit_dir)
    #Serialization of RC4 'IV' requires concatenating the box,x,y elements of the RC4 state tuple
    IV = shared.rc4_state_to_bytearray(tlsn_session.IV_after_finished) \
        if tlsn_session.chosen_cipher_suite in [4,5] else tlsn_session.IV_after_finished
    # everything the auditor may later need to verify the session is
    # written to disk, one file per item, suffixed with the audit number sf
    stuff_to_be_committed = {'response':response,'IV':IV,
                             'cs':str(tlsn_session.chosen_cipher_suite),
                             'pms_ee':tlsn_session.pms1,'domain':tlsn_session.server_name,
                             'certificate.der':tlsn_session.server_certificate.asn1cert,
                             'origtlsver':tlsn_session.initial_tlsver, 'tlsver':tlsn_session.tlsver}
    for k,v in stuff_to_be_committed.iteritems():
        with open(join(commit_dir,k+sf),'wb') as f: f.write(v)
    commit_hash = sha256(response).digest()
    reply = send_and_recv('commit_hash',commit_hash)
    #TODO: changed response from webserver
    if reply[0] != 'success':
        raise Exception ('Failed to receive a reply')
    if not reply[1].startswith('pms2:'):
        raise Exception ('bad reply. Expected pms2')
    # returns (commit_hash, auditor's 24-byte pms half, notary signature)
    return (commit_hash, reply[1][len('pms2:'):len('pms2:')+24], reply[1][len('pms2:')+24:])
def decrypt_html(pms2, tlsn_session,sf):
    '''Receive correct server mac key and then decrypt server response (html),
    (includes authentication of response). Submit resulting html for browser
    for display (optionally render by stripping http headers).

    Returns either ('success', html_path) (via decrypt_html_stage2) or
    ('decrypt', ciphertexts) when the browser must do the AES decryption.'''
    try:
        tlsn_session.auditor_secret = pms2[:tlsn_session.n_auditor_entropy]
        tlsn_session.set_auditor_secret()
        tlsn_session.set_master_secret_half() #without arguments sets the whole MS
        tlsn_session.do_key_expansion() #also resets encryption connection state
    except shared.TLSNSSLError:
        shared.ssl_dump(tlsn_session)
        raise
    # 47/53 are the AES-CBC suites handled by the browser's crypto-js path
    if global_use_slowaes or not tlsn_session.chosen_cipher_suite in [47,53]:
        #either using slowAES or a RC4 ciphersuite
        try:
            plaintext,bad_mac = tlsn_session.process_server_app_data_records()
        except shared.TLSNSSLError:
            shared.ssl_dump(tlsn_session)
            raise
        if bad_mac:
            raise Exception("ERROR! Audit not valid! Plaintext is not authenticated.")
        return decrypt_html_stage2(plaintext, tlsn_session, sf)
    else: #AES ciphersuite and not using slowaes
        # hand the ciphertexts back; the caller forwards them to the browser
        try:
            ciphertexts = tlsn_session.get_ciphertexts()
        except:
            shared.ssl_dump(tlsn_session)
            raise
        return ('decrypt', ciphertexts)
def decrypt_html_stage2(plaintext, tlsn_session, sf):
    """Post-process decrypted plaintext and write the html to disk.

    De-chunks (and optionally gunzips) the HTTP body, writes a session dump
    plus the raw html into the commit directory, and, unless rendering is
    disabled in the config, also writes a browser-friendly copy with the
    HTTP headers stripped. Returns ('success', html_path).
    """
    plaintext = shared.dechunk_http(plaintext)
    if global_use_gzip:
        plaintext = shared.gunzip_http(plaintext)
    #write a session dump for checking even in case of success
    with open(join(current_session_dir,'session_dump'+sf),'wb') as f: f.write(tlsn_session.dump())
    commit_dir = join(current_session_dir, 'commit')
    html_path = join(commit_dir,'html-'+sf)
    with open(html_path,'wb') as f: f.write('\xef\xbb\xbf'+plaintext) #see "Byte order mark"
    if not int(shared.config.get("General","prevent_render")):
        # strip the HTTP headers (everything before the first blank line)
        html_path = join(commit_dir,'forbrowser-'+sf+'.html')
        with open(html_path,'wb') as f:
            f.write('\r\n\r\n'.join(plaintext.split('\r\n\r\n')[1:]))
    return ('success',html_path)
#Make a local copy of firefox, find the binary, install the new profile
#and start up firefox with that profile.
def start_firefox(FF_to_backend_port, firefox_install_path):
    """Prepare the tlsnotary Firefox profile and launch Firefox with it.

    FF_to_backend_port -- port the addon uses to talk to this backend
    firefox_install_path -- directory of the Firefox installation
    Returns ('success', Popen) on success or a 1-tuple ('Error...',) if the
    browser failed to launch.
    """
    #find the binary *before* copying; acts as sanity check
    ffbinloc = {'linux':['firefox'],'mswin':['firefox.exe'],'macos':['Contents','MacOS','firefox']}
    assert os.path.isfile(join(*([firefox_install_path]+ffbinloc[OS]))),\
           "Firefox executable not found - invalid Firefox application directory."
    # NOTE(review): local_ff_copy is computed but never used in this
    # function - confirm whether it is still needed.
    local_ff_copy = join(data_dir,'Firefox.app') if OS=='macos' else join(data_dir,'firefoxcopy')
    #check if FF-addon/tlsnotary@tlsnotary files were modified. If so, get a fresh
    #firefoxcopy and FF-profile. This is useful for developers, otherwise
    #we forget to do it manually and end up chasing wild geese
    filehashes = []
    for root, dirs, files in os.walk(join(data_dir, 'FF-addon', 'tlsnotary@tlsnotary')):
        for onefile in files:
            with open(join(root, onefile), 'rb') as f: filehashes.append(md5(f.read()).hexdigest())
    #sort hashes and get the final hash
    filehashes.sort()
    final_hash = md5(''.join(filehashes)).hexdigest()
    hash_path = join(data_dir, 'ffaddon.md5')
    if not os.path.exists(hash_path):
        with open(hash_path, 'wb') as f: f.write(final_hash)
    else:
        with open(hash_path, 'rb') as f: saved_hash = f.read()
        if saved_hash != final_hash:
            # addon changed: throw away the old profile so it gets rebuilt
            print("FF-addon directory changed since last invocation. Creating a new Firefox profile directory...")
            try:
                shutil.rmtree(join(data_dir, 'FF-profile'))
            except:
                pass
            with open(hash_path, 'wb') as f: f.write(final_hash)
    firefox_exepath = join(*([firefox_install_path]+ffbinloc[OS]))
    logs_dir = join(data_dir, 'logs')
    if not os.path.isdir(logs_dir): os.makedirs(logs_dir)
    # truncate previous firefox logs
    with open(join(logs_dir, 'firefox.stdout'), 'w') as f: pass
    with open(join(logs_dir, 'firefox.stderr'), 'w') as f: pass
    ffprof_dir = join(data_dir, 'FF-profile')
    if not os.path.exists(ffprof_dir): os.makedirs(ffprof_dir)
    # seed the profile with the bundled prefs and the tlsnotary extension
    shutil.copyfile(join(data_dir,'prefs.js'),join(ffprof_dir,'prefs.js'))
    shutil.copyfile(join(data_dir,'localstore.rdf'),join(ffprof_dir,'localstore.rdf'))
    shutil.copyfile(join(data_dir,'extensions.json'),join(ffprof_dir,'extensions.json'))
    extension_path = join(ffprof_dir, 'extensions', 'tlsnotary@tlsnotary')
    if not os.path.exists(extension_path):
        shutil.copytree(join(data_dir, 'FF-addon', 'tlsnotary@tlsnotary'),extension_path)
    #Disable addon compatibility check on startup (note: disabled for MacOS)
    if OS != 'macos':
        try:
            application_ini_data = None
            with open(join(firefox_install_path, 'application.ini'), 'r') as f: application_ini_data = f.read()
            version_pos = application_ini_data.find('Version=')+len('Version=')
            #version string can be 34.0 or 34.0.5
            version_raw = application_ini_data[version_pos:version_pos+8]
            version = ''.join(char for char in version_raw if char in '1234567890.')
            with open(join(ffprof_dir, 'prefs.js'), 'a') as f:
                f.write('user_pref("extensions.lastAppVersion", "' + version + '"); ')
        except:
            print ('Failed to disable add-on compatibility check')
    # the addon reads its configuration from environment variables
    os.putenv('FF_to_backend_port', str(FF_to_backend_port))
    os.putenv('FF_first_window', 'true') #prevents addon confusion when websites open multiple FF windows
    if not global_use_slowaes:
        os.putenv('TLSNOTARY_USING_BROWSER_AES_DECRYPTION', 'true')
    print ('Starting a new instance of Firefox with tlsnotary profile',end='\r\n')
    try: ff_proc = Popen([firefox_exepath,'-no-remote', '-profile', ffprof_dir],
                         stdout=open(join(logs_dir, 'firefox.stdout'),'w'),
                         stderr=open(join(logs_dir, 'firefox.stderr'), 'w'))
    except Exception,e: return ('Error starting Firefox: %s' %e,)
    return ('success', ff_proc)
#HTTP server to talk with Firefox addon
def http_server(parentthread):
    """Serve addon requests on a random localhost port, forever.

    Signals the caller through parentthread.retval: ('failure',) if the
    server could not be created, otherwise ('success', port).
    """
    print ('Starting http server to communicate with Firefox addon')
    try:
        server = shared.StoppableHttpServer(('127.0.0.1', 0), HandleBrowserRequestsClass)
    except Exception:
        parentthread.retval = ('failure',)
        return
    #Caller checks thread.retval for httpd status
    port = server.server_port
    parentthread.retval = ('success', port)
    print ('Serving HTTP on port ', str(port), end='\r\n')
    server.serve_forever()
#use miniHTTP server to receive commands from Firefox addon and respond to them
def aes_decryption_thread(parentthread):
    """Serve browser-decrypted AES data on a random localhost port, forever.

    Signals the caller through parentthread.retval: ('failure',) if the
    server could not be created, otherwise ('success', port).
    """
    print ('Starting AES decryption server')
    try:
        server = shared.StoppableHttpServer(('127.0.0.1', 0), HandlerClass_aes)
    except Exception:
        parentthread.retval = ('failure',)
        return
    #Caller checks thread.retval for httpd status
    port = server.server_port
    parentthread.retval = ('success', port)
    print ('Receiving decrypted AES on port ', str(port), end='\r\n')
    server.serve_forever()
#cleanup
def quit_clean(sig=0, frame=0):
    """Terminate child processes (Firefox, then selftest) and exit.

    Also installed as a SIGTERM handler, hence the (sig, frame) signature.
    """
    for pid in (firefox_pid, selftest_pid):
        if pid == 0:
            continue  # process was never started
        try:
            os.kill(pid, signal.SIGTERM)
        except:
            pass  # process already gone
    exit(1)
#unpack and check validity of Python modules
def first_run_check(modname,modhash):
    """Extract a bundled python module tarball on first run, verifying its md5.

    modname -- module directory name under data_dir/python
    modhash -- expected md5 hexdigest of the tarball; a falsy hash skips the
               whole check (module assumed present)
    Raises Exception('Wrong hash') if the tarball does not match modhash.
    """
    if not modhash: return
    mod_dir = join(data_dir, 'python', modname)
    if os.path.exists(mod_dir):
        return  # already extracted on a previous run
    print ('Extracting '+modname + '.tar.gz...')
    tarball_path = join(data_dir, 'python', modname+'.tar.gz')
    # context managers guarantee the file and archive are closed even if
    # verification or extraction fails (the original leaked both on error)
    with open(tarball_path, 'rb') as f:
        tarfile_data = f.read()
    if md5(tarfile_data).hexdigest() != modhash:
        raise Exception ('Wrong hash')
    # extractall() unpacks relative to the cwd, so move there first
    os.chdir(join(data_dir, 'python'))
    with tarfile.open(tarball_path, 'r:gz') as tar:
        tar.extractall()
if __name__ == "__main__":
    # parse simple positional flags from the command line
    if ('test' in sys.argv): testing = True
    if ('randomtest' in sys.argv):
        testing = True
        randomtest = True
    if ('mode=addon' in sys.argv):
        mode='addon'
    else:
        mode='normal'
    #for md5 hash, see https://pypi.python.org/pypi/<module name>/<module version>
    # bundled third-party modules: extract (first_run_check) then add to path
    modules_to_load = {'rsa-3.1.4':'b6b1c80e1931d4eba8538fd5d4de1355',\
                       'pyasn1-0.1.7':'2cbd80fcd4c7b1c82180d3d76fee18c8',\
                       'slowaes':'','requests-2.3.0':'7449ffdc8ec9ac37bbcd286003c80f00'}
    for x,h in modules_to_load.iteritems():
        first_run_check(x,h)
        sys.path.append(join(data_dir, 'python', x))
    import rsa
    import pyasn1
    import requests
    from pyasn1.type import univ
    from pyasn1.codec.der import encoder, decoder
    from slowaes import AESModeOfOperation
    import shared
    shared.load_program_config()
    shared.import_reliable_sites(join(install_dir,'src','shared'))
    # persistent connection to the notary server for all send_and_recv calls
    hcts = httplib.HTTPConnection(shared.config.get("Notary","server_name")\
            +":"+shared.config.get("Notary","server_port"))
    #override default config values
    if int(shared.config.get("General","tls_11")) == 0:
        global_tlsver = bytearray('\x03\x01')
    if int(shared.config.get("General","decrypt_with_slowaes")) == 1:
        global_use_slowaes = True
    if int(shared.config.get("General","gzip_disabled")) == 1:
        global_use_gzip = False
    if int(shared.config.get("General","use_paillier_scheme")) == 1:
        global_use_paillier = True
    firefox_install_path = None
    if len(sys.argv) > 1: firefox_install_path = sys.argv[1]
    if firefox_install_path in ('test', 'randomtest'): firefox_install_path = None
    if mode == 'normal':
        # no path supplied: locate the Firefox installation per-OS
        if not firefox_install_path:
            if OS=='linux':
                if not os.path.exists('/usr/lib/firefox'):
                    raise Exception ("Could not set firefox install path")
                firefox_install_path = '/usr/lib/firefox'
            elif OS=='mswin':
                # try the 64-bit, 32-bit-on-64 and XP-style program dirs in turn
                bFound = False
                prog64 = os.getenv('ProgramW6432')
                prog32 = os.getenv('ProgramFiles(x86)')
                progxp = os.getenv('ProgramFiles')
                if prog64:
                    if os.path.exists(join(prog64,'Mozilla Firefox')):
                        firefox_install_path = join(prog64,'Mozilla Firefox')
                        bFound = True
                if not bFound and prog32:
                    if os.path.exists(join(prog32,'Mozilla Firefox')):
                        firefox_install_path = join(prog32,'Mozilla Firefox')
                        bFound = True
                if not bFound and progxp:
                    if os.path.exists(join(progxp,'Mozilla Firefox')):
                        firefox_install_path = join(progxp,'Mozilla Firefox')
                        bFound = True
                if not bFound:
                    raise Exception('Could not set firefox install path')
            elif OS=='macos':
                if not os.path.exists(join("/","Applications","Firefox.app")):
                    raise Exception('''Could not set firefox install path.
                    Please make sure Firefox is in your Applications folder''')
                firefox_install_path = join("/","Applications","Firefox.app")
            else:
                raise Exception("Unrecognised operating system.")
        print ("Firefox install path is: ",firefox_install_path)
        if not os.path.exists(firefox_install_path):
            raise Exception ("Could not find Firefox installation")
    # start the addon-facing HTTP server in a background thread
    thread = shared.ThreadWithRetval(target= http_server)
    thread.daemon = True
    thread.start()
    #wait for minihttpd thread to indicate its status and FF_to_backend_port
    b_was_started = False
    for i in range(10):
        time.sleep(1)
        if thread.retval == '': continue
        #else
        if thread.retval[0] != 'success':
            raise Exception (
                'Failed to start minihttpd server. Please investigate')
        #else
        b_was_started = True
        break
    if b_was_started == False:
        raise Exception ('minihttpd failed to start in 10 secs. Please investigate')
    FF_to_backend_port = thread.retval[1]
    if mode == 'addon':
        # addon mode: an already-running Firefox reads the port from a file
        with open (join(data_dir, 'ports'), 'w') as f:
            f.write(str(FF_to_backend_port))
    elif mode == 'normal':
        ff_retval = start_firefox(FF_to_backend_port, firefox_install_path)
        if ff_retval[0] != 'success':
            raise Exception (
                'Error while starting Firefox: '+ ff_retval[0])
        ff_proc = ff_retval[1]
        firefox_pid = ff_proc.pid
    signal.signal(signal.SIGTERM, quit_clean)
    # idle loop: exit cleanly when Firefox closes or on Ctrl-C
    try:
        while True:
            time.sleep(1)
            if mode == 'normal':
                if ff_proc.poll() != None: quit_clean() #FF was closed
    except KeyboardInterrupt: quit_clean()
15,476 | a362c0ad1ca72b9625a07b6368fd389de807bbb8 | class PluginExercise:
def __init__(self, name, description, content, test_case_list, solution,language):
self.name = name
self.description = description
self.content = content
self.test_case_list = test_case_list
self.solution = solution
self.language = language |
15,477 | 9c6ae44ada190712300a458d5852a64f75a3b4b8 | import pretty_midi
import smidi
import pickle
import os
import re
import numpy as np
from hyperps import n_features_in, n_features_out
DATA_DIR = 'data'
PICKLE_DIR = os.path.join(DATA_DIR, 'pickles')
DUMP_SIZE = 600
def generate(dirs=None, pickle_name='data', accepted_keys=None):
    '''
    Loads smidis of dirs and pickles the resulting feature matrix.

    Input:
        dirs is a list of the directories in data/ to use
            None uses all of the directories
        pickle_name is the name of the file which to save the generated data
        accepted_keys are the pretty_midi.KeySignature values to accept
            setting accepted_keys will transpose every song to C maj / A min
    '''
    # Collect per-song arrays and concatenate once at the end; the original
    # np.concatenate inside the loop copied all accumulated data each time
    # (accidentally quadratic).
    segments = []
    files = get_files(dirs)
    midi_file = re.compile(r'.*\.mid$')  # raw string: \. is a regex escape
    for filename in files:
        if midi_file.fullmatch(filename) is None:
            continue
        try:
            pm = pretty_midi.PrettyMIDI(filename)
        except Exception:
            # best effort: skip unreadable/corrupt midi files
            # (narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit)
            continue
        if accepted_keys is not None:
            keys = pm.key_signature_changes
            # Check key: require exactly one signature, from the accepted set
            if len(keys) != 1 or keys[0].key_number not in accepted_keys:
                continue
            # Transpose to C major / A minor
            key = keys[0].key_number
            if key >= 12:  # minor
                key = (key+3)%12  # convert to major
            transpose = get_transpose(key)
            for instrument in pm.instruments:
                if instrument.is_drum:
                    continue  # drum pitches are instrument ids, never shift them
                for note in instrument.notes:
                    note.pitch += transpose
        try:
            segments.append(smidi.midi2smidi(pm))
        except smidi.TimeSignatureException:
            print('Warning: failed to add {} because of time signature'.format(filename))
            continue
        except Exception:
            print('Warning: failed to add {} for unknown reasons'.format(filename))
            continue
    data = np.concatenate(segments) if segments else np.zeros((0, n_features_in))
    pickle_file = pickle_filename(pickle_name)
    # context manager so the pickle file handle is closed deterministically
    with open(pickle_file, 'wb') as f:
        pickle.dump(data, f)
def load(pickle_name='data'):
    """Load and return a previously generated dataset pickle (see generate())."""
    pickle_file = pickle_filename(pickle_name)
    # context manager so the file handle is closed deterministically
    # (the original left the open() handle to the garbage collector)
    with open(pickle_file, 'rb') as f:
        return pickle.load(f)
def shuffled_batches(data, L, n):
    '''
    Generator yielding shuffled batches of training sequences.

    Input:
        data is the loaded data
        L is the length of each sequence
        n is the sequence count returned by each yield
    '''
    num_sequences = len(data) - 1 - L
    if num_sequences < n:
        print('Warning: not enough data supplied to create sequences')
        return None
    # Shuffle every possible start index, drop the remainder that cannot
    # fill a whole batch, and group the rest n at a time.
    starts = np.arange(num_sequences)
    np.random.shuffle(starts)
    batches = starts[:num_sequences - num_sequences % n].reshape(num_sequences // n, n)
    for batch in batches:
        yield get_sequences(data, L, batch)
def get_sequences(data, L, ns):
    '''
    Gather (input, target) training pairs from a large loaded dataset.

    Inputs:
        data is the loaded data
        L is the length of each sequence
        ns is the list of indices from which to gather sequences
    Returns (x, y) where x[i] is the L-row window starting at ns[i] and
    y[i] is the first n_features_out columns of the row that follows it.
    '''
    count = len(ns)
    x = np.zeros(( count, L, n_features_in ))
    y = np.zeros(( count, n_features_out ))
    for row, start in enumerate(ns):
        x[row] = data[start:start + L]
        y[row] = data[start + L, :n_features_out]
    return x, y
def get_files(dirs=None):
    """Yield the path of every file under DATA_DIR.

    When *dirs* is given, only files whose directory is exactly one of the
    named subdirectories of DATA_DIR are yielded.
    """
    wanted = None if dirs is None else {os.path.join(DATA_DIR, d) for d in dirs}
    for root, _subdirs, files in os.walk(DATA_DIR):
        if wanted is not None and root not in wanted:
            continue
        for name in files:
            yield os.path.join(root, name)
def get_transpose(key):
    """Return the semitone offset that transposes a major key to C (key 0).

    Keys 1-5 are shifted down by their distance from C; keys 6-11 are
    shifted up by (12 - key) semitones, so the move is at most 6 semitones.
    """
    transpose = key%6
    transpose *= -1
    if key >= 6:
        transpose = 6+transpose
    # BUG FIX: the original returned `key` instead of the computed offset,
    # so callers never received the intended transposition amount.
    return transpose
def pickle_filename(pickle_name):
    """Return the full path of the pickle file for *pickle_name* in PICKLE_DIR."""
    return os.path.join(PICKLE_DIR, pickle_name) + '.pickle'
|
15,478 | 47d214429dd174598b7b838902415b40d4bd4966 | def druglike_molecules(self, idx):
    """
    Filter the CSD success data at *idx* down to drug-like molecules and
    combine the result into self.ligand; returns self for chaining.

    Restrictions (each a [min, max] range, None = unbounded):
    hbond_donors, hbond_acceptors, has_disorder, has_3d_structure,
    is_organometallic, is_polymeric, is_organic, r_factor, molecular_weight.
    """
    restrictions = {"hbond_donors":[None, 5], "hbond_acceptors":[None, 10], "has_disorder":[0, 0], "has_3d_structure":[1, 1], "is_organometallic":[0, 0], "is_polymeric":[0, 0], "is_organic":[1, 1], "r_factor":[0, 15], "molecular_weight":[150.0, 600.0]} #"temperature":[150, 310]}
    _, _, df = db.get_success_data(idx, dataframe=True)
    primary_key = db.primary_key_for(idx)  # NOTE(review): unused here - confirm it is needed
    # apply each restriction range in turn, narrowing the dataframe
    for rest in restrictions:
        df = table(df).apply_rest(rest, restrictions[rest])
    if self.ligand is None:
        self.ligand = df
    else:
        # NOTE(review): `&` on DataFrames is element-wise AND, not a row
        # intersection - confirm this is the intended combination.
        self.ligand = self.ligand & df
    folder_name = db.get_folder(idx)
    self.ligand_folder = '{}_{}'.format(idx, folder_name)
    self.data_type = 'csd'
    return self
def nondruglike_molecules(self, idx):
    """
    Filter the CSD success data at *idx* down to the complement of the
    drug-like set (same restrictions as druglike_molecules) and combine the
    result into self.ligand; returns self for chaining.

    Restrictions (each a [min, max] range, None = unbounded):
    hbond_donors, hbond_acceptors, has_disorder, has_3d_structure,
    is_organometallic, is_polymeric, is_organic, r_factor, molecular_weight.
    """
    restrictions = {"hbond_donors":[None, 5], "hbond_acceptors":[None, 10], "has_disorder":[0, 0], "has_3d_structure":[1, 1], "is_organometallic":[0, 0], "is_polymeric":[0, 0], "is_organic":[1, 1], "r_factor":[0, 15], "molecular_weight":[150.0, 600.0]} #"temperature":[150, 310]}
    _, _, df_all = db.get_success_data(idx, dataframe=True)
    primary_key = db.primary_key_for(idx)  # NOTE(review): unused here - confirm it is needed
    # build the drug-like subset first...
    df_drugs = df_all.copy()
    for rest in restrictions:
        df_drugs = table(df_drugs).apply_rest(rest, restrictions[rest])
    # ...then keep only the rows of df_all that are NOT in the drug-like set
    # (anti-join via merge's indicator column)
    df_merge = df_all.merge(df_drugs, indicator=True, how='outer')
    df_nondrug = df_merge[df_merge['_merge'] == 'left_only']
    if self.ligand is None:
        self.ligand = df_nondrug
    else:
        # NOTE(review): `&` on DataFrames is element-wise AND, not a row
        # intersection - confirm this is the intended combination.
        self.ligand = self.ligand & df_nondrug
    folder_name = db.get_folder(idx)
    self.ligand_folder = '{}_{}'.format(idx, folder_name)
    self.data_type = 'csd'
    return self
15,479 | 6d4b0d555831443382d322c28a40aae0c7bb2f82 | #-*- coding:utf-8 -*-
class Pen:
    # Simple example class used to demonstrate module imports (Python 2).
    def write(self):
        # Prints (Korean): "writing with a pen"
        print "펜으로 필기를 해요"
# Module-level code: runs on import as well as on direct execution.
# First line prints (Korean): "execution has entered the myutil/Util.py module"
print "myutil/Util.py 모둘에 실행순서가 들어왔습니다."
print 'Util __name__:',__name__
if __name__=='__main__':
    # Printed only when run directly (Korean): "Util.py was run as main"
    print "Util.py 모듈이 main으로 실행 되었습니다."
15,480 | 1f2efd980402681e3d433cd3ba6f09297df2da28 | """
Rotting Oranges
Q. In a given grid, each cell can have one of three values:
the value 0 representing an empty cell;
the value 1 representing a fresh orange;
the value 2 representing a rotten orange.
Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.
Return the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.
Example 1:
Input: [[2,1,1],[1,1,0],[0,1,1]]
Output: 4
Example 2:
Input: [[2,1,1],[0,1,1],[1,0,1]]
Output: -1
Explanation: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 4-directionally.
Example 3:
Input: [[0,2]]
Output: 0
Explanation: Since there are already no fresh oranges at minute 0, the answer is just 0.
Note:
1 <= grid.length <= 10
1 <= grid[0].length <= 10
grid[i][j] is only 0, 1, or 2.
"""
class Solution:
    """Multi-source BFS solution for the 'Rotting Oranges' problem."""
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the minutes until no fresh orange remains, or -1.

        grid cells: 0 = empty, 1 = fresh, 2 = rotten. Rot spreads to the
        four adjacent cells once per minute. Returns 0 when there is
        nothing left to rot, -1 when some fresh orange is unreachable.
        The grid is mutated in place (fresh cells become rotten).
        """
        if not grid:
            return 0
        # deque gives O(1) popleft; the original list.pop(0) was O(n)
        # per dequeue, making the BFS accidentally quadratic.
        from collections import deque
        rows, cols = len(grid), len(grid[0])
        queue = deque()
        total = 0  # count of all oranges, fresh and rotten
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] > 0:
                    total += 1
                if grid[r][c] == 2:
                    queue.append((r, c, 0))  # (row, col, minute it rotted)
        reached = len(queue)  # oranges that are (or become) rotten
        if total == 0:
            return 0
        if reached == 0:
            return -1  # fresh oranges exist but no rot source
        minute = 0
        while queue:
            r, c, minute = queue.popleft()
            for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                    grid[nr][nc] = 2
                    queue.append((nr, nc, minute + 1))
                    reached += 1
        # minute holds the timestamp of the last orange to rot
        return minute if reached == total else -1
15,481 | 1cb3eaf190e51ba6b7e5280566434f09c85bd101 | import math
def snt(n):
    """Primality test: True iff n is a prime number."""
    if n < 2:
        return False
    limit = int(math.sqrt(n)) + 1
    # n is prime iff no candidate divisor up to sqrt(n) divides it
    return all(n % d for d in range(2, limit))
# Driver: for each of t test cases, read a digit string and print "YES"
# iff every prime index holds a prime digit and every non-prime index
# holds a non-prime digit; otherwise print "NO".
t=int(input())
for i in range(t):
    s=input()
    check=True
    # NOTE(review): the inner loop reuses the name `i`, shadowing the
    # outer test-case counter (harmless here, but easy to trip over).
    for i in range(0, len(s)):
        # a prime position must carry a prime digit
        if snt(i)==True and snt(int(s[i]))!=True:
            check=False
            break
        # a non-prime position must carry a non-prime digit
        if snt(i)!=True and snt(int(s[i]))==True:
            check=False
            break
    if check==True: print("YES")
    else: print("NO")
15,482 | 3b918b3af84ad9383b600c8d9e709757e243e0df | from django import template
import string
from ..api import request_song
register = template.Library()
@register.simple_tag
def get_slug(value):
    """Return only the ASCII digit characters of *value*, in order.

    Template tag used to pull the numeric id out of a slug-like string.
    Deliberately restricted to '0'-'9' (not str.isdigit, which would
    also accept Unicode digits such as '²'), matching the original
    nine-way comparison chain.
    """
    return ''.join(ch for ch in value if ch in '0123456789')
@register.simple_tag
def get_liked_songs(list):
    """Resolve a comma-separated collection of song ids into song objects.

    *list* (name kept for template compatibility; note it shadows the
    builtin) is coerced to str, split on commas, and each id is looked
    up via api.request_song. Returns the looked-up songs in order.
    """
    return [request_song(song_id) for song_id in str(list).split(",")]
|
15,483 | 0e69df9a1426639abb416463f7532979ae3c9468 | def main():
x, y, z = map(int, input().split())
ans = 0
while x > 0 and y > 0:
x -= 1
y -= 1
ans += 1
w = max(x, y)
while w > 0 and z > 0:
w -= 1
z -= 1
ans += 1
while z > 1:
z -= 2
ans += 1
print(ans)
if __name__ == "__main__":
main() |
15,484 | 9d2a61454a420960efce2556ca75635aa8c83560 | """Automated breaking of the Atbash Cipher."""
from lantern.modules import simplesubstitution
from typing import Iterable
# TODO: Consider using an affine cipher for transformation in future
KEY = 'ZYXWVUTSRQPONMLKJIHGFEDCBA'
def decrypt(ciphertext: str) -> Iterable:
    """Decrypt Atbash enciphered ``ciphertext``.

    Examples:
        >>> ''.join(decrypt("SVOOL"))
        'HELLO'

    Args:
        ciphertext (str): English string to decrypt

    Returns:
        Decrypted text
    """
    return simplesubstitution.decrypt(KEY, ciphertext)
def encrypt(plaintext: str) -> Iterable:
    """Encrypt ``plaintext`` using the Atbash cipher.

    Examples:
        >>> ''.join(encrypt("HELLO"))
        'SVOOL'

    Args:
        plaintext (str): English string to encrypt

    Returns:
        Encrypted text
    """
    return simplesubstitution.encrypt(KEY, plaintext)
|
15,485 | e6ca3b9881f0f2b8b14e8246e6587bbdbafdd639 | # Input
# Voice assistant: listen on the microphone, recognise Vietnamese speech
# via Google's API, and answer with synthesised speech (gTTS).
import speech_recognition
from gtts import gTTS
import os

robot_ear = speech_recognition.Recognizer()
with speech_recognition.Microphone() as mic:
    print("[AI]: listening...")
    audio = robot_ear.listen(mic)

robot_brain = "I don't know wtf you r talking 'bout! Have a nice day :)"
try:
    # BUG FIX: the keyword is `language` (lowercase). `Language=` raised a
    # TypeError that the former bare `except` silently swallowed, so
    # recognition never actually ran.
    your_order = robot_ear.recognize_google(audio, language='vi-VN')
    print("[You]: ", your_order)
    if "Xin chào" in your_order or "chào" in your_order:
        robot_brain = "Chào em! Anh đứng đây từ chiều :)"
except Exception:
    # speech not recognised (or no network): keep the default reply
    print("[You]: ", robot_brain)

# Output: synthesise the reply and play it (Windows `start` command)
tts = gTTS(text=robot_brain, lang='vi')
tts.save("pcvoice.mp3")
os.system("start pcvoice.mp3")
|
15,486 | fac0d29023aff111a19c99521921b9e8e4ee04bd | #######################################################
# Everything to do with reading the grid
# This relies on a NetCDF grid file. To create it, run MITgcm just long enough to produce one grid.t*.nc file for each tile, and then glue them together using gluemnc (utils/scripts/gluemnc in the MITgcm distribution).
#######################################################
import numpy as np
import sys
from file_io import read_netcdf
from utils import fix_lon_range
from constants import fris_bounds
# Given a 3D hfac array on any grid, create the land mask.
def build_land_mask (hfac):
    """Land mask from a 3D hfac array: True where the whole column is closed."""
    column_total = hfac.sum(axis=0)
    return column_total == 0
# Given a 3D hfac array on any grid, create the ice shelf mask.
def build_zice_mask (hfac):
    """Ice-shelf mask from a 3D hfac array.

    True where the column contains open water (nonzero total hfac) but
    the surface cell is closed, i.e. the ocean sits under an ice shelf.
    """
    has_ocean = hfac.sum(axis=0) != 0
    closed_surface = hfac[0, :] == 0
    return has_ocean * closed_surface
# Create a mask just containing FRIS ice shelf points.
# Arguments:
# zice_mask, lon, lat: 2D arrays of the ice shelf mask, longitude, and latitude on any grid
def build_fris_mask (zice_mask, lon, lat):
    """Boolean mask selecting Filchner-Ronne Ice Shelf (FRIS) points.

    zice_mask, lon, lat: 2D ice-shelf mask, longitude, and latitude on
    the same grid. Outer bounds come from constants.fris_bounds; the
    shelf is covered by two boxes split at 45W with different northern
    latitude limits.
    """
    fris_mask = np.zeros(zice_mask.shape, dtype='bool')
    # Identify FRIS in two parts, split along the line 45W
    # Each set of 4 bounds is in form [lon_min, lon_max, lat_min, lat_max]
    regions = [[fris_bounds[0], -45, fris_bounds[2], -74.7], [-45, fris_bounds[1], fris_bounds[2], -77.85]]
    for bounds in regions:
        # Select the ice shelf points within these bounds
        index = zice_mask*(lon >= bounds[0])*(lon <= bounds[1])*(lat >= bounds[2])*(lat <= bounds[3])
        fris_mask[index] = True
    return fris_mask
# Grid object containing lots of grid variables.
class Grid:
# Initialisation arguments:
# file_path: path to NetCDF grid file
def __init__ (self, file_path):
# 1D lon and lat axes on regular grids
# Make sure longitude is between -180 and 180
# Cell centres
self.lon_1d = fix_lon_range(read_netcdf(file_path, 'X'))
self.lat_1d = read_netcdf(file_path, 'Y')
# Cell corners (southwest)
self.lon_corners_1d = fix_lon_range(read_netcdf(file_path, 'Xp1'))
self.lat_corners_1d = read_netcdf(file_path, 'Yp1')
# 2D lon and lat fields on any grid
# Cell centres
self.lon_2d = fix_lon_range(read_netcdf(file_path, 'XC'))
self.lat_2d = read_netcdf(file_path, 'YC')
# Cell corners
self.lon_corners_2d = fix_lon_range(read_netcdf(file_path, 'XG'))
self.lat_corners_2d = read_netcdf(file_path, 'YG')
# 2D integrands of distance
# Across faces
self.dx = read_netcdf(file_path, 'dxF')
self.dy = read_netcdf(file_path, 'dyF')
# Between centres
self.dx_t = read_netcdf(file_path, 'dxC')
self.dy_t = read_netcdf(file_path, 'dyC')
# Between u-points
self.dx_u = self.dx # Equivalent to distance across face
self.dy_u = read_netcdf(file_path, 'dyU')
# Between v-points
self.dx_v = read_netcdf(file_path, 'dxV')
self.dy_v = self.dy # Equivalent to distance across face
# Between corners
self.dx_psi = read_netcdf(file_path, 'dxG')
self.dy_psi = read_netcdf(file_path, 'dyG')
# 2D integrands of area
# Area of faces
self.dA = read_netcdf(file_path, 'rA')
# Centered on u-points
self.dA_u = read_netcdf(file_path, 'rAw')
# Centered on v-points
self.dA_v = read_netcdf(file_path, 'rAs')
# Centered on corners
self.dA_psi = read_netcdf(file_path, 'rAz')
# Vertical grid
# Assumes we're in the ocean so using z-levels - not sure how this
# would handle atmospheric pressure levels.
# Depth axis at centres of z-levels
self.z = read_netcdf(file_path, 'Z')
# Depth axis at edges of z-levels
self.z_edges = read_netcdf(file_path, 'Zp1')
# Depth axis at w-points
self.z_w = read_netcdf(file_path, 'Zl')
# Vertical integrands of distance
# Across cells
self.dz = read_netcdf(file_path, 'drF')
# Between centres
self.dz_t = read_netcdf(file_path, 'drC')
# Dimension lengths (on tracer grid)
self.nx = self.lon_1d.size
self.ny = self.lat_1d.size
self.nz = self.z.size
# Partial cell fractions
# At centres
self.hfac = read_netcdf(file_path, 'HFacC')
# On western edges
self.hfac_w = read_netcdf(file_path, 'HFacW')
# On southern edges
self.hfac_s = read_netcdf(file_path, 'HFacS')
# Create masks on the t, u, and v grids
# We can't do the psi grid because there is no hfac there
# Land masks
self.land_mask = build_land_mask(self.hfac)
self.land_mask_u = build_land_mask(self.hfac_w)
self.land_mask_v = build_land_mask(self.hfac_s)
# Ice shelf masks
self.zice_mask = build_zice_mask(self.hfac)
self.zice_mask_u = build_zice_mask(self.hfac_w)
self.zice_mask_v = build_zice_mask(self.hfac_s)
# FRIS masks
self.fris_mask = build_fris_mask(self.zice_mask, self.lon_2d, self.lat_2d)
self.fris_mask_u = build_fris_mask(self.zice_mask_u, self.lon_corners_2d, self.lat_2d)
self.fris_mask_v = build_fris_mask(self.zice_mask_v, self.lon_2d, self.lat_corners_2d)
# Topography (as seen by the model after adjustment for eg hfacMin - not necessarily equal to what is specified by the user)
# Bathymetry (bottom depth)
self.bathy = read_netcdf(file_path, 'R_low')
# Ice shelf draft (surface depth, enforce 0 in land or open-ocean points)
self.zice = read_netcdf(file_path, 'Ro_surf')
self.zice[np.invert(self.zice_mask)] = 0
# Water column thickness
self.wct = read_netcdf(file_path, 'Depth')
# Return the longitude and latitude arrays for the given grid type.
# 't' (default), 'u', 'v', 'psi', and 'w' are all supported.
# Default returns the 2D meshed arrays; can set dim=1 to get 1D axes.
    def get_lon_lat (self, gtype='t', dim=2):
        """Return (lon, lat) for grid type 't', 'u', 'v', 'psi', or 'w'.

        dim=2 (default) gives the 2D meshed fields; dim=1 gives the 1D
        axes. Prints an error and exits the process on invalid dim or
        gtype (Python 2 module).
        """
        if dim == 1:
            lon = self.lon_1d
            lon_corners = self.lon_corners_1d
            lat = self.lat_1d
            lat_corners = self.lat_corners_1d
        elif dim == 2:
            lon = self.lon_2d
            lon_corners = self.lon_corners_2d
            lat = self.lat_2d
            lat_corners = self.lat_corners_2d
        else:
            print 'Error (get_lon_lat): dim must be 1 or 2'
            sys.exit()
        # u/v/psi points sit on cell corners in one or both directions
        if gtype in ['t', 'w']:
            return lon, lat
        elif gtype == 'u':
            return lon_corners, lat
        elif gtype == 'v':
            return lon, lat_corners
        elif gtype == 'psi':
            return lon_corners, lat_corners
        else:
            print 'Error (get_lon_lat): invalid gtype ' + gtype
            sys.exit()
# Return the hfac array for the given grid type.
# 'psi' and 'w' have no hfac arrays so they are not supported
    def get_hfac (self, gtype='t'):
        """Return the partial-cell fraction array for gtype 't', 'u', or 'v'.

        'psi' and 'w' grids have no hfac; those (or any other value)
        print an error and exit the process (Python 2 module).
        """
        if gtype == 't':
            return self.hfac
        elif gtype == 'u':
            return self.hfac_w
        elif gtype == 'v':
            return self.hfac_s
        else:
            print 'Error (get_hfac): no hfac exists for the ' + gtype + ' grid'
            sys.exit()
# Return the land mask for the given grid type.
    def get_land_mask (self, gtype='t'):
        """Return the land mask for gtype 't', 'u', or 'v'.

        Other grid types print an error and exit (Python 2 module).
        """
        if gtype == 't':
            return self.land_mask
        elif gtype == 'u':
            return self.land_mask_u
        elif gtype == 'v':
            return self.land_mask_v
        else:
            print 'Error (get_land_mask): no mask exists for the ' + gtype + ' grid'
            sys.exit()
# Return the ice shelf mask for the given grid type.
    def get_zice_mask (self, gtype='t'):
        """Return the ice-shelf mask for gtype 't', 'u', or 'v'.

        Other grid types print an error and exit (Python 2 module).
        """
        if gtype == 't':
            return self.zice_mask
        elif gtype == 'u':
            return self.zice_mask_u
        elif gtype == 'v':
            return self.zice_mask_v
        else:
            print 'Error (get_zice_mask): no mask exists for the ' + gtype + ' grid'
            sys.exit()
# Return the FRIS mask for the given grid type.
    def get_fris_mask (self, gtype='t'):
        """Return the FRIS (Filchner-Ronne) mask for gtype 't', 'u', or 'v'.

        Other grid types print an error and exit (Python 2 module).
        """
        if gtype == 't':
            return self.fris_mask
        elif gtype == 'u':
            return self.fris_mask_u
        elif gtype == 'v':
            return self.fris_mask_v
        else:
            print 'Error (get_fris_mask): no mask exists for the ' + gtype + ' grid'
            sys.exit()
|
15,487 | f7344cb4a1e9c8796f5203958bec7780aa5e52db | #server.py
from wsgiref.simple_server import make_server
from hello import application
# 创建一个server
httpd = make_server('', 8000, application)
print('Server Http on port 8000')
# 开始监听http请求
httpd.serve_forever()
|
15,488 | d6f87dcd4d5b0aaa302907372d3e9f51e434f801 | from selenium.webdriver.common.by import By
class MainPageLocators():
MAIN_LINK = (By.CSS_SELECTOR, '#login_link')
BROWSE_STORE = (By.ID, "browse")
ALL_BOOKS = (By.XPATH, "//*[@id='browse']/li/ul/li[1]/a")
HEADER_ALLBOOKS = (By.CLASS_NAME, "action")
class LoginPageLocators():
LOGIN_FORM = (By.ID, 'login_form')
REG_FORM = (By.ID, 'register_form')
REG_LOGIN = (By.ID, 'id_registration-email')
REG_PASSWORD1 = (By.ID, 'id_registration-password1')
REG_PASSWORD2 = (By.ID, 'id_registration-password2')
SUBMIT_BUTTON = (By.CSS_SELECTOR, 'button[name^="registration_submit"]')
class ProductPageLocators():
ADD_TO_BASKET_BUTTON = (By.CLASS_NAME, "btn-add-to-basket")
ITEM_NAME = (By.CSS_SELECTOR, '.product_main>h1')
SUCCESS_MESSAGE = (By.CSS_SELECTOR, '.alertinner>strong:nth-child(1)')
PRICE = (By.CSS_SELECTOR, '.product_main>.price_color')
PRICE_MESSAGE = (By.CSS_SELECTOR, '.alertinner>p>strong')
BASKET_BUTTON = (By.CSS_SELECTOR, '.basket-mini>span>a')
EMPTY_BASKET = (By.ID, "content_inner")
BASKET_EMPTY_MESSAGE = (By.XPATH, '//div[@id="content_inner"]//p[contains(text(),"Your basket is empty.")]')
class BasePageLocators():
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
LOGIN_LINK_INVALID = (By.CSS_SELECTOR, "#login_link_inc")
USER_ICON = (By.CLASS_NAME, "icon-user")
|
15,489 | e0374da62c3669725daf1b69fa67ec88868d1b4f | import tensorflow as tf
from PIL import Image
from time import time
import numpy as np
from ops import *
import parameters
import os
dict = parameters.import_parameters( 'PARAMETERS.txt' )
NUM_BLOCKS = int( dict['num_blocks'] )
ALPHA = int( dict['alpha'] )
FIRST_CHANNELS = int( dict['first_channels'] )
SCALE = int( dict['scale'] )
input = tf.placeholder( tf.float32 , [ None , None , None , 3 ] )
network_output = build_network( input , NUM_BLOCKS , alpha = ALPHA , first_channels = FIRST_CHANNELS )
with tf.Session() as sess :
saver = tf.train.Saver()
model_path = 'Models/best_model'
saver.restore( sess , model_path )
os.system('rm -r output_images')
os.system('mkdir output_images')
img = Image.open('input.tif')
inputs = np.array( [ np.array(img) ] ).astype(np.float32)
img.close()
start_time = time()
network_images = sess.run( network_output , feed_dict = { input : inputs } )
network = np.clip( network_images[0] , 0 , 255 ).astype('uint8')
Image.fromarray( network ).save( 'output_images/network.bmp' )
print( time() - start_time , "sec" )
|
15,490 | 92e8ab65bfe4d76476ef4bb1978d42d4a772dac2 | import sys
import socket
import argparse
# Resolve the DNS/IP address of a given domain
# data returned in the format: (name, aliaslist, addresslist)
# Returns the first IP that responds
def getIP(d):
    """Resolve domain *d* to its first IP address.

    Returns repr() of the address (a quoted string -- the caller strips
    the quotes with .replace), or False if resolution fails.
    """
    try:
        data = socket.gethostbyname(d)
        ip = repr(data)
        return ip
    except Exception:
        return False
# Returns Host name for given IP
def getHost(ip):
    """Reverse-resolve *ip* to its primary host name.

    Returns repr() of the name (a quoted string -- the caller strips
    the quotes with .replace), or False if the lookup fails.
    """
    try:
        data = socket.gethostbyaddr(ip)
        host = repr(data[0])
        return host
    except Exception:
        return False
def main()
# Process command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--inputfile', type=str, required=True,
help="input csv file")
flags = parser.parse_args()
with open(flags.inputfile, 'r') as infile:
domain = infile.readlines()
for i in domain:
try:
result = socket.inet_aton(i)
hostname = getHost(i)
if hostname: print " " + hostname.replace('\'', '' )
except socket.error:
ip = getIP(i)
if ip: print " " + ip.replace('\'', '' )
if __name__ == "__main__":
main() |
15,491 | 15cee00fdc26cde07785fa228cea8c11f816e9e2 | import abc
class XmlFile(abc.ABC):
    """Wraps an XML log-fragment file and renders it inside a <log> root.

    Subclasses supply the file name via get_xml_file_name().

    BUG FIX: without inheriting abc.ABC, @abc.abstractmethod was inert
    and XmlFile could be instantiated directly; it now raises TypeError
    as intended.
    """

    @abc.abstractmethod
    def get_xml_file_name(self):
        """Return the path of the XML file to wrap."""

    def as_string(self):
        """Return the file's full contents wrapped in <log>...</log>."""
        # single read instead of quadratic += line concatenation
        with open(self.get_xml_file_name()) as f:
            body = f.read()
        return "<log>" + body + "</log>"
|
15,492 | dd510f2899334787445b3c6f3de9b28894289e25 | Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> j = 9
>>> for i in range(1,10,2):
print(" "*j+i*"*")
j=j-1
|
15,493 | fbfaf4d8cfc9b2567ef663d119268f32d8e4bb55 | #!/usr/bin/env python
"Plot the concentration field rho(x,y) in the course of time"
from __future__ import print_function, division
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file', type=str, help='H5MD datafile')
parser.add_argument('--species', type=int, default=1)
parser.add_argument('--interval', type=int, default=20)
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
f = plt.figure()
c = None
with h5py.File(args.file, 'r') as a:
rho_xy = a['fields/rho_xy/value']
traj = a['particles/dimer/position/value'][:]
edges = a['particles/dimer/box/edges'][:]
traj += a['particles/dimer/image/value'][:]*edges.reshape((1,1,-1))
x = np.arange(rho_xy.shape[1]+1)
y = np.arange(rho_xy.shape[2]+1)
X, Y = np.meshgrid(x, y)
def update_plot(i):
global c
plt.title(str(i))
l = plt.pcolormesh(X, Y, rho_xy[i*args.interval,:,:,args.species].T)
if c is None:
c = plt.colorbar()
plt.plot(traj[:i*args.interval,0,0],traj[:i*args.interval,0,1], c='r', lw=4)
plt.plot(traj[:i*args.interval,1,0],traj[:i*args.interval,1,1], c='k', lw=4)
return l
ani = animation.FuncAnimation(f, update_plot, rho_xy.shape[0]//args.interval, interval=100, repeat=False)
plt.show()
|
15,494 | 498067c160f0f77bdaed10afce9718f7bfc17614 | import random
import string
f = open("twist.txt","r") #opens file with name of "test.txt"
me=f.readline()
print ("Loading word list from file...")
me = str.split(me)
men=len(me)
me=random.choice(me)
##print(f.read(55))
print (me)
print (men)
f.close()
##print ("Loading word list from file...")
### inFile: file
##inFile = open("twist.txt", 'r')
### line: string
##line = inFile.readline()
### wordlist: list of strings
##wordlist = str.split(line)
###print(line)
##
##
##print (" ", len(wordlist), "words loaded.")
|
15,495 | d2604a6ee92ab6155f0e567261df729f51451f45 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CNN_using_persistence_images_on_patch.py
The aim of this script is to perform the training of a CNN using persistence
images as a input. This script is inspired from this script:
BorgwardtLab/ADNI_MRI_Analysis/blob/mixed_CNN/mixed_CNN/run_Sarah.py
To get real time information into the model training and structure, run
$ tensorboard --logdir logs/fit
once this script has been started.
NOTES:
- One loaded, the "big" 100x100x3 images aren't that big (>400MB in RAM) so
NO GENERATOR NEEDED
"""
__author__ = "Philip Hartout"
__email__ = "philip.hartout@protonmail.com"
import dotenv
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, make_scorer
DOTENV_KEY2VAL = dotenv.dotenv_values()
N_BINS = 100
N_LAYERS = 50
################################################################################
# Functions
################################################################################
persistence_landscape_location = (
DOTENV_KEY2VAL["DATA_DIR"] + "/patch_91_persistence_landscapes/"
)
partitions_location = DOTENV_KEY2VAL["DATA_DIR"] + "/partitions/"
diagnosis_json = (
DOTENV_KEY2VAL["DATA_DIR"] + "/collected_diagnoses_complete.json"
)
def get_partitions(partitions_location):
    """Load every partition_* / labels_* .npy dict found under a directory.

    Returns (partitions, labels): two lists of dicts, one per matching
    file, in os.walk order. Files whose name starts with neither prefix
    are reported and skipped.

    BUG FIX: files are now opened via os.path.join(root, fname), so the
    walk works for nested directories and *partitions_location* no
    longer needs a trailing path separator.
    """
    partitions = []
    labels = []
    for root, _dirs, files in os.walk(partitions_location):
        for fname in files:
            path = os.path.join(root, fname)
            prefix = fname.split("_")[0]
            if prefix == "partition":
                # .npy holds a pickled 0-d object array; .item() unwraps the dict
                partitions.append(np.load(path, allow_pickle=True).item())
            elif prefix == "labels":
                labels.append(np.load(path, allow_pickle=True).item())
            else:
                print(f"File {fname} is neither partition nor labels file")
    return partitions, labels
################################################################################
# Main
################################################################################
def main():
############################################################################
# Data loading and processing
############################################################################
inits = 1
partitions, labels = get_partitions(partitions_location)
histories = []
for partition, label in zip(partitions, labels):
for i in range(inits):
# Make sure there aren't the same patients in train and test
X_train_lst = []
y_train_lst = []
for landscape in partition["train"]:
X_train_lst.append(
np.load(persistence_landscape_location + landscape + ".npy")
)
y_train_lst.append(label[landscape])
X_train, y_train = (
np.stack(X_train_lst, axis=0).reshape(
len(X_train_lst), N_BINS, N_LAYERS, 3
),
np.vstack(y_train_lst),
)
X_val_lst = []
y_val_lst = []
for landscape in partition["validation"]:
X_val_lst.append(
np.load(persistence_landscape_location + landscape + ".npy")
)
y_val_lst.append(label[landscape])
X_val, y_val = (
np.stack(X_val_lst, axis=0).reshape(
len(X_val_lst), N_BINS, N_LAYERS, 3
),
np.vstack(y_val_lst),
)
####################################################################
# Model definition
####################################################################
model = RandomForestClassifier()
params = {
"n_estimators": [300],
"criterion": ["gini"],
"max_depth": [None],
"min_samples_split": [2],
"min_samples_leaf": [1],
"min_weight_fraction_leaf": [0.0],
"max_features": ["auto"],
"max_leaf_nodes": [None],
"min_impurity_decrease": [0.0],
"min_impurity_split": [None],
"bootstrap": [True],
"oob_score": [False],
"n_jobs": [None],
"random_state": [42],
"verbose": [0],
"warm_start": [False],
"class_weight": [None],
"ccp_alpha": [0.0],
"max_samples": [None],
}
search = GridSearchCV(
RandomForestClassifier(),
param_grid=params,
cv=10,
scoring=make_scorer(accuracy_score),
n_jobs=-1,
)
search.fit(X_train.reshape(len(X_train),-1), y_train.ravel())
y_val_pred = search.best_estimator_.predict(X_val.reshape(len(
X_val),-1))
print(
f"Best model training set cross validated score using RF is"
f" {search.best_score_}"
)
print(
f"Best performance is achieved using " f"{search.best_params_}")
print(
f"Best test set score using RF is "
f"{accuracy_score(y_val, y_val_pred)}"
)
if __name__ == "__main__":
main()
|
15,496 | dfecc9483db97c935ce32eae7529acd7e3849dea | from statistics import YouTubeStats
API_KEY = 'set your youtube data api here'
channel_id ='channel id'
channel_title = 'channel title'
if __name__ == "__main__":
channel_stats = YouTubeStats(API_KEY, channel_id, channel_title)
channel_stats.get_channel_stats()
channel_stats.stats_to_json_file()
|
15,497 | 655dd2b7327e51f2d4008fcdb5f48478a9e5fe22 | from tests.unit.dataactcore.factories.domain import TASFactory
from tests.unit.dataactcore.factories.staging import AppropriationFactory, ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'a35_cross_file'
_TAS = 'a35_cross_file_tas'
def test_column_headers(database):
expected_subset = {'source_row_number', 'source_value_deobligations_recoveries_r_cpe',
'target_value_ussgl487100_downward_adjus_cpe_sum',
'target_value_ussgl497100_downward_adjus_cpe_sum',
'target_value_ussgl487200_downward_adjus_cpe_sum',
'target_value_ussgl497200_downward_adjus_cpe_sum', 'difference', 'uniqueid_TAS'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that, for entries with the matching TAS, Appropriations
deobligations_recoveries_r_cpe equals the sum of all corresponding entries
for Object Class Program Acitivity fields ussgl487100_downward_adjus_cpe,
ussgl497100_downward_adjus_cpe, ussgl487200_downward_adjus_cpe,
ussgl497200_downward_adjus_cpe"""
tas = TASFactory()
database.session.add(tas)
database.session.flush()
ap = AppropriationFactory(account_num=tas.account_num, deobligations_recoveries_r_cpe=8)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2]) == 0
def test_success_scenario2(database):
tas1 = TASFactory()
tas2 = TASFactory()
database.session.add_all([tas1, tas2])
database.session.flush()
ap = AppropriationFactory(account_num=tas1.account_num, deobligations_recoveries_r_cpe=8)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
account_num=tas1.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
account_num=tas1.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Doesn't contribute, different TAS
op_3 = ObjectClassProgramActivityFactory(
account_num=tas2.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2, op_3]) == 0
def test_failure(database):
""" Tests that, for entries with the matching TAS, Appropriations
deobligations_recoveries_r_cpe does not equals the sum of all
corresponding entries for Object Class Program Acitivity fields
ussgl487100_downward_adjus_cpe, ussgl497100_downward_adjus_cpe,
ussgl487200_downward_adjus_cpe, ussgl497200_downward_adjus_cpe"""
tas = TASFactory()
database.session.add(tas)
database.session.flush()
ap = AppropriationFactory(account_num=tas.account_num, deobligations_recoveries_r_cpe=7)
# Contributes 4
op_1 = ObjectClassProgramActivityFactory(
account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
# Contributes another 4
op_2 = ObjectClassProgramActivityFactory(
account_num=tas.account_num, ussgl487100_downward_adjus_cpe=1, ussgl497100_downward_adjus_cpe=1,
ussgl487200_downward_adjus_cpe=1, ussgl497200_downward_adjus_cpe=1)
assert number_of_errors(_FILE, database, models=[ap, op_1, op_2]) == 1
|
15,498 | faf4a65493554480327c606e40ad540f2699eb82 | weekdays = ['mon','tues','wed','thurs','fri']
print(weekdays)
print(type(weekdays))
days = weekdays[0] # elemento 0
print(days)
days = weekdays[0:3] # elementos 0, 1, 2
print(days)
days = weekdays[:3] # elementos 0, 1, 2
print(days)
days = weekdays[-1] # ultimo elemento
print(days)
test = weekdays[3:] # elementos 3, 4
print(test)
print('iiiiiii')
days = weekdays[-2] # ultimo elemento (elemento 4
print(days)
days = weekdays[::] # all elementos
print(days)
days = weekdays[::2] # cada segundo elemento (0, 2, 4)******
print(days)
days = weekdays[::-1] # reverso (4, 3, 2, 1, 0)
print(days)
all_days = weekdays + ['sat','sun'] # concatenar
print(all_days)
print('iiiiiiiiiiiiiii')
days_list = ['mon','tues','wed','thurs','fri']
days_list.append('sat')
days_list.append('sun')
print(days_list)
print(days_list == all_days)
list = ['a', 1, 3.14159265359]
print(list)
print(type(list))
# list.reverse()
# print(list)
print('eeeeeeeeeeeeee')
# Como selecionar 'wed' pelo indice?
days = weekdays[2]
print(days)
# Como verificar o tipo de 'mon'?
days = weekdays[0]
print(days)
print(type(days))
# Como separar 'wed' até 'fri'?
days = weekdays[2:]
print(days)
# Quais as maneiras de selecionar 'fri' por indice?
days = weekdays[4]
print(days)
days = weekdays[-1]
print(days)
# Qual eh o tamanho dos dias e days_list?
print(len (days_list))
print(len (days_list[0]))
print(len (days_list[1]))
print(len (days_list[2]))
print(len (days_list[3]))
print(len (days_list[4]))
print(len (days_list[5]))
print(len (days_list[6]))
# Como inverter a ordem dos dias?
days1 = days_list[::-1] # reverso (4, 3, 2, 1, 0)
print(days1)
# Como inserir a palavra 'zero' entre 'a' e 1 de list?
list.insert(1, 'zero')
print(list)
# Como limpar list?
list.clear()
print(list)
# Como deletar list?
del(list)
print(list)
print('ooooo')
# Como atribuir o ultimo elemento de list na variavel ultimo_elemento e remove-lo de list?
list = ['a', 1, 3.14159265359]
ultimo_elemento = list[-1]
del(list[-1])
print(list)
print(ultimo_elemento)
|
15,499 | 43a731a276e54b449fe01315d818af4d650fdb7b | import pprint
import sys
# The ANSI colours below break Conjure's ability to parse REPL input/output
# sys.ps1 = "\033[0;34m>>> \033[0m"
# sys.ps2 = "\033[1;34m... \033[0m"
sys.displayhook = pprint.pprint
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.