text stringlengths 38 1.54M |
|---|
# Read N (alphabet size), M (tuple length) and d (target difference) from stdin.
N, M, d = [int(_) for _ in input().split()]
t = 0  # NOTE(review): unused at module level — both calc functions use their own locals
from itertools import product
def calc0(N, M, d):
    """Brute force: over every length-M tuple drawn from range(N), count
    adjacent pairs whose absolute difference equals d.

    Returns (total hit count, number of tuples, average hits per tuple).
    """
    alphabet = range(N)
    hits = 0
    for seq in product(alphabet, repeat=M):
        hits += sum(abs(a - b) == d for a, b in zip(seq, seq[1:]))
    return hits, N ** M, hits / N ** M
def calc(N, M, d):
    """Closed form for the expected number of adjacent pairs at distance d.

    Each of the M-1 adjacent positions matches with probability k / N**2,
    where k is the number of ordered pairs (a, b) in [0, N) with |a-b| == d.
    """
    if d == 0:
        k = N  # only the N pairs (a, a)
    else:
        # Robustness fix: when d >= N no pair can match; the original
        # (N - d) * 2 went negative in that case.
        k = max(N - d, 0) * 2
    # r = k * N**(M - 2) * (M - 1) / (N**M)
    return k * (M - 1) / N ** 2
# Sanity checks of the closed form against the brute force for small M:
#print(calc(N, 2, d))
#print(calc(N, 3, d))
print(calc(N, M, d))
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
from utils import get_media
# NOTE(review): wildcard import — all view names below come from accounting.views.
from accounting.views import *
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10,
# so this module targets an older Django release; migrating to a plain list of
# url() entries would be required on modern Django.
urlpatterns = patterns('',
    (r'^$', index),
    (r'^static/(.+)$', get_media),
    # Static HTML pages served through a single parameterised view.
    (r'^orders/$', get_static_page, {'page':'orders.html'}),
    (r'^presence/$', get_static_page, {'page':'presence.html'}),
    (r'^incspend/$', get_static_page, {'page':'inout.html'}),
    (r'^embedebts/$', get_static_page, {'page':'embedding_debts.html'}),
    (r'^get_orders/$', get_orders),
    (r'^get_all_order_items/$', get_all_order_items),
    (r'^get_orders_items/$', get_orders_items),
    (r'^update_order$', update_order),
    (r'^remove_order$', remove_order),
    (r'^sell_item$', sell_item),
    (r'^cancel_sell$', cancel_sell),
    (r'^change_item_location$', change_item_location),
    # Same view handles both seller dictionaries, selected by dict_type.
    (r'^update_seller$', update_seller, {'dict_type': 'order_sellers'}),
    (r'^update_shop_seller$', update_seller, {'dict_type': 'shop_sellers'}),
    (r'^update_order_item$', update_order_item),
    (r'^update_item_price$', update_item_price),
    (r'^remove_order_item$', remove_order_item),
    (r'^get_sellers/$', get_sellers, {'dict_type': 'order_sellers'}),
    (r'^get_shop_sellers/$', get_sellers, {'dict_type': 'shop_sellers'}),
    (r'^get_json_dict/(\w+)$', get_json_dict),
    (r'^get_incspend_list/$', get_incspend_list),
    (r'^add_item_inout$', add_item_inout),
    (r'^calculate_income$', calculate_income),
    (r'^get_embedebts/$', get_embedebts),
    (r'^get_order_item/$', get_order_item),
    (r'^add_item_embedebts$', add_item_embedebts),
    (r'^remove_item_embedebts$', remove_item_embedebts),
    # Examples:
    # url(r'^$', 'inna_trade.views.home', name='home'),
    # url(r'^inna_trade/', include('inna_trade.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
from functions import *
from pymsgbox import *
'''
@param1: EmailId of the COURSE
@param2: Password of the COURSE email
@param3: Name of the Course
@param4: CourseID itself
'''
# SECURITY NOTE(review): the course email password is hard-coded in source;
# move credentials to an environment variable or a config file kept out of VCS.
makingOfGroups('groups@nptel.iitm.ac.in', '12345group67890', 'Intro to DB Systems', 'noc20-cs24')
|
import numpy as np
import matplotlib.pyplot as plt
import itertools
from scipy.misc import imread
from keras.preprocessing import image
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm -- square confusion-matrix array
        classes -- tick labels, one per class
        normalize -- when True, convert counts to per-row fractions
        title -- figure title
        cmap -- matplotlib colormap for the heat map
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        # White text on dark cells, black on light ones, for readability.
        plt.text(col, row, format(cm[row, col], cell_fmt),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plot_img_score(subsample, best_thresh, labels=['Other', 'Renault']):
    """
    Plot all images contained in subsample data frame and their scores
    Args:
        subsample -- Pandas DataFrame containes the path of the images, the labels and the predictions
        best_thresh -- probability cutoff separating the two classes
        labels -- class names indexed by the thresholded prediction
    """
    n = len(subsample)  # number of images to visualize
    plt.figure(figsize=[12, 12 * n / 2])
    for i in range(n):
        row = subsample.iloc[i]
        img_path = row['image']
        img = image.load_img(img_path)
        pred_value = row['pred']
        # BUG FIX: np.int was deprecated and removed in NumPy 1.24; the builtin
        # int() does exactly what was intended here.
        pred_bool = int(row['pred'] > best_thresh)
        correct = 'Correct' if row['label'] == pred_bool else 'Wrong'
        title = correct + '\n' + labels[pred_bool] + ' (proba=' + str(round(pred_value,2)) + ')' + '\n' + img_path
        # NOTE(review): the grid is n rows x 2 cols but only the first n cells
        # are filled; a (ceil(n/2), 2) grid may have been intended — confirm.
        plt.subplot(n, 2, i + 1)
        plt.imshow(img)
        plt.title(title, fontsize=14)
        # (the original's trailing `if i == n-1: break` was dead code — the
        # loop ends there anyway — and has been removed)
import fugashi
# The Tagger object holds state about the dictionary.
tagger = fugashi.Tagger()
# NOTE(review): hard-coded absolute path to the input corpus — confirm/parameterise.
file = open("C:/Users/Anna-Maria/Desktop/SENIOR CAPSTONE/Computer Science/model trees/mixed.txt","r", encoding="utf-8")
text = file.readlines() # opening and reading the file with sentences line by line
file.close()  # BUG FIX: the handle was never closed
# this is the list of special verbs which indicate that the level of formality is polite / humble formal
verbs = ["ใใ ใใ", "ใใใ", "ใใใใงใ", "ใใใซใใใ", "ใใใฃใใใ", "ใใใงใซใชใ", "ใใฃใใใ", "ใใใใซใชใ", "ใใใใซใชใ",
         "ใใใใซใชใ", "ใใฟใใซใชใ", "ใชใใ", "ใใใใใ", "ใใใใซใชใ", "ใใใใใ", "ใใใ", "ใพใใ", "ใใ", "ใใใ",
         "ใใใใใใ", "ใฏใใใใใใ", "ใใใใ", "ใใใใพใใ", "ใใใใ", "ใใใ", "ใใใ ใ", "ใฏใใใใใ"]
for t in text: # for every sentence in the file
    points = total = 0
    reg = False
    for word in tagger(t): # for every word in the sentence
        if word.feature.pos1 == "ๆฅ้ ญ่พ": # if it is a prefix
            # BUG FIX: `word.surface == "ใ" or "ใ"` was always true because a
            # non-empty string literal is truthy; use a membership test.
            if word.surface in ("ใ", "ใ"):
                points += 1
        elif word.feature.pos1 == "ๅ่ฉ": # if it is a verb
            total += 1
            for x in tagger(word.surface):
                # BUG FIX: the chained `x == "ใ" or "ใ" or ...` was always true.
                # NOTE(review): tokens are compared via their surface form —
                # confirm this is the intended morpheme check.
                if x.surface in ("ใ", "ใ", "ใ", "ใก", "ใซ", "ใฒ", "ใฟ", "ใ"):
                    points += 1
            # Robustness: lemma can be None for unknown words; guard the `in`.
            if word.feature.lemma and any(word.feature.lemma in v for v in verbs): # if the verb is a special verb
                reg = True
    sent = t.replace('\n', "")
    # `points > total` already implies points != total, so the redundant
    # `and points != total` clause of the original has been dropped.
    if points > total or reg:
        print('The sentence: "', sent, '" is in Honorific / Humble Form - Verbs')
    elif points == total:
        print('The sentence: "', sent, '" is in Plain Formal Form - Verbs')
    else:
        print('The sentence: "', sent, '" is NOT Honorific / Humble Form - Verbs')
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadLayer(nn.Module):
    """Runs `num_heads` independent attention heads and merges their outputs.

    Args:
        in_dim: input feature size fed to each head.
        out_dim: per-head output feature size.
        num_heads: number of parallel heads.
        attn_drop_out: dropout probability applied to the neighbor features.
        feat_embed_size: edge-feature embedding size forwarded to each head.
        layer: head constructor, called as layer(in_dim, out_dim, feat_embed_size);
               the resulting module is called as head(g, origin, neighbor).
        merge: 'cat' concatenates head outputs on dim 1; anything else averages.
    """
    def __init__(self, in_dim, out_dim, num_heads, attn_drop_out, feat_embed_size, layer, merge='cat'):
        super(MultiHeadLayer, self).__init__()
        self.heads = nn.ModuleList()
        for i in range(num_heads):
            self.heads.append(layer(in_dim, out_dim, feat_embed_size)) # [n_nodes, hidden_size]
        self.merge = merge
        self.dropout = nn.Dropout(attn_drop_out)

    def forward(self, g, origin, neighbor):
        head_outs = [attn_head(g, origin, self.dropout(neighbor)) for attn_head in self.heads] # n_head * [n_nodes, hidden_size]
        if self.merge == 'cat':
            # concat on the output feature dimension (dim=1)
            result = torch.cat(head_outs, dim=1) # [n_nodes, hidden_size * n_head]
        else:
            # BUG FIX: torch.mean over the stacked heads without a dim argument
            # collapsed the output to a 0-d scalar; average over the head
            # dimension (dim=0) so the result stays [n_nodes, hidden_size].
            result = torch.mean(torch.stack(head_outs), dim=0)
        return result
class ESGATLayer(nn.Module):
    """Single GAT head attending along entity(unit==0) -> sentence(unit==1) edges.

    NOTE(review): relies on a DGL-style graph API (filter_nodes / apply_edges /
    pull) and an edge feature named "tfidfembed" — confirm against the caller.
    """
    def __init__(self, in_dim, out_dim, feat_embed_size):
        super().__init__()
        self.fc_o = nn.Linear(in_dim, out_dim, bias=False)    # projects `origin` features
        self.fc_n = nn.Linear(in_dim, out_dim, bias=False)    # projects `neighbor` features
        self.feat_fc = nn.Linear(feat_embed_size, out_dim, bias=False)  # projects edge features
        self.attn_fc = nn.Linear(3 * out_dim, 1, bias=False)  # attention scorer
    def edge_attention(self, edges):
        # Unnormalized attention logit from src/dst projections plus the edge feature.
        dfeat = self.feat_fc(edges.data["tfidfembed"]) # [edge_num, out_dim]
        z3 = torch.cat([edges.src['z'], edges.dst['z'], dfeat], dim=1) # [edge_num, 3 * out_dim]
        wa = F.leaky_relu(self.attn_fc(z3)) # [edge_num, 1]
        return {'e': wa}
    def message_func(self, edges):
        # print("edge e ", edges.data['e'].size())
        return {'z': edges.src['z'], 'e': edges.data['e']}
    def reduce_func(self, nodes):
        # Softmax-normalize the logits per destination node, then aggregate.
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'sh': h}
    def forward(self, g, origin, neighbor):
        # Node id sets by type: unit==0 (entities) and unit==1 (sentences).
        enode_id = g.filter_nodes(lambda nodes: nodes.data["unit"] == 0)
        snode_id = g.filter_nodes(lambda nodes: nodes.data["unit"] == 1)
        esedge_id = g.filter_edges(lambda edges: (edges.src["unit"] == 0) & (edges.dst["unit"] == 1))
        # print("id in WSGATLayer")
        # print(wnode_id, snode_id, wsedge_id)
        z1 = self.fc_o(origin)
        z2 = self.fc_n(neighbor)
        # Stash projected features on the graph for apply_edges/pull below.
        g.nodes[snode_id].data['z'] = z1
        g.nodes[enode_id].data['z'] = z2
        g.apply_edges(self.edge_attention, edges=esedge_id)
        # Messages flow along the filtered edges into the sentence nodes.
        g.pull(snode_id, self.message_func, self.reduce_func)
        g.ndata.pop('z')   # clean up temporary node state
        h = g.ndata.pop('sh')
        return h[snode_id]
class SEGATLayer(nn.Module):
    """Single GAT head attending along sentence(unit==1) -> entity(unit==0) edges
    (the reverse direction of ESGATLayer).

    NOTE(review): relies on a DGL-style graph API and the "tfidfembed" edge
    feature — confirm against the caller.
    """
    def __init__(self, in_dim, out_dim, feat_embed_size):
        super().__init__()
        self.fc_o = nn.Linear(in_dim, out_dim, bias=False)   # projects `origin` features
        self.fc_n = nn.Linear(in_dim, out_dim, bias=False)   # projects `neighbor` features
        # NOTE(review): unlike ESGATLayer this feat_fc keeps its bias — confirm
        # whether the asymmetry is intentional.
        self.feat_fc = nn.Linear(feat_embed_size, out_dim)
        self.attn_fc = nn.Linear(3 * out_dim, 1, bias=False)
    def edge_attention(self, edges):
        # Unnormalized attention logit from src/dst projections plus the edge feature.
        dfeat = self.feat_fc(edges.data["tfidfembed"]) # [edge_num, out_dim]
        z3 = torch.cat([edges.src['z'], edges.dst['z'], dfeat], dim=1) # [edge_num, 3 * out_dim]
        wa = F.leaky_relu(self.attn_fc(z3)) # [edge_num, 1]
        return {'e': wa}
    def message_func(self, edges):
        return {'z': edges.src['z'], 'e': edges.data['e']}
    def reduce_func(self, nodes):
        # Softmax-normalize the logits per destination node, then aggregate.
        alpha = F.softmax(nodes.mailbox['e'], dim=1)
        h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
        return {'sh': h}
    def forward(self, g, origin, neighbor):
        # Node id sets by type: unit==0 (entities) and unit==1 (sentences).
        enode_id = g.filter_nodes(lambda nodes: nodes.data["unit"] == 0)
        snode_id = g.filter_nodes(lambda nodes: nodes.data["unit"] == 1)
        seedge_id = g.filter_edges(lambda edges: (edges.src["unit"] == 1) & (edges.dst["unit"] == 0))
        z1 = self.fc_o(origin)
        z2 = self.fc_n(neighbor)
        # Note the swap relative to ESGATLayer: origin features go to sentence
        # nodes here... actually z2 -> snode, z1 -> enode as written below.
        g.nodes[snode_id].data['z'] = z2
        g.nodes[enode_id].data['z'] = z1
        g.apply_edges(self.edge_attention, edges=seedge_id)
        # Messages flow along the filtered edges into the entity nodes.
        g.pull(enode_id, self.message_func, self.reduce_func)
        g.ndata.pop('z')   # clean up temporary node state
        h = g.ndata.pop('sh')
        return h[enode_id]
class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''
    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # 1x1 convolutions act as per-position (position-wise) linear layers.
        self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
        self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply FFN with a residual connection followed by LayerNorm."""
        assert not torch.any(torch.isnan(x)), "FFN input"
        shortcut = x
        # Conv1d expects (batch, channels, positions): swap the last two dims.
        hidden = self.w_1(x.transpose(1, 2))
        projected = self.w_2(F.relu(hidden)).transpose(1, 2)
        normed = self.layer_norm(self.dropout(projected) + shortcut)
        assert not torch.any(torch.isnan(normed)), "FFN output"
        return normed
def genPrimes():
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Keeps the primes found so far and trial-divides each candidate, but only
    by primes up to sqrt(candidate) — once p*p exceeds the candidate no larger
    prime can be a factor, so the original full scan was wasted work.
    """
    primes = []
    candidate = 1
    while True:
        candidate += 1
        is_prime = True
        for p in primes:
            if p * p > candidate:
                break  # no factor possible beyond sqrt(candidate)
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
            yield candidate
|
from django.urls import path,include
from .views import get_user_liked_Question_View, get_user_liked_Answer_View, get_user_liked_replies_View, ReportIssueView, getNotification,getProfessionList ,getUsersList,getQuesList

urlpatterns = [
    path('liked_Question/', get_user_liked_Question_View.as_view(), name="LikedQuestion"),
    path('liked_Answer/', get_user_liked_Answer_View.as_view(), name="LikedAnswer"),
    path('liked_replies/', get_user_liked_replies_View.as_view(), name="LikedReply"),
    path('report-issue/', ReportIssueView.as_view(), name="ReportIssueView"),
    path('get-notification/', getNotification.as_view(), name="getNotification"),
    # BUG FIX: this route reused the name "getNotification", which makes
    # reverse()/{% url %} lookups ambiguous; name it after its own view.
    path('get-profession/', getProfessionList.as_view(), name="getProfessionList"),
    path('get-users/', getUsersList.as_view(), name="getUsersList"),
    path('get-ques/', getQuesList.as_view(), name="getQuesList"),
]
# there must be views
from wsgi_app import App
app = App()
# sample view with routing:
'''
@app.route('/', method=['GET', 'POST'])
def index(headers):
return 'Home page'
'''
|
# example of a bimodal data sample
from matplotlib import pyplot
from numpy.random import normal
from numpy import hstack
# generate a sample: two Gaussian modes, 30% centred at 20 and 70% at 40
sample1 = normal(loc=20, scale=5, size=300)
sample2 = normal(loc=40, scale=5, size=700)
sample = hstack((sample1, sample2))
# plot the histogram
pyplot.hist(sample, bins=50)
pyplot.show()
'''
Given an integer array nums and an integer k, return the number of subarrays of nums where the least common multiple of the subarray's elements is k.
A subarray is a contiguous non-empty sequence of elements within an array.
The least common multiple of an array is the smallest positive integer that is divisible by all the array elements.
Example 1:
Input: nums = [3,6,2,7,1], k = 6
Output: 4
Explanation: The subarrays of nums where 6 is the least common multiple of all the subarray's elements are:
- [3,6,2,7,1]
- [3,6,2,7,1]
- [3,6,2,7,1]
- [3,6,2,7,1]
Example 2:
Input: nums = [3], k = 2
Output: 0
Explanation: There are no subarrays of nums where 2 is the least common multiple of all the subarray's elements.
Constraints:
1 <= nums.length <= 1000
1 <= nums[i], k <= 1000
'''
import unittest
from typing import *
from math import gcd
class Solution:
    def subarrayLCM(self, nums: List[int], k: int) -> int:
        """Count subarrays whose least common multiple equals k.

        For a fixed start index the running LCM is non-decreasing and each new
        value is a multiple of the previous one, so once the LCM stops dividing
        k (in particular once it exceeds k) no longer subarray starting there
        can reach k — we break early. Results are identical to the full scan.
        """
        res = 0
        for i in range(len(nums)):
            l = nums[i]
            if k % l:
                # nums[i] does not divide k: no subarray starting at i works.
                continue
            if l == k:
                res += 1
            for j in range(i + 1, len(nums)):
                l = l * nums[j] // gcd(l, nums[j])
                if k % l:
                    break  # LCM no longer divides k; it can never equal k now
                if l == k:
                    res += 1
        return res
class TestSolution(unittest.TestCase):
    """Exercises Solution.subarrayLCM against the worked example."""
    def test_case(self):
        # (args, expected) pairs taken from the problem statement.
        examples = (
            (([3,6,2,7,1], 6), 4),
        )
        for first, second in examples:
            self.assert_function(first, second)
    def assert_function(self, first, second):
        self.assertEqual(Solution().subarrayLCM(*first), second,
                         msg="first: {}; second: {}".format(first, second))
# NOTE(review): runs unconditionally on import; consider guarding with
# `if __name__ == '__main__':`.
unittest.main()
|
# vim: sw=4:ts=4:et:ai
import itertools
from eulertools import fibonacci
def main():
    """Sum the even-valued Fibonacci numbers that do not exceed 4,000,000."""
    under_limit = itertools.takewhile(lambda x: x <= 4000000, fibonacci())
    return sum(f for f in under_limit if f % 2 == 0)

if __name__ == '__main__':
    print("Result: %i" % main())
|
'''
This file will read a log file and produce an CSV file with the data
'''
import csv
import argparse
from decimal import Decimal, getcontext
# Set precission to two digital positions
getcontext().prec = 2
US_LOCATIONS = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY',
'DC']
CAD_LOCATIONS = ['AB', 'BC', 'MB', 'NB', 'NL', 'NS', 'ON', 'PE', 'QC', 'SK',
'NT', 'NU', 'YT']
CAD_TO_USD = Decimal(0.76)
def add_price_by_location(row):
location = row['LOCATION']
if location in US_LOCATIONS:
row['COUNTRY'] = 'USA'
row['CURRENCY'] = 'USD'
row['USD'] = Decimal(row['PRICE'])
elif location in CAD_LOCATIONS:
row['COUNTRY'] = 'CANADA'
row['CURRENCY'] = 'CAD'
row['USD'] = Decimal(row['PRICE']) * CAD_TO_USD
else:
raise Exception('Location not found')
return row
def main(input_file, output_file):
    """Read LOCATION/PRICE rows from `input_file`, annotate each with
    country/currency/USD price via add_price_by_location, and write the
    augmented rows to `output_file` as CSV."""
    reader = csv.DictReader(input_file)
    result = [add_price_by_location(row) for row in reader]
    # Robustness fix: an empty input used to crash on result[0];
    # simply emit nothing in that case.
    if not result:
        return
    # Save into csv format; the header comes from the first augmented row.
    header = result[0].keys()
    writer = csv.DictWriter(output_file, fieldnames=header)
    writer.writeheader()
    writer.writerows(result)
if __name__ == '__main__':
    # CLI: <input csv> <output csv>; argparse opens both files for us.
    parser = argparse.ArgumentParser()
    parser.add_argument(dest='input', type=argparse.FileType('r'),
                        help='input file')
    parser.add_argument(dest='output', type=argparse.FileType('w'),
                        help='output file')
    args = parser.parse_args()
    main(args.input, args.output)
|
# -*- coding:utf-8 -*-
import cv2
import numpy as np
from cal_rect_xy import cal_rect_xy
def Crop_cnt(frame, cnt, color, wh_ratio):  # crop the convex-hull region of a contour
    """
    :param frame: source BGR image
    :param cnt: contour whose convex hull is cropped
    :param color: colour label; only elongated "red" shapes are deskewed
    :param wh_ratio: aspect-ratio info — wh_ratio[1] > 1 marks an elongated
                     shape (NOTE(review): structure inferred from usage; confirm)
    :return: the cropped (and possibly rotation-corrected) image, or None
    """
    print(" def Crop_cnt(frame, cnt, color, wh_ratio): >>>")
    hull = cv2.convexHull(cnt)  # convex hull of the contour
    rect_min = cv2.minAreaRect(hull)  # minimum-area (rotated) bounding rectangle
    x1, y1, w, h = cv2.boundingRect(hull)  # axis-aligned bounding rectangle
    box01 = cv2.boxPoints(rect_min)  # (center, size, angle) -> 4 corner points
    box = np.int0(box01)  # integer corners of the rotated rectangle
    ColorThings_line = frame.copy()  # image used for the debug overlay
    # cv2.rectangle(ColorThings_line, (x1, y1), (x1 + w, y1 + h), (0, 255, 0), 2)
    # cv2.imshow("Crop_cnt: ", ColorThings_line)
    # print("box", type(box))
    # print("box:", box)
    print("wh_ratio", wh_ratio)
    # BUG FIX: the original tested `if not wh_ratio:` and then indexed
    # wh_ratio[1] — i.e. it indexed exactly when the ratio was empty
    # (IndexError) and returned None whenever a ratio WAS available.
    # The truthy branch is clearly the intended one; confirm with the caller.
    if wh_ratio:
        if wh_ratio[1] > 1 and color == "red":  # only elongated red shapes need deskewing
            cx1, cx2, cy1, cy2 = cal_rect_xy(box)
            CropThing = frame[cy1:cy2, cx1:cx2]  # initial axis-aligned crop
            # Rotated-rect corners expressed relative to the crop origin.
            x0 = box[0][0] - cx1
            y0 = box[0][1] - cy1
            x1 = box[1][0] - cx1
            y1 = box[1][1] - cy1
            x2 = box[2][0] - cx1
            y2 = box[2][1] - cy1
            x3 = box[3][0] - cx1
            y3 = box[3][1] - cy1
            w = box[2][0] - box[1][0]
            h = box[0][1] - box[1][1]
            print(x0, x1, x2, x3, y0, y1, y2, y3)
            rat = 1.1  # output scale factor for the affine warp
            # Three point pairs define the affine deskew transform.
            pts1 = np.float32([[x1, y1], [x0, y0], [x2, y2]])
            pts2 = np.float32([[0, 0], [0, int(h * rat)], [int(w * rat), 0]])
            # print("pts1, pts2", pts1, pts2)
            M = cv2.getAffineTransform(pts1, pts2)
            CropThing = cv2.warpAffine(CropThing, M, (int(w * rat), int(h * rat)))  # deskewed crop
            cv2.drawContours(ColorThings_line, [box], 0, (0, 0, 255), 2)  # draw the rotated rectangle
            cv2.imshow("ColorThings_line", ColorThings_line)
            # cv2.waitKey(0)
            # (BUG FIX: the original had mojibake comment fragments here that
            # had lost their '#' and executed as bare identifiers, raising
            # NameError on this path; they have been removed.)
            return CropThing  # return the cropped image
        else:  # roughly square shape — no angle correction needed
            cv2.rectangle(ColorThings_line, (x1, y1), (x1 + w, y1 + h), (0, 255, 0), 2)
            cv2.imshow("ColorThings_line", ColorThings_line)
            # cv2.waitKey(0)
            return frame[y1: y1 + h, x1:x1 + w]
    return None
|
import gdspy
import numpy as np
from resonator_coaxmon import*
class Coaxmon:
    """Gdspy geometry for a coaxmon qubit: central core, coupler arc and a
    surrounding ground ring, all concentric around `center`."""
    def __init__(self, center, r1, r2, r3, R4, outer_ground, arc):
        self.center = center
        # r1..r3 are given as fractions of the outer radius R4.
        self.R1 = r1*R4
        self.R2 = r2*R4
        self.R3 = r3*R4
        self.R4 = R4
        self.freq = 7e9  # NOTE(review): hard-coded design frequency — confirm
        self.outer_ground = outer_ground
        self.arc = arc   # half-opening of the coupler arc, in units of pi
    def Generate(self, mode, middle_TL):
        """Build the qubit shape; when mode != 'up' the result is mirrored
        about the horizontal line y == middle_TL."""
        #resonator, center = Resonator(self.x, top_TL , self.R3, self.outer_ground, self.R4, self.freq).Generate(DE, TL_ground, d)
        ground = gdspy.Round((self.center.x,self.center.y), self.outer_ground, self.R4, initial_angle=0, final_angle=2*np.pi)
        #ground = gdspy.boolean(ground, r3, 'not') #substract incoming resonator from qubit ground
        coupler = gdspy.Round((self.center.x,self.center.y), self.R3, self.R2, initial_angle=(-self.arc-1/2)*np.pi, final_angle=(-1/2+self.arc)*np.pi)
        result = gdspy.boolean(ground, coupler, 'or')
        core = gdspy.Round((self.center.x,self.center.y), self.R1, inner_radius=0, initial_angle=0, final_angle=2*np.pi)
        result = gdspy.boolean(result, core, 'or')
        # Anchor points for the Josephson junction and the two airbridges.
        self.JJ_coordinates = coordinates(self.center.x, self.center.y + self.R1)
        self.AB1_coordinates = coordinates(self.center.x, self.center.y + self.R4)
        self.AB2_coordinates = coordinates(self.center.x, self.center.y - self.outer_ground)
        if mode == 'up':
            return result
        else:
            result.mirror([self.center.x, middle_TL], [self.center.x+100, middle_TL])
            return result
class Airbridge:
    """Gdspy geometry for an airbridge: two square contact pads plus the
    bridge strip spanning between them."""
    def __init__(self, width, length, padsize, coordinate):
        self.x = coordinate.x
        self.y = coordinate.y
        self.padsize = padsize   # contact pad edge length
        self.width = width       # bridge strip width
        self.length = length     # pad-center to pad-center distance
    def Generate_contacts(self, layer, mode, mirror_x, mirror_y):
        """Return the two contact pads merged into one shape on `layer`;
        mirrored about the horizontal through (mirror_x, mirror_y) when
        mode == 'down'."""
        #first the two contacts
        contact_1 = gdspy.Rectangle((self.x - self.length/2 - self.padsize/2, self.y), (self.x - self.length/2 + self.padsize/2, self.y + self.padsize))
        contact_2 = gdspy.Rectangle((self.x + self.length/2 - self.padsize/2, self.y), (self.x + self.length/2 + self.padsize/2, self.y + self.padsize))
        #now the bridge itself.
        result = gdspy.boolean(contact_1, contact_2, 'or', layer=layer)
        if mode == 'down':
            result.mirror([mirror_x, mirror_y], [mirror_x + 100, mirror_y])
        return result
    def Generate_bridge(self, layer, mode, mirror_x, mirror_y):
        """Return the bridge span rectangle, mirrored like the contacts when
        mode == 'down'.
        NOTE(review): `layer` is accepted but not passed to gdspy.Rectangle —
        possibly an oversight; confirm."""
        bridge = gdspy.Rectangle((self.x - self.length/2, self.y + self.padsize/2 - self.width/2), (self.x + self.length/2, self.y + self.padsize/2 + self.width/2))
        if mode == 'down':
            bridge.mirror([mirror_x, mirror_y], [mirror_x + 100, mirror_y])
        return bridge
# coding: utf-8
# In[29]:
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
# In[30]:
# Experiment parameters: weighting factors and scenario name for the EPOS run.
alpha = 0.00
beta = 0.65
scenario='case2'
output_folder = '../output/%s-%.2f-%.2f'%(scenario,alpha,beta)
# In[31]:
n_plans = 16
n_agents = 4000
epos_iterations = 40
# In[32]:
# Histogram of selected plan indices, restricted to the "included" agents.
hist = [0]*n_plans
plot_time = 0
plans_folder = os.path.join(output_folder,"t_%d"%plot_time,"traffic")
included = []
for agent in range(n_agents):
    agent_plans_file = os.path.join(plans_folder,'agent_%d.plans'%agent)
    f = open(agent_plans_file,'r')
    # An agent is included when the first character of its first plan line
    # is not '0' (i.e. it has a non-trivial plan).
    if f.readline()[0]!='0':
        included.append(agent)
    f.close()
selected_plans_file = os.path.join(output_folder,"t_%d"%plot_time,'selected-plans.csv')
with open(selected_plans_file, mode='r') as infile:
    reader = csv.reader(infile)
    # Skip the first epos_iterations rows to reach the final iteration's row.
    for i in range(epos_iterations):
        next(reader)
    for rows in reader:
        # Columns 2+ hold one selected-plan index per agent.
        selected_plans = rows[2:]
        for agent in included:
            hist[int(selected_plans[agent])]+=1
        break  # only the first row after the skip is used
# In[33]:
freq = [x*1.0/sum(hist) for x in hist]
print(freq)
print(included)
# In[34]:
color = [x / 255 for x in [255,76,76]]
plt.bar(list(range(n_plans)),freq,width=1,color=color)
plt.xticks(list(range(n_plans)), list(range(n_plans)))
plt.xlabel('Plan index')
plt.ylabel('Frequency')
plt.title('Selected plans: alpha=%.2f, beta=%.2f'%(alpha,beta))
plt.savefig('selected-plans-t_%d-%.2f-%.2f.png'%(plot_time,alpha,beta))
# In[15]:
# Notebook-style trailing expression (shows the count of included agents).
len(included)
|
import socket
import struct
import sys
import netifaces as ni
def myAddress(interface = 'enp0s3'):  # returns the IPv4 address of this machine's interface
    """Return the IPv4 address bound to `interface` (default enp0s3)."""
    # NOTE(review): this first call is redundant — its result is discarded and
    # the lookup below repeats it.
    ni.ifaddresses(interface)
    ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
    return ip
try:
    expression = input("Entre com alguma expressao para ser resolvida: ")
    # SECURITY: eval() on raw user input executes arbitrary code. It is kept
    # here only as a local "does it run" validation before sending the
    # expression to the server; consider ast.literal_eval or a real parser.
    eval(expression)
except Exception:
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    print("Expressao Invalida!")
    exit(0)  # terminate the program
multicast_group = ('224.3.29.71', 10000)
# Create the datagram socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set a timeout so the socket does not block
# indefinitely when trying to receive data.
sock.settimeout(2)  # wait at most 2 seconds for each server reply
# Set the time-to-live for messages to 1 so they do not
# go past the local network segment.
ttl = struct.pack('b', 1)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
try:
    # Send data to the multicast group
    print('Enviando expressao: {!r}'.format(expression))
    sent = sock.sendto(str.encode(expression), multicast_group)
    # Look for responses from all recipients
    print('Esperando receber resposta...')
    while True:
        try:
            data, server = sock.recvfrom(16)
        except socket.timeout:  # no further replies within the timeout window
            print('timed out, no more responses')
            break
        else:
            print('Resultado obitido do servidor:%s รฉ :%s '%(server[0],data.decode()))
finally:
    # Always release the socket, even if sending raised.
    print('closing socket')
    sock.close()
|
from django.urls import path
from django.views.generic import TemplateView
from . import views
# NOTE(review): route paths and names look swapped/inconsistent
# ('in/' -> name 'get_input', 'get/' -> name 'input'); confirm before
# relying on reverse() lookups.
urlpatterns = [
    path('',views.index,name = 'inputIndex'),
    path('pdf/',views.html_to_pdf_view,name = 'ipdf'),
    path('chart/',views.charts,name = 'charts'),
    path('in/',views.datainput,name = 'get_input'),
    path('get/',views.data_input,name = 'input'),
]
#
# Copyright (c) 2010-2016, Fabric Software Inc. All rights reserved.
#
# Make the CString.hpp header available to the generated C++ wrappers.
ext.add_cpp_quoted_include('CString.hpp')
# Bind CStringParams(const char *, char const * const &) and attach a KL
# round-trip test comparing the raw C++ entry point with the KL wrapper.
ext.add_func('CStringParams', 'const char *', ['char const *', 'char const * const &'])\
.add_test("""
report("CxxCStringParams('value', 'constRef') = " + CxxCStringParams('value', Make_CxxCharConstPtrConstRef('constRef')));
report("CStringParams('value', 'constRef') = " + CStringParams('value', 'constRef'));
""", """
CxxCStringParams('value', 'constRef') = valueconstRef
CStringParams('value', 'constRef') = valueconstRef
""")
# Bind a function returning a C string by value.
ext.add_func('CStringValueReturn', 'char const *')\
.add_test("""
report("CxxCStringValueReturn() = " + CxxCStringValueReturn());
report("CStringValueReturn() = " + CStringValueReturn());
""", """
CxxCStringValueReturn() = value
CStringValueReturn() = value
""")
# Bind a function returning a C string by const reference.
ext.add_func('CStringConstRefReturn', 'char const * const &')\
.add_test("""
report("CxxCStringConstRefReturn() = " + CxxCStringConstRefReturn());
report("CStringConstRefReturn() = " + CStringConstRefReturn());
""", """
CxxCStringConstRefReturn() = constRef
CStringConstRefReturn() = constRef
""")
|
""" projectconfigdialog
ProjectConfigDialog Class - GUI implementation
Elements of this class are referenced in ProjectConfig
"""
from Tkinter import *
from constants import *
import tkMessageBox
import tkFileDialog
import os
import pickle
import shutil
from tkMessageBox import *
from PIL import Image, ImageTk
class ProjectConfigDialog():
def __init__(self, projconfig):
    # Back-reference to the owning ProjectConfig controller.
    self.projectconfig = projconfig
    # Last site-list selection observed by poll(); None until first poll.
    self.lastselection = None
    self.build_layout()
def remove_site(self):
    """ Called when user clicks "Remove Site." Deletes site from the list. """
    site = self.SiteStr.get()
    oldSites = self.lbSiteList.curselection()
    # BUG FIX: deleting while iterating in ascending order shifts every later
    # index, so with multiple selections the wrong rows were removed. Walk the
    # indices from highest to lowest so deletions cannot invalidate the rest.
    for i in sorted((int(i) for i in oldSites), reverse=True):
        if self.projectconfig.mySites[i]['url'] == site:
            self.lbSiteList.delete(i)
            del self.projectconfig.mySites[i]
    self.look_for_site_field_edit(0)
def load_role_model_list(self):
    """ Called once the project config dialog is created at the end of build_layout().
    Populates the list of role models from the file"""
    self.projectconfig.myRolesList = []
    self.lbRoleModels.delete(0, END)
    if os.path.isfile(ROLE_FILE_NAME):
        # FIX: use a context manager so the file is closed even when
        # unpickling fails (the original leaked the handle). The
        # `if roleConfigFile != None` check was dead code — open() raises
        # rather than returning None.
        with open(ROLE_FILE_NAME, 'r') as roleConfigFile:
            self.projectconfig.myRolesList = pickle.load(roleConfigFile)
        for role in self.projectconfig.myRolesList:
            self.lbRoleModels.insert(END, role['Name'])
def load_role_model_picture(self):
    """ Called when user clicks "Load Picture" from role model window. Prompts user for file
    of image and saves it for future use when saving"""
    self.imageFile = tkFileDialog.askopenfilename(parent=self.setting, title='Choose a file')
    # BUG FIX: askopenfilename returns an empty string (not None) when the
    # user cancels, so the old `!= None` test always passed and
    # shutil.copy('') then crashed. Test truthiness instead.
    if self.imageFile:
        shutil.copy(self.imageFile, PICS_DIR)
        self.imageFile = os.getcwd() + '/' + PICS_DIR + os.path.basename(self.imageFile)
        myPic = ImageTk.PhotoImage(Image.open(self.imageFile))
        self.lbRolePrev.config(image=myPic)
        self.lbRolePrev.image = myPic  # keep a reference so Tk doesn't GC it
    self.top.focus_set()
def remove_role_model(self):
    """ Called when user selects "Remove" under role model list. Removes role model from listing"""
    selection = self.lbRoleModels.curselection()
    # Only act on an unambiguous single selection.
    if len(selection) != 1:
        return
    selectedIndex = int(selection[0])
    self.lbRoleModels.delete(selectedIndex)
    self.projectconfig.myRolesList.pop(selectedIndex)
    # Persist the shortened list immediately.
    self.save_role_model_list()
def clear_all_fields(self):
    """ Clears all form elements. Done after adding a site when user should no longer
    see previous values"""
    # Reset radio groups, text fields and list selections to their defaults.
    self.intDetType.set(0)
    self.intTimeType.set(0)
    self.BreakLengthStr.set('')
    self.WaitTimeStr.set('')
    self.liTime.select_clear(0, END)
    self.lbRoleModels.selection_clear(0, END)
    # Re-evaluate the Add/Edit Site button label for the now-empty field.
    self.look_for_site_field_edit(0)
def edit_role_model_window(self):
    """ Called when user wants to edit a previously configured role model. Sets member variables
    relating to information about this role model then calls role_model_window(). This function
    populate the window fields with these values."""
    selectedIndices = self.lbRoleModels.curselection()
    # Only act on an unambiguous single selection.
    if len(selectedIndices) != 1:
        return
    roleModel = self.projectconfig.myRolesList[int(selectedIndices[0])]
    # role_model_window() reads these three members to pre-fill its fields.
    self.imageName = roleModel['Name']
    self.imageText = roleModel['QuotesList']
    self.imageFile = roleModel['ImagePath']
    self.role_model_window()
def poll(self):
    """ Checks every 200ms to see if the site field has changed. This is used to implement the
    'edit site' function. When the site field is a site already listed, the add site button changes
    to 'edit site.' If they start changing the field after this, we want to change the button back
    to saying "add site." """
    currentselection = self.lbSiteList.curselection()
    # React either to a changed selection or to a pending click event.
    if currentselection != self.lastselection or self.clickwaiting:
        self.lastselection = currentselection
        self.list_selection_changed(currentselection)
        self.set_click_waiting(0)
    # Re-schedule ourselves via Tk's after() — a simple polling loop.
    self.lbSiteList.after(200, self.poll)
def set_click_waiting(self, num):
    """ Used to help with 'edit site' functionality. Called when user clicks in site listbox. """
    # Flag consumed (and reset) by poll() on its next 200ms tick.
    self.clickwaiting = num
def look_for_site_field_edit(self, setnow):
    """ When the user clicks on a site, we call this to change the button to say 'edit site'
    When the field changes after that, we check if this still makes sense. If not, we switch
    back to "add site" """
    if setnow:
        # If the field matches a known site, offer editing instead of adding.
        for site in self.projectconfig.mySites:
            if self.SiteStr.get() == site['url']:
                self.bSite.config(text="Edit Site")
                return
    self.bSite.config(text="Add Site")
def list_selection_changed(self, selection):
    """ When the user clicks on a value in the site list, we should populate the form values
    with those related to this site policy"""
    if len(selection) < 1:
        return
    # Reset the time fields before filling them from the selected policy.
    self.BreakLengthStr.set('')
    self.WaitTimeStr.set('')
    configobj = self.projectconfig.mySites[int(selection[0])]
    self.liTime.select_clear(0, END)
    self.SiteStr.set(configobj['url'])
    # Set block config piece of site policy
    blockmethod = configobj['BlockConfig']['Method']
    self.rbTimeRadios[blockmethod].select()
    if (blockmethod == TIME_TYPE_ALLOW_BREAKS):
        self.BreakLengthStr.set(str(configobj['BlockConfig']['BreakLength']))
        self.WaitTimeStr.set(str(configobj['BlockConfig']['TimeBetweenBreaks']))
    elif (blockmethod == TIME_TYPE_BLOCK_SCHEDULING):
        # Highlight every scheduled time slot stored for this policy.
        breaks = configobj['BlockConfig']['AllowedTime']
        for allowedtime in breaks:
            self.liTime.selection_set(allowedtime)
    # Set deterrent piece of site policy
    deterrentmethod = configobj['Deterrents']['Method']
    self.intDetType.set(deterrentmethod)
    self.lbRoleModels.select_clear(0, END)
    if deterrentmethod == DET_TYPE_ROLES:
        # Highlight the role model referenced by this policy, if still present.
        roleName = configobj['Deterrents']['RoleModelName']
        for i in range(0, len(self.projectconfig.myRolesList)):
            if self.projectconfig.myRolesList[i]['Name'] == roleName:
                self.lbRoleModels.selection_set(i)
    # The field now names an existing site, so the button reads "Edit Site".
    self.look_for_site_field_edit(1)
def save_role_model_list(self):
    """ Save the current list of role models and their corresponding configurations
    to the disk for future use"""
    # FIX: open() raises on failure rather than returning None, so the old
    # None check was dead; a with-block also guarantees the handle is closed
    # even if pickling raises.
    with open(ROLE_FILE_NAME, 'wb') as fileOpen:
        pickle.dump(self.projectconfig.myRolesList, fileOpen)
def add_role_model_window(self):
    """ Open role model window. Because form fields are populated with imageText, imageFile,
    and imageName members, clear these first"""
    # Blank members mean role_model_window() opens with empty fields.
    self.imageText = []
    self.imageFile = ""
    self.imageName = ""
    self.role_model_window()
def save_role_model(self):
    """ Called when committing a role model. Save the configuration internally and make
    entry in list if it's not already there"""
    if askyesno("Commit changes?", "Are you sure you want to commit changes to this role model?"):
        # Build the role-model record from the window's current field values.
        deterrentconfig = {}
        deterrentconfig['Name'] = self.eRoleName.get()
        deterrentconfig['ImagePath'] = self.imageFile
        deterrentconfig['QuotesList'] = self.tbQuotes.get(1.0, END).rstrip().split('\n')
        # Replace an existing entry with the same name, if any...
        found = 0
        for i in range(0, len(self.projectconfig.myRolesList)):
            if deterrentconfig['Name'] == self.projectconfig.myRolesList[i]['Name']:
                self.projectconfig.myRolesList[i] = deterrentconfig
                found = 1
                break
        # ...otherwise append it and add it to the visible listbox.
        if not found:
            self.projectconfig.myRolesList.append(deterrentconfig)
            self.lbRoleModels.insert(END, deterrentconfig['Name'])
        self.save_role_model_list()
        self.top.destroy()
def role_model_window(self):
    """ Mostly gui implementation of role model window. We populate the name, text, and picture
    if this is an edit -- in that case the 3 corresponding members are set to non blank values"""
    self.top = Toplevel(self.setting)
    lbRoleMo = Label(self.top, text="Role Model")
    lbRoleMo.grid(row=0, column=1)
    # Name entry, pre-filled when editing an existing role model.
    fRoleName = Frame(self.top)
    lbRoleName = Label(fRoleName, text="Name")
    lbRoleName.grid(row=0, column=0)
    self.eRoleName = Entry(fRoleName)
    self.eRoleName.grid(row=0, column=1)
    self.eRoleName.insert(END, self.imageName)
    fRoleName.grid(row=1, column=0, sticky=W)
    # Picture preview area.
    lbPreview = Label(self.top, text="Preview")
    lbPreview.grid(row=2, column=0, sticky=W)
    self.lbRolePrev = Label(self.top)
    self.lbRolePrev.grid(row=2, column=0, sticky=W)
    try:
        if self.imageFile != "":
            imageLab = ImageTk.PhotoImage(Image.open(self.imageFile))
            self.lbRolePrev.config(image=imageLab)
            # Keep a reference so Tk does not garbage-collect the image.
            self.lbRolePrev.image = imageLab
    except:
        # NOTE(review): bare except — any failure (missing file, bad format)
        # is reported the same way; consider narrowing to IOError.
        showerror("Error loading image", "Could not load the image: " + self.imageFile)
    bSetPicture = Button(self.top, text="Select Picture", command=self.load_role_model_picture)
    bSetPicture.grid(row=4, column=2)
    # Quotes text box, pre-filled line by line when editing.
    lbQuotes = Label(self.top, text="Quotes")
    lbQuotes.grid(row=4, column=0, sticky=W)
    self.tbQuotes = Text(self.top)
    self.tbQuotes.grid(row=5, column=0)
    for i in self.imageText:
        j = i + "\n"
        self.tbQuotes.insert(END, j)
    but = Button(self.top, text="commit", command=self.save_role_model)
    but.grid(row=6, column=0)
    def build_layout(self):
        """ GUI implementation of the main project config dialog"""
        self.setting = Toplevel(self.projectconfig.projects_dialog)
        menubar = Menu(self.setting)
        # create a pulldown menu, and add it to the menu bar
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_separator()
        # Placeholder for Help menu
        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label="About", command=lambda: showinfo("Pineapple!", "Pineapple is a filtering program for\n people with short attention spans"))
        menubar.add_cascade(label="Help", menu=helpmenu)
        # display the menu
        self.setting.config(menu=menubar)
        # create the website list
        fWebsites = Frame(self.setting)
        frame = Frame(fWebsites)
        frame.grid(row=2, column=0)
        laWeb = Label(fWebsites, text="Websites")
        laWeb.grid(row=0, column=0)
        # Flag polled by self.poll(); set when the user interacts with the site list.
        self.clickwaiting = 0
        self.SiteStr = StringVar()
        entSite = Entry(frame, textvariable=self.SiteStr)
        entSite.grid(row=0, column=1)
        entSite.bind('<Key>', lambda e: self.look_for_site_field_edit(0))
        laSite = Label(frame, text="Address")
        laSite.grid(row=0, column=0)
        self.bSite = Button(frame, text="Add Site", command=self.projectconfig.add_site)
        self.bSite.grid(row=0, column=2)
        self.lbSiteList = Listbox(fWebsites, selectmode=EXTENDED, exportselection=0)
        self.lbSiteList.grid(row=3, column=0)
        self.lbSiteList.bind('<Button-1>', lambda e: self.set_click_waiting(1))
        bRemoveSite = Button(fWebsites, text="Remove Site", command=self.remove_site)
        bRemoveSite.grid(row=4, column=0)
        self.poll()  # Wait for user to click on site in list -- populate right side accordingly
        # Blacklist / Whitelist
        self.intListType = IntVar(value=1)
        fWebsites.grid(row=3, column=0)
        # Blocking Method
        fBlock = Frame(self.setting)
        lTimeSection = Label(fBlock, text="Blocking Method")
        lTimeSection.grid(row=1, column=4)
        fTimeList = Frame(fBlock)
        fTimeList.grid(row=3, column=4, padx=15)
        # Radio group: one TIME_TYPE_* value shared via self.intTimeType.
        self.intTimeType = IntVar()
        self.rbTimeRadios = []
        self.rbTimeRadios.append(Radiobutton(fTimeList, text="Deter Once", variable=self.intTimeType, value=TIME_TYPE_DENY_ALWAYS))
        self.rbTimeRadios.append(Radiobutton(fTimeList, text="Allow Breaks", variable=self.intTimeType, value=TIME_TYPE_ALLOW_BREAKS))
        self.rbTimeRadios.append(Radiobutton(fTimeList, text="Block Scheduling", variable=self.intTimeType, value=TIME_TYPE_BLOCK_SCHEDULING))
        self.rbTimeRadios[0].grid(row=0, column=0, sticky=W)
        self.rbTimeRadios[1].grid(row=1, column=0, sticky=W)
        self.rbTimeRadios[2].grid(row=4, column=0, sticky=W)
        # Blocking Method / Allow Breaks
        fBreakFrame = Frame(fTimeList)
        lbBreakLength = Label(fBreakFrame, text="Break Length")
        lbBreakLength.grid(row=2, column=0, sticky=E, padx=2)
        self.BreakLengthStr = StringVar()
        entBreakLength = Entry(fBreakFrame, textvariable=self.BreakLengthStr)
        entBreakLength.grid(row=2, column=1)
        lbWaitTime = Label(fBreakFrame, text="Time Between Breaks")
        lbWaitTime.grid(row=3, column=0, sticky=E, padx=2)
        self.WaitTimeStr = StringVar()
        entWaitTime = Entry(fBreakFrame, textvariable=self.WaitTimeStr)
        entWaitTime.grid(row=3, column=1)
        fBreakFrame.grid(row=3, column=0, sticky=E, padx=20)
        # Blocking Method / Block Scheduling: 24 selectable hour slots.
        fTimeScroll = Frame(fTimeList)
        fTimeScroll.grid(row=6, column=0)
        scTime = Scrollbar(fTimeScroll, orient=VERTICAL)
        self.liTime = Listbox(fTimeScroll, selectmode=EXTENDED, yscrollcommand=scTime.set, exportselection=0)
        self.liTime.grid(row=0, column=0, sticky=N + S, rowspan=1)
        scTime.config(command=self.liTime.yview)
        scTime.grid(row=0, column=1, rowspan=1, sticky=N + S)
        for item in ["12 am", "1 am", "2 am", "3 am", "4 am", "5 am", "6 am",
                     "7 am", "8 am", "9 am", "10 am", "11 am", "12 pm", "1 pm", "2 pm",
                     "3 pm", "4 pm", "5 pm", "6 pm", "7 pm", "8 pm", "9 pm", "10 pm",
                     "11 pm"]:
            self.liTime.insert(END, item)
        fBlock.grid(row=3, column=4)
        # Deterrents
        fDeterrents = Frame(self.setting)
        self.intDetType = IntVar()
        lbDet = Label(fDeterrents, text="Deterrents")
        lSpace = Label(fDeterrents, text="")
        lSpace.grid(row=0, column=0)
        lbDet.grid(row=1, column=6)
        fDets = Frame(fDeterrents)
        # Radio group: one DET_TYPE_* value shared via self.intDetType.
        rbDetRadios = []
        rbDetRadios.append(Radiobutton(fDets, text="Only Block", variable=self.intDetType, value=DET_TYPE_DENY))
        rbDetRadios.append(Radiobutton(fDets, text="Type Deterrent", variable=self.intDetType, value=DET_TYPE_TYPE))
        rbDetRadios.append(Radiobutton(fDets, text="Explain Value", variable=self.intDetType, value=DET_TYPE_EXPLAIN))
        rbDetRadios.append(Radiobutton(fDets, text="Role Models", variable=self.intDetType, value=DET_TYPE_ROLES))
        rbDetRadios[0].grid(row=0, column=0, sticky=W)
        rbDetRadios[1].grid(row=1, column=0, sticky=W)
        rbDetRadios[2].grid(row=2, column=0, sticky=W)
        rbDetRadios[3].grid(row=3, column=0, sticky=W)
        # Role-model list; double-click opens the edit window.
        self.lbRoleModels = Listbox(fDets, exportselection=0)
        self.lbRoleModels.grid(row=4, column=0)
        self.lbRoleModels.bind("<Double-Button-1>", lambda e: self.edit_role_model_window())
        roleFrame = Frame(fDets)
        bAddWindow = Button(roleFrame, text="Add New")
        bAddWindow.config(command=self.add_role_model_window)
        bAddWindow.grid(row=0, column=0)
        bEditWindow = Button(roleFrame, text="Edit Selection")
        bEditWindow.config(command=self.edit_role_model_window)
        bEditWindow.grid(row=0, column=1)
        bDelete = Button(roleFrame, text="Delete Role Model")
        bDelete.config(command=self.remove_role_model)
        bDelete.grid(row=0, column=2)
        roleFrame.grid(row=5, column=0)
        fDets.grid(row=3, column=6)
        fDeterrents.grid(row=3, column=6)
        self.load_role_model_list()
        # Project name + commit row across the top.
        fProjInfo = Frame(self.setting)
        lProjName = Label(fProjInfo, text="Project Name")
        self.eProjName = Entry(fProjInfo)
        bProjCommit = Button(fProjInfo, text="Commit Changes", command=self.projectconfig.save_project)
        lProjName.grid(row=0, column=0)
        self.eProjName.grid(row=0, column=1)
        bProjCommit.grid(row=0, column=2,)
        fProjInfo.grid(row=0, column=4)
        self.setting.title("Project Configuration")
|
from typing import Callable
class SegmentTree:
    """Point-update / range-query segment tree over an associative operator.

    The operator does not need to be commutative: children are always
    combined left-before-right, and out-of-range leaves hold the identity.
    """

    def __init__(self, arr, operator: Callable, ide_ele):
        """
        arr: initial values (0-indexed)
        operator: associative binary function used to combine elements
        ide_ele: identity element of the operator
        """
        length = len(arr)
        # Smallest power of two that can hold every element as a leaf.
        leaves = 1
        while leaves < length:
            leaves <<= 1
        # Leaf i of the original array lives at tree index i + offset
        # (internal nodes occupy indices 1 .. offset-1, 1-indexed).
        self.offset = leaves
        self.ide_ele = ide_ele
        self.operator = operator
        self.tree = [ide_ele] * (leaves << 1)
        self.tree[leaves:leaves + length] = list(arr)
        # Build internal nodes bottom-up: parent = op(left child, right child).
        for node in range(leaves - 1, 0, -1):
            self.tree[node] = operator(self.tree[node << 1], self.tree[(node << 1) | 1])

    def update(self, k, x):
        """Set the value at original index k to x and refresh its ancestors."""
        node = k + self.offset
        self.tree[node] = x
        while node > 1:
            parent = node >> 1
            # Recompute from both children, keeping left-right order.
            self.tree[parent] = self.operator(self.tree[parent << 1], self.tree[(parent << 1) | 1])
            node = parent

    def get(self, i):
        """Return the current value at original index i."""
        return self.tree[i + self.offset]

    def query(self, l, r):
        """Fold the operator over the half-open range [l, r)."""
        lo = l + self.offset
        hi = r + self.offset
        left_acc = self.ide_ele
        right_acc = self.ide_ele
        while lo < hi:
            if lo & 1:  # lo is a right child: take it and step inward.
                left_acc = self.operator(left_acc, self.tree[lo])
                lo += 1
            if hi & 1:  # hi is exclusive; take the node just below it.
                hi -= 1
                right_acc = self.operator(self.tree[hi], right_acc)
            lo >>= 1    # climb to the parent level
            hi >>= 1
        return self.operator(left_acc, right_acc)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 10:13:25 2018
@author: NTPU
"""
# Place a randomly chosen block at the player's current tile position.
# Fixes: 'form' -> 'from' (SyntaxError), missing `choice` import, and the
# malformed list literal `[46+,46]`.
from time import sleep
from random import choice
from mcpi.minecraft import Minecraft

thomas = Minecraft.create()
# Candidate block ids to drop; 46 is TNT. NOTE(review): the original literal
# was corrupted (`[46+,46]`) -- confirm the intended second id.
block = [46, 46]
r = choice(block)
myID = thomas.getPlayerEntityId("Thomas0217")
x, y, z = thomas.entity.getTilePos(myID)
thomas.setBlock(x, y, z, r)
# Generated by Django 2.2.11 on 2020-03-22 20:02
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
    """Make resourceitempage's description, file_size, format and
    product_code fields optional (blank/null with empty-string default)."""

    dependencies = [
        ('contentPages', '0007_delete_resourceitempreview'),
    ]

    operations = [
        # NOTE(review): default='' together with null=True on CharField /
        # RichTextField stores two distinct "empty" states; Django convention
        # avoids null=True on text fields. Migrations are frozen history, so
        # this is only flagged, not changed.
        migrations.AlterField(
            model_name='resourceitempage',
            name='description',
            field=wagtail.core.fields.RichTextField(blank=True, default='', null=True),
        ),
        migrations.AlterField(
            model_name='resourceitempage',
            name='file_size',
            field=models.CharField(blank=True, default='', max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='resourceitempage',
            name='format',
            field=models.CharField(blank=True, default='', max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='resourceitempage',
            name='product_code',
            field=models.CharField(blank=True, default='', max_length=256, null=True),
        ),
    ]
|
#!/usr/bin/env python3
import serial
import re
import time
if __name__ == '__main__':
    # Alternate a light on/off every 5 seconds over a serial line.
    light_control_state = 'off'
    serial_com = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)
    serial_com.flush()
    while True:
        # BUG FIX: the original used two independent `if` statements, so a
        # state flipped to 'off' was immediately flipped back -- the device
        # was sent 'on' on every iteration. if/else makes it truly toggle.
        if light_control_state == 'on':
            light_control_state = 'off'
        else:
            light_control_state = 'on'
        # light control: one state word per line, newline-terminated.
        serial_com.write((light_control_state + '\n').encode())
        time.sleep(5)
from PIL import Image

# Show a daily comic strip, then show each of its three panels separately.
strip = Image.open("2019-01-19.gif")
strip.load()
strip.show()

width, height = strip.size  # Get dimensions
panelW = 360                # width of a single panel
midX = width / 2
cpyRt = 20                  # gutter allowance between panels

# First panel's left edge, measured from the horizontal centre of the strip.
first_left = midX - panelW / 2 - cpyRt
for idx in range(3):
    box_left = first_left + idx * (panelW + cpyRt)
    # Clamp the right edge so the last crop never runs past the image.
    box_right = min(box_left + panelW + cpyRt, width)
    panel = strip.crop((box_left, 0, box_right, height))
    panel.show()
#Display Image
#Applying Grayscale filter to image
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Saving filtered image to new file
#cv2.imwrite('graytest.jpg',img)
|
# -*- coding: utf-8 -*-
# Control keys
#
# NOTE: these Control key definitions are intended only to provide
# mnemonic names for the ASCII control codes. They cannot be used
# to define menu hotkeys, etc., which require scan codes.
# Ctrl+letter codes are simply the ASCII control characters 0x01..0x1A
# (Ctrl+A == SOH .. Ctrl+Z == SUB).
kbCtrlA = 0x0001
kbCtrlB = 0x0002
kbCtrlC = 0x0003
kbCtrlD = 0x0004
kbCtrlE = 0x0005
kbCtrlF = 0x0006
kbCtrlG = 0x0007
kbCtrlH = 0x0008
kbCtrlI = 0x0009
kbCtrlJ = 0x000a
kbCtrlK = 0x000b
kbCtrlL = 0x000c
kbCtrlM = 0x000d
kbCtrlN = 0x000e
kbCtrlO = 0x000f
kbCtrlP = 0x0010
kbCtrlQ = 0x0011
kbCtrlR = 0x0012
kbCtrlS = 0x0013
kbCtrlT = 0x0014
kbCtrlU = 0x0015
kbCtrlV = 0x0016
kbCtrlW = 0x0017
kbCtrlX = 0x0018
kbCtrlY = 0x0019
kbCtrlZ = 0x001a
# Extended key codes.
# Layout follows the classic BIOS convention: high byte = scan code,
# low byte = ASCII (0x00/0x01/0x10 low bytes mark special Shift/Ctrl variants).
# Fix: kbShiftUp used an uppercase '0X' prefix, inconsistent with every other
# literal in this table; normalized to lowercase (value unchanged).
kbEsc = 0x011b
kbAltSpace = 0x0200
kbCtrlIns = 0x0400
kbShiftIns = 0x0500
kbCtrlDel = 0x0600
kbShiftDel = 0x0700
kbBackSpace = 0x0e08
kbCtrlBackSpace = 0x0e7f
kbShiftTab = 0x0f00
kbTab = 0x0f09
kbCtrlTab = 0x0f0a
# Alt+letter (top row)
kbAltQ = 0x1000
kbAltW = 0x1100
kbAltE = 0x1200
kbAltR = 0x1300
kbAltT = 0x1400
kbAltY = 0x1500
kbAltU = 0x1600
kbAltI = 0x1700
kbAltO = 0x1800
kbAltP = 0x1900
kbCtrlEnter = 0x1c0a
kbEnter = 0x1c0d
# Alt+letter (home row)
kbAltA = 0x1e00
kbAltS = 0x1f00
kbAltD = 0x2000
kbAltF = 0x2100
kbAltG = 0x2200
kbAltH = 0x2300
kbAltJ = 0x2400
kbAltK = 0x2500
kbAltL = 0x2600
# Alt+letter (bottom row)
kbAltZ = 0x2c00
kbAltX = 0x2d00
kbAltC = 0x2e00
kbAltV = 0x2f00
kbAltB = 0x3000
kbAltN = 0x3100
kbAltM = 0x3200
# Function keys
kbF1 = 0x3b00
kbF2 = 0x3c00
kbF3 = 0x3d00
kbF4 = 0x3e00
kbF5 = 0x3f00
kbF6 = 0x4000
kbF7 = 0x4100
kbF8 = 0x4200
kbF9 = 0x4300
kbF10 = 0x4400
kbF11 = 0x4500
kbF12 = 0x4600
# Cursor / navigation keys
kbHome = 0x4700
kbUp = 0x4800
kbShiftUp = 0x4801
kbCtrlUp = 0x4810
kbPgUp = 0x4900
kbGrayMinus = 0x4a2d
kbLeft = 0x4b00
kbShiftLeft = 0x4b01
kbRight = 0x4d00
kbShiftRight = 0x4d01
kbGrayPlus = 0x4e2b
kbEnd = 0x4f00
kbDown = 0x5000
kbShiftDown = 0x5001
kbCtrlDown = 0x5010
kbPgDn = 0x5100
kbIns = 0x5200
kbDel = 0x5300
# Shift+function keys
kbShiftF1 = 0x5400
kbShiftF2 = 0x5500
kbShiftF3 = 0x5600
kbShiftF4 = 0x5700
kbShiftF5 = 0x5800
kbShiftF6 = 0x5900
kbShiftF7 = 0x5a00
kbShiftF8 = 0x5b00
kbShiftF9 = 0x5c00
kbShiftF10 = 0x5d00
kbShiftF11 = 0x5d10
kbShiftF12 = 0x5d20
# Ctrl+function keys
kbCtrlF1 = 0x5e00
kbCtrlF2 = 0x5f00
kbCtrlF3 = 0x6000
kbCtrlF4 = 0x6100
kbCtrlF5 = 0x6200
kbCtrlF6 = 0x6300
kbCtrlF7 = 0x6400
kbCtrlF8 = 0x6500
kbCtrlF9 = 0x6600
kbCtrlF10 = 0x6700
kbCtrlF11 = 0x6710
kbCtrlF12 = 0x6720
# Alt+function keys
kbAltF1 = 0x6800
kbAltF2 = 0x6900
kbAltF3 = 0x6a00
kbAltF4 = 0x6b00
kbAltF5 = 0x6c00
kbAltF6 = 0x6d00
kbAltF7 = 0x6e00
kbAltF8 = 0x6f00
kbAltF9 = 0x7000
kbAltF10 = 0x7100
kbAltF11 = 0x7110
kbAltF12 = 0x7120
# Ctrl+navigation keys
kbCtrlPrtSc = 0x7200
kbCtrlLeft = 0x7300
kbCtrlRight = 0x7400
kbCtrlEnd = 0x7500
kbShiftEnd = 0x7501
kbCtrlPgDn = 0x7600
kbCtrlHome = 0x7700
kbShiftHome = 0x7701
# Alt+digit row
kbAlt1 = 0x7800
kbAlt2 = 0x7900
kbAlt3 = 0x7a00
kbAlt4 = 0x7b00
kbAlt5 = 0x7c00
kbAlt6 = 0x7d00
kbAlt7 = 0x7e00
kbAlt8 = 0x7f00
kbAlt9 = 0x8000
kbAlt0 = 0x8100
kbAltMinus = 0x8200
kbAltEqual = 0x8300
kbCtrlPgUp = 0x8400
kbAltBackSpace = 0x0800
kbNoKey = 0x0000
# Keyboard state and shift masks (bit flags, combinable with |).
kbLeftShift = 0x0001
kbRightShift = 0x0002
kbShift = kbLeftShift | kbRightShift
# NOTE(review): left/right Ctrl (and left/right Alt below) intentionally share
# one bit -- presumably mirroring the BIOS shift-status byte, which does not
# distinguish sides for Ctrl/Alt. Confirm before "fixing".
kbLeftCtrl = 0x0004
kbRightCtrl = 0x0004
kbCtrlShift = kbLeftCtrl | kbRightCtrl
kbLeftAlt = 0x0008
kbRightAlt = 0x0008
kbAltShift = kbLeftAlt | kbRightAlt
# Lock / toggle states
kbScrollState = 0x0010
kbNumState = 0x0020
kbCapsState = 0x0040
kbInsState = 0x0080
|
#-*- coding:UTF-8 -*-
from unittest import TestCase
import unittest
from selenium import webdriver
from helpers.supportPage import supportPage
from helpers.championsPage import championsPage
class SeleniumTest(TestCase):
    """Selenium UI tests for the support and champions pages.

    BUG FIX: four tests referenced ``self.page``, which setUp never defines
    (AttributeError at runtime); they now use the ``self.supportPage`` /
    ``self.championsPage`` page objects created in setUp, following the same
    pattern as test_search_support. The split u"..." literal in
    test_login_support was also rejoined onto one line.
    """
    def setUp(self):
        self.driver = webdriver.Firefox()
        self.supportPage = supportPage(self.driver)
        self.championsPage = championsPage(self.driver)
        self.driver.implicitly_wait(10)

    def tearDown(self):
        self.driver.close()

    # Article search
    def test_search_support(self):
        self.supportPage.open()
        self.supportPage.sup_search_bar().search('Ranked')
        self.assertEqual(u"ะะพะผะฐะฝะดะฝัะต ัะฐะฝะณะพะฒัะต ะธะณัั - ะฟะตัะฒัะต ัะฐะณะธ", self.driver.find_element_by_link_text(u'ะะพะผะฐะฝะดะฝัะต ัะฐะฝะณะพะฒัะต ะธะณัั - ะฟะตัะฒัะต ัะฐะณะธ').text)

    # Login test
    def test_login_support(self):
        self.supportPage.open()
        self.supportPage.clickLogin()
        self.supportPage.sup_login().login('deathtotti@yandex.ru', '******')
        self.assertEqual(u"ะะธัะฐะธะป", self.driver.find_element_by_id("top-right-name").text)

    # Logout test
    def test_logout_support(self):
        self.supportPage.open()
        self.supportPage.clickLogin()
        self.supportPage.sup_login().login('deathtotti@yandex.ru', '******')
        self.supportPage.sup_logout().logout()
        self.assertEqual(u"ะั ะทะฐะฒะตััะธะปะธ ัะตะฐะฝั ัะฐะฑะพัั.", self.driver.find_element_by_id("notice").text)

    # Champions filter test
    def test_champions_filter(self):
        self.championsPage.open()
        self.championsPage.champ_filter().check()
        self.assertEqual(u"ะััะพะบั", self.driver.find_element_by_link_text(u'ะััะพะบั').text)

    # Champions search test
    def test_champions_search(self):
        self.championsPage.open()
        self.championsPage.champ_search().search(u'ะั')
        self.assertEqual(u"ะัะบะพะฝะณ", self.driver.find_element_by_link_text(u'ะัะบะพะฝะณ').text)


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
import logging
import pika
import json
import os
import couchdb
UPDATED_TWEET_QUEUE = 'updated_tweets'
COUCHDB_TWEET_DATABASE = 'tweets'
logging.basicConfig(level=logging.DEBUG)
def do_consume(ch, method, properties, body):
    """Consume a tweet from RabbitMQ."""
    tweet = json.loads(body)
    logging.info(f'Consumed tweet: {tweet}')
    # The tweet id becomes the CouchDB document key; the rest is the document.
    tweet_id = tweet.pop('id')
    if tweet_id in db:
        # Duplicate delivery -- leave the stored document untouched.
        logging.info(f'Tweet {tweet_id} is already in the database. '
                     f'Skipping.')
    else:
        db[tweet_id] = tweet
        logging.info(f'Added tweet {tweet_id} to the database.')
    # Ack only after the DB check/write so an unprocessed message is redelivered.
    ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == '__main__':
    # RabbitMQ Setup: credentials come from the environment; host name
    # 'rabbitmq' implies a docker-compose style deployment.
    credentials = pika.PlainCredentials(os.environ['RABBITMQ_USER'],
                                        os.environ['RABBITMQ_PASS'])
    parameters = pika.ConnectionParameters(host='rabbitmq',
                                           credentials=credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # Declaring is idempotent; ensures the queue exists before consuming.
    channel.queue_declare(queue=UPDATED_TWEET_QUEUE)
    # CouchDB Setup: `db` is read by do_consume() as a module-level global.
    couch = couchdb.Server('http://couchdb:5984/')
    couch.resource.credentials = (os.environ['COUCHDB_USER'],
                                  os.environ['COUCHDB_PASSWORD'])
    if COUCHDB_TWEET_DATABASE in couch:
        db = couch[COUCHDB_TWEET_DATABASE]
    else:
        db = couch.create(COUCHDB_TWEET_DATABASE)
    # Limit unacked deliveries so redelivery after a crash stays small.
    channel.basic_qos(prefetch_count=3)
    # NOTE(review): this positional-callback form is the pika 0.x signature;
    # pika >= 1.0 requires basic_consume(queue=..., on_message_callback=...).
    # Confirm the pinned pika version.
    channel.basic_consume(do_consume, queue=UPDATED_TWEET_QUEUE)
    channel.start_consuming()
|
#!/usr/bin/env python3
"""sum_double
Given two int values, return their sum.
Unless the two values are the same, then return double their sum.
sum_double(1, 2) โ 3
sum_double(3, 2) โ 5
sum_double(2, 2) โ 8
source: https://codingbat.com/prob/p141905
"""
def sum_double(a: int, b: int) -> int:
    """Return a + b, doubled when both values are equal."""
    total = a + b
    return total * 2 if a == b else total
if __name__ == "__main__":
print(sum_double(1, 2))
print(sum_double(3, 2))
print(sum_double(2, 2))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 06:20:42 2019
@author: berna
"""
#Grafico dell'angolo per la diffusione con 1 riflessione interna
import numpy as np
from matplotlib import pyplot as plt
n = 4./3                            # refractive index (water)
deg = 180./np.pi                    # radians -> degrees conversion factor
# Incidence angles (radians) sampled over the plotted range.
x = np.linspace(0.083, 1.8, 200)
def diffusion(x):
    # Scattering angle (in degrees) after one internal reflection,
    # from Snell's law with module-level refractive index `n`.
    refracted = np.arcsin(np.sin(x) / n)
    return (4 * refracted - 2 * x) * deg
y = diffusion(x)
# Plot
# FIX: TeX labels use raw strings throughout -- '\d' and '\i' are invalid
# escape sequences in normal strings (DeprecationWarning, later SyntaxWarning);
# the rendered bytes are unchanged.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig, ax = plt.subplots()
ax.set_ylabel(r'Angolo di diffusione $\delta_1$ $[\deg]$')
ax.set_xlabel(r'Angolo di incidenza $\i_1$ $[\deg]$', x=0.84)
ax.grid(color = 'gray', linestyle = '--', alpha=0.7)
ax.plot(x*deg, y, '-', label=r'$\delta_1$', zorder =10)
# Marker at the incidence angle (~59.4 deg) giving the maximum deflection.
ax.plot(1.037*deg, np.max(diffusion(x)),'o',ms=4., label='Massimo')
ax.xaxis.set_major_locator(plt.MultipleLocator(10))
ax.xaxis.set_minor_locator(plt.MultipleLocator(2))
ax.yaxis.set_major_locator(plt.MultipleLocator(10))
ax.yaxis.set_minor_locator(plt.MultipleLocator(2))
ax.tick_params(direction='in', length=5, width=1., top=True, right=True)
ax.tick_params(which='minor', direction='in', width=1., top=True, right=True)
legend = ax.legend(loc ='best')
plt.show()
import os
from warnings import warn
from __main__ import ctk
from __main__ import qt
from __main__ import slicer
from __main__ import vtk
from . import __slicer_module__, postgresDatabase
try:
import ConfigParser as cParser
import logging
import logging.handlers
except ImportError:
print "External modules not found!"
raise ImportError
class DerivedImageQALogic(object):
    """ Logic class to be used 'under the hood' of the evaluator.

    Drives the QA workflow: locks a batch of records in Postgres, resolves the
    image/label files for each session, loads them into the Slicer MRML scene,
    collects the reviewer's good/bad ratings, and writes them back.
    Python 2 / Slicer module code.
    """
    def __init__(self, widget, test=False):
        # The owning widget supplies logging plus the region/image name lists.
        self.widget = widget
        self.logging = self.widget.logging
        self.regions = self.widget.regions
        self.images = self.widget.images
        # Rating label -> database value (stored as strings).
        self.qaValueMap = {'good':'1', 'bad':'0', 'follow up':'-1'}
        self.user_id = None
        self.database = None
        self.config = None
        self.batchSize = 1
        self.batchRows = None       # rows of the currently locked batch
        self.count = 0 # Starting value
        self.maxCount = 0           # number of rows in the batch
        self.currentSession = None
        self.currentValues = (None,) * len(self.images + self.regions)
        self.sessionFiles = {}      # image/region name -> resolved file path
        self.testing = test
        if self.testing:
            self.logging.info("TESTING is ON")
        self.setup()

    def setup(self):
        """Read the database and module config files; connect to Postgres
        (skipped when testing)."""
        print "setup()"
        # `config` holds DB settings; `self.config` holds module/file-layout settings.
        config = cParser.SafeConfigParser()
        self.config = cParser.SafeConfigParser()
        logicConfig = os.path.abspath(os.environ['QA_MODULE_CONFIG'])
        self.logging.info("Setting module configuration to %s", logicConfig)
        if self.testing:
            databaseConfig = os.path.join(__slicer_module__, 'testdatabase.cfg')
            self.user_id = 'user1'
            self.logging.info("TESTING: Setting database user to %s", self.user_id)
        else:
            databaseConfig = os.path.abspath(os.environ['QA_DB_CONFIG'])
            self.logging.info("Setting database configuration to %s", databaseConfig)
            self.user_id = os.environ['USER']
            self.logging.info("logic.py: Setting database user to %s", self.user_id)
        for configFile in [databaseConfig, logicConfig]:
            if not os.path.exists(configFile):
                raise IOError("File {0} not found!".format(configFile))
        config.read(databaseConfig)
        host = config.get('Postgres', 'Host')
        port = config.getint('Postgres', 'Port')
        database = config.get('Postgres', 'Database')
        db_user = config.get('Postgres', 'User')
        password = config.get('Postgres', 'Password')
        ## TODO: Use secure password handling (see RunSynchronization.py in phdxnat project)
        # import hashlib as md5
        # md5Password = md5.new(password)
        ### HACK
        if not self.testing:
            self.database = postgresDatabase(host, port, db_user, database, password,
                                             self.user_id, self.batchSize)
        ### END HACK
        self.config.read(logicConfig)
        self.logging.info("logic.py: Reading logic configuration from %s", logicConfig)

    def selectRegion(self, buttonName):
        """ Load the outline of the selected region into the scene
        """
        self.logging.debug("call")
        nodeName = self.constructLabelNodeName(buttonName)
        if nodeName == '':
            return -1
        labelNode = slicer.util.getNode(nodeName)
        if labelNode.GetLabelMap():
            # Show the label map as an outline overlay in every slice view.
            compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*')
            for compositeNode in compositeNodes.values():
                compositeNode.SetLabelVolumeID(labelNode.GetID())
                compositeNode.SetLabelOpacity(1.0)
            # Set the label outline to ON
            sliceNodes = slicer.util.getNodes('vtkMRMLSliceNode*')
            for sliceNode in sliceNodes.values():
                sliceNode.UseLabelOutlineOn()
        else:
            # Not a label map: display it as the background volume instead.
            self.loadBackgroundNodeToMRMLScene(labelNode)

    def constructLabelNodeName(self, buttonName):
        """ Create the names for the volume and label nodes """
        self.logging.debug("call")
        if not self.currentSession is None:
            # Node names are '<session>_<button>' to stay unique per session.
            nodeName = '_'.join([self.currentSession, buttonName])
            return nodeName
        return ''

    def onCancelButtonClicked(self):
        self.logging.debug("call")
        # TODO: Populate this function
        #   onNextButtonClicked WITHOUT the write to database
        self.logging.info("Cancel button clicked!")

    def writeToDatabase(self, evaluations):
        """Persist one review row; unlock the record as reviewed ('R')."""
        self.logging.debug("call")
        if self.testing:
            recordID = str(self.batchRows[self.count]['record_id'])
        else:
            recordID = self.batchRows[self.count][0]
        values = (recordID,) + evaluations
        try:
            if self.testing:
                self.database.writeAndUnlockRecord(values)
            else:
                self.database.writeReview(values)
                self.database.unlockRecord('R', recordID)
        except:
            # TODO: Prompt user with popup
            self.logging.error("Error writing to database for record %d", recordID)
            raise

    def _getLabelFileNameFromRegion(self, regionName):
        """Map 'caudate_left' -> 'l_Caudate_seg.nii.gz'; names without a side
        keep the '<region>_seg.nii.gz' form."""
        self.logging.debug("call")
        try:
            region, side = regionName.split('_')
            fileName = '_'.join([side[0], region.capitalize(), 'seg.nii.gz'])
        except ValueError:
            region = regionName
            fileName = '_'.join([region, 'seg.nii.gz'])
        return fileName

    def onGetBatchFilesClicked(self):
        """Lock a fresh batch, resolve its files and load the first session."""
        self.logging.debug("call")
        self.count = 0
        self.batchRows = self.database.lockAndReadRecords()
        self.maxCount = len(self.batchRows)
        self.constructFilePaths()
        self.setCurrentSession()
        self.loadData()
        # Columns beyond this count hold pre-filled (roboRater) review values.
        reviewColumnsCount = 8
        print self.batchRows[self.count]
        if len(self.batchRows[self.count]) > reviewColumnsCount:
            # roboRater has done this already
            self.currentReviewValues = self.batchRows[self.count][reviewColumnsCount:]
        else:
            self.currentReviewValues = []

    def setCurrentSession(self):
        self.logging.debug("call")
        self.currentSession = self.sessionFiles['session']
        self.widget.currentSession = self.currentSession

    def _all_Labels_seg(self, oldfilename, nodeName, level, session):
        """
        From PREDICTIMG-2335: Derived Images QA has been loading the individual segmentations in the
        CleanedDenoisedRFSegmentations folder, not the combined all_Labels_seg file which has the final segmentations after
        competition. Load the correct labels from all_Labels_seg.nii.gz and have the corresponding labels display for each label
        choice in the module.
        """
        print "_all_Labels_seg()"
        import numpy
        allLabelName = 'allLabels_seg_{0}'.format(session)
        labelNode = slicer.util.getNode(allLabelName)
        if labelNode is None:
            labelNode = self.loadLabelVolume(allLabelName, oldfilename)
        la = slicer.util.array(labelNode.GetID())
        # Clone the combined label volume, then keep only voxels == level as a
        # binary (0/1) mask for this particular region.
        outputLabelNode = slicer.modules.volumes.logic().CloneVolume(slicer.mrmlScene, labelNode, nodeName)
        ma = slicer.util.array(outputLabelNode.GetID())
        mask = numpy.ndarray.copy(la)
        mask[mask != level] = 0
        mask[mask == level] = 1
        ma[:] = mask
        outputLabelNode.GetImageData().Modified()

    def constructFilePaths(self):
        """
        >>> import DerivedImagesQA as diqa
        External modules not found!
        /Volumes/scratch/welchdm/src/Slicer-extensions/SlicerQAExtension
        External modules not found!
        >>> test = diqa.DerivedImageQAWidget(None, True)
        Testing logic is ON
        >>> test.logic.count = 0 ### HACK
        >>> test.logic.batchRows = [['rid','exp', 'site', 'sbj', 'ses', 'loc']] ### HACK
        >>> test.logic.constructFilePaths()
        Test: loc/exp/site/sbj/ses/TissueClassify/t1_average_BRAINSABC.nii.gz
        File not found for file: t2_average
        Skipping session...
        Test: loc/exp/site/sbj/ses/TissueClassify/t1_average_BRAINSABC.nii.gz
        File not found for file: t1_average
        Skipping session...
        Test: loc/exp/site/sbj/ses/TissueClassify/fixed_brainlabels_seg.nii.gz
        File not found for file: labels_tissue
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_caudate_seg.nii.gz
        File not found for file: caudate_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_caudate_seg.nii.gz
        File not found for file: caudate_right
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_accumben_seg.nii.gz
        File not found for file: accumben_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_accumben_seg.nii.gz
        File not found for file: accumben_right
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_putamen_seg.nii.gz
        File not found for file: putamen_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_putamen_seg.nii.gz
        File not found for file: putamen_right
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_globus_seg.nii.gz
        File not found for file: globus_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_globus_seg.nii.gz
        File not found for file: globus_right
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_thalamus_seg.nii.gz
        File not found for file: thalamus_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_thalamus_seg.nii.gz
        File not found for file: thalamus_right
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/l_hippocampus_seg.nii.gz
        File not found for file: hippocampus_left
        Skipping session...
        Test: loc/exp/site/sbj/ses/DenoisedRFSegmentations/r_hippocampus_seg.nii.gz
        File not found for file: hippocampus_right
        Skipping session...

        NOTE(review): the doctest output above no longer matches the current
        print statements ('**** Test: ' / '**** File not found: ...'); the
        doctest would fail if re-enabled. Kept as workflow documentation.
        """
        self.logging.debug("call")
        row = self.batchRows[self.count]
        sessionFiles = {}
        # Due to a poor choice in our database creation, the 'location' column is the 6th, NOT the 2nd
        baseDirectory = os.path.join(row[5], row[1], row[2], row[3], row[4])
        sessionFiles['session'] = row[4]
        sessionFiles['record_id'] = row[0]
        for image in self.images + self.regions:
            # Candidate directories/filenames per image come from the module
            # config (values are stored as Python-literal strings, hence eval).
            imageDirs = eval(self.config.get(image, 'directories'))
            imageFiles = eval(self.config.get(image, 'filenames'))
            for _dir in imageDirs:
                for _file in imageFiles:
                    temp = os.path.join(baseDirectory, _dir, _file)
                    if self.testing:
                        print "**** Test: ", temp
                    if os.path.exists(temp):
                        sessionFiles[image] = temp
                        # NOTE(review): 'break; break' does NOT exit both
                        # loops in Python -- the second break is unreachable,
                        # so only the inner (_file) loop ends and later
                        # directories can overwrite this match.
                        break; break
                    elif image == 't2_average':  # Assume this is a T1-only session
                        sessionFiles[image] = os.path.join(__slicer_module__, 'Resources', 'images', 'emptyImage.nii.gz')
                        break; break
                    else:
                        sessionFiles[image] = None
                        print "**** File not found: %s" % temp
            if sessionFiles[image] is None:
                # No candidate path existed: mark the record missing ('M') and
                # stop resolving this session.
                self.logging.info("Skipping session %s", sessionFiles['session'])
                # raise IOError("File not found!\nFile: %s" % sessionFiles[image])
                if not self.testing:
                    self.database.unlockRecord('M', sessionFiles['record_id'])
                self.logging.debug("image = %s", image)
                break
        if None in sessionFiles.values():
            # Recursively pull the next batch until a complete session is found.
            self.logging.debug("'None' value in sessionFiles - recursive call initiated")
            self.onGetBatchFilesClicked()
        else:
            self.sessionFiles = sessionFiles

    def loadScalarVolume(self, nodeName, filename):
        """Load a scalar volume into the scene with auto window/level."""
        isLoaded, volumeNode = slicer.util.loadVolume(filename, properties={'name':nodeName}, returnNode=True)
        assert isLoaded, "File failed to load: {0}".format(filename)
        volumeNode.GetDisplayNode().AutoWindowLevelOn()
        return volumeNode

    def loadLabelVolume(self, nodeName, filename):
        """ Load a label volume into the MRML scene and set the display node """
        isLoaded, volumeNode = slicer.util.loadLabelVolume(filename, properties={'labelmap':True, 'name':nodeName}, returnNode=True)
        assert isLoaded, "File failed to load: {0}".format(filename)
        return volumeNode

    def loadData(self):
        """ Load some default data for development and set up a viewing scenario for it.
        """
        self.logging.debug("call")
        # A bare QPushButton doubles as a crude progress indicator while loading.
        dataDialog = qt.QPushButton();
        dataDialog.setText('Loading files for session %s...' % self.currentSession);
        dataDialog.show()
        t1NodeName = '%s_t1_average' % self.currentSession
        self.loadScalarVolume(t1NodeName, self.sessionFiles['t1_average'])
        t2NodeName = '%s_t2_average' % self.currentSession
        self.loadScalarVolume(t2NodeName, self.sessionFiles['t2_average'])
        for image in self.regions:
            regionNodeName = "%s_%s" % (self.currentSession, image)
            if self.config.has_option(image, 'label'): # uses all_Labels_seg.nii.gz
                imageThreshold = eval(self.config.get(image, 'label')) # Threshold value for all_Labels_seg.nii.gz
                self._all_Labels_seg(self.sessionFiles[image], nodeName=regionNodeName, level=imageThreshold, session=self.currentSession) # Create nodes in mrmlScene
            else: # TissueClassify image
                self.loadLabelVolume(regionNodeName, self.sessionFiles[image])
        dataDialog.close()

    def loadBackgroundNodeToMRMLScene(self, volumeNode):
        # Set up template scene
        self.logging.debug("call")
        compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*')
        for compositeNode in compositeNodes.values():
            try:
                compositeNode.SetBackgroundVolumeID(volumeNode.GetID())
            except AttributeError:
                raise IOError("Could not find nodes for session %s" % self.currentSession)
        applicationLogic = slicer.app.applicationLogic()
        applicationLogic.FitSliceToAll()

    def getEvaluationValues(self):
        """ Get the evaluation values from the widget """
        self.logging.debug("call")
        values = ()
        for region in self.regions:
            goodButton, badButton = self.widget._findRadioButtons(region)
            if goodButton.isChecked():
                values = values + (self.qaValueMap['good'],)
            elif badButton.isChecked():
                values = values + (self.qaValueMap['bad'],)
            else:
                # NOTE(review): this Exception is constructed but never
                # *raised*, so an unevaluated region falls through silently
                # and a short tuple is returned. Presumably `raise` was
                # intended -- callers rely on an exception here.
                Exception('Session cannot be changed until all regions are evaluated. Missing region: %s' % region)
        return values

    def onNextButtonClicked(self):
        """ Capture the evaluation values, write them to the database, reset the widgets, then load the next dataset """
        self.logging.debug("call")
        try:
            evaluations = self.getEvaluationValues()
        except:
            return
        columns = ('record_id',) + self.regions
        values = (self.sessionFiles['record_id'], ) + evaluations
        try:
            self.writeToDatabase(values)
        # NOTE(review): sqlite3 is never imported in this module (and the
        # backend is Postgres) -- if writeToDatabase raises, this handler
        # itself fails with NameError.
        except sqlite3.OperationalError:
            self.logging.error("SQL Error")
        # Advance to the next row, wrapping to the start of the batch.
        count = self.count + 1
        if count <= self.maxCount - 1:
            self.count = count
        else:
            self.count = 0
        self.loadNewSession()
        self.widget.resetWidget()

    def onPreviousButtonClicked(self):
        self.logging.debug("call")
        try:
            evaluations = self.getEvaluationValues()
        except:
            return
        columns = ('record_id', ) + self.regions
        values = (self.sessionFiles['record_id'], ) + evaluations
        self.writeToDatabase(values)
        # Step back one row, wrapping to the end of the batch.
        count = self.count - 1
        if count >= 0:
            self.count = count
        else:
            self.count = self.maxCount - 1
        self.loadNewSession()
        self.widget.resetWidget()

    def loadNewSession(self):
        self.logging.debug("call")
        self.constructFilePaths()
        self.setCurrentSession()
        self.loadData()

    def exit(self):
        # Release ('U'nlock) any records still held by this reviewer.
        self.logging.debug("call")
        self.database.unlockRecord('U')
# if __name__ == '__main__':
# import doctest
# doctest.testmod()
|
import json
import csv

# Convert sample.csv (header row, then "verificationLevel,userID" rows) into a
# JSON array of objects written to json_dump.json.
# Improvements: csv.reader instead of naive split(',') (handles quoting),
# `with` blocks guarantee the files are closed, and a trailing blank line no
# longer crashes int('') as the manual parser did.
records = []
with open('sample.csv', 'r', newline='') as csv_file:
    reader = csv.reader(csv_file)
    next(reader, None)  # skip the header row
    for row in reader:
        if not row:
            continue  # tolerate blank lines
        records.append({'verificationLevel': int(row[0]), 'userID': int(row[1])})

with open('json_dump.json', 'w+') as json_dump:
    json.dump(records, json_dump)
|
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from django.core import serializers
from .serializers import UserSerializer, ActivityPeriodSerializer, TimelineSerializer
from .models import User, ActivityPeriod
from django.http import JsonResponse
from django.views.generic import ListView
from django.shortcuts import render
from django.core import serializers
from rest_framework.response import Response
from django.http import HttpResponse
from .models import User
import json
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework import status
from rest_framework import generics
from rest_framework.permissions import IsAdminUser
from collections import namedtuple
class UserViewSet(viewsets.ModelViewSet):
    # Standard CRUD endpoints for User records.
    queryset = User.objects.all()
    serializer_class = UserSerializer
class ActivityPeriodViewSet(viewsets.ModelViewSet):
    # Standard CRUD endpoints for ActivityPeriod records.
    queryset = ActivityPeriod.objects.all()
    serializer_class = ActivityPeriodSerializer
class TimelineViewSet(viewsets.ViewSet):
    """
    A simple ViewSet for listing the Tweets and Articles in your Timeline.
    """
    def list(self, request):
        # Bundle the payload in an ad-hoc named tuple so the serializer
        # can address 'ok' and 'members' by attribute name.
        Timeline = namedtuple('Timeline', ('ok', 'members'))
        payload = Timeline(ok=True, members=User.objects.all())
        serializer = TimelineSerializer(payload, context={'request': request})
        return Response(serializer.data)
# class AllViewSet(generics.ListCreateAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
# permission_classes = [IsAdminUser]
# def list(self, request):
# # Note the use of `get_queryset()` instead of `self.queryset`
# queryset = self.get_queryset()
# serializer = UserSerializer(queryset, many=True)
# return Response(serializer.data)
# queryset = User.objects.all()
# serializer_class = UserSerializer
# # serializer_context = {'request': Request(request._request)}
# def list(self, request, *args, **kwargs):
# # User_list = User.objects.all()
# # # serializer_class = UserSerializer
# # # for user in User_list:
# # # userInfo = User.objects.filter(id=user.pk)
# # # user_serializer = UserSerializer(userInfo, many=True)
# # # activity_list = ActivityPeriod.objects.filter(user=user.pk)
# # # user_dic = {
# # # 'ok':True,
# # # 'members': User_list
# # # }
# # # data_a = json.dumps(User_list)
# # data = serializers.serialize('json', self.get_queryset())
# # # data = serializers.serialize('json', self.get_queryset())
# # # prices = Price.objects.filter(product=product).values_list('price','valid_from')
# # content = JSONRenderer().render(data)
# # # data = json.dumps(User_list)
# # return JsonResponse(content, safe=False)
# # queryset = User.objects.all()
# instance = self.get_object()
# serializer = self.get_serializer(instance)
# # return Response(serializer.data)
# data = {
# 'ok':True,
# 'members': serializer.data
# }
# return JsonResponse(data) |
### Masks out mangrove and planted forest pixels from WHRC biomass 2000 raster so that
### only non-mangrove, non-planted forest pixels are left of the WHRC biomass 2000 raster
import datetime
import rasterio
import os
from shutil import copyfile
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
def mask_biomass(tile_id):
    """Mask mangrove and planted-forest pixels out of a WHRC biomass 2000 tile.

    Writes a new tile (output naming pattern) that keeps only
    non-mangrove, non-planted-forest biomass pixels. If neither mask tile
    exists, the input tile is simply copied under the output name.
    Python 2 / rasterio code; runs windowed to bound memory use.
    """
    print "Processing:", tile_id

    # Start time
    start = datetime.datetime.now()

    # Names of the input files
    WHRC_biomass = '{0}_{1}.tif'.format(tile_id, cn.pattern_WHRC_biomass_2000_unmasked)
    mangrove_biomass = '{0}_{1}.tif'.format(tile_id, cn.pattern_mangrove_biomass_2000)
    planted_forest_gain = '{0}_{1}.tif'.format(tile_id, cn.pattern_annual_gain_AGC_BGC_planted_forest_unmasked)

    # Name of the output file
    WHRC_biomass_non_mang_non_planted = '{0}_{1}.tif'.format(tile_id, cn.pattern_WHRC_biomass_2000_non_mang_non_planted)

    print "Checking if there are mangrove or planted forest tiles for", tile_id

    # Doing this avoids unnecessarily processing biomass tiles
    if os.path.exists(mangrove_biomass) or os.path.exists(planted_forest_gain):

        print " Mangrove or planted forest tiles found for {}. Masking WHRC biomass using them...".format(tile_id)

        # Opens the unmasked WHRC biomass 2000
        WHRC_src = rasterio.open(WHRC_biomass)

        # Grabs metadata about the unmasked biomass, like its location/projection/cellsize
        kwargs = WHRC_src.meta

        # Grabs the windows of the tile (stripes) to iterate over the entire tif without running out of memory
        windows = WHRC_src.block_windows(1)

        # Checks whether there are mangrove or planted forest tiles. If so, they are opened.
        # NOTE(review): the bare excepts swallow any open error, not just a
        # missing tile — presumably intentional ("tile absent"); confirm.
        try:
            mangrove_src = rasterio.open(mangrove_biomass)
            print " Mangrove tile found for {}".format(tile_id)
        except:
            print " No mangrove tile for {}".format(tile_id)

        try:
            planted_forest_src = rasterio.open(planted_forest_gain)
            print " Planted forest tile found for {}".format(tile_id)
        except:
            print " No planted forest tile for {}".format(tile_id)

        # Updates kwargs for the output dataset
        kwargs.update(
            driver='GTiff',
            compress='lzw'
        )

        # The output file, biomass masked by mangroves and planted forests
        # NOTE(review): dst_WHRC is never explicitly closed; rasterio flushes
        # on garbage collection, but an explicit close/with would be safer.
        dst_WHRC = rasterio.open(WHRC_biomass_non_mang_non_planted, 'w', **kwargs)

        # Iterates across the windows (1 pixel strips) of the input tiles
        for idx, window in windows:

            # Creates a processing window the WHRC raster
            WHRC_masked = WHRC_src.read(1, window=window)
            # print WHRC_masked[0][:20]

            # If there is a mangrove tile, this masks the mangrove biomass pixels so that only non-mangrove pixels are output
            if os.path.exists(mangrove_biomass):

                # Reads in the mangrove tile's window
                mangrove_AGB = mangrove_src.read(1, window=window)
                # print mangrove_AGB[0][:20]

                # Gets the NoData value of the mangrove biomass tile
                nodata = uu.get_raster_nodata_value(mangrove_biomass)

                # Reclassifies mangrove biomass to 1 or 0 to make a mask of mangrove pixels.
                # Ultimately, only these pixels (ones without mangrove biomass) will get values.
                # I couldn't figure out how to do this without first converting the NoData values to an intermediate value (99)
                mangrove_AGB[mangrove_AGB > nodata] = 99
                mangrove_AGB[mangrove_AGB == nodata] = 1
                mangrove_AGB[mangrove_AGB == 99] = nodata
                # print mangrove_AGB[0][:20]

                # Casts the mangrove biomass mask array into int16 so that it can be combined with the WHRC int array
                # NOTE(review): this casts to 'byte' while the planted-forest
                # branch casts to 'int16' — confirm the asymmetry is intended.
                mangrove_AGB = mangrove_AGB.astype('byte')
                # print mangrove_AGB[0][:20]

                # Applies the mask
                WHRC_masked = WHRC_masked * mangrove_AGB
                # print WHRC_masked[0][:20]

            # If there is a planted forest tile, this masks the planted forest pixels so that only non-planted forest pixels
            # are output.
            # Process is same as for mangroves-- non-planted forest pixels are the only ones output
            if os.path.exists(planted_forest_gain):
                planted_forest = planted_forest_src.read(1, window=window)
                nodata = uu.get_raster_nodata_value(planted_forest_gain)
                planted_forest[planted_forest > nodata] = 99
                planted_forest[planted_forest == nodata] = 1
                planted_forest[planted_forest == 99] = nodata
                planted_forest = planted_forest.astype('int16')
                WHRC_masked = WHRC_masked * planted_forest

            # Writes the output window to the output file
            dst_WHRC.write_band(1, WHRC_masked, window=window)

            # sys.exit()

    # If no mangrove or planted forest tile was found, the original biomass tile is simply copied with a new name
    # so it can be copied to s3 with the rest of the outputs.
    else:

        print " No mangrove or planted forest tile found for {}. Copying tile with output pattern...".format(tile_id)
        copyfile(WHRC_biomass, WHRC_biomass_non_mang_non_planted)

    end = datetime.datetime.now()
    elapsed_time = end-start

    print " Processing time for tile", tile_id, ":", elapsed_time
|
# Iterative factorial: read n and multiply 1..n into the accumulator.
n = int(input('Enter the number: '))
fact = 1
for i in range(1,n+1):
    fact *= i
print(fact)
#------factorial via a helper function-------
def fact(f,n):
    """Return the accumulator f multiplied by n.

    Note: despite the original comment, this is not recursive — the loop
    below applies it repeatedly to build the factorial.
    """
    return f * n
n = int(input('Enter the number: '))
f = 1
# Build the factorial by repeatedly applying fact() to the running product.
for i in range(1,n+1):
    f = fact(f,i)
print(f)
#!/bin/python
# Python 2. For each test case, count the minimum deletions needed so that
# no two adjacent characters are equal (i.e. count adjacent duplicate pairs).
t = int(raw_input().strip())
for _ in range(t):
    delete = 0
    s = list(raw_input().strip())
    for i in range(0, len(s)- 1):
        if s[i] == s[i+1]:
            # One of the two matching neighbours must be deleted.
            delete += 1
    print delete
|
# -*- coding: utf-8 -*-
"""
main_window.py -- GUI main window.
"""
# This software is distributed under the FreeBSD License.
# See the accompanying file LICENSE for details.
#
# Copyright 2011 Benjamin Hepp
import sys, os, random
import numpy
import logging
from PyQt4.QtCore import *
from PyQt4.QtGui import *
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from results_window import ResultsWindow
from channel_description_widgets import ChannelDescriptionTab
from cluster_configuration_widgets import ClusterConfigurationTab
from gallery_window import GalleryWindow
from gui_utils import ImagePixmapFactory, ImageFeatureTextFactory
import parameter_widgets
from ..core import pipeline
from ..core import importer
from ..core import db_importer
from ..core import cp_importer
from ..core import analyse
from ..core import parameter_utils as utils
class ActionButton(QPushButton):
    """Push button that re-emits its click as an 'action' signal carrying
    the module and action name it was built for."""
    __pyqtSignals__ = ('action',)

    def __init__(self, descr, module, action_name, parent=None):
        QPushButton.__init__(self, descr, parent)
        self.module = module
        self.action_name = action_name
        self.connect(self, SIGNAL('clicked()'), self.on_clicked)

    def on_clicked(self):
        # Forward the plain clicked() signal as a parameterised 'action' signal.
        self.emit(SIGNAL('action'), self.module, self.action_name)
class MainWindow(QMainWindow):
    """Main application window (PyQt4, Python 2).

    Hosts a tab per configurable pipeline module, buttons to run the
    analysis pipeline and view images, and menus for project file
    management. Pipeline progress/completion is reported via Qt signals.
    """
    def __init__(self, simple_ui=False, parent=None):
        # simple_ui hides the module configuration tabs in the main frame.
        self.__simple_ui = simple_ui
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('Main')
        self.project_unsaved = False
        self.results_window = None
        self.image_viewer = None
        self.results_window = None
        self.channelDescriptionTab = None
        self.clusterConfigurationTab = None
        self.__pipeline_running = False
        self.pl = None
        self.build_menu()
        self.build_main_frame()
        self.build_status_bar()
        """self.channelDescription = {}
        self.channelDescription['R'] = 'Nucleus staining (A568)'
        self.channelDescription['G'] = 'Protein staining (A488)'
        self.channelDescription['B'] = 'Cell staining (DAPI)'
        self.channelDescription['O1'] = 'Cell segmentation'
        self.channelDescription['O2'] = 'Nucleus segmentation'"""

    def closeEvent(self, event):
        # Only close (and tear down child windows) if the project is saved
        # or the user explicitly discards changes.
        if self.project_saved():
            if self.results_window:
                self.results_window.close()
            if self.image_viewer:
                self.image_viewer.close()
            event.accept()
        else:
            event.ignore()

    def project_saved(self):
        """Return True when it is safe to proceed (project saved/discarded),
        False when the user cancelled."""
        if self.project_unsaved:
            msgBox = QMessageBox(
                QMessageBox.Question,
                'Unsaved project',
                'The current project has not been saved!',
                QMessageBox.Cancel | QMessageBox.Save | QMessageBox.Discard,
                self
            )
            result = msgBox.exec_()
            if result == QMessageBox.Save:
                self.on_save_project()
            elif result == QMessageBox.Cancel:
                return False
            elif result == QMessageBox.Discard:
                self.project_unsaved = False
        return True

    def on_new_project(self):
        # Reset module configuration and rebuild the tabs from scratch.
        if self.project_saved():
            utils.reset_module_configuration()
            self.reset_module_tab_widget(self.tab_widget)
            self.update_module_tab_widget(self.tab_widget)
            self.statusBar().showMessage('New project')

    def load_project_file(self, path):
        """Load a project file and rebuild the module tabs."""
        self.statusBar().showMessage('Loading project file...')
        utils.load_module_configuration(path)
        self.reset_module_tab_widget(self.tab_widget)
        self.update_module_tab_widget(self.tab_widget)
        self.statusBar().showMessage('Project file loaded')

    def save_project_file(self, path):
        """Persist the current module configuration to a project file."""
        self.statusBar().showMessage('Saving project file...')
        utils.save_module_configuration(path)
        self.statusBar().showMessage('Project file saved')

    def load_configuration_file(self, path):
        # Same loader as projects: configuration files override/extend the
        # current project configuration.
        self.statusBar().showMessage('Loading configuration file...')
        utils.load_module_configuration(path)
        self.reset_module_tab_widget(self.tab_widget)
        self.update_module_tab_widget(self.tab_widget)
        self.statusBar().showMessage('Configuration file loaded')

    def on_open_project(self):
        """Prompt for a project file and open it; returns True on success."""
        if self.project_saved():
            file_choices = "Project file (*.phn *.yaml);;All files (*)"
            path = unicode(QFileDialog.getOpenFileName(self,
                            'Open file', '',
                            file_choices))
            if path:
                self.on_new_project()
                self.load_project_file(path)
                self.statusBar().showMessage('Opened %s' % path)
                return True
        return False

    def on_open_configuration(self):
        """Prompt for a configuration file and load it; returns True on success."""
        if self.project_saved():
            file_choices = "Configuration file (*.yaml);;All files (*)"
            path = unicode(QFileDialog.getOpenFileName(self,
                            'Open file', '',
                            file_choices))
            if path:
                self.load_configuration_file(path)
                self.statusBar().showMessage('Opened configuration file %s' % path)
                return True
        return False

    def on_save_project(self):
        """Prompt for a target path and save the project; returns True on success."""
        file_choices = "Project file (*.phn *.yaml);;All files (*)"
        path = unicode(QFileDialog.getSaveFileName(self,
                        'Save file', '',
                        file_choices))
        if path:
            self.save_project_file(path)
            self.project_unsaved = False
            self.statusBar().showMessage('Saved to %s' % path)
            return True
        return False

    def on_close(self):
        if self.project_saved():
            self.close()

    def on_about(self):
        msg = """ GUI """
        QMessageBox.about(self, "About", msg.strip())

    def on_update_progress(self, progress):
        # Slot for the pipeline's 'updateProgress' signal.
        self.progress_bar.setValue(progress)

    def on_start_cancel(self):
        """Start the pipeline, or cancel it when it is already running.

        Refuses to start while any module has unset parameters or unmet
        requirements.
        """
        run_pipeline = True
        modules = utils.list_modules()
        for module in modules:
            if not utils.all_parameters_set(module):
                QMessageBox(
                    QMessageBox.Warning,
                    'Not all required parameters for module %s have been set' % module,
                    'Unable to start pipeline',
                    QMessageBox.Ok,
                    self
                ).exec_()
                run_pipeline = False
                break
            elif not utils.all_requirements_met(module):
                QMessageBox(
                    QMessageBox.Warning,
                    'Not all requirements for module %s have been fulfilled' % module,
                    'Unable to start pipeline',
                    QMessageBox.Ok,
                    self
                ).exec_()
                run_pipeline = False
                break
        if run_pipeline:
            if self.__pipeline_running:
                # Button acts as 'Cancel' while the pipeline runs.
                self.start_cancel_button.setText('Perform cell selection')
                self.pl.stop()
                self.__pipeline_running = False
            else:
                self.start_cancel_button.setText('Cancel')
                self.progress_bar.setRange(0, 100)
                self.progress_bar.setFormat('processing input data - %p%')
                try:
                    pdc = importer.Importer().get_pdc()
                    clusterConfiguration = self.clusterConfigurationTab.clusterConfiguration
                    self.pl = pipeline.Pipeline(pdc, clusterConfiguration)
                    self.pl.connect(self.pl, pipeline.SIGNAL('updateProgress'), self.on_update_progress)
                    self.pl.connect(self.pl, pipeline.SIGNAL('finished()'), self.on_pipeline_finished)
                    self.__pipeline_running = True
                    # Quality control runs first; on_pipeline_finished chains
                    # the pre-filtering stage afterwards.
                    self.__quality_control_done = False
                    self.pl.start_quality_control()
                    #self.pl.start()
                    #pl.run(self.on_update_progress)
                except:
                    self.progress_bar.setFormat('Idling...')
                    self.statusBar().showMessage('Unable to start pipeline thread!')
                    raise

    def on_pipeline_finished(self):
        """Slot for the pipeline's 'finished()' signal; chains the two
        pipeline stages and finally shows the results window."""
        try:
            pl_result = self.pl.get_result()
        except Exception, e:
            pl_result = False
        if pl_result:
            if self.__quality_control_done:
                self.statusBar().showMessage('Showing results window')
                print 'creating results window...'
                channelMapping = self.channelDescriptionTab.channelMapping
                channelDescription = self.channelDescriptionTab.channelDescription
                if self.results_window:
                    self.results_window.close()
                self.results_window = ResultsWindow(self.pl, channelMapping, channelDescription, self.__simple_ui)
                self.results_window.show()
                self.progress_bar.setFormat('Idling...')
                self.start_cancel_button.setText('Perform cell selection')
                self.__pipeline_running = False
                self.pl.disconnect(self.pl, pipeline.SIGNAL('updateProgress'), self.on_update_progress)
                self.pl.disconnect(self.pl, pipeline.SIGNAL('finished()'), self.on_pipeline_finished)
                #del self.pl
            else:
                # First stage (quality control) done: start pre-filtering.
                self.__quality_control_done = True
                self.pl.start_pre_filtering()
        else:
            self.statusBar().showMessage('Error while running pipeline')
            self.progress_bar.setFormat('Idling...')
            self.start_cancel_button.setText('Perform cell selection')
            self.__pipeline_running = False
            self.pl.disconnect(self.pl, pipeline.SIGNAL('updateProgress'), self.on_update_progress)
            self.pl.disconnect(self.pl, pipeline.SIGNAL('finished()'), self.on_pipeline_finished)
            #del self.pl

    def on_parameter_changed(self, module, param_name):
        # Any parameter edit marks the project dirty and may unlock modules.
        self.project_unsaved = True
        self.update_module_tab_widget(self.tab_widget)

    def on_module_action(self, module, action_name):
        """Trigger a module action and surface failures in a message box."""
        self.project_unsaved = True
        try:
            result = utils.trigger_action(module, action_name)
            self.statusBar().showMessage(str(result))
            self.update_module_tab_widget(self.tab_widget)
        except Exception,e:
            QMessageBox(
                QMessageBox.Warning,
                '%s/%s' % (module,action_name),
                str(e),
                QMessageBox.Ok,
                self
            ).exec_()
            raise

    def build_module_tab_widget(self):
        tab_widget = QTabWidget()
        self.modules_used = []
        self.update_module_tab_widget(tab_widget)
        return tab_widget

    def reset_module_tab_widget(self, tab_widget):
        # Remove every tab and forget which modules were shown.
        while tab_widget.count() > 0:
            widget = tab_widget.widget(0)
            tab_widget.removeTab(0)
            del widget
        self.modules_used = []

    def update_module_tab_widget(self, tab_widget):
        """Synchronise tabs with the current module configuration.

        Enables the run/view buttons only when every module has all
        parameters set and requirements met; rebuilds tabs when a
        previously shown module was invalidated.
        """
        all_requirements_and_parameters_met = True
        modules = utils.list_modules()
        for module in modules:
            if not utils.all_parameters_set(module):
                #print 'Not all required parameters for module %s have been set' % module
                all_requirements_and_parameters_met = False
                self.statusBar().showMessage('Not all parameters for module %s have been set' % module)
                logger.info('Not all parameters for module %s have been set' % module)
                break
            elif not utils.all_requirements_met(module):
                #print 'Not all requirements for module %s have been fulfilled' % module
                all_requirements_and_parameters_met = False
                self.statusBar().showMessage('Not all requirements for module %s have been met' % module)
                logger.info('Not all requirements for module %s have been met' % module)
                break
        if all_requirements_and_parameters_met:
            self.statusBar().showMessage('Ready')
        self.start_cancel_button.setEnabled(all_requirements_and_parameters_met)
        self.view_images_button.setEnabled(all_requirements_and_parameters_met)
        reset_tabs = False
        for module in modules:
            if utils.is_module_invalid(module):
                if module in self.modules_used:
                    reset_tabs = True
                utils.validate_module(module)
        if reset_tabs:
            # Remember the selected tab so it can be restored after rebuild.
            currentTabIndex = self.tab_widget.currentIndex()
            currentTabName = self.modules_used[currentTabIndex]
            self.reset_module_tab_widget(self.tab_widget)
        modules = utils.list_modules()
        for module in modules:
            if module not in self.modules_used and utils.all_requirements_met(module):
                params = utils.list_parameters(module)
                actions = utils.list_actions(module)
                if len(params) > 0 or len(actions) > 0:
                    self.modules_used.append(module)
                    layout = QVBoxLayout()
                    for param in params:
                        param_widget = parameter_widgets.create_widget(module, param, importer.Importer().get_pdc())
                        self.connect(param_widget, SIGNAL('parameterChanged'), self.on_parameter_changed)
                        layout.addWidget(param_widget)
                    for action in actions:
                        descr = utils.get_action_descr(module, action)
                        btn = ActionButton(descr, module, action)
                        self.connect(btn, SIGNAL('action'), self.on_module_action)
                        layout.addWidget(btn)
                    widget = QWidget()
                    widget.setLayout(layout)
                    scrollarea = QScrollArea()
                    scrollarea.setWidgetResizable(True)
                    scrollarea.setWidget(widget)
                    tab_widget.addTab(scrollarea, utils.get_module_descr(module))
        # Channel/clustering tabs are created lazily once import data exists.
        if importer.Importer().get_pdc() != None:
            if self.channelDescriptionTab == None:
                self.channelDescriptionTab = ChannelDescriptionTab(importer.Importer().get_pdc())
            if self.clusterConfigurationTab == None:
                self.clusterConfigurationTab = ClusterConfigurationTab(importer.Importer().get_pdc())
        if self.channelDescriptionTab != None:
            tab_widget.addTab(self.channelDescriptionTab, 'Channels')
        if self.clusterConfigurationTab != None:
            tab_widget.addTab(self.clusterConfigurationTab, 'Clustering configuration')
        if reset_tabs:
            # Restore the previously selected module tab, if still present.
            for i, module in enumerate(self.modules_used):
                if module == currentTabName:
                    self.tab_widget.setCurrentIndex(i)
                    break

    def on_view_images(self):
        """Open a gallery window showing every imported image."""
        channelMapping = self.channelDescriptionTab.channelMapping
        channelDescription = self.channelDescriptionTab.channelDescription
        pdc = importer.Importer().get_pdc()
        if self.pl == None:
            self.pl = pipeline.Pipeline(pdc, self.clusterConfigurationTab.clusterConfiguration)
        self.image_viewer = GalleryWindow(self.pl, pdc.imgFeatureIds, channelMapping, channelDescription, True)
        selectionIds = numpy.arange(len(pdc.images))
        pixmapFactory = ImagePixmapFactory(pdc, channelMapping)
        featureFactory = ImageFeatureTextFactory(pdc)
        self.image_viewer.on_selection_changed(-1, selectionIds, pixmapFactory, featureFactory)
        self.image_viewer.show()

    def build_main_frame(self):
        """Create the central widget: run/view buttons, progress bar and
        (unless simple_ui) the module configuration tabs."""
        self.main_frame = QWidget()
        self.start_cancel_button = QPushButton('Perform cell selection')
        self.start_cancel_button.setEnabled(False)
        self.connect(self.start_cancel_button, SIGNAL('clicked()'), self.on_start_cancel)
        self.view_images_button = QPushButton('View images')
        self.view_images_button.setEnabled(False)
        self.connect(self.view_images_button, SIGNAL('clicked()'), self.on_view_images)
        self.progress_bar = QProgressBar()
        self.progress_bar.setFormat('Idling...')
        self.progress_bar.setValue(self.progress_bar.minimum())
        self.tab_widget = self.build_module_tab_widget()
        #
        # Layout with box sizers
        #
        hbox1 = QHBoxLayout()
        hbox1.addWidget(self.start_cancel_button)
        hbox1.addWidget(self.progress_bar, 1)
        hbox1.addWidget(self.view_images_button)
        vbox = QVBoxLayout()
        vbox.addLayout(hbox1)
        if not self.__simple_ui:
            vbox.addWidget(self.tab_widget, 1)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

    def build_status_bar(self):
        self.status_text = QLabel('Main')
        self.statusBar().addWidget(self.status_text, 1)

    def build_menu(self):
        """Create the Project and Help menus."""
        self.project_menu = self.menuBar().addMenu("&Project")
        new_project_action = self.make_action("&New project",
            shortcut="Ctrl+N", slot=self.on_new_project,
            tip="Create a new project")
        open_project_action = self.make_action("&Open project",
            shortcut="Ctrl+O", slot=self.on_open_project,
            tip="Open a project file")
        save_project_action = self.make_action("&Save project",
            shortcut="Ctrl+S", slot=self.on_save_project,
            tip="Save the current project to a file")
        open_configuration_action = self.make_action("Open &configuration file",
            shortcut="Ctrl+C", slot=self.on_open_configuration,
            tip="Open a configuration file overriding or extending the current project configuration")
        quit_action = self.make_action("&Quit", slot=self.on_close,
            shortcut="Ctrl+Q", tip="Close the application")
        self.add_actions(self.project_menu,
            (new_project_action, open_project_action, save_project_action, None,
            open_configuration_action, None, quit_action)
        )
        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.make_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')
        self.add_actions(self.help_menu, (about_action,))

    def add_actions(self, target, actions):
        # None entries in 'actions' become menu separators.
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def make_action( self, text, slot=None, shortcut=None,
                        icon=None, tip=None, checkable=False,
                        signal="triggered()"):
        """Build a QAction with optional icon, shortcut, tooltip and slot."""
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
|
import pandas as pd
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

# Enter your credentials
your_email = 'enter_your_email_here@gmail.com'
your_password = 'enter_your_password_here'
smtp_protocol = 'smtp.gmail.com'  # This is unique as per email service of your choice

# Initiate SMTP connection to your email service. You can google these if unsure
try:
    server = smtplib.SMTP_SSL(smtp_protocol, 465)
    server.ehlo()
    server.login(your_email, your_password)
except Exception:
    # Fix: the original continued after a failed connect, then crashed with
    # an undefined 'server'. Stop cleanly instead.
    print("We couldn't connect to your email")
    raise SystemExit(1)

# Access excel details with pandas; each column becomes a pandas Series.
email_list = pd.read_excel('C:/Users/pmita/Desktop/Python/Email/drafts/list_of_customers.xlsx')
names = email_list['NAME']      # customer names
emails = email_list['EMAIL']    # destination addresses
invoices = email_list['INVOICE']
dates = email_list['DATE']

# Loop through each individual item and send email one at a time
for email_index in range(len(emails)):
    print("Sending Email ... Please Wait \n")
    # Extract data for this row; cast to str because pandas may hand back
    # numeric/timestamp objects that cannot be concatenated to strings.
    name = str(names[email_index])
    email = emails[email_index]
    invoice = str(invoices[email_index])
    date = str(dates[email_index])
    # Construct the multipart email with all necessary details
    msg = MIMEMultipart()
    msg['From'] = your_email
    msg['To'] = email
    msg['Subject'] = 'Invoice ' + invoice
    # Fixed typos in the customer-facing text ("comolpeted", "a invoice")
    # and the missing spaces around the completion date.
    message = ('Dear ' + name + ',\n\n Thank you so much for choosing our service.'
               '\n\n Your order was completed on ' + date + ' with an invoice number of '
               + invoice + '. Please keep those in hand. \n\n Regards,\n The X team')
    msg.attach(MIMEText(message, 'plain'))
    email_body = msg.as_string()
    # Send the combined email
    try:
        server.sendmail(your_email, [email], email_body)
        print("Email was sent to %s \n" % name)
    except Exception:
        print("Something went wrong ... Email not sent")

# Shut down the connection with the server once finished
server.close()
|
import datetime
import os
import shutil
import numpy as np
def makedir(dirname):
    """Safely creates a new directory (and any missing parents).

    Uses exist_ok=True instead of an exists() pre-check, which removes the
    race where another process creates the directory between the check and
    makedirs(). Idempotent: calling on an existing directory is a no-op.
    """
    os.makedirs(dirname, exist_ok=True)
def rmdir(dirname):
    """Deletes a non-empty directory.

    Prompts interactively until the user answers "y" or "n" (case
    insensitive); deletes only on "y", ignoring any removal errors.
    """
    answer = ""
    while answer not in ["y", "n"]:
        answer = input("Permanently delete {} [Y/N]?".format(dirname)).lower()
    if answer == "y":
        shutil.rmtree(dirname, ignore_errors=True)
    else:
        return
def gen_timestamp():
    """Generates a timestamp in YYYY-MM-DD-hh-mm-ss format.

    Formats directly with strftime rather than slicing str(datetime);
    the output is identical (zero-padded, hyphen separated).
    """
    return datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
def gen_checkerboard(n, s):
    """Creates an nxn checkerboard whose cells are sxs pixels.

    Returns an (n*s, n*s) array of 0/1 with the top-left cell equal to 0.
    np.row_stack was deprecated and removed in NumPy 2.0; np.vstack is the
    documented equivalent.
    """
    row_even = (n // 2) * [0, 1]
    row_odd = (n // 2) * [1, 0]
    checkerboard = np.vstack((n // 2) * (row_even, row_odd))
    # Blow each cell up to s x s pixels.
    return checkerboard.repeat(s, axis=0).repeat(s, axis=1)
|
from .HALResponse import HALResponse
from .MomentResponse import MomentResponse
from .ApiIndexResponse import ApiIndexResponse |
import threading
from subprocess import Popen, PIPE
import time
def popenAndCall(onExit, *popenArgs, **popenKWArgs):
    """
    Runs a subprocess.Popen, and then calls the function onExit when the
    subprocess completes.
    Use it exactly the way you'd normally use subprocess.Popen, except include a
    callable to execute as the first argument. onExit is a callable object, and
    *popenArgs and **popenKWArgs are simply passed up to subprocess.Popen.
    Returns the watcher thread (already started).
    """
    def _watch(callback, args, kwargs):
        # Runs in a background thread: block until the child exits,
        # then fire the callback.
        child = Popen(*args, **kwargs)
        child.wait()
        callback()

    watcher = threading.Thread(target=_watch,
                               args=(onExit, popenArgs, popenKWArgs))
    watcher.start()
    return watcher
def onExit():
    """Demo callback fired when the watched subprocess finishes."""
    print("Success baby.")
# Demo: launch zplay.py with elevated privileges, let it run 10 seconds,
# then terminate it. (Note: popenAndCall is not exercised here.)
p = Popen(["sudo", "python", "zplay.py", "hi"], shell=False, stdin=PIPE, stdout=PIPE)
time.sleep(10)
p.terminate()
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 14:32:38 2020
@author: Ashima
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the superReducedString function below.
# Complete the superReducedString function below.
def superReducedString(string):
    """Repeatedly delete adjacent pairs of equal characters.

    Returns the fully reduced string, or "Empty String" when nothing
    remains. Single O(n) stack pass replaces the original repeated-scan
    O(n^2) loop; the fixed point of pair deletion is order-independent,
    so the result is identical.
    """
    stack = []
    for ch in string:
        if stack and stack[-1] == ch:
            # Current char pairs with the previous survivor: both vanish.
            stack.pop()
        else:
            stack.append(ch)
    return "".join(stack) if stack else "Empty String"
if __name__ == '__main__':
    # HackerRank harness: read the input string from stdin and write the
    # result to the judge-supplied OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = superReducedString(s)
    fptr.write(result + '\n')
    fptr.close()
import time
import matplotlib
import pandas as pd
matplotlib.use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from sklearn import manifold
from sklearn.decomposition import PCA

# Raw SNP matrix (transposed so rows are samples) and per-sample nation labels.
SNP_Raw = np.load('SNP_Raw.npy').transpose()
Nation_Raw = np.load('Nation_Raw.npy', allow_pickle=True)

# Time the 2-D metric MDS embedding of the raw SNP data.
starttime = time.perf_counter()
nmds = manifold.MDS(n_components=2, normalized_stress="auto").fit(SNP_Raw)
endtime = time.perf_counter()
embedding = nmds.embedding_.copy()

# Persist the embedding alongside the elapsed wall-clock time.
dataframe = pd.DataFrame({'X': embedding[:, 0], 'Y': embedding[:, 1], 'CPU Time': (endtime - starttime)})
dataframe.to_csv('..\\Preliminary_Result\\Direct_MDS_Embedding.csv', index=False, sep=',')

# Rotate the 2-D embedding onto its principal axes for a canonical orientation.
pca = PCA(n_components=2)
embedding = pca.fit_transform(embedding)

# Scatter plot, one colour per nation; each nation labelled only once so the
# legend has no duplicates.
Nations = list(set(Nation_Raw))
plt.figure()
lw = 1.5
colors = ['blue', 'orange', 'green', 'red', 'brown', 'pink', 'olive']
legend_used = []
for i in range(0, embedding.shape[0]):
    i_nation = Nation_Raw[i]
    i_color = colors[Nations.index(i_nation)]
    if i_nation in legend_used:
        plt.scatter(embedding[i, 0], embedding[i, 1], color=i_color, alpha=0.8, lw=lw)
    else:
        plt.scatter(embedding[i, 0], embedding[i, 1], color=i_color, alpha=0.8, lw=lw, label=i_nation)
        legend_used.append(i_nation)
# The second legend() call overrides the first, anchoring it outside the axes.
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.legend(bbox_to_anchor=(1.05, 0), loc=3, borderaxespad=0)
plt.axis('scaled')
plt.savefig('..\\Preliminary_Result\\Direct_MDS_followed_by_PCA_Embedding.png', dpi=600, bbox_inches='tight')
plt.show()
|
#
# @lc app=leetcode id=877 lang=python3
#
# [877] Stone Game
#
# It is the same as 486.
# @lc code=start
class Solution:
    """LeetCode 877 (Stone Game) solved as top-down minimax with memoisation.

    helper(left, right) is the best score margin the player to move can
    secure on piles[left..right]; a positive margin means that player wins.
    Identical approach to problem 486.
    """
    def stoneGame(self, piles: List[int]) -> bool:
        return self.helper(0, len(piles) - 1, piles, {})

    def helper(self, left, right, piles, cache):
        # Single pile left: the mover takes it all.
        if left == right:
            return piles[left]
        key = (left, right)
        if key not in cache:
            take_left = piles[left] - self.helper(left + 1, right, piles, cache)
            take_right = piles[right] - self.helper(left, right - 1, piles, cache)
            cache[key] = max(take_left, take_right)
        return cache[key]
# @lc code=end
|
import argparse
from flask import Flask, request, jsonify
from speech_engine import YandexSpeechEngine, GoogleSpeechEngine
# Command-line interface: only the listening port is configurable.
parser = argparse.ArgumentParser(description='Speaker')
parser.add_argument('-p', '--port', type=int, default=8080,
                    help='port to use')
app = Flask(__name__)
# One shared engine instance per backend; chosen per-request in speak().
gse = GoogleSpeechEngine()
yse = YandexSpeechEngine()
@app.route('/speak', methods=['POST', 'GET'])
def speak():
    # Speak the requested text aloud.
    # Query params: text (required), language (default "en"),
    # engine ("yandex" or "google"; anything else falls back to google).
    # Returns JSON {"code": ..., "result": ...}.
    response = {}
    text = request.args.get("text")
    language = request.args.get("language")
    engine = request.args.get("engine")
    if text is None:
        response["code"] = 500
        response["result"] = '"text" parameter is necessary'
        return jsonify(**response)
    if language is None:
        language = "en"
    if engine == "yandex":
        engine = yse
    elif engine == "google":
        engine = gse
    else:
        # Missing or unrecognised engine name: default to Google.
        engine = gse
    print text, language, engine
    engine.speak(text, language)
    response["code"] = 200
    response["result"] = 'ok'
    return jsonify(**response)
def main(debug=False):
    """Parse CLI args and run the Flask app.

    Args:
        debug: forwarded to Flask's app.run. Added (default False, so
        existing callers are unaffected) because the __main__ guard called
        main(debug=True) while main() accepted no arguments — a TypeError.
    """
    args = parser.parse_args()
    app.run(host='0.0.0.0', port=args.port, debug=debug)

if __name__ == "__main__":
    main(debug=True)
import os
from flask_platform import app
def main():
    # Launch the Flask application on all interfaces, port 5000.
    app.run(host='0.0.0.0',port=5000)
    return

if __name__=="__main__":
    main()
|
#!/usr/bin/env python
import yaml
import multiprocessing
import twitterneo4j.variables as variables
def configure(config_yaml_file):
    """Read a YAML config file and publish its settings on the shared
    twitterneo4j.variables module.

    Args:
        config_yaml_file: path to the YAML configuration file.

    Raises:
        KeyError: if a required configuration key is missing.
    """
    #
    # Read the config
    #
    # Context manager guarantees the file is closed even if parsing fails.
    # safe_load replaces the deprecated bare yaml.load: it refuses
    # arbitrary-object YAML tags, which can otherwise execute code.
    with open(config_yaml_file, "r") as config_yaml:
        config = yaml.safe_load(config_yaml)

    # Required Configuration
    variables.neo4j_host = config["neo4j_host"]
    variables.neo4j_username = config["neo4j_username"]
    variables.neo4j_password = config["neo4j_password"]
    variables.graph_url = config["graph_url"]
    variables.cypher_query = config["cypher_query"]

    if config["cpu_count"] == "all":
        # "all" means use every core on this machine.
        variables.cpu_count = multiprocessing.cpu_count()
    else:
        variables.cpu_count = int(config["cpu_count"])

    variables.threads_per_cpu = config["threads_per_cpu"]
    variables.tweet_path = config["tweet_path"]

    # Optional Configuration
    if "uniqueness_constraints" in config:
        variables.uniqueness_constraints = config["uniqueness_constraints"]
    if "indexes" in config:
        variables.indexes = config["indexes"]
|
#Student Name:Wentao Wu; Student ID#:112524704
class Node:
    """Base class for all AST nodes.

    Default evaluate/execute are no-ops returning 0; subclasses override
    __init__ (without calling this one) and evaluate.
    """
    def __init__(self):
        print("init node")

    def evaluate(self):
        return 0

    def execute(self):
        return 0
class BlockNode(Node):
    """A sequence of statements; evaluating runs each one in order."""
    def __init__(self, sl):
        self.statementList = sl

    def evaluate(self):
        for stmt in self.statementList:
            stmt.evaluate()
class AssignNode1(Node):
    """Assignment 'name = value' into the global symbol table d.

    Either side may itself be a key already present in d; otherwise it is
    evaluated as a subexpression (the value is resolved before the target,
    matching the original evaluation order).
    """
    def __init__(self, v1, v2):
        self.name = v1
        self.value = v2

    def evaluate(self):
        if self.value in d:
            val = d[self.value]
        else:
            val = self.value.evaluate()
        if self.name in d:
            d[self.name] = val
        else:
            d[self.name.evaluate()] = val
class AssignNode2(Node):
    """Indexed assignment 'name[index] = value' into the symbol table d.

    The target name must already exist in d; index and value are resolved
    through d when present there, otherwise evaluated (index before value,
    matching the original order).
    """
    def __init__(self, v1, i, v2):
        self.name = v1
        self.index = i
        self.value = v2

    def evaluate(self):
        if self.name not in d:
            print('SEMANTIC ERROR')
            return
        idx = d[self.index] if self.index in d else self.index.evaluate()
        val = d[self.value] if self.value in d else self.value.evaluate()
        d[self.name][idx] = val
class IfNode(Node):
    """'if <boolean>: <block>' — runs the block when the condition holds."""
    def __init__(self, v1, v2):
        self.boolean = v1
        self.block = v2

    def evaluate(self):
        # Condition may be a symbol-table entry or a subexpression.
        cond = d[self.boolean] if self.boolean in d else self.boolean.evaluate()
        if cond:
            self.block.evaluate()
class IfElseNode(Node):
    """'if <boolean>: <block1> else: <block2>'."""
    def __init__(self, v1, v2, v3):
        self.boolean = v1
        self.block1 = v2
        self.block2 = v3

    def evaluate(self):
        # Condition may be a symbol-table entry or a subexpression.
        cond = d[self.boolean] if self.boolean in d else self.boolean.evaluate()
        if cond:
            self.block1.evaluate()
        else:
            self.block2.evaluate()
class WhileNode(Node):
def __init__(self, v1, v2):
self.boolean = v1
self.block = v2
def evaluate(self):
if self.boolean in d:
while d[self.boolean]:
self.block.evaluate()
else:
while self.boolean.evaluate():
self.block.evaluate()
class PrintNode(Node):
    # `print(expr);` — prints a bound variable's value or the evaluated node.
    def __init__(self, e):
        self.e = e
    def evaluate(self):
        if self.e in d:
            print (d[self.e])
        else:
            print(self.e.evaluate())
class EmptyNode(Node):
    # Placeholder for an empty statement; evaluating it does nothing.
    def __init__(self, v):
        self.v = v
    def evaluate(self):
        return None
class VariableNode(Node):
    # Wraps an identifier; evaluate() yields the name string (the dict key).
    def __init__(self, k):
        self.name = k
    def evaluate(self):
        return self.name
class NumberNode(Node):
    """Numeric literal; a '.' in the lexeme selects float, otherwise int."""
    def __init__(self, v):
        self.value = float(v) if '.' in v else int(v)
    def evaluate(self):
        return self.value

class StringNode(Node):
    """String literal (surrounding quotes already stripped by the lexer)."""
    def __init__(self, v):
        self.value = str(v)
    def evaluate(self):
        return self.value

class TupleNode(Node):
    """Tuple literal; elements were evaluated when the tuple was parsed."""
    def __init__(self, v):
        self.value = v
    def evaluate(self):
        return self.value
class IndexNodeT(Node):
    """Tuple indexing `#i t` — 1-based index into an evaluated tuple."""
    def __init__(self, v1, v2):
        self.v1 = v1  # tuple-producing node
        self.v2 = v2  # index-producing node (1-based)
    def evaluate(self):
        tup = self.v1.evaluate()
        idx = self.v2.evaluate()
        return tup[idx - 1]

class BlankListNode(Node):
    """Empty list literal `[]`."""
    def __init__(self, v):
        self.value = v
    def evaluate(self):
        return self.value
class ListNode(Node):
    # Non-empty list literal; elements are resolved lazily at evaluate time.
    def __init__(self, v):
        # v: list of element entries — each a bare ID string or an AST node.
        self.value = v
    def evaluate(self):
        i=0
        n = []
        while i<len(self.value):
            # A bare ID string bound in `d` is replaced by its value;
            # anything else is an expression node to evaluate.
            if self.value[i] in d:
                n = n + [d[self.value[i]]]
            else:
                n = n + [self.value[i].evaluate()]
            i = i + 1
        return n
class BooleanNode(Node):
    """Boolean literal built from one of the four lexer spellings."""
    def __init__(self, v):
        # The BOOLEAN token regex guarantees one of these spellings; any
        # other input would leave `value` unset (as in the original).
        if v in ('true', 'True'):
            self.value = True
        elif v in ('false', 'False'):
            self.value = False
    def evaluate(self):
        return self.value
class IndexNodeL(Node):
    # List indexing `list[expr]` — each operand is either a bare ID string
    # (resolved through `d`) or an AST node.
    def __init__(self, v1, v2):
        self.v1 = v1  # list operand
        self.v2 = v2  # index operand
    def evaluate(self):
        if self.v1 in d:
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        return v1[v2]
class IndexNodeS(Node):
    # String indexing `string[expr]` — identical resolution to IndexNodeL.
    def __init__(self, v1, v2):
        self.v1 = v1  # string operand
        self.v2 = v2  # index operand
    def evaluate(self):
        if self.v1 in d:
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        return v1[v2]
class BopNode(Node):
    # Binary arithmetic: + - * / ** div mod.  Operands may be bare ID
    # strings (looked up in `d`) or AST nodes.
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op
    def evaluate(self):
        if self.v1 in d :
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        # The grammar only produces these seven operators; an unknown op
        # would fall through and return None.
        if (self.op == '+'):
            return v1 + v2
        elif (self.op == '-'):
            return v1 - v2
        elif (self.op == '*'):
            return v1 * v2
        elif (self.op == '/'):
            return v1 / v2
        elif (self.op == '**'):
            return v1 ** v2
        elif (self.op == 'div'):
            return v1 // v2
        elif (self.op == 'mod'):
            return v1 % v2
class UnaryNode(Node):
    """Unary minus: negate the operand's evaluated value."""
    def __init__(self, v):
        self.v = v
    def evaluate(self):
        return -(self.v.evaluate())
class BooleanInNode(Node):
    # Membership test `a in b` — operands resolved through `d` when they are
    # bare ID strings, otherwise evaluated as AST nodes.
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op  # always 'in'
    def evaluate(self):
        if self.v1 in d :
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        return v1 in v2
class ElementConcatNode(Node):
    """`e :: list` — prepend a single element to a list."""
    def __init__(self, op, v1, v2):
        self.v1 = v1  # element node
        self.v2 = v2  # list node
        self.op = op  # always '::'
    def evaluate(self):
        head = self.v1.evaluate()
        tail = self.v2.evaluate()
        return [head] + tail
class StringConcatNode(Node):
    """`string + expression` concatenation.

    Bug fix: the original resolved dict-bound operands to their raw values
    (e.g. ``v1 = d[self.v1]``) but then unconditionally called
    ``v1.evaluate()``, raising AttributeError whenever an operand was a
    variable name already bound in ``d``.  Operands are now resolved the
    same way as in the sibling binary nodes (BopNode, ListConcatNode):
    a bare ID string is looked up in ``d``, anything else is an AST node
    whose ``evaluate()`` yields the value.
    """
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op  # always '+'
    def evaluate(self):
        v1 = d[self.v1] if self.v1 in d else self.v1.evaluate()
        v2 = d[self.v2] if self.v2 in d else self.v2.evaluate()
        return v1 + v2
class ListConcatNode(Node):
    # `list + list` — concatenates two lists; operands resolved through `d`
    # when they are bare ID strings.
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op  # always '+'
    def evaluate(self):
        if self.v1 in d :
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        return v1 + v2
class BooleanNode1(Node):
    # Comparison operators: <> > < >= <= == on numbers and strings.
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op
    def evaluate(self):
        if self.v1 in d :
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        # Grammar restricts op to these six; unknown ops fall through to None.
        if (self.op == '<>'):
            return v1 != v2
        elif (self.op == '>'):
            return v1 > v2
        elif (self.op == '<'):
            return v1 < v2
        elif (self.op == '>='):
            return v1 >= v2
        elif (self.op == '<='):
            return v1 <= v2
        elif (self.op == '=='):
            return v1 == v2
class BooleanNotNode(Node):
    # Logical `not expr` — operand resolved through `d` if it is a bound ID.
    def __init__(self, v):
        self.value = v
    def evaluate(self):
        if self.value in d:
            return not d[self.value]
        else:
            return not self.value.evaluate()
class BooleanNode2(Node):
    # Logical connectives `andalso` / `orelse` (non-short-circuit: both
    # operands are resolved before the operator is applied).
    def __init__(self, op, v1, v2):
        self.v1 = v1
        self.v2 = v2
        self.op = op
    def evaluate(self):
        if self.v1 in d :
            v1 = d[self.v1]
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        else:
            v1 = self.v1.evaluate()
            if self.v2 in d:
                v2 = d[self.v2]
            else:
                v2 = self.v2.evaluate()
        if (self.op == 'andalso'):
            return v1 and v2
        elif (self.op == 'orelse'):
            return v1 or v2
# Keywords: lexeme -> token name.  t_ID consults this map so keywords are
# never tokenized as plain identifiers.
reserved = {
    'if' : 'IF',
    'else' : 'ELSE',
    'while' : 'WHILE',
    'print' : 'PRINT',
    'div' : 'DIV',
    'mod' : 'MOD',
    'not' : 'NOT',
    'andalso' : 'AND',
    'orelse' : 'OR',
    'in' : 'IN'
    }
# Full token list required by PLY: keyword tokens plus literals/operators.
tokens = list(reserved.values()) + [
    'NUMBER', 'STRING',
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'POWER', 'CONCAT',
    'EQUALS', 'LT', 'LE', 'GT', 'GE', 'NE', 'BOOLEAN',
    'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET', 'LCURLY', 'RCURLY',
    'COMMA', 'SEMICOLON', 'TUPLEINDEX',
    'ID', 'ASSIGN']
# Simple string token rules.  PLY sorts these by decreasing regex length,
# so '**' is matched before '*', '<=' before '<', etc.
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_POWER =r'\*\*'
t_CONCAT = r'\:\:'
t_NE = r'<>'
t_EQUALS = r'=='
t_LT = r'<'
t_LE = r'<='
t_GT = r'>'
t_GE = r'>='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LCURLY = r'{'
t_RCURLY = r'}'
t_COMMA = r'\,'
t_SEMICOLON = r';'
t_TUPLEINDEX = r'\#'
t_ASSIGN = '='
# Function token rules: the docstring IS the regex (PLY compiles the master
# pattern with re.VERBOSE, so embedded spaces are ignored).  Function rules
# are tried in definition order, before the string rules above.
def t_NUMBER(t):
    r'-?\d*(\d\.|\.\d)\d*([eE][-+]? \d+)? | \d+'
    try:
        t.value = NumberNode(t.value)
    except ValueError:
        # NOTE(review): print uses a comma, not %-formatting, so the literal
        # "%d" is printed as-is; likely unreachable since int() in Python 3
        # has no size limit.
        print("NUMBER value too large %d", t.value)
        t.value = 0
    return t
def t_STRING(t):
    r'(\"(([^\"]|\\\"|\\\')*[^\\])?\")|(\'(([^\']|\\\"|\\\')*[^\\])?\')'
    # Strip the surrounding quotes before wrapping in a StringNode.
    t.value = StringNode(t.value[1:-1])
    return t
def t_BOOLEAN(t):
    r'\btrue\b | \bfalse\b | \bTrue\b | \bFalse\b'
    t.value = BooleanNode(t.value)
    return t
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Keywords from `reserved` are re-typed; everything else stays an ID.
    t.type = reserved.get(t.value,'ID')
    return t
# Ignored characters
t_ignore = " \t"
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting.
    t.lexer.lineno += t.value.count("\n")
def t_error(t):
    """Lexer error handler: drop the offending character and continue.

    Bug fix: the original called ``t.lexer.skip(0)``, which never advances
    past the illegal character, so the lexer would loop forever on any
    input containing one.  ``skip(1)`` discards exactly one character.
    """
    # print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
# debug=0 suppresses PLY's lexer diagnostics.
lex.lex(debug=0)
# Precedence rules for operators
# Listed lowest-binding first; PLY uses these to resolve shift/reduce
# conflicts in the ambiguous expression grammar below.
precedence = (
    ('left','NOT','AND','OR'),
    ('left','EQUALS','LT','LE','NE','GT','GE'),
    ('left','CONCAT'),
    ('left','IN'),
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE','DIV','MOD'),
    ('right','POWER'),
    ('left','TUPLEINDEX'),
    ('right','UMINUS')
    )
# Statement-level grammar.  In PLY each function's docstring IS the grammar
# rule, so the docstrings below must not be altered.
def p_block(p):
    'block : LCURLY statement_list RCURLY'
    p[0] = BlockNode(p[2])
def p_statement_list(p):
    'statement_list : statement_list statement'
    p[0] = p[1] + [p[2]]
def p_statement_list_val(p):
    '''statement_list : statement'''
    p[0] = [p[1]]
def p_statement(p):
    '''statement : assign_statement
                 | print_statement
                 | conditional_statement
                 | while_statement
                 | empty_statement
                 | block'''
    p[0] = p[1]
# dictionary of variables
d = {}
def p_variable_ID(p):
    'variable : ID'
    p[0] = VariableNode(p[1])
def p_assign_statement1(p):
    'assign_statement : variable ASSIGN expression SEMICOLON'
    p[0] = AssignNode1(p[1], p[3])
def p_assign_statement2(p):
    'assign_statement : ID LBRACKET expression RBRACKET ASSIGN expression SEMICOLON'
    p[0] = AssignNode2(p[1], p[3], p[6])
#Print statement
def p_print_statement(p):
    '''print_statement : PRINT LPAREN expression RPAREN SEMICOLON'''
    p[0] = PrintNode(p[3])
#conditional statements, including if and ifelse statements
def p_conditional_statement(p):
    '''conditional_statement : if_statement
                             | if_else_statement'''
    p[0] = p[1]
def p_if_statement(p):
    'if_statement : IF LPAREN expression RPAREN block'
    p[0] = IfNode(p[3], p[5])
def p_if_else_statement(p):
    'if_else_statement : IF LPAREN expression RPAREN block ELSE block'
    p[0] = IfElseNode(p[3], p[5], p[7])
def p_while_statement(p):
    'while_statement : WHILE LPAREN expression RPAREN block'
    p[0] = WhileNode(p[3], p[5])
def p_empty_statement(p):
    'empty_statement : '
    p[0] = EmptyNode(None)
#ID reductions
# A bare ID reduces to its raw string in every expression position; the
# AST nodes later decide whether to resolve it through `d`.
def p_expression_variable(p):
    '''factor : ID
       list : ID
       string : ID
       expression : ID'''
    p[0] = p[1]
#Parenthesized expression
def p_expression_group(p):
    'expression : LPAREN expression RPAREN'
    p[0] = p[2]
#Tuple Rules
#Create a tuple
def p_expression_tuple(p):
    'expression : tuple'
    p[0] = p[1]
def p_tuple1(p):
    'tuple : LPAREN expression tupletail'
    # NOTE: tuple elements are evaluated at *parse* time, unlike lists,
    # so a tuple's contents are fixed when the program text is read.
    p[0] = [p[2].evaluate()] + p[3]
    p[0] = tuple(p[0])
    p[0] = TupleNode(p[0])
def p_tuple2(p):
    'tupletail : COMMA expression tupletail'
    p[0] = [p[2].evaluate()] + p[3]
def p_tupletail(p):
    'tupletail : RPAREN'
    p[0] = list()
def p_tuple_index(p):
    'expression : TUPLEINDEX expression tuple'
    # `#i (a, b, ...)` — 1-based indexing handled by IndexNodeT.
    p[0] = IndexNodeT(p[3], p[2])
#Arithmetic Opreations
def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression'''
    p[0] = BopNode(p[2], p[1], p[3])
def p_expression_term(p):
    'expression : term'
    p[0] = p[1]
def p_expression_binop1(p):
    # `factor POWER term` (not `term POWER factor`) makes ** right-associative.
    '''term : term TIMES factor
            | term DIVIDE factor
            | factor POWER term
            | term DIV factor
            | term MOD factor'''
    p[0] = BopNode(p[2], p[1], p[3])
def p_term_factor(p):
    'term : factor'
    p[0] = p[1]
def p_factor_group(p):
    'factor : LPAREN expression RPAREN'
    p[0] = p[2]
def p_factor_number(p):
    'factor : NUMBER'
    p[0] = p[1]
def p_factor_uminus(p):
    'factor : MINUS factor %prec UMINUS'
    p[0] = UnaryNode(p[2])
#String rules
def p_expression_string(p):
    'expression : string'
    p[0] = p[1]
def p_string_s(p):
    'string : STRING'
    p[0] = p[1]
#String concatenation
def p_string_concatenation(p):
    'expression : string PLUS expression'
    p[0] = StringConcatNode(p[2], p[1], p[3])
#String indexing
def p_string_index(p):
    'expression : string LBRACKET expression RBRACKET'
    p[0] = IndexNodeS(p[1], p[3])
#List
#List Creation
def p_expression_list(p):
    'expression : list'
    p[0] = p[1]
def p_list0(p):
    'list : LBRACKET tail'
    # `[]` — empty list literal.
    p[0] = list()
    p[0] = BlankListNode(p[0])
def p_list1(p):
    'list : LBRACKET expression tail'
    # Non-empty list: element nodes are kept unevaluated until runtime.
    p[0] = [p[2]] + p[3]
    p[0] = ListNode(p[0])
def p_list2(p):
    'tail : COMMA expression tail'
    p[0] = [p[2]] + p[3]
def p_tail(p):
    'tail : RBRACKET'
    p[0] = list()
#List Concatenation
def p_list_concat(p):
    'list : list PLUS list'
    p[0] = ListConcatNode(p[2], p[1], p[3])
#List Indexing
def p_list_index0(p):
    'expression : list LBRACKET expression RBRACKET'
    p[0] = IndexNodeL(p[1], p[3])
def p_list_index1(p):
    'list : list LBRACKET expression RBRACKET'
    p[0] = IndexNodeL(p[1], p[3])
#Boolean_in rule
def p_boolean_in(p):
    'boolean : expression IN expression'
    p[0] = BooleanInNode(p[2], p[1], p[3])
#Element concatenation (e::list)
def p_expression_concat(p):
    'list : expression CONCAT list'
    p[0] = ElementConcatNode(p[2], p[1], p[3])
#Comparison (numbers and strings)
def p_compare(p):
    '''boolean : expression LT expression
               | expression LE expression
               | expression EQUALS expression
               | expression NE expression
               | expression GT expression
               | expression GE expression'''
    p[0] = BooleanNode1(p[2], p[1], p[3])
#boolean not, and, or
def p_expression_boolean(p):
    'expression : boolean'
    p[0] = p[1]
def p_boolean(p):
    'boolean : BOOLEAN'
    p[0] = p[1]
def p_boolean_not(p):
    'expression : NOT expression'
    p[0] = BooleanNotNode(p[2])
def p_boolean_op(p):
    '''expression : expression AND expression
                  | expression OR expression'''
    p[0] = BooleanNode2(p[2], p[1], p[3])
def p_error(p):
    """Parser error hook.

    Bug fix: PLY calls this with ``p is None`` when the error occurs at end
    of input; the original then crashed with AttributeError on
    ``p.parser``.  We raise a clean SyntaxError instead — either way the
    driver's ``except Exception`` prints SYNTAX ERROR, so external
    behavior is preserved.
    """
    if p is None:
        raise SyntaxError("unexpected end of input")
    # print("Syntax error at '%s'" % p.value)
    p.parser.skip(1)
import ply.yacc as yacc
# debug=0 suppresses parser.out generation/diagnostics.
yacc.yacc(debug=0)
import sys
# Read the whole source file; newlines are stripped so the grammar sees a
# single line (the language has no line-sensitive syntax).
with open(sys.argv[1], 'r') as myfile:
    data = myfile.read().replace('\n', '')
try:
    lex.input(data)
except Exception:
    print('SYNTAX ERROR')
    sys.exit()
try:
    # Lexing and parsing failures both surface here as SYNTAX ERROR.
    root = yacc.parse(data)
except Exception:
    print('SYNTAX ERROR')
    sys.exit()
try:
    # Any runtime failure (bad types, unbound names, ...) is a semantic error.
    root.evaluate()
except Exception:
    print('SEMANTIC ERROR')
    sys.exit()
|
from .base import SimpleService
class GlusterdService(SimpleService):
    """Service wrapper for the glusterd systemd unit."""
    name = 'glusterd'
    systemd_unit = 'glusterd'
    restartable = True

    async def after_start(self):
        # the glustereventsd daemon is started via the
        # ctdb.shared.volume.mount method. See comment there
        # to know why we do this.
        # Only start ctdb once the shared-volume mount job finishes
        # successfully (wait() raises on failure).
        if await (
            await self.middleware.call('ctdb.shared.volume.mount')
        ).wait(raise_error=True):
            await self.middleware.call('service.start', 'ctdb')

    async def after_restart(self):
        # bounce the glustereventsd service
        await self.middleware.call('service.restart', 'glustereventsd')

    async def before_stop(self):
        # Stop ctdb first, then unmount the shared volume it depends on.
        await self.middleware.call('service.stop', 'ctdb')
        await (
            await self.middleware.call('ctdb.shared.volume.umount')
        ).wait(raise_error=True)

    async def after_stop(self):
        await self.middleware.call('service.stop', 'glustereventsd')
|
"""API Views"""
from rest_framework import generics
from orders.api.serializers import OrderSerializer
class OrderCreateAPIView(generics.CreateAPIView):
    """POST-only endpoint that creates an Order through OrderSerializer."""
    serializer_class = OrderSerializer
|
from django.test import TestCase
import datetime
from django.utils import timezone
from tplatform.models import Article, Author, Tag, Type
from django.core.urlresolvers import reverse
class DataSetUp(TestCase):
    """Shared fixture: two tags, two types, two authors and five articles."""

    @classmethod
    def setUpTestData(cls):
        # Tags and Types
        Tag.objects.create(name='someTag')
        Tag.objects.create(name='someOtherTag')
        Type.objects.create(name='someType', picture_handle='someHandle')
        Type.objects.create(name='someOtherType', picture_handle='someOtherHandle')
        # Authors
        Author.objects.create(
            name='someName',
            description='someDescription',
            picture_handle='someAuthorHandle',
            status=1,
        )
        Author.objects.create(
            name='someOtherName',
            description='someOtherDescription',
            picture_handle='someOtherAuthorHandle',
            status=3,
        )
        # Articles: categories cycle 1-3; authors/tags/types alternate pk 1-2.
        for num in range(5):
            article = Article.objects.create(
                title='Title %s' % num,
                description='Description %s' % num,
                date_added=timezone.now(),
                content='Content %s' % num,
                category=num % 3 + 1,
            )
            article.save()
            article.authors.add(Author.objects.get(pk=num % 2 + 1))
            article.tags.add(Tag.objects.get(pk=num % 2 + 1))
            article.types.add(Type.objects.get(pk=num % 2 + 1))
            article.save()
class IndexViewTest(DataSetUp):
    """Smoke tests for the index view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('index'))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('index'))
        self.assertTemplateUsed(response, 'tplatform/index.html')

class ArticleDetailViewTest(DataSetUp):
    """Tests for the article detail view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('browse.article_detail', args=[1]))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('browse.article_detail', args=[1]))
        self.assertTemplateUsed(response, 'tplatform/article_detail.html')
    def test_view_returns_correct_context(self):
        response = self.client.get(reverse('browse.article_detail', args=[1]))
        self.assertEqual(response.context['article'].id, 1)

class FilterDetailViewTest(DataSetUp):
    """Tests for the filtered browse view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('browse.filter_detail', args=['1', '1', '1', '1']))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('browse.filter_detail', args=['1', '1', '1', '1']))
        self.assertTemplateUsed(response, 'tplatform/filter_detail.html')

class TrainersViewTest(DataSetUp):
    """Tests for the trainers list view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('trainers'))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('trainers'))
        self.assertTemplateUsed(response, 'tplatform/trainers.html')
    def test_view_returns_correct_context(self):
        response = self.client.get(reverse('trainers'))
        expected_trainers = Author.objects.all()
        self.assertQuerysetEqual(response.context['trainers'], [repr(x) for x in expected_trainers])

class TrainerDetailViewTest(DataSetUp):
    """Tests for the trainer detail view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('trainers.trainer_detail', args=[1]))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('trainers.trainer_detail', args=[1]))
        self.assertTemplateUsed(response, 'tplatform/trainer_detail.html')
    def test_view_returns_correct_context(self):
        response = self.client.get(reverse('trainers.trainer_detail', args=[1]))
        self.assertEqual(response.context['trainer'].id, 1)

class ContactViewTest(DataSetUp):
    """Tests for the contact-us view."""
    def test_view_url_by_name(self):
        response = self.client.get(reverse('contact'))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('contact'))
        self.assertTemplateUsed(response, 'tplatform/contact_us.html')
|
'''
Created on 29 mai 2016
@author: PASTOR Robert
Manage the display mode, either weekly or monthly
'''
class DisplayMode(object):
    """Holds the calendar display mode (weekly or monthly) plus the
    selected year / week / month numbers, read from an HTTP request.

    Bug fix: the ``request is None`` branch referenced ``self.defaultValue``,
    an attribute that does not exist (AttributeError); it now uses
    ``self.defaultMode``.  The duplicated GET/POST parsing was also merged
    into one helper, and the bare ``except:`` clauses were narrowed.
    """
    monthlyMode = "monthly"
    weeklyMode = "weekly"
    defaultMode = weeklyMode
    displayMode = defaultMode
    week_number = 0
    month_number = 0
    year = 0

    def __init__(self):
        self.displayMode = self.defaultMode
        self.week_number = 0
        self.month_number = 0
        self.year = 0

    def __str__(self):
        return self.displayMode

    def _read_params(self, params):
        """Populate year/week/month from a dict-like of request parameters.

        A present 'week' selects weekly mode; otherwise 'month' is read and
        monthly mode is selected (a missing/invalid 'month' propagates, as
        in the original).
        """
        try:
            self.year = int(params['year'])
        except (KeyError, ValueError, TypeError):
            self.year = 0
        try:
            self.week_number = int(params['week'])
            self.displayMode = self.weeklyMode
        except (KeyError, ValueError, TypeError):
            self.week_number = 0
            self.month_number = int(params['month'])
            self.displayMode = self.monthlyMode

    def initialise(self, request):
        """Initialise state from an HTTP request (GET or POST), or reset to
        defaults when ``request`` is None."""
        if request is not None:
            params = request.GET if request.method == 'GET' else request.POST
            self._read_params(params)
        else:
            # Was `self.defaultValue` — an AttributeError in the original.
            self.displayMode = self.defaultMode
            self.year = 0
            self.week_number = 0
            self.month_number = 0

    def isWeeklyDisplayMode(self):
        return self.displayMode == self.weeklyMode

    def isMonthlyDisplayMode(self):
        return self.displayMode == self.monthlyMode

    def getDisplayMode(self):
        return self.displayMode

    def getYear(self):
        return self.year

    def getWeekNumber(self):
        return self.week_number

    def getMonthNumber(self):
        return self.month_number
|
# Title: Mathematical Algorithms Basics in Python
# Date: Oct/06/2015, Tuesday - Current
# Author: Minwoo Bae (minubae.nyc@gmail.com)
# Reference: http://wphooper.com/teaching/2015-fall-308/python/Numbers.html
import math
# 01) Find Divisors of a Natural Number P
# Write a Python function print_divisor(p) which takes as input an integer p >= 1 and prints all its positive divisors.
# Using while_statement
def print_divisor_01(p):
    """Return the divisors of p in 1..p-1, using a while loop.

    Note: p itself is not included in the result.
    """
    divisors = []
    i = 1
    while i < p:
        if p % i == 0:
            divisors.append(i)
        i += 1
    return divisors
# Using for_statement
def print_divisor_02(p):
    """Return the divisors of p in 1..p-1 (p itself excluded), via a comprehension."""
    return [i for i in range(1, p) if p % i == 0]
# 02) Find a Factorial of a Non-negative Integer N
# Write a Python function called factorial(n) which takes as input a positive integer n and return n!.
# Using while_statement
def factorial_01(n):
    """Return n! computed with a while loop (n == 0 or 1 gives 1)."""
    if n == 0 or n == 1:
        return 1
    result = 1
    i = 2
    while i <= n:
        result *= i
        i += 1
    return result
# Using for_statement
def factorial_02(n):
    """Return n! computed with a for loop (n == 0 or 1 gives 1)."""
    if n == 0 or n == 1:
        return 1
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
# 03) Is_Square(n), n is a Non-negative Integer Number
# Write a Python function called is_square(n) which takes as input an integer n
# and returns truth-value of the statement "There is an integer k so that n = k^2".
def is_square(n):
    """Return True if n == k*k for some integer k.

    Bug fix: the original looped ``for k in range(n)``, which never tests
    k == n, so is_square(0) and is_square(1) were wrongly False.  Using
    math.isqrt is both correct and O(1)-ish instead of O(n).
    Negative n returns False (matching the original's behavior).
    """
    if n < 0:
        return False
    k = math.isqrt(n)
    return k * k == n
# 04) Sum of Cubes of a Positive Integer n
# Write a function called sum_of_cubes(n) which takes as input a positive integer n
# and returns the sum 1^3 + 2^3 + 3^3 + ... + n^3 = โ from j = 1 to n j^3.
def sum_of_cubes(n):
    """Return 1^3 + 2^3 + ... + n^3."""
    return sum(j ** 3 for j in range(1, n + 1))
# 05) Find a Slope of a Secent Line
# Functions can be passed to functions which allows them to solve more general problems.
# The following function secant_slope will take as input a real valued function f:โโโf:โโโ
# and two input values x1 and x2 and produce the slope of the associated secant line (the line joining (x1, f(x1)) to (x1, f(x1)) ).
def secant_slope(f, x1, x2):
    """Slope of the secant line through (x1, f(x1)) and (x2, f(x2))."""
    return (f(x2) - f(x1)) / (x2 - x1)
# 06) Find an Area of a Triangle from its Three Side Lengths
# The following function computes the area of a triangle from its three side lengths.
# This is the semiperimeter formula for area of a triangle. Heron's formulaHeron's formula states
# that the area of a triangle whose sides have lengths a, b, and c is:
# A = sqrt(s*(s-a)(s-b)(s-c)), where s = a+b+c / 2
def area(a, b, c):
    """Triangle area from side lengths a, b, c via Heron's formula."""
    semi = (a + b + c) / 2
    product = semi * (semi - a) * (semi - b) * (semi - c)
    return math.sqrt(product)
# 07) Check that a Natural Number p is a Prime Number
# An integer p >= 2 is prime if the only positive numbers which divide it
# evenly are itself and one.
# Write a funuction is_prime(p) which takes as input an integer p >= 2 and
# returns True if p is prime and False if p is not prime.
# Hints: The number p is not prime if and only if there ia n with 2 <= n < p
# so that the remainder when dividing p by n is zero.
def is_prime(p):
    """Return True if p is prime, False otherwise.

    Bug fix: the original returned inside the loop's ``else`` branch after
    testing only i == 2, so every odd composite (9, 15, ...) was reported
    prime, and p == 2 or 3 fell off the end returning None.  Trial
    division now runs to sqrt(p) before declaring primality.
    """
    if p < 2:
        return False
    for i in range(2, math.isqrt(p) + 1):
        if p % i == 0:
            return False
    return True
# 08) A Root finding Algorithm with the Newton's Method
# Newton's method is a very efficient way to find a root of a differentiable
# function f starting with a point near the root. The method gives a sequence
# x0, x1, x2,... of numbers which rapidly approach the root if the initial
# point is sufficiently close to the root. The value x0 is the starting point.
# Given xk the value of xk+1 by intersecting the x-axis with tangent line to
# the graph of f at the point (xk, f(fk)). That is,
# xk+1 = xk - f(xk)/f'(xk).
# An illustration of this process is shown at the end of this question.
# Write a function newtons_method(f, df, x0, n) which takes as input a function
# f:R ---> R, its derivative df = f' (alse a function from R to R), an initial
# point x0 and an integer n >= 1. The function should return the value xn obtained
# by iterating Newton's method n times.
def f(x):
    """Example function f(x) = 2 - x^2 for the Newton's method demo."""
    return 2 - x ** 2

def df(x):
    """Derivative of f: f'(x) = -2x."""
    return -2 * x
# Using while_statement
def newtons_method(f, df, x0, n):
    """Run n iterations of Newton's method from x0 and return the result.

    Returns early with the current iterate if the derivative is zero
    (avoiding division by zero).  Bug fix: for n < 1 the original returned
    the never-assigned local ``x`` (UnboundLocalError); the iterate now
    starts at x0, so n == 0 simply returns x0.
    """
    x = x0
    i = 1
    while i <= n:
        if df(x) == 0:
            return x
        x = x - f(x) / df(x)
        i += 1
    return x
# Using for_statement
def newtons_method_01(f, df, x0, n):
    """For-loop variant of newtons_method (n iterations from x0).

    Same fix as newtons_method: the iterate starts at x0, so n < 1 returns
    x0 instead of raising UnboundLocalError.
    """
    x = x0
    for _ in range(n):
        if df(x) == 0:
            return x
        x = x - f(x) / df(x)
    return x
# 09) Collatz Conjecture
# The Collatz conjecture can be summarized as follows. Take any natural number n .
# If n is even, divide it by 2 to get n2 . If n is odd, multiply it by 3 and add 1 to obtain 3n+1 .
# Repeat the process (which has been called "Half Or Triple Plus One", or HOTPO[6]) indefinitely.
# The conjecture is that no matter what number you start with, you will always eventually reach 1.
# The property has also been called oneness. In modular arithmetic notation, define the function f as follows:
# f(n) = n/2 if n%2==0, 3*n+1 if n%2==1
def collatz(n):
    """Return the Collatz trajectory of n (excluding n itself, ending at 1)."""
    steps = []
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps.append(n)
    return steps
# 10) An approximation to a definite integral
# An approximation to a definite integral of a real valued function is
# โซ from a to b f(x)dx โ (bโa)/n * โ from i=0 to nโ1 f(a + i(bโa)/n)
# for large values of n. Here is a function which evaluates
# this sum on an arbitrary function.
# Using while_statement
def approximate_integral(f, a, b, n):
    """Left Riemann-sum approximation of the integral of f over [a, b]
    using n equally spaced sample points (while-loop version).

    The unused local ``h`` from the original was removed; ``sum`` was also
    renamed to avoid shadowing the builtin.
    """
    total = 0
    i = 0
    while i < n:
        total = total + f(a + i * (b - a) / n)
        i += 1
    return total * (b - a) / n
# Using for_statement
def approximate_integral_01(f, a, b, n):
    """Left Riemann-sum approximation of the integral of f over [a, b]
    (for-loop version).  Unused local ``h`` removed; ``sum`` renamed to
    avoid shadowing the builtin.
    """
    total = 0
    for i in range(n):
        total = total + f(a + i * (b - a) / n)
    return total * (b - a) / n
|
import sys
from os.path import dirname,abspath
# Repository root: two directory levels above this file.
project_path =dirname(dirname(abspath(__file__)))
# NOTE(review): "\\project1" is a Windows-only separator; os.path.join
# would be portable.  The append makes `calculator` importable below.
sys.path.append(project_path+"\\project1")
from calculator import add
print(add(4,5))
import logging
import fmcapi
import time
def test__phys_interfaces(fmc):
    """Exercise get/put on PhysicalInterface objects via fmcapi.

    Creates two security zones, configures two physical interfaces
    (one static IPv4, one DHCP), then reverts every change and deletes
    the zones.  Requires a device registered to the FMC.
    """
    logging.info(
        "Test PhysicalInterface. get, put PhysicalInterface Objects. Requires registered device"
    )
    # Two throwaway security zones to attach to the interfaces.
    sz1 = fmcapi.SecurityZones(fmc=fmc)
    sz1.name = "SZ-OUTSIDE1"
    sz1.post()
    time.sleep(1)
    sz2 = fmcapi.SecurityZones(fmc=fmc)
    sz2.name = "SZ-OUTSIDE2"
    sz2.post()
    time.sleep(1)
    # NOTE(review): "device_name" looks like a placeholder — confirm the
    # registered device's real name before running.
    intf1 = fmcapi.PhysicalInterfaces(fmc=fmc, device_name="device_name")
    intf1.get(name="GigabitEthernet0/0")
    intf1.enabled = True
    intf1.ifname = "OUTSIDE1"
    intf1.activeMACAddress = "0050.5686.718f"
    intf1.standbyMACAddress = "0050.5686.0c2e"
    intf1.static(ipv4addr="10.254.0.3", ipv4mask=24)
    intf1.sz(name=sz1.name)
    intf2 = fmcapi.PhysicalInterfaces(fmc=fmc, device_name="device_name")
    intf2.get(name="GigabitEthernet0/1")
    intf2.enabled = True
    intf2.ifname = "OUTSIDE2"
    intf2.activeMACAddress = "0050.5686.821d"
    intf2.standbyMACAddress = "0050.5686.11cb"
    intf2.dhcp()
    intf2.sz(name=sz2.name)
    # Push the configuration.
    intf1.put()
    time.sleep(1)
    intf2.put()
    time.sleep(1)
    # Refresh, then blank out everything that was configured above.
    intf1.get()
    intf2.get()
    intf1.enabled = False
    intf1.activeMACAddress = ""
    intf1.standbyMACAddress = ""
    intf1.static(ipv4addr="", ipv4mask="")
    intf1.securityZone = {}
    intf1.activeMACAddress = ""
    intf1.standbyMACAddress = ""
    intf2.enabled = False
    intf2.activeMACAddress = ""
    intf2.standbyMACAddress = ""
    intf2.static(ipv4addr="", ipv4mask="")
    intf2.securityZone = {}
    intf2.activeMACAddress = ""
    intf2.standbyMACAddress = ""
    intf1.put()
    time.sleep(1)
    intf2.put()
    time.sleep(1)
    # Clearing ifname must happen after the zone detach has been pushed.
    intf1.get()
    intf2.get()
    intf1.ifname = ""
    intf2.ifname = ""
    intf1.put()
    sz1.delete()
    intf2.put()
    sz2.delete()
|
#%%
from typing import List, Tuple, Dict, Union
import pandas as pd
# Articles corpus; `Id` is the document identifier used throughout.
df = pd.read_csv('../../data/processed.csv.gz', index_col="Id")
pd.set_option('display.max_colwidth', 999)
# %% ************ getting ners
import spacy
#%%
import pickle
# Use GPU if available; large English model for better NER quality.
spacy.prefer_gpu()
nlp = spacy.load("en_core_web_lg")
# (label, UPPERCASED lemma) pairs to drop: mostly news agencies, bylines
# and spaCy misfires that would otherwise dominate the entity counts.
banned_ner = {('FAC', 'FAHRENHEIT'), ('GPE', 'NEW'),
              ('ORG', 'CLIMATE HOME NEWS'),
              ('ORG', 'E&E NEWS'), ('ORG', 'E&E'), ('ORG', 'AFP'),
              ('ORG', 'GETTY IMAGES'), ('ORG', 'GETTY'), ('ORG', 'GUARDIAN'),
              ('ORG', 'ASSOCIATED PRESS'), ('ORG', 'AP'), ('ORG', 'BLOOMBERG'),
              ('ORG', 'CBS NEWS'), ('ORG', 'CNN'), ('ORG', 'FOX NEWS'),
              ('ORG', 'NATURE COMMUNICATIONS'), ('ORG', 'NATURE'),
              ('ORG', 'NEW YORK TIMES'), ('ORG', 'REUTERS'),
              ('ORG', 'THE ASSOCIATED PRESS'), ('ORG', 'THE CANADIAN PRESS'),
              ('ORG', 'THE NEW YORK TIMES'), ('ORG', 'THE WASHINGTON POST'),
              ('ORG', 'THE'), ('ORG', 'THOMSON REUTERS FOUNDATION'),
              ('ORG', 'THOMSON REUTERS'), ('ORG', 'TIME'), ('ORG', 'TIMES'),
              ('PERSON', 'FAHRENHEIT'), ('PERSON', 'DAVID'),
              ('PERSON', 'EUREKALERT'), ('ORG', 'EUREKALERT'),
              ('PERSON', 'PH.D'), ('WORK_OF_ART', 'NATURE CLIMATE CHANGE'),
              ('WORK_OF_ART', 'NATURE'), ('WORK_OF_ART', 'PHD'),
              ('WORK_OF_ART', 'SCIENCE'), ('WORK_OF_ART', 'THE CONVERSATION')}
def get_ner_pairs(df: pd.DataFrame):
    """Yield, per document, the list of (LABEL, LEMMA) entity pairs.

    Numeric-ish labels are skipped, as are entities containing the
    publishing site's own name and anything in ``banned_ner``.
    """
    # Labels that carry numbers/dates rather than named entities.
    label_filter = {
        'CARDINAL', 'ORDINAL', 'QUANTITY', 'MONEY', 'PERCENT', 'TIME', 'DATE'
    }
    for i, doc in enumerate(
            nlp.pipe(df.Content, batch_size=200, disable=['tagger',
                                                          'parser'])):
        site = df.SiteName.iloc[i]
        # The walrus binds `pair` once so the banned_ner check reuses the
        # same uppercased (label, lemma) tuple that is collected.
        pairs = [
            pair for ent in doc.ents if ent.label_.upper() not in label_filter
            and site.upper() not in ent.lemma_.upper() and (pair := (
                ent.label_.upper(), ent.lemma_.upper())) not in banned_ner
        ]
        yield pairs
#%% *****---- filtering them
from collections import defaultdict, Counter
def count_merge(pairs) -> Dict[Tuple, int]:
    """Normalize (label, text) pairs and count occurrences per pair.

    Normalization: strip whitespace, drop a leading "THE ", drop a
    possessive suffix, collapse runs of spaces, and discard one-character
    texts.
    """
    counts = defaultdict(int)
    for label, text in pairs:
        cleaned = text.strip()
        if cleaned[:4] == "THE ":
            cleaned = cleaned[4:]
        if cleaned[-2:] in ('โS', "'S"):
            cleaned = cleaned[:-2]
        cleaned = " ".join(cleaned.split())  # collapse double spaces
        if len(cleaned) > 1:
            counts[(label, cleaned)] += 1
    return dict(counts)
from tqdm.auto import tqdm
tqdm.pandas()
# docu_ner: document id -> {(label, text): count} for that document.
docu_ner = {}
for i, pairs in tqdm(enumerate(get_ner_pairs(df)), total=len(df)):
    counts = count_merge(pairs)
    docu_ner[df.index.values[i]] = counts
#%% ****** inverted index
# inv_ner: (label, text) -> [(document id, count), ...] posting list.
inv_ner = defaultdict(list)
for docu_id, counts in tqdm(docu_ner.items()):
    for (label, text), count in counts.items():
        inv_ner[(label, text)].append((docu_id, count))
inv_ner = dict(inv_ner)
#%% ****** NER merging: business rules
def ner_merger(merge_to: Union[Tuple, str],
               *merge_from: Union[Tuple, str],
               store=inv_ner) -> None:
    """Merge the posting lists of each `merge_from` key into `merge_to`.

    Per-document counts are summed via Counter; merged source keys are
    removed from `store`.  Missing sources are reported and skipped, and a
    source equal to the target is ignored.
    """
    if merge_to not in store:
        store[merge_to] = []
    merged = Counter(dict(store[merge_to]))
    for source in merge_from:
        if source == merge_to:
            continue
        if source not in store:
            print(f"{source} not in dict")
        else:
            merged += Counter(dict(store[source]))
            del store[source]
    store[merge_to] = list(merged.items())
#%%%
# Business-rule merges: collapse spelling/alias variants of countries,
# regions, organisations and well-known people into one canonical key.
ner_merger(('GPE', 'UNITED KINGDOM'), ('GPE', 'THE UNITED KINGDOM'),
           ('GPE', 'U.K'), ('GPE', 'UK'), ('GPE', 'U.K.'))
ner_merger(('GPE', 'UNITED STATES'), ('GPE', 'THE UNITED STATES'),
           ('GPE', 'THE UNITED STATES OF AMERICA'), ('GPE', 'U.S'),
           ('GPE', 'U.S.'), ('GPE', 'U.S.A.'), ('GPE', 'US'), ('GPE', 'USA'))
ner_merger(('GPE', 'UNITED ARAB EMIRATES'),
           ('GPE', 'THE UNITED ARAB EMIRATES'), ('GPE', 'U.A.E.'),
           ('GPE', 'UAE'))
ner_merger(('LOC', 'INDIAN OCEAN'), ('LOC', 'THE INDIAN OCEAN'))
ner_merger(('LOC', 'MIDDLE EAST'), ('LOC', 'THE MIDDLE EAST'))
ner_merger(('LOC', 'PACIFIC OCEAN'), ('LOC', 'PACIFIC'),
           ('LOC', 'THE PACIFIC OCEAN'))
ner_merger(('ORG', 'BRITISH PETROL'), ('ORG', 'BP'))
ner_merger(('ORG', 'CENTER FOR DISEASE CONTROL'), ('ORG', 'CDC'))
ner_merger(('ORG', 'EXXON'), ('ORG', 'EXXONMOBIL'))
ner_merger(('ORG', 'HARVARD UNIVERSITY'), ('ORG', 'HARVARD'))
ner_merger(('ORG', 'STANFORD UNIVERSITY'), ('ORG', 'STANFORD'))
ner_merger(('ORG', 'EU'), ('ORG', 'EUROPEAN UNION'), ('ORG', 'E.U.'))
ner_merger(('ORG', 'EPA'), ('ORG', 'THE ENVIRONMENTAL PROTECTION AGENCY'))
# People: surname/first-name/title variants folded into the full name.
ner_merger(('PERSON', 'BERNIE SANDERS'), ('PERSON', 'BERNIE'),
           ('PERSON', 'SANDERS'))
ner_merger(('PERSON', 'JOE BIDEN'), ('PERSON', 'BIDEN'))
ner_merger(('PERSON', 'GRETA THUNBERG'), ('PERSON', 'GRETA'),
           ('PERSON', 'THUNBERG'))
ner_merger(('PERSON', 'DONALD TRUMP'), ('PERSON', 'DONALD'),
           ('PERSON', 'TRUMP'), ('PERSON', 'PRESIDENT DONALD TRUMP'),
           ('PERSON', 'PRESIDENT TRUMP'))
ner_merger(('PERSON', 'SCOTT MORRISON'), ('PERSON', 'SCOTT'),
           ('PERSON', 'MORRISON'))
ner_merger(('PERSON', 'OBAMA'), ('PERSON', 'BARACK OBAMA'),
           ('PERSON', 'BARACK'), ('PERSON', 'PRESIDENT OBAMA'))
ner_merger(('PERSON', 'NANCY PELOSI'), ('PERSON', 'PELOSI'))
ner_merger(('PERSON', 'JUSTIN TRUDEAU'), ('PERSON', 'TRUDEAU'))
ner_merger(('PERSON', 'JEFF BEZOS'), ('PERSON', 'BEZOS'))
ner_merger(('PERSON', 'GEORGE FLOYD'), ('PERSON', 'FLOYD'))
ner_merger(('PERSON', 'GEORGE BUSH'), ('PERSON', 'BUSH'))
ner_merger(('PERSON', 'ANGELA MERKEL'), ('PERSON', 'MERKEL'))
ner_merger(('PERSON', 'BILL GATES'), ('PERSON', 'GATES'))
ner_merger(('PERSON', 'JAIR BOLSONARO'), ('PERSON', 'BOLSONARO'))
ner_merger(('PERSON', 'WARREN BUFFET'), ('PERSON', 'BUFFET'))
ner_merger(('ORG', 'INTERNATIONAL ENERGY AGENCY'), ('ORG', 'IEA'))
ner_merger(('GPE', 'WASHINGTON, D.C.'), ('GPE', 'WASHINGTON DC'),
           ('GPE', 'WASHINGTON, DC'), ('GPE', 'DC'), ('GPE', 'D.C.'),
           ('GPE', 'D.C'))
#%% **********manual merging
def merg_counter(merge_to: Union[Tuple, str], counted: Counter, store=inv_ner):
    """Fold *counted* article counts into the entry for *merge_to* in *store*.

    *store* maps a key (a (tag, text) tuple or plain text) to a list of
    ``(article_id, count)`` pairs; the merged result is written back in the
    same list-of-pairs shape.  A missing key is reported and left untouched.
    """
    if merge_to not in store:
        print(f"{merge_to} not in store")
        return
    merged = Counter(dict(store[merge_to])) + counted
    store[merge_to] = list(merged.items())
# Disambiguate 'WASHINGTON': articles already tagged 'WASHINGTON STATE' get the
# bare-'WASHINGTON' mentions counted toward the state, all others toward D.C.
if ('GPE', 'WASHINGTON STATE') in inv_ner:
    state_ids = {x for x, y in inv_ner[('GPE', 'WASHINGTON STATE')]}
else:
    state_ids = set()
merg_counter(
    ('GPE', 'WASHINGTON STATE'),
    Counter(
        {x: y
         for x, y in inv_ner[('GPE', 'WASHINGTON')] if x in state_ids}))
merg_counter(('GPE', 'WASHINGTON, D.C.'),
             Counter({
                 x: y
                 for x, y in inv_ner[('GPE', 'WASHINGTON')]
                 if x not in state_ids
             }))
del inv_ner[('GPE', 'WASHINGTON')]
# Same idea for 'WARREN': split bare mentions between Warren Buffet and
# Elizabeth Warren based on which articles mention the full name.
buffet_ids = {x for x, y in inv_ner[('PERSON', 'WARREN BUFFET')]}
elizabeth_ids = {x for x, y in inv_ner[('PERSON', 'ELIZABETH WARREN')]}
merg_counter(
    ('PERSON', 'WARREN BUFFET'),
    Counter(
        {x: y
         for x, y in inv_ner[('PERSON', 'WARREN')] if x in buffet_ids}))
merg_counter(
    ('PERSON', 'ELIZABETH WARREN'),
    Counter(
        {x: y
         for x, y in inv_ner[('PERSON', 'WARREN')] if x in elizabeth_ids}))
del inv_ner[('PERSON', 'WARREN')]
#%%
import math
# An entity is "relevant" when it appears in at least 0.5% of the articles.
RELEVANCE_THRES = math.floor(len(df) * 0.005)
# %% ----inverted df to get which entitites are present with multiple tags
# One row per (tag, text) key; 'Len' = number of articles mentioning it.
inv_df = pd.DataFrame.from_dict({k: len(v)
                                 for k, v in inv_ner.items()},
                                orient='index',
                                columns=['Len'])
inv_df['Tag'], inv_df['Text'] = zip(*inv_df.index)
# NOTE(review): the result of reset_index() is discarded (no inplace=True),
# so this line is a no-op — confirm it was intended.
inv_df.reset_index()
inv_df.set_index(['Text', 'Tag'], inplace=True)
inv_df.sort_index(inplace=True)
# %% ---- grouping and filtering would be relevants
# Per text: tc = how many distinct tags, ls = total article count.
g = inv_df.reset_index().groupby(['Text']).agg(tc=('Tag', 'count'),
                                               ls=('Len', 'sum'))
g = g[g.tc * g.ls >= RELEVANCE_THRES]
#%% ----- NER that otherwise would not be present
# Individually-rare (tag, text) rows whose text is relevant in aggregate.
duped = inv_df.loc[inv_df.index.get_level_values(0).isin(
    g.index)][inv_df.Len < RELEVANCE_THRES].copy()
# %% merging counts for better filtering, multi index
duped.reset_index(inplace=True)
duped = duped.merge(g, left_on='Text', right_on='Text')
duped.set_index(['Text', 'Tag'], inplace=True)
duped.sort_index(inplace=True)
# %%
def get_example(text, tag):
    """Yield the Content of every article that mentions entity (tag, text)."""
    for pair in inv_ner[(tag, text)]:
        # pair[0] is the article id (row label into df).
        yield df.loc[pair[0]].Content
# print(get_example('YUKON', 'ORG'))
#%% getting rid of tags
def merge_same_entity(inv: Dict[Tuple, List[Tuple]]) -> Dict[str, List[Tuple]]:
    """Collapse (tag, text) keys onto the text alone, summing per-article counts.

    The input maps (tag, text) to ``(article_id, count)`` pairs; the output
    maps text to the merged ``(article_id, count)`` pairs across all tags.
    """
    merged = defaultdict(Counter)
    for tag, text in inv:
        merged[text] += Counter(dict(inv[tag, text]))
    return {text: list(counts.items()) for text, counts in merged.items()}
# Tag-agnostic index: entity text -> merged (article_id, count) pairs.
merged_inv = merge_same_entity(inv_ner)
#%%
def kwfilterer(k, *must_contain: str, store=merged_inv, stripped=True):
    """Keep only articles for entity *k* whose body mentions a keyword.

    Filters the ``(article_id, count)`` list stored under ``k`` down to the
    articles whose content contains at least one of *must_contain* (compared
    lowercase against 'StrippedContent' when *stripped*, verbatim against
    'Content' otherwise).  The entity is deleted entirely when nothing matches.
    """
    if k not in store:
        # BUG FIX: was `print(f"{k}" not in store)`, which printed the boolean
        # result of the membership test instead of a diagnostic message.
        print(f"{k} not in store")
        return
    targ = 'StrippedContent' if stripped else 'Content'
    res = [(art, c) for (art, c) in store[k]
           if any((m.lower() if stripped else m) in df.loc[art][targ]
                  for m in must_contain)]
    if not res:
        del store[k]
    else:
        store[k] = res
def kwmerger(k, target, store=merged_inv, stripped=True):
    """Merge into *target* the articles under *k* whose body mentions *target*.

    Articles counted under ``k`` whose content contains the *target* string
    (lowercased against 'StrippedContent' when *stripped*) have their counts
    added onto the existing *target* entry.  ``k`` itself is left in place.
    """
    if k not in store:
        # BUG FIX: was `print(f"{k}" not in store)` — printed a boolean
        # instead of the intended diagnostic.
        print(f"{k} not in store")
        return
    if target not in store:
        # BUG FIX: same f-string mistake as above.
        print(f"{target} not in store")
        return
    targ = 'StrippedContent' if stripped else 'Content'
    res = [(art, c) for (art, c) in store[k]
           if (target.lower() if stripped else target) in df.loc[art][targ]]
    if not res:
        return
    else:
        c = Counter(dict(store[target]))
        c += Counter(dict(res))
        store[target] = list((c).items())
#%% housecleaning after merging
# Manual alias consolidation on the tag-free index built above.
ner_merger('ALEXANDRIA OCASIO - CORTEZ',
           'AOC',
           *[k for k in merged_inv if 'OCASIO' in k],
           store=merged_inv)
kwfilterer('ALEXANDRIA', 'Egypt')
ner_merger('United Nations Climate Change Conference',
           'U.N. CLIMATE CHANGE CONFERENCE',
           'UN CLIMATE CHANGE CONFERENCE',
           'UNITED NATIONS CLIMATE CHANGE CONFERENCE',
           'UN CLIMATE CHANGE CONFERENCE ( COP 25',
           'UN CLIMATE CHANGE CONFERENCE COP 25',
           'CLIMATE CHANGE CONFERENCE COP',
           'UNITED NATIONS CLIMATE CHANGE CONFERENCE 2009',
           "UNITED NATIONS ' CLIMATE CHANGE CONFERENCE",
           *[k for k in merged_inv if 'COP26' in k],
           store=merged_inv)
ner_merger('CORSIA',
           *[
               k for k in merged_inv
               if 'CARBON OFFSETTING AND REDUCTION' in k or 'CORSIA' in k
           ],
           store=merged_inv)
# NOTE(review): 'McDonaldโs' keeps a mojibake apostrophe as it appears in the
# scraped text — confirm the source data really contains that byte sequence.
kwfilterer('MCDONALD', "McDonald's", 'McDonaldโs', stripped=False)
merged_inv["MCDONALD'S"] = merged_inv.pop("MCDONALD")
kwfilterer('GEORGE', 'bush')
ner_merger('GEORGE BUSH', 'GEORGE', store=merged_inv)
kwfilterer('MARK', 'ZUCKERBERG')
# BUG FIX: the merge target was misspelled 'MARK ZUCKERBER', which created a
# new typo key instead of consolidating under the full name.
ner_merger('MARK ZUCKERBERG', 'MARK', 'ZUCKERBERG', store=merged_inv)
kwfilterer('FRANCIS', 'POPE')
ner_merger('POPE FRANCIS', 'FRANCIS', store=merged_inv)
kwfilterer('GORE', 'AL')
ner_merger('AL GORE', 'GORE', store=merged_inv)
ner_merger('PETE BUTTIGIEG', 'BUTTIGIEG', store=merged_inv)
ner_merger('PARIS AGREEMENT', 'PARIS CLIMATE AGREEMENT', store=merged_inv)
kwmerger('PARIS', 'PARIS AGREEMENT')
kwfilterer('PARIS', 'France')
kwmerger('NEW YORK', 'NEW YORK STATE')
kwfilterer('NEW YORK', 'NEW YORK CITY')
ner_merger('NEW YORK CITY', 'NEW YORK', store=merged_inv)
# Every observed spelling variant of the CDC.
ner_merger('CENTER FOR DISEASE CONTROL',
           *[
               'CENTERS FOR DISEASE CONTROL',
               'CENTERS FOR DISEASE CONTROL AND PREVENTION',
               'CENTRES FOR DISEASE CONTROL',
               'U.S. CENTERS FOR DISEASE CONTROL AND PREVENTION',
               'U.S. CENTERS FOR DISEASE CONTROL',
               'CENTERS FOR DISEASE CONTROL AND PREVENTION OFFICE',
               'CENTRE FOR DISEASE CONTROL', 'CENTER FOR DISEASE CONTROL',
               'US CENTERS FOR DISEASE CONTROL',
               'US CENTERS FOR DISEASE CONTROL AND PREVENTION',
               'NATIONAL CENTER FOR DISEASE CONTROL',
               'CENTER FOR DISEASE CONTROL AND PREVENTION',
               'CENTER FOR DISEASE CONTROL โS NATIONAL INSTITUTE',
               'CENTRES FOR DISEASE CONTROL AND PREVENTION',
               'CENTRE FOR DISEASE CONTROL AND PREVENTION',
               'CENTER FOR DISEASE CONTROL(CDC'
           ],
           store=merged_inv)
ner_merger(
    'WORLD HEALTH ORGANIZATION',
    'WHO',
    *[
        k for k in merged_inv
        if 'WORLD HEALTH ORGANISATION' in k or 'WORLD HEALTH ORGANIZATION' in k
    ],
    store=merged_inv)
ner_merger('EMMANUEL MACRON', 'MACRON', store=merged_inv)
ner_merger('ENVIRONMENTAL PROTECTION AGENCY', 'EPA', store=merged_inv)
ner_merger('NATIONAL OCEANIC AND ATMOSPHERIC ADMINISTRATION',
           'NOAA',
           store=merged_inv)
ner_merger('INTERGOVERNMENTAL PANEL ON CLIMATE CHANGE',
           'IPCC',
           store=merged_inv)
ner_merger('GRETA THUNBERG', 'GRETA', 'THUNBERG', store=merged_inv)
ner_merger('DONALD TRUMP',
           'DONALD',
           'TRUMP',
           'PRESIDENT DONALD TRUMP',
           'PRESIDENT TRUMP',
           store=merged_inv)
ner_merger('OBAMA',
           'BARACK OBAMA',
           'BARACK',
           'PRESIDENT OBAMA',
           store=merged_inv)
ner_merger('COVID-19',
           'COVID',
           'SARS-COV2',
           'CORONAVIRUS',
           'COVID19',
           store=merged_inv)
ner_merger('UNITED NATIONS', 'U.N.', 'UN', store=merged_inv)
# Entities that are noise for this corpus (publishers, platforms, generic words).
for bad_entity in [
        'FRANKLIN', 'ACT', 'GETTY IMAGES IMAGE', 'TELEGRAM',
        'CARBON OFFSETTING AND REDUCTION', 'CHRIS', 'MURPHY', 'THOMAS',
        'MARTIN', 'LEE', 'DOI', 'GUARDIAN AUSTRALIA', 'ENVIRONMENT', 'NATION',
        'SUSTAINABILITY', 'CREATIVE COMMONS ATTRIBUTION - SHARE', 'PH.D.',
        'SCIENTIFIC REPORTS', 'NEW YORKER', 'NPR',
        'THOMSON REUTERS FOUNDATION', 'MICHAEL BLOOMBERG',
        'NATIONAL GEOGRAPHIC', 'LOS ANGELES TIMES', 'INDEPENDENT PREMIUM',
        'GOOGLE NEWS', 'FINANCIAL TIMES', 'BBC', 'TWITTER', 'FACEBOOK',
        'INSTAGRAM', 'UNIVERSITY', 'SOUTH', 'NORTH'
]:
    if bad_entity in merged_inv: del merged_inv[bad_entity]
#%% -------- keeping rare turtles only
PART = math.floor(len(df) * 0.01)  # TODO: tune this threshold
filtered = {k: v for k, v in merged_inv.items() if len(v) >= PART}
print(len(filtered))
#%%
from nltk.metrics import edit_distance
def lev_sim(s1: str, s2: str):
    """Normalised Levenshtein similarity: 1.0 for identical strings, towards
    0 as the (transposition-aware, 1.5-cost-substitution) distance grows."""
    dist = edit_distance(s1, s2, substitution_cost=1.5, transpositions=True)
    return 1 - dist / max(len(s1), len(s2))
def leven_mix(l, conf=0.85):
    """Greedily bucket strings by Levenshtein similarity to a seed.

    Repeatedly takes the first remaining string as a seed, groups every later
    string whose lev_sim to the seed is at least *conf* with it, and recurses
    on the rest.  Only buckets with two or more members are returned.
    """
    combos = []
    remaining = l
    while remaining:
        seed, rest = remaining[0], remaining[1:]
        bucket = [seed]
        leftover = []
        for cand in rest:
            (bucket if lev_sim(seed, cand) >= conf else leftover).append(cand)
        if len(bucket) > 1:
            combos.append(bucket)
        remaining = leftover
    return combos
#%%
# Merge near-duplicate spellings (shortest variant wins as the key),
# then persist the final entity index.
keys = list(filtered)
to_merge = leven_mix(keys)
for i in to_merge:
    i.sort(key=lambda x: len(x))
    ner_merger(i[0], *i[1:], store=filtered)
with open('../../data/filtered_dict.pt', 'wb') as f:
    pickle.dump(filtered, f, pickle.HIGHEST_PROTOCOL)
|
"""ModelAdmin for MailingList"""
from datetime import datetime
from django.contrib import admin
from django.urls import path
from django.utils.encoding import smart_str
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext_lazy as _
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from ..models import Contact
from ..models import MailingList
from ..utils.excel import ExcelResponse
class MailingListAdmin(admin.ModelAdmin):
    """Admin for MailingList: list merging and CSV export of subscribers."""
    date_hierarchy = 'creation_date'
    list_display = ('name', 'creation_date',
                    'subscribers_count', 'unsubscribers_count',
                    'exportation_links')
    list_filter = ('creation_date', 'modification_date')
    search_fields = ('name', 'description',)
    filter_horizontal = ['subscribers', 'unsubscribers',]
    fieldsets = ((None, {'fields': ('name', 'description',)}),
                 )
    actions = ['merge_mailinglist']
    actions_on_top = False
    actions_on_bottom = True

    @admin.action(
        description=_('Merge selected mailinglists')
    )
    def merge_mailinglist(self, request, queryset):
        """Merge multiple mailing lists into a freshly created one."""
        # BUG FIX: was `== 1`, which let an empty selection fall through and
        # create an empty "merged" list.
        if queryset.count() < 2:
            self.message_user(request, _('Please select at least 2 mailing lists.'))
            return None
        # Dict keys act as an ordered, de-duplicated set of contacts.
        subscribers = {}
        for ml in queryset:
            for contact in ml.subscribers.all():
                subscribers[contact] = ''
        when = str(datetime.now()).split('.')[0]
        new_mailing = MailingList(name=_('Merging list at %s') % when,
                                  description=_('Mailing list created by merging at %s') % when)
        new_mailing.save()
        # BUG FIX: direct assignment to a ManyToMany manager raises TypeError
        # on Django >= 2.2 (this module already uses path()/@admin.action,
        # i.e. Django >= 3.2) — use .set() instead.
        new_mailing.subscribers.set(list(subscribers.keys()))
        self.message_user(request, _('%s succesfully created by merging.') % new_mailing)
        return HttpResponseRedirect(reverse('admin:aoml_mailinglist_change',
                                            args=[new_mailing.pk]))

    @admin.display(
        description=_('Export')
    )
    def exportation_links(self, mailinglist):
        """Render the CSV export link shown in the changelist row."""
        return mark_safe('<a href="%s">%s</a>' % (
            reverse('admin:newsletter_mailinglist_export_csv',
                    args=[mailinglist.pk]), _('CSV')))

    def export_csv(self, request, mailinglist_id):
        """Export the subscribers of the given mailing list as CSV/Excel."""
        mailinglist = get_object_or_404(MailingList, pk=mailinglist_id)
        name = 'contacts_%s' % smart_str(mailinglist.name)
        return ExcelResponse(mailinglist.subscribers.all(), name)

    def get_urls(self):
        """Prepend the custom CSV-export URL to the default admin URLs."""
        urls = super().get_urls()
        my_urls = [path('export/csv/<int:mailinglist_id>/',
                        self.admin_site.admin_view(self.export_csv),
                        name='newsletter_mailinglist_export_csv')]
        return my_urls + urls
|
from django.shortcuts import render_to_response
from django.template import RequestContext
def _printError(request, errormsg):
    """Render the generic error page with the given message."""
    return render_to_response('errormsg.html', {'errormsg': errormsg},
                              context_instance=RequestContext(request))
def _printMessage(request, message):
    """Render the generic info page with the given message."""
    return render_to_response('message.html', {'message': message},
                              context_instance=RequestContext(request))
def _isRunningContest(contest):
now = datetime.now()
start_time = contest.start_time.replace(tzinfo = None)
end_time = contest.end_time.replace(tzinfo = None)
return start_time <= now and now <= end_time
def _isRunningContestProblem(pid):
    """Return True iff problem *pid* belongs to at least one running contest."""
    contestlist = [x.cid for x in ContestProblem.objects.filter(pid=pid)]
    for contest in contestlist:
        # BUG FIX: called undefined `isRunningContest`; the helper defined in
        # this module is `_isRunningContest`.
        # NOTE(review): `contest` here is the value of the `cid` field — if
        # that is a raw id rather than a Contest instance,
        # _isRunningContest's .start_time/.end_time access will fail; confirm.
        if _isRunningContest(contest):
            return True
    return False
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
import modelcluster.fields
import wagtail.core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds HighlightItem: an orderable teaser block attached to HomePage."""
    dependencies = [
        ('wagtailimages', '0005_make_filter_spec_unique'),
        ('wagtailcore', '0013_update_golive_expire_help_text'),
        ('portal_pages', '0011_cmspage'),
    ]
    operations = [
        migrations.CreateModel(
            name='HighlightItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # sort_order drives the 'ordering' option below (cluster orderable pattern).
                ('sort_order', models.IntegerField(editable=False, blank=True, null=True)),
                ('title', models.CharField(max_length=255)),
                ('blurb', wagtail.core.fields.RichTextField()),
                ('home_page', modelcluster.fields.ParentalKey(to='portal_pages.HomePage', related_name='highlights')),
                ('icon', models.ForeignKey(to='wagtailimages.Image', on_delete=django.db.models.deletion.CASCADE)),
                ('target_page', models.ForeignKey(to='wagtailcore.Page', on_delete=django.db.models.deletion.CASCADE)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.value_generators_domain."""
from __future__ import annotations
import importlib
import inspect
import re
from core.domain import value_generators_domain
from core.tests import test_utils
from extensions.value_generators.models import generators
class ValueGeneratorsUnitTests(test_utils.GenericTestBase):
    """Test the value generator registry."""
    def test_registry_generator_not_found(self) -> None:
        """Tests that get_generator_class_by_id raises exception
        when it isn't found.
        """
        generator_id = 'aajfejaekj'
        with self.assertRaisesRegex(
            KeyError, generator_id
        ):
            value_generators_domain.Registry.get_generator_class_by_id(
                generator_id
            )
    def test_value_generator_registry(self) -> None:
        """A generator can be fetched by id, and the registry exposes
        exactly the two expected generator classes."""
        copier_id = 'Copier'
        copier = value_generators_domain.Registry.get_generator_class_by_id(
            copier_id)
        self.assertEqual(copier().id, copier_id)
        all_generator_classes = (
            value_generators_domain.Registry.get_all_generator_classes())
        self.assertEqual(len(all_generator_classes), 2)
    def test_generate_value_of_base_value_generator_raises_error(self) -> None:
        """The abstract base generator must not implement generate_value()."""
        base_generator = value_generators_domain.BaseValueGenerator()
        with self.assertRaisesRegex(
            NotImplementedError,
            re.escape(
                'generate_value() method has not yet been implemented')):
            base_generator.generate_value()
    def test_registry_template_random_selector_contents(self) -> None:
        """The RandomSelector editor HTML template matches the checked-in copy."""
        contents_registry = (
            '<schema-based-editor [schema]="SCHEMA" '
            '[(ngModel)]="customizationArgs.list_of_values">\n'
            '</schema-based-editor>\n'
        )
        class_object = value_generators_domain.Registry()
        self.assertEqual(
            contents_registry,
            class_object.get_generator_class_by_id(
                'RandomSelector'
            ).get_html_template()
        )
    def test_registry_template_copier_contents(self) -> None:
        """The Copier editor HTML template matches the checked-in copy."""
        contents_registry = (
            '<span class="d-inline-block align-middle">\n '
            '<object-editor [objType]="objType" [initArgs]="initArgs" '
            '[(value)]="customizationArgs.value" [alwaysEditable]="true">\n '
            '</object-editor>\n</span>\n'
        )
        class_object = value_generators_domain.Registry()
        self.assertEqual(
            contents_registry,
            class_object.get_generator_class_by_id(
                'Copier'
            ).get_html_template()
        )
    def test_get_value_generator_classes_not_subclass(self) -> None:
        """Test that the value generator registry discovers all classes
        correctly and excludes classes that are not subclasses of
        BaseValueGenerator.
        """
        class MockCopier():
            """This is a dummy class for self.swap to test that the value
            generator registry discovers all classes correctly and excludes
            classes that are not subclasses of BaseValueGenerator.
            We need to have a class in the returned list of value generators
            that isn't a subclass of BaseValueGenerator to test.
            """
            pass
        module = importlib.import_module(
            'extensions.value_generators.models.generators'
        )
        expected_generators = {
            'RandomSelector': type(generators.RandomSelector())
        }
        # Swapping Copier for a non-subclass should drop it from discovery.
        with self.swap(module, 'Copier', MockCopier):
            value_generators = (
                value_generators_domain.Registry.get_all_generator_classes()
            )
        self.assertEqual(expected_generators, value_generators)
class ValueGeneratorNameTests(test_utils.GenericTestBase):
    """Scans the codebase for BaseValueGenerator subclasses and checks the
    full set of generator names against the expected list."""
    def test_value_generator_names(self) -> None:
        """This function checks for duplicate value generators."""
        all_python_files = self.get_all_python_files()
        all_value_generators = []
        for file_name in all_python_files:
            python_module = importlib.import_module(file_name)
            for name, clazz in inspect.getmembers(
                    python_module, predicate=inspect.isclass):
                all_base_classes = [base_class.__name__ for base_class in
                                    (inspect.getmro(clazz))]
                # Check that it is a subclass of 'BaseValueGenerator'.
                if 'BaseValueGenerator' in all_base_classes:
                    all_value_generators.append(name)
        expected_value_generators = ['BaseValueGenerator', 'Copier',
                                     'RandomSelector']
        self.assertEqual(
            sorted(all_value_generators), sorted(expected_value_generators))
|
# Generated by Django 2.0.6 on 2020-09-11 09:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for a bookshop app: addresses, books, carts, categories,
    orders, order items and users, all mapped onto existing t_* tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='TAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_user', models.CharField(blank=True, max_length=20, null=True)),
                ('address', models.CharField(blank=True, max_length=20, null=True)),
                ('clear_address', models.CharField(blank=True, max_length=100, null=True)),
                ('post_code', models.CharField(blank=True, max_length=6, null=True)),
                ('phone', models.CharField(blank=True, max_length=20, null=True)),
                ('telephone', models.CharField(blank=True, max_length=20, null=True)),
            ],
            options={
                'db_table': 't_address',
            },
        ),
        migrations.CreateModel(
            name='TBook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book_name', models.CharField(blank=True, max_length=20, null=True)),
                ('author', models.CharField(blank=True, max_length=20, null=True)),
                ('press', models.CharField(blank=True, max_length=20, null=True)),
                ('time', models.DateField(blank=True, null=True)),
                ('edition', models.SmallIntegerField(blank=True, null=True)),
                ('isbn', models.CharField(blank=True, db_column='ISBN', max_length=20, null=True)),
                ('numbers', models.IntegerField(blank=True, null=True)),
                ('pages', models.IntegerField(blank=True, null=True)),
                ('size', models.CharField(blank=True, max_length=10, null=True)),
                ('paper', models.CharField(blank=True, max_length=10, null=True)),
                ('pack', models.CharField(blank=True, max_length=10, null=True)),
                ('book_pic', models.CharField(blank=True, max_length=200, null=True)),
                ('book_price', models.IntegerField(blank=True, null=True)),
                ('new_price', models.IntegerField(blank=True, null=True)),
                ('bool_count', models.CharField(blank=True, max_length=20, null=True)),
                ('sell_count', models.CharField(blank=True, max_length=20, null=True)),
                ('author_inf', models.CharField(blank=True, max_length=1000, null=True)),
                ('synopsis', models.CharField(blank=True, max_length=1000, null=True)),
                ('ed_recom', models.CharField(blank=True, max_length=1000, null=True)),
                ('con_recom', models.CharField(blank=True, max_length=1000, null=True)),
                ('list', models.CharField(blank=True, max_length=1000, null=True)),
                ('m_review', models.CharField(blank=True, max_length=1000, null=True)),
                ('try_read', models.CharField(blank=True, max_length=1000, null=True)),
                ('make_time', models.DateField(blank=True, null=True)),
                ('impression', models.SmallIntegerField(blank=True, null=True)),
                ('flow', models.IntegerField(blank=True, null=True)),
                ('comment', models.CharField(blank=True, max_length=200, null=True)),
                ('suit', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'db_table': 't_book',
            },
        ),
        migrations.CreateModel(
            name='TCar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.CharField(blank=True, max_length=20, null=True)),
                ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TBook')),
            ],
            options={
                'db_table': 't_car',
            },
        ),
        migrations.CreateModel(
            name='TCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_name', models.CharField(max_length=20)),
                ('level', models.IntegerField()),
                # Self-referential tree encoded as a plain integer, not an FK.
                ('parent_id', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'db_table': 't_category',
            },
        ),
        migrations.CreateModel(
            name='TOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_item', models.CharField(blank=True, max_length=50, null=True)),
                ('price', models.IntegerField(blank=True, null=True)),
                ('create_time', models.DateField(blank=True, null=True)),
                ('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TAddress')),
            ],
            options={
                'db_table': 't_order',
            },
        ),
        migrations.CreateModel(
            name='TOrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.CharField(blank=True, max_length=20, null=True)),
                ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TBook')),
                ('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TOrder')),
            ],
            options={
                'db_table': 't_order_item',
            },
        ),
        migrations.CreateModel(
            name='TUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(blank=True, max_length=20, null=True)),
                ('password', models.CharField(blank=True, max_length=20, null=True)),
            ],
            options={
                'db_table': 't_user',
            },
        ),
        # FKs to TUser added after TUser is created.
        migrations.AddField(
            model_name='torder',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TUser'),
        ),
        migrations.AddField(
            model_name='tcar',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TUser'),
        ),
        migrations.AddField(
            model_name='tbook',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TCategory'),
        ),
        migrations.AddField(
            model_name='taddress',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='index.TUser'),
        ),
    ]
|
# Console hangman game: one player types the word, the other guesses letters.
word = input("que dice el don: ")
print("\n"*69)  # crude "clear screen" so the second player can't see the word
letters = list(word)
guess_arr = []
# wrong_counter indexes how much of the hangman drawing is revealed;
# it starts at 1 so the gallows is always shown.
wrong_counter = 1
side = 0
import sys  # NOTE(review): sys is imported but never used
from random import randint
#array for hangman
# Drawing pieces; plain "\n" and " " entries are layout fillers that are
# skipped (see the skip check below) rather than counted as body parts.
brojon = [" ---|\n",
          " o\n",
          " |\n",
          " /","|","\\","\n",
          " |\n",
          " /"," ","\\","\n"]
for i in letters:
    guess_arr.append("_")
#lets start
while True:
    # Draw the revealed portion of the hangman.
    trubro = ""
    for i in range(0,wrong_counter):
        trubro += brojon[i]
    print(trubro)
    #print the guesses
    current_letters = ""
    for i in guess_arr:
        current_letters += i + " "
    print(current_letters)
    #guesses
    guess = input("whatcha got stud? ")
    #checking guess
    if guess not in letters:
        wrong_counter += 1
        # Random taunt for a wrong guess.
        side = randint(1,6)
        if side == 1:
            print("nah chief, that aint it")
        if side == 2:
            print("do better")
        if side == 3:
            print("ok buddy")
        if side == 4:
            print("dummy dummy stupid little boy")
        if side == 5:
            print("ignorant bastard")
        if side == 6:
            print("jesus christ")
        # Skip over pure-layout pieces so a miss always reveals a body part.
        if brojon[wrong_counter] == "\n" or brojon[wrong_counter] == " ":
            wrong_counter +=1
    else:
        # Reveal every occurrence of the guessed letter (one per pass,
        # blanking it out in `letters` so index() finds the next one).
        for i in letters:
            if guess in letters:
                letters_pos = letters.index(guess)
                guess_arr[letters_pos] = guess
                letters[letters_pos] = " "
                print("yee")
            else:
                break
    #win
    if guess_arr == list(word):
        print("you have paid the troll toll, you may now have the boys soul")
        break
    # Lose: the entire drawing has been revealed.
    if wrong_counter == len(brojon):
        trubro = ""
        for i in range(0, wrong_counter):
            trubro += brojon[i]
        print(trubro)
        print("you have failed to pay the troll toll, now the wungi are coming for your soul")
        break
|
class Kobe:
    """Record tying a piece of source text and its value to a source span
    (row/column begin and end) plus translation/write/variable metadata."""

    def __init__(self, text, value, row_b, row_e, col_b, col_e, trans, write=None, variable=None):
        self.text = text
        self.value = value
        self.row_b = row_b
        self.row_e = row_e
        self.col_b = col_b
        self.col_e = col_e
        self.trans = trans
        self.write = write
        self.variable = variable

    def __repr__(self):
        # Compact form: just the value, so containers of Kobe print tersely.
        return self.value

    def __str__(self):
        # Verbose debugging form: text, value, and the source-span location.
        loc = ','.join(str(v) for v in (self.row_b, self.row_e, self.col_b, self.col_e))
        return f"{self.text} >> {self.value} loc: {loc}"
|
import numpy as np
import streamlit as st
import math
import csv
from PIL import Image
import pandas as pd
def app():
    """Streamlit page offering Sin/Cos/Tan, each with arc and hyperbolic
    variants; shows the numeric result plus a shared footer image."""

    def _show_result(label, value):
        """Render the result caption, the value, and the shared footer."""
        st.text("The result of %s is" % label)
        st.write(value)
        image = Image.open('today.jpeg')
        st.image(image)
        st.text("i ate some pie ... ")

    options = ["Sin", "Cos", "Tan"]
    choice = st.radio("Choose the Operation", options)
    if choice == "Sin":
        multi_sin = ["simple Sin", "arc Sin", "hyperbolic Sin"]
        choices = st.radio("Choose the appropriate sin function", multi_sin)
        st.write("sins")
        if choices == "simple Sin":
            a = st.number_input("Please Entre Number for sin")
            _show_result("sin", math.sin(a))
        elif choices == "arc Sin":
            a = st.number_input("Please Entre Number for arc sin")
            _show_result("arc sin", math.asin(a))
        elif choices == "hyperbolic Sin":
            a = st.number_input("Please Entre Number for hyperbolic sin")
            _show_result("hyperbolic sin", math.sinh(a))
    elif choice == "Cos":
        multi_cos = ["simple Cos", "arc Cos", "hyperbolic Cos"]
        choices = st.radio("Choose the appropriate sin function", multi_cos)
        if choices == "simple Cos":
            a = st.number_input("Please Entre Number for cos")
            _show_result("Cos", math.cos(a))
        elif choices == "arc Cos":
            a = st.number_input("Please Entre Number for arc cos")
            _show_result("arc cos", math.acos(a))
        elif choices == "hyperbolic Cos":
            a = st.number_input("Please Entre Number for hyperbolic Cos")
            # BUG FIX: was math.sinh — hyperbolic cosine is math.cosh.
            _show_result("hyperbolic Cos", math.cosh(a))
    elif choice == "Tan":
        multi_tan = ["simple Tan", "arc Tan", "hyperbolic Tan"]
        choices = st.radio("Choose the appropriate sin function", multi_tan)
        if choices == "simple Tan":
            a = st.number_input("Please Entre Number for Tan")
            _show_result("Tan", math.tan(a))
        elif choices == "arc Tan":
            a = st.number_input("Please Entre Number for arc Tan")
            _show_result("arc Tan", math.atan(a))
        elif choices == "hyperbolic Tan":
            a = st.number_input("Please Entre Number for hyperbolic Tan")
            # BUG FIX: the caption wrongly said "hyperbolic Cos".
            _show_result("hyperbolic Tan", math.tanh(a))
|
# Directory layout: raw recordings and zonefiles live under private-circles,
# per-run .out/.err markers under snakemake-output, scrubbed results under
# publishable-circles.
config = {
    "zone_dir": "../../private-circles/zonefiles",
    "intermediate_dir": "snakemake-output",
    "build_dir": "../../publishable-circles",
    "input_dir": "../../private-circles"
} # find snakemake-output/ -type f | grep .err | sed 's/^.\{17\}//' | xargs -n 1 sh -c 'cp snakemake-output/$1 ../../publishable-circles/$1' sh
ZONEFILE_DIR = config['zone_dir']
BUILD_DIR = config['build_dir']
INTERMEDIATE_DIR = config['intermediate_dir']
INPUT_DIR = config['input_dir']
# Wildcard regexes pin vin to 17 chars, dates to 10 and times to 8.
CAN_WILD = glob_wildcards(INPUT_DIR + "/{vin,.{17}}/libpanda/{day_folder,.{10}}/{day,.{10}}-{time,.{8}}_{vin2,.{17}}_CAN_Messages.csv") # CAN_Message not supported
ZONE_WILD = glob_wildcards(ZONEFILE_DIR+"/zonefile_{vin,.{17}}.json")
def remove_not_in_vin(wildcard, check_vin):
    """Drop, in place, every wildcard match whose vin is not in *check_vin*.

    A glob_wildcards result is a namedtuple of parallel lists; the same index
    is deleted from every field so the lists stay aligned.
    """
    idx = 0
    while idx < len(wildcard.vin):
        if wildcard.vin[idx] in check_vin:
            idx += 1
            continue
        # Remove this match from every parallel field list; do not advance,
        # since the next element has shifted into the current slot.
        for field in wildcard._fields:
            del getattr(wildcard, field)[idx]
def remove_mismatching_vins(wildcard):
    """Drop, in place, matches whose directory vin and filename vin2 differ.

    Mirrors remove_not_in_vin: the same index is deleted from every parallel
    field list of the glob_wildcards namedtuple to keep them aligned.
    """
    idx = 0
    while idx < len(wildcard.vin):
        if wildcard.vin[idx] == wildcard.vin2[idx]:
            idx += 1
            continue
        # Mismatch: purge this index from all fields without advancing.
        for field in wildcard._fields:
            del getattr(wildcard, field)[idx]
# Keep only recordings that have a matching zonefile and whose embedded VIN
# matches the directory VIN, then derive the expected .out marker targets.
remove_not_in_vin(CAN_WILD, ZONE_WILD.vin)
remove_mismatching_vins(CAN_WILD)
OUTPUT_FILES = expand(INTERMEDIATE_DIR + "/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}.out", zip, day_folder=CAN_WILD.day_folder, day=CAN_WILD.day, time=CAN_WILD.time, vin=CAN_WILD.vin)
# Default target: produce a .out marker for every accepted recording.
rule all:
    input:
        OUTPUT_FILES
# Run the privpurge container on one CAN/GPS recording pair; stdout goes to
# the .out marker on success, or to the .err marker on failure.
rule create:
    input:
        canfile_find=INPUT_DIR + "/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}_CAN_Messages.csv",
        gpsfile_find=INPUT_DIR + "/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}_GPS_Messages.csv",
        zonefile_find=ZONEFILE_DIR + "/zonefile_{vin}.json"
    params:
        # Container-internal paths, mounted from the *_find inputs above.
        canfile="/input/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}_CAN_Messages.csv",
        gpsfile="/input/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}_GPS_Messages.csv",
        zonefile="/zonefiles/zonefile_{vin}.json",
        errfile=INTERMEDIATE_DIR + "/{vin}/libpanda/{day_folder}/{day}-{time}_{vin}.err",
        build_dir=BUILD_DIR + "/{vin}/libpanda/{day_folder}/",
        zone_dir=ZONEFILE_DIR,
        input_dir=INPUT_DIR
    output:
        INTERMEDIATE_DIR + "/{vin}/libpanda/{day_folder}/{day,.{10}}-{time}_{vin}.out"
    shell:
        """
        res=$(mkdir -p {params.build_dir} && \
        docker run --rm \
        -u $(id -u):$(id -g) \
        -v $(pwd)/{params.zone_dir}:/zonefiles \
        -v $(pwd)/{params.build_dir}:/build \
        -v $(pwd)/{params.input_dir}:/input \
        -v $(pwd):/data \
        rpgolota/privpurge \
        {params.canfile} \
        {params.gpsfile} \
        -z /{params.zonefile} \
        -o /build 2>&1) && echo "$res" > {output} || echo "$res" > {params.errfile}
        """
# Remove all intermediate markers so every recording is reprocessed.
rule clean:
    shell:
        """
        rm -rf {config[intermediate_dir]}
        """
""" |
#!/usr/bin/python3
# Google Code Jam "Counting Sheep": keep adding N to itself until every
# decimal digit 0-9 has appeared; N == 0 never terminates ("INSOMNIA").
import fileinput
f=fileinput.input()
T=int(f.readline())
for case in range(T):
    N=int(f.readline())
    if N==0:
        print("Case #"+str(case+1)+":","INSOMNIA")
        continue
    curr=N
    mset=set(str(N))  # digits seen so far
    while len(mset)<10:
        curr+=N
        mset.update(str(curr))
    print("Case #"+str(case+1)+":",curr)
|
import os

# Per-user leap configuration: $HOME/.leaprc (file) and $HOME/.leap (directory).
config_file = f"{os.environ['HOME']}{os.sep}.leaprc"
config_directory = f"{os.environ['HOME']}{os.sep}.leap"
|
#!/usr/bin/env python3
import argparse
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from pkcs1_breaker import *
# Small built-in RSA private key used when no --key file is supplied.
# For demo/testing only — never use this key for anything real.
_TESTKEY = b"""\
-----BEGIN PRIVATE KEY-----
MIIB5gIBADANBgkqhkiG9w0BAQEFAASCAdAwggHMAgEAAmEAy6bkAC6U7uMOlTZ5
vMr1KTp2J7bYx5UOeW5u2bJxdFpsJZazkW7zywkubN/X1aAkMxA4+L1MzIBDbjMQ
pu26NhzacUCUfqCXXN5KZAXW3pLdu20/axXM0Lzb6lwTxYHTAgMBAAECYQCJQvcx
2DOxv4A4ufrbcMBFBY5VvjvmaWTUG8bDHC60Ca4St7xYLbxMAOg1obnL1p7brF5Z
RYER2ogKXItaNAet/AMgRM3WlaEiO3y856x7DngQlTWeSYgrhFJWXGals6ECMQDm
rQj3LRMGmJ+jaUbk/ddgliUiUroxfgAj9Pthbg0FKIOszJfXaZUN17vIGhLkQocC
MQDiAl4JSKwQh8VU24My0Hahq2wY/Bj3UWuSOnmjxpXKJP766lZ3L9TiMCHPgUjk
/VUCMQDIEy8ijJLi9sAH0UkvVshXOwNsWMqsILhod5UNtZFPdwt2dmIA8c/ZmNOD
xLG8D8MCMEH+0PjDCMN28e9afhAbgViqFtGg46VsWA4GFzj0pw61COO6A++fvvkh
du4B4YhEVQIxANYNTxbxUgqeIm9qcHAUHnakjoYs4a4QfvwSSs4VlFYAuAZDZqu9
UZYeATxPJn8Kjg==
-----END PRIVATE KEY-----
"""
def load_private_pem(data):
    """Deserialize an unencrypted PEM-encoded private key."""
    return serialization.load_pem_private_key(
        data, password=None, backend=default_backend())
def encrypt_pkcs1(key, msg):
    """Encrypt *msg* under *key* using RSAES-PKCS1-v1_5 padding."""
    padding = PKCS1v15()
    return key.encrypt(msg, padding)
def encrypt_unpadded(key, m):
    """Textbook RSA encryption of raw bytes *m* (no padding applied)."""
    n = key.public_numbers().n
    e = key.public_numbers().e
    c_int = RSAEP(n, e, OS2IP(m))
    # NOTE(review): RFC 8017's I2OSP takes an output length in OCTETS; passing
    # n.bit_length() assumes pkcs1_breaker's I2OSP expects BITS — confirm its
    # signature.
    return I2OSP(c_int, n.bit_length())
def decrypt_unpadded(key, c):
    """Textbook RSA decryption of ciphertext bytes *c* (no padding removed)."""
    d = key.private_numbers().d
    n = key.public_key().public_numbers().n
    m_int = RSADP(n, d, OS2IP(c))
    # NOTE(review): same bits-vs-octets length question as encrypt_unpadded.
    return I2OSP(m_int, n.bit_length())
def unpad_pkcs1(m):
    """Strip a PKCS#1 v1.5 pad: return everything after the first zero
    separator byte (searched from offset 2), or None when no separator exists."""
    try:
        sep = m.index(0, 2)
    except ValueError:
        return None
    return m[sep + 1:]
class TestOracle(Oracle):
    """Padding oracle backed by the real private key.

    Decrypts the candidate ciphertext without unpadding and reports only
    whether the plaintext starts with the PKCS#1 v1.5 header bytes 0x00 0x02
    — exactly the single bit the Bleichenbacher attack needs.
    """
    def __init__(self, key):
        self.key = key
    def __call__(self, ciphertext):
        padded_msg = decrypt_unpadded(self.key, ciphertext)
        if padded_msg[0] == 0 and padded_msg[1] == 2:
            return Oracle.OK
        else:
            return Oracle.ERROR_PADDING_HEADER
def main():
    """Entry point: set up a test oracle and run the Bleichenbacher attack."""
    parser = argparse.ArgumentParser(
        description='Simulates the PKCS#1v15 padding attack'
    )
    parser.add_argument(
        '-k', '--key',
        help='PEM formatted file containing a private key to use with oracle'
    )
    parser.add_argument(
        '--unpadded',
        action='store_true',
        help='Message is not padded before encrypting (this tests steps 1 and 4)'
    )
    parser.add_argument(
        'm',
        metavar='message',
        nargs='?',
        default='kick it, CC',
        help='Message to use'
    )
    args = parser.parse_args()

    # Load the built-in demo key unless a PEM file was supplied.
    if args.key:
        with open(args.key, "rb") as key_file:
            key = load_private_pem(key_file.read())
    else:
        key = load_private_pem(_TESTKEY)

    m = args.m
    pubkey = key.public_key()
    numbers = pubkey.public_numbers()
    print('Using a {}-bit key and message "{}"'.format(numbers.n.bit_length(), m), file=sys.stderr)

    # Either textbook RSA (for testing attack steps 1 and 4) or real PKCS#1 v1.5.
    encryptor = encrypt_unpadded if args.unpadded else encrypt_pkcs1
    ciphertext = encryptor(pubkey, m.encode())

    attack = BB98_Attack(numbers.n, numbers.e, ciphertext, TestOracle(key))
    recovered = unpad_pkcs1(attack.find_message())
    print(attack.stats(), file=sys.stderr)
    print("Message was:", recovered.decode())


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Scrapy project settings for the stock-list crawler.
BOT_NAME = 'getStockList'

SPIDER_MODULES = ['getStockList.spiders']
NEWSPIDER_MODULE = 'getStockList.spiders'

# Persist scraped items to MongoDB.
ITEM_PIPELINES = {
    'getStockList.pipelines.MongoDBPipeline': 1000,
}

# Disable the stock user-agent middleware and use a rotating one instead.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
    'getStockList.rotate_useragent.RotateUserAgentMiddleware': 400,
}

# put your mongoDB's account info
MONGODB_SERVER = "etc-dbs"
MONGODB_PORT = 27017
MONGODB_DB = "memo"
MONGODB_COLLECTION = "stockInfo"

# Crawl politely: one request at a time, 30 seconds apart.
DOWNLOAD_DELAY = 30
CONCURRENT_REQUESTS = 1
# Write a Python code that takes the degree as input from the user and convert it into radian
# importing math
import math
print("Convert the degree to radian")
# initializing value
degree = int(input("Enter the degree : "))
radian = degree*(math.pi/180)
print(radian)
|
import sys
import os
import csv
import subprocess
import jsonschema
import json
from datetime import time, datetime, timedelta
import itertools
from itertools import cycle
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as md
import matplotlib as mpl
from matplotlib.pyplot import cm
import numpy as np
import validateJson
import re
##scrabble score
# Tile values for standard English Scrabble letters.
SCORES = {
    "a": 1, "b": 3, "c": 3, "d": 2, "e": 1, "f": 4, "g": 2,
    "h": 4, "i": 1, "j": 8, "k": 5, "l": 1, "m": 3, "n": 1,
    "o": 1, "p": 3, "q": 10, "r": 1, "s": 1, "t": 1, "u": 1,
    "v": 4, "w": 4, "x": 8, "y": 4, "z": 10,
}
## ------------------------------
def scrabble_score(word):
    """Return the total Scrabble tile value of *word* (lowercase letters only)."""
    return sum(SCORES[letter] for letter in word)
def is_allowed_specific_char(letters, word):
    """Return True iff every character of *word* appears in *letters*.

    *letters* is treated as a plain collection of allowed characters.
    re.escape() prevents characters such as '-', ']' or '^' from being
    interpreted as regex character-class syntax — the original unescaped
    interpolation silently turned e.g. "a-c" into the range a..c.
    """
    disallowed = re.compile(r'[^%s]' % re.escape(letters))
    return not bool(disallowed.search(word))
## ------------------------------
def filescsv(n,d,numseconds,numlines,phases,csvfileall,windowsize,all_words_file,csvfileallP):
    """Emit time-series, heat-map and per-player summary CSV rows for one (n, d) condition.

    n, d           -- experiment condition parameters (used in labels/paths)
    numseconds     -- how many seconds of each player's timeline to keep
    numlines       -- number of phases in *phases*
    phases         -- parsed JSON: list of phases, each holding its players
    csvfileall     -- open handle for the combined time-series CSV
    windowsize     -- when 1, Levenshtein distance between consecutive words
                      is computed by shelling out to the compiled ./main
    all_words_file -- dictionary word list (unused inside this function)
    csvfileallP    -- open handle for the per-player summary CSV
    """
    nd="n"+str(n)+"d"+str(d)
    # Per-condition output directory and files.
    if not os.path.exists(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/'+nd):
        os.makedirs(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/'+nd)
    csvfile = open(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/'+nd+'/tsData.csv', 'w')
    csvfile.write('session,player,type,letters,time,requestsSent,repliesReceived,requestsReceived,repliesSent,words,LevenshteinDistance\n')
    csvfileheat = open(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/'+nd+'HeatMap.csv', 'w')
    csvfileheat.write('day\thour\tvalue\n')
    # Build the Levenshtein helper binary once; invoked per word-pair below.
    makepath=os.getcwd()+'/data-analytics-pipeline/src/h8'
    exepath='cd '+makepath+';'
    subprocess.Popen(["make"], cwd=makepath)
    countplayer=0
    for i in range(numlines):
        phase=phases[i]["phaseid"]
        players=phases[i]["players"]
        numplayers=len(players)
        for ii in range(numplayers):
            countplayer=countplayer+1
            playerid=players[ii]["playerid"]
            initialletters=players[ii]["initialLetters"]
            timeline=players[ii]["timeline"]
            numtimeline=len(timeline)
            countbin=0
            lastword=0  # sentinel 0 = "no previous word yet"
            w1=[]  # NOTE(review): w1/w2 are never used afterwards.
            w2=[]
            allwords=[]
            # "a-b-c" -> "abc": the player's starting letters as one string.
            initiallettersString=initialletters.split("-")
            initiallettersString = "".join(str(valueword) for valueword in initiallettersString)
            # Running list of every letter the player holds (initial + received).
            allLettersRec= initialletters.split("-")
            iScrabble1=0
            iScrabble2=0
            iScrabble3=0
            iScrabble4=0
            iScrabble5=0
            for iii in range(numtimeline):
                if iii<(numseconds):
                    countbin=countbin+1
                    requestsSent=timeline[iii]["requestsSent"]
                    repliesReceived=timeline[iii]["repliesReceived"]
                    requestsReceived=timeline[iii]["requestsReceived"]
                    repliesSent=timeline[iii]["repliesSent"]
                    words=timeline[iii]["words"].lower()
                    if words!='':
                        allwords.append(words)
                    # Letters received this second ('-' separates multiples).
                    if repliesReceived!='':
                        if '-' in repliesReceived:
                            listrep=repliesReceived.split('-')
                            for rep in listrep:
                                allLettersRec.append(rep)
                        else:
                            allLettersRec.append(repliesReceived)
                    ld=''
                    minld=''  # NOTE(review): never assigned again.
                    # Levenshtein distance of each new word to the previous one;
                    # the distance is taken from the helper's exit status
                    # (subprocess.call returns the return code).
                    if windowsize==1 and words!='' and lastword!=0:
                        #if playerid=='p4f3glnm':
                        #print('lastword:'+lastword)
                        #print('newword:'+words)
                        if '-' in words:
                            listwords=words.split('-')
                            ldlist=[]
                            ldlistMin=[]
                            for wordrow in listwords:
                                arguments= ' '+str(lastword) + ' '+str(wordrow)
                                ldlist.append(subprocess.call(exepath+' ./main'+arguments,shell=True))
                                lastword=wordrow
                            ld = "-".join(str(valueword) for valueword in ldlist)
                        else:
                            arguments= ' '+str(lastword) + ' '+str(words)
                            ld=subprocess.call(exepath+' ./main'+arguments,shell=True)
                            #lastword=words
                        #if playerid=='p4f3glnm':
                        #print('ld:'+str(ld))
                    if words!='' and '-' not in words:
                        lastword=words
                    allLettersRecS="".join(str(valueword) for valueword in allLettersRec)
                    # Scrabble-score snapshots at the end of minutes 1..5.
                    if iii==59:
                        iScrabble1=scrabble_score(allLettersRecS.lower())
                    if iii==119:
                        iScrabble2=scrabble_score(allLettersRecS.lower())
                    if iii==179:
                        iScrabble3=scrabble_score(allLettersRecS.lower())
                    if iii==239:
                        iScrabble4=scrabble_score(allLettersRecS.lower())
                    if iii==299:
                        iScrabble5=scrabble_score(allLettersRecS.lower())
                    csvfile.write(str(phase)+','+str(playerid)+','+str(nd)+','+str(allLettersRecS)+','+str(iii)+','+str(requestsSent)+','+str(repliesReceived)+','+str(requestsReceived)+','+str(repliesSent)+','+str(words)+','+str(ld)+'\n')
                    csvfileall.write(str(phase)+','+str(playerid)+','+str(nd)+','+str(allLettersRecS)+','+str(iii)+','+str(requestsSent)+','+str(repliesReceived)+','+str(requestsReceived)+','+str(repliesSent)+','+str(words)+','+str(ld)+'\n')
                    # Heat map: number of requests this player sent this second.
                    countrequestsSent=0
                    if '-' in requestsSent:
                        listrequestsSent=requestsSent.split('-')
                        countrequestsSent=len(listrequestsSent)
                    else:
                        if requestsSent!='':
                            countrequestsSent=1
                    csvfileheat.write(str(countplayer)+'\t'+str(countbin)+'\t'+str(countrequestsSent)+'\n')
            # Fraction of this player's words spelled only with their initial letters.
            countOwnWords=0
            for rowword in allwords:
                wordfile=rowword.lower()
                if is_allowed_specific_char(initiallettersString.lower(),wordfile)==True:
                    countOwnWords=countOwnWords+1
            numwords=len(allwords)
            fracWords=0
            initiallettersScrabble=scrabble_score(initiallettersString.lower())
            if numwords>0:
                fracWords=float(countOwnWords/numwords)
            csvfileallP.write(str(phase)+','+str(playerid)+','+str(n)+','+str(d)+','+str(numwords)+','+str(fracWords)+','+str(initiallettersString)+','+str(initiallettersScrabble)+','+str(iScrabble1)+','+str(iScrabble2)+','+str(iScrabble3)+','+str(iScrabble4)+','+str(iScrabble5)+'\n')
### -----------------------------
### Start.
def main(filename,schemaname):
    """Validate *filename* against the JSON schema *schemaname*, then emit all h8 CSVs."""
    value=validateJson.validate(schemaname,filename)
    # NOTE(review): this compares against the *string* 'False' — confirm that
    # validateJson.validate returns strings; a boolean False would slip through.
    if value=='False':
        sys.exit()
    json_file = open(filename, 'r')
    json_data = json.load(json_file)
    numlines= (len(json_data))
    #print(numlines)
    # Combined output files shared across all (n, d) conditions.
    if not os.path.exists(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/all'):
        os.makedirs(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/all')
    csvfileallP = open(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/all/tsDataParameters.csv', 'w')
    csvfileall = open(os.getcwd()+'/data-analytics-pipeline/test/results/h8/output/all/tsData.csv', 'w')
    csvfileall.write('session,player,type,letters,time,requestsSent,repliesReceived,requestsReceived,repliesSent,words,LevenshteinDistance\n')
    csvfileallP.write('session,player,neighbors,n,numWords,fracWordsOwnL,initialLetters,iLScrabbleSco,iLScrabbleSco1,iLScrabbleSco2,iLScrabbleSco3,iLScrabbleSco4,iLScrabbleSco5\n')
    # Dictionary word list, passed through to filescsv (currently unused there).
    with open(os.getcwd()+'/data-analytics-pipeline/src/h8/all_words.txt', 'r') as f:
        all_words_file = f.readlines()
    print("num words:",len(all_words_file))
    # One filescsv run per (n, d, windowsize, numseconds) condition.
    for i in range(numlines):
        actionrequest=json_data[i]["features"]
        for index,actions in enumerate(actionrequest):
            n=actionrequest[index]["n"]
            d=actionrequest[index]["d"]
            windowsize=actionrequest[index]["windowsize"]
            numseconds=actionrequest[index]["numseconds"]
            #if not os.path.exists(os.getcwd()+'/data-analytics-pipeline/test/results/h7/output/'+action):
            #    os.makedirs(os.getcwd()+'/data-analytics-pipeline/test/results/h7/output/'+action)
            phases=actionrequest[index]["phases"]
            filescsv(n,d,numseconds,len(phases),phases,csvfileall,windowsize,all_words_file,csvfileallP)
    print (" -- h8 --")
    print (" -- good termination --")
## --------------------------
## Execution starts.
if __name__ == '__main__':
    # Expect exactly two arguments: the JSON data file and its schema.
    if (len(sys.argv) != 3):
        print (" Error. Incorrect usage.")
        # Fixed: the old message said "infile outfile", but the second
        # argument is a JSON schema file, not an output file.
        print (" usage: exec jsonfile schemafile.")
        print (" Halt.")
        quit()
    filename=sys.argv[1]
    schemaname=sys.argv[2]
    main(filename,schemaname)
|
from utility import dataset_function as reader
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
from sacred import Experiment
def fill_categorical_features(data):
    """Cast low-cardinality and object columns to category, filling NaNs with 'MISSING'.

    Columns with fewer than 10 distinct values are treated as categorical.
    Mutates and returns *data*.
    """
    for column in data.columns:
        if data[column].nunique() < 10:
            data[column] = data[column].astype(object)
    # Fixed: `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `object` is the supported, equivalent spelling.
    for column in data.select_dtypes(include=[object]):
        data[column] = data[column].astype('category')
        if data[column].isnull().any():
            data[column] = data[column].cat.add_categories(['MISSING'])
            data[column] = data[column].fillna('MISSING')
    return data
def get_categorical_stat(data):
    """One-hot the previous-application categorical columns and count them per application.

    Returns a frame indexed by application_number whose columns are
    `<dummy>_count` totals; `prev_application_number_count` ends up as the
    number of previous-application rows per application.
    """
    columns = ['name_contract_type',
               'name_contract_status', 'name_payment_type', 'code_reject_reason',
               'name_type_suite', 'name_client_type', 'name_goods_category',
               'name_portfolio', 'name_product_type', 'name_yield_group',
               'nflag_insured_on_approval']
    stat_categorical = pd.DataFrame(data=data['application_number'], columns=['application_number'])
    for column in columns:
        # One dummy column per category level, prefixed with the source column name.
        stat_categorical = pd.concat([stat_categorical, pd.get_dummies(data[column], prefix=str(column))], axis=1);
    # Constant 1 per row so the groupby-sum below also yields a row count.
    stat_categorical['prev_application_number'] = 1
    stat_categorical = stat_categorical.groupby(by=['application_number']).sum()
    stat_categorical.columns = [f"{x}_count" for x in stat_categorical.columns]
    return stat_categorical
def get_stat_numbers_by_features(data, feature=None, value_feature=None):
    """Aggregate numeric previous-application columns per application_number.

    When *value_feature* is None the whole frame is used (and *feature* is
    ignored; the resulting column prefix is 'All'); otherwise only rows where
    data[feature] == value_feature are aggregated.  Produces
    min/max/mean/nanmedian/var per column, prefixed `<value_feature>_`.
    """
    columns = ['amount_annuity', 'amount_credit', 'amount_goods_payment',
               'amount_payment', 'amt_application', 'application_number',
               'cnt_payment', 'days_decision', 'days_first_drawing',
               'days_first_due', 'days_last_due', 'days_last_due_1st_version',
               'days_termination']
    if value_feature is None:
        stat_by_feature = data.loc[:, columns].copy()
        value_feature = 'All'
    else:
        stat_by_feature = data.loc[data[feature] == value_feature, columns].copy()
    # Derived feature: goods price minus credit granted.
    stat_by_feature['diff_amount_goods_amount_credit'] = stat_by_feature['amount_goods_payment'] - stat_by_feature[
        'amount_credit']
    # NOTE(review): passing np.nanmedian to .agg emits a FutureWarning on
    # recent pandas; "median" would be the supported spelling, but it also
    # changes the generated column name — confirm before switching.
    stat_by_feature = stat_by_feature.groupby(by="application_number").agg(
        ["min", "max", "mean", np.nanmedian, "var"]).reset_index()
    # Flatten the (column, stat) MultiIndex and prefix with the filter value.
    stat_by_feature.columns = ["_".join(x) for x in stat_by_feature.columns.ravel()]
    stat_by_feature.columns = [f"{value_feature}_{x}" for x in stat_by_feature.columns]
    stat_by_feature = stat_by_feature.rename(columns={f'{value_feature}_application_number_': 'application_number'})
    return stat_by_feature
def add_preffix(data, prefix):
    """Prefix every column name with *prefix*, keeping application_number unchanged."""
    data.columns = [f"{prefix}_{name}" for name in data.columns]
    return data.rename(columns={f"{prefix}_application_number": "application_number"})
class AppHistory(TransformerMixin):
    """Sklearn-style transformer joining previous-application aggregates onto X.

    All aggregates are precomputed once from the history file at construction
    time, then left-merged on application_number in transform().
    """
    # Precomputed aggregate frames, all keyed by application_number.
    stat_categorical: pd.DataFrame
    stat_by_Canceled: pd.DataFrame
    stat_by_Approved: pd.DataFrame
    stat_by_all: pd.DataFrame

    def __init__(self, filename: str, ex: Experiment):
        data = reader.get_input(filename, ex)
        data = fill_categorical_features(data)
        self.stat_categorical = add_preffix(get_categorical_stat(data), 'ah')
        self.stat_by_Canceled = add_preffix(get_stat_numbers_by_features(data, 'name_contract_status', 'Canceled'), 'ah')
        self.stat_by_Approved = add_preffix(get_stat_numbers_by_features(data, 'name_contract_status', 'Approved'), 'ah')
        # NOTE(review): 'All' is passed as *feature* but is ignored because
        # value_feature is None — confirm this is intentional.
        self.stat_by_all = add_preffix(get_stat_numbers_by_features(data, 'All', None), 'ah')
        # Drop the raw history frame early; only the aggregates are kept.
        del data

    def fit(self, X, y=None):
        """Stateless: nothing to fit."""
        return self

    def transform(self, X):
        """Left-join all precomputed aggregates onto X by application_number."""
        assert isinstance(X, pd.DataFrame)
        Xt = X.merge(self.stat_categorical, how="left", on="application_number")
        Xt = Xt.merge(self.stat_by_Canceled, how="left", on="application_number")
        Xt = Xt.merge(self.stat_by_Approved, how="left", on="application_number")
        Xt = Xt.merge(self.stat_by_all, how="left", on="application_number")
        return Xt
|
# NOTE(review): the original comments were mis-encoded Chinese (mojibake) and
# several were split mid-character across physical lines, which made the file
# a syntax error; they are replaced with English comments here.
import openpyxl  # third-party workbook library: pip install openpyxl
import re


def Exceldivide(file_dir):
    """Copy the first date and e-mail found in Sheet1!A1:C3 into a new Sheet2.

    Returns the workbook object so the caller can save it.
    """
    wb = openpyxl.load_workbook(file_dir)
    # wb.get_sheet_by_name() is deprecated in openpyxl; index access is the
    # supported equivalent.  The dead `tuple(sheet['A1':'C3'])` calls whose
    # results were discarded have been removed.
    sheet = wb['Sheet1']
    wb.create_sheet('Sheet2')
    sheet2 = wb['Sheet2']

    date_pattern = re.compile(r'\d\d/\d\d/\d\d\d\d')                 # date format
    mail_pattern = re.compile(r'[a-zA-Z0-9_]+@[a-zA-Z0-9-]+.com')    # e-mail format

    a = None
    b = None
    # Scan the source range; keep the last matching date / e-mail.  The
    # original called .group() on a possibly-None date match and crashed on
    # any cell without a date (or with a None value); both are guarded now.
    for rows in sheet['A1':'C3']:
        for cell in rows:
            value = cell.value
            if value is None:
                continue
            date_match = date_pattern.search(value)
            if date_match:
                a = date_match.group()
            mail_match = mail_pattern.search(value)
            if mail_match:
                b = mail_match.group()

    # Write the captured date into column A of the new sheet.
    for rows in sheet2['A1':'A9']:
        for cell in rows:
            cell.value = a
            print(cell.coordinate, cell.value)
    # Write the captured e-mail into column B of the new sheet.
    for rows in sheet2['B1':'B9']:
        for cell in rows:
            cell.value = b
            print(cell.coordinate, cell.value)
    return wb


g = Exceldivide(r'c:\Users\asus\Desktop\Python\้ฃๅPython็ฌ่ซ็ฒพ่ฟ\็ฌ่ซ้ถๆฎต็ปไน \19ๅนด7ๆ\7.09\source.xlsx')
g.save(r'c:\Users\asus\Desktop\Python\้ฃๅPython็ฌ่ซ็ฒพ่ฟ\็ฌ่ซ้ถๆฎต็ปไน \19ๅนด7ๆ\7.09\11_copy.xlsx')
class HashTable():
    """Fixed-capacity hash table of strings using linear probing.

    insert/delete return 1 on success and 0 on failure (the original fell
    through and returned None implicitly after printing the failure message).
    """

    def __init__(self, capacity):
        self._capacity = capacity
        self._data = [None for i in range(capacity)]

    def hash(self, str_val):
        """Position-weighted character sum of *str_val*, modulo capacity."""
        val = 0
        for i in range(len(str_val)):
            val += (i + 1) * ord(str_val[i])
        return val % self._capacity

    def insert(self, str_val):
        """Insert *str_val*; return 1 on success, 0 when the table is full."""
        key = self.hash(str_val)
        # Fixed: `is None` instead of `== None` — identity test, and it avoids
        # invoking __eq__ on stored values.
        if self._data[key] is None:
            self._data[key] = str_val
            return 1
        # Linear probing: visit each successive slot (wrapping) exactly once.
        for i in range(self._capacity):
            current_index = (key + 1) % self._capacity
            key += 1
            if self._data[current_index] is None:
                self._data[current_index] = str_val
                return 1
        print("Couldn't insert")
        return 0  # explicit failure value instead of implicit None

    def print(self):
        """Print the raw slot list (method name intentionally mirrors builtin print)."""
        print(self._data)

    def delete(self, str_val):
        """Remove one occurrence of *str_val*; return 1 on success, 0 if absent."""
        key = self.hash(str_val)
        if self._data[key] == str_val:
            self._data[key] = None
            return 1
        for i in range(self._capacity):
            current_index = (key + 1) % self._capacity
            key += 1
            if self._data[current_index] == str_val:
                self._data[current_index] = None
                return 1
        print("Couldn't delete")
        return 0
|
# Given an integer array nums that may contain duplicates, return all possible subsets (the power set).
# The solution set must not contain duplicate subsets. Return the solution in any order.
# Example 1:
# Input: nums = [1,2,2]
# Output: [[],[1],[1,2],[1,2,2],[2],[2,2]]
# Example 2:
# Input: nums = [0]
# Output: [[],[0]]
class Solution:
    def subsetsWithDup(self, nums: list[int]) -> list[list[int]]:
        """Return all distinct subsets (the power set) of nums, which may contain duplicates.

        Sort first so equal values are adjacent; when nums[i] repeats the
        previous value, only the subsets created in the previous round are
        extended, which prevents duplicate subsets.

        Fixed: the original annotated with typing.List without importing it,
        raising NameError when the class body executes; the builtin generic
        list[...] (Python 3.9+) needs no import.
        """
        result = [[]]
        startIndex = 0
        endIndex = 0
        nums.sort()
        for i in range(len(nums)):
            startIndex = 0
            if i > 0 and nums[i] == nums[i - 1]:
                # Duplicate: extend only the subsets added in the previous round.
                startIndex = endIndex
            endIndex = len(result)  # snapshot of subsets existing before this element
            for j in range(startIndex, endIndex):
                subset = result[j].copy()
                subset.append(nums[i])
                result.append(subset)
        return result
|
# -*- coding: utf-8 -*-
import xlrd
from django.conf import settings
from budgetelem.models import Document
import sys
import unicodedata
# Cap recursion depth: make_json_element recurses once per tree level.
# NOTE(review): 200 is well below CPython's default of 1000 — confirm this
# limit is intentional and sufficient for deep budget hierarchies.
sys.setrecursionlimit(200)
class ExcelParser(object):
    """Parses an uploaded budget spreadsheet (via xlrd) into JSON/CSV trees."""

    def read_excel(self, excel_name):
        """Read the uploaded workbook described by *excel_name* and build the budget tree.

        *excel_name* is a Document-like object carrying the upload plus parse
        settings (unit, sheet_number, sum_row, name/amount columns, row range).
        Returns (flag_of_end, json_obj): flag_of_end is 1 when hierarchy
        inference hit its iteration cap, 0 otherwise.  Side effect: stores the
        CSV rendering on excel_name.csv_obj and saves it.
        """
        # Unit of the sheet's amounts: '1' = units, '2' = thousands, '3' = millions.
        if excel_name.unit == '1':
            unit_koef = 1
        elif excel_name.unit == '2':
            unit_koef = 1000
        elif excel_name.unit == '3':
            unit_koef = 1000000
        list_elems = []
        path = settings.MEDIA_ROOT+excel_name.docfile.name
        book = xlrd.open_workbook(path)
        # print "The number of worksheets is", book.nsheets
        # print "Worksheet name(s):", book.sheet_names()
        sh = book.sheet_by_index( int(excel_name.sheet_number)-1 )
        # Root element ('id0'): the grand-total row of the budget.
        name = sh.cell_value(int(excel_name.sum_row)-1, int(excel_name.name_column)-1)
        amount = sh.cell_value(int(excel_name.sum_row)-1, int(excel_name.amount_column)-1)
        if amount == "": amount = 0
        # Amounts may be text with non-ASCII thousands separators and comma
        # decimal points; normalize before float().  The bare except keeps
        # numeric cells (already floats) working.
        # NOTE(review): .encode(...).replace(" ", ...) mixes bytes and str —
        # this looks like Python 2 code; confirm the runtime.
        try:
            amount = unicodedata.normalize('NFKD', amount).encode('ascii','ignore')
            amount = amount.replace(" ", "").replace(",", ".")
        except:
            pass
        if amount == "":
            amount = 0
        else:
            amount = float(amount)*unit_koef
        list_temp = {'name': name, 'amount': amount, 'deep': '-', 'id':'id0', 'parent': ''}
        list_elems.append(list_temp)
        id_count = 1
        # Budget data rows proper: collect every non-empty, non-zero amount.
        for rx in range(int(excel_name.start_budget)-1, int(excel_name.finish_budget)):
            name = sh.cell_value(rx, int(excel_name.name_column)-1)
            amount = sh.cell_value(rx, int(excel_name.amount_column)-1)
            if amount == "": amount = 0
            try:
                amount = unicodedata.normalize('NFKD', amount).encode('ascii','ignore')
                amount = amount.replace(" ", "").replace(",", ".")
            except:
                pass
            amount = float(amount)*unit_koef
            if sh.cell_value(rx, int(excel_name.amount_column)-1) != "" and amount != 0:
                list_temp = {'name': name, 'amount': amount, 'deep': '-', 'id':'id'+str(id_count), 'parent': ''}
                list_elems.append(list_temp)
                id_count += 1
        basic_list = list_elems
        # Mark everything as top level, then infer parent/child links.
        for elem in list_elems:
            elem['deep'] = '1'
        flag_of_end, basic_list = make_deep(list_elems)
        # print flag_of_end
        json_obj = make_json(basic_list)
        csv_budget = make_csv(basic_list)
        excel_name.csv_obj = csv_budget
        excel_name.save()
        return flag_of_end, json_obj
def make_csv(csv_budget):
    """Serialise budget rows as CSV lines: id,parent,"name",amount."""
    return "".join(
        row['id'] + "," + row['parent'] + ',"' + row['name'] + '",' + str(row['amount']) + "\n"
        for row in csv_budget
    )
def make_json(list_elems):
    """Build the JSON string for the whole budget tree rooted at element 'id0'."""
    global csv_budget
    # Side effect kept from the original: expose the element list module-wide.
    csv_budget = list_elems
    return make_json_element(list_elems, 'id0', "")
def make_json_element(list_elems, id_elem, json_budget):
    """Append the JSON rendering of element *id_elem* (and its subtree) to *json_budget*.

    Builds the JSON by raw string concatenation; sibling separators are fixed
    up afterwards by replacing '}]{' with '},{'.
    """
    for elem in list_elems:
        flag = 0  # NOTE(review): set below but never read — looks vestigial.
        if elem['id'] == id_elem and elem['amount'] != 0:
            # Name quotes/newlines are sanitised so the literal stays valid JSON.
            json_budget = json_budget + '{ \"label\":\"'+ elem['name'].replace('"', '\'').replace("\n", " ") + '\", \"amount\":\"' + str(elem['amount']) + '\"'
            # Open a children array only if at least one child exists.
            for elem2 in list_elems:
                if elem2['parent'] == id_elem:
                    json_budget = json_budget + ',\"children\": ['
                    break
            for elem2 in list_elems:
                if elem2['parent'] == id_elem:
                    flag = 1
                    json_budget = make_json_element(list_elems, elem2['id'], json_budget)
                    # NOTE(review): ']' appended per child; the '}]{' -> '},{'
                    # replace below relies on this — confirm intended nesting.
                    json_budget = json_budget + ']'
            json_budget = json_budget + '}'
    json_budget = json_budget.replace('}]{', '},{')
    return json_budget
def count_elems(children_list):
    """Count elements still at depth '1', excluding the root (hence the -1)."""
    return sum(1 for elem in children_list if elem['deep'] == '1') - 1
#def print_without_children(children_list):
# for elem in children_list:
# if elem['deep'] == '1':
# print elem['id'], elem['parent'], elem['name'], elem['amount']
# raw_input()
def make_deep(children_list):
    """Infer the budget's parent/child hierarchy from consecutive amount sums.

    Repeatedly asks find_children to match a run of elements whose amounts
    sum to a preceding element's amount, collapsing matched children out of
    the working list, until only the root remains or 10000 iterations pass.
    Returns (flag, children_list): flag 1 = iteration cap hit (likely
    failure), 0 = converged.
    """
    stop_loop = 0
    list_elems = children_list
    list_id = []  # NOTE(review): unused here; find_children builds its own.
    count_elem = len(list_elems) - 1
    while len(list_elems) > 1 and stop_loop < 10000:
        children_list = find_children(list_elems, children_list, count_elem)
        # Rebuild the working list from elements not yet claimed as children.
        second_list = []
        for elem1 in children_list:
            if elem1['deep'] != '2':
                second_list_element = {'name': elem1['name'], 'amount': elem1['amount'], 'deep': '1', 'id': elem1['id'], 'parent': elem1['parent']}
                second_list.append(second_list_element)
        if list_elems == second_list:
            # No progress this round: retry the match starting one element earlier.
            if count_elem > 0:
                count_elem -= 1
            else:
                count_elem = len(second_list) - 1
        else:
            count_elem = len(second_list) - 1
        list_elems = second_list
        stop_loop += 1
    if stop_loop == 10000:
        return 1, children_list
    else:
        return 0, children_list
def find_children(list_elems, children_list, count_elem):
    """Walk backwards from position *count_elem*, accumulating rounded amounts.

    If the running sum equals the amount of an earlier element, that element
    is taken to be the parent of the accumulated run, and children_list is
    updated via make_parent_for_children.  Amounts are rounded to 2 decimals
    to tolerate float noise from the spreadsheet.
    """
    list_id = []
    summ = round(list_elems[count_elem]['amount'], 2)
    list_id.append(list_elems[count_elem]['id'])
    while count_elem > 0:
        if round(list_elems[count_elem - 1]['amount'], 2) == round(summ, 2):
            children_list = make_parent_for_children(children_list, list_id, list_elems[count_elem - 1]['id'])
            break
        else:
            summ += round(list_elems[count_elem - 1]['amount'],2)
            list_id.append(list_elems[count_elem-1]['id'])
            count_elem -= 1
    return children_list
def make_parent_for_children(children_list, list_of_id, parent_id):
    """Mark every element whose id is in *list_of_id* as a child of *parent_id*.

    Matched elements get deep='2' and their parent set; the (mutated) list
    is returned for convenience.
    """
    wanted = set(list_of_id)
    for entry in children_list:
        if entry['id'] in wanted:
            entry['deep'] = '2'
            entry['parent'] = parent_id
    return children_list
|
# Can be used to flatten nested lists of any depth.
def flatten(x):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list input is wrapped in a one-element list.  Written with `def`
    rather than an assigned lambda (PEP 8 E731), and with isinstance() so
    list subclasses flatten too (the original's `type(x) is list` excluded
    them).
    """
    if isinstance(x, list):
        return [y for l in x for y in flatten(l)]
    return [x]
|
import json
import os
import getpass
from cryptography.fernet import Fernet
from common import *
def check_config(config_path=os.path.join(BASE_DIR, "config.json")):
    """Ensure a config file exists at *config_path*, interactively creating one if not."""
    if not os.path.exists(config_path):
        logger.info("A config does not exist, please make one")
        with open(config_path, "w") as config_file:
            json.dump(create_config(), config_file, indent=4)
    else:
        logger.debug("Config file exists")
def get_config(config_path=os.path.join(BASE_DIR, "config.json")):
    """Load and return the JSON config, creating it first if missing.

    Bug fix: check_config is now given *config_path* — previously it was
    called with no argument, so a caller-supplied path was never checked
    (and any interactive creation happened at the default location instead).
    """
    check_config(config_path)
    with open(config_path, "r") as config_json:
        return json.load(config_json)
def encrypt_password(key, password):
    """Encrypt *password* (bytes) with the Fernet *key*; returns the token bytes."""
    return Fernet(key).encrypt(password)
def decrypt_password(key, password_encrypted):
    """Decrypt a Fernet token produced by encrypt_password; returns plaintext bytes."""
    return Fernet(key).decrypt(password_encrypted)
def create_config():
    """Interactively collect Reddit/Instagram credentials and return a config dict.

    Passwords are encrypted with a freshly generated Fernet key that is
    stored (as text) in the same config — this is obfuscation rather than
    real secrecy, since the key travels with the data.
    """
    encrypt_key = Fernet.generate_key()
    logger.info("Enter your Reddit client ID")
    client_id = input()
    logger.info("Enter your Reddit client secret")
    client_secret = input()
    logger.info("Enter your Reddit username")
    reddit_username = input()
    logger.info("Enter your Reddit password")
    # getpass: passwords are read without echoing to the terminal.
    reddit_password = getpass.getpass()
    logger.info("Enter your Instagram username")
    instagram_username = input()
    logger.info("Enter your Instagram password")
    instagram_password = getpass.getpass()
    logger.info("Enter Instagram tags")
    instagram_tags = input()
    # Fernet operates on bytes; encode before encrypting, decode for JSON.
    reddit_password_encrypted = encrypt_password(encrypt_key, reddit_password.encode('UTF-8'))
    instagram_password_encrypted = encrypt_password(encrypt_key, instagram_password.encode('UTF-8'))
    return {
        "encrypt_key": encrypt_key.decode('UTF-8'),
        "reddit": {
            "client_id": client_id,
            "client_secret": client_secret,
            "username": reddit_username,
            "password": reddit_password_encrypted.decode('UTF-8')
        },
        "instagram": {
            "username": instagram_username,
            "password": instagram_password_encrypted.decode('UTF-8'),
            "tags": instagram_tags
        }
    }
|
# s=int(input()) #s=7
# print(s+2) #s+2=9
# print(s%2) #s%2=1 -- remainder of dividing s by 2
# print(s+2-(s%2)) #s+2-1
# Round the input up to the next even number: add 2, subtract the parity bit.
number = int(input())
print(number + 2 - number % 2)
|
from bs4 import BeautifulSoup

# Small BeautifulSoup demo: selecting <a> tags by text, by count limit, and
# by a list of exact strings.  (Original comments were in Korean; translated.)
html = """
<html><body>
<ul>
<li><a href="http://www.naver.com">naver</a></li>
<li><a href="http://www.daum.net">daum</a></li>
<li><a href="http://www.daum.com">daum</a></li>
<li><a href="http://www.goolgle.com">google</a></li>
<li><a href="http://www.tistory.com">tistory</a></li>
</ul>
</body></html>
"""
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all("a")
#print(type(links))
a = soup.find_all("a", string="daum")
print('a', a)
# find() fetches only the first match
#b = soup.find("a")
# fetch only the first three matches
b = soup.find_all("a", limit=3)
print('b', b)
# string= also accepts a list (or a regex) — handy for matching several texts
c = soup.find_all(string=["naver","google"])
print('c', c)
for a in links:
    #print('a', a);
    href = a.attrs['href']
    txt = a.string
    print('txt >> ', txt, 'href >> ', href)
|
#!/Users/bernardo.branco/Documents/Personal/Projects/sidewake/venv/bin/python
# Minimal django-admin entry point: delegates straight to Django's command line.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
#!/usr/bin/python
import os
import json
import base64
import io
import re
import uuid
import time
import shutil
import argparse
import shelve
import sys
import glob
import subprocess
import socket
from pyDes import *
# NOTE: this script is Python 2 (print statements, raw_input).
cur_dir = os.path.dirname(os.path.realpath(__file__))

# Detect the distro's package-manager family from well-known paths.
package_type = None
if os.path.exists('/etc/yum.repos.d/'):
    package_type = 'rpm'
elif os.path.exists('/etc/apt/sources.list'):
    package_type = 'deb'

missing_packages = []

try:
    import ldap
except:
    missing_packages.append('python-ldap')

# Offer to install missing Python packages with the native package manager.
if missing_packages:
    packages_str = ' '.join(missing_packages)
    result = raw_input("Missing package(s): {0}. Install now? (Y|n): ".format(packages_str))
    if result.strip() and result.strip().lower()[0] == 'n':
        sys.exit("Can't continue without installing these packages. Exiting ...")
    if package_type == 'rpm':
        # EPEL provides python-ldap on RHEL/CentOS.
        cmd = 'yum install -y epel-release'
        os.system(cmd)
        cmd = 'yum clean all'
        os.system(cmd)
        cmd = "yum install -y {0}".format(packages_str)
    else:
        os.system('apt-get update')
        cmd = "apt-get install -y {0}".format(packages_str)
    print "Installing package(s) with command: "+ cmd
    os.system(cmd)
    import ldap

# Accept any TLS certificate when talking to LDAP (self-signed internal certs).
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)

# Determine this host's outbound IP by "connecting" a UDP socket towards
# 8.8.8.8 (no packet is actually sent for a UDP connect), then reverse-resolve
# the address to a hostname.
testSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
detectedIP = [(testSocket.connect(('8.8.8.8', 80)),
               testSocket.getsockname()[0],
               testSocket.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
hostname = socket.gethostbyaddr(detectedIP)[0]
def get_ldap_admin_serevers_password(ox_ldap_properties_file):
    """Return (bind password, server list, bind DN) from ox-ldap.properties.

    The bind password is stored base64-encoded and 3DES-encrypted with the
    key read from /etc/gluu/conf/salt; it is decrypted before returning.
    Raises if the expected properties are missing from the file (passwd /
    servers / binddn would then be unbound at the return).
    """
    salt_file = open('/etc/gluu/conf/salt').read()
    salt = salt_file.split('=')[1].strip()
    for l in open(ox_ldap_properties_file):
        if l.startswith('bindPassword'):
            s = l.split(':')[1].strip()
            # Fixed: removed an unused `engine = triple_des(...)` object that
            # was constructed here on every match and never used.
            cipher = triple_des(salt)
            decrypted = cipher.decrypt(base64.b64decode(s), padmode=PAD_PKCS5)
            passwd = decrypted
        elif l.startswith('servers'):
            # "host1:port,host2:port,..." -> list of host names only.
            s = l.split(':')[1].strip()
            servers_s = s.split(',')
            servers = [ss.split(':')[0] for ss in servers_s]
        elif l.startswith('bindDN'):
            binddn = l.split(':')[1].strip()
    return passwd, servers, binddn
class GluuUpdater:
def __init__(self):
self.gluu_app_dir = '/opt/gluu/jetty'
self.backup_time = time.strftime('%Y-%m-%d.%H:%M:%S')
self.update_version = '3.1.8'
self.update_base_dir = '/opt/upd/{}-sp1-upg'.format(self.update_version)
self.backup_folder = '{0}/backup_{1}'.format(self.update_base_dir, self.backup_time)
self.app_dir = os.path.join(self.update_base_dir,'app')
self.jreArchive = "amazon-corretto-8.222.10.1-linux-x64.tar.gz"
for cdir in (self.app_dir, self.backup_folder):
if not os.path.exists(cdir):
self.logIt("Creating folder " + cdir)
os.makedirs(cdir)
def logIt(self, msg):
with open('update.log', 'a') as w:
w.write('%s %s\n' % (time.strftime('%X %x'), msg))
def backup_(self, f, keep=False):
if os.path.exists(f):
if keep:
self.run(['cp','-r', '-f', f, self.backup_folder])
else:
self.run(['mv', f, self.backup_folder])
def run(self, args):
msg = 'Running ' + ' '.join(args)
self.logIt(msg)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
output, err = p.communicate()
if output:
self.logIt(output)
if err:
self.logIt(err)
return output
def download_apps(self):
self.run(['wget', '-nv', 'https://ox.gluu.org/maven/org/xdi/oxshibbolethIdp/{0}.Final/oxshibbolethIdp-{0}.Final.war'.format(self.update_version), '-O', os.path.join(self.app_dir, 'idp.war')])
self.run(['wget', '-nv', 'https://ox.gluu.org/maven/org/xdi/oxtrust-server/{0}.Final/oxtrust-server-{0}.Final.war'.format(self.update_version), '-O', os.path.join(self.app_dir, 'identity.war')])
self.run(['wget', '-nv', 'https://ox.gluu.org/maven/org/xdi/oxauth-server/{0}.Final/oxauth-server-{0}.Final.war'.format(self.update_version), '-O', os.path.join(self.app_dir, 'oxauth.war')])
self.run(['wget', '-nv', 'https://ox.gluu.org/maven/org/xdi/oxShibbolethStatic/{0}.Final/oxShibbolethStatic-{0}.Final.jar'.format(self.update_version), '-O', os.path.join(self.app_dir, 'shibboleth-idp.jar')])
self.run(['wget', '-nv', 'https://ox.gluu.org/npm/passport/passport-{}.tgz'.format(self.update_version), '-O', os.path.join(self.app_dir, 'passport.tgz')])
self.run(['wget', '-nv', 'https://ox.gluu.org/npm/passport/passport-version_{}-node_modules.tar.gz'.format(self.update_version), '-O', os.path.join(self.app_dir, 'passport-node_modules.tar.gz')])
self.run(['wget', '-nv', 'https://d3pxv6yz143wms.cloudfront.net/8.222.10.1/'+self.jreArchive, '-O', os.path.join(self.app_dir, self.jreArchive)])
def updateLdapConfig(self):
self.ldap_bind_pw, self.ldap_servers, self.ldap_bind_dn = get_ldap_admin_serevers_password('/etc/gluu/conf/ox-ldap.properties')
ldap_host = self.ldap_servers[0]
for i in range(5):
try:
self.conn = ldap.initialize('ldaps://{0}:1636'.format(ldap_host))
self.conn.simple_bind_s(self.ldap_bind_dn, self.ldap_bind_pw)
break
except:
print "Can't connect to LDAP Server. Retrying in 5 secs ..."
time.sleep(5)
else:
sys.exit("Max retry reached. Exiting...")
#update client uris
result = self.conn.search_s('o=gluu', ldap.SCOPE_SUBTREE,'(&(objectClass=oxTrustConfiguration)(ou=oxtrust))', ['oxTrustConfApplication'])
if result:
dn = result[0][0]
oxTrustConfApplication = json.loads(result[0][1]['oxTrustConfApplication'][0])
oxAuthClientId = oxTrustConfApplication['oxAuthClientId']
oxTrustConfApplication['loginRedirectUrl'] = 'https://{0}/identity/authcode.htm'.format(hostname)
oxTrustConfApplication['logoutRedirectUrl'] = '"https://{0}/identity/finishlogout.htm'.format(hostname)
oxTrustConfApplication_str = json.dumps(oxTrustConfApplication, indent=2)
self.conn.modify_s(dn, [( ldap.MOD_REPLACE, 'oxTrustConfApplication', oxTrustConfApplication_str)])
result2 = self.conn.search_s('o=gluu', ldap.SCOPE_SUBTREE,'(&(objectClass=oxAuthClient)(inum={0}))'.format(oxAuthClientId))
dn2 = result2[0][0]
changes = [
('oxAuthLogoutURI', ['https://{0}/identity/ssologout.htm'.format(hostname)]),
('oxAuthRedirectURI', [
'https://{0}/identity/scim/auth'.format(hostname),
'https://{0}/identity/authcode.htm'.format(hostname),
'https://{0}/cas/login'.format(hostname),
'https://{0}/identity/ssologout.htm'.format(hostname),
]),
('oxAuthPostLogoutRedirectURI', ['https://{0}/identity/finishlogout.htm'.format(hostname)]),
]
for attrib, val in changes:
self.conn.modify_s(dn2, [( ldap.MOD_REPLACE, attrib, val)])
else:
sys.exit("Can't find oxTrustConfiguration. Exiting...")
def updateWar(self):
for app in os.listdir(self.gluu_app_dir):
war_app = app+'.war'
new_war_app_file = os.path.join(self.app_dir, war_app)
if os.path.exists(new_war_app_file):
app_dir = os.path.join(self.gluu_app_dir, app, 'webapps')
cur_war = os.path.join(app_dir, war_app)
if os.path.exists(cur_war):
print "Backing up", war_app, "to", self.backup_folder
self.backup_(cur_war)
print "Updating", war_app
self.run(['cp', '-f', new_war_app_file, app_dir])
def updateJava(self):
print "Upgrading Java"
cacerts = []
#get host specific certs in current cacerts
cmd =['/opt/jre/bin/keytool', '-list', '-keystore', '/opt/jre/jre/lib/security/cacerts', '-storepass', 'changeit']
result = self.run(cmd)
for l in result.split('\n'):
if hostname in l:
ls=l.split(', ')
if ls and (hostname in ls[0]) and (not 'opendj' in l):
alias = ls[0]
crt_file = os.path.join(cur_dir, ls[0]+'.crt')
self.run(['/opt/jre/bin/keytool', '-export', '-alias', alias, '-file', crt_file, '-keystore', '/opt/jre/jre/lib/security/cacerts', '-storepass', 'changeit'])
cacerts.append((alias, crt_file))
for cur_version in glob.glob('/opt/jdk*'):
self.run(['rm', '-r', cur_version])
if os.path.islink('/opt/jre'):
self.run(['unlink', '/opt/jre'])
print "Extracting {} into /opt/".format(self.jreArchive)
self.run(['tar', '-xzf', os.path.join(self.app_dir, self.jreArchive), '-C', '/opt/', '--no-xattrs', '--no-same-owner', '--no-same-permissions'])
self.run(['ln', '-sf', '/opt/amazon-corretto-8.222.10.1-linux-x64', '/opt/jre'])
self.run(['chmod', '-R', '755', '/opt/jre/bin/'])
self.run(['chown', '-R', 'root:root', '/opt/jre'])
self.run(['chown', '-h', 'root:root', '/opt/jre'])
#import certs
for alias, crt_file in cacerts:
#ensure cert is not exists in keystore
result = self.run(['/opt/jre/bin/keytool', '-list', '-alias', alias, '-keystore', '/opt/jre/jre/lib/security/cacerts', '-storepass', 'changeit', '-noprompt'])
if 'trustedCertEntry' in result:
self.run(['/opt/jre/bin/keytool', '-delete ', '-alias', alias, '-keystore', '/opt/jre/jre/lib/security/cacerts', '-storepass', 'changeit', '-noprompt'])
self.run(['/opt/jre/bin/keytool', '-import', '-alias', alias, '-file', crt_file, '-keystore', '/opt/jre/jre/lib/security/cacerts', '-storepass', 'changeit', '-noprompt', '-trustcacerts'])
def fix_https_gluu_conf(self):
    """Extend the Apache Set-Cookie HttpOnly edit rule so it also exempts
    the opbs cookie (in addition to session_state)."""
    conf_path = '/etc/httpd/conf.d/https_gluu.conf'
    if not os.path.exists(conf_path):
        conf_path = '/etc/apache2/sites-available/https_gluu.conf'
    with open(conf_path, 'r') as conf_file:
        content = conf_file.read()
    old_rule = 'Header edit Set-Cookie ^((?!session_state).*)$ $1;HttpOnly'
    new_rule = 'Header edit Set-Cookie ^((?!opbs|session_state).*)$ $1;HttpOnly'
    content = content.replace(old_rule, new_rule)
    with open(conf_path, 'w') as conf_file:
        conf_file.write(content)
# --- Script entry point: parse options, run the update steps in order, then
# --- optionally replace Java after confirmation.
parser = argparse.ArgumentParser(description="This script upgrades OpenDJ gluu-servers (>3.0) to 4.0")
parser.add_argument('-o', '--online', help="online installation", action='store_true')
argsp = parser.parse_args()
updaterObject = GluuUpdater()
updaterObject.download_apps()
updaterObject.updateWar()
updaterObject.updateLdapConfig()
updaterObject.fix_https_gluu_conf()
# Default answer is yes: anything except a reply starting with 'n' upgrades Java.
update_java = raw_input("Do you want to replace java with {} [Y/n] ".format(updaterObject.jreArchive))
if not (update_java and update_java[0].lower() == 'n'):
    updaterObject.updateJava()
print """
Update is complete, please exit from container and restart gluu server
"""
|
import re
import unittest
class Solver(object):
    """Placeholder solver for the embedded puzzle test cases.

    solve() is intentionally unimplemented and returns None.
    """

    def __init__(self):
        pass

    def solve(self, inputs):
        """Solve the puzzle described by *inputs* (not implemented yet)."""
        return None
# Embedded fixtures: each line is `test("<input>", "<expected>")` in the style
# of the original (JS-like) harness; parsed line-by-line by Test.test_solve.
TEST_DATA='''
/*0*/ test("d3d4e3e4d9h7h9j3j4j7j9,f4f6g4g5g6h5h6", "5,3")
/*1*/ test("a1,s19", "0,0")
/*2*/ test("a1a2b1b2,r18r19s18s19", "1,1")
/*3*/ test("b1d1b2d2e2f2b5d5e5f5b6d6,b3d3b4d4", "7,2")
/*4*/ test("b2c2e2f2b3c3e3f3b5c5e5f5,d4", "8,0")
/*5*/ test("a1a18s1s18,a19", "306,0")
/*6*/ test("b11b15i11i15,a13h3m1q9", "28,0")
/*7*/ test("b7b8b15f7f8q8,d1d14j16n14o19p7", "4,0")
/*8*/ test("c3e9g17h4p17p19r12s13,c5c6d18k5k6k10n14p6", "0,8")
/*9*/ test("b7c12c13i8j12j13k7k16l1r18,a15b18d18f16h5j6m10n6n7o8", "7,0")
/*10*/ test("a12a17b10b17b19d7e12g3g17g19q18s2,c10c16f5g7i5j13k6l4n19o7p9q19", "10,0")
/*11*/ test("a19b2b3d2e13f5f14g15h1i7o2p19q3r17,a4a6a8a18b17c4c8f13h8j8l13n12n17p5", "0,8")
/*12*/ test("b13b14b17c4d12e3f16h5l19m2n1n19o3o4o14q13q16,d3d8d14e1e16f12g6j8o1p5p7q1r9r15r16s5s7", "0,6")
/*13*/ test("b6b17c4c9c12c16d14e14g5g9i10j10j13m8q2q5q18r1r6,b8c11e13f3h11h18j7m1m3m7n3n4n11o4p12q3q4q17s13", "0,3")
/*14*/ test("a9a10a15d2d16d17d18g18i11k11k15l14m15n5n11o1p6p14q11q14r7r15,a11c3c10c12d4f1f16f19g3g6g10i16k2k16l12n12o12p1r5r6s9s11", "0,28")
/*15*/ test("a10c4c16c19e12g11g17h1h9i8i12i17i19k18l5l16n10n19p12p19q5q9r6r9r16,b8b19c1c10d11d15e1f3f8f13i7j5j8k1k6l2l4l6l11m2m13p2r1r11s18", "0,0")
/*16*/ test("a12a19b6b7c13c19d5d12d19e8f7h1h14i3k4k5k15l10m3n3o2p18q11q17q19r11r14s18,c6c7c9e16f8f9f13g14h9h12h15j5j10j18k11k14k18n5n18o1o18p4p10p14r13s2s7s13", "21,0")
/*17*/ test("a8b18d6e6f12f16g9g12h15i7j5j6j12k2k10k11k19l6n18o15p6q6r1r2r5r6r9r17s1s19,a10a13a15b6b10c1c5c18d18e1e3e5e19g3h6j7j15j17k18l3l9m2m4m14m16n9o17q18s7s13", "8,8")
/*18*/ test("a3a15b17d1e4e19f1f10f13f15g8h3h6h11i6i12j18l5l8l9l11m3m8n7n8n18o3o19p13r7r17s2s11,a6c1c2c5c13d13e16f2f5f14g1g2g4g5g14i10j9k1k7k15m2m4n13n14n19p6p18q7q13r3r18s17s18", "0,3")
/*19*/ test("a6a8a9b1b12d1d3d4f4f13f16g7h10j13j15j17k2k6k7k15k17l2l17m5m8n4n12n18o11o13p9q6r15s2s3s17,a2b7b14c2c8c19d8e1e3e15f5f14g9g14h15i11i18k16l10l11l18m9o2o3o5p10p17q2q5q19r1r6r18s1s7s19", "0,6")
/*20*/ test("a2b1c1c5c12c14d12e2e17g4g7g19h3h9h18i2i8i14j3j15j17k13k19l8l15l16l18n8n12o14o15o18o19p17q6q9q12r3r5,a11a14a19b7c11e6e10e13e14e19f3f4f14h15h17i1j19k1k2k3k7k10l11l19m4m13n1n5n13n16o1o7o17p5q16r2r8r12s6", "0,24")
/*21*/ test("a15a18b15b16b18c4c10c13c18d7d10e15g6g9g14i6i19j9j15k1k5k16k17l2l14l18m5m9n3n7n14o5o7o8p13p17q5q9r11r12r16s11,a6a10a12a19b6c6d1d3d8d13e4f3f4f9f12f15f17g2g5g8g17h6h16i14i18l1l7m12m14n16n18o2o10o12p7p8q13q15q17r3r7s10", "3,0")
/*22*/ test("a10a18b4b7b16c6c11d13d17e14e19f9f14h7h9h10i3i6j12k1k7k10k19l1l3l12m8m15n4n14o7o10o17p8p16q7q10q18r1r8r11r13r15s2s14,a2a4a9a12a13a19b12d5d15e1e13e15f1f10g9g12h3h4h6i2i12i19j3j11j19l2l7l10l14l19m3m14n3n5n11o5o9o18o19p18q4q11r2r17r19", "9,0")
/*23*/ test("a6b19c12c13c17e3e15e17e18f9f10f17g16g17h4h7h13h17i8i10i11i13i14i19j2j5j8k16l18m10n7n8n11n18o2o7o8o12p1p6p7p9p15p18p19r1r15r18s1s6s14s16,a4b6b9b13c5c9d1d5d14e8e10e16f1f6f12f18g3g14h1h3h8h15i2i3j6j10j17k8l4l8l9l11m7m11m17m18n10n12n14n15o1o9o17p2q12r2r4r8r14s12s15s17", "7,0")
/*24*/ test("a12b4b11b13b17c2c3c4d11d18d19e13e18e19f11f13f17g2g4h1i9i11j3j4j11j17k3k10k13l7l10l14m1m2m5m12m18n10n12o2o5o7o14p2p13p16p18p19q16q18r5r6r19s12s18,a2b2b6b9c8c9c12c17d5d17e4e10e15e16f7f12f15f19g8g9h4h9h11h18i1i13i16i17i18j7k1k19l1l12m11n5n17o9o12p1p5p7p9p12p17q3q5q7q19r1s4s5s6s7s19", "1,9")
/*25*/ test("a10a11a16b1b2b4b14c5c6c9c17d2d11d18d19e11e15f2f6f16g4g5g14h4h10h12i5i15i18j7j11j15j16k3k8k9k18l13l14l16l18m3m4m12m18n7n11n15o5o9p1p4p16q5q6q8q9q17r15r17s3s6,a1a3a7a9b3b8b11b18d1d5d6d14e13e19f4f5f12f13g1g2g9g13g15h6h8h11h13h14i3i14i17k2k7k16k17l6m2m10n2n4n12n14n18o2o15p3p13p14p15p18q3q13q14r1s1s5s7s10s11s16s18s19", "12,1")
/*26*/ test("a2a3a14b15b18c1c10c14c16c17d8d12d19e1e2e4e5e8e9e11e13f4f19g5h4h15h17i19j2j9j12j16j18k2k5k8l7l10l17m13n7n9n10n15o1o6o9o10p3p14p16q10q11q12q16q17q18q19r7r10s3s5s11s14s16,a4a10a11a12b1b2b11b13c4c11c13d3d4d5d9d11e6e10e15e19f10f12g1g8g12g19h6h19i2i3j7j19k1k16k18l11l12l13l14m1m14n1n2n13o8o12o15p2p4p5p7p8p15p17q1q6q8r3r4r6r11r12r18s15s19", "7,2")
/*27*/ test("a4a10b2b16c3c13d9d10d14d16d17e3e10e11e16f1f5f7f8f10f18g7g17h3h5h8i1i5i7i14i18j3j13j17k10k15k17l1l5l6l9l11l14l15m3m4m15m19n3n15o5o8o11o12o13o17p4p6p16q2q5q6q8q11q13q15r5r7r15s5s14s15,a6a15a17b1b6b7b8b14b18b19c2c8c9c14c16e5e15f2f17g1g8g14g15g19h9h14h19i4i6i8i11i13j2j6j14j15j16j18k9k14l8l12l13m5m7m8m12m13m17m18n2n4o15o18p2p8p18q16q17q19r2r3r8r10r11r13r16r17s1s8s13s19", "4,7")
/*28*/ test("a13a14a18b1b2b3b11b18b19c1c3c5c11d2d4d12d18e3e5e7e8e10f3f7g6g18h1h3h7h11h14h15h19i1i2i8i9i10j9j10j17k7k15l6l8l10l12m5m6m7m12m17n2n4n5o2o18p2p7p8p10p14q2q4q7q14q16r2r5r19s1s2s9s12s17s19,a1a5a6a8b8c15c17d6d11d13d15d17e9e11e12e15e17f1f6f9f11f16f18g2g5g7g13g15g19h2h10h12i7i15j2j4j5k5k9k10k11k13k14k18l2l19m1m4m11m13m16n12n17n18o4o15o17p6p9p11p12p17q13q15q17q18r3r4r6r9r10s4s7s10s11s14", "7,10")
/*29*/ test("a10a11a12a14a17b4b6b9c13c15c16d5d9d11d19e5e6e7e16f3f6g1g5g13g16g18h2h4h8h17h18h19i2i3i4i9i10i14i19j1j6j7j13j18j19k8l3l7l10m3m6m18n8n10n11n16o1o7o12o13o14o17o19p6p9p10p17q1q11q18r7r10r12r15r18s3s4s9s11,a2a6a16b1b2b15b18b19c3c7c8c18c19d2d3d7d8e2e3e4e13e18f9f11g3g4g10g14g15g17g19h9h12h15i6i8i16i17j3j4j5j11j12k1k2k11k12k15k16l1l2l6l16l19m1m14n2n6n14n15n18o2o6o10o11p19q2q3q4q6q8q17r2s6s7s12s13s15s18", "4,9")
/*30*/ test("a3a5a9a11b2b3b4b5b6b9b19c5c11c14c16c18d5d6d11d19e5e7e12e15f4f8f11g5g14g19h2h3h5h10h16h17h19i17i18j1j4j5j6j9j12k7k15k17k19l1l5l16l17m1m2m11n6n8n12n14n18o1o11o12o16p1p12p14p15p16p18q2q12q13q14q18r5r10r13r14r15r16s13,a1a4a6a10a13a17a19b15b18c1c2c4c6c17d10d16e3e4e10e19f5f9f10f13f14f17f19g10g16h9h12i1i2i5i11i13i15i19j2j15j17k2k6k9k12k13k18l2l6l9l12l19m5m6m14m17n2n3n4n5n9n10n11o2o5o7o15o17p8p9p11p19q5q6q8q9q15r4r9s5s9s12s14", "7,7")
'''
class Test(unittest.TestCase):
    """Runs Solver.solve against every case embedded in TEST_DATA."""

    def test_solve(self):
        # The triple-quoted TEST_DATA starts and ends with a newline, so
        # split('\n') yields a leading and a trailing empty element.
        # Slicing [1:-1] keeps every test case; the previous [1:-2] silently
        # dropped the last case (/*30*/).
        for test_num, line in enumerate(TEST_DATA.split('\n')[1:-1], start=1):
            # Strip the `/*N*/ test("` prefix and the trailing `)` wrapper,
            # leaving `<inputs> <expected>` (raw strings avoid the invalid
            # escape-sequence warnings the old non-raw patterns produced).
            line = re.sub(r'^.*test\("*', '', line)
            line = re.sub(r'\) *', '', line)
            line = line.replace(', ', ' ').replace('"', '')
            inputs, expect = line.split(' ')
            solver = Solver()
            actual = solver.solve(inputs)
            print('----TEST {0}----'.format(test_num))
            print('inputs: {0}'.format(inputs))
            print('actual: {0}'.format(actual))
            self.assertEqual(actual, expect)
            print('OK')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
def multtable(start, stop, number):
    """Print the multiplication table for *number*.

    One line per multiplier, from *start* up to and including *stop*.
    """
    multiplier = start
    while multiplier <= stop:
        print(f"{multiplier} x {number} = {multiplier*number}")
        multiplier += 1
def powertable(power, stop):
    """Print i**power, one value per line, for i = 1 up to and including *stop*."""
    for base in range(1, stop + 1):
        print(base ** power)
# fixed: the guard was `if __name__ == '__main__:` -- the string literal was
# missing its closing quote, making the whole module a SyntaxError.
if __name__ == '__main__':
    multtable(1, 4, 7)
    powertable(2, 4)
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import statistics
import math
import plot
def densitygraph(star_file, window, save_file):
    """Sliding-window count of nonzero samples in column 1 of *star_file*.

    Loads a space-delimited table, builds a 0/1 indicator for nonzero values
    in the second column, sums it over a window of *window* samples (the very
    first window value is repeated *window* times so the output has the same
    length as the input), saves (time, density) to *save_file* and plots it.
    """
    data = np.loadtxt(star_file, delimiter=' ')
    cols = data.transpose()
    n = len(data)
    # Extra row: 1 wherever the second column is nonzero, else 0.
    marked = np.append(cols, [np.zeros(n)], axis=0)
    for idx in range(n):
        if marked[1][idx] != 0:
            marked[2][idx] = 1
    density = []
    head = sum(marked[2][:window])
    density.extend([head] * window)
    for start in range(n - window):
        density.append(sum(marked[2][start:start + window]))
    table = np.column_stack((cols[0], np.array(density)))
    np.savetxt(save_file, table)
    plot.plotgraph(table.transpose()[0], table.transpose()[1], 'b', 'Time(s)', 'Density', 'Density of changes')
    return
def meangraph(star_file, window, save_file):
    """Sliding-window mean of non-negative samples in column 1 of *star_file*.

    Mirrors densitygraph: the first window's mean is repeated *window* times,
    then the window slides one sample at a time; the (time, mean) table is
    saved to *save_file* and plotted.

    Fixes over the previous version:
    - the inner loop summed indices 0..window-1 with a stale index ``j``
      instead of sliding the window, so every output value was identical;
    - ``newY.append(newY)`` appended the list to itself instead of the mean;
    - ``save_file`` was accepted but never written (densitygraph saves).
    """
    star_table = np.loadtxt(star_file, delimiter=' ')
    starTranspose = star_table.transpose()

    def _window_mean(start):
        # Mean over samples [start, start+window); negative values are
        # excluded from the sum but still divide by the full window size,
        # matching the original first-window computation.
        total = 0
        for i in range(start, start + window):
            if starTranspose[1][i] >= 0:
                total += starTranspose[1][i]
        return total / window

    newY = [_window_mean(0)] * window
    for k in range(window, len(star_table)):
        newY.append(_window_mean(k - window))
    saveTabl = np.column_stack((starTranspose[0], np.array(newY)))
    np.savetxt(save_file, saveTabl)
    plot.plotgraph(saveTabl.transpose()[0], saveTabl.transpose()[1], 'b', 'Time(s)', 'mean', 'mean of changes')
    return
|
from pylab import *
import sys
import pymc
from pymc import Metropolis
import cosmolopy
from McMc import mcmc
from astropy.io import fits
from McMc import cosmo_utils
import scipy
import pickle

#### run in a shell
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_HPlanck1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_HRiess1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_HPlanck1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_HRiess1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_HPlanck1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_HRiess1s_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm BAO_HRiessPlanck_obh2Planck1s &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11+BAO &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11+BAO &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11+BAO &
# fixed: the nine launcher lines below were left uncommented; they are shell
# commands, not Python, and made the whole module a SyntaxError. Commented
# out like the identical lines above.
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_BAO_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_BAO_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_BAO_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm LyaDR11_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm LyaDR11_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm LyaDR11_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py olambdacdm BAO_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py flatwcdm BAO_HRiessPlanck_obh2BBN &
#xterm -e python ~/Python/Boss/McMc/mcmc_launcher.py owcdm BAO_HRiessPlanck_obh2BBN &

# Chain directory, chain file extension, and figure output directory.
rep='/Users/hamilton/SDSS/LymanAlpha/JCMC_Chains/'
ext='.db'
repfig = '/Users/hamilton/SDSS/LymanAlpha/JCMC_Chains/Fev2014/'
######################## Ok ################################
# Open LambdaCDM: load the chains for each prior combination.
model='olambdacdm'
lya_hp1_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HPlanck1s_obh2Planck1s'+ext,add_extra=True)
lya_hr2_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiess2s_obh2Planck1s'+ext,add_extra=True)
lya_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao=mcmc.readchains(rep+model+'-'+'LyaDR11+BAO'+ext,add_extra=True)
###################################################
reload(mcmc)
clf()
limits=[[0,1],[0,1.5],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','omega_lambda_0','h','obh2']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'red',8,limits=limits,alpha=0.5)
a0=mcmc.matrixplot(lya_hp1_obh2p1,vars,'blue',8,limits=limits,alpha=0.5)
# fixed: was lya_hr1_obh2p1, which is never defined (NameError); the Riess
# chain loaded above is lya_hr2_obh2p1
a1=mcmc.matrixplot(lya_hr2_obh2p1,vars,'green',8,limits=limits,alpha=0.5)
subplot(2,2,2)
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h Planck (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
reload(mcmc)
clf()
limits=[[0.,1],[0,1.5]]
vars=['omega_M_0','omega_lambda_0']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',8,limits=limits,alpha=0.5)
subplot(len(vars),len(vars),len(vars))
axis('off')
legend([a2],['BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
# fixed: omplanckwp was first *used* just below but only assigned much
# further down the script (NameError at runtime); define it before its
# first use. Later sections re-assign the same value.
omplanckwp=0.3183
clf()
a2=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['omega_lambda_0'],nsmooth=3)
# Flat-universe line (Om+Ol=1) and Planck best-fit crosshairs as references.
xx=linspace(0,1,100)
plot(xx,1.-xx,'k--')
plot(xx*0+omplanckwp,2*xx,'k:')
plot(xx,xx*0+1-omplanckwp,'k:')
a1=plot(omplanckwp,1-omplanckwp,'*',color='yellow',ms=20)
xlim([0,1])
ylim([0,2])
xlabel('$\Omega_M$')
ylabel('$\Omega_\Lambda$')
legend([a2],['BAO Lyman-alpha + h Riess/Planck + $\Omega_b h^2$ Planck'],frameon=False)
title('Open $\Lambda$CDM')
savefig(repfig+'olambdacdm_lya.png',bbox_inches="tight")
# Open LambdaCDM: matrix plot and Om/Ol contours comparing the Lya-only,
# LRG-only and combined chains (last figure adds the no-prior chain as a
# prior-sensitivity check).
reload(mcmc)
clf()
limits=[[0,1],[0,1.5],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','omega_lambda_0','h','obh2']
a0=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',4,limits=limits,alpha=0.5)
a1=mcmc.matrixplot(bao_hrp_obh2p1,vars,'green',4,limits=limits,alpha=0.5)
a2=mcmc.matrixplot(lya_bao_hrp_obh2p1,vars,'red',4,limits=limits,alpha=0.5)
subplot(2,2,2)
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],title='Open $\Lambda$CDM')
savefig(repfig+'olambdacdm_combined.png',bbox_inches="tight")
clf()
# Flat-universe line and Planck crosshairs as visual references.
xx=linspace(0,1,100)
plot(xx,1.-xx,'k--')
plot(xx*0+omplanckwp,2*xx,'k:')
plot(xx,xx*0+1-omplanckwp,'k:')
a0=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='blue',alpha=0.5)
a1=mcmc.cont(bao_hrp_obh2p1['omega_M_0'],bao_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='green',alpha=0.5)
a2=mcmc.cont(lya_bao_hrp_obh2p1['omega_M_0'],lya_bao_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='red',alpha=0.5)
aa=plot(omplanckwp,1-omplanckwp,'*',color='yellow',ms=20)
xlim([0,1])
ylim([0,1.8])
xlabel('$\Omega_M$')
ylabel('$\Omega_\Lambda$')
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],frameon=False)
title('Open $\Lambda$CDM')
savefig(repfig+'olambdacdm_OmOl_combined.png',bbox_inches="tight")
clf()
xx=linspace(0,1,100)
plot(xx,1.-xx,'k--')
plot(xx*0+omplanckwp,2*xx,'k:')
plot(xx,xx*0+1-omplanckwp,'k:')
a0=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='blue',alpha=0.5)
a1=mcmc.cont(bao_hrp_obh2p1['omega_M_0'],bao_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='green',alpha=0.5)
a3=mcmc.cont(lya_bao['omega_M_0'],lya_bao['omega_lambda_0'],nsmooth=3,color='brown',alpha=0.5)
a2=mcmc.cont(lya_bao_hrp_obh2p1['omega_M_0'],lya_bao_hrp_obh2p1['omega_lambda_0'],nsmooth=3,color='red',alpha=0.5)
aa=plot(omplanckwp,1-omplanckwp,'*',color='yellow',ms=20)
xlim([0,1])
ylim([0,1.8])
xlabel('$\Omega_M$')
ylabel('$\Omega_\Lambda$')
legend([a0,a1,a2,a3],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG'],frameon=False)
title('Open $\Lambda$CDM')
savefig(repfig+'olambdacdm_OmOl_combined_checkpriors.png',bbox_inches="tight")
######################## w ################################
# Flat wCDM: reload the chains for this model and repeat the comparisons.
model='flatwcdm'
lya_hp1_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HPlanck1s_obh2Planck1s'+ext,add_extra=True)
lya_hr2_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiess2s_obh2Planck1s'+ext,add_extra=True)
lya_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao=mcmc.readchains(rep+model+'-'+'LyaDR11+BAO'+ext,add_extra=True)
###################################################
reload(mcmc)
clf()
limits=[[0,0.4],[-2.5,0],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','w','h','obh2']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'red',8,limits=limits,alpha=0.5)
a0=mcmc.matrixplot(lya_hp1_obh2p1,vars,'blue',8,limits=limits,alpha=0.5)
# fixed: was lya_hr1_obh2p1 (never defined, NameError); the Riess chain
# loaded above is lya_hr2_obh2p1
a1=mcmc.matrixplot(lya_hr2_obh2p1,vars,'green',8,limits=limits,alpha=0.5)
subplot(2,2,2)
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h Planck (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
reload(mcmc)
clf()
limits=[[0,0.4],[-2.5,0],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','w','h','obh2']
a0=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',3,limits=limits,alpha=0.5)
a1=mcmc.matrixplot(bao_hrp_obh2p1,vars,'green',3,limits=limits,alpha=0.5)
a2=mcmc.matrixplot(lya_bao_hrp_obh2p1,vars,'red',3,limits=limits,alpha=0.5)
subplot(2,2,2)
axis('off')
# NOTE(review): legend title says 'Open $\Lambda$CDM' but this section is
# flat wCDM -- looks like a copy/paste slip; confirm before changing the label.
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],title='Open $\Lambda$CDM')
savefig(repfig+'flatwcdm_combined.png',bbox_inches="tight")
reload(mcmc)
clf()
limits=[[0.,0.4],[-2.5,0]]
vars=['omega_M_0','w']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',3,limits=limits,alpha=0.5)
subplot(len(vars),len(vars),len(vars))
axis('off')
legend([a2],['BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
# Flat wCDM: Om/w contours, with the w=-1 (cosmological constant) and Planck
# Omega_M reference lines overlaid.
omplanckwp=0.3183
clf()
a2=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['w'],alpha=0.7,nsmooth=3)
xx=linspace(0,1,100)
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,-2*xx,'k:')
a1=plot(omplanckwp,-1,'*',color='yellow',ms=20)
xlim([0,0.4])
ylim([-2,0])
xlabel('$\Omega_M$')
ylabel('$w$')
legend([a2],['BAO Lyman-alpha + h Riess/Planck + $\Omega_b h^2$ Planck'],frameon=False)
title('Flat $w$CDM')
savefig(repfig+'flatwcdm_lya.png',bbox_inches="tight")
omplanckwp=0.3183
clf()
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,-2*xx,'k:')
a0=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['w'],alpha=0.5,color='blue',nsmooth=3)
a1=mcmc.cont(bao_hrp_obh2p1['omega_M_0'],bao_hrp_obh2p1['w'],alpha=0.5,color='green',nsmooth=3)
a2=mcmc.cont(lya_bao_hrp_obh2p1['omega_M_0'],lya_bao_hrp_obh2p1['w'],alpha=0.5,color='red',nsmooth=3)
xx=linspace(0,1,100)
aa=plot(omplanckwp,-1,'*',color='yellow',ms=20)
xlim([0,0.4])
ylim([-2,0])
xlabel('$\Omega_M$')
ylabel('$w$')
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],frameon=False)
title('Flat $w$CDM')
savefig(repfig+'flatwcdm_Omw_lya.png',bbox_inches="tight")
# Same contours plus the prior-free chain as a sensitivity check.
omplanckwp=0.3183
clf()
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,-2*xx,'k:')
a0=mcmc.cont(lya_hrp_obh2p1['omega_M_0'],lya_hrp_obh2p1['w'],alpha=0.5,color='blue',nsmooth=3)
a1=mcmc.cont(bao_hrp_obh2p1['omega_M_0'],bao_hrp_obh2p1['w'],alpha=0.5,color='green',nsmooth=3)
a3=mcmc.cont(lya_bao['omega_M_0'],lya_bao['w'],alpha=0.5,color='brown',nsmooth=3)
a2=mcmc.cont(lya_bao_hrp_obh2p1['omega_M_0'],lya_bao_hrp_obh2p1['w'],alpha=0.5,color='red',nsmooth=3)
xx=linspace(0,1,100)
aa=plot(omplanckwp,-1,'*',color='yellow',ms=20)
xlim([0,0.4])
ylim([-2,0])
xlabel('$\Omega_M$')
ylabel('$w$')
legend([a0,a1,a2,a3],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG'],frameon=False)
title('Flat $w$CDM')
savefig(repfig+'flatwcdm_Omw_lya_checkpriors.png',bbox_inches="tight")
######################## Ok,w ################################
# Open wCDM: reload the chains for this model.
model='owcdm'
lya_hp1_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HPlanck1s_obh2Planck1s'+ext,add_extra=True)
lya_hr2_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiess2s_obh2Planck1s'+ext,add_extra=True)
lya_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao_hrp_obh2p1=mcmc.readchains(rep+model+'-'+'LyaDR11_BAO_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
lya_bao=mcmc.readchains(rep+model+'-'+'LyaDR11+BAO'+ext,add_extra=True)
###################################################
reload(mcmc)
clf()
alpha=0.4
limits=[[0,1],[0,2],[-2.5,0],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','omega_lambda_0','w','h','obh2']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'red',3,limits=limits,alpha=alpha)
a0=mcmc.matrixplot(lya_hp1_obh2p1,vars,'blue',3,limits=limits,alpha=alpha)
# fixed: was lya_hr1_obh2p1 (never defined, NameError); the Riess chain
# loaded above is lya_hr2_obh2p1
a1=mcmc.matrixplot(lya_hr2_obh2p1,vars,'green',3,limits=limits,alpha=alpha)
subplot(2,2,2)
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h Planck (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess (1s) + Obh2 Planck (1s)',
'BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
# Open wCDM: combined matrix plots, with reference lines (flat universe,
# Planck Omega_M, w=-1) drawn into the relevant off-diagonal panels.
reload(mcmc)
clf()
alpha=0.4
limits=[[0,1],[0,2],[-2.5,0],[0.6,0.8],[0.021,0.023]]
vars=['omega_M_0','omega_lambda_0','w','h','obh2']
a0=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',4,limits=limits,alpha=0.5)
a1=mcmc.matrixplot(bao_hrp_obh2p1,vars,'green',8,limits=limits,alpha=0.5)
a2=mcmc.matrixplot(lya_bao_hrp_obh2p1,vars,'red',4,limits=limits,alpha=0.5)
subplot(2,2,2)
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],title='Open wCDM')
reload(mcmc)
clf()
alpha=0.7
limits=[[0.,1],[0,2],[-2.5,0]]
vars=['omega_M_0','omega_lambda_0','w']
a2=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',8,limits=limits,alpha=alpha)
subplot(len(vars),len(vars),len(vars))
axis('off')
legend([a2],[ 'BAO Lyman-alpha + h Riess/Planck + Obh2 Planck (1s)'],title=model)
# Overlay reference lines on the (Om,Ol), (Om,w) and (Ol,w) panels.
subplot(3,3,4)
xx=linspace(0,1,100)
plot(xx,xx*0+1-omplanckwp,'k:')
plot(xx*0+omplanckwp,xx*2,'k:')
plot(xx,1-xx,'k--')
a1=plot(omplanckwp,1.-omplanckwp,'*',color='yellow',ms=10)
subplot(3,3,7)
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,xx*3-3,'k:')
a1=plot(omplanckwp,-1,'*',color='yellow',ms=10)
subplot(3,3,8)
plot(xx*2,xx*0-1,'k:')
plot(xx*0+1-omplanckwp,xx*3-3,'k:')
a1=plot(1.-omplanckwp,-1,'*',color='yellow',ms=10)
savefig(repfig+'owcdm_lya.png',bbox_inches="tight")
reload(mcmc)
clf()
alpha=0.5
limits=[[0.,1],[0,2],[-2.5,0]]
vars=['omega_M_0','omega_lambda_0','w']
a0=mcmc.matrixplot(lya_hrp_obh2p1,vars,'blue',4,limits=limits,alpha=alpha)
a1=mcmc.matrixplot(bao_hrp_obh2p1,vars,'green',8,limits=limits,alpha=alpha)
a2=mcmc.matrixplot(lya_bao_hrp_obh2p1,vars,'red',4,limits=limits,alpha=alpha)
subplot(len(vars),len(vars),len(vars))
axis('off')
legend([a0,a1,a2],['BAO Lyman-alpha + h + $\Omega_b h^2$',
'BAO LRG + h + $\Omega_b h^2$',
'BAO Lyman-alpha + LRG + h + $\Omega_b h^2$'],title='Open wCDM')
subplot(3,3,4)
xx=linspace(0,1,100)
plot(xx,xx*0+1-omplanckwp,'k:')
plot(xx*0+omplanckwp,xx*2,'k:')
plot(xx,1-xx,'k--')
a1=plot(omplanckwp,1.-omplanckwp,'*',color='yellow',ms=10)
subplot(3,3,7)
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,xx*3-3,'k:')
a1=plot(omplanckwp,-1,'*',color='yellow',ms=10)
subplot(3,3,8)
plot(xx*2,xx*0-1,'k:')
plot(xx*0+1-omplanckwp,xx*3-3,'k:')
a1=plot(1.-omplanckwp,-1,'*',color='yellow',ms=10)
savefig(repfig+'owcdm_lya_combined.png',bbox_inches="tight")
###### fixing h and obh2
# Open LambdaCDM: compare the chain with h/obh2 priors against the chain
# where h and obh2 were held fixed.
model='olambdacdm_fixed_h_obh2'
lya_fixed=mcmc.readchains(rep+model+'-'+'LyaDR11'+ext)
model='olambdacdm'
lya_priors=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
clf()
a2=mcmc.cont(lya_priors['omega_M_0'],lya_priors['omega_lambda_0'],nsmooth=3,color='blue',alpha=0.5)
a3=mcmc.cont(lya_fixed['omega_M_0'],lya_fixed['omega_lambda_0'],nsmooth=3,color='red',alpha=0.5)
xx=linspace(0,1,100)
plot(xx,1.-xx,'k--')
plot(xx*0+omplanckwp,2*xx,'k:')
plot(xx,xx*0+1-omplanckwp,'k:')
a1=plot(omplanckwp,1-omplanckwp,'*',color='yellow',ms=20)
xlim([0,1])
ylim([0,2])
xlabel('$\Omega_M$')
ylabel('$\Omega_\Lambda$')
legend([a2,a3],['BAO Lyman-alpha + $h$ Riess/Planck + $\Omega_b h^2$ Planck', 'BAO Lyman-alpha + $h=0.706$ and $\Omega_b h^2=0.02207$ fixed'],frameon=False)
title('Open $\Lambda$CDM')
savefig(repfig+'olambdacdm_lya_priors_or_fixed.png',bbox_inches="tight")
# Flat wCDM: priors-vs-fixed comparison in the (Om, w) plane.
model='flatwcdm_fixed_h_obh2'
lya_fixed=mcmc.readchains(rep+model+'-'+'LyaDR11'+ext)
model='flatwcdm'
lya_priors=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
omplanckwp=0.3183
clf()
a2=mcmc.cont(lya_priors['omega_M_0'],lya_priors['w'],alpha=0.5,nsmooth=2,color='blue')
a3=mcmc.cont(lya_fixed['omega_M_0'],lya_fixed['w'],alpha=0.5,nsmooth=2,color='red')
xx=linspace(0,1,100)
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,-2*xx,'k:')
a1=plot(omplanckwp,-1,'*',color='yellow',ms=20)
xlim([0,0.4])
ylim([-2,0])
xlabel('$\Omega_M$')
ylabel('$w$')
legend([a2,a3],['BAO Lyman-alpha + $h$ Riess/Planck + $\Omega_b h^2$ Planck', 'BAO Lyman-alpha + $h=0.706$ and $\Omega_b h^2=0.02207$ fixed'],frameon=False)
title('Flat $w$CDM')
# fixed: output filename had a duplicated extension ('...png.png')
savefig(repfig+'flatwcdm_lya_priors_or_fixed.png',bbox_inches="tight")
# Open wCDM: priors-vs-fixed comparison as a 3x3 matrix plot with the same
# reference overlays as the earlier owcdm figures.
model='owcdm_fixed_h_obh2'
lya_fixed=mcmc.readchains(rep+model+'-'+'LyaDR11'+ext)
model='owcdm'
lya_priors=mcmc.readchains(rep+model+'-'+'LyaDR11_HRiessPlanck_obh2Planck1s'+ext,add_extra=True)
reload(mcmc)
clf()
alpha=0.5
limits=[[0.,1],[0,2],[-2.5,0]]
vars=['omega_M_0','omega_lambda_0','w']
a2=mcmc.matrixplot(lya_priors,vars,'blue',5,limits=limits,alpha=alpha)
a3=mcmc.matrixplot(lya_fixed,vars,'red',5,limits=limits,alpha=alpha)
subplot(len(vars),len(vars),len(vars))
axis('off')
legend([a2,a3],['BAO Lyman-alpha + $h$ Riess/Planck + $\Omega_b h^2$ Planck', 'BAO Lyman-alpha + $h=0.706$ and $\Omega_b h^2=0.02207$ fixed'],frameon=False,title='Open $w$CDM')
subplot(3,3,4)
xx=linspace(0,1,100)
plot(xx,xx*0+1-omplanckwp,'k:')
plot(xx*0+omplanckwp,xx*2,'k:')
plot(xx,1-xx,'k--')
a1=plot(omplanckwp,1.-omplanckwp,'*',color='yellow',ms=10)
subplot(3,3,7)
plot(xx,xx*0-1,'k:')
plot(xx*0+omplanckwp,xx*3-3,'k:')
a1=plot(omplanckwp,-1,'*',color='yellow',ms=10)
subplot(3,3,8)
plot(xx*2,xx*0-1,'k:')
plot(xx*0+1-omplanckwp,xx*3-3,'k:')
a1=plot(1.-omplanckwp,-1,'*',color='yellow',ms=10)
savefig(repfig+'owcdm_lya_priors_or_fixed.png',bbox_inches="tight")
|
import os
import sys

# Scan the ./results directory (next to this script) for files containing
# "FAIL" and exit nonzero if any are found, or if there are no result files.
script_dir = os.path.dirname(os.path.realpath(__file__))
results_dir = script_dir + "/results"
results_files = os.listdir(results_dir)

if len(results_files) <= 1:  # There may be 1 file which is just a .gitkeep file
    print('Found no test results in the results folder ' + results_dir + ' ... counting this as a test failure')
    sys.exit(1)

test_fail_string = "FAIL"
found_failures = 0
for file_name in results_files:
    file_path = results_dir + os.sep + file_name
    if os.path.isfile(file_path):
        # 'with' guarantees the handle is closed even if reading raises
        # (the previous version used a bare open()/close() pair).
        with open(file_path, 'r') as f:
            file_contents = f.read()
        if test_fail_string in file_contents:
            print('Found one or more FAILs in file %s' % file_name)
            print('File content was: ' + file_contents)
            found_failures += 1

if found_failures > 0:
    print('\n\nFound ' + str(found_failures) + ' failures. Tests failed\n\n')
    sys.exit(1)

print('\n\nTests passed without reporting any failures to the results folder\n\n')
sys.exit(0)
# Generated by Django 2.0 on 2018-01-09 23:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters TrainDay.mood (CharField, max 5 chars). The help_text below is a
    # list of emoji that has been mojibake-encoded at some point; it is kept
    # verbatim here -- NOTE(review): re-save the migration from a correctly
    # encoded source to restore the original emoji.

    dependencies = [
        ('progress', '0003_auto_20180108_2046'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trainday',
            name='mood',
            field=models.CharField(
                help_text='๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ \n๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ก ๐ข ๐ฃ ๐ค ๐ฅ ๐ฆ \n๐ง ๐จ ๐ฉ ๐ช ๐ซ ๐ฌ ๐ญ ๐ฎ ๐ฏ ๐ฐ ๐ฑ ๐ฒ ๐ณ ๐ด ๐ต ๐ถ ๐ท ๐ ๐ ๐ ๐ \n ', max_length=5),
        ),
    ]
|
#!/usr/bin/env python
# filterdaemon.py
# This background process creates indexes files for threads and filters.
#
import sys
import os
import pyinotify
import threading
import datetime
import time
import email.parser
import threads
import headers
import users
from cabinet import DatetimeCabinet
import maildir
# The architecture is pretty simple. This program is multithreaded.
# One thread is the producer. It uses inotify to detect changes to the
# fs. It writes in the events_queue.
#
# Another thread is the consumer. It processes the files to build an index of
# threads (threads_index).
#
# Finally, there's a thread which periodically dumps the thread index.
# Shared FIFO of filesystem events: produced by WatcherThread, consumed by
# ProcessorThread.
events_queue = []

# Seconds between thread-index dumps to disk (DumperThread).
DUMPER_SLEEP_DURATION=20
# Seconds the processor sleeps between drains of events_queue.
EVENTS_QUEUE_PROCESSING_DELAY=10
class WatcherThread(threading.Thread):
    """Producer thread: watches a maildir tree with inotify and appends
    create/delete events to the shared events_queue."""

    class EventHandler(pyinotify.ProcessEvent):
        def __init__(self, path):
            # path is the watched root; events on the root itself are ignored
            self.path = path

        def process_IN_CREATE(self, event):
            if event.pathname != self.path: # inotify also logs events at the root of the folder, which we don't care about
                events_queue.append({"type": "create", "path": event.pathname})

        def process_IN_DELETE(self, event):
            if event.pathname != self.path:
                events_queue.append({"type": "delete", "path": event.pathname})

    def __init__(self, path):
        threading.Thread.__init__(self)
        self.path = os.path.realpath(path)

    def run(self):
        # Watch recursively for creations and deletions, then block forever
        # dispatching events to the handler.
        wm = pyinotify.WatchManager()
        mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE
        handler = WatcherThread.EventHandler(self.path)
        notifier = pyinotify.Notifier(wm, handler)
        wdd = wm.add_watch(self.path, mask, rec=True)
        notifier.loop()
class DumperThread(threading.Thread):
    """Background thread that periodically persists the thread index."""

    def __init__(self, path, threads_index):
        super(DumperThread, self).__init__()
        self.path = path
        self.threads_index = threads_index

    def run(self):
        # Flush the index to disk every DUMPER_SLEEP_DURATION seconds, forever.
        while True:
            time.sleep(DUMPER_SLEEP_DURATION)
            self.threads_index.sync()
def process_new_email(path, threads_index):
    """Parse the headers of the message at *path* and fold it into
    *threads_index*, grouping messages by cleaned-up subject.

    The matched (or newly created) thread is re-inserted at the front of the
    index so the list stays ordered by most-recent activity.
    """
    with open(path, "r") as fd:
        parser = email.parser.HeaderParser()
        email_headers = parser.parse(fd)

    subject = email_headers["subject"]
    from_field = {}
    from_field["name"], from_field["address"] = email.utils.parseaddr(email_headers["From"])
    # NOTE(review): to_field is computed but not used below -- presumably
    # kept for a future recipient index; confirm before removing.
    to_field = {}
    to_field["addresses"] = email.utils.getaddresses(email_headers["to"])

    if subject is not None:  # idiom fix: identity comparison with None
        subject = headers.cleanup_subject(subject)

    # Find an existing thread with the same subject and pop it so it can be
    # moved to the front of the index below.
    thread = None
    for index, thr in enumerate(threads_index):
        if thr["subject"] == subject:
            thread = threads_index.pop(index)
            break
    if not thread:
        # create a new thread
        thread = threads.create_thread_structure()
        thread["subject"] = subject
        thread["creator"] = from_field

    msg_id = os.path.basename(path)
    thread["messages"].append(msg_id)
    thread["date"] = datetime.datetime.utcnow()
    if from_field["address"] != thread["creator"]["address"]:
        thread["lastreplyfrom"] = from_field
    threads_index.insert(0, thread)
class ProcessorThread(threading.Thread):
    """Drains events_queue, updating per-user thread indexes.

    NOTE(review): events_queue is shared with WatcherThread without any
    locking; list.append/pop(0) are atomic in CPython but this relies on
    the GIL — confirm this is intended.
    """
    def __init__(self, path, threads_index):
        threading.Thread.__init__(self)
        self.path = path
        # Mapping of username -> {"threads_index", "dirty", "unread_count"}
        self.threads_index= threads_index
    def run(self):
        """Poll the queue forever, sleeping between scans."""
        while True:
            while len(events_queue) != 0:
                event = events_queue.pop(0)
                # Only creations matter here; deletions are ignored.
                if event["type"] == "create":
                    try:
                        username = users.get_username_from_folder(event["path"])
                        print "username: %s, path: %s\n" % (username, event["path"])
                        if username not in self.threads_index:
                            print "Setting threads_index for user : %s" % username
                            self.threads_index[username] = {"threads_index": [], "dirty": True, "unread_count": 0}
                        process_new_email(event["path"], self.threads_index[username]["threads_index"])
                        # Mark dirty so DumperThread persists the change.
                        self.threads_index[username]["dirty"] = True
                        self.threads_index[username]["unread_count"] += 1
                    except IOError as e:
                        # This may be a Postfix/Dovecot temporary file. Ignore it.
                        print "caught ioerror %s" % e.strerror
                        pass
            time.sleep(EVENTS_QUEUE_PROCESSING_DELAY)
if __name__ == "__main__":
    # Usage: script <maildir-root>. Starts the watcher/processor/dumper trio.
    path = sys.argv[1]
    print "Watching %s..." % path
    # Persistent index shared by the processor (writer) and dumper (syncer).
    threads_index = DatetimeCabinet("/home/kite/threads.db")
    watcher_thread = WatcherThread(path)
    processor_thread = ProcessorThread(path, threads_index)
    dumper_thread = DumperThread(path, threads_index)
    processor_thread.start()
    watcher_thread.start()
    dumper_thread.start()
|
import time
from termenu.app import AppMenu
def leave():
    """Top-level menu action: announce and quit the menu application."""
    print("Leave...")
    AppMenu.quit()
def go():
    """Second-level menu: navigate back or pick a destination."""
    def back():
        # Return to the parent menu.
        print("Going back.")
        AppMenu.back()

    def there():
        # Multi-select destination list with an explicit Quit entry.
        options = "Spain France Albania".split() + [("Quit", AppMenu.quit)]
        ret = AppMenu.show("Where's there?", options,
                           multiselect=True, back_on_abort=True)
        print(ret)
        return ret

    entries = [
        ("YELLOW<<Back>>", back),
        ("GREEN<<There>>", there),
    ]
    return AppMenu.show("Go Where?", entries)
if __name__ == "__main__":
    # Entry point: top-level menu. COLOR<<...>> is termenu's markup syntax.
    AppMenu.show("Make your MAGENTA<<decision>>", [
        ("RED<<Leave>>", leave),
        ("BLUE<<Go>>", go)
    ])
from django.db import models
class Address(models.Model):
    """Citizen address row (unmanaged; maps to the existing 'address' table)."""
    id = models.IntegerField(db_column='ID', primary_key=True)  # Field name made lowercase.
    street_address = models.CharField(db_column='Street_Address', max_length=300)  # Field name made lowercase.
    upazilla_city_corporation = models.CharField(db_column='Upazilla_City_Corporation', max_length=50)  # Field name made lowercase.
    ward_no = models.IntegerField(db_column='Ward_No')  # Field name made lowercase.
    district = models.CharField(db_column='District', max_length=50)  # Field name made lowercase.
    unionn = models.CharField(db_column='Unionn', max_length=50, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        # managed=False: the table is created/maintained outside Django.
        managed = False
        db_table = 'address'
class Center(models.Model):
    """Vaccination/registration center (unmanaged; maps to 'center')."""
    center_id = models.IntegerField(db_column='Center_ID', primary_key=True)  # Field name made lowercase.
    center_name = models.CharField(db_column='Center_Name', max_length=300)  # Field name made lowercase.
    center_address = models.ForeignKey('CenterAddress', models.DO_NOTHING, db_column='Center_Address_ID', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'center'
class CenterAddress(models.Model):
    """Address of a Center (unmanaged; same shape as Address, separate table)."""
    id = models.IntegerField(db_column='ID', primary_key=True)  # Field name made lowercase.
    street_address = models.CharField(db_column='Street_Address', max_length=300)  # Field name made lowercase.
    upazilla_city_corporation = models.CharField(db_column='Upazilla_City_Corporation', max_length=50)  # Field name made lowercase.
    ward_no = models.IntegerField(db_column='Ward_No')  # Field name made lowercase.
    district = models.CharField(db_column='District', max_length=50)  # Field name made lowercase.
    unionn = models.CharField(db_column='Unionn', max_length=50, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'center_address'
class Citizen(models.Model):
    """Citizen profile keyed one-to-one on the national ID (unmanaged)."""
    nid = models.OneToOneField('Nid', models.DO_NOTHING, db_column='NID_ID', primary_key=True)  # Field name made lowercase.
    occupation = models.CharField(db_column='Occupation', max_length=200)  # Field name made lowercase.
    job_title = models.CharField(db_column='Job_Title', max_length=100, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'citizen'
class GovernmentEmployee(models.Model):
    """Government-employee role for a national ID holder (unmanaged)."""
    nid = models.OneToOneField('Nid', models.DO_NOTHING, db_column='NID_ID', primary_key=True)  # Field name made lowercase.
    department = models.CharField(db_column='Department', max_length=150)  # Field name made lowercase.
    job_title = models.CharField(db_column='Job_Title', max_length=100)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'government_employee'
class MedicalPersonel(models.Model):
    """Medical-personnel role for a national ID holder (unmanaged).

    NOTE(review): class/table name is spelled 'Personel' — kept as-is to
    match the existing database table.
    """
    nid = models.OneToOneField('Nid', models.DO_NOTHING, db_column='NID_ID', primary_key=True)  # Field name made lowercase.
    medical_institution_name = models.CharField(db_column='Medical_Institution_Name', max_length=200)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'medical_personel'
class Nid(models.Model):
    """National ID record — the hub all role tables reference (unmanaged)."""
    id = models.BigIntegerField(primary_key=True)
    fname = models.CharField(db_column='FName', max_length=100, blank=True, null=True)  # Field name made lowercase.
    lname = models.CharField(db_column='LName', max_length=100, blank=True, null=True)  # Field name made lowercase.
    dob = models.DateField()
    fathers_name = models.CharField(db_column='Fathers_Name', max_length=150, blank=True, null=True)  # Field name made lowercase.
    mothers_name = models.CharField(db_column='Mothers_Name', max_length=150, blank=True, null=True)  # Field name made lowercase.
    address = models.ForeignKey(Address, models.DO_NOTHING, db_column='Address_ID', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'nid'
class Otp(models.Model):
    """One-time-password storage (unmanaged).

    NOTE(review): no explicit primary key is declared, so Django assumes
    an auto 'id' column — confirm the 'otp' table actually has one.
    """
    otpkey = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'otp'
class Registration(models.Model):
    """Registration of a national ID holder at a center (unmanaged).

    (mobile_no, center) pairs are unique per the underlying table.
    """
    nid = models.OneToOneField(Nid, models.DO_NOTHING, db_column='NID', primary_key=True)  # Field name made lowercase.
    date = models.DateTimeField(db_column='Date')  # Field name made lowercase.
    center = models.ForeignKey(Center, models.DO_NOTHING, db_column='Center_ID', blank=True, null=True)  # Field name made lowercase.
    mobile_no = models.BigIntegerField(db_column='Mobile_no', blank=True, null=True)  # Field name made lowercase.
    age = models.IntegerField(db_column='Age')  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'registration'
        unique_together = (('mobile_no', 'center'),)
class Student(models.Model):
    """Student role for a national ID holder (unmanaged)."""
    nid = models.OneToOneField(Nid, models.DO_NOTHING, db_column='NID_ID', primary_key=True)  # Field name made lowercase.
    university_name = models.CharField(db_column='University_Name', max_length=200)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'student'
class Volunteering(models.Model):
    """Volunteering role for a national ID holder (unmanaged)."""
    nid = models.OneToOneField(Nid, models.DO_NOTHING, db_column='NID_ID', primary_key=True)  # Field name made lowercase.
    organization = models.CharField(db_column='Organization', max_length=150)  # Field name made lowercase.
    job_title = models.CharField(db_column='Job_Title', max_length=100)  # Field name made lowercase.
    class Meta:
        managed = False
        db_table = 'volunteering'
# /models.py
from django.db import models
from level0.contacts.models import Entity, Person
class Job(models.Model):
    """
    A job experience to be listed on the resume
    """
    # NOTE(review): the original left these field definitions blank
    # (a syntax error — the module could not even import). Plausible
    # types are filled in below; confirm against the intended schema.
    company = models.CharField(max_length=250)
    title = models.CharField(max_length=250)
    start_date = models.DateField()
    end_date = models.DateField(blank=True, null=True)
    city = models.CharField(max_length=100)
    state = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    slug = models.SlugField(unique=True, help_text="Suggested value automatically generated from title. Must be unique.")

    def __unicode__(self):
        # Bug fix: the original returned self.name, which Job never defines.
        return u'%s' % (self.title)
class Education(models.Model):
    """An education entry (school, degrees, honors) for the resume."""
    # NOTE(review): the original left these field definitions blank
    # (a syntax error). Plausible types filled in — confirm the schema.
    school = models.CharField(max_length=250)
    degrees = models.CharField(max_length=250)
    minor = models.CharField(max_length=250, blank=True)
    activities = models.TextField(blank=True)
    awards = models.TextField(blank=True)
    slug = models.SlugField(unique=True, help_text="Suggested value automatically generated from title. Must be unique.")

    def __unicode__(self):
        # Consistent with the other models' string representation.
        return u'%s' % (self.school)
class Skill(models.Model):
    """A named skill listed on the resume."""
    name = models.CharField(max_length=250)
    slug = models.SlugField(unique=True, help_text="Suggested value automatically generated from title. Must be unique.")
    def __unicode__(self):
        return u'%s' % (self.name)
class Software(models.Model):
    """A software proficiency listed on the resume."""
    name = models.CharField(max_length=250)
    slug = models.SlugField(unique=True, help_text="Suggested value automatically generated from title. Must be unique.")

    def __unicode__(self):
        # Added for consistency with Skill so admin listings show the name.
        return u'%s' % (self.name)
class Award(models.Model):
    """An award received, with the granting organization."""
    name = models.CharField(max_length=250)
    # NOTE(review): 'giver' was left blank in the original (syntax error);
    # a CharField is assumed — confirm.
    giver = models.CharField(max_length=250)
    slug = models.SlugField(unique=True, help_text="Suggested value automatically generated from title. Must be unique.")

    def __unicode__(self):
        # Consistent with the other models' string representation.
        return u'%s' % (self.name)
#!/usr/bin/env python
# ref: https://gist.github.com/gregorynicholas/3152237
'''
Module that runs pylint on all python scripts found in a directory tree.
'''
import os
import sys
def check(module):
    '''
    apply pylint to the file specified if it is a *.py file

    Non-.py paths are silently ignored. pylint's separator lines
    (those containing '*') are suppressed from the output.
    '''
    if not module.endswith(".py"):
        return
    print("CHECKING ", module)
    # os.popen never returns a falsy object, so the original "if pout:"
    # guard was meaningless; what was actually missing is closing the pipe.
    pout = os.popen('pylint %s' % module, 'r')
    try:
        for line in pout:
            if "*" not in line:
                print(line, end="")
    finally:
        pout.close()
if __name__ == "__main__":
    # Lint root comes from argv[1], defaulting to the current directory.
    try:
        # print(sys.argv)
        BASE_DIRECTORY = sys.argv[1]
    except IndexError:
        BASE_DIRECTORY = os.getcwd()
    print("linting *.py files beneath \n {0}".format(BASE_DIRECTORY))
    print("=" * 80)
    # Walk the whole tree; check() itself filters to *.py files.
    for root, dirs, files in os.walk(BASE_DIRECTORY):
        for name in files:
            filepath = os.path.join(root, name)
            check(filepath)
    print("=" * 80)
|
print("-----------------currency-----------------")
import random

# Random-walk price simulation: each day the price rises by up to 10% or
# falls by up to 5%; the loop ends once the price leaves [MIN_PRICE, MAX_PRICE].
MAX_INCREASE = 0.1  # 10%
MAX_DECREASE = 0.05  # 5%
MIN_PRICE = 0.01
MAX_PRICE = 1000.0
INITIAL_PRICE = 10.0

count = 0
price = INITIAL_PRICE
print("starting price ${:,.2f}".format(price))
while MIN_PRICE <= price <= MAX_PRICE:
    count += 1
    # A coin flip decides whether the price rises or falls today.
    if random.randint(1, 2) == 1:
        # rise: random floating-point factor between 0 and MAX_INCREASE
        price_change = random.uniform(0, MAX_INCREASE)
    else:
        # fall: random floating-point factor between -MAX_DECREASE and 0
        # (bug-prone original used '---MAX_DECREASE', a triple negation
        # that happens to evaluate to -MAX_DECREASE)
        price_change = random.uniform(-MAX_DECREASE, 0)
    price *= (1 + price_change)
    # typo fixed in output: "prise" -> "price"
    print("on day {} price is ${:,.2f}".format(count, price))
print("******************************************* exception ****************************************************")
# Keep prompting until the user types something int() can parse.
finished = False
result = 0
while not finished:
    try:
        result = int(input("Enter an integer: "))
        finished = True
    except ValueError:
        # typo fixed: was "Invalid vlaue"
        print("Invalid value")
print("Valid result is: ", result)
print("********************************************* password **************************************************")
# string provides the character classes (ascii_uppercase, digits, ...)
# used by the password validator below.
import string
def validate(password):
    """Check *password* against five rules, printing a message per failure.

    Rules: length between 5 and 15 characters (inclusive), at least one
    uppercase letter, one lowercase letter, one digit, and one
    punctuation character.

    Returns True only when every rule passes.
    """
    length = 5 <= len(password) <= 15
    # any() over the password replaces the original per-alphabet loops.
    uppercase = any(c in string.ascii_uppercase for c in password)
    lowercase = any(c in string.ascii_lowercase for c in password)
    num = any(c in string.digits for c in password)
    spec = any(c in string.punctuation for c in password)
    # Typos fixed in the user-facing messages ("showld", "atleast").
    if not length:
        print("Password should be between 5 and 15 characters long")
    if not uppercase:
        print("The password should contain at least 1 uppercase letter")
    if not lowercase:
        print("The password should contain at least 1 lowercase letter")
    if not num:
        print("The password should contain at least 1 number")
    if not spec:
        print("The password should contain at least 1 special character")
    return length and uppercase and lowercase and num and spec
def main():
    """Prompt repeatedly until validate() accepts the entered password."""
    while True:
        password = str(input("Enter password: "))
        valid = validate(password)
        if valid:
            # NOTE(review): success message looks truncated ("Your password");
            # presumably meant something like "Your password is valid" — confirm.
            print("Your password")
            break
main()
print("***************************************************************************************************")
|
import re
from container import Row
class FormatSpecifierCannotProcessError(Exception):
    """Raised when a format specifier cannot parse a given row."""
    pass
class FormatSpecifierFactory():
    """Factory producing row-parsing functions ("format specifiers")."""

    # Raw string fixes the invalid escape sequences ("\d", "\s", "\(")
    # that are deprecated in non-raw string literals and will become
    # syntax errors in future Python versions. Groups captured:
    # rank, name, team, position, bye.
    _fox_sports_regex = re.compile(r"(\d+)\.\s(.+)\s\((\w+)\s-\s(\w+)\).+(\d+)")

    def __init__(self):
        pass

    def get_format_specifier(self, fs):
        """
        Gets a format specifier

        Available format specifiers
        - fox_sports

        :param fs: The format specifier to get
        :return: The format specifier, or None if *fs* is unknown
        """
        # NOTE(review): unknown names silently yield None — callers must
        # check the result; consider raising a ValueError instead.
        if (fs == "fox_sports"):
            return FormatSpecifierFactory._fox_sports_format_specifier

    @staticmethod
    def _fox_sports_format_specifier(row):
        """Parse one Fox Sports ranking line into a Row.

        Raises FormatSpecifierCannotProcessError when the line does not
        match the expected format.
        """
        match = FormatSpecifierFactory._fox_sports_regex.match(row)
        if not match:
            raise FormatSpecifierCannotProcessError("Could not parse: " + row)
        num_groups = len(match.groups())
        if num_groups != 5:
            raise FormatSpecifierCannotProcessError("Incorrect number of groups (%d!=5) in successful match on: %s" % (num_groups, row))
        g = match.groups()
        return Row(
            g[1],       # name
            g[3],       # position
            int(g[0]),  # rank
            g[2],       # team
            g[4]        # bye
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""WriteBuffer Object"""
__author__ = "Arthur Messner <arthur.messner@gmail.com>"
__copyright__ = "Copyright (c) 2013 Arthur Messner"
__license__ = "GPL"
__version__ = "$Revision$"
__date__ = "$Date$"
# $Id
import logging
class WriteBuffer(object):
    """Object to hold data with maximum length blocksize, then flush.

    Data added via add() accumulates in an internal buffer; every full
    blocksize-sized block is hashed and stored in block_storage.
    release() flushes the tail and returns (filehash, sequence, size).
    """

    def __init__(self, meta_storage, block_storage, blocksize, hashfunc):
        logging.debug("WriteBuffer.__init__()")
        self.blocksize = blocksize
        # hashfunc has to be used like hashlib.sha1: a factory returning
        # an object with update() and hexdigest()
        self.hashfunc = hashfunc
        self.meta_storage = meta_storage
        self.block_storage = block_storage
        # initialize some variables, they are filled in __reinit
        # look in __reinit for comments
        self.buf = None
        self.bytecounter = None
        self.filehash = None
        self.deduphash = None
        self.sequence = None
        # set values to start first block
        self.__reinit()

    def flush(self):
        """
        write self.buf to block_storage,
        maximum length is blocksize,
        but block can already be smaller
        """
        logging.debug("WriteBuffer.flush()")
        # blocklevel hash
        blockhash = self.hashfunc()
        blockhash.update(self.buf)
        blockhexhash = blockhash.hexdigest()
        # filelevel hash, runs through whole file, with update()
        self.filehash.update(self.buf)
        self.bytecounter += len(self.buf)
        # count duplicate blocks; 'in' replaces the Python-2-only
        # dict.has_key() so the class runs on both Python 2 and 3
        if blockhexhash in self.deduphash:
            self.deduphash[blockhexhash] += 1
        else:
            self.deduphash[blockhexhash] = 1
        # store block on disk
        self.block_storage.put(self.buf, blockhexhash)
        # add blockhexdigest to sequence of blocks of file
        self.sequence.append(blockhexhash)

    def add(self, data):
        """adds data to buffer and flushes every complete block

        Bug fix: the original set self.buf = data[self.blocksize:] after a
        flush, silently dropping up to len(old buffer) bytes whenever the
        buffer was non-empty, and never flushed more than one block per
        call. Returns len(data).
        """
        # logging.debug("WriteBuffer.add(<buf>)")
        l_data = len(data)
        self.buf += data
        while len(self.buf) >= self.blocksize:
            remainder = self.buf[self.blocksize:]
            self.buf = self.buf[:self.blocksize]
            self.flush()
            self.buf = remainder
        return l_data

    def __reinit(self):
        """set some counters to initial values"""
        logging.debug("WriteBuffer.__reinit()")
        self.buf = ""
        # counting bytes = len
        self.bytecounter = 0
        # hash for whole file
        self.filehash = self.hashfunc()
        # deduphash
        self.deduphash = {}
        # sequence of blocks
        self.sequence = []

    def release(self):
        """write remaining data and return (filehash, sequence, size)

        Internal state is reset afterwards, so the instance can be
        reused for the next file.
        """
        logging.debug("WriteBuffer.release()")
        if len(self.buf) != 0:
            self.flush()
        # save informations for return
        filehash = self.filehash.hexdigest()
        sequence = self.sequence
        size = self.bytecounter
        # reinitialize counters for next file
        self.__reinit()
        return (filehash, sequence, size)
|
from setuptools import setup, Extension
import os

# Execute version.py to obtain __version__ without importing the package.
# 'with' ensures the file handle is closed (the original leaked it via
# exec(open(...).read())).
with open('openctm/version.py') as f:
    exec(f.read())

long_description = ''
if os.path.exists('README.md'):
    with open('README.md', 'r') as f:
        long_description = f.read()
# Package metadata; __version__ is injected by exec'ing openctm/version.py
# earlier in this file.
setup(
    name='python-openctm',
    version=__version__,
    description='Python Interface for the OpenCTM File Format',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/lejafar/python-openctm',
    author='Rafael Hautekiet',
    author_email='rafaelhautekiet@gmail.com',
    license='zlib License',
    # NOTE(review): '_foo'/stub.cc looks like a placeholder extension
    # (commonly used to force platform-specific wheels) — confirm intent.
    ext_modules=[Extension('_foo', ['stub.cc'])],
    packages=['openctm'],
    # Pre-built OpenCTM shared libraries bundled for macOS and Linux.
    package_data={
        'openctm': ['libs/libopenctm.dylib',
                    'libs/libopenctm.so'],
    },
    install_requires=[
        'numpy>=1.14.2',
    ],
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: zlib/libpng License",
        "Operating System :: OS Independent",
    ]
)
|
import tensorflow as tf
from tensorflow.python.framework import ops
from .backprojecting_op import backproject_grad
'''
@tf.RegisterShape("Backproject")
def _backproject_shape(op):
"""Shape function for the Backproject op.
"""
dims_data = op.inputs[0].get_shape().as_list()
batch_size = dims_data[0]
channels = dims_data[3]
dims_label = op.inputs[1].get_shape().as_list()
num_classes = dims_label[3]
grid_size = op.get_attr('grid_size')
output_shape = tf.TensorShape([batch_size, grid_size, grid_size, grid_size, channels])
output_shape_label = tf.TensorShape([batch_size, grid_size, grid_size, grid_size, num_classes])
output_shape_flag = tf.TensorShape([batch_size, grid_size, grid_size, grid_size, 1])
return [output_shape, output_shape_label, output_shape_flag]
'''
@ops.RegisterGradient("Backproject")
def _backproject_grad(op, grad, tmp, _):
    """The gradients for `backproject`.

    Args:
      op: The `backproject` `Operation` being differentiated; used to
        recover the forward op's inputs and attributes.
      grad: Gradient with respect to the first output of `backproject`.

    Returns:
      Gradients with respect to the inputs of `backproject`; only the
      first input (data) receives a gradient.
    """
    # Recover the attributes the forward op was configured with.
    grid_size = op.get_attr('grid_size')
    kernel_size = op.get_attr('kernel_size')
    threshold = op.get_attr('threshold')
    # Forward inputs: data (0), depth (2), meta_data (3); input 1 is not
    # needed for the gradient computation.
    data_grad = backproject_grad(op.inputs[0], op.inputs[2], op.inputs[3],
                                 grad, grid_size, kernel_size, threshold)
    # Five inputs -> five gradient slots; only data is differentiable.
    return [data_grad, None, None, None, None]
|
import pandas as pd
import numpy as np
import os
import datetime
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='arguments')
    parser.add_argument('data_folder', type=str, help='folder to save data')
    # NOTE(review): 'default' on a positional argument is ignored by
    # argparse unless nargs='?' is also given — confirm intent.
    parser.add_argument('ticker_name', type=str, default='BTC-USD',
                        help='ticker name to collect data')
    parser.add_argument('window_size', type=int, help='folder to save data')
    args = parser.parse_args()
    ticker_name = args.ticker_name
    data_path = args.data_folder
    window_size = args.window_size
    averages_dir = os.path.join(data_path, ticker_name, "average")
    print(averages_dir)
    # Load every hourly average CSV (<averages_dir>/<date>/<hour>.csv) into
    # one frame indexed by timestamp. DataFrame.append was removed in
    # pandas 2.0, so frames are collected in a list and concatenated once.
    hourly_frames = []
    for date in os.listdir(averages_dir):
        date_avg_dir = os.path.join(averages_dir, date)
        for h in os.listdir(date_avg_dir):
            hour_avg = pd.read_csv(os.path.join(date_avg_dir, h))
            hour_avg['time'] = [datetime.datetime.strptime(f"{str(date)} {h.split('.')[0]}", '%Y-%m-%d %H')]
            hour_avg.set_index('time', inplace=True, drop=True)
            hourly_frames.append(hour_avg)
    all_df = pd.concat(hourly_frames)
    all_df = all_df.drop("Unnamed: 0", axis=1)
    all_df = all_df.sort_index().reset_index()
    # Simple moving average over every value column; 'time' is re-attached.
    sma = all_df.iloc[:, 1:].rolling(window=window_size).mean()
    sma['time'] = all_df['time']
    moving_averages_dir = averages_dir.replace('average', 'moving_averages')
    os.makedirs(moving_averages_dir, exist_ok=True)
    for i, row in sma.iterrows():
        # The first window_size-1 rows are NaN from the rolling mean; skip.
        if not pd.isna(row['Low_mean']):
            sma_date_dir = os.path.join(moving_averages_dir, str(row['time'].date()))
            os.makedirs(sma_date_dir, exist_ok=True)
            out_csv = os.path.join(moving_averages_dir,
                                   str(row['time'].date()),
                                   f'{window_size}SMA_' + str(row['time'].hour) + '.csv')
            print(out_csv)
            pd.DataFrame({f'Low_mean_{window_size}_sma': [row['Low_mean']],
                          f'High_mean_{window_size}_sma': [row['High_mean']],
                          'time': [row['time']]}).to_csv(out_csv, index=False)
|
# rraman and 3d dimenssion - plus
#! /usr/bin/env python
import numpy as np
import pylab
from scipy.optimize import leastsq
def lorentzian(x, p):
    """Lorentzian peak profile.

    p = [hwhm, center, amplitude]; the value at x == center is amplitude.
    Works elementwise when x is a numpy array.
    """
    hwhm, center, amplitude = p[0], p[1], p[2]
    return amplitude * (hwhm**2 / ((x - center)**2 + hwhm**2))
def gaussian(x, p):
    """Gaussian peak profile.

    p = [fwhm, center, amplitude]; the value at x == center is amplitude.
    The standard deviation is derived from the FWHM.
    """
    fwhm, center, amplitude = p[0], p[1], p[2]
    sigma = fwhm / 2 / np.sqrt(2 * np.log(2))
    return amplitude * np.exp(-(x - center)**2 / (2 * sigma**2))
def residuals(p, y, x):
    """Residual between observed y and the Lorentzian model evaluated at x."""
    return y - lorentzian(x, p)
def local_fit(x, y, section):
    """Fit a Lorentzian to the slice of (x, y) given by section = [start, end].

    The slice minimum is subtracted as a flat background before fitting.
    Returns (best_parameters, x_slice, fitted_curve).
    """
    x=x[section[0]:section[1]]
    y=y[section[0]:section[1]]
    y_bg=y.min()
    # Initial guess: [half the x-span, slice midpoint, x-span].
    p = [(x.max()-x.min())/2, (x.max()-x.min())/2+x.min() , x.max()-x.min()]
    # [fwhm, peak center, intensity] #
    pbest = leastsq(residuals, p, args=(y-y_bg,x), full_output=1)
    best_parameters = pbest[0]
    # p[0] acts as a half-width in lorentzian(); doubling presumably
    # converts it to a FWHM for reporting — confirm.
    best_parameters[0] *= 2
    fit = lorentzian(x,best_parameters) + y_bg
    return best_parameters, x, fit
def main(f_object):
    """Fit three Raman peak windows for every spectrum in the data file.

    Writes '<f_object>.fitted.txt' (tab-separated fit parameters per
    spectrum) and returns (totalfit_x, totalfit_y): stacked arrays of the
    fitted curves for plotting.
    """
    ys = np.loadtxt(f_object)
    # Wavenumber axis of the spectrometer (1024 points).
    x_orig = np.linspace(1240.0691, 2919.5986, 1024)
    # Index windows; per the output header these are the D, G and 2D peaks.
    sections = [[0, 150], [150, 300], [800, 1000]]
    fit_results = []
    for line in ys:
        # line[0] is the timestamp; the remaining columns are intensities.
        fit_result_for_a_line = [line[0]]
        for section in sections:
            fit_result = local_fit(x_orig, line[1:], section)
            fit_result_for_a_line = np.append(fit_result_for_a_line, np.abs(fit_result[0]), axis = 0)
        # Append the 2D/G amplitude ratio (flat indices 9 and 6).
        fit_result_for_a_line = np.append(fit_result_for_a_line, fit_result_for_a_line[9] / fit_result_for_a_line[6])
        fit_results = np.append(fit_results, fit_result_for_a_line, axis=1)
        print(line[0])
    # 11 columns: time + 3 params x 3 peaks + ratio.
    fit_results=np.reshape(fit_results,(ys[:,0].size,11))
    Header_text = 'Time\t' + 'D_FWHM\tD_Center\tD_Amplitude\t' + \
        'G_FWHM\tG_Center\tG_Amplitude\t' + '2D_FWHM\t2D_Center\t2D_Amplitude\t2D/G Ratio'
    np.savetxt(f_object+'.fitted.txt', fit_results, delimiter='\t', newline='\r\n', header=Header_text)
    # Second pass: build continuous fitted curves, padding the gap before
    # the 2D window with a flat 245 baseline (500 points).
    for line in ys:
        for section in sections:
            fit_result = local_fit(x_orig, line[1:], section)
            print(fit_result[0])
            if section == [0,150] :
                fit_x = fit_result[1]
                fit_y = fit_result[2]
            elif section == [800,1000] :
                fit_x = np.append(fit_x, fit_result[1],axis=0)
                fit_y = np.append(fit_y,np.linspace(245,245,500).tolist(),axis=0)
                fit_y = np.append(fit_y, fit_result[2],axis=0)
            else :
                fit_x = np.append(fit_x, fit_result[1],axis=0)
                fit_y = np.append(fit_y, fit_result[2],axis=0)
        # Stack this spectrum's curve onto the running totals.
        if line[0] == ys[0][0]:
            totalfit_x = [fit_x]
            totalfit_y = [fit_y]
        else :
            totalfit_x = np.append(totalfit_x, [fit_x], axis=0)
            totalfit_y = np.append(totalfit_y, [fit_y], axis=0)
    return totalfit_x, totalfit_y
if __name__ == '__main__':
    import time
    from mpl_toolkits.mplot3d import axes3d
    import matplotlib.pyplot as plt
    from matplotlib import cm

    f_object = 'C:\\Users\\sec\\Desktop\\manual\\graphene_3-3-C_200slit_0.2sec_5.1desity'
    start = time.time()
    x, z = main(f_object)
    end = time.time()

    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') was removed in matplotlib 3.6;
    # fig.add_subplot(projection='3d') is the modern equivalent. The
    # original's duplicate 'bx = fig.gca(...)' and dead 'i = range(1135)'
    # have been removed.
    ax = fig.gca(projection='3d')

    # Build X (point index) and Y (spectrum index) grids: 1135 rows x 1000 cols.
    for i in range(1135):
        if i == 0:
            X = np.array([np.linspace(1, 1000, 1000)])
        else:
            X = np.append(X, [np.linspace(1, 1000, 1000)], axis=0)
    for i in range(1135):
        if i == 0:
            Y = np.array([np.linspace(1, 1, 1000)])
        else:
            Y = np.append(Y, [np.linspace(i + 1, i + 1, 1000)], axis=0)

    # Drop the first 300 spectra before plotting.
    X = X[300:]
    Y = Y[300:]
    Z = z[300:]
    ax.plot_surface(X, Y, Z, rstride=50, cstride=50, alpha=0.2)
    cset = ax.contourf(X, Y, Z, zdir='z', offset=220, cmap=cm.coolwarm)
    cset = ax.contourf(X, Y, Z, zdir='y', offset=1200, cmap=cm.coolwarm)
    ax.set_xlabel('X')
    ax.set_xlim(0, 1100)
    ax.set_ylabel('Y')
    ax.set_ylim(0, 1200)
    ax.set_zlabel('Z')
    ax.set_zlim(220, 300)
    plt.show()
    # Bug fix: the original printed start-end (always negative);
    # elapsed time is end-start.
    print(end - start)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.