max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
health_check/urls.py | ashexpertVersion2/django-health-check | 0 | 6612451 | <filename>health_check/urls.py<gh_stars>0
from django.conf.urls import url
from health_check.views import MainView
app_name = 'health_check'
urlpatterns = [
url(r'^$', MainView.as_view(), name='health_check_home'),
]
| <filename>health_check/urls.py<gh_stars>0
from django.conf.urls import url
from health_check.views import MainView
app_name = 'health_check'
urlpatterns = [
url(r'^$', MainView.as_view(), name='health_check_home'),
]
| none | 1 | 1.490439 | 1 | |
bin/gdb/check_GNU_style.py | a74nh/dotfiles | 3 | 6612452 | <filename>bin/gdb/check_GNU_style.py<gh_stars>1-10
#!/usr/bin/env python3
#
# Checks some of the GNU style formatting rules in a set of patches.
# The script is a rewritten of the same bash script and should eventually
# replace the former script.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
import argparse
import sys
from check_GNU_style_lib import check_GNU_style_file
def main():
parser = argparse.ArgumentParser(description='Check GNU coding style.')
parser.add_argument('file', help = 'File with a patch')
parser.add_argument('-f', '--format', default = 'stdio',
help = 'Display format',
choices = ['stdio', 'quickfix'])
args = parser.parse_args()
filename = args.file
format = args.format
if filename == '-':
check_GNU_style_file(sys.stdin, None, format)
else:
with open(filename, 'rb') as diff_file:
check_GNU_style_file(diff_file, 'utf-8', format)
main()
| <filename>bin/gdb/check_GNU_style.py<gh_stars>1-10
#!/usr/bin/env python3
#
# Checks some of the GNU style formatting rules in a set of patches.
# The script is a rewritten of the same bash script and should eventually
# replace the former script.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
import argparse
import sys
from check_GNU_style_lib import check_GNU_style_file
def main():
parser = argparse.ArgumentParser(description='Check GNU coding style.')
parser.add_argument('file', help = 'File with a patch')
parser.add_argument('-f', '--format', default = 'stdio',
help = 'Display format',
choices = ['stdio', 'quickfix'])
args = parser.parse_args()
filename = args.file
format = args.format
if filename == '-':
check_GNU_style_file(sys.stdin, None, format)
else:
with open(filename, 'rb') as diff_file:
check_GNU_style_file(diff_file, 'utf-8', format)
main()
| en | 0.881626 | #!/usr/bin/env python3 # # Checks some of the GNU style formatting rules in a set of patches. # The script is a rewritten of the same bash script and should eventually # replace the former script. # # This file is part of GCC. # # GCC is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3, or (at your option) any later # version. # # GCC is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License # along with GCC; see the file COPYING3. If not see # <http://www.gnu.org/licenses/>. */ | 2.725935 | 3 |
eval/examine_survey.py | CristianViorelPopa/BART-TL-topic-label-generation | 6 | 6612453 | <gh_stars>1-10
import sys
import numpy as np
from survey_utils import get_model_scores
def dcg(scores):
result = 0
for idx, score in enumerate(scores):
result += (2 ** score - 1) / np.log2(idx + 2)
return result
def ndcg(scores, n):
target_scores = scores[:n]
perfect_scores = sorted(scores, reverse=True)[:n]
return dcg(target_scores) / dcg(perfect_scores)
def main():
if len(sys.argv) < 4:
print("Usage: " + sys.argv[0] + " [<survey csv responses>]+ <topics csv> <model hypos>")
exit(0)
subject_partitioning = [
('all', 2, None, False),
('english', 0 * 10 + 2, 18 * 10 + 2, True),
('biology', 18 * 10 + 2, 37 * 10 + 2, True),
('economics', 37 * 10 + 2, 55 * 10 + 2, True),
('law', 55 * 10 + 2, 73 * 10 + 2, True),
('photo', 73 * 10 + 2, 91 * 10 + 2, True)
]
for subject, start, end, ignore_validation in subject_partitioning:
model_scores, sufficiently_annotated_labels, insufficiently_annotated_labels, total_annotated_labels = get_model_scores(sys.argv[1:-2], sys.argv[-2], sys.argv[-1], start, end, ignore_validation)
model_scores = np.array(list(filter(bool, [[float(score[1]) for score in scores] for scores in model_scores])))
print(subject.upper())
print("--------")
print("Top-1 Average Rating: " + str(np.mean([scores[:1] for scores in model_scores if len(scores) >= 1])))
print("Top-3 Average Rating: " + str(np.mean([scores[:3] for scores in model_scores if len(scores) >= 3])))
print("Top-5 Average Rating: " + str(np.mean([scores[:5] for scores in model_scores if len(scores) >= 5])))
print("All-labels Average Rating: " + str(np.mean([np.mean(scores) for scores in model_scores])))
print("nDCG-1: " + str(np.mean([ndcg(scores, 1) for scores in model_scores])))
print("nDCG-3: " + str(np.mean([ndcg(scores, 3) for scores in model_scores])))
print("nDCG-5: " + str(np.mean([ndcg(scores, 5) for scores in model_scores])))
print("")
total_labels = sum([len(scores) for scores in model_scores])
print("Total number of labels: " + str(total_labels))
print("============")
print("Sufficiently annotated labels: " + str(sufficiently_annotated_labels))
print("Insufficiently annotated labels: " + str(insufficiently_annotated_labels))
print("Total annotated labels: " + str(total_annotated_labels))
print("")
if __name__ == '__main__':
main()
| import sys
import numpy as np
from survey_utils import get_model_scores
def dcg(scores):
result = 0
for idx, score in enumerate(scores):
result += (2 ** score - 1) / np.log2(idx + 2)
return result
def ndcg(scores, n):
target_scores = scores[:n]
perfect_scores = sorted(scores, reverse=True)[:n]
return dcg(target_scores) / dcg(perfect_scores)
def main():
if len(sys.argv) < 4:
print("Usage: " + sys.argv[0] + " [<survey csv responses>]+ <topics csv> <model hypos>")
exit(0)
subject_partitioning = [
('all', 2, None, False),
('english', 0 * 10 + 2, 18 * 10 + 2, True),
('biology', 18 * 10 + 2, 37 * 10 + 2, True),
('economics', 37 * 10 + 2, 55 * 10 + 2, True),
('law', 55 * 10 + 2, 73 * 10 + 2, True),
('photo', 73 * 10 + 2, 91 * 10 + 2, True)
]
for subject, start, end, ignore_validation in subject_partitioning:
model_scores, sufficiently_annotated_labels, insufficiently_annotated_labels, total_annotated_labels = get_model_scores(sys.argv[1:-2], sys.argv[-2], sys.argv[-1], start, end, ignore_validation)
model_scores = np.array(list(filter(bool, [[float(score[1]) for score in scores] for scores in model_scores])))
print(subject.upper())
print("--------")
print("Top-1 Average Rating: " + str(np.mean([scores[:1] for scores in model_scores if len(scores) >= 1])))
print("Top-3 Average Rating: " + str(np.mean([scores[:3] for scores in model_scores if len(scores) >= 3])))
print("Top-5 Average Rating: " + str(np.mean([scores[:5] for scores in model_scores if len(scores) >= 5])))
print("All-labels Average Rating: " + str(np.mean([np.mean(scores) for scores in model_scores])))
print("nDCG-1: " + str(np.mean([ndcg(scores, 1) for scores in model_scores])))
print("nDCG-3: " + str(np.mean([ndcg(scores, 3) for scores in model_scores])))
print("nDCG-5: " + str(np.mean([ndcg(scores, 5) for scores in model_scores])))
print("")
total_labels = sum([len(scores) for scores in model_scores])
print("Total number of labels: " + str(total_labels))
print("============")
print("Sufficiently annotated labels: " + str(sufficiently_annotated_labels))
print("Insufficiently annotated labels: " + str(insufficiently_annotated_labels))
print("Total annotated labels: " + str(total_annotated_labels))
print("")
if __name__ == '__main__':
main() | none | 1 | 2.636513 | 3 | |
cogs/bot/stats.py | iomintz/Chiaki-Nanami | 1 | 6612454 | import discord
import psutil
from discord.ext import commands
class Stats:
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
@commands.command(name='stats')
@commands.bot_has_permissions(embed_links=True)
async def stats(self, ctx):
"""Shows some general statistics about the bot.
Do not confuse this with `{prefix}about` which is just the
general info. This is just numbers.
"""
bot = self.bot
with self.process.oneshot():
memory_usage_in_mb = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
uptime_seconds = bot.uptime.total_seconds()
presence = (
f'{bot.guild_count} Servers\n'
f'{ilen(bot.get_all_channels())} Channels\n'
f'{bot.user_count} Users'
)
chiaki_embed = (discord.Embed(description=bot.appinfo.description, colour=self.bot.colour)
.set_author(name=str(ctx.bot.user), icon_url=bot.user.avatar_url)
.add_field(name='CPU Usage', value=f'{cpu_usage}%\n{memory_usage_in_mb :.2f}MB')
.add_field(name='Presence', value=presence)
.add_field(name='Uptime', value=self.bot.str_uptime.replace(', ', '\n'))
)
await ctx.send(embed=chiaki_embed)
def setup(bot):
bot.add_cog(Stats(bot))
| import discord
import psutil
from discord.ext import commands
class Stats:
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
@commands.command(name='stats')
@commands.bot_has_permissions(embed_links=True)
async def stats(self, ctx):
"""Shows some general statistics about the bot.
Do not confuse this with `{prefix}about` which is just the
general info. This is just numbers.
"""
bot = self.bot
with self.process.oneshot():
memory_usage_in_mb = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
uptime_seconds = bot.uptime.total_seconds()
presence = (
f'{bot.guild_count} Servers\n'
f'{ilen(bot.get_all_channels())} Channels\n'
f'{bot.user_count} Users'
)
chiaki_embed = (discord.Embed(description=bot.appinfo.description, colour=self.bot.colour)
.set_author(name=str(ctx.bot.user), icon_url=bot.user.avatar_url)
.add_field(name='CPU Usage', value=f'{cpu_usage}%\n{memory_usage_in_mb :.2f}MB')
.add_field(name='Presence', value=presence)
.add_field(name='Uptime', value=self.bot.str_uptime.replace(', ', '\n'))
)
await ctx.send(embed=chiaki_embed)
def setup(bot):
bot.add_cog(Stats(bot))
| en | 0.943889 | Shows some general statistics about the bot. Do not confuse this with `{prefix}about` which is just the general info. This is just numbers. | 2.633778 | 3 |
bag_detection-master/scripts/Frame_diff_Test.py | jamesxiu/Oystermaran2021 | 1 | 6612455 | import cv2
cap = cv2.VideoCapture('GP066349.MP4')
ret, current_frame = cap.read()
previous_frame = current_frame
frames = 0
while(cap.isOpened()):
if current_frame is not None and previous_frame is not None:
current_frame_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
previous_frame_gray = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
frame_diff = cv2.absdiff(current_frame_gray,previous_frame_gray)
frame_diff = cv2.convertScaleAbs(frame_diff, alpha=3, beta=0)
cv2.imshow('frame diff ',frame_diff)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if current_frame is not None:
previous_frame = current_frame.copy()
else:
previous_frame = None
ret, current_frame = cap.read()
frames += 1
if frames > 1000:
break
cap.release()
cv2.destroyAllWindows()
# import cv2
# cap = cv2.VideoCapture('GP066349.MP4')
# frames = 0
# while(cap.isOpened()):
# # Capture frame-by-frame
# ret, frame = cap.read()
# print(ret, frame)
# frames += 1
# if ret:
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow("Output", gray)
# if cv2.waitKey(33) & 0xFF == ord('q'):
# break
# else:
# continue
# if frames > 1000:
# break
# cap.release()
# cv2.destroyAllWindows()
# print(frames) | import cv2
cap = cv2.VideoCapture('GP066349.MP4')
ret, current_frame = cap.read()
previous_frame = current_frame
frames = 0
while(cap.isOpened()):
if current_frame is not None and previous_frame is not None:
current_frame_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
previous_frame_gray = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
frame_diff = cv2.absdiff(current_frame_gray,previous_frame_gray)
frame_diff = cv2.convertScaleAbs(frame_diff, alpha=3, beta=0)
cv2.imshow('frame diff ',frame_diff)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if current_frame is not None:
previous_frame = current_frame.copy()
else:
previous_frame = None
ret, current_frame = cap.read()
frames += 1
if frames > 1000:
break
cap.release()
cv2.destroyAllWindows()
# import cv2
# cap = cv2.VideoCapture('GP066349.MP4')
# frames = 0
# while(cap.isOpened()):
# # Capture frame-by-frame
# ret, frame = cap.read()
# print(ret, frame)
# frames += 1
# if ret:
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow("Output", gray)
# if cv2.waitKey(33) & 0xFF == ord('q'):
# break
# else:
# continue
# if frames > 1000:
# break
# cap.release()
# cv2.destroyAllWindows()
# print(frames) | en | 0.217641 | # import cv2 # cap = cv2.VideoCapture('GP066349.MP4') # frames = 0 # while(cap.isOpened()): # # Capture frame-by-frame # ret, frame = cap.read() # print(ret, frame) # frames += 1 # if ret: # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # cv2.imshow("Output", gray) # if cv2.waitKey(33) & 0xFF == ord('q'): # break # else: # continue # if frames > 1000: # break # cap.release() # cv2.destroyAllWindows() # print(frames) | 2.836832 | 3 |
image_k_means.py | bircanarslann/k-means-image-compression | 9 | 6612456 | import argparse
import numpy as np
from skimage import io, img_as_float
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def print_distortion_distance(cluster_prototypes, points_by_label, k):
distances = np.zeros((k,))
for k_i in range(k):
if (points_by_label[k_i] is not None):
distances[k_i] += np.linalg.norm(points_by_label[k_i] - cluster_prototypes[k_i], axis=1).sum()
else:
distances[k_i] = -1
print('Distortion Distances:')
print(distances)
def k_means_clustering(image_vectors, k, num_iterations):
# Create corresponding label array (Initialize with Label: -1)
labels = np.full((image_vectors.shape[0],), -1)
# Assign Initial Cluster Prototypes
cluster_prototypes = np.random.rand(k, 3)
# Iteration Loop
for i in range(num_iterations):
print('Iteration: ' + str(i + 1))
points_by_label = [None for k_i in range(k)]
# Label them via closest point
for rgb_i, rgb in enumerate(image_vectors):
# [rgb, rgb, rgb, rgb, ...]
rgb_row = np.repeat(rgb, k).reshape(3, k).T
# Find the Closest Label via L2 Norm
closest_label = np.argmin(np.linalg.norm(rgb_row - cluster_prototypes, axis=1))
labels[rgb_i] = closest_label
if (points_by_label[closest_label] is None):
points_by_label[closest_label] = []
points_by_label[closest_label].append(rgb)
# Optimize Cluster Prototypes (Center of Mass of Cluster)
for k_i in range(k):
if (points_by_label[k_i] is not None):
new_cluster_prototype = np.asarray(points_by_label[k_i]).sum(axis=0) / len(points_by_label[k_i])
cluster_prototypes[k_i] = new_cluster_prototype
# Find Current Distortion Distances
print_distortion_distance(cluster_prototypes, points_by_label, k)
return (labels, cluster_prototypes)
# NOTE: UNUSED
def assign_image_color_by_label(labels, cluster_prototypes):
output = np.zeros(labels.shape + (3,))
for label_i, label in enumerate(labels):
output[label_i] = cluster_prototypes[label]
return output
def plot_image_colors_by_color(name, image_vectors):
fig = plt.figure()
ax = Axes3D(fig)
for rgb in image_vectors:
ax.scatter(rgb[0], rgb[1], rgb[2], c=rgb, marker='o')
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
fig.savefig(name + '.png')
def plot_image_colors_by_label(name, image_vectors, labels, cluster_prototypes):
fig = plt.figure()
ax = Axes3D(fig)
for rgb_i, rgb in enumerate(image_vectors):
ax.scatter(rgb[0], rgb[1], rgb[2], c=cluster_prototypes[labels[rgb_i]], marker='o')
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
fig.savefig(name + '.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='image_compression', description='k-means Image Compressor', add_help=False)
parser.add_argument('image_name', type=str, help='Image Filename')
parser.add_argument('-k', type=int, dest='k', help='Number of Clusters', default=10)
parser.add_argument('-i', '--iterations', type=int, dest='iterations', help='Number of Iterations', default=20)
parser.add_argument('--save-scatter', dest='scatter', action='store_true')
parser.set_defaults(scatter=False)
args = parser.parse_args()
params = vars(args)
image = io.imread(params['image_name'])[:, :, :3] # Always read it as RGB (ignoring the Alpha)
image = img_as_float(image)
image_dimensions = image.shape
# Get Image Name without the extension
image_tokens = params['image_name'].split('.')
image_name = '.'.join(params['image_name'].split('.')[:-1]) if len(image_tokens) > 1 else params['image_name']
# -1 infers dimensions from the length of the matrix, while keeping the last dimension a 3-tuple
image_vectors = image.reshape(-1, image.shape[-1])
if (params['scatter']):
print('Creating Initial Scatter Plot! Might take a while...')
plot_image_colors_by_color('Initial_Colors_' + image_name, image_vectors)
print('Scatter Plot Complete')
labels, color_centroids = k_means_clustering(image_vectors, k=params['k'], num_iterations=params['iterations'])
output_image = np.zeros(image_vectors.shape)
for i in range(output_image.shape[0]):
output_image[i] = color_centroids[labels[i]]
output_image = output_image.reshape(image_dimensions)
print('Saving Compressed Image...')
io.imsave(image_name + '_compressed_' + str(params['k']) + '.png', output_image, dtype=float)
print('Image Compression Completed!')
if (params['scatter']):
print('Creating Output Scatter Plot! Might take a while...')
plot_image_colors_by_label(str(params['k']) + '_Cluster_Colors_' + image_name, image_vectors, labels, color_centroids)
print('Scatter Plot Complete')
| import argparse
import numpy as np
from skimage import io, img_as_float
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def print_distortion_distance(cluster_prototypes, points_by_label, k):
distances = np.zeros((k,))
for k_i in range(k):
if (points_by_label[k_i] is not None):
distances[k_i] += np.linalg.norm(points_by_label[k_i] - cluster_prototypes[k_i], axis=1).sum()
else:
distances[k_i] = -1
print('Distortion Distances:')
print(distances)
def k_means_clustering(image_vectors, k, num_iterations):
# Create corresponding label array (Initialize with Label: -1)
labels = np.full((image_vectors.shape[0],), -1)
# Assign Initial Cluster Prototypes
cluster_prototypes = np.random.rand(k, 3)
# Iteration Loop
for i in range(num_iterations):
print('Iteration: ' + str(i + 1))
points_by_label = [None for k_i in range(k)]
# Label them via closest point
for rgb_i, rgb in enumerate(image_vectors):
# [rgb, rgb, rgb, rgb, ...]
rgb_row = np.repeat(rgb, k).reshape(3, k).T
# Find the Closest Label via L2 Norm
closest_label = np.argmin(np.linalg.norm(rgb_row - cluster_prototypes, axis=1))
labels[rgb_i] = closest_label
if (points_by_label[closest_label] is None):
points_by_label[closest_label] = []
points_by_label[closest_label].append(rgb)
# Optimize Cluster Prototypes (Center of Mass of Cluster)
for k_i in range(k):
if (points_by_label[k_i] is not None):
new_cluster_prototype = np.asarray(points_by_label[k_i]).sum(axis=0) / len(points_by_label[k_i])
cluster_prototypes[k_i] = new_cluster_prototype
# Find Current Distortion Distances
print_distortion_distance(cluster_prototypes, points_by_label, k)
return (labels, cluster_prototypes)
# NOTE: UNUSED
def assign_image_color_by_label(labels, cluster_prototypes):
output = np.zeros(labels.shape + (3,))
for label_i, label in enumerate(labels):
output[label_i] = cluster_prototypes[label]
return output
def plot_image_colors_by_color(name, image_vectors):
fig = plt.figure()
ax = Axes3D(fig)
for rgb in image_vectors:
ax.scatter(rgb[0], rgb[1], rgb[2], c=rgb, marker='o')
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
fig.savefig(name + '.png')
def plot_image_colors_by_label(name, image_vectors, labels, cluster_prototypes):
fig = plt.figure()
ax = Axes3D(fig)
for rgb_i, rgb in enumerate(image_vectors):
ax.scatter(rgb[0], rgb[1], rgb[2], c=cluster_prototypes[labels[rgb_i]], marker='o')
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
fig.savefig(name + '.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='image_compression', description='k-means Image Compressor', add_help=False)
parser.add_argument('image_name', type=str, help='Image Filename')
parser.add_argument('-k', type=int, dest='k', help='Number of Clusters', default=10)
parser.add_argument('-i', '--iterations', type=int, dest='iterations', help='Number of Iterations', default=20)
parser.add_argument('--save-scatter', dest='scatter', action='store_true')
parser.set_defaults(scatter=False)
args = parser.parse_args()
params = vars(args)
image = io.imread(params['image_name'])[:, :, :3] # Always read it as RGB (ignoring the Alpha)
image = img_as_float(image)
image_dimensions = image.shape
# Get Image Name without the extension
image_tokens = params['image_name'].split('.')
image_name = '.'.join(params['image_name'].split('.')[:-1]) if len(image_tokens) > 1 else params['image_name']
# -1 infers dimensions from the length of the matrix, while keeping the last dimension a 3-tuple
image_vectors = image.reshape(-1, image.shape[-1])
if (params['scatter']):
print('Creating Initial Scatter Plot! Might take a while...')
plot_image_colors_by_color('Initial_Colors_' + image_name, image_vectors)
print('Scatter Plot Complete')
labels, color_centroids = k_means_clustering(image_vectors, k=params['k'], num_iterations=params['iterations'])
output_image = np.zeros(image_vectors.shape)
for i in range(output_image.shape[0]):
output_image[i] = color_centroids[labels[i]]
output_image = output_image.reshape(image_dimensions)
print('Saving Compressed Image...')
io.imsave(image_name + '_compressed_' + str(params['k']) + '.png', output_image, dtype=float)
print('Image Compression Completed!')
if (params['scatter']):
print('Creating Output Scatter Plot! Might take a while...')
plot_image_colors_by_label(str(params['k']) + '_Cluster_Colors_' + image_name, image_vectors, labels, color_centroids)
print('Scatter Plot Complete')
| en | 0.735689 | # Create corresponding label array (Initialize with Label: -1) # Assign Initial Cluster Prototypes # Iteration Loop # Label them via closest point # [rgb, rgb, rgb, rgb, ...] # Find the Closest Label via L2 Norm # Optimize Cluster Prototypes (Center of Mass of Cluster) # Find Current Distortion Distances # NOTE: UNUSED # Always read it as RGB (ignoring the Alpha) # Get Image Name without the extension # -1 infers dimensions from the length of the matrix, while keeping the last dimension a 3-tuple | 2.690857 | 3 |
simuvex/procedures/cgc/transmit.py | praetorian-inc/simuvex | 8 | 6612457 | import simuvex
class transmit(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd, buf, count, tx_bytes):
if self.state.mode == 'fastpath':
# Special case for CFG generation
self.state.memory.store(tx_bytes, count, endness='Iend_LE')
return self.state.se.BVV(0, self.state.arch.bits)
if ABSTRACT_MEMORY in self.state.options:
data = self.state.memory.load(buf, count)
self.state.posix.write(fd, data, count)
self.state.memory.store(tx_bytes, count, endness='Iend_LE')
else:
if self.state.satisfiable(extra_constraints=[count != 0]):
data = self.state.memory.load(buf, count)
self.state.posix.write(fd, data, count)
self.data = data
else:
self.data = None
self.size = count
self.state.memory.store(tx_bytes, count, endness='Iend_LE', condition=tx_bytes != 0)
# TODO: transmit failure
return self.state.se.BVV(0, self.state.arch.bits)
from simuvex.s_options import ABSTRACT_MEMORY
| import simuvex
class transmit(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, fd, buf, count, tx_bytes):
if self.state.mode == 'fastpath':
# Special case for CFG generation
self.state.memory.store(tx_bytes, count, endness='Iend_LE')
return self.state.se.BVV(0, self.state.arch.bits)
if ABSTRACT_MEMORY in self.state.options:
data = self.state.memory.load(buf, count)
self.state.posix.write(fd, data, count)
self.state.memory.store(tx_bytes, count, endness='Iend_LE')
else:
if self.state.satisfiable(extra_constraints=[count != 0]):
data = self.state.memory.load(buf, count)
self.state.posix.write(fd, data, count)
self.data = data
else:
self.data = None
self.size = count
self.state.memory.store(tx_bytes, count, endness='Iend_LE', condition=tx_bytes != 0)
# TODO: transmit failure
return self.state.se.BVV(0, self.state.arch.bits)
from simuvex.s_options import ABSTRACT_MEMORY
| en | 0.359612 | #pylint:disable=arguments-differ # Special case for CFG generation # TODO: transmit failure | 2.445276 | 2 |
codechecker_kernel/kernel.py | esstorm/codechecker_kernel | 0 | 6612458 | from ipykernel.kernelbase import Kernel
from subprocess import Popen, PIPE, call, CalledProcessError
import re
class CCKernel(Kernel):
implementation = 'CodeChecker'
implementation_version = '1.0'
language = 'no-op'
language_version = '0.1'
language_info = {'name': 'c++',
'codemirror_mode': 'text/x-c++src',
'mimetype': ' text/x-c++src',
'file_extension': '.cc'
}
banner = "Analyze c/c++ code inside Jupyter notebooks using Clang SA"
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=False):
if not silent:
with open("/tmp/file.cc", "w") as tmpFile:
tmpFile.write(code)
runCmd = """clang --version | head -1
CodeChecker check --print-steps -b "clang -c /tmp/file.cc" \
| grep -v '\[INFO' \
| sed -e '/----==== Summary ====----/,$ d'
"""
proc = Popen([runCmd], stdout=PIPE, shell=True)
res, _ = proc.communicate()
stream_content = {'name': 'stdout', 'text': res}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'ok',
# The base class increments the execution count
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
| from ipykernel.kernelbase import Kernel
from subprocess import Popen, PIPE, call, CalledProcessError
import re
class CCKernel(Kernel):
implementation = 'CodeChecker'
implementation_version = '1.0'
language = 'no-op'
language_version = '0.1'
language_info = {'name': 'c++',
'codemirror_mode': 'text/x-c++src',
'mimetype': ' text/x-c++src',
'file_extension': '.cc'
}
banner = "Analyze c/c++ code inside Jupyter notebooks using Clang SA"
def do_execute(self, code, silent, store_history=True, user_expressions=None,
allow_stdin=False):
if not silent:
with open("/tmp/file.cc", "w") as tmpFile:
tmpFile.write(code)
runCmd = """clang --version | head -1
CodeChecker check --print-steps -b "clang -c /tmp/file.cc" \
| grep -v '\[INFO' \
| sed -e '/----==== Summary ====----/,$ d'
"""
proc = Popen([runCmd], stdout=PIPE, shell=True)
res, _ = proc.communicate()
stream_content = {'name': 'stdout', 'text': res}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'ok',
# The base class increments the execution count
'execution_count': self.execution_count,
'payload': [],
'user_expressions': {},
}
| en | 0.24617 | clang --version | head -1 CodeChecker check --print-steps -b "clang -c /tmp/file.cc" \ | grep -v '\[INFO' \ | sed -e '/----==== Summary ====----/,$ d' # The base class increments the execution count | 2.184156 | 2 |
strava/api/_helpers.py | bwilczynski/strava-cli | 15 | 6612459 | from requests_oauthlib import OAuth2Session
from strava.config.creds_store import save_access_token, get_access_token
from strava.settings import (
STRAVA_API_BASE_URL,
STRAVA_CLIENT_ID,
REFRESH_TOKEN_URL,
STRAVA_CLIENT_SECRET,
)
def url(path):
return STRAVA_API_BASE_URL + path
def json(response):
response.raise_for_status()
return response.json()
token = get_access_token()
client = OAuth2Session(
STRAVA_CLIENT_ID,
token=token,
auto_refresh_url=REFRESH_TOKEN_URL,
auto_refresh_kwargs=dict(
client_id=STRAVA_CLIENT_ID, client_secret=STRAVA_CLIENT_SECRET
),
token_updater=save_access_token,
)
| from requests_oauthlib import OAuth2Session
from strava.config.creds_store import save_access_token, get_access_token
from strava.settings import (
STRAVA_API_BASE_URL,
STRAVA_CLIENT_ID,
REFRESH_TOKEN_URL,
STRAVA_CLIENT_SECRET,
)
def url(path):
return STRAVA_API_BASE_URL + path
def json(response):
response.raise_for_status()
return response.json()
token = get_access_token()
client = OAuth2Session(
STRAVA_CLIENT_ID,
token=token,
auto_refresh_url=REFRESH_TOKEN_URL,
auto_refresh_kwargs=dict(
client_id=STRAVA_CLIENT_ID, client_secret=STRAVA_CLIENT_SECRET
),
token_updater=save_access_token,
)
| none | 1 | 2.608942 | 3 | |
right_paddle.py | danielkubovic/PongTk | 0 | 6612460 | #!/usr/bin/env python3
from paddle import *
class RightPaddle(Paddle):
def __init__(self, x, y, width, height, canvas, color):
super().__init__(x, y, width, height, canvas, color)
def draw(self):
self.canvas.move(self.id, 0, self.speed)
paddle_position = self.canvas.coords(self.id)
# If paddle is out of canvas, disable movement
if paddle_position[1] > 0:
self.canvas.bind_all('<KeyPress-Up>', lambda event:
self.move(event, self.active_speed * (-1)))
self.canvas.bind_all('<KeyRelease-Up>', self.stop_up)
if paddle_position[3] < game.window_height:
self.canvas.bind_all('<KeyPress-Down>', lambda event:
self.move(event, self.active_speed))
self.canvas.bind_all('<KeyRelease-Down>', self.stop_down)
# If paddle is out of canvas, disable movement
if paddle_position[1] <= 0:
self.speed = 0
self.canvas.unbind_all('<KeyPress-Up>')
if paddle_position[3] >= game.window_height:
self.speed = 0
self.canvas.unbind_all('<KeyPress-Down>')
# Paddle movement through x axis
def move(self, evt, speed):
self.speed = speed
def stop_up(self, evt):
if not self.speed > 0:
self.speed = 0
def stop_down(self, evt):
if not self.speed < 0:
self.speed = 0
right_paddle = RightPaddle(game.window_width - 10,
game.window_ycenter-game.window_yquarter // 3,
10, 100, game.canvas, 'blue')
| #!/usr/bin/env python3
from paddle import *
class RightPaddle(Paddle):
def __init__(self, x, y, width, height, canvas, color):
super().__init__(x, y, width, height, canvas, color)
def draw(self):
self.canvas.move(self.id, 0, self.speed)
paddle_position = self.canvas.coords(self.id)
# If paddle is out of canvas, disable movement
if paddle_position[1] > 0:
self.canvas.bind_all('<KeyPress-Up>', lambda event:
self.move(event, self.active_speed * (-1)))
self.canvas.bind_all('<KeyRelease-Up>', self.stop_up)
if paddle_position[3] < game.window_height:
self.canvas.bind_all('<KeyPress-Down>', lambda event:
self.move(event, self.active_speed))
self.canvas.bind_all('<KeyRelease-Down>', self.stop_down)
# If paddle is out of canvas, disable movement
if paddle_position[1] <= 0:
self.speed = 0
self.canvas.unbind_all('<KeyPress-Up>')
if paddle_position[3] >= game.window_height:
self.speed = 0
self.canvas.unbind_all('<KeyPress-Down>')
# Paddle movement through x axis
def move(self, evt, speed):
self.speed = speed
def stop_up(self, evt):
if not self.speed > 0:
self.speed = 0
def stop_down(self, evt):
if not self.speed < 0:
self.speed = 0
right_paddle = RightPaddle(game.window_width - 10,
game.window_ycenter-game.window_yquarter // 3,
10, 100, game.canvas, 'blue')
| en | 0.778957 | #!/usr/bin/env python3 # If paddle is out of canvas, disable movement # If paddle is out of canvas, disable movement # Paddle movement through x axis | 3.398118 | 3 |
rotation/ops.py | vihari/CSD | 41 | 6612461 | import tensorflow as tf
import tensorflow.contrib as tf_contrib
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf_contrib.layers.variance_scaling_initializer()
weight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):
    """2-D convolution using the module-level weight initializer/regularizer.

    Args:
        x: input feature-map tensor.
        channels: number of output filters.
        kernel, stride, padding, use_bias: forwarded to tf.layers.conv2d.
        scope: variable-scope name (distinct names keep weights separate).
    """
    with tf.variable_scope(scope):
        x = tf.layers.conv2d(inputs=x, filters=channels,
                             kernel_size=kernel, kernel_initializer=weight_init,
                             kernel_regularizer=weight_regularizer,
                             strides=stride, use_bias=use_bias, padding=padding)
        return x
def fully_conneted(x, units, use_bias=True, scope='fully_0'):
    """Flatten *x* and apply a dense (fully connected) layer with *units* outputs.

    NOTE(review): the name keeps the historical typo ("conneted");
    renaming would break existing callers.
    """
    with tf.variable_scope(scope):
        x = flatten(x)
        x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)
        return x
def resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock') :
    """Pre-activation residual block: (BN -> ReLU -> 3x3 conv) twice plus skip.

    When *downsample* is set, the main path uses stride 2 and the shortcut
    is projected with a stride-2 1x1 conv so the two shapes still match.
    """
    with tf.variable_scope(scope) :
        x = batch_norm(x_init, is_training, scope='batch_norm_0')
        x = relu(x)
        if downsample :
            x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')
            # Project the shortcut so it can be added to the strided main path.
            x_init = conv(x_init, channels, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')
        else :
            x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')
        x = batch_norm(x, is_training, scope='batch_norm_1')
        x = relu(x)
        x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_1')
        return x + x_init
def bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock') :
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1, 4x expansion).

    The shortcut is always projected with a 1x1 conv to ``channels * 4`` so it
    matches the expanded output of the bottleneck path before the addition.
    """
    with tf.variable_scope(scope) :
        x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')
        shortcut = relu(x)
        x = conv(shortcut, channels, kernel=1, stride=1, use_bias=use_bias, scope='conv_1x1_front')
        x = batch_norm(x, is_training, scope='batch_norm_3x3')
        x = relu(x)
        if downsample :
            # Stride-2 3x3 conv downsamples; the shortcut projection matches it.
            x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')
            shortcut = conv(shortcut, channels*4, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')
        else :
            x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')
            shortcut = conv(shortcut, channels * 4, kernel=1, stride=1, use_bias=use_bias, scope='conv_init')
        x = batch_norm(x, is_training, scope='batch_norm_1x1_back')
        x = relu(x)
        x = conv(x, channels*4, kernel=1, stride=1, use_bias=use_bias, scope='conv_1x1_back')
        return x + shortcut
def get_residual_layer(res_n):
    """Return the per-stage residual block counts for a ResNet variant.

    Args:
        res_n: ResNet depth; one of 4, 18, 34, 50, 101 or 152.

    Returns:
        List of block counts per stage, or an empty list for unknown depths
        (same fall-through behavior as the original if-chain).
    """
    # Depth -> blocks per stage. 34 (basic blocks) and 50 (bottleneck blocks)
    # intentionally share the same counts.
    blocks_per_depth = {
        4: [1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    return blocks_per_depth.get(res_n, [])
##################################################################################
# Sampling
##################################################################################
def flatten(x) :
    """Flatten all non-batch dimensions of *x* into one."""
    return tf.layers.flatten(x)
def global_avg_pooling(x):
    """Global average pooling over the spatial axes, keeping them as size 1."""
    gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    return gap
def avg_pooling(x) :
    """2x2 average pooling with stride 2 (halves the spatial resolution)."""
    return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
##################################################################################
# Activation function
##################################################################################
def relu(x):
    """ReLU activation."""
    return tf.nn.relu(x)
##################################################################################
# Normalization function
##################################################################################
def batch_norm(x, is_training=True, scope='batch_norm'):
    """Batch normalization (decay 0.9, eps 1e-5).

    ``updates_collections=None`` forces the moving-average updates to run
    in place, so no explicit update op has to be fetched by the trainer.
    """
    return tf_contrib.layers.batch_norm(x,
                                        decay=0.9, epsilon=1e-05,
                                        center=True, scale=True, updates_collections=None,
                                        is_training=is_training, scope=scope)
##################################################################################
# Loss function
##################################################################################
def regression_loss(reprs, label, domain, num_domains):
    """Cross-entropy loss/accuracy of the *common* softmax path.

    Delegates to mos_regression_lossv2 in debug mode and returns only its
    ``loss2`` component (the common-weight classifier) plus the accuracy.
    """
    _, accuracy, all_losses = mos_regression_lossv2(reprs, label, domain, num_domains, debug=True)
    # all_losses == [loss1 (domain-specific), loss2 (common), orthn_loss]
    return all_losses[1], accuracy
def regression_loss2(reprs, label, reuse=False, normalize=False) :
    """Plain softmax cross-entropy head over *reprs*.

    Args:
        reprs: feature tensor fed to a dense 'softmax_layer'.
        label: one-hot labels; last dim gives the number of classes.
        reuse: reuse the dense layer's variables by name.
        normalize: NOTE(review) currently unused -- remove or implement.

    Returns:
        (mean cross-entropy loss, accuracy) pair of scalar tensors.
    """
    num_classes = label.get_shape()[-1]
    logit = tf.layers.dense(reprs, num_classes, name='softmax_layer', reuse=reuse)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit))
    prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    return loss, accuracy
def mos_regression_loss(reprs, label, domain, num_domains):
    """Mixture-of-softmaxes loss over DIM shared classifiers.

    Each domain mixes the shared softmax matrices via learned, L2-normalized
    sigmoid weights (loss1); a shared "common" combination is trained
    alongside (loss2) and is the one used for predictions/accuracy.

    Args:
        reprs: [batch, EMB_SIZE] feature tensor (EMB_SIZE assumed 128 below).
        label: one-hot labels; last dim gives the number of classes.
        domain: [batch] integer domain ids in [0, num_domains).
        num_domains: number of training domains.
    """
    DIM = 2  # number of shared softmax components in the mixture
    batch_size = tf.shape(reprs)[0]
    EMB_SIZE = 128  # must match the width of *reprs*
    num_classes = label.get_shape()[-1]
    emb_matrix = tf.get_variable("emb_matrix", shape=[num_domains, DIM], initializer=tf.random_normal_initializer)
    common_var = tf.get_variable("common_var", shape=[DIM], initializer=tf.zeros_initializer)
    # Squash mixing weights into (0, 1) and normalize to unit L2 norm.
    common_cwt = tf.sigmoid(common_var)
    common_cwt /= tf.norm(common_cwt)
    emb_matrix = tf.sigmoid(emb_matrix)
    emb_matrix /= tf.expand_dims(tf.norm(emb_matrix, axis=1), 1)
    # Batch size x DIM
    c_wts = tf.nn.embedding_lookup(emb_matrix, domain)
    c_wts = tf.reshape(c_wts, [batch_size, DIM])
    sms = tf.get_variable("sm_matrices", shape=[DIM, EMB_SIZE, num_classes], trainable=True)
    biases = tf.get_variable("sm_bias", shape=[DIM, num_classes], trainable=True)
    # Per-example classifier = domain-weighted mix of the shared matrices.
    specific_sms = tf.einsum("ij,jkl->ikl", c_wts, sms)
    specific_bias = tf.einsum("ij,jl->il", c_wts, biases)
    common_sm = tf.einsum("j,jkl->kl", common_cwt, sms)
    common_bias = tf.einsum("j,jl->l", common_cwt, biases)
    logits1 = tf.einsum("ik,ikl->il", reprs, specific_sms) + specific_bias
    logits2 = tf.matmul(reprs, common_sm) + common_bias
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits1))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits2))
    loss = 0.5*loss1 + 0.5*loss2
    # Accuracy is measured on the common (domain-independent) path only.
    predictions = tf.equal(tf.argmax(logits2, axis=-1), tf.argmax(label, axis=1))
    accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))
    return loss, accuracy
def mos_regression_lossv2(reprs, label, domain, num_domains, L=2, cs_wt=0, debug=False):
    """Mixture-of-softmaxes loss, v2: the first component is a dedicated
    "common" classifier; the remaining L-1 components are domain-specific,
    with an orthonormality penalty keeping the components decorrelated.

    Args:
        reprs: [batch, EMB_SIZE] feature tensor (EMB_SIZE assumed 128 below).
        label: one-hot labels; last dim gives the number of classes.
        domain: [batch] integer domain ids in [0, num_domains).
        num_domains: number of training domains.
        L: total number of softmax components (1 common + L-1 specific).
        cs_wt: initializer mean for the common component's specific weight.
        debug: if True also return the individual loss terms.

    Returns:
        (loss, accuracy) or, with debug, (loss, accuracy, [loss1, loss2, orthn_loss]).
    """
    DIM = L
    batch_size = tf.shape(reprs)[0]
    EMB_SIZE = 128  # must match the width of *reprs*
    num_classes = label.get_shape()[-1]
    emb_matrix = tf.get_variable("emb_mat", shape=[num_domains, DIM-1], initializer=tf.random_normal_initializer(0, 1e-4))
    common_wt = tf.get_variable("common_wt", shape=[1], initializer=tf.ones_initializer)
    common_specialized_wt = tf.get_variable("common_specialized_wt", shape=[], initializer=tf.random_normal_initializer(cs_wt, 1e-2))
    # The common combination uses only component 0 (weight 1, rest 0).
    common_cwt = tf.concat([common_wt, tf.zeros([DIM-1])], axis=0)
    # Batch size x DIM
    c_wts = tf.nn.embedding_lookup(emb_matrix, domain)
    # Prepend the (learned, scalar) weight every domain puts on component 0.
    c_wts = tf.concat([tf.ones([batch_size, 1])*common_specialized_wt, c_wts], axis=1)
    c_wts = tf.reshape(c_wts, [batch_size, DIM])
    c_wts = tf.nn.sigmoid(c_wts)
    sms = tf.get_variable("sm_matrices", shape=[DIM, EMB_SIZE, num_classes], trainable=True, initializer=tf.random_normal_initializer(0, 0.05))
    biases = tf.get_variable("sm_bias", shape=[DIM, num_classes], trainable=True, initializer=tf.random_normal_initializer(0, 0.05))
    specific_sms = tf.einsum("ij,jkl->ikl", c_wts, sms)
    specific_bias = tf.einsum("ij,jl->il", c_wts, biases)
    common_sm = tf.einsum("j,jkl->kl", common_cwt, sms)
    common_bias = tf.einsum("j,jl->l", common_cwt, biases)
    logits1 = tf.einsum("ik,ikl->il", reprs, specific_sms) + specific_bias
    logits2 = tf.matmul(reprs, common_sm) + common_bias
    loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits1))
    loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits2))
    # C x L x L: per-class Gram matrices of the component columns.
    diag_tensor = tf.eye(DIM, batch_shape=[num_classes])
    cps = tf.stack([tf.matmul(sms[:, :, _], sms[:, :, _], transpose_b=True) for _ in range(num_classes)])
    # (1 - diag_tensor) *  <- leftover of an off-diagonal-only penalty variant
    orthn_loss = tf.reduce_mean((cps - diag_tensor)**2)
    loss = 0.5*(loss1 + loss2) + orthn_loss
    # Accuracy is measured on the common (domain-independent) path only.
    predictions = tf.equal(tf.argmax(logits2, axis=-1), tf.argmax(label, axis=1))
    accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))
    if debug:
        return loss, accuracy, [loss1, loss2, orthn_loss]
    else:
        return loss, accuracy
| import tensorflow as tf
import tensorflow.contrib as tf_contrib
# Xavier : tf_contrib.layers.xavier_initializer()
# He : tf_contrib.layers.variance_scaling_initializer()
# Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02)
# l2_decay : tf_contrib.layers.l2_regularizer(0.0001)
weight_init = tf_contrib.layers.variance_scaling_initializer()
weight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)
##################################################################################
# Layer
##################################################################################
def conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):
with tf.variable_scope(scope):
x = tf.layers.conv2d(inputs=x, filters=channels,
kernel_size=kernel, kernel_initializer=weight_init,
kernel_regularizer=weight_regularizer,
strides=stride, use_bias=use_bias, padding=padding)
return x
def fully_conneted(x, units, use_bias=True, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x
def resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock') :
with tf.variable_scope(scope) :
x = batch_norm(x_init, is_training, scope='batch_norm_0')
x = relu(x)
if downsample :
x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')
x_init = conv(x_init, channels, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')
else :
x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')
x = batch_norm(x, is_training, scope='batch_norm_1')
x = relu(x)
x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_1')
return x + x_init
def bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock') :
with tf.variable_scope(scope) :
x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')
shortcut = relu(x)
x = conv(shortcut, channels, kernel=1, stride=1, use_bias=use_bias, scope='conv_1x1_front')
x = batch_norm(x, is_training, scope='batch_norm_3x3')
x = relu(x)
if downsample :
x = conv(x, channels, kernel=3, stride=2, use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels*4, kernel=1, stride=2, use_bias=use_bias, scope='conv_init')
else :
x = conv(x, channels, kernel=3, stride=1, use_bias=use_bias, scope='conv_0')
shortcut = conv(shortcut, channels * 4, kernel=1, stride=1, use_bias=use_bias, scope='conv_init')
x = batch_norm(x, is_training, scope='batch_norm_1x1_back')
x = relu(x)
x = conv(x, channels*4, kernel=1, stride=1, use_bias=use_bias, scope='conv_1x1_back')
return x + shortcut
def get_residual_layer(res_n) :
x = []
if res_n == 4 :
x = [1]
if res_n == 18 :
x = [2, 2, 2, 2]
if res_n == 34 :
x = [3, 4, 6, 3]
if res_n == 50 :
x = [3, 4, 6, 3]
if res_n == 101 :
x = [3, 4, 23, 3]
if res_n == 152 :
x = [3, 8, 36, 3]
return x
##################################################################################
# Sampling
##################################################################################
def flatten(x) :
return tf.layers.flatten(x)
def global_avg_pooling(x):
gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
return gap
def avg_pooling(x) :
return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
##################################################################################
# Activation function
##################################################################################
def relu(x):
return tf.nn.relu(x)
##################################################################################
# Normalization function
##################################################################################
def batch_norm(x, is_training=True, scope='batch_norm'):
return tf_contrib.layers.batch_norm(x,
decay=0.9, epsilon=1e-05,
center=True, scale=True, updates_collections=None,
is_training=is_training, scope=scope)
##################################################################################
# Loss function
##################################################################################
def regression_loss(reprs, label, domain, num_domains):
_, accuracy, all_losses = mos_regression_lossv2(reprs, label, domain, num_domains, debug=True)
return all_losses[1], accuracy
def regression_loss2(reprs, label, reuse=False, normalize=False) :
num_classes = label.get_shape()[-1]
logit = tf.layers.dense(reprs, num_classes, name='softmax_layer', reuse=reuse)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit))
prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
return loss, accuracy
def mos_regression_loss(reprs, label, domain, num_domains):
DIM = 2
batch_size = tf.shape(reprs)[0]
EMB_SIZE = 128
num_classes = label.get_shape()[-1]
emb_matrix = tf.get_variable("emb_matrix", shape=[num_domains, DIM], initializer=tf.random_normal_initializer)
common_var = tf.get_variable("common_var", shape=[DIM], initializer=tf.zeros_initializer)
common_cwt = tf.sigmoid(common_var)
common_cwt /= tf.norm(common_cwt)
emb_matrix = tf.sigmoid(emb_matrix)
emb_matrix /= tf.expand_dims(tf.norm(emb_matrix, axis=1), 1)
# Batch size x DIM
c_wts = tf.nn.embedding_lookup(emb_matrix, domain)
c_wts = tf.reshape(c_wts, [batch_size, DIM])
sms = tf.get_variable("sm_matrices", shape=[DIM, EMB_SIZE, num_classes], trainable=True)
biases = tf.get_variable("sm_bias", shape=[DIM, num_classes], trainable=True)
specific_sms = tf.einsum("ij,jkl->ikl", c_wts, sms)
specific_bias = tf.einsum("ij,jl->il", c_wts, biases)
common_sm = tf.einsum("j,jkl->kl", common_cwt, sms)
common_bias = tf.einsum("j,jl->l", common_cwt, biases)
logits1 = tf.einsum("ik,ikl->il", reprs, specific_sms) + specific_bias
logits2 = tf.matmul(reprs, common_sm) + common_bias
loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits1))
loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits2))
loss = 0.5*loss1 + 0.5*loss2
predictions = tf.equal(tf.argmax(logits2, axis=-1), tf.argmax(label, axis=1))
accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))
return loss, accuracy
def mos_regression_lossv2(reprs, label, domain, num_domains, L=2, cs_wt=0, debug=False):
DIM = L
batch_size = tf.shape(reprs)[0]
EMB_SIZE = 128
num_classes = label.get_shape()[-1]
emb_matrix = tf.get_variable("emb_mat", shape=[num_domains, DIM-1], initializer=tf.random_normal_initializer(0, 1e-4))
common_wt = tf.get_variable("common_wt", shape=[1], initializer=tf.ones_initializer)
common_specialized_wt = tf.get_variable("common_specialized_wt", shape=[], initializer=tf.random_normal_initializer(cs_wt, 1e-2))
common_cwt = tf.concat([common_wt, tf.zeros([DIM-1])], axis=0)
# Batch size x DIM
c_wts = tf.nn.embedding_lookup(emb_matrix, domain)
c_wts = tf.concat([tf.ones([batch_size, 1])*common_specialized_wt, c_wts], axis=1)
c_wts = tf.reshape(c_wts, [batch_size, DIM])
c_wts = tf.nn.sigmoid(c_wts)
sms = tf.get_variable("sm_matrices", shape=[DIM, EMB_SIZE, num_classes], trainable=True, initializer=tf.random_normal_initializer(0, 0.05))
biases = tf.get_variable("sm_bias", shape=[DIM, num_classes], trainable=True, initializer=tf.random_normal_initializer(0, 0.05))
specific_sms = tf.einsum("ij,jkl->ikl", c_wts, sms)
specific_bias = tf.einsum("ij,jl->il", c_wts, biases)
common_sm = tf.einsum("j,jkl->kl", common_cwt, sms)
common_bias = tf.einsum("j,jl->l", common_cwt, biases)
logits1 = tf.einsum("ik,ikl->il", reprs, specific_sms) + specific_bias
logits2 = tf.matmul(reprs, common_sm) + common_bias
loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits1))
loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits2))
# C x L x L
diag_tensor = tf.eye(DIM, batch_shape=[num_classes])
cps = tf.stack([tf.matmul(sms[:, :, _], sms[:, :, _], transpose_b=True) for _ in range(num_classes)])
# (1 - diag_tensor) *
orthn_loss = tf.reduce_mean((cps - diag_tensor)**2)
loss = 0.5*(loss1 + loss2) + orthn_loss
predictions = tf.equal(tf.argmax(logits2, axis=-1), tf.argmax(label, axis=1))
accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))
if debug:
return loss, accuracy, [loss1, loss2, orthn_loss]
else:
return loss, accuracy
| de | 0.735853 | # Xavier : tf_contrib.layers.xavier_initializer() # He : tf_contrib.layers.variance_scaling_initializer() # Normal : tf.random_normal_initializer(mean=0.0, stddev=0.02) # l2_decay : tf_contrib.layers.l2_regularizer(0.0001) ################################################################################## # Layer ################################################################################## ################################################################################## # Sampling ################################################################################## ################################################################################## # Activation function ################################################################################## ################################################################################## # Normalization function ################################################################################## ################################################################################## # Loss function ################################################################################## # Batch size x DIM # Batch size x DIM # C x L x L # (1 - diag_tensor) * | 2.535968 | 3 |
tests/snippets/async_stuff.py | Blanen/RustPython | 1 | 6612462 | import asyncio_slow as asyncio
class ContextManager:
    """Async context manager that records its lifecycle in the global ``ls``.

    Appends 1 on enter, 3 on exit (2 from __str__ if the object is ever
    stringified); the module-level assert below checks this ordering.
    """
    async def __aenter__(self):
        print("Entrada")
        ls.append(1)
        # The value bound by ``as`` in ``async with`` is this 1, not self.
        return 1
    def __str__(self):
        ls.append(2)
        return "c'est moi!"
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        ls.append(3)
        print("Wiedersehen")
ls = []
class AIterWrap:
    """Adapt a plain (synchronous) iterable to the async-iterator protocol,
    so it can be consumed with ``async for``."""

    def __init__(self, obj):
        # Grab the underlying synchronous iterator up front.
        self._iterator = iter(obj)

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return next(self._iterator)
        except StopIteration:
            # Translate synchronous exhaustion into its async counterpart.
            raise StopAsyncIteration
async def a(s, m):
    """Demo coroutine: sleep *s* seconds inside the context manager, iterate
    an async-wrapped range, then record marker *m* in the global ``ls``."""
    async with ContextManager() as b:
        print(f"val = {b}")
        await asyncio.sleep(s)
    async for i in AIterWrap(range(0, 2)):
        print(i)
    ls.append(m)
    await asyncio.sleep(1)
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.wait(
[a(0, "hello1"), a(0.75, "hello2"), a(1.5, "hello3"), a(2.25, "hello4")]
)
)
assert ls == [
1,
3,
"hello1",
1,
3,
1,
3,
1,
3,
"hello2",
"hello1",
"hello3",
"hello2",
"hello4",
"hello3",
"hello4",
]
| import asyncio_slow as asyncio
class ContextManager:
async def __aenter__(self):
print("Entrada")
ls.append(1)
return 1
def __str__(self):
ls.append(2)
return "c'est moi!"
async def __aexit__(self, exc_type, exc_val, exc_tb):
ls.append(3)
print("Wiedersehen")
ls = []
class AIterWrap:
def __init__(self, obj):
self._it = iter(obj)
def __aiter__(self):
return self
async def __anext__(self):
try:
value = next(self._it)
except StopIteration:
raise StopAsyncIteration
return value
async def a(s, m):
async with ContextManager() as b:
print(f"val = {b}")
await asyncio.sleep(s)
async for i in AIterWrap(range(0, 2)):
print(i)
ls.append(m)
await asyncio.sleep(1)
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.wait(
[a(0, "hello1"), a(0.75, "hello2"), a(1.5, "hello3"), a(2.25, "hello4")]
)
)
assert ls == [
1,
3,
"hello1",
1,
3,
1,
3,
1,
3,
"hello2",
"hello1",
"hello3",
"hello2",
"hello4",
"hello3",
"hello4",
]
| none | 1 | 3.001055 | 3 | |
Successful Leetcodes/136. Single Number/solution.py | Bryan-Rojas/Algorithm-Practice | 0 | 6612463 | def singleNumber(self, nums: List[int]) -> int:
countTracker = {}
for num in nums:
if num not in countTracker:
countTracker[num] = 1
else:
countTracker[num] += 1
for key, val in countTracker.items():
if val == 1:
return key | def singleNumber(self, nums: List[int]) -> int:
countTracker = {}
for num in nums:
if num not in countTracker:
countTracker[num] = 1
else:
countTracker[num] += 1
for key, val in countTracker.items():
if val == 1:
return key | none | 1 | 3.381751 | 3 | |
detect/util/convert_tiny_darknet_fcn.py | jyyang42/sonic | 0 | 6612464 | #! /usr/bin/env python
import os
import json
import struct
import numpy as np
import tensorflow as tf
from keras import backend as K
from ..backbone.frontend import YOLO
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def store_weights(sess, weights_file=None):
    """Serialize the session's conv/BN weights to a flat binary file.

    For each of the 18 conv+BN pairs the batch-norm statistics are folded
    into the conv kernel/bias before writing; the final layer (hard-coded
    graph index 23) is written with its own bias. All values are packed as
    little-endian-native 32-bit floats, kernels transposed to (out, in, h, w)
    -- presumably the darknet weight layout; TODO confirm against the loader.
    """
    data = open(weights_file, 'wb')
    graph = sess.graph
    print([v.name for v in tf.trainable_variables()])
    total = 0
    nb_conv = 19
    for i in range(1, 20):
        if i < nb_conv:
            kernel = graph.get_tensor_by_name("conv_{}/kernel:0".format(i))
            beta = graph.get_tensor_by_name("norm_{}/beta:0".format(i))
            gamma = graph.get_tensor_by_name("norm_{}/gamma:0".format(i))
            mean = graph.get_tensor_by_name("norm_{}/moving_mean:0".format(i))
            var = graph.get_tensor_by_name("norm_{}/moving_variance:0".format(i))
            beta, gamma, mean, var, kernel = sess.run([beta, gamma, mean, var, kernel])
            # NOTE(review): k_bias is computed but never used.
            k_bias = np.zeros(kernel.shape[-1])
            # Fold BN into the conv: y = gamma*(Wx - mean)/std + beta
            #                          = (gamma/std)*W x + (beta - gamma*mean/std)
            std = np.sqrt(var + 0.001)
            scale = gamma / std
            bias = beta - gamma * mean / std
            kernel = kernel * scale
            values = [kernel, bias]
        else:
            i = 23 # hard code
            kernel = graph.get_tensor_by_name("conv_{}/kernel:0".format(i))
            bias = graph.get_tensor_by_name("conv_{}/bias:0".format(i))
            kernel, bias = sess.run([kernel, bias])
            values = [kernel, bias]
        for v in values:
            if len(v.shape) > 1:
                # Reorder 4-D kernels from TF's (h, w, in, out) layout.
                v = np.transpose(v, (3, 2, 0, 1))
            print('{}:{}'.format(i, v.shape))
            v = v.ravel()
            total += v.shape[0]
            ff = 'f' * v.shape[0]
            d = struct.pack(ff, *v)
            data.write(d)
    data.close()
    print('total parameters: {}'.format(total))
def convert(args):
    """Convert a Keras YOLO checkpoint into a raw binary weights file.

    Args:
        args: parsed CLI namespace with ``conf`` (JSON config path),
            ``weights`` (Keras weights file) and ``output`` (binary output).
    """
    config_path = args.conf
    weights_path = args.weights
    output_path = args.output
    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    ###############################
    # Make the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])
    ###############################
    # Load trained weights
    ###############################
    # Bug fix: ``print weights_path`` was a Python 2 print *statement*
    # (a SyntaxError under Python 3); the rest of this module already
    # uses the print() function, so make this call consistent.
    print(weights_path)
    yolo.load_weights(weights_path)
    ###############################
    # Convert Keras model to TF
    ###############################
    sess = K.get_session()
    store_weights(sess, output_path)
| #! /usr/bin/env python
import os
import json
import struct
import numpy as np
import tensorflow as tf
from keras import backend as K
from ..backbone.frontend import YOLO
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def store_weights(sess, weights_file=None):
data = open(weights_file, 'wb')
graph = sess.graph
print([v.name for v in tf.trainable_variables()])
total = 0
nb_conv = 19
for i in range(1, 20):
if i < nb_conv:
kernel = graph.get_tensor_by_name("conv_{}/kernel:0".format(i))
beta = graph.get_tensor_by_name("norm_{}/beta:0".format(i))
gamma = graph.get_tensor_by_name("norm_{}/gamma:0".format(i))
mean = graph.get_tensor_by_name("norm_{}/moving_mean:0".format(i))
var = graph.get_tensor_by_name("norm_{}/moving_variance:0".format(i))
beta, gamma, mean, var, kernel = sess.run([beta, gamma, mean, var, kernel])
k_bias = np.zeros(kernel.shape[-1])
std = np.sqrt(var + 0.001)
scale = gamma / std
bias = beta - gamma * mean / std
kernel = kernel * scale
values = [kernel, bias]
else:
i = 23 # hard code
kernel = graph.get_tensor_by_name("conv_{}/kernel:0".format(i))
bias = graph.get_tensor_by_name("conv_{}/bias:0".format(i))
kernel, bias = sess.run([kernel, bias])
values = [kernel, bias]
for v in values:
if len(v.shape) > 1:
v = np.transpose(v, (3, 2, 0, 1))
print('{}:{}'.format(i, v.shape))
v = v.ravel()
total += v.shape[0]
ff = 'f' * v.shape[0]
d = struct.pack(ff, *v)
data.write(d)
data.close()
print('total parameters: {}'.format(total))
def convert(args):
config_path = args.conf
weights_path = args.weights
output_path = args.output
with open(config_path) as config_buffer:
config = json.load(config_buffer)
###############################
# Make the model
###############################
yolo = YOLO(architecture=config['model']['architecture'],
input_size=config['model']['input_size'],
labels=config['model']['labels'],
max_box_per_image=config['model']['max_box_per_image'],
anchors=config['model']['anchors'])
###############################
# Load trained weights
###############################
print weights_path
yolo.load_weights(weights_path)
###############################
# Convert Keras model to TF
###############################
sess = K.get_session()
store_weights(sess, output_path)
| de | 0.7285 | #! /usr/bin/env python # hard code ############################### # Make the model ############################### ############################### # Load trained weights ############################### ############################### # Convert Keras model to TF ############################### | 2.218133 | 2 |
src/main/python/utils/sparse.py | meowpunch/bobsim-research | 2 | 6612465 | <gh_stars>1-10
import pandas as pd
def filter_sparse(column: pd.Series, std_list: list):
    """Collapse every value of *column* not present in *std_list* into "others".

    Args:
        column: series whose rare (non-standard) values should be merged.
        std_list: values to keep unchanged.

    Returns:
        A new series where all values outside *std_list* are replaced by the
        literal string "others"; the original series is not modified.
    """
    # Set gives O(1) membership tests instead of O(len(std_list)) per value.
    standard = set(std_list)
    sparse_list = [value for value in column.unique() if value not in standard]
    return column.replace(sparse_list, "others")
| import pandas as pd
def filter_sparse(column: pd.Series, std_list: list):
sparse_list = list(filter(lambda x: x not in std_list, column.unique()))
return column.replace(sparse_list, "others") | none | 1 | 3.026178 | 3 | |
infra/python/bootstrap_virtualenv.py | henryr/minimised-impala | 0 | 6612466 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module will create a python virtual env and install external dependencies. If
# the virtualenv already exists and the list of dependencies matches the list of
# installed dependencies, nothing will be done.
#
# This module can be run with python >= 2.4 but python >= 2.6 must be installed on the
# system. If the default 'python' command refers to < 2.6, python 2.6 will be used
# instead.
import glob
import logging
import optparse
import os
import shutil
import subprocess
import tarfile
import tempfile
import textwrap
import urllib
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
DEPS_DIR = os.path.join(os.path.dirname(__file__), "deps")
ENV_DIR = os.path.join(os.path.dirname(__file__), "env")
# Generated using "pip install --download <DIR> -r requirements.txt"
REQS_PATH = os.path.join(DEPS_DIR, "requirements.txt")
# After installing, the requirements.txt will be copied into the virtualenv to
# record what was installed.
INSTALLED_REQS_PATH = os.path.join(ENV_DIR, "installed-requirements.txt")
def delete_virtualenv_if_exist():
    '''Remove ENV_DIR and its contents if present; a no-op otherwise.'''
    if os.path.exists(ENV_DIR):
        shutil.rmtree(ENV_DIR)
def create_virtualenv():
    '''Create the virtualenv in ENV_DIR from the bundled virtualenv tarball.

    The tarball under DEPS_DIR is extracted into a temp dir, its
    virtualenv.py is run with a python >= 2.6, and the temp dir is removed.
    '''
    LOG.info("Creating python virtualenv")
    build_dir = tempfile.mkdtemp()
    file = tarfile.open(find_file(DEPS_DIR, "virtualenv*.tar.gz"), "r:gz")
    for member in file.getmembers():
        file.extract(member, build_dir)
    file.close()
    python_cmd = detect_python_cmd()
    # --python pins the env's interpreter to the detected >= 2.6 python.
    exec_cmd([python_cmd, find_file(build_dir, "virtualenv*", "virtualenv.py"), "--quiet",
      "--python", python_cmd, ENV_DIR])
    shutil.rmtree(build_dir)
def exec_cmd(args):
    '''Executes a command and waits for it to finish, raises an exception if the return
       status is not zero.

       'args' uses the same format as subprocess.Popen().
    '''
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # stderr is merged into stdout above, so communicate() only yields stdout.
    output, _ = proc.communicate()
    if proc.returncode == 0:
        return
    raise Exception("Command returned non-zero status\nCommand: %s\nOutput: %s"
        % (args, output))
def find_file(*paths):
    '''Returns the path specified by the glob 'paths', raises an exception if no file is
       found.

       Ex: find_file('/etc', 'h*sts') --> /etc/hosts
    '''
    pattern = os.path.join(*paths)
    matches = glob.glob(pattern)
    # Exactly one match is required; anything else is an error.
    if not matches:
        raise Exception("No file found at %s" % pattern)
    if len(matches) > 1:
        raise Exception("Found too many files at %s: %s" % (pattern, matches))
    return matches[0]
def detect_python_cmd():
    '''Returns the system command that provides python 2.6 or greater.'''
    paths = os.getenv("PATH").split(os.path.pathsep)
    # Try common python executable names, most generic first.
    for cmd in ("python", "python27", "python2.7", "python-27", "python-2.7", "python26",
        "python2.6", "python-26", "python-2.6"):
        for path in paths:
            cmd_path = os.path.join(path, cmd)
            if not os.path.exists(cmd_path) or not os.access(cmd_path, os.X_OK):
                continue
            # Probe the candidate: exit 0 iff its version is >= 2.6.
            exit = subprocess.call([cmd_path, "-c", textwrap.dedent("""
                import sys
                sys.exit(int(sys.version_info[:2] < (2, 6)))""")])
            if exit == 0:
                return cmd_path
    raise Exception("Could not find minimum required python version 2.6")
def install_deps():
    '''Install the pinned requirements into the virtualenv from local
       packages only, then record what was installed.'''
    LOG.info("Installing packages into virtualenv")
    # Don't call the virtualenv pip directly, it uses a hashbang to to call the python
    # virtualenv using an absolute path. If the path to the virtualenv is very long, the
    # hashbang won't work.
    # --no-cache-dir is used because the dev version of Impyla may be the same even though
    # the contents are different. Since the version doesn't change, pip may use its cached
    # build.
    exec_cmd([os.path.join(ENV_DIR, "bin", "python"), os.path.join(ENV_DIR, "bin", "pip"),
      "install", "--no-cache-dir", "--no-index", "--find-links",
      "file://%s" % urllib.pathname2url(os.path.abspath(DEPS_DIR)), "-r", REQS_PATH])
    # Record the installed set so deps_are_installed() can detect staleness.
    shutil.copyfile(REQS_PATH, INSTALLED_REQS_PATH)
def deps_are_installed():
    '''Return True iff the recorded installed-requirements file exists and is
       byte-identical to requirements.txt (i.e. the virtualenv is current).'''
    if not os.path.exists(INSTALLED_REQS_PATH):
        return False
    # Nested try/finally instead of ``with`` keeps this file runnable on
    # very old pythons (the module header promises python >= 2.4).
    installed_reqs_file = open(INSTALLED_REQS_PATH)
    try:
        reqs_file = open(REQS_PATH)
        try:
            if reqs_file.read() == installed_reqs_file.read():
                return True
            else:
                LOG.info("Virtualenv upgrade needed")
                return False
        finally:
            reqs_file.close()
    finally:
        installed_reqs_file.close()
def setup_virtualenv_if_not_exists():
    '''Build (or rebuild) the virtualenv unless it is already up-to-date.'''
    if deps_are_installed():
        return
    delete_virtualenv_if_exist()
    create_virtualenv()
    install_deps()
    LOG.info("Virtualenv setup complete")
# Script entry point: bootstrap the virtualenv, optionally forcing a rebuild.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # optparse (not argparse) keeps this runnable on old pythons (see module header).
    parser = optparse.OptionParser()
    parser.add_option("-r", "--rebuild", action="store_true", help="Force a rebuild of"
        " the virtualenv even if it exists and appears to be completely up-to-date.")
    options, args = parser.parse_args()
    if options.rebuild:
        delete_virtualenv_if_exist()
    # No-op when the virtualenv already exists and its requirements are current.
    setup_virtualenv_if_not_exists()
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module will create a python virtual env and install external dependencies. If
# the virtualenv already exists and the list of dependencies matches the list of
# installed dependencies, nothing will be done.
#
# This module can be run with python >= 2.4 but python >= 2.6 must be installed on the
# system. If the default 'python' command refers to < 2.6, python 2.6 will be used
# instead.
import glob
import logging
import optparse
import os
import shutil
import subprocess
import tarfile
import tempfile
import textwrap
import urllib
# Module logger named after this file (without the .py extension).
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
# Directory holding the pre-downloaded package archives, next to this script.
DEPS_DIR = os.path.join(os.path.dirname(__file__), "deps")
# Directory where the virtualenv will be created, next to this script.
ENV_DIR = os.path.join(os.path.dirname(__file__), "env")
# Generated using "pip install --download <DIR> -r requirements.txt"
REQS_PATH = os.path.join(DEPS_DIR, "requirements.txt")
# After installing, the requirements.txt will be copied into the virtualenv to
# record what was installed.
INSTALLED_REQS_PATH = os.path.join(ENV_DIR, "installed-requirements.txt")
def delete_virtualenv_if_exist():
    '''Remove ENV_DIR and everything under it, if present.'''
    if not os.path.exists(ENV_DIR):
        return
    shutil.rmtree(ENV_DIR)
def create_virtualenv():
    '''Unpack the bundled virtualenv tarball and use it to create ENV_DIR.'''
    LOG.info("Creating python virtualenv")
    build_dir = tempfile.mkdtemp()
    # 'tar' rather than 'file' to avoid shadowing the python-2 builtin 'file'.
    tar = tarfile.open(find_file(DEPS_DIR, "virtualenv*.tar.gz"), "r:gz")
    # Extract member-by-member rather than extractall() for old-python compatibility.
    for member in tar.getmembers():
        tar.extract(member, build_dir)
    tar.close()
    python_cmd = detect_python_cmd()
    exec_cmd([python_cmd, find_file(build_dir, "virtualenv*", "virtualenv.py"), "--quiet",
              "--python", python_cmd, ENV_DIR])
    shutil.rmtree(build_dir)
def exec_cmd(args):
    '''Run a command to completion, raising an exception on a non-zero exit status.

    'args' uses the same format as subprocess.Popen().
    '''
    child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    combined_output = child.communicate()[0]
    status = child.returncode
    if status == 0:
        return
    raise Exception("Command returned non-zero status\nCommand: %s\nOutput: %s"
                    % (args, combined_output))
def find_file(*paths):
    '''Return the single path matching the glob built by joining 'paths'.

    Raises an exception when the glob matches no file or more than one file.
    Ex: find_file('/etc', 'h*sts') --> /etc/hosts
    '''
    pattern = os.path.join(*paths)
    matches = glob.glob(pattern)
    if len(matches) > 1:
        raise Exception("Found too many files at %s: %s" % (pattern, matches))
    if not matches:
        raise Exception("No file found at %s" % pattern)
    return matches[0]
def detect_python_cmd():
    '''Returns the system command that provides python 2.6 or greater.

    Scans every directory on $PATH for common python executable names and runs
    each candidate to check its version. Raises an exception if no suitable
    interpreter is found.
    '''
    paths = os.getenv("PATH").split(os.path.pathsep)
    # Hoisted out of the loops: the version-check script is loop-invariant.
    version_check = textwrap.dedent("""
        import sys
        sys.exit(int(sys.version_info[:2] < (2, 6)))""")
    for cmd in ("python", "python27", "python2.7", "python-27", "python-2.7", "python26",
                "python2.6", "python-26", "python-2.6"):
        for path in paths:
            cmd_path = os.path.join(path, cmd)
            # Skip candidates that don't exist or aren't executable.
            if not os.path.exists(cmd_path) or not os.access(cmd_path, os.X_OK):
                continue
            # 'exit_code' rather than 'exit' to avoid shadowing the builtin.
            exit_code = subprocess.call([cmd_path, "-c", version_check])
            if exit_code == 0:
                return cmd_path
    raise Exception("Could not find minimum required python version 2.6")
def install_deps():
    '''Install the bundled packages into the virtualenv via its own pip.'''
    LOG.info("Installing packages into virtualenv")
    # Invoke the virtualenv's pip through its python rather than directly: pip's
    # hashbang embeds an absolute path to the virtualenv python, and a very long
    # path would make the hashbang unusable.
    # --no-cache-dir is needed because the dev version of Impyla can change its
    # contents without changing its version number, so pip might otherwise reuse
    # a stale cached build.
    env_python = os.path.join(ENV_DIR, "bin", "python")
    env_pip = os.path.join(ENV_DIR, "bin", "pip")
    deps_url = "file://%s" % urllib.pathname2url(os.path.abspath(DEPS_DIR))
    exec_cmd([env_python, env_pip, "install", "--no-cache-dir", "--no-index",
              "--find-links", deps_url, "-r", REQS_PATH])
    # Record what was installed so later runs can detect whether an upgrade is needed.
    shutil.copyfile(REQS_PATH, INSTALLED_REQS_PATH)
def deps_are_installed():
    '''Returns True if the recorded installed requirements match the bundled ones.'''
    if not os.path.exists(INSTALLED_REQS_PATH):
        return False
    # try/finally instead of 'with' to stay runnable on python < 2.5.
    installed_reqs_file = open(INSTALLED_REQS_PATH)
    try:
        reqs_file = open(REQS_PATH)
        try:
            up_to_date = (reqs_file.read() == installed_reqs_file.read())
        finally:
            reqs_file.close()
    finally:
        installed_reqs_file.close()
    if up_to_date:
        return True
    LOG.info("Virtualenv upgrade needed")
    return False
def setup_virtualenv_if_not_exists():
    '''Build (or rebuild) the virtualenv unless it is already up-to-date.'''
    if deps_are_installed():
        return
    delete_virtualenv_if_exist()
    create_virtualenv()
    install_deps()
    LOG.info("Virtualenv setup complete")
# Script entry point: bootstrap the virtualenv, optionally forcing a rebuild.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # optparse (not argparse) keeps this runnable on old pythons (see module header).
    parser = optparse.OptionParser()
    parser.add_option("-r", "--rebuild", action="store_true", help="Force a rebuild of"
        " the virtualenv even if it exists and appears to be completely up-to-date.")
    options, args = parser.parse_args()
    if options.rebuild:
        delete_virtualenv_if_exist()
    # No-op when the virtualenv already exists and its requirements are current.
    setup_virtualenv_if_not_exists()
| en | 0.784051 | # Copyright (c) 2015 Cloudera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module will create a python virtual env and install external dependencies. If # the virtualenv already exists and the list of dependencies matches the list of # installed dependencies, nothing will be done. # # This module can be run with python >= 2.4 but python >= 2.6 must be installed on the # system. If the default 'python' command refers to < 2.6, python 2.6 will be used # instead. # Generated using "pip install --download <DIR> -r requirements.txt" # After installing, the requirements.txt will be copied into the virtualenv to # record what was installed. Executes a command and waits for it to finish, raises an exception if the return status is not zero. 'args' uses the same format as subprocess.Popen(). Returns the path specified by the glob 'paths', raises an exception if no file is found. Ex: find_file('/etc', 'h*sts') --> /etc/hosts Returns the system command that provides python 2.6 or greater. import sys sys.exit(int(sys.version_info[:2] < (2, 6))) # Don't call the virtualenv pip directly, it uses a hashbang to to call the python # virtualenv using an absolute path. If the path to the virtualenv is very long, the # hashbang won't work. # --no-cache-dir is used because the dev version of Impyla may be the same even though # the contents are different. Since the version doesn't change, pip may use its cached # build. | 2.032452 | 2 |
text_processors/punkt.py | Babelruins/BlackCAT | 3 | 6612467 | import nltk.data, os, chardet, pycountry
from PyQt5 import QtCore
from core import db_op
def import_file(options):
#First lets try to detect the encoding
file = open(options['file_path'], 'rb')
detected_encoding = chardet.detect(file.read())['encoding']
file.close()
filename = os.path.basename(options['file_path'])
#Select appropriate tokenizer according to language
punkt_languages = ['cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'es', 'sv', 'tr']
if options['source_language'] in punkt_languages:
tokenizer = nltk.data.load('tokenizers/punkt/' + pycountry.languages.get(alpha_2=options['source_language']).name.lower() + '.pickle')
elif options['source_language'] == 'el':
tokenizer = nltk.data.load('tokenizers/punkt/greek.pickle')
elif options['source_language'] == 'sl':
tokenizer = nltk.data.load('tokenizers/punkt/slovene.pickle')
elif options['source_language'] == 'ja':
tokenizer = nltk.RegexpTokenizer(u'[^ 「」!?。.)]*[!?。]')
else:
tokenizer = nltk.LineTokenizer(blanklines='keep')
file = open(options['file_path'], encoding=detected_encoding)
text = file.read()
#Lets see if the file already has imported segments
imported_segments = db_op.get_source_segments_in_db(options['project_path'], options['source_language'], filename)
#Get the segments in file
divisions_in_file = tokenizer.tokenize(text)
segments_in_file = []
for division in divisions_in_file:
for line in division.split('\n'):
if line != '':
segments_in_file.append(line)
#If the segment exists in the db but not in the file...
for row in imported_segments:
if row not in segments_in_file:
db_op.recycle_segment(options['project_path'], imported_segments[row])
#Add the file to the source_files table
db_op.import_source_file(options['project_path'], filename, 'punkt', options['m_time'], detected_encoding)
#Insert the new segments
seen = set()
for index, segment in enumerate(segments_in_file):
if segment not in seen:
db_op.import_source_segment(options['project_path'], segment, options['source_language'], filename, index)
seen.add(segment)
file.close()
def generate_file(options):
filename = os.path.basename(options['file_path'])
#segments_in_db = db_op.get_segments_in_db(options['project_path'], options['source_language'], options['target_language'], filename)
encoding = db_op.get_encoding(options['project_path'], filename)
#Select appropriate tokenizer according to language
punkt_languages = ['cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'es', 'sv', 'tr']
lang = pycountry.languages.get(alpha_2=options['source_language'])
if options['source_language'] in punkt_languages:
tokenizer = nltk.data.load('tokenizers/punkt/' + lang.name.lower() + '.pickle')
elif options['source_language'] == 'el':
tokenizer = nltk.data.load('tokenizers/punkt/greek.pickle')
elif options['source_language'] == 'sl':
tokenizer = nltk.data.load('tokenizers/punkt/slovene.pickle')
elif options['source_language'] == 'ja':
tokenizer = nltk.RegexpTokenizer(u'[^ 「」!?。.)]*[!?。]')
else:
tokenizer = nltk.LineTokenizer(blanklines='keep')
file = open(options['file_path'], encoding=encoding)
text = file.read()
translated_data = ''
divisions = tokenizer.tokenize(text)
positions = tokenizer.span_tokenize(text)
last_segment_ending_position = 0
for division, position in zip(divisions, positions):
translated_data = translated_data + text[last_segment_ending_position:position[0]]
first_line = True
for line in division.split('\n'):
translated_segment = db_op.get_translated_segment(options['project_path'], options['source_language'], options['target_language'], filename, line)
if not first_line:
translated_data = translated_data + '\n'
else:
first_line = False
if translated_segment is None:
translated_data = translated_data + line
else:
translated_data = translated_data + translated_segment[1]
last_segment_ending_position = position[1]
file.close()
target_file = open(os.path.join(options['project_dir'], 'processed_files', filename), 'w', encoding='utf-8')
target_file.write(translated_data)
target_file.close() | import nltk.data, os, chardet, pycountry
from PyQt5 import QtCore
from core import db_op
def import_file(options):
#First lets try to detect the encoding
file = open(options['file_path'], 'rb')
detected_encoding = chardet.detect(file.read())['encoding']
file.close()
filename = os.path.basename(options['file_path'])
#Select appropriate tokenizer according to language
punkt_languages = ['cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'es', 'sv', 'tr']
if options['source_language'] in punkt_languages:
tokenizer = nltk.data.load('tokenizers/punkt/' + pycountry.languages.get(alpha_2=options['source_language']).name.lower() + '.pickle')
elif options['source_language'] == 'el':
tokenizer = nltk.data.load('tokenizers/punkt/greek.pickle')
elif options['source_language'] == 'sl':
tokenizer = nltk.data.load('tokenizers/punkt/slovene.pickle')
elif options['source_language'] == 'ja':
tokenizer = nltk.RegexpTokenizer(u'[^ 「」!?。.)]*[!?。]')
else:
tokenizer = nltk.LineTokenizer(blanklines='keep')
file = open(options['file_path'], encoding=detected_encoding)
text = file.read()
#Lets see if the file already has imported segments
imported_segments = db_op.get_source_segments_in_db(options['project_path'], options['source_language'], filename)
#Get the segments in file
divisions_in_file = tokenizer.tokenize(text)
segments_in_file = []
for division in divisions_in_file:
for line in division.split('\n'):
if line != '':
segments_in_file.append(line)
#If the segment exists in the db but not in the file...
for row in imported_segments:
if row not in segments_in_file:
db_op.recycle_segment(options['project_path'], imported_segments[row])
#Add the file to the source_files table
db_op.import_source_file(options['project_path'], filename, 'punkt', options['m_time'], detected_encoding)
#Insert the new segments
seen = set()
for index, segment in enumerate(segments_in_file):
if segment not in seen:
db_op.import_source_segment(options['project_path'], segment, options['source_language'], filename, index)
seen.add(segment)
file.close()
def generate_file(options):
filename = os.path.basename(options['file_path'])
#segments_in_db = db_op.get_segments_in_db(options['project_path'], options['source_language'], options['target_language'], filename)
encoding = db_op.get_encoding(options['project_path'], filename)
#Select appropriate tokenizer according to language
punkt_languages = ['cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'de', 'it', 'no', 'pl', 'pt', 'es', 'sv', 'tr']
lang = pycountry.languages.get(alpha_2=options['source_language'])
if options['source_language'] in punkt_languages:
tokenizer = nltk.data.load('tokenizers/punkt/' + lang.name.lower() + '.pickle')
elif options['source_language'] == 'el':
tokenizer = nltk.data.load('tokenizers/punkt/greek.pickle')
elif options['source_language'] == 'sl':
tokenizer = nltk.data.load('tokenizers/punkt/slovene.pickle')
elif options['source_language'] == 'ja':
tokenizer = nltk.RegexpTokenizer(u'[^ 「」!?。.)]*[!?。]')
else:
tokenizer = nltk.LineTokenizer(blanklines='keep')
file = open(options['file_path'], encoding=encoding)
text = file.read()
translated_data = ''
divisions = tokenizer.tokenize(text)
positions = tokenizer.span_tokenize(text)
last_segment_ending_position = 0
for division, position in zip(divisions, positions):
translated_data = translated_data + text[last_segment_ending_position:position[0]]
first_line = True
for line in division.split('\n'):
translated_segment = db_op.get_translated_segment(options['project_path'], options['source_language'], options['target_language'], filename, line)
if not first_line:
translated_data = translated_data + '\n'
else:
first_line = False
if translated_segment is None:
translated_data = translated_data + line
else:
translated_data = translated_data + translated_segment[1]
last_segment_ending_position = position[1]
file.close()
target_file = open(os.path.join(options['project_dir'], 'processed_files', filename), 'w', encoding='utf-8')
target_file.write(translated_data)
target_file.close() | en | 0.757824 | #First lets try to detect the encoding #Select appropriate tokenizer according to language #Lets see if the file already has imported segments #Get the segments in file #If the segment exists in the db but not in the file... #Add the file to the source_files table #Insert the new segments #segments_in_db = db_op.get_segments_in_db(options['project_path'], options['source_language'], options['target_language'], filename) #Select appropriate tokenizer according to language | 2.65649 | 3 |
pandas/io/tests/test_yahoo.py | stonebig/pandas | 0 | 6612468 | import unittest
import nose
from datetime import datetime
from pandas.util.py3compat import StringIO, BytesIO
import pandas as pd
import pandas.io.data as web
from pandas.util.testing import (network, assert_frame_equal,
assert_series_equal,
assert_almost_equal)
from numpy.testing.decorators import slow
import urllib2
class TestYahoo(unittest.TestCase):
@slow
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
# an excecption when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 01, 27)
try:
self.assertEquals(
web.DataReader("F", 'yahoo', start, end)['Close'][-1],
13.68)
self.assertRaises(
Exception,
lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo',
start, end))
except urllib2.URLError:
try:
urllib2.urlopen('http://www.google.com')
except urllib2.URLError:
raise nose.SkipTest
else:
raise
@slow
@network
def test_get_quote(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
@slow
@network
def test_get_components(self):
df = web.get_components_yahoo('^DJI') #Dow Jones
assert isinstance(df, pd.DataFrame)
assert len(df) == 30
df = web.get_components_yahoo('^GDAXI') #DAX
assert isinstance(df, pd.DataFrame)
assert len(df) == 30
assert df[df.name.str.contains('adidas', case=False)].index == 'ADS.DE'
df = web.get_components_yahoo('^NDX') #NASDAQ-100
assert isinstance(df, pd.DataFrame)
#assert len(df) == 100
#Usual culprits, should be around for a while
assert 'AAPL' in df.index
assert 'GOOG' in df.index
assert 'AMZN' in df.index
@slow
@network
def test_get_data(self):
import numpy as np
#single symbol
#http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d
df = web.get_data_yahoo('GOOG')
assert df.Volume.ix['OCT-08-2010'] == 2859200
sl = ['AAPL', 'AMZN', 'GOOG']
pan = web.get_data_yahoo(sl, '2012')
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
assert ts[0].dayofyear == 96
#dfi = web.get_components_yahoo('^DJI')
#pan = web.get_data_yahoo(dfi, 'JAN-01-12', 'JAN-31-12')
pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
expected = [19.02, 28.23, 25.39]
result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
assert result == expected
# sanity checking
t= np.array(result)
assert np.issubdtype(t.dtype, np.floating)
assert t.shape == (3,)
expected = [[ 18.99, 28.4 , 25.18],
[ 18.58, 28.31, 25.13],
[ 19.03, 28.16, 25.52],
[ 18.81, 28.82, 25.87]]
result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
assert (result == expected).all()
#Check ret_index
pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987',
ret_index=True)
tstamp = pan.Ret_Index.INTC.first_valid_index()
result = pan.Ret_Index.ix[tstamp]['INTC']
expected = 1.0
assert result == expected
# sanity checking
t= np.array(pan)
assert np.issubdtype(t.dtype, np.floating)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| import unittest
import nose
from datetime import datetime
from pandas.util.py3compat import StringIO, BytesIO
import pandas as pd
import pandas.io.data as web
from pandas.util.testing import (network, assert_frame_equal,
assert_series_equal,
assert_almost_equal)
from numpy.testing.decorators import slow
import urllib2
class TestYahoo(unittest.TestCase):
@slow
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
# an excecption when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 01, 27)
try:
self.assertEquals(
web.DataReader("F", 'yahoo', start, end)['Close'][-1],
13.68)
self.assertRaises(
Exception,
lambda: web.DataReader("NON EXISTENT TICKER", 'yahoo',
start, end))
except urllib2.URLError:
try:
urllib2.urlopen('http://www.google.com')
except urllib2.URLError:
raise nose.SkipTest
else:
raise
@slow
@network
def test_get_quote(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
@slow
@network
def test_get_components(self):
df = web.get_components_yahoo('^DJI') #Dow Jones
assert isinstance(df, pd.DataFrame)
assert len(df) == 30
df = web.get_components_yahoo('^GDAXI') #DAX
assert isinstance(df, pd.DataFrame)
assert len(df) == 30
assert df[df.name.str.contains('adidas', case=False)].index == 'ADS.DE'
df = web.get_components_yahoo('^NDX') #NASDAQ-100
assert isinstance(df, pd.DataFrame)
#assert len(df) == 100
#Usual culprits, should be around for a while
assert 'AAPL' in df.index
assert 'GOOG' in df.index
assert 'AMZN' in df.index
@slow
@network
def test_get_data(self):
import numpy as np
#single symbol
#http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d
df = web.get_data_yahoo('GOOG')
assert df.Volume.ix['OCT-08-2010'] == 2859200
sl = ['AAPL', 'AMZN', 'GOOG']
pan = web.get_data_yahoo(sl, '2012')
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
assert ts[0].dayofyear == 96
#dfi = web.get_components_yahoo('^DJI')
#pan = web.get_data_yahoo(dfi, 'JAN-01-12', 'JAN-31-12')
pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
expected = [19.02, 28.23, 25.39]
result = pan.Close.ix['01-18-12'][['GE', 'MSFT', 'INTC']].tolist()
assert result == expected
# sanity checking
t= np.array(result)
assert np.issubdtype(t.dtype, np.floating)
assert t.shape == (3,)
expected = [[ 18.99, 28.4 , 25.18],
[ 18.58, 28.31, 25.13],
[ 19.03, 28.16, 25.52],
[ 18.81, 28.82, 25.87]]
result = pan.Open.ix['Jan-15-12':'Jan-20-12'][['GE', 'MSFT', 'INTC']].values
assert (result == expected).all()
#Check ret_index
pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987',
ret_index=True)
tstamp = pan.Ret_Index.INTC.first_valid_index()
result = pan.Ret_Index.ix[tstamp]['INTC']
expected = 1.0
assert result == expected
# sanity checking
t= np.array(pan)
assert np.issubdtype(t.dtype, np.floating)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| en | 0.664134 | # asserts that yahoo is minimally working and that it throws # an excecption when DataReader can't get a 200 response from # yahoo #Dow Jones #DAX #NASDAQ-100 #assert len(df) == 100 #Usual culprits, should be around for a while #single symbol #http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d #dfi = web.get_components_yahoo('^DJI') #pan = web.get_data_yahoo(dfi, 'JAN-01-12', 'JAN-31-12') # sanity checking #Check ret_index # sanity checking | 2.81927 | 3 |
DLWP/data/era5.py | jweyn/DLWP | 75 | 6612469 | #
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Utilities for retrieving and processing ERA5 reanalysis data using XArray.
"""
import os
import warnings
import itertools as it
import numpy as np
import netCDF4 as nc
import pandas as pd
import xarray as xr
from datetime import datetime, timedelta
try:
import cdsapi
except ImportError:
warnings.warn("module 'cdsapi' not found; retrieval of ERA5 data unavailable.")
# ==================================================================================================================== #
# Universal parameters and functions
# ==================================================================================================================== #
def _check_exists(file_name, path=False):
if os.path.exists(file_name):
exists = True
local_file = file_name
else:
exists = False
local_file = None
if path:
return exists, local_file
else:
return exists
# For some reason, multiprocessing.Pool.map is placing arguments passed to the function inside another length-1 tuple.
# Much clearer programming would have required arguments of obj, m, month, *args here so that the user knows to include
# the ERA5 object and other arguments correctly.
def call_fetch(args):
obj = args[0]
obj._fetch(*args[1:])
# Format strings for files to write
netcdf_file_format = ''
# Start and end dates of available data
data_start_date = datetime(1979, 1, 1)
data_end_date = datetime(2018, 12, 31)
reforecast_start_date = datetime(1999, 1, 1)
reforecast_end_date = datetime(2009, 12, 31, 18)
# netCDF fill value
fill_value = np.array(nc.default_fillvals['f4']).astype(np.float32)
# Dictionary mapping request variables to netCDF variable naming conventions
variable_names = {
'divergence': 'd',
'fraction_of_cloud_cover': 'cc',
'geopotential': 'z',
'ozone_mass_mixing_ratio': 'o3',
'potential_vorticity': 'pv',
'relative_humidity': 'r',
'specific_cloud_ice_water_content': 'ciwc',
'specific_cloud_liquid_water_content': 'clwc',
'specific_humidity': 'q',
'specific_rain_water_content': 'crwc',
'specific_snow_water_content': 'cswc',
'temperature': 't',
'u_component_of_wind': 'u',
'v_component_of_wind': 'v',
'vertical_velocity': 'w',
'vorticity': 'vo'
}
# ==================================================================================================================== #
# ERA5Reanalysis object class
# ==================================================================================================================== #
class ERA5Reanalysis(object):
"""
Class for manipulating ERA5 Reanalysis data with xarray. Class methods include functions to download,
process, and export data.
"""
def __init__(self, root_directory=None, file_id=''):
"""
Initialize an instance of the ERA5Reanalysis class.
:param root_directory: str: local directory where raw files are stored. If None, defaults to ~/.era5
:param file_id: str: prepended to the processed file names. Useful if files for the same dates will be created
with different parameters, i.e., hours or variables or levels.
"""
self.raw_files = []
self.dataset_variables = []
self.dataset_levels = []
self.dataset_dates = None
if root_directory is None:
self._root_directory = '%s/.era5' % os.path.expanduser('~')
else:
self._root_directory = root_directory
self._file_id = file_id
self._delete_temp = False
self.level_coord = [1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300, 350, 400, 450,
500, 550, 600, 650, 700, 750] + list(range(775, 1001, 25))
self.inverse_lat = True
# Data
self.Dataset = None
self.basemap = None
self._lat_array = None
self._lon_array = None
@property
def lat(self):
if self._lat_array is not None:
return self._lat_array
try:
lat = self.Dataset.variables['lat'][:]
if len(lat.shape) > 2:
self._lat_array = lat[0, ...].values
return self._lat_array
else:
self._lat_array = lat.values
return self._lat_array
except AttributeError:
raise AttributeError('Call to lat method is only valid after data are opened.')
except KeyError:
return
@property
def lon(self):
if self._lon_array is not None:
return self._lon_array
try:
lon = self.Dataset.variables['lon'][:]
if len(lon.shape) > 2:
self._lon_array = lon[0, ...].values
return self._lon_array
else:
self._lon_array = lon.values
return self._lon_array
except AttributeError:
raise AttributeError('Call to lon method is only valid after data are opened.')
except KeyError:
return
def set_variables(self, variables):
"""
Set the variables to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method.
:param variables: list of string variable names
:return:
"""
for v in variables:
try:
assert str(v) in list(variable_names.keys())
except TypeError:
raise TypeError('variables must be convertible to string types')
except AssertionError:
raise ValueError('variables must be within the available levels for the dataset (%s)' %
list(variable_names.keys()))
self.dataset_variables = sorted(list(variables))
def set_levels(self, levels):
"""
Set the levels to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method.
:param levels: list of integer pressure height levels (mb / hPa)
:return:
"""
for l in levels:
try:
assert int(l) in self.level_coord
except TypeError:
raise ValueError('levels must be integers in hPa')
except AssertionError:
raise ValueError('levels must be within the available levels for the dataset (%s)' % self.level_coord)
self.dataset_levels = sorted(list(levels))
def closest_lat_lon(self, lat, lon):
"""
Find the grid-point index of the closest point to the specified latitude and longitude values in loaded
CFS reanalysis data.
:param lat: float or int: latitude in degrees
:param lon: float or int: longitude in degrees
:return:
"""
if lon < 0.:
lon += 360.
distance = (self.lat - lat) ** 2 + (self.lon - lon) ** 2
min_dist = 2.5
if np.min(distance) > min_dist:
raise ValueError('no latitude/longitude points close to requested lat/lon!')
return np.unravel_index(np.argmin(distance, axis=None), distance.shape)
def _set_file_names(self):
# Sets a list of file names.
for variable in self.dataset_variables:
for level in self.dataset_levels:
self.raw_files.append('%s/%s%s_%s.nc' % (self._root_directory, self._file_id, variable, level))
def retrieve(self, variables, levels, years='all', months='all', days='all', hourly=3, n_proc=4, verbose=False,
request_kwargs=None, delete_temporary=False):
"""
Retrieve netCDF files of ERA5 reanalysis data. Must specify the variables and pressure levels desired.
Iterates over variable/level pairs for each API request. Note that with 3-hourly data, one variable/level pair
can be retrieved with a single API request for all dates between 1979-2018. If more dates or higher hourly
resolution is required, it is currently up to the user to perform separate retrieval requests. DO NOT use the
same retrieve function in the same instance of a class to request more dates as this will overwrite
previously downloaded files. Instead, create a new instance of ERA5Reanalysis, give a different file_id, and
then manually concatenate the datasets loaded on each instance.
:param variables: iterable of str: variables to retrieve, one at a time
:param levels: iterable of int: pressure levels to retrieve, one at a time
:param years: iterable: years of data. If 'all', use 1979-2018.
:param months: iterable: months of data. If 'all', get all months.
:param days: iterable: month days of data. If 'all', get all days.
:param hourly: int: hourly time resolution; e.g., 6 for data every 6 hours.
:param n_proc: int: number of processes for parallel retrieval
:param verbose: bool: if True, print progress statements. The API already lists progress statements.
:param request_kwargs: dict: other keywords passed to the retrieval. For example, 'grid' can be used to modify
the lat/lon resolution.
:param delete_temporary: bool: if True, delete the temporary files from the server in favor of the edited
files with correct dimensions. May be risky to delete the raw files.
"""
# Parameter checks
request_kwargs = {} or request_kwargs
self.set_variables(variables)
self.set_levels(levels)
if delete_temporary:
self._delete_temp = True
if years == 'all':
years = list(range(data_start_date.year, data_end_date.year + 1))
else:
for y in years:
try:
assert data_start_date.year <= int(y) <= data_end_date.year
except TypeError:
raise ValueError('years must be integers')
except AssertionError:
raise ValueError('years must be within the available dates for ERA5 (%d-%d)' %
(data_start_date.year, data_end_date.year))
years = [str(y) for y in years]
if months == 'all':
months = list(range(1, 13))
else:
for m in months:
try:
assert 1 <= int(m) <= 12
except TypeError:
raise ValueError('months must be integers')
except AssertionError:
raise ValueError('months must be integers from 1 to 12')
months = ['%02d' % m for m in months]
if days == 'all':
days = list(range(1, 32))
else:
for d in days:
try:
assert 1 <= int(d) <= 31
except TypeError:
raise ValueError('days must be integers')
except AssertionError:
raise ValueError('days must be integers from 1 to 31')
days = ['%02d' % d for d in days]
if hourly < 1 or hourly > 24:
raise ValueError('hourly interval must be between 1 and 24')
hour_daterange = pd.date_range('2000-01-01 00:00', '2000-01-01 23:00', freq='%dh' % hourly)
hours = [d.strftime('%H:%M') for d in hour_daterange]
if len(variables) == 0:
print('ERA5Reanalysis.retrieve: no variables specified; will do nothing.')
return
if len(levels) == 0:
print('ERA5Reanalysis.retrieve: no pressure levels specified; will do nothing.')
return
if int(n_proc) < 0:
raise ValueError("'multiprocess' must be an integer >= 0")
# Create the requests
requests = []
self._set_file_names()
for variable in variables:
for level in levels:
request = {
'product_type': 'reanalysis',
'format': 'netcdf',
'variable': variable,
'pressure_level': level,
'year': years,
'month': months,
'day': days,
'time': hours
}
request.update(request_kwargs)
requests.append(request)
# Create a multi-processing tool, if necessary
if n_proc == 0 or n_proc > 1:
try:
import multiprocessing
if n_proc == 0:
n_proc = multiprocessing.cpu_count()
except ImportError:
warnings.warn("'multiprocessing' module not available; falling back to serial")
n_proc = 1
if n_proc == 1:
for file, request in zip(self.raw_files, requests):
call_fetch((self, request, file, verbose))
else:
pool = multiprocessing.Pool(processes=n_proc)
pool.map(call_fetch, zip(it.repeat(self), requests, self.raw_files, it.repeat(verbose)))
pool.close()
pool.terminate()
pool.join()
def _fetch(self, request, file_name, verbose):
    """Download one (variable, level) request via the CDS API and post-process it.

    The raw download is written to ``file_name + '.tmp'``; ``_process_temp_file``
    then adds the missing 'level' dimension and writes the final ``file_name``.

    :param request: dict: CDS API request body built by ``retrieve``
    :param file_name: str: final netCDF path for this variable/level pair
    :param verbose: bool: if True, print progress statements
    """
    # Fetch the file from the Copernicus Climate Data Store.
    c = cdsapi.Client()
    if verbose:
        print('ERA5Reanalysis.retrieve: fetching %s at %s mb' % (request['variable'], request['pressure_level']))
    c.retrieve('reanalysis-era5-pressure-levels', request, file_name + '.tmp')
    # Add a 'level' dimension to the file (not present in the raw download).
    if verbose:
        print('Adding level dimension')
    self._process_temp_file(file_name, float(request['pressure_level']))
def _process_temp_file(self, file_name, level):
    """Insert a singleton 'level' coordinate into the raw download and save the final file.

    :param file_name: str: final netCDF path; the raw input is file_name + '.tmp'
    :param level: float: pressure level (hPa) to record as the coordinate value
    """
    temp_path = file_name + '.tmp'
    level_coord = np.array([level], dtype=np.float32)
    ds = xr.open_dataset(temp_path).expand_dims('level', axis=1).assign_coords(level=level_coord)
    ds.to_netcdf(file_name)
    if self._delete_temp:
        os.remove(temp_path)
def open(self, **dataset_kwargs):
    """
    Open an xarray multi-file Dataset for the processed files. Set the variables and levels with the instance
    set_variables and set_levels methods. Once opened, this Dataset is accessible by self.Dataset.

    :param dataset_kwargs: kwargs passed to xarray.open_mfdataset()
    """
    # Guard clauses: both variable and level selections must be populated first.
    if not self.dataset_variables:
        raise ValueError('set the variables to open with the set_variables() method')
    if not self.dataset_levels:
        raise ValueError('set the pressure levels to open with the set_levels() method')
    self._set_file_names()
    self.Dataset = xr.open_mfdataset(self.raw_files, **dataset_kwargs)
    self.dataset_dates = self.Dataset['time']
def close(self):
    """Close the open Dataset and clear the cached dates and coordinate arrays.

    :raises ValueError: if no Dataset is currently open.
    """
    if self.Dataset is None:
        raise ValueError('no Dataset to close')
    self.Dataset.close()
    self.Dataset = None
    self.dataset_dates = None
    self._lon_array = None
    self._lat_array = None
def generate_basemap(self, llcrnrlat=None, llcrnrlon=None, urcrnrlat=None, urcrnrlon=None):
    """
    Generates a Basemap object for graphical plot of ERA5 data on a 2-D plane. Bounding box parameters
    are either given, or if None, read from the extremes of the loaded lat/lon data. Other projection parameters
    are set to the default ERA5 configuration.

    :param llcrnrlat: float: lower left corner latitude
    :param llcrnrlon: float: lower left corner longitude
    :param urcrnrlat: float: upper right corner latitude
    :param urcrnrlon: float: upper right corner longitude
    :return:
    """
    from mpl_toolkits.basemap import Basemap
    # Use the default (data-derived) bounding box when any corner is missing.
    # The original detected this by multiplying the four values and catching
    # TypeError; an explicit None test is clearer and cannot be confused by
    # other TypeError sources.
    default = any(v is None for v in (llcrnrlat, llcrnrlon, urcrnrlat, urcrnrlon))
    if default:
        try:
            lat = self.lat
            lon = self.lon
        except (AttributeError, KeyError):
            raise ValueError('I can generate a default Basemap with None parameters, but only if I have some '
                             'data loaded first!')
        # Corners come from the grid extremes; ERA5 latitudes run north-to-south
        # so the lower-left latitude is the last row.
        llcrnrlon, llcrnrlat = lon[0, 0], lat[-1, -1]
        urcrnrlon, urcrnrlat = lon[-1, -1], lat[0, 0]
    basemap = Basemap(projection='cyl', llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
                      llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, resolution='l')
    self.basemap = basemap
| #
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Utilities for retrieving and processing ERA5 reanalysis data using XArray.
"""
import os
import warnings
import itertools as it
import numpy as np
import netCDF4 as nc
import pandas as pd
import xarray as xr
from datetime import datetime, timedelta
try:
import cdsapi
except ImportError:
warnings.warn("module 'cdsapi' not found; retrieval of ERA5 data unavailable.")
# ==================================================================================================================== #
# Universal parameters and functions
# ==================================================================================================================== #
def _check_exists(file_name, path=False):
    """Return whether *file_name* exists on disk.

    :param file_name: str: path to test
    :param path: bool: if True, return a (exists, local_file) tuple where
        local_file is the path itself when it exists, else None
    :return: bool, or (bool, str or None) when path=True
    """
    exists = os.path.exists(file_name)
    local_file = file_name if exists else None
    if path:
        return exists, local_file
    return exists
# For some reason, multiprocessing.Pool.map is placing arguments passed to the function inside another length-1 tuple.
# Much clearer programming would have required arguments of obj, m, month, *args here so that the user knows to include
# the ERA5 object and other arguments correctly.
def call_fetch(args):
    """Unpack an (instance, *fetch_args) tuple and forward to instance._fetch.

    multiprocessing.Pool.map delivers each work item as a single tuple, so this
    module-level shim exists to re-expand the arguments for the bound method.
    """
    instance, *fetch_args = args
    instance._fetch(*fetch_args)
# Format string for processed netCDF file names (currently an unused placeholder).
netcdf_file_format = ''
# Start and end dates of available data
data_start_date = datetime(1979, 1, 1)
data_end_date = datetime(2018, 12, 31)
# Reforecast period bounds; not referenced by the ERA5 class in this file
# (presumably kept for parity with sibling data modules -- TODO confirm).
reforecast_start_date = datetime(1999, 1, 1)
reforecast_end_date = datetime(2009, 12, 31, 18)
# netCDF fill value: netCDF4's default single-precision fill, as float32
fill_value = np.array(nc.default_fillvals['f4']).astype(np.float32)
# Dictionary mapping CDS API request variable names to the short variable
# names used inside the returned netCDF files.
variable_names = {
    'divergence': 'd',
    'fraction_of_cloud_cover': 'cc',
    'geopotential': 'z',
    'ozone_mass_mixing_ratio': 'o3',
    'potential_vorticity': 'pv',
    'relative_humidity': 'r',
    'specific_cloud_ice_water_content': 'ciwc',
    'specific_cloud_liquid_water_content': 'clwc',
    'specific_humidity': 'q',
    'specific_rain_water_content': 'crwc',
    'specific_snow_water_content': 'cswc',
    'temperature': 't',
    'u_component_of_wind': 'u',
    'v_component_of_wind': 'v',
    'vertical_velocity': 'w',
    'vorticity': 'vo'
}
# ==================================================================================================================== #
# ERA5Reanalysis object class
# ==================================================================================================================== #
class ERA5Reanalysis(object):
"""
Class for manipulating ERA5 Reanalysis data with xarray. Class methods include functions to download,
process, and export data.
"""
def __init__(self, root_directory=None, file_id=''):
    """
    Initialize an instance of the ERA5Reanalysis class.

    :param root_directory: str: local directory where raw files are stored. If None, defaults to ~/.era5
    :param file_id: str: prepended to the processed file names. Useful if files for the same dates will be created
        with different parameters, i.e., hours or variables or levels.
    """
    # Selection state populated by set_variables/set_levels/open.
    self.raw_files = []
    self.dataset_variables = []
    self.dataset_levels = []
    self.dataset_dates = None
    if root_directory is None:
        root_directory = '%s/.era5' % os.path.expanduser('~')
    self._root_directory = root_directory
    self._file_id = file_id
    self._delete_temp = False
    # Available ERA5 pressure levels (hPa): irregular below 775, then every 25.
    self.level_coord = ([1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300,
                         350, 400, 450, 500, 550, 600, 650, 700, 750]
                        + list(range(775, 1001, 25)))
    self.inverse_lat = True
    # Data handles, filled in by open()/generate_basemap().
    self.Dataset = None
    self.basemap = None
    self._lat_array = None
    self._lon_array = None
@property
def lat(self):
    """Return the 2-D latitude array, caching it on first access.

    Returns None if the open Dataset has no 'lat' variable; raises
    AttributeError if no Dataset has been opened yet.
    """
    if self._lat_array is not None:
        return self._lat_array
    try:
        lat = self.Dataset.variables['lat'][:]
        # Collapse a leading (e.g. time) dimension if the variable is >2-D.
        self._lat_array = lat[0, ...].values if len(lat.shape) > 2 else lat.values
        return self._lat_array
    except AttributeError:
        raise AttributeError('Call to lat method is only valid after data are opened.')
    except KeyError:
        return
@property
def lon(self):
    """Return the 2-D longitude array, caching it on first access.

    Returns None if the open Dataset has no 'lon' variable; raises
    AttributeError if no Dataset has been opened yet.
    """
    # Serve the cached array when available.
    if self._lon_array is not None:
        return self._lon_array
    try:
        lon = self.Dataset.variables['lon'][:]
        # Collapse a leading (e.g. time) dimension if the variable is >2-D.
        if len(lon.shape) > 2:
            self._lon_array = lon[0, ...].values
            return self._lon_array
        else:
            self._lon_array = lon.values
            return self._lon_array
    except AttributeError:
        raise AttributeError('Call to lon method is only valid after data are opened.')
    except KeyError:
        return
def set_variables(self, variables):
    """
    Set the variables to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method.

    Validation is done with explicit checks rather than `assert`, which is
    silently stripped when Python runs with -O.

    :param variables: list of string variable names
    :return:
    """
    for v in variables:
        try:
            name = str(v)
        except TypeError:
            raise TypeError('variables must be convertible to string types')
        if name not in variable_names:
            raise ValueError('variables must be within the available levels for the dataset (%s)' %
                             list(variable_names.keys()))
    self.dataset_variables = sorted(list(variables))
def set_levels(self, levels):
    """
    Set the levels to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method.

    Validation is done with explicit checks rather than `assert` (stripped
    under -O). Also catches ValueError from int(): in the original, a string
    level such as 'abc' escaped as an uncaught ValueError from int() instead
    of producing the intended error message.

    :param levels: list of integer pressure height levels (mb / hPa)
    :return:
    """
    for l in levels:
        try:
            level = int(l)
        except (TypeError, ValueError):
            raise ValueError('levels must be integers in hPa')
        if level not in self.level_coord:
            raise ValueError('levels must be within the available levels for the dataset (%s)' % self.level_coord)
    self.dataset_levels = sorted(list(levels))
def closest_lat_lon(self, lat, lon):
    """Return the (row, col) grid index of the point nearest the given lat/lon.

    Negative longitudes are wrapped into [0, 360). Raises ValueError if the
    nearest grid point is farther than sqrt(2.5) degrees away in grid space.

    :param lat: float or int: latitude in degrees
    :param lon: float or int: longitude in degrees
    :return: tuple of int: (row, col) index into the lat/lon grids
    """
    if lon < 0.:
        lon += 360.
    # Squared distance in degree space; adequate for a regular lat/lon grid.
    squared_dist = (self.lat - lat) ** 2 + (self.lon - lon) ** 2
    if np.min(squared_dist) > 2.5:
        raise ValueError('no latitude/longitude points close to requested lat/lon!')
    return np.unravel_index(np.argmin(squared_dist, axis=None), squared_dist.shape)
def _set_file_names(self):
    """Rebuild self.raw_files from the current variables and levels.

    BUG FIX: the original appended to the existing list without clearing it,
    so calling retrieve() and then open() (both of which call this method)
    duplicated every file name. The list is now rebuilt from scratch.
    """
    self.raw_files = ['%s/%s%s_%s.nc' % (self._root_directory, self._file_id, variable, level)
                      for variable in self.dataset_variables
                      for level in self.dataset_levels]
def retrieve(self, variables, levels, years='all', months='all', days='all', hourly=3, n_proc=4, verbose=False,
             request_kwargs=None, delete_temporary=False):
    """
    Retrieve netCDF files of ERA5 reanalysis data. Must specify the variables and pressure levels desired.
    Iterates over variable/level pairs for each API request. Note that with 3-hourly data, one variable/level pair
    can be retrieved with a single API request for all dates between 1979-2018. If more dates or higher hourly
    resolution is required, it is currently up to the user to perform separate retrieval requests. DO NOT use the
    same retrieve function in the same instance of a class to request more dates as this will overwrite
    previously downloaded files. Instead, create a new instance of ERA5Reanalysis, give a different file_id, and
    then manually concatenate the datasets loaded on each instance.

    :param variables: iterable of str: variables to retrieve, one at a time
    :param levels: iterable of int: pressure levels to retrieve, one at a time
    :param years: iterable: years of data. If 'all', use 1979-2018.
    :param months: iterable: months of data. If 'all', get all months.
    :param days: iterable: month days of data. If 'all', get all days.
    :param hourly: int: hourly time resolution; e.g., 6 for data every 6 hours.
    :param n_proc: int: number of processes for parallel retrieval (0 means "all CPU cores")
    :param verbose: bool: if True, print progress statements. The API already lists progress statements.
    :param request_kwargs: dict: other keywords passed to the retrieval. For example, 'grid' can be used to modify
        the lat/lon resolution.
    :param delete_temporary: bool: if True, delete the temporary files from the server in favor of the edited
        files with correct dimensions. May be risky to delete the raw files.
    """
    # Parameter checks.
    # BUG FIX: the original `request_kwargs = {} or request_kwargs` evaluates
    # to request_kwargs itself ({} is falsy), so the default None survived and
    # later crashed request.update(None). Default to an empty dict instead.
    request_kwargs = request_kwargs or {}
    self.set_variables(variables)
    self.set_levels(levels)
    if delete_temporary:
        self._delete_temp = True
    # Normalize the date selections to the string lists the CDS API expects.
    if years == 'all':
        years = list(range(data_start_date.year, data_end_date.year + 1))
    else:
        for y in years:
            try:
                # Also catch ValueError: int('abc') raises ValueError, which
                # the original assert-based check let escape unhandled.
                year = int(y)
            except (TypeError, ValueError):
                raise ValueError('years must be integers')
            if not data_start_date.year <= year <= data_end_date.year:
                raise ValueError('years must be within the available dates for ERA5 (%d-%d)' %
                                 (data_start_date.year, data_end_date.year))
    years = [str(y) for y in years]
    if months == 'all':
        months = list(range(1, 13))
    else:
        for m in months:
            try:
                month = int(m)
            except (TypeError, ValueError):
                raise ValueError('months must be integers')
            if not 1 <= month <= 12:
                raise ValueError('months must be integers from 1 to 12')
    months = ['%02d' % m for m in months]
    if days == 'all':
        days = list(range(1, 32))
    else:
        for d in days:
            try:
                day = int(d)
            except (TypeError, ValueError):
                raise ValueError('days must be integers')
            if not 1 <= day <= 31:
                raise ValueError('days must be integers from 1 to 31')
    days = ['%02d' % d for d in days]
    if hourly < 1 or hourly > 24:
        raise ValueError('hourly interval must be between 1 and 24')
    # Build the 'HH:MM' time-of-day list from the hourly interval.
    hour_daterange = pd.date_range('2000-01-01 00:00', '2000-01-01 23:00', freq='%dh' % hourly)
    hours = [d.strftime('%H:%M') for d in hour_daterange]
    if len(variables) == 0:
        print('ERA5Reanalysis.retrieve: no variables specified; will do nothing.')
        return
    if len(levels) == 0:
        print('ERA5Reanalysis.retrieve: no pressure levels specified; will do nothing.')
        return
    if int(n_proc) < 0:
        raise ValueError("'multiprocess' must be an integer >= 0")
    # Create one request per (variable, level) pair, aligned with raw_files.
    requests = []
    self._set_file_names()
    for variable in variables:
        for level in levels:
            request = {
                'product_type': 'reanalysis',
                'format': 'netcdf',
                'variable': variable,
                'pressure_level': level,
                'year': years,
                'month': months,
                'day': days,
                'time': hours
            }
            request.update(request_kwargs)
            requests.append(request)
    # Create a multi-processing tool, if necessary (n_proc == 0 -> all cores).
    if n_proc == 0 or n_proc > 1:
        try:
            import multiprocessing
            if n_proc == 0:
                n_proc = multiprocessing.cpu_count()
        except ImportError:
            warnings.warn("'multiprocessing' module not available; falling back to serial")
            n_proc = 1
    if n_proc == 1:
        for file, request in zip(self.raw_files, requests):
            call_fetch((self, request, file, verbose))
    else:
        pool = multiprocessing.Pool(processes=n_proc)
        pool.map(call_fetch, zip(it.repeat(self), requests, self.raw_files, it.repeat(verbose)))
        pool.close()
        pool.terminate()
        pool.join()
def _fetch(self, request, file_name, verbose):
# Fetch the file
c = cdsapi.Client()
if verbose:
print('ERA5Reanalysis.retrieve: fetching %s at %s mb' % (request['variable'], request['pressure_level']))
c.retrieve('reanalysis-era5-pressure-levels', request, file_name + '.tmp')
# Add a level dimension to the file (not present by default
if verbose:
print('Adding level dimension')
self._process_temp_file(file_name, float(request['pressure_level']))
def _process_temp_file(self, file_name, level):
ds = xr.open_dataset(file_name + '.tmp')
ds = ds.expand_dims('level', axis=1).assign_coords(level=np.array([level], dtype=np.float32))
ds.to_netcdf(file_name)
if self._delete_temp:
os.remove(file_name + '.tmp')
def open(self, **dataset_kwargs):
"""
Open an xarray multi-file Dataset for the processed files. Set the variables and levels with the instance
set_variables and set_levels methods. Once opened, this Dataset is accessible by self.Dataset.
:param dataset_kwargs: kwargs passed to xarray.open_mfdataset()
"""
if len(self.dataset_variables) == 0:
raise ValueError('set the variables to open with the set_variables() method')
if len(self.dataset_levels) == 0:
raise ValueError('set the pressure levels to open with the set_levels() method')
self._set_file_names()
self.Dataset = xr.open_mfdataset(self.raw_files, **dataset_kwargs)
self.dataset_dates = self.Dataset['time']
def close(self):
"""
Close an opened Dataset on self.
"""
if self.Dataset is not None:
self.Dataset.close()
self.Dataset = None
self.dataset_dates = None
self._lon_array = None
self._lat_array = None
else:
raise ValueError('no Dataset to close')
def generate_basemap(self, llcrnrlat=None, llcrnrlon=None, urcrnrlat=None, urcrnrlon=None):
"""
Generates a Basemap object for graphical plot of ERA5 data on a 2-D plane. Bounding box parameters
are either given, or if None, read from the extremes of the loaded lat/lon data. Other projection parameters
are set to the default ERA5 configuration.
:param llcrnrlat: float: lower left corner latitude
:param llcrnrlon: float: lower left corner longitude
:param urcrnrlat: float: upper right corner latitude
:param urcrnrlon: float: upper right corner longitude
:return:
"""
from mpl_toolkits.basemap import Basemap
try:
default = llcrnrlat * llcrnrlon * urcrnrlat * urcrnrlon # error if any are None
default = False
except TypeError:
default = True
if default:
try:
lat = self.lat
lon = self.lon
except (AttributeError, KeyError):
raise ValueError('I can generate a default Basemap with None parameters, but only if I have some '
'data loaded first!')
llcrnrlon, llcrnrlat = lon[0, 0], lat[-1, -1]
urcrnrlon, urcrnrlat = lon[-1, -1], lat[0, 0]
basemap = Basemap(projection='cyl', llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon, resolution='l')
self.basemap = basemap
| en | 0.630228 | # # Copyright (c) 2019 <NAME> <<EMAIL>> # # See the file LICENSE for your rights. # Utilities for retrieving and processing ERA5 reanalysis data using XArray. # ==================================================================================================================== # # Universal parameters and functions # ==================================================================================================================== # # For some reason, multiprocessing.Pool.map is placing arguments passed to the function inside another length-1 tuple. # Much clearer programming would have required arguments of obj, m, month, *args here so that the user knows to include # the ERA5 object and other arguments correctly. # Format strings for files to write # Start and end dates of available data # netCDF fill value # Dictionary mapping request variables to netCDF variable naming conventions # ==================================================================================================================== # # ERA5Reanalysis object class # ==================================================================================================================== # Class for manipulating ERA5 Reanalysis data with xarray. Class methods include functions to download, process, and export data. Initialize an instance of the ERA5Reanalysis class. :param root_directory: str: local directory where raw files are stored. If None, defaults to ~/.era5 :param file_id: str: prepended to the processed file names. Useful if files for the same dates will be created with different parameters, i.e., hours or variables or levels. # Data Set the variables to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method. :param variables: list of string variable names :return: Set the levels to retrieve or open in the dataset. Overridden by arguments to the 'retrieve' method. 
:param levels: list of integer pressure height levels (mb / hPa) :return: Find the grid-point index of the closest point to the specified latitude and longitude values in loaded CFS reanalysis data. :param lat: float or int: latitude in degrees :param lon: float or int: longitude in degrees :return: # Sets a list of file names. Retrieve netCDF files of ERA5 reanalysis data. Must specify the variables and pressure levels desired. Iterates over variable/level pairs for each API request. Note that with 3-hourly data, one variable/level pair can be retrieved with a single API request for all dates between 1979-2018. If more dates or higher hourly resolution is required, it is currently up to the user to perform separate retrieval requests. DO NOT use the same retrieve function in the same instance of a class to request more dates as this will overwrite previously downloaded files. Instead, create a new instance of ERA5Reanalysis, give a different file_id, and then manually concatenate the datasets loaded on each instance. :param variables: iterable of str: variables to retrieve, one at a time :param levels: iterable of int: pressure levels to retrieve, one at a time :param years: iterable: years of data. If 'all', use 1979-2018. :param months: iterable: months of data. If 'all', get all months. :param days: iterable: month days of data. If 'all', get all days. :param hourly: int: hourly time resolution; e.g., 6 for data every 6 hours. :param n_proc: int: number of processes for parallel retrieval :param verbose: bool: if True, print progress statements. The API already lists progress statements. :param request_kwargs: dict: other keywords passed to the retrieval. For example, 'grid' can be used to modify the lat/lon resolution. :param delete_temporary: bool: if True, delete the temporary files from the server in favor of the edited files with correct dimensions. May be risky to delete the raw files. 
# Parameter checks # Create the requests # Create a multi-processing tool, if necessary # Fetch the file # Add a level dimension to the file (not present by default Open an xarray multi-file Dataset for the processed files. Set the variables and levels with the instance set_variables and set_levels methods. Once opened, this Dataset is accessible by self.Dataset. :param dataset_kwargs: kwargs passed to xarray.open_mfdataset() Close an opened Dataset on self. Generates a Basemap object for graphical plot of ERA5 data on a 2-D plane. Bounding box parameters are either given, or if None, read from the extremes of the loaded lat/lon data. Other projection parameters are set to the default ERA5 configuration. :param llcrnrlat: float: lower left corner latitude :param llcrnrlon: float: lower left corner longitude :param urcrnrlat: float: upper right corner latitude :param urcrnrlon: float: upper right corner longitude :return: # error if any are None | 2.543387 | 3 |
Curso de Cisco/Actividades/Act 12 - Modulo 3.py | tomasfriz/Curso-de-Cisco | 0 | 6612470 | <filename>Curso de Cisco/Actividades/Act 12 - Modulo 3.py
listaSombrero = [1, 2, 3, 4, 5]  # Existing list of numbers hidden in the hat.
# Step 1: prompt the user for an integer and use it to
# replace the middle number of the list.
listaSombrero[2] = int(input("Ingrese un numero entero: "))
# Step 2: remove the last element of the list.
del listaSombrero[-1]
# Step 3: print the length of the resulting list.
print("Longitud de la nueva lista:", len(listaSombrero))
print(listaSombrero)
listaSombrero = [1, 2, 3, 4, 5] # Esta es una lista existente de números ocultos en el sombrero.
# Paso 1: escribe una línea de código que solicite al usuario
# para reemplazar el número de en medio con un número entero ingresado por el usuario.
listaSombrero[2] = int(input("Ingrese un numero entero: "))
# Paso 2: escribe aquí una línea de código que elimine el último elemento de la lista.
del listaSombrero[-1]
# Paso 3: escribe aquí una línea de código que imprima la longitud de la lista existente.
print("Longitud de la nueva lista:", len(listaSombrero))
print(listaSombrero) | es | 0.985734 | # Esta es una lista existente de números ocultos en el sombrero. # Paso 1: escribe una línea de código que solicite al usuario # para reemplazar el número de en medio con un número entero ingresado por el usuario. # Paso 2: escribe aquí una línea de código que elimine el último elemento de la lista. # Paso 3: escribe aquí una línea de código que imprima la longitud de la lista existente. | 3.895515 | 4 |
openapi/service/env.py | ProjectJinBao/nirvana7 | 0 | 6612471 | # -*- coding: utf-8 -*-
from openapi.db.models.gitfile import GitFile
from openapi.db.models.env import Env
from logbook import Logger
from openapi.db.models.variable import Variable
from openapi.utils.exception_handle import IsExist, IsNotExist, DefalutError
log = Logger('service/env')
def create_env(project_id, file_path, ref, body, user):
    """Create an Env for the given project file and seed it with empty variables.

    # NOTE(review): `ref` is accepted but unused here -- presumably kept for
    # API symmetry with env_list; confirm with the route definitions.
    """
    file_pk = GitFile.get_obj_pk_by_project_id_and_file_path(project_id, file_path)
    new_env_id = Env.create(file_pk, body.get('env'), body.get('description'), user)
    # Initialize the environment with an empty variable set.
    Variable.update_variable(new_env_id, {}, {}, user)
def env_list(project_id, file_path, ref, user):
    """Return the Env records attached to the given project file."""
    file_pk = GitFile.get_obj_pk_by_project_id_and_file_path(project_id, file_path)
    return Env.list(file_pk)
def delete_env_by_id(env_id, user):
    """Delete a single Env record by its primary key."""
    Env.delete_env_by_id(env_id, user)
# TODO: when an environment is deleted, also delete the variables under it.
def all_env_list(project_id):
    """Return the project's file records, each annotated with its env list under 'env'."""
    file_records = GitFile.get_file_list_by_project_id(project_id)
    for record in file_records:
        record['env'] = Env.list(record.get('id'))
    return file_records
from openapi.db.models.gitfile import GitFile
from openapi.db.models.env import Env
from logbook import Logger
from openapi.db.models.variable import Variable
from openapi.utils.exception_handle import IsExist, IsNotExist, DefalutError
log = Logger('service/env')
def create_env(project_id, file_path, ref, body, user):
file_id = GitFile.get_obj_pk_by_project_id_and_file_path(project_id, file_path)
env = body.get('env')
description = body.get('description')
env_id = Env.create(file_id, env, description, user)
Variable.update_variable(env_id, {}, {}, user)
def env_list(project_id, file_path, ref, user):
file_id = GitFile.get_obj_pk_by_project_id_and_file_path(project_id, file_path)
env_list = Env.list(file_id)
return env_list
def delete_env_by_id(env_id, user):
Env.delete_env_by_id(env_id, user)
# todo 删除环境的时候,把这个环境下的变量也删除
def all_env_list(project_id):
gitfile_id_list = GitFile.get_file_list_by_project_id(project_id)
for git_file in gitfile_id_list:
data = Env.list(git_file.get('id'))
git_file['env'] = data
return gitfile_id_list | zh | 0.83139 | # -*- coding: utf-8 -*- # todo 删除环境的时候,把这个环境下的变量也删除 | 2.095734 | 2 |
BOJ2810.py | INYEONGKIM/BOJ | 2 | 6612472 | <reponame>INYEONGKIM/BOJ
# BOJ 2810: read the seat count and the seating string, count the single 'S'
# seats, and print the number of usable cup holders, capped at the seat count.
seat_count = int(input())
seats = input()
singles = seats.count('S')  # replaces the manual counting loop
usable = singles + (seat_count - singles) // 2 + 1
# The original used the fragile `cond and a or b` idiom; min() is equivalent
# here because `usable` is always >= 1 (so never falsy).
print(min(usable, seat_count))
| l=int(input());s=input();c=0
for i in s:
if i=='S': c+=1
x=c+(l-c)//2+1;print((x>l) and l or x) | none | 1 | 3.294765 | 3 | |
portals/wwits/groups/service/wo_part_line_update/models.py | jalanb/portals | 0 | 6612473 | from dataclasses import dataclass
@dataclass
class ParmModel:
    """Parameter block for a WWITS work-order part-line update request.

    Field names are PascalCase to mirror the external service payload keys
    (presumably serialized as-is -- do not rename them without confirming the
    service contract).
    """
    # Caller / environment identification
    UserID: str
    Version: str
    Env: str
    Source: str
    Session: int
    # Work order / entity identification
    Ordernum: str
    Entity: str
    EntityRecid: int
    EntityCount: int
    Mode: str
    # Part line details
    LineNum: int
    PartNum: str
    PartDesc: str
    QtyOrd: int
    PartRetCd: int
    NonRetCd: str
    # Return shipment details
    RetWaybill: str
    RetCarrier: str
    RetDate: str
    CustFields: str
    # Serial / install information
    SerialInstalled: str
    SerialDeinstalled: str
    PartNumDeinstalled: str
    # Warehouse and outbound shipment details
    FromWhse: str
    Location: str
    ShipCarrier: str
    ShipWaybill: str
    ShipDT: str
    ChgWhse: bool
    # Result fields -- presumably populated by the service response; confirm.
    RC: int
    ResultMsg: str
@dataclass
class WOPartLineUpdateModel:
    """Top-level request wrapper: the service payload carries a single 'Parms' object."""
    Parms: ParmModel
| from dataclasses import dataclass
@dataclass
class ParmModel:
UserID: str
Version: str
Env: str
Source: str
Session: int
Ordernum: str
Entity: str
EntityRecid: int
EntityCount: int
Mode: str
LineNum: int
PartNum: str
PartDesc: str
QtyOrd: int
PartRetCd: int
NonRetCd: str
RetWaybill: str
RetCarrier: str
RetDate: str
CustFields: str
SerialInstalled: str
SerialDeinstalled: str
PartNumDeinstalled: str
FromWhse: str
Location: str
ShipCarrier: str
ShipWaybill: str
ShipDT: str
ChgWhse: bool
RC: int
ResultMsg: str
@dataclass
class WOPartLineUpdateModel:
Parms: ParmModel
| none | 1 | 2.16314 | 2 | |
onlinecourse/migrations/0002_auto_20210629_1917.py | FredericBruening/final-cloud-app-with-database | 0 | 6612474 | <gh_stars>0
# Generated by Django 3.1.3 on 2021-06-29 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: renames the 'content' fields on Choice and
    Question to 'choice_text'/'question_text' and gives Choice.is_correct an
    explicit default of False."""

    dependencies = [
        ('onlinecourse', '0001_initial'),
    ]

    operations = [
        # Rename Choice.content -> Choice.choice_text.
        migrations.RenameField(
            model_name='choice',
            old_name='content',
            new_name='choice_text',
        ),
        # Rename Question.content -> Question.question_text.
        migrations.RenameField(
            model_name='question',
            old_name='content',
            new_name='question_text',
        ),
        # Ensure is_correct is a boolean defaulting to False.
        migrations.AlterField(
            model_name='choice',
            name='is_correct',
            field=models.BooleanField(default=False),
        ),
    ]
| # Generated by Django 3.1.3 on 2021-06-29 19:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='choice',
old_name='content',
new_name='choice_text',
),
migrations.RenameField(
model_name='question',
old_name='content',
new_name='question_text',
),
migrations.AlterField(
model_name='choice',
name='is_correct',
field=models.BooleanField(default=False),
),
] | en | 0.803794 | # Generated by Django 3.1.3 on 2021-06-29 19:17 | 1.862786 | 2 |
tests/features/steps/source/version.py | brittancreek/walker-1.0 | 0 | 6612475 | <reponame>brittancreek/walker-1.0
# Semantic version components (major.minor.patch) for the package.
Major = 0
Minor = 0
Patch = 1
| Major = 0
Minor = 0
Patch = 1 | none | 1 | 1.079314 | 1 | |
nthuoj/settings.py | henryyang42/NTHUOJ_web | 0 | 6612476 | <reponame>henryyang42/NTHUOJ_web<filename>nthuoj/settings.py
"""
Django settings for nthuoj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# -*- encoding=UTF-8 -*-
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from utils.config_info import get_config
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."),)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'autocomplete_light',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'utils',
'problem',
'index',
'contest',
'users',
'team',
'group',
'status',
'axes',
'bootstrapform',
'djangobower',
'datetimewidget',
'ckeditor',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'utils.render_helper.CustomHttpExceptionMiddleware',
'axes.middleware.FailedLoginMiddleware',
)
ROOT_URLCONF = 'nthuoj.urls'
WSGI_APPLICATION = 'nthuoj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
CONFIG_PATH = os.path.join(BASE_DIR, 'nthuoj/config/nthuoj.cfg')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': CONFIG_PATH,
},
}
}
# Custom User auth
AUTH_USER_MODEL = 'users.User'
# where @login_required will redirect to
LOGIN_URL = '/users/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# django-axes 1.3.8 configurations
# https://pypi.python.org/pypi/django-axes/
# redirect to broken page when exceed wrong-try limits
AXES_LOCKOUT_URL = '/users/block_wrong_tries'
# freeze login access for that ip for 0.1*60 = 6 minites
AXES_COOLOFF_TIME = 0.1
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_config('email', 'user')
EMAIL_HOST_PASSWORD = get_config('email', 'password')
EMAIL_PORT = 587
# django-ckeditor configurations
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
},
}
# django-bower settings
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components')
BOWER_INSTALLED_APPS = (
'Chart.js',
'jquery',
'jquery-ui#1.9.2',
'https://github.com/thomaspark/bootswatch.git', # bootswatch
'https://github.com/dimsemenov/Magnific-Popup.git', # Magnific-Popup
'https://github.com/codemirror/CodeMirror.git', # CodeMirror
# bootstrap fileinput
'http://gregpike.net/demos/bootstrap-file-input/bootstrap.file-input.js',
'https://github.com/lou/multi-select.git', # multiselect
'https://github.com/riklomas/quicksearch.git', # quicksearch
# jquery url plugin
'https://gantry.googlecode.com/svn/trunk/root/js/jquery.url.min.js',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
# Contest "public user" (guest team) provisioning settings.
# maximum of public users for a single contest
MAX_PUBLIC_USER = 200
# public user username prefix
PUBLIC_USER_PREFIX = "TEAM"
# NOTE(review): "<PASSWORD>" looks like a redacted placeholder -- confirm the
# real default password is injected from config rather than committed here.
PUBLIC_USER_DEFAULT_PASSWORD = "<PASSWORD>"
| """
Django settings for nthuoj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# -*- encoding=UTF-8 -*-
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from utils.config_info import get_config
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."),)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'autocomplete_light',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'utils',
    'problem',
    'index',
    'contest',
    'users',
    'team',
    'group',
    'status',
    'axes',
    'bootstrapform',
    'djangobower',
    'datetimewidget',
    'ckeditor',
)
# Stock Django middleware plus two project additions: custom HTTP-exception
# rendering (utils.render_helper) and django-axes login throttling.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'utils.render_helper.CustomHttpExceptionMiddleware',
    'axes.middleware.FailedLoginMiddleware',
)
ROOT_URLCONF = 'nthuoj.urls'
WSGI_APPLICATION = 'nthuoj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# MySQL connection parameters (host, user, password, db) are read from the
# .cfg file below via MySQL's read_default_file mechanism.
CONFIG_PATH = os.path.join(BASE_DIR, 'nthuoj/config/nthuoj.cfg')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {
            'read_default_file': CONFIG_PATH,
        },
    }
}
# Custom User auth
AUTH_USER_MODEL = 'users.User'
# where @login_required will redirect to
LOGIN_URL = '/users/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
# Timezone-aware datetimes disabled; times are naive local (Asia/Taipei).
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# django-axes 1.3.8 configurations
# https://pypi.python.org/pypi/django-axes/
# redirect to broken page when exceed wrong-try limits
AXES_LOCKOUT_URL = '/users/block_wrong_tries'
# freeze login access for that ip for 0.1*60 = 6 minites
AXES_COOLOFF_TIME = 0.1
# Outgoing mail: Gmail SMTP over TLS; credentials come from the config file.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_config('email', 'user')
EMAIL_HOST_PASSWORD = get_config('email', 'password')
EMAIL_PORT = 587
# django-ckeditor configurations
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
    'default': {
        'toolbar': 'full',
    },
}
# django-bower settings
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components')
BOWER_INSTALLED_APPS = (
    'Chart.js',
    'jquery',
    'jquery-ui#1.9.2',
    'https://github.com/thomaspark/bootswatch.git', # bootswatch
    'https://github.com/dimsemenov/Magnific-Popup.git', # Magnific-Popup
    'https://github.com/codemirror/CodeMirror.git', # CodeMirror
    # bootstrap fileinput
    'http://gregpike.net/demos/bootstrap-file-input/bootstrap.file-input.js',
    'https://github.com/lou/multi-select.git', # multiselect
    'https://github.com/riklomas/quicksearch.git', # quicksearch
    # jquery url plugin
    'https://gantry.googlecode.com/svn/trunk/root/js/jquery.url.min.js',
)
# Let staticfiles also resolve assets installed by django-bower.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'djangobower.finders.BowerFinder',
)
# maximum of public users for a single contest
MAX_PUBLIC_USER = 200
# public user username prefix
PUBLIC_USER_PREFIX = "TEAM"
PUBLIC_USER_DEFAULT_PASSWORD = "<PASSWORD>"
dumpy/domain.py | lukeolson/dumpy | 0 | 6612477 | <filename>dumpy/domain.py<gh_stars>0
import numpy as np
from scipy.optimize import bisect
class mesh:
    """A rectangular grid clipped against an implicit boundary curve.

    attributes
    ---
    X, Y : array
        n x 1 list of x, y coordinates of a regular grid that covers this domain
    I : array
        array of (flattened) indices of points interior to the domain
        (for which a discretization is sought)
    IN, IS, IE, IW : array
        boolean masks over [0, len(I)) marking interior points whose north /
        south / east / west grid neighbour falls outside the boundary
    dN, dS, dE, dW : array
        (signed) distance to the boundary from each IN, IS, IE, IW point

    functions
    ---
    set_boundary : function
        install f(x, y) with f(x, y) = 0 on the boundary curve,
        f(x, y) < 0 inside the curve and f(x, y) > 0 outside it
    """
    # Class-level defaults; set_mesh() overwrites these per instance.
    nx = 1
    ny = 1
    hx = 0.0
    hy = 0.0
    X = None
    Y = None
    boundary_set = False

    def __init__(self, name, extent, nx, ny):
        self.set_boundary(name)
        self.set_mesh(extent, nx, ny)

    def _boundary(self):
        """Placeholder; replaced by a concrete level-set function in set_boundary()."""
        pass

    def set_boundary(self, name):
        """Install the implicit boundary function f(x, y).

        `name` is either a known shape name ('circle', 'square') or a
        callable f(x, y) with f = 0 on the curve, f < 0 inside, f > 0 outside.
        Unrecognized, non-callable values leave the boundary unset.
        """
        # Single if/elif chain (the original used two independent `if`s,
        # which worked only because a str is never callable).
        if name == 'circle':
            self._boundary = lambda x, y: x**2 + y**2 - 1.0
            self.boundary_set = True
        elif name == 'square':
            # The `0.0*x` term keeps the result array-shaped for array input.
            self._boundary = lambda x, y: np.maximum(np.abs(x), np.abs(y)) + 0.0*x - 1.0
            self.boundary_set = True
        elif callable(name):
            self._boundary = name
            self.boundary_set = True

    def set_mesh(self, extent, nx, ny):
        """
        sets a mesh that should overlap the boundary

        extent: array
            array of xmin, xmax, ymin, ymax for the mesh
        nx, ny: int
            mesh sizes

        Raises RuntimeError if set_boundary() has not been called first.
        """
        tol = 1e-14
        if not self.boundary_set:
            # Was `raise Error(...)`: `Error` is undefined, so that line
            # actually crashed with a NameError.  Raise a real exception.
            raise RuntimeError('need to set_boundary description first')
        xmin, xmax, ymin, ymax = extent
        hx = (xmax - xmin) / (nx - 1)
        hy = (ymax - ymin) / (ny - 1)
        X, Y = np.meshgrid(np.linspace(xmin, xmax, nx),
                           np.linspace(ymin, ymax, ny))
        # keep 2D indexing; interior points are strictly inside the curve
        I2D = np.where(self._boundary(X, Y) < -tol)
        # meshgrid (default indexing='xy') returns (ny, nx)-shaped arrays,
        # so ravelled indices must be computed against X.shape, not (nx, ny).
        # (The two agree whenever nx == ny, which is how this was used.)
        I = np.ravel_multi_index(I2D, X.shape)
        n = len(I)
        indexmap = -np.ones(X.shape, dtype=int)
        indexmap[I2D] = np.arange(n, dtype=int)
        # Directional boundary masks; distances default to one grid cell.
        IN = np.zeros((n,), dtype=bool)
        IS = np.zeros((n,), dtype=bool)
        IE = np.zeros((n,), dtype=bool)
        IW = np.zeros((n,), dtype=bool)
        dN = hy * np.ones((n,))
        dS = hy * np.ones((n,))
        dE = hx * np.ones((n,))
        dW = hx * np.ones((n,))
        X = X.ravel()
        Y = Y.ravel()
        for i in range(len(I)):
            x, y = X[I[i]], Y[I[i]]
            boundaryx = lambda xx: self._boundary(xx, y)
            boundaryy = lambda yy: self._boundary(x, yy)
            # A grid neighbour at (or past) the curve marks this point as
            # boundary-adjacent; 1-D bisection then locates the crossing
            # and records the signed distance to it.
            if self._boundary(x, y + hy) > -tol:
                IN[i] = True
                dN[i] = bisect(boundaryy, y, y + 2*hy) - y
            if self._boundary(x, y - hy) > -tol:
                IS[i] = True
                dS[i] = bisect(boundaryy, y, y - 2*hy) - y
            if self._boundary(x + hx, y) > -tol:
                IE[i] = True
                dE[i] = bisect(boundaryx, x, x + 2*hx) - x
            if self._boundary(x - hx, y) > -tol:
                IW[i] = True
                dW[i] = bisect(boundaryx, x, x - 2*hx) - x
        try:
            # NOTE(review): np.where(...) returns a 1-tuple, so both sides
            # are always length 1 and this check is vacuous; the intent was
            # probably len(np.where(IN)[0]) == len(np.where(dN < hy)[0]).
            # Kept as-is to avoid changing behaviour near boundary-tolerance
            # edge cases.
            assert(len(np.where(IN)) == len(np.where(dN < hy)))
        except AssertionError:
            print('Problem finding distances to the boundary')
            raise
        # Publish everything as instance attributes.
        att = {'X': X, 'Y': Y,
               'nx': nx, 'ny': ny, 'hx': hx, 'hy': hy,
               'I': I, 'I2D': I2D,
               'IN': IN, 'IS': IS, 'IE': IE, 'IW': IW,
               'dN': dN, 'dS': dS, 'dE': dE, 'dW': dW,
               'indexmap': indexmap,
               }
        for k in att:
            setattr(self, k, att[k])
if __name__ == '__main__':
    # Demo: solve a Poisson problem on the unit disk, embedded in an 18x18
    # grid over [-2, 2]^2, then visualise the mesh classification.
    nx=18
    ny=18
    run1 = mesh(name='circle', extent=[-2,2,-2,2], nx=nx, ny=ny)
    I = run1.I
    IN = run1.IN
    IS = run1.IS
    IE = run1.IE
    IW = run1.IW
    import disc
    # Project-local discretization of the clipped grid (Shortley-Weller
    # style, by its name -- see the disc module for details).
    A = disc.shortlyweller(run1)
    # u = 1 - x^2 - y^2 satisfies -Laplace(u) = 4, matching f below.
    u = 1 - run1.X[I]**2 - run1.Y[I]**2
    f = 4*np.ones(run1.X[I].shape)
    import scipy.sparse.linalg as spla
    uh = spla.spsolve(A, f)
    import matplotlib.pyplot as plt
    # Scatter the solution back onto the full (flattened) grid; points
    # outside the domain stay NaN so pcolormesh leaves them blank.
    plt.figure()
    uhgrid = np.zeros(run1.X.shape) * np.nan
    uhgrid[run1.I] = uh
    plt.pcolormesh(run1.X.reshape((nx,ny)), run1.Y.reshape((nx,ny)), uhgrid.reshape((nx,ny)))
    # Second figure: all grid points, interior points, the N/S/E/W
    # boundary-adjacent markers, the zero level set, and the located
    # boundary crossings ('k+').
    plt.figure()
    plt.plot(run1.X, run1.Y, 'o', clip_on=False);
    plt.plot(run1.X[I], run1.Y[I],
             'r*', clip_on=False, ms=10, label='interior')
    plt.plot(run1.X[I[IN]], run1.Y[I[IN]],
             'mo', clip_on=False, ms=15, label='north',
             mfc='None', mew=2, mec='m')
    plt.plot(run1.X[I[IS]], run1.Y[I[IS]],
             'yo', clip_on=False, ms=15, label='south',
             mfc='None', mew=2, mec='y')
    plt.plot(run1.X[I[IE]], run1.Y[I[IE]],
             'gs', clip_on=False, ms=10, label='east',
             mfc='None', mew=2, mec='g')
    plt.plot(run1.X[I[IW]], run1.Y[I[IW]],
             'cs', clip_on=False, ms=10, label='west',
             mfc='None', mew=2, mec='c')
    plt.contour(run1.X.reshape((nx,ny)),
                run1.Y.reshape((nx,ny)),
                run1._boundary(run1.X, run1.Y).reshape((nx,ny)),
                levels=[0])
    plt.plot(run1.X[I[IN]],
             run1.Y[I[IN]] + run1.dN[IN], 'k+', ms=10)
    plt.plot(run1.X[I[IS]],
             run1.Y[I[IS]] + run1.dS[IS], 'k+', ms=10)
    plt.plot(run1.X[I[IE]] + run1.dE[IE],
             run1.Y[I[IE]], 'k+', ms=10)
    plt.plot(run1.X[I[IW]] + run1.dW[IW],
             run1.Y[I[IW]], 'k+', ms=10)
    plt.legend()
    plt.show()
| <filename>dumpy/domain.py<gh_stars>0
import numpy as np
from scipy.optimize import bisect
class mesh:
    """
    attributes
    ---
    X, Y : array
        n x 1 list of x, y coordinates of a regular grid that covers this domain
    I : array
        array of points interior to the domain (for which a discretization is sought)
    xybdy : array
        list of points on the boundary. xybdy is not in X, Y
    IN, IS, IE, IW : array
        subset of [0,len(I)] on the boundaries
    dN, dS, dE, dW : array
        (signed) distance to the boundary from each IN, IS, IE, IW points
    functions
    ---
    set_boundary : function
        a function of x, y that describes f(x,y) = 0, the boundary curve
        f(x, y) < 0 is inside the curve
        f(x, y) > 0 is outside the curve
    """
    # Class-level defaults, overwritten per instance by set_mesh().
    nx = 1
    ny = 1
    hx = 0.0
    hy = 0.0
    X = None
    Y = None
    boundary_set = False
    def __init__(self, name, extent, nx, ny):
        self.set_boundary(name)
        self.set_mesh(extent, nx, ny)
    def _boundary(self):
        """
        a blank boundary function; replaced by set_boundary()
        """
        pass
    def set_boundary(self, name):
        """
        a function of x, y that describes f(x,y) = 0, the boundary curve
        f(x, y) < 0 is inside the curve
        f(x, y) > 0 is outside the curve
        """
        # NOTE(review): the 'circle' and 'square' branches are independent
        # `if`s, so after a 'circle' match the `elif callable(...)` below is
        # still evaluated; harmless (a str is never callable) but an
        # if/elif chain was probably intended.
        if name == 'circle':
            self._boundary = lambda x, y: x**2 + y**2 - 1.0
            self.boundary_set = True
        if name == 'square':
            self._boundary = lambda x, y: np.maximum(np.abs(x), np.abs(y)) + 0.0*x - 1.0
            self.boundary_set = True
        elif callable(name):
            self._boundary = name
            self.boundary_set = True
    def set_mesh(self, extent, nx, ny):
        """
        sets a mesh that should overlap the boundary
        extent: array
            array of xmin, xmax, ymin, ymax for the mesh
        nx, ny: int
            mesh sizes
        """
        tol = 1e-14
        if not self.boundary_set:
            # NOTE(review): `Error` is not defined anywhere, so this line
            # actually surfaces as a NameError rather than a domain error.
            raise Error('need to set_boundary description first')
        xmin, xmax, ymin, ymax = extent
        hx = (xmax - xmin) / (nx - 1)
        hy = (ymax - ymin) / (ny - 1)
        X, Y = np.meshgrid(np.linspace(xmin, xmax, nx),
                           np.linspace(ymin, ymax, ny))
        # keep 2D indexing
        I2D = np.where(self._boundary(X, Y) < -tol)
        # NOTE(review): meshgrid returns (ny, nx)-shaped arrays, so these
        # ravelled indices only line up with X.ravel() when nx == ny (as in
        # the demo below); (nx, ny) here looks like a latent bug.
        I = np.ravel_multi_index(I2D, (nx,ny))
        n = len(I)
        indexmap = -np.ones(X.shape, dtype=int)
        indexmap[I2D] = np.arange(n, dtype=int)
        # Directional boundary masks; distances default to one grid cell.
        IN = np.zeros((n,), dtype=bool)
        IS = np.zeros((n,), dtype=bool)
        IE = np.zeros((n,), dtype=bool)
        IW = np.zeros((n,), dtype=bool)
        dN = hy * np.ones((n,))
        dS = hy * np.ones((n,))
        dE = hx * np.ones((n,))
        dW = hx * np.ones((n,))
        X = X.ravel()
        Y = Y.ravel()
        for i in range(len(I)):
            x, y = X[I[i]], Y[I[i]]
            boundaryx = lambda xx: self._boundary(xx, y)
            boundaryy = lambda yy: self._boundary(x, yy)
            # A grid neighbour at (or past) the curve marks this point as
            # boundary-adjacent; bisection locates the signed distance.
            if self._boundary(x, y + hy) > -tol:
                IN[i] = True
                dN[i] = bisect(boundaryy, y, y + 2*hy) - y
            if self._boundary(x, y - hy) > -tol:
                IS[i] = True
                dS[i] = bisect(boundaryy, y, y - 2*hy) - y
            if self._boundary(x + hx, y) > -tol:
                IE[i] = True
                dE[i] = bisect(boundaryx, x, x + 2*hx) - x
            if self._boundary(x - hx, y) > -tol:
                IW[i] = True
                dW[i] = bisect(boundaryx, x, x - 2*hx) - x
        try:
            # NOTE(review): np.where(...) returns a 1-tuple, so both sides
            # are always length 1 and this assertion can never fire as
            # intended; probably meant to compare np.where(...)[0] lengths.
            assert(len(np.where(IN)) == len(np.where(dN < hy)))
        except AssertionError:
            print('Problem finding distances to the boundary')
            raise
        # Publish everything as instance attributes.
        att = {'X': X, 'Y': Y,
               'nx': nx, 'ny': ny, 'hx': hx, 'hy': hy,
               'I': I, 'I2D': I2D,
               'IN': IN, 'IS': IS, 'IE': IE, 'IW': IW,
               'dN': dN, 'dS': dS, 'dE': dE, 'dW': dW,
               'indexmap': indexmap,
               }
        for k in att:
            setattr(self, k, att[k])
if __name__ == '__main__':
    # Demo: solve a Poisson problem on the unit disk, embedded in an 18x18
    # grid over [-2, 2]^2, then visualise the mesh classification.
    nx=18
    ny=18
    run1 = mesh(name='circle', extent=[-2,2,-2,2], nx=nx, ny=ny)
    I = run1.I
    IN = run1.IN
    IS = run1.IS
    IE = run1.IE
    IW = run1.IW
    import disc
    # Project-local discretization of the clipped grid (Shortley-Weller
    # style, by its name -- see the disc module for details).
    A = disc.shortlyweller(run1)
    # u = 1 - x^2 - y^2 satisfies -Laplace(u) = 4, matching f below.
    u = 1 - run1.X[I]**2 - run1.Y[I]**2
    f = 4*np.ones(run1.X[I].shape)
    import scipy.sparse.linalg as spla
    uh = spla.spsolve(A, f)
    import matplotlib.pyplot as plt
    # Scatter the solution back onto the full (flattened) grid; points
    # outside the domain stay NaN so pcolormesh leaves them blank.
    plt.figure()
    uhgrid = np.zeros(run1.X.shape) * np.nan
    uhgrid[run1.I] = uh
    plt.pcolormesh(run1.X.reshape((nx,ny)), run1.Y.reshape((nx,ny)), uhgrid.reshape((nx,ny)))
    # Second figure: all grid points, interior points, the N/S/E/W
    # boundary-adjacent markers, the zero level set, and the located
    # boundary crossings ('k+').
    plt.figure()
    plt.plot(run1.X, run1.Y, 'o', clip_on=False);
    plt.plot(run1.X[I], run1.Y[I],
             'r*', clip_on=False, ms=10, label='interior')
    plt.plot(run1.X[I[IN]], run1.Y[I[IN]],
             'mo', clip_on=False, ms=15, label='north',
             mfc='None', mew=2, mec='m')
    plt.plot(run1.X[I[IS]], run1.Y[I[IS]],
             'yo', clip_on=False, ms=15, label='south',
             mfc='None', mew=2, mec='y')
    plt.plot(run1.X[I[IE]], run1.Y[I[IE]],
             'gs', clip_on=False, ms=10, label='east',
             mfc='None', mew=2, mec='g')
    plt.plot(run1.X[I[IW]], run1.Y[I[IW]],
             'cs', clip_on=False, ms=10, label='west',
             mfc='None', mew=2, mec='c')
    plt.contour(run1.X.reshape((nx,ny)),
                run1.Y.reshape((nx,ny)),
                run1._boundary(run1.X, run1.Y).reshape((nx,ny)),
                levels=[0])
    plt.plot(run1.X[I[IN]],
             run1.Y[I[IN]] + run1.dN[IN], 'k+', ms=10)
    plt.plot(run1.X[I[IS]],
             run1.Y[I[IS]] + run1.dS[IS], 'k+', ms=10)
    plt.plot(run1.X[I[IE]] + run1.dE[IE],
             run1.Y[I[IE]], 'k+', ms=10)
    plt.plot(run1.X[I[IW]] + run1.dW[IW],
             run1.Y[I[IW]], 'k+', ms=10)
    plt.legend()
    plt.show()
| en | 0.860991 | attributes --- X, Y : array n x 1 list of x, y cooridnates of a regular grid that covers this domain I : array array of points interior to the domain (for which a discretization is sought xybdy : array list of points on the boundary. xybdy is not in X, Y IN, IS, IE, IW : array subset of [0,len(I)] on the boundaries dN, dS, dE, dW : array (signed) distance to the boundary from each IN, IS, IE, IW points functions --- set_boundary : function a function of x, y that describes f(x,y) = 0, the boundary curve f(x, y) < 0 is inside the curve f(x, y) > 0 is outside the curve a blank boundary function a function of x, y that describes f(x,y) = 0, the boundary curve f(x, y) < 0 is inside the curve f(x, y) > 0 is outside the curve sets a mesh that should overlap the boundary extent: array array of xmin, xmax, ymin, ymax for the mesh nx, ny: int mesh sizes # keep 2D indexing | 3.008198 | 3 |
al5d/scripts/control_al5d.py | pouyaAB/ros_teleoperate | 0 | 6612478 | #!/usr/bin/env python
import numpy as np
import rospy
import sys
sys.path.append("../../")
import configparser
import tf
import actionlib
import signal
import std_msgs.msg
import geometry_msgs.msg
import sensor_msgs.msg
from geometry_msgs.msg import Pose
from std_msgs.msg import Float32MultiArray
from controllers.psmove.psmove_controller import PSmoveController
from controllers.ps4_controller.ps4_controller import PS4Controller
# Example of a full command vector (author's reference note):
#[-13.5040283203125, 69.99999999999994, 330.0, -0.10000000000000001, 0.0, 1, 0, human]
pre_pos_command = [0.0, 0.0, 0.0]
pre_orientation_command = [0.0, 0.0, 0.0, 0.0]
# Gripper state flag published with each command; toggled by the
# "gripper-toggle" button in command_received().
open_gripper = 1.0
# Scale factor applied to controller position deltas.
SPEED = 20
# Park poses used by reset_robot(): "waiting" while the robot is in
# charge, "sitting" otherwise.
waiting_robot_position = [-13.5040283203125, 250, 250.0]
waiting_robot_orientation = [-0.10000000000000001, 0.0]
sitting_robot_position = [-13.5040283203125, 50, 250.0]
sitting_robot_orientation = [-0.10000000000000001, 0.0]
robotCharge = True
pause = False
print 'robot in charge: ' + str(robotCharge)
curr_robot_position = [-13.5040283203125, 250, 250.0]
curr_robot_orientation = [-0.10000000000000001, 0.0]
initialized = False
# Per-channel limits; presumably servo pulse widths for the six AL5D
# channels -- confirm against the arm driver.
ranges = [ # MIN, CENTER, MAX
    [600, 1500, 2400],# BASE
    [600, 1500, 2200],# SHOULDER
    [600, 1250, 2200],# ELBOW
    [600, 1500, 2400],# WRIST
    [600, 1350, 2400],# WRIST_ROTATE
    [600, 1600, 2400] # GRIPPER
]
config = configparser.ConfigParser()
config.read('../../conf.ini')
def signal_handler(signal, frame):
    """Handle SIGINT by terminating the process with exit status 0.

    Both parameters are required by the signal-handler calling
    convention but are unused here.
    """
    # sys.exit(0) simply raises SystemExit(0); raise it directly.
    raise SystemExit(0)
# Install the Ctrl-C handler at import time so the script exits cleanly.
signal.signal(signal.SIGINT, signal_handler)
# The two commented blocks below are dead code kept for reference: a pose
# feedback callback and a cartesian-velocity publisher.
# def robot_position_callback(msg):
# global curr_robot_position
# global curr_robot_orientation
#
# curr_robot_position = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
# curr_robot_orientation = [msg.pose.orientation.x, msg.pose.orientation.y,
# msg.pose.orientation.z, msg.pose.orientation.w]
# def cartesian_velocity_client(linear, angular):
# """Send a cartesian goal to the action server."""
# vel_goal = geometry_msgs.msg.TwistStamped()
# vel_goal.header = std_msgs.msg.Header(frame_id=(config["arm"]["type"] + '_api_origin'))
# vel_goal.twist.linear = geometry_msgs.msg.Vector3(
# x=linear[0], y=linear[1], z=linear[2])
# vel_goal.twist.angular = geometry_msgs.msg.Vector3(
# x=angular[0], y=angular[1], z=angular[2])
#
# velocity_topic.publish(vel_goal)
def check_ranges():
    """Clamp the cached pose into its per-channel [MIN, MAX] limits.

    Mutates the module-level ``curr_robot_position`` (ranges rows 0-2)
    and ``curr_robot_orientation`` (ranges rows 3-4) lists in place.
    """
    for axis in range(3):
        low, _, high = ranges[axis]
        curr_robot_position[axis] = np.clip([curr_robot_position[axis]], low, high)[0]
    for axis in range(2):
        low, _, high = ranges[3 + axis]
        curr_robot_orientation[axis] = np.clip([curr_robot_orientation[axis]], low, high)[0]
def reset_robot():
    """Park the arm (waiting pose when the robot is in charge, sitting
    pose otherwise) and publish that command with the gripper open."""
    global curr_robot_position, curr_robot_orientation, open_gripper, robotCharge
    global sitting_robot_position, sitting_robot_orientation
    command_message = Float32MultiArray()
    if robotCharge:
        curr_robot_position = waiting_robot_position
        curr_robot_orientation = waiting_robot_orientation
    else:
        curr_robot_position = sitting_robot_position
        curr_robot_orientation = sitting_robot_orientation
    # check_ranges()
    open_gripper = 1
    # Payload layout: [position(3), orientation(2), gripper].
    command_message.data = np.concatenate([curr_robot_position, curr_robot_orientation, [1]])
    # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0]
    al5d_publisher.publish(command_message)
def command_received(msg):
    """Translate one controller message into an arm command and publish it.

    Buttons: "gripper-toggle" flips the gripper flag; "circle" toggles
    pause (when not already paused it also flips robotCharge and re-parks
    the arm via reset_robot()); "triangle" toggles robot/human control.
    """
    global curr_robot_position, curr_robot_orientation, open_gripper, robotCharge, pause
    diff_orientation = controller.get_orientation(msg)
    pos = controller.get_diff_position(msg)
    # cartesian_velocity_client(pos, diff_orientation)
    # NOTE(review): these create locals shadowing the module-level pre_*
    # variables and are never read afterwards (dead stores).
    pre_pos_command = curr_robot_position
    pre_orientation_command = curr_robot_orientation
    bt = controller.get_button(msg)
    if bt == "gripper-toggle":
        open_gripper = 1 - open_gripper
    elif bt == "circle":
        if not pause:
            robotCharge = not robotCharge
        if not pause:
            reset_robot()
        pause = not pause
        print 'robot in charge: ' + str(robotCharge)
        return
    elif bt == "triangle":
        robotCharge = not robotCharge
        print 'robot in charge: ' + str(robotCharge)
    command_message = Float32MultiArray()
    # Integrate the controller deltas into the cached pose (note the x/y
    # swap on position) and publish [position(3), orientation(2), gripper].
    curr_robot_position = np.add(curr_robot_position, np.multiply([pos[1], pos[0], pos[2]], SPEED))
    curr_robot_orientation = np.add(curr_robot_orientation, np.multiply(diff_orientation[0:2], 0.1))
    # check_ranges()
    command_message.data = np.concatenate([curr_robot_position, curr_robot_orientation, [open_gripper]])
    # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0]
    if not pause and robotCharge:
        al5d_publisher.publish(command_message)
# Select the input device declared in conf.ini and subscribe to its topic.
# NOTE(review): `controller` is only defined when controller_type matches
# one of the two cases below.
controller_type = config["controller"]["type"]
rospy.init_node(config["arm"]["type"] + '_controller')
if controller_type == "psmove":
    rospy.Subscriber(config["channels"][controller_type], Float32MultiArray, command_received)
    controller = PSmoveController() # default controller
if controller_type == "ps4_controller":
    rospy.Subscriber(config["channels"][controller_type], sensor_msgs.msg.Joy, command_received)
    controller = PS4Controller() # default controller
# rospy.Subscriber('/' + config["arm"]["type"] + '_arm_driver/out/tool_position',
# geometry_msgs.msg.PoseStamped, robot_position_callback)
# Arm commands are published on /move_info.
al5d_publisher = rospy.Publisher('/move_info',
                                 Float32MultiArray, queue_size=100)
def main():
    """Spin the ROS node so subscriber callbacks keep firing."""
    rospy.spin()
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import numpy as np
import rospy
import sys
sys.path.append("../../")
import configparser
import tf
import actionlib
import signal
import std_msgs.msg
import geometry_msgs.msg
import sensor_msgs.msg
from geometry_msgs.msg import Pose
from std_msgs.msg import Float32MultiArray
from controllers.psmove.psmove_controller import PSmoveController
from controllers.ps4_controller.ps4_controller import PS4Controller
# Example of a full command vector (author's reference note):
#[-13.5040283203125, 69.99999999999994, 330.0, -0.10000000000000001, 0.0, 1, 0, human]
pre_pos_command = [0.0, 0.0, 0.0]
pre_orientation_command = [0.0, 0.0, 0.0, 0.0]
# Gripper state flag published with each command; toggled by the
# "gripper-toggle" button in command_received().
open_gripper = 1.0
# Scale factor applied to controller position deltas.
SPEED = 20
# Park poses used by reset_robot(): "waiting" while the robot is in
# charge, "sitting" otherwise.
waiting_robot_position = [-13.5040283203125, 250, 250.0]
waiting_robot_orientation = [-0.10000000000000001, 0.0]
sitting_robot_position = [-13.5040283203125, 50, 250.0]
sitting_robot_orientation = [-0.10000000000000001, 0.0]
robotCharge = True
pause = False
print 'robot in charge: ' + str(robotCharge)
curr_robot_position = [-13.5040283203125, 250, 250.0]
curr_robot_orientation = [-0.10000000000000001, 0.0]
initialized = False
# Per-channel limits; presumably servo pulse widths for the six AL5D
# channels -- confirm against the arm driver.
ranges = [ # MIN, CENTER, MAX
    [600, 1500, 2400],# BASE
    [600, 1500, 2200],# SHOULDER
    [600, 1250, 2200],# ELBOW
    [600, 1500, 2400],# WRIST
    [600, 1350, 2400],# WRIST_ROTATE
    [600, 1600, 2400] # GRIPPER
]
config = configparser.ConfigParser()
config.read('../../conf.ini')
def signal_handler(signal, frame):
    """SIGINT handler: exit the process with status 0."""
    sys.exit(0)
# Install the Ctrl-C handler at import time so the script exits cleanly.
signal.signal(signal.SIGINT, signal_handler)
# The two commented blocks below are dead code kept for reference: a pose
# feedback callback and a cartesian-velocity publisher.
# def robot_position_callback(msg):
# global curr_robot_position
# global curr_robot_orientation
#
# curr_robot_position = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]
# curr_robot_orientation = [msg.pose.orientation.x, msg.pose.orientation.y,
# msg.pose.orientation.z, msg.pose.orientation.w]
# def cartesian_velocity_client(linear, angular):
# """Send a cartesian goal to the action server."""
# vel_goal = geometry_msgs.msg.TwistStamped()
# vel_goal.header = std_msgs.msg.Header(frame_id=(config["arm"]["type"] + '_api_origin'))
# vel_goal.twist.linear = geometry_msgs.msg.Vector3(
# x=linear[0], y=linear[1], z=linear[2])
# vel_goal.twist.angular = geometry_msgs.msg.Vector3(
# x=angular[0], y=angular[1], z=angular[2])
#
# velocity_topic.publish(vel_goal)
def check_ranges():
    # Clamp each cached coordinate into its [MIN, MAX] row of `ranges`
    # (rows 0-2 bound position, rows 3-4 orientation); mutates in place.
    curr_robot_position[0] = np.clip([curr_robot_position[0]], ranges[0][0], ranges[0][2])[0]
    curr_robot_position[1] = np.clip([curr_robot_position[1]], ranges[1][0], ranges[1][2])[0]
    curr_robot_position[2] = np.clip([curr_robot_position[2]], ranges[2][0], ranges[2][2])[0]
    curr_robot_orientation[0] = np.clip([curr_robot_orientation[0]], ranges[3][0], ranges[3][2])[0]
    curr_robot_orientation[1] = np.clip([curr_robot_orientation[1]], ranges[4][0], ranges[4][2])[0]
def reset_robot():
    """Park the arm (waiting pose when the robot is in charge, sitting
    pose otherwise) and publish that command with the gripper open."""
    global curr_robot_position, curr_robot_orientation, open_gripper, robotCharge
    global sitting_robot_position, sitting_robot_orientation
    command_message = Float32MultiArray()
    if robotCharge:
        curr_robot_position = waiting_robot_position
        curr_robot_orientation = waiting_robot_orientation
    else:
        curr_robot_position = sitting_robot_position
        curr_robot_orientation = sitting_robot_orientation
    # check_ranges()
    open_gripper = 1
    # Payload layout: [position(3), orientation(2), gripper].
    command_message.data = np.concatenate([curr_robot_position, curr_robot_orientation, [1]])
    # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0]
    al5d_publisher.publish(command_message)
def command_received(msg):
    """Translate one controller message into an arm command and publish it.

    Buttons: "gripper-toggle" flips the gripper flag; "circle" toggles
    pause (when not already paused it also flips robotCharge and re-parks
    the arm via reset_robot()); "triangle" toggles robot/human control.
    """
    global curr_robot_position, curr_robot_orientation, open_gripper, robotCharge, pause
    diff_orientation = controller.get_orientation(msg)
    pos = controller.get_diff_position(msg)
    # cartesian_velocity_client(pos, diff_orientation)
    # NOTE(review): these create locals shadowing the module-level pre_*
    # variables and are never read afterwards (dead stores).
    pre_pos_command = curr_robot_position
    pre_orientation_command = curr_robot_orientation
    bt = controller.get_button(msg)
    if bt == "gripper-toggle":
        open_gripper = 1 - open_gripper
    elif bt == "circle":
        if not pause:
            robotCharge = not robotCharge
        if not pause:
            reset_robot()
        pause = not pause
        print 'robot in charge: ' + str(robotCharge)
        return
    elif bt == "triangle":
        robotCharge = not robotCharge
        print 'robot in charge: ' + str(robotCharge)
    command_message = Float32MultiArray()
    # Integrate the controller deltas into the cached pose (note the x/y
    # swap on position) and publish [position(3), orientation(2), gripper].
    curr_robot_position = np.add(curr_robot_position, np.multiply([pos[1], pos[0], pos[2]], SPEED))
    curr_robot_orientation = np.add(curr_robot_orientation, np.multiply(diff_orientation[0:2], 0.1))
    # check_ranges()
    command_message.data = np.concatenate([curr_robot_position, curr_robot_orientation, [open_gripper]])
    # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0]
    if not pause and robotCharge:
        al5d_publisher.publish(command_message)
# Select the input device declared in conf.ini and subscribe to its topic.
# NOTE(review): `controller` is only defined when controller_type matches
# one of the two cases below.
controller_type = config["controller"]["type"]
rospy.init_node(config["arm"]["type"] + '_controller')
if controller_type == "psmove":
    rospy.Subscriber(config["channels"][controller_type], Float32MultiArray, command_received)
    controller = PSmoveController() # default controller
if controller_type == "ps4_controller":
    rospy.Subscriber(config["channels"][controller_type], sensor_msgs.msg.Joy, command_received)
    controller = PS4Controller() # default controller
# rospy.Subscriber('/' + config["arm"]["type"] + '_arm_driver/out/tool_position',
# geometry_msgs.msg.PoseStamped, robot_position_callback)
# Arm commands are published on /move_info.
al5d_publisher = rospy.Publisher('/move_info',
                                 Float32MultiArray, queue_size=100)
def main():
    """Spin the ROS node so subscriber callbacks keep firing."""
    rospy.spin()
if __name__ == '__main__':
    main()
| en | 0.39298 | #!/usr/bin/env python #[-13.5040283203125, 69.99999999999994, 330.0, -0.10000000000000001, 0.0, 1, 0, human] # MIN, CENTER, MAX # BASE # SHOULDER # ELBOW # WRIST # WRIST_ROTATE # GRIPPER # def robot_position_callback(msg): # global curr_robot_position # global curr_robot_orientation # # curr_robot_position = [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z] # curr_robot_orientation = [msg.pose.orientation.x, msg.pose.orientation.y, # msg.pose.orientation.z, msg.pose.orientation.w] # def cartesian_velocity_client(linear, angular): # """Send a cartesian goal to the action server.""" # vel_goal = geometry_msgs.msg.TwistStamped() # vel_goal.header = std_msgs.msg.Header(frame_id=(config["arm"]["type"] + '_api_origin')) # vel_goal.twist.linear = geometry_msgs.msg.Vector3( # x=linear[0], y=linear[1], z=linear[2]) # vel_goal.twist.angular = geometry_msgs.msg.Vector3( # x=angular[0], y=angular[1], z=angular[2]) # # velocity_topic.publish(vel_goal) # check_ranges() # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0] # cartesian_velocity_client(pos, diff_orientation) # check_ranges() # command_message.data = [-13.5040283203125, 250, 250.0, -0.10000000000000001, 0.0, 1, 0, 0] # default controller # default controller # rospy.Subscriber('/' + config["arm"]["type"] + '_arm_driver/out/tool_position', # geometry_msgs.msg.PoseStamped, robot_position_callback) | 2.059845 | 2 |
hacker/challenges/crypto/newsgroup_cipher.py | Tenebrar/codebase | 1 | 6612479 | from hacker.ciphers import rot
from hacker.decoder import decode
# ROT13-encoded challenge text (the message itself says it is rot 13).
value = 'Guvf zrffntr vf rapelcgrq va ebg 13. Lbhe nafjre vf svfupnxr.'
# decode() applies the given per-character cipher via the project helpers.
result = decode(value, lambda x: rot(x, 13))
print(result)
| from hacker.ciphers import rot
from hacker.decoder import decode
# ROT13-encoded challenge text (the message itself says it is rot 13).
value = 'Guvf zrffntr vf rapelcgrq va ebg 13. Lbhe nafjre vf svfupnxr.'
# decode() applies the given per-character cipher via the project helpers.
result = decode(value, lambda x: rot(x, 13))
print(result)
| none | 1 | 2.99236 | 3 | |
26_remove_dupes/remove_dupes.py | daniel-hocking/leetcode | 0 | 6612480 | <reponame>daniel-hocking/leetcode
'''
Description: Given a sorted array nums, remove the duplicates in-place
such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this by
modifying the input array in-place with O(1) extra memory.
eg.
Input: [0,0,1,1,1,2,2,3,3,4]
Output: 5
and array = [0, 1, 2, 3, 4]
Written by: <NAME>
Date created: 26/05/2018
https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/
'''
class Solution:
    """LeetCode 26: remove duplicates from a sorted array in place."""

    def removeDuplicates(self, nums):
        """Compact the unique values to the front of *nums* in place and
        return how many there are; the tail beyond that count is untouched.

        :type nums: List[int]
        :rtype: int
        """
        length = len(nums)
        if length < 2:
            return length
        write = 0
        read = 1
        while read < length:
            # A value differing from the last kept one starts a new run.
            if nums[read] != nums[write]:
                write += 1
                nums[write] = nums[read]
            read += 1
        return write + 1

    # NOTE: O(n^2) variant kept for reference -- too slow, it timed out
    # on the last LeetCode test case.
    def removeDuplicatesOld(self, nums):
        """Rotate each duplicate to the tail of the live region and return
        the number of unique values left at the front.

        :type nums: List[int]
        :rtype: int
        """
        total = len(nums)
        if total < 2:
            return total
        live = total
        idx = 0
        while idx < live - 1:
            if nums[idx] != nums[idx + 1]:
                idx += 1
                continue
            # Shrink the live region and rotate the duplicate rightwards.
            live -= 1
            carry = nums[idx + 1]
            for j in range(live, idx, -1):
                nums[j], carry = carry, nums[j]
        return live
# Doctest harness: dedupe `nums` in place via Solution and return
# (new length, kept prefix).
def test_remove_duplicates(nums):
    '''
    >>> test_remove_duplicates([])
    (0, [])
    >>> test_remove_duplicates([1, 2])
    (2, [1, 2])
    >>> test_remove_duplicates([1, 2, 2, 4])
    (3, [1, 2, 4])
    >>> test_remove_duplicates([1, 1, 1, 1, 2, 2, 4])
    (3, [1, 2, 4])
    >>> test_remove_duplicates([0,0,1,1,1,2,2,3,3,4])
    (5, [0, 1, 2, 3, 4])
    '''
    sol = Solution()
    num = sol.removeDuplicates(nums)
    # Keep only the deduplicated prefix the solver reported.
    return num, nums[:num:]
if __name__ == '__main__':
    # Run the doctests embedded above.
    import doctest
    doctest.testmod()
| '''
Description: Given a sorted array nums, remove the duplicates in-place
such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this by
modifying the input array in-place with O(1) extra memory.
eg.
Input: [0,0,1,1,1,2,2,3,3,4]
Output: 5
and array = [0, 1, 2, 3, 4]
Written by: <NAME>
Date created: 26/05/2018
https://leetcode.com/problems/remove-duplicates-from-sorted-array/description/
'''
class Solution:
    # LeetCode 26: remove duplicates from a sorted array in place.
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # O(n) two-pointer compaction: `pointer` marks the last unique slot.
        original_len = len(nums)
        if original_len < 2:
            return original_len
        pointer = 0
        for i in range(1, original_len):
            if nums[i] != nums[pointer]:
                pointer += 1
                nums[pointer] = nums[i]
        return pointer + 1
    '''
    Too slow, times out on last test case
    '''
    def removeDuplicatesOld(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # O(n^2) variant: rotates each duplicate to the tail of the live
        # region, shrinking `current_len` as it goes.
        original_len = current_len = len(nums)
        if original_len < 2:
            return original_len
        pointer = 0
        while pointer < (current_len - 1):
            if nums[pointer] == nums[pointer + 1]:
                current_len -= 1
                prev_num = nums[pointer + 1]
                for i in range(current_len, pointer, -1):
                    nums[i], prev_num = prev_num, nums[i]
            else:
                pointer += 1
        return current_len
# Doctest harness: dedupe `nums` in place via Solution and return
# (new length, kept prefix).
def test_remove_duplicates(nums):
    '''
    >>> test_remove_duplicates([])
    (0, [])
    >>> test_remove_duplicates([1, 2])
    (2, [1, 2])
    >>> test_remove_duplicates([1, 2, 2, 4])
    (3, [1, 2, 4])
    >>> test_remove_duplicates([1, 1, 1, 1, 2, 2, 4])
    (3, [1, 2, 4])
    >>> test_remove_duplicates([0,0,1,1,1,2,2,3,3,4])
    (5, [0, 1, 2, 3, 4])
    '''
    sol = Solution()
    num = sol.removeDuplicates(nums)
    # Keep only the deduplicated prefix the solver reported.
    return num, nums[:num:]
if __name__ == '__main__':
    # Run the doctests embedded above.
    import doctest
    doctest.testmod()
seqtools/utils.py | jd-jones/seqtools | 1 | 6612481 | <gh_stars>1-10
import warnings
import collections
import logging
import numpy as np
from matplotlib import pyplot as plt
logger = logging.getLogger(__name__)
def smoothCounts(
        edge_counts, state_counts, init_states, final_states, num_states=None,
        init_regularizer=0, final_regularizer=0,
        uniform_regularizer=0, diag_regularizer=0,
        override_transitions=False, structure_only=False, as_numpy=False, as_scores=False):
    """Turn raw n-gram counts into regularized transition/initial/final arrays.

    Parameters
    ----------
    edge_counts : dict mapping (i, j) -> count of transitions i -> j
    state_counts : dict mapping state -> unigram count; only used to infer
        num_states when that is not supplied
    init_states, final_states : dict mapping state -> count of sequences
        beginning / ending in that state
    num_states : int, optional -- defaults to max(state_counts) + 1
    init_regularizer, final_regularizer : number -- extra self-transition
        mass added to states observed sequence-initially / sequence-finally
    uniform_regularizer : number -- additive smoothing on every bigram cell
    diag_regularizer : number -- additive smoothing on the diagonal
    override_transitions : bool -- replace bigram counts with all ones
    structure_only : bool -- binarize all counts before normalizing
    as_numpy : bool -- cast the outputs to float ndarrays and return early
    as_scores : bool -- return log-probabilities instead of probabilities

    Returns
    -------
    (transition_probs, initial_probs, final_probs)
    """
    if num_states is None:
        num_states = max(state_counts.keys()) + 1

    # NOTE: the old unigram_counts array was computed but never used; removed.
    bigram_counts = np.zeros((num_states, num_states))
    for (i, j), count in edge_counts.items():
        bigram_counts[i, j] = count

    initial_counts = np.zeros(num_states)
    for i, count in init_states.items():
        initial_counts[i] = count

    final_counts = np.zeros(num_states)
    for i, count in final_states.items():
        final_counts[i] = count

    # Regularize the heck out of these counts
    for i in initial_counts.nonzero()[0]:
        bigram_counts[i, i] += init_regularizer
    for i in final_counts.nonzero()[0]:
        bigram_counts[i, i] += final_regularizer
    bigram_counts += uniform_regularizer
    diag_indices = np.diag_indices(bigram_counts.shape[0])
    bigram_counts[diag_indices] += diag_regularizer

    if override_transitions:
        logger.info('Overriding bigram_counts with an array of all ones')
        bigram_counts = np.ones_like(bigram_counts)

    if structure_only:
        bigram_counts = (bigram_counts > 0).astype(float)
        initial_counts = (initial_counts > 0).astype(float)
        final_counts = (final_counts > 0).astype(float)

    # Row-normalize; rows with zero total mass become all-zero rows (not NaN).
    denominator = bigram_counts.sum(1)
    transition_probs = np.divide(
        bigram_counts, denominator[:, None],
        out=np.zeros_like(bigram_counts),
        where=denominator[:, None] != 0
    )
    final_probs = np.divide(
        final_counts, denominator,
        out=np.zeros_like(final_counts),
        where=denominator != 0
    )
    initial_probs = initial_counts / initial_counts.sum()

    if as_numpy:
        # BUG FIX: these are numpy arrays, which have no .numpy() method
        # (that is the torch.Tensor API); the old code raised AttributeError.
        def to_numpy(x):
            return np.asarray(x).astype(float)
        return tuple(map(to_numpy, (transition_probs, initial_probs, final_probs)))

    if as_scores:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='divide by zero')
            transition_probs = np.log(transition_probs)
            initial_probs = np.log(initial_probs)
            final_probs = np.log(final_probs)

    return transition_probs, initial_probs, final_probs
def countSeqs(seqs):
    """Accumulate n-gram statistics over a collection of sequences.

    Parameters
    ----------
    seqs : iterable( iterable(Hashable) )

    Returns
    -------
    four defaultdict(int) maps: bigram (pair -> count), unigram
    (state -> count), initial (first state -> count), and final
    (last state -> count).
    """
    bigrams = collections.defaultdict(int)
    unigrams = collections.defaultdict(int)
    initials = collections.defaultdict(int)
    finals = collections.defaultdict(int)

    for seq in seqs:
        initials[seq[0]] += 1
        finals[seq[-1]] += 1
        for token in seq:
            unigrams[token] += 1
        for pair in zip(seq, seq[1:]):
            bigrams[pair] += 1

    return bigrams, unigrams, initials, finals
def plot_transitions(fn, transition_probs, initial_probs, final_probs):
    """Save a matrix plot of transition_probs to the file fn.

    NOTE(review): initial_probs and final_probs are accepted but never
    rendered -- confirm whether they were meant to be plotted as well.
    """
    plt.figure()
    plt.matshow(transition_probs)
    plt.title('Transitions')
    plt.savefig(fn)
    plt.close()
| import warnings
import collections
import logging
import numpy as np
from matplotlib import pyplot as plt
logger = logging.getLogger(__name__)
def smoothCounts(
edge_counts, state_counts, init_states, final_states, num_states=None,
init_regularizer=0, final_regularizer=0,
uniform_regularizer=0, diag_regularizer=0,
override_transitions=False, structure_only=False, as_numpy=False, as_scores=False):
if num_states is None:
num_states = max(state_counts.keys()) + 1
bigram_counts = np.zeros((num_states, num_states))
for (i, j), count in edge_counts.items():
bigram_counts[i, j] = count
unigram_counts = np.zeros(num_states)
for i, count in state_counts.items():
unigram_counts[i] = count
initial_counts = np.zeros(num_states)
for i, count in init_states.items():
initial_counts[i] = count
final_counts = np.zeros(num_states)
for i, count in final_states.items():
final_counts[i] = count
# Regularize the heck out of these counts
initial_states = initial_counts.nonzero()[0]
for i in initial_states:
bigram_counts[i, i] += init_regularizer
final_states = final_counts.nonzero()[0]
for i in final_states:
bigram_counts[i, i] += final_regularizer
bigram_counts += uniform_regularizer
diag_indices = np.diag_indices(bigram_counts.shape[0])
bigram_counts[diag_indices] += diag_regularizer
if override_transitions:
logger.info('Overriding bigram_counts with an array of all ones')
bigram_counts = np.ones_like(bigram_counts)
if structure_only:
bigram_counts = (bigram_counts > 0).astype(float)
initial_counts = (initial_counts > 0).astype(float)
final_counts = (final_counts > 0).astype(float)
denominator = bigram_counts.sum(1)
transition_probs = np.divide(
bigram_counts, denominator[:, None],
out=np.zeros_like(bigram_counts),
where=denominator[:, None] != 0
)
final_probs = np.divide(
final_counts, denominator,
out=np.zeros_like(final_counts),
where=denominator != 0
)
initial_probs = initial_counts / initial_counts.sum()
if as_numpy:
def to_numpy(x):
return x.numpy().astype(float)
return tuple(map(to_numpy, (transition_probs, initial_probs, final_probs)))
if as_scores:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='divide by zero')
transition_probs = np.log(transition_probs)
initial_probs = np.log(initial_probs)
final_probs = np.log(final_probs)
return transition_probs, initial_probs, final_probs
def countSeqs(seqs):
""" Count n-gram statistics on a collection of sequences.
Parameters
----------
seqs : iterable( iterable(Hashable) )
Returns
-------
bigram_counts : collections.defaultdict((Hashable, Hashable) -> int)
unigram_counts : collections.defaultdict(Hashable -> int)
initial_counts : collections.defaultdict(Hashable -> int)
final_counts : collections.defaultdict(Hashable -> int)
"""
bigram_counts = collections.defaultdict(int)
unigram_counts = collections.defaultdict(int)
initial_counts = collections.defaultdict(int)
final_counts = collections.defaultdict(int)
for seq in seqs:
initial_counts[seq[0]] += 1
final_counts[seq[-1]] += 1
for state in seq:
unigram_counts[state] += 1
for prev, cur in zip(seq[:-1], seq[1:]):
bigram_counts[prev, cur] += 1
return bigram_counts, unigram_counts, initial_counts, final_counts
def plot_transitions(fn, transition_probs, initial_probs, final_probs):
plt.figure()
plt.matshow(transition_probs)
plt.title('Transitions')
plt.savefig(fn)
plt.close() | en | 0.354604 | # Regularize the heck out of these counts Count n-gram statistics on a collection of sequences. Parameters ---------- seqs : iterable( iterable(Hashable) ) Returns ------- bigram_counts : collections.defaultdict((Hashable, Hashable) -> int) unigram_counts : collections.defaultdict(Hashable -> int) initial_counts : collections.defaultdict(Hashable -> int) final_counts : collections.defaultdict(Hashable -> int) | 2.119016 | 2 |
tests/server/__init__.py | bodik/sner4-web | 9 | 6612482 | # This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
sner.server tests
"""
from flask import url_for
def get_csrf_token(clnt):
    """fetch index and parse csrf token"""
    # GET the index page and pull the token from the <meta name="csrf-token">
    # tag rendered by the base template.
    response = clnt.get(url_for('index_route'))
    return response.lxml.xpath('//meta[@name="csrf-token"]/@content')[0]
class DummyPostData(dict):
    """dict subclass emulating form POST data for edge-case tests"""

    def getlist(self, key):
        """multi-value accessor; modeled on the wtforms testsuite"""
        value = self[key]
        if isinstance(value, (list, tuple)):
            return value
        return [value]
| # This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.
"""
sner.server tests
"""
from flask import url_for
def get_csrf_token(clnt):
"""fetch index and parse csrf token"""
response = clnt.get(url_for('index_route'))
return response.lxml.xpath('//meta[@name="csrf-token"]/@content')[0]
class DummyPostData(dict):
"""used for testing edge-cases on forms processing"""
def getlist(self, key):
"""accessor; taken from wtforms testsuite"""
v = self[key] # pylint: disable=invalid-name
if not isinstance(v, (list, tuple)):
v = [v] # pylint: disable=invalid-name
return v
| en | 0.759254 | # This file is part of sner4 project governed by MIT license, see the LICENSE.txt file. sner.server tests fetch index and parse csrf token used for testing edge-cases on forms processing accessor; taken from wtforms testsuite # pylint: disable=invalid-name # pylint: disable=invalid-name | 2.206264 | 2 |
app/models.py | kevahere/blog | 0 | 6612483 | from . import DB
| from . import DB
| none | 1 | 1.157068 | 1 | |
src/RE.py | jblowe/RE | 0 | 6612484 | <filename>src/RE.py<gh_stars>0
import itertools
import read
import regex as re
import os
import sys
import serialize
import collections
import mel
from copy import deepcopy
class Debug:
    # Module-wide switch: when True, tokenization records verbose parse traces.
    debug = False
class SyllableCanon:
    """Phonotactic template constraining which correspondence sequences
    count as well-formed proto-syllables."""
    def __init__(self, sound_classes, syllable_regex, supra_segmentals, context_match_type):
        # sound-class name -> list of member glyphs (used to expand contexts)
        self.sound_classes = sound_classes
        # compiled (regex module) pattern over syllable-type letters
        self.regex = re.compile(syllable_regex)
        # segments (e.g. tones) skipped over when matching contexts
        self.supra_segmentals = supra_segmentals
        # 'glyphs' selects prefix matching of contexts; anything else selects
        # exact membership (see next_correspondence_map)
        self.context_match_type = context_match_type
class Correspondence:
    """One sound-correspondence rule: a proto segment, the daughter reflexes
    it predicts, and the context/syllable restrictions on its use."""
    def __init__(self, id, context, syllable_types, proto_form, daughter_forms):
        self.id = id
        # context is a tuple of left and right contexts
        self.context = context
        # syllable-type letters this segment may fill (matched by the canon)
        self.syllable_types = syllable_types
        self.proto_form = proto_form
        # daughter forms indexed by language
        self.daughter_forms = daughter_forms
    def __repr__(self):
        return f'<Correspondence({self.id}, {self.syllable_types}, {self.proto_form})>'
class Lexicon:
    """A bag of forms for one language, attested or reconstructed, with
    optional reconstruction statistics attached."""

    def __init__(self, language, forms, statistics=None):
        self.language = language
        self.forms = forms
        self.statistics = statistics

    def key_forms_by_glyphs_and_gloss(self):
        """Index this lexicon's forms by their (glyphs, gloss) pair."""
        keyed = {}
        for form in self.forms:
            keyed[(form.glyphs, form.gloss)] = form
        return keyed
def correspondences_as_proto_form_string(cs):
    """Spell out a reconstruction by concatenating each proto-form glyph."""
    glyphs = []
    for c in cs:
        glyphs.append(c.proto_form)
    return ''.join(glyphs)
def correspondences_as_ids(cs):
    """Render correspondence ids right-aligned in 4-character columns."""
    return ' '.join(f'{c.id!s:>4}' for c in cs)
def syllable_structure(cs):
    """Render the syllable-type annotation of a correspondence sequence,
    parenthesizing segments that allow several types."""
    rendered = []
    for c in cs:
        types = c.syllable_types
        rendered.append('({})'.format(','.join(types)) if len(types) > 1 else types[0])
    return ''.join(rendered)
def pretty_join(c):
    """Render a syllable-type list: bare when single, '(a,b)' when several."""
    if len(c) > 1:
        return '({})'.format(','.join(c))
    return c[0]
def context_as_string(context):
    """Serialize a (left, right) context pair as 'l1,l2_r1,r2'; the
    unrestricted context (None, None) serializes to ''."""
    if context == (None, None):
        return ''
    left, right = context
    return ','.join(left or '') + '_' + ','.join(right or '')
def read_context_from_string(string):
    """Parse 'left_right' context notation back into a (left, right) pair
    of glyph lists; '' means unrestricted and an empty side means None."""
    if string == '':
        return (None, None)
    def parse_side(side):
        if side == '':
            return None
        return [item.strip() for item in side.split(',')]
    return tuple(parse_side(side) for side in string.split('_'))
# build a map from tokens to lists of correspondences containing the
# token key.
# also return all possible token lengths
def partition_correspondences(correspondences, accessor):
    """Partition correspondences by the tokens accessor extracts from them.

    Returns (token -> list of correspondences containing it, list of the
    distinct token lengths observed).

    Robustness fix: an empty correspondence collection now yields ({}, [])
    instead of raising TypeError (set.union was called with no arguments).
    """
    partitions = collections.defaultdict(list)
    lengths = set()
    for correspondence in correspondences:
        tokens = accessor(correspondence)
        for token in tokens:
            partitions[token].append(correspondence)
        lengths.update(len(token) for token in tokens)
    return partitions, list(lengths)
# imperative interface
class TableOfCorrespondences:
    """The full correspondence table for one proto-language: a list of
    Correspondence rules plus the daughter languages they cover."""
    # synthetic word-initial rule ('$') used to anchor context matching
    initial_marker = Correspondence('', (None, None), '', '$', [])
    def __init__(self, family_name, daughter_languages):
        self.correspondences = []
        self.family_name = family_name
        self.daughter_languages = daughter_languages
    def add_correspondence(self, correspondence):
        """Append one rule to the table."""
        self.correspondences.append(correspondence)
    def rule_view(self):
        """Flatten the table into rows of
        [id, syllable types, proto form, outcome, context, languages],
        one row per distinct daughter outcome of each rule."""
        # make a rule view of the form
        # |Rule|Type|*|Outcome|Context|Language(s)
        partitions, lengths = partition_correspondences(self.correspondences,
                                                        lambda c: c.proto_form)
        def outcomes(c):
            # group the daughter languages by the reflex they show for c
            outcomes = collections.defaultdict(list)
            for lang, forms in c.daughter_forms.items():
                for form in forms:
                    outcomes[form].append(lang)
            return outcomes
        return [[c.id, c.syllable_types, c.proto_form,
                 outcome, c.context, langs]
                for token, cs in partitions.items()
                for c in cs
                for outcome, langs in outcomes(c).items()]
class Parameters:
    """Everything needed to reconstruct one proto-language: the
    correspondence table, the syllable canon, and the MEL data."""
    def __init__(self, table, syllable_canon, proto_language_name, mels):
        self.table = table
        self.syllable_canon = syllable_canon
        self.proto_language_name = proto_language_name
        self.mels = mels
    def serialize(self, filename):
        """Write this parameter set back out as a correspondence file."""
        serialize.serialize_correspondence_file(filename, self)
class Form:
    """Base class pairing a language name with a glyph string."""
    def __init__(self, language, glyphs):
        self.language = language
        self.glyphs = glyphs
    def __str__(self):
        return f'{self.language} {self.glyphs}'
class ModernForm(Form):
    """An attested dictionary form: glyphs plus its gloss and source id."""
    def __init__(self, language, glyphs, gloss, id):
        super().__init__(language, glyphs)
        self.gloss = gloss
        self.id = id
    def __str__(self):
        return f'{super().__str__()}\t{self.gloss}\t{self.id}'
class ProtoForm(Form):
    """A reconstructed form: the correspondence sequence that derives it,
    the daughter/intermediate forms supporting it, and its MEL."""
    def __init__(self, language, correspondences, supporting_forms,
                 attested_support, mel):
        # the glyph string is the concatenation of the proto forms of the
        # correspondence sequence
        super().__init__(language,
                         correspondences_as_proto_form_string(
                             correspondences))
        self.correspondences = correspondences
        # direct support: daughter forms (attested or reconstructed)
        self.supporting_forms = supporting_forms
        # transitive closure of support down to attested forms
        self.attested_support = attested_support
        self.mel = mel
    def __str__(self):
        return f'{self.language} *{self.glyphs} = {correspondences_as_ids(self.correspondences)} {syllable_structure(self.correspondences)}'
class ProjectSettings:
    """Project configuration: file locations plus the action-graph
    relations between attested and proto-languages."""
    def __init__(self, directory_path, mel_filename, attested, proto_languages,
                 target, upstream, downstream, other):
        # absolute path to the MEL file, or None when no MELs are configured
        self.mel_filename = (os.path.join(directory_path,
                                          mel_filename)
                             if mel_filename else None)
        self.directory_path = directory_path
        self.attested = attested
        # proto-language name -> correspondence-file name
        self.proto_languages = proto_languages
        self.upstream_target = target
        # proto-language -> daughter languages (reconstruction tree)
        self.upstream = upstream
        self.downstream = downstream
        self.other = other
class Statistics:
    """Accumulates bookkeeping produced during one reconstruction run."""
    def __init__(self):
        # forms that could not be tokenized at all
        self.failed_parses = set()
        # candidate sets supported by only one language (root level only)
        self.singleton_support = set()
        self.summary_stats = {}
        # language -> {'forms': ..., 'no_parses': ..., 'reconstructions': ...}
        self.language_stats = {}
        self.correspondences_used_in_recons = collections.Counter()
        self.correspondences_used_in_sets = collections.Counter()
        self.notes = []
        self.debug_notes = []
    def add_note(self, note):
        """Record a note and echo it to stdout."""
        print(note)
        self.notes.append(note)
    def add_stat(self, stat, value):
        """Record a named summary statistic."""
        self.summary_stats[stat] = value
    def add_debug_note(self, note):
        # deliberately not printed: debug notes can be voluminous
        # print(note)
        self.debug_notes.append(note)
def expanded_contexts(rule, i, sound_classes):
    """Expand side i (0 = left, 1 = right) of rule.context into a flat set
    of glyphs, replacing sound-class names by their member glyphs.

    Returns None when that side of the context is unrestricted.
    """
    side = rule.context[i]
    if side is None:
        return None
    expanded = set()
    for token in side:
        expanded.update(sound_classes.get(token, [token]))
    return expanded
# statically compute which correspondences can actually follow from
# others based on context
def next_correspondence_map(parameters):
    """Precompute which correspondence may follow which, given their
    left/right context restrictions.

    Returns a dict mapping each correspondence (plus the synthetic
    initial marker) to the set of correspondences allowed to follow it.
    Side effect: attaches an .expanded_context attribute to every
    correspondence, with sound-class names expanded to member glyphs.
    """
    regex = parameters.syllable_canon.regex  # NOTE(review): unused here
    sound_classes = parameters.syllable_canon.sound_classes
    correspondences = parameters.table.correspondences
    supra_segmentals = parameters.syllable_canon.supra_segmentals
    context_match_type = parameters.syllable_canon.context_match_type
    # expand out the cover class abbreviations
    for correspondence in correspondences:
        correspondence.expanded_context = (
            expanded_contexts(correspondence, 0, sound_classes),
            expanded_contexts(correspondence, 1, sound_classes))
    def matches_this_left_context(c, last):
        # c's own left context is satisfied by the preceding proto form:
        # prefix match under 'glyphs', exact membership otherwise
        return (c.context[0] is None or
                (any(last.proto_form.startswith(context)
                     for context in c.expanded_context[0])
                 if context_match_type == 'glyphs' else
                 last.proto_form in c.expanded_context[0]))
    def matches_last_right_context(c, last):
        # implements bypassing of suprasegmentals the other way
        if c.proto_form in supra_segmentals:
            return True
        return (last.context[1] is None or
                (any(c.proto_form.startswith(context)
                     for context in last.expanded_context[1])
                 if context_match_type == 'glyphs' else
                 c.proto_form in last.expanded_context[1]))
    def matches_context(c, last):
        # both directions must agree for c to be able to follow last
        return (matches_this_left_context(c, last) and
                matches_last_right_context(c, last))
    next_map = collections.defaultdict(set)
    for c in [parameters.table.initial_marker] + correspondences:
        for nextc in correspondences:
            if matches_context(nextc, c):
                next_map[c].add(nextc)
    return next_map
# tokenize an input string and return the set of all parses
# which also conform to the syllable canon
def make_tokenizer(parameters, accessor, next_map):
    """Build a tokenizer closure over one correspondence table.

    accessor maps a correspondence to the list of tokens it can match in
    the input (its daughter reflexes, or its proto form when parsing
    reconstructions); next_map (from next_correspondence_map) says which
    correspondence may follow which.  The returned tokenize(form,
    statistics) produces the set of all correspondence sequences that
    spell out `form` and satisfy the syllable canon.
    """
    regex = parameters.syllable_canon.regex
    sound_classes = parameters.syllable_canon.sound_classes  # NOTE(review): unused here
    supra_segmentals = parameters.syllable_canon.supra_segmentals
    correspondences = parameters.table.correspondences
    rule_map, token_lengths = partition_correspondences(
        correspondences,
        accessor)
    def tokenize(form, statistics):
        parses = set()    # complete parses that also satisfy the full canon
        attempts = set()  # complete spell-outs, canon-satisfying or not
        def gen(form, parse, last, syllable_parse):
            '''We generate context and "phonotactic" sensitive parses recursively,
            making sure to skip over suprasegmental features when matching
            contexts.
            '''
            # we can abandon parses that we know can't be completed
            # to satisfy the syllable canon. for DEMO93 this cuts the
            # number of branches from 182146 to 61631
            if regex.fullmatch(syllable_parse, partial=True) is None:
                if Debug.debug:
                    pass
                    #filler = '. ' * len(parse)
                    #statistics.add_debug_note(f'{filler}canon cannot match: {len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}')
                return
            if Debug.debug:
                pass
                #filler = '. ' * len(parse)
                #statistics.add_debug_note(f'{filler}{len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}')
            if form == '':
                # check whether the last token's right context had a word final
                # marker or a catch all environment
                if (last.context[1] is None or
                        '#' in last.expanded_context[1]):
                    if regex.fullmatch(syllable_parse):
                        parses.add(tuple(parse))
                    attempts.add(tuple(parse))
                # if the last token was marked as only word final then stop
                if last.context[1] and last.expanded_context[1] == {'#'}:
                    return
            # otherwise keep building parses from epenthesis rules
            for c in rule_map['∅']:
                if c in next_map[last]:
                    for syllable_type in c.syllable_types:
                        gen(form,
                            parse + [c],
                            last if c.proto_form in supra_segmentals else c,
                            syllable_parse + syllable_type)
            if form == '':
                #if Debug.debug:
                #    statistics.add_debug_note(f'reached end of form!')
                return
            # consume a prefix of the form with every rule matching it
            for token_length in token_lengths:
                for c in rule_map[form[:token_length]]:
                    if c in next_map[last]:
                        for syllable_type in c.syllable_types:
                            gen(form[token_length:],
                                parse + [c],
                                last if c.proto_form in supra_segmentals else c,
                                syllable_parse + syllable_type)
        gen(form, [], parameters.table.initial_marker, '')
        if Debug.debug:
            statistics.add_debug_note(f'{len(parses)} reconstructions generated')
            for p in attempts:
                if p in parses:
                    statistics.add_debug_note(f' *{correspondences_as_proto_form_string(p)} - {correspondences_as_ids(p)} {syllable_structure(p)}')
                else:
                    statistics.add_debug_note(f' xx {correspondences_as_proto_form_string(p)} - {correspondences_as_ids(p)} {syllable_structure(p)}')
        return parses
    return tokenize
# set of all possible forms for a daughter language given correspondences
def postdict_forms_for_daughter(correspondences, daughter):
    """All surface strings obtainable by choosing one reflex of `daughter`
    per correspondence and concatenating the choices."""
    per_slot = [c.daughter_forms[daughter] for c in correspondences]
    return frozenset(''.join(choice) for choice in itertools.product(*per_slot))
# return a mapping from daughter language to all possible forms given a proto-form
def postdict_daughter_forms(proto_form, parameters):
    """Predict, for each daughter language, every surface form the
    correspondence table allows for proto_form.

    Returns a dict: language -> frozenset of predicted strings.

    BUG FIX: this previously called make_tokenizer without its required
    next_map argument and tokenize without a statistics object, so it
    raised TypeError on every call.
    """
    postdict = {} # pun?
    next_map = next_correspondence_map(parameters)
    # big speed difference between [c.proto_form] vs c.proto_form
    # c.proto_form does lots of slow substring comparisons. [c.proto_form]
    # parallels the usage of the other daughter form accessors
    tokenize = make_tokenizer(parameters, lambda c: [c.proto_form], next_map)
    statistics = Statistics()
    for cs in iter(tokenize(proto_form, statistics)):
        for language in parameters.table.daughter_languages:
            # NOTE(review): each parse overwrites the previous entry, as the
            # original code did -- confirm whether a union across parses
            # was intended instead.
            postdict[language] = postdict_forms_for_daughter(cs, language)
    return postdict
# return a mapping from reconstructions to its supporting forms
def project_back(lexicons, parameters, statistics):
    """Parse every form in every daughter lexicon against the table.

    Returns (reconstructions, statistics), where reconstructions maps each
    correspondence-sequence parse to the list of daughter forms exhibiting
    it.  Forms that fail to parse are recorded in statistics.failed_parses.
    """
    reconstructions = collections.defaultdict(list)
    next_map = next_correspondence_map(parameters)
    number_of_forms = 0
    for lexicon in lexicons:
        # we don't want to tokenize the same glyphs more than once, so
        # memoize each parse
        memo = {}
        daughter_form = lambda c: c.daughter_forms[lexicon.language]
        count_of_parses = 0
        count_of_no_parses = 0
        tokenize = make_tokenizer(parameters, daughter_form, next_map)
        for form in lexicon.forms:
            if form.glyphs == '':
                continue
            if Debug.debug:
                statistics.add_debug_note(f'!Parsing {form}...')
            if form.glyphs:
                parses = memo.setdefault(form.glyphs, tokenize(form.glyphs, statistics))
            else:
                # glyphs is falsy but not '' (e.g. None): note and skip
                statistics.add_note(f'form missing: {form.language} {form.gloss}')
                parses = None
            if parses:
                for cs in parses:
                    count_of_parses += 1
                    reconstructions[cs].append(form)
            else:
                count_of_no_parses += 1
                statistics.failed_parses.add(form)
        number_of_forms += len(lexicon.forms)
        statistics.language_stats[lexicon.language] = {'forms': len(lexicon.forms), 'no_parses': count_of_no_parses, 'reconstructions': count_of_parses}
        statistics.add_note(f'{lexicon.language}: {len(lexicon.forms)} forms, {count_of_no_parses} no parses, {count_of_parses} reconstructions')
    statistics.correspondences_used_in_recons = count_correspondences_used_in_reconstructions(reconstructions)
    statistics.add_stat('lexicons', len(lexicons))
    statistics.add_stat('reflexes', number_of_forms)
    statistics.add_note(f'{number_of_forms} input forms')
    statistics.keys = reconstructions
    return reconstructions, statistics
def count_correspondences_used_in_reconstructions(reconstructions):
    """Tally how often each correspondence occurs across the parse keys."""
    used = collections.Counter()
    for reconstruction in reconstructions:
        used.update(reconstruction)
    return used
def count_correspondences_used_in_sets(cognate_sets):
    """Tally correspondences over the reconstruction (element 0) of each
    cognate set."""
    used = collections.Counter()
    for cognate_set in cognate_sets:
        used.update(cognate_set[0])
    return used
# we create cognate sets by comparing meaning.
def create_sets(projections, statistics, mels, only_with_mel, root=True):
    """Split each reconstruction's support by meaning (MEL) into cognate sets.

    Each cognate set is a tuple (reconstruction, supporting forms,
    attested support, mel).  At the root level (root=True), sets supported
    by a single language are diverted to statistics.singleton_support.
    Returns (cognate_sets, statistics).
    """
    cognate_sets = set()
    def attested_forms(support):
        # transitive closure of support down to attested (Modern) forms
        attested = set()
        for x in support:
            if isinstance(x, ModernForm):
                attested.add(x)
            else:
                attested |= x.attested_support
        return attested
    def all_glosses(projections):
        # every gloss attested anywhere in the supporting forms
        all_glosses = set()
        for support in projections.values():
            for supporting_form in support:
                if isinstance(supporting_form, ModernForm):
                    if supporting_form.gloss:
                        all_glosses.add(supporting_form.gloss)
        return all_glosses
    associated_mels_table = mel.compile_associated_mels(mels,
                                                        all_glosses(projections))
    def add_cognate_sets(reconstruction, support):
        # partition this reconstruction's support by associated MEL
        distinct_mels = collections.defaultdict(list)
        for supporting_form in support:
            if isinstance(supporting_form, ModernForm):
                for associated_mel in mel.associated_mels(associated_mels_table,
                                                          supporting_form.gloss):
                    # optionally drop forms whose only MEL is the empty one
                    if not (only_with_mel and associated_mel.id == '') or mels is None:
                        distinct_mels[associated_mel].append(supporting_form)
            else:
                # intermediate proto forms carry no gloss; use the default MEL
                distinct_mels[mel.default_mel].append(supporting_form)
        for distinct_mel, support in distinct_mels.items():
            if not root or len({form.language for form in support}) > 1:
                cognate_sets.add((reconstruction,
                                  frozenset(support),
                                  frozenset(attested_forms(support)),
                                  distinct_mel))
            else:
                statistics.singleton_support.add((reconstruction,
                                                  frozenset(support),
                                                  frozenset(attested_forms(support)),
                                                  distinct_mel))
    for reconstruction, support in projections.items():
        add_cognate_sets(reconstruction, support)
    statistics.add_note(
        f'{len(cognate_sets)} sets supported by multiple languages'
        if root else
        f'{len(cognate_sets)} cognate sets')
    statistics.correspondences_used_in_sets = count_correspondences_used_in_sets(cognate_sets)
    return cognate_sets, statistics
# given a collection of sets, we want to find all maximal sets,
# i.e. ones which are not proper subsets of any other set in the
# collection. we do this by partitioning the collection of sets by
# each set's length to reduce unnecessary comparison
def filter_subsets(cognate_sets, statistics, root=True):
    """Discard cognate sets whose support is a proper subset of another
    set's support.  Returns (surviving sets, statistics); the discarded
    sets are also recorded in statistics.subsets."""
    partitions = collections.defaultdict(list)
    # compare on attested support (index 2) at the root, direct support
    # (index 1) below it
    index = 2 if root else 1
    support_class = collections.defaultdict(list)
    # collect all sets with the same support. if one loses, they all lose.
    for cognate_set in cognate_sets:
        support_class[cognate_set[index]].append(cognate_set)
    for support in support_class.keys():
        partitions[len(support)].append(support)
    losers = set()
    # the largest sets are never losers
    for key1, sets1 in sorted(partitions.items(), reverse=True)[1:]:
        # candidate dominators: strictly larger supports not already lost
        larger_sets = [set for key2, sets2 in partitions.items()
                       if key2 > key1
                       for set in sets2
                       if support_class[set][0] not in losers]
        for support_set in sets1:
            for support_set2 in larger_sets:
                if support_set < support_set2:
                    for cognate_set in support_class[support_set]:
                        losers.add(cognate_set)
                    break
    statistics.subsets = losers
    statistics.add_note(f'threw away {len(losers)} subsets')
    statistics.add_stat('subsets_tossed', len(losers))
    return cognate_sets - losers, statistics
# pick a representative derivation, i.e. choose a reconstruction from
# reconstructions with the same supporting forms yielding the same
# surface string
def pick_derivation(cognate_sets, statistics, only_with_mel):
    """Keep one cognate set per (proto-form string, supporting forms) pair.

    NOTE: only_with_mel is accepted for signature compatibility but is
    not consulted here.
    """
    uniques = {}
    for cognate_set in cognate_sets:
        key = (correspondences_as_proto_form_string(cognate_set[0]),
               cognate_set[1])
        uniques[key] = cognate_set
    statistics.add_note(f'{len(uniques)} distinct reconstructions with distinct supporting forms')
    reflexes = sum(len(chosen[1]) for chosen in uniques.values())
    statistics.add_note(f'{reflexes} reflexes in sets')
    statistics.add_stat('reflexes_in_sets', reflexes)
    return uniques.values(), statistics
def batch_upstream(lexicons, params, only_with_mel, root):
    """Run one full reconstruction step over the daughter lexicons:
    project back, build cognate sets, drop subset-supported sets, and
    pick one derivation per set.  Returns (forms, statistics)."""
    return pick_derivation(
        *filter_subsets(
            *create_sets(
                *project_back(lexicons, params, Statistics()),
                params.mels,
                only_with_mel,
                root),
            root),
        only_with_mel)
def upstream_tree(target, tree, param_tree, attested_lexicons, only_with_mel):
    """Reconstruct `target` by recursing down the action-graph `tree` to
    attested leaves and running batch_upstream at each interior node.

    param_tree maps proto-language -> Parameters; attested_lexicons maps
    language -> Lexicon.  Returns the reconstructed Lexicon for target.
    """
    # batch upstream repeatedly up the action graph tree from leaves,
    # which are necessarily attested. we filter forms with singleton
    # supporting sets for the root language
    def rec(target, root):
        if target in attested_lexicons:
            return attested_lexicons[target]
        daughter_lexicons = [rec(daughter, False)
                             for daughter in tree[target]]
        forms, statistics = batch_upstream(daughter_lexicons,
                                           param_tree[target],
                                           only_with_mel,
                                           root)
        return Lexicon(
            target,
            [ProtoForm(target, correspondences, supporting_forms,
                       attested_support, mel)
             for (correspondences, supporting_forms, attested_support, mel)
             in forms],
            statistics)
    return rec(target, True)
def all_parameters(settings):
    # Return a mapping from protolanguage to its associated parameter object
    mapping = {}
    def rec(target):
        # attested languages have no correspondence file of their own
        if target in settings.attested:
            return
        mapping[target] = \
            read.read_correspondence_file(
                os.path.join(settings.directory_path,
                             settings.proto_languages[target]),
                '------',
                list(settings.upstream[target]),
                target,
                settings.mel_filename)
        for daughter in settings.upstream[target]:
            rec(daughter)
    rec(settings.upstream_target)
    return mapping
def batch_all_upstream(settings, only_with_mel=False):
    """Reconstruct all the way up to settings.upstream_target, reading the
    attested lexicons from disk."""
    attested_lexicons = read.read_attested_lexicons(settings)
    return upstream_tree(settings.upstream_target,
                         settings.upstream,
                         all_parameters(settings),
                         attested_lexicons,
                         only_with_mel)
def interactive_upstream(settings, attested_lexicons, only_with_mel=False):
    """Like batch_all_upstream, but reconstructs from caller-supplied
    (possibly edited) attested lexicons instead of reading them from disk."""
    # attested_lexicons are passed in for this type of upstream...
    return upstream_tree(settings.upstream_target,
                         settings.upstream,
                         all_parameters(settings),
                         attested_lexicons,
                         only_with_mel)
def print_form(form, level):
    """Recursively print a form and its supporting forms, indenting one
    space per nesting level."""
    if isinstance(form, ModernForm):
        print(' ' * level + str(form))
    elif isinstance(form, ProtoForm):
        print(' ' * level + str(form) + ' ' +
              correspondences_as_ids(form.correspondences))
        # TODO: the output order should be the 'preferred order', not just alphabetical. but how?
        for supporting_form in sorted(form.supporting_forms, key=lambda x: x.language + x.glyphs):
            print_form(supporting_form, level + 1)
def print_sets(lexicon):
    """Print every cognate set in the lexicon, ordered by the id string of
    its correspondence sequence."""
    # for form in lexicon.forms:
    for form in sorted(lexicon.forms, key=lambda corrs: correspondences_as_ids(corrs.correspondences)):
        print_form(form, 0)
def dump_sets(lexicon, filename):
    """Write the human-readable cognate-set listing for lexicon to filename.

    Temporarily redirects sys.stdout so print_sets can be reused unchanged.
    BUG FIX: the original left sys.stdout pointing at the (closed) file if
    print_sets raised; the stream is now restored unconditionally.
    """
    out = sys.stdout
    try:
        with open(filename, 'w', encoding='utf-8') as handle:
            sys.stdout = handle
            print_sets(lexicon)
    finally:
        sys.stdout = out
def dump_xml_sets(sets, languages, filename, only_with_mel):
    """Write the cognate sets as XML; delegates to the serialize module."""
    serialize.serialize_sets(sets, languages, filename, only_with_mel)
def dump_keys(lexicon, filename):
    """Write a tab-separated listing of every reflex together with its
    reconstruction(s), followed by the forms that failed to parse.

    Output is capped at 100,000 form lines.  BUG FIXES: sys.stdout is now
    restored even if writing raises, and the cap test uses >= so exactly
    100,000 lines (not 100,001) are emitted before the termination notice.
    """
    out = sys.stdout
    forms = []
    try:
        with open(filename, 'w', encoding='utf-8') as handle:
            sys.stdout = handle
            for reconstruction, support in lexicon.statistics.keys.items():
                for support1 in support:
                    forms.append(str(support1) + f'\t*{correspondences_as_proto_form_string(reconstruction)}\t*{correspondences_as_ids(reconstruction)}')
            print('\t'.join('language & form,gloss,id,protoform,correspondences'.split(',')))
            for i, f in enumerate(sorted(forms)):
                if i >= 100000:
                    print('**** output terminated after 100,000 lines')
                    break
                print(f)
            print('***failures')
            for failure in lexicon.statistics.failed_parses:
                print(f'{str(failure)}')
    finally:
        sys.stdout = out
def compare_support(lex1_forms, forms):
    """True when the two collections render to the same multiset of strings.

    FIXME: there's a subtle dependency here on the Form.str method.
    """
    left = sorted(str(item) for item in lex1_forms)
    right = sorted(str(item) for item in forms)
    return left == right
def set_compare(lex1, lex2, languages):
    """Build the reflex/proto-form incidence structure over two set lists.

    Returns (list_of_sf, graph, pfms, refs):
      list_of_sf -- supporting form -> proto forms it supports
      graph      -- reflex label -> list of proto-form labels
      pfms, refs -- the distinct proto-form and reflex labels
    NOTE(review): `languages` and `overlap` are currently unused.
    """
    union = lex1 + lex2
    list_of_sf = collections.defaultdict(list)
    overlap = collections.defaultdict(list)
    for i in union:
        # print(i)
        for j in i.supporting_forms:
            # print(f'  {j}')
            if not i in list_of_sf[j]:
                list_of_sf[j].append(i)
    graph = collections.defaultdict(list)
    pfms = set()
    for i, form in enumerate(list_of_sf):
        sets = list_of_sf[form]
        for protoform in sets:
            # short human-readable labels for display
            pformshort = f'{protoform.glyphs} {correspondences_as_ids(protoform.correspondences)}'
            reflexshort = f'{form.language} {form.glyphs} {form.gloss}'
            pfms.add(pformshort)
            if not pformshort in graph[reflexshort]:
                graph[reflexshort].append(pformshort)
    pfms = list(pfms)
    refs = list(graph.keys())
    return list_of_sf, graph, pfms, refs
def make_set(l1, l2, diff, union):
    """Assemble a ProtoForm whose support is copies of the forms named in
    `diff`, each tagged with a .membership attribute giving which side of
    the comparison it came from.  NOTE(review): l2 is unused.
    """
    MF = []
    for s in sorted(diff):
        for x in diff[s]:
            # deep-copy so the membership tag does not mutate the original
            m = deepcopy(union[x])
            m.membership = s
            MF.append(m)
    # ProtoForm(lexicon.language, correspondences, supporting_forms, attested_support, mel)
    return ProtoForm(l1.language,
                     l1.correspondences,
                     MF,
                     MF,
                     l1.mel
                     )
def compare_proto_lexicons(lexicon1, lexicon2, languages):
    """Compare two proto-lexicons set-by-set and report agreement statistics.

    Two sets match when they share the same glyphs and the same supporting
    forms (see compare_support).  Treating lexicon1 as gold, precision,
    recall and F-score are computed over matched sets.

    BUG FIX: the original raised ZeroDivisionError when a lexicon was
    empty or when no sets matched; those cases now yield 0.0 scores.

    Returns a dict of counts, scores, the common/only-in-1/only-in-2 set
    lists, and the reflex/proto-form incidence structure from set_compare.
    """
    table = collections.defaultdict(list)
    common = set()
    only_lex2 = set()
    # index lexicon1's sets by surface glyphs for quick candidate lookup
    for form in lexicon1.forms:
        table[form.glyphs].append(form)
    for form in lexicon2.forms:
        if table[form.glyphs] == []:
            only_lex2.add(form)
        else:
            lex1_forms = table[form.glyphs]
            form_matched = False
            for lex1_form in lex1_forms:
                if compare_support(lex1_form.supporting_forms, form.supporting_forms):
                    common.add(lex1_form)
                    form_matched = True
            if not form_matched:
                only_lex2.add(form)
    only_lex1 = set(lexicon1.forms).difference(common)
    ncommon = len(common)
    nl1 = len(lexicon1.forms)
    nl2 = len(lexicon2.forms)
    # guard all three denominators (previously ZeroDivisionError)
    precision = ncommon / nl1 if nl1 else 0.0
    recall = ncommon / nl2 if nl2 else 0.0
    fscore = (2 * (precision * recall) / (precision + recall)
              if precision + recall else 0.0)
    # TODO: is it useful to simply print these stats. Ever?
    print(f'Number of sets in lexicon 1: {nl1}')
    print(f'Number of sets in lexicon 2: {nl2}')
    print(f'Number of sets in common: {ncommon}')
    print(f'Number of sets only in lexicon 1: {len(only_lex1)}')
    print(f'Number of sets only in lexicon 2: {len(only_lex2)}')
    print('Assuming set 1 is gold:')
    print(f' Precision: {precision}')
    print(f' Recall: {recall}')
    print(f' F-score: {fscore}')
    # TODO: figure out how to render the set differences better.
    list_of_sf, graph, pfms, refs = set_compare(list(only_lex1), list(only_lex2), languages)
    return {
        'number_of_sets_in_lexicon_1': nl1,
        'number_of_sets_in_lexicon_2': nl2,
        'number_of_sets_in_common': ncommon,
        'number_of_sets_only_in_lexicon_1': len(only_lex1),
        'number_of_sets_only_in_lexicon_2': len(only_lex2),
        'venn': f'{len(only_lex1)},{len(only_lex2)},{ncommon}',
        'precision': ('{:04.3f}'.format(precision), 'float'),
        'recall': ('{:04.3f}'.format(recall), 'float'),
        'fscore': ('{:04.3f}'.format(fscore), 'float'),
        'sets_in_common': list(common),
        'sets_only_in_lexicon1': list(only_lex1),
        'sets_only_in_lexicon2': list(only_lex2),
        'list_of_sf': list_of_sf,
        'number_of_pfms': len(pfms),
        'number_of_refs': len(refs),
        'graph': graph,
        'pfms': pfms,
        'refs': refs
    }
# def compare_isomorphic_proto_lexicons(lexicon1, lexicon2, compare_type):
# # replace_underlying_lexicons(lexicon1, attested_lexicons)
# # replace_underlying_lexicons(lexicon2, attested_lexicons)
# return compare_proto_lexicons(lexicon1, lexicon2)
# create a fake cognate set with the forms that failed to reconstruct
def extract_failures(lexicon):
    """Wrap the forms that failed to parse in a fake 'failed' cognate set so
    they can be displayed/serialized like any other Lexicon."""
    return Lexicon(
        lexicon.language,
        # one synthetic ProtoForm holding every failure, sorted by language
        [ProtoForm('failed', (),
                   sorted(lexicon.statistics.failed_parses, key=lambda x: x.language),
                   (), [])],
        # bug fix: a stray `[]` third positional argument was removed --
        # Lexicon() takes (language, forms, statistics) and the extra
        # positional made this call raise TypeError
        lexicon.statistics)
# create "cognate sets" for the isolates
# (and we need to check to see that the singletons really are isolates -- not in any set)
def extract_isolates(lexicon):
    """Build "cognate sets" for true isolates: singleton-supported
    reconstructions whose supporting form appears in no multi-language set.

    Each entry of statistics.singleton_support is a
    (correspondences, supporting_forms, attested_support, mel) tuple.
    """
    forms_used = collections.Counter()

    def is_in(item, used_forms):
        # item[1] is the supporting-form set of a singleton candidate
        for form in item[1]:
            if form in used_forms:
                return True
        return False

    # renamed loop variable: the original shadowed the builtin `set`
    for cognate_set in lexicon.forms:
        forms_used.update(cognate_set.supporting_forms)
    isolates = [item for item in lexicon.statistics.singleton_support
                if not is_in(item, forms_used)]
    # keep only the first candidate seen for each supporting form
    seen = set()
    new_isolates = []
    for item in isolates:
        supporting_form = next(iter(item[1]))
        if supporting_form not in seen:
            seen.add(supporting_form)
            new_isolates.append(item)
    return [ProtoForm(lexicon.language, correspondences, supporting_forms, attested_support, mel)
            for (correspondences, supporting_forms, attested_support, mel)
            in new_isolates]
# given a proto lexicon whose underlying attested forms are drawn
# from lexicons isomorphic to attested_lexicons, destructively replace
# the in-memory Form objects with those in attested_lexicons.
# daughter_lexicons is a hash table mapping language -> Lexicon.
# only works for non-tree lexicons for now.
def replace_underlying_lexicons(proto_lexicon, attested_lexicons):
    """Destructively re-point a proto-lexicon's supporting/attested forms at
    the in-memory Form objects of *attested_lexicons*, matched on
    (glyphs, gloss).

    attested_lexicons maps language -> Lexicon. Only works for non-tree
    lexicons for now (nested ProtoForm support is not interned recursively).
    """
    keyed_forms = {language: lexicon.key_forms_by_glyphs_and_gloss()
                   for (language, lexicon) in attested_lexicons.items()}
    for form in proto_lexicon.forms:
        def intern(form_set):
            # the comprehension's `form` deliberately shadows the outer loop
            # variable: each member of form_set is looked up by its own keys
            return frozenset((keyed_forms[form.language][(form.glyphs, form.gloss)]
                              for form in form_set))
        form.attested_support = intern(form.attested_support)
        form.supporting_forms = intern(form.supporting_forms)
# Given a lexicon of protoforms, return a mapping between cognate sets
# and possible reconstructions.
def collate_proto_lexicon(proto_lexicon):
    """Group candidate reconstructions by the cognate set supporting them.

    Returns a mapping from each protoform's supporting_forms to the list of
    reconstructions built on that support.
    """
    by_support = collections.defaultdict(list)
    for candidate in proto_lexicon:
        by_support[candidate.supporting_forms].append(candidate)
    return by_support
| <filename>src/RE.py<gh_stars>0
import itertools
import read
import regex as re
import os
import sys
import serialize
import collections
import mel
from copy import deepcopy
class Debug:
    # Global switch: when True, tokenization records verbose per-parse notes.
    debug = False
class SyllableCanon:
    """Phonotactic template for a proto-language: named sound classes, the
    syllable regex every parse must satisfy, the suprasegmental symbols that
    are skipped during context matching, and the context match mode
    ('glyphs' prefix matching vs. exact membership)."""
    def __init__(self, sound_classes, syllable_regex, supra_segmentals, context_match_type):
        self.sound_classes = sound_classes
        # compiled with the third-party `regex` module (supports partial matching)
        self.regex = re.compile(syllable_regex)
        self.supra_segmentals = supra_segmentals
        self.context_match_type = context_match_type
class Correspondence:
    """One row of a correspondence table: a proto segment, the contexts and
    syllable positions where it applies, and its reflexes per daughter."""
    def __init__(self, id, context, syllable_types, proto_form, daughter_forms):
        self.id = id
        # context is a tuple of left and right contexts
        self.context = context
        # syllable-position codes this correspondence may occupy
        self.syllable_types = syllable_types
        self.proto_form = proto_form
        # daughter forms indexed by language
        self.daughter_forms = daughter_forms
    def __repr__(self):
        return f'<Correspondence({self.id}, {self.syllable_types}, {self.proto_form})>'
class Lexicon:
    """A collection of forms (attested or reconstructed) for one language,
    optionally carrying the Statistics of the run that produced it."""
    def __init__(self, language, forms, statistics=None):
        self.language = language
        self.forms = forms
        self.statistics = statistics
    def key_forms_by_glyphs_and_gloss(self):
        # later duplicates of a (glyphs, gloss) pair overwrite earlier ones
        return {(form.glyphs, form.gloss): form for form in self.forms}
def correspondences_as_proto_form_string(cs):
    """Concatenate the proto glyphs of a correspondence sequence into one string."""
    glyphs = [correspondence.proto_form for correspondence in cs]
    return ''.join(glyphs)
def correspondences_as_ids(cs):
    """Render a correspondence sequence as space-separated ids, each padded to
    at least 4 characters (right-aligned)."""
    padded = ['{:>4}'.format(str(correspondence.id)) for correspondence in cs]
    return ' '.join(padded)
def syllable_structure(cs):
    """Render the syllable-type pattern of a correspondence sequence,
    e.g. 'C(V,G)C' when a slot admits alternatives."""
    parts = [pretty_join(correspondence.syllable_types) for correspondence in cs]
    return ''.join(parts)
def pretty_join(c):
    """Render a list of alternatives: several items as '(a,b)', one item bare."""
    if len(c) > 1:
        return '(' + ','.join(c) + ')'
    return c[0]
def context_as_string(context):
    """Serialize a (left, right) context pair as 'l1,l2_r1,r2'.

    The empty context (None, None) serializes to ''; a None side becomes the
    empty string on its side of the underscore.
    """
    if context == (None, None):
        return ''
    left, right = context
    return ','.join(left or '') + '_' + ','.join(right or '')
def read_context_from_string(string):
    """Inverse of context_as_string: parse 'l1,l2_r1,r2' into a (left, right)
    pair; '' yields (None, None) and an empty side yields None."""
    if string == '':
        return (None, None)
    def parse_side(side):
        if side == '':
            return None
        return [token.strip() for token in side.split(',')]
    return tuple(parse_side(side) for side in string.split('_'))
# build a map from tokens to lists of correspondences containing the
# token key.
# also return all possible token lengths
def partition_correspondences(correspondences, accessor):
    """Index correspondences by each token *accessor* yields for them.

    Returns (token -> list of correspondences containing that token,
    list of all distinct token lengths).
    """
    partitions = collections.defaultdict(list)
    for correspondence in correspondences:
        for token in accessor(correspondence):
            partitions[token].append(correspondence)
    all_lengths = set.union(*(set(map(len, accessor(c)))
                              for c in correspondences))
    return partitions, list(all_lengths)
# imperative interface
class TableOfCorrespondences:
    """The correspondence table for one language family (imperative interface:
    rows are appended via add_correspondence)."""
    # sentinel used as the 'previous correspondence' at the start of a parse
    initial_marker = Correspondence('', (None, None), '', '$', [])
    def __init__(self, family_name, daughter_languages):
        self.correspondences = []
        self.family_name = family_name
        self.daughter_languages = daughter_languages
    def add_correspondence(self, correspondence):
        self.correspondences.append(correspondence)
    def rule_view(self):
        # make a rule view of the form
        # |Rule|Type|*|Outcome|Context|Language(s)
        partitions, lengths = partition_correspondences(self.correspondences,
                                                        lambda c: c.proto_form)
        def outcomes(c):
            # invert daughter_forms: reflex -> languages showing that reflex
            outcomes = collections.defaultdict(list)
            for lang, forms in c.daughter_forms.items():
                for form in forms:
                    outcomes[form].append(lang)
            return outcomes
        return [[c.id, c.syllable_types, c.proto_form,
                 outcome, c.context, langs]
                for token, cs in partitions.items()
                for c in cs
                for outcome, langs in outcomes(c).items()]
class Parameters:
    """Everything needed to run reconstruction for one proto-language: the
    correspondence table, its syllable canon, the language name, and the
    minimal-elements-of-meaning (MEL) data."""
    def __init__(self, table, syllable_canon, proto_language_name, mels):
        self.table = table
        self.syllable_canon = syllable_canon
        self.proto_language_name = proto_language_name
        self.mels = mels
    def serialize(self, filename):
        # write this parameter set back out as a correspondence file
        serialize.serialize_correspondence_file(filename, self)
class Form:
    """Base class for any form: a glyph string belonging to some language."""
    def __init__(self, language, glyphs):
        self.language = language
        self.glyphs = glyphs
    def __str__(self):
        return f'{self.language} {self.glyphs}'
class ModernForm(Form):
    """An attested (dictionary) form carrying its gloss and source id."""
    def __init__(self, language, glyphs, gloss, id):
        super().__init__(language, glyphs)
        self.gloss = gloss
        self.id = id
    def __str__(self):
        return f'{super().__str__()}\t{self.gloss}\t{self.id}'
class ProtoForm(Form):
    """A reconstructed form. Its glyph string is derived from the proto side
    of its correspondence sequence; it records the forms that support it
    (which may themselves be ProtoForms in tree reconstructions), the
    attested (modern) forms underlying those, and its MEL."""
    def __init__(self, language, correspondences, supporting_forms,
                 attested_support, mel):
        super().__init__(language,
                         correspondences_as_proto_form_string(
                             correspondences))
        self.correspondences = correspondences
        self.supporting_forms = supporting_forms
        self.attested_support = attested_support
        self.mel = mel
    def __str__(self):
        return f'{self.language} *{self.glyphs} = {correspondences_as_ids(self.correspondences)} {syllable_structure(self.correspondences)}'
class ProjectSettings:
    """File-system layout and language-graph configuration for one project:
    where the data lives, which languages are attested vs. proto, and the
    upstream/downstream reconstruction graph."""
    def __init__(self, directory_path, mel_filename, attested, proto_languages,
                 target, upstream, downstream, other):
        # mel_filename is resolved relative to the project directory
        self.mel_filename = (os.path.join(directory_path,
                                          mel_filename)
                             if mel_filename else None)
        self.directory_path = directory_path
        self.attested = attested
        self.proto_languages = proto_languages
        self.upstream_target = target
        self.upstream = upstream
        self.downstream = downstream
        self.other = other
class Statistics:
    """Accumulates bookkeeping for one reconstruction run: parse failures,
    singleton-supported sets, per-language counts, correspondence usage
    tallies, and human-readable notes."""
    def __init__(self):
        self.failed_parses = set()
        self.singleton_support = set()
        self.summary_stats = {}
        self.language_stats = {}
        self.correspondences_used_in_recons = collections.Counter()
        self.correspondences_used_in_sets = collections.Counter()
        self.notes = []
        self.debug_notes = []
    def add_note(self, note):
        # notes are echoed to stdout as they are recorded
        print(note)
        self.notes.append(note)
    def add_stat(self, stat, value):
        self.summary_stats[stat] = value
    def add_debug_note(self, note):
        # print(note)
        self.debug_notes.append(note)
def expanded_contexts(rule, i, sound_classes):
    """Expand side *i* (0 = left, 1 = right) of a rule's context, replacing
    sound-class names by their member glyphs.

    A None side stays None (meaning: matches in any context).
    """
    side = rule.context[i]
    if side is None:
        return None
    expanded = set()
    for symbol in side:
        # a symbol is either a cover-class abbreviation or a literal glyph
        expanded.update(sound_classes.get(symbol, [symbol]))
    return expanded
# statically compute which correspondences can actually follow from
# others based on context
def next_correspondence_map(parameters):
    """Statically compute, for each correspondence (plus the initial marker),
    the set of correspondences whose contexts permit them to follow it.

    This successor relation lets the tokenizer prune branches before
    recursing. Suprasegmental correspondences bypass right-context checks.
    """
    regex = parameters.syllable_canon.regex  # NOTE(review): unused here
    sound_classes = parameters.syllable_canon.sound_classes
    correspondences = parameters.table.correspondences
    supra_segmentals = parameters.syllable_canon.supra_segmentals
    context_match_type = parameters.syllable_canon.context_match_type
    # expand out the cover class abbreviations
    for correspondence in correspondences:
        correspondence.expanded_context = (
            expanded_contexts(correspondence, 0, sound_classes),
            expanded_contexts(correspondence, 1, sound_classes))
    def matches_this_left_context(c, last):
        # None means 'any left context'
        return (c.context[0] is None or
                (any(last.proto_form.startswith(context)
                     for context in c.expanded_context[0])
                 if context_match_type == 'glyphs' else
                 last.proto_form in c.expanded_context[0]))
    def matches_last_right_context(c, last):
        # implements bypassing of suprasegmentals the other way
        if c.proto_form in supra_segmentals:
            return True
        return (last.context[1] is None or
                (any(c.proto_form.startswith(context)
                     for context in last.expanded_context[1])
                 if context_match_type == 'glyphs' else
                 c.proto_form in last.expanded_context[1]))
    def matches_context(c, last):
        return (matches_this_left_context(c, last) and
                matches_last_right_context(c, last))
    next_map = collections.defaultdict(set)
    for c in [parameters.table.initial_marker] + correspondences:
        for nextc in correspondences:
            if matches_context(nextc, c):
                next_map[c].add(nextc)
    return next_map
# tokenize an input string and return the set of all parses
# which also conform to the syllable canon
def make_tokenizer(parameters, accessor, next_map):
    """Build a tokenizer closure for one language.

    *accessor* maps a correspondence to the list of tokens it can match
    (its proto glyphs, or one daughter's reflexes); *next_map* is the
    precomputed successor relation from next_correspondence_map. The
    returned tokenize(form, statistics) yields every parse of *form* as a
    tuple of correspondences satisfying both contexts and the syllable canon.
    """
    regex = parameters.syllable_canon.regex
    sound_classes = parameters.syllable_canon.sound_classes  # NOTE(review): unused here
    supra_segmentals = parameters.syllable_canon.supra_segmentals
    correspondences = parameters.table.correspondences
    rule_map, token_lengths = partition_correspondences(
        correspondences,
        accessor)
    def tokenize(form, statistics):
        parses = set()
        attempts = set()
        def gen(form, parse, last, syllable_parse):
            '''We generate context and "phonotactic" sensitive parses recursively,
            making sure to skip over suprasegmental features when matching
            contexts.
            '''
            # we can abandon parses that we know can't be completed
            # to satisfy the syllable canon. for DEMO93 this cuts the
            # number of branches from 182146 to 61631
            if regex.fullmatch(syllable_parse, partial=True) is None:
                if Debug.debug:
                    pass
                    #filler = '. ' * len(parse)
                    #statistics.add_debug_note(f'{filler}canon cannot match: {len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}')
                return
            if Debug.debug:
                pass
                #filler = '. ' * len(parse)
                #statistics.add_debug_note(f'{filler}{len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}')
            if form == '':
                # check whether the last token's right context had a word final
                # marker or a catch all environment
                if (last.context[1] is None or
                    '#' in last.expanded_context[1]):
                    if regex.fullmatch(syllable_parse):
                        parses.add(tuple(parse))
                    attempts.add(tuple(parse))
                # if the last token was marked as only word final then stop
                if last.context[1] and last.expanded_context[1] == {'#'}:
                    return
            # otherwise keep building parses from epenthesis rules
            for c in rule_map['∅']:
                if c in next_map[last]:
                    for syllable_type in c.syllable_types:
                        gen(form,
                            parse + [c],
                            last if c.proto_form in supra_segmentals else c,
                            syllable_parse + syllable_type)
            if form == '':
                #if Debug.debug:
                #    statistics.add_debug_note(f'reached end of form!')
                return
            # consume a prefix of the remaining form, one token length at a time
            for token_length in token_lengths:
                for c in rule_map[form[:token_length]]:
                    if c in next_map[last]:
                        for syllable_type in c.syllable_types:
                            gen(form[token_length:],
                                parse + [c],
                                last if c.proto_form in supra_segmentals else c,
                                syllable_parse + syllable_type)
        gen(form, [], parameters.table.initial_marker, '')
        if Debug.debug:
            statistics.add_debug_note(f'{len(parses)} reconstructions generated')
            for p in attempts:
                if p in parses:
                    statistics.add_debug_note(f'  *{correspondences_as_proto_form_string(p)} - {correspondences_as_ids(p)} {syllable_structure(p)}')
                else:
                    statistics.add_debug_note(f'  xx {correspondences_as_proto_form_string(p)} - {correspondences_as_ids(p)} {syllable_structure(p)}')
        return parses
    return tokenize
# set of all possible forms for a daughter language given correspondences
def postdict_forms_for_daughter(correspondences, daughter):
    """All surface strings the *daughter* language could realize for this
    correspondence sequence (cartesian product of each slot's reflexes)."""
    per_slot = [c.daughter_forms[daughter] for c in correspondences]
    forms = set()
    for combo in itertools.product(*per_slot):
        forms.add(''.join(combo))
    return frozenset(forms)
# return a mapping from daughter language to all possible forms given a proto-form
def postdict_daughter_forms(proto_form, parameters):
    """Map each daughter language to the surface forms predicted from the
    parses of *proto_form*.

    NOTE(review): when the proto form parses several ways, each successive
    parse overwrites the previous languages' entries (original behavior,
    preserved) -- confirm whether a union across parses was intended.
    """
    postdict = {}  # pun?
    # big speed difference between [c.proto_form] vs c.proto_form
    # c.proto_form does lots of slow substring comparisons. [c.proto_form]
    # parallels the usage of the other daughter form accessors
    # bug fix: make_tokenizer requires the successor map and the tokenizer
    # requires a Statistics sink -- the original call omitted both, so this
    # function raised TypeError whenever it ran
    tokenize = make_tokenizer(parameters, lambda c: [c.proto_form],
                              next_correspondence_map(parameters))
    for cs in tokenize(proto_form, Statistics()):
        for language in parameters.table.daughter_languages:
            postdict[language] = postdict_forms_for_daughter(cs, language)
    return postdict
# return a mapping from reconstructions to its supporting forms
def project_back(lexicons, parameters, statistics):
    """Parse every daughter form against the correspondence table and collect,
    for each parse (a tuple of correspondences, i.e. a candidate
    reconstruction), the daughter forms that support it.

    Returns (reconstructions, statistics): reconstructions maps each
    correspondence tuple to its list of supporting forms.
    """
    reconstructions = collections.defaultdict(list)
    next_map = next_correspondence_map(parameters)
    number_of_forms = 0
    for lexicon in lexicons:
        # we don't want to tokenize the same glyphs more than once, so
        # memoize each parse
        memo = {}
        daughter_form = lambda c: c.daughter_forms[lexicon.language]
        count_of_parses = 0
        count_of_no_parses = 0
        tokenize = make_tokenizer(parameters, daughter_form, next_map)
        for form in lexicon.forms:
            if form.glyphs == '':
                continue
            if Debug.debug:
                statistics.add_debug_note(f'!Parsing {form}...')
            if form.glyphs:
                # bug fix: the original used memo.setdefault(glyphs, tokenize(...)),
                # which evaluates tokenize() even on a memo hit and therefore
                # never avoided re-tokenizing repeated glyph strings
                if form.glyphs in memo:
                    parses = memo[form.glyphs]
                else:
                    parses = memo[form.glyphs] = tokenize(form.glyphs, statistics)
            else:
                # NOTE(review): unreachable -- empty glyphs are skipped above
                statistics.add_note(f'form missing: {form.language} {form.gloss}')
                parses = None
            if parses:
                for cs in parses:
                    count_of_parses += 1
                    reconstructions[cs].append(form)
            else:
                count_of_no_parses += 1
                statistics.failed_parses.add(form)
        number_of_forms += len(lexicon.forms)
        statistics.language_stats[lexicon.language] = {'forms': len(lexicon.forms), 'no_parses': count_of_no_parses, 'reconstructions': count_of_parses}
        statistics.add_note(f'{lexicon.language}: {len(lexicon.forms)} forms, {count_of_no_parses} no parses, {count_of_parses} reconstructions')
    statistics.correspondences_used_in_recons = count_correspondences_used_in_reconstructions(reconstructions)
    statistics.add_stat('lexicons', len(lexicons))
    statistics.add_stat('reflexes', number_of_forms)
    statistics.add_note(f'{number_of_forms} input forms')
    statistics.keys = reconstructions
    return reconstructions, statistics
def count_correspondences_used_in_reconstructions(reconstructions):
    """Tally how often each correspondence occurs across all reconstructions
    (the keys of *reconstructions* are correspondence tuples)."""
    return collections.Counter(
        correspondence
        for reconstruction in reconstructions
        for correspondence in reconstruction)
def count_correspondences_used_in_sets(cognate_sets):
    """Tally how often each correspondence occurs across all cognate sets
    (a cognate set's element 0 is its correspondence sequence)."""
    return collections.Counter(
        correspondence
        for cognate_set in cognate_sets
        for correspondence in cognate_set[0])
# we create cognate sets by comparing meaning.
def create_sets(projections, statistics, mels, only_with_mel, root=True):
    """Turn reconstruction->support projections into cognate sets, splitting
    each reconstruction's support by minimal element of meaning (MEL).

    At the root, sets supported by a single language are diverted into
    statistics.singleton_support instead. Each cognate set is a tuple
    (reconstruction, support, attested support, mel).
    """
    cognate_sets = set()
    def attested_forms(support):
        # flatten support down to the attested (modern) forms
        attested = set()
        for x in support:
            if isinstance(x, ModernForm):
                attested.add(x)
            else:
                attested |= x.attested_support
        return attested
    def all_glosses(projections):
        all_glosses = set()
        for support in projections.values():
            for supporting_form in support:
                if isinstance(supporting_form, ModernForm):
                    if supporting_form.gloss:
                        all_glosses.add(supporting_form.gloss)
        return all_glosses
    associated_mels_table = mel.compile_associated_mels(mels,
                                                        all_glosses(projections))
    def add_cognate_sets(reconstruction, support):
        # bucket the supporting forms by the MEL their gloss belongs to
        distinct_mels = collections.defaultdict(list)
        for supporting_form in support:
            if isinstance(supporting_form, ModernForm):
                for associated_mel in mel.associated_mels(associated_mels_table,
                                                          supporting_form.gloss):
                    if not (only_with_mel and associated_mel.id == '') or mels is None:
                        distinct_mels[associated_mel].append(supporting_form)
            else:
                # proto forms (tree reconstruction) fall into the default MEL
                distinct_mels[mel.default_mel].append(supporting_form)
        for distinct_mel, support in distinct_mels.items():
            if not root or len({form.language for form in support}) > 1:
                cognate_sets.add((reconstruction,
                                  frozenset(support),
                                  frozenset(attested_forms(support)),
                                  distinct_mel))
            else:
                statistics.singleton_support.add((reconstruction,
                                                  frozenset(support),
                                                  frozenset(attested_forms(support)),
                                                  distinct_mel))
    for reconstruction, support in projections.items():
        add_cognate_sets(reconstruction, support)
    statistics.add_note(
        f'{len(cognate_sets)} sets supported by multiple languages'
        if root else
        f'{len(cognate_sets)} cognate sets')
    statistics.correspondences_used_in_sets = count_correspondences_used_in_sets(cognate_sets)
    return cognate_sets, statistics
# given a collection of sets, we want to find all maximal sets,
# i.e. ones which are not proper subsets of any other set in the
# collection. we do this by partitioning the collection of sets by
# each set's length to reduce unnecessary comparison
def filter_subsets(cognate_sets, statistics, root=True):
    """Drop cognate sets whose support is a proper subset of another set's.

    Support is compared on attested forms (index 2) at the root, otherwise
    on direct support (index 1). Partitioning by support size means only
    strictly-larger supports are ever compared against.
    """
    partitions = collections.defaultdict(list)
    index = 2 if root else 1
    support_class = collections.defaultdict(list)
    # collect all sets with the same support. if one loses, they all lose.
    for cognate_set in cognate_sets:
        support_class[cognate_set[index]].append(cognate_set)
    for support in support_class.keys():
        partitions[len(support)].append(support)
    losers = set()
    # the largest sets are never losers
    for key1, sets1 in sorted(partitions.items(), reverse=True)[1:]:
        # NOTE: the comprehension variable `set` shadows the builtin
        larger_sets = [set for key2, sets2 in partitions.items()
                       if key2 > key1
                       for set in sets2
                       if support_class[set][0] not in losers]
        for support_set in sets1:
            for support_set2 in larger_sets:
                if support_set < support_set2:
                    for cognate_set in support_class[support_set]:
                        losers.add(cognate_set)
                    break
    statistics.subsets = losers
    statistics.add_note(f'threw away {len(losers)} subsets')
    statistics.add_stat('subsets_tossed', len(losers))
    return cognate_sets - losers, statistics
# pick a representative derivation, i.e. choose a reconstruction from
# reconstructions with the same supporting forms yielding the same
# surface string
def pick_derivation(cognate_sets, statistics, only_with_mel):
    """Collapse cognate sets sharing both surface proto string and supporting
    forms, keeping one representative derivation per (string, support) pair.

    *only_with_mel* is accepted for interface symmetry but unused here.
    """
    uniques = {}
    for cognate_set in cognate_sets:
        # later duplicates win; any one derivation is an acceptable representative
        uniques[(correspondences_as_proto_form_string(cognate_set[0]), cognate_set[1])] = cognate_set
    statistics.add_note(f'{len(uniques)} distinct reconstructions with distinct supporting forms')
    reflexes = sum([len(x[1]) for x in list(uniques.values())])
    statistics.add_note(f'{reflexes} reflexes in sets')
    statistics.add_stat('reflexes_in_sets', reflexes)
    return uniques.values(), statistics
def batch_upstream(lexicons, params, only_with_mel, root):
    """Run one upstream reconstruction step over *lexicons*: project daughter
    forms back, build cognate sets, discard subset-supported sets, and pick
    one derivation per distinct (proto string, support) pair."""
    projections, stats = project_back(lexicons, params, Statistics())
    sets, stats = create_sets(projections, stats, params.mels, only_with_mel, root)
    maximal, stats = filter_subsets(sets, stats, root)
    return pick_derivation(maximal, stats, only_with_mel)
def upstream_tree(target, tree, param_tree, attested_lexicons, only_with_mel):
    """Reconstruct *target* bottom-up over the language tree.

    *tree* maps each proto-language to its daughters, *param_tree* maps each
    proto-language to its Parameters, and leaves are looked up in
    *attested_lexicons*. Singleton-supported sets are filtered only at the
    root of the tree.
    """
    # batch upstream repeatedly up the action graph tree from leaves,
    # which are necessarily attested. we filter forms with singleton
    # supporting sets for the root language
    def rec(target, root):
        if target in attested_lexicons:
            return attested_lexicons[target]
        daughter_lexicons = [rec(daughter, False)
                             for daughter in tree[target]]
        forms, statistics = batch_upstream(daughter_lexicons,
                                           param_tree[target],
                                           only_with_mel,
                                           root)
        # each form is a (correspondences, support, attested, mel) tuple
        return Lexicon(
            target,
            [ProtoForm(target, correspondences, supporting_forms,
                       attested_support, mel)
             for (correspondences, supporting_forms, attested_support, mel)
             in forms],
            statistics)
    return rec(target, True)
def all_parameters(settings):
    """Read the correspondence file of every proto-language reachable from the
    upstream target and return a {proto_language: Parameters} mapping."""
    mapping = {}
    def rec(target):
        # attested languages have no correspondence file of their own
        if target in settings.attested:
            return
        mapping[target] = \
            read.read_correspondence_file(
                os.path.join(settings.directory_path,
                             settings.proto_languages[target]),
                '------',
                list(settings.upstream[target]),
                target,
                settings.mel_filename)
        for daughter in settings.upstream[target]:
            rec(daughter)
    rec(settings.upstream_target)
    return mapping
def batch_all_upstream(settings, only_with_mel=False):
    """Read all attested lexicons from disk, then reconstruct the project's
    upstream target over the whole language tree."""
    attested_lexicons = read.read_attested_lexicons(settings)
    return upstream_tree(settings.upstream_target,
                         settings.upstream,
                         all_parameters(settings),
                         attested_lexicons,
                         only_with_mel)
def interactive_upstream(settings, attested_lexicons, only_with_mel=False):
    """Like batch_all_upstream, but the attested lexicons are supplied by the
    caller (e.g. already loaded in an interactive session) rather than read
    from disk."""
    # attested_lexicons are passed in for this type of upstream...
    return upstream_tree(settings.upstream_target,
                         settings.upstream,
                         all_parameters(settings),
                         attested_lexicons,
                         only_with_mel)
def print_form(form, level):
    """Recursively pretty-print a form at indent *level*, nesting each
    supporting form one level under its reconstruction."""
    if isinstance(form, ModernForm):
        print(' ' * level + str(form))
    elif isinstance(form, ProtoForm):
        print(' ' * level + str(form) + ' ' +
              correspondences_as_ids(form.correspondences))
        # TODO: the output order should be the 'preferred order', not just alphabetical. but how?
        for supporting_form in sorted(form.supporting_forms, key=lambda x: x.language + x.glyphs):
            print_form(supporting_form, level + 1)
def print_sets(lexicon):
    """Print every cognate set in the lexicon, ordered by correspondence ids."""
    # for form in lexicon.forms:
    for form in sorted(lexicon.forms, key=lambda corrs: correspondences_as_ids(corrs.correspondences)):
        print_form(form, 0)
def dump_sets(lexicon, filename):
    """Write the lexicon's cognate sets (via print_sets) to *filename* as
    UTF-8 text."""
    out = sys.stdout
    try:
        # print_sets writes to sys.stdout, so temporarily rebind it to the file
        with open(filename, 'w', encoding='utf-8') as sys.stdout:
            print_sets(lexicon)
    finally:
        # bug fix: restore stdout even if print_sets raises; the original
        # left sys.stdout pointing at a closed file on any exception
        sys.stdout = out
def dump_xml_sets(sets, languages, filename, only_with_mel):
    """Serialize cognate sets to XML at *filename* (thin wrapper over the
    serialize module)."""
    serialize.serialize_sets(sets, languages, filename, only_with_mel)
def dump_keys(lexicon, filename):
    """Dump every (supporting form, reconstruction) pairing plus the parse
    failures to *filename* as tab-separated UTF-8 text (capped at 100,000
    data lines)."""
    out = sys.stdout
    forms = []
    try:
        # print() targets sys.stdout, so rebind it to the output file
        with open(filename, 'w', encoding='utf-8') as sys.stdout:
            for reconstruction, support in lexicon.statistics.keys.items():
                for supporting_form in support:
                    forms.append(str(supporting_form)
                                 + f'\t*{correspondences_as_proto_form_string(reconstruction)}'
                                 + f'\t*{correspondences_as_ids(reconstruction)}')
            print('\t'.join('language & form,gloss,id,protoform,correspondences'.split(',')))
            for i, f in enumerate(sorted(forms)):
                # bug fix: was `i > 100000`, which emitted 100,002 lines
                # before stopping, contradicting the message below
                if i >= 100000:
                    print('**** output terminated after 100,000 lines')
                    break
                print(f)
            print('***failures')
            for failure in lexicon.statistics.failed_parses:
                print(str(failure))
    finally:
        # bug fix: restore stdout even on error; the original leaked the
        # redirect whenever an exception escaped the with-block
        sys.stdout = out
def compare_support(lex1_forms, forms):
    """True when both supporting-form collections render to the same multiset
    of strings."""
    # FIXME: there's a subtle dependency here on the Form.str method.
    return sorted(map(str, lex1_forms)) == sorted(map(str, forms))
def set_compare(lex1, lex2, languages):
    """Build a bipartite view linking each supporting reflex to the protoform
    sets (from either input list) it participates in.

    Returns (reflex -> protoforms, reflex-label -> protoform-label graph,
    protoform labels, reflex labels). *languages* is currently unused.
    """
    union = lex1 + lex2
    list_of_sf = collections.defaultdict(list)
    overlap = collections.defaultdict(list)  # NOTE(review): never populated or returned
    for i in union:
        # print(i)
        for j in i.supporting_forms:
            # print(f'  {j}')
            if not i in list_of_sf[j]:
                list_of_sf[j].append(i)
    graph = collections.defaultdict(list)
    pfms = set()
    # the enumerate index `i` is unused; only the keys matter here
    for i, form in enumerate(list_of_sf):
        sets = list_of_sf[form]
        for protoform in sets:
            pformshort = f'{protoform.glyphs} {correspondences_as_ids(protoform.correspondences)}'
            reflexshort = f'{form.language} {form.glyphs} {form.gloss}'
            pfms.add(pformshort)
            if not pformshort in graph[reflexshort]:
                graph[reflexshort].append(pformshort)
    pfms = list(pfms)
    refs = list(graph.keys())
    return list_of_sf, graph, pfms, refs
def make_set(l1, l2, diff, union):
    """Assemble a synthetic protoform whose support is the forms of *union*
    selected by *diff* (membership label -> indices into *union*), with each
    copied form tagged via a `membership` attribute.

    Correspondences and MEL are borrowed from *l1*; *l2* is currently unused.
    """
    MF = []
    for s in sorted(diff):
        for x in diff[s]:
            # deep-copy so the membership tag never mutates the source form
            m = deepcopy(union[x])
            m.membership = s
            MF.append(m)
    # ProtoForm(lexicon.language, correspondences, supporting_forms, attested_support, mel)
    return ProtoForm(l1.language,
                     l1.correspondences,
                     MF,
                     MF,
                     l1.mel
                     )
def compare_proto_lexicons(lexicon1, lexicon2, languages):
    """Diff two proto-lexicons, treating lexicon1 as gold.

    Two protoforms match when they share a glyph string and stringwise
    identical supporting forms. Returns a report dict with counts,
    precision/recall/F-score, the common and unique sets, and a bipartite
    reflex/protoform graph over the symmetric difference.
    """
    table = collections.defaultdict(list)
    common = set()
    only_lex2 = set()
    # index lexicon1's protoforms by glyph string
    for form in lexicon1.forms:
        table[form.glyphs].append(form)
    for form in lexicon2.forms:
        if table[form.glyphs] == []:
            only_lex2.add(form)
        else:
            lex1_forms = table[form.glyphs]
            form_matched = False
            for lex1_form in lex1_forms:
                if compare_support(lex1_form.supporting_forms, form.supporting_forms):
                    common.add(lex1_form)
                    form_matched = True
            if not form_matched:
                only_lex2.add(form)
    only_lex1 = set(lexicon1.forms).difference(common)
    ncommon = len(common)
    nl1 = len(lexicon1.forms)
    nl2 = len(lexicon2.forms)
    precision = ncommon / nl1
    recall = ncommon / nl2
    # NOTE(review): raises ZeroDivisionError when either lexicon is empty or
    # when nothing is shared (precision + recall == 0) -- confirm callers
    # never pass such inputs
    fscore = 2 * (precision * recall) / (precision + recall)
    # TODO: is it useful to simply print these stats. Ever?
    print(f'Number of sets in lexicon 1: {nl1}')
    print(f'Number of sets in lexicon 2: {nl2}')
    print(f'Number of sets in common: {ncommon}')
    print(f'Number of sets only in lexicon 1: {len(only_lex1)}')
    print(f'Number of sets only in lexicon 2: {len(only_lex2)}')
    print('Assuming set 1 is gold:')
    print(f'  Precision: {precision}')
    print(f'  Recall: {recall}')
    print(f'  F-score: {fscore}')
    # TODO: leave in for now, but figure out how to render the diff better..
    # print(f'Sets only in lexicon1:')
    # for form in only_lex1:
    #     print_form(form, 0)
    # print(f'Sets only in lexicon2:')
    # for form in only_lex2:
    #     print_form(form, 0)
    # print('Sets in common:')
    # for form in common:
    #     print_form(form, 0)
    list_of_sf, graph, pfms, refs = set_compare(list(only_lex1), list(only_lex2), languages)
    return {
        'number_of_sets_in_lexicon_1': nl1,
        'number_of_sets_in_lexicon_2': nl2,
        'number_of_sets_in_common': ncommon,
        'number_of_sets_only_in_lexicon_1': len(only_lex1),
        'number_of_sets_only_in_lexicon_2': len(only_lex2),
        'venn': f'{len(only_lex1)},{len(only_lex2)},{ncommon}',
        'precision': ('{:04.3f}'.format(precision), 'float'),
        'recall': ('{:04.3f}'.format(recall), 'float'),
        'fscore': ('{:04.3f}'.format(fscore), 'float'),
        'sets_in_common': list(common),
        'sets_only_in_lexicon1': list(only_lex1),
        'sets_only_in_lexicon2': list(only_lex2),
        'list_of_sf': list_of_sf,
        'number_of_pfms': len(pfms),
        'number_of_refs': len(refs),
        'graph': graph,
        'pfms': pfms,
        'refs': refs
    }
# def compare_isomorphic_proto_lexicons(lexicon1, lexicon2, compare_type):
# # replace_underlying_lexicons(lexicon1, attested_lexicons)
# # replace_underlying_lexicons(lexicon2, attested_lexicons)
# return compare_proto_lexicons(lexicon1, lexicon2)
# create a fake cognate set with the forms that failed to reconstruct
def extract_failures(lexicon):
    """Wrap the forms that failed to parse in a fake 'failed' cognate set so
    they can be displayed/serialized like any other Lexicon."""
    return Lexicon(
        lexicon.language,
        # one synthetic ProtoForm holding every failure, sorted by language
        [ProtoForm('failed', (),
                   sorted(lexicon.statistics.failed_parses, key=lambda x: x.language),
                   (), [])],
        # bug fix: a stray `[]` third positional argument was removed --
        # Lexicon() takes (language, forms, statistics) and the extra
        # positional made this call raise TypeError
        lexicon.statistics)
# create "cognate sets" for the isolates
# (and we need to check to see that the singletons really are isolates -- not in any set)
def extract_isolates(lexicon):
    """Build "cognate sets" for true isolates: singleton-supported
    reconstructions whose supporting form appears in no multi-language set.

    Each entry of statistics.singleton_support is a
    (correspondences, supporting_forms, attested_support, mel) tuple.
    """
    forms_used = collections.Counter()

    def is_in(item, used_forms):
        # item[1] is the supporting-form set of a singleton candidate
        for form in item[1]:
            if form in used_forms:
                return True
        return False

    # renamed loop variable: the original shadowed the builtin `set`
    for cognate_set in lexicon.forms:
        forms_used.update(cognate_set.supporting_forms)
    isolates = [item for item in lexicon.statistics.singleton_support
                if not is_in(item, forms_used)]
    # keep only the first candidate seen for each supporting form
    seen = set()
    new_isolates = []
    for item in isolates:
        supporting_form = next(iter(item[1]))
        if supporting_form not in seen:
            seen.add(supporting_form)
            new_isolates.append(item)
    return [ProtoForm(lexicon.language, correspondences, supporting_forms, attested_support, mel)
            for (correspondences, supporting_forms, attested_support, mel)
            in new_isolates]
# given a proto lexicon whose underlying attested forms are drawn
# from lexicons isomorphic to attested_lexicons, destructively replace
# the in-memory Form objects with those in attested_lexicons.
# daughter_lexicons is a hash table mapping language -> Lexicon.
# only works for non-tree lexicons for now.
def replace_underlying_lexicons(proto_lexicon, attested_lexicons):
    """Destructively re-point a proto-lexicon's supporting/attested forms at
    the in-memory Form objects of *attested_lexicons*, matched on
    (glyphs, gloss).

    attested_lexicons maps language -> Lexicon. Only works for non-tree
    lexicons for now (nested ProtoForm support is not interned recursively).
    """
    keyed_forms = {language: lexicon.key_forms_by_glyphs_and_gloss()
                   for (language, lexicon) in attested_lexicons.items()}
    for form in proto_lexicon.forms:
        def intern(form_set):
            # the comprehension's `form` deliberately shadows the outer loop
            # variable: each member of form_set is looked up by its own keys
            return frozenset((keyed_forms[form.language][(form.glyphs, form.gloss)]
                              for form in form_set))
        form.attested_support = intern(form.attested_support)
        form.supporting_forms = intern(form.supporting_forms)
# Given a lexicon of protoforms, return a mapping between cognate sets
# and possible reconstructions.
def collate_proto_lexicon(proto_lexicon):
    """Group candidate reconstructions by the cognate set supporting them.

    Returns a mapping from each protoform's supporting_forms to the list of
    reconstructions built on that support.
    """
    by_support = collections.defaultdict(list)
    for candidate in proto_lexicon:
        by_support[candidate.supporting_forms].append(candidate)
    return by_support
| en | 0.827956 | # context is a tuple of left and right contexts # daughter forms indexed by language # build a map from tokens to lists of correspondences containing the # token key. # also return all possible token lengths # imperative interface # make a rule view of the form # |Rule|Type|*|Outcome|Context|Language(s) # print(note) # statically compute which correspondences can actually follow from # others based on context # expand out the cover class abbreviations # implements bypassing of suprasegmentals the other way # tokenize an input string and return the set of all parses # which also conform to the syllable canon We generate context and "phonotactic" sensitive parses recursively, making sure to skip over suprasegmental features when matching contexts. # we can abandon parses that we know can't be completed # to satisfy the syllable canon. for DEMO93 this cuts the # number of branches from 182146 to 61631 #filler = '. ' * len(parse) #statistics.add_debug_note(f'{filler}canon cannot match: {len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}') #filler = '. ' * len(parse) #statistics.add_debug_note(f'{filler}{len(parse)}, {form}, *{correspondences_as_proto_form_string(parse)}, {correspondences_as_ids(parse)}, {syllable_parse}') # check whether the last token's right context had a word final # marker or a catch all environment # if the last token was marked as only word final then stop # otherwise keep building parses from epenthesis rules #if Debug.debug: # statistics.add_debug_note(f'reached end of form!') # set of all possible forms for a daughter language given correspondences # return a mapping from daughter language to all possible forms given a proto-form # pun? # big speed difference between [c.proto_form] vs c.proto_form # c.proto_form does lots of slow substring comparisons. 
[c.proto_form] # parallels the usage of the other daughter form accessors # return a mapping from reconstructions to its supporting forms # we don't want to tokenize the same glyphs more than once, so # memoize each parse # we create cognate sets by comparing meaning. # given a collection of sets, we want to find all maximal sets, # i.e. ones which are not proper subsets of any other set in the # collection. we do this by partitioning the collection of sets by # each set's length to reduce unnecessary comparison # collect all sets with the same support. if one loses, they all lose. # the largest sets are never losers # pick a representative derivation, i.e. choose a reconstruction from # reconstructions with the same supporting forms yielding the same # surface string # batch upstream repeatedly up the action graph tree from leaves, # which are necessarily attested. we filter forms with singleton # supporting sets for the root language # Return a mapping from protolanguage to its associated parameter object # attested_lexicons are passed in for this type of upstream... # TODO: the output order should be the 'preferred order', not just alphabetical. but how? # for form in lexicon.forms: # FIXME: there's a subtle dependency here on the Form.str method. # print(i) # print(f' {j}') # ProtoForm(lexicon.language, correspondences, supporting_forms, attested_support, mel) # TODO: is it useful to simply print these stats. Ever? # TODO: leave in for now, but figure out how to render the diff better.. 
# print(f'Sets only in lexicon1:') # for form in only_lex1: # print_form(form, 0) # print(f'Sets only in lexicon2:') # for form in only_lex2: # print_form(form, 0) # print('Sets in common:') # for form in common: # print_form(form, 0) # def compare_isomorphic_proto_lexicons(lexicon1, lexicon2, compare_type): # # replace_underlying_lexicons(lexicon1, attested_lexicons) # # replace_underlying_lexicons(lexicon2, attested_lexicons) # return compare_proto_lexicons(lexicon1, lexicon2) # create a fake cognate set with the forms that failed to reconstruct # create "cognate sets" for the isolates # (and we need to check to see that the singletons really are isolates -- not in any set) # given a proto lexicon whose underlying attested forms are drawn # from lexicons isomorphic to attested_lexicons, destructively replace # the in-memory Form objects with those in attested_lexicons. # daughter_lexicons is a hash table mapping language -> Lexicon. # only works for non-tree lexicons for now. # Given a lexicon of protoforms, return a mapping between cognate sets # and possible reconstructions. | 2.596986 | 3 |
generalizedtrees/vis/util.py | sverchkov/generalized_tree_models | 3 | 6612485 | <gh_stars>1-10
# Utility functions used by visualization components
#
# Licensed under the BSD 3-Clause License
# Copyright (c) 2020, <NAME>
def _ensure_native(value):
return getattr(value, 'tolist', lambda: value)() | # Utility functions used by visualization components
#
# Licensed under the BSD 3-Clause License
# Copyright (c) 2020, <NAME>
def _ensure_native(value):
return getattr(value, 'tolist', lambda: value)() | en | 0.806463 | # Utility functions used by visualization components # # Licensed under the BSD 3-Clause License # Copyright (c) 2020, <NAME> | 1.335329 | 1 |
models/image_load.py | Loe-s-Bois/Bot-tom-gear | 1 | 6612486 | import numpy as np
from urllib.request import urlopen
import cv2
import io
import aiohttp
async def url_to_image(url, message):
    '''
    Download the image at *url* and decode it into a mutable OpenCV matrix.

    :param url: URL of the image to fetch.
    :param message: Discord message object used for error reporting by get_url.
    :return: BGR image as a numpy uint8 array (cv2.imdecode result), or None
             if decoding fails.
    '''
    # NOTE(review): on a non-200 response get_url returns the result of
    # message.channel.send instead of a BytesIO, so resp.read() below would
    # fail in that case -- confirm intended behavior.
    resp = await get_url(url, message)
    # Decode the in-memory bytes into an OpenCV image (3-channel BGR).
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # return the image
    return image
async def get_url(url, message):
    '''
    Download *url* and return its body as an in-memory io.BytesIO buffer.

    On a non-200 response the failure is reported to the Discord channel and
    the result of ``message.channel.send`` is returned instead.
    NOTE(review): callers such as url_to_image then call ``.read()`` on that
    value, which will fail -- consider returning None on error instead;
    the error-path return value is kept as-is for backward compatibility.

    :param url: URL to download.
    :param message: Discord message whose channel receives error reports.
    '''
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status != 200:
                return await message.channel.send('Could not download file...')
            # Buffer the whole response body in memory.
            # (The original also had an unreachable trailing `return data`
            # after this block and a dead `data = None` initializer; both
            # removed -- every path already returns inside the `with`.)
            return io.BytesIO(await resp.read())
def bts_to_img(bts):
    '''
    Encode an image matrix as PNG and return the raw bytes.

    :param bts: image as a numpy array (OpenCV matrix).
    :return: PNG-encoded image as a bytes object.
    '''
    # NOTE(review): `ret` is not checked; imencode can fail and return
    # (False, None) -- confirm whether callers can tolerate that.
    ret, img = cv2.imencode('.png', bts)
    # tobytes() replaces ndarray.tostring(), which was deprecated in
    # NumPy 1.19 and removed in NumPy 2.0; output is byte-identical.
    return np.array(img).tobytes()
from urllib.request import urlopen
import cv2
import io
import aiohttp
async def url_to_image(url, message):
'''
Obtains the image and convert it into mutatable opencv matrix
'''
resp = await get_url(url, message)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# return the image
return image
async def get_url(url, message):
'''
Grabs Image from URL using discord reccomdations
'''
data = None
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status != 200:
return await message.channel.send('Could not download file...')
data = io.BytesIO(await resp.read())
return data
return data
def bts_to_img(bts):
'''
Convert matix of value into byte array
'''
ret, img = cv2.imencode('.png', bts)
return np.array(img).tostring() | en | 0.649943 | Obtains the image and convert it into mutatable opencv matrix # return the image Grabs Image from URL using discord reccomdations Convert matix of value into byte array | 3.306027 | 3 |
dbricks_setup/_cli.py | SindreOsnes/dbricks_setup | 0 | 6612487 | <reponame>SindreOsnes/dbricks_setup
import argparse
import logging
from .cluster import delete_cluster_cli, update_cluster_cli
from .scope import delete_scope_cli, update_scope_cli
def cli():
    """Entry point for the databricks setup CLI.

    Builds the argparse tree (cluster/scope sub-commands, each with
    update/delete actions) and dispatches to the matching handler based on
    the `which` default set on each sub-parser.

    :return: None
    """
    logging.basicConfig(level=logging.INFO)
    # Top level
    parser = argparse.ArgumentParser(description='CLI for helping set up databricks.')
    parser.set_defaults(which='base')
    subparsers = parser.add_subparsers(help='Sub commands')
    # Optional arguments
    parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    # cluster level commands
    cluster_parser = subparsers.add_parser(
        'cluster',
        help='Cluster commands',
        description='Cluster commands'
    )
    cluster_parser.set_defaults(which='cluster')
    cluster_subparsers = cluster_parser.add_subparsers(help='Sub commands')
    # Optional arguments
    cluster_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    # cluster create commands
    cluster_update_parser = cluster_subparsers.add_parser(
        'update',
        help='cluster creation/update commands',
        description="Create and/or update configure a cluster, updates include rbac/acls for the cluster"
    )
    cluster_update_parser.set_defaults(which='cluster_update')
    # Optional arguments
    cluster_update_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    cluster_update_parser.add_argument('-r', action='store_true', help='Allow cluster to run after creation')
    cluster_update_parser.add_argument('-e', action='store_true', help='Force cluster reconfiguration, cluster must be terminated first')
    # Required arguments
    required_args = cluster_update_parser.add_argument_group('required arguments')
    required_args.add_argument('--name', type=str, help='The cluster name', required=True)
    # cluster delete commands
    cluster_delete_parser = cluster_subparsers.add_parser(
        'delete',
        help='Cluster deletion commands',
        description="Delete clusters and connected items"
    )
    cluster_delete_parser.set_defaults(which='cluster_delete')
    # Optional arguments
    cluster_delete_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    cluster_delete_parser.add_argument('-a', action='store_true', help='Delete all resources')
    cluster_delete_parser.add_argument('-c', action='store_true', help='Delete control lists')
    cluster_delete_parser.add_argument('-d', action='store_true', help='Debug, does not perform the deletes')
    cluster_delete_parser.add_argument('-g', action='store_true', help='Delete groups')
    cluster_delete_parser.add_argument('-q', action='store_true', help='Quiet')
    cluster_delete_parser.add_argument('-s', action='store_true', help='Delete cluster')
    # Required arguments
    required_args = cluster_delete_parser.add_argument_group('required arguments')
    required_args.add_argument('--name', type=str, help='Name override for cluster, case insensitive')
    # scope level commands
    scope_parser = subparsers.add_parser(
        'scope',
        help='Secret scope commands',
        description='Scope level commands'
    )
    scope_parser.set_defaults(which='scope')
    scope_subparsers = scope_parser.add_subparsers(help='Sub commands')
    # Optional arguments
    scope_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    # scope update commands
    scope_update_parser = scope_subparsers.add_parser(
        'update',
        help='Secret scope configuration update commands',
        description="Update/create the configuration for a key vault backed secret scope"
    )
    scope_update_parser.set_defaults(which='scope_update')
    # Optional arguments
    scope_update_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    scope_update_parser.add_argument('--scope-name', type=str, help='Name override for the secret scope')
    scope_update_parser.add_argument('-f', action='store_true', help='Force deletion of existing secret scope')
    # Required arguments
    # (typo fix: help strings previously read "The the key vault ...")
    required_args = scope_update_parser.add_argument_group('required arguments')
    required_args.add_argument('--key-vault', type=str, help='The key vault name', required=True)
    required_args.add_argument('--resource-id', type=str, help='The key vault resource id', required=True)
    # scope delete commands
    scope_delete_parser = scope_subparsers.add_parser(
        'delete',
        help='Secret scope deletion commands',
        description="Delete secret scopes and connected items"
    )
    scope_delete_parser.set_defaults(which='scope_delete')
    # Optional arguments
    scope_delete_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
    scope_delete_parser.add_argument('-a', action='store_true', help='Delete all resources')
    scope_delete_parser.add_argument('-c', action='store_true', help='Delete control lists')
    scope_delete_parser.add_argument('-d', action='store_true', help='Debug, does not perform the deletes')
    scope_delete_parser.add_argument('-g', action='store_true', help='Delete groups')
    scope_delete_parser.add_argument('-q', action='store_true', help='Quiet')
    scope_delete_parser.add_argument('-s', action='store_true', help='Delete scope')
    # Required arguments
    required_args = scope_delete_parser.add_argument_group('required arguments')
    required_args.add_argument('--scope-name', type=str, help='Name override for the secret scope')
    # Initialize the cli and dispatch on the `which` marker.
    # (removed leftover debug `print(args)`)
    args = parser.parse_args()
    if args.which == 'scope_update':
        update_scope_cli(args)
    elif args.which == 'scope_delete':
        delete_scope_cli(args)
    elif args.which == 'cluster_update':
        update_cluster_cli(args)
    elif args.which == 'cluster_delete':
        delete_cluster_cli(args)
# Script entry point: run the CLI when executed directly.
if __name__ == '__main__':
    cli()
| import argparse
import logging
from .cluster import delete_cluster_cli, update_cluster_cli
from .scope import delete_scope_cli, update_scope_cli
def cli():
"""Wrapper around the cli
:return:
"""
logging.basicConfig(level=logging.INFO)
# Top level
parser = argparse.ArgumentParser(description='CLI for helping set up databricks.')
parser.set_defaults(which='base')
subparsers = parser.add_subparsers(help='Sub commands')
# Optional arguments
parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
# cluster level commands
cluster_parser = subparsers.add_parser(
'cluster',
help='Cluster commands',
description='Cluster commands'
)
cluster_parser.set_defaults(which='cluster')
cluster_subparsers = cluster_parser.add_subparsers(help='Sub commands')
# Optional arguments
cluster_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
# cluster create commands
cluster_update_parser = cluster_subparsers.add_parser(
'update',
help='cluster creation/update commands',
description="Create and/or update configure a cluster, updates include rbac/acls for the cluster"
)
cluster_update_parser.set_defaults(which='cluster_update')
# Optional arguments
cluster_update_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
cluster_update_parser.add_argument('-r', action='store_true', help='Allow cluster to run after creation')
cluster_update_parser.add_argument('-e', action='store_true', help='Force cluster reconfiguration, cluster must be terminated first')
# Required arguments
required_args = cluster_update_parser.add_argument_group('required arguments')
required_args.add_argument('--name', type=str, help='The cluster name', required=True)
# cluster delete commands
cluster_delete_parser = cluster_subparsers.add_parser(
'delete',
help='Cluster deletion commands',
description="Delete clusters and connected items"
)
cluster_delete_parser.set_defaults(which='cluster_delete')
# Optional arguments
cluster_delete_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
cluster_delete_parser.add_argument('-a', action='store_true', help='Delete all resources')
cluster_delete_parser.add_argument('-c', action='store_true', help='Delete control lists')
cluster_delete_parser.add_argument('-d', action='store_true', help='Debug, does not perform the deletes')
cluster_delete_parser.add_argument('-g', action='store_true', help='Delete groups')
cluster_delete_parser.add_argument('-q', action='store_true', help='Quiet')
cluster_delete_parser.add_argument('-s', action='store_true', help='Delete cluster')
# Required arguments
required_args = cluster_delete_parser.add_argument_group('required arguments')
required_args.add_argument('--name', type=str, help='Name override for cluster, case insensitive')
# scope level commands
scope_parser = subparsers.add_parser(
'scope',
help='Secret scope commands',
description='Scope level commands'
)
scope_parser.set_defaults(which='scope')
scope_subparsers = scope_parser.add_subparsers(help='Sub commands')
# Optional arguments
scope_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
# scope update commands
scope_update_parser = scope_subparsers.add_parser(
'update',
help='Secret scope configuration update commands',
description="Update/create the configuration for a key vault backed secret scope"
)
scope_update_parser.set_defaults(which='scope_update')
# Optional arguments
scope_update_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
scope_update_parser.add_argument('--scope-name', type=str, help='Name override for the secret scope')
scope_update_parser.add_argument('-f', action='store_true', help='Force deletion of existing secret scope')
# Required arguments
required_args = scope_update_parser.add_argument_group('required arguments')
required_args.add_argument('--key-vault', type=str, help='The the key vault name', required=True)
required_args.add_argument('--resource-id', type=str, help='The the key vault resource id', required=True)
# scope delete commands
scope_delete_parser = scope_subparsers.add_parser(
'delete',
help='Secret scope deletion commands',
description="Delete secret scopes and connected items"
)
scope_delete_parser.set_defaults(which='scope_delete')
# Optional arguments
scope_delete_parser.add_argument('--profile', type=str, help='The databricks cli profile to use')
scope_delete_parser.add_argument('-a', action='store_true', help='Delete all resources')
scope_delete_parser.add_argument('-c', action='store_true', help='Delete control lists')
scope_delete_parser.add_argument('-d', action='store_true', help='Debug, does not perform the deletes')
scope_delete_parser.add_argument('-g', action='store_true', help='Delete groups')
scope_delete_parser.add_argument('-q', action='store_true', help='Quiet')
scope_delete_parser.add_argument('-s', action='store_true', help='Delete scope')
# Required arguments
required_args = scope_delete_parser.add_argument_group('required arguments')
required_args.add_argument('--scope-name', type=str, help='Name override for the secret scope')
# Initialize the cli
args = parser.parse_args()
print(args)
if args.which == 'scope_update':
update_scope_cli(args)
elif args.which == 'scope_delete':
delete_scope_cli(args)
elif args.which == 'cluster_update':
update_cluster_cli(args)
elif args.which == 'cluster_delete':
delete_cluster_cli(args)
if __name__ == '__main__':
cli() | en | 0.410048 | Wrapper around the cli :return: # Top level # Optional arguments # cluster level commands # Optional arguments # cluster create commands # Optional arguments # Required arguments # cluster delete commands # Optional arguments # Required arguments # scope level commands # Optional arguments # scope update commands # Optional arguments # Required arguments # scope delete commands # Optional arguments # Required arguments # Initialize the cli | 2.421096 | 2 |
test_all_images_have_stickers_8.py | nechaeva-irina/selenium_training | 0 | 6612488 | import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
    """Launch a Chrome WebDriver and register quit() as test teardown."""
    wd = webdriver.Chrome()
    # Ensure the browser closes even when the test fails.
    request.addfinalizer(wd.quit)
    return wd
def test_all_images_have_sticker(driver):
    """Open the litecart main page and report the sticker count per product."""
    driver.get("http://localhost/litecart/")
    products = driver.find_elements_by_css_selector('.product')
    # 1-based numbering matches the original `i + 1` reporting.
    for number, product in enumerate(products, start=1):
        stickers = product.find_elements_by_css_selector("[class^='sticker ']")
        sticker_count = len(stickers)
        if sticker_count == 1:
            print('Product %s contains one sticker' % number)
        elif sticker_count < 1:
            print('There is not sticker for product %s' % number)
        else:
            print('Product %s contains more than one sticker' % number)
| import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_all_images_have_sticker(driver):
driver.get("http://localhost/litecart/")
products_list = driver.find_elements_by_css_selector(('.product'))
for i in range(0, len(products_list)):
product_sticker = products_list[i].find_elements_by_css_selector("[class^='sticker ']")
if len(product_sticker) == 1:
print('Product %s contains one sticker' % (i + 1))
elif len(product_sticker) < 1:
print('There is not sticker for product %s' % (i + 1))
else:
print('Product %s contains more than one sticker' % (i + 1))
| none | 1 | 2.916466 | 3 | |
main.py | yun0dev/dataExtractor | 2 | 6612489 | #Pandas will help for the output.
import pandas as pd
import numpy as np
#Using requests to get binance API.
import requests
class Extractor:
    """Fetch Binance 24h ticker data and normalize it into a DataFrame."""
    def getData(self):
        """Download Binance 24h tickers and return a tidy DataFrame.

        Returns a DataFrame indexed by ticker ("base-quote") with columns:
        base, quote, exchange, price, ask, bid, spread, volume.
        Requires network access to the Binance public API.
        """
        def cleanOutput(tickerString):
            # Split a concatenated symbol (e.g. "BTCUSDT") into
            # [base, quote]; unknown quote currencies become NaN so the
            # row is dropped by dropna() below.
            if tickerString[-4:] == 'USDT':
                return [tickerString.split('USDT')[0].lower(), 'usdt']
            elif tickerString[-3:] == 'ETH':
                return [tickerString.split('ETH')[0].lower(), 'eth']
            elif tickerString[-3:] == 'BTC':
                return [tickerString.split('BTC')[0].lower(), 'btc']
            elif tickerString[-3:] == 'BNB':
                return [tickerString.split('BNB')[0].lower(), 'bnb']
            return np.nan
        #Working with the data.
        api = 'https://api.binance.com/api/v1/ticker/24hr'
        data = pd.DataFrame(requests.get(api).json())
        # 'symbol' becomes a [base, quote] list (or NaN for unknown quotes).
        data['symbol'] = data.apply(lambda x: cleanOutput(x['symbol']), axis=1)
        data = data.dropna()
        data['base'] = data.apply(lambda x: x['symbol'][0], axis=1)
        data['quote'] = data.apply(lambda x: x['symbol'][1], axis=1)
        # Normalize the stablecoin quote name.
        data['quote'] = data['quote'].str.replace('usdt', 'usd')
        # NOTE(review): rename(index=str, ...) is a dated pandas idiom;
        # behavior depends on installed pandas version -- confirm.
        data = data.rename(index=str, columns={'askPrice': 'ask','bidPrice': 'bid','lastPrice': 'price'})
        columns = ['ask', 'bid', 'price', 'volume']
        data['exchange'] = 'binance'
        data[columns] = data[columns].astype(float)
        data['spread'] = data.ask - data.bid
        columns.extend(['base', 'quote', 'spread', 'exchange'])
        data = data[columns]
        # Build the "base-quote" ticker used as the index.
        data['ticker'] = data.apply(lambda x: x['base'] + '-' + x['quote'], axis=1).tolist()
        data = data[['base', 'quote', 'exchange', 'price', 'ask', 'bid', 'spread', 'volume', 'ticker']].set_index('ticker')
        return data
# Fetch the normalized ticker table and preview the first rows.
dataFrame = Extractor().getData()
print(dataFrame.head())
# Using pandas .to_csv() to write the dataframe to a CSV file.
# NOTE(review): when a path is given, to_csv returns None, so this
# print outputs "None"; also the second positional argument is `sep`.
print(dataFrame.to_csv('data.csv',';'))
| #Pandas will help for the output.
import pandas as pd
import numpy as np
#Using requests to get binance API.
import requests
class Extractor:
def getData(self):
def cleanOutput(tickerString):
if tickerString[-4:] == 'USDT':
return [tickerString.split('USDT')[0].lower(), 'usdt']
elif tickerString[-3:] == 'ETH':
return [tickerString.split('ETH')[0].lower(), 'eth']
elif tickerString[-3:] == 'BTC':
return [tickerString.split('BTC')[0].lower(), 'btc']
elif tickerString[-3:] == 'BNB':
return [tickerString.split('BNB')[0].lower(), 'bnb']
return np.nan
#Working with the data.
api = 'https://api.binance.com/api/v1/ticker/24hr'
data = pd.DataFrame(requests.get(api).json())
data['symbol'] = data.apply(lambda x: cleanOutput(x['symbol']), axis=1)
data = data.dropna()
data['base'] = data.apply(lambda x: x['symbol'][0], axis=1)
data['quote'] = data.apply(lambda x: x['symbol'][1], axis=1)
data['quote'] = data['quote'].str.replace('usdt', 'usd')
data = data.rename(index=str, columns={'askPrice': 'ask','bidPrice': 'bid','lastPrice': 'price'})
columns = ['ask', 'bid', 'price', 'volume']
data['exchange'] = 'binance'
data[columns] = data[columns].astype(float)
data['spread'] = data.ask - data.bid
columns.extend(['base', 'quote', 'spread', 'exchange'])
data = data[columns]
data['ticker'] = data.apply(lambda x: x['base'] + '-' + x['quote'], axis=1).tolist()
data = data[['base', 'quote', 'exchange', 'price', 'ask', 'bid', 'spread', 'volume', 'ticker']].set_index('ticker')
return data
dataFrame = Extractor().getData()
print(dataFrame.head())
#using pandas .to_csv() methode to convert dataframe into a csv file.
print(dataFrame.to_csv('data.csv',';'))
| en | 0.602257 | #Pandas will help for the output. #Using requests to get binance API. #Working with the data. #using pandas .to_csv() methode to convert dataframe into a csv file. | 3.199053 | 3 |
web/articles/migrations/0005_auto_20201228_1311.py | OPI-py/django_blog | 0 | 6612490 | <reponame>OPI-py/django_blog
# Generated by Django 3.1.4 on 2020-12-28 11:11
from django.db import migrations
import froala_editor.fields
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.1.4): switches Article.text to a
    # Froala rich-text editor field.
    dependencies = [
        ('articles', '0004_article_author'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='text',
            field=froala_editor.fields.FroalaField(),
        ),
    ]
| # Generated by Django 3.1.4 on 2020-12-28 11:11
from django.db import migrations
import froala_editor.fields
class Migration(migrations.Migration):
dependencies = [
('articles', '0004_article_author'),
]
operations = [
migrations.AlterField(
model_name='article',
name='text',
field=froala_editor.fields.FroalaField(),
),
] | en | 0.831036 | # Generated by Django 3.1.4 on 2020-12-28 11:11 | 1.426406 | 1 |
Bilibili-Notification/configs/services_config.py | cnscj/Bilibili-Notification | 0 | 6612491 | <gh_stars>0
#!/usr/bin/python
#coding:utf-8
# UIDs of the uploaders (UP hosts) to watch.
# (Alternate list kept for reference:)
#UID_LIST_MEMBER = [672328094,672346917,672353429,672342685,351609538]
UID_LIST_MEMBER = [672328094,672346917,672353429,672342685]
UID_LIST_OFFICIAL = [703007996]
# Dynamic (feed) post types to handle.
HANDLE_DYNAMIC_TYPES = [2,4,6,8,64]
# Polling interval in seconds; avoid setting this too low.
INTERVALS_SECOND = 60
# Daily scan window, 24-hour clock (spanning midnight not supported),
# e.g. 07:00, 23:59.
BEGIN_TIME = "07:00"
END_TIME = "23:59"
# [proxy_pool] section
# Whether to enable the proxy pool (True/False).
PROXY_ENABLE = False
# Proxy pool URL, see https://github.com/jhao104/proxy_pool
PROXY_POOL_URL = "http://ip:port" | #!/usr/bin/python
#coding:utf-8
# up主uid列表
#UID_LIST_MEMBER = [672328094,672346917,672353429,672342685,351609538]
UID_LIST_MEMBER = [672328094,672346917,672353429,672342685]
UID_LIST_OFFICIAL = [703007996]
#需要处理的动态类型
HANDLE_DYNAMIC_TYPES = [2,4,6,8,64]
# 扫描间隔秒数,不建议设置太频繁
INTERVALS_SECOND = 60
# 扫描起止时间,24小时制(目前不支持跨日期),例:07:00、23:59
BEGIN_TIME = "07:00"
END_TIME = "23:59"
#[proxy_pool]
# 是否启用 true/false
PROXY_ENABLE = False
# ip池地址,参考 https://github.com/jhao104/proxy_pool
PROXY_POOL_URL = "http://ip:port" | zh | 0.652731 | #!/usr/bin/python #coding:utf-8 # up主uid列表 #UID_LIST_MEMBER = [672328094,672346917,672353429,672342685,351609538] #需要处理的动态类型 # 扫描间隔秒数,不建议设置太频繁 # 扫描起止时间,24小时制(目前不支持跨日期),例:07:00、23:59 #[proxy_pool] # 是否启用 true/false # ip池地址,参考 https://github.com/jhao104/proxy_pool | 1.906299 | 2 |
cont/urls.py | ryanprogrammer/port-ryansilva | 1 | 6612492 | <reponame>ryanprogrammer/port-ryansilva
from django import views
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the portfolio app.
urlpatterns = [
    # Landing page.
    path('', views.index, name='index'),
    # Project list (class-based view).
    path('port/projects', views.ProjectView.as_view(), name='projects'),
    path('port/about-me', views.about, name='about'),
    path('port/hardskills', views.hardskills, name='hardskills')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | from django import views
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', views.index, name='index'),
path('port/projects', views.ProjectView.as_view(), name='projects'),
path('port/about-me', views.about, name='about'),
path('port/hardskills', views.hardskills, name='hardskills')
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | none | 1 | 1.873128 | 2 | |
settings.py | PJO2/qclocktwo | 1 | 6612493 | # settings for qclock
import board
LUM = 40 # brightness divisor: higher values make the clock darker
# Led strip configuration
nLEDs=196
LEDSTRIP_PIN=board.D18
# GPIO PIN for the luminosity (light) sensor
PIN = 4 # use GPIO4
DARK = 250 # darkest value returned by the lum measurement
MEASURES=5 # repeat the measurement 5 times for a robust value
# The word-clock letter matrix (French); each string is one LED row.
# NOTE: these strings are runtime data -- do not edit spelling/filler chars.
HORLOGE = [
    "IL#ESTRUNELDIX" ,
    "MINUITDEUXSEPT" ,
    "QUATREMIDICING" ,
    "HUIT-TROISNEUF" ,
    "SIXONZE+HEURES" ,
    "TRENTEQUARANTE" ,
    "VINGTCINQUANTE" ,
    "DIXQUATRESEPTI" ,
    "UNE#TROISSEIZE" ,
    "SIXEPILE!DEUXF" ,
    "ONZELNEUFCHUIT" ,
    "UEDOUZEACINQUE" ,
    "QUATORZETREIZE" ,
    "CQUINZEADEGRES" ,
]
| # settings for qclock
import board
LUM = 40 # higher values make clock darker
# Led strip
nLEDs=196
LEDSTRIP_PIN=board.D18
# GPIO PIN for luminosity
PIN = 4 # use GPIO4
DARK = 250 # darkest value returned by lum
MEASURES=5 # do it 5 times to have a robust value
# The horloge Matrix
HORLOGE = [
"IL#ESTRUNELDIX" ,
"MINUITDEUXSEPT" ,
"QUATREMIDICING" ,
"HUIT-TROISNEUF" ,
"SIXONZE+HEURES" ,
"TRENTEQUARANTE" ,
"VINGTCINQUANTE" ,
"DIXQUATRESEPTI" ,
"UNE#TROISSEIZE" ,
"SIXEPILE!DEUXF" ,
"ONZELNEUFCHUIT" ,
"UEDOUZEACINQUE" ,
"QUATORZETREIZE" ,
"CQUINZEADEGRES" ,
]
| en | 0.618647 | # settings for qclock # higher values make clock darker # Led strip # GPIO PIN for luminosity # use GPIO4 # darkest value returned by lum # do it 5 times to have a robust value # The horloge Matrix #ESTRUNELDIX" , #TROISSEIZE" , | 1.966909 | 2 |
cards/reload.py | MrCoft/EngiMod | 0 | 6612494 | from engi_mod import *
Card(
# DESIGN: fixes the issue of 0-cost cantrips being "free" by detering decks that don't want rounds in them
name = "Reload",
type = "skill",
target = "self",
rarity = "common",
cost = 0,
const = dict(
ROUND_NUM = 1,
MAGIC_NUMBER = "ROUND_NUM",
),
desc = "Shuffle a Round into your draw pile.",
upgrade_desc = "Shuffle a Round into your draw pile. NL Draw a card.",
code = """
AbstractDungeon.actionManager.addToBottom(
new MakeTempCardInDrawPileAction(p, p, new Round(), magicNumber, true, true)
);
if (upgraded)
AbstractDungeon.actionManager.addToBottom(new DrawCardAction(p, 1));
""",
upgrade_code = """
upgradeName();
rawDescription = UPGRADE_DESCRIPTION;
initializeDescription();
"""
)
| from engi_mod import *
Card(
# DESIGN: fixes the issue of 0-cost cantrips being "free" by detering decks that don't want rounds in them
name = "Reload",
type = "skill",
target = "self",
rarity = "common",
cost = 0,
const = dict(
ROUND_NUM = 1,
MAGIC_NUMBER = "ROUND_NUM",
),
desc = "Shuffle a Round into your draw pile.",
upgrade_desc = "Shuffle a Round into your draw pile. NL Draw a card.",
code = """
AbstractDungeon.actionManager.addToBottom(
new MakeTempCardInDrawPileAction(p, p, new Round(), magicNumber, true, true)
);
if (upgraded)
AbstractDungeon.actionManager.addToBottom(new DrawCardAction(p, 1));
""",
upgrade_code = """
upgradeName();
rawDescription = UPGRADE_DESCRIPTION;
initializeDescription();
"""
)
| en | 0.614056 | # DESIGN: fixes the issue of 0-cost cantrips being "free" by detering decks that don't want rounds in them AbstractDungeon.actionManager.addToBottom( new MakeTempCardInDrawPileAction(p, p, new Round(), magicNumber, true, true) ); if (upgraded) AbstractDungeon.actionManager.addToBottom(new DrawCardAction(p, 1)); upgradeName(); rawDescription = UPGRADE_DESCRIPTION; initializeDescription(); | 1.889829 | 2 |
tests/directory_module_test.py | beesperester/python-snapfs | 0 | 6612495 | <filename>tests/directory_module_test.py
import os
import unittest
import tempfile
import json
import random
import string
from pathlib import Path
from typing import List
from snapfs import (
fs,
transform,
stage,
directory,
file,
differences,
)
from snapfs.datatypes import File, Directory
def get_named_tmpfile_path() -> Path:
    """Create an empty temporary file on disk and return its path.

    The file is not deleted automatically; callers are responsible for
    cleaning it up.
    """
    # mkstemp replaces the NamedTemporaryFile(delete=False) open/close dance
    # (which also carried dead commented-out code referencing an undefined
    # `file_contents`); close the descriptor immediately so only the path
    # escapes this function.
    fd, name = tempfile.mkstemp()
    os.close(fd)
    return Path(name)
def fill_tmpfile(path: Path) -> None:
    """Overwrite *path* with 512 random ASCII letters."""
    letters = [random.choice(string.ascii_letters) for _ in range(512)]
    Path(path).write_text("".join(letters))
class TestTreeModule(unittest.TestCase):
    """Unit tests for the snapfs `directory` module (blob round-trips,
    serialization, list transforms, filesystem loading, and diffing)."""
    def test_store_as_blob(self):
        """Storing a Directory returns the hash of its JSON serialization."""
        directory_instance = Directory()
        result = ""
        expected_result = transform.string_as_hashid(
            transform.dict_as_json(
                directory.serialize_as_dict(directory_instance)
            )
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            result = directory.store_as_blob(
                Path(tmpdirname), directory_instance
            )
        self.assertEqual(result, expected_result)
    def test_load_from_blob(self):
        """A stored Directory blob loads back to an equivalent dict."""
        directory_instance = Directory()
        result = {}
        expected_result = {"directories": {}, "files": {}}
        with tempfile.TemporaryDirectory() as tmpdirname:
            directory_hashid = directory.store_as_blob(
                Path(tmpdirname), directory_instance
            )
            result = directory.serialize_as_dict(
                directory.load_from_blob(Path(tmpdirname), directory_hashid)
            )
        self.assertDictEqual(result, expected_result)
    def test_serialize_as_hashid(self):
        """Directory hashid matches the hash of its dict representation."""
        directory_instance = Directory()
        data = {"directories": {}, "files": {}}
        expected_result = transform.dict_as_hashid(data)
        result = directory.serialize_as_hashid(directory_instance)
        self.assertEqual(result, expected_result)
    def test_serialize_as_dict(self):
        """Nested directories serialize into nested dicts."""
        directories = {"test": Directory()}
        directory_instance = Directory(directories)
        expected_result = {
            "directories": {"test": {"directories": {}, "files": {}}},
            "files": {},
        }
        result = directory.serialize_as_dict(directory_instance)
        self.assertDictEqual(result, expected_result)
    def test_transform_as_list(self):
        """Flattening yields files depth-first (nested before top-level)."""
        file_a_instance = File(Path("file_a"))
        file_b_instance = File(Path("foobar/file_b"))
        directory_instance = Directory(
            {"foobar": Directory({}, {"file_b": file_b_instance})},
            {"file_a": file_a_instance},
        )
        expected_result = [
            file.serialize_as_dict(file_b_instance),
            file.serialize_as_dict(file_a_instance),
        ]
        result = [
            file.serialize_as_dict(x)
            for x in directory.transform_as_list(Path(), directory_instance)
        ]
        self.assertListEqual(result, expected_result)
    def test_transform_from_list(self):
        """A flat file list rebuilds the nested directory structure."""
        file_instance = File(Path("test/foobar"))
        data = [file_instance]
        expected_result = {
            "directories": {
                "test": {
                    "directories": {},
                    "files": {"foobar": file.serialize_as_dict(file_instance)},
                }
            },
            "files": {},
        }
        result = directory.serialize_as_dict(
            directory.transform_from_list(Path(), data)
        )
        self.assertEqual(result, expected_result)
    def test_load_from_directory_path(self):
        """Loading from disk honors the .ignore rules ('*' with '^*.c4d'
        whitelist keeps only the .c4d file)."""
        directory_instance = Directory()
        fake_file_path = Path()
        with tempfile.TemporaryDirectory() as tmpdirname:
            ignore_file_path = Path(tmpdirname).joinpath(".ignore")
            # add ignore file
            with open(ignore_file_path, "w") as f:
                f.write("\n".join(["*", "^*.c4d"]))
            # create subdirectory "test"
            test_directory_path = Path(tmpdirname).joinpath("test")
            os.makedirs(test_directory_path)
            # create fake binary file
            fake_file_path = test_directory_path.joinpath("foo.c4d")
            with open(fake_file_path, "wb") as f:
                f.write(b"fake binary data")
            directory_instance = directory.load_from_directory_path(
                Path(tmpdirname)
            )
        expected_result = {
            "directories": {
                "test": {
                    "directories": {},
                    "files": {
                        "foo.c4d": file.serialize_as_dict(File(fake_file_path))
                    },
                }
            },
            "files": {},
        }
        result = directory.serialize_as_dict(directory_instance)
        self.assertDictEqual(result, expected_result)
    def test_compare(self):
        """Diffing two trees reports added, updated and removed files."""
        file_a_path = get_named_tmpfile_path()
        file_b_path = get_named_tmpfile_path()
        file_c_path = get_named_tmpfile_path()
        fill_tmpfile(file_a_path)
        fill_tmpfile(file_b_path)
        # file_a appears in both trees but with different content hashes.
        file_a_instance = File(
            file_a_path, True, Path(), transform.string_as_hashid("foo")
        )
        file_b_instance = File(file_b_path)
        file_c_instance = File(file_c_path)
        file_a_modified_instance = File(
            file_a_path, True, Path(), transform.string_as_hashid("bar")
        )
        directory_old_instance = Directory(
            {
                "a": Directory(
                    {},
                    {
                        "file_a.txt": file_a_instance,
                        "file_c.txt": file_c_instance,
                    },
                )
            }
        )
        directory_new_instance = Directory(
            {
                "a": Directory({}, {"file_a.txt": file_a_modified_instance}),
                "b": Directory({}, {"file_b.txt": file_b_instance}),
            }
        )
        differences_instance = directory.compare(
            Path(), directory_old_instance, directory_new_instance
        )
        expected_result = [
            "added: b/file_b.txt",
            "updated: a/file_a.txt",
            "removed: a/file_c.txt",
        ]
        result = differences.serialize_as_messages(differences_instance)
        self.assertListEqual(result, expected_result)
| <filename>tests/directory_module_test.py
import os
import unittest
import tempfile
import json
import random
import string
from pathlib import Path
from typing import List
from snapfs import (
fs,
transform,
stage,
directory,
file,
differences,
)
from snapfs.datatypes import File, Directory
def get_named_tmpfile_path() -> Path:
tmpfile = tempfile.NamedTemporaryFile(mode="wb", delete=False)
# tmpfile.write(file_contents)
tmpfile.close()
return Path(tmpfile.name)
def fill_tmpfile(path: Path) -> None:
with open(path, "w") as f:
f.write(
"".join(random.choice(string.ascii_letters) for x in range(512))
)
class TestTreeModule(unittest.TestCase):
    """Unit tests for the snapfs ``directory`` module.

    Each test builds small Directory/File trees in memory (or in a real
    temporary directory) and checks the module's serialization, hashing,
    blob-store round-trips, and tree comparison against hand-written
    expected structures.
    """

    def test_store_as_blob(self):
        # Storing an empty Directory must return the hash of its JSON
        # serialization, as computed independently via transform.
        directory_instance = Directory()
        result = ""
        expected_result = transform.string_as_hashid(
            transform.dict_as_json(
                directory.serialize_as_dict(directory_instance)
            )
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            result = directory.store_as_blob(
                Path(tmpdirname), directory_instance
            )
        self.assertEqual(result, expected_result)
    def test_load_from_blob(self):
        # Round-trip: store an empty Directory as a blob, load it back by
        # hash, and expect the canonical empty serialized form.
        directory_instance = Directory()
        result = {}
        expected_result = {"directories": {}, "files": {}}
        with tempfile.TemporaryDirectory() as tmpdirname:
            directory_hashid = directory.store_as_blob(
                Path(tmpdirname), directory_instance
            )
            result = directory.serialize_as_dict(
                directory.load_from_blob(Path(tmpdirname), directory_hashid)
            )
        self.assertDictEqual(result, expected_result)
    def test_serialize_as_hashid(self):
        # The directory hashid must equal the hashid of its serialized dict.
        directory_instance = Directory()
        data = {"directories": {}, "files": {}}
        expected_result = transform.dict_as_hashid(data)
        result = directory.serialize_as_hashid(directory_instance)
        self.assertEqual(result, expected_result)
    def test_serialize_as_dict(self):
        # A nested (empty) subdirectory serializes as a nested dict entry.
        directories = {"test": Directory()}
        directory_instance = Directory(directories)
        expected_result = {
            "directories": {"test": {"directories": {}, "files": {}}},
            "files": {},
        }
        result = directory.serialize_as_dict(directory_instance)
        self.assertDictEqual(result, expected_result)
    def test_transform_as_list(self):
        # Flattening a tree yields files from subdirectories before files at
        # the root (expected order below: file_b, then file_a) —
        # presumably directories are traversed first; confirm against the
        # directory module if this ordering ever changes.
        file_a_instance = File(Path("file_a"))
        file_b_instance = File(Path("foobar/file_b"))
        directory_instance = Directory(
            {"foobar": Directory({}, {"file_b": file_b_instance})},
            {"file_a": file_a_instance},
        )
        expected_result = [
            file.serialize_as_dict(file_b_instance),
            file.serialize_as_dict(file_a_instance),
        ]
        result = [
            file.serialize_as_dict(x)
            for x in directory.transform_as_list(Path(), directory_instance)
        ]
        self.assertListEqual(result, expected_result)
    def test_transform_from_list(self):
        # Inverse of flattening: a file at "test/foobar" must be rebuilt
        # into a nested Directory structure keyed by path components.
        file_instance = File(Path("test/foobar"))
        data = [file_instance]
        expected_result = {
            "directories": {
                "test": {
                    "directories": {},
                    "files": {"foobar": file.serialize_as_dict(file_instance)},
                }
            },
            "files": {},
        }
        result = directory.serialize_as_dict(
            directory.transform_from_list(Path(), data)
        )
        self.assertEqual(result, expected_result)
    def test_load_from_directory_path(self):
        # Build a real on-disk layout: an .ignore file ("*" ignores
        # everything, "^*.c4d" re-includes c4d files), a subdirectory, and
        # one binary file that should survive the ignore rules.
        directory_instance = Directory()
        fake_file_path = Path()
        with tempfile.TemporaryDirectory() as tmpdirname:
            ignore_file_path = Path(tmpdirname).joinpath(".ignore")
            # add ignore file
            with open(ignore_file_path, "w") as f:
                f.write("\n".join(["*", "^*.c4d"]))
            # create subdirectory "test"
            test_directory_path = Path(tmpdirname).joinpath("test")
            os.makedirs(test_directory_path)
            # create fake binary file
            fake_file_path = test_directory_path.joinpath("foo.c4d")
            with open(fake_file_path, "wb") as f:
                f.write(b"fake binary data")
            directory_instance = directory.load_from_directory_path(
                Path(tmpdirname)
            )
        # Only the re-included .c4d file should appear; the .ignore file
        # itself and everything else is filtered out.
        expected_result = {
            "directories": {
                "test": {
                    "directories": {},
                    "files": {
                        "foo.c4d": file.serialize_as_dict(File(fake_file_path))
                    },
                }
            },
            "files": {},
        }
        result = directory.serialize_as_dict(directory_instance)
        self.assertDictEqual(result, expected_result)
    def test_compare(self):
        # Old tree: a/{file_a, file_c}. New tree: a/{file_a with a new
        # hash}, b/{file_b}. The diff must report one added, one updated,
        # and one removed file.
        file_a_path = get_named_tmpfile_path()
        file_b_path = get_named_tmpfile_path()
        file_c_path = get_named_tmpfile_path()
        fill_tmpfile(file_a_path)
        fill_tmpfile(file_b_path)
        # File(path, True, Path(), hashid) — positional args match the
        # project File datatype; "foo"/"bar" hashids force an update diff.
        file_a_instance = File(
            file_a_path, True, Path(), transform.string_as_hashid("foo")
        )
        file_b_instance = File(file_b_path)
        file_c_instance = File(file_c_path)
        file_a_modified_instance = File(
            file_a_path, True, Path(), transform.string_as_hashid("bar")
        )
        directory_old_instance = Directory(
            {
                "a": Directory(
                    {},
                    {
                        "file_a.txt": file_a_instance,
                        "file_c.txt": file_c_instance,
                    },
                )
            }
        )
        directory_new_instance = Directory(
            {
                "a": Directory({}, {"file_a.txt": file_a_modified_instance}),
                "b": Directory({}, {"file_b.txt": file_b_instance}),
            }
        )
        differences_instance = directory.compare(
            Path(), directory_old_instance, directory_new_instance
        )
        expected_result = [
            "added: b/file_b.txt",
            "updated: a/file_a.txt",
            "removed: a/file_c.txt",
        ]
        result = differences.serialize_as_messages(differences_instance)
        self.assertListEqual(result, expected_result)
| en | 0.451087 | # tmpfile.write(file_contents) # add ignore file # create subdirectory "test" # create fake binary file | 2.606124 | 3 |
wsgi.py | changyubiao/fisher-demo | 0 | 6612496 | <reponame>changyubiao/fisher-demo
# -*- coding: utf-8 -*-
"""
@Time : 2020/5/17 09:19
@File : wsgi.py
@Author : <EMAIL>
"""
from app import app
| # -*- coding: utf-8 -*-
"""
@Time : 2020/5/17 09:19
@File : wsgi.py
@Author : <EMAIL>
"""
from app import app | fr | 0.356475 | # -*- coding: utf-8 -*- @Time : 2020/5/17 09:19 @File : wsgi.py @Author : <EMAIL> | 1.016975 | 1 |
jackhammer/scheduler.py | nicocoffo/jackhammer | 0 | 6612497 | <reponame>nicocoffo/jackhammer
# Standard Python libraries
from threading import Thread, Event
from time import sleep
from logging import getLogger
import traceback
# Package libraries
from jackhammer.worker import Worker
from jackhammer.job import Job, JobState
from jackhammer.queue import JobQueue
logger = getLogger("jackhammer")
class Scheduler(Thread):
"""
Scheduler to launch and manage workers.
Uses very basic scheduling behaviour with limited understanding
of dependencies. Complicated job structures may bring it to
failure.
TODO: Consider moving job cleanup to worker thread callback.
"""
def __init__(self, create_provider, config):
Thread.__init__(self)
self.name = "jackhammer.scheduler"
self.exception = None
# Threading
self.shutdownFlag = Event()
self.pending = JobQueue()
self.ready = JobQueue()
self.completed = JobQueue()
# State
self.workers = []
self.create_provider = create_provider
self.config = config
self.failureRate = 0
def add_job(self, job, quiet=False):
"""
Add a new job to the pending queue.
The job must have a corresponding pending state.
"""
if not quiet:
logger.debug("Adding job: %s", job)
assert job.state == JobState.Pending, "Attempt to add non-pending job"
self.pending.enqueue(job)
def shutdown(self):
"""
Stop the scheduler, cancelling any ongoing jobs.
"""
logger.debug("Early shutdown: %s", self)
self.shutdownFlag.set()
# Internal Functions
def run(self):
"""
Thread entry point.
Run the job scheduler, which will spin up machines
and ensure they are cleaned up.
In the event of failure, all remaining workers are given
some time to clean up, with a forced clean up through the
machine provider.
"""
logger.info("Scheduler Launch: %s", self)
try:
self.scheduler_loop()
except Exception as e:
logger.error("Scheduler Failure: %s", str(e))
logger.error(traceback.format_exc())
self.exception = e
logger.info("Scheduler Shutdown: %s", self)
self.shutdownFlag.set()
for worker in self.workers:
try:
self.worker_cleanup(worker, timeout=self.config['joinTimeout'])
except Exception as e:
logger.error("Worker Cleanup Failure: %s", str(e))
logger.error(traceback.format_exc())
self.create_provider().cleanup_machines()
def scheduler_loop(self):
"""
The scheduler loop, which manages job and worker life
cycles.
"""
while not self.shutdownFlag.is_set():
# Check for completed jobs, adding new jobs as pending
for job in self.completed.iter():
self.job_cleanup(job)
# Promote pending jobs to ready
pending = []
for job in self.pending.iter():
pending.extend(self.job_prepare(job))
for job in pending:
self.add_job(job, True)
# Check for any finished workers
for worker in self.workers:
if not worker.is_alive():
self.worker_cleanup(worker)
# Rate limiting
sleep(self.config['loopDelay'])
# Launch a new worker if not at limit
while len(self.workers) < self.config['maxWorkers']:
job = self.ready.dequeue()
if not job:
break
self.worker_launch(job)
# Job Functions
def job_prepare(self, job):
"""
Prepare the job for launch. Calls the prepare function to
determine an initial job state and acts accordingly.
Returns a list of jobs to place on the pending queue.
"""
job.prepare()
if job.state == JobState.Pending:
return [job]
elif job.state == JobState.Ready:
self.ready.enqueue(job)
else:
self.completed.enqueue(job)
return []
def job_cleanup(self, job):
"""
Handle job termination. Ensures the job ended with a valid state.
"""
if job.exception:
logger.error("Job %s failed, raising exception", job)
raise job.exception
for j in job.cleanup():
self.add_job(j)
# Worker Functions
def worker_launch(self, job):
"""
Launch a worker, with all necessary callbacks.
"""
w = Worker(job, self.worker_cycle_job,
self.create_provider(), self.shutdownFlag)
self.workers.append(w)
logger.info("Giving %s: %s", w, job)
w.start()
def worker_cleanup(self, worker, timeout=None):
"""
Handle worker termination, checking for infrastructure failures
and shutting down the scheduler if sufficiently troublesome.
"""
worker.join(timeout=timeout)
if worker.is_alive():
raise Exception("Failed to join worker: %s" % worker)
self.workers.remove(worker)
if worker.exception:
logger.error("%s failed, raising exception", worker)
raise worker.exception
if worker.duration < self.config['minWorkerDuration']:
logger.warning("%s had a short duration: %f", worker, worker.dur)
self.failureRate += 1
if worker.job:
logger.warning("%s ended without releasing job", worker)
if worker.job.state == JobState.Ready:
worker.job.reset()
self.pending.enqueue(worker.job)
else:
self.completed.enqueue(worker.job)
def worker_cycle_job(self, worker, job):
"""
Callback for a worker to cycle a job.
"""
logger.info("%s returned: %s", worker, job)
self.completed.enqueue(job)
if self.shutdownFlag.is_set():
job = None
else:
job = self.ready.dequeue(timeout=self.config['readyTimeout'])
logger.info("Giving %s: %s", worker, job)
return job
def __repr__(self):
return self.name
| # Standard Python libraries
from threading import Thread, Event
from time import sleep
from logging import getLogger
import traceback
# Package libraries
from jackhammer.worker import Worker
from jackhammer.job import Job, JobState
from jackhammer.queue import JobQueue
logger = getLogger("jackhammer")
class Scheduler(Thread):
"""
Scheduler to launch and manage workers.
Uses very basic scheduling behaviour with limited understanding
of dependencies. Complicated job structures may bring it to
failure.
TODO: Consider moving job cleanup to worker thread callback.
"""
def __init__(self, create_provider, config):
Thread.__init__(self)
self.name = "jackhammer.scheduler"
self.exception = None
# Threading
self.shutdownFlag = Event()
self.pending = JobQueue()
self.ready = JobQueue()
self.completed = JobQueue()
# State
self.workers = []
self.create_provider = create_provider
self.config = config
self.failureRate = 0
def add_job(self, job, quiet=False):
"""
Add a new job to the pending queue.
The job must have a corresponding pending state.
"""
if not quiet:
logger.debug("Adding job: %s", job)
assert job.state == JobState.Pending, "Attempt to add non-pending job"
self.pending.enqueue(job)
def shutdown(self):
"""
Stop the scheduler, cancelling any ongoing jobs.
"""
logger.debug("Early shutdown: %s", self)
self.shutdownFlag.set()
# Internal Functions
def run(self):
"""
Thread entry point.
Run the job scheduler, which will spin up machines
and ensure they are cleaned up.
In the event of failure, all remaining workers are given
some time to clean up, with a forced clean up through the
machine provider.
"""
logger.info("Scheduler Launch: %s", self)
try:
self.scheduler_loop()
except Exception as e:
logger.error("Scheduler Failure: %s", str(e))
logger.error(traceback.format_exc())
self.exception = e
logger.info("Scheduler Shutdown: %s", self)
self.shutdownFlag.set()
for worker in self.workers:
try:
self.worker_cleanup(worker, timeout=self.config['joinTimeout'])
except Exception as e:
logger.error("Worker Cleanup Failure: %s", str(e))
logger.error(traceback.format_exc())
self.create_provider().cleanup_machines()
def scheduler_loop(self):
"""
The scheduler loop, which manages job and worker life
cycles.
"""
while not self.shutdownFlag.is_set():
# Check for completed jobs, adding new jobs as pending
for job in self.completed.iter():
self.job_cleanup(job)
# Promote pending jobs to ready
pending = []
for job in self.pending.iter():
pending.extend(self.job_prepare(job))
for job in pending:
self.add_job(job, True)
# Check for any finished workers
for worker in self.workers:
if not worker.is_alive():
self.worker_cleanup(worker)
# Rate limiting
sleep(self.config['loopDelay'])
# Launch a new worker if not at limit
while len(self.workers) < self.config['maxWorkers']:
job = self.ready.dequeue()
if not job:
break
self.worker_launch(job)
# Job Functions
def job_prepare(self, job):
"""
Prepare the job for launch. Calls the prepare function to
determine an initial job state and acts accordingly.
Returns a list of jobs to place on the pending queue.
"""
job.prepare()
if job.state == JobState.Pending:
return [job]
elif job.state == JobState.Ready:
self.ready.enqueue(job)
else:
self.completed.enqueue(job)
return []
def job_cleanup(self, job):
"""
Handle job termination. Ensures the job ended with a valid state.
"""
if job.exception:
logger.error("Job %s failed, raising exception", job)
raise job.exception
for j in job.cleanup():
self.add_job(j)
# Worker Functions
def worker_launch(self, job):
"""
Launch a worker, with all necessary callbacks.
"""
w = Worker(job, self.worker_cycle_job,
self.create_provider(), self.shutdownFlag)
self.workers.append(w)
logger.info("Giving %s: %s", w, job)
w.start()
def worker_cleanup(self, worker, timeout=None):
"""
Handle worker termination, checking for infrastructure failures
and shutting down the scheduler if sufficiently troublesome.
"""
worker.join(timeout=timeout)
if worker.is_alive():
raise Exception("Failed to join worker: %s" % worker)
self.workers.remove(worker)
if worker.exception:
logger.error("%s failed, raising exception", worker)
raise worker.exception
if worker.duration < self.config['minWorkerDuration']:
logger.warning("%s had a short duration: %f", worker, worker.dur)
self.failureRate += 1
if worker.job:
logger.warning("%s ended without releasing job", worker)
if worker.job.state == JobState.Ready:
worker.job.reset()
self.pending.enqueue(worker.job)
else:
self.completed.enqueue(worker.job)
def worker_cycle_job(self, worker, job):
"""
Callback for a worker to cycle a job.
"""
logger.info("%s returned: %s", worker, job)
self.completed.enqueue(job)
if self.shutdownFlag.is_set():
job = None
else:
job = self.ready.dequeue(timeout=self.config['readyTimeout'])
logger.info("Giving %s: %s", worker, job)
return job
def __repr__(self):
return self.name | en | 0.908507 | # Standard Python libraries # Package libraries Scheduler to launch and manage workers. Uses very basic scheduling behaviour with limited understanding of dependencies. Complicated job structures may bring it to failure. TODO: Consider moving job cleanup to worker thread callback. # Threading # State Add a new job to the pending queue. The job must have a corresponding pending state. Stop the scheduler, cancelling any ongoing jobs. # Internal Functions Thread entry point. Run the job scheduler, which will spin up machines and ensure they are cleaned up. In the event of failure, all remaining workers are given some time to clean up, with a forced clean up through the machine provider. The scheduler loop, which manages job and worker life cycles. # Check for completed jobs, adding new jobs as pending # Promote pending jobs to ready # Check for any finished workers # Rate limiting # Launch a new worker if not at limit # Job Functions Prepare the job for launch. Calls the prepare function to determine an initial job state and acts accordingly. Returns a list of jobs to place on the pending queue. Handle job termination. Ensures the job ended with a valid state. # Worker Functions Launch a worker, with all necessary callbacks. Handle worker termination, checking for infrastructure failures and shutting down the scheduler if sufficiently troublesome. Callback for a worker to cycle a job. | 3.092266 | 3 |
train/multi_sh.py | mepear/flow | 1 | 6612498 | import multiprocessing
import os


def sh_script(idx):
    """Run the correlation-plot shell script for checkpoint *idx*.

    os.system blocks until bash exits, so each pool worker is occupied
    for the full duration of its script.
    """
    os.system('bash train/plot_correlation.sh {}'.format(idx))


if __name__ == '__main__':
    # Guard is required for multiprocessing on spawn-start platforms
    # (Windows, macOS default): child processes re-import this module,
    # and an unguarded Pool(10) here would recurse into new pools.
    pool = multiprocessing.Pool(10)
    for ckpt in range(100):
        pool.apply_async(sh_script, (ckpt,))
    pool.close()
    pool.join()
import os
def sh_script(idx):
os.system('bash train/plot_correlation.sh {}'.format(idx))
pool = multiprocessing.Pool(10)
for ckpt in range(100):
pool.apply_async(sh_script, (ckpt,))
pool.close()
pool.join() | none | 1 | 2.3812 | 2 | |
src/py3helpers/scripts/methyl_bed_kmer_analysis.py | adbailey4/py3helpers | 0 | 6612499 | #!/usr/bin/env python
"""Generate kmer counts from a methyl bed file given a reference"""
########################################################################
# File: methyl_bed_kmer_analysis.py
# executable: methyl_bed_kmer_analysis.py
#
# Author: <NAME>
# History: 06/02/19 Created
########################################################################
import os
import pickle
import sys
from argparse import ArgumentParser
from collections import Counter
from py3helpers.seq_tools import (ReferenceHandler, ReverseComplement,
count_all_sequence_kmers)
from py3helpers.utils import time_it
def comma_separated_list(s):
    """argparse ``type`` helper: split a comma-separated string into tokens.

    Surrounding whitespace is stripped before splitting; the tokens
    themselves are returned verbatim.
    """
    return s.strip().split(",")
def parse_args():
    """Parse command-line arguments for the kmer-analysis script.

    Required: --methyl_bed, --reference, --output.
    Optional: --check_base (verify bed bases against the reference),
    --kmer_length (default 5), and the two comma-separated filter
    ranges, which arrive as lists of strings (converted to float in
    main()).
    """
    parser = ArgumentParser(description=__doc__)
    # required arguments
    parser.add_argument('--methyl_bed', '-m', action='store',
                        dest='methyl_bed', required=True,
                        type=str, default=None,
                        help="path methyl bed file")
    parser.add_argument('--reference', '-r', action='store',
                        dest='reference', required=True,
                        type=str, default=None,
                        help="path to reference sequence")
    parser.add_argument('--output', '-o', action='store',
                        dest='output', required=True,
                        type=str, default=None,
                        help="path to output directory")
    # optional arguments
    parser.add_argument('--check_base', '-c', action='store',
                        dest='check_base', required=False,
                        type=str, default=None,
                        help="If argument is passed in, will confirm that all bases in bed file match in reference")
    parser.add_argument('--kmer_length', '-k', action='store',
                        dest='kmer_length', required=False,
                        type=int, default=5,
                        help="Set kmer length. Default: 5")
    # comma_separated_list yields ["min", "max"] strings, not floats
    parser.add_argument('--filter_by_coverage', action='store',
                        dest='filter_by_coverage', required=False,
                        type=comma_separated_list, default=None,
                        help="Pass in a min and max value for coverage")
    parser.add_argument('--filter_by_percentage', action='store',
                        dest='filter_by_percentage', required=False,
                        type=comma_separated_list, default=None,
                        help="A low max and high min value for methylation percentage")
    args = parser.parse_args()
    return args
def parse_methyl_bed(path_to_bed):
    """Iterate over a 9+2 methyl bed file, yielding one parsed record per line.

    :param path_to_bed: path to the bedMethyl file
    :return: generator of 11-tuples
        (chromosome, start, stop, name, coverage, strand,
         start2, stop2, color, coverage2, percentage)
        with the positional and count fields converted to int.
    """
    with open(path_to_bed, 'r') as handle:
        for record in handle:
            (chrom, start1, stop1, name, cov1, strand,
             start2, stop2, color, cov2, pct) = record.split()
            yield (chrom, int(start1), int(stop1), name, int(cov1), strand,
                   int(start2), int(stop2), color, int(cov2), int(pct))
class FilterBed(object):
    """Composable row filters for methyl bed records.

    Configure one or more criteria (coverage, percentage), register the
    chosen predicate methods with :meth:`chain_logic`, then use
    :meth:`function` as the combined predicate: it accepts a record only
    when every chained predicate does.
    """

    def __init__(self):
        self.coverage_filter = False
        self.coverage_params = None
        self.percentage_filter = False
        self.percentage_params = None
        self.strand_filter = False
        self.strand_params = None
        self.chromosome_filter = False
        self.chromosome_params = None
        self.position_filter = False
        self.position_params = None
        self.filters = []

    @staticmethod
    def return_true(*args):
        """Predicate that accepts every record (the identity filter)."""
        return True

    def set_filter_by_coverage(self, *args):
        """Store (min, max) coverage bounds and mark the filter active."""
        self.coverage_filter = True
        self.coverage_params = list(args)

    def filter_by_coverage_min_max(self, chromosome, start, stop, strand, coverage, percentage):
        """True when coverage lies inside the configured [min, max] range."""
        low, high = self.coverage_params
        return low <= coverage <= high

    def filter_by_coverage_min_min_max_max(self, chromosome, start, stop, strand, coverage, percentage):
        """True when coverage lies at or outside the configured bounds."""
        low, high = self.coverage_params
        return coverage <= low or coverage >= high

    def set_filter_by_percentage(self, *args):
        """Store (min, max) percentage bounds and mark the filter active."""
        self.percentage_filter = True
        self.percentage_params = list(args)

    def filter_by_percentage_min_max(self, chromosome, start, stop, strand, coverage, percentage):
        """True when percentage lies inside the configured [min, max] range."""
        low, high = self.percentage_params
        return low <= percentage <= high

    def filter_by_percentage_min_min_max_max(self, chromosome, start, stop, strand, coverage, percentage):
        """True when percentage lies at or outside the configured bounds."""
        low, high = self.percentage_params
        return percentage <= low or percentage >= high

    def chain_logic(self, *args):
        """Append the given predicate callables to the active chain."""
        self.filters.extend(args)

    def function(self, chromosome, start, stop, strand, coverage, percentage):
        """Combined predicate: logical AND over all chained filters.

        An empty chain accepts everything.
        """
        return all(
            check(chromosome, start, stop, strand, coverage, percentage)
            for check in self.filters
        )
def get_kmer_counts_from_reference_given_bed(reference, bed_file, k=5, param_filter=FilterBed.return_true,
                                             check_base=None):
    """Count reference kmers covering the positions listed in a bed file.

    :param reference: path to the reference sequence (opened via ReferenceHandler)
    :param bed_file: path to a 9+2 methyl bed file
    :param k: kmer length
    :param param_filter: predicate(chromosome, start, stop, strand, coverage,
        percentage) deciding whether a bed row is counted; defaults to
        accepting everything
    :param check_base: if given, assert each bed row's reference base equals
        this base (reverse-complemented on the minus strand)
    :return: collections.Counter mapping kmer -> count
    """
    ref_handler = ReferenceHandler(reference)
    kmers = Counter()
    counter = 0
    for chromosome, start, stop, _, _, strand, _, _, _, coverage, percentage in parse_methyl_bed(bed_file):
        if param_filter(chromosome, start, stop, strand, coverage, percentage):
            # Extend the window by k-1 on both sides (clamped to the
            # chromosome bounds) so every kmer overlapping the bed
            # position is included.
            block_start = max(0, start - (k - 1))
            block_end = min(ref_handler.get_chr_sequence_length(chromosome), stop + (k - 1))
            seq = ref_handler.get_sequence(chromosome,
                                           block_start,
                                           block_end)
            # Check if base in bed file matches the reference sequence
            if check_base is not None:
                base = ref_handler.get_sequence(chromosome, start, stop)
                if strand == "-":
                    # Minus-strand rows are compared against the
                    # complement of the requested base.
                    this_base = ReverseComplement().complement(check_base)
                else:
                    this_base = check_base
                assert this_base == base, \
                    "Check base is not the same as the one from the reference. " \
                    "{} != {}. {}".format(this_base, base, [chromosome, start, stop, strand, coverage, percentage])
            # rev_comp_only counts the reverse-complement kmers for
            # minus-strand rows — see count_all_sequence_kmers.
            kmers += count_all_sequence_kmers(seq, k=k, rev_comp_only=(strand == "-"))
            # Print some updates because this takes a long time
            counter += 1
            if counter % 10000 == 0:
                print(".", end="")
                sys.stdout.flush()
            if counter % 1000000 == 0:
                print(counter)
    return kmers
def main():
    """Entry point: build the configured filters, count kmers, pickle result.

    The percentage filter keeps rows at or outside its bounds (extremes),
    while the coverage filter keeps rows inside its bounds; the chosen
    predicates reflect that asymmetry. The resulting Counter is printed
    and pickled to <output>/kmer_counts.pkl.
    """
    args = parse_args()
    filter_bed = FilterBed()
    filters = []
    if args.filter_by_percentage is not None:
        # comma_separated_list delivers strings; convert to float here.
        filter_bed.set_filter_by_percentage(*[float(x) for x in args.filter_by_percentage])
        filters.append(filter_bed.filter_by_percentage_min_min_max_max)
    if args.filter_by_coverage is not None:
        filter_bed.set_filter_by_coverage(*[float(x) for x in args.filter_by_coverage])
        filters.append(filter_bed.filter_by_coverage_min_max)
    filter_bed.chain_logic(*filters)
    kmers = get_kmer_counts_from_reference_given_bed(args.reference, args.methyl_bed,
                                                     k=args.kmer_length,
                                                     param_filter=filter_bed.function,
                                                     check_base=args.check_base)
    print(kmers)
    with open(os.path.join(args.output, "kmer_counts.pkl"), 'wb') as fh:
        pickle.dump(kmers, fh)
if __name__ == '__main__':
_, time = time_it(main)
print(time, "seconds")
| #!/usr/bin/env python
"""Generate kmer counts from a methyl bed file given a reference"""
########################################################################
# File: methyl_bed_kmer_analysis.py
# executable: methyl_bed_kmer_analysis.py
#
# Author: <NAME>
# History: 06/02/19 Created
########################################################################
import os
import pickle
import sys
from argparse import ArgumentParser
from collections import Counter
from py3helpers.seq_tools import (ReferenceHandler, ReverseComplement,
count_all_sequence_kmers)
from py3helpers.utils import time_it
def comma_separated_list(s):
tokens = s.strip().split(",")
return tokens
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--methyl_bed', '-m', action='store',
dest='methyl_bed', required=True,
type=str, default=None,
help="path methyl bed file")
parser.add_argument('--reference', '-r', action='store',
dest='reference', required=True,
type=str, default=None,
help="path to reference sequence")
parser.add_argument('--output', '-o', action='store',
dest='output', required=True,
type=str, default=None,
help="path to output directory")
parser.add_argument('--check_base', '-c', action='store',
dest='check_base', required=False,
type=str, default=None,
help="If argument is passed in, will confirm that all bases in bed file match in reference")
parser.add_argument('--kmer_length', '-k', action='store',
dest='kmer_length', required=False,
type=int, default=5,
help="Set kmer length. Default: 5")
parser.add_argument('--filter_by_coverage', action='store',
dest='filter_by_coverage', required=False,
type=comma_separated_list, default=None,
help="Pass in a min and max value for coverage")
parser.add_argument('--filter_by_percentage', action='store',
dest='filter_by_percentage', required=False,
type=comma_separated_list, default=None,
help="A low max and high min value for methylation percentage")
args = parser.parse_args()
return args
def parse_methyl_bed(path_to_bed):
"""Parse a 9+2 methyl bed file and yield each field
:param path_to_bed: path to
"""
with open(path_to_bed, 'r') as fh:
for line in fh:
chromosome, start1, stop1, name, coverage1, strand, start2, stop2, color, coverage2, percentage \
= line.split()
yield (chromosome, int(start1), int(stop1), name, int(coverage1), strand, int(start2), int(stop2),
color, int(coverage2), int(percentage))
class FilterBed(object):
"""Easy class to allow for filtering out bed rows by specific parameters"""
def __init__(self):
self.coverage_filter = False
self.coverage_params = None
self.percentage_filter = False
self.percentage_params = None
self.strand_filter = False
self.strand_params = None
self.chromosome_filter = False
self.chromosome_params = None
self.position_filter = False
self.position_params = None
self.filters = []
@staticmethod
def return_true(*args):
"""_return_true function to keep easy flow through get_kmer_counts_from_reference_given_bed
:param args: takes in an assortemnt of argumetns and returns true
:return:
"""
return True
def set_filter_by_coverage(self, *args):
"""Set filter by coverage parameters"""
self.coverage_filter = True
self.coverage_params = [*args]
def filter_by_coverage_min_max(self, chromosome, start, stop, strand, coverage, percentage):
"""Return true if coverage is between a min and max"""
if self.coverage_params[0] <= coverage <= self.coverage_params[1]:
return True
return False
def filter_by_coverage_min_min_max_max(self, chromosome, start, stop, strand, coverage, percentage):
"""Return true if coverage is between a min and max"""
if self.coverage_params[0] >= coverage or coverage >= self.coverage_params[1]:
return True
return False
def set_filter_by_percentage(self, *args):
"""Set filter by coverage parameters"""
self.percentage_filter = True
self.percentage_params = [*args]
def filter_by_percentage_min_max(self, chromosome, start, stop, strand, coverage, percentage):
"""Return true if coverage is between a min and max"""
if self.percentage_params[0] <= percentage <= self.percentage_params[1]:
return True
return False
def filter_by_percentage_min_min_max_max(self, chromosome, start, stop, strand, coverage, percentage):
"""Return true if coverage is between a min and max"""
if self.percentage_params[0] >= percentage or percentage >= self.percentage_params[1]:
return True
return False
def chain_logic(self, *args):
for x in args:
self.filters.append(x)
def function(self, chromosome, start, stop, strand, coverage, percentage):
start1 = True
for filter1 in self.filters:
start1 = start1 and filter1(chromosome, start, stop, strand, coverage, percentage)
return start1
def get_kmer_counts_from_reference_given_bed(reference, bed_file, k=5, param_filter=FilterBed.return_true,
check_base=None):
"""Generate kmer counts covering positions in a bed file"""
ref_handler = ReferenceHandler(reference)
kmers = Counter()
counter = 0
for chromosome, start, stop, _, _, strand, _, _, _, coverage, percentage in parse_methyl_bed(bed_file):
if param_filter(chromosome, start, stop, strand, coverage, percentage):
block_start = max(0, start - (k - 1))
block_end = min(ref_handler.get_chr_sequence_length(chromosome), stop + (k - 1))
seq = ref_handler.get_sequence(chromosome,
block_start,
block_end)
# Check if base in bed file matches the reference sequence
if check_base is not None:
base = ref_handler.get_sequence(chromosome, start, stop)
if strand == "-":
this_base = ReverseComplement().complement(check_base)
else:
this_base = check_base
assert this_base == base, \
"Check base is not the same as the one from the reference. " \
"{} != {}. {}".format(this_base, base, [chromosome, start, stop, strand, coverage, percentage])
kmers += count_all_sequence_kmers(seq, k=k, rev_comp_only=(strand == "-"))
# Print some updates because this takes a long time
counter += 1
if counter % 10000 == 0:
print(".", end="")
sys.stdout.flush()
if counter % 1000000 == 0:
print(counter)
return kmers
def main():
args = parse_args()
filter_bed = FilterBed()
filters = []
if args.filter_by_percentage is not None:
filter_bed.set_filter_by_percentage(*[float(x) for x in args.filter_by_percentage])
filters.append(filter_bed.filter_by_percentage_min_min_max_max)
if args.filter_by_coverage is not None:
filter_bed.set_filter_by_coverage(*[float(x) for x in args.filter_by_coverage])
filters.append(filter_bed.filter_by_coverage_min_max)
filter_bed.chain_logic(*filters)
kmers = get_kmer_counts_from_reference_given_bed(args.reference, args.methyl_bed,
k=args.kmer_length,
param_filter=filter_bed.function,
check_base=args.check_base)
print(kmers)
with open(os.path.join(args.output, "kmer_counts.pkl"), 'wb') as fh:
pickle.dump(kmers, fh)
if __name__ == '__main__':
_, time = time_it(main)
print(time, "seconds")
| en | 0.64307 | #!/usr/bin/env python Generate kmer counts from a methyl bed file given a reference ######################################################################## # File: methyl_bed_kmer_analysis.py # executable: methyl_bed_kmer_analysis.py # # Author: <NAME> # History: 06/02/19 Created ######################################################################## # required arguments Parse a 9+2 methyl bed file and yield each field :param path_to_bed: path to Easy class to allow for filtering out bed rows by specific parameters _return_true function to keep easy flow through get_kmer_counts_from_reference_given_bed :param args: takes in an assortemnt of argumetns and returns true :return: Set filter by coverage parameters Return true if coverage is between a min and max Return true if coverage is between a min and max Set filter by coverage parameters Return true if coverage is between a min and max Return true if coverage is between a min and max Generate kmer counts covering positions in a bed file # Check if base in bed file matches the reference sequence # Print some updates because this takes a long time | 2.739591 | 3 |
graveyard/replace_year.py | fundevogel/scribus-toolbox | 1 | 6612500 | <reponame>fundevogel/scribus-toolbox
#! /usr/bin/python
# ~*~ coding=utf-8 ~*~
##
# Replaces all instances in a file with the current year
#
# Usage:
# python document.sla --pattern %%YEAR%%
#
# License: MIT
# (c) <NAME>
##
import os
import re
import sys
import argparse
import datetime
import fileinput
parser = argparse.ArgumentParser(
description="Replaces all instances in a file with the current year"
)
parser.add_argument(
"files", nargs="*", default=None, help="File(s) that should be processed",
)
parser.add_argument(
"--pattern", default=None,
help="Pattern to be replaced"
)
def replace(file_name, pattern):
    """Replace every regex *pattern* match in *file_name* with the current year.

    The file is rewritten in place: with inplace=True, fileinput redirects
    stdout into the file, so each (possibly substituted) line is written
    back via sys.stdout.

    :param file_name: path of the file to rewrite
    :param pattern: regular expression whose matches are replaced
    """
    path = os.path.abspath(file_name)
    year = str(datetime.datetime.now().year)
    # The context manager guarantees the in-place rewrite is finalized
    # (and stdout restored) even if a line raises during substitution;
    # the original bare close() leaked the handle on exceptions.
    with fileinput.input(path, inplace=True) as stream:
        for line in stream:
            sys.stdout.write(re.sub(pattern, year, line))
if __name__ == "__main__":
args = parser.parse_args()
if args.pattern is None:
print('No pattern specified, exiting ..')
sys.exit()
for file in args.files:
replace(file, args.pattern)
| #! /usr/bin/python
# ~*~ coding=utf-8 ~*~
##
# Replaces all instances in a file with the current year
#
# Usage:
# python document.sla --pattern %%YEAR%%
#
# License: MIT
# (c) <NAME>
##
import os
import re
import sys
import argparse
import datetime
import fileinput
parser = argparse.ArgumentParser(
description="Replaces all instances in a file with the current year"
)
parser.add_argument(
"files", nargs="*", default=None, help="File(s) that should be processed",
)
parser.add_argument(
"--pattern", default=None,
help="Pattern to be replaced"
)
def replace(file_name, pattern):
path = os.path.abspath(file_name)
file = fileinput.input(path, inplace=True)
now = datetime.datetime.now()
for line in file:
line = re.sub(pattern, str(now.year), line)
sys.stdout.write(line)
file.close()
if __name__ == "__main__":
args = parser.parse_args()
if args.pattern is None:
print('No pattern specified, exiting ..')
sys.exit()
for file in args.files:
replace(file, args.pattern) | en | 0.452259 | #! /usr/bin/python # ~*~ coding=utf-8 ~*~ ## # Replaces all instances in a file with the current year # # Usage: # python document.sla --pattern %%YEAR%% # # License: MIT # (c) <NAME> ## | 3.823957 | 4 |
lightly/loss/my_ntx_ent_loss.py | tibe97/thesis-self-supervised-learning | 0 | 6612501 | """ Contrastive Loss Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from numpy import add
import torch
from torch import nn
from lightly.loss.memory_bank import MemoryBankModule
from lightly.models.modules.my_nn_memory_bank import MyNNMemoryBankModule
import ipdb
class MyNTXentLoss(MemoryBankModule):
"""Implementation of the Contrastive Cross Entropy Loss.
This implementation follows the SimCLR[0] paper. If you enable the memory
bank by setting the `memory_bank_size` value > 0 the loss behaves like
the one described in the MoCo[1] paper.
[0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
[1] MoCo, 2020, https://arxiv.org/abs/1911.05722
Attributes:
temperature:
Scale logits by the inverse of the temperature.
memory_bank_size:
Number of negative samples to store in the memory bank.
Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
Raises:
ValueError if abs(temperature) < 1e-8 to prevent divide by zero.
Examples:
>>> # initialize loss function without memory bank
>>> loss_fn = NTXentLoss(memory_bank_size=0)
>>>
>>> # generate two random transforms of images
>>> t0 = transforms(images)
>>> t1 = transforms(images)
>>>
>>> # feed through SimCLR or MoCo model
>>> batch = torch.cat((t0, t1), dim=0)
>>> output = model(batch)
>>>
>>> # calculate loss
>>> loss = loss_fn(output)
"""
def __init__(self,
temperature: float = 0.2,
num_negatives: int = 256,
memory_bank_size: int = 0,
add_swav_loss: bool = False):
super(MyNTXentLoss, self).__init__(size=memory_bank_size)
self.temperature = temperature
self.cross_entropy = torch.nn.CrossEntropyLoss(reduction="mean")
self.eps = 1e-8
self.num_negatives = num_negatives
self.softmax = nn.Softmax(dim=1)
self.add_swav_loss = add_swav_loss
if abs(self.temperature) < self.eps:
raise ValueError('Illegal temperature: abs({}) < 1e-8'
.format(self.temperature))
def forward(self,
out0: torch.Tensor,
out1: torch.Tensor,
q0_assign: torch.Tensor,
q1: torch.Tensor,
negatives: torch.Tensor):
"""Forward pass through Contrastive Cross-Entropy Loss.
If used with a memory bank, the samples from the memory bank are used
as negative examples. Otherwise, within-batch samples are used as
negative samples.
Args:
out0:
Output projections of the first set of transformed images.
Shape: (batch_size, embedding_size)
out1:
Output projections of the second set of transformed images.
Shape: (batch_size, embedding_size)
q0_assign:
Cluster assignments of the original samples used to compute nearest neighbors
Used for SwAV loss (optional)
q1:
Predicted cluster assignement directly taken from the output of the prototype
layer of the network.
Used for SwAV loss (optional)
sim_negatives:
Computed similarities between the nearest neighbors and the negatives
sampled with hard negative mining. We just return the similarities because
it's all we need to compute the loss.
Returns:
Contrastive Cross Entropy Loss value.
"""
device = out0.device
batch_size, _ = out0.shape
# normalize the output to length 1
out0 = torch.nn.functional.normalize(out0, dim=1)
out1 = torch.nn.functional.normalize(out1, dim=1)
# We use the cosine similarity, which is a dot product (einsum) here,
# as all vectors are already normalized to unit length.
# Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.
if negatives is not None:
# use negatives from memory bank
#negatives = torch.nn.functional.normalize(negatives, dim=1)
#ipdb.set_trace()
negatives = torch.transpose(negatives, 1, 2).to(device) # transpose to (batch_size, embedding_size, num_negatives)
# sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity
# of the i-th sample in the batch to its positive pair
sim_pos = torch.einsum('nc,nc->n', out0, out1).unsqueeze(-1).to(device)
# Compute sim_neg with negatives. Problem: for each positive there are different negatives.
# We can't use the same einsum. We can use batch matrix multiplication einsum:
# torch.einsum('i1c,icm->i1m', [a, b])
# Each positive becomes a sample indexed along "i", while the negatives for the i-th positive
# are stacked in a matrix at the i-th index. At the end we have to reshape the result into a vector
# We also have to prepare the tensor of negatives accordingly
#sim_neg = torch.einsum('nc,ck->nk', out0, negatives)
# n1c, ncm -> n1m
sim_neg = torch.einsum('nzc,ncm->nzm', torch.transpose(torch.unsqueeze(out0, 0), 0, 1), negatives)
sim_neg = torch.squeeze(sim_neg, 1)
# set the labels to the first "class", i.e. sim_pos,
# so that it is maximized in relation to sim_neg
logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature
labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)
else:
# use other samples from batch as negatives
output = torch.cat((out0, out1), axis=0).to(device)
# the logits are the similarity matrix divided by the temperature
logits = torch.einsum('nc,mc->nm', output, output) / self.temperature
# We need to removed the similarities of samples to themselves
logits = logits[~torch.eye(2*batch_size, dtype=torch.bool, device=out0.device)].view(2*batch_size, -1)
# The labels point from a sample in out_i to its equivalent in out_(1-i)
labels = torch.arange(batch_size, device=device, dtype=torch.long)
labels = torch.cat([labels + batch_size - 1, labels])
contrastive_loss = self.cross_entropy(logits, labels)
loss = contrastive_loss
swav_loss = None
alpha = torch.tensor(1.0) # swav_loss influence on the overall loss
if self.add_swav_loss: # and negatives is not None:
p1 = self.softmax(q1 / self.temperature)
swav_loss = - torch.mean(torch.sum(q0_assign * torch.log(p1), dim=1))
loss += alpha * swav_loss
return loss, swav_loss, contrastive_loss
| """ Contrastive Loss Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from numpy import add
import torch
from torch import nn
from lightly.loss.memory_bank import MemoryBankModule
from lightly.models.modules.my_nn_memory_bank import MyNNMemoryBankModule
import ipdb
class MyNTXentLoss(MemoryBankModule):
"""Implementation of the Contrastive Cross Entropy Loss.
This implementation follows the SimCLR[0] paper. If you enable the memory
bank by setting the `memory_bank_size` value > 0 the loss behaves like
the one described in the MoCo[1] paper.
[0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
[1] MoCo, 2020, https://arxiv.org/abs/1911.05722
Attributes:
temperature:
Scale logits by the inverse of the temperature.
memory_bank_size:
Number of negative samples to store in the memory bank.
Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
Raises:
ValueError if abs(temperature) < 1e-8 to prevent divide by zero.
Examples:
>>> # initialize loss function without memory bank
>>> loss_fn = NTXentLoss(memory_bank_size=0)
>>>
>>> # generate two random transforms of images
>>> t0 = transforms(images)
>>> t1 = transforms(images)
>>>
>>> # feed through SimCLR or MoCo model
>>> batch = torch.cat((t0, t1), dim=0)
>>> output = model(batch)
>>>
>>> # calculate loss
>>> loss = loss_fn(output)
"""
def __init__(self,
temperature: float = 0.2,
num_negatives: int = 256,
memory_bank_size: int = 0,
add_swav_loss: bool = False):
super(MyNTXentLoss, self).__init__(size=memory_bank_size)
self.temperature = temperature
self.cross_entropy = torch.nn.CrossEntropyLoss(reduction="mean")
self.eps = 1e-8
self.num_negatives = num_negatives
self.softmax = nn.Softmax(dim=1)
self.add_swav_loss = add_swav_loss
if abs(self.temperature) < self.eps:
raise ValueError('Illegal temperature: abs({}) < 1e-8'
.format(self.temperature))
def forward(self,
out0: torch.Tensor,
out1: torch.Tensor,
q0_assign: torch.Tensor,
q1: torch.Tensor,
negatives: torch.Tensor):
"""Forward pass through Contrastive Cross-Entropy Loss.
If used with a memory bank, the samples from the memory bank are used
as negative examples. Otherwise, within-batch samples are used as
negative samples.
Args:
out0:
Output projections of the first set of transformed images.
Shape: (batch_size, embedding_size)
out1:
Output projections of the second set of transformed images.
Shape: (batch_size, embedding_size)
q0_assign:
Cluster assignments of the original samples used to compute nearest neighbors
Used for SwAV loss (optional)
q1:
Predicted cluster assignement directly taken from the output of the prototype
layer of the network.
Used for SwAV loss (optional)
sim_negatives:
Computed similarities between the nearest neighbors and the negatives
sampled with hard negative mining. We just return the similarities because
it's all we need to compute the loss.
Returns:
Contrastive Cross Entropy Loss value.
"""
device = out0.device
batch_size, _ = out0.shape
# normalize the output to length 1
out0 = torch.nn.functional.normalize(out0, dim=1)
out1 = torch.nn.functional.normalize(out1, dim=1)
# We use the cosine similarity, which is a dot product (einsum) here,
# as all vectors are already normalized to unit length.
# Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.
if negatives is not None:
# use negatives from memory bank
#negatives = torch.nn.functional.normalize(negatives, dim=1)
#ipdb.set_trace()
negatives = torch.transpose(negatives, 1, 2).to(device) # transpose to (batch_size, embedding_size, num_negatives)
# sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity
# of the i-th sample in the batch to its positive pair
sim_pos = torch.einsum('nc,nc->n', out0, out1).unsqueeze(-1).to(device)
# Compute sim_neg with negatives. Problem: for each positive there are different negatives.
# We can't use the same einsum. We can use batch matrix multiplication einsum:
# torch.einsum('i1c,icm->i1m', [a, b])
# Each positive becomes a sample indexed along "i", while the negatives for the i-th positive
# are stacked in a matrix at the i-th index. At the end we have to reshape the result into a vector
# We also have to prepare the tensor of negatives accordingly
#sim_neg = torch.einsum('nc,ck->nk', out0, negatives)
# n1c, ncm -> n1m
sim_neg = torch.einsum('nzc,ncm->nzm', torch.transpose(torch.unsqueeze(out0, 0), 0, 1), negatives)
sim_neg = torch.squeeze(sim_neg, 1)
# set the labels to the first "class", i.e. sim_pos,
# so that it is maximized in relation to sim_neg
logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature
labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)
else:
# use other samples from batch as negatives
output = torch.cat((out0, out1), axis=0).to(device)
# the logits are the similarity matrix divided by the temperature
logits = torch.einsum('nc,mc->nm', output, output) / self.temperature
# We need to removed the similarities of samples to themselves
logits = logits[~torch.eye(2*batch_size, dtype=torch.bool, device=out0.device)].view(2*batch_size, -1)
# The labels point from a sample in out_i to its equivalent in out_(1-i)
labels = torch.arange(batch_size, device=device, dtype=torch.long)
labels = torch.cat([labels + batch_size - 1, labels])
contrastive_loss = self.cross_entropy(logits, labels)
loss = contrastive_loss
swav_loss = None
alpha = torch.tensor(1.0) # swav_loss influence on the overall loss
if self.add_swav_loss: # and negatives is not None:
p1 = self.softmax(q1 / self.temperature)
swav_loss = - torch.mean(torch.sum(q0_assign * torch.log(p1), dim=1))
loss += alpha * swav_loss
return loss, swav_loss, contrastive_loss
| en | 0.855401 | Contrastive Loss Functions # Copyright (c) 2020. Lightly AG and its affiliates. # All Rights Reserved Implementation of the Contrastive Cross Entropy Loss. This implementation follows the SimCLR[0] paper. If you enable the memory bank by setting the `memory_bank_size` value > 0 the loss behaves like the one described in the MoCo[1] paper. [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709 [1] MoCo, 2020, https://arxiv.org/abs/1911.05722 Attributes: temperature: Scale logits by the inverse of the temperature. memory_bank_size: Number of negative samples to store in the memory bank. Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536. Raises: ValueError if abs(temperature) < 1e-8 to prevent divide by zero. Examples: >>> # initialize loss function without memory bank >>> loss_fn = NTXentLoss(memory_bank_size=0) >>> >>> # generate two random transforms of images >>> t0 = transforms(images) >>> t1 = transforms(images) >>> >>> # feed through SimCLR or MoCo model >>> batch = torch.cat((t0, t1), dim=0) >>> output = model(batch) >>> >>> # calculate loss >>> loss = loss_fn(output) Forward pass through Contrastive Cross-Entropy Loss. If used with a memory bank, the samples from the memory bank are used as negative examples. Otherwise, within-batch samples are used as negative samples. Args: out0: Output projections of the first set of transformed images. Shape: (batch_size, embedding_size) out1: Output projections of the second set of transformed images. Shape: (batch_size, embedding_size) q0_assign: Cluster assignments of the original samples used to compute nearest neighbors Used for SwAV loss (optional) q1: Predicted cluster assignement directly taken from the output of the prototype layer of the network. Used for SwAV loss (optional) sim_negatives: Computed similarities between the nearest neighbors and the negatives sampled with hard negative mining. We just return the similarities because it's all we need to compute the loss. 
Returns: Contrastive Cross Entropy Loss value. # normalize the output to length 1 # We use the cosine similarity, which is a dot product (einsum) here, # as all vectors are already normalized to unit length. # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size. # use negatives from memory bank #negatives = torch.nn.functional.normalize(negatives, dim=1) #ipdb.set_trace() # transpose to (batch_size, embedding_size, num_negatives) # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity # of the i-th sample in the batch to its positive pair # Compute sim_neg with negatives. Problem: for each positive there are different negatives. # We can't use the same einsum. We can use batch matrix multiplication einsum: # torch.einsum('i1c,icm->i1m', [a, b]) # Each positive becomes a sample indexed along "i", while the negatives for the i-th positive # are stacked in a matrix at the i-th index. At the end we have to reshape the result into a vector # We also have to prepare the tensor of negatives accordingly #sim_neg = torch.einsum('nc,ck->nk', out0, negatives) # n1c, ncm -> n1m # set the labels to the first "class", i.e. sim_pos, # so that it is maximized in relation to sim_neg # use other samples from batch as negatives # the logits are the similarity matrix divided by the temperature # We need to removed the similarities of samples to themselves # The labels point from a sample in out_i to its equivalent in out_(1-i) # swav_loss influence on the overall loss # and negatives is not None: | 2.541593 | 3 |
cluster_submission/confidence_threshold.py | Sharkmas6/Diamond_ML | 0 | 6612502 | <filename>cluster_submission/confidence_threshold.py
import sys
import os
import joblib
from sklearn.neighbors import KNeighborsClassifier
from read_data import data, pd, np, plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import LocalOutlierFactor
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, plot_confusion_matrix
from scipy.stats import randint, expon, uniform
from xgboost import XGBClassifier
plt.style.use("ggplot")
class n_layer_dist:
def __init__(self, low, high, n_layers_range, dist=randint):
self.dist = dist
self.low, self.high = low, high
self.n_layers_dist = dist
self.n_layers_range = n_layers_range
def rvs(self, *args, **kwargs):
size = self.n_layers_dist.rvs(*self.n_layers_range, *args, **kwargs)
layers = self.dist.rvs(self.low, self.high, size=size, *args, **kwargs)
return tuple(layers)
# system arguments
i = int(sys.argv[1])
# get database
if i <= 5:
db_kind = "dials"
else:
db_kind = "3dii"
i = i - 5
datum = data[db_kind]
workdir = r"/path/to/workdir"
if not os.path.isdir(workdir):
os.mkdir(workdir)
print(f"DATBASE No. {i} -- DATABASE: {db_kind}")
# use simplified model names
models_names = ["RandomForest", "XGBoost", "KNeighbors", "SVC", "MLP"]
model_name = models_names[i-1]
# get working directories
db_workdir = os.path.join(workdir, db_kind, "confidence_threshold")
db_logfile = os.path.join(db_workdir, f'confidence_{model_name}.log')
if not os.path.isdir(os.path.join(workdir, db_kind)):
os.mkdir(os.path.join(workdir, db_kind))
if not os.path.isdir(os.path.join(workdir, db_kind, "confidence_threshold")):
os.mkdir(os.path.join(workdir, db_kind, "confidence_threshold"))
# prepare data
r_etc = ["RMERGE_I", "RMERGE_DIFF_I", "RMEAS_I", "RMEAS_DIFF_I", "RPIM_I", "RPIM_DIFF_I"]
x, y = datum.unpack(drop_col=["DATASET_id", "RESOLUTION_LOW", "RESOLUTION_HIGH", "SPACEGROUP", "SHELXC_CFOM"] + r_etc)
# construct pipelines
seed = 1
print(f"Using seed: {seed}")
scaler = StandardScaler
forest = Pipeline([("scaler", scaler()), ("clf", RandomForestClassifier(class_weight="balanced", random_state=seed))])
xgb = Pipeline([("scaler", scaler()), ("clf", XGBClassifier(class_weight="balanced", random_state=seed))])
kneighbors = Pipeline([("scaler", scaler()), ("clf", KNeighborsClassifier())])
svc = Pipeline([("scaler", scaler()), ("clf", SVC(class_weight="balanced", probability=True, random_state=seed))])
mlp = Pipeline([("scaler", scaler()), ("clf", MLPClassifier(random_state=seed, max_iter=1000))])
models = [forest, xgb, kneighbors, svc, mlp]
# create parameter searches
forest_params = {"clf__criterion": ["gini", "entropy"],
"clf__n_estimators": randint(100, 10000), # number of trees in forest
"clf__max_features": randint(2, len(x.columns)), # max number of features when splitting
"clf__min_samples_split": randint(2, 20 + 1), # min samples per node to induce split
"clf__max_depth": randint(5, 20 + 1), # max number of splits to do
"clf__min_samples_leaf": randint(1, 10 + 1), # min number of samples in a leaf; may set to 1 anyway
"clf__max_leaf_nodes": randint(10, 20 + 1)} # max number of leaves}
xgb_params = {"clf__n_estimators": randint(100, 10000),
"clf__max_depth": randint(5, 20 + 1),
"clf__min_child_weight": randint(5, 10 + 1),
"clf__colsample_bytree": uniform(2/len(x.columns), 1),
"clf__subsample": uniform(0.1, 1),
"clf__learning_rate": uniform(0.005, 0.3)}
kneighbors_params = {"clf__weights": ["uniform", "distance"],
"clf__n_neighbors": randint(5, 50)}
svc_params = {'clf__C': expon(scale=100),
'clf__gamma': expon(scale=.1),
'clf__kernel': ['rbf', "poly"]}
mlp_params = {"clf__alpha": 10.0 ** -np.arange(1, 7),
"clf__hidden_layer_sizes": n_layer_dist(100, 1000, [1, 5])}
models_params = [forest_params, xgb_params, kneighbors_params, svc_params, mlp_params]
# use randomised search for best possible performance
n_iter = 1000
forest_search = RandomizedSearchCV(forest, forest_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
xgb_search = RandomizedSearchCV(xgb, xgb_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
kneighbors_search = RandomizedSearchCV(kneighbors, kneighbors_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
svc_search = RandomizedSearchCV(svc, svc_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
mlp_search = RandomizedSearchCV(mlp, mlp_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
models_search = [forest_search, xgb_search, kneighbors_search, svc_search, mlp_search]
# choose wanted model based on sys.argv
model = models[i-1]
model_params = models_params[i-1]
model_search = models_search[i-1]
# take only relevant data
mask = x["DATASET_NAME"] == 1
x, y = x[mask], y[mask]
x = x.drop("DATASET_NAME", axis=1)
# drop NaN
mask_nan = x.isna().any(axis=1)
x, y = x[~mask_nan], y[~mask_nan]
# drop outliers
if db_kind == "dials":
mask = LocalOutlierFactor(contamination=0.4).fit_predict(x)
mask = mask == 1
x, y = x.loc[mask, :], y[mask]
# split train/test datasets
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.2, stratify=y, random_state=seed)
# run randomized search
model_search.fit(X_train, y_train)
joblib.dump(model_search, os.path.join(db_workdir, f'random_search_{model_name}.pkl'))
joblib.dump(model_search.best_estimator_, os.path.join(db_workdir, f'best_estimator_{model_name}.pkl'))
# get predicted values for classification report and confusion matrix
y_pred = model_search.predict(X_test)
report = classification_report(y_test, y_pred)
matrix = plot_confusion_matrix(model_search, X_test, y_test, normalize="all").confusion_matrix
plt.savefig(os.path.join(db_workdir, f'confusion_matrix_{model_name}'))
confus = pd.DataFrame(matrix, index=["Actual Negative", "Actual Positive"], columns=["Predicted Negative", "Predicted Positive"])
# prepare results
best_params = pd.Series(model_search.best_params_)
# store results
best_params.to_csv(os.path.join(db_workdir, f'best_params_{model_name}.csv'))
log = (f"> Dataset no. 1\n"
f"> Database: {db_kind}\n"
f"> Using model: {model_name}\n"
f"> Best parameters:\n{best_params}\n"
f"> Best training F1 score: {model_search.best_score_:.2%}"
f"> Classification report:\n{report}\n"
f"> Confusion matrix:\n{confus}")
with open(db_logfile, "w") as fhandle:
fhandle.write(log)
| <filename>cluster_submission/confidence_threshold.py
import sys
import os
import joblib
from sklearn.neighbors import KNeighborsClassifier
from read_data import data, pd, np, plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import LocalOutlierFactor
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, plot_confusion_matrix
from scipy.stats import randint, expon, uniform
from xgboost import XGBClassifier
plt.style.use("ggplot")
class n_layer_dist:
def __init__(self, low, high, n_layers_range, dist=randint):
self.dist = dist
self.low, self.high = low, high
self.n_layers_dist = dist
self.n_layers_range = n_layers_range
def rvs(self, *args, **kwargs):
size = self.n_layers_dist.rvs(*self.n_layers_range, *args, **kwargs)
layers = self.dist.rvs(self.low, self.high, size=size, *args, **kwargs)
return tuple(layers)
# system arguments
i = int(sys.argv[1])
# get database
if i <= 5:
db_kind = "dials"
else:
db_kind = "3dii"
i = i - 5
datum = data[db_kind]
workdir = r"/path/to/workdir"
if not os.path.isdir(workdir):
os.mkdir(workdir)
print(f"DATBASE No. {i} -- DATABASE: {db_kind}")
# use simplified model names
models_names = ["RandomForest", "XGBoost", "KNeighbors", "SVC", "MLP"]
model_name = models_names[i-1]
# get working directories
db_workdir = os.path.join(workdir, db_kind, "confidence_threshold")
db_logfile = os.path.join(db_workdir, f'confidence_{model_name}.log')
if not os.path.isdir(os.path.join(workdir, db_kind)):
os.mkdir(os.path.join(workdir, db_kind))
if not os.path.isdir(os.path.join(workdir, db_kind, "confidence_threshold")):
os.mkdir(os.path.join(workdir, db_kind, "confidence_threshold"))
# prepare data
r_etc = ["RMERGE_I", "RMERGE_DIFF_I", "RMEAS_I", "RMEAS_DIFF_I", "RPIM_I", "RPIM_DIFF_I"]
x, y = datum.unpack(drop_col=["DATASET_id", "RESOLUTION_LOW", "RESOLUTION_HIGH", "SPACEGROUP", "SHELXC_CFOM"] + r_etc)
# construct pipelines
seed = 1
print(f"Using seed: {seed}")
scaler = StandardScaler
forest = Pipeline([("scaler", scaler()), ("clf", RandomForestClassifier(class_weight="balanced", random_state=seed))])
xgb = Pipeline([("scaler", scaler()), ("clf", XGBClassifier(class_weight="balanced", random_state=seed))])
kneighbors = Pipeline([("scaler", scaler()), ("clf", KNeighborsClassifier())])
svc = Pipeline([("scaler", scaler()), ("clf", SVC(class_weight="balanced", probability=True, random_state=seed))])
mlp = Pipeline([("scaler", scaler()), ("clf", MLPClassifier(random_state=seed, max_iter=1000))])
models = [forest, xgb, kneighbors, svc, mlp]
# create parameter searches
forest_params = {"clf__criterion": ["gini", "entropy"],
"clf__n_estimators": randint(100, 10000), # number of trees in forest
"clf__max_features": randint(2, len(x.columns)), # max number of features when splitting
"clf__min_samples_split": randint(2, 20 + 1), # min samples per node to induce split
"clf__max_depth": randint(5, 20 + 1), # max number of splits to do
"clf__min_samples_leaf": randint(1, 10 + 1), # min number of samples in a leaf; may set to 1 anyway
"clf__max_leaf_nodes": randint(10, 20 + 1)} # max number of leaves}
xgb_params = {"clf__n_estimators": randint(100, 10000),
"clf__max_depth": randint(5, 20 + 1),
"clf__min_child_weight": randint(5, 10 + 1),
"clf__colsample_bytree": uniform(2/len(x.columns), 1),
"clf__subsample": uniform(0.1, 1),
"clf__learning_rate": uniform(0.005, 0.3)}
kneighbors_params = {"clf__weights": ["uniform", "distance"],
"clf__n_neighbors": randint(5, 50)}
svc_params = {'clf__C': expon(scale=100),
'clf__gamma': expon(scale=.1),
'clf__kernel': ['rbf', "poly"]}
mlp_params = {"clf__alpha": 10.0 ** -np.arange(1, 7),
"clf__hidden_layer_sizes": n_layer_dist(100, 1000, [1, 5])}
models_params = [forest_params, xgb_params, kneighbors_params, svc_params, mlp_params]
# use randomised search for best possible performance
n_iter = 1000
forest_search = RandomizedSearchCV(forest, forest_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
xgb_search = RandomizedSearchCV(xgb, xgb_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
kneighbors_search = RandomizedSearchCV(kneighbors, kneighbors_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
svc_search = RandomizedSearchCV(svc, svc_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
mlp_search = RandomizedSearchCV(mlp, mlp_params, n_iter=n_iter, scoring="f1", cv=5, random_state=seed)
models_search = [forest_search, xgb_search, kneighbors_search, svc_search, mlp_search]
# choose wanted model based on sys.argv
model = models[i-1]
model_params = models_params[i-1]
model_search = models_search[i-1]
# take only relevant data
mask = x["DATASET_NAME"] == 1
x, y = x[mask], y[mask]
x = x.drop("DATASET_NAME", axis=1)
# drop NaN
mask_nan = x.isna().any(axis=1)
x, y = x[~mask_nan], y[~mask_nan]
# drop outliers
if db_kind == "dials":
mask = LocalOutlierFactor(contamination=0.4).fit_predict(x)
mask = mask == 1
x, y = x.loc[mask, :], y[mask]
# split train/test datasets
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.2, stratify=y, random_state=seed)
# run randomized search
model_search.fit(X_train, y_train)
joblib.dump(model_search, os.path.join(db_workdir, f'random_search_{model_name}.pkl'))
joblib.dump(model_search.best_estimator_, os.path.join(db_workdir, f'best_estimator_{model_name}.pkl'))
# get predicted values for classification report and confusion matrix
y_pred = model_search.predict(X_test)
report = classification_report(y_test, y_pred)
matrix = plot_confusion_matrix(model_search, X_test, y_test, normalize="all").confusion_matrix
plt.savefig(os.path.join(db_workdir, f'confusion_matrix_{model_name}'))
confus = pd.DataFrame(matrix, index=["Actual Negative", "Actual Positive"], columns=["Predicted Negative", "Predicted Positive"])
# prepare results
best_params = pd.Series(model_search.best_params_)
# store results
best_params.to_csv(os.path.join(db_workdir, f'best_params_{model_name}.csv'))
log = (f"> Dataset no. 1\n"
f"> Database: {db_kind}\n"
f"> Using model: {model_name}\n"
f"> Best parameters:\n{best_params}\n"
f"> Best training F1 score: {model_search.best_score_:.2%}"
f"> Classification report:\n{report}\n"
f"> Confusion matrix:\n{confus}")
with open(db_logfile, "w") as fhandle:
fhandle.write(log)
| en | 0.724976 | # system arguments # get database # use simplified model names # get working directories # prepare data # construct pipelines # create parameter searches # number of trees in forest # max number of features when splitting # min samples per node to induce split # max number of splits to do # min number of samples in a leaf; may set to 1 anyway # max number of leaves} # use randomised search for best possible performance # choose wanted model based on sys.argv # take only relevant data # drop NaN # drop outliers # split train/test datasets # run randomized search # get predicted values for classification report and confusion matrix # prepare results # store results | 2.486458 | 2 |
wopr/models.py | UrbanCCD-UChicago/sf-plenario-backend | 0 | 6612503 | <reponame>UrbanCCD-UChicago/sf-plenario-backend
import os
from sqlalchemy import Column, Integer, String, Boolean, Table, Date,\
DateTime, Time, Float, Numeric
from sqlalchemy.dialects.postgresql import TIMESTAMP, DOUBLE_PRECISION, TIME,\
DATE
from geoalchemy2 import Geometry
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from fiona import prop_width, prop_type
from datetime import datetime
from wopr.database import Base, app_engine as engine, Point
# Reflect existing database tables at import time: column definitions are
# loaded from the live database schema (autoload) via the application engine
# rather than being declared here.
MetaTable = Table('sf_meta', Base.metadata,
    autoload=True, autoload_with=engine)
# NOTE(review): presumably the denormalized master table holding rows for all
# datasets — confirm against the schema that created 'dat_master'.
MasterTable = Table('dat_master', Base.metadata,
    autoload=True, autoload_with=engine)
def crime_table(name, metadata):
    """Build the table schema for a Chicago-style crime dataset.

    :param name: name to give the table within ``metadata``
    :param metadata: SQLAlchemy ``MetaData`` the table is registered with
    :return: the constructed ``Table``; ``extend_existing=True`` so a
        repeated call updates the existing definition instead of raising
    """
    columns = [
        Column('id', Integer),
        Column('case_number', String(length=10)),
        Column('orig_date', TIMESTAMP),
        Column('block', String(length=50)),
        Column('iucr', String(length=10)),
        Column('primary_type', String(length=100)),
        Column('description', String(length=100)),
        Column('location_description', String(length=50)),
        Column('arrest', Boolean),
        Column('domestic', Boolean),
        Column('beat', String(length=10)),
        Column('district', String(length=5)),
        Column('ward', Integer),
        Column('community_area', String(length=10)),
        Column('fbi_code', String(length=10)),
        Column('x_coordinate', Integer, nullable=True),
        Column('y_coordinate', Integer, nullable=True),
        Column('year', Integer),
        Column('updated_on', TIMESTAMP, default=None),
        Column('latitude', DOUBLE_PRECISION(precision=53)),
        Column('longitude', DOUBLE_PRECISION(precision=53)),
        Column('location', Point),
    ]
    return Table(name, metadata, *columns, extend_existing=True)
def sf_crime_table(name, metadata):
    """Build the table schema for the San Francisco crime dataset.

    :param name: name to give the table within ``metadata``
    :param metadata: SQLAlchemy ``MetaData`` the table is registered with
    :return: the constructed ``Table``; ``extend_existing=True`` so a
        repeated call updates the existing definition instead of raising
    """
    columns = [
        Column('id', Integer),
        Column('category', String(length=50)),
        Column('description', String(length=100)),
        Column('day_of_week', String(length=10)),
        Column('date', Date),
        Column('time', Time),
        Column('pd_district', String(length=20)),
        Column('resolution', String(length=50)),
        Column('location_str', String(length=100)),
        Column('longitude', DOUBLE_PRECISION(precision=53)),
        Column('latitude', DOUBLE_PRECISION(precision=53)),
    ]
    return Table(name, metadata, *columns, extend_existing=True)
def sf_meta_table(metadata):
    """Build the 'sf_meta' dataset-catalogue table.

    Each row describes one dataset (table name, human name, description,
    source file) plus a set of boolean flags (count_q, area_q, ...) that
    appear to gate which query types the dataset supports — confirm
    against the query layer.
    """
    table = Table('sf_meta', metadata,
            Column('row_id', Integer, primary_key=True),
            Column('table_name', String(length=40)),
            Column('human_name', String(length=60)),
            Column('description', String(length=200)),
            Column('file_name', String(length=30)),
            # BUG FIX: pass the callable, not datetime.now() — calling it
            # here would freeze the default at module-import time instead
            # of evaluating it once per INSERT.
            Column('last_update', DateTime, default=datetime.now),
            Column('val_attr', String(length=30)),
            Column('duration', String(length=10)),
            Column('date_field', String(length=30)),
            Column('count_q', Boolean),
            Column('area_q', Boolean),
            Column('dist_q', Boolean),
            Column('temp_q', Boolean),
            Column('weighted_q', Boolean),
            Column('access_q', Boolean),
            Column('voronoi', Boolean),
            Column('demo', Boolean),
            extend_existing=True)
    return table
def map_esri_type(esri_type):
    """Map an esri type string (as reported by fiona) to a SQLAlchemy type.

    `esri_type` looks like 'str:80' or 'float:24.15' — a type name with an
    optional ':'-separated width/precision suffix.  Returns None for
    unrecognised type names (the caller then builds a typeless Column).
    """
    type_name, _, width = esri_type.partition(':')
    width = width or None  # treat a bare/empty suffix as "no width given"
    if type_name == 'int':
        return Integer
    elif type_name == 'double':
        return Float(precision=15)
    elif type_name == 'str':
        return String(length=int(width) if width else 80)
    elif type_name == 'date':
        return Date
    elif type_name == 'datetime':
        return DateTime
    elif type_name == 'float':
        if not width:
            return Float
        precision, _, scale = width.partition('.')
        if not scale:
            # BUG FIX: precision must be an int; the original passed the
            # raw string (Float(precision=ps[0])).
            return Float(precision=int(precision))
        return Numeric(int(precision) + 3, int(scale))
def shp2table(name, metadata, schema, force_multipoly=False):
    """Create a SQLAlchemy table schema from a shapefile schema obtained
    through fiona.

    When `force_multipoly` is true the geometry column is declared as
    MULTIPOLYGON regardless of what the shapefile reports.
    """
    # Columns for the features' properties.
    # BUG FIX: dict.iteritems() is Python 2 only; items() works everywhere.
    columns = [
        Column(field.lower(), map_esri_type(esri_type))
        for field, esri_type in schema['properties'].items()
    ]
    # Geometry column (SRID 4326 is WGS84 lon/lat).
    geom_type = 'MULTIPOLYGON' if force_multipoly \
        else schema['geometry'].upper()
    columns.append(Column('geom', Geometry(geom_type, srid=4326)))
    return Table(name, metadata, *columns, extend_existing=True)
| import os
from sqlalchemy import Column, Integer, String, Boolean, Table, Date,\
DateTime, Time, Float, Numeric
from sqlalchemy.dialects.postgresql import TIMESTAMP, DOUBLE_PRECISION, TIME,\
DATE
from geoalchemy2 import Geometry
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from fiona import prop_width, prop_type
from datetime import datetime
from wopr.database import Base, app_engine as engine, Point
# Reflected tables: column definitions are loaded from the live database at
# import time via autoload (requires a working connection through `engine`).
MetaTable = Table('sf_meta', Base.metadata,
    autoload=True, autoload_with=engine)
MasterTable = Table('dat_master', Base.metadata,
    autoload=True, autoload_with=engine)
def crime_table(name, metadata):
    """Build the SQLAlchemy Table definition for a crime-records dataset.

    `name` is the physical table name and `metadata` the MetaData to
    register it on.  `extend_existing=True` lets repeated calls redefine
    the same table on one MetaData without raising.
    """
    table = Table(name, metadata,
            Column('id', Integer),
            Column('case_number', String(length=10)),
            Column('orig_date', TIMESTAMP),
            Column('block', String(length=50)),
            Column('iucr', String(length=10)),
            Column('primary_type', String(length=100)),
            Column('description', String(length=100)),
            Column('location_description', String(length=50)),
            Column('arrest', Boolean),
            Column('domestic', Boolean),
            Column('beat', String(length=10)),
            Column('district', String(length=5)),
            Column('ward', Integer),
            Column('community_area', String(length=10)),
            Column('fbi_code', String(length=10)),
            # Explicitly marked nullable (other columns rely on the default).
            Column('x_coordinate', Integer, nullable=True),
            Column('y_coordinate', Integer, nullable=True),
            Column('year', Integer),
            Column('updated_on', TIMESTAMP, default=None),
            Column('latitude', DOUBLE_PRECISION(precision=53)),
            Column('longitude', DOUBLE_PRECISION(precision=53)),
            Column('location', Point),
            extend_existing=True)
    return table
def sf_crime_table(name, metadata):
    """Build the SQLAlchemy Table definition for the sf_* crime dataset.

    Parameters mirror crime_table(); `extend_existing=True` allows the
    definition to be refreshed on repeated calls against one MetaData.
    """
    # Spacing normalised to match crime_table (PEP 8: no spaces just
    # inside parentheses); column list is otherwise unchanged.
    table = Table(name, metadata,
            Column('id', Integer),
            Column('category', String(length=50)),
            Column('description', String(length=100)),
            Column('day_of_week', String(length=10)),
            Column('date', Date),
            Column('time', Time),
            Column('pd_district', String(length=20)),
            Column('resolution', String(length=50)),
            Column('location_str', String(length=100)),
            Column('longitude', DOUBLE_PRECISION(precision=53)),
            Column('latitude', DOUBLE_PRECISION(precision=53)),
            extend_existing=True)
    return table
def sf_meta_table(metadata):
    """Build the 'sf_meta' dataset-catalogue table.

    Each row describes one dataset (table name, human name, description,
    source file) plus a set of boolean flags (count_q, area_q, ...) that
    appear to gate which query types the dataset supports — confirm
    against the query layer.
    """
    table = Table('sf_meta', metadata,
            Column('row_id', Integer, primary_key=True),
            Column('table_name', String(length=40)),
            Column('human_name', String(length=60)),
            Column('description', String(length=200)),
            Column('file_name', String(length=30)),
            # BUG FIX: pass the callable, not datetime.now() — calling it
            # here would freeze the default at module-import time instead
            # of evaluating it once per INSERT.
            Column('last_update', DateTime, default=datetime.now),
            Column('val_attr', String(length=30)),
            Column('duration', String(length=10)),
            Column('date_field', String(length=30)),
            Column('count_q', Boolean),
            Column('area_q', Boolean),
            Column('dist_q', Boolean),
            Column('temp_q', Boolean),
            Column('weighted_q', Boolean),
            Column('access_q', Boolean),
            Column('voronoi', Boolean),
            Column('demo', Boolean),
            extend_existing=True)
    return table
def map_esri_type(esri_type):
    """Map an esri type string (as reported by fiona) to a SQLAlchemy type.

    `esri_type` looks like 'str:80' or 'float:24.15' — a type name with an
    optional ':'-separated width/precision suffix.  Returns None for
    unrecognised type names (the caller then builds a typeless Column).
    """
    type_name, _, width = esri_type.partition(':')
    width = width or None  # treat a bare/empty suffix as "no width given"
    if type_name == 'int':
        return Integer
    elif type_name == 'double':
        return Float(precision=15)
    elif type_name == 'str':
        return String(length=int(width) if width else 80)
    elif type_name == 'date':
        return Date
    elif type_name == 'datetime':
        return DateTime
    elif type_name == 'float':
        if not width:
            return Float
        precision, _, scale = width.partition('.')
        if not scale:
            # BUG FIX: precision must be an int; the original passed the
            # raw string (Float(precision=ps[0])).
            return Float(precision=int(precision))
        return Numeric(int(precision) + 3, int(scale))
def shp2table(name, metadata, schema, force_multipoly=False):
    """Create a SQLAlchemy table schema from a shapefile schema obtained
    through fiona.

    When `force_multipoly` is true the geometry column is declared as
    MULTIPOLYGON regardless of what the shapefile reports.
    """
    # Columns for the features' properties.
    # BUG FIX: dict.iteritems() is Python 2 only; items() works everywhere.
    columns = [
        Column(field.lower(), map_esri_type(esri_type))
        for field, esri_type in schema['properties'].items()
    ]
    # Geometry column (SRID 4326 is WGS84 lon/lat).
    geom_type = 'MULTIPOLYGON' if force_multipoly \
        else schema['geometry'].upper()
    columns.append(Column('geom', Geometry(geom_type, srid=4326)))
    return Table(name, metadata, *columns, extend_existing=True)
dateformat.py | stestagg/dateformat | 8 | 6612504 | import calendar
import datetime
import math
import time
import re
try:
import pytz
except ImportError:
HAVE_PYTZ = False
else:
HAVE_PYTZ = True
__version__ = "0.9.7"
# Regex fragments (plain strings, compiled later) for numeric field ranges.
RE_0_TO_60 = "[0-6]?[0-9]" # In some special cases, e.g. seconds, can actually be '60'
RE_00_TO_31 = "(?:[0-2][0-9])|(?:3[0-1])"
RE_0_TO_31 = "(?:[0-2]?[0-9])|(?:3[0-1])"
RE_0_TO_12 = "(?:0?[0-9])|(?:1[0-2])"
RE_00_TO_12 = "(?:0[0-9])|(?:1[0-2])"
RE_0_TO_24 = "(?:[0-1]?[0-9])|(?:2[0-4])"
SECONDS_IN_MINUTE = 60
SECONDS_IN_HOUR = SECONDS_IN_MINUTE * 60
SECONDS_IN_DAY = SECONDS_IN_HOUR * 24
# Ready-made format specs for common ISO-8601 shapes.
ISOFORMAT_DATE = "YYYY-MM-DD"
# This format explicitly leaves out the micro/milli/nano second component,
# as the precision of the sub-second measurement in iso8601 is undefined,
# and it is easy to add in the correct .SSSS component once the precision
# is agreed/known
ISOFORMAT_TIME = "hh:mm:ss"
ISOFORMAT_DATETIME = f'{ISOFORMAT_DATE}␣{ISOFORMAT_TIME}'
ISOFORMAT_BASIC_DATE = "YYYY[MM][DD]"
ISOFORMAT_BASIC_TIME = "hhmmss"
# Sentinel distinguishing "no default supplied" in DateFormat.parse().
RAISE = object()
class DateFormatPart:
    """Base class for one element of a date format specification.

    Each subclass recognises one token (e.g. ``YYYY``) in a format string,
    contributes a regular-expression fragment used when parsing dates, and
    contributes an f-string fragment used when formatting dates.
    """

    # True when parser_re() wraps its pattern in a capturing group.
    PARSER_RE_CONTAINS_GROUP = True
    # True when the captured text is an int usable directly as a datetime
    # constructor argument (consumed by DateFormat.parse).
    VALUE_MATCHES_DATE_COMPONENT_INT = False

    def __init__(self, format_str, re_str):
        # Token looked for in the format spec (e.g. 'YYYY').
        self.format_str = format_str
        # Regex fragment matching the value in a date string (e.g. \d{4}).
        self._parser_re_pattern = re_str

    def parser_re(self, format):
        """Return this part's capturing regex fragment for *format*."""
        return '(' + self._parser_re_pattern + ')'

    def partition_spec(self, string):
        """Split *string* around this part's token, as str.partition does."""
        return string.partition(self.format_str)

    def __repr__(self):
        return "<{}: '{}'>".format(type(self).__name__, self.format_str)

    def format_part(self, format):
        raise NotImplementedError(
            f"{type(self)} has not implemented 'format_part'"
        )

    def install_chain_handlers(self, format):
        """Hook for subclasses to register parse/format chain callbacks."""
        pass
class IgnorePart(DateFormatPart):
    """
    Used for separators (T for example), during parsing, the matched value is
    ignored (but checked for presence)
    """

    PARSER_RE_CONTAINS_GROUP = False

    def __init__(self, *args, format_value=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Text emitted when formatting; defaults to the token itself.
        if format_value is None:
            self.format_value = self.format_str
        else:
            self.format_value = format_value

    def parser_re(self, format):
        # No capturing group: the separator is checked but never extracted.
        return self._parser_re_pattern

    def format_part(self, format):
        return self.format_value
class DayOfMonthSuffixPart(IgnorePart):
    """Emits the English ordinal suffix (st/nd/rd/th) for the day of month."""

    SUFFIXES = {1: 'st', 2: 'nd', 3: 'rd'}

    def add_format_context(self, format, date, context):
        # 11th, 12th and 13th break the last-digit rule, so special-case them.
        day = date.day
        if 11 <= day <= 13:
            suffix = 'th'
        else:
            suffix = self.SUFFIXES.get(day % 10, 'th')
        context['day_of_month_suffix'] = suffix

    def format_part(self, format):
        return '{day_of_month_suffix}'

    def install_chain_handlers(self, format):
        format.format_chain.append(self.add_format_context)
class SimplePart(DateFormatPart):
    """A part whose captured text maps directly to one integer datetime field."""

    VALUE_MATCHES_DATE_COMPONENT_INT = True

    def __init__(self, format_str, re_str, datepart):
        # Name of the datetime attribute this token maps to (e.g. 'year').
        self.datepart = datepart
        super().__init__(format_str, re_str)

    def format_part(self, format):
        # Zero-pad to the width of the format token (e.g. 'YYYY' -> 4).
        width = len(self.format_str)
        return '{{date.{}:0>{}}}'.format(self.datepart, width)
class HourPart(SimplePart):
    """Hour token ('hh'); honours the format's 12/24-hour setting."""

    # Maps a 24-clock hour (index 0-24) onto the 12-clock display value.
    HOUR_24_to_12 = [
        12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
        12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
    ]

    def __init__(self, format_str):
        # The regex is chosen per-format in parser_re(), so none is fixed here.
        super().__init__(format_str, None, "hour")

    def parser_re(self, format):
        if format.is_24hour:
            return f"({RE_0_TO_24})"
        return f"({RE_0_TO_12})"

    def format_part(self, format):
        if not format.is_24hour:
            return '{HourPart.HOUR_24_to_12[date.hour]:0>2g}'
        return super().format_part(format)
class ShortYearPart(SimplePart):
    """Two-digit year token ('YY'); expands to a full year when parsing."""

    VALUE_MATCHES_DATE_COMPONENT_INT = False

    def got_value(self, context, value):
        # Mirror python datetime's pivot: 70-99 -> 19xx, 00-69 -> 20xx.
        short = int(value)
        century = 1900 if short > 69 else 2000
        context[self.datepart] = century + short

    def format_part(self, format):
        return '{date.year % 100:0>2}'
class MonthNamePart(SimplePart):
    """Full English month name token ('MMMMM'), e.g. 'January'."""

    VALUE_MATCHES_DATE_COMPONENT_INT = False
    # Lower-cased month name -> month number, plus the reverse lookup table.
    TO_NUM = {calendar.month_name[i].lower(): i for i in range(1, 13)}
    FROM_NUM = calendar.month_name

    def __init__(self, format_str, re_str):
        super().__init__(format_str, re_str, datepart="month")

    def got_value(self, context, value):
        context['month'] = self.TO_NUM[value.lower()]

    def format_part(self, format):
        return '{MonthNamePart.FROM_NUM[date.month]}'
class ShortMonthNamePart(MonthNamePart):
    """Abbreviated month name token ('MMM'), e.g. 'Jan'.

    Also accepts the 4-letter full names 'June' and 'July', which are
    sometimes written out even in otherwise-abbreviated formats.
    """

    TO_NUM = {calendar.month_abbr[i].lower(): i for i in range(1, 13)}
    TO_NUM.update(june=6, july=7)
    FROM_NUM = calendar.month_abbr

    def format_part(self, format):
        return '{ShortMonthNamePart.FROM_NUM[date.month]}'
class WeekdayNamePart(IgnorePart):
    """Full weekday name token ('Dddddd'/'Ddddd'); checked but discarded on parse."""
    FROM_NUM = list(calendar.day_name)
    def format_part(self, format):
        return '{WeekdayNamePart.FROM_NUM[date.weekday()]}'
class ShortWeekdayNamePart(IgnorePart):
    """Abbreviated weekday name token ('Ddd'); checked but discarded on parse."""
    FROM_NUM = list(calendar.day_abbr)
    def format_part(self, format):
        return '{ShortWeekdayNamePart.FROM_NUM[date.weekday()]}'
class MicrosecondPart(DateFormatPart):
    """Fixed-width fractional-second token ('SS'..'SSSSSS')."""

    def __init__(self, format_str, re_str, value_multiplier):
        # Factor converting the captured digits into microseconds
        # (e.g. 'SSS' is milliseconds, so x1000).
        self.multiplier = value_multiplier
        super().__init__(format_str, re_str)

    def got_value(self, context, value):
        context['microsecond'] = self.multiplier * int(value)

    def format_part(self, format):
        width = len(self.format_str)
        return '{ int(round(date.microsecond / %s, 0)):0>%sg}' % (self.multiplier, width)
class FractionalSecond(DateFormatPart):
    """Variable-length fractional-second token ('S'): 1-9 digits."""

    def got_value(self, context, value):
        # Treat the digits as a decimal fraction of one second.
        fraction = float('0.' + value)
        context['microsecond'] = int(fraction * 1000000)

    def format_part(self, format):
        return '{date.microsecond.__format__("0>06g").rstrip("0") or "0"}'
# Epoch reference instants for the UNIX_* tokens; the aware variant is used
# when formatting timezone-aware datetimes (naive - aware subtraction raises).
EPOCH = datetime.datetime(1970, 1, 1)
if HAVE_PYTZ:
    EPOCH_UTC = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
class TimestampPart(DateFormatPart):
    """Unix-epoch token (UNIX_TIMESTAMP/_MILLISECONDS/_MICROSECONDS/_NANOSECONDS).

    ``value_divisor`` converts the captured integer into seconds.
    """
    def __init__(self, format_str, value_divisor):
        self.divisor = value_divisor
        # 10 digits cover epoch seconds up to ~year 2286; extra digits make
        # room for the milli/micro/nano variants' added precision.
        max_digits = 10 + math.ceil(math.log10(value_divisor))
        re_str = r'\d{1,%s}' % max_digits
        super().__init__(format_str, re_str)
    def got_value(self, context, value):
        # Interpret the timestamp as UTC and spread its fields over the context.
        utc_val = datetime.datetime.utcfromtimestamp(int(value) / self.divisor)
        context["year"] = utc_val.year
        context["month"] = utc_val.month
        context["day"] = utc_val.day
        context["hour"] = utc_val.hour
        context["minute"] = utc_val.minute
        context["second"] = utc_val.second
        # Seconds-resolution tokens carry no sub-second information.
        if self.divisor > 1:
            context["microsecond"] = utc_val.microsecond
    def format_part(self, format):
        # Aware datetimes must be measured against an aware epoch.
        if HAVE_PYTZ:
            return f'{{int((date - (EPOCH_UTC if date.tzinfo else EPOCH)).total_seconds() * {self.divisor}) }}'
        return f'{{int((date - EPOCH).total_seconds() * {self.divisor}) }}'
class AmPmPart(DateFormatPart):
    """AM/PM token; converts between 12-hour and 24-hour clock values."""
    RE = "am|pm"
    def __init__(self, format_str):
        super().__init__(format_str, self.RE)
    def got_value(self, context, value):
        context["is_pm"] = value.lower() == "pm"
    def install_chain_handlers(self, format):
        # Insert at index 0 so this runs before the datetime is constructed
        # (the None sentinel in DateFormat.parse_chain marks construction).
        format.parse_chain.insert(0, self.prepare_parse_context)
    def prepare_parse_context(self, parser, context, value):
        # Convert a 12-hour reading to the 24-hour value datetime expects:
        # 12am -> 0, 12pm -> 12, other pm hours gain 12.
        hour = context.get("hour", 12)
        if context.pop("is_pm"):
            if hour != 12:
                hour = hour + 12
        else:
            if hour == 12:
                hour = 0
        context['hour'] = hour
        return value
    def format_part(self, format):
        # Preserve the casing style of the token used in the format spec.
        if self.format_str.isupper():
            return '{"PM" if date.hour % 24 >= 12 else "AM"}'
        elif self.format_str.islower():
            return '{"pm" if date.hour % 24 >= 12 else "am"}'
        else:
            return '{"Pm" if date.hour % 24 >= 12 else "Am"}'
class UTCOffsetPart(DateFormatPart):
    """Numeric UTC offset token ('+HH', '+HHMM', '+HH:MM').

    Parsing attaches a fixed-offset ``datetime.timezone`` to the result;
    formatting splits ``date.utcoffset()`` into sign/hours/minutes which
    the ``to_str_format`` fragment renders.
    """

    def __init__(self, format_str, re_str, parser, to_str_format):
        # parser: callable splitting matched text into (sign, hours, minutes).
        self.parser = parser
        # f-string fragment using the utc_* context keys set below.
        self.to_str_format = to_str_format
        super().__init__(format_str, re_str)

    def got_value(self, context, value):
        sign, hours, minutes = self.parser(value)
        hours, minutes = int(hours), int(minutes)
        # (removed a dead `total_minutes` local that was never used)
        difference = datetime.timedelta(hours=hours, minutes=minutes)
        if sign == "-":
            difference = -difference
        context["tzinfo"] = datetime.timezone(difference)

    def add_format_context(self, format, date, context):
        """Expose the date's UTC offset as sign/|hours|/|minutes| context."""
        utc_offset = date.utcoffset()
        total_seconds = utc_offset.total_seconds()
        # int() truncates towards zero, so hours and minutes keep the same
        # sign and the abs() calls below yield the right magnitudes.
        hours = int(total_seconds / SECONDS_IN_HOUR)
        remaining = total_seconds - (hours * SECONDS_IN_HOUR)
        minutes = remaining / SECONDS_IN_MINUTE
        context.update({
            "utc_sign": "-" if total_seconds < 0 else "+",
            "utc_hours_abs": abs(hours),
            "utc_mins_abs": abs(minutes),
        })

    def install_chain_handlers(self, format):
        format.format_chain.append(self.add_format_context)

    def format_part(self, format):
        return self.to_str_format
if HAVE_PYTZ:
    class NamedTimezeonePart(DateFormatPart):
        """Named timezone token (e.g. 'UTC', 'Europe/London'); requires pytz."""
        # Olson-style 'Area/Location' names, optionally numeric-suffixed.
        FULL_TZ_NAME_RE = r"(?:[A-Z_]{2,12}?/)+?[A-Z\-_]{3,20}[+-]?\d{0,2}"
        # Short abbreviations such as 'UTC' or 'GMT+2'.
        SHORT_TZ_NAME_RE = r"[A-Z]{1}[A-Z+\-_\d]{0,8}"
        RE = f"{FULL_TZ_NAME_RE}|{SHORT_TZ_NAME_RE}"
        def __init__(self, format_str):
            super().__init__(format_str, self.RE)
        def got_value(self, context, value):
            context["tzinfo"] = pytz.timezone(value)
        def fixup_parsed_timezone(self, format, context, date):
            """
            The correct timezone has been identified first-time round, BUT
            pytz can't localize the date correctly without knowing what the
            year/month/day is, due to the fickle nature of humans.
            So extract the timezone, and re-localize correctly
            """
            timezone = date.tzinfo
            date = date.replace(tzinfo=None)
            return timezone.localize(date)
        def install_chain_handlers(self, format):
            # Appended (not inserted), so this runs after the datetime has
            # been constructed by DateFormat.parse.
            format.parse_chain.append(self.fixup_parsed_timezone)
            format.format_chain.append(self.add_format_context)
        def add_format_context(self, format, date, context):
            if not date.tzinfo:
                raise ValueError("Cannot format timezone for non-timezone aware dates")
            # Prefer pytz's .zone attribute; fall back to tzname() for other
            # tzinfo implementations that don't define it.
            zone = getattr(date.tzinfo, "zone", None)
            if zone:
                context['timezone_name'] = zone
            else:
                tz_name = date.tzinfo.tzname(date)
                if not tz_name:
                    raise ValueError(f"Cannot get a timezone name for: {date.tzinfo}")
                context['timezone_name'] = tz_name
        def format_part(self, format):
            return '{timezone_name}'
class DateFormat:
    """A compiled, two-way date format: parse() strings, format() datetimes.

    A spec such as 'YYYY-MM-DD hh:mm:ss' is tokenised once at construction;
    parsing then uses a single compiled regex and formatting evaluates a
    pre-compiled f-string code object.
    """
    # The order matters here, for example. YYYY must match before YY
    # (Or the dateformat will end up looking for two short-years right after each other
    # rather than one long year
    FORMAT_STR_TOKENS = [
        TimestampPart('UNIX_TIMESTAMP', value_divisor=1),
        TimestampPart('UNIX_MILLISECONDS', value_divisor=1000),
        TimestampPart('UNIX_MICROSECONDS', value_divisor=1000000),
        TimestampPart('UNIX_NANOSECONDS', value_divisor=1000000000),
        UTCOffsetPart("+HHMM", r"[\+\-]\d{4}",
                      parser=lambda val: (val[0], val[1:3], val[3:5]),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}{utc_mins_abs:0>2g}"),
        UTCOffsetPart("+HH:MM", r"[\+\-]\d{2}:\d{2}",
                      parser=lambda val: (val[0], val[1:3], val[4:6]),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}:{utc_mins_abs:0>2g}"),
        UTCOffsetPart("+HH", r"[\+\-]\d{2}",
                      parser=lambda val: (val[0], val[1:3], 0),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}"),
        WeekdayNamePart("Dddddd", r'[MSTFW]\w{5,8}'),
        WeekdayNamePart("Ddddd", r'[MSTFW]\w{5,8}'),
        ShortWeekdayNamePart("Ddd", r'[MSTFW]\w{2}'),
        SimplePart("[MM]", RE_00_TO_12, "month"),
        SimplePart("[DD]", RE_00_TO_31, "day"),
        SimplePart("DD", RE_0_TO_31, "day"),
        MonthNamePart("MMMMM", r'[ADFJMNOS]\w{2,8}'),
        ShortMonthNamePart("MMM", r'[ADFJMNOS]\w{2,3}'),
        SimplePart("MM", RE_0_TO_12, "month"),
        SimplePart("YYYY", r"\d{4}", "year"),
        ShortYearPart("YY", r"\d{2}", "year"),
        HourPart("hh"),
        SimplePart("mm", RE_0_TO_60, "minute"),
        SimplePart("ss", RE_0_TO_60, "second"),
        MicrosecondPart("SSSSSS", r"\d{6}", value_multiplier=1),
        MicrosecondPart("SSSS", r"\d{4}", value_multiplier=100),
        MicrosecondPart("SSS", r"\d{3}", value_multiplier=1000),
        MicrosecondPart("SS", r"\d{2}", value_multiplier=10000),
        FractionalSecond("S", r"\d{1,9}"),
        AmPmPart("AM"),AmPmPart("Am"),AmPmPart("am"),
        AmPmPart("PM"),AmPmPart("Pm"),AmPmPart("pm"),
        IgnorePart(" ", r"\s+?"),
        IgnorePart('of', 'of'),
        DayOfMonthSuffixPart('st', '(?:st|nd|rd|th)'),
        IgnorePart("␣", r"[T ]", format_value="T")
    ]
    if HAVE_PYTZ:
        for timezone in {"UTC", "GMT", "Europe/London", "Zulu"}:
            FORMAT_STR_TOKENS.append(NamedTimezeonePart(timezone))
    # Bare punctuation separators accepted in format specs.
    for char in ":/-.,TZ()":
        FORMAT_STR_TOKENS.append(IgnorePart(char, re.escape(char)))
    def __init__(self, spec, is_24hour=None):
        # is_24hour=None means "infer": 24-hour unless an am/pm token appears.
        self.spec_str = spec
        self.tokens = self._tokenize_spec(spec)
        if is_24hour is None:
            self.is_24hour = not any(isinstance(t, AmPmPart) for t in self.tokens)
        else:
            self.is_24hour = is_24hour
        # Pre-calculate some properties
        full_date_re = "".join(token.parser_re(self) for token in self.tokens)
        self._parser_re = re.compile("^%s$" % full_date_re, re.I)
        # Only group-capturing tokens consume items of match.groups() in parse().
        self.re_tokens = [t for t in self.tokens if t.PARSER_RE_CONTAINS_GROUP]
        self.format_code = self._make_format_code()
        # None marks the point at which the datetime is constructed; tokens
        # may register handlers before it (pre-process the context) or after
        # it (post-process the datetime).
        self.parse_chain = [None]
        self.format_chain = []
        for token in self.tokens:
            token.install_chain_handlers(self)
    def _make_format_code(self):
        """Compile the whole spec into one f-string code object for format()."""
        fstring_data = "".join(token.format_part(self) for token in self.tokens)
        src = f"f'{fstring_data}'"
        return compile(src, src, 'eval')
    def _tokenize_spec(self, bit):
        """Recursively partition *bit* into a tuple of DateFormatPart tokens."""
        for component in self.FORMAT_STR_TOKENS:
            before, match, after = component.partition_spec(bit)
            if not match:
                continue
            parts = (component, )
            if before:
                parts = self._tokenize_spec(before) + parts
            if after:
                parts = parts + self._tokenize_spec(after)
            return parts
        if bit:
            raise ValueError(f"Could not parse: {bit}")
        return ()
    def matches_format(self, data):
        """Return True if *data* is a string that matches this format."""
        if not isinstance(data, str):
            return False
        return self._parser_re.match(data) is not None
    def parse(self, data, default=RAISE):
        """Parse *data* into a datetime; unparsed date fields default to today."""
        matches = self._parser_re.match(data)
        if matches is None:
            if default is RAISE:
                raise ValueError(f"date '{data}' does not match format '{self.spec_str}'")
            return default
        parts = matches.groups()
        today = datetime.date.today()
        context = {"year": today.year, "month": today.month, "day": today.day}
        for token, value in zip(self.re_tokens, parts):
            if token.VALUE_MATCHES_DATE_COMPONENT_INT:
                context[token.datepart] = int(value)
            else:
                token.got_value(context, value)
        result = None
        for handler in self.parse_chain:
            if handler is None:
                result = datetime.datetime(**context)
            else:
                result = handler(self, context, result)
        return result
    def format(self, date):
        """
        Given a datetime.datetime object, return a string representing this date/time,
        formatted according to this dateformat.
        """
        context = {'date': date}
        for handler in self.format_chain:
            handler(self, date, context)
        return eval(self.format_code, globals(), context)
if __name__ == "__main__":
    # Demo: print "now" (UTC) in a verbose format,
    # e.g. "Thursday 2023-01-05T12:34:56.7890+0000".
    # BUG FIX: pytz is an optional import above, so guard the demo instead
    # of failing with a NameError when it is missing.
    if not HAVE_PYTZ:
        raise SystemExit("This demo requires pytz to be installed")
    d = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
    df = DateFormat(f"Dddddd {ISOFORMAT_DATETIME}.SSSS+HHMM")
    print(df.format(d))
| import calendar
import datetime
import math
import time
import re
try:
import pytz
except ImportError:
HAVE_PYTZ = False
else:
HAVE_PYTZ = True
__version__ = "0.9.7"
# Regex fragments (plain strings, compiled later) for numeric field ranges.
RE_0_TO_60 = "[0-6]?[0-9]" # In some special cases, e.g. seconds, can actually be '60'
RE_00_TO_31 = "(?:[0-2][0-9])|(?:3[0-1])"
RE_0_TO_31 = "(?:[0-2]?[0-9])|(?:3[0-1])"
RE_0_TO_12 = "(?:0?[0-9])|(?:1[0-2])"
RE_00_TO_12 = "(?:0[0-9])|(?:1[0-2])"
RE_0_TO_24 = "(?:[0-1]?[0-9])|(?:2[0-4])"
SECONDS_IN_MINUTE = 60
SECONDS_IN_HOUR = SECONDS_IN_MINUTE * 60
SECONDS_IN_DAY = SECONDS_IN_HOUR * 24
# Ready-made format specs for common ISO-8601 shapes.
ISOFORMAT_DATE = "YYYY-MM-DD"
# This format explicitly leaves out the micro/milli/nano second component,
# as the precision of the sub-second measurement in iso8601 is undefined,
# and it is easy to add in the correct .SSSS component once the precision
# is agreed/known
ISOFORMAT_TIME = "hh:mm:ss"
ISOFORMAT_DATETIME = f'{ISOFORMAT_DATE}␣{ISOFORMAT_TIME}'
ISOFORMAT_BASIC_DATE = "YYYY[MM][DD]"
ISOFORMAT_BASIC_TIME = "hhmmss"
# Sentinel distinguishing "no default supplied" in DateFormat.parse().
RAISE = object()
class DateFormatPart:
    """Base class for one element of a date format specification.

    Each subclass recognises one token (e.g. ``YYYY``) in a format string,
    contributes a regular-expression fragment used when parsing dates, and
    contributes an f-string fragment used when formatting dates.
    """

    # True when parser_re() wraps its pattern in a capturing group.
    PARSER_RE_CONTAINS_GROUP = True
    # True when the captured text is an int usable directly as a datetime
    # constructor argument (consumed by DateFormat.parse).
    VALUE_MATCHES_DATE_COMPONENT_INT = False

    def __init__(self, format_str, re_str):
        # Token looked for in the format spec (e.g. 'YYYY').
        self.format_str = format_str
        # Regex fragment matching the value in a date string (e.g. \d{4}).
        self._parser_re_pattern = re_str

    def parser_re(self, format):
        """Return this part's capturing regex fragment for *format*."""
        return '(' + self._parser_re_pattern + ')'

    def partition_spec(self, string):
        """Split *string* around this part's token, as str.partition does."""
        return string.partition(self.format_str)

    def __repr__(self):
        return "<{}: '{}'>".format(type(self).__name__, self.format_str)

    def format_part(self, format):
        raise NotImplementedError(
            f"{type(self)} has not implemented 'format_part'"
        )

    def install_chain_handlers(self, format):
        """Hook for subclasses to register parse/format chain callbacks."""
        pass
class IgnorePart(DateFormatPart):
    """
    Used for separators (T for example), during parsing, the matched value is
    ignored (but checked for presence)
    """

    PARSER_RE_CONTAINS_GROUP = False

    def __init__(self, *args, format_value=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Text emitted when formatting; defaults to the token itself.
        if format_value is None:
            self.format_value = self.format_str
        else:
            self.format_value = format_value

    def parser_re(self, format):
        # No capturing group: the separator is checked but never extracted.
        return self._parser_re_pattern

    def format_part(self, format):
        return self.format_value
class DayOfMonthSuffixPart(IgnorePart):
    """Emits the English ordinal suffix (st/nd/rd/th) for the day of month."""

    SUFFIXES = {1: 'st', 2: 'nd', 3: 'rd'}

    def add_format_context(self, format, date, context):
        # 11th, 12th and 13th break the last-digit rule, so special-case them.
        day = date.day
        if 11 <= day <= 13:
            suffix = 'th'
        else:
            suffix = self.SUFFIXES.get(day % 10, 'th')
        context['day_of_month_suffix'] = suffix

    def format_part(self, format):
        return '{day_of_month_suffix}'

    def install_chain_handlers(self, format):
        format.format_chain.append(self.add_format_context)
class SimplePart(DateFormatPart):
    """A part whose captured text maps directly to one integer datetime field."""

    VALUE_MATCHES_DATE_COMPONENT_INT = True

    def __init__(self, format_str, re_str, datepart):
        # Name of the datetime attribute this token maps to (e.g. 'year').
        self.datepart = datepart
        super().__init__(format_str, re_str)

    def format_part(self, format):
        # Zero-pad to the width of the format token (e.g. 'YYYY' -> 4).
        width = len(self.format_str)
        return '{{date.{}:0>{}}}'.format(self.datepart, width)
class HourPart(SimplePart):
    """Hour token ('hh'); honours the format's 12/24-hour setting."""

    # Maps a 24-clock hour (index 0-24) onto the 12-clock display value.
    HOUR_24_to_12 = [
        12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
        12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
    ]

    def __init__(self, format_str):
        # The regex is chosen per-format in parser_re(), so none is fixed here.
        super().__init__(format_str, None, "hour")

    def parser_re(self, format):
        if format.is_24hour:
            return f"({RE_0_TO_24})"
        return f"({RE_0_TO_12})"

    def format_part(self, format):
        if not format.is_24hour:
            return '{HourPart.HOUR_24_to_12[date.hour]:0>2g}'
        return super().format_part(format)
class ShortYearPart(SimplePart):
    """Two-digit year token ('YY'); expands to a full year when parsing."""

    VALUE_MATCHES_DATE_COMPONENT_INT = False

    def got_value(self, context, value):
        # Mirror python datetime's pivot: 70-99 -> 19xx, 00-69 -> 20xx.
        short = int(value)
        century = 1900 if short > 69 else 2000
        context[self.datepart] = century + short

    def format_part(self, format):
        return '{date.year % 100:0>2}'
class MonthNamePart(SimplePart):
    """Full English month name token ('MMMMM'), e.g. 'January'."""

    VALUE_MATCHES_DATE_COMPONENT_INT = False
    # Lower-cased month name -> month number, plus the reverse lookup table.
    TO_NUM = {calendar.month_name[i].lower(): i for i in range(1, 13)}
    FROM_NUM = calendar.month_name

    def __init__(self, format_str, re_str):
        super().__init__(format_str, re_str, datepart="month")

    def got_value(self, context, value):
        context['month'] = self.TO_NUM[value.lower()]

    def format_part(self, format):
        return '{MonthNamePart.FROM_NUM[date.month]}'
class ShortMonthNamePart(MonthNamePart):
    """Abbreviated month name token ('MMM'), e.g. 'Jan'.

    Also accepts the 4-letter full names 'June' and 'July', which are
    sometimes written out even in otherwise-abbreviated formats.
    """

    TO_NUM = {calendar.month_abbr[i].lower(): i for i in range(1, 13)}
    TO_NUM.update(june=6, july=7)
    FROM_NUM = calendar.month_abbr

    def format_part(self, format):
        return '{ShortMonthNamePart.FROM_NUM[date.month]}'
class WeekdayNamePart(IgnorePart):
    """Full weekday name token ('Dddddd'/'Ddddd'); checked but discarded on parse."""
    FROM_NUM = list(calendar.day_name)
    def format_part(self, format):
        return '{WeekdayNamePart.FROM_NUM[date.weekday()]}'
class ShortWeekdayNamePart(IgnorePart):
    """Abbreviated weekday name token ('Ddd'); checked but discarded on parse."""
    FROM_NUM = list(calendar.day_abbr)
    def format_part(self, format):
        return '{ShortWeekdayNamePart.FROM_NUM[date.weekday()]}'
class MicrosecondPart(DateFormatPart):
    """Fixed-width fractional-second token ('SS'..'SSSSSS')."""

    def __init__(self, format_str, re_str, value_multiplier):
        # Factor converting the captured digits into microseconds
        # (e.g. 'SSS' is milliseconds, so x1000).
        self.multiplier = value_multiplier
        super().__init__(format_str, re_str)

    def got_value(self, context, value):
        context['microsecond'] = self.multiplier * int(value)

    def format_part(self, format):
        width = len(self.format_str)
        return '{ int(round(date.microsecond / %s, 0)):0>%sg}' % (self.multiplier, width)
class FractionalSecond(DateFormatPart):
    """Variable-length fractional-second token ('S'): 1-9 digits."""

    def got_value(self, context, value):
        # Treat the digits as a decimal fraction of one second.
        fraction = float('0.' + value)
        context['microsecond'] = int(fraction * 1000000)

    def format_part(self, format):
        return '{date.microsecond.__format__("0>06g").rstrip("0") or "0"}'
# Epoch reference instants for the UNIX_* tokens; the aware variant is used
# when formatting timezone-aware datetimes (naive - aware subtraction raises).
EPOCH = datetime.datetime(1970, 1, 1)
if HAVE_PYTZ:
    EPOCH_UTC = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)
class TimestampPart(DateFormatPart):
    """Unix-epoch token (UNIX_TIMESTAMP/_MILLISECONDS/_MICROSECONDS/_NANOSECONDS).

    ``value_divisor`` converts the captured integer into seconds.
    """
    def __init__(self, format_str, value_divisor):
        self.divisor = value_divisor
        # 10 digits cover epoch seconds up to ~year 2286; extra digits make
        # room for the milli/micro/nano variants' added precision.
        max_digits = 10 + math.ceil(math.log10(value_divisor))
        re_str = r'\d{1,%s}' % max_digits
        super().__init__(format_str, re_str)
    def got_value(self, context, value):
        # Interpret the timestamp as UTC and spread its fields over the context.
        utc_val = datetime.datetime.utcfromtimestamp(int(value) / self.divisor)
        context["year"] = utc_val.year
        context["month"] = utc_val.month
        context["day"] = utc_val.day
        context["hour"] = utc_val.hour
        context["minute"] = utc_val.minute
        context["second"] = utc_val.second
        # Seconds-resolution tokens carry no sub-second information.
        if self.divisor > 1:
            context["microsecond"] = utc_val.microsecond
    def format_part(self, format):
        # Aware datetimes must be measured against an aware epoch.
        if HAVE_PYTZ:
            return f'{{int((date - (EPOCH_UTC if date.tzinfo else EPOCH)).total_seconds() * {self.divisor}) }}'
        return f'{{int((date - EPOCH).total_seconds() * {self.divisor}) }}'
class AmPmPart(DateFormatPart):
    """AM/PM token; converts between 12-hour and 24-hour clock values."""
    RE = "am|pm"
    def __init__(self, format_str):
        super().__init__(format_str, self.RE)
    def got_value(self, context, value):
        context["is_pm"] = value.lower() == "pm"
    def install_chain_handlers(self, format):
        # Insert at index 0 so this runs before the datetime is constructed
        # (the None sentinel in DateFormat.parse_chain marks construction).
        format.parse_chain.insert(0, self.prepare_parse_context)
    def prepare_parse_context(self, parser, context, value):
        # Convert a 12-hour reading to the 24-hour value datetime expects:
        # 12am -> 0, 12pm -> 12, other pm hours gain 12.
        hour = context.get("hour", 12)
        if context.pop("is_pm"):
            if hour != 12:
                hour = hour + 12
        else:
            if hour == 12:
                hour = 0
        context['hour'] = hour
        return value
    def format_part(self, format):
        # Preserve the casing style of the token used in the format spec.
        if self.format_str.isupper():
            return '{"PM" if date.hour % 24 >= 12 else "AM"}'
        elif self.format_str.islower():
            return '{"pm" if date.hour % 24 >= 12 else "am"}'
        else:
            return '{"Pm" if date.hour % 24 >= 12 else "Am"}'
class UTCOffsetPart(DateFormatPart):
    """Numeric UTC offset token ('+HH', '+HHMM', '+HH:MM').

    Parsing attaches a fixed-offset ``datetime.timezone`` to the result;
    formatting splits ``date.utcoffset()`` into sign/hours/minutes which
    the ``to_str_format`` fragment renders.
    """

    def __init__(self, format_str, re_str, parser, to_str_format):
        # parser: callable splitting matched text into (sign, hours, minutes).
        self.parser = parser
        # f-string fragment using the utc_* context keys set below.
        self.to_str_format = to_str_format
        super().__init__(format_str, re_str)

    def got_value(self, context, value):
        sign, hours, minutes = self.parser(value)
        hours, minutes = int(hours), int(minutes)
        # (removed a dead `total_minutes` local that was never used)
        difference = datetime.timedelta(hours=hours, minutes=minutes)
        if sign == "-":
            difference = -difference
        context["tzinfo"] = datetime.timezone(difference)

    def add_format_context(self, format, date, context):
        """Expose the date's UTC offset as sign/|hours|/|minutes| context."""
        utc_offset = date.utcoffset()
        total_seconds = utc_offset.total_seconds()
        # int() truncates towards zero, so hours and minutes keep the same
        # sign and the abs() calls below yield the right magnitudes.
        hours = int(total_seconds / SECONDS_IN_HOUR)
        remaining = total_seconds - (hours * SECONDS_IN_HOUR)
        minutes = remaining / SECONDS_IN_MINUTE
        context.update({
            "utc_sign": "-" if total_seconds < 0 else "+",
            "utc_hours_abs": abs(hours),
            "utc_mins_abs": abs(minutes),
        })

    def install_chain_handlers(self, format):
        format.format_chain.append(self.add_format_context)

    def format_part(self, format):
        return self.to_str_format
if HAVE_PYTZ:
    class NamedTimezeonePart(DateFormatPart):
        """Named timezone token (e.g. 'UTC', 'Europe/London'); requires pytz."""
        # Olson-style 'Area/Location' names, optionally numeric-suffixed.
        FULL_TZ_NAME_RE = r"(?:[A-Z_]{2,12}?/)+?[A-Z\-_]{3,20}[+-]?\d{0,2}"
        # Short abbreviations such as 'UTC' or 'GMT+2'.
        SHORT_TZ_NAME_RE = r"[A-Z]{1}[A-Z+\-_\d]{0,8}"
        RE = f"{FULL_TZ_NAME_RE}|{SHORT_TZ_NAME_RE}"
        def __init__(self, format_str):
            super().__init__(format_str, self.RE)
        def got_value(self, context, value):
            context["tzinfo"] = pytz.timezone(value)
        def fixup_parsed_timezone(self, format, context, date):
            """
            The correct timezone has been identified first-time round, BUT
            pytz can't localize the date correctly without knowing what the
            year/month/day is, due to the fickle nature of humans.
            So extract the timezone, and re-localize correctly
            """
            timezone = date.tzinfo
            date = date.replace(tzinfo=None)
            return timezone.localize(date)
        def install_chain_handlers(self, format):
            # Appended (not inserted), so this runs after the datetime has
            # been constructed by DateFormat.parse.
            format.parse_chain.append(self.fixup_parsed_timezone)
            format.format_chain.append(self.add_format_context)
        def add_format_context(self, format, date, context):
            if not date.tzinfo:
                raise ValueError("Cannot format timezone for non-timezone aware dates")
            # Prefer pytz's .zone attribute; fall back to tzname() for other
            # tzinfo implementations that don't define it.
            zone = getattr(date.tzinfo, "zone", None)
            if zone:
                context['timezone_name'] = zone
            else:
                tz_name = date.tzinfo.tzname(date)
                if not tz_name:
                    raise ValueError(f"Cannot get a timezone name for: {date.tzinfo}")
                context['timezone_name'] = tz_name
        def format_part(self, format):
            return '{timezone_name}'
class DateFormat:
    """Compiles a human-readable spec string (e.g. "YYYY-MM-DD hh:mm:ss")
    into machinery that can both parse matching strings into datetimes and
    format datetimes back into strings.

    Each token in FORMAT_STR_TOKENS contributes a regex fragment (parsing)
    and an f-string fragment (formatting).
    """
    # The order matters here, for example. YYYY must match before YY
    # (Or the dateformat will end up looking for two short-years right after each other
    # rather than one long year
    FORMAT_STR_TOKENS = [
        TimestampPart('UNIX_TIMESTAMP', value_divisor=1),
        TimestampPart('UNIX_MILLISECONDS', value_divisor=1000),
        TimestampPart('UNIX_MICROSECONDS', value_divisor=1000000),
        TimestampPart('UNIX_NANOSECONDS', value_divisor=1000000000),
        UTCOffsetPart("+HHMM", r"[\+\-]\d{4}",
                      parser=lambda val: (val[0], val[1:3], val[3:5]),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}{utc_mins_abs:0>2g}"),
        UTCOffsetPart("+HH:MM", r"[\+\-]\d{2}:\d{2}",
                      parser=lambda val: (val[0], val[1:3], val[4:6]),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}:{utc_mins_abs:0>2g}"),
        UTCOffsetPart("+HH", r"[\+\-]\d{2}",
                      parser=lambda val: (val[0], val[1:3], 0),
                      to_str_format="{utc_sign}{utc_hours_abs:0>2g}"),
        WeekdayNamePart("Dddddd", r'[MSTFW]\w{5,8}'),
        WeekdayNamePart("Ddddd", r'[MSTFW]\w{5,8}'),
        ShortWeekdayNamePart("Ddd", r'[MSTFW]\w{2}'),
        SimplePart("[MM]", RE_00_TO_12, "month"),
        SimplePart("[DD]", RE_00_TO_31, "day"),
        SimplePart("DD", RE_0_TO_31, "day"),
        MonthNamePart("MMMMM", r'[ADFJMNOS]\w{2,8}'),
        ShortMonthNamePart("MMM", r'[ADFJMNOS]\w{2,3}'),
        SimplePart("MM", RE_0_TO_12, "month"),
        SimplePart("YYYY", r"\d{4}", "year"),
        ShortYearPart("YY", r"\d{2}", "year"),
        HourPart("hh"),
        SimplePart("mm", RE_0_TO_60, "minute"),
        SimplePart("ss", RE_0_TO_60, "second"),
        MicrosecondPart("SSSSSS", r"\d{6}", value_multiplier=1),
        MicrosecondPart("SSSS", r"\d{4}", value_multiplier=100),
        MicrosecondPart("SSS", r"\d{3}", value_multiplier=1000),
        MicrosecondPart("SS", r"\d{2}", value_multiplier=10000),
        FractionalSecond("S", r"\d{1,9}"),
        AmPmPart("AM"),AmPmPart("Am"),AmPmPart("am"),
        AmPmPart("PM"),AmPmPart("Pm"),AmPmPart("pm"),
        IgnorePart(" ", r"\s+?"),
        IgnorePart('of', 'of'),
        DayOfMonthSuffixPart('st', '(?:st|nd|rd|th)'),
        IgnorePart("␣", r"[T ]", format_value="T")
    ]
    # Named-zone tokens only when pytz is importable.
    if HAVE_PYTZ:
        for timezone in {"UTC", "GMT", "Europe/London", "Zulu"}:
            FORMAT_STR_TOKENS.append(NamedTimezeonePart(timezone))
    # Common literal separators are consumed and ignored while parsing.
    for char in ":/-.,TZ()":
        FORMAT_STR_TOKENS.append(IgnorePart(char, re.escape(char)))
    def __init__(self, spec, is_24hour=None):
        """Tokenize *spec* and pre-compile the parse regex and format code.

        :param spec: format spec string, e.g. "YYYY-MM-DD hh:mm:ss"
        :param is_24hour: force 12/24-hour mode; if None, 24-hour is assumed
            unless the spec contains an AM/PM token.
        """
        self.spec_str = spec
        self.tokens = self._tokenize_spec(spec)
        if is_24hour is None:
            self.is_24hour = not any(isinstance(t, AmPmPart) for t in self.tokens)
        else:
            self.is_24hour = is_24hour
        # Pre-calculate some properties
        full_date_re = "".join(token.parser_re(self) for token in self.tokens)
        self._parser_re = re.compile("^%s$" % full_date_re, re.I)
        # Only tokens that contribute a capture group receive values in parse().
        self.re_tokens = [t for t in self.tokens if t.PARSER_RE_CONTAINS_GROUP]
        self.format_code = self._make_format_code()
        # None is a sentinel: parse() builds the datetime at that point in the
        # chain, then later handlers post-process it (e.g. pytz localization).
        self.parse_chain = [None]
        self.format_chain = []
        for token in self.tokens:
            token.install_chain_handlers(self)
    def _make_format_code(self):
        # Pre-compile the whole output template as one f-string; format()
        # evaluates it against the context dict built by format_chain.
        fstring_data = "".join(token.format_part(self) for token in self.tokens)
        src = f"f'{fstring_data}'"
        return compile(src, src, 'eval')
    def _tokenize_spec(self, bit):
        """Recursively split *bit* into known tokens (longest-listed-first).

        Raises ValueError when any part of the spec matches no token.
        """
        for component in self.FORMAT_STR_TOKENS:
            before, match, after = component.partition_spec(bit)
            if not match:
                continue
            parts = (component, )
            if before:
                parts = self._tokenize_spec(before) + parts
            if after:
                parts = parts + self._tokenize_spec(after)
            return parts
        if bit:
            raise ValueError(f"Could not parse: {bit}")
        return ()
    def matches_format(self, data):
        """Return True if *data* is a string matching this format (no parse)."""
        if not isinstance(data, str):
            return False
        return self._parser_re.match(data) is not None
    def parse(self, data, default=RAISE):
        """Parse *data* into a datetime.

        Missing date components default to today's year/month/day.  If the
        string does not match, ValueError is raised unless *default* is given,
        in which case it is returned instead.
        """
        matches = self._parser_re.match(data)
        if matches is None:
            if default is RAISE:
                raise ValueError(f"date '{data}' does not match format '{self.spec_str}'")
            return default
        parts = matches.groups()
        today = datetime.date.today()
        context = {"year": today.year, "month": today.month, "day": today.day}
        # Capture groups appear in token order, so zip pairs each value with
        # the token that produced it.
        for token, value in zip(self.re_tokens, parts):
            if token.VALUE_MATCHES_DATE_COMPONENT_INT:
                context[token.datepart] = int(value)
            else:
                token.got_value(context, value)
        result = None
        for handler in self.parse_chain:
            if handler is None:
                result = datetime.datetime(**context)
            else:
                result = handler(self, context, result)
        return result
    def format(self, date):
        """
        Given a datetime.datetime object, return a string representing this date/time,
        formatted according to this dateformat.
        """
        context = {'date': date}
        for handler in self.format_chain:
            handler(self, date, context)
        # format_code was compiled from our own spec tokens in __init__,
        # not from user data supplied at format() time.
        return eval(self.format_code, globals(), context)
if __name__ == "__main__":
    # Demo: format "now" with weekday + ISO datetime + fractional seconds + offset.
    # Use the stdlib UTC timezone rather than pytz.UTC: the module only imports
    # pytz conditionally (HAVE_PYTZ), so referencing pytz here crashed with a
    # NameError when pytz was absent.  datetime.utcnow() is also deprecated and
    # returns a naive datetime, which the +HHMM token cannot format.
    d = datetime.datetime.now(datetime.timezone.utc)
    df = DateFormat(f"Dddddd {ISOFORMAT_DATETIME}.SSSS+HHMM")
    print(df.format(d))
| en | 0.840133 | # In some special cases, e.g. seconds, can actually be '60' # This format explicitly leaves out the micro/milli/nano second component, # as the precision of the sub-second measurement in iso8601 is undefined, # and it is easy to add in the correct .SSSS component once the precision # is agreed/known Responsible for an element of a date format, both parsing, and formatting. For example, to parse 4-digit years, a DateFormatPart with format_str of 'YYYY' exists. This part has the logic to extract the year from a string, and to format the year as a 4-digit string from a date. # What to look for in the format spec (e.g. YYYY) # Re that matches this date value (e.g. \d{4}) Date parsing is done by matching the string to a regular expression before converting each component to a date-relevant value. This method returns the part of the full regular-expression pattern that should match against the value. Given a string of the form: "YYYY-MM-DD", and assuming this part matches on 'MM', return a tuple of the form ("YYYY-", "MM", "-DD"), as per the standard string.partition function Used for separators (T for example), during parsing, the matched value is ignored (but checked for presence) # Special case # python datetime uses 70 as the cutoff # Short months to include 4-letter full month names too, as this sometimes can be used The correct timezone has been identified first-time round, BUT pytz can't localize the date correctly without knowing what the year/month/day is, due to the fickle nature of humans. So extract the timezone, and re-localize correctly # The order matters here, for example. YYYY must match before YY # (Or the dateformat will end up looking for two short-years right after each other # rather than one long year # Pre-calculate some properties Given a datetime.datetime object, return a string representing this date/time, formatted according to this dateformat. | 3.088528 | 3 |
application/service/pooling_service.py | singnet/snet-converter-services | 0 | 6612505 | <reponame>singnet/snet-converter-services
from common.logger import get_logger
from infrastructure.repositories.pooling_repository import PoolingRepository
logger = get_logger(__name__)
class PoolingService:
    """Thin service layer over PoolingRepository for message-group pool rows."""
    def __init__(self):
        self.pooling_repo = PoolingRepository()
    def get_message_group_pool(self):
        # Returns the least-recently-used message group pool serialized to a
        # dict, or None when the repository has no row to hand out.
        logger.info("Getting the message group pool which is last least used for processing")
        message_pool = self.pooling_repo.get_message_group_pool()
        return message_pool.to_dict() if message_pool else None
    def update_message_pool(self, id):
        # Delegates to the repository; presumably bumps the row's last-used
        # marker so get_message_group_pool rotates fairly — TODO confirm.
        logger.info(f"Updating the message pool id={id}")
        self.pooling_repo.update_message_pool(id=id)
| from common.logger import get_logger
from infrastructure.repositories.pooling_repository import PoolingRepository
logger = get_logger(__name__)
class PoolingService:
def __init__(self):
self.pooling_repo = PoolingRepository()
def get_message_group_pool(self):
logger.info("Getting the message group pool which is last least used for processing")
message_pool = self.pooling_repo.get_message_group_pool()
return message_pool.to_dict() if message_pool else None
def update_message_pool(self, id):
logger.info(f"Updating the message pool id={id}")
self.pooling_repo.update_message_pool(id=id) | none | 1 | 2.475405 | 2 | |
kickstart/2018b1.py | sogapalag/problems | 1 | 6612506 | <gh_stars>1-10
# No_Nine
def legal(num: str) -> int:
    """
    Count integers in [0, int(num)] that contain no digit 9 and are not
    multiples of 9 (Kickstart "No Nine").

    Digit-DP reasoning: with z free trailing digits there are 9**z nine-free
    completions, and the residues 0..8 (mod 9) occur equally often, so
    8 * 9**(z-1) of them avoid multiples of 9.  Assumes *num* itself is a
    valid bound (nine-free, not a multiple of 9), as the problem guarantees;
    the ==9 clamps only keep parity with that assumption.
    """
    length = len(num)
    last = int(num[-1])
    if last == 9:
        last = 8
    if length == 1:
        return last
    count = 0
    digit_sum = 0
    for pos in range(length - 1):
        digit = int(num[pos])
        digit_sum += digit
        choices = 8 if digit == 9 else digit
        # Pick a smaller digit at this position; the remaining
        # (length - pos - 2) digits after the next one are free.
        count += choices * 8 * 9 ** (length - pos - 2)
    # Tight prefix: the last digit may be anything in 0..last ...
    count += last + 1
    # ... except the single value that would make the digit sum divisible by 9.
    if (9 - digit_sum % 9) % 9 <= last:
        count -= 1
    return count
def main():
    # Kickstart driver: first line is the case count T, then one "F L" pair
    # per case (inclusive bounds, both guaranteed valid).  The answer for a
    # case is the count of valid numbers in [F, L], computed by inclusion:
    # legal(L) - legal(F) + 1 (the +1 re-adds F, which legal(F) includes).
    T = int(input())
    for i in range(T):
        F, L = input().split(' ')
        print('Case #{}: {}'.format(i+1, legal(L) - legal(F) + 1))
if __name__ == '__main__':
    main()
| # No_Nine
def legal(num: str) -> int:
"""
one can prove in [0, 10^z], z~[1,.)
not cantain 9, there are 9^z
and 0~8 (mod 9) are equal 9^(z-1)
thus iter num_i
"""
l = len(num)
if l == 1:
d = int(num)
if d == 9:
d = 8
return d
res = 0
m = 0
for i, d in enumerate(num):
if i < l-1:
d = int(d)
m += d
# not robust, input num may not valid FL, but FL valid pass test
if d == 9:
d = 8
res += d * 8 * 9**(l-1 - i-1)
d = int(num[-1])
if d == 9:
d = 8
res += d+1
m = m%9
if (9-m)%9 <= d:
res -= 1
return res
def main():
T = int(input())
for i in range(T):
F, L = input().split(' ')
print('Case #{}: {}'.format(i+1, legal(L) - legal(F) + 1))
if __name__ == '__main__':
main() | en | 0.541656 | # No_Nine one can prove in [0, 10^z], z~[1,.) not cantain 9, there are 9^z and 0~8 (mod 9) are equal 9^(z-1) thus iter num_i # not robust, input num may not valid FL, but FL valid pass test #{}: {}'.format(i+1, legal(L) - legal(F) + 1)) | 3.345029 | 3 |
files/people/image_converter.py | tobyhodges/website | 1 | 6612507 | #!/usr/bin/env python
"Resize an image (by default, so that width is 120 pixels)."
import sys
from PIL import Image
SCALE = 120
if len(sys.argv) == 3:
infile, outfile = sys.argv[1:3]
elif len(sys.argv) == 4:
infile, outfile, SCALE = sys.argv[1:4]
SCALE = int(SCALE)
else:
assert False, 'usage: resize infile outfile [x_dim]'
src = Image.open(infile)
src_x, src_y = src.size
dst_y = int(src_y * SCALE / src_x)
dst_x = SCALE
print('({0}, {1}) => ({2}, {3})'.format(src_x, src_y, dst_x, dst_y))
dst = src.resize((dst_x, dst_y), Image.ANTIALIAS)
dst.save(outfile)
| #!/usr/bin/env python
"Resize an image (by default, so that width is 120 pixels)."
import sys
from PIL import Image
SCALE = 120
if len(sys.argv) == 3:
infile, outfile = sys.argv[1:3]
elif len(sys.argv) == 4:
infile, outfile, SCALE = sys.argv[1:4]
SCALE = int(SCALE)
else:
assert False, 'usage: resize infile outfile [x_dim]'
src = Image.open(infile)
src_x, src_y = src.size
dst_y = int(src_y * SCALE / src_x)
dst_x = SCALE
print('({0}, {1}) => ({2}, {3})'.format(src_x, src_y, dst_x, dst_y))
dst = src.resize((dst_x, dst_y), Image.ANTIALIAS)
dst.save(outfile)
| ru | 0.26433 | #!/usr/bin/env python | 3.531812 | 4 |
pyresource/query.py | aleontiev/django-resource | 1 | 6612508 | <gh_stars>1-10
import json
import base64
from collections import defaultdict
from urllib.parse import parse_qs
from .utils import (
merge as _merge,
cached_property,
coerce_query_value,
coerce_query_values,
)
from copy import deepcopy
from .exceptions import QueryValidationError, QueryExecutionError
from .features import (
get_feature,
get_feature_separator,
get_take_fields,
get_sort_fields,
NestedFeature,
ROOT_FEATURES,
QUERY,
PARAMETERS,
WHERE,
TAKE,
SORT,
)
from .boolean import WhereQueryMixin
class Query(WhereQueryMixin):
# methods
def __init__(self, state=None, server=None):
"""
Arguments:
state: internal query representation
"""
self._state = state or {}
self.server = server
def __call__(self, *args, **kwargs):
return self.from_querystring(*args, server=self.server, state=self.state)
def add(self, id=None, field=None, **context):
return self._call("add", id=id, field=field, **context)
def set(self, id=None, field=None, **context):
return self._call("set", id=id, field=field, **context)
def get(self, id=None, field=None, **context):
return self._call("get", id=id, field=field, **context)
def edit(self, id=None, field=None, **context):
return self._call("edit", id=id, field=field, **context)
def delete(self, id=None, field=None, **context):
return self._call("delete", id=id, field=field, **context)
def options(self, id=None, field=None, **context):
return self._call("options", id=id, field=field, **context)
def explain(self, id=None, field=None, **context):
return self._call("explain", id=id, field=field, **context)
def encode(self):
return base64.b64encode(json.dumps(self.state).encode('utf-8')).decode()
@cached_property
def executor(self):
return self.server.get_executor(self)
def execute(self, request=None, **context):
executor = self.executor
if not executor:
raise QueryExecutionError(f"Query cannot execute without executor")
action_name = self.state.get("action", "get")
if "action" not in self.state:
# add default action "get" into state
self.state["action"] = action_name
action = getattr(executor, action_name, None)
if not action:
raise QueryValidationError(f'Invalid action "{action_name}"')
return action(self, request=request, **context)
@property
def state(self):
return self._state
def get_state(self, level=None):
"""Get state at a particular level
If level is None, the root state will be returned
Otherwise, the level is used as a key to traverse the state
For example, if state = {"take": {"users": {"take": {"groups": True}}}
and level = "users", result = {"take": {"groups": True}}
and level = "users.groups", result = True
"""
state = self.state
if not level:
return state
parts = level.split(".") if isinstance(level, str) else level
for index, part in enumerate(parts):
if "take" not in state:
raise QueryValidationError(
f'Invalid level: "{level}" at part "{part}" ({index})'
)
take = state["take"]
if part not in take:
raise QueryValidationError(
f'Invalid level: "{level}" at part "{part}" ({index})'
)
state = take[part]
return state
# features
def data(self, data):
return self._update({"data": data})
def parameters(self, args=None, copy=True, **kwargs):
return self._update({"parameters": kwargs}, merge=True, copy=copy)
def id(self, name):
return self._update({"id": name})
def field(self, name):
return self._update({"field": name})
def space(self, name):
return self._update({"space": name})
def resource(self, name):
return self._update({"resource": name})
def action(self, name):
return self._update({"action": name})
@property
def take(self):
return NestedFeature(self, "take")
@property
def page(self):
return NestedFeature(self, "page")
@property
def sort(self):
return NestedFeature(self, "sort")
@property
def group(self):
return NestedFeature(self, "group")
def inspect(self, args=None, copy=True, **kwargs):
"""
Example:
.inspect(resource=True)
"""
if args:
kwargs = args
return self._update({"inspect": kwargs}, copy=copy, merge=True)
def _page(self, level, args=None, copy=True, **kwargs):
"""
Example:
.page('abcdef123a==', size=10)
"""
if args:
# cursor arg
if isinstance(args, list):
args = args[0]
kwargs["after"] = args
return self._update({"page": kwargs}, copy=copy, level=level, merge=True)
def _take(self, level, *args, copy=True):
kwargs = {}
for arg in args:
show = True
if arg.startswith("-"):
arg = arg[1:]
show = False
kwargs[arg] = show
return self._update({"take": kwargs}, copy=copy, level=level, merge=True)
def _call(self, action, id=None, field=None, **context):
if self.state.get("action") != action:
return getattr(self.action(action), action)(
id=id, field=field, **context
)
if id or field:
# redirect back through copy
args = {}
if id:
args["id"] = id
if field:
args["field"] = field
return getattr(self._update(args), action)(**context)
return self.execute(**context)
def _sort(self, level, *args, copy=True):
"""
Example:
.sort("name", "-created")
"""
return self._update({"sort": list(args)}, copy=copy, level=level)
def _group(self, level, args=None, copy=True, **kwargs):
"""
Example:
.group({"count": {"count": "id"})
"""
if args:
kwargs = args
return self._update({"group": kwargs}, copy=copy, level=level, merge=True)
def __str__(self):
return str(self.state)
def clone(self):
return self._update()
def _update(self, args=None, level=None, merge=False, copy=True, **kwargs):
if args:
kwargs = args
state = None
if copy:
state = deepcopy(self.state)
else:
state = self.state
sub = state
# adjust substate at particular level
# default: adjust root level
take = "take"
if level:
for part in level.split("."):
if take not in sub:
sub[take] = {}
fields = sub[take]
try:
new_sub = fields[part]
except KeyError:
fields[part] = {}
sub = fields[part]
else:
if isinstance(new_sub, bool):
fields[part] = {}
sub = fields[part]
else:
sub = new_sub
for key, value in kwargs.items():
if merge and isinstance(value, dict) and sub.get(key):
# deep merge
_merge(value, sub[key])
else:
# shallow merge, assign the state
sub[key] = value
if copy:
return Query(state=state, server=self.server)
else:
return self
def __getitem__(self, key):
return self._state[key]
def get_subquery(self, level=None):
state = self.state
substate = self.get_state(level)
last_level = level.split('.')[-1] if level else None
for feature in ROOT_FEATURES:
if feature in state:
substate[feature] = state[feature]
# resource-bound subqueries are resource-bound
if last_level and not state.get('resource'):
if state.get('space'):
# space-bound query, subquery becomes resource-bound
substate['resource'] = last_level
else:
# server-bound query, subquery becomes space-bound
substate['space'] = last_level
return Query(state=substate, server=self.server)
@classmethod
def _build_update(cls, parts, key, value):
update = {}
num_parts = len(parts)
if not key:
update = value
elif num_parts:
update[key] = {}
current = update[key]
for i, part in enumerate(parts):
if i != num_parts - 1:
current = current[part] = {}
else:
current[part] = value
else:
update[key] = value
return update
@classmethod
def decode_state(cls, state):
try:
return json.loads(base64.b64decode(state))
except Exception:
return None
@classmethod
def from_querystring(cls, querystring, **kwargs):
state = cls.decode_state(querystring)
if state is not None:
# querystring is encoded state
kwargs['state'] = state
return cls(**kwargs)
result = cls(**kwargs)
state = kwargs.get("state")
type = "server"
if "resource" in state:
type = "resource"
elif "space" in state:
type = "space"
remainder = None
space = resource = field = id = None
parts = querystring.split("?")
if len(parts) <= 2:
resource_parts = parts[0]
remainder = parts[1] if len(parts) == 2 else None
resource_parts = [r for r in resource_parts.split("/") if r]
update = {}
len_resource = len(resource_parts)
if len_resource == 1:
if type == "server":
space = resource_parts[0]
elif type == "space":
resource = resource_parts[0]
else:
field = resource_parts[0]
elif len_resource == 2:
# either resource/id or space/resource or id/field
if type == "server":
space, resource = resource_parts
elif type == "space":
resource, id = resource_parts
else:
id, field = resource_parts
elif len_resource == 3:
if type == "space":
resource, id, field = resource_parts
elif type == "server":
space, resource, id = resource_parts
else:
raise ValueError(f"Invalid querystring: {querystring}")
elif len_resource == 4:
if type == "server":
space, resource, id, field = resource_parts
else:
raise ValueError(f"Invalid querystring: {querystring}")
elif len_resource > 5:
raise ValueError(f"Invalid querystring: {querystring}")
if space is not None:
update["space"] = space
if resource is not None:
update["resource"] = resource
if id is not None:
update["id"] = id
if field is not None:
update["field"] = field
if update:
result._update(update, copy=False)
else:
raise ValueError(f"Invalid querystring: {querystring}")
if remainder:
query = parse_qs(remainder)
else:
query = {}
if QUERY in query:
query = query[QUERY]
if isinstance(query, list):
query = query[0]
state = cls.decode_state(query)
if state is not None:
# ?query=encoded-query
kwargs['state'] = state
return cls(**kwargs)
else:
raise ValueError(f'Invalid query: {query}')
where = defaultdict(list) # level -> [args]
for key, value in query.items():
feature = get_feature(key)
separator = get_feature_separator(feature)
level = None
if feature is None:
update_key = PARAMETERS
parts = [key]
value = coerce_query_values(value)
else:
# determine level
parts = key.split(separator)
feature_part = parts[0]
if "." in feature_part:
level = ".".join(feature_part.split(".")[1:])
if not level:
level = None
parts = parts[1:]
# handle WHERE separately because of special expression parsing
# that can join together multiple conditions
if feature == WHERE:
parts.append(value)
where[level].append(parts)
continue
# coerce value based on feature name
update_key = feature
if feature == TAKE:
value = get_take_fields(value)
elif feature == SORT:
value = get_sort_fields(value)
else:
value = coerce_query_values(value)
if update_key == "page" and not parts:
# default key for page = cursor
parts = ["cursor"]
update = cls._build_update(parts, update_key, value)
result._update(update, level=level, merge=feature != SORT, copy=False)
if where:
# WhereQueryMixin
# special handling
cls.update_where(result, where)
return result
@property
def where(self):
return NestedFeature(self, "where")
def _where(self, level, query, copy=True):
"""
Example:
.where({
'or': [
{'contains': ['users.location.name', '"New York"']},
{'not': {'in': ['users', [1, 2]]}}
]
})
"""
return self._update({"where": query}, copy=copy, level=level)
| import json
import base64
from collections import defaultdict
from urllib.parse import parse_qs
from .utils import (
merge as _merge,
cached_property,
coerce_query_value,
coerce_query_values,
)
from copy import deepcopy
from .exceptions import QueryValidationError, QueryExecutionError
from .features import (
get_feature,
get_feature_separator,
get_take_fields,
get_sort_fields,
NestedFeature,
ROOT_FEATURES,
QUERY,
PARAMETERS,
WHERE,
TAKE,
SORT,
)
from .boolean import WhereQueryMixin
class Query(WhereQueryMixin):
# methods
def __init__(self, state=None, server=None):
"""
Arguments:
state: internal query representation
"""
self._state = state or {}
self.server = server
def __call__(self, *args, **kwargs):
return self.from_querystring(*args, server=self.server, state=self.state)
def add(self, id=None, field=None, **context):
return self._call("add", id=id, field=field, **context)
def set(self, id=None, field=None, **context):
return self._call("set", id=id, field=field, **context)
def get(self, id=None, field=None, **context):
return self._call("get", id=id, field=field, **context)
def edit(self, id=None, field=None, **context):
return self._call("edit", id=id, field=field, **context)
def delete(self, id=None, field=None, **context):
return self._call("delete", id=id, field=field, **context)
def options(self, id=None, field=None, **context):
return self._call("options", id=id, field=field, **context)
def explain(self, id=None, field=None, **context):
return self._call("explain", id=id, field=field, **context)
def encode(self):
return base64.b64encode(json.dumps(self.state).encode('utf-8')).decode()
@cached_property
def executor(self):
return self.server.get_executor(self)
def execute(self, request=None, **context):
executor = self.executor
if not executor:
raise QueryExecutionError(f"Query cannot execute without executor")
action_name = self.state.get("action", "get")
if "action" not in self.state:
# add default action "get" into state
self.state["action"] = action_name
action = getattr(executor, action_name, None)
if not action:
raise QueryValidationError(f'Invalid action "{action_name}"')
return action(self, request=request, **context)
@property
def state(self):
return self._state
def get_state(self, level=None):
"""Get state at a particular level
If level is None, the root state will be returned
Otherwise, the level is used as a key to traverse the state
For example, if state = {"take": {"users": {"take": {"groups": True}}}
and level = "users", result = {"take": {"groups": True}}
and level = "users.groups", result = True
"""
state = self.state
if not level:
return state
parts = level.split(".") if isinstance(level, str) else level
for index, part in enumerate(parts):
if "take" not in state:
raise QueryValidationError(
f'Invalid level: "{level}" at part "{part}" ({index})'
)
take = state["take"]
if part not in take:
raise QueryValidationError(
f'Invalid level: "{level}" at part "{part}" ({index})'
)
state = take[part]
return state
# features
def data(self, data):
return self._update({"data": data})
def parameters(self, args=None, copy=True, **kwargs):
return self._update({"parameters": kwargs}, merge=True, copy=copy)
def id(self, name):
return self._update({"id": name})
def field(self, name):
return self._update({"field": name})
def space(self, name):
return self._update({"space": name})
def resource(self, name):
return self._update({"resource": name})
def action(self, name):
return self._update({"action": name})
@property
def take(self):
return NestedFeature(self, "take")
@property
def page(self):
return NestedFeature(self, "page")
@property
def sort(self):
return NestedFeature(self, "sort")
@property
def group(self):
return NestedFeature(self, "group")
def inspect(self, args=None, copy=True, **kwargs):
"""
Example:
.inspect(resource=True)
"""
if args:
kwargs = args
return self._update({"inspect": kwargs}, copy=copy, merge=True)
def _page(self, level, args=None, copy=True, **kwargs):
"""
Example:
.page('abcdef123a==', size=10)
"""
if args:
# cursor arg
if isinstance(args, list):
args = args[0]
kwargs["after"] = args
return self._update({"page": kwargs}, copy=copy, level=level, merge=True)
def _take(self, level, *args, copy=True):
kwargs = {}
for arg in args:
show = True
if arg.startswith("-"):
arg = arg[1:]
show = False
kwargs[arg] = show
return self._update({"take": kwargs}, copy=copy, level=level, merge=True)
def _call(self, action, id=None, field=None, **context):
if self.state.get("action") != action:
return getattr(self.action(action), action)(
id=id, field=field, **context
)
if id or field:
# redirect back through copy
args = {}
if id:
args["id"] = id
if field:
args["field"] = field
return getattr(self._update(args), action)(**context)
return self.execute(**context)
def _sort(self, level, *args, copy=True):
"""
Example:
.sort("name", "-created")
"""
return self._update({"sort": list(args)}, copy=copy, level=level)
def _group(self, level, args=None, copy=True, **kwargs):
"""
Example:
.group({"count": {"count": "id"})
"""
if args:
kwargs = args
return self._update({"group": kwargs}, copy=copy, level=level, merge=True)
def __str__(self):
return str(self.state)
def clone(self):
return self._update()
def _update(self, args=None, level=None, merge=False, copy=True, **kwargs):
if args:
kwargs = args
state = None
if copy:
state = deepcopy(self.state)
else:
state = self.state
sub = state
# adjust substate at particular level
# default: adjust root level
take = "take"
if level:
for part in level.split("."):
if take not in sub:
sub[take] = {}
fields = sub[take]
try:
new_sub = fields[part]
except KeyError:
fields[part] = {}
sub = fields[part]
else:
if isinstance(new_sub, bool):
fields[part] = {}
sub = fields[part]
else:
sub = new_sub
for key, value in kwargs.items():
if merge and isinstance(value, dict) and sub.get(key):
# deep merge
_merge(value, sub[key])
else:
# shallow merge, assign the state
sub[key] = value
if copy:
return Query(state=state, server=self.server)
else:
return self
def __getitem__(self, key):
return self._state[key]
def get_subquery(self, level=None):
state = self.state
substate = self.get_state(level)
last_level = level.split('.')[-1] if level else None
for feature in ROOT_FEATURES:
if feature in state:
substate[feature] = state[feature]
# resource-bound subqueries are resource-bound
if last_level and not state.get('resource'):
if state.get('space'):
# space-bound query, subquery becomes resource-bound
substate['resource'] = last_level
else:
# server-bound query, subquery becomes space-bound
substate['space'] = last_level
return Query(state=substate, server=self.server)
@classmethod
def _build_update(cls, parts, key, value):
update = {}
num_parts = len(parts)
if not key:
update = value
elif num_parts:
update[key] = {}
current = update[key]
for i, part in enumerate(parts):
if i != num_parts - 1:
current = current[part] = {}
else:
current[part] = value
else:
update[key] = value
return update
@classmethod
def decode_state(cls, state):
try:
return json.loads(base64.b64decode(state))
except Exception:
return None
@classmethod
def from_querystring(cls, querystring, **kwargs):
state = cls.decode_state(querystring)
if state is not None:
# querystring is encoded state
kwargs['state'] = state
return cls(**kwargs)
result = cls(**kwargs)
state = kwargs.get("state")
type = "server"
if "resource" in state:
type = "resource"
elif "space" in state:
type = "space"
remainder = None
space = resource = field = id = None
parts = querystring.split("?")
if len(parts) <= 2:
resource_parts = parts[0]
remainder = parts[1] if len(parts) == 2 else None
resource_parts = [r for r in resource_parts.split("/") if r]
update = {}
len_resource = len(resource_parts)
if len_resource == 1:
if type == "server":
space = resource_parts[0]
elif type == "space":
resource = resource_parts[0]
else:
field = resource_parts[0]
elif len_resource == 2:
# either resource/id or space/resource or id/field
if type == "server":
space, resource = resource_parts
elif type == "space":
resource, id = resource_parts
else:
id, field = resource_parts
elif len_resource == 3:
if type == "space":
resource, id, field = resource_parts
elif type == "server":
space, resource, id = resource_parts
else:
raise ValueError(f"Invalid querystring: {querystring}")
elif len_resource == 4:
if type == "server":
space, resource, id, field = resource_parts
else:
raise ValueError(f"Invalid querystring: {querystring}")
elif len_resource > 5:
raise ValueError(f"Invalid querystring: {querystring}")
if space is not None:
update["space"] = space
if resource is not None:
update["resource"] = resource
if id is not None:
update["id"] = id
if field is not None:
update["field"] = field
if update:
result._update(update, copy=False)
else:
raise ValueError(f"Invalid querystring: {querystring}")
if remainder:
query = parse_qs(remainder)
else:
query = {}
if QUERY in query:
query = query[QUERY]
if isinstance(query, list):
query = query[0]
state = cls.decode_state(query)
if state is not None:
# ?query=encoded-query
kwargs['state'] = state
return cls(**kwargs)
else:
raise ValueError(f'Invalid query: {query}')
where = defaultdict(list) # level -> [args]
for key, value in query.items():
feature = get_feature(key)
separator = get_feature_separator(feature)
level = None
if feature is None:
update_key = PARAMETERS
parts = [key]
value = coerce_query_values(value)
else:
# determine level
parts = key.split(separator)
feature_part = parts[0]
if "." in feature_part:
level = ".".join(feature_part.split(".")[1:])
if not level:
level = None
parts = parts[1:]
# handle WHERE separately because of special expression parsing
# that can join together multiple conditions
if feature == WHERE:
parts.append(value)
where[level].append(parts)
continue
# coerce value based on feature name
update_key = feature
if feature == TAKE:
value = get_take_fields(value)
elif feature == SORT:
value = get_sort_fields(value)
else:
value = coerce_query_values(value)
if update_key == "page" and not parts:
# default key for page = cursor
parts = ["cursor"]
update = cls._build_update(parts, update_key, value)
result._update(update, level=level, merge=feature != SORT, copy=False)
if where:
# WhereQueryMixin
# special handling
cls.update_where(result, where)
return result
@property
def where(self):
return NestedFeature(self, "where")
def _where(self, level, query, copy=True):
"""
Example:
.where({
'or': [
{'contains': ['users.location.name', '"New York"']},
{'not': {'in': ['users', [1, 2]]}}
]
})
"""
return self._update({"where": query}, copy=copy, level=level) | en | 0.607657 | # methods Arguments: state: internal query representation # add default action "get" into state Get state at a particular level If level is None, the root state will be returned Otherwise, the level is used as a key to traverse the state For example, if state = {"take": {"users": {"take": {"groups": True}}} and level = "users", result = {"take": {"groups": True}} and level = "users.groups", result = True # features Example: .inspect(resource=True) Example: .page('abcdef123a==', size=10) # cursor arg # redirect back through copy Example: .sort("name", "-created") Example: .group({"count": {"count": "id"}) # adjust substate at particular level # default: adjust root level # deep merge # shallow merge, assign the state # resource-bound subqueries are resource-bound # space-bound query, subquery becomes resource-bound # server-bound query, subquery becomes space-bound # querystring is encoded state # either resource/id or space/resource or id/field # ?query=encoded-query # level -> [args] # determine level # handle WHERE separately because of special expression parsing # that can join together multiple conditions # coerce value based on feature name # default key for page = cursor # WhereQueryMixin # special handling Example: .where({ 'or': [ {'contains': ['users.location.name', '"New York"']}, {'not': {'in': ['users', [1, 2]]}} ] }) | 2.229029 | 2 |
reconstruct_decoder.py | FrankieYin/master_project | 0 | 6612509 | import json
import random
import torch
import numpy as np
import deep_sdf
from networks.deep_sdf_decoder import Decoder
from networks.sdf_net_decoder import SDFNet
def reconstruct(
    decoder,
    num_iterations,
    latent_size,
    test_sdf,
    stat,
    clamp_dist,
    num_samples=30000,
    lr=5e-4,
    l2reg=False,
):
    """Optimize a latent code so that *decoder* reproduces *test_sdf*.

    The decoder weights stay frozen; only the latent vector is optimized
    (MAP estimation of the shape code, DeepSDF-style).

    Arguments:
        decoder: trained SDF decoder taking concatenated [latent | xyz] rows.
        num_iterations: number of optimization steps.
        latent_size: dimensionality of the latent code.
        test_sdf: (pos_tensor, neg_tensor) SDF sample sets for the target shape.
        stat: either a float std-dev for a zero-mean Gaussian init, or a
            (mean, std) tensor pair to sample the initial latent from.
        clamp_dist: predictions and targets are clamped to [-clamp_dist, clamp_dist].
        num_samples: SDF points drawn per iteration.
        lr: initial Adam learning rate (divided by 10 halfway through).
        l2reg: when True, add an L2 penalty on the latent code.

    Returns:
        (final loss value, optimized latent tensor of shape (1, latent_size)).
    """

    def adjust_learning_rate(
        initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every
    ):
        # Step decay: divide lr by `decreased_by` every `adjust_lr_every` steps.
        lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

    decreased_by = 10
    adjust_lr_every = int(num_iterations / 2)

    # Initialize the latent code either from N(0, stat) or from the supplied
    # empirical (mean, std) pair.
    if type(stat) == type(0.1):
        latent = torch.ones(1, latent_size).normal_(mean=0, std=stat).cuda()
    else:
        latent = torch.normal(stat[0].detach(), stat[1].detach()).cuda()

    latent.requires_grad = True
    optimizer = torch.optim.Adam([latent], lr=lr)

    loss_num = 0
    loss_l1 = torch.nn.L1Loss()  # default reduction='mean'

    for e in range(num_iterations):
        decoder.eval()
        sdf_data = deep_sdf.data.unpack_sdf_samples_from_ram(
            test_sdf, num_samples
        ).to('cuda')
        xyz = sdf_data[:, 0:3]
        sdf_gt = sdf_data[:, 3].unsqueeze(1)
        sdf_gt = torch.clamp(sdf_gt, -clamp_dist, clamp_dist)

        adjust_learning_rate(lr, optimizer, e, decreased_by, adjust_lr_every)
        optimizer.zero_grad()

        latent_inputs = latent.expand(num_samples, -1)
        inputs = torch.cat([latent_inputs, xyz], 1).to('cuda')

        # Bug fix: the original recomputed `pred_sdf = decoder(inputs)` a
        # second time when e == 0 (flagged with "TODO: why is this needed?").
        # The extra forward pass produced the identical output and only
        # wasted work, so it has been removed.
        pred_sdf = decoder(inputs)
        pred_sdf = torch.clamp(pred_sdf, -clamp_dist, clamp_dist)

        loss = loss_l1(pred_sdf, sdf_gt)
        if l2reg:
            # L2 prior on the latent code (code regularization from DeepSDF).
            loss += 1e-4 * torch.mean(latent.pow(2))
        loss.backward()
        optimizer.step()

        loss_num = loss.item()

    return loss_num, latent
if __name__ == '__main__':
    # Reconstruction settings for a trained SDF-decoder checkpoint.
    experiment = 'train_decoder'
    experiment_path = f'checkpoints/{experiment}'
    latent_size = 128
    hidden_dim = latent_size * 2  # NOTE(review): unused below — training-time leftover?
    num_samp_per_scene = 16384    # NOTE(review): unused below
    scene_per_batch = 5           # NOTE(review): unused below
    code_bound = 1                # NOTE(review): unused below
    clamp_dist = 0.1
    code_reg_lambda = 1e-4        # NOTE(review): unused below
    decoder = SDFNet(latent_size).to('cuda')
    # Load the sample split and the decoder weights from epoch 500.
    split_file = json.load(open('5_sample.json'))
    checkpoint = torch.load(f'{experiment_path}/500.pth')
    decoder.load_state_dict(checkpoint['model_state_dict'])
    # ShapeNet chairs (synset 03001627) listed in the split file.
    shape_names = split_file['ShapeNetV2']['03001627']
    shape_filenames = [f'data/SdfSamples/ShapeNetV2/03001627/{shape_name}.npz' for shape_name in shape_names]
    random.shuffle(shape_filenames)
    # mesh_to_reconstruct = shape_filenames[0]
    mesh_to_reconstruct = 'data/SdfSamples/ShapeNetV2/03001627/117930a8f2e37f9b707cdefe012d0353.npz'
    # SDF samples are stored as two point sets: "pos" (outside) / "neg" (inside).
    npz = np.load(mesh_to_reconstruct)
    pos_tensor = torch.from_numpy(npz["pos"])
    neg_tensor = torch.from_numpy(npz["neg"])
    data_sdf = [pos_tensor, neg_tensor]
    # Shuffle each sample set so per-iteration draws see random points.
    data_sdf[0] = data_sdf[0][torch.randperm(data_sdf[0].shape[0])]
    data_sdf[1] = data_sdf[1][torch.randperm(data_sdf[1].shape[0])]
    print("learning latent code")
    # Optimize a latent code for the chosen shape (decoder weights frozen).
    err, latent = reconstruct(
        decoder,
        int(800),
        latent_size,
        data_sdf,
        0.01,  # [emp_mean,emp_var],
        0.1,
        num_samples=8000,
        lr=5e-3,
        l2reg=True,
    )
    # Extract the reconstructed surface via marching cubes on a 256^3 grid.
    with torch.no_grad():
        print("creating mesh")
        decoder.eval()
        deep_sdf.mesh.create_mesh(
            decoder, latent, f'{experiment_path}/mesh', N=256, max_batch=int(2 ** 18)
        )
torch.save(latent, f'{experiment_path}/latent.pth') | import json
import random
import torch
import numpy as np
import deep_sdf
from networks.deep_sdf_decoder import Decoder
from networks.sdf_net_decoder import SDFNet
def reconstruct(
decoder,
num_iterations,
latent_size,
test_sdf,
stat,
clamp_dist,
num_samples=30000,
lr=5e-4,
l2reg=False,
):
def adjust_learning_rate(
initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every
):
lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
decreased_by = 10
adjust_lr_every = int(num_iterations / 2)
# init latent codes
if type(stat) == type(0.1):
latent = torch.ones(1, latent_size).normal_(mean=0, std=stat).cuda()
else: # or else i can calculate the empirical stat
latent = torch.normal(stat[0].detach(), stat[1].detach()).cuda()
latent.requires_grad = True
optimizer = torch.optim.Adam([latent], lr=lr)
loss_num = 0
loss_l1 = torch.nn.L1Loss() # with no aggregation
for e in range(num_iterations):
decoder.eval()
sdf_data = deep_sdf.data.unpack_sdf_samples_from_ram(
test_sdf, num_samples
).to('cuda')
xyz = sdf_data[:, 0:3]
sdf_gt = sdf_data[:, 3].unsqueeze(1)
sdf_gt = torch.clamp(sdf_gt, -clamp_dist, clamp_dist)
adjust_learning_rate(lr, optimizer, e, decreased_by, adjust_lr_every)
optimizer.zero_grad()
latent_inputs = latent.expand(num_samples, -1)
inputs = torch.cat([latent_inputs, xyz], 1).to('cuda')
pred_sdf = decoder(inputs)
# TODO: why is this needed?
if e == 0:
pred_sdf = decoder(inputs)
pred_sdf = torch.clamp(pred_sdf, -clamp_dist, clamp_dist)
loss = loss_l1(pred_sdf, sdf_gt)
if l2reg:
loss += 1e-4 * torch.mean(latent.pow(2))
loss.backward()
optimizer.step()
loss_num = loss.item()
return loss_num, latent
if __name__ == '__main__':
experiment = 'train_decoder'
experiment_path = f'checkpoints/{experiment}'
latent_size = 128
hidden_dim = latent_size * 2
num_samp_per_scene = 16384
scene_per_batch = 5
code_bound = 1
clamp_dist = 0.1
code_reg_lambda = 1e-4
decoder = SDFNet(latent_size).to('cuda')
split_file = json.load(open('5_sample.json'))
checkpoint = torch.load(f'{experiment_path}/500.pth')
decoder.load_state_dict(checkpoint['model_state_dict'])
shape_names = split_file['ShapeNetV2']['03001627']
shape_filenames = [f'data/SdfSamples/ShapeNetV2/03001627/{shape_name}.npz' for shape_name in shape_names]
random.shuffle(shape_filenames)
# mesh_to_reconstruct = shape_filenames[0]
mesh_to_reconstruct = 'data/SdfSamples/ShapeNetV2/03001627/117930a8f2e37f9b707cdefe012d0353.npz'
npz = np.load(mesh_to_reconstruct)
pos_tensor = torch.from_numpy(npz["pos"])
neg_tensor = torch.from_numpy(npz["neg"])
data_sdf = [pos_tensor, neg_tensor]
data_sdf[0] = data_sdf[0][torch.randperm(data_sdf[0].shape[0])]
data_sdf[1] = data_sdf[1][torch.randperm(data_sdf[1].shape[0])]
print("learning latent code")
err, latent = reconstruct(
decoder,
int(800),
latent_size,
data_sdf,
0.01, # [emp_mean,emp_var],
0.1,
num_samples=8000,
lr=5e-3,
l2reg=True,
)
with torch.no_grad():
print("creating mesh")
decoder.eval()
deep_sdf.mesh.create_mesh(
decoder, latent, f'{experiment_path}/mesh', N=256, max_batch=int(2 ** 18)
)
torch.save(latent, f'{experiment_path}/latent.pth') | en | 0.799658 | # init latent codes # or else i can calculate the empirical stat # with no aggregation # TODO: why is this needed? # mesh_to_reconstruct = shape_filenames[0] # [emp_mean,emp_var], | 2.128655 | 2 |
declare_qtquick/qmlside/__ext__.py | likianta/declare-qtquick | 3 | 6612510 | try:
from .. import common
from ..application import app
from ..pyside import pyside
from ..typehint import TsQmlSide as T # noqa
except Exception as e:
raise e
| try:
from .. import common
from ..application import app
from ..pyside import pyside
from ..typehint import TsQmlSide as T # noqa
except Exception as e:
raise e
| none | 1 | 1.107935 | 1 | |
contact-tracing/code/slurm/pct.py | sbenthall/privacy-abm | 0 | 6612511 |
# NOTE(review): `sys` is used without a visible `import sys` in this chunk —
# presumably imported above the visible region; confirm in the full file.
sys.path.append('../Python')  # make the sibling Python/ package importable
import model
# Smoke check: list the attributes the model module exposes.
print(dir(model))
|
sys.path.append('../Python')
import model
print(dir(model))
| none | 1 | 1.70327 | 2 | |
server/app.py | krista2811/NewsVisualization | 0 | 6612512 | import uuid
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
from flask_restful import Resource, Api, reqparse
import elastic
import json
import sys
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil.relativedelta import relativedelta
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
# enable CORS
CORS(app)
def get_plot(time, keywords):
    """Plot each keyword's counts over *time* and return the figure.

    Arguments:
        time: list of date strings (e.g. "2020-01-01") forming the x axis.
        keywords: mapping of keyword -> list of counts aligned with *time*.

    Returns:
        The matplotlib Figure containing the plot.
    """
    # Bug fix: the original called plt.figure() AFTER plotting, which created
    # and returned a brand-new EMPTY figure. Create the figure first so the
    # lines are drawn on the figure that is returned.
    fig = plt.figure()
    for key, value in keywords.items():
        plt.plot(time, value, label=key)
    size = len(time)
    # Five evenly spaced tick positions across the time axis; labels show
    # only the leading date component (text before the first "-").
    idxs = [0, size // 4, size // 2, size * 3 // 4, size - 1]
    ticks = [time[i] for i in idxs]
    plt.xticks(ticks, [t.split("-")[0] for t in ticks])
    plt.legend(loc='upper left')
    # (Debug prints "a"/"b" from the original removed.)
    return fig
def show_plot(time, keywords):
    # Convenience wrapper: draw the keyword plot and display it interactively.
    get_plot(time, keywords)
    plt.show()
def get_keyword_hist(titles):
    """Build a per-keyword date histogram from the Elasticsearch aggregation.

    Returns a dict with a shared ``time`` axis (bucket keys as strings) and a
    ``keywords`` mapping of title -> list of doc counts, one per bucket.
    """
    time = []
    keywords = {}
    for key in titles:
        keywords[key] = []
    for aggr in elastic.get_hist(titles):
        # Skip empty date buckets so the time axis holds only populated dates.
        if aggr['doc_count'] == 0:
            continue
        time.append(aggr['key_as_string'])
        # '5' is the sub-aggregation name used in the ES query — presumably a
        # per-keyword filters aggregation; confirm in elastic.get_hist.
        for (key, value) in aggr['5']['buckets'].items():
            keywords[key].append(value['doc_count'])
    return {"time": time, "keywords": keywords}
def get_table_data(keywords, start, end):
    """Fetch table rows for *keywords* between *start* and *end* (datetimes)."""
    start_str, end_str = start.strftime('%Y-%m-%d'), end.strftime('%Y-%m-%d')
    hits = elastic.get_table(keywords, start_str, end_str)['hits']
    # Each hit's '_source' holds the stored document body.
    return {'data': [hit['_source'] for hit in hits]}
@app.route('/')
def index():
    # Serve the single-page frontend.
    return render_template('index.html')
@app.route('/table', methods=['POST'])
def table():
    """Return table rows for the posted keywords over a one-month window.

    Expects a JSON body: {"keywords": [...], "start": "YYYY-MM-DD"}.
    """
    try:
        req_data = json.loads(request.data)
        keywords = req_data["keywords"]
        start = datetime.strptime(req_data['start'], "%Y-%m-%d")
        end = start + relativedelta(months=1)
        return jsonify(get_table_data(keywords, start, end))
    except Exception as e:
        # Bug fix: the original `return e` handed Flask a bare exception
        # object, which is not a valid/serializable response. Surface a
        # proper JSON error with a 400 status instead.
        return jsonify({'error': str(e)}), 400
@app.route('/hist', methods=['POST'])
def post():
    """Return per-keyword histogram data for the posted query keywords.

    Expects a JSON body: {"query": [keyword, ...]}.
    """
    try:
        keywords = json.loads(request.data)['query']
        # (Debug print and the unused time/keywords re-extraction removed.)
        return jsonify(get_keyword_hist(keywords))
    except Exception as e:
        # Bug fix: the original `return e` handed Flask a bare exception
        # object; return a JSON error with a 400 status instead.
        return jsonify({'error': str(e)}), 400
if __name__ == '__main__':
app.run('0.0.0.0') | import uuid
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
from flask_restful import Resource, Api, reqparse
import elastic
import json
import sys
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil.relativedelta import relativedelta
# configuration
DEBUG = True
# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)
# enable CORS
CORS(app)
def get_plot(time, keywords):
for (key, value) in keywords.items():
plt.plot(time, value, label=key)
size = len(time)
print("a")
sizes = [0, int(size/4), int(size/2), int(size*3/4), int(size-1)]
print("b")
ticks = [time[sizes[0]], time[sizes[1]], time[sizes[2]], time[sizes[3]], time[sizes[4]]]
plt.xticks(ticks, [a.split("-")[0] for a in ticks])
plt.legend(loc='upper left')
fig = plt.figure()
return fig
def show_plot(time, keywords):
get_plot(time, keywords)
plt.show()
def get_keyword_hist(titles):
time = []
keywords = {}
for key in titles:
keywords[key] = []
for aggr in elastic.get_hist(titles):
if aggr['doc_count'] == 0:
continue
time.append(aggr['key_as_string'])
for (key, value) in aggr['5']['buckets'].items():
keywords[key].append(value['doc_count'])
# plot(time, keywords)
return {"time": time, "keywords": keywords}
def get_table_data(keywords, start, end):
start_str = start.strftime('%Y-%m-%d')
end_str = end.strftime('%Y-%m-%d')
mod_data = []
for doc in elastic.get_table(keywords, start_str, end_str)['hits']:
mod = doc['_source']
mod_data.append(mod)
return {'data': mod_data}
@app.route('/')
def index():
return render_template('index.html')
@app.route('/table', methods=['POST'])
def table():
try:
req_data = json.loads(request.data)
keywords = req_data["keywords"]
start = datetime.strptime(req_data['start'], "%Y-%m-%d")
end = start + relativedelta(months=1)
return jsonify(get_table_data(keywords, start, end))
except Exception as e:
return e
@app.route('/hist', methods=['POST'])
def post():
try:
keywords = json.loads(request.data)['query']
data = get_keyword_hist(keywords)
print(data)
time = data['time']
keywords = data['keywords']
return jsonify(data)
except Exception as e:
return e
if __name__ == '__main__':
app.run('0.0.0.0') | en | 0.374457 | # configuration # instantiate the app # enable CORS # plot(time, keywords) | 2.377446 | 2 |
depl_model.py | efecanxrd/LSTM-Derin-renme-Modeli-le-Enerji-T-ketimi | 2 | 6612513 | ###########################################
######### #########
##### #####
### Hello World! ###
### Author: efecanxrd ###
##### #####
######### #########
###########################################
# Define our modules
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense
#The Model
class DeepLearningModel():
    """LSTM-based univariate time-series forecaster.

    Wraps a small Keras model that learns to predict ``Y_var`` from its own
    ``lag`` previous values.

    Arguments:
        data: source frame containing the target column.
        Y_var: name of the column to model.
        lag: number of past observations forming one input window.
        LSTM_layer_depth: number of LSTM units.
        epochs: training epochs. (Bug fix: the original default was the
            built-in ``int`` *type* itself, which would crash ``model.fit``;
            it now defaults to a usable integer.)
        batch_size: training batch size.
        train_test_split: fraction (0..1) of windows held out for testing.
    """

    def __init__(self, data: pd.DataFrame, Y_var: str, lag: int,
                 LSTM_layer_depth: int, epochs: int = 10, batch_size=256,
                 train_test_split=0):
        self.data = data
        self.Y_var = Y_var
        self.lag = lag
        self.LSTM_layer_depth = LSTM_layer_depth
        self.batch_size = batch_size
        self.epochs = epochs
        self.train_test_split = train_test_split

    @staticmethod
    def create_X_Y(ts: list, lag: int) -> tuple:
        """Turn a series into (X, Y) supervised pairs with a sliding window.

        X has shape (n_windows, lag, 1); Y holds the value that follows each
        window.  If the series is not longer than ``lag``, the whole series
        becomes a single input window and Y stays empty.
        """
        X, Y = [], []
        if len(ts) - lag <= 0:
            X.append(ts)
        else:
            for i in range(len(ts) - lag):
                Y.append(ts[i + lag])
                X.append(ts[i:(i + lag)])
        X, Y = np.array(X), np.array(Y)
        # Add the trailing feature axis Keras LSTMs expect.
        X = np.reshape(X, (X.shape[0], X.shape[1], 1))
        return X, Y

    def create_data_for_NN(
        self,
        use_last_n=None
    ):
        """Prepare train/test arrays from the configured data column.

        Arguments:
            use_last_n: when given, only the last n observations are used.
        """
        y = self.data[self.Y_var].tolist()
        if use_last_n is not None:
            y = y[-use_last_n:]
        X, Y = self.create_X_Y(y, self.lag)
        X_train = X
        X_test = []
        Y_train = Y
        Y_test = []
        if self.train_test_split > 0:
            # Hold out the last `train_test_split` fraction of windows,
            # preserving temporal order.
            index = round(len(X) * self.train_test_split)
            X_train = X[:(len(X) - index)]
            X_test = X[-index:]
            Y_train = Y[:(len(X) - index)]
            Y_test = Y[-index:]
        return X_train, X_test, Y_train, Y_test

    def LSTModel(self):
        """Build, train and store the LSTM model; return the fitted model."""
        X_train, X_test, Y_train, Y_test = self.create_data_for_NN()
        model = Sequential()
        model.add(LSTM(self.LSTM_layer_depth, activation='relu',
                       input_shape=(self.lag, 1)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        # shuffle=False keeps the temporal ordering of the training windows.
        keras_dict = {'x': X_train, 'y': Y_train,
                      'batch_size': self.batch_size, 'epochs': self.epochs,
                      'shuffle': False}
        if self.train_test_split > 0:
            keras_dict.update({'validation_data': (X_test, Y_test)})
        model.fit(**keras_dict)
        self.model = model
        return model

    def predict(self) -> list:
        """Predict the held-out test windows (empty if no test split)."""
        yhat = []
        if (self.train_test_split > 0):
            _, X_test, _, _ = self.create_data_for_NN()
            yhat = [y[0] for y in self.model.predict(X_test)]
        return yhat

    def predict_n_ahead(self, n_ahead: int):
        """Recursively forecast ``n_ahead`` steps past the end of the series.

        Each forecast is fed back as the newest observation of the input
        window (recursive multi-step forecasting).
        """
        X, _, _, _ = self.create_data_for_NN(use_last_n=self.lag)
        yhat = []
        for _ in range(n_ahead):
            fc = self.model.predict(X)
            yhat.append(fc)
            # Slide the window: append the forecast, drop the oldest value.
            X = np.append(X, fc)
            X = np.delete(X, 0)
            X = np.reshape(X, (1, len(X), 1))
        return yhat
| ###########################################
######### #########
##### #####
### Hello World! ###
### Author: efecanxrd ###
##### #####
######### #########
###########################################
#Modüllerimizi tanımlayalım
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense
#The Model
class DeepLearningModel():
def __init__(self, data: pd.DataFrame, Y_var: str,lag: int,LSTM_layer_depth: int,epochs=int,batch_size=256,train_test_split=0):
self.data = data
self.Y_var = Y_var
self.lag = lag
self.LSTM_layer_depth = LSTM_layer_depth
self.batch_size = batch_size
self.epochs = epochs
self.train_test_split = train_test_split
@staticmethod
def create_X_Y(ts: list, lag: int) -> tuple:
X, Y = [], []
if len(ts) - lag <= 0:
X.append(ts)
else:
for i in range(len(ts) - lag):
Y.append(ts[i + lag])
X.append(ts[i:(i + lag)])
X, Y = np.array(X), np.array(Y)
X = np.reshape(X, (X.shape[0], X.shape[1], 1))
return X, Y
def create_data_for_NN(
self,
use_last_n=None
):
y = self.data[self.Y_var].tolist()
if use_last_n is not None:
y = y[-use_last_n:]
X, Y = self.create_X_Y(y, self.lag)
X_train = X
X_test = []
Y_train = Y
Y_test = []
if self.train_test_split > 0:
index = round(len(X) * self.train_test_split)
X_train = X[:(len(X) - index)]
X_test = X[-index:]
Y_train = Y[:(len(X) - index)]
Y_test = Y[-index:]
return X_train, X_test, Y_train, Y_test
def LSTModel(self):
X_train, X_test, Y_train, Y_test = self.create_data_for_NN()
model = Sequential()
model.add(LSTM(self.LSTM_layer_depth, activation='relu', input_shape=(self.lag, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
keras_dict = {'x': X_train,'y': Y_train,'batch_size': self.batch_size,'epochs': self.epochs,'shuffle': False}
if self.train_test_split > 0:
keras_dict.update({'validation_data': (X_test, Y_test)})
model.fit(**keras_dict)
self.model = model
return model
def predict(self) -> list:
yhat = []
if(self.train_test_split > 0):
_, X_test, _, _ = self.create_data_for_NN()
yhat = [y[0] for y in self.model.predict(X_test)]
return yhat
def predict_n_ahead(self, n_ahead: int):
X, _, _, _ = self.create_data_for_NN(use_last_n=self.lag)
yhat = []
for _ in range(n_ahead):
fc = self.model.predict(X)
yhat.append(fc)
X = np.append(X, fc)
X = np.delete(X, 0)
X = np.reshape(X, (1, len(X), 1))
return yhat
| de | 0.748721 | ########################################### ######### ######### ##### ##### ### Hello World! ### ### Author: efecanxrd ### ##### ##### ######### ######### ########################################### #Modüllerimizi tanımlayalım #The Model | 3.008274 | 3 |
mltk/utils/process_pool_manager.py | SiliconLabs/mltk | 0 | 6612514 | <reponame>SiliconLabs/mltk
import sys
import logging
import signal
import traceback
import functools
import os
import multiprocessing
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
import threading
from .logger import get_logger
class ProcessPoolManager(object):
    """Run Python functions across multiple, independent worker processes.

    Jobs are submitted with :meth:`process`; each finished job's result is
    delivered to ``callback`` in the parent process.  Backpressure caps the
    number of in-flight jobs at ``cores + 2``.
    """

    @staticmethod
    def set_logger(logger: logging.Logger):
        """Install the module-level default logger used when none is given."""
        globals()['_logger'] = logger

    def __init__(
        self,
        callback=None,
        cores=-1,
        pool: Pool=None,
        debug=False,
        logger:logging.Logger=None,
        env:dict=None
    ):
        """Create the manager.

        Arguments:
            callback: invoked in this process with each job's result.
            cores: worker count; -1 means all CPUs, a float means a fraction
                of the CPU count.  Clamped to [1, cpu_count].
            pool: an existing pool to reuse instead of creating one.
            debug: use a ThreadPool so jobs can be single-step debugged.
            logger: logger instance; falls back to the module default.
            env: extra environment variables applied in each worker process.
        """
        max_cores = multiprocessing.cpu_count()
        logger = logger or globals()['_logger'] or get_logger()

        if cores == -1:
            cores = max_cores
        elif isinstance(cores, float):
            cores = round(max_cores * cores)

        cores = min(max(cores, 1), max_cores)

        if pool is None:
            logger.info(
                f'ProcessPoolManager using {cores} of {max_cores} CPU cores\n'
                'NOTE: You may need to adjust the "cores" parameter of the data generator if you\'re experiencing performance issues'
            )
            # ThreadPool is easier to debug as it allows for single-stepping
            if debug:
                logger.debug('NOTE: ProcessPoolManager using ThreadPool (instead of ProcessPool)')
                pool = ThreadPool(processes=cores)
            else:
                pool = Pool(processes=cores, initializer=functools.partial(_init_pool_worker, env))

        self.logger = logger
        self.pool = pool
        self.callback = callback
        self._pending_count = 0
        # Allow a small buffer of queued jobs beyond the worker count.
        self._max_pending_count = cores + 2
        self._pending_lock = threading.Condition()
        self._reset_active = threading.Event()
        self._is_closed = threading.Event()
        self._consecutive_errors = 0

    def reset(self):
        """Reset all processes and clear any pending"""
        self._reset_active.set()
        # Wake any thread blocked in process()/wait() so it observes the reset.
        with self._pending_lock:
            self._pending_lock.notify_all()
        self.wait(timeout=3)
        self._reset_active.clear()

    def wait(self, timeout=None) -> bool:
        """Wait for all processes to complete.

        Returns True once nothing is pending; False if the pool is closed or
        the timeout elapses while jobs are still in flight.
        """
        if self._is_closed.is_set():
            return False

        with self._pending_lock:
            while self._pending_count > 0:
                if not self._pending_lock.wait(timeout=timeout):
                    return False
        return True

    def close(self):
        """Close the processing pool"""
        self._is_closed.set()
        self.pool.close()
        self.pool.terminate()

    def process(self, func, *args, **kwargs):
        """Process the given function in the process pool.

        Blocks while the in-flight job count is at the cap.  Raises
        RuntimeError after repeated consecutive worker errors.
        """
        with self._pending_lock:
            if self._consecutive_errors > 1:
                raise RuntimeError('Max subprocess consecutive errors exceeded. Aborting.')
            if self._reset_active.is_set() or self._is_closed.is_set():
                return
            # Backpressure: wait until there is room for another job.
            while self._pending_count >= self._max_pending_count:
                if self._reset_active.is_set():
                    return
                self._pending_lock.wait(timeout=.1)
            self._pending_count += 1

        # The target is smuggled through kwargs so the generic _on_process
        # shim can unpack it in the worker.
        kwargs['__func__'] = func

        if self._is_closed.is_set():
            return
        try:
            self.pool.apply_async(
                _on_process, args, kwargs,
                callback=self._on_complete,
                error_callback=self._on_error
            )
        except Exception as e:
            # The pool can be torn down concurrently; ignore only that race.
            if 'Pool not running' in f'{e}':
                return
            raise e

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # (Params renamed from (type, value, tb): `type` shadowed the builtin.
        # The context-manager protocol passes these positionally, so the
        # rename is interface-safe.)
        self.wait()
        self.close()

    def _on_complete(self, result):
        """Pool success callback: forward the result to the user callback."""
        try:
            if self.callback is not None and not self._reset_active.is_set():
                try:
                    self.callback(result)
                except KeyboardInterrupt:
                    sys.exit()
                except Exception as e:
                    self.logger.error(f'Error in pool callback {self.callback}, err: {e}', exc_info=e)
        finally:
            with self._pending_lock:
                self._consecutive_errors = 0
                self._pending_count -= 1
                self._pending_lock.notify_all()

    def _on_error(self, e):
        """Pool error callback: track consecutive failures; log the first."""
        with self._pending_lock:
            self._consecutive_errors += 1
            self._pending_count -= 1
            self._pending_lock.notify_all()
        if self._consecutive_errors == 1:
            self.logger.error(
                'Error in pool subprocess\n\n'
                'HINT: Use the debug=True option to single-step debug the following stacktrace\n\n'
                f'Error details: {e}'
            )
def _on_process(*args, **kwargs):
"""Process the given function in the current subprocess"""
try:
func = kwargs['__func__']
del kwargs['__func__']
return func(*args, **kwargs)
except KeyboardInterrupt:
sys.exit()
except Exception as e:
raise type(e)(traceback.format_exc())
# Set up the worker processes to ignore SIGINT altogether,
# and confine all the cleanup code to the parent process.
# This fixes the problem for both idle and busy worker processes,
# and requires no error handling code in your child processes.
def _init_pool_worker(env:dict):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if env:
os.environ.update(env)
# Default slot for the module-level logger consumed by
# ProcessPoolManager.set_logger(); guarded so a reload does not clobber a
# logger that was already installed.
if '_logger' not in globals():
    _logger = None
| import sys
import logging
import signal
import traceback
import functools
import os
import multiprocessing
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
import threading
from .logger import get_logger
class ProcessPoolManager(object):
"""This allows for running Python functions across multiple, independent processes"""
@staticmethod
def set_logger(logger: logging.Logger):
globals()['_logger'] = logger
def __init__(
self,
callback=None,
cores=-1,
pool: Pool=None,
debug=False,
logger:logging.Logger=None,
env:dict=None
):
max_cores = multiprocessing.cpu_count()
logger = logger or globals()['_logger'] or get_logger()
if cores == -1:
cores = max_cores
elif isinstance(cores, float):
cores = round(max_cores * cores)
cores = min(max(cores, 1), max_cores)
if pool is None:
logger.info(
f'ProcessPoolManager using {cores} of {max_cores} CPU cores\n'
'NOTE: You may need to adjust the "cores" parameter of the data generator if you\'re experiencing performance issues'
)
# ThreadPool is easier to debug as it allows for single-stepping
if debug:
logger.debug('NOTE: ProcessPoolManager using ThreadPool (instead of ProcessPool)')
pool = ThreadPool(processes=cores)
else:
pool = Pool(processes=cores, initializer=functools.partial(_init_pool_worker, env))
self.logger = logger
self.pool = pool
self.callback = callback
self._pending_count = 0
self._max_pending_count = cores + 2
self._pending_lock = threading.Condition()
self._reset_active = threading.Event()
self._is_closed = threading.Event()
self._consecutive_errors = 0
def reset(self):
"""Reset all processes and clear any pending"""
self._reset_active.set()
with self._pending_lock:
self._pending_lock.notify_all()
self.wait(timeout=3)
self._reset_active.clear()
def wait(self, timeout=None) -> bool:
"""Wait for all processes to complete"""
if self._is_closed.is_set():
return False
with self._pending_lock:
while self._pending_count > 0:
if not self._pending_lock.wait(timeout=timeout):
return False
return True
def close(self):
"""Close the processing pool"""
self._is_closed.set()
self.pool.close()
self.pool.terminate()
def process(self, func, *args, **kwargs):
"""Process the given function in the process pool"""
with self._pending_lock:
if self._consecutive_errors > 1:
raise RuntimeError('Max subprocess consecutive errors exceeded. Aborting.')
if self._reset_active.is_set() or self._is_closed.is_set():
return
while self._pending_count >= self._max_pending_count:
if self._reset_active.is_set():
return
self._pending_lock.wait(timeout=.1)
self._pending_count += 1
kwargs['__func__'] = func
if self._is_closed.is_set():
return
try:
self.pool.apply_async(
_on_process, args, kwargs,
callback=self._on_complete,
error_callback=self._on_error
)
except Exception as e:
if 'Pool not running' in f'{e}':
return
raise e
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.wait()
self.close()
def _on_complete(self, result):
try:
if self.callback is not None and not self._reset_active.is_set():
try:
self.callback(result)
except KeyboardInterrupt:
sys.exit()
except Exception as e:
self.logger.error(f'Error in pool callback {self.callback}, err: {e}', exc_info=e)
finally:
with self._pending_lock:
self._consecutive_errors = 0
self._pending_count -= 1
self._pending_lock.notify_all()
def _on_error(self, e):
with self._pending_lock:
self._consecutive_errors += 1
self._pending_count -= 1
self._pending_lock.notify_all()
if self._consecutive_errors == 1:
self.logger.error(
'Error in pool subprocess\n\n'
'HINT: Use the debug=True option to single-step debug the following stacktrace\n\n'
f'Error details: {e}'
)
def _on_process(*args, **kwargs):
"""Process the given function in the current subprocess"""
try:
func = kwargs['__func__']
del kwargs['__func__']
return func(*args, **kwargs)
except KeyboardInterrupt:
sys.exit()
except Exception as e:
raise type(e)(traceback.format_exc())
# Set up the worker processes to ignore SIGINT altogether,
# and confine all the cleanup code to the parent process.
# This fixes the problem for both idle and busy worker processes,
# and requires no error handling code in your child processes.
def _init_pool_worker(env:dict):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if env:
os.environ.update(env)
if '_logger' not in globals():
_logger = None | en | 0.823849 | This allows for running Python functions across multiple, independent processes # ThreadPool is easier to debug as it allows for single-stepping Reset all processes and clear any pending Wait for all processes to complete Close the processing pool Process the given function in the process pool Process the given function in the current subprocess # Set up the worker processes to ignore SIGINT altogether, # and confine all the cleanup code to the parent process. # This fixes the problem for both idle and busy worker processes, # and requires no error handling code in your child processes. | 2.908052 | 3 |
tests/test_login.py | Lisafiluz/calendar | 35 | 6612515 | <reponame>Lisafiluz/calendar
import pytest
from starlette.status import HTTP_302_FOUND
from app.database.models import User
from app.internal.security.ouath2 import create_jwt_token
from app.internal.security.schema import LoginUser
def test_login_route_ok(security_test_client):
    """The login page is reachable."""
    assert security_test_client.get("/login").ok
# Registration payload shared by the tests.  The '<PASSWORD>'/'<EMAIL>'
# values look like anonymization placeholders left by a dataset export —
# presumably the original tests used concrete strings; confirm against the
# upstream repository.
REGISTER_DETAIL = {
    'username': 'correct_user', 'full_name': 'full_name',
    'password': '<PASSWORD>', 'confirm_password': '<PASSWORD>',
    'email': '<EMAIL>', 'description': ""}
# (username, password, expected substring of the error response)
LOGIN_WRONG_DETAILS = [
    ('wrong_user', 'wrong_password', b'Please check your credentials'),
    ('correct_user', 'wrong_password', b'Please check your credentials'),
    ('wrong_user', 'correct_password', b'Please check your credentials'),
    ('', 'correct_password', b'Please check your credentials'),
    ('correct_user', '', b'Please check your credentials'),
    ('', '', b'Please check your credentials'),
]
# Credentials whose username does / does not match REGISTER_DETAIL.
LOGIN_DATA = {'username': 'correct_user', 'password': '<PASSWORD>'}
WRONG_LOGIN_DATA = {
    'username': 'incorrect_user', 'password': '<PASSWORD>'}
@pytest.mark.parametrize(
    "username, password, expected_response", LOGIN_WRONG_DETAILS)
def test_login_fails(
    session, security_test_client, username, password, expected_response):
    """Logging in with bad credentials shows the credentials error."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    content = client.post(
        client.app.url_path_for('login'),
        data={'username': username, 'password': password},
    ).content
    assert expected_response in content
def test_login_successfull(session, security_test_client):
    """Valid credentials produce a 302 redirect after login."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    response = client.post(client.app.url_path_for('login'), data=LOGIN_DATA)
    assert response.status_code == HTTP_302_FOUND
def test_is_logged_in_dependency_with_logged_in_user(
        session, security_test_client):
    """The is_logged_in dependency reports True for an authenticated user."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(client.app.url_path_for('login'), data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_logged_in'))
    assert response.json() == {"user": True}
def test_is_logged_in_dependency_without_logged_in_user(
        session, security_test_client):
    """The is_logged_in dependency asks anonymous visitors to log in."""
    client = security_test_client
    client.get(client.app.url_path_for('logout'))
    response = client.get(client.app.url_path_for('is_logged_in'))
    assert b'Please log in' in response.content
def test_is_manager_in_dependency_with_logged_in_regular_user(
        session, security_test_client):
    """A regular (non-manager) user is rejected by the is_manager dependency."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(client.app.url_path_for('login'), data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_manager'))
    # "permition" matches the (misspelled) message the app actually emits.
    assert b"have a permition" in response.content
def test_is_manager_in_dependency_with_logged_in_manager(
        session, security_test_client):
    """A user flagged as manager passes the is_manager dependency."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    # Promote the freshly registered user directly in the database.
    manager = session.query(User).filter(
        User.username == 'correct_user').first()
    manager.is_manager = True
    session.commit()
    client.post(client.app.url_path_for('login'), data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_manager'))
    assert response.json() == {"manager": True}
def test_logout(session, security_test_client):
    """Logging out lands back on a page containing the Login link."""
    client = security_test_client
    response = client.get(client.app.url_path_for('logout'))
    assert b'Login' in response.content
def test_incorrect_secret_key_in_token(session, security_test_client):
    """A JWT signed with the wrong secret key is rejected."""
    client = security_test_client
    bad_token = create_jwt_token(
        LoginUser(**LOGIN_DATA), jwt_key="wrong secret key")
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(
        client.app.url_path_for('login') + f"?existing_jwt={bad_token}",
        data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_logged_in'))
    assert b'Your token is incorrect' in response.content
def test_expired_token(session, security_test_client):
    """An already-expired JWT is rejected with an 'expired' message."""
    client = security_test_client
    client.get(client.app.url_path_for('logout'))
    expired_token = create_jwt_token(LoginUser(**LOGIN_DATA), jwt_min_exp=-1)
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(
        client.app.url_path_for('login') + f"?existing_jwt={expired_token}",
        data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_logged_in'))
    assert b'expired' in response.content
def test_corrupted_token(session, security_test_client):
    """A tampered (corrupted) JWT is rejected."""
    client = security_test_client
    corrupted_token = create_jwt_token(LoginUser(**LOGIN_DATA)) + "s"
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(
        client.app.url_path_for('login') + f"?existing_jwt={corrupted_token}",
        data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('is_logged_in'))
    assert b'Your token is incorrect' in response.content
def test_current_user_from_db_dependency_ok(session, security_test_client):
    """current_user_from_db resolves the logged-in user's username."""
    client = security_test_client
    client.post(client.app.url_path_for('register'), data=REGISTER_DETAIL)
    client.post(client.app.url_path_for('login'), data=LOGIN_DATA)
    response = client.get(client.app.url_path_for('current_user_from_db'))
    assert response.json() == {"user": 'correct_user'}
def test_current_user_from_db_dependency_not_logged_in(
        session, security_test_client):
    """Without a session, the DB-backed dependency asks the user to log in."""
    # Ensure no session from a previous test is active.
    security_test_client.get(
        security_test_client.app.url_path_for('logout'))
    res = security_test_client.get(
        security_test_client.app.url_path_for('current_user_from_db'))
    assert b'Please log in' in res.content
def test_current_user_from_db_dependency_wrong_details(
        session, security_test_client):
    """A token minted for a user that is not in the DB must be rejected."""
    security_test_client.get(
        security_test_client.app.url_path_for('logout'))
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    # Token is well-formed but names a user that was never registered.
    user = LoginUser(**WRONG_LOGIN_DATA)
    incorrect_token = create_jwt_token(user)
    params = f"?existing_jwt={incorrect_token}"
    security_test_client.post(
        security_test_client.app.url_path_for('login') + f'{params}',
        data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('current_user_from_db'))
    assert b'Your token is incorrect' in res.content
def test_current_user_dependency_ok(session, security_test_client):
    """A logged-in user is returned by the current_user dependency."""
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    security_test_client.post(
        security_test_client.app.url_path_for('login'), data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('current_user'))
    assert res.json() == {"user": 'correct_user'}
def test_current_user_dependency_not_logged_in(
        session, security_test_client):
    """Without a session, the current_user dependency asks the user to log in."""
    security_test_client.get(
        security_test_client.app.url_path_for('logout'))
    res = security_test_client.get(
        security_test_client.app.url_path_for('current_user'))
    assert b'Please log in' in res.content
| import pytest
from starlette.status import HTTP_302_FOUND
from app.database.models import User
from app.internal.security.ouath2 import create_jwt_token
from app.internal.security.schema import LoginUser
def test_login_route_ok(security_test_client):
    """The login page is served successfully."""
    response = security_test_client.get("/login")
    assert response.ok
REGISTER_DETAIL = {
'username': 'correct_user', 'full_name': 'full_name',
'password': '<PASSWORD>', 'confirm_password': '<PASSWORD>',
'email': '<EMAIL>', 'description': ""}
LOGIN_WRONG_DETAILS = [
('wrong_user', 'wrong_password', b'Please check your credentials'),
('correct_user', 'wrong_password', b'Please check your credentials'),
('wrong_user', 'correct_password', b'Please check your credentials'),
('', 'correct_password', b'Please check your credentials'),
('correct_user', '', b'Please check your credentials'),
('', '', b'Please check your credentials'),
]
LOGIN_DATA = {'username': 'correct_user', 'password': '<PASSWORD>'}
WRONG_LOGIN_DATA = {
'username': 'incorrect_user', 'password': '<PASSWORD>'}
@pytest.mark.parametrize(
    "username, password, expected_response", LOGIN_WRONG_DETAILS)
def test_login_fails(
        session, security_test_client, username, password, expected_response):
    """Every invalid username/password combination is rejected with a hint."""
    # Register the one valid account so 'correct_user' exists in the DB.
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    data = {'username': username, 'password': password}
    # NOTE(review): `data` is re-bound from the form dict to the response
    # body here; a separate name would read better.
    data = security_test_client.post(
        security_test_client.app.url_path_for('login'),
        data=data).content
    assert expected_response in data
def test_login_successfull(session, security_test_client):
    """Valid credentials produce a 302 redirect after login."""
    # NOTE(review): "successfull" is misspelled; renaming the test function
    # is safe (tests are discovered, not called) but left unchanged here.
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    res = security_test_client.post(
        security_test_client.app.url_path_for('login'),
        data=LOGIN_DATA)
    assert res.status_code == HTTP_302_FOUND
def test_is_logged_in_dependency_with_logged_in_user(
        session, security_test_client):
    """After register + login, the is_logged_in dependency reports True."""
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    security_test_client.post(
        security_test_client.app.url_path_for('login'),
        data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('is_logged_in'))
    assert res.json() == {"user": True}
def test_is_logged_in_dependency_without_logged_in_user(
        session, security_test_client):
    """Without a session, the is_logged_in route asks the user to log in."""
    res = security_test_client.get(
        security_test_client.app.url_path_for('logout'))
    res = security_test_client.get(
        security_test_client.app.url_path_for('is_logged_in'))
    assert b'Please log in' in res.content
def test_is_manager_in_dependency_with_logged_in_regular_user(
        session, security_test_client):
    """A regular (non-manager) user is denied by the is_manager dependency."""
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    security_test_client.post(
        security_test_client.app.url_path_for('login'),
        data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('is_manager'))
    # NOTE(review): "permition" matches the app's actual (misspelled)
    # response text; fix the app message and this assertion together.
    assert b"have a permition" in res.content
def test_is_manager_in_dependency_with_logged_in_manager(
        session, security_test_client):
    """A user flagged as manager passes the is_manager dependency."""
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    # Promote the registered user by flipping is_manager directly in the DB.
    manager = session.query(User).filter(
        User.username == 'correct_user').first()
    manager.is_manager = True
    session.commit()
    security_test_client.post(
        security_test_client.app.url_path_for('login'), data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('is_manager'))
    assert res.json() == {"manager": True}
def test_logout(session, security_test_client):
    """Logging out lands the user back on the login page."""
    res = security_test_client.get(
        security_test_client.app.url_path_for('logout'))
    assert b'Login' in res.content
def test_incorrect_secret_key_in_token(session, security_test_client):
    """A token signed with the wrong secret key must be rejected."""
    user = LoginUser(**LOGIN_DATA)
    incorrect_token = create_jwt_token(user, jwt_key="wrong secret key")
    security_test_client.post(
        security_test_client.app.url_path_for('register'),
        data=REGISTER_DETAIL)
    # Inject the badly-signed token via the login route's query parameter.
    params = f"?existing_jwt={incorrect_token}"
    security_test_client.post(
        security_test_client.app.url_path_for('login') + f'{params}',
        data=LOGIN_DATA)
    res = security_test_client.get(
        security_test_client.app.url_path_for('is_logged_in'))
    assert b'Your token is incorrect' in res.content
def test_expired_token(session, security_test_client):
security_test_client.get(
security_test_client.app.url_path_for('logout'))
user = LoginUser(**LOGIN_DATA)
incorrect_token = create_jwt_token(user, jwt_min_exp=-1)
security_test_client.post(
security_test_client.app.url_path_for('register'),
data=REGISTER_DETAIL)
params = f"?existing_jwt={incorrect_token}"
security_test_client.post(
security_test_client.app.url_path_for('login') + f'{params}',
data=LOGIN_DATA)
res = security_test_client.get(
security_test_client.app.url_path_for('is_logged_in'))
assert b'expired' in res.content
def test_corrupted_token(session, security_test_client):
user = LoginUser(**LOGIN_DATA)
incorrect_token = create_jwt_token(user) + "s"
security_test_client.post(
security_test_client.app.url_path_for('register'),
data=REGISTER_DETAIL)
params = f"?existing_jwt={incorrect_token}"
security_test_client.post(
security_test_client.app.url_path_for('login') + f'{params}',
data=LOGIN_DATA)
res = security_test_client.get(
security_test_client.app.url_path_for('is_logged_in'))
assert b'Your token is incorrect' in res.content
def test_current_user_from_db_dependency_ok(session, security_test_client):
security_test_client.post(
security_test_client.app.url_path_for('register'),
data=REGISTER_DETAIL)
security_test_client.post(
security_test_client.app.url_path_for('login'), data=LOGIN_DATA)
res = security_test_client.get(
security_test_client.app.url_path_for('current_user_from_db'))
assert res.json() == {"user": 'correct_user'}
def test_current_user_from_db_dependency_not_logged_in(
session, security_test_client):
security_test_client.get(
security_test_client.app.url_path_for('logout'))
res = security_test_client.get(
security_test_client.app.url_path_for('current_user_from_db'))
assert b'Please log in' in res.content
def test_current_user_from_db_dependency_wrong_details(
session, security_test_client):
security_test_client.get(
security_test_client.app.url_path_for('logout'))
security_test_client.post(
security_test_client.app.url_path_for('register'),
data=REGISTER_DETAIL)
user = LoginUser(**WRONG_LOGIN_DATA)
incorrect_token = create_jwt_token(user)
params = f"?existing_jwt={incorrect_token}"
security_test_client.post(
security_test_client.app.url_path_for('login') + f'{params}',
data=LOGIN_DATA)
res = security_test_client.get(
security_test_client.app.url_path_for('current_user_from_db'))
assert b'Your token is incorrect' in res.content
def test_current_user_dependency_ok(session, security_test_client):
security_test_client.post(
security_test_client.app.url_path_for('register'),
data=REGISTER_DETAIL)
security_test_client.post(
security_test_client.app.url_path_for('login'), data=LOGIN_DATA)
res = security_test_client.get(
security_test_client.app.url_path_for('current_user'))
assert res.json() == {"user": 'correct_user'}
def test_current_user_dependency_not_logged_in(
session, security_test_client):
security_test_client.get(
security_test_client.app.url_path_for('logout'))
res = security_test_client.get(
security_test_client.app.url_path_for('current_user'))
assert b'Please log in' in res.content | none | 1 | 2.406907 | 2 | |
src/openbci_interface/channel_config.py | hellomoto-ai/pybci | 0 | 6612516 | """Helper module for generating channel config command and caching values."""
def get_channel_config_command(
        channel, power_down, gain, input_type, bias, srb2, srb1):
    """Build the ``x...X`` channel-settings command for the given parameters.

    See
    :func:`Cyton.configure_channel<openbci_interface.cyton.Cyton.configure_channel>`
    """
    def _encode(mapping, value, message):
        # Translate one parameter value into its single-byte wire code,
        # raising ValueError with the caller-supplied message when invalid.
        if value not in mapping:
            raise ValueError(message)
        return mapping[value]

    channel_codes = {
        1: b'1', 9: b'Q',
        2: b'2', 10: b'W',
        3: b'3', 11: b'E',
        4: b'4', 12: b'R',
        5: b'5', 13: b'T',
        6: b'6', 14: b'Y',
        7: b'7', 15: b'U',
        8: b'8', 16: b'I',
    }
    power_codes = {
        0: b'0', 'ON': b'0',
        1: b'1', 'OFF': b'1',
    }
    gain_codes = {
        1: b'0',
        2: b'1',
        4: b'2',
        6: b'3',
        8: b'4',
        12: b'5',
        24: b'6',
    }
    input_codes = {
        0: b'0', 'NORMAL': b'0',
        1: b'1', 'SHORTED': b'1',
        2: b'2', 'BIAS_MEAS': b'2',
        3: b'3', 'MVDD': b'3',
        4: b'4', 'TEMP': b'4',
        5: b'5', 'TESTSIG': b'5',
        6: b'6', 'BIAS_DRP': b'6',
        7: b'7', 'BIAS_DRN': b'7',
    }
    binary_codes = {0: b'0', 1: b'1'}
    # Command layout: x CHANNEL POWER_DOWN GAIN INPUT_TYPE BIAS SRB2 SRB1 X
    parts = [
        b'x',
        _encode(channel_codes, channel,
                '`channel` value must be one of %s' % channel_codes.keys()),
        _encode(power_codes, power_down,
                '`power_down` must be one of %s' % power_codes.keys()),
        _encode(gain_codes, gain,
                '`gain` value must be one of %s' % gain_codes.keys()),
        _encode(input_codes, input_type,
                '`input_type` type value must be one of %s.'
                % input_codes.keys()),
        _encode(binary_codes, bias, '`bias` must be either 0 or 1.'),
        _encode(binary_codes, srb2, '`srb2` must be either 0 or 1.'),
        _encode(binary_codes, srb1, '`srb1` must be either 0 or 1.'),
        b'X',
    ]
    return b''.join(parts)
class ChannelConfig:
    """Per-channel configuration state, maintained by a Cyton object.

    Do not instantiate this class yourself; treat the instances owned by a
    Cyton object as read-only.

    Examples
    --------
    >>> cyton = openbci_interface.cyton.Cyton(port)
    >>> cyton.initialize()
    >>> print(cyton.channel_config[0].power_down)
    'ON'
    >>> cyton.configure_channel(1, power_down='OFF', ...)
    >>> print(cyton.channel_config[0].power_down)
    'OFF'

    :ivar bool enabled:
        True if the channel is enabled, False if disabled,
        None when not yet known (initial value).
    :ivar str power_down:
        ``POWER_DOWN`` value: ``ON`` or ``OFF``.
    :ivar int gain:
        ``GAIN_SET`` value: one of 1, 2, 4, 6, 8, 12, 24.
    :ivar str input_type:
        ``INPUT_TYPE_SET`` value: one of ``NORMAL``, ``SHORTED``,
        ``BIAS_MEAS``, ``MVDD``, ``TEMP``, ``TESTSIG``, ``BIAS_DRP``,
        ``BIAS_DRN``.
    :ivar str bias:
        ``BIAS_SET`` value: 0 or 1.
    :ivar str srb2:
        ``SRB2_SET`` value: 0 or 1.
    :ivar str srb1:
        ``SRB1_SET`` value: 0 or 1.

    References
    ----------
    http://docs.openbci.com/OpenBCI%20Software/04-OpenBCI_Cyton_SDK#openbci-cyton-sdk-command-set-channel-setting-commands
    """

    def __init__(
            self, channel,
            enabled=None, power_down=None, gain=None,
            input_type=None, bias=None, srb2=None, srb1=None):
        # Everything except `channel` defaults to None (unknown) until a
        # configuration is recorded via set_config.
        self.channel = channel
        self.enabled = enabled
        self.power_down = power_down
        self.gain = gain
        self.input_type = input_type
        self.bias = bias
        self.srb2 = srb2
        self.srb1 = srb1

    def set_config(self, power_down, gain, input_type, bias, srb2, srb1):
        """Record a new configuration; invoked by the Cyton board driver.

        See :func:`get_channel_config_command` for the parameter values.
        """
        # Values are assumed valid because they already passed through
        # `get_channel_config_command`.  Integer codes are normalized to
        # their symbolic string names before being stored.
        power_names = {0: 'ON', 1: 'OFF'}
        input_names = {
            0: 'NORMAL', 1: 'SHORTED', 2: 'BIAS_MEAS', 3: 'MVDD',
            4: 'TEMP', 5: 'TESTSIG', 6: 'BIAS_DRP', 7: 'BIAS_DRN',
        }
        if isinstance(power_down, int):
            power_down = power_names[power_down]
        if isinstance(input_type, int):
            input_type = input_names[input_type]
        self.power_down = power_down
        self.gain = gain
        self.input_type = input_type
        self.bias = bias
        self.srb2 = srb2
        self.srb1 = srb1

    def __repr__(self):
        state = 'Enabled' if self.enabled else 'Disabled'
        return (
            'Channel: {} ({}), POWER_DOWN: {}, GAIN: {}, '
            'INPUT_TYPE: {}, BIAS_SET: {}, SRB2: {}, SRB1: {}'
        ).format(
            self.channel, state,
            self.power_down, self.gain, self.input_type,
            self.bias, self.srb2, self.srb1,
        )
| """Helper module for generating channel config command and caching values."""
def get_channel_config_command(
channel, power_down, gain, input_type, bias, srb2, srb1):
"""Get command string for the given parameters.
See
:func:`Cyton.configure_channel<openbci_interface.cyton.Cyton.configure_channel>`
"""
command = [b'x']
vals = {
1: b'1', 9: b'Q',
2: b'2', 10: b'W',
3: b'3', 11: b'E',
4: b'4', 12: b'R',
5: b'5', 13: b'T',
6: b'6', 14: b'Y',
7: b'7', 15: b'U',
8: b'8', 16: b'I',
}
if channel not in vals:
raise ValueError('`channel` value must be one of %s' % vals.keys())
command.append(vals[channel])
vals = {
0: b'0', 'ON': b'0',
1: b'1', 'OFF': b'1',
}
if power_down not in vals:
raise ValueError('`power_down` must be one of %s' % vals.keys())
command.append(vals[power_down])
vals = {
1: b'0',
2: b'1',
4: b'2',
6: b'3',
8: b'4',
12: b'5',
24: b'6',
}
if gain not in vals:
raise ValueError('`gain` value must be one of %s' % vals.keys())
command.append(vals[gain])
vals = {
0: b'0', 'NORMAL': b'0',
1: b'1', 'SHORTED': b'1',
2: b'2', 'BIAS_MEAS': b'2',
3: b'3', 'MVDD': b'3',
4: b'4', 'TEMP': b'4',
5: b'5', 'TESTSIG': b'5',
6: b'6', 'BIAS_DRP': b'6',
7: b'7', 'BIAS_DRN': b'7',
}
if input_type not in vals:
raise ValueError(
'`input_type` type value must be one of %s.' % vals.keys())
command.append(vals[input_type])
vals = {0: b'0', 1: b'1'}
if bias not in vals.keys():
raise ValueError('`bias` must be either 0 or 1.')
command.append(vals[bias])
vals = {0: b'0', 1: b'1'}
if srb2 not in vals.keys():
raise ValueError('`srb2` must be either 0 or 1.')
command.append(vals[srb2])
vals = {0: b'0', 1: b'1'}
if srb1 not in vals.keys():
raise ValueError('`srb1` must be either 0 or 1.')
command.append(vals[srb1])
command.append(b'X')
return b''.join(command)
class ChannelConfig:
"""Class for holding channel configuration, set by Cyton.
You should not use this class directly. Instead, you can use instances of
this class managed by Cyton object, in read-only manner.
Examples
--------
>>> cyton = openbci_interface.cyton.Cyton(port)
>>> cyton.initialize()
>>> print(cyton.channel_config[0].power_down)
'ON'
>>> cyton.configure_channel(1, power_down='OFF', ...)
>>> print(cyton.channel_config[0].power_down)
'OFF'
:ivar bool enabled:
If corresponding channel is enabled True, if disabled, False.
None if not known. (initial value)
:ivar str power_down:
``POWER_DOWN`` value. ``ON`` or ``OFF``
:ivar int gain:
``GAIN_SET`` value. One of 1, 2, 4, 6, 8, 12, 24.
:ivar str input_type:
``INPUT_TYPE_SET`` value. One of ``NORMAL``, ``SHORTED``, ``BIAS_MEAS``,
``MVDD``, ``TEMP``, ``TESTSIG``, ``BIAS_DRP``, or ``BIAS_DRN``.
:ivar str bias:
``BIAS_SET`` value. 0 or 1.
:ivar str srb2:
``SRB2_SET`` value. 0 or 1.
:ivar str srb1:
``SRB1_SET`` value. 0 or 1.
References
----------
http://docs.openbci.com/OpenBCI%20Software/04-OpenBCI_Cyton_SDK#openbci-cyton-sdk-command-set-channel-setting-commands
"""
def __init__(
self, channel,
enabled=None, power_down=None, gain=None,
input_type=None, bias=None, srb2=None, srb1=None):
self.channel = channel
self.enabled = enabled
self.power_down = power_down
self.gain = gain
self.input_type = input_type
self.bias = bias
self.srb2 = srb2
self.srb1 = srb1
def set_config(self, power_down, gain, input_type, bias, srb2, srb1):
"""Used by Cyton board to set values.
See :func:`get_channel_config_command` for parameters.
"""
# Assumption:
# The provided argument values went through
# `get_channel_config_command`, thus have valid values.
# Normalize to str
if isinstance(power_down, int):
power_down = {0: 'ON', 1: 'OFF'}[power_down]
if isinstance(input_type, int):
input_type = {
0: 'NORMAL',
1: 'SHORTED',
2: 'BIAS_MEAS',
3: 'MVDD',
4: 'TEMP',
5: 'TESTSIG',
6: 'BIAS_DRP',
7: 'BIAS_DRN',
}[input_type]
self.power_down = power_down
self.gain = gain
self.input_type = input_type
self.bias = bias
self.srb2 = srb2
self.srb1 = srb1
def __repr__(self):
return (
'Channel: %d (%s), POWER_DOWN: %s, GAIN: %s, '
'INPUT_TYPE: %s, BIAS_SET: %s, SRB2: %s, SRB1: %s'
) % (
self.channel,
'Enabled' if self.enabled else 'Disabled',
self.power_down, self.gain, self.input_type,
self.bias, self.srb2, self.srb1,
)
| en | 0.424431 | Helper module for generating channel config command and caching values. Get command string for the given parameters. See :func:`Cyton.configure_channel<openbci_interface.cyton.Cyton.configure_channel>` Class for holding channel configuration, set by Cyton. You should not use this class directly. Instead, you can use instances of this class managed by Cyton object, in read-only manner. Examples -------- >>> cyton = openbci_interface.cyton.Cyton(port) >>> cyton.initialize() >>> print(cyton.channel_config[0].power_down) 'ON' >>> cyton.configure_channel(1, power_down='OFF', ...) >>> print(cyton.channel_config[0].power_down) 'OFF' :ivar bool enabled: If corresponding channel is enabled True, if disabled, False. None if not known. (initial value) :ivar str power_down: ``POWER_DOWN`` value. ``ON`` or ``OFF`` :ivar int gain: ``GAIN_SET`` value. One of 1, 2, 4, 6, 8, 12, 24. :ivar str input_type: ``INPUT_TYPE_SET`` value. One of ``NORMAL``, ``SHORTED``, ``BIAS_MEAS``, ``MVDD``, ``TEMP``, ``TESTSIG``, ``BIAS_DRP``, or ``BIAS_DRN``. :ivar str bias: ``BIAS_SET`` value. 0 or 1. :ivar str srb2: ``SRB2_SET`` value. 0 or 1. :ivar str srb1: ``SRB1_SET`` value. 0 or 1. References ---------- http://docs.openbci.com/OpenBCI%20Software/04-OpenBCI_Cyton_SDK#openbci-cyton-sdk-command-set-channel-setting-commands Used by Cyton board to set values. See :func:`get_channel_config_command` for parameters. # Assumption: # The provided argument values went through # `get_channel_config_command`, thus have valid values. # Normalize to str | 2.70993 | 3 |
runserver.py | zen4ever/route53manager | 21 | 6612517 | #!/usr/bin/env python
from route53 import app
# Entry point: start Flask's built-in development server (not for production).
app.run()
| #!/usr/bin/env python
from route53 import app
app.run()
| ru | 0.26433 | #!/usr/bin/env python | 1.177949 | 1 |
chapter_02/12_stock_transaction.py | SergeHall/Tony-Gaddis-Python-4th | 2 | 6612518 | # Программа расчета купли-продажи акций.
# Stock buy/sell transaction calculator: Joe buys shares, later sells them,
# and pays his broker a percentage commission on each transaction.
num_share = 2000            # number of shares bought and sold
per_share_before = 40       # purchase price per share, $
com_rate = 0.03             # broker commission rate (3%)
per_share_after = 42.75     # sale price per share, $
# Commission paid on the purchase and on the sale, respectively.
commission1 = per_share_before * num_share * com_rate
commission2 = per_share_after * num_share * com_rate
# Net profit: price gain on all shares minus both commissions.
# (Previously the share count was hard-coded as a literal 2000 here.)
profit = (per_share_after - per_share_before) * num_share - (
    commission1 + commission2)
print("The amount of money Joe paid for the stock is $",
      num_share * per_share_before, sep='')
print(
    "The amount of commission Joe paid his broker when he bought "
    "the stock is $", commission1, sep='')
print("The amount that Joe sold the stock for is $",
      num_share * per_share_after, sep='')
print(
    "The amount of commission Joe paid his broker when he sold the "
    "stock is $", commission2, sep='')
print("Joe made a profit amount of $", profit)
| # Программа расчета купли-продажи акций.
num_share = 2000
per_share_before = 40
com_rate = 0.03
per_share_after = 42.75
commission1 = per_share_before * num_share * com_rate
commission2 = per_share_after * num_share * com_rate
print("The amount of money Joe paid for the stock is $",
num_share * per_share_before, sep='')
print(
"The amount of commission Joe paid his broker when he bought "
"the stock is $", num_share * per_share_before * com_rate, sep='')
print("The amount that Joe sold the stock for is $",
num_share * per_share_after, sep='')
print(
"The amount of commission Joe paid his broker when he sold the "
"stock is $", num_share * per_share_after * com_rate, sep='')
print("Joe made a profit amount of $",
((per_share_after - per_share_before) * 2000) - (
commission1 + commission2))
| ru | 0.949471 | # Программа расчета купли-продажи акций. | 3.735686 | 4 |
fetcher.py | J471/Telegram-ID_AzanBot | 2 | 6612519 | #!/home/untorojati/python/id_azanbot/id_azanbot_env/bin/python
# Constants
from constants import KEMENAG_DIR
# Enable logging
import logging
logging.basicConfig(format='%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# HTTP get utilities
import requests, shutil
# Date/Time utilities
import datetime
# Argument utilities
import sys, getopt
# File utilities
import os
# JSON utilities
import json
# MongoDB connection
from pymongo import MongoClient
from credentials import DBNAME, DBUSER, DBPASS, DBAUTH
client = MongoClient()
db = client[DBNAME]
db.authenticate(DBUSER, DBPASS, source=DBAUTH)
# Global variable
gv_usage = 'fetcher.py -m <month delta in positive integer>'
def main(argv):
    """Fetch monthly prayer-time schedules for zones whose cached data is
    older than the target month, save each as JSON, and mark the zone as
    fetched in MongoDB.

    :param argv: command-line arguments without the program name; expects
        ``-m <month delta>`` where the delta is a non-negative integer
        number of months ahead of today.
    """
    # [START checking arguments]
    if not argv:
        print(gv_usage)
        sys.exit()
    else:
        try:
            opts, args = getopt.getopt(argv,"hm:",["help", "month="])
        except getopt.GetoptError:
            print(gv_usage)
            sys.exit(2)
        for opt, arg in opts:
            if opt in ('-h', '--help'):
                print(gv_usage)
                sys.exit()
            elif opt in ('-m', '--month'):
                try:
                    lv_curmo = int(arg)
                except ValueError:
                    print(gv_usage)
                    sys.exit()
                else:
                    if lv_curmo < 0:
                        print(gv_usage)
                        sys.exit()
    # NOTE(review): if argv is non-empty but contains no -m/--month option,
    # `lv_curmo` is never bound and the timedelta line below raises
    # NameError -- consider defaulting it or exiting when -m is absent.
    # [END checking arguments]
    # Advance "today" (UTC, truncated to midnight) by roughly lv_curmo
    # months, approximated as 365/12 days each; only (year, month) of the
    # result is used afterwards.
    utctime = datetime.datetime.utcnow()
    utctime = utctime.replace(hour=0, minute=0, second=0, microsecond=0)
    utctime = utctime + datetime.timedelta(lv_curmo*365/12)
    # curyrmo encodes the target month as YYYYMM, e.g. 202401.
    curyrmo = utctime.date().year*100 + utctime.date().month
    lv_year = utctime.date().year
    lv_month = utctime.date().month
    # Select zones whose last-fetched month (flstfl) predates the target and
    # that already migrated to the new key scheme (fxpara != "default").
    czones_qry = db.czones.find({"flstfl": {"$lt": curyrmo}, "fxpara": {"$ne": "default"}})
    #Testing purpose
    #czones_qry = db.czones.find({"_id": 667, "flstfl": {"$lt": curyrmo}})
    # NOTE(review): Cursor.count() is deprecated in newer PyMongo; use
    # count_documents() on the collection instead.
    if czones_qry.count() > 0:
        #lt_headers = []
        # NOTE(review): hard-coded session cookies will expire/rotate --
        # confirm the endpoint still requires them and refresh if needed.
        lv_cookies = {
            'PHPSESSID': '9e7o76vqm58eko31pbi3n82pu1',
            'cookiesession1': '52CC16C5LVZCRPUOGEJQNB3AMDK781A8',
            'ci_session': 'a%3A5%3A%7Bs%3A10%3A%22session_id%22%3Bs%3A32%3A%228f7a3746f330b4f5c1bb8cd434c63ed3%22%3Bs%3A10%3A%22ip_address%22%3Bs%3A12%3A%22192.168.3.11%22%3Bs%3A10%3A%22user_agent%22%3Bs%3A115%3A%22Mozilla%2F5.0+%28Windows+NT+10.0%3B+Win64%3B+x64%29+AppleWebKit%2F537.36+%28KHTML%2C+like+Gecko%29+Chrome%2F73.0.3683.103+Safari%2F537.36%22%3Bs%3A13%3A%22last_activity%22%3Bi%3A1556065208%3Bs%3A9%3A%22user_data%22%3Bs%3A0%3A%22%22%3B%7D5f7e5c53d6dbe752d5fca3534c689c9f',
        }
        lv_headers = {
            'Origin': 'https://bimasislam.kemenag.go.id',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': 'https://bimasislam.kemenag.go.id/jadwalshalat',
            'X-Requested-With': 'XMLHttpRequest',
            'Connection': 'keep-alive',
        }
        lv_url = 'https://bimasislam.kemenag.go.id/ajax/getShalatbln'
        for doc_czones_qry in czones_qry:
            # Zone coordinates plus the requested month/year, as form data.
            lv_data = {
                'x': doc_czones_qry.get('fxpara'),
                'y': doc_czones_qry.get('fypara'),
                'bln': str(lv_month),
                'thn': str(lv_year),
            }
            #url = 'https://bimasislam.kemenag.go.id/ajax/getShalatbln{}&bulan={}&lokasi={}&h=0&type=html'.format(lv_year, lv_month, doc_czones_qry.get('fnewid'))
            try:
                r = requests.post(lv_url, headers=lv_headers, cookies=lv_cookies, data=lv_data)
            except requests.exceptions.RequestException as e:
                # Network-level failure: log and stop the whole run.
                logging.exception('Requests Exception: {}'.format(e))
                break
            else:
                if r.status_code == 200:
                    #lv_filename = '{}{}_{}.html'.format(KEMENAG_DIR, curyrmo, doc_czones_qry.get('_id'))
                    #Change from html and start using json
                    # Output file name: <dir><YYYYMM>_<zone id>.json
                    lv_filename = '{}{}_{}.json'.format(KEMENAG_DIR, curyrmo, doc_czones_qry.get('_id'))
                    if r.json()['message'] == 'Success':
                        #with open(lv_filename, 'wb') as out_file:
                        with open(lv_filename, 'w') as out_file:
                            #shutil.copyfileobj(r.raw, out_file)
                            json.dump(r.json(), out_file)
                        # Guard against an empty file having been written.
                        if os.path.getsize(lv_filename) < 1:
                            os.remove(lv_filename)
                            logging.exception('Error creating file {}!'.format(lv_filename))
                            del r
                        else:
                            # Mark this zone as fetched for the target month.
                            # NOTE(review): `czones_upd` is never used.
                            czones_upd = db.czones.update_one(
                                {"_id": doc_czones_qry.get('_id')},
                                {"$set": {"flstfl": curyrmo }}
                            )
                            logging.info('File {} is created successfully.'.format(lv_filename))
                            del r
                    else:
                        # API answered 200 but reported a parameter problem.
                        logging.exception('Requests Parameter Error: {} - {}'.format(r, doc_czones_qry.get('_id')))
                        break
                else:
                    logging.exception('Requests Status Error: {}'.format(r))
                    break
    else:
        logging.info('No files need to be downloaded!')
if __name__ == "__main__":
main(sys.argv[1:]) | #!/home/untorojati/python/id_azanbot/id_azanbot_env/bin/python
# Constants
from constants import KEMENAG_DIR
# Enable logging
import logging
logging.basicConfig(format='%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# HTTP get utilities
import requests, shutil
# Date/Time utilities
import datetime
# Argument utilities
import sys, getopt
# File utilities
import os
# JSON utilities
import json
# MongoDB connection
from pymongo import MongoClient
from credentials import DBNAME, DBUSER, DBPASS, DBAUTH
client = MongoClient()
db = client[DBNAME]
db.authenticate(DBUSER, DBPASS, source=DBAUTH)
# Global variable
gv_usage = 'fetcher.py -m <month delta in positive integer>'
def main(argv):
# [START checking arguments]
if not argv:
print(gv_usage)
sys.exit()
else:
try:
opts, args = getopt.getopt(argv,"hm:",["help", "month="])
except getopt.GetoptError:
print(gv_usage)
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print(gv_usage)
sys.exit()
elif opt in ('-m', '--month'):
try:
lv_curmo = int(arg)
except ValueError:
print(gv_usage)
sys.exit()
else:
if lv_curmo < 0:
print(gv_usage)
sys.exit()
# [END checking arguments]
utctime = datetime.datetime.utcnow()
utctime = utctime.replace(hour=0, minute=0, second=0, microsecond=0)
utctime = utctime + datetime.timedelta(lv_curmo*365/12)
curyrmo = utctime.date().year*100 + utctime.date().month
lv_year = utctime.date().year
lv_month = utctime.date().month
#To get zones that already migrated to new key. fxpara is used for this checking
czones_qry = db.czones.find({"flstfl": {"$lt": curyrmo}, "fxpara": {"$ne": "default"}})
#Testing purpose
#czones_qry = db.czones.find({"_id": 667, "flstfl": {"$lt": curyrmo}})
if czones_qry.count() > 0:
#lt_headers = []
lv_cookies = {
'PHPSESSID': '9e7o76vqm58eko31pbi3n82pu1',
'cookiesession1': '52CC16C5LVZCRPUOGEJQNB3AMDK781A8',
'ci_session': 'a%3A5%3A%7Bs%3A10%3A%22session_id%22%3Bs%3A32%3A%228f7a3746f330b4f5c1bb8cd434c63ed3%22%3Bs%3A10%3A%22ip_address%22%3Bs%3A12%3A%22192.168.3.11%22%3Bs%3A10%3A%22user_agent%22%3Bs%3A115%3A%22Mozilla%2F5.0+%28Windows+NT+10.0%3B+Win64%3B+x64%29+AppleWebKit%2F537.36+%28KHTML%2C+like+Gecko%29+Chrome%2F73.0.3683.103+Safari%2F537.36%22%3Bs%3A13%3A%22last_activity%22%3Bi%3A1556065208%3Bs%3A9%3A%22user_data%22%3Bs%3A0%3A%22%22%3B%7D5f7e5c53d6dbe752d5fca3534c689c9f',
}
lv_headers = {
'Origin': 'https://bimasislam.kemenag.go.id',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'https://bimasislam.kemenag.go.id/jadwalshalat',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
}
lv_url = 'https://bimasislam.kemenag.go.id/ajax/getShalatbln'
for doc_czones_qry in czones_qry:
lv_data = {
'x': doc_czones_qry.get('fxpara'),
'y': doc_czones_qry.get('fypara'),
'bln': str(lv_month),
'thn': str(lv_year),
}
#url = 'https://bimasislam.kemenag.go.id/ajax/getShalatbln{}&bulan={}&lokasi={}&h=0&type=html'.format(lv_year, lv_month, doc_czones_qry.get('fnewid'))
try:
r = requests.post(lv_url, headers=lv_headers, cookies=lv_cookies, data=lv_data)
except requests.exceptions.RequestException as e:
logging.exception('Requests Exception: {}'.format(e))
break
else:
if r.status_code == 200:
#lv_filename = '{}{}_{}.html'.format(KEMENAG_DIR, curyrmo, doc_czones_qry.get('_id'))
#Change from html and start using json
lv_filename = '{}{}_{}.json'.format(KEMENAG_DIR, curyrmo, doc_czones_qry.get('_id'))
if r.json()['message'] == 'Success':
#with open(lv_filename, 'wb') as out_file:
with open(lv_filename, 'w') as out_file:
#shutil.copyfileobj(r.raw, out_file)
json.dump(r.json(), out_file)
if os.path.getsize(lv_filename) < 1:
os.remove(lv_filename)
logging.exception('Error creating file {}!'.format(lv_filename))
del r
else:
czones_upd = db.czones.update_one(
{"_id": doc_czones_qry.get('_id')},
{"$set": {"flstfl": curyrmo }}
)
logging.info('File {} is created successfully.'.format(lv_filename))
del r
else:
logging.exception('Requests Parameter Error: {} - {}'.format(r, doc_czones_qry.get('_id')))
break
else:
logging.exception('Requests Status Error: {}'.format(r))
break
else:
logging.info('No files need to be downloaded!')
if __name__ == "__main__":
main(sys.argv[1:]) | en | 0.386562 | #!/home/untorojati/python/id_azanbot/id_azanbot_env/bin/python # Constants # Enable logging # HTTP get utilities # Date/Time utilities # Argument utilities # File utilities # JSON utilities # MongoDB connection # Global variable # [START checking arguments] # [END checking arguments] #To get zones that already migrated to new key. fxpara is used for this checking #Testing purpose #czones_qry = db.czones.find({"_id": 667, "flstfl": {"$lt": curyrmo}}) #lt_headers = [] #url = 'https://bimasislam.kemenag.go.id/ajax/getShalatbln{}&bulan={}&lokasi={}&h=0&type=html'.format(lv_year, lv_month, doc_czones_qry.get('fnewid')) #lv_filename = '{}{}_{}.html'.format(KEMENAG_DIR, curyrmo, doc_czones_qry.get('_id')) #Change from html and start using json #with open(lv_filename, 'wb') as out_file: #shutil.copyfileobj(r.raw, out_file) | 2.05404 | 2 |
src/geocat/comp/comp_util.py | dimaclimate/geocat-comp | 76 | 6612520 | import numpy as np
def _is_duck_array(value):
"""Returns True when ``value`` is array-like."""
if isinstance(value, np.ndarray):
return True
return (hasattr(value, "ndim") and hasattr(value, "shape") and
hasattr(value, "dtype") and hasattr(value, "__array_function__") and
hasattr(value, "__array_ufunc__"))
| import numpy as np
def _is_duck_array(value):
"""Returns True when ``value`` is array-like."""
if isinstance(value, np.ndarray):
return True
return (hasattr(value, "ndim") and hasattr(value, "shape") and
hasattr(value, "dtype") and hasattr(value, "__array_function__") and
hasattr(value, "__array_ufunc__"))
| en | 0.710539 | Returns True when ``value`` is array-like. | 2.874817 | 3 |
chapter06/dict_method_examples.py | ScorpioDoctor/AdvancePythonMy | 0 | 6612521 | mydict = {"a": {"school": "studyai.com"},
"b": {"school": "xjtu.com"},
"c": [1, 2, 3],
"d": 989}
# print(mydict)
# mydict.clear()
# print(mydict)
# mydict2 = mydict.copy()
# mydict2["a"]["school"] = "studyai.cn"
# print(mydict2)
# print(mydict)
#
# import copy
# mydict3 = copy.deepcopy(mydict)
# mydict3["a"]["school"] = "studyai.net"
# print(mydict3)
# print(mydict)
keys=(1,2,3)
mydict4 = dict.fromkeys(keys,"studyai.com")
# print(mydict4)
print(mydict4.get(4,"study"))
# print(type(mydict.items()))
# print(mydict.items())
# for key,value in mydict.items():
# print(key,":",value)
# print(mydict.pop("d"))
# print(mydict.popitem())
# print(mydict.items())
# print(mydict.setdefault('c',"ai"))
# print(mydict.items())
print(mydict.items())
# mydict.update({"00":"xx"})
# mydict.update([("00","xx"),("oo","yyy")])
mydict.update((("00","xx"),("oo","yyy")))
print(mydict.items()) | mydict = {"a": {"school": "studyai.com"},
"b": {"school": "xjtu.com"},
"c": [1, 2, 3],
"d": 989}
# print(mydict)
# mydict.clear()
# print(mydict)
# mydict2 = mydict.copy()
# mydict2["a"]["school"] = "studyai.cn"
# print(mydict2)
# print(mydict)
#
# import copy
# mydict3 = copy.deepcopy(mydict)
# mydict3["a"]["school"] = "studyai.net"
# print(mydict3)
# print(mydict)
keys=(1,2,3)
mydict4 = dict.fromkeys(keys,"studyai.com")
# print(mydict4)
print(mydict4.get(4,"study"))
# print(type(mydict.items()))
# print(mydict.items())
# for key,value in mydict.items():
# print(key,":",value)
# print(mydict.pop("d"))
# print(mydict.popitem())
# print(mydict.items())
# print(mydict.setdefault('c',"ai"))
# print(mydict.items())
print(mydict.items())
# mydict.update({"00":"xx"})
# mydict.update([("00","xx"),("oo","yyy")])
mydict.update((("00","xx"),("oo","yyy")))
print(mydict.items()) | en | 0.332814 | # print(mydict) # mydict.clear() # print(mydict) # mydict2 = mydict.copy() # mydict2["a"]["school"] = "studyai.cn" # print(mydict2) # print(mydict) # # import copy # mydict3 = copy.deepcopy(mydict) # mydict3["a"]["school"] = "studyai.net" # print(mydict3) # print(mydict) # print(mydict4) # print(type(mydict.items())) # print(mydict.items()) # for key,value in mydict.items(): # print(key,":",value) # print(mydict.pop("d")) # print(mydict.popitem()) # print(mydict.items()) # print(mydict.setdefault('c',"ai")) # print(mydict.items()) # mydict.update({"00":"xx"}) # mydict.update([("00","xx"),("oo","yyy")]) | 3.756009 | 4 |
logic2_analyzers/LIN/pd.py | martonmiklos/sigrokdecoders_to_logic2_analyzers | 5 | 6612522 | <reponame>martonmiklos/sigrokdecoders_to_logic2_analyzers
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 <NAME> <<EMAIL>>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class LinFsm:
    """Finite state machine tracking the phases of a LIN frame."""

    class State:
        WaitForBreak = 'WAIT_FOR_BREAK'
        Sync = 'SYNC'
        Pid = 'PID'
        Data = 'DATA'
        Checksum = 'CHECKSUM'
        Error = 'ERROR'

    def __init__(self):
        # Map of state -> tuple of states reachable from it. The Error
        # state is special-cased in _transition_allowed() instead.
        self.allowed_state = {
            LinFsm.State.WaitForBreak: (LinFsm.State.Sync,),
            LinFsm.State.Sync: (LinFsm.State.Pid,),
            LinFsm.State.Pid: (LinFsm.State.Data,),
            LinFsm.State.Data: (LinFsm.State.Data, LinFsm.State.Checksum),
            LinFsm.State.Checksum: (LinFsm.State.WaitForBreak,),
            LinFsm.State.Error: (LinFsm.State.Sync,),
        }
        self.state = None
        self.uart_idle_count = 0
        self.reset()

    def transit(self, target_state):
        """Move to *target_state*; return False when the move is illegal."""
        if self._transition_allowed(target_state):
            self.state = target_state
            return True
        return False

    def _transition_allowed(self, target_state):
        # Error is reachable from every state.
        return (target_state == LinFsm.State.Error or
                target_state in self.allowed_state[self.state])

    def reset(self):
        """Return to the initial WaitForBreak state."""
        self.state = LinFsm.State.WaitForBreak
        self.uart_idle_count = 0
class Decoder(srd.Decoder):
    """Stacked decoder: turns 'uart' decoder output into LIN frame
    annotations (break/sync/PID header, data bytes, checksum, errors).
    """
    api_version = 3
    id = 'lin'
    name = 'LIN'
    longname = 'Local Interconnect Network'
    desc = 'Local Interconnect Network (LIN) protocol.'
    license = 'gplv2+'
    inputs = ['uart']
    outputs = []
    tags = ['Automotive']
    options = (
        {'id': 'version', 'desc': 'Protocol version', 'default': 2, 'values': (1, 2)},
    )
    annotations = (
        ('data', 'LIN data'),
        ('control', 'Protocol info'),
        ('error', 'Error description'),
        ('inline_error', 'Protocol violation or error'),
    )
    annotation_rows = (
        ('data_vals', 'Data', (0, 1, 3)),
        ('errors', 'Errors', (2,)),
    )

    def __init__(self):
        self.reset()

    def reset(self):
        self.fsm = LinFsm()
        # Header bytes (sync, pid) and response bytes (data + checksum),
        # stored as (start_sample, end_sample, value) tuples.
        self.lin_header = []
        self.lin_rsp = []
        self.lin_version = None
        self.ss_block = None
        self.es_block = None

    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)
        self.lin_version = self.options['version']

    def putx(self, data):
        # Annotate the sample range of the byte currently being handled.
        self.put(self.ss_block, self.es_block, self.out_ann, data)

    def wipe_break_null_byte(self, value):
        # Upon a break condition a null byte is received which must be ignored.
        if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
            if len(self.lin_rsp):
                value = self.lin_rsp.pop()[2]
            else:
                self.lin_header.pop()

        if value != 0:
            # The byte swallowed by the break must have been 0x00.
            self.fsm.transit(LinFsm.State.Error)
            self.handle_error(None)
            return False

        return True

    def handle_uart_idle(self):
        # Two consecutive idle periods terminate the current frame.
        if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
            self.fsm.uart_idle_count += 1

            if self.fsm.uart_idle_count == 2:
                self.fsm.transit(LinFsm.State.Checksum)
                self.handle_checksum()
                self.fsm.reset()

    def handle_wait_for_break(self, value):
        self.wipe_break_null_byte(value)

    def handle_break(self, value):
        # A break arriving mid-frame finishes the previous frame first.
        if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
            if self.wipe_break_null_byte(value):
                self.fsm.transit(LinFsm.State.Checksum)
                self.handle_checksum()

        self.fsm.reset()
        self.fsm.transit(LinFsm.State.Sync)
        self.putx([1, ['Break condition', 'Break', 'Brk', 'B']])

    def handle_sync(self, value):
        self.fsm.transit(LinFsm.State.Pid)
        self.lin_header.append((self.ss_block, self.es_block, value))

    def handle_pid(self, value):
        self.fsm.transit(LinFsm.State.Data)
        self.lin_header.append((self.ss_block, self.es_block, value))

    def handle_data(self, value):
        self.lin_rsp.append((self.ss_block, self.es_block, value))

    def handle_checksum(self):
        """Annotate a completed frame: sync, PID, data bytes and checksum."""
        sync = self.lin_header.pop(0) if len(self.lin_header) else None

        # Guard against a truncated frame: when the header list is empty
        # sync is None, and the previous code crashed on sync[0]/sync[2].
        if sync is not None:
            self.put(sync[0], sync[1], self.out_ann, [0, ['Sync', 'S']])
            if sync[2] != 0x55:
                self.put(sync[0], sync[1], self.out_ann,
                         [2, ['Sync is not 0x55', 'Not 0x55', '!= 0x55']])

        pid = self.lin_header.pop(0) if len(self.lin_header) else None
        checksum = self.lin_rsp.pop() if len(self.lin_rsp) else None

        if pid:
            id_ = pid[2] & 0x3F
            parity = pid[2] >> 6

            expected_parity = self.calc_parity(pid[2])
            parity_valid = parity == expected_parity

            if not parity_valid:
                self.put(pid[0], pid[1], self.out_ann, [2, ['P != %d' % expected_parity]])

            ann_class = 0 if parity_valid else 3
            self.put(pid[0], pid[1], self.out_ann, [ann_class, [
                'ID: %02X Parity: %d (%s)' % (id_, parity, 'ok' if parity_valid else 'bad'),
                'ID: 0x%02X' % id_, 'I: %d' % id_
            ]])

            # Response handling is kept under 'if pid:' because the
            # checksum computation needs the PID byte (pid[2]).
            if len(self.lin_rsp):
                checksum_valid = self.checksum_is_valid(pid[2], self.lin_rsp, checksum[2])

                for b in self.lin_rsp:
                    self.put(b[0], b[1], self.out_ann,
                             [0, ['Data: 0x%02X' % b[2], 'D: 0x%02X' % b[2]]])

                ann_class = 0 if checksum_valid else 3
                self.put(checksum[0], checksum[1], self.out_ann,
                         [ann_class, ['Checksum: 0x%02X' % checksum[2], 'Checksum', 'Chk', 'C']])

                if not checksum_valid:
                    self.put(checksum[0], checksum[1], self.out_ann, [2, ['Checksum invalid']])
            else:
                pass # No response.

        self.lin_header.clear()
        self.lin_rsp.clear()

    def handle_error(self, dummy):
        self.putx([3, ['Error', 'Err', 'E']])

    def checksum_is_valid(self, pid, data, checksum):
        """Verify the frame checksum.

        LIN 2.x uses the "enhanced" checksum that includes the PID,
        except for diagnostic frame IDs 60 and 61 which always use the
        classic (data-only) checksum; LIN 1.x is always classic.
        """
        if self.lin_version == 2:
            id_ = pid & 0x3F

            if id_ != 60 and id_ != 61:
                checksum += pid

        for d in data:
            checksum += d[2]

        # Fold the carry back into the sum; the low byte of the result
        # must be 0xFF for a valid (inverted) checksum.
        carry_bits = int(checksum / 256)
        checksum += carry_bits

        return checksum & 0xFF == 0xFF

    @staticmethod
    def calc_parity(pid):
        """Compute the two PID parity bits (P0, P1) from the frame ID."""
        id_ = [((pid & 0x3F) >> i) & 1 for i in range(8)]

        p0 = id_[0] ^ id_[1] ^ id_[2] ^ id_[4]
        p1 = not (id_[1] ^ id_[3] ^ id_[4] ^ id_[5])

        return (p0 << 0) | (p1 << 1)

    def decode(self, ss, es, data):
        ptype, rxtx, pdata = data

        self.ss_block, self.es_block = ss, es

        # Ignore all UART packets except the actual data packets or BREAK.
        if ptype == 'IDLE':
            self.handle_uart_idle()
        if ptype == 'BREAK':
            self.handle_break(pdata)
        if ptype != 'DATA':
            return

        # We're only interested in the byte value (not individual bits).
        pdata = pdata[0]

        # Short LIN overview:
        #  - Message begins with a BREAK (0x00) for at least 13 bittimes.
        #  - Break is always followed by a SYNC byte (0x55).
        #  - Sync byte is followed by a PID byte (Protected Identifier).
        #  - PID byte is followed by 1 - 8 data bytes and a final checksum byte.

        # Dispatch on FSM state, e.g. 'WAIT_FOR_BREAK' -> handle_wait_for_break().
        handler = getattr(self, 'handle_%s' % self.fsm.state.lower())
        handler(pdata)
| ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 <NAME> <<EMAIL>>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class LinFsm:
class State:
WaitForBreak = 'WAIT_FOR_BREAK'
Sync = 'SYNC'
Pid = 'PID'
Data = 'DATA'
Checksum = 'CHECKSUM'
Error = 'ERROR'
def transit(self, target_state):
if not self._transition_allowed(target_state):
return False
self.state = target_state
return True
def _transition_allowed(self, target_state):
if target_state == LinFsm.State.Error:
return True
return target_state in self.allowed_state[self.state]
def reset(self):
self.state = LinFsm.State.WaitForBreak
self.uart_idle_count = 0
def __init__(self):
a = dict()
a[LinFsm.State.WaitForBreak] = (LinFsm.State.Sync,)
a[LinFsm.State.Sync] = (LinFsm.State.Pid,)
a[LinFsm.State.Pid] = (LinFsm.State.Data,)
a[LinFsm.State.Data] = (LinFsm.State.Data, LinFsm.State.Checksum)
a[LinFsm.State.Checksum] = (LinFsm.State.WaitForBreak,)
a[LinFsm.State.Error] = (LinFsm.State.Sync,)
self.allowed_state = a
self.state = None
self.uart_idle_count = 0
self.reset()
class Decoder(srd.Decoder):
api_version = 3
id = 'lin'
name = 'LIN'
longname = 'Local Interconnect Network'
desc = 'Local Interconnect Network (LIN) protocol.'
license = 'gplv2+'
inputs = ['uart']
outputs = []
tags = ['Automotive']
options = (
{'id': 'version', 'desc': 'Protocol version', 'default': 2, 'values': (1, 2)},
)
annotations = (
('data', 'LIN data'),
('control', 'Protocol info'),
('error', 'Error description'),
('inline_error', 'Protocol violation or error'),
)
annotation_rows = (
('data_vals', 'Data', (0, 1, 3)),
('errors', 'Errors', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.fsm = LinFsm()
self.lin_header = []
self.lin_rsp = []
self.lin_version = None
self.ss_block = None
self.es_block = None
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.lin_version = self.options['version']
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def wipe_break_null_byte(self, value):
# Upon a break condition a null byte is received which must be ignored.
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if len(self.lin_rsp):
value = self.lin_rsp.pop()[2]
else:
self.lin_header.pop()
if value != 0:
self.fsm.transit(LinFsm.State.Error)
self.handle_error(None)
return False
return True
def handle_uart_idle(self):
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
self.fsm.uart_idle_count += 1
if self.fsm.uart_idle_count == 2:
self.fsm.transit(LinFsm.State.Checksum)
self.handle_checksum()
self.fsm.reset()
def handle_wait_for_break(self, value):
self.wipe_break_null_byte(value)
def handle_break(self, value):
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if self.wipe_break_null_byte(value):
self.fsm.transit(LinFsm.State.Checksum)
self.handle_checksum()
self.fsm.reset()
self.fsm.transit(LinFsm.State.Sync)
self.putx([1, ['Break condition', 'Break', 'Brk', 'B']])
def handle_sync(self, value):
self.fsm.transit(LinFsm.State.Pid)
self.lin_header.append((self.ss_block, self.es_block, value))
def handle_pid(self, value):
self.fsm.transit(LinFsm.State.Data)
self.lin_header.append((self.ss_block, self.es_block, value))
def handle_data(self, value):
self.lin_rsp.append((self.ss_block, self.es_block, value))
def handle_checksum(self):
sync = self.lin_header.pop(0) if len(self.lin_header) else None
self.put(sync[0], sync[1], self.out_ann, [0, ['Sync', 'S']])
if sync[2] != 0x55:
self.put(sync[0], sync[1], self.out_ann,
[2, ['Sync is not 0x55', 'Not 0x55', '!= 0x55']])
pid = self.lin_header.pop(0) if len(self.lin_header) else None
checksum = self.lin_rsp.pop() if len(self.lin_rsp) else None
if pid:
id_ = pid[2] & 0x3F
parity = pid[2] >> 6
expected_parity = self.calc_parity(pid[2])
parity_valid = parity == expected_parity
if not parity_valid:
self.put(pid[0], pid[1], self.out_ann, [2, ['P != %d' % expected_parity]])
ann_class = 0 if parity_valid else 3
self.put(pid[0], pid[1], self.out_ann, [ann_class, [
'ID: %02X Parity: %d (%s)' % (id_, parity, 'ok' if parity_valid else 'bad'),
'ID: 0x%02X' % id_, 'I: %d' % id_
]])
if len(self.lin_rsp):
checksum_valid = self.checksum_is_valid(pid[2], self.lin_rsp, checksum[2])
for b in self.lin_rsp:
self.put(b[0], b[1], self.out_ann, [0, ['Data: 0x%02X' % b[2], 'D: 0x%02X' % b[2]]])
ann_class = 0 if checksum_valid else 3
self.put(checksum[0], checksum[1], self.out_ann,
[ann_class, ['Checksum: 0x%02X' % checksum[2], 'Checksum', 'Chk', 'C']])
if not checksum_valid:
self.put(checksum[0], checksum[1], self.out_ann, [2, ['Checksum invalid']])
else:
pass # No response.
self.lin_header.clear()
self.lin_rsp.clear()
def handle_error(self, dummy):
self.putx([3, ['Error', 'Err', 'E']])
def checksum_is_valid(self, pid, data, checksum):
if self.lin_version == 2:
id_ = pid & 0x3F
if id_ != 60 and id_ != 61:
checksum += pid
for d in data:
checksum += d[2]
carry_bits = int(checksum / 256)
checksum += carry_bits
return checksum & 0xFF == 0xFF
@staticmethod
def calc_parity(pid):
id_ = [((pid & 0x3F) >> i) & 1 for i in range(8)]
p0 = id_[0] ^ id_[1] ^ id_[2] ^ id_[4]
p1 = not (id_[1] ^ id_[3] ^ id_[4] ^ id_[5])
return (p0 << 0) | (p1 << 1)
def decode(self, ss, es, data):
ptype, rxtx, pdata = data
self.ss_block, self.es_block = ss, es
# Ignore all UART packets except the actual data packets or BREAK.
if ptype == 'IDLE':
self.handle_uart_idle()
if ptype == 'BREAK':
self.handle_break(pdata)
if ptype != 'DATA':
return
# We're only interested in the byte value (not individual bits).
pdata = pdata[0]
# Short LIN overview:
# - Message begins with a BREAK (0x00) for at least 13 bittimes.
# - Break is always followed by a SYNC byte (0x55).
# - Sync byte is followed by a PID byte (Protected Identifier).
# - PID byte is followed by 1 - 8 data bytes and a final checksum byte.
handler = getattr(self, 'handle_%s' % self.fsm.state.lower())
handler(pdata) | en | 0.827991 | ## ## This file is part of the libsigrokdecode project. ## ## Copyright (C) 2018 <NAME> <<EMAIL>> ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, see <http://www.gnu.org/licenses/>. ## # Upon a break condition a null byte is received which must be ignored. # No response. # Ignore all UART packets except the actual data packets or BREAK. # We're only interested in the byte value (not individual bits). # Short LIN overview: # - Message begins with a BREAK (0x00) for at least 13 bittimes. # - Break is always followed by a SYNC byte (0x55). # - Sync byte is followed by a PID byte (Protected Identifier). # - PID byte is followed by 1 - 8 data bytes and a final checksum byte. | 2.081707 | 2 |
sentiment-analysis/Random-Sampling.py | FehintolaObafemi/soda | 0 | 6612523 | <reponame>FehintolaObafemi/soda
# Draw a random sample of 10 tweets joined with their sentiment labels.
import MySQLdb
# NOTE(review): credentials are hard-coded with an empty root password -
# acceptable for a local dev box only; confirm before sharing/deploying.
conn=MySQLdb.connect(host='127.0.0.1', user = 'root', password ='', database = 'tweets_table')
c = conn.cursor()
#c.execute('SELECT tweets_key, content FROM tweets_table WHERE tweets_key = ANY (SELECT tweets_key FROM sentiments_table) LIMIT 10')
#content = c.fetchall()
#print(content)
#c.execute('SELECT tweets_key, tweet_sentiments FROM sentiments_table WHERE tweets_key = ANY (SELECT tweets_key FROM tweets_table)')
#sentiment = c.fetchall()
#print(sentiment)
# LEFT JOIN sentiments to tweets on tweets_key; ORDER BY RAND() shuffles the
# rows so LIMIT 10 yields a random sample (fine for small tables, slow on large ones).
c.execute('SELECT tweets_table.tweets_key, tweet_sentiments, content FROM sentiments_table LEFT JOIN(tweets_table) ON (sentiments_table.tweets_key = tweets_table.tweets_key) ORDER BY RAND() LIMIT 10')
argument = c.fetchall()
print(argument)
# commit is a no-op for a pure SELECT but harmless.
conn.commit()
conn.close() | import MySQLdb
conn=MySQLdb.connect(host='127.0.0.1', user = 'root', password ='', database = 'tweets_table')
c = conn.cursor()
#c.execute('SELECT tweets_key, content FROM tweets_table WHERE tweets_key = ANY (SELECT tweets_key FROM sentiments_table) LIMIT 10')
#content = c.fetchall()
#print(content)
#c.execute('SELECT tweets_key, tweet_sentiments FROM sentiments_table WHERE tweets_key = ANY (SELECT tweets_key FROM tweets_table)')
#sentiment = c.fetchall()
#print(sentiment)
c.execute('SELECT tweets_table.tweets_key, tweet_sentiments, content FROM sentiments_table LEFT JOIN(tweets_table) ON (sentiments_table.tweets_key = tweets_table.tweets_key) ORDER BY RAND() LIMIT 10')
argument = c.fetchall()
print(argument)
conn.commit()
conn.close() | en | 0.163356 | #c.execute('SELECT tweets_key, content FROM tweets_table WHERE tweets_key = ANY (SELECT tweets_key FROM sentiments_table) LIMIT 10') #content = c.fetchall() #print(content) #c.execute('SELECT tweets_key, tweet_sentiments FROM sentiments_table WHERE tweets_key = ANY (SELECT tweets_key FROM tweets_table)') #sentiment = c.fetchall() #print(sentiment) | 2.88478 | 3 |
cryptysto/utils.py | morucci/cryptysto | 0 | 6612524 | from typing import List, Union
from csv import reader
from pathlib import Path
from dataclasses import dataclass
from cryptysto.types import *
assets_map = {"XXBT": "BTC", "XXDG": "DOGE", "XETH": "ETH", "ZEUR": "EUR"}


def is_fiat(name: str) -> bool:
    """Return True when *name* (after normalization via assets_map) is fiat."""
    normalized = assets_map.get(name, name)
    return normalized in ("EUR", "USD")
def asset(asset_name: str) -> Asset:
    """Build an Asset from an exchange code, normalizing the name via
    assets_map and classifying it as fiat or crypto."""
    normalized = assets_map.get(asset_name, asset_name)
    kind = "fiat" if is_fiat(asset_name) else "crypto"
    return Asset(name=normalized, _type=kind)
def read_csv(path: Path) -> List:
    """Parse *path* as comma-separated rows, dropping the header row.

    The file is opened with a context manager so the handle is always
    closed (the previous version leaked the open file object).
    """
    with open(path) as csv_file:
        return list(reader(csv_file, delimiter=","))[1:]
| from typing import List, Union
from csv import reader
from pathlib import Path
from dataclasses import dataclass
from cryptysto.types import *
assets_map = {"XXBT": "BTC", "XXDG": "DOGE", "XETH": "ETH", "ZEUR": "EUR"}
def is_fiat(name: str) -> bool:
if assets_map.get(name, name) in ("EUR", "USD"):
return True
else:
return False
def asset(asset_name: str) -> Asset:
return Asset(
name=assets_map.get(asset_name, asset_name),
_type="fiat" if is_fiat(asset_name) else "crypto",
)
def read_csv(path: Path) -> List:
return list(reader(open(path).readlines(), delimiter=","))[1:]
| none | 1 | 3.209777 | 3 | |
py_win_keyboard_layout/__init__.py | zevbaker/py_win_keyboard_layout | 5 | 6612525 | <filename>py_win_keyboard_layout/__init__.py
"""
Interaction with keyboard layout on windows
"""
import win32gui
import win32api
import win32process
WM_INPUTLANGCHANGEREQUEST = 0x0050 # win32api const
def get_foreground_window_keyboard_layout():
    """
    Returns foreground window keyboard layout as integer
    Examples:
        68748313 - 0x04190419 - russian
        67699721 - 0x04090409 - english
    """
    hwnd = win32gui.GetForegroundWindow()
    # GetWindowThreadProcessId returns (thread_id, process_id).
    thread_id, _process_id = win32process.GetWindowThreadProcessId(hwnd)
    return win32api.GetKeyboardLayout(thread_id)
def change_foreground_window_keyboard_layout(layout_id=0):
    """
    Change foreground window keyboard layout

    Parameter
        layout_id=0 : integer
            Integer containing a locale id, eg 68748313 - 0x04190419 - 0x419 - russian
            By default change layout like Ctrl+Shift or Alt+Shift

    Return Value
        Returns True if layout is changed and win32api.SendMessage() output if not
    """
    if not isinstance(layout_id, int):
        raise TypeError('parameter must be integer')
    hwnd = win32gui.GetForegroundWindow()
    result = win32api.SendMessage(
        hwnd, WM_INPUTLANGCHANGEREQUEST, 0, layout_id)
    # SendMessage returns 0 when the request succeeded.
    return True if result == 0 else result
def get_keyboard_layout_list():
    """
    Returns a tuple of all locale ids currently loaded

    Example
        (68748313, 67699721)
    """
    # Thin pass-through to the win32 API.
    return win32api.GetKeyboardLayoutList()
def load_keyboard_layout(string_layout_id, flags=0):
    """
    Loads a new locale id

    Parameters
        string_layout_id : string
            Hex string containing a locale id, eg "00000409"
        flags=0 : int
            Combination of win32con.KLF_* constants
            Examples
                KLF_ACTIVATE|KLF_SETFORPROCESS|KLF_REORDER == 0x109 == 265

    Return Value
        Returns the integer locale id that was loaded
        Example
            load_keyboard_layout("00000409") == 67699721 for english
    """
    if not (isinstance(string_layout_id, str) and isinstance(flags, int)):
        raise TypeError('first parameter must be string and second must be int')
    return win32api.LoadKeyboardLayout(string_layout_id, flags)
__all__ = ["get_foreground_window_keyboard_layout",
"change_foreground_window_keyboard_layout",
"get_keyboard_layout_list",
"load_keyboard_layout"]
| <filename>py_win_keyboard_layout/__init__.py
"""
Interaction with keyboard layout on windows
"""
import win32gui
import win32api
import win32process
WM_INPUTLANGCHANGEREQUEST = 0x0050 # win32api const
def get_foreground_window_keyboard_layout():
"""
Returns foreground window keyboard layout as integer
Examples:
68748313 - 0x04190419 - russian
67699721 - 0x04090409 - english
"""
window_handle = win32gui.GetForegroundWindow()
thread_id = win32process.GetWindowThreadProcessId(window_handle)[0]
layout_id = win32api.GetKeyboardLayout(thread_id)
return layout_id
def change_foreground_window_keyboard_layout(layout_id=0):
"""
Change foreground window keyboard layout
Parameter
layout_id=0 : integer
Integer containing a locale id, eg 68748313 - 0x04190419 - 0x419 - russian
By default change layout like Ctrl+Shift or Alt+Shift
Return Value
Returns True if layout is changed and win32api.SendMessage() output if not
"""
if not isinstance(layout_id, int):
raise TypeError('parameter must be integer')
window_handle = win32gui.GetForegroundWindow()
result = win32api.SendMessage(window_handle,
WM_INPUTLANGCHANGEREQUEST,
0,
layout_id)
if result == 0:
return True
else:
return result
def get_keyboard_layout_list():
"""
Returns a tuple of all locale ids currently loaded
Example
(68748313, 67699721)
"""
return win32api.GetKeyboardLayoutList()
def load_keyboard_layout(string_layout_id, flags=0):
"""
Loads a new locale id
Parameters
string_layout_id : string
Hex string containing a locale id, eg "00000409"
Flags=0 : int
Combination of win32con.KLF_* constants
Examples
KLF_ACTIVATE|KLF_SETFORPROCESS|KLF_REORDER == 0x109 == 265
Return Value
Returns the integer locale id that was loaded
Example
load_keyboard_layout("00000409") == 67699721 for english
"""
if not isinstance(string_layout_id, str) or not isinstance(flags, int):
raise TypeError('first parameter must be string and second must be int')
return win32api.LoadKeyboardLayout(string_layout_id, flags)
__all__ = ["get_foreground_window_keyboard_layout",
"change_foreground_window_keyboard_layout",
"get_keyboard_layout_list",
"load_keyboard_layout"]
| en | 0.497458 | Interaction with keyboard layout on windows # win32api const Returns foreground window keyboard layout as integer Examples: 68748313 - 0x04190419 - russian 67699721 - 0x04090409 - english Change foreground window keyboard layout Parameter layout_id=0 : integer Integer containing a locale id, eg 68748313 - 0x04190419 - 0x419 - russian By default change layout like Ctrl+Shift or Alt+Shift Return Value Returns True if layout is changed and win32api.SendMessage() output if not Returns a tuple of all locale ids currently loaded Example (68748313, 67699721) Loads a new locale id Parameters string_layout_id : string Hex string containing a locale id, eg "00000409" Flags=0 : int Combination of win32con.KLF_* constants Examples KLF_ACTIVATE|KLF_SETFORPROCESS|KLF_REORDER == 0x109 == 265 Return Value Returns the integer locale id that was loaded Example load_keyboard_layout("00000409") == 67699721 for english | 2.708621 | 3 |
atest/asynchronous_messages/my_handler.py | dedie/Rammbock | 37 | 6612526 | from Rammbock import logger
RECEIVED_MESSAGES = []
SERVER_SENT = {'sample': 0,
               'another': 0}


def handle_sample(rammbock, msg):
    """Record an incoming 'sample' message."""
    RECEIVED_MESSAGES.append(msg)


def reset_received_messages():
    """Drop every recorded message in place (same list object is kept)."""
    del RECEIVED_MESSAGES[:]


def respond_to_sample(rammbock, msg, client):
    """Record *msg* and answer it using the 'sample response' template."""
    # The two unused locals are intentional: they replicate an ArgCount bug.
    foo = "adding Extra Variable to replicate ArgCount bug"
    bar = "adding Extra Variable to replicate ArgCount bug"
    RECEIVED_MESSAGES.append(msg)
    rammbock.save_template("__backup_template")
    try:
        rammbock.load_template("sample response")
        rammbock.client_sends_message('name=%s' % client.name)
    finally:
        # Always restore whatever template was active before.
        rammbock.load_template("__backup_template")


def server_respond_to_another_max_100(rammbock, msg, server, connection):
    """Record *msg* and reply with 'another', at most 100 times in total."""
    RECEIVED_MESSAGES.append(msg)
    if SERVER_SENT['another'] >= 100:
        logger.warn("Reached 100 in another")
        return
    SERVER_SENT['another'] += 1
    rammbock.save_template("__backup_template")
    try:
        rammbock.load_template("another")
        rammbock.server_sends_message('name=%s' % server.name,
                                      'connection=%s' % connection.name)
    finally:
        rammbock.load_template("__backup_template")


def server_respond_to_sample_response_max_100(rammbock, msg):
    """Record *msg* and reply with 'sample', at most 100 times in total."""
    RECEIVED_MESSAGES.append(msg)
    if SERVER_SENT['sample'] >= 100:
        logger.warn("Reached 100 in sample")
        return
    SERVER_SENT['sample'] += 1
    rammbock.save_template("__backup_template")
    try:
        rammbock.load_template("sample")
        rammbock.server_sends_message()
    finally:
        rammbock.load_template("__backup_template")


def get_rcvd_msg():
    """Expose the list of recorded messages."""
    return RECEIVED_MESSAGES
| from Rammbock import logger
RECEIVED_MESSAGES = []
SERVER_SENT = {'sample': 0,
'another': 0}
def handle_sample(rammbock, msg):
RECEIVED_MESSAGES.append(msg)
def reset_received_messages():
while RECEIVED_MESSAGES:
RECEIVED_MESSAGES.pop()
def respond_to_sample(rammbock, msg, client):
foo = "adding Extra Variable to replicate ArgCount bug"
bar = "adding Extra Variable to replicate ArgCount bug"
RECEIVED_MESSAGES.append(msg)
rammbock.save_template("__backup_template")
try:
rammbock.load_template("sample response")
rammbock.client_sends_message('name=%s' % client.name)
finally:
rammbock.load_template("__backup_template")
def server_respond_to_another_max_100(rammbock, msg, server, connection):
RECEIVED_MESSAGES.append(msg)
if SERVER_SENT['another'] < 100:
SERVER_SENT['another'] = SERVER_SENT['another'] + 1
rammbock.save_template("__backup_template")
try:
rammbock.load_template("another")
rammbock.server_sends_message('name=%s' % server.name, 'connection=%s' % connection.name)
finally:
rammbock.load_template("__backup_template")
else:
logger.warn("Reached 100 in another")
def server_respond_to_sample_response_max_100(rammbock, msg):
RECEIVED_MESSAGES.append(msg)
if SERVER_SENT['sample'] < 100:
SERVER_SENT['sample'] = SERVER_SENT['sample'] + 1
rammbock.save_template("__backup_template")
try:
rammbock.load_template("sample")
rammbock.server_sends_message()
finally:
rammbock.load_template("__backup_template")
else:
logger.warn("Reached 100 in sample")
def get_rcvd_msg():
return RECEIVED_MESSAGES
| none | 1 | 2.49433 | 2 | |
sequal/amino_acid.py | noatgnu/sequal | 1 | 6612527 | from sequal.base_block import BaseBlock
from sequal.modification import Modification
from sequal.resources import AA_mass
# Basic amino acid block object. Can carry position, modifications and
# amino acid value.
class AminoAcid(BaseBlock):
    def __init__(self, value, position=None, mass=None):
        """
        :type value: str
            name of the amino acid residue for this block
        :type position: int
            position of the amino acid residue that this block belongs to
        :type mass: float
            mass of the amino acid; when not given, the mass is inferred
            from the built-in AA_mass dictionary when possible
        """
        super().__init__(value, position, branch=False, mass=mass)
        # Modifications attached to this residue, in insertion order.
        self.mods = []
        if not self.mass and value in AA_mass:
            self.mass = AA_mass[value]

    def set_modification(self, i: Modification):
        """Attach a modification to this residue's mods list."""
        self.mods.append(i)

    def __repr__(self):
        return self.value + "".join("[{}]".format(m.value) for m in self.mods)

    def __str__(self):
        return self.value + "".join("[{}]".format(m.value) for m in self.mods)
| from sequal.base_block import BaseBlock
from sequal.modification import Modification
from sequal.resources import AA_mass
# Basic amino acid block object. Can cary position, modifications and amino acid value.
class AminoAcid(BaseBlock):
def __init__(self, value, position=None, mass=None):
"""
:type mass: float
mass of the amino acid if mass is not specified, it will try to infer the mass from the internal hard code mass dictionary of amino acid
:type position: int
position of amino acid residue that this block belong to
:type value: str
name of the amino acid residue for this block
"""
super().__init__(value, position, branch=False, mass=mass)
self.mods = []
if not self.mass:
if value in AA_mass:
self.mass = AA_mass[value]
# Adding modification to the mods list of this amino acid block
def set_modification(self, i: Modification):
self.mods.append(i)
def __repr__(self):
s = self.value
for i in self.mods:
s += "[{}]".format(i.value)
return s
def __str__(self):
s = self.value
for i in self.mods:
s += "[{}]".format(i.value)
return s
| en | 0.780189 | # Basic amino acid block object. Can cary position, modifications and amino acid value. :type mass: float mass of the amino acid if mass is not specified, it will try to infer the mass from the internal hard code mass dictionary of amino acid :type position: int position of amino acid residue that this block belong to :type value: str name of the amino acid residue for this block # Adding modification to the mods list of this amino acid block | 3.046716 | 3 |
samples/util/psnr.py | lyx-x/nnimgproc | 1 | 6612528 | <reponame>lyx-x/nnimgproc
#!/usr/bin/python3
# Evaluate the PSNR between noisy and clean images
import argparse
import logging
from nnimgproc.util.image import read
from nnimgproc.util.math import mse, psnr
def main():
    """Compute and log the PSNR and MSE between a clean and a noisy image.

    Expects --clean and --noisy command-line arguments naming image files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", type=str, required=True,
                        help="Clean image file")
    parser.add_argument("--noisy", type=str, required=True,
                        help="Noisy image file")
    args = parser.parse_args()
    # The root logger defaults to WARNING, which silently suppressed the
    # INFO message below; configure logging so the result is actually shown.
    logging.basicConfig(level=logging.INFO)
    clean = read(path=args.clean)
    noise = read(path=args.noisy)
    logging.info("PSNR: {} || MSE: {}".format(psnr(noise, clean),
                                              mse(noise, clean)))
if __name__ == "__main__":
main()
| #!/usr/bin/python3
# Evaluate the PSNR between noisy and clean images
import argparse
import logging
from nnimgproc.util.image import read
from nnimgproc.util.math import mse, psnr
def main():
    """Compare a clean and a noisy image, logging their PSNR and MSE."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", type=str, required=True,
                        help="Clean image file")
    parser.add_argument("--noisy", type=str, required=True,
                        help="Noisy image file")
    args = parser.parse_args()
    # Choose different noise to use
    clean = read(path=args.clean)
    noise = read(path=args.noisy)
    # NOTE(review): without logging.basicConfig the root logger defaults to
    # WARNING, so this INFO message may never be printed -- confirm intended.
    logging.info("PSNR: {} || MSE: {}".format(psnr(noise, clean),
                                              mse(noise, clean)))
if __name__ == "__main__":
main() | en | 0.719576 | #!/usr/bin/python3 # Evaluate the PSNR between noisy and clean images # Choose different noise to use | 3.088672 | 3 |
sdk/python/bin/build/lib/pulumi_ecl/imagestorages/get_image.py | keiichi-hikita/pulumi-ecl | 0 | 6612529 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetImageResult:
"""
A collection of values returned by getImage.
"""
def __init__(__self__, checksum=None, container_format=None, created_at=None, disk_format=None, file=None, member_status=None, metadata=None, min_disk_gb=None, min_ram_mb=None, most_recent=None, name=None, owner=None, properties=None, protected=None, region=None, schema=None, size_bytes=None, size_max=None, size_min=None, sort_direction=None, sort_key=None, tag=None, updated_at=None, visibility=None, id=None):
if checksum and not isinstance(checksum, str):
raise TypeError("Expected argument 'checksum' to be a str")
__self__.checksum = checksum
"""
md5 hash of image contents.
"""
if container_format and not isinstance(container_format, str):
raise TypeError("Expected argument 'container_format' to be a str")
__self__.container_format = container_format
"""
Format of the container.
"""
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
__self__.created_at = created_at
"""
Date and time of image registration.
"""
if disk_format and not isinstance(disk_format, str):
raise TypeError("Expected argument 'disk_format' to be a str")
__self__.disk_format = disk_format
"""
Format of the disk.
"""
if file and not isinstance(file, str):
raise TypeError("Expected argument 'file' to be a str")
__self__.file = file
"""
URL for the virtual machine image file.
"""
if member_status and not isinstance(member_status, str):
raise TypeError("Expected argument 'member_status' to be a str")
__self__.member_status = member_status
"""
See Argument Reference above.
"""
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
__self__.metadata = metadata
"""
The location metadata.
"""
if min_disk_gb and not isinstance(min_disk_gb, float):
raise TypeError("Expected argument 'min_disk_gb' to be a float")
__self__.min_disk_gb = min_disk_gb
"""
Amount of disk space (in GB) required to boot image.
"""
if min_ram_mb and not isinstance(min_ram_mb, float):
raise TypeError("Expected argument 'min_ram_mb' to be a float")
__self__.min_ram_mb = min_ram_mb
"""
Amount of ram (in MB) required to boot image.
"""
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
__self__.most_recent = most_recent
"""
See Argument Reference above.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
See Argument Reference above.
"""
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
__self__.owner = owner
"""
See Argument Reference above.
"""
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
__self__.properties = properties
if protected and not isinstance(protected, bool):
raise TypeError("Expected argument 'protected' to be a bool")
__self__.protected = protected
"""
If true, image will not be deletable.
"""
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
__self__.region = region
"""
See Argument Reference above.
"""
if schema and not isinstance(schema, str):
raise TypeError("Expected argument 'schema' to be a str")
__self__.schema = schema
"""
URL for schema of the virtual machine image.
"""
if size_bytes and not isinstance(size_bytes, float):
raise TypeError("Expected argument 'size_bytes' to be a float")
__self__.size_bytes = size_bytes
"""
Size of image file in bytes.
"""
if size_max and not isinstance(size_max, float):
raise TypeError("Expected argument 'size_max' to be a float")
__self__.size_max = size_max
"""
See Argument Reference above.
"""
if size_min and not isinstance(size_min, float):
raise TypeError("Expected argument 'size_min' to be a float")
__self__.size_min = size_min
"""
See Argument Reference above.
"""
if sort_direction and not isinstance(sort_direction, str):
raise TypeError("Expected argument 'sort_direction' to be a str")
__self__.sort_direction = sort_direction
"""
See Argument Reference above.
"""
if sort_key and not isinstance(sort_key, str):
raise TypeError("Expected argument 'sort_key' to be a str")
__self__.sort_key = sort_key
"""
See Argument Reference above.
"""
if tag and not isinstance(tag, str):
raise TypeError("Expected argument 'tag' to be a str")
__self__.tag = tag
"""
See Argument Reference above.
"""
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
__self__.updated_at = updated_at
"""
Date and time of the last image modification.
"""
if visibility and not isinstance(visibility, str):
raise TypeError("Expected argument 'visibility' to be a str")
__self__.visibility = visibility
"""
See Argument Reference above.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_image(member_status=None,most_recent=None,name=None,owner=None,properties=None,region=None,size_max=None,size_min=None,sort_direction=None,sort_key=None,tag=None,visibility=None,opts=None):
"""
Use this data source to get the ID and Details of an Enterprise Cloud Image.
"""
__args__ = dict()
__args__['memberStatus'] = member_status
__args__['mostRecent'] = most_recent
__args__['name'] = name
__args__['owner'] = owner
__args__['properties'] = properties
__args__['region'] = region
__args__['sizeMax'] = size_max
__args__['sizeMin'] = size_min
__args__['sortDirection'] = sort_direction
__args__['sortKey'] = sort_key
__args__['tag'] = tag
__args__['visibility'] = visibility
__ret__ = await pulumi.runtime.invoke('ecl:imagestorages/getImage:getImage', __args__, opts=opts)
return GetImageResult(
checksum=__ret__.get('checksum'),
container_format=__ret__.get('containerFormat'),
created_at=__ret__.get('createdAt'),
disk_format=__ret__.get('diskFormat'),
file=__ret__.get('file'),
member_status=__ret__.get('memberStatus'),
metadata=__ret__.get('metadata'),
min_disk_gb=__ret__.get('minDiskGb'),
min_ram_mb=__ret__.get('minRamMb'),
most_recent=__ret__.get('mostRecent'),
name=__ret__.get('name'),
owner=__ret__.get('owner'),
properties=__ret__.get('properties'),
protected=__ret__.get('protected'),
region=__ret__.get('region'),
schema=__ret__.get('schema'),
size_bytes=__ret__.get('sizeBytes'),
size_max=__ret__.get('sizeMax'),
size_min=__ret__.get('sizeMin'),
sort_direction=__ret__.get('sortDirection'),
sort_key=__ret__.get('sortKey'),
tag=__ret__.get('tag'),
updated_at=__ret__.get('updatedAt'),
visibility=__ret__.get('visibility'),
id=__ret__.get('id'))
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetImageResult:
"""
A collection of values returned by getImage.
"""
def __init__(__self__, checksum=None, container_format=None, created_at=None, disk_format=None, file=None, member_status=None, metadata=None, min_disk_gb=None, min_ram_mb=None, most_recent=None, name=None, owner=None, properties=None, protected=None, region=None, schema=None, size_bytes=None, size_max=None, size_min=None, sort_direction=None, sort_key=None, tag=None, updated_at=None, visibility=None, id=None):
if checksum and not isinstance(checksum, str):
raise TypeError("Expected argument 'checksum' to be a str")
__self__.checksum = checksum
"""
md5 hash of image contents.
"""
if container_format and not isinstance(container_format, str):
raise TypeError("Expected argument 'container_format' to be a str")
__self__.container_format = container_format
"""
Format of the container.
"""
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
__self__.created_at = created_at
"""
Date and time of image registration.
"""
if disk_format and not isinstance(disk_format, str):
raise TypeError("Expected argument 'disk_format' to be a str")
__self__.disk_format = disk_format
"""
Format of the disk.
"""
if file and not isinstance(file, str):
raise TypeError("Expected argument 'file' to be a str")
__self__.file = file
"""
URL for the virtual machine image file.
"""
if member_status and not isinstance(member_status, str):
raise TypeError("Expected argument 'member_status' to be a str")
__self__.member_status = member_status
"""
See Argument Reference above.
"""
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
__self__.metadata = metadata
"""
The location metadata.
"""
if min_disk_gb and not isinstance(min_disk_gb, float):
raise TypeError("Expected argument 'min_disk_gb' to be a float")
__self__.min_disk_gb = min_disk_gb
"""
Amount of disk space (in GB) required to boot image.
"""
if min_ram_mb and not isinstance(min_ram_mb, float):
raise TypeError("Expected argument 'min_ram_mb' to be a float")
__self__.min_ram_mb = min_ram_mb
"""
Amount of ram (in MB) required to boot image.
"""
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
__self__.most_recent = most_recent
"""
See Argument Reference above.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
"""
See Argument Reference above.
"""
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
__self__.owner = owner
"""
See Argument Reference above.
"""
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
__self__.properties = properties
if protected and not isinstance(protected, bool):
raise TypeError("Expected argument 'protected' to be a bool")
__self__.protected = protected
"""
If true, image will not be deletable.
"""
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
__self__.region = region
"""
See Argument Reference above.
"""
if schema and not isinstance(schema, str):
raise TypeError("Expected argument 'schema' to be a str")
__self__.schema = schema
"""
URL for schema of the virtual machine image.
"""
if size_bytes and not isinstance(size_bytes, float):
raise TypeError("Expected argument 'size_bytes' to be a float")
__self__.size_bytes = size_bytes
"""
Size of image file in bytes.
"""
if size_max and not isinstance(size_max, float):
raise TypeError("Expected argument 'size_max' to be a float")
__self__.size_max = size_max
"""
See Argument Reference above.
"""
if size_min and not isinstance(size_min, float):
raise TypeError("Expected argument 'size_min' to be a float")
__self__.size_min = size_min
"""
See Argument Reference above.
"""
if sort_direction and not isinstance(sort_direction, str):
raise TypeError("Expected argument 'sort_direction' to be a str")
__self__.sort_direction = sort_direction
"""
See Argument Reference above.
"""
if sort_key and not isinstance(sort_key, str):
raise TypeError("Expected argument 'sort_key' to be a str")
__self__.sort_key = sort_key
"""
See Argument Reference above.
"""
if tag and not isinstance(tag, str):
raise TypeError("Expected argument 'tag' to be a str")
__self__.tag = tag
"""
See Argument Reference above.
"""
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
__self__.updated_at = updated_at
"""
Date and time of the last image modification.
"""
if visibility and not isinstance(visibility, str):
raise TypeError("Expected argument 'visibility' to be a str")
__self__.visibility = visibility
"""
See Argument Reference above.
"""
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
async def get_image(member_status=None,most_recent=None,name=None,owner=None,properties=None,region=None,size_max=None,size_min=None,sort_direction=None,sort_key=None,tag=None,visibility=None,opts=None):
"""
Use this data source to get the ID and Details of an Enterprise Cloud Image.
"""
__args__ = dict()
__args__['memberStatus'] = member_status
__args__['mostRecent'] = most_recent
__args__['name'] = name
__args__['owner'] = owner
__args__['properties'] = properties
__args__['region'] = region
__args__['sizeMax'] = size_max
__args__['sizeMin'] = size_min
__args__['sortDirection'] = sort_direction
__args__['sortKey'] = sort_key
__args__['tag'] = tag
__args__['visibility'] = visibility
__ret__ = await pulumi.runtime.invoke('ecl:imagestorages/getImage:getImage', __args__, opts=opts)
return GetImageResult(
checksum=__ret__.get('checksum'),
container_format=__ret__.get('containerFormat'),
created_at=__ret__.get('createdAt'),
disk_format=__ret__.get('diskFormat'),
file=__ret__.get('file'),
member_status=__ret__.get('memberStatus'),
metadata=__ret__.get('metadata'),
min_disk_gb=__ret__.get('minDiskGb'),
min_ram_mb=__ret__.get('minRamMb'),
most_recent=__ret__.get('mostRecent'),
name=__ret__.get('name'),
owner=__ret__.get('owner'),
properties=__ret__.get('properties'),
protected=__ret__.get('protected'),
region=__ret__.get('region'),
schema=__ret__.get('schema'),
size_bytes=__ret__.get('sizeBytes'),
size_max=__ret__.get('sizeMax'),
size_min=__ret__.get('sizeMin'),
sort_direction=__ret__.get('sortDirection'),
sort_key=__ret__.get('sortKey'),
tag=__ret__.get('tag'),
updated_at=__ret__.get('updatedAt'),
visibility=__ret__.get('visibility'),
id=__ret__.get('id'))
| en | 0.723638 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** A collection of values returned by getImage. md5 hash of image contents. Format of the container. Date and time of image registration. Format of the disk. URL for the virtual machine image file. See Argument Reference above. The location metadata. Amount of disk space (in GB) required to boot image. Amount of ram (in MB) required to boot image. See Argument Reference above. See Argument Reference above. See Argument Reference above. If true, image will not be deletable. See Argument Reference above. URL for schema of the virtual machine image. Size of image file in bytes. See Argument Reference above. See Argument Reference above. See Argument Reference above. See Argument Reference above. See Argument Reference above. Date and time of the last image modification. See Argument Reference above. id is the provider-assigned unique ID for this managed resource. Use this data source to get the ID and Details of an Enterprise Cloud Image. | 1.97181 | 2 |
docs/release/scripts/minimaws/lib/wsgiserve.py | Robbbert/messui | 26 | 6612530 | <reponame>Robbbert/messui
#!/usr/bin/python
##
## license:BSD-3-Clause
## copyright-holders:<NAME>
from . import dbaccess
from . import htmltmpl
import cgi
import inspect
import json
import mimetypes
import os.path
import re
import sys
import urllib
import wsgiref.util
# Python 2/3 compatibility: cgi.escape was removed in Python 3.8, so fall
# back to html.escape when it is absent.
if hasattr(cgi, 'escape'):
    htmlescape = cgi.escape
else:
    import html
    htmlescape = html.escape

# URL parsing/quoting moved in Python 3: urllib.parse replaces the old
# urlparse module and urllib.quote.
try:
    import urllib.parse as urlparse
    urlquote = urlparse.quote
except ImportError:
    import urlparse
    urlquote = urllib.quote
class HandlerBase(object):
    """Common plumbing for the WSGI request handlers.

    Keeps hold of the request context handed over by the application and
    provides a helper for producing a simple HTML error page.
    """

    # Reason phrases for the HTTP status codes this application can emit.
    STATUS_MESSAGE = {
            400: 'Bad Request',
            401: 'Unauthorized',
            403: 'Forbidden',
            404: 'Not Found',
            405: 'Method Not Allowed',
            500: 'Internal Server Error',
            501: 'Not Implemented',
            502: 'Bad Gateway',
            503: 'Service Unavailable',
            504: 'Gateway Timeout',
            505: 'HTTP Version Not Supported' }

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(HandlerBase, self).__init__(**kwargs)
        self.app = app
        self.application_uri = application_uri
        self.environ = environ
        self.start_response = start_response
        # Convenience alias for the application's JavaScript escaping helper.
        self.js_escape = app.js_escape

    def error_page(self, code):
        """Yield a minimal UTF-8 HTML document describing HTTP status *code*."""
        message = self.STATUS_MESSAGE[code]
        markup = htmltmpl.ERROR_PAGE.substitute(
                code=htmlescape('%d' % (code, )),
                message=htmlescape(message))
        yield markup.encode('utf-8')
class ErrorPageHandler(HandlerBase):
    """Handler that immediately responds with a static error page."""

    def __init__(self, code, app, application_uri, environ, start_response, **kwargs):
        super(ErrorPageHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.code = code
        # The status is fixed at construction time, so start the response now.
        status = '%d %s' % (self.code, self.STATUS_MESSAGE[code])
        headers = [
                ('Content-type', 'text/html; charset=utf-8'),
                ('Cache-Control', 'public, max-age=3600')]
        self.start_response(status, headers)

    def __iter__(self):
        return self.error_page(self.code)
class AssetHandler(HandlerBase):
    """Serves static asset files (scripts, images) from a directory.

    Only a single path component below the handler prefix is accepted; the
    MIME type is taken from EXTENSIONMAP first, then guessed from the name.
    """

    # Overrides for extensions the mimetypes module guesses poorly.
    EXTENSIONMAP = { '.js': 'application/javascript', '.svg': 'image/svg+xml' }

    def __init__(self, directory, app, application_uri, environ, start_response, **kwargs):
        super(AssetHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.directory = directory
        # First remaining path component names the asset to serve.
        self.asset = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if not self.asset:
            # bare directory request -- listing not supported
            self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(403)
        elif self.environ['PATH_INFO']:
            # no nested paths below an asset name
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        else:
            path = os.path.join(self.directory, self.asset)
            if not os.path.isfile(path):
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                # NOTE(review): RFC 7231 requires an 'Allow' header on 405
                # responses; 'Accept' looks like a typo shared by the other
                # handlers -- confirm before changing project-wide.
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                try:
                    f = open(path, 'rb')
                    extension = os.path.splitext(path)[1]
                    mimetype = self.EXTENSIONMAP.get(extension)
                    if mimetype is None:
                        mimetype, encoding = mimetypes.guess_type(path)
                    self.start_response('200 OK', [('Content-type', mimetype or 'application/octet-stream'), ('Cache-Control', 'public, max-age=3600')])
                    # FileWrapper closes the file when the response completes.
                    return wsgiref.util.FileWrapper(f)
                except Exception:
                    # was a bare 'except:', which also swallowed SystemExit
                    # and KeyboardInterrupt -- narrowed to Exception
                    self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.error_page(500)
class QueryPageHandler(HandlerBase):
    """Base for handlers that hit the database.

    Owns a cursor on the application's connection and supplies hyperlink
    formatting plus JSON-ready data helpers shared by the query pages.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(QueryPageHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.dbcurs = app.dbconn.cursor()

    def machine_href(self, shortname):
        """Attribute-escaped URL of the detail page for a machine."""
        target = urlparse.urljoin(self.application_uri, 'machine/%s' % (urlquote(shortname), ))
        return htmlescape(target, True)

    def sourcefile_href(self, sourcefile):
        """Attribute-escaped URL of the page for a source file."""
        target = urlparse.urljoin(self.application_uri, 'sourcefile/%s' % (urlquote(sourcefile), ))
        return htmlescape(target, True)

    def softwarelist_href(self, softwarelist):
        """Attribute-escaped URL of the page for a software list."""
        target = urlparse.urljoin(self.application_uri, 'softwarelist/%s' % (urlquote(softwarelist), ))
        return htmlescape(target, True)

    def software_href(self, softwarelist, software):
        """Attribute-escaped URL of the page for a software item."""
        target = urlparse.urljoin(self.application_uri, 'softwarelist/%s/%s' % (urlquote(softwarelist), urlquote(software)))
        return htmlescape(target, True)

    def bios_data(self, machine):
        """Map BIOS set name to its description and default flag."""
        return dict(
                (name, { 'description': description, 'isdefault': bool(isdefault) })
                for name, description, isdefault in self.dbcurs.get_biossets(machine))

    def flags_data(self, machine):
        """Describe per-feature emulation status for the machine."""
        features = { }
        for feature, status, overall in self.dbcurs.get_feature_flags(machine):
            detail = { }
            # status/overall encode severity: 1 -> imperfect, >1 -> unemulated
            for key, level in (('status', status), ('overall', overall)):
                if level == 1:
                    detail[key] = 'imperfect'
                elif level > 1:
                    detail[key] = 'unemulated'
            features[feature] = detail
        return { 'features': features }

    def slot_data(self, machine):
        """Describe the machine's slots, their options and default cards."""
        result = { 'defaults': { }, 'slots': { } }
        slots = result['slots']

        # group slot options by slot tag
        for slot, option, shortname, description in self.dbcurs.get_slot_options(machine):
            slots.setdefault(slot, { })[option] = { 'device': shortname, 'description': description }

        # if there are any slots, fetch the default card selections
        if slots:
            for slot, default in self.dbcurs.get_slot_defaults(machine):
                result['defaults'][slot] = default

        # remove slots supplied by default cards plugged into other slots
        for parent in tuple(slots.keys()):
            prefix = parent + ':'
            for candidate in tuple(slots.keys()):
                if candidate.startswith(prefix):
                    del slots[candidate]

        return result

    def softwarelist_data(self, machine):
        """Describe the machine's software lists, excluding those that come
        from default cards in slots."""
        result = { }
        for softwarelist in self.dbcurs.get_machine_softwarelists(machine):
            result[softwarelist['tag']] = {
                    'status': softwarelist['status'],
                    'shortname': softwarelist['shortname'],
                    'description': softwarelist['description'],
                    'total': softwarelist['total'],
                    'supported': softwarelist['supported'],
                    'partiallysupported': softwarelist['partiallysupported'],
                    'unsupported': softwarelist['unsupported'] }

        # drop lists whose tag sits below a slot's default selection
        if result:
            for slot, default in self.dbcurs.get_slot_defaults(machine):
                prefix = slot + ':'
                for candidate in tuple(result.keys()):
                    if candidate.startswith(prefix):
                        del result[candidate]

        return result
class MachineRpcHandlerBase(QueryPageHandler):
    """Base for machine-scoped JSON endpoints.

    Validates the machine short name taken from the URL, then delegates to
    data_page (implemented by subclasses) for the JSON payload.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(MachineRpcHandlerBase, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.shortname = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if not self.shortname:
            self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(403)
        elif self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        else:
            machine = self.dbcurs.get_machine_id(self.shortname)
            if machine is None:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                # NOTE(review): RFC 7231 calls for an 'Allow' header on 405
                # responses rather than 'Accept' -- shared with other handlers.
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: 'chearset' -> 'charset' so the Content-Type
                # parameter is actually valid
                self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.data_page(machine)
class MachineHandler(QueryPageHandler):
def __init__(self, app, application_uri, environ, start_response, **kwargs):
super(MachineHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
self.shortname = wsgiref.util.shift_path_info(environ)
def __iter__(self):
if not self.shortname:
# could probably list machines here or something
self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
return self.error_page(403)
elif self.environ['PATH_INFO']:
self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
return self.error_page(404)
else:
machine_info = self.dbcurs.get_machine_details(self.shortname).fetchone()
if not machine_info:
self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
return self.error_page(404)
elif self.environ['REQUEST_METHOD'] != 'GET':
self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
return self.error_page(405)
else:
self.start_response('200 OK', [('Content-type', 'text/html; chearset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
return self.machine_page(machine_info)
def machine_page(self, machine_info):
id = machine_info['id']
description = machine_info['description']
yield htmltmpl.MACHINE_PROLOGUE.substitute(
app=self.js_escape(htmlescape(self.application_uri, True)),
assets=self.js_escape(htmlescape(urlparse.urljoin(self.application_uri, 'static'), True)),
sourcehref=self.sourcefile_href(machine_info['sourcefile']),
description=htmlescape(description),
shortname=htmlescape(self.shortname),
isdevice=htmlescape('Yes' if machine_info['isdevice'] else 'No'),
runnable=htmlescape('Yes' if machine_info['runnable'] else 'No'),
sourcefile=htmlescape(machine_info['sourcefile'])).encode('utf-8')
if machine_info['year'] is not None:
yield (
' <tr><th>Year:</th><td>%s</td></tr>\n' \
' <tr><th>Manufacturer:</th><td>%s</td></tr>\n' %
(htmlescape(machine_info['year']), htmlescape(machine_info['Manufacturer']))).encode('utf-8')
if machine_info['cloneof'] is not None:
parent = self.dbcurs.listfull(machine_info['cloneof']).fetchone()
if parent:
yield (
' <tr><th>Parent machine:</th><td><a href="%s">%s (%s)</a></td></tr>\n' %
(self.machine_href(machine_info['cloneof']), htmlescape(parent[1]), htmlescape(machine_info['cloneof']))).encode('utf-8')
else:
yield (
' <tr><th>Parent machine:</th><td><a href="%s">%s</a></td></tr>\n' %
(self.machine_href(machine_info['cloneof']), htmlescape(machine_info['cloneof']))).encode('utf-8')
if (machine_info['romof'] is not None) and (machine_info['romof'] != machine_info['cloneof']):
parent = self.dbcurs.listfull(machine_info['romof']).fetchone()
if parent:
yield (
' <tr><th>Parent ROM set:</th><td><a href="%s">%s (%s)</a></td></tr>\n' %
(self.machine_href(machine_info['romof']), htmlescape(parent[1]), htmlescape(machine_info['romof']))).encode('utf-8')
else:
yield (
' <tr><th>Parent ROM set:</th><td><a href="%s">%s</a></td></tr>\n' %
(self.machine_href(machine_info['romof']), htmlescape(machine_info['romof']))).encode('utf-8')
unemulated = []
imperfect = []
for feature, status, overall in self.dbcurs.get_feature_flags(id):
if overall == 1:
imperfect.append(feature)
elif overall > 1:
unemulated.append(feature)
if (unemulated):
unemulated.sort()
yield(
(' <tr><th>Unemulated Features:</th><td>%s' + (', %s' * (len(unemulated) - 1)) + '</td></tr>\n') %
tuple(unemulated)).encode('utf-8');
if (imperfect):
yield(
(' <tr><th>Imperfect Features:</th><td>%s' + (', %s' * (len(imperfect) - 1)) + '</td></tr>\n') %
tuple(imperfect)).encode('utf-8');
yield '</table>\n'.encode('utf-8')
# make a table of clones
first = True
for clone, clonedescription, cloneyear, clonemanufacturer in self.dbcurs.get_clones(self.shortname):
if first:
yield htmltmpl.MACHINE_CLONES_PROLOGUE.substitute().encode('utf-8')
first = False
yield htmltmpl.MACHINE_CLONES_ROW.substitute(
href=self.machine_href(clone),
shortname=htmlescape(clone),
description=htmlescape(clonedescription),
year=htmlescape(cloneyear or ''),
manufacturer=htmlescape(clonemanufacturer or '')).encode('utf-8')
if not first:
yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-clones').encode('utf-8')
yield '<script>make_collapsible(document.getElementById("heading-clones"), document.getElementById("tbl-clones"));</script>\n'.encode('utf-8')
# make a table of software lists
yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_PROLOGUE.substitute().encode('utf-8')
for softwarelist in self.dbcurs.get_machine_softwarelists(id):
total = softwarelist['total']
yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_ROW.substitute(
rowid=htmlescape(softwarelist['tag'].replace(':', '-'), True),
href=self.softwarelist_href(softwarelist['shortname']),
shortname=htmlescape(softwarelist['shortname']),
description=htmlescape(softwarelist['description']),
status=htmlescape(softwarelist['status']),
total=htmlescape('%d' % (total, )),
supported=htmlescape('%.1f%%' % (softwarelist['supported'] * 100.0 / (total or 1), )),
partiallysupported=htmlescape('%.1f%%' % (softwarelist['partiallysupported'] * 100.0 / (total or 1), )),
unsupported=htmlescape('%.1f%%' % (softwarelist['unsupported'] * 100.0 / (total or 1), ))).encode('utf-8')
yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_EPILOGUE.substitute().encode('utf-8')
# allow system BIOS selection
haveoptions = False
for name, desc, isdef in self.dbcurs.get_biossets(id):
if not haveoptions:
haveoptions = True;
yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
yield htmltmpl.MACHINE_BIOS_PROLOGUE.substitute().encode('utf-8')
yield htmltmpl.MACHINE_BIOS_OPTION.substitute(
name=htmlescape(name, True),
description=htmlescape(desc),
isdefault=('yes' if isdef else 'no')).encode('utf-8')
if haveoptions:
yield '</select>\n<script>set_default_system_bios();</script>\n'.encode('utf-8')
# allow RAM size selection
first = True
for name, size, isdef in self.dbcurs.get_ram_options(id):
if first:
if not haveoptions:
haveoptions = True;
yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
yield htmltmpl.MACHINE_RAM_PROLOGUE.substitute().encode('utf-8')
first = False
yield htmltmpl.MACHINE_RAM_OPTION.substitute(
name=htmlescape(name, True),
size=htmlescape('{:,}'.format(size)),
isdefault=('yes' if isdef else 'no')).encode('utf-8')
if not first:
yield ' </select>\n <script>set_default_ram_option();</script>\n'.encode('utf-8')
# placeholder for machine slots - populated by client-side JavaScript
if self.dbcurs.count_slots(id):
if not haveoptions:
haveoptions = True
yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
yield htmltmpl.MACHINE_SLOTS_PLACEHOLDER_PROLOGUE.substitute().encode('utf=8')
pending = set((self.shortname, ))
added = set((self.shortname, ))
haveextra = set()
while pending:
requested = pending.pop()
slots = self.slot_data(self.dbcurs.get_machine_id(requested))
yield (' slot_info[%s] = %s;\n' % (self.sanitised_json(requested), self.sanitised_json(slots))).encode('utf-8')
for slotname, slot in slots['slots'].items():
for choice, card in slot.items():
carddev = card['device']
if carddev not in added:
pending.add(carddev)
added.add(carddev)
if (carddev not in haveextra) and (slots['defaults'].get(slotname) == choice):
haveextra.add(carddev)
cardid = self.dbcurs.get_machine_id(carddev)
carddev = self.sanitised_json(carddev)
yield (
' bios_sets[%s] = %s;\n machine_flags[%s] = %s;\n softwarelist_info[%s] = %s;\n' %
(carddev, self.sanitised_json(self.bios_data(cardid)), carddev, self.sanitised_json(self.flags_data(cardid)), carddev, self.sanitised_json(self.softwarelist_data(cardid)))).encode('utf-8')
yield htmltmpl.MACHINE_SLOTS_PLACEHOLDER_EPILOGUE.substitute(
machine=self.sanitised_json(self.shortname)).encode('utf=8')
# add disclosure triangle for options if present
if haveoptions:
yield htmltmpl.MACHINE_OPTIONS_EPILOGUE.substitute().encode('utf=8')
# list devices referenced by this system/device
first = True
for name, desc, src in self.dbcurs.get_devices_referenced(id):
if first:
yield \
'<h2 id="heading-dev-refs">Devices Referenced</h2>\n' \
'<table id="tbl-dev-refs">\n' \
' <thead>\n' \
' <tr><th>Short name</th><th>Description</th><th>Source file</th></tr>\n' \
' </thead>\n' \
' <tbody>\n'.encode('utf-8')
first = False
yield self.machine_row(name, desc, src)
if not first:
yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-dev-refs').encode('utf-8')
yield '<script>make_collapsible(document.getElementById("heading-dev-refs"), document.getElementById("tbl-dev-refs"));</script>\n'.encode('utf-8')
# list slots where this device is an option
first = True
for name, desc, slot, opt, src in self.dbcurs.get_compatible_slots(id):
if (first):
yield \
'<h2 id="heading-comp-slots">Compatible Slots</h2>\n' \
'<table id="tbl-comp-slots">\n' \
' <thead>\n' \
' <tr><th>Short name</th><th>Description</th><th>Slot</th><th>Choice</th><th>Source file</th></tr>\n' \
' </thead>\n' \
' <tbody>\n'.encode('utf-8')
first = False
yield htmltmpl.COMPATIBLE_SLOT_ROW.substitute(
machinehref=self.machine_href(name),
sourcehref=self.sourcefile_href(src),
shortname=htmlescape(name),
description=htmlescape(desc),
sourcefile=htmlescape(src),
slot=htmlescape(slot),
slotoption=htmlescape(opt)).encode('utf-8')
if not first:
yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-comp-slots').encode('utf-8')
yield '<script>make_collapsible(document.getElementById("heading-comp-slots"), document.getElementById("tbl-comp-slots"));</script>\n'.encode('utf-8')
# list systems/devices that reference this device
first = True
for name, desc, src in self.dbcurs.get_device_references(id):
if first:
yield \
'<h2 id="heading-ref-by">Referenced By</h2>\n' \
'<table id="tbl-ref-by">\n' \
' <thead>\n' \
' <tr><th>Short name</th><th>Description</th><th>Source file</th></tr>\n' \
' </thead>\n' \
' <tbody>\n'.encode('utf-8')
first = False
yield self.machine_row(name, desc, src)
if not first:
yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-ref-by').encode('utf-8')
yield '<script>make_collapsible(document.getElementById("heading-ref-by"), document.getElementById("tbl-ref-by"));</script>\n'.encode('utf-8')
yield '</html>\n'.encode('utf-8')
def machine_row(self, shortname, description, sourcefile):
    """Render one machine table row, using the "excluded" template when the
    machine has no description (i.e. it is not a runnable system)."""
    if description is not None:
        template = htmltmpl.MACHINE_ROW
    else:
        template = htmltmpl.EXCL_MACHINE_ROW
    row = template.substitute(
            machinehref=self.machine_href(shortname),
            sourcehref=self.sourcefile_href(sourcefile),
            shortname=htmlescape(shortname),
            description=htmlescape(description or ''),
            sourcefile=htmlescape(sourcefile or ''))
    return row.encode('utf-8')
@staticmethod
def sanitised_json(data):
    """Serialise *data* to JSON with angle brackets escaped so the result
    can be embedded safely inside an HTML <script> element."""
    text = json.dumps(data)
    for ch, esc in (('<', '\\u003c'), ('>', '\\u003e')):
        text = text.replace(ch, esc)
    return text
class SourceFileHandler(QueryPageHandler):
    """Serves source file pages: a listing of files matching a pattern, or
    the machines defined in a single source file.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(SourceFileHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        # The remaining request path is the source file name or a glob pattern.
        self.filename = self.environ['PATH_INFO']
        if self.filename and (self.filename[0] == '/'):
            self.filename = self.filename[1:]
        if not self.filename:
            if self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: "chearset" -> "charset"
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.sourcefile_listing_page(None)
        else:
            id = self.dbcurs.get_sourcefile_id(self.filename)
            if id is None:
                # No exact match - try treating the name as a directory prefix.
                # (fixed: the "'?' not in self.filename" test was duplicated)
                if ('*' not in self.filename) and ('?' not in self.filename):
                    self.filename += '*' if self.filename[-1] == '/' else '/*'
                    if not self.dbcurs.count_sourcefiles(self.filename):
                        self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                        return self.error_page(404)
                    elif self.environ['REQUEST_METHOD'] != 'GET':
                        self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                        return self.error_page(405)
                    else:
                        self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                        return self.sourcefile_listing_page(self.filename)
                else:
                    self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.sourcefile_page(id)

    def sourcefile_listing_page(self, pattern):
        """Yield an HTML listing of source files matching *pattern* (all files
        when pattern is None)."""
        if not pattern:
            title = heading = 'All Source Files'
        else:
            heading = self.linked_title(pattern)
            title = 'Source Files: ' + htmlescape(pattern)
        yield htmltmpl.SOURCEFILE_LIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading).encode('utf-8')
        for filename, machines in self.dbcurs.get_sourcefiles(pattern):
            yield htmltmpl.SOURCEFILE_LIST_ROW.substitute(
                    sourcefile=self.linked_title(filename, True),
                    machines=htmlescape('%d' % (machines, ))).encode('utf-8')
        yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-sourcefiles').encode('utf-8')

    def sourcefile_page(self, id):
        """Yield the page for a single source file, listing its machines."""
        yield htmltmpl.SOURCEFILE_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                filename=htmlescape(self.filename),
                title=self.linked_title(self.filename)).encode('utf-8')
        first = True
        for machine_info in self.dbcurs.get_sourcefile_machines(id):
            if first:
                # emit the table header lazily so an empty result gets a message instead
                yield \
                        '<table id="tbl-machines">\n' \
                        ' <thead>\n' \
                        ' <tr>\n' \
                        ' <th>Short name</th>\n' \
                        ' <th>Description</th>\n' \
                        ' <th>Year</th>\n' \
                        ' <th>Manufacturer</th>\n' \
                        ' <th>Runnable</th>\n' \
                        ' <th>Parent</th>\n' \
                        ' </tr>\n' \
                        ' </thead>\n' \
                        ' <tbody>\n'.encode('utf-8')
                first = False
            yield self.machine_row(machine_info)
        if first:
            yield '<p>No machines found.</p>\n'.encode('utf-8')
        else:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-machines').encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def linked_title(self, filename, linkfinal=False):
        """Render *filename* with each directory component linked to the
        corresponding source file listing; link the final component only when
        *linkfinal* is set."""
        parts = filename.split('/')
        final = parts[-1]
        del parts[-1]
        uri = urlparse.urljoin(self.application_uri, 'sourcefile')
        title = ''
        for part in parts:
            uri = urlparse.urljoin(uri + '/', urlquote(part))
            title += '<a href="{0}">{1}</a>/'.format(htmlescape(uri, True), htmlescape(part))
        if linkfinal:
            uri = urlparse.urljoin(uri + '/', urlquote(final))
            return title + '<a href="{0}">{1}</a>'.format(htmlescape(uri, True), htmlescape(final))
        else:
            return title + final

    def machine_row(self, machine_info):
        # Parents and clones use different row templates.
        return (htmltmpl.SOURCEFILE_ROW_PARENT if machine_info['cloneof'] is None else htmltmpl.SOURCEFILE_ROW_CLONE).substitute(
                machinehref=self.machine_href(machine_info['shortname']),
                parenthref=self.machine_href(machine_info['cloneof'] or '__invalid'),
                shortname=htmlescape(machine_info['shortname']),
                description=htmlescape(machine_info['description']),
                year=htmlescape(machine_info['year'] or ''),
                manufacturer=htmlescape(machine_info['manufacturer'] or ''),
                runnable=htmlescape('Yes' if machine_info['runnable'] else 'No'),
                parent=htmlescape(machine_info['cloneof'] or '')).encode('utf-8')
class SoftwareListHandler(QueryPageHandler):
    """Serves software list pages: a listing of lists, a single software
    list, or a single software item.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(SoftwareListHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        # First path component is the list short name (or pattern), second is
        # the software short name (or pattern) within the list.
        self.shortname = wsgiref.util.shift_path_info(environ)
        self.software = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.software and ('*' not in self.software) and ('?' not in self.software):
            # exact software item
            software_info = self.dbcurs.get_software_details(self.shortname, self.software).fetchone()
            if not software_info:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: "chearset" -> "charset"
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.software_page(software_info)
        elif self.software or (self.shortname and ('*' not in self.shortname) and ('?' not in self.shortname)):
            # exact list, possibly filtered by a software pattern
            softwarelist_info = self.dbcurs.get_softwarelist_details(self.shortname, self.software or None).fetchone()
            if not softwarelist_info:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.softwarelist_page(softwarelist_info, self.software or None)
        else:
            # list of software lists (optionally pattern-filtered)
            if self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.softwarelist_listing_page(self.shortname or None)

    def softwarelist_listing_page(self, pattern):
        """Yield an HTML listing of software lists matching *pattern* (all
        lists when pattern is None)."""
        if not pattern:
            title = heading = 'All Software Lists'
        else:
            title = heading = 'Software Lists: ' + htmlescape(pattern)
        yield htmltmpl.SOFTWARELIST_LIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading).encode('utf-8')
        for shortname, description, total, supported, partiallysupported, unsupported in self.dbcurs.get_softwarelists(pattern):
            yield htmltmpl.SOFTWARELIST_LIST_ROW.substitute(
                    href=self.softwarelist_href(shortname),
                    shortname=htmlescape(shortname),
                    description=htmlescape(description),
                    total=htmlescape('%d' % (total, )),
                    supported=htmlescape('%.1f%%' % (supported * 100.0 / (total or 1), )),
                    partiallysupported=htmlescape('%.1f%%' % (partiallysupported * 100.0 / (total or 1), )),
                    unsupported=htmlescape('%.1f%%' % (unsupported * 100.0 / (total or 1), ))).encode('utf-8')
        yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-softwarelists').encode('utf-8')

    def softwarelist_page(self, softwarelist_info, pattern):
        """Yield the page for a single software list, with support statistics,
        notes, compatible machines and the software items."""
        if not pattern:
            title = 'Software List: %s (%s)' % (htmlescape(softwarelist_info['description']), htmlescape(softwarelist_info['shortname']))
            heading = htmlescape(softwarelist_info['description'])
        else:
            title = 'Software List: %s (%s): %s' % (htmlescape(softwarelist_info['description']), htmlescape(softwarelist_info['shortname']), htmlescape(pattern))
            heading = '<a href="%s">%s</a>: %s' % (self.softwarelist_href(softwarelist_info['shortname']), htmlescape(softwarelist_info['description']), htmlescape(pattern))
        yield htmltmpl.SOFTWARELIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading,
                shortname=htmlescape(softwarelist_info['shortname']),
                total=htmlescape('%d' % (softwarelist_info['total'], )),
                supported=htmlescape('%d' % (softwarelist_info['supported'], )),
                supportedpc=htmlescape('%.1f' % (softwarelist_info['supported'] * 100.0 / (softwarelist_info['total'] or 1), )),
                partiallysupported=htmlescape('%d' % (softwarelist_info['partiallysupported'], )),
                partiallysupportedpc=htmlescape('%.1f' % (softwarelist_info['partiallysupported'] * 100.0 / (softwarelist_info['total'] or 1), )),
                unsupported=htmlescape('%d' % (softwarelist_info['unsupported'], )),
                unsupportedpc=htmlescape('%.1f' % (softwarelist_info['unsupported'] * 100.0 / (softwarelist_info['total'] or 1), ))).encode('utf-8')
        if softwarelist_info['notes'] is not None:
            # blank lines in the notes separate paragraphs
            yield htmltmpl.SOFTWARELIST_NOTES_PROLOGUE.substitute().encode('utf-8')
            first = True
            for line in softwarelist_info['notes'].strip().splitlines():
                if line:
                    yield (('<p>%s' if first else '<br />\n%s') % (htmlescape(line), )).encode('utf-8')
                    first = False
                elif not first:
                    yield '</p>\n'.encode('utf-8')
                    first = True
            if not first:
                yield '</p>\n'.encode('utf-8')
            yield htmltmpl.SOFTWARELIST_NOTES_EPILOGUE.substitute().encode('utf-8')
        first = True
        for machine_info in self.dbcurs.get_softwarelist_machines(softwarelist_info['id']):
            if first:
                yield htmltmpl.SOFTWARELIST_MACHINE_TABLE_HEADER.substitute().encode('utf-8')
                first = False
            yield htmltmpl.SOFTWARELIST_MACHINE_TABLE_ROW.substitute(
                    machinehref=self.machine_href(machine_info['shortname']),
                    shortname=htmlescape(machine_info['shortname']),
                    description=htmlescape(machine_info['description']),
                    year=htmlescape(machine_info['year'] or ''),
                    manufacturer=htmlescape(machine_info['manufacturer'] or ''),
                    status=htmlescape(machine_info['status'])).encode('utf-8')
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-machines').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-machines"), document.getElementById("tbl-machines"));</script>\n'.encode('utf-8')
        first = True
        for software_info in self.dbcurs.get_softwarelist_software(softwarelist_info['id'], self.software or None):
            if first:
                yield htmltmpl.SOFTWARELIST_SOFTWARE_TABLE_HEADER.substitute().encode('utf-8')
                first = False
            yield self.software_row(software_info)
        if first:
            yield '<p>No software found.</p>\n'.encode('utf-8')
        else:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-software').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-software"), document.getElementById("tbl-software"));</script>\n'.encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def software_page(self, software_info):
        """Yield the page for a single software item: general info, clones,
        notes and parts."""
        yield htmltmpl.SOFTWARE_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=htmlescape(software_info['description']),
                heading=htmlescape(software_info['description']),
                softwarelisthref=self.softwarelist_href(self.shortname),
                softwarelistdescription=htmlescape(software_info['softwarelistdescription']),
                softwarelist=htmlescape(self.shortname),
                shortname=htmlescape(software_info['shortname']),
                year=htmlescape(software_info['year']),
                publisher=htmlescape(software_info['publisher'])).encode('utf-8')
        if software_info['parent'] is not None:
            yield (' <tr><th>Parent:</th><td><a href="%s">%s</a></td>\n' % (self.software_href(software_info['parentsoftwarelist'], software_info['parent']), htmlescape(software_info['parentdescription']))).encode('utf-8')
        yield (' <tr><th>Supported:</th><td>%s</td>\n' % (self.format_supported(software_info['supported']), )).encode('utf-8')
        for name, value in self.dbcurs.get_software_info(software_info['id']):
            yield (' <tr><th>%s:</th><td>%s</td>\n' % (htmlescape(name), htmlescape(value))).encode('utf-8')
        yield '</table>\n\n'.encode('utf-8')
        first = True
        for clone_info in self.dbcurs.get_software_clones(software_info['id']):
            if first:
                yield htmltmpl.SOFTWARE_CLONES_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield self.clone_row(clone_info)
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-clones').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-clones"), document.getElementById("tbl-clones"));</script>\n'.encode('utf-8')
        if software_info['notes'] is not None:
            # blank lines in the notes separate paragraphs
            yield htmltmpl.SOFTWARE_NOTES_PROLOGUE.substitute().encode('utf-8')
            first = True
            for line in software_info['notes'].strip().splitlines():
                if line:
                    yield (('<p>%s' if first else '<br />\n%s') % (htmlescape(line), )).encode('utf-8')
                    first = False
                elif not first:
                    yield '</p>\n'.encode('utf-8')
                    first = True
            if not first:
                yield '</p>\n'.encode('utf-8')
            yield htmltmpl.SOFTWARE_NOTES_EPILOGUE.substitute().encode('utf-8')
        parts = self.dbcurs.get_software_parts(software_info['id']).fetchall()
        first = True
        for id, partname, interface, part_id in parts:
            if first:
                yield htmltmpl.SOFTWARE_PARTS_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield htmltmpl.SOFTWARE_PART_PROLOGUE.substitute(
                    heading=htmlescape(('%s (%s)' % (part_id, partname)) if part_id is not None else partname),
                    shortname=htmlescape(partname),
                    interface=htmlescape(interface)).encode('utf-8')
            for name, value in self.dbcurs.get_softwarepart_features(id):
                yield (' <tr><th>%s:</th><td>%s</td>\n' % (htmlescape(name), htmlescape(value))).encode('utf-8')
            yield ' </table>\n\n'.encode('utf-8')
        if not first:
            yield htmltmpl.SOFTWARE_PARTS_EPILOGUE.substitute().encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def software_row(self, software_info):
        # Row in the software table; clones link to their parent item.
        parent = software_info['parent']
        return htmltmpl.SOFTWARELIST_SOFTWARE_ROW.substitute(
                softwarehref=self.software_href(self.shortname, software_info['shortname']),
                shortname=htmlescape(software_info['shortname']),
                description=htmlescape(software_info['description']),
                year=htmlescape(software_info['year']),
                publisher=htmlescape(software_info['publisher']),
                supported=self.format_supported(software_info['supported']),
                parts=htmlescape('%d' % (software_info['parts'], )),
                baddumps=htmlescape('%d' % (software_info['baddumps'], )),
                parent='<a href="%s">%s</a>' % (self.software_href(software_info['parentsoftwarelist'], parent), htmlescape(parent)) if parent is not None else '').encode('utf-8')

    def clone_row(self, clone_info):
        # Row in the clones table of a software item page.
        return htmltmpl.SOFTWARE_CLONES_ROW.substitute(
                href=self.software_href(clone_info['softwarelist'], clone_info['shortname']),
                shortname=htmlescape(clone_info['shortname']),
                description=htmlescape(clone_info['description']),
                year=htmlescape(clone_info['year']),
                publisher=htmlescape(clone_info['publisher']),
                supported=self.format_supported(clone_info['supported'])).encode('utf-8')

    @staticmethod
    def format_supported(supported):
        # Database encoding: 0 = fully supported, 1 = partial, other = no.
        return 'Yes' if supported == 0 else 'Partial' if supported == 1 else 'No'
class RomIdentHandler(QueryPageHandler):
    """Serves the ROM/disk dump identification form page."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        # Fixed: the original called super(QueryPageHandler, self), skipping
        # QueryPageHandler.__init__ and recreating self.dbcurs by hand.
        # Initialising the direct superclass sets up the cursor correctly.
        super(RomIdentHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            # fixed typo: "chearset" -> "charset"
            self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.form_page()

    def form_page(self):
        yield htmltmpl.ROMIDENT_PAGE.substitute(
                app=self.js_escape(htmlescape(self.application_uri, True)),
                assets=self.js_escape(htmlescape(urlparse.urljoin(self.application_uri, 'static'), True))).encode('utf-8')
class BiosRpcHandler(MachineRpcHandlerBase):
    """RPC endpoint returning a machine's BIOS sets as JSON."""

    def data_page(self, machine):
        biossets = {
                name: { 'description': description, 'isdefault': bool(isdefault) }
                for name, description, isdefault in self.dbcurs.get_biossets(machine) }
        yield json.dumps(biossets).encode('utf-8')
class FlagsRpcHandler(MachineRpcHandlerBase):
    """RPC endpoint returning a machine's emulation feature flags as JSON."""

    def data_page(self, machine):
        payload = json.dumps(self.flags_data(machine))
        yield payload.encode('utf-8')
class SlotsRpcHandler(MachineRpcHandlerBase):
    """RPC endpoint returning a machine's slot options and defaults as JSON."""

    def data_page(self, machine):
        payload = json.dumps(self.slot_data(machine))
        yield payload.encode('utf-8')
class SoftwareListsRpcHandler(MachineRpcHandlerBase):
    """RPC endpoint returning a machine's software lists as JSON."""

    def data_page(self, machine):
        payload = json.dumps(self.softwarelist_data(machine))
        yield payload.encode('utf-8')
class RomDumpsRpcHandler(QueryPageHandler):
    """RPC endpoint identifying ROM dumps by CRC32 and SHA-1 digest."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(RomDumpsRpcHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            try:
                # expect exactly ?crc=<hex>&sha1=<digest>; anything else is an error
                args = urlparse.parse_qs(self.environ['QUERY_STRING'], keep_blank_values=True, strict_parsing=True)
                crc = args.get('crc')
                sha1 = args.get('sha1')
                if (len(args) == 2) and (crc is not None) and (len(crc) == 1) and (sha1 is not None) and (len(sha1) == 1):
                    crc = int(crc[0], 16)
                    sha1 = sha1[0]
                    # fixed typo: "chearset" -> "charset"
                    self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.data_page(crc, sha1)
            except Exception:
                # fixed: was "except BaseException as e", which bound an unused
                # name and also swallowed KeyboardInterrupt/SystemExit
                pass
            self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(500)

    def data_page(self, crc, sha1):
        """Yield a JSON document listing machines and software whose ROM
        dumps match the given CRC32/SHA-1 pair."""
        machines = { }
        for shortname, description, label, bad in self.dbcurs.get_rom_dumps(crc, sha1):
            machine = machines.get(shortname)
            if machine is None:
                machine = { 'description': description, 'matches': [ ] }
                machines[shortname] = machine
            machine['matches'].append({ 'name': label, 'bad': bool(bad) })
        software = { }
        for softwarelist, softwarelistdescription, shortname, description, part, part_id, label, bad in self.dbcurs.get_software_rom_dumps(crc, sha1):
            listinfo = software.get(softwarelist)
            if listinfo is None:
                listinfo = { 'description': softwarelistdescription, 'software': { } }
                software[softwarelist] = listinfo
            softwareinfo = listinfo['software'].get(shortname)
            if softwareinfo is None:
                softwareinfo = { 'description': description, 'parts': { } }
                listinfo['software'][shortname] = softwareinfo
            partinfo = softwareinfo['parts'].get(part)
            if partinfo is None:
                partinfo = { 'matches': [ ] }
                if part_id is not None:
                    partinfo['description'] = part_id
                softwareinfo['parts'][part] = partinfo
            partinfo['matches'].append({ 'name': label, 'bad': bool(bad) })
        result = { 'machines': machines, 'software': software }
        yield json.dumps(result).encode('utf-8')
class DiskDumpsRpcHandler(QueryPageHandler):
    """RPC endpoint identifying disk (CHD) dumps by SHA-1 digest."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(DiskDumpsRpcHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            try:
                # expect exactly ?sha1=<digest>; anything else is an error
                args = urlparse.parse_qs(self.environ['QUERY_STRING'], keep_blank_values=True, strict_parsing=True)
                sha1 = args.get('sha1')
                if (len(args) == 1) and (sha1 is not None) and (len(sha1) == 1):
                    sha1 = sha1[0]
                    # fixed typo: "chearset" -> "charset"
                    self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.data_page(sha1)
            except Exception:
                # fixed: was "except BaseException as e", which bound an unused
                # name and also swallowed KeyboardInterrupt/SystemExit
                pass
            self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(500)

    def data_page(self, sha1):
        """Yield a JSON document listing machines and software whose disk
        dumps match the given SHA-1 digest."""
        machines = { }
        for shortname, description, label, bad in self.dbcurs.get_disk_dumps(sha1):
            machine = machines.get(shortname)
            if machine is None:
                machine = { 'description': description, 'matches': [ ] }
                machines[shortname] = machine
            machine['matches'].append({ 'name': label, 'bad': bool(bad) })
        software = { }
        for softwarelist, softwarelistdescription, shortname, description, part, part_id, label, bad in self.dbcurs.get_software_disk_dumps(sha1):
            listinfo = software.get(softwarelist)
            if listinfo is None:
                listinfo = { 'description': softwarelistdescription, 'software': { } }
                software[softwarelist] = listinfo
            softwareinfo = listinfo['software'].get(shortname)
            if softwareinfo is None:
                softwareinfo = { 'description': description, 'parts': { } }
                listinfo['software'][shortname] = softwareinfo
            partinfo = softwareinfo['parts'].get(part)
            if partinfo is None:
                partinfo = { 'matches': [ ] }
                if part_id is not None:
                    partinfo['description'] = part_id
                softwareinfo['parts'][part] = partinfo
            partinfo['matches'].append({ 'name': label, 'bad': bool(bad) })
        result = { 'machines': machines, 'software': software }
        yield json.dumps(result).encode('utf-8')
class MiniMawsApp(object):
    """Top-level WSGI application: dispatches requests to page handlers."""

    # Characters that must be backslash-escaped inside JavaScript strings.
    JS_ESCAPE = re.compile('([\"\'\\\\])')

    RPC_SERVICES = {
            'bios':             BiosRpcHandler,
            'flags':            FlagsRpcHandler,
            'slots':            SlotsRpcHandler,
            'softwarelists':    SoftwareListsRpcHandler,
            'romdumps':         RomDumpsRpcHandler,
            'diskdumps':        DiskDumpsRpcHandler }

    def __init__(self, dbfile, **kwargs):
        super(MiniMawsApp, self).__init__(**kwargs)
        self.dbconn = dbaccess.QueryConnection(dbfile)
        # Static assets live next to this module.
        self.assetsdir = os.path.join(os.path.dirname(inspect.getfile(self.__class__)), 'assets')
        if not mimetypes.inited:
            mimetypes.init()

    def __call__(self, environ, start_response):
        application_uri = wsgiref.util.application_uri(environ)
        if application_uri[-1] != '/':
            application_uri += '/'
        module = wsgiref.util.shift_path_info(environ)
        if module == 'machine':
            return MachineHandler(self, application_uri, environ, start_response)
        elif module == 'sourcefile':
            return SourceFileHandler(self, application_uri, environ, start_response)
        elif module == 'softwarelist':
            return SoftwareListHandler(self, application_uri, environ, start_response)
        elif module == 'romident':
            return RomIdentHandler(self, application_uri, environ, start_response)
        elif module == 'static':
            return AssetHandler(self.assetsdir, self, application_uri, environ, start_response)
        elif module == 'rpc':
            service = wsgiref.util.shift_path_info(environ)
            if not service:
                return ErrorPageHandler(403, self, application_uri, environ, start_response)
            elif service in self.RPC_SERVICES:
                return self.RPC_SERVICES[service](self, application_uri, environ, start_response)
            else:
                return ErrorPageHandler(404, self, application_uri, environ, start_response)
        elif not module:
            return ErrorPageHandler(403, self, application_uri, environ, start_response)
        else:
            return ErrorPageHandler(404, self, application_uri, environ, start_response)

    def js_escape(self, value):
        """Escape quotes, backslashes and NULs in *value* for embedding
        inside a JavaScript string literal.

        (Parameter renamed from ``str``, which shadowed the built-in; all
        callers pass it positionally.)
        """
        return self.JS_ESCAPE.sub('\\\\\\1', value).replace('\0', '\\0')
| #!/usr/bin/python
##
## license:BSD-3-Clause
## copyright-holders:<NAME>
from . import dbaccess
from . import htmltmpl
import cgi
import inspect
import json
import mimetypes
import os.path
import re
import sys
import urllib
import wsgiref.util
# cgi.escape was deprecated and later removed; prefer it when present for
# older interpreters, otherwise fall back to the html module's escape.
if hasattr(cgi, 'escape'):
    htmlescape = cgi.escape
else:
    import html
    htmlescape = html.escape

# Bridge the Python 2/3 split: Python 3 provides urllib.parse, while
# Python 2 has a top-level urlparse module and quote() lives in urllib.
try:
    import urllib.parse as urlparse
    urlquote = urlparse.quote
except ImportError:
    import urlparse
    urlquote = urllib.quote
class HandlerBase(object):
    """Common base for all WSGI request handlers.

    Stores the application, base URI, WSGI environment and start_response
    callable, and provides a shared HTML error page generator.
    """

    # Reason phrases for the HTTP status codes this application emits.
    STATUS_MESSAGE = {
            400: 'Bad Request',
            401: 'Unauthorized',
            403: 'Forbidden',
            404: 'Not Found',
            405: 'Method Not Allowed',
            500: 'Internal Server Error',
            501: 'Not Implemented',
            502: 'Bad Gateway',
            503: 'Service Unavailable',
            504: 'Gateway Timeout',
            505: 'HTTP Version Not Supported' }

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(HandlerBase, self).__init__(**kwargs)
        self.app = app
        self.js_escape = app.js_escape
        self.application_uri = application_uri
        self.environ = environ
        self.start_response = start_response

    def error_page(self, code):
        """Yield a rendered HTML error page for HTTP status *code*."""
        body = htmltmpl.ERROR_PAGE.substitute(
                code=htmlescape('%d' % (code, )),
                message=htmlescape(self.STATUS_MESSAGE[code]))
        yield body.encode('utf-8')
class ErrorPageHandler(HandlerBase):
    """Handler that immediately responds with a static HTML error page."""

    def __init__(self, code, app, application_uri, environ, start_response, **kwargs):
        super(ErrorPageHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.code = code
        status = '%d %s' % (self.code, self.STATUS_MESSAGE[code])
        headers = [
                ('Content-type', 'text/html; charset=utf-8'),
                ('Cache-Control', 'public, max-age=3600')]
        self.start_response(status, headers)

    def __iter__(self):
        return self.error_page(self.code)
class AssetHandler(HandlerBase):
    """Serves static asset files from a directory."""

    # Overrides for extensions that mimetypes may map incorrectly or not at all.
    EXTENSIONMAP = { '.js': 'application/javascript', '.svg': 'image/svg+xml' }

    def __init__(self, directory, app, application_uri, environ, start_response, **kwargs):
        super(AssetHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.directory = directory
        self.asset = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if not self.asset:
            self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(403)
        elif self.environ['PATH_INFO']:
            # no nested paths below an asset name
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        else:
            path = os.path.join(self.directory, self.asset)
            if not os.path.isfile(path):
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                try:
                    f = open(path, 'rb')
                    # fixed: dropped unused 'base'/'encoding' locals
                    extension = os.path.splitext(path)[1]
                    mimetype = self.EXTENSIONMAP.get(extension)
                    if mimetype is None:
                        mimetype = mimetypes.guess_type(path)[0]
                    self.start_response('200 OK', [('Content-type', mimetype or 'application/octet-stream'), ('Cache-Control', 'public, max-age=3600')])
                    # FileWrapper closes the file when the response completes
                    return wsgiref.util.FileWrapper(f)
                except Exception:
                    # fixed: was a bare "except:", which also swallowed
                    # SystemExit/KeyboardInterrupt
                    self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.error_page(500)
class QueryPageHandler(HandlerBase):
    """Base class for handlers that query the database.

    Provides a per-handler cursor, URL helpers for cross-references between
    pages, and shared routines that assemble BIOS/flag/slot/software-list
    data structures used by both HTML pages and JSON RPC responses.
    """
    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(QueryPageHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        # each handler gets its own cursor on the application's connection
        self.dbcurs = app.dbconn.cursor()
    def machine_href(self, shortname):
        # HTML-escaped, attribute-safe URL of a machine page
        return htmlescape(urlparse.urljoin(self.application_uri, 'machine/%s' % (urlquote(shortname), )), True)
    def sourcefile_href(self, sourcefile):
        # HTML-escaped, attribute-safe URL of a source file page
        return htmlescape(urlparse.urljoin(self.application_uri, 'sourcefile/%s' % (urlquote(sourcefile), )), True)
    def softwarelist_href(self, softwarelist):
        # HTML-escaped, attribute-safe URL of a software list page
        return htmlescape(urlparse.urljoin(self.application_uri, 'softwarelist/%s' % (urlquote(softwarelist), )), True)
    def software_href(self, softwarelist, software):
        # HTML-escaped, attribute-safe URL of a software item page
        return htmlescape(urlparse.urljoin(self.application_uri, 'softwarelist/%s/%s' % (urlquote(softwarelist), urlquote(software))), True)
    def bios_data(self, machine):
        """Map BIOS set name to description/isdefault for *machine*."""
        result = { }
        for name, description, isdefault in self.dbcurs.get_biossets(machine):
            result[name] = { 'description': description, 'isdefault': True if isdefault else False }
        return result
    def flags_data(self, machine):
        """Map feature name to per-device and overall emulation status.

        Status codes from the database map as 1 -> 'imperfect' and
        anything greater -> 'unemulated'; fully working features get no
        entry for that key.
        """
        result = { 'features': { } }
        for feature, status, overall in self.dbcurs.get_feature_flags(machine):
            detail = { }
            if status == 1:
                detail['status'] = 'imperfect'
            elif status > 1:
                detail['status'] = 'unemulated'
            if overall == 1:
                detail['overall'] = 'imperfect'
            elif overall > 1:
                detail['overall'] = 'unemulated'
            result['features'][feature] = detail
        return result
    def slot_data(self, machine):
        """Collect slot options and default cards for *machine*."""
        result = { 'defaults': { }, 'slots': { } }
        # get slot options
        prev = None
        for slot, option, shortname, description in self.dbcurs.get_slot_options(machine):
            if slot != prev:
                if slot in result['slots']:
                    options = result['slots'][slot]
                else:
                    options = { }
                    result['slots'][slot] = options
                prev = slot
            options[option] = { 'device': shortname, 'description': description }
        # if there are any slots, get defaults
        if result['slots']:
            for slot, default in self.dbcurs.get_slot_defaults(machine):
                result['defaults'][slot] = default
        # remove slots that come from default cards in other slots
        # (tuple() snapshots the keys because entries are deleted mid-scan;
        # a sub-slot's tag starts with its parent slot's tag plus ':')
        for slot in tuple(result['slots'].keys()):
            slot += ':'
            for candidate in tuple(result['slots'].keys()):
                if candidate.startswith(slot):
                    del result['slots'][candidate]
        return result
    def softwarelist_data(self, machine):
        """Collect software list summaries for *machine*."""
        result = { }
        # get software lists referenced by machine
        for softwarelist in self.dbcurs.get_machine_softwarelists(machine):
            result[softwarelist['tag']] = {
                    'status': softwarelist['status'],
                    'shortname': softwarelist['shortname'],
                    'description': softwarelist['description'],
                    'total': softwarelist['total'],
                    'supported': softwarelist['supported'],
                    'partiallysupported': softwarelist['partiallysupported'],
                    'unsupported': softwarelist['unsupported'] }
        # remove software lists that come from default cards in slots
        # (their tags are nested below the slot tag, hence the prefix test)
        if result:
            for slot, default in self.dbcurs.get_slot_defaults(machine):
                slot += ':'
                for candidate in tuple(result.keys()):
                    if candidate.startswith(slot):
                        del result[candidate]
        return result
class MachineRpcHandlerBase(QueryPageHandler):
    """Base class for machine-scoped JSON endpoints.

    Takes the machine short name from the next path component, resolves it
    to a database identifier, and delegates to the subclass-provided
    data_page(machine) to produce the JSON payload.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(MachineRpcHandlerBase, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        # first remaining path component is the machine short name
        self.shortname = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if not self.shortname:
            # no machine specified
            self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(403)
        elif self.environ['PATH_INFO']:
            # trailing path components are not allowed
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        else:
            machine = self.dbcurs.get_machine_id(self.shortname)
            if machine is None:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: 'chearset' -> 'charset' so clients actually see
                # the declared encoding in the Content-type header
                self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.data_page(machine)
class MachineHandler(QueryPageHandler):
    """Serves the HTML detail page for a single machine (/machine/<shortname>)."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(MachineHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        # first remaining path component is the machine short name
        self.shortname = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if not self.shortname:
            # could probably list machines here or something
            self.start_response('403 %s' % (self.STATUS_MESSAGE[403], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(403)
        elif self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        else:
            machine_info = self.dbcurs.get_machine_details(self.shortname).fetchone()
            if not machine_info:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: 'chearset' -> 'charset'
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.machine_page(machine_info)

    def machine_page(self, machine_info):
        """Generate the machine detail page body as UTF-8 encoded chunks."""
        id = machine_info['id']
        description = machine_info['description']
        yield htmltmpl.MACHINE_PROLOGUE.substitute(
                app=self.js_escape(htmlescape(self.application_uri, True)),
                assets=self.js_escape(htmlescape(urlparse.urljoin(self.application_uri, 'static'), True)),
                sourcehref=self.sourcefile_href(machine_info['sourcefile']),
                description=htmlescape(description),
                shortname=htmlescape(self.shortname),
                isdevice=htmlescape('Yes' if machine_info['isdevice'] else 'No'),
                runnable=htmlescape('Yes' if machine_info['runnable'] else 'No'),
                sourcefile=htmlescape(machine_info['sourcefile'])).encode('utf-8')
        if machine_info['year'] is not None:
            # fixed inconsistent key case: 'Manufacturer' -> 'manufacturer'
            # (sqlite3.Row lookups are case-insensitive so this worked, but
            # every other access in this file uses the lower-case column name)
            yield (
                    ' <tr><th>Year:</th><td>%s</td></tr>\n' \
                    ' <tr><th>Manufacturer:</th><td>%s</td></tr>\n' %
                    (htmlescape(machine_info['year']), htmlescape(machine_info['manufacturer']))).encode('utf-8')
        if machine_info['cloneof'] is not None:
            parent = self.dbcurs.listfull(machine_info['cloneof']).fetchone()
            if parent:
                yield (
                        ' <tr><th>Parent machine:</th><td><a href="%s">%s (%s)</a></td></tr>\n' %
                        (self.machine_href(machine_info['cloneof']), htmlescape(parent[1]), htmlescape(machine_info['cloneof']))).encode('utf-8')
            else:
                yield (
                        ' <tr><th>Parent machine:</th><td><a href="%s">%s</a></td></tr>\n' %
                        (self.machine_href(machine_info['cloneof']), htmlescape(machine_info['cloneof']))).encode('utf-8')
        if (machine_info['romof'] is not None) and (machine_info['romof'] != machine_info['cloneof']):
            parent = self.dbcurs.listfull(machine_info['romof']).fetchone()
            if parent:
                yield (
                        ' <tr><th>Parent ROM set:</th><td><a href="%s">%s (%s)</a></td></tr>\n' %
                        (self.machine_href(machine_info['romof']), htmlescape(parent[1]), htmlescape(machine_info['romof']))).encode('utf-8')
            else:
                yield (
                        ' <tr><th>Parent ROM set:</th><td><a href="%s">%s</a></td></tr>\n' %
                        (self.machine_href(machine_info['romof']), htmlescape(machine_info['romof']))).encode('utf-8')
        unemulated = []
        imperfect = []
        for feature, status, overall in self.dbcurs.get_feature_flags(id):
            if overall == 1:
                imperfect.append(feature)
            elif overall > 1:
                unemulated.append(feature)
        if unemulated:
            unemulated.sort()
            yield (
                    (' <tr><th>Unemulated Features:</th><td>%s' + (', %s' * (len(unemulated) - 1)) + '</td></tr>\n') %
                    tuple(unemulated)).encode('utf-8')
        if imperfect:
            # sort for stable output, consistent with the unemulated list above
            imperfect.sort()
            yield (
                    (' <tr><th>Imperfect Features:</th><td>%s' + (', %s' * (len(imperfect) - 1)) + '</td></tr>\n') %
                    tuple(imperfect)).encode('utf-8')
        yield '</table>\n'.encode('utf-8')
        # make a table of clones
        first = True
        for clone, clonedescription, cloneyear, clonemanufacturer in self.dbcurs.get_clones(self.shortname):
            if first:
                yield htmltmpl.MACHINE_CLONES_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield htmltmpl.MACHINE_CLONES_ROW.substitute(
                    href=self.machine_href(clone),
                    shortname=htmlescape(clone),
                    description=htmlescape(clonedescription),
                    year=htmlescape(cloneyear or ''),
                    manufacturer=htmlescape(clonemanufacturer or '')).encode('utf-8')
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-clones').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-clones"), document.getElementById("tbl-clones"));</script>\n'.encode('utf-8')
        # make a table of software lists
        yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_PROLOGUE.substitute().encode('utf-8')
        for softwarelist in self.dbcurs.get_machine_softwarelists(id):
            total = softwarelist['total']
            yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_ROW.substitute(
                    rowid=htmlescape(softwarelist['tag'].replace(':', '-'), True),
                    href=self.softwarelist_href(softwarelist['shortname']),
                    shortname=htmlescape(softwarelist['shortname']),
                    description=htmlescape(softwarelist['description']),
                    status=htmlescape(softwarelist['status']),
                    total=htmlescape('%d' % (total, )),
                    supported=htmlescape('%.1f%%' % (softwarelist['supported'] * 100.0 / (total or 1), )),
                    partiallysupported=htmlescape('%.1f%%' % (softwarelist['partiallysupported'] * 100.0 / (total or 1), )),
                    unsupported=htmlescape('%.1f%%' % (softwarelist['unsupported'] * 100.0 / (total or 1), ))).encode('utf-8')
        yield htmltmpl.MACHINE_SOFTWARELISTS_TABLE_EPILOGUE.substitute().encode('utf-8')
        # allow system BIOS selection
        haveoptions = False
        for name, desc, isdef in self.dbcurs.get_biossets(id):
            if not haveoptions:
                haveoptions = True
                yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
                yield htmltmpl.MACHINE_BIOS_PROLOGUE.substitute().encode('utf-8')
            yield htmltmpl.MACHINE_BIOS_OPTION.substitute(
                    name=htmlescape(name, True),
                    description=htmlescape(desc),
                    isdefault=('yes' if isdef else 'no')).encode('utf-8')
        if haveoptions:
            yield '</select>\n<script>set_default_system_bios();</script>\n'.encode('utf-8')
        # allow RAM size selection
        first = True
        for name, size, isdef in self.dbcurs.get_ram_options(id):
            if first:
                if not haveoptions:
                    haveoptions = True
                    yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
                yield htmltmpl.MACHINE_RAM_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield htmltmpl.MACHINE_RAM_OPTION.substitute(
                    name=htmlescape(name, True),
                    size=htmlescape('{:,}'.format(size)),
                    isdefault=('yes' if isdef else 'no')).encode('utf-8')
        if not first:
            yield ' </select>\n <script>set_default_ram_option();</script>\n'.encode('utf-8')
        # placeholder for machine slots - populated by client-side JavaScript
        if self.dbcurs.count_slots(id):
            if not haveoptions:
                haveoptions = True
                yield htmltmpl.MACHINE_OPTIONS_HEADING.substitute().encode('utf-8')
            # fixed typo: encode('utf=8') -> encode('utf-8') here and below
            # (the old spelling only worked because Python's codec lookup
            # normalises non-alphanumeric characters in codec names)
            yield htmltmpl.MACHINE_SLOTS_PLACEHOLDER_PROLOGUE.substitute().encode('utf-8')
            # walk the slot graph breadth-first, emitting slot/BIOS/flag data
            # for every card device reachable from this machine
            pending = set((self.shortname, ))
            added = set((self.shortname, ))
            haveextra = set()
            while pending:
                requested = pending.pop()
                slots = self.slot_data(self.dbcurs.get_machine_id(requested))
                yield (' slot_info[%s] = %s;\n' % (self.sanitised_json(requested), self.sanitised_json(slots))).encode('utf-8')
                for slotname, slot in slots['slots'].items():
                    for choice, card in slot.items():
                        carddev = card['device']
                        if carddev not in added:
                            pending.add(carddev)
                            added.add(carddev)
                        if (carddev not in haveextra) and (slots['defaults'].get(slotname) == choice):
                            haveextra.add(carddev)
                            cardid = self.dbcurs.get_machine_id(carddev)
                            carddev = self.sanitised_json(carddev)
                            yield (
                                    ' bios_sets[%s] = %s;\n machine_flags[%s] = %s;\n softwarelist_info[%s] = %s;\n' %
                                    (carddev, self.sanitised_json(self.bios_data(cardid)), carddev, self.sanitised_json(self.flags_data(cardid)), carddev, self.sanitised_json(self.softwarelist_data(cardid)))).encode('utf-8')
            yield htmltmpl.MACHINE_SLOTS_PLACEHOLDER_EPILOGUE.substitute(
                    machine=self.sanitised_json(self.shortname)).encode('utf-8')
        # add disclosure triangle for options if present
        if haveoptions:
            yield htmltmpl.MACHINE_OPTIONS_EPILOGUE.substitute().encode('utf-8')
        # list devices referenced by this system/device
        first = True
        for name, desc, src in self.dbcurs.get_devices_referenced(id):
            if first:
                yield \
                        '<h2 id="heading-dev-refs">Devices Referenced</h2>\n' \
                        '<table id="tbl-dev-refs">\n' \
                        ' <thead>\n' \
                        ' <tr><th>Short name</th><th>Description</th><th>Source file</th></tr>\n' \
                        ' </thead>\n' \
                        ' <tbody>\n'.encode('utf-8')
                first = False
            yield self.machine_row(name, desc, src)
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-dev-refs').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-dev-refs"), document.getElementById("tbl-dev-refs"));</script>\n'.encode('utf-8')
        # list slots where this device is an option
        first = True
        for name, desc, slot, opt, src in self.dbcurs.get_compatible_slots(id):
            if first:
                yield \
                        '<h2 id="heading-comp-slots">Compatible Slots</h2>\n' \
                        '<table id="tbl-comp-slots">\n' \
                        ' <thead>\n' \
                        ' <tr><th>Short name</th><th>Description</th><th>Slot</th><th>Choice</th><th>Source file</th></tr>\n' \
                        ' </thead>\n' \
                        ' <tbody>\n'.encode('utf-8')
                first = False
            yield htmltmpl.COMPATIBLE_SLOT_ROW.substitute(
                    machinehref=self.machine_href(name),
                    sourcehref=self.sourcefile_href(src),
                    shortname=htmlescape(name),
                    description=htmlescape(desc),
                    sourcefile=htmlescape(src),
                    slot=htmlescape(slot),
                    slotoption=htmlescape(opt)).encode('utf-8')
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-comp-slots').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-comp-slots"), document.getElementById("tbl-comp-slots"));</script>\n'.encode('utf-8')
        # list systems/devices that reference this device
        first = True
        for name, desc, src in self.dbcurs.get_device_references(id):
            if first:
                yield \
                        '<h2 id="heading-ref-by">Referenced By</h2>\n' \
                        '<table id="tbl-ref-by">\n' \
                        ' <thead>\n' \
                        ' <tr><th>Short name</th><th>Description</th><th>Source file</th></tr>\n' \
                        ' </thead>\n' \
                        ' <tbody>\n'.encode('utf-8')
                first = False
            yield self.machine_row(name, desc, src)
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-ref-by').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-ref-by"), document.getElementById("tbl-ref-by"));</script>\n'.encode('utf-8')
        yield '</html>\n'.encode('utf-8')

    def machine_row(self, shortname, description, sourcefile):
        # machines without a description use the "excluded" row template
        return (htmltmpl.MACHINE_ROW if description is not None else htmltmpl.EXCL_MACHINE_ROW).substitute(
                machinehref=self.machine_href(shortname),
                sourcehref=self.sourcefile_href(sourcefile),
                shortname=htmlescape(shortname),
                description=htmlescape(description or ''),
                sourcefile=htmlescape(sourcefile or '')).encode('utf-8')

    @staticmethod
    def sanitised_json(data):
        # escape angle brackets so the JSON can be embedded in a <script> block
        return json.dumps(data).replace('<', '\\u003c').replace('>', '\\u003e')
class SourceFileHandler(QueryPageHandler):
    """Serves source file listings and per-file machine tables (/sourcefile/...)."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(SourceFileHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        self.filename = self.environ['PATH_INFO']
        if self.filename and (self.filename[0] == '/'):
            self.filename = self.filename[1:]
        if not self.filename:
            if self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: 'chearset' -> 'charset' (three occurrences below too)
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.sourcefile_listing_page(None)
        else:
            id = self.dbcurs.get_sourcefile_id(self.filename)
            if id is None:
                # removed duplicated "('?' not in self.filename)" condition
                if ('*' not in self.filename) and ('?' not in self.filename):
                    # no exact match and no wildcards - try it as a directory prefix
                    self.filename += '*' if self.filename[-1] == '/' else '/*'
                    if not self.dbcurs.count_sourcefiles(self.filename):
                        self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                        return self.error_page(404)
                    elif self.environ['REQUEST_METHOD'] != 'GET':
                        self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                        return self.error_page(405)
                    else:
                        self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                        return self.sourcefile_listing_page(self.filename)
                else:
                    self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.sourcefile_page(id)

    def sourcefile_listing_page(self, pattern):
        """Generate a table of source files matching pattern (or all of them)."""
        if not pattern:
            title = heading = 'All Source Files'
        else:
            heading = self.linked_title(pattern)
            title = 'Source Files: ' + htmlescape(pattern)
        yield htmltmpl.SOURCEFILE_LIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading).encode('utf-8')
        for filename, machines in self.dbcurs.get_sourcefiles(pattern):
            yield htmltmpl.SOURCEFILE_LIST_ROW.substitute(
                    sourcefile=self.linked_title(filename, True),
                    machines=htmlescape('%d' % (machines, ))).encode('utf-8')
        yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-sourcefiles').encode('utf-8')

    def sourcefile_page(self, id):
        """Generate the machine table for a single source file."""
        yield htmltmpl.SOURCEFILE_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                filename=htmlescape(self.filename),
                title=self.linked_title(self.filename)).encode('utf-8')
        first = True
        for machine_info in self.dbcurs.get_sourcefile_machines(id):
            if first:
                yield \
                        '<table id="tbl-machines">\n' \
                        ' <thead>\n' \
                        ' <tr>\n' \
                        ' <th>Short name</th>\n' \
                        ' <th>Description</th>\n' \
                        ' <th>Year</th>\n' \
                        ' <th>Manufacturer</th>\n' \
                        ' <th>Runnable</th>\n' \
                        ' <th>Parent</th>\n' \
                        ' </tr>\n' \
                        ' </thead>\n' \
                        ' <tbody>\n'.encode('utf-8')
                first = False
            yield self.machine_row(machine_info)
        if first:
            yield '<p>No machines found.</p>\n'.encode('utf-8')
        else:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-machines').encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def linked_title(self, filename, linkfinal=False):
        """Render a source file path as per-directory breadcrumb links."""
        parts = filename.split('/')
        final = parts[-1]
        del parts[-1]
        uri = urlparse.urljoin(self.application_uri, 'sourcefile')
        title = ''
        for part in parts:
            uri = urlparse.urljoin(uri + '/', urlquote(part))
            title += '<a href="{0}">{1}</a>/'.format(htmlescape(uri, True), htmlescape(part))
        if linkfinal:
            uri = urlparse.urljoin(uri + '/', urlquote(final))
            return title + '<a href="{0}">{1}</a>'.format(htmlescape(uri, True), htmlescape(final))
        else:
            return title + final

    def machine_row(self, machine_info):
        return (htmltmpl.SOURCEFILE_ROW_PARENT if machine_info['cloneof'] is None else htmltmpl.SOURCEFILE_ROW_CLONE).substitute(
                machinehref=self.machine_href(machine_info['shortname']),
                parenthref=self.machine_href(machine_info['cloneof'] or '__invalid'),
                shortname=htmlescape(machine_info['shortname']),
                description=htmlescape(machine_info['description']),
                year=htmlescape(machine_info['year'] or ''),
                manufacturer=htmlescape(machine_info['manufacturer'] or ''),
                runnable=htmlescape('Yes' if machine_info['runnable'] else 'No'),
                parent=htmlescape(machine_info['cloneof'] or '')).encode('utf-8')
class SoftwareListHandler(QueryPageHandler):
    """Serves software list pages (/softwarelist[/<list>[/<software>]])."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(SoftwareListHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        # next two path components: software list short name, then software name
        self.shortname = wsgiref.util.shift_path_info(environ)
        self.software = wsgiref.util.shift_path_info(environ)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.software and ('*' not in self.software) and ('?' not in self.software):
            # exact software name - serve the software detail page
            software_info = self.dbcurs.get_software_details(self.shortname, self.software).fetchone()
            if not software_info:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                # fixed typo: 'chearset' -> 'charset' (three occurrences in this method)
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.software_page(software_info)
        elif self.software or (self.shortname and ('*' not in self.shortname) and ('?' not in self.shortname)):
            # software pattern, or exact list name - serve the list page
            softwarelist_info = self.dbcurs.get_softwarelist_details(self.shortname, self.software or None).fetchone()
            if not softwarelist_info:
                self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(404)
            elif self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.softwarelist_page(softwarelist_info, self.software or None)
        else:
            # no list specified, or list name contains wildcards - serve a listing
            if self.environ['REQUEST_METHOD'] != 'GET':
                self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
                return self.error_page(405)
            else:
                self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                return self.softwarelist_listing_page(self.shortname or None)

    def softwarelist_listing_page(self, pattern):
        """Generate a table of software lists matching pattern (or all of them)."""
        if not pattern:
            title = heading = 'All Software Lists'
        else:
            title = heading = 'Software Lists: ' + htmlescape(pattern)
        yield htmltmpl.SOFTWARELIST_LIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading).encode('utf-8')
        for shortname, description, total, supported, partiallysupported, unsupported in self.dbcurs.get_softwarelists(pattern):
            yield htmltmpl.SOFTWARELIST_LIST_ROW.substitute(
                    href=self.softwarelist_href(shortname),
                    shortname=htmlescape(shortname),
                    description=htmlescape(description),
                    total=htmlescape('%d' % (total, )),
                    supported=htmlescape('%.1f%%' % (supported * 100.0 / (total or 1), )),
                    partiallysupported=htmlescape('%.1f%%' % (partiallysupported * 100.0 / (total or 1), )),
                    unsupported=htmlescape('%.1f%%' % (unsupported * 100.0 / (total or 1), ))).encode('utf-8')
        yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-softwarelists').encode('utf-8')

    def softwarelist_page(self, softwarelist_info, pattern):
        """Generate the page for one software list, optionally filtered by pattern."""
        if not pattern:
            title = 'Software List: %s (%s)' % (htmlescape(softwarelist_info['description']), htmlescape(softwarelist_info['shortname']))
            heading = htmlescape(softwarelist_info['description'])
        else:
            title = 'Software List: %s (%s): %s' % (htmlescape(softwarelist_info['description']), htmlescape(softwarelist_info['shortname']), htmlescape(pattern))
            heading = '<a href="%s">%s</a>: %s' % (self.softwarelist_href(softwarelist_info['shortname']), htmlescape(softwarelist_info['description']), htmlescape(pattern))
        yield htmltmpl.SOFTWARELIST_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=title,
                heading=heading,
                shortname=htmlescape(softwarelist_info['shortname']),
                total=htmlescape('%d' % (softwarelist_info['total'], )),
                supported=htmlescape('%d' % (softwarelist_info['supported'], )),
                supportedpc=htmlescape('%.1f' % (softwarelist_info['supported'] * 100.0 / (softwarelist_info['total'] or 1), )),
                partiallysupported=htmlescape('%d' % (softwarelist_info['partiallysupported'], )),
                partiallysupportedpc=htmlescape('%.1f' % (softwarelist_info['partiallysupported'] * 100.0 / (softwarelist_info['total'] or 1), )),
                unsupported=htmlescape('%d' % (softwarelist_info['unsupported'], )),
                unsupportedpc=htmlescape('%.1f' % (softwarelist_info['unsupported'] * 100.0 / (softwarelist_info['total'] or 1), ))).encode('utf-8')
        if softwarelist_info['notes'] is not None:
            # render notes as paragraphs: blank lines separate paragraphs,
            # other line breaks become <br />
            yield htmltmpl.SOFTWARELIST_NOTES_PROLOGUE.substitute().encode('utf-8')
            first = True
            for line in softwarelist_info['notes'].strip().splitlines():
                if line:
                    yield (('<p>%s' if first else '<br />\n%s') % (htmlescape(line), )).encode('utf-8')
                    first = False
                elif not first:
                    yield '</p>\n'.encode('utf-8')
                    first = True
            if not first:
                yield '</p>\n'.encode('utf-8')
            yield htmltmpl.SOFTWARELIST_NOTES_EPILOGUE.substitute().encode('utf-8')
        first = True
        for machine_info in self.dbcurs.get_softwarelist_machines(softwarelist_info['id']):
            if first:
                yield htmltmpl.SOFTWARELIST_MACHINE_TABLE_HEADER.substitute().encode('utf-8')
                first = False
            yield htmltmpl.SOFTWARELIST_MACHINE_TABLE_ROW.substitute(
                    machinehref=self.machine_href(machine_info['shortname']),
                    shortname=htmlescape(machine_info['shortname']),
                    description=htmlescape(machine_info['description']),
                    year=htmlescape(machine_info['year'] or ''),
                    manufacturer=htmlescape(machine_info['manufacturer'] or ''),
                    status=htmlescape(machine_info['status'])).encode('utf-8')
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-machines').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-machines"), document.getElementById("tbl-machines"));</script>\n'.encode('utf-8')
        first = True
        for software_info in self.dbcurs.get_softwarelist_software(softwarelist_info['id'], self.software or None):
            if first:
                yield htmltmpl.SOFTWARELIST_SOFTWARE_TABLE_HEADER.substitute().encode('utf-8')
                first = False
            yield self.software_row(software_info)
        if first:
            yield '<p>No software found.</p>\n'.encode('utf-8')
        else:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-software').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-software"), document.getElementById("tbl-software"));</script>\n'.encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def software_page(self, software_info):
        """Generate the detail page for a single software item."""
        yield htmltmpl.SOFTWARE_PROLOGUE.substitute(
                assets=htmlescape(urlparse.urljoin(self.application_uri, 'static'), True),
                title=htmlescape(software_info['description']),
                heading=htmlescape(software_info['description']),
                softwarelisthref=self.softwarelist_href(self.shortname),
                softwarelistdescription=htmlescape(software_info['softwarelistdescription']),
                softwarelist=htmlescape(self.shortname),
                shortname=htmlescape(software_info['shortname']),
                year=htmlescape(software_info['year']),
                publisher=htmlescape(software_info['publisher'])).encode('utf-8')
        if software_info['parent'] is not None:
            yield (' <tr><th>Parent:</th><td><a href="%s">%s</a></td>\n' % (self.software_href(software_info['parentsoftwarelist'], software_info['parent']), htmlescape(software_info['parentdescription']))).encode('utf-8')
        yield (' <tr><th>Supported:</th><td>%s</td>\n' % (self.format_supported(software_info['supported']), )).encode('utf-8')
        for name, value in self.dbcurs.get_software_info(software_info['id']):
            yield (' <tr><th>%s:</th><td>%s</td>\n' % (htmlescape(name), htmlescape(value))).encode('utf-8')
        yield '</table>\n\n'.encode('utf-8')
        first = True
        for clone_info in self.dbcurs.get_software_clones(software_info['id']):
            if first:
                yield htmltmpl.SOFTWARE_CLONES_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield self.clone_row(clone_info)
        if not first:
            yield htmltmpl.SORTABLE_TABLE_EPILOGUE.substitute(id='tbl-clones').encode('utf-8')
            yield '<script>make_collapsible(document.getElementById("heading-clones"), document.getElementById("tbl-clones"));</script>\n'.encode('utf-8')
        if software_info['notes'] is not None:
            # same paragraph/line-break rendering as softwarelist_page
            yield htmltmpl.SOFTWARE_NOTES_PROLOGUE.substitute().encode('utf-8')
            first = True
            for line in software_info['notes'].strip().splitlines():
                if line:
                    yield (('<p>%s' if first else '<br />\n%s') % (htmlescape(line), )).encode('utf-8')
                    first = False
                elif not first:
                    yield '</p>\n'.encode('utf-8')
                    first = True
            if not first:
                yield '</p>\n'.encode('utf-8')
            yield htmltmpl.SOFTWARE_NOTES_EPILOGUE.substitute().encode('utf-8')
        parts = self.dbcurs.get_software_parts(software_info['id']).fetchall()
        first = True
        for id, partname, interface, part_id in parts:
            if first:
                yield htmltmpl.SOFTWARE_PARTS_PROLOGUE.substitute().encode('utf-8')
                first = False
            yield htmltmpl.SOFTWARE_PART_PROLOGUE.substitute(
                    heading=htmlescape(('%s (%s)' % (part_id, partname)) if part_id is not None else partname),
                    shortname=htmlescape(partname),
                    interface=htmlescape(interface)).encode('utf-8')
            for name, value in self.dbcurs.get_softwarepart_features(id):
                yield (' <tr><th>%s:</th><td>%s</td>\n' % (htmlescape(name), htmlescape(value))).encode('utf-8')
            yield ' </table>\n\n'.encode('utf-8')
        if not first:
            yield htmltmpl.SOFTWARE_PARTS_EPILOGUE.substitute().encode('utf-8')
        yield '</body>\n</html>\n'.encode('utf-8')

    def software_row(self, software_info):
        parent = software_info['parent']
        return htmltmpl.SOFTWARELIST_SOFTWARE_ROW.substitute(
                softwarehref=self.software_href(self.shortname, software_info['shortname']),
                shortname=htmlescape(software_info['shortname']),
                description=htmlescape(software_info['description']),
                year=htmlescape(software_info['year']),
                publisher=htmlescape(software_info['publisher']),
                supported=self.format_supported(software_info['supported']),
                parts=htmlescape('%d' % (software_info['parts'], )),
                baddumps=htmlescape('%d' % (software_info['baddumps'], )),
                parent='<a href="%s">%s</a>' % (self.software_href(software_info['parentsoftwarelist'], parent), htmlescape(parent)) if parent is not None else '').encode('utf-8')

    def clone_row(self, clone_info):
        return htmltmpl.SOFTWARE_CLONES_ROW.substitute(
                href=self.software_href(clone_info['softwarelist'], clone_info['shortname']),
                shortname=htmlescape(clone_info['shortname']),
                description=htmlescape(clone_info['description']),
                year=htmlescape(clone_info['year']),
                publisher=htmlescape(clone_info['publisher']),
                supported=self.format_supported(clone_info['supported'])).encode('utf-8')

    @staticmethod
    def format_supported(supported):
        # 0 = fully supported, 1 = partially supported, anything else = no
        return 'Yes' if supported == 0 else 'Partial' if supported == 1 else 'No'
class RomIdentHandler(QueryPageHandler):
    """Serves the ROM identification form page (/romident)."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        # NOTE(review): super() is invoked with QueryPageHandler as the first
        # argument, which skips QueryPageHandler.__init__ in the MRO; a cursor
        # is then opened here directly.  This looks deliberate, but confirm
        # intent before "fixing" it to super(RomIdentHandler, self).
        super(QueryPageHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)
        self.dbcurs = app.dbconn.cursor()

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            # fixed typo: 'chearset' -> 'charset'
            self.start_response('200 OK', [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.form_page()

    def form_page(self):
        yield htmltmpl.ROMIDENT_PAGE.substitute(
                app=self.js_escape(htmlescape(self.application_uri, True)),
                assets=self.js_escape(htmlescape(urlparse.urljoin(self.application_uri, 'static'), True))).encode('utf-8')
class BiosRpcHandler(MachineRpcHandlerBase):
    """JSON endpoint listing BIOS sets for a machine."""

    def data_page(self, machine):
        biossets = { }
        for name, description, isdefault in self.dbcurs.get_biossets(machine):
            biossets[name] = { 'description': description, 'isdefault': bool(isdefault) }
        yield json.dumps(biossets).encode('utf-8')
class FlagsRpcHandler(MachineRpcHandlerBase):
    """JSON endpoint reporting emulation status flags for a machine."""

    def data_page(self, machine):
        payload = json.dumps(self.flags_data(machine))
        yield payload.encode('utf-8')
class SlotsRpcHandler(MachineRpcHandlerBase):
    """JSON endpoint reporting slot options and defaults for a machine."""

    def data_page(self, machine):
        payload = json.dumps(self.slot_data(machine))
        yield payload.encode('utf-8')
class SoftwareListsRpcHandler(MachineRpcHandlerBase):
    """JSON endpoint reporting software lists supported by a machine."""

    def data_page(self, machine):
        payload = json.dumps(self.softwarelist_data(machine))
        yield payload.encode('utf-8')
class RomDumpsRpcHandler(QueryPageHandler):
    """JSON endpoint matching a ROM dump by CRC and SHA-1 (?crc=...&sha1=...)."""

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(RomDumpsRpcHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            try:
                args = urlparse.parse_qs(self.environ['QUERY_STRING'], keep_blank_values=True, strict_parsing=True)
                crc = args.get('crc')
                sha1 = args.get('sha1')
                if (len(args) == 2) and (crc is not None) and (len(crc) == 1) and (sha1 is not None) and (len(sha1) == 1):
                    crc = int(crc[0], 16)
                    sha1 = sha1[0]
                    # fixed typo: 'chearset' -> 'charset'
                    self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.data_page(crc, sha1)
            except Exception:
                # malformed query string or non-hexadecimal CRC - fall through
                # to the error response (was 'except BaseException as e', which
                # also swallowed KeyboardInterrupt/SystemExit and never used e)
                pass
            self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(500)

    def data_page(self, crc, sha1):
        """Yield a JSON document listing machines and software matching the dump."""
        machines = { }
        for shortname, description, label, bad in self.dbcurs.get_rom_dumps(crc, sha1):
            machine = machines.get(shortname)
            if machine is None:
                machine = { 'description': description, 'matches': [ ] }
                machines[shortname] = machine
            machine['matches'].append({ 'name': label, 'bad': bool(bad) })
        software = { }
        for softwarelist, softwarelistdescription, shortname, description, part, part_id, label, bad in self.dbcurs.get_software_rom_dumps(crc, sha1):
            listinfo = software.get(softwarelist)
            if listinfo is None:
                listinfo = { 'description': softwarelistdescription, 'software': { } }
                software[softwarelist] = listinfo
            softwareinfo = listinfo['software'].get(shortname)
            if softwareinfo is None:
                softwareinfo = { 'description': description, 'parts': { } }
                listinfo['software'][shortname] = softwareinfo
            partinfo = softwareinfo['parts'].get(part)
            if partinfo is None:
                partinfo = { 'matches': [ ] }
                if part_id is not None:
                    partinfo['description'] = part_id
                softwareinfo['parts'][part] = partinfo
            partinfo['matches'].append({ 'name': label, 'bad': bool(bad) })
        result = { 'machines': machines, 'software': software }
        yield json.dumps(result).encode('utf-8')
class DiskDumpsRpcHandler(QueryPageHandler):
    """RPC endpoint that looks up disk (CHD) dumps by SHA-1 digest.

    Expects a GET request with exactly one query argument, ``sha1``, and
    responds with a JSON document listing the machines and software items
    whose disk images match.
    """

    def __init__(self, app, application_uri, environ, start_response, **kwargs):
        super(DiskDumpsRpcHandler, self).__init__(app=app, application_uri=application_uri, environ=environ, start_response=start_response, **kwargs)

    def __iter__(self):
        if self.environ['PATH_INFO']:
            # This endpoint has no sub-resources.
            self.start_response('404 %s' % (self.STATUS_MESSAGE[404], ), [('Content-type', 'text/html; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(404)
        elif self.environ['REQUEST_METHOD'] != 'GET':
            self.start_response('405 %s' % (self.STATUS_MESSAGE[405], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(405)
        else:
            try:
                args = urlparse.parse_qs(self.environ['QUERY_STRING'], keep_blank_values=True, strict_parsing=True)
                sha1 = args.get('sha1')
                if (len(args) == 1) and (sha1 is not None) and (len(sha1) == 1):
                    sha1 = sha1[0]
                    # fixed: the MIME parameter was misspelled 'chearset'
                    self.start_response('200 OK', [('Content-type', 'application/json; charset=utf-8'), ('Cache-Control', 'public, max-age=3600')])
                    return self.data_page(sha1)
            except Exception:
                # Malformed query strings land here and fall through to the
                # generic 500 below.  Narrowed from BaseException so
                # KeyboardInterrupt/SystemExit propagate.
                pass
            self.start_response('500 %s' % (self.STATUS_MESSAGE[500], ), [('Content-type', 'text/html; charset=utf-8'), ('Accept', 'GET, HEAD, OPTIONS'), ('Cache-Control', 'public, max-age=3600')])
            return self.error_page(500)

    def data_page(self, sha1):
        # Group machine disk matches by machine short name.
        machines = { }
        for shortname, description, label, bad in self.dbcurs.get_disk_dumps(sha1):
            machine = machines.get(shortname)
            if machine is None:
                machine = { 'description': description, 'matches': [ ] }
                machines[shortname] = machine
            machine['matches'].append({ 'name': label, 'bad': bool(bad) })
        # Group software disk matches by list, then item, then part.
        software = { }
        for softwarelist, softwarelistdescription, shortname, description, part, part_id, label, bad in self.dbcurs.get_software_disk_dumps(sha1):
            listinfo = software.get(softwarelist)
            if listinfo is None:
                listinfo = { 'description': softwarelistdescription, 'software': { } }
                software[softwarelist] = listinfo
            softwareinfo = listinfo['software'].get(shortname)
            if softwareinfo is None:
                softwareinfo = { 'description': description, 'parts': { } }
                listinfo['software'][shortname] = softwareinfo
            partinfo = softwareinfo['parts'].get(part)
            if partinfo is None:
                partinfo = { 'matches': [ ] }
                if part_id is not None:
                    partinfo['description'] = part_id
                softwareinfo['parts'][part] = partinfo
            partinfo['matches'].append({ 'name': label, 'bad': bool(bad) })
        result = { 'machines': machines, 'software': software }
        yield json.dumps(result).encode('utf-8')
class MiniMawsApp(object):
    """Top-level WSGI application: dispatches each request to a page handler."""

    # Characters that must be backslash-escaped in JavaScript string literals.
    JS_ESCAPE = re.compile('([\"\'\\\\])')
    # Registered /rpc/<service> endpoints.
    RPC_SERVICES = {
            'bios':          BiosRpcHandler,
            'flags':         FlagsRpcHandler,
            'slots':         SlotsRpcHandler,
            'softwarelists': SoftwareListsRpcHandler,
            'romdumps':      RomDumpsRpcHandler,
            'diskdumps':     DiskDumpsRpcHandler }

    def __init__(self, dbfile, **kwargs):
        super(MiniMawsApp, self).__init__(**kwargs)
        # One shared read-only database connection for all requests.
        self.dbconn = dbaccess.QueryConnection(dbfile)
        # Static assets live in an "assets" directory next to this module.
        self.assetsdir = os.path.join(os.path.dirname(inspect.getfile(self.__class__)), 'assets')
        if not mimetypes.inited:
            mimetypes.init()

    def __call__(self, environ, start_response):
        application_uri = wsgiref.util.application_uri(environ)
        if not application_uri.endswith('/'):
            application_uri += '/'
        module = wsgiref.util.shift_path_info(environ)
        # Handlers that all share the same constructor signature.
        plain_routes = {
                'machine':      MachineHandler,
                'sourcefile':   SourceFileHandler,
                'softwarelist': SoftwareListHandler,
                'romident':     RomIdentHandler }
        handler = plain_routes.get(module)
        if handler is not None:
            return handler(self, application_uri, environ, start_response)
        if module == 'static':
            return AssetHandler(self.assetsdir, self, application_uri, environ, start_response)
        if module == 'rpc':
            service = wsgiref.util.shift_path_info(environ)
            if not service:
                return ErrorPageHandler(403, self, application_uri, environ, start_response)
            if service in self.RPC_SERVICES:
                return self.RPC_SERVICES[service](self, application_uri, environ, start_response)
            return ErrorPageHandler(404, self, application_uri, environ, start_response)
        if not module:
            # Bare application root is not served.
            return ErrorPageHandler(403, self, application_uri, environ, start_response)
        return ErrorPageHandler(404, self, application_uri, environ, start_response)

    def js_escape(self, str):
        # Backslash-escape quotes/backslashes, then encode NUL explicitly.
        escaped = self.JS_ESCAPE.sub('\\\\\\1', str)
        return escaped.replace('\0', '\\0')
lama_cleaner/ldm/utils.py | gucs/lama-cleaner | 487 | 6612531 | import math
import torch
import numpy as np
def make_beta_schedule(device, schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    """Build a diffusion beta schedule of length ``n_timestep`` as a NumPy array.

    Supported schedules: "linear" (linear in sqrt-beta space), "cosine"
    (squared-cosine alpha-bar schedule), "sqrt_linear" and "sqrt".
    ``device`` is only used by the "cosine" branch.
    """
    if schedule == "linear":
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
    elif schedule == "cosine":
        timesteps = (torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s).to(device)
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2).to(device)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Clamp in torch rather than np.clip: the original round-tripped the
        # tensor through NumPy, which fails for non-CPU tensors and could
        # leave `betas` as an ndarray with no .numpy() method.
        betas = torch.clamp(betas, min=0, max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == "sqrt":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    # .cpu() makes the final conversion safe regardless of device.
    return betas.cpu().numpy()
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Compute the (sigmas, alphas, alphas_prev) schedule for the DDIM sampler.

    ``alphacums`` are the cumulative alpha products of the full DDPM chain;
    ``ddim_timesteps`` selects the subset of steps actually sampled.
    """
    # Cumulative alpha products at the selected steps; the "previous" value
    # is shifted by one step, with alphacums[0] reused for the first entry.
    alphas = alphacums[ddim_timesteps]
    prev_steps = ddim_timesteps[:-1]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[prev_steps].tolist())
    # Sigma schedule per https://arxiv.org/abs/2010.02502
    variance_ratio = (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)
    sigmas = eta * np.sqrt(variance_ratio)
    if verbose:
        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
        print(f'For the chosen value of eta, which is {eta}, '
              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
    return sigmas, alphas, alphas_prev
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
    """Select the subset of DDPM timesteps used by the DDIM sampler.

    'uniform' strides evenly through the DDPM steps; 'quad' spaces them
    quadratically (denser near t=0).  The selected steps are returned
    shifted by one, as the samplers expect.
    """
    if ddim_discr_method == 'uniform':
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, stride)))
    elif ddim_discr_method == 'quad':
        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
    else:
        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
    # Shift by one so the final alpha values line up during sampling.
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f'Selected timesteps for ddim sampler: {steps_out}')
    return steps_out
def noise_like(shape, device, repeat=False):
    """Draw standard-normal noise of ``shape`` on ``device``.

    With ``repeat=True`` a single sample is drawn and tiled across the
    leading (batch) dimension, so every batch element gets the same noise.
    """
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
def timestep_embedding(device, timesteps, dim, max_period=10000, repeat_only=False):
    """Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element
        (may be fractional).
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.

    NOTE(review): ``repeat_only`` is accepted but never used in the body;
    kept for interface compatibility.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to 1/max_period.
    exponent = -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(exponent).to(device=device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        # Pad odd dimensions with a zero column.
        zero_col = torch.zeros_like(embedding[:, :1])
        embedding = torch.cat([embedding, zero_col], dim=-1)
    return embedding
| import math
import torch
import numpy as np
def make_beta_schedule(device, schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if schedule == "linear":
betas = (
torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
)
elif schedule == "cosine":
timesteps = (torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s).to(device)
alphas = timesteps / (1 + cosine_s) * np.pi / 2
alphas = torch.cos(alphas).pow(2).to(device)
alphas = alphas / alphas[0]
betas = 1 - alphas[1:] / alphas[:-1]
betas = np.clip(betas, a_min=0, a_max=0.999)
elif schedule == "sqrt_linear":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
elif schedule == "sqrt":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
else:
raise ValueError(f"schedule '{schedule}' unknown.")
return betas.numpy()
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
# according the the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
print(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
return sigmas, alphas, alphas_prev
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == 'quad':
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
else:
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
return steps_out
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
def timestep_embedding(device, timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
return embedding
| en | 0.74016 | # select alphas for computing the variance schedule # according the the formula provided in https://arxiv.org/abs/2010.02502 # assert ddim_timesteps.shape[0] == num_ddim_timesteps # add one to get the final alpha values right (the ones from first scale to data during sampling) Create sinusoidal timestep embeddings. :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional. :param dim: the dimension of the output. :param max_period: controls the minimum frequency of the embeddings. :return: an [N x dim] Tensor of positional embeddings. | 2.538543 | 3 |
website_blueprints/public_api.py | CyberSafe-Labs/CyberSearch-Engine | 1 | 6612532 | from flask import Blueprint, render_template
# Blueprint collecting the site's public, mostly-static informational pages.
public_api = Blueprint('public_api', __name__)
@public_api.route('/about')
def render_about():
    """Render the 'about' page template."""
    return render_template('pages/about.html')
@public_api.route('/lite')
def render_home_lite():
    """Render the lightweight home-page template."""
    return render_template('pages/home_lite.html')
@public_api.route('/contact')
def render_contact():
    """Render the contact page template."""
    return render_template('pages/contact.html')
@public_api.route('/privacy')
def render_privacy():
    """Render the privacy page template."""
    return render_template('pages/privacy.html')
@public_api.route('/default')
def render_make_us_your_default():
    """Render the 'make us your default' page template."""
    return render_template('pages/make_us_your_default.html')
@public_api.route('/home_content')
def render_home():
    """Render the home-content page template."""
    return render_template('pages/home_content.html')
@public_api.route('/advanced_settings')
def render_advanced_settings():
    """Render the advanced-settings page template."""
    return render_template('pages/advanced_settings.html')
| from flask import Blueprint, render_template
public_api = Blueprint('public_api', __name__)
@public_api.route('/about')
def render_about():
return render_template('pages/about.html')
@public_api.route('/lite')
def render_home_lite():
return render_template('pages/home_lite.html')
@public_api.route('/contact')
def render_contact():
return render_template('pages/contact.html')
@public_api.route('/privacy')
def render_privacy():
return render_template('pages/privacy.html')
@public_api.route('/default')
def render_make_us_your_default():
return render_template('pages/make_us_your_default.html')
@public_api.route('/home_content')
def render_home():
return render_template('pages/home_content.html')
@public_api.route('/advanced_settings')
def render_advanced_settings():
return render_template('pages/advanced_settings.html')
| none | 1 | 2.180071 | 2 | |
runtests.py | atdsaa/django-xml | 34 | 6612533 | #!/usr/bin/env python
import os
import sys
# Point Django at the test settings before django.conf is imported below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'djxml.tests.settings'
import django
from django.core.management import execute_from_command_line
# Give feedback on used versions
sys.stderr.write('Using Python version %s from %s\n' % (sys.version[:5], sys.executable))
sys.stderr.write('Using Django version %s from %s\n' % (
    django.get_version(),
    os.path.dirname(os.path.abspath(django.__file__))))
def runtests():
    """Run the 'djxml' test suite via Django's ``test`` management command,
    forwarding any extra command-line arguments to the runner."""
    argv = sys.argv[:1] + ['test', 'djxml', '--traceback', '--verbosity=1'] + sys.argv[1:]
    execute_from_command_line(argv)
if __name__ == '__main__':
    runtests()
| #!/usr/bin/env python
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'djxml.tests.settings'
import django
from django.core.management import execute_from_command_line
# Give feedback on used versions
sys.stderr.write('Using Python version %s from %s\n' % (sys.version[:5], sys.executable))
sys.stderr.write('Using Django version %s from %s\n' % (
django.get_version(),
os.path.dirname(os.path.abspath(django.__file__))))
def runtests():
argv = sys.argv[:1] + ['test', 'djxml', '--traceback', '--verbosity=1'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
| en | 0.355768 | #!/usr/bin/env python # Give feedback on used versions | 1.961841 | 2 |
examples/recScope.py | jaymz07/Adafruit_Python_MCP3008 | 0 | 6612534 | <reponame>jaymz07/Adafruit_Python_MCP3008
# Author: <NAME>
# License: Public Domain
# Rudimentary Oscilloscope (if you could call it that).
# Plots Voltage vs time using the matplotlib library.
# Note that matplotlib and associated requirements must be installed
# This is also a test of the acquistion speeds available using this library.
# On my Raspberry Pi, I get ~12K Samples per second.
# This is pretty dismal compared to a 44K samples per second
# sound card.
import time
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import matplotlib.pyplot as plt
import numpy as np
#Constants for voltage estimate
railVoltage = 3.325
adcQuantizations = 2**10 #10 Bit Chip
# Volts per ADC count: reference voltage divided by the number of levels.
convFactor = railVoltage/adcQuantizations
# Software SPI configuration:
#CLK  = 18
#MISO = 23
#MOSI = 24
#CS   = 25
#mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Hardware SPI configuration:
SPI_PORT   = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE,max_speed_hz = 3900000))
#Note that we have set the clock speed higher than the stock examples.
#The ADC chip datasheet quotes a 3.9MHz Clock speed if Vdd is set to 5V.
#The closest convenient SPI frequency is 3.9MHz
#Lists keeping track of data coming from ADC as well as time stamps
graph_t, graph_y = [],[]
#Start time
t0 = time.time()
print('Reading MCP3008 values, press Ctrl-C to quit...')
# Main program loop.
try:
    while True:
        # Read all the ADC channel values in a list.
        values = [0]*8
        # for i in range(8):
        #     # The read_adc function will get the value of the specified channel (0-7).
        #     values[i] = mcp.read_adc(i)
        #Note that we are only reading from one channel here (ch 0)
        # values[2:] stay zero; only single-ended ch 0 and differential pair 0
        # are sampled each pass.
        values[0] = mcp.read_adc(0)
        values[1] = mcp.read_adc_difference(0)
        #Append values to lists for later plotting.
        graph_t.append(time.time() - t0)
        graph_y.append(values)
# Catch Exception thrown by Crtl-C and plot
except KeyboardInterrupt:
    # Convert raw counts to volts for both captured channels and plot them.
    plt.plot(graph_t,np.array(graph_y)[:,0]*convFactor)
    plt.plot(graph_t,np.array(graph_y)[:,1]*convFactor)
    N = len(graph_t)
    strFormat = (N, graph_t[-1]*1000, N/graph_t[-1])
    #print sample rate achieved.
    print("Collected %d samples in %.2f ms (%.2f samples per sec)" % strFormat )
    plt.xlabel('Time (s)')
    plt.ylabel('Voltage (V)')
    plt.show()
# License: Public Domain
# Rudimentary Oscilloscope (if you could call it that).
# Plots Voltage vs time using the matplotlib library.
# Note that matplotlib and associated requirements must be installed
# This is also a test of the acquistion speeds available using this library.
# On my Raspberry Pi, I get ~12K Samples per second.
# This is pretty dismal compared to a 44K samples per second
# sound card.
import time
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import matplotlib.pyplot as plt
import numpy as np
#Constants for voltage estimate
railVoltage = 3.325
adcQuantizations = 2**10 #10 Bit Chip
convFactor = railVoltage/adcQuantizations
# Software SPI configuration:
#CLK = 18
#MISO = 23
#MOSI = 24
#CS = 25
#mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE,max_speed_hz = 3900000))
#Note that we have set the clock speed higher than the stock examples.
#The ADC chip datasheet quotes a 3.9MHz Clock speed if Vdd is set to 5V.
#The closest convenient SPI frequency is 3.9MHz
#Lists keeping track of data coming from ADC as well as time stamps
graph_t, graph_y = [],[]
#Start time
t0 = time.time()
print('Reading MCP3008 values, press Ctrl-C to quit...')
# Main program loop.
try:
while True:
# Read all the ADC channel values in a list.
values = [0]*8
# for i in range(8):
# # The read_adc function will get the value of the specified channel (0-7).
# values[i] = mcp.read_adc(i)
#Note that we are only reading from one channel here (ch 0)
values[0] = mcp.read_adc(0)
values[1] = mcp.read_adc_difference(0)
#Append values to lists for later plotting.
graph_t.append(time.time() - t0)
graph_y.append(values)
# Catch Exception thrown by Crtl-C and plot
except KeyboardInterrupt:
plt.plot(graph_t,np.array(graph_y)[:,0]*convFactor)
plt.plot(graph_t,np.array(graph_y)[:,1]*convFactor)
N = len(graph_t)
strFormat = (N, graph_t[-1]*1000, N/graph_t[-1])
#print sample rate achieved.
print("Collected %d samples in %.2f ms (%.2f samples per sec)" % strFormat )
plt.xlabel('Time (s)')
plt.ylabel('Voltage (V)')
plt.show() | en | 0.828346 | # Author: <NAME> # License: Public Domain # Rudimentary Oscilloscope (if you could call it that). # Plots Voltage vs time using the matplotlib library. # Note that matplotlib and associated requirements must be installed # This is also a test of the acquistion speeds available using this library. # On my Raspberry Pi, I get ~12K Samples per second. # This is pretty dismal compared to a 44K samples per second # sound card. # Import SPI library (for hardware SPI) and MCP3008 library. #Constants for voltage estimate #10 Bit Chip # Software SPI configuration: #CLK = 18 #MISO = 23 #MOSI = 24 #CS = 25 #mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI) # Hardware SPI configuration: #Note that we have set the clock speed higher than the stock examples. #The ADC chip datasheet quotes a 3.9MHz Clock speed if Vdd is set to 5V. #The closest convenient SPI frequency is 3.9MHz #Lists keeping track of data coming from ADC as well as time stamps #Start time # Main program loop. # Read all the ADC channel values in a list. # for i in range(8): # # The read_adc function will get the value of the specified channel (0-7). # values[i] = mcp.read_adc(i) #Note that we are only reading from one channel here (ch 0) #Append values to lists for later plotting. # Catch Exception thrown by Crtl-C and plot #print sample rate achieved. | 2.838551 | 3 |
proteotools/search.py | kevinkovalchik/proteotools | 0 | 6612535 | from proteotools import COMET, TANDEM, MSGF, TPP
from pathlib import Path
from subprocess import Popen, SubprocessError
from proteotools.software import check_for_tandem, check_for_comet, check_for_msgfplus
import proteotools.tpp as tpp
from typing import List
def comet(parameter_file, fasta, mzml_files) -> List[str]:
    """Run a Comet search on one or more mzML files.

    :param parameter_file: path to the Comet parameter file
    :param fasta: path to the sequence database
    :param mzml_files: a single mzML path or a list of them
    :return: list of the resulting ``*-comet.pepXML`` file paths
    :raises SubprocessError: if Comet exits with a non-zero status
    """
    check_for_comet()
    pepxml_results = []
    if isinstance(mzml_files, str):
        mzml_files = [mzml_files]
    for mzml in mzml_files:
        # Comet writes output next to the input, using the -N basename.
        name = Path(mzml).parent / (Path(mzml).stem + '-comet')
        # NOTE(review): building argv by splitting an f-string breaks on
        # paths containing spaces; kept for consistency with the other
        # search wrappers in this module.
        command = f'{COMET} -D{fasta} -P{parameter_file} -N{name} {mzml}'.split()
        p = Popen(command)
        _ = p.communicate()
        if p.returncode != 0:
            raise SubprocessError('Something went wrong while running Comet. Inspect the above output.')
        # Rename Comet's ".pep.xml" output to the ".pepXML" extension the
        # rest of the pipeline expects.  The original joined name.parent
        # with the *full* path string, which duplicated directories for
        # relative input paths.
        Path(f'{name}.pep.xml').rename(Path(f'{name}.pepXML'))
        pepxml_results.append(str(mzml).replace('.mzML', '-comet.pepXML'))
    return pepxml_results
def msgfplus(parameter_file, fasta, mzml_files, decoy_prefix: str = 'rev_', convert_to_pepxml: bool = True,
             memory: str = '6000M') -> List[str]:
    """Run MS-GF+ on one or more mzML files and (optionally) convert the
    mzIdentML output to pepXML via the TPP's idconvert.

    :param parameter_file: MS-GF+ configuration file (passed with -conf)
    :param fasta: sequence database
    :param mzml_files: a single mzML path or a list of them
    :param decoy_prefix: decoy accession prefix passed to -decoy
    :param convert_to_pepxml: also produce ``*-msgf_plus.pepXML`` files
    :param memory: JVM heap size (java -Xmx)
    :return: list of the expected ``*-msgf_plus.pepXML`` paths
    :raises SubprocessError: if MS-GF+ or idconvert exits non-zero
    """
    check_for_msgfplus()
    pepxml_results = []
    if isinstance(mzml_files, str):
        mzml_files = [mzml_files]
    for mzml in mzml_files:
        name = Path(mzml).stem
        mzid = Path(mzml).parent / (name + '-msgf_plus.mzid')
        # NOTE(review): argv built by splitting an f-string breaks on paths
        # with spaces.
        command = f'java -Xmx{memory} -jar {MSGF} -conf {parameter_file} -decoy {decoy_prefix} -tda 0 ' \
                  f'-d {fasta} -o {mzid} -s {mzml}'.split()
        p = Popen(command)
        _ = p.communicate()
        if p.returncode != 0:
            raise SubprocessError('Something went wrong while running MS-GF+. Inspect the above output.')
        if convert_to_pepxml:
            # idconvert runs inside the TPP Singularity image; the result
            # directory must be bind-mounted into the container.
            command = f'singularity exec -B {Path(mzid).parent} {TPP} idconvert {mzid} --pepXML ' \
                      f'-o {Path(mzid).parent} -e -msgf_plus.pepXML'.split()
            p = Popen(command)
            _ = p.communicate()
            if p.returncode != 0:
                raise SubprocessError('Something went wrong while running idconvert. Inspect the above output.')
            pepxml_results.append(str(mzml).replace('.mzML', '-msgf_plus.pepXML'))
    return pepxml_results
def tandem(parameter_file,
           fasta,
           ms_files,
           convert_to_mgf: bool = True,
           overwrite_existing_mgf: bool = False) -> List[str]:
    """Run an X! Tandem search (via ``runtandem``) on one or more MS files.

    Inputs are converted to MGF first unless they already are MGF; the
    resulting ``*.t.xml`` files are converted to pepXML with Tandem2XML.

    :param parameter_file: runtandem/X! Tandem parameter file
    :param fasta: sequence database
    :param ms_files: a single MS data file path or a list of them
    :param convert_to_mgf: convert non-MGF inputs to MGF with msconvert
    :param overwrite_existing_mgf: redo the conversion even if an MGF exists
    :return: list of the resulting ``*-tandem.pepXML`` paths
    :raises SubprocessError: if runtandem exits with a non-zero status
    """
    check_for_tandem()
    # Normalise a single path to a one-element list *before* touching
    # ms_files[0]: the original computed output_dir first, so a lone string
    # argument would have indexed its first character as a path.
    if isinstance(ms_files, str):
        ms_files = [ms_files]
    output_dir = Path(ms_files[0]).expanduser().parent
    ms_file_ext = Path(ms_files[0]).suffix
    if convert_to_mgf and ms_file_ext not in ['.mfg', '.MGF']:
        for ms_file in ms_files:
            if (not overwrite_existing_mgf) and Path(ms_file).with_suffix('.mgf').exists():
                print(f'MGF version of {ms_file} found. Using: {Path(ms_file).with_suffix(".mgf")}')
                continue
            print(f'Converting {ms_file} to MGF format')
            tpp.run_tool('msconvert', f'-o {Path(ms_file).parent} --mgf {ms_file}', Path(ms_file).parent)
        ms_files = [str(Path(x).with_suffix('.mgf')) for x in ms_files]
    # we don't convert the tandem xml files to pepxml here. the output doesn't seem to be compatible with TPP tools
    command = f'runtandem -i {parameter_file} -db {fasta} --noconvert --overwrite -o {output_dir} --tandem.exe {TANDEM} -v 3 ' \
              f'{" ".join(ms_files)}'.split()
    p = Popen(command)
    _ = p.communicate()
    if p.returncode != 0:
        raise SubprocessError('Something went wrong while running X! Tandem. Inspect the above output.')
    pepxml_results = []
    # convert to pepXML using Tandem2XML
    for ms_file in ms_files:
        txml = Path(ms_file).with_suffix('.t.xml')  # get the name of the tandem XML file
        t_pepxml = str(txml).replace('.t.xml', '-tandem.pepXML')  # this is the name for the pepXML file we will create
        bind_point = Path(ms_file).parent
        tpp.run_tool('Tandem2XML',
                     f'{txml} {t_pepxml}',
                     path_to_bind=bind_point)
        pepxml_results.append(t_pepxml)
    return pepxml_results
def run_all_with_defaults(comet_parameters,
                          msgfplus_parameters,
                          tandem_parameters,
                          fasta,
                          mzml_files) -> List[str]:
    """Run Comet, MS-GF+ and X! Tandem on the same inputs and return all
    resulting pepXML paths (in that order)."""
    results = []
    results.extend(comet(parameter_file=comet_parameters,
                         fasta=fasta,
                         mzml_files=mzml_files))
    results.extend(msgfplus(parameter_file=msgfplus_parameters,
                            fasta=fasta,
                            mzml_files=mzml_files))
    results.extend(tandem(parameter_file=tandem_parameters,
                          fasta=fasta,
                          ms_files=mzml_files))
    return results
| from proteotools import COMET, TANDEM, MSGF, TPP
from pathlib import Path
from subprocess import Popen, SubprocessError
from proteotools.software import check_for_tandem, check_for_comet, check_for_msgfplus
import proteotools.tpp as tpp
from typing import List
def comet(parameter_file, fasta, mzml_files) -> List[str]:
check_for_comet()
pepxml_results = []
if isinstance(mzml_files, str):
mzml_files = [mzml_files]
for mzml in mzml_files:
name = Path(mzml).stem
name = Path(mzml).parent / (name + '-comet')
command = f'{COMET} -D{fasta} -P{parameter_file} -N{name} {mzml}'.split()
p = Popen(command)
_ = p.communicate()
if p.returncode != 0:
raise SubprocessError('Something went wrong while running Comet. Inspect the above output.')
Path(f'{name}.pep.xml').rename(name.parent / f'{name}.pepXML')
pepxml_results.append(str(mzml).replace('.mzML', '-comet.pepXML'))
return pepxml_results
def msgfplus(parameter_file, fasta, mzml_files, decoy_prefix: str = 'rev_', convert_to_pepxml: bool = True,
memory: str = '6000M') -> List[str]:
check_for_msgfplus()
pepxml_results = []
if isinstance(mzml_files, str):
mzml_files = [mzml_files]
for mzml in mzml_files:
name = Path(mzml).stem
mzid = Path(mzml).parent / (name + '-msgf_plus.mzid')
command = f'java -Xmx{memory} -jar {MSGF} -conf {parameter_file} -decoy {decoy_prefix} -tda 0 ' \
f'-d {fasta} -o {mzid} -s {mzml}'.split()
p = Popen(command)
_ = p.communicate()
if p.returncode != 0:
raise SubprocessError('Something went wrong while running MS-GF+. Inspect the above output.')
if convert_to_pepxml:
command = f'singularity exec -B {Path(mzid).parent} {TPP} idconvert {mzid} --pepXML ' \
f'-o {Path(mzid).parent} -e -msgf_plus.pepXML'.split()
p = Popen(command)
_ = p.communicate()
if p.returncode != 0:
raise SubprocessError('Something went wrong while running idconvert. Inspect the above output.')
pepxml_results.append(str(mzml).replace('.mzML', '-msgf_plus.pepXML'))
return pepxml_results
def tandem(parameter_file,
fasta,
ms_files,
convert_to_mgf: bool = True,
overwrite_existing_mgf: bool = False) -> List[str]:
check_for_tandem()
output_dir = Path(ms_files[0]).expanduser().parent
if isinstance(ms_files, str):
ms_files = [ms_files]
ms_file_ext = Path(ms_files[0]).suffix
if convert_to_mgf and ms_file_ext not in ['.mfg', '.MGF']:
for ms_file in ms_files:
if (not overwrite_existing_mgf) and Path(ms_file).with_suffix('.mgf').exists():
print(f'MGF version of {ms_file} found. Using: {Path(ms_file).with_suffix(".mgf")}')
continue
print(f'Converting {ms_file} to MGF format')
tpp.run_tool('msconvert', f'-o {Path(ms_file).parent} --mgf {ms_file}', Path(ms_file).parent)
ms_files = [str(Path(x).with_suffix('.mgf')) for x in ms_files]
# we don't convert the tandem xml files to pepxml here. the output doesn't seem to be compatible with TPP tools
command = f'runtandem -i {parameter_file} -db {fasta} --noconvert --overwrite -o {output_dir} --tandem.exe {TANDEM} -v 3 ' \
f'{" ".join(ms_files)}'.split()
p = Popen(command)
_ = p.communicate()
if p.returncode != 0:
raise SubprocessError('Something went wrong while running X! Tandem. Inspect the above output.')
pepxml_results = []
# convert to pepXML using Tandem2XML
for ms_file in ms_files:
txml = Path(ms_file).with_suffix('.t.xml') # get the name of the tandem XML file
t_pepxml = str(txml).replace('.t.xml', '-tandem.pepXML') # this is the name for the pepXML file we will create
bind_point = Path(ms_file).parent
tpp.run_tool('Tandem2XML',
f'{txml} {t_pepxml}',
path_to_bind=bind_point)
pepxml_results.append(t_pepxml)
return pepxml_results
def run_all_with_defaults(comet_parameters,
msgfplus_parameters,
tandem_parameters,
fasta,
mzml_files) -> List[str]:
pepxml_files = comet(parameter_file=comet_parameters,
fasta=fasta,
mzml_files=mzml_files)
pepxml_files += msgfplus(parameter_file=msgfplus_parameters,
fasta=fasta,
mzml_files=mzml_files)
pepxml_files += tandem(parameter_file=tandem_parameters,
fasta=fasta,
ms_files=mzml_files)
return pepxml_files
| en | 0.86291 | # we don't convert the tandem xml files to pepxml here. the output doesn't seem to be compatible with TPP tools # convert to pepXML using Tandem2XML # get the name of the tandem XML file # this is the name for the pepXML file we will create | 2.225011 | 2 |
mundo2/ex066.py | Igor3550/Exercicios-de-python | 0 | 6612536 | # crie um programa que leia vários numeros inteiros pelo teclado
# O programa só vai parar quando o usuário digitar o valor 999
# No final mostre quantos números foram digitados e qual foi a soma entre eles
soma = cont = 0
# Read values until the sentinel 999 is entered; accumulate sum and count.
n = int(input('Digite um valor (999 para terminar): '))
while n != 999:
    soma += n
    cont += 1
    n = int(input('Digite um valor (999 para terminar): '))
print(f'Você digitou {cont} e a soma entre eles foi {soma}')
| # crie um programa que leia vários numeros inteiros pelo teclado
# O programa só vai parar quando o usuário digitar o valor 999
# No final mostre quantos números foram digitados e qual foi a soma entre eles
soma = cont = 0
while True:
n = int(input('Digite um valor (999 para terminar): '))
if n == 999:
break
soma += n
cont += 1
print(f'Você digitou {cont} e a soma entre eles foi {soma}')
| pt | 0.979559 | # crie um programa que leia vários numeros inteiros pelo teclado # O programa só vai parar quando o usuário digitar o valor 999 # No final mostre quantos números foram digitados e qual foi a soma entre eles | 3.903024 | 4 |
mastml/data_cleaning.py | Avery2/MAST-ML | 64 | 6612537 | """
This module provides various methods for cleaning data that has been imported into MAST-ML, prior to model fitting.
DataCleaning:
Class that enables easy use of various data cleaning methods, such as removal of missing values, different
modes of data imputation, or using principal componenet analysis to fill interpolate missing values.
DataUtilities:
Support class used to evaluate some basic statistics of imported data, such as its distribution, mean, etc.
Also provides a means of flagging potential outlier datapoints based on their deviation from the overall data
distribution.
PPCA:
Class used by the PCA data cleaning routine in the DataCleaning class to perform probabilistic PCA to fill in
missing data.
"""
import os
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from scipy.linalg import orth
from collections import Counter
from datetime import datetime
from mastml.plots import Histogram
class DataCleaning():
"""
Class to perform various data cleaning operations, such as imputation or NaN removal
Args:
None
Methods:
remove: Method that removes a full column or row of data values if one column or row contains NaN or is blank
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
axis: (int), whether to remove rows (axis=0) or columns (axis=1)
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
imputation: Method that imputes values to the missing places based on the median, mean, etc. of the data in the column
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
strategy: (str), method of imputation, e.g. median, mean, etc.
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
ppca: Method that imputes data using principal component analysis to interpolate missing values
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
evaluate: Main method to evaluate initial data analysis routines (e.g. flag outliers), perform data cleaning and save output to folder
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
method: (str), data cleaning method name, must be one of 'remove', 'imputation' or 'ppca'
savepath: (str), string containing the savepath information
kwargs: additional keyword arguments needed for the remove, imputation or ppca methods
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
_setup_savedir: method to create a savedir based on the provided model, splitter, selector names and datetime
Args:
savepath: (str), string designating the savepath
Returns:
splitdir: (str), string containing the new subdirectory to save results to
"""
def __init__(self):
pass
def remove(self, X, y, axis):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
df = df.dropna(axis=axis, how='any')
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def imputation(self, X, y, strategy):
df = pd.concat([X, y], axis=1)
columns = df.columns.tolist()
df = pd.DataFrame(SimpleImputer(missing_values=np.nan, strategy=strategy).fit_transform(df), columns=columns)
try:
target = y.name
except:
target = y.columns.tolist()[0]
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def ppca(self, X, y):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
columns = df.columns.tolist()
pca_magic = PPCA()
pca_magic.fit(np.array(df))
# Need to un-standardize the pca-transformed data
df = pd.DataFrame(pca_magic.data*pca_magic.stds+pca_magic.means, columns=columns)
y = df[target]
X = df[[col for col in columns if col != target]]
return X, y
def evaluate(self, X, y, method, savepath=None, make_new_dir=True, **kwargs):
if not savepath:
savepath = os.getcwd()
if make_new_dir is True:
splitdir = self._setup_savedir(savepath=savepath)
savepath = splitdir
self.splitdir = splitdir
DataUtilities().flag_columns_with_strings(X=X, y=y, savepath=savepath)
DataUtilities().flag_outliers(X=X, y=y, savepath=savepath, n_stdevs=3)
df_orig = pd.concat([X, y], axis=1)
self.cleaner = getattr(self, method)
X, y = self.cleaner(X, y, **kwargs)
df_cleaned = pd.concat([X, y], axis=1)
df_orig.to_excel(os.path.join(savepath, 'data_original.xlsx'), index=False)
df_cleaned.to_excel(os.path.join(savepath, 'data_cleaned.xlsx'), index=False)
# Make histogram of the input data
Histogram.plot_histogram(df=y, file_name='histogram_target_values', savepath=savepath, x_label='Target values')
return X, y
def _setup_savedir(self, savepath):
now = datetime.now()
dirname = self.__class__.__name__
dirname = f"{dirname}_{now.month:02d}_{now.day:02d}" \
f"_{now.hour:02d}_{now.minute:02d}_{now.second:02d}"
if savepath == None:
splitdir = os.getcwd()
else:
splitdir = os.path.join(savepath, dirname)
if not os.path.exists(splitdir):
os.mkdir(splitdir)
return splitdir
class DataUtilities():
"""
Class that contains some basic data analysis utilities, such as flagging columns that contain problematic string
entries, or flagging potential outlier values based on threshold values
Args:
None
Methods:
flag_outliers: Method that scans values in each X feature matrix column and flags values that are larger than X standard deviations from the average of that column value. The index and column values of potentially problematic points are listed and written to an output file.
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
n_stdevs: (int), number of standard deviations to use as threshold value
Returns:
None
flag_columns_with_strings: Method that ascertains which columns in data contain string entries
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
Returns:
None
"""
@classmethod
def flag_outliers(cls, X, y, savepath, n_stdevs=3):
df = pd.concat([X, y], axis=1)
n_rows = df.shape[0]
outlier_dict = dict()
outlier_rows_all = list()
for col in df.columns:
outlier_rows = list()
outlier_vals = list()
avg = np.average(df[col])
stdev = np.std(df[col])
for row in range(n_rows):
if df[col].iloc[row] > avg + n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
elif df[col].iloc[row] < avg - n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
else:
pass
outlier_dict[col] = (outlier_rows, outlier_vals)
outlier_rows_all.append(outlier_rows)
# Save data to file
pd.DataFrame().from_dict(data=outlier_dict, orient='index',
columns=['Indices', 'Values']).to_excel(os.path.join(savepath, 'data_outliers_all.xlsx'))
# Also get values of rows that occur most often
outlier_rows_all = np.concatenate(outlier_rows_all).ravel()
outlier_counts = Counter(outlier_rows_all)
# Save summary data of outlier counts to file
pd.DataFrame().from_dict(data=outlier_counts, orient='index',
columns=['Number of occurrences']).to_excel(os.path.join(savepath, 'data_outliers_summary.xlsx'))
return
@classmethod
def flag_columns_with_strings(cls, X, y, savepath):
df = pd.concat([X, y], axis=1)
str_summary = pd.DataFrame(df.applymap(type).eq(str).any())
str_columns = str_summary.index[str_summary[0] == True].tolist()
d = {'columns with strings': str_columns}
pd.DataFrame().from_dict(data=d).to_excel(os.path.join(savepath, 'data_columns_with_strings.xlsx'))
return
class PPCA():
"""
Class to perform probabilistic principal component analysis (PPCA) to fill in missing data.
This PPCA routine was taken directly from https://github.com/allentran/pca-magic. Due to import errors, for ease of use
we have elected to copy the module here. This github repo was last accessed on 8/27/18. The code comprising the PPCA
class below was not developed by and is not owned by the University of Wisconsin-Madison MAST-ML development team.
"""
def __init__(self):
self.raw = None
self.data = None
self.C = None
self.means = None
self.stds = None
self.eig_vals = None
def _standardize(self, X):
if self.means is None or self.stds is None:
raise RuntimeError("Fit model first")
return (X - self.means) / self.stds
def fit(self, data, d=None, tol=1e-4, min_obs=10, verbose=False):
self.raw = data
self.raw[np.isinf(self.raw)] = np.max(self.raw[np.isfinite(self.raw)])
valid_series = np.sum(~np.isnan(self.raw), axis=0) >= min_obs
data = self.raw[:, valid_series].copy()
N = data.shape[0]
D = data.shape[1]
self.means = np.nanmean(data, axis=0)
self.stds = np.nanstd(data, axis=0)
data = self._standardize(data)
observed = ~np.isnan(data)
missing = np.sum(~observed)
data[~observed] = 0
# initial
if d is None:
d = data.shape[1]
if self.C is None:
C = np.random.randn(D, d)
else:
C = self.C
CC = np.dot(C.T, C)
X = np.dot(np.dot(data, C), np.linalg.inv(CC))
recon = np.dot(X, C.T)
recon[~observed] = 0
ss = np.sum((recon - data) ** 2) / (N * D - missing)
v0 = np.inf
counter = 0
while True:
Sx = np.linalg.inv(np.eye(d) + CC / ss)
# e-step
ss0 = ss
if missing > 0:
proj = np.dot(X, C.T)
data[~observed] = proj[~observed]
X = np.dot(np.dot(data, C), Sx) / ss
# m-step
XX = np.dot(X.T, X)
C = np.dot(np.dot(data.T, X), np.linalg.pinv(XX + N * Sx))
CC = np.dot(C.T, C)
recon = np.dot(X, C.T)
recon[~observed] = 0
ss = (np.sum((recon - data) ** 2) + N * np.sum(CC * Sx) + missing * ss0) / (N * D)
# calc diff for convergence
det = np.log(np.linalg.det(Sx))
if np.isinf(det):
det = abs(np.linalg.slogdet(Sx)[1])
v1 = N * (D * np.log(ss) + np.trace(Sx) - det) \
+ np.trace(XX) - missing * np.log(ss0)
diff = abs(v1 / v0 - 1)
if verbose:
print(diff)
if (diff < tol) and (counter > 5):
break
counter += 1
v0 = v1
C = orth(C)
vals, vecs = np.linalg.eig(np.cov(np.dot(data, C).T))
order = np.flipud(np.argsort(vals))
vecs = vecs[:, order]
vals = vals[order]
C = np.dot(C, vecs)
# attach objects to class
self.C = C
self.data = data
self.eig_vals = vals
self._calc_var()
def transform(self, data=None):
if self.C is None:
raise RuntimeError('Fit the data model first.')
if data is None:
return np.dot(self.data, self.C)
return np.dot(data, self.C)
def _calc_var(self):
if self.data is None:
raise RuntimeError('Fit the data model first.')
data = self.data.T
# variance calc
var = np.nanvar(data, axis=1)
total_var = var.sum()
self.var_exp = self.eig_vals.cumsum() / total_var
def save(self, fpath):
np.save(fpath, self.C)
def load(self, fpath):
assert os.path.isfile(fpath)
self.C = np.load(fpath) | """
This module provides various methods for cleaning data that has been imported into MAST-ML, prior to model fitting.
DataCleaning:
Class that enables easy use of various data cleaning methods, such as removal of missing values, different
modes of data imputation, or using principal componenet analysis to fill interpolate missing values.
DataUtilities:
Support class used to evaluate some basic statistics of imported data, such as its distribution, mean, etc.
Also provides a means of flagging potential outlier datapoints based on their deviation from the overall data
distribution.
PPCA:
Class used by the PCA data cleaning routine in the DataCleaning class to perform probabilistic PCA to fill in
missing data.
"""
import os
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from scipy.linalg import orth
from collections import Counter
from datetime import datetime
from mastml.plots import Histogram
class DataCleaning():
"""
Class to perform various data cleaning operations, such as imputation or NaN removal
Args:
None
Methods:
remove: Method that removes a full column or row of data values if one column or row contains NaN or is blank
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
axis: (int), whether to remove rows (axis=0) or columns (axis=1)
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
imputation: Method that imputes values to the missing places based on the median, mean, etc. of the data in the column
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
strategy: (str), method of imputation, e.g. median, mean, etc.
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
ppca: Method that imputes data using principal component analysis to interpolate missing values
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
evaluate: Main method to evaluate initial data analysis routines (e.g. flag outliers), perform data cleaning and save output to folder
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
method: (str), data cleaning method name, must be one of 'remove', 'imputation' or 'ppca'
savepath: (str), string containing the savepath information
kwargs: additional keyword arguments needed for the remove, imputation or ppca methods
Returns:
X: (pd.DataFrame): dataframe of cleaned X data
y: (pd.Series): series of cleaned y data
_setup_savedir: method to create a savedir based on the provided model, splitter, selector names and datetime
Args:
savepath: (str), string designating the savepath
Returns:
splitdir: (str), string containing the new subdirectory to save results to
"""
def __init__(self):
pass
def remove(self, X, y, axis):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
df = df.dropna(axis=axis, how='any')
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def imputation(self, X, y, strategy):
df = pd.concat([X, y], axis=1)
columns = df.columns.tolist()
df = pd.DataFrame(SimpleImputer(missing_values=np.nan, strategy=strategy).fit_transform(df), columns=columns)
try:
target = y.name
except:
target = y.columns.tolist()[0]
y = df[target]
X = df[[col for col in df.columns if col != target]]
return X, y
def ppca(self, X, y):
df = pd.concat([X, y], axis=1)
try:
target = y.name
except:
target = y.columns.tolist()[0]
columns = df.columns.tolist()
pca_magic = PPCA()
pca_magic.fit(np.array(df))
# Need to un-standardize the pca-transformed data
df = pd.DataFrame(pca_magic.data*pca_magic.stds+pca_magic.means, columns=columns)
y = df[target]
X = df[[col for col in columns if col != target]]
return X, y
def evaluate(self, X, y, method, savepath=None, make_new_dir=True, **kwargs):
if not savepath:
savepath = os.getcwd()
if make_new_dir is True:
splitdir = self._setup_savedir(savepath=savepath)
savepath = splitdir
self.splitdir = splitdir
DataUtilities().flag_columns_with_strings(X=X, y=y, savepath=savepath)
DataUtilities().flag_outliers(X=X, y=y, savepath=savepath, n_stdevs=3)
df_orig = pd.concat([X, y], axis=1)
self.cleaner = getattr(self, method)
X, y = self.cleaner(X, y, **kwargs)
df_cleaned = pd.concat([X, y], axis=1)
df_orig.to_excel(os.path.join(savepath, 'data_original.xlsx'), index=False)
df_cleaned.to_excel(os.path.join(savepath, 'data_cleaned.xlsx'), index=False)
# Make histogram of the input data
Histogram.plot_histogram(df=y, file_name='histogram_target_values', savepath=savepath, x_label='Target values')
return X, y
def _setup_savedir(self, savepath):
now = datetime.now()
dirname = self.__class__.__name__
dirname = f"{dirname}_{now.month:02d}_{now.day:02d}" \
f"_{now.hour:02d}_{now.minute:02d}_{now.second:02d}"
if savepath == None:
splitdir = os.getcwd()
else:
splitdir = os.path.join(savepath, dirname)
if not os.path.exists(splitdir):
os.mkdir(splitdir)
return splitdir
class DataUtilities():
"""
Class that contains some basic data analysis utilities, such as flagging columns that contain problematic string
entries, or flagging potential outlier values based on threshold values
Args:
None
Methods:
flag_outliers: Method that scans values in each X feature matrix column and flags values that are larger than X standard deviations from the average of that column value. The index and column values of potentially problematic points are listed and written to an output file.
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
n_stdevs: (int), number of standard deviations to use as threshold value
Returns:
None
flag_columns_with_strings: Method that ascertains which columns in data contain string entries
Args:
X: (pd.DataFrame), dataframe containing X data
y: (pd.Series), series containing y data
savepath: (str), string containing the save path directory
Returns:
None
"""
@classmethod
def flag_outliers(cls, X, y, savepath, n_stdevs=3):
df = pd.concat([X, y], axis=1)
n_rows = df.shape[0]
outlier_dict = dict()
outlier_rows_all = list()
for col in df.columns:
outlier_rows = list()
outlier_vals = list()
avg = np.average(df[col])
stdev = np.std(df[col])
for row in range(n_rows):
if df[col].iloc[row] > avg + n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
elif df[col].iloc[row] < avg - n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
else:
pass
outlier_dict[col] = (outlier_rows, outlier_vals)
outlier_rows_all.append(outlier_rows)
# Save data to file
pd.DataFrame().from_dict(data=outlier_dict, orient='index',
columns=['Indices', 'Values']).to_excel(os.path.join(savepath, 'data_outliers_all.xlsx'))
# Also get values of rows that occur most often
outlier_rows_all = np.concatenate(outlier_rows_all).ravel()
outlier_counts = Counter(outlier_rows_all)
# Save summary data of outlier counts to file
pd.DataFrame().from_dict(data=outlier_counts, orient='index',
columns=['Number of occurrences']).to_excel(os.path.join(savepath, 'data_outliers_summary.xlsx'))
return
@classmethod
def flag_columns_with_strings(cls, X, y, savepath):
df = pd.concat([X, y], axis=1)
str_summary = pd.DataFrame(df.applymap(type).eq(str).any())
str_columns = str_summary.index[str_summary[0] == True].tolist()
d = {'columns with strings': str_columns}
pd.DataFrame().from_dict(data=d).to_excel(os.path.join(savepath, 'data_columns_with_strings.xlsx'))
return
class PPCA():
"""
Class to perform probabilistic principal component analysis (PPCA) to fill in missing data.
This PPCA routine was taken directly from https://github.com/allentran/pca-magic. Due to import errors, for ease of use
we have elected to copy the module here. This github repo was last accessed on 8/27/18. The code comprising the PPCA
class below was not developed by and is not owned by the University of Wisconsin-Madison MAST-ML development team.
"""
def __init__(self):
self.raw = None
self.data = None
self.C = None
self.means = None
self.stds = None
self.eig_vals = None
def _standardize(self, X):
if self.means is None or self.stds is None:
raise RuntimeError("Fit model first")
return (X - self.means) / self.stds
def fit(self, data, d=None, tol=1e-4, min_obs=10, verbose=False):
self.raw = data
self.raw[np.isinf(self.raw)] = np.max(self.raw[np.isfinite(self.raw)])
valid_series = np.sum(~np.isnan(self.raw), axis=0) >= min_obs
data = self.raw[:, valid_series].copy()
N = data.shape[0]
D = data.shape[1]
self.means = np.nanmean(data, axis=0)
self.stds = np.nanstd(data, axis=0)
data = self._standardize(data)
observed = ~np.isnan(data)
missing = np.sum(~observed)
data[~observed] = 0
# initial
if d is None:
d = data.shape[1]
if self.C is None:
C = np.random.randn(D, d)
else:
C = self.C
CC = np.dot(C.T, C)
X = np.dot(np.dot(data, C), np.linalg.inv(CC))
recon = np.dot(X, C.T)
recon[~observed] = 0
ss = np.sum((recon - data) ** 2) / (N * D - missing)
v0 = np.inf
counter = 0
while True:
Sx = np.linalg.inv(np.eye(d) + CC / ss)
# e-step
ss0 = ss
if missing > 0:
proj = np.dot(X, C.T)
data[~observed] = proj[~observed]
X = np.dot(np.dot(data, C), Sx) / ss
# m-step
XX = np.dot(X.T, X)
C = np.dot(np.dot(data.T, X), np.linalg.pinv(XX + N * Sx))
CC = np.dot(C.T, C)
recon = np.dot(X, C.T)
recon[~observed] = 0
ss = (np.sum((recon - data) ** 2) + N * np.sum(CC * Sx) + missing * ss0) / (N * D)
# calc diff for convergence
det = np.log(np.linalg.det(Sx))
if np.isinf(det):
det = abs(np.linalg.slogdet(Sx)[1])
v1 = N * (D * np.log(ss) + np.trace(Sx) - det) \
+ np.trace(XX) - missing * np.log(ss0)
diff = abs(v1 / v0 - 1)
if verbose:
print(diff)
if (diff < tol) and (counter > 5):
break
counter += 1
v0 = v1
C = orth(C)
vals, vecs = np.linalg.eig(np.cov(np.dot(data, C).T))
order = np.flipud(np.argsort(vals))
vecs = vecs[:, order]
vals = vals[order]
C = np.dot(C, vecs)
# attach objects to class
self.C = C
self.data = data
self.eig_vals = vals
self._calc_var()
def transform(self, data=None):
if self.C is None:
raise RuntimeError('Fit the data model first.')
if data is None:
return np.dot(self.data, self.C)
return np.dot(data, self.C)
def _calc_var(self):
if self.data is None:
raise RuntimeError('Fit the data model first.')
data = self.data.T
# variance calc
var = np.nanvar(data, axis=1)
total_var = var.sum()
self.var_exp = self.eig_vals.cumsum() / total_var
def save(self, fpath):
np.save(fpath, self.C)
def load(self, fpath):
assert os.path.isfile(fpath)
self.C = np.load(fpath) | en | 0.806113 | This module provides various methods for cleaning data that has been imported into MAST-ML, prior to model fitting. DataCleaning: Class that enables easy use of various data cleaning methods, such as removal of missing values, different modes of data imputation, or using principal componenet analysis to fill interpolate missing values. DataUtilities: Support class used to evaluate some basic statistics of imported data, such as its distribution, mean, etc. Also provides a means of flagging potential outlier datapoints based on their deviation from the overall data distribution. PPCA: Class used by the PCA data cleaning routine in the DataCleaning class to perform probabilistic PCA to fill in missing data. Class to perform various data cleaning operations, such as imputation or NaN removal Args: None Methods: remove: Method that removes a full column or row of data values if one column or row contains NaN or is blank Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data axis: (int), whether to remove rows (axis=0) or columns (axis=1) Returns: X: (pd.DataFrame): dataframe of cleaned X data y: (pd.Series): series of cleaned y data imputation: Method that imputes values to the missing places based on the median, mean, etc. of the data in the column Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data strategy: (str), method of imputation, e.g. median, mean, etc. Returns: X: (pd.DataFrame): dataframe of cleaned X data y: (pd.Series): series of cleaned y data ppca: Method that imputes data using principal component analysis to interpolate missing values Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data Returns: X: (pd.DataFrame): dataframe of cleaned X data y: (pd.Series): series of cleaned y data evaluate: Main method to evaluate initial data analysis routines (e.g. 
flag outliers), perform data cleaning and save output to folder Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data method: (str), data cleaning method name, must be one of 'remove', 'imputation' or 'ppca' savepath: (str), string containing the savepath information kwargs: additional keyword arguments needed for the remove, imputation or ppca methods Returns: X: (pd.DataFrame): dataframe of cleaned X data y: (pd.Series): series of cleaned y data _setup_savedir: method to create a savedir based on the provided model, splitter, selector names and datetime Args: savepath: (str), string designating the savepath Returns: splitdir: (str), string containing the new subdirectory to save results to # Need to un-standardize the pca-transformed data # Make histogram of the input data Class that contains some basic data analysis utilities, such as flagging columns that contain problematic string entries, or flagging potential outlier values based on threshold values Args: None Methods: flag_outliers: Method that scans values in each X feature matrix column and flags values that are larger than X standard deviations from the average of that column value. The index and column values of potentially problematic points are listed and written to an output file. 
Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data savepath: (str), string containing the save path directory n_stdevs: (int), number of standard deviations to use as threshold value Returns: None flag_columns_with_strings: Method that ascertains which columns in data contain string entries Args: X: (pd.DataFrame), dataframe containing X data y: (pd.Series), series containing y data savepath: (str), string containing the save path directory Returns: None # Save data to file # Also get values of rows that occur most often # Save summary data of outlier counts to file Class to perform probabilistic principal component analysis (PPCA) to fill in missing data. This PPCA routine was taken directly from https://github.com/allentran/pca-magic. Due to import errors, for ease of use we have elected to copy the module here. This github repo was last accessed on 8/27/18. The code comprising the PPCA class below was not developed by and is not owned by the University of Wisconsin-Madison MAST-ML development team. # initial # e-step # m-step # calc diff for convergence # attach objects to class # variance calc | 3.094635 | 3 |
Exercises/number-name.py | shoriwe-upb/TallerEjercicios | 0 | 6612538 | def main():
ref = {"0": "cero", "1": "uno", "2": "dos", "3": "tres", "4": "cuatro", "5": "cinco",
"6": "seis", "7": "siete", "8": "ocho", "9": "nueve", "10": "diez"}
number = input("Number: ")
print(ref[number])
if __name__ == '__main__':
main()
| def main():
ref = {"0": "cero", "1": "uno", "2": "dos", "3": "tres", "4": "cuatro", "5": "cinco",
"6": "seis", "7": "siete", "8": "ocho", "9": "nueve", "10": "diez"}
number = input("Number: ")
print(ref[number])
if __name__ == '__main__':
main()
| none | 1 | 3.510106 | 4 | |
tests/sdk-pyhmy/test_staking.py | johnashu/pyhmy | 37 | 6612539 | <gh_stars>10-100
import pytest
import requests
from pyhmy import (
staking
)
from pyhmy.rpc import (
exceptions
)
# Local explorer node endpoint; used for the *_by_block_number queries below.
explorer_endpoint = 'http://localhost:9599'
# one1-address of the validator created by the `setup_blockchain` fixture.
test_validator_address = 'one18tvf56zqjkjnak686lwutcp5mqfnvee35xjnhc'
# Deliberately unreachable endpoint used to provoke RPC errors in `test_errors`.
fake_shard = 'http://example.com'
def _test_staking_rpc(fn, *args, **kwargs):
if not callable(fn):
pytest.fail(f'Invalid function: {fn}')
try:
response = fn(*args, **kwargs)
except Exception as e:
if isinstance(e, exceptions.RPCError) and 'does not exist/is not available' in str(e):
pytest.skip(f'{str(e)}')
pytest.fail(f'Unexpected error: {e.__class__} {e}')
return response
@pytest.mark.run(order=1)
def test_get_all_validator_addresses(setup_blockchain):
    """The network reports a non-empty validator list containing our validator."""
    addresses = _test_staking_rpc(staking.get_all_validator_addresses)
    assert isinstance(addresses, list)
    assert len(addresses) > 0
    assert test_validator_address in addresses
@pytest.mark.run(order=2)
def test_get_validator_information(setup_blockchain):
    """Per-validator information for the test validator comes back as a dict."""
    assert isinstance(
        _test_staking_rpc(staking.get_validator_information, test_validator_address),
        dict,
    )
@pytest.mark.run(order=3)
def test_get_all_validator_information(setup_blockchain):
    """The aggregate validator-information query yields a non-empty list."""
    everything = _test_staking_rpc(staking.get_all_validator_information)
    assert isinstance(everything, list)
    assert len(everything) > 0
@pytest.mark.run(order=4)
def test_get_delegations_by_delegator(setup_blockchain):
    """Delegations filed by the test address form a non-empty list."""
    by_delegator = _test_staking_rpc(
        staking.get_delegations_by_delegator, test_validator_address)
    assert isinstance(by_delegator, list)
    assert len(by_delegator) > 0
@pytest.mark.run(order=5)
def test_get_delegations_by_validator(setup_blockchain):
    """Delegations received by the test validator form a non-empty list."""
    by_validator = _test_staking_rpc(
        staking.get_delegations_by_validator, test_validator_address)
    assert isinstance(by_validator, list)
    assert len(by_validator) > 0
@pytest.mark.run(order=6)
def test_get_current_utility_metrics(setup_blockchain):
    """Current utility metrics are returned as a dict."""
    assert isinstance(_test_staking_rpc(staking.get_current_utility_metrics), dict)
@pytest.mark.run(order=7)
def test_get_staking_network_info(setup_blockchain):
    """Staking network information is returned as a dict."""
    assert isinstance(_test_staking_rpc(staking.get_staking_network_info), dict)
@pytest.mark.run(order=8)
def test_get_super_committees(setup_blockchain):
    """Super-committee data is returned as a dict."""
    assert isinstance(_test_staking_rpc(staking.get_super_committees), dict)
@pytest.mark.run(order=9)
def test_get_raw_median_stake_snapshot(setup_blockchain):
    """The raw median-stake snapshot is returned as a dict."""
    assert isinstance(_test_staking_rpc(staking.get_raw_median_stake_snapshot), dict)
@pytest.mark.run(order=10)
def test_get_validator_information_by_block(setup_blockchain):
    """Single-validator information is queryable by block number."""
    # Validator information only exists from the block AFTER the
    # create-validator transaction is accepted, hence the +1.
    block_info = _test_staking_rpc(
        staking.get_validator_information_by_block_number,
        test_validator_address,
        setup_blockchain + 1,
        endpoint=explorer_endpoint,
    )
    assert isinstance(block_info, dict)
@pytest.mark.run(order=11)
def test_get_all_validator_information_by_block(setup_blockchain):
    """All-validator information is queryable by block number.

    Renamed from ``test_get_validator_information_by_block``: the original
    name duplicated the order=10 test above, so Python's module namespace
    shadowed the earlier definition and pytest never collected or ran it.
    """
    # Validator information only exists from the block AFTER the
    # create-validator transaction is accepted, hence the +1.
    info = _test_staking_rpc(staking.get_all_validator_information_by_block_number, setup_blockchain + 1, endpoint=explorer_endpoint)
    assert isinstance(info, list)
@pytest.mark.run(order=12)
def test_get_delegations_by_delegator_by_block(setup_blockchain):
    """Delegator-side delegations are queryable by block number."""
    by_block = _test_staking_rpc(
        staking.get_delegations_by_delegator_by_block_number,
        test_validator_address,
        setup_blockchain + 1,
        endpoint=explorer_endpoint,
    )
    assert isinstance(by_block, list)
@pytest.mark.run(order=13)
def test_get_elected_validator_addresses(setup_blockchain):
    """At least one validator address is reported as elected."""
    elected = _test_staking_rpc(staking.get_elected_validator_addresses)
    assert isinstance(elected, list)
    assert len(elected) > 0
@pytest.mark.run(order=14)
def test_get_validators(setup_blockchain):
    """Epoch-2 validator query returns a dict with a non-empty validator list."""
    epoch_validators = _test_staking_rpc(staking.get_validators, 2)
    assert isinstance(epoch_validators, dict)
    assert len(epoch_validators['validators']) > 0
@pytest.mark.run(order=15)
def test_get_validator_keys(setup_blockchain):
    """Epoch-2 validator keys are returned as a list."""
    assert isinstance(_test_staking_rpc(staking.get_validator_keys, 2), list)
@pytest.mark.run(order=16)
def test_get_validator_self_delegation(setup_blockchain):
    """The test validator has a positive integer self-delegation."""
    own_stake = _test_staking_rpc(
        staking.get_validator_self_delegation, test_validator_address)
    assert isinstance(own_stake, int)
    assert own_stake > 0
@pytest.mark.run(order=17)
def test_get_validator_total_delegation(setup_blockchain):
    """The test validator has a positive integer total delegation."""
    total_stake = _test_staking_rpc(
        staking.get_validator_total_delegation, test_validator_address)
    assert isinstance(total_stake, int)
    assert total_stake > 0
@pytest.mark.run(order=18)
def test_get_all_delegation_information(setup_blockchain):
    """Page 0 of the all-delegations query yields a non-empty list."""
    first_page = _test_staking_rpc(staking.get_all_delegation_information, 0)
    assert isinstance(first_page, list)
    assert len(first_page) > 0
@pytest.mark.run(order=19)
def test_get_delegation_by_delegator_and_validator(setup_blockchain):
    """The (delegator, validator) pair query returns a dict."""
    assert isinstance(
        _test_staking_rpc(
            staking.get_delegation_by_delegator_and_validator,
            test_validator_address,
            test_validator_address,
        ),
        dict,
    )
@pytest.mark.run(order=20)
def test_get_available_redelegation_balance(setup_blockchain):
    """No undelegated funds exist yet, so the redelegation balance is zero."""
    redelegatable = _test_staking_rpc(
        staking.get_available_redelegation_balance, test_validator_address)
    assert isinstance(redelegatable, int)
    assert redelegatable == 0
@pytest.mark.run(order=21)
def test_get_total_staking(setup_blockchain):
    """Network-wide total stake is a positive integer."""
    network_stake = _test_staking_rpc(staking.get_total_staking)
    assert isinstance(network_stake, int)
    assert network_stake > 0
@pytest.mark.run(order=22)
def test_errors():
    """Every staking RPC raises RPCError when pointed at an unreachable shard.

    Each entry is ``(rpc_function, positional_args)``; ``fake_shard`` is
    appended as the final positional (endpoint) argument, matching how the
    staking module signatures take the endpoint last.
    """
    failing_calls = [
        (staking.get_all_validator_addresses, ()),
        (staking.get_validator_information, ('',)),
        (staking.get_elected_validator_addresses, ()),
        (staking.get_validators, (1,)),
        (staking.get_validator_keys, (1,)),
        (staking.get_validator_information_by_block_number, ('', 1)),
        (staking.get_all_validator_information, (-1,)),
        (staking.get_validator_self_delegation, ('',)),
        (staking.get_validator_total_delegation, ('',)),
        (staking.get_all_validator_information_by_block_number, (1, 1)),
        (staking.get_all_delegation_information, (1,)),
        (staking.get_delegations_by_delegator, ('',)),
        (staking.get_delegations_by_delegator_by_block_number, ('', 1)),
        (staking.get_delegation_by_delegator_and_validator, ('', '')),
        (staking.get_available_redelegation_balance, ('',)),
        (staking.get_delegations_by_validator, ('',)),
        (staking.get_current_utility_metrics, ()),
        (staking.get_staking_network_info, ()),
        (staking.get_super_committees, ()),
        (staking.get_total_staking, ()),
        (staking.get_raw_median_stake_snapshot, ()),
    ]
    for rpc_fn, args in failing_calls:
        with pytest.raises(exceptions.RPCError):
            rpc_fn(*args, fake_shard)
import pytest
import requests
from pyhmy import (
staking
)
from pyhmy.rpc import (
exceptions
)
# REST endpoint of the local explorer node, used for the by-block-number queries.
explorer_endpoint = 'http://localhost:9599'
# Validator address used throughout these tests; presumably created by the
# setup_blockchain fixture — TODO confirm against the fixture definition.
test_validator_address = 'one18tvf56zqjkjnak686lwutcp5mqfnvee35xjnhc'
# Unreachable endpoint used to exercise RPCError handling in test_errors.
fake_shard = 'http://example.com'
def _test_staking_rpc(fn, *args, **kwargs):
    """Invoke a staking RPC wrapper and translate failures into pytest outcomes.

    Skips the current test when the node reports the RPC as unavailable,
    fails on any other exception, and otherwise returns the RPC response
    unchanged.
    """
    if not callable(fn):
        pytest.fail(f'Invalid function: {fn}')
    try:
        return fn(*args, **kwargs)
    except Exception as err:
        unavailable = (
            isinstance(err, exceptions.RPCError)
            and 'does not exist/is not available' in str(err)
        )
        if unavailable:
            pytest.skip(f'{str(err)}')
        pytest.fail(f'Unexpected error: {err.__class__} {err}')
@pytest.mark.run(order=1)
def test_get_all_validator_addresses(setup_blockchain):
    """The address listing should be non-empty and contain the test validator."""
    addresses = _test_staking_rpc(staking.get_all_validator_addresses)
    assert isinstance(addresses, list)
    assert addresses, 'expected at least one validator address'
    assert test_validator_address in addresses
@pytest.mark.run(order=2)
def test_get_validator_information(setup_blockchain):
    """Information for the test validator should come back as a dict."""
    record = _test_staking_rpc(staking.get_validator_information, test_validator_address)
    assert isinstance(record, dict)
@pytest.mark.run(order=3)
def test_get_all_validator_information(setup_blockchain):
    """The all-validator information listing should be a non-empty list."""
    records = _test_staking_rpc(staking.get_all_validator_information)
    assert isinstance(records, list)
    assert records, 'expected at least one validator record'
@pytest.mark.run(order=4)
def test_get_delegations_by_delegator(setup_blockchain):
    """Delegations filed by the test validator address should be a non-empty list."""
    records = _test_staking_rpc(staking.get_delegations_by_delegator, test_validator_address)
    assert isinstance(records, list)
    assert records, 'expected at least one delegation'
@pytest.mark.run(order=5)
def test_get_delegations_by_validator(setup_blockchain):
    """Delegations held by the test validator should be a non-empty list."""
    records = _test_staking_rpc(staking.get_delegations_by_validator, test_validator_address)
    assert isinstance(records, list)
    assert records, 'expected at least one delegation'
@pytest.mark.run(order=6)
def test_get_current_utility_metrics(setup_blockchain):
    """Current utility metrics should come back as a dict."""
    report = _test_staking_rpc(staking.get_current_utility_metrics)
    assert isinstance(report, dict)
@pytest.mark.run(order=7)
def test_get_staking_network_info(setup_blockchain):
    """Staking network info should come back as a dict."""
    network_info = _test_staking_rpc(staking.get_staking_network_info)
    assert isinstance(network_info, dict)
@pytest.mark.run(order=8)
def test_get_super_committees(setup_blockchain):
    """The super-committee snapshot should come back as a dict."""
    committees = _test_staking_rpc(staking.get_super_committees)
    assert isinstance(committees, dict)
@pytest.mark.run(order=9)
def test_get_raw_median_stake_snapshot(setup_blockchain):
    """The raw median-stake snapshot should come back as a dict."""
    snapshot = _test_staking_rpc(staking.get_raw_median_stake_snapshot)
    assert isinstance(snapshot, dict)
@pytest.mark.run(order=10)
def test_get_validator_information_by_block(setup_blockchain):
    """Validator information should be queryable one block after creation.

    Validator records only appear in the block after the create-validator
    transaction is accepted, hence the ``setup_blockchain + 1`` query block.
    """
    record = _test_staking_rpc(
        staking.get_validator_information_by_block_number,
        test_validator_address,
        setup_blockchain + 1,
        endpoint=explorer_endpoint,
    )
    assert isinstance(record, dict)
@pytest.mark.run(order=11)
def test_get_all_validator_information_by_block(setup_blockchain):
    """Fetch all-validator information at a specific block via the explorer endpoint.

    Renamed from the duplicated ``test_get_validator_information_by_block``:
    the second definition of that name shadowed the first, so the order=10
    test was never collected by pytest.
    """
    # Validator information is not created until the block after the
    # create-validator transaction is accepted, so query at block + 1.
    info = _test_staking_rpc(
        staking.get_all_validator_information_by_block_number,
        setup_blockchain + 1,
        endpoint=explorer_endpoint,
    )
    assert isinstance(info, list)
@pytest.mark.run(order=12)
def test_get_delegations_by_delegator_by_block(setup_blockchain):
    """Delegation lookup by block number should return a list."""
    records = _test_staking_rpc(
        staking.get_delegations_by_delegator_by_block_number,
        test_validator_address,
        setup_blockchain + 1,
        endpoint=explorer_endpoint,
    )
    assert isinstance(records, list)
@pytest.mark.run(order=13)
def test_get_elected_validator_addresses(setup_blockchain):
    """The elected-validator address list should be non-empty."""
    elected = _test_staking_rpc(staking.get_elected_validator_addresses)
    assert isinstance(elected, list)
    assert elected, 'expected at least one elected validator'
@pytest.mark.run(order=14)
def test_get_validators(setup_blockchain):
    """Epoch-2 validator listing should be a dict with a non-empty 'validators' entry."""
    result = _test_staking_rpc(staking.get_validators, 2)
    assert isinstance(result, dict)
    assert result['validators'], 'expected a non-empty validator list'
@pytest.mark.run(order=15)
def test_get_validator_keys(setup_blockchain):
    """Validator keys for epoch 2 should come back as a list."""
    keys = _test_staking_rpc(staking.get_validator_keys, 2)
    assert isinstance(keys, list)
@pytest.mark.run(order=16)
def test_get_validator_self_delegation(setup_blockchain):
    """The test validator should report a positive self-delegation amount."""
    amount = _test_staking_rpc(staking.get_validator_self_delegation, test_validator_address)
    assert isinstance(amount, int)
    assert amount > 0
@pytest.mark.run(order=17)
def test_get_validator_total_delegation(setup_blockchain):
    """The test validator should report a positive total delegation amount."""
    amount = _test_staking_rpc(staking.get_validator_total_delegation, test_validator_address)
    assert isinstance(amount, int)
    assert amount > 0
@pytest.mark.run(order=18)
def test_get_all_delegation_information(setup_blockchain):
    """Page 0 of the delegation information listing should be a non-empty list."""
    page = _test_staking_rpc(staking.get_all_delegation_information, 0)
    assert isinstance(page, list)
    assert page, 'expected at least one delegation record'
@pytest.mark.run(order=19)
def test_get_delegation_by_delegator_and_validator(setup_blockchain):
    """Self-delegation lookup (delegator == validator) should yield a dict."""
    record = _test_staking_rpc(
        staking.get_delegation_by_delegator_and_validator,
        test_validator_address,
        test_validator_address,
    )
    assert isinstance(record, dict)
@pytest.mark.run(order=20)
def test_get_available_redelegation_balance(setup_blockchain):
    """The test validator is expected to have a zero redelegation balance."""
    balance = _test_staking_rpc(staking.get_available_redelegation_balance, test_validator_address)
    assert isinstance(balance, int)
    assert balance == 0
@pytest.mark.run(order=21)
def test_get_total_staking(setup_blockchain):
    """Network-wide total staking should be a positive integer."""
    total = _test_staking_rpc(staking.get_total_staking)
    assert isinstance(total, int)
    assert total > 0
@pytest.mark.run(order=22)
def test_errors():
    """Every staking RPC should raise RPCError when aimed at an unreachable endpoint.

    Each entry pairs an RPC wrapper with the positional arguments to call it
    with; the final argument in every case is the bogus ``fake_shard`` endpoint.
    """
    failing_calls = [
        (staking.get_all_validator_addresses, (fake_shard,)),
        (staking.get_validator_information, ('', fake_shard)),
        (staking.get_elected_validator_addresses, (fake_shard,)),
        (staking.get_validators, (1, fake_shard)),
        (staking.get_validator_keys, (1, fake_shard)),
        (staking.get_validator_information_by_block_number, ('', 1, fake_shard)),
        (staking.get_all_validator_information, (-1, fake_shard)),
        (staking.get_validator_self_delegation, ('', fake_shard)),
        (staking.get_validator_total_delegation, ('', fake_shard)),
        (staking.get_all_validator_information_by_block_number, (1, 1, fake_shard)),
        (staking.get_all_delegation_information, (1, fake_shard)),
        (staking.get_delegations_by_delegator, ('', fake_shard)),
        (staking.get_delegations_by_delegator_by_block_number, ('', 1, fake_shard)),
        (staking.get_delegation_by_delegator_and_validator, ('', '', fake_shard)),
        (staking.get_available_redelegation_balance, ('', fake_shard)),
        (staking.get_delegations_by_validator, ('', fake_shard)),
        (staking.get_current_utility_metrics, (fake_shard,)),
        (staking.get_staking_network_info, (fake_shard,)),
        (staking.get_super_committees, (fake_shard,)),
        (staking.get_total_staking, (fake_shard,)),
        (staking.get_raw_median_stake_snapshot, (fake_shard,)),
    ]
    for rpc_call, rpc_args in failing_calls:
        with pytest.raises(exceptions.RPCError):
            rpc_call(*rpc_args)
Data Structures and Algorithms/Edabit Algo Solutions/VERY EASY PROBLEMS/TwoMakesTen.py | akkik04/Python-DataStructures-and-Algorithms | 1 | 6612540 | # TWO MAKES TEN EDABIT SOLUTION:
# creating a function to solve the problem.
def makes10(a, b):
# creating an if-statement to check if the arguments pass the test.
if a + b == 10 or a / 10 == 1 or b / 10 == 1:
# returning 'True' if the condition is met.
return True
# creating an else statement to handle the arguments not meeting the conditions.
else:
# returning 'False' if the condition is not met.
return False
| # TWO MAKES TEN EDABIT SOLUTION:
# creating a function to solve the problem.
def makes10(a, b):
# creating an if-statement to check if the arguments pass the test.
if a + b == 10 or a / 10 == 1 or b / 10 == 1:
# returning 'True' if the condition is met.
return True
# creating an else statement to handle the arguments not meeting the conditions.
else:
# returning 'False' if the condition is not met.
return False
| en | 0.722031 | # TWO MAKES TEN EDABIT SOLUTION: # creating a function to solve the problem. # creating an if-statement to check if the arguments pass the test. # returning 'True' if the condition is met. # creating an else statement to handle the arguments not meeting the conditions. # returning 'False' if the condition is not met. | 4.092694 | 4 |
extensionchanger_v2.py | NewFrenchDev/Extension-easy-change | 0 | 6612541 | <reponame>NewFrenchDev/Extension-easy-change<gh_stars>0
import os
from glob import glob
import json
from time import sleep
import pathlib
import platform
import shutil
import logging
OS = platform.system()
FORMAT = "%(asctime)s -- %(levelname)s: %(message)s"
logging.basicConfig(format=FORMAT, level="INFO")
class ExtensionChangerV2():
def __init__(self):
self.use_config = "No"
self.working_folder = ""
self.saving_folder = ""
self.directory_list =[]
self.folder_to_copy = None
self.folder_to_create = None
self.source = None
self.destination = None
### --> Quit programm
def exit(self):
logging.info(msg="Arrêt du script")
sleep(10)
exit()
### --> Read config.json file
def get_configuration(self):
logging.info(msg="Chargement du fichier de config.json...\n")
with open("./config.json", "r", encoding="utf-8") as f:
configuration = json.load(f)
self.use_config = configuration.get("use_config")
if self.use_config == "Yes":
logging.info(msg="us_config=Yes --> les chemins indiqués dans config.json vont être utilisés")
self.working_folder = pathlib.Path(configuration.get("working_folder"))
self.saving_folder = pathlib.Path(configuration.get("saving_folder"))
if not os.path.exists(self.working_folder):
logging.error(msg=f"Le chemin '{self.working_folder}' n'existe pas!")
self.exit()
elif not os.path.exists(self.saving_folder):
logging.error(msg=f"Le chemin '{self.saving_folder}' n'existe pas!")
self.exit()
else:
self.working_folder = os.path.dirname(__file__)
self.saving_folder = f"{self.working_folder}\\saving"
os.makedirs(self.saving_folder, exist_ok=True)
self.extensions = configuration.get("extensions")
logging.info(msg="Chargement terminé!")
### --> Check configuration
def check_configuration(self):
logging.info(msg="Verification des chemins...")
if self.working_folder == "":
logging.error(msg="Aucun chemin n'est indiqué pour le dossier de travail!")
self.exit()
else:
if os.path.exists(self.working_folder):
logging.info(msg="Le dossier de travail a été vérifié")
else:
logging.error(msg="Le dossier de travail indiqué dans config.json n'existe pas")
self.exit()
if self.saving_folder == "":
logging.error(msg="Aucun chemin n'est indiqué pour le dossier de sauvegarde")
self.exit()
else:
if os.path.exists(self.saving_folder):
logging.info(msg="Le dossier de sauvegarde a été vérifié")
else:
logging.error(msg="Le dossier de sauvegarde indiqué dans config.json n'existe pas")
self.exit()
logging.info(msg="Les chemins sont accessibles!")
### CHECK DIRECTORY COMPOSITION
def check_working_folder(self):
if self.use_config == "Yes":
directory_composition = glob(f"{self.working_folder}/**/", recursive=True)
else:
if os.path.isdir(f"{self.working_folder}\\work"):
directory_composition = glob(f"{self.working_folder}\\work/**/", recursive=True)
else:
logging.error("""Le script n'est pas placé dans le bon dossier de travail!
Le script doit être placé au même niveau que le dossier work de ton projet! :)""")
if directory_composition != []:
#remove first folder == working folder
path_working_folder = directory_composition.pop(0)
path_working_folder_lenght = len(path_working_folder.split("\\"))
print(f"\n\nDossiers trouvés dans le dossier de travail:")
for folder in directory_composition:
folder_path_lenght = len(folder.split("\\"))
foldername = folder.split("\\")[-2]
if folder_path_lenght == path_working_folder_lenght + 1:
self.directory_list.append(foldername)
#folder_parent = folder.split("\\")[-3]
#print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}")
print(f"- Dossier: {foldername}")
elif folder_path_lenght == path_working_folder_lenght + 2:
#folder_parent = "\\".join(folder.split("\\")[-4:-2])
#print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}")
print(f"--- Sous-dossier: {foldername}")
### PRINT DIRECTORY ONLY
def print_directory(self):
print(f"\n\nTu peux choisir de copier l'entièreté d'un de ces dossiers: {self.directory_list}")
### CHOOSE FOLDER TO COPY
def copy_folder(self):
self.folder_to_copy = input("Entre le nom de dossier à copier: ")
if self.folder_to_copy not in self.directory_list:
logging.error("Le nom indiqué n'est pas présent dans la liste suggérée...")
self.copy_folder()
return True
self.folder_to_create = input("Donner la nouvelle extension voulue: ")
if self.use_config == "No":
self.source = f"{self.working_folder}\\work\\{self.folder_to_copy}"
self.destination = f"{self.saving_folder}\\{self.folder_to_create}"
elif self.use_config == "Yes":
self.source = f"{self.working_folder}\\{self.folder_to_copy}"
self.destination = f"{self.saving_folder}\\{self.folder_to_create}"
else:
logging.error(msg="use_config ne peut valoir que Yes ou No! Corrige la valeur dans le fichier config.json")
self.exit()
if os.path.exists(self.destination):
shutil.rmtree(self.destination)
logging.info(msg="Copie lancée...")
shutil.copytree(self.source, self.destination)
logging.info(msg="Copie terminée!")
### CHANGE EXTENSION FOR SAVING FOLDER
def change_extension(self):
files_to_modify = glob(f"{self.destination}/**/*.{self.folder_to_copy}", recursive=True)
for file in files_to_modify:
os.rename(file, file.replace(f"{self.folder_to_copy}", f"{self.folder_to_create}"))
logging.info(msg="Les fichiers ont été renommés avec succès")
self.exit()
### EXECUTION
def execute(self):
self.get_configuration()
self.check_configuration()
self.check_working_folder()
self.print_directory()
self.copy_folder()
self.change_extension() | import os
from glob import glob
import json
from time import sleep
import pathlib
import platform
import shutil
import logging
OS = platform.system()
FORMAT = "%(asctime)s -- %(levelname)s: %(message)s"
logging.basicConfig(format=FORMAT, level="INFO")
class ExtensionChangerV2():
def __init__(self):
self.use_config = "No"
self.working_folder = ""
self.saving_folder = ""
self.directory_list =[]
self.folder_to_copy = None
self.folder_to_create = None
self.source = None
self.destination = None
### --> Quit programm
def exit(self):
logging.info(msg="Arrêt du script")
sleep(10)
exit()
### --> Read config.json file
def get_configuration(self):
logging.info(msg="Chargement du fichier de config.json...\n")
with open("./config.json", "r", encoding="utf-8") as f:
configuration = json.load(f)
self.use_config = configuration.get("use_config")
if self.use_config == "Yes":
logging.info(msg="us_config=Yes --> les chemins indiqués dans config.json vont être utilisés")
self.working_folder = pathlib.Path(configuration.get("working_folder"))
self.saving_folder = pathlib.Path(configuration.get("saving_folder"))
if not os.path.exists(self.working_folder):
logging.error(msg=f"Le chemin '{self.working_folder}' n'existe pas!")
self.exit()
elif not os.path.exists(self.saving_folder):
logging.error(msg=f"Le chemin '{self.saving_folder}' n'existe pas!")
self.exit()
else:
self.working_folder = os.path.dirname(__file__)
self.saving_folder = f"{self.working_folder}\\saving"
os.makedirs(self.saving_folder, exist_ok=True)
self.extensions = configuration.get("extensions")
logging.info(msg="Chargement terminé!")
### --> Check configuration
def check_configuration(self):
logging.info(msg="Verification des chemins...")
if self.working_folder == "":
logging.error(msg="Aucun chemin n'est indiqué pour le dossier de travail!")
self.exit()
else:
if os.path.exists(self.working_folder):
logging.info(msg="Le dossier de travail a été vérifié")
else:
logging.error(msg="Le dossier de travail indiqué dans config.json n'existe pas")
self.exit()
if self.saving_folder == "":
logging.error(msg="Aucun chemin n'est indiqué pour le dossier de sauvegarde")
self.exit()
else:
if os.path.exists(self.saving_folder):
logging.info(msg="Le dossier de sauvegarde a été vérifié")
else:
logging.error(msg="Le dossier de sauvegarde indiqué dans config.json n'existe pas")
self.exit()
logging.info(msg="Les chemins sont accessibles!")
### CHECK DIRECTORY COMPOSITION
def check_working_folder(self):
if self.use_config == "Yes":
directory_composition = glob(f"{self.working_folder}/**/", recursive=True)
else:
if os.path.isdir(f"{self.working_folder}\\work"):
directory_composition = glob(f"{self.working_folder}\\work/**/", recursive=True)
else:
logging.error("""Le script n'est pas placé dans le bon dossier de travail!
Le script doit être placé au même niveau que le dossier work de ton projet! :)""")
if directory_composition != []:
#remove first folder == working folder
path_working_folder = directory_composition.pop(0)
path_working_folder_lenght = len(path_working_folder.split("\\"))
print(f"\n\nDossiers trouvés dans le dossier de travail:")
for folder in directory_composition:
folder_path_lenght = len(folder.split("\\"))
foldername = folder.split("\\")[-2]
if folder_path_lenght == path_working_folder_lenght + 1:
self.directory_list.append(foldername)
#folder_parent = folder.split("\\")[-3]
#print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}")
print(f"- Dossier: {foldername}")
elif folder_path_lenght == path_working_folder_lenght + 2:
#folder_parent = "\\".join(folder.split("\\")[-4:-2])
#print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}")
print(f"--- Sous-dossier: {foldername}")
### PRINT DIRECTORY ONLY
def print_directory(self):
print(f"\n\nTu peux choisir de copier l'entièreté d'un de ces dossiers: {self.directory_list}")
### CHOOSE FOLDER TO COPY
def copy_folder(self):
self.folder_to_copy = input("Entre le nom de dossier à copier: ")
if self.folder_to_copy not in self.directory_list:
logging.error("Le nom indiqué n'est pas présent dans la liste suggérée...")
self.copy_folder()
return True
self.folder_to_create = input("Donner la nouvelle extension voulue: ")
if self.use_config == "No":
self.source = f"{self.working_folder}\\work\\{self.folder_to_copy}"
self.destination = f"{self.saving_folder}\\{self.folder_to_create}"
elif self.use_config == "Yes":
self.source = f"{self.working_folder}\\{self.folder_to_copy}"
self.destination = f"{self.saving_folder}\\{self.folder_to_create}"
else:
logging.error(msg="use_config ne peut valoir que Yes ou No! Corrige la valeur dans le fichier config.json")
self.exit()
if os.path.exists(self.destination):
shutil.rmtree(self.destination)
logging.info(msg="Copie lancée...")
shutil.copytree(self.source, self.destination)
logging.info(msg="Copie terminée!")
### CHANGE EXTENSION FOR SAVING FOLDER
def change_extension(self):
files_to_modify = glob(f"{self.destination}/**/*.{self.folder_to_copy}", recursive=True)
for file in files_to_modify:
os.rename(file, file.replace(f"{self.folder_to_copy}", f"{self.folder_to_create}"))
logging.info(msg="Les fichiers ont été renommés avec succès")
self.exit()
### EXECUTION
def execute(self):
self.get_configuration()
self.check_configuration()
self.check_working_folder()
self.print_directory()
self.copy_folder()
self.change_extension() | fr | 0.367898 | ### --> Quit programm ### --> Read config.json file ### --> Check configuration ### CHECK DIRECTORY COMPOSITION Le script n'est pas placé dans le bon dossier de travail! Le script doit être placé au même niveau que le dossier work de ton projet! :) #remove first folder == working folder #folder_parent = folder.split("\\")[-3] #print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}") #folder_parent = "\\".join(folder.split("\\")[-4:-2]) #print(f"Le dossier {foldername} est le sous dossier direct de {folder_parent}") ### PRINT DIRECTORY ONLY ### CHOOSE FOLDER TO COPY ### CHANGE EXTENSION FOR SAVING FOLDER ### EXECUTION | 2.646923 | 3 |
azure-batch/azure/batch/models/node_file.py | HydAu/AzureSDKForPython | 0 | 6612542 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeFile(Model):
"""
Information about a file or directory on a compute node.
:param name: The file path.
:type name: str
:param url: The URL of the file.
:type url: str
:param is_directory: Whether the object represents a directory.
:type is_directory: bool
:param properties: The file properties.
:type properties: :class:`FileProperties
<azure.batch.models.FileProperties>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'is_directory': {'key': 'isDirectory', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'FileProperties'},
}
def __init__(self, name=None, url=None, is_directory=None, properties=None):
self.name = name
self.url = url
self.is_directory = is_directory
self.properties = properties
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeFile(Model):
"""
Information about a file or directory on a compute node.
:param name: The file path.
:type name: str
:param url: The URL of the file.
:type url: str
:param is_directory: Whether the object represents a directory.
:type is_directory: bool
:param properties: The file properties.
:type properties: :class:`FileProperties
<azure.batch.models.FileProperties>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'is_directory': {'key': 'isDirectory', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'FileProperties'},
}
def __init__(self, name=None, url=None, is_directory=None, properties=None):
self.name = name
self.url = url
self.is_directory = is_directory
self.properties = properties
| en | 0.712985 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft and contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # limitations under the License. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Information about a file or directory on a compute node. :param name: The file path. :type name: str :param url: The URL of the file. :type url: str :param is_directory: Whether the object represents a directory. :type is_directory: bool :param properties: The file properties. :type properties: :class:`FileProperties <azure.batch.models.FileProperties>` | 1.583808 | 2 |
bot/rasa/ask_client_form.py | newsettle/ns4_chatbot | 51 | 6612543 | <gh_stars>10-100
from rasa_core.actions.forms import FormAction
from rasa_core.actions.forms import EntityFormField
from rasa_core.actions.action import ActionRestart
from rasa_core.events import SlotSet
from rasa_core.actions import Action
# -*- coding: UTF-8 -*-
class AskClientInfoForm(FormAction):
RANDOMIZE = False
@staticmethod
def required_fields():
return [
#EntityFormField(<Entity>,<Slot>),
EntityFormField("phone", "phone"),
EntityFormField("company", "company"),
]
def name(self):
return 'action_ask_client'
def submit(self, dispatcher, tracker, domain):
# send utter default template to user
dispatcher.utter_template("utter_ask_morehelp")
# ... other code
return [] | from rasa_core.actions.forms import FormAction
from rasa_core.actions.forms import EntityFormField
from rasa_core.actions.action import ActionRestart
from rasa_core.events import SlotSet
from rasa_core.actions import Action
# -*- coding: UTF-8 -*-
class AskClientInfoForm(FormAction):
RANDOMIZE = False
@staticmethod
def required_fields():
return [
#EntityFormField(<Entity>,<Slot>),
EntityFormField("phone", "phone"),
EntityFormField("company", "company"),
]
def name(self):
return 'action_ask_client'
def submit(self, dispatcher, tracker, domain):
# send utter default template to user
dispatcher.utter_template("utter_ask_morehelp")
# ... other code
return [] | en | 0.28128 | # -*- coding: UTF-8 -*- #EntityFormField(<Entity>,<Slot>), # send utter default template to user # ... other code | 1.961711 | 2 |
argsloader/base/exception.py | HansBug/argsloader | 0 | 6612544 | <reponame>HansBug/argsloader<gh_stars>0
import os
import re
from textwrap import indent
from typing import Type, Tuple, List
from cachetools import cached
from hbutils.model import asitems, accessor, visual
from hbutils.reflection import class_wraps
from hbutils.string import plural_word
from .value import PValue
class BaseParseError(Exception):
"""
Overview:
Base class of all the parse errors.
.. note::
This class is only used as base class of :class:`argsloader.base.exception.ParseError`, \
:class:`argsloader.base.exception.MultipleParseError` and :class:`argsloader.base.exception.SkippedParseError`.
"""
pass
@accessor(readonly=True)
@visual(show_id=True)
@asitems(['message', 'unit', 'value', 'info'])
class ParseError(BaseParseError):
"""
Overview:
Error when parse one piece of data.
"""
def __init__(self, message: str, unit, value: PValue, info: Tuple[object, ...]):
"""
Constructor of class :class:`argsloader.base.exception.ParseError`.
:param message: String message.
:param unit: Unit which cause this error.
:param value: Value passed in.
:param info: Extra information.
"""
BaseParseError.__init__(self, message, *info)
self.__message = message
self.__unit = unit
self.__value = value
self.__info = info
_EXCEPTION_NAME = re.compile('^([a-zA-Z0-9_]*)(Error|Exception)$')
_EXCEPTION_CLASSES = {}
@cached(_EXCEPTION_CLASSES)
def wrap_exception_class(cls: Type[Exception]) -> Type[ParseError]:
"""
Wrap exception class to inherit :class:`argsloader.base.exception.ParseError`.
:param cls: Class to be wrapped.
:return: Wrapped exception class, which should be subclass of both \
``cls`` and :class:`argsloader.base.exception.ParseError`.
Examples::
>>> from argsloader.base import wrap_exception_class, ParseError
>>> err = wrap_exception_class(ValueError)
>>> err
<class 'ValueParseError'>
>>> issubclass(err, ParseError)
True
>>> issubclass(err, ValueError)
True
"""
matching = _EXCEPTION_NAME.fullmatch(cls.__name__)
if matching:
@class_wraps(cls)
class _ParseError(cls, ParseError):
def __init__(self, exc: Exception, unit, value):
args = tuple(exc.args) if isinstance(exc.args, (list, tuple)) else (exc.args,)
ParseError.__init__(self, args[0], unit, value, args[1:])
_ParseError.__name__ = f'{matching[1]}Parse{matching[2]}'
return _ParseError
else:
raise NameError(f'Unrecognizable exception name - {repr(cls.__name__)}.')
def wrap_exception(ex: Exception, unit, value) -> ParseError:
    """
    Convert ``ex`` into an instance of the wrapped class produced by
    :func:`wrap_exception_class`, preserving its traceback.
    :param ex: Original exception.
    :param unit: Unit which cause this exception.
    :param value: Value passed in.
    :return: A new exception object that is an instance of both ``type(ex)``
        and :class:`argsloader.base.exception.ParseError`.
    """
    wrapped_class = wrap_exception_class(type(ex))
    # noinspection PyCallingNonCallable
    wrapped = wrapped_class(ex, unit, value)
    # Keep the original traceback so re-raising points at the true origin.
    return wrapped.with_traceback(ex.__traceback__)
class MultipleParseError(BaseParseError):
    """
    Overview:
        Full result of one parsing process.
        Can be seen as collection of :class:`argsloader.base.exception.ParseError`.
    """
    def __init__(self, items: List[Tuple[PValue, ParseError]]):
        """
        Constructor of class :class:`argsloader.base.exception.MultipleParseError`.
        :param items: Parse error items, as ``(position-tagged value, error)`` pairs.
        """
        # Materialize the (possibly lazy) iterable into a list of pairs.
        self.__items = list((pv, err) for pv, err in items)
    @property
    def items(self) -> List[Tuple[PValue, ParseError]]:
        """
        Parse error items.
        """
        return self.__items
    @classmethod
    def _display_item(cls, item):
        # Render one (PValue, ParseError) pair as
        # ``<root>.a.b: SomeError: message``.
        pvalue, error = item
        rep_str = '.'.join(('<root>', *map(str, pvalue.position)))
        error_str = error.message
        return f'{rep_str}: {type(error).__name__}: {error_str}'
    def __repr__(self):
        # e.g. ``<MultipleParseError (2 errors)`` followed by one indented
        # line per error and a closing ``>``.
        return f'<{type(self).__name__} ({plural_word(len(self.__items), "error")}){os.linesep}' \
               f'{indent(os.linesep.join(map(self._display_item, self.__items)), prefix=" ")}' \
               f'{os.linesep}>'
    def __str__(self):
        # Same listing as __repr__, without the class name and brackets.
        return f'({plural_word(len(self.__items), "error")}){os.linesep}' \
               f'{indent(os.linesep.join(map(self._display_item, self.__items)), prefix=" ")}'
@accessor(readonly=True)
@asitems(['unit'])
class SkippedParseError(BaseParseError):
    """
    Overview:
        Error used when parsing process is skipped due to the forwarded error.
    """
    def __init__(self, unit):
        """
        Constructor of class :class:`argsloader.base.exception.SkippedParseError`.
        :param unit: Unit which should do this parsing process.
        """
        # NOTE: the whole (message, unit) pair is deliberately passed as a
        # single tuple argument, so ``args`` holds one tuple element.
        # Fix: message previously read "due the"; corrected to "due to the"
        # (matching the class docstring's wording).
        BaseParseError.__init__(self, ('Parsing is skipped due to the forward-side errors.', unit))
        self.__unit = unit
| import os
import re
from textwrap import indent
from typing import Type, Tuple, List
from cachetools import cached
from hbutils.model import asitems, accessor, visual
from hbutils.reflection import class_wraps
from hbutils.string import plural_word
from .value import PValue
class BaseParseError(Exception):
    """
    Overview:
        Base class of all the parse errors.
    .. note::
        This class is only used as base class of :class:`argsloader.base.exception.ParseError`, \
        :class:`argsloader.base.exception.MultipleParseError` and :class:`argsloader.base.exception.SkippedParseError`.
    """
    # Marker base class only; it carries no behaviour of its own.
    pass
@accessor(readonly=True)
@visual(show_id=True)
@asitems(['message', 'unit', 'value', 'info'])
class ParseError(BaseParseError):
    """
    Overview:
        Error when parse one piece of data.
    """
    def __init__(self, message: str, unit, value: PValue, info: Tuple[object, ...]):
        """
        Constructor of class :class:`argsloader.base.exception.ParseError`.
        :param message: String message.
        :param unit: Unit which cause this error.
        :param value: Value passed in.
        :param info: Extra information.
        """
        # args becomes (message, *info), mirroring the wrapped exception.
        BaseParseError.__init__(self, message, *info)
        # The attribute names below must match the @asitems list; the
        # decorators presumably generate the public read-only accessors.
        self.__message = message
        self.__unit = unit
        self.__value = value
        self.__info = info
# Names that can be wrapped must end in ``Error`` or ``Exception``;
# group 1 captures the prefix, group 2 the suffix.
_EXCEPTION_NAME = re.compile('^([a-zA-Z0-9_]*)(Error|Exception)$')
# Memoization table used by the @cached decorator below.
_EXCEPTION_CLASSES = {}
@cached(_EXCEPTION_CLASSES)
def wrap_exception_class(cls: Type[Exception]) -> Type[ParseError]:
    """
    Wrap exception class to inherit :class:`argsloader.base.exception.ParseError`.
    :param cls: Class to be wrapped.
    :return: Wrapped exception class, which should be subclass of both \
        ``cls`` and :class:`argsloader.base.exception.ParseError`.
    :raises NameError: If the class name does not end in ``Error``/``Exception``.
    Examples::
        >>> from argsloader.base import wrap_exception_class, ParseError
        >>> err = wrap_exception_class(ValueError)
        >>> err
        <class 'ValueParseError'>
        >>> issubclass(err, ParseError)
        True
        >>> issubclass(err, ValueError)
        True
    """
    matching = _EXCEPTION_NAME.fullmatch(cls.__name__)
    if matching:
        @class_wraps(cls)
        class _ParseError(cls, ParseError):
            def __init__(self, exc: Exception, unit, value):
                # Normalize exc.args to a tuple; scalars become 1-tuples.
                args = tuple(exc.args) if isinstance(exc.args, (list, tuple)) else (exc.args,)
                ParseError.__init__(self, args[0], unit, value, args[1:])
        # e.g. ValueError -> ValueParseError.
        _ParseError.__name__ = f'{matching[1]}Parse{matching[2]}'
        return _ParseError
    else:
        raise NameError(f'Unrecognizable exception name - {repr(cls.__name__)}.')
def wrap_exception(ex: Exception, unit, value) -> ParseError:
    """
    Wrap exception object to new exception object with wrapped class.
    :param ex: Original exception.
    :param unit: Unit which cause this exception.
    :param value: Value passed in.
    :return: Wrapped exception object, which should be an instance of \
        ``type(ex)`` and :class:`argsloader.base.exception.ParseError`.
    Examples::
        >>> from argsloader.base import wrap_exception, ParseError
        >>> err = wrap_exception(ValueError('this is message', 2, 3, 4), 'unit', 'value')
        >>> err
        <ValueParseError 0x7f13877146a8 message: 'this is message', unit: 'unit', value: 'value', info: (2, 3, 4)>
        >>> isinstance(err, ParseError)
        True
        >>> isinstance(err, ValueError)
        True
        >>> err.message
        'this is message'
        >>> err.unit
        'unit'
        >>> err.value
        'value'
        >>> err.info
        (2, 3, 4)
    """
    # noinspection PyCallingNonCallable
    # with_traceback keeps the original traceback so re-raising the wrapped
    # object still points at the true origin of the failure.
    return wrap_exception_class(type(ex))(ex, unit, value).with_traceback(ex.__traceback__)
class MultipleParseError(BaseParseError):
    """
    Overview:
        Aggregate of every :class:`argsloader.base.exception.ParseError`
        produced by a single parsing run.
    """
    def __init__(self, items: List[Tuple[PValue, ParseError]]):
        """
        Constructor of class :class:`argsloader.base.exception.MultipleParseError`.
        :param items: Parse error items.
        """
        self.__items = [(pv, err) for pv, err in items]
    @property
    def items(self) -> List[Tuple[PValue, ParseError]]:
        """
        Parse error items.
        """
        return self.__items
    @classmethod
    def _display_item(cls, item):
        # One line per error: ``<root>.a.b: SomeError: message``.
        pvalue, error = item
        location = '.'.join(('<root>', *map(str, pvalue.position)))
        return f'{location}: {type(error).__name__}: {error.message}'
    def __repr__(self):
        count = plural_word(len(self.__items), "error")
        listing = indent(os.linesep.join(map(self._display_item, self.__items)), prefix=" ")
        return f'<{type(self).__name__} ({count}){os.linesep}{listing}{os.linesep}>'
    def __str__(self):
        count = plural_word(len(self.__items), "error")
        listing = indent(os.linesep.join(map(self._display_item, self.__items)), prefix=" ")
        return f'({count}){os.linesep}{listing}'
@accessor(readonly=True)
@asitems(['unit'])
class SkippedParseError(BaseParseError):
    """
    Overview:
        Error used when parsing process is skipped due to the forwarded error.
    """
    def __init__(self, unit):
        """
        Constructor of class :class:`argsloader.base.exception.SkippedParseError`.
        :param unit: Unit which should do this parsing process.
        """
        # NOTE(review): message reads "due the" -- likely a typo for
        # "due to the" (the class docstring uses "due to").  The whole
        # (message, unit) pair is passed as one tuple argument.
        BaseParseError.__init__(self, ('Parsing is skipped due the forward-side errors.', unit))
self.__unit = unit | en | 0.427174 | Overview: Base class of all the parse errors. .. note:: This class is only used as base class of :class:`argsloader.base.exception.ParseError`, \ :class:`argsloader.base.exception.MultipleParseError` and :class:`argsloader.base.exception.SkippedParseError`. Overview: Error when parse one piece of data. Constructor of class :class:`argsloader.base.exception.ParseError`. :param message: String message. :param unit: Unit which cause this error. :param value: Value passed in. :param info: Extra information. Wrap exception class to inherit :class:`argsloader.base.exception.ParseError`. :param cls: Class to be wrapped. :return: Wrapped exception class, which should be subclass of both \ ``cls`` and :class:`argsloader.base.exception.ParseError`. Examples:: >>> from argsloader.base import wrap_exception_class, ParseError >>> err = wrap_exception_class(ValueError) >>> err <class 'ValueParseError'> >>> issubclass(err, ParseError) True >>> issubclass(err, ValueError) True Wrap exception object to new exception object with wrapped class. :param ex: Original exception. :param unit: Unit which cause this exception. :param value: Value passed in. :return: Wrapped exception object, which should be an instance of \ ``type(ex)`` and :class:`argsloader.base.exception.ParseError`. Examples:: >>> from argsloader.base import wrap_exception, ParseError >>> err = wrap_exception(ValueError('this is message', 2, 3, 4), 'unit', 'value') >>> err <ValueParseError 0x7f13877146a8 message: 'this is message', unit: 'unit', value: 'value', info: (2, 3, 4)> >>> isinstance(err, ParseError) True >>> isinstance(err, ValueError) True >>> err.message 'this is message' >>> err.unit 'unit' >>> err.value 'value' >>> err.info (2, 3, 4) # noinspection PyCallingNonCallable Overview: Full result of one parsing process. Can be seen as collection of :class:`argsloader.base.exception.ParseError`. Constructor of class :class:`argsloader.base.exception.MultipleParseError`. 
:param items: Parse error items. Parse error items. Overview: Error used when parsing process is skipped due to the forwarded error. Constructor of class :class:`argsloader.base.exception.SkippedParseError`. :param unit: Unit which should do this parsing process. | 2.288059 | 2 |
recipes/reproject.py | isabella232/rasterio-cookbook | 38 | 6612545 | import numpy as np
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio import crs
# Input raster and output path for the reprojected copy.
rgb = 'tests/data/world.tif'
out = '/tmp/reproj.tif'
# Reproject to NAD83(HARN) / Hawaii zone 3 (ftUS) - Transverse Mercator
dst_crs = crs.from_string("EPSG:3759")
# CHECK_WITH_INVERT_PROJ asks GDAL to verify reprojected points round-trip.
with rasterio.Env(CHECK_WITH_INVERT_PROJ=True):
    with rasterio.open(rgb) as src:
        profile = src.profile
        # Calculate the ideal dimensions and transformation in the new crs
        dst_affine, dst_width, dst_height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds)
        # update the relevant parts of the profile
        profile.update({
            'crs': dst_crs,
            'transform': dst_affine,
            'width': dst_width,
            'height': dst_height
        })
        # Reproject and write each band
        with rasterio.open(out, 'w', **profile) as dst:
            for i in range(1, src.count + 1):
                src_array = src.read(i)
                # NOTE(review): dtype is hard-coded to uint8 -- this assumes
                # the source raster is 8-bit; confirm before reusing elsewhere.
                dst_array = np.empty((dst_height, dst_width), dtype='uint8')
                reproject(
                    # Source parameters
                    source=src_array,
                    src_crs=src.crs,
                    src_transform=src.transform,
                    # Destination parameters
                    destination=dst_array,
                    dst_transform=dst_affine,
                    dst_crs=dst_crs,
                    # Configuration
                    resampling=Resampling.nearest,
                    num_threads=2)
                dst.write(dst_array, i)
| import numpy as np
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio import crs
# Source raster and destination path.
rgb = 'tests/data/world.tif'
out = '/tmp/reproj.tif'
# Reproject to NAD83(HARN) / Hawaii zone 3 (ftUS) - Transverse Mercator
dst_crs = crs.from_string("EPSG:3759")
with rasterio.Env(CHECK_WITH_INVERT_PROJ=True):
    with rasterio.open(rgb) as src:
        profile = src.profile
        # Calculate the ideal dimensions and transformation in the new crs
        dst_affine, dst_width, dst_height = calculate_default_transform(
            src.crs, dst_crs, src.width, src.height, *src.bounds)
        # update the relevant parts of the profile
        profile.update({
            'crs': dst_crs,
            'transform': dst_affine,
            'width': dst_width,
            'height': dst_height
        })
        # Reproject and write each band
        with rasterio.open(out, 'w', **profile) as dst:
            for i in range(1, src.count + 1):
                src_array = src.read(i)
                # NOTE(review): assumes an 8-bit source raster -- confirm.
                dst_array = np.empty((dst_height, dst_width), dtype='uint8')
                reproject(
                    # Source parameters
                    source=src_array,
                    src_crs=src.crs,
                    src_transform=src.transform,
                    # Destination parameters
                    destination=dst_array,
                    dst_transform=dst_affine,
                    dst_crs=dst_crs,
                    # Configuration
                    resampling=Resampling.nearest,
                    num_threads=2)
                dst.write(dst_array, i)
| en | 0.622284 | # Reproject to NAD83(HARN) / Hawaii zone 3 (ftUS) - Transverse Mercator # Calculate the ideal dimensions and transformation in the new crs # update the relevant parts of the profile # Reproject and write each band # Source parameters # Destination paramaters # Configuration | 2.296498 | 2 |
source/themes/current_theme.py | Pilifer/roguelike | 25 | 6612546 | <reponame>Pilifer/roguelike<filename>source/themes/current_theme.py
from themes.custom_1_constants import *
from themes.custom_1_textures import *
| from themes.custom_1_constants import *
from themes.custom_1_textures import * | none | 1 | 1.122621 | 1 | |
tests/test_fusion-continuous.py | creisle/rna_sv_simulator | 7 | 6612547 | import random
import unittest
from unittest.mock import MagicMock, patch
from mavis.annotate import genomic as _genomic
from mavis import breakpoint as _breakpoint
from mavis import constants
from rna_sv_simulator import fusion
class TestMutateContinuous(unittest.TestCase):
    """Tests for fusion._mutate_continuous across breakpoint geometries.

    Fix: two test methods were named ``..._neg_post`` -- a typo for
    ``..._neg_pos`` (their strand arguments are ``'-', '+'``); they are
    renamed to match the naming scheme of the other tests.
    """
    def setUp(self):
        # A 1 kb random chromosome plus a handful of genes on both strands.
        self.chr1 = ''.join([random.choice('ATCG') for i in range(1000)])
        self.reference_genome = {
            '1': MagicMock(seq=self.chr1),
        }
        self.annotations = {'1': [
            self.create_gene('1', '+', [(100, 200), (250, 300), (320, 360)]),
            self.create_gene('1', '-', [(800, 820), (830, 850)]),
            self.create_gene('1', '+', [(300, 350), (420, 500)]),
            self.create_gene('1', '-', [(300, 350), (380, 500)]),
            self.create_gene('1', '-', [(800, 820), (830, 850)]),
        ]}
    def create_gene(self, chr, strand, exons):
        """Build a Gene with one pre-transcript covering *exons* (+/- 10 bp)."""
        start = min([x for x, y in exons]) - 10
        end = max([y for x, y in exons]) + 10
        gene = _genomic.Gene(chr, start, end, strand=strand)
        new_exons = [_genomic.Exon(start, end, strand=strand) for start, end in exons]
        pre_transcript = _genomic.PreTranscript(new_exons, gene=gene)
        gene.unspliced_transcripts.append(pre_transcript)
        for spl_patt in pre_transcript.generate_splicing_patterns():
            transcript = _genomic.Transcript(pre_transcript, spl_patt)
            pre_transcript.transcripts.append(transcript)
        return gene
    def mutate(self, strand1, strand2, orient1, orient2, event):
        """Apply one breakpoint pair at 400/700; return (bpp, genome, annotations)."""
        bpp = _breakpoint.BreakpointPair(
            _breakpoint.Breakpoint('1', 400, strand=strand1, orient=orient1),
            _breakpoint.Breakpoint('1', 700, strand=strand2, orient=orient2),
            untemplated_seq='',
            event_type=event
        )
        new_reference_genome, new_annotations = fusion._mutate_continuous(
            self.reference_genome,
            self.annotations,
            bpp)
        return bpp, new_reference_genome, new_annotations
    # deletion event type
    def test_del_lr_pos_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '+', 'L', 'R', constants.SVTYPE.DEL
        )
        exp_seq = self.chr1[:400] + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_del_lr_neg_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '-', 'L', 'R', constants.SVTYPE.DEL
        )
        exp_seq = self.chr1[:400] + self.chr1[699:]
        self.assertEqual(len(exp_seq), len(new_reference_genome))
        self.assertEqual(exp_seq, new_reference_genome)
    # duplication event type
    def test_dup_rl_pos_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '+', 'R', 'L', constants.SVTYPE.DUP
        )
        exp_seq = self.chr1[:399] + self.chr1[399:700] + self.chr1[399:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_dup_rl_neg_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '-', 'R', 'L', constants.SVTYPE.DUP
        )
        exp_seq = self.chr1[:399] + self.chr1[399:700] + self.chr1[399:]
        self.assertEqual(exp_seq, new_reference_genome)
    # inversion event type
    def test_inv_ll_pos_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '-', 'L', 'L', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:400] + constants.reverse_complement(self.chr1[400:700]) + self.chr1[700:]
        self.assertEqual(len(exp_seq), len(self.chr1))
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_ll_neg_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '+', 'L', 'L', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:400] + constants.reverse_complement(self.chr1[400:700]) + self.chr1[700:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_rr_pos_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '-', 'R', 'R', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:399] + constants.reverse_complement(self.chr1[399:699]) + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_rr_neg_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '+', 'R', 'R', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:399] + constants.reverse_complement(self.chr1[399:699]) + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
| import random
import unittest
from unittest.mock import MagicMock, patch
from mavis.annotate import genomic as _genomic
from mavis import breakpoint as _breakpoint
from mavis import constants
from rna_sv_simulator import fusion
class TestMutateContinuous(unittest.TestCase):
    """Tests for fusion._mutate_continuous over each breakpoint geometry.

    Breakpoints are always placed at positions 400 and 700 on a random
    1 kb chromosome; each test checks the mutated sequence.
    NOTE(review): the ``..._neg_post`` method names look like typos for
    ``..._neg_pos`` (their strand arguments are ``'-', '+'``).
    """
    def setUp(self):
        # Random 1 kb chromosome plus genes on both strands.
        self.chr1 = ''.join([random.choice('ATCG') for i in range(1000)])
        self.reference_genome = {
            '1': MagicMock(seq=self.chr1),
        }
        self.annotations = {'1': [
            self.create_gene('1', '+', [(100, 200), (250, 300), (320, 360)]),
            self.create_gene('1', '-', [(800, 820), (830, 850)]),
            self.create_gene('1', '+', [(300, 350), (420, 500)]),
            self.create_gene('1', '-', [(300, 350), (380, 500)]),
            self.create_gene('1', '-', [(800, 820), (830, 850)]),
        ]}
    def create_gene(self, chr, strand, exons):
        # Build a Gene spanning the exons (+/- 10 bp) with one
        # pre-transcript and all of its splicing-pattern transcripts.
        start = min([x for x, y in exons]) - 10
        end = max([y for x, y in exons]) + 10
        gene = _genomic.Gene(chr, start, end, strand=strand)
        new_exons = [_genomic.Exon(start, end, strand=strand) for start, end in exons]
        pre_transcript = _genomic.PreTranscript(new_exons, gene=gene)
        gene.unspliced_transcripts.append(pre_transcript)
        for spl_patt in pre_transcript.generate_splicing_patterns():
            transcript = _genomic.Transcript(pre_transcript, spl_patt)
            pre_transcript.transcripts.append(transcript)
        return gene
    def mutate(self, strand1, strand2, orient1, orient2, event):
        # Apply one breakpoint pair at 400/700 and run the code under test.
        bpp = _breakpoint.BreakpointPair(
            _breakpoint.Breakpoint('1', 400, strand=strand1, orient=orient1),
            _breakpoint.Breakpoint('1', 700, strand=strand2, orient=orient2),
            untemplated_seq='',
            event_type=event
        )
        new_reference_genome, new_annotations = fusion._mutate_continuous(
            self.reference_genome,
            self.annotations,
            bpp)
        return bpp, new_reference_genome, new_annotations
    # deletion event type
    def test_del_lr_pos_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '+', 'L', 'R', constants.SVTYPE.DEL
        )
        exp_seq = self.chr1[:400] + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_del_lr_neg_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '-', 'L', 'R', constants.SVTYPE.DEL
        )
        exp_seq = self.chr1[:400] + self.chr1[699:]
        self.assertEqual(len(exp_seq), len(new_reference_genome))
        self.assertEqual(exp_seq, new_reference_genome)
    # duplication event type
    def test_dup_rl_pos_pos(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '+', 'R', 'L', constants.SVTYPE.DUP
        )
        exp_seq = self.chr1[:399] + self.chr1[399:700] + self.chr1[399:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_dup_rl_neg_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '-', 'R', 'L', constants.SVTYPE.DUP
        )
        exp_seq = self.chr1[:399] + self.chr1[399:700] + self.chr1[399:]
        self.assertEqual(exp_seq, new_reference_genome)
    # inversion event type
    def test_inv_ll_pos_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '-', 'L', 'L', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:400] + constants.reverse_complement(self.chr1[400:700]) + self.chr1[700:]
        self.assertEqual(len(exp_seq), len(self.chr1))
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_ll_neg_post(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '+', 'L', 'L', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:400] + constants.reverse_complement(self.chr1[400:700]) + self.chr1[700:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_rr_pos_neg(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '+', '-', 'R', 'R', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:399] + constants.reverse_complement(self.chr1[399:699]) + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
    def test_inv_rr_neg_post(self):
        bpp, new_reference_genome, new_annotations = self.mutate(
            '-', '+', 'R', 'R', constants.SVTYPE.INV
        )
        exp_seq = self.chr1[:399] + constants.reverse_complement(self.chr1[399:699]) + self.chr1[699:]
        self.assertEqual(exp_seq, new_reference_genome)
| en | 0.797896 | # deletion event type # duplication event type # inversion event type | 2.179652 | 2 |
neptune/db/filter_wrapper.py | qiaokuahai/neptune | 1 | 6612548 | <reponame>qiaokuahai/neptune
from __future__ import absolute_import
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.sql.sqltypes import _type_map
from neptune.core import utils
def column_from_expression(table, expression):
    """Resolve *expression* to an attribute (column) on *table*.

    Returns an ``(expr_wrapper, column)`` pair; the wrapper slot is
    currently always ``None`` and is kept for interface compatibility.
    Missing attributes yield ``(None, None)``.
    """
    return None, getattr(table, expression, None)
def cast(column, value):
    """
    Cast a JSON/text column to the SQLAlchemy type matching the Python
    type of ``value``.  (Translated from the original Chinese docstring.)
    :param column: SQLAlchemy JSON column element; its ``astext`` accessor
        is used, so plain text comparison results when no SQL type matches.
    :type column: BinaryExpression
    :param value: Python value whose type selects the target SQL type.
    :type value: object
    """
    cast_to = _type_map.get(type(value), None)
    if cast_to is None:
        # No SQL counterpart for this Python type: compare as text.
        column = column.astext
    else:
        column = column.astext.cast(cast_to)
    return column
class NullFilter(object):
    """Base filter whose operators are all no-ops (they return None).

    ``make_empty_query`` supplies an always-false predicate so an
    unsupported filter matches nothing rather than everything.
    """
    def make_empty_query(self, column):
        # (col IS NULL) AND (col IS NOT NULL) can never both hold.
        return column.is_(None) & column.isnot(None)
    # The operators below are stubs; concrete subclasses override the
    # ones they support.
    def op(self, column, value):
        pass
    def op_in(self, column, value):
        pass
    def op_nin(self, column, value):
        pass
    def op_eq(self, column, value):
        pass
    def op_ne(self, column, value):
        pass
    def op_lt(self, column, value):
        pass
    def op_lte(self, column, value):
        pass
    def op_gt(self, column, value):
        pass
    def op_gte(self, column, value):
        pass
    def op_like(self, column, value):
        pass
    def op_starts(self, column, value):
        pass
    def op_ends(self, column, value):
        pass
    def op_nlike(self, column, value):
        pass
    def op_ilike(self, column, value):
        pass
    def op_istarts(self, column, value):
        pass
    def op_iends(self, column, value):
        pass
    def op_nilike(self, column, value):
        pass
    def op_nnull(self, column, value):
        pass
    def op_null(self, column, value):
        pass
class Filter(NullFilter):
    """Concrete filter: builds a SQLAlchemy expression for each operator.

    JSON columns arrive as ``BinaryExpression`` objects and are first cast
    (via :func:`cast`) to the SQL type matching the Python value.
    """
    def make_empty_query(self, column):
        """Return an always-false predicate (matches no rows)."""
        # Fix: the previous ``column == None & column != None`` parsed as
        # ``column == (None & column) != None`` because ``&`` binds tighter
        # than comparisons (which also chain with Python ``and``).
        # Parentheses restore the intended (col == NULL) & (col != NULL).
        return (column == None) & (column != None)
    def op(self, column, value):
        """Default operator: IN for list values, equality otherwise."""
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.in_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column == value
        return expr
    def op_in(self, column, value):
        """IN (membership) operator; equality for scalar values."""
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.in_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column == value
        return expr
    def op_nin(self, column, value):
        """NOT IN operator; inequality for scalar values."""
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.notin_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column != value
        return expr
    def op_eq(self, column, value):
        """Equality operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column == value
        return expr
    def op_ne(self, column, value):
        """Inequality operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column != value
        return expr
    def op_lt(self, column, value):
        """Less-than operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column < value
        return expr
    def op_lte(self, column, value):
        """Less-than-or-equal operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column <= value
        return expr
    def op_gt(self, column, value):
        """Greater-than operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column > value
        return expr
    def op_gte(self, column, value):
        """Greater-than-or-equal operator."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column >= value
        return expr
    def op_like(self, column, value):
        """Case-sensitive substring match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%%%s%%' % value)
        return expr
    def op_starts(self, column, value):
        """Case-sensitive prefix match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%s%%' % value)
        return expr
    def op_ends(self, column, value):
        """Case-sensitive suffix match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%%%s' % value)
        return expr
    def op_nlike(self, column, value):
        """Negated case-sensitive substring match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.notlike('%%%s%%' % value)
        return expr
    def op_ilike(self, column, value):
        """Case-insensitive substring match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%%%s%%' % value)
        return expr
    def op_istarts(self, column, value):
        """Case-insensitive prefix match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%s%%' % value)
        return expr
    def op_iends(self, column, value):
        """Case-insensitive suffix match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%%%s' % value)
        return expr
    def op_nilike(self, column, value):
        """Negated case-insensitive substring match."""
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.notilike('%%%s%%' % value)
        return expr
    def op_nnull(self, column, value):
        """IS NOT NULL test (value is ignored)."""
        expr = column.isnot(None)
        return expr
    def op_null(self, column, value):
        """IS NULL test (value is ignored)."""
        expr = column.is_(None)
        return expr
class FilterNumber(Filter):
    """Filter for numeric columns -- pattern/LIKE operators are disabled.

    (Translated from the original Chinese docstring "数字类型过滤",
    i.e. "numeric type filtering".)
    """
    # LIKE-style matching is meaningless on numbers, so these overrides
    # revert to no-ops (returning None, as in NullFilter).
    def op_like(self, column, value):
        pass
    def op_nlike(self, column, value):
        pass
    def op_starts(self, column, value):
        pass
    def op_ends(self, column, value):
        pass
    def op_ilike(self, column, value):
        pass
    def op_nilike(self, column, value):
        pass
    def op_istarts(self, column, value):
        pass
    def op_iends(self, column, value):
        pass
| from __future__ import absolute_import
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.sql.sqltypes import _type_map
from neptune.core import utils
def column_from_expression(table, expression):
    # Resolve *expression* as an attribute name on *table*; returns
    # (expr_wrapper, column).  The wrapper slot is a placeholder that is
    # currently always None; missing attributes yield (None, None).
    expr_wrapper = None
    column = getattr(table, expression, None)
    return expr_wrapper, column
def cast(column, value):
    """Cast a JSON/text column to the SQLAlchemy type matching the
    Python type of ``value``; fall back to plain text comparison when
    no matching SQL type is registered.
    """
    target_type = _type_map.get(type(value), None)
    if target_type is None:
        return column.astext
    return column.astext.cast(target_type)
class NullFilter(object):
    """Base filter: every operator declines to build an expression."""
    def make_empty_query(self, column):
        """Build a predicate that can never match any row."""
        never_true = column.is_(None) & column.isnot(None)
        return never_true
    def op(self, column, value):
        return None
    def op_in(self, column, value):
        return None
    def op_nin(self, column, value):
        return None
    def op_eq(self, column, value):
        return None
    def op_ne(self, column, value):
        return None
    def op_lt(self, column, value):
        return None
    def op_lte(self, column, value):
        return None
    def op_gt(self, column, value):
        return None
    def op_gte(self, column, value):
        return None
    def op_like(self, column, value):
        return None
    def op_starts(self, column, value):
        return None
    def op_ends(self, column, value):
        return None
    def op_nlike(self, column, value):
        return None
    def op_ilike(self, column, value):
        return None
    def op_istarts(self, column, value):
        return None
    def op_iends(self, column, value):
        return None
    def op_nilike(self, column, value):
        return None
    def op_nnull(self, column, value):
        return None
    def op_null(self, column, value):
        return None
class Filter(NullFilter):
    """Concrete filter building a SQLAlchemy expression per operator.

    JSON columns (``BinaryExpression``) are first cast to the SQL type
    matching the Python value via :func:`cast`.
    """
    def make_empty_query(self, column):
        # BUG: ``&`` binds tighter than comparisons, so this evaluates as
        # ``column == (None & column) != None`` -- a chained comparison
        # joined with Python ``and`` -- not the intended
        # ``(column == None) & (column != None)``.  Should be parenthesized.
        return column == None & column != None
    def op(self, column, value):
        # Default operator: IN for list values, equality for scalars.
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.in_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column == value
        return expr
    def op_in(self, column, value):
        # Same semantics as op(): membership test, equality for scalars.
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.in_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column == value
        return expr
    def op_nin(self, column, value):
        # Negated membership; inequality for scalar values.
        if utils.is_list_type(value):
            if isinstance(column, BinaryExpression):
                column = cast(column, value[0])
            expr = column.notin_(tuple(value))
        else:
            if isinstance(column, BinaryExpression):
                column = cast(column, value)
            expr = column != value
        return expr
    def op_eq(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column == value
        return expr
    def op_ne(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column != value
        return expr
    def op_lt(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column < value
        return expr
    def op_lte(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column <= value
        return expr
    def op_gt(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column > value
        return expr
    def op_gte(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column >= value
        return expr
    # LIKE-family operators: '%%' in the %-format templates produces a
    # literal '%' wildcard around/beside the value.
    def op_like(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%%%s%%' % value)
        return expr
    def op_starts(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%s%%' % value)
        return expr
    def op_ends(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.like('%%%s' % value)
        return expr
    def op_nlike(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.notlike('%%%s%%' % value)
        return expr
    def op_ilike(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%%%s%%' % value)
        return expr
    def op_istarts(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%s%%' % value)
        return expr
    def op_iends(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.ilike('%%%s' % value)
        return expr
    def op_nilike(self, column, value):
        if isinstance(column, BinaryExpression):
            column = cast(column, value)
        expr = column.notilike('%%%s%%' % value)
        return expr
    def op_nnull(self, column, value):
        # IS NOT NULL test; value is ignored.
        expr = column.isnot(None)
        return expr
    def op_null(self, column, value):
        # IS NULL test; value is ignored.
        expr = column.is_(None)
        return expr
class FilterNumber(Filter):
    """Filter for numeric columns -- pattern/LIKE operators are disabled.

    (Translated from the original Chinese docstring "数字类型过滤".)
    """
    # LIKE-style matching is meaningless on numbers; these overrides
    # return None so no expression is emitted.
    def op_like(self, column, value):
        pass
    def op_nlike(self, column, value):
        pass
    def op_starts(self, column, value):
        pass
    def op_ends(self, column, value):
        pass
    def op_ilike(self, column, value):
        pass
    def op_nilike(self, column, value):
        pass
    def op_istarts(self, column, value):
        pass
def op_iends(self, column, value):
pass | en | 0.17398 | 将python类型值转换为SQLAlchemy类型值 :param column: :type column: :param value: :type value: 数字类型过滤 | 2.307755 | 2 |
src/year2018/day12b.py | lancelote/advent_of_code | 10 | 6612549 | """Day 12 Part 2: Subterranean Sustainability.
You realize that 20 generations aren't enough. After all, these plants will
need to last another 1500 years to even reach your timeline, not to mention
your future.
After fifty billion (50000000000) generations, what is the sum of the numbers
of all pots which contain a plant?
"""
from collections import deque
from src.year2018.day12a import get_new_generation
from src.year2018.day12a import process_data
def solve(task: str) -> int:
    """Find the sum of all pots id with plants after 50 billion generations.

    Simulates until the generation-to-generation growth of the pot-id sum
    becomes constant (three equal consecutive diffs), then extrapolates
    linearly over the remaining generations.
    """
    stable_diff = 0
    generations = 50_000_000_000
    generation, patterns = process_data(task)
    prev_sum = sum(generation.keys())
    # maxlen=3 makes append() evict the oldest diff automatically, replacing
    # the previous manual popleft()/append() pair.  The distinct seed values
    # 0, 1, 2 guarantee the "all equal" check cannot fire prematurely.
    prev_diffs = deque([0, 1, 2], maxlen=3)
    while generations:
        new_generation = get_new_generation(generation, patterns)
        new_sum = sum(new_generation.keys())
        new_diff = new_sum - prev_sum
        prev_diffs.append(new_diff)
        if len(set(prev_diffs)) == 1:
            # Growth is now linear; stop simulating and extrapolate below.
            stable_diff = new_diff
            print(f"stable diff: {stable_diff}")
            print(f"generation: {generations}")
            break
        prev_sum = new_sum
        generation = new_generation
        generations -= 1
    return prev_sum + generations * stable_diff
| """Day 12 Part 2: Subterranean Sustainability.
You realize that 20 generations aren't enough. After all, these plants will
need to last another 1500 years to even reach your timeline, not to mention
your future.
After fifty billion (50000000000) generations, what is the sum of the numbers
of all pots which contain a plant?
"""
from collections import deque
from src.year2018.day12a import get_new_generation
from src.year2018.day12a import process_data
def solve(task: str) -> int:
    """Find the sum of all pots id with plants after 50 billion generations.

    Simulating 5e10 steps directly is infeasible.  Instead, the simulation
    runs only until the per-generation change of the pot-index sum has been
    identical for three consecutive generations, after which the remaining
    growth is extrapolated linearly.
    """
    stable_diff = 0
    generations = 50_000_000_000  # steps still to perform
    generation, patterns = process_data(task)
    prev_sum = sum(generation.keys())
    # Sliding window of the last three sum deltas.  Seeded with distinct
    # values so the stability check cannot fire before three real steps.
    prev_diffs = deque([0, 1, 2])
    while generations:
        new_generation = get_new_generation(generation, patterns)
        new_sum = sum(new_generation.keys())
        new_diff = new_sum - prev_sum
        prev_diffs.popleft()
        prev_diffs.append(new_diff)
        if len(set(prev_diffs)) == 1:
            # Growth is now linear.  prev_sum and generations are deliberately
            # left un-updated here: prev_sum + generations * stable_diff is
            # equal to new_sum + (generations - 1) * stable_diff, the sum
            # after the remaining steps.
            stable_diff = new_diff
            print(f"stable diff: {stable_diff}")
            print(f"generation: {generations}")
            break
        prev_sum = new_sum
        generation = new_generation
        generations -= 1
    # If the loop exhausted all generations, generations == 0 and the
    # extrapolation term vanishes.
    return prev_sum + generations * stable_diff
| en | 0.922451 | Day 12 Part 2: Subterranean Sustainability. You realize that 20 generations aren't enough. After all, these plants will need to last another 1500 years to even reach your timeline, not to mention your future. After fifty billion (50000000000) generations, what is the sum of the numbers of all pots which contain a plant? Find the sum of all pots id with plants after 50 billion generations. | 3.708643 | 4 |
formCreation/views.py | dumpalasaiavinash/Yike | 0 | 6612550 | from django.shortcuts import render
import ast
import json
import operator
import random
from datetime import datetime

import boto3
from boto3.dynamodb.conditions import Key, Attr

from django import forms
from django.http import HttpResponseRedirect
dynamodb = boto3.resource('dynamodb')
#Creating class for passing the problem
class assignEmployee:
    """Pick the least-loaded bottom-level employee to handle a complaint.

    The selection walks the stored department hierarchy, takes its lowest
    level (the last entry plus its contiguous same-parent siblings), collects
    the employees holding those titles, and assigns the complaint to
    whichever of them currently has the fewest complaints (random choice
    when none of them has any).

    Fixes over the previous version: ``random``/``ast``/``operator`` are now
    imported at module level; FilterExpressions chain all conditions with
    ``&`` (a Python ``and`` silently discarded all but the last condition);
    the complaint counter no longer reads an uninitialized loop flag or a
    missing dict key; a stray ``.strip(',')`` that should have been
    ``.split(',')`` is gone.
    """

    def __init__(self, cmp_id, org_id, dep_id=None):
        # cmp_id: complaint number being assigned.
        # org_id: organization the complaint belongs to.
        # dep_id: optional department id; when None, a random department with
        #         a stored hierarchy and at least one employee is used.
        self.cmp_id = cmp_id
        self.org_id = org_id
        self.dep_id = dep_id

    def _hierarchy_levels(self, hie_table, dep_id):
        """Return the parsed hierarchy entries for *dep_id*, or None if absent.

        The hierarchy is stored as a stringified Python literal wrapped in one
        extra character on each side, hence the [1:-1] slice before parsing.
        """
        response = hie_table.scan(
            ProjectionExpression="hierarchy",
            FilterExpression=Attr('dep_id').eq(dep_id)
        )
        if not response["Items"]:
            return None
        raw = response["Items"][0]['hierarchy']
        return ast.literal_eval(raw[1:len(raw) - 1])

    def _bottom_level_indices(self, levels):
        """Indices of the lowest hierarchy level.

        That is the last entry plus every immediately preceding entry sharing
        its parent id.  Index 0 (the root) is never included, matching the
        original scan range.
        """
        indices = [len(levels) - 1]
        bottom_pid = levels[-1]['pid']
        for i in range(len(levels) - 2, 0, -1):
            if levels[i]['pid'] == bottom_pid:
                indices.append(i)
            else:
                break
        return indices

    def _employees_at(self, emp_table, levels, indices, dep_name):
        """Employee ids in *dep_name* holding any of the indexed hierarchy titles."""
        emp_ids = []
        for i in indices:
            response = emp_table.scan(
                ProjectionExpression="emp_id",
                # All three conditions joined with '&'; a plain 'and' here
                # would drop the org_id/hierarchy filters entirely.
                FilterExpression=Attr('org_id').eq(self.org_id)
                & Attr('hierarchy').eq(levels[i]['hierarchy'])
                & Attr('department').eq(dep_name)
            )
            emp_ids.extend(int(em['emp_id']) for em in response["Items"])
        return emp_ids

    def _least_loaded(self, cmp_table, emp_ids):
        """Employee from *emp_ids* with the fewest complaints (random when all have none)."""
        wanted = set(emp_ids)
        counts = {}
        for cmpl in cmp_table.scan()["Items"]:
            eid = int(cmpl['emp_id'])
            if eid in wanted:
                counts[eid] = counts.get(eid, 0) + 1
        if not counts:
            return random.choice(emp_ids)
        return min(counts.items(), key=operator.itemgetter(1))[0]

    def assign(self):
        """Return ``(employee_id, complaint_id)`` for this complaint."""
        dynamodb = boto3.resource('dynamodb')
        emp_table = dynamodb.Table('employees')
        cmp_table = dynamodb.Table('ComplaintS')
        dep_table = dynamodb.Table('departments')
        hie_table = dynamodb.Table('hierarchy')

        if self.dep_id is None:
            response = dep_table.scan(
                ProjectionExpression="department_id,department_name",
                FilterExpression=Attr('organization_id').eq(self.org_id)
            )
            departments = [
                (int(de['department_id']), str(de['department_name']))
                for de in response["Items"]
            ]
            # Keep drawing random departments until one has both a stored
            # hierarchy and at least one employee at its bottom level.
            while True:
                dep_id, dep_name = random.choice(departments)
                levels = self._hierarchy_levels(hie_table, dep_id)
                if levels is None:
                    continue
                indices = self._bottom_level_indices(levels)
                emp_ids = self._employees_at(emp_table, levels, indices, dep_name)
                if emp_ids:
                    break
        else:
            response = dep_table.scan(
                ProjectionExpression="department_id,department_name",
                FilterExpression=Attr('organization_id').eq(self.org_id)
                & Attr('department_id').eq(self.dep_id)
            )
            department = response['Items'][0]
            levels = self._hierarchy_levels(hie_table, department['department_id'])
            indices = self._bottom_level_indices(levels)
            emp_ids = self._employees_at(
                emp_table, levels, indices, department['department_name']
            )

        return self._least_loaded(cmp_table, emp_ids), self.cmp_id
def formCreateMain(req):
    """Render the form-creation landing page; unauthenticated users go to login."""
    if "email" not in req.session or "org_id" not in req.session:
        return HttpResponseRedirect('/login/')
    context = {"org_id": req.session["org_id"]}
    return render(req, "formCreation/form_main.html", context)
def testF(req):
    """Render the standalone form test template."""
    template_name = "formCreation/form2.html"
    return render(req, template_name)
def complaintIFrame(req):
    """Serve a dynamic complaint form in an iframe and store submissions.

    GET  -> look up the form definition by ``form_id`` and render it.
    POST -> flatten every field of the stored form definition into a
            ``Complaint00`` record (saving uploads to disk), auto-number the
            complaint, route it via ``assignEmployee`` and persist it to the
            ``ComplaintS`` table.
    Users without a session email are redirected to the login page.
    """
    if 'email' in req.session :
        if req.method == "GET" :
            formId = req.GET["form_id"]
            table = dynamodb.Table('Complaint_forms')
            # Full-table scan filtered by form_id (no key query available).
            response1 = table.scan(
                FilterExpression=Attr('form_id').eq(formId)
            )
            if response1["Count"] > 0 :
                form =response1["Items"][0]
                print(form)  # debug output
                form0 = json.dumps(form)
                return render(req,'formCreation/iframe0.html',{'form':form0})
            # No matching form: falls through to the login redirect below.
        if req.method == "POST":
            formId = req.POST["form_id"]
            foRm=ComplaintForm(req.POST)
            table = dynamodb.Table('Complaint_forms')
            response1 = table.scan(
                FilterExpression=Attr('form_id').eq(formId)
            )
            if response1["Count"] > 0 :
                form =response1["Items"][0]
                if foRm.is_valid():
                    Complaint00 = {}
                    print(req.POST)  # debug output
                    # Every key of the stored form item (except the two id
                    # columns) holds a JSON-encoded field definition.
                    for field in form :
                        print(field)  # debug output
                        if field not in ["form_id" , "org_id" ] :
                            data00 = json.loads(form[field])
                            typE = data00["type"]
                            if typE in ["textfield","datepicker","textarea","radiogroup"] :
                                #if req.POST[field] == "" :
                                    #return render(req,'formCreation/iframe0.html',{'form':form0,"msg":"error"})
                                Complaint00[data00["label"]] = req.POST[field]
                            elif typE == "mobile" :
                                # Mobile widgets post two parts (prefix + number).
                                Complaint00[data00["label"]] = req.POST[field+"_0"]+req.POST[field+"_1"]
                            elif typE == "image_Upload" or typE == "file_Upload" :
                                # NOTE(review): files are written into the process
                                # CWD; minute-resolution names can collide for the
                                # same user — confirm this is acceptable.
                                fname = req.session["email"] + datetime.now().strftime("%Y%m%d%H%M")
                                Complaint00[data00["label"]] = fname
                                handle_uploaded_file(req.FILES[field],fname)
                            elif typE == "checkgroup" :
                                print(data00)  # debug output
                                # Collect each checkbox value; 'nR' is the count
                                # of checks stored in the field definition.
                                datum00 = {}
                                i = 0
                                lim = data00['nR']
                                while(i<lim):
                                    datum00[data00['checks'][str(i)]['label']] = req.POST[field+"_"+str(i)]
                                    print("0")
                                    i += 1
                                Complaint00[data00['label']] = json.dumps(datum00)
                        elif field == "form_id":
                            Complaint00["form_id"] = req.POST[field]
                        elif field == "org_id" :
                            Complaint00["org_id"] = req.POST[field]
                            orgid=req.POST[field]
                    # NOTE(review): 'orgid' is only bound when the stored form has
                    # an 'org_id' key; otherwise the assignEmployee call below
                    # raises NameError — confirm every form stores org_id.
                    Complaint00["user_email"] = req.session["email"]
                    Complaint00["complaint_status"] = 0
                    Complaint00["complaint_timestamp"] = datetime.now().strftime("%Y%m%d%H%M%S")
                    table = dynamodb.Table("ComplaintS")
                    response1 = table.scan()
                    # NOTE(review): numbering by current row count is racy under
                    # concurrent submissions — duplicates are possible.
                    u_id = len(response1['Items'])+1
                    Complaint00["complaint_number"] = "Complaint" + str(u_id)
                    emp_id,cmp_id=assignEmployee(("Complaint" + str(u_id)),orgid).assign()
                    Complaint00["emp_id"]=emp_id
                    table.put_item(
                        Item= Complaint00
                    )
                    return HttpResponseRedirect("/")
                # Invalid form data: re-render the iframe with the definition.
                form0 = json.dumps(form)
                print(req.POST)  # debug output
                return render(req,'formCreation/iframe0.html',{'form':form0})
    return HttpResponseRedirect('/login/')
# Create your views here.
def handle_uploaded_file(f , filename):
    """Persist an uploaded file to *filename*, streaming it chunk by chunk."""
    with open(filename, 'wb+') as out:
        out.writelines(f.chunks())
class ComplaintForm(forms.Form):
    """Field-less Django form used to bind complaint POST data.

    The complaint fields are defined dynamically by the stored form
    definition, so none are declared here; ``is_valid()`` therefore only
    checks that the bound data is well-formed, and per-field handling
    happens manually in ``complaintIFrame``.

    A stray debug ``print("hello")`` that executed once at import time has
    been removed.
    """
| from django.shortcuts import render
import ast
import json
import operator
import random
from datetime import datetime

import boto3
from boto3.dynamodb.conditions import Key, Attr

from django import forms
from django.http import HttpResponseRedirect
dynamodb = boto3.resource('dynamodb')
#Creating class for passing the problem
class assignEmployee:
    """Pick the least-loaded bottom-level employee to handle a complaint.

    The selection walks the stored department hierarchy, takes its lowest
    level (the last entry plus its contiguous same-parent siblings), collects
    the employees holding those titles, and assigns the complaint to
    whichever of them currently has the fewest complaints (random choice
    when none of them has any).

    Fixes over the previous version: ``random``/``ast``/``operator`` are now
    imported at module level; FilterExpressions chain all conditions with
    ``&`` (a Python ``and`` silently discarded all but the last condition);
    the complaint counter no longer reads an uninitialized loop flag or a
    missing dict key; a stray ``.strip(',')`` that should have been
    ``.split(',')`` is gone.
    """

    def __init__(self, cmp_id, org_id, dep_id=None):
        # cmp_id: complaint number being assigned.
        # org_id: organization the complaint belongs to.
        # dep_id: optional department id; when None, a random department with
        #         a stored hierarchy and at least one employee is used.
        self.cmp_id = cmp_id
        self.org_id = org_id
        self.dep_id = dep_id

    def _hierarchy_levels(self, hie_table, dep_id):
        """Return the parsed hierarchy entries for *dep_id*, or None if absent.

        The hierarchy is stored as a stringified Python literal wrapped in one
        extra character on each side, hence the [1:-1] slice before parsing.
        """
        response = hie_table.scan(
            ProjectionExpression="hierarchy",
            FilterExpression=Attr('dep_id').eq(dep_id)
        )
        if not response["Items"]:
            return None
        raw = response["Items"][0]['hierarchy']
        return ast.literal_eval(raw[1:len(raw) - 1])

    def _bottom_level_indices(self, levels):
        """Indices of the lowest hierarchy level.

        That is the last entry plus every immediately preceding entry sharing
        its parent id.  Index 0 (the root) is never included, matching the
        original scan range.
        """
        indices = [len(levels) - 1]
        bottom_pid = levels[-1]['pid']
        for i in range(len(levels) - 2, 0, -1):
            if levels[i]['pid'] == bottom_pid:
                indices.append(i)
            else:
                break
        return indices

    def _employees_at(self, emp_table, levels, indices, dep_name):
        """Employee ids in *dep_name* holding any of the indexed hierarchy titles."""
        emp_ids = []
        for i in indices:
            response = emp_table.scan(
                ProjectionExpression="emp_id",
                # All three conditions joined with '&'; a plain 'and' here
                # would drop the org_id/hierarchy filters entirely.
                FilterExpression=Attr('org_id').eq(self.org_id)
                & Attr('hierarchy').eq(levels[i]['hierarchy'])
                & Attr('department').eq(dep_name)
            )
            emp_ids.extend(int(em['emp_id']) for em in response["Items"])
        return emp_ids

    def _least_loaded(self, cmp_table, emp_ids):
        """Employee from *emp_ids* with the fewest complaints (random when all have none)."""
        wanted = set(emp_ids)
        counts = {}
        for cmpl in cmp_table.scan()["Items"]:
            eid = int(cmpl['emp_id'])
            if eid in wanted:
                counts[eid] = counts.get(eid, 0) + 1
        if not counts:
            return random.choice(emp_ids)
        return min(counts.items(), key=operator.itemgetter(1))[0]

    def assign(self):
        """Return ``(employee_id, complaint_id)`` for this complaint."""
        dynamodb = boto3.resource('dynamodb')
        emp_table = dynamodb.Table('employees')
        cmp_table = dynamodb.Table('ComplaintS')
        dep_table = dynamodb.Table('departments')
        hie_table = dynamodb.Table('hierarchy')

        if self.dep_id is None:
            response = dep_table.scan(
                ProjectionExpression="department_id,department_name",
                FilterExpression=Attr('organization_id').eq(self.org_id)
            )
            departments = [
                (int(de['department_id']), str(de['department_name']))
                for de in response["Items"]
            ]
            # Keep drawing random departments until one has both a stored
            # hierarchy and at least one employee at its bottom level.
            while True:
                dep_id, dep_name = random.choice(departments)
                levels = self._hierarchy_levels(hie_table, dep_id)
                if levels is None:
                    continue
                indices = self._bottom_level_indices(levels)
                emp_ids = self._employees_at(emp_table, levels, indices, dep_name)
                if emp_ids:
                    break
        else:
            response = dep_table.scan(
                ProjectionExpression="department_id,department_name",
                FilterExpression=Attr('organization_id').eq(self.org_id)
                & Attr('department_id').eq(self.dep_id)
            )
            department = response['Items'][0]
            levels = self._hierarchy_levels(hie_table, department['department_id'])
            indices = self._bottom_level_indices(levels)
            emp_ids = self._employees_at(
                emp_table, levels, indices, department['department_name']
            )

        return self._least_loaded(cmp_table, emp_ids), self.cmp_id
def formCreateMain(req):
    """Render the form-creation landing page; unauthenticated users go to login."""
    if "email" not in req.session or "org_id" not in req.session:
        return HttpResponseRedirect('/login/')
    context = {"org_id": req.session["org_id"]}
    return render(req, "formCreation/form_main.html", context)
def testF(req):
    """Render the standalone form test template."""
    template_name = "formCreation/form2.html"
    return render(req, template_name)
def complaintIFrame(req):
    """Serve a dynamic complaint form in an iframe and store submissions.

    GET  -> look up the form definition by ``form_id`` and render it.
    POST -> flatten every field of the stored form definition into a
            ``Complaint00`` record (saving uploads to disk), auto-number the
            complaint, route it via ``assignEmployee`` and persist it to the
            ``ComplaintS`` table.
    Users without a session email are redirected to the login page.
    """
    if 'email' in req.session :
        if req.method == "GET" :
            formId = req.GET["form_id"]
            table = dynamodb.Table('Complaint_forms')
            # Full-table scan filtered by form_id (no key query available).
            response1 = table.scan(
                FilterExpression=Attr('form_id').eq(formId)
            )
            if response1["Count"] > 0 :
                form =response1["Items"][0]
                print(form)  # debug output
                form0 = json.dumps(form)
                return render(req,'formCreation/iframe0.html',{'form':form0})
            # No matching form: falls through to the login redirect below.
        if req.method == "POST":
            formId = req.POST["form_id"]
            foRm=ComplaintForm(req.POST)
            table = dynamodb.Table('Complaint_forms')
            response1 = table.scan(
                FilterExpression=Attr('form_id').eq(formId)
            )
            if response1["Count"] > 0 :
                form =response1["Items"][0]
                if foRm.is_valid():
                    Complaint00 = {}
                    print(req.POST)  # debug output
                    # Every key of the stored form item (except the two id
                    # columns) holds a JSON-encoded field definition.
                    for field in form :
                        print(field)  # debug output
                        if field not in ["form_id" , "org_id" ] :
                            data00 = json.loads(form[field])
                            typE = data00["type"]
                            if typE in ["textfield","datepicker","textarea","radiogroup"] :
                                #if req.POST[field] == "" :
                                    #return render(req,'formCreation/iframe0.html',{'form':form0,"msg":"error"})
                                Complaint00[data00["label"]] = req.POST[field]
                            elif typE == "mobile" :
                                # Mobile widgets post two parts (prefix + number).
                                Complaint00[data00["label"]] = req.POST[field+"_0"]+req.POST[field+"_1"]
                            elif typE == "image_Upload" or typE == "file_Upload" :
                                # NOTE(review): files are written into the process
                                # CWD; minute-resolution names can collide for the
                                # same user — confirm this is acceptable.
                                fname = req.session["email"] + datetime.now().strftime("%Y%m%d%H%M")
                                Complaint00[data00["label"]] = fname
                                handle_uploaded_file(req.FILES[field],fname)
                            elif typE == "checkgroup" :
                                print(data00)  # debug output
                                # Collect each checkbox value; 'nR' is the count
                                # of checks stored in the field definition.
                                datum00 = {}
                                i = 0
                                lim = data00['nR']
                                while(i<lim):
                                    datum00[data00['checks'][str(i)]['label']] = req.POST[field+"_"+str(i)]
                                    print("0")
                                    i += 1
                                Complaint00[data00['label']] = json.dumps(datum00)
                        elif field == "form_id":
                            Complaint00["form_id"] = req.POST[field]
                        elif field == "org_id" :
                            Complaint00["org_id"] = req.POST[field]
                            orgid=req.POST[field]
                    # NOTE(review): 'orgid' is only bound when the stored form has
                    # an 'org_id' key; otherwise the assignEmployee call below
                    # raises NameError — confirm every form stores org_id.
                    Complaint00["user_email"] = req.session["email"]
                    Complaint00["complaint_status"] = 0
                    Complaint00["complaint_timestamp"] = datetime.now().strftime("%Y%m%d%H%M%S")
                    table = dynamodb.Table("ComplaintS")
                    response1 = table.scan()
                    # NOTE(review): numbering by current row count is racy under
                    # concurrent submissions — duplicates are possible.
                    u_id = len(response1['Items'])+1
                    Complaint00["complaint_number"] = "Complaint" + str(u_id)
                    emp_id,cmp_id=assignEmployee(("Complaint" + str(u_id)),orgid).assign()
                    Complaint00["emp_id"]=emp_id
                    table.put_item(
                        Item= Complaint00
                    )
                    return HttpResponseRedirect("/")
                # Invalid form data: re-render the iframe with the definition.
                form0 = json.dumps(form)
                print(req.POST)  # debug output
                return render(req,'formCreation/iframe0.html',{'form':form0})
    return HttpResponseRedirect('/login/')
# Create your views here.
def handle_uploaded_file(f , filename):
    """Persist an uploaded file to *filename*, streaming it chunk by chunk."""
    with open(filename, 'wb+') as out:
        out.writelines(f.chunks())
class ComplaintForm(forms.Form):
    """Field-less Django form used to bind complaint POST data.

    The complaint fields are defined dynamically by the stored form
    definition, so none are declared here; ``is_valid()`` therefore only
    checks that the bound data is well-formed, and per-field handling
    happens manually in ``complaintIFrame``.

    A stray debug ``print("hello")`` that executed once at import time has
    been removed.
    """
| en | 0.74482 | #Creating class for passing the problem #if req.POST[field] == "" : #return render(req,'formCreation/iframe0.html',{'form':form0,"msg":"error"}) # Create your views here. | 1.943196 | 2 |