blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7f77067e14a521cef9f61088204ff8ed9acd968e | Python | KiewanVillatel/codingame | /a_code_of_ice_and_fire/a_code_of_ice_and_fire.py | UTF-8 | 2,574 | 2.890625 | 3 | [] | no_license | from .agents.silver_agent import ArgentAgent
from .model.building import Building, BuildingType
from .model.environment import Environment
from .model.map import Map
from .model.unit import Unit
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.

# Mine spot coordinates, stored as mine_spots[x][y] -> True for O(1) lookup.
mine_spots = {}
number_mine_spots = int(input())
for i in range(number_mine_spots):
    x, y = [int(j) for j in input().split()]
    if x not in mine_spots:
        mine_spots[x] = {}
    mine_spots[x][y] = True

# game loop
while True:
    # Rebuilt from scratch every turn. Renamed from `map` so the built-in
    # map() is not shadowed.
    game_map = Map()
    gold = int(input())
    income = int(input())
    opponent_gold = int(input())
    opponent_income = int(input())
    # The board is a fixed 12x12 grid; each character encodes one cell state.
    for y in range(12):
        line = input()
        # print(line, file=sys.stderr)
        for x, c in enumerate(line):
            cell = game_map.get_cell(x, y)
            if x in mine_spots and y in mine_spots[x]:
                cell.is_mine_spot = True
            # '#' void, '.' neutral, 'O'/'o' mine (active/inactive),
            # 'X'/'x' opponent's (active/inactive).
            if c == "#":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = True, False, False, False
            elif c == ".":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = False, False, False, True
            elif c == "O":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = False, True, True, False
            elif c == "o":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = False, True, False, False
            elif c == "X":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = False, False, True, False
            elif c == "x":
                cell.is_void, cell.is_owned, cell.is_active, cell.is_neutral = False, False, False, False
            else:
                raise Exception("Unable to parse cell")
    building_count = int(input())
    for i in range(building_count):
        owner, building_type, x, y = [int(j) for j in input().split()]
        building_type = BuildingType(building_type)
        building = Building(x=x, y=y, is_owned=owner == 0, type=building_type)
        game_map.get_cell(x, y).building = building
    unit_count = int(input())
    for i in range(unit_count):
        owner, unit_id, level, x, y = [int(j) for j in input().split()]
        unit = Unit(x=x, y=y, is_owned=owner == 0, unit_id=unit_id, level=level)
        game_map.get_cell(x, y).unit = unit
    # Let the agent pick and emit this turn's actions.
    agent = ArgentAgent()
    environment = Environment(grid=game_map,
                              gold=gold,
                              income=income,
                              opponent_gold=opponent_gold,
                              opponent_income=opponent_income)
    agent.act(environment)
    # Write an action using print
    # To debug: print("Debug messages...", file=sys.stderr)
| true |
222fbe3bd54b7bda889c2c3652243d380302cd58 | Python | siddharthachepur/Python_excercise | /BinaryTree.py | UTF-8 | 830 | 3.65625 | 4 | [] | no_license | class BinaryTree:
def __init__(self, data, left=None, right=None):
    """Create a tree node holding *data*, with optional child subtrees."""
    self.data = data
    self.left = left
    self.right = right
def inorder(self, node):
    """Print the subtree rooted at *node* in left-root-right order."""
    # The original used `print node.data,` which is Python-2-only syntax;
    # sys.stdout.write produces the same space-separated output on both
    # Python 2 and Python 3.
    import sys
    if node is not None:
        self.inorder(node.left)
        sys.stdout.write(str(node.data) + ' ')
        self.inorder(node.right)
def preorder(self, node):
    """Print the subtree rooted at *node* in root-left-right order."""
    # `print node.data,` is Python-2-only; sys.stdout.write is portable.
    import sys
    if node is not None:
        sys.stdout.write(str(node.data) + ' ')
        self.preorder(node.left)
        self.preorder(node.right)
def postorder(self, node):
    """Print the subtree rooted at *node* in left-right-root order."""
    # `print node.data,` is Python-2-only; sys.stdout.write is portable.
    import sys
    if node is not None:
        self.postorder(node.left)
        self.postorder(node.right)
        sys.stdout.write(str(node.data) + ' ')
import sys

# Build a small sample tree and print each traversal on its own line:
#        50
#       /  \
#     25    75
#    /  \
#  10    40
root = BinaryTree(50)
root.left = BinaryTree(25, BinaryTree(10), BinaryTree(40))
root.right = BinaryTree(75)
root.inorder(root)
sys.stdout.write('\n')  # the bare `print` statement used here was Python-2-only
root.preorder(root)
sys.stdout.write('\n')
root.postorder(root)
sys.stdout.write('\n')
| true |
d0ca16bb67f48aac441b69e50b22ae30436c5d51 | Python | sam-b0t/boussole | /boussole/watcher.py | UTF-8 | 10,265 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Source watcher
==============
Watcher is almost *isolated* from command line code because it runs in an
infinite loop, so note that handlers directly output some information on a
``logging.logger``.
"""
import os
import logging
import six
from pathtools.patterns import match_path
from watchdog.events import PatternMatchingEventHandler
from boussole.exceptions import BoussoleBaseException
from boussole.finder import ScssFinder
from boussole.compiler import SassCompileHelper
class SassLibraryEventHandler(object):
    """
    Watch mixin handler for library sources

    Handler does not compile source which triggered an event,
    only its parent dependencies. Because libraries are not intended to be
    compiled.

    Args:
        settings (boussole.conf.model.Settings): Project settings.
        inspector (boussole.inspector.ScssInspector): Inspector instance.

    Attributes:
        settings (boussole.conf.model.Settings): Filled from argument.
        logger (logging.Logger): Boussole logger.
        inspector (boussole.inspector.ScssInspector): Filled from argument.
        finder (boussole.finder.ScssFinder): Finder instance.
        compiler (boussole.compiler.SassCompileHelper): Sass compile helper
            object.
        compilable_files (dict): Pair of (source path, destination path) to
            compile. Automatically updated from ``index()`` method.
        source_files (list): List of source paths to compile. Automatically
            updated from ``index()`` method.
        _event_error (bool): Internal flag set to ``True`` if an error has
            occured within an event. ``index()`` will reset it to ``False``
            each time a new event occurs.
    """
    def __init__(self, settings, inspector, *args, **kwargs):
        self.settings = settings
        self.inspector = inspector
        self.logger = logging.getLogger("boussole")
        self.finder = ScssFinder()
        self.compiler = SassCompileHelper()
        self.compilable_files = {}
        self.source_files = []
        # Checked by every event handler so a broken project state does not
        # keep triggering compiles until the next successful index().
        self._event_error = False
        # Cooperative __init__ so this mixin composes with watchdog's
        # PatternMatchingEventHandler (see classes at the bottom of the file).
        super(SassLibraryEventHandler, self).__init__(*args, **kwargs)

    def index(self):
        """
        Reset inspector buffers and index project sources dependencies.

        This has to be executed each time an event occurs.

        Note:
            If a Boussole exception occurs during operation, it will be
            caught and an error flag will be set to ``True`` so event
            operation will be stopped without blocking or breaking watchdog
            observer.
        """
        self._event_error = False

        try:
            compilable_files = self.finder.mirror_sources(
                self.settings.SOURCES_PATH,
                targetdir=self.settings.TARGET_PATH,
                excludes=self.settings.EXCLUDES
            )
            self.compilable_files = dict(compilable_files)
            # NOTE(review): on Python 3 this is a live dict view, not a list
            # as documented above; it is only ever unpacked with *, so this
            # works, but confirm before relying on list semantics.
            self.source_files = self.compilable_files.keys()

            # Init inspector and do first inspect
            self.inspector.reset()
            self.inspector.inspect(
                *self.source_files,
                library_paths=self.settings.LIBRARY_PATHS
            )
        except BoussoleBaseException as e:
            self._event_error = True
            self.logger.error(six.text_type(e))

    def compile_source(self, sourcepath):
        """
        Compile source to its destination

        Check if the source is eligible to compile (not partial and allowed
        from exclude patterns)

        Args:
            sourcepath (string): Sass source path to compile to its
                destination using project settings.

        Returns:
            tuple or None: A pair of (sourcepath, destination), if source has
                been compiled (or at least tried). If the source was not
                eligible to compile, return will be ``None``.
        """
        relpath = os.path.relpath(sourcepath, self.settings.SOURCES_PATH)

        # Eligibility: not a partial (_*.scss), not excluded, not inside a
        # library directory.
        conditions = {
            'sourcedir': None,
            'nopartial': True,
            'exclude_patterns': self.settings.EXCLUDES,
            'excluded_libdirs': self.settings.LIBRARY_PATHS,
        }
        if self.finder.match_conditions(sourcepath, **conditions):
            destination = self.finder.get_destination(
                relpath,
                targetdir=self.settings.TARGET_PATH
            )
            self.logger.debug(u"Compile: {}".format(sourcepath))
            # safe_compile never raises; it reports (success, message).
            success, message = self.compiler.safe_compile(
                self.settings,
                sourcepath,
                destination
            )

            if success:
                self.logger.info(u"Output: {}".format(message))
            else:
                self.logger.error(message)

            return sourcepath, destination

        return None

    def compile_dependencies(self, sourcepath, include_self=False):
        """
        Apply compile on all dependencies

        Args:
            sourcepath (string): Sass source path to compile to its
                destination using project settings.

        Keyword Arguments:
            include_self (bool): If ``True`` the given sourcepath is added to
                items to compile, else only its dependencies are compiled.
        """
        items = self.inspector.parents(sourcepath)

        # Also add the current event related path
        if include_self:
            items.add(sourcepath)

        # Drop the None results of non-eligible sources. NOTE(review): on
        # Python 3 this returns a lazy filter object, but compilation already
        # happened eagerly in the list comprehension.
        return filter(None, [self.compile_source(item) for item in items])

    def on_any_event(self, event):
        """
        Catch-all event handler (moved, created, deleted, changed).

        Before any event, we index project to have the right and current
        dependencies map.

        Args:
            event: Watchdog event ``watchdog.events.FileSystemEvent``.
        """
        self.index()

    def on_moved(self, event):
        """
        Called when a file or a directory is moved or renamed.

        Many editors don't directly change a file, instead they make a
        transitional file like ``*.part`` then move it to the final filename.

        Args:
            event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or
                ``watchdog.events.FileModifiedEvent``.
        """
        if not self._event_error:
            # We are only interested for final file, not transitional file
            # from editors (like *.part)
            # NOTE(review): self.patterns/ignore_patterns/case_sensitive come
            # from the PatternMatchingEventHandler mixin this class is
            # combined with (see WatchdogLibraryEventHandler below).
            pathtools_options = {
                'included_patterns': self.patterns,
                'excluded_patterns': self.ignore_patterns,
                'case_sensitive': self.case_sensitive,
            }
            # Apply pathtool matching on destination since Watchdog only
            # automatically apply it on source
            if match_path(event.dest_path, **pathtools_options):
                self.logger.info(u"Change detected from a move on: %s",
                                 event.dest_path)

                self.compile_dependencies(event.dest_path)

    def on_created(self, event):
        """
        Called when a new file or directory is created.

        Todo:
            This should be also used (extended from another class?) to watch
            for some special name file (like ".boussole-watcher-stop" create to
            raise a KeyboardInterrupt, so we may be able to unittest the
            watcher (click.CliRunner is not able to send signal like CTRL+C
            that is required to watchdog observer loop)

        Args:
            event: Watchdog event, either ``watchdog.events.DirCreatedEvent``
                or ``watchdog.events.FileCreatedEvent``.
        """
        if not self._event_error:
            self.logger.info(u"Change detected from a create on: %s",
                             event.src_path)

            self.compile_dependencies(event.src_path)

    def on_modified(self, event):
        """
        Called when a file or directory is modified.

        Args:
            event: Watchdog event, ``watchdog.events.DirModifiedEvent`` or
                ``watchdog.events.FileModifiedEvent``.
        """
        if not self._event_error:
            self.logger.info(u"Change detected from an edit on: %s",
                             event.src_path)

            self.compile_dependencies(event.src_path)

    def on_deleted(self, event):
        """
        Called when a file or directory is deleted.

        Todo:
            May be bugged with inspector and sass compiler since the source
            does not exists anymore.

        Args:
            event: Watchdog event, ``watchdog.events.DirDeletedEvent`` or
                ``watchdog.events.FileDeletedEvent``.
        """
        if not self._event_error:
            self.logger.info(u"Change detected from deletion of: %s",
                             event.src_path)

            # Never try to compile the deleted source
            self.compile_dependencies(event.src_path, include_self=False)
class SassProjectEventHandler(SassLibraryEventHandler):
    """
    Watch mixin handler for project sources.

    Warning:
        DO NOT use this handler to watch libraries, there is a risk the
        compiler will try to compile their sources in a wrong directory.

    Source that trigger event is compiled (if eligible) with its dependencies.
    """
    def compile_dependencies(self, sourcepath, include_self=True):
        """
        Same as the inherited method but the default value for keyword
        argument ``include_self`` is ``True``.
        """
        return super(SassProjectEventHandler, self).compile_dependencies(
            sourcepath,
            include_self=include_self
        )
class WatchdogLibraryEventHandler(SassLibraryEventHandler,
                                  PatternMatchingEventHandler):
    """
    Watchdog event handler for library sources

    Combines the library mixin with watchdog's pattern-matching handler,
    which supplies ``patterns``/``ignore_patterns``/``case_sensitive``.
    """
    pass
class WatchdogProjectEventHandler(SassProjectEventHandler,
                                  PatternMatchingEventHandler):
    """
    Watchdog event handler for project sources.

    Warning:
        DO NOT use this handler to watch libraries, there is a risk the
        compiler will try to compile their sources in a wrong directory.
    """
    pass
| true |
1057bba18ec166f8119c988e97da5575189933b9 | Python | njerivera/example-2-python | /student.py | UTF-8 | 458 | 3.609375 | 4 | [] | no_license | class student:
def __init__ (self,first_name,second_name,age):
self.first_name=first_name
self.second_name=second_name
self.age=age
def full_name(self):
name=self.first_name + self.second_name
return name
def year_of_birth(self):
    """Return the birth year computed against the hard-coded year 2019."""
    # NOTE(review): the reference year 2019 is hard-coded (the same constant
    # appears in greeting()); confirm whether it should track the current year.
    return 2019-self.age
def greeting(self):
return "hello {},you were born in {} ".format(self.first_name,2019-self.age)
def initials(self):
initials=self.first_name + self.second_name
return initials
| true |
ea2a910d3bc857c0d71d53a3784c37deac24094e | Python | mbellitti/wikiart-classifier | /src/misclassified_images.py | UTF-8 | 1,180 | 2.78125 | 3 | [] | no_license | """
Following is the code that can be used to see wrongly-classified images
"""
# NOTE(review): the block below is deliberately disabled by wrapping it in a
# string literal. Before re-enabling it, note that it expects
# `test_generator`, `model`, `test_features`, `nTest`, `np`, `plt` and
# `load_img` to already exist in the surrounding scope.
"""
fnames = test_generator.filenames
ground_truth = test_generator.classes
label2index = test_generator.class_indices
# Getting the mapping from class index to class label
idx2label = dict((v,k) for k,v in label2index.items())
predictions = model.predict_classes(test_features)
prob = model.predict(test_features)
errors = np.where(predictions != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors),nTest))
print(predictions[errors[0]],ground_truth[errors[0]] )
for i in range(10):#len(errors)):
pred_class_index = np.argmax(prob[errors[i]]) #predicted class is the one corresponding to which CNN gives the maximum probability
pred_class_label = idx2label[pred_class_index]
actual_class_index=ground_truth[errors[i]]
acutal_class_label=idx2label[actual_class_index]
print('Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
acutal_class_label,
pred_class_label,
prob[errors[i]][pred_class_index]))
original = load_img('{}/{}'.format("../data/images/",fnames[errors[i]]))
plt.imshow(original)
plt.show()
"""
| true |
9e1a917091d155dd3f91fc2ed1f95f3311cf8e48 | Python | rush2hell/Auto-Wifi-Connect-Bot | /wifibot.py | UTF-8 | 849 | 2.796875 | 3 | [] | no_license | import os
import sys
import time

# Show the Wi-Fi profiles saved on this Windows machine and the networks
# currently in range (netsh output is captured as plain text).
saved_network = os.popen("netsh wlan show profiles").read()
print(saved_network)
list_network = os.popen("netsh wlan show networks").read()
print(list_network)
take_ssid = input("Enter SSID you want to connect: ")
disconnect = os.popen("netsh wlan disconnect").read()
print(disconnect)
# A connection profile must already be saved for the SSID, otherwise netsh
# cannot connect to it.
if take_ssid not in saved_network:
    print("SSID : "+take_ssid+" is not present in available networks")
    print("Unable to establish connection")
    sys.exit()
else:
    print("Profile for"+take_ssid+" is saved in networks")
# Poll until the SSID shows up in range.
while True:
    # Bug fix: the original queried "netsh wlan0 show networks"; the netsh
    # context is "wlan", so that command always failed and the loop spun
    # forever.
    available = os.popen("netsh wlan show networks").read()
    if take_ssid in available:
        print("Found")
        break
    # Avoid hammering netsh in a busy loop between scans.
    time.sleep(3)
print("Connecting to "+take_ssid)
# Bug fix: the original command string was built from the undefined name
# `preferred_ssid` with broken quoting; quote the SSID for netsh properly.
connect_cmd = os.popen('netsh wlan connect name="' + take_ssid + '"').read()
print(connect_cmd)
6c5ab4f218c067a5d170d1aa74295f9889e40e19 | Python | hav/redshift-sql-parser | /tests/table_name_test.py | UTF-8 | 4,927 | 2.671875 | 3 | [] | no_license | from sql_parser.reponse import Response
from sql_parser.table_name import TableName
from unittest import TestCase, main
class TestQuery(TestCase):
    """Unit tests for TableName parsing and validation.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated alias
    that was removed in Python 3.12.
    """

    def test_table_name_no_quotes(self):
        actual_input = 'table_name'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.table, 'table_name')

    def test_schema_and_table_no_quotes(self):
        actual_input = 'schema.table_name'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, 'schema')
        self.assertEqual(table_name.table, 'table_name')

    def test_schema_and_table_no_quotes_alias(self):
        actual_input = 'schema.table_name alias'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, 'schema')
        self.assertEqual(table_name.table, 'table_name')
        self.assertEqual(table_name.alias, 'alias')

    def test_and_table_no_quotes_alias(self):
        actual_input = 'table_name alias'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.table, 'table_name')
        self.assertEqual(table_name.alias, 'alias')

    def test_table_name_quotes(self):
        actual_input = '"table_name"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.table, '"table_name"')

    def test_schema_and_table_quotes(self):
        actual_input = '"schema"."table_name"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, '"schema"')
        self.assertEqual(table_name.table, '"table_name"')

    def test_schema_and_table_quotes_alias(self):
        actual_input = '"schema"."table_name" "alias"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, '"schema"')
        self.assertEqual(table_name.table, '"table_name"')
        self.assertEqual(table_name.alias, '"alias"')

    def test_table_and_alias_quotes_alias(self):
        actual_input = '"table_name" "alias"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.table, '"table_name"')
        self.assertEqual(table_name.alias, '"alias"')

    def test_too_many_periods_without_quotes(self):
        actual_input = 'sch.ema.table_name'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response,
                         Response('Schema Name has period without quotes', 1))

    def test_schema_and_table_periods_quotes(self):
        actual_input = '"sch.ema"."table_name"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, '"sch.ema"')
        self.assertEqual(table_name.table, '"table_name"')

    def test_table_alias_spaces_no_quotes(self):
        actual_input = 'table name alias'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response,
                         Response('Table Name has spaces without quotes', 1))

    def test_schema_table_spaces_quotes(self):
        actual_input = '"schema"."table name"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, '"schema"')
        self.assertEqual(table_name.table, '"table name"')

    def test_schema_no_quotes_table_quotes(self):
        actual_input = 'schema."table_name"'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response.okay())
        self.assertEqual(table_name.schema, 'schema')
        self.assertEqual(table_name.table, '"table_name"')

    def test_space_after_period(self):
        actual_input = 'schema. table_name'
        table_name = TableName(actual_input)
        response = table_name.check_validity()
        self.assertEqual(response, Response('Failed to parse Table Name', 1))
if __name__ == '__main__':
    # Run the unittest suite when this file is executed directly.
    main()
| true |
66d180a6bfbc522549ac68db6d3b04d940455159 | Python | Rinatik79/PythonAlgoritms | /Lesson 1/lesson1-7.py | UTF-8 | 609 | 4.28125 | 4 | [] | no_license | sides = input("Enter length of every side of triangle, separated by ';' : ")
sides = sides.split(";")
sides[0] = float(sides[0])
current = sides[0]
sides[1] = float(sides[1])
if sides[1] > sides[0]:
sides[0] = sides[1]
sides[1] = current
current = sides[0]
sides[2] = float(sides[2])
if sides[2] > sides[0]:
sides[0] = sides[2]
sides[2] = current;
if sides[0] > sides[1] + sides[2]:
print("Triangle with given sides doesn't exist.")
elif sides[1] == sides[2]:
print("This is isosceles triangle.")
if sides[0] == sides[1] == sides[2]:
print("This is equilateral triangle.")
| true |
8bfb69e6fa402863d52c4bee868c8c2acdb58ad1 | Python | andrewminer/pycursesui | /src/pycursesui/window_spec.py | UTF-8 | 1,761 | 2.84375 | 3 | [] | no_license | """Unit tests for the Window class."""
import sure
from io import StringIO
from mamba import after, before, description, it
from pycursesui import Logger, Session
from pycursesui.logger import LogLevel
__all__ = []
assert sure  # prevent linter errors

########################################################################################################################

# Mamba BDD spec: `description` / `before.each` / `it` blocks are collected
# by the mamba runner; `self` is the shared example context.
# NOTE(review): the nesting of the inner description blocks was reconstructed
# from their wording; confirm against the original file layout.
with description("Window:", "unit") as self:
    with before.each:
        # Start a fresh curses session for every example; keep the log buffer
        # so it can be dumped if start-up fails.
        self.session = None
        try:
            self.stdout = StringIO()
            self.logger = Logger().add_channel("debug", self.stdout, LogLevel.DEBUG)
            self.session = Session(self.logger).start()
            if self.session is not None:
                self.window = self.session.window
        except Exception as e:
            print(f"log:\n>>>\n{self.stdout.getvalue()}\n<<<\n")
            raise e
    with after.each:
        # Always tear the curses session down so the terminal is restored.
        if self.session is not None:
            self.session.stop()
    with description("using the default window from a new session"):
        with it("doesn't contain any text in the test region"):
            self.window.read(0, 0, 10).should.equal(" ")
        with description("after writing a string into the test region"):
            with before.each:
                self.window.write("alpha", 0, 0)
            with it("now contains the written text"):
                self.window.read(0, 0, 10).should.equal("alpha ")
        with description("after multiple overlapping writes"):
            with before.each:
                self.window.write("alpha", 0, 0).write("bravo", 3, 0)
            with it("should have replaced the last portion of the first word"):
                self.window.read(0, 0, 10).should.equal("alpbravo ")
| true |
4a3952eac1497cf0fa79acb2efe6f4353c2d3069 | Python | bs544/Spinregression | /energyregression/energy_calc.py | UTF-8 | 6,822 | 2.78125 | 3 | [] | no_license | import numpy as np
from energyregression.network import Cell_network
import os
import pickle
class energy_calculator():
    """Train and apply a neural network that predicts per-cell energies.

    Wraps a ``Cell_network`` and handles input/output standardisation,
    mini-batching, training with learning-rate decay, prediction and
    save/load of the whole calculator state.
    """
    def __init__(self,net_arg_dict,regression_arg_dict,layers):
        # net_arg_dict: constructor arguments for Cell_network (must include
        # at least 'lr', used for the decay schedule in fit()).
        self.net_arg_dict = net_arg_dict
        self.regression_arg_dict = regression_arg_dict
        # Hidden layer sizes; input/output sizes are prepended/appended in fit().
        self.layers = layers
        # self.setup_network()
        # Network creation is deferred until fit() or load(), when the
        # input/output dimensions are known.
        self.network = None
        self.use_regression_arg_dict(regression_arg_dict)

    def use_regression_arg_dict(self,regression_arg_dict):
        """
        Parameters:
            regression_arg_dict: (dict) arguments for regression
        Actions:
            sets arguments to be class properties
        """
        # NOTE(review): 'batch_size' is stored as a fraction of the training
        # set (see prepare_train_data), not an absolute batch size.
        self.batch_frac = regression_arg_dict['batch_size']
        self.decay_step = regression_arg_dict['decay_step']
        self.decay_rate = regression_arg_dict['decay_rate']
        self.val_frac = regression_arg_dict['val_frac']
        self.check_interval = regression_arg_dict['check_interval']
        self.niter = regression_arg_dict['niter']

    def setup_network(self,layers=None):
        # Build the underlying Cell_network; defaults to the hidden-layer
        # spec given at construction time.
        if (layers is None):
            layers = self.layers
        self.network = Cell_network(self.net_arg_dict,layers,'energyregressor')

    def prepare_train_data(self,x,y):
        """
        Parameters:
            x: (array) shape (Ncells,NatomsperCell,fplength) training input
            y: (array) shape (Ncells,1) training output
        Actions:
            sets up the input mean and standard deviation so data can be centered and standardised before being used
        Returns:
            train_x: (array) shape (Ncells,NatomsperCell,fplength) centered and standardised NN input
            train_y: (array) shape (Ncells,1) centered and standardised target data
        """
        #To Do: sort this out for a variable number of atoms in the cell
        # Statistics are computed over all atoms of all cells, per fingerprint
        # component.
        flat_x = x.reshape(-1,x.shape[-1])
        self.input_mean = np.mean(flat_x,axis=0)
        self.input_std = np.std(flat_x,axis=0)
        self.output_mean = np.mean(y)
        self.output_std = np.std(y)
        self.Ntrain = x.shape[0]
        # Random permutation, split into validation and training indices.
        idx = list(np.random.choice(np.arange(self.Ntrain),size=self.Ntrain,replace=False).astype(int))
        val_idx = idx[:int(self.val_frac*self.Ntrain)]
        train_idx = idx[int(self.val_frac*self.Ntrain):]
        train_y = (y-self.output_mean)/self.output_std
        train_x = (x-self.input_mean)/self.input_std
        self.val_x = train_x[val_idx,:,:]
        self.val_y = train_y[val_idx,:]
        train_x = train_x[train_idx,:,:]
        train_y = train_y[train_idx,:]
        # Ntrain now counts only the training split (validation removed).
        self.Ntrain = train_x.shape[0]
        self.batch_size = int(self.Ntrain*self.batch_frac)
        return train_x,train_y

    def get_batches(self,x,y):
        """
        Parameters:
            x: (array) shape (Ncells,NatomsperCell,fplen) NN input
            y: (array) shape (Ncells,1) NN target data
        Returns:
            x_batches: (list) set of floor(Ntrain/batch_size) batches as elements
            y_batches: (list) same set, but for the y data
        """
        self.n_batches = int(y.shape[0]/self.batch_size)
        x_batch = []
        y_batch = []
        # Fresh shuffle every epoch; any remainder after the last full batch
        # is dropped.
        idx = list(np.random.choice(np.arange(self.Ntrain),size=self.Ntrain,replace=False).astype(int))
        for i in range(self.n_batches):
            x_batch.append(x[idx[i*self.batch_size:(i+1)*self.batch_size],:,:])
            y_batch.append(y[idx[i*self.batch_size:(i+1)*self.batch_size],:])
        return x_batch,y_batch

    def fit(self,x,y):
        """
        Parameters:
            x: (array) shape (Ncells,NatomsperCell,fplength) training input
            y: (array) shape (Ncells,1) training output
        Actions:
            parameterises a neural network on energy data for cells
        """
        # Full layer spec: fingerprint length -> hidden layers -> output dim.
        self.net_layers = [x.shape[-1]]+ self.layers + [y.shape[-1]]
        self.setup_network(layers=self.net_layers)
        train_x, train_y = self.prepare_train_data(x,y)
        self.n_batches = int(train_y.shape[0]/self.batch_size)
        self.loss = []
        self.val_loss = []
        # niter counts batch updates, so the epoch count is derived from it.
        nepochs = int(self.niter/self.n_batches)
        cntr = 0
        for i in range(nepochs):
            x_batch, y_batch = self.get_batches(train_x,train_y)
            for j in range(self.n_batches):
                loss_ = self.network.train_step(x_batch[j],y_batch[j])
                # Periodically record training loss and validation RMSE.
                if (cntr%self.check_interval == 0):
                    self.loss.append(loss_)
                    val_pred = self.network.predict(self.val_x)
                    self.val_loss.append(np.sqrt(np.mean(np.square(val_pred-self.val_y))))
                cntr += 1
                # Exponential learning-rate decay every decay_step updates.
                if (cntr%self.decay_step == 0):
                    lr = self.net_arg_dict['lr']
                    lr *= (self.decay_rate)**(cntr/self.decay_step)
                    self.network.update_lr(lr)
        self.loss = np.asarray(self.loss)
        self.val_loss = np.asarray(self.val_loss)
        return

    def predict_energies(self,x):
        """
        Parameters:
            x: (array) shape (Ncells,NatomsperCell,fplength)
        Returns:
            Energies: (array) shape (Ncells,1)
        """
        # Standardise with the training statistics, then undo the target
        # standardisation on the network output.
        input_x = (x-self.input_mean)/self.input_std
        net_out = self.network.predict(input_x)
        Energies = (net_out*self.output_std)+self.output_mean
        return Energies

    def save(self,name='model'):
        """
        Parameters:
            name: (str) name to give to the saved network
        Actions:
            saves the network in a directory called model so that it can be loaded and used again later
        """
        if (not os.path.isdir(name)):
            os.mkdir('./{}'.format(name))
        self.network.save_network(name)
        # Persist every plain attribute; the network object itself is saved
        # separately above.
        attributes = {}
        for _attr in [_a for _a in self.__dict__ if _a not in ['network']]:
            attributes.update({_attr:getattr(self,_attr)})
        with open("./{}/calc_{}.pckl".format(name,name),"wb") as f:
            pickle.dump(attributes,f)

    def load(self,name='model'):
        """
        Parameters:
            name: (str) name of the saved network
        Actions:
            loads the saved network and the dictionary of class parameters for the energy calculator and the network class
        """
        assert (os.path.isdir(name)), "Cannot find save directory {}".format(name)
        # NOTE(review): pickle.load on untrusted files can execute arbitrary
        # code; only load archives you created yourself.
        with open('{}/calc_{}.pckl'.format(name,name),'rb') as f:
            attributes = pickle.load(f)
        for _attr in attributes:
            # load all non tf attributes
            setattr(self,_attr,attributes[_attr])
        if (self.network is None):
            self.setup_network(layers = self.net_layers)
        self.network.load_network(name)
1365bbf66ff79d72c0a89f5a49893831f40c5d96 | Python | skindstrom/advent | /day-5/jump.py | UTF-8 | 566 | 3.40625 | 3 | [] | no_license | def jump(jump_values, value_updater=lambda x: x + 1):
idx = jump_values[0]
jump_values[0] += 1
jump_count = 1
while idx < len(jump_values):
jump_count += 1
prev_idx = idx
idx += jump_values[idx]
jump_values[prev_idx] = value_updater(jump_values[prev_idx])
return jump_count
def jump_part_two(jump_values):
    """Day 5 part two: offsets of three or more shrink instead of growing."""
    def update(offset):
        # Equivalent to the original lambda: +1 below 3, -1 otherwise.
        return offset - 1 if offset >= 3 else offset + 1
    return jump(jump_values, value_updater=update)
def jump_from_file(filename, fun=jump):
    """Read one integer offset per line from *filename* and run *fun* on them."""
    with open(filename) as handle:
        offsets = [int(line) for line in handle]
    return fun(offsets)
5ffb7f0fe303c0bf8d1c6ac1d83342bda8481eb5 | Python | LittleEndu/Codeforces | /Python/Unsorted/50a.py | UTF-8 | 302 | 3.296875 | 3 | [
"MIT"
] | permissive | def get_max_dominoes(n,m):
if n==1 and m==1:
return 0
if m==2:
return n
if n==2:
return m
if m>n:
return n + get_max_dominoes(n,m-2)
return m + get_max_dominoes(n-2,m)
# Read the board dimensions "n m" from stdin and print the domino count.
dimensions = [int(token) for token in input().split()]
print(get_max_dominoes(dimensions[0], dimensions[1]))
f7eba22efac39d38448381718de166d2d03f5584 | Python | shivamT95/projecteuler | /Q46/sol.py | UTF-8 | 571 | 3.46875 | 3 | [
"MIT"
] | permissive | import math
#replace with sieve for faster check, but expected small answer
def is_prime(n):
    """Return True if *n* is prime, using odd trial division up to sqrt(n)."""
    # Bug fix: the original returned True for 0 and negative even... rather,
    # for n <= 0 the even/odd guards fell through to an empty range and
    # all(...) returned True (e.g. is_prime(0) was True). Anything below 2
    # is not prime.
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))
# All primes below 10000, used as the prime term of the decomposition.
primes = [candidate for candidate in range(1, 10000) if is_prime(candidate)]

ans = 10000000
# Every odd composite that CAN be written as prime + 2*k^2 goes into `sols`
# (Goldbach's "other" conjecture form).
sols = set()
for k in range(1, 1000):
    square_term = 2 * k * k
    for p in primes:
        candidate = square_term + p
        if candidate % 2 == 1 and not is_prime(candidate):
            sols.add(candidate)
# The answer is the smallest odd composite with no such decomposition.
for odd in range(9, 1000000, 2):
    if is_prime(odd):
        continue
    if odd not in sols:
        ans = odd
        break
print(ans)
| true |
73b9fe87bafb27b8a1fdb3d045c3ed1b755dfe48 | Python | kaiwolff/Eng88-cybercourse | /Week_2/Python_Exercises_Week2/waiter_helper.py | UTF-8 | 2,360 | 3.640625 | 4 | [] | no_license | class OrderHelper:
def __init__(self):
self.order_active = True
self.order_contents = []
def show_menu(self):
    """Print every item on the module-level menu, one per line."""
    for dish in menu.values():
        print(dish)
def add_item(self):
    """Show the menu, prompt for one item and add it to the order if valid."""
    print("Here's what's on the menu")
    self.show_menu()
    item = input("Please type which item you would like to add to your order. Type 'nothing' to add nothing: ")
    # Anything that is not a known menu item (including 'nothing') is ignored.
    if item not in menu.values():
        return
    self.order_contents.append(item)
    print(f"{item} added to order.")
def take_order(self):
    """Interactively add/remove items until the customer confirms or exits."""
    while self.order_active:
        if not self.order_contents:
            # Bug fix: the original compared `self.order_contents == False`,
            # which is never true for a list, so this branch never ran.
            print("You are currently ordering nothing. Type 'add' to add items, or 'exit' to exit: ")
        else:
            print("Here is your current order:")
            for item in self.order_contents:
                print(item)
        next_step = input("Type 'add' to add an item, or 'remove' to remove an item. To place your order, type 'done', or 'exit' to quit the order process: ")
        # Have now taken input. Next is to take appropriate action with list.
        if next_step == "done":
            # Confirm: print the summary and leave the loop.
            print("\nThank you for your order. Here is a summary:")
            for item in self.order_contents:
                print(item)
            self.order_active = False
        elif next_step == "add":
            print("add selected")
            self.add_item()
        elif next_step == "remove":
            # Bug fix: 'remove' was advertised in the prompt but silently
            # ignored by the original implementation.
            item = input("Please type which item you would like to remove: ")
            if item in self.order_contents:
                self.order_contents.remove(item)
        elif next_step == "exit":
            # Bug fixes: the original called the undefined `clear(order)` and
            # assigned the undefined name `Fal` instead of `False`.
            self.order_contents.clear()
            print("order cancelled")
            self.order_active = False
#Set up a menu for usage
# Maps an item number to its display name; add_item() checks membership
# against menu.values(), so orders store the names, not the numbers.
menu = {
    1 : "Burger",
    2 : "Milkshake",
    3 : "Fries",
    4 : "Sweet Potato Fries",
    5 : "Beer",
}

# Start an interactive ordering session.
# NOTE(review): this also runs on import; consider guarding with
# `if __name__ == "__main__":`.
my_order = OrderHelper()
my_order.take_order()
e8a93634d26e2e916f0fe099a2b67dbba88bfb80 | Python | mcpiatkowski/future_collars_accountant_decorators | /lib/accountant.py | UTF-8 | 1,157 | 2.9375 | 3 | [] | no_license | from sys import argv
from lib.manager import Manager, FileManager
# Module-level manager shared by the decorated command handlers below;
# Manager.assign registers each function under a command name and arity.
manager = Manager()
# NOTE(review): file_manager is created but not used in this visible chunk;
# it is presumably consumed elsewhere — confirm before removing.
file_manager = FileManager()
@manager.assign("saldo", 2)
def balance_update(manager, amount, comment):
    """Apply a signed amount to the balance, unless it would go negative."""
    delta = int(amount)
    if manager.balance + delta >= 0:
        manager.balance += delta
@manager.assign("zakup", 3)
def buy(manager, product_name, price, amount):
    """Buy *amount* units at *price* each, if the balance covers the cost."""
    cost = int(price) * int(amount)
    if manager.balance - cost >= 0:
        manager.balance -= cost
        # Create or top up the stock entry in one step.
        manager.stock[product_name] = manager.stock.get(product_name, 0) + int(amount)
    else:
        print("Brak wystarczającej ilość gotówki.")
        manager.input_error = True
@manager.assign("sprzedaz", 3)
def sell(manager, product_name, price, amount):
    """Sell *amount* units at *price* each from stock, crediting the balance."""
    if product_name not in manager.stock:
        print("Brak produktu w magazynie!")
        # Bug fix: this failure path did not flag the operation as invalid,
        # unlike every other error branch in this module.
        manager.input_error = True
    elif manager.stock[product_name] >= int(amount):
        manager.stock[product_name] -= int(amount)
        manager.balance += int(price)*int(amount)
    else:
        print("Brak wystrczającej ilości produktu w magazynie.")
        manager.input_error = True
1d7d02e158a90b84f948dee0d7f95d010114f1c9 | Python | gofr1/python-learning | /essential_training/Chap05/precidence.py | UTF-8 | 759 | 3.15625 | 3 | [] | no_license | # Operator Description
#
# () Parentheses (grouping)
# f(args...) Function call
# x[index:index] Slicing
# x[index] Subscription
# x.attribute Attribute reference
# ** Exponentiation
# ~x Bitwise not
# +x, -x Positive, negative
# *, /, % Multiplication, division, remainder
# +, - Addition, subtraction
# <<, >> Bitwise shifts
# & Bitwise AND
# ^ Bitwise XOR
# | Bitwise OR
# in, not in, is, is not, <, <=, >, >=,
# <> (Python 2 only), !=, == Comparisons, membership, identity
# not x Boolean NOT
# and Boolean AND
# or Boolean OR
# lambda Lambda expression | true |
4ccbfc8b645551162266490f403d2e15365a0c51 | Python | HuYuzhang/kuaishou | /adapt_format.py | UTF-8 | 1,999 | 2.984375 | 3 | [] | no_license | import os
# import cv2
import json
from subprocess import *
def comp(a, b):
    """Return True when filename *a*'s numeric stem precedes *b*'s."""
    lhs = int(a[:-4])
    rhs = int(b[:-4])
    return lhs < rhs
def n2i(a):
    """Map a frame filename such as '12.png' to its integer stem (12)."""
    stem = a[:-4]
    return int(stem)
train_path = "origin"
# Scan every (non-hidden) video directory under the training root.
videos = os.scandir(train_path)
videos = [video for video in videos if not video.name.startswith('.') and video.is_dir()]
for video in videos:
    files = os.scandir(video.path)
    # BUG FIX: this previously tested startswith(',') (comma) -- an evident
    # typo for '.' given the hidden-entry filter used on the directories above.
    files = [f for f in files if not f.name.startswith('.') and f.is_file()]
    # The Dataset class requires an intermediate folder, so frames are placed
    # under <video>/a.  Videos shorter than 405 frames are padded by cycling
    # copies of existing frames (no access to the original source videos).
    mid_path = video.path + '/a'
    if not os.path.exists(mid_path):
        c = Popen(['mkdir', mid_path])
        c.wait()
    file_names = [f.name for f in files]
    file_names.sort(key=n2i)
    file_ids = [int(name[:-4]) for name in file_names]
    file_ids.sort()
    # %%% STEP 1: pad the clip up to 405 frames by cycling existing frames %%%
    length = len(file_ids)
    if (length < 405):
        for _id in range(length, 405):
            # CONSISTENCY FIX: use the same %03d zero-padding as STEP 2 so the
            # duplicated frames sort correctly alongside the renamed originals
            # (previously str(_id + 1), unpadded).
            c = Popen(['cp', video.path + '/' + file_names[(_id - length) % length], mid_path + '/' + '%03d' % (_id + 1) + '.png'])
            c.wait()
    # %%% STEP 2: move original frames into the folder, renaming to %03d %%%
    for i in file_ids:
        c = Popen(['mv', video.path + '/' + str(i) + '.png', mid_path + '/' + '%03d' % (i + 1) + '.png'])
        c.wait()
    print(mid_path + "OK")
eb7348441afefd9fcde5770c1a673771ed7973a6 | Python | vijaysundar2701/python | /pro27.py | UTF-8 | 365 | 2.546875 | 3 | [] | no_license | vj,vk=map(int,input().split())
sv=list(map(int,input().split()))
vs=list(map(int,input().split()))
tt=[]
cin=0
for i in range(vj):
y=vs[i]/sv[i]
tt.append(y)
while vk>=0 and len(tt)>0:
mindex=tt.index(max(tt))
if vk>=sv[mindex]:
cin=cin+sv[mindex]
vk=vk-sv[mindex]
sv.pop(mindex)
vs.pop(mindex)
tt.pop(mindex)
print(cin)
| true |
6c06e92cbeea44d6be1a2d57a48381ee8e88d6d7 | Python | parul71625/Naive_Bayes_Classifier | /nblearn3.py | UTF-8 | 16,833 | 3.03125 | 3 | [] | no_license | import sys
import re
import math
class NBLearn:
    """Trains a two-axis (positive/negative and truthful/deceptive) Naive
    Bayes text classifier from a training text file and a label file, applies
    add-one smoothing, and writes the resulting model to nbmodel.txt.

    NOTE: all state below is held in CLASS attributes shared by every
    instance of NBLearn, not per-instance attributes.
    """
    # reviewID -> [posNegLabel, truthDecepLabel]
    dicOfLabels = {}
    # word -> {"positive": n, "negative": n, "truthful": n, "deceptive": n}
    dicOfInstancesWithLabels = {}
    # Document counts per class.
    countOfPos = 0
    countOfNeg = 0
    countOfTruth = 0
    countOfDecep = 0
    # Log prior probabilities per class (filled in by calculatePriorProb).
    posPrior = 0
    negPrior = 0
    truthPrior = 0
    decepPrior = 0
    # Per-class token totals after add-one smoothing -- the denominators of
    # the conditional probabilities written by printNBModel.
    totalPosWordsAfterSmoothing = 0
    totalNegWordsAfterSmoothing = 0
    totalTruthWordsAfterSmoothing = 0
    totalDecepWordsAfterSmoothing = 0
    def readTrainText(self, trainingFile):
        """Read the training file (one review per line, prefixed by its
        reviewID), tokenize and clean each review, and accumulate per-word
        class counts via createDictOfInstances().

        NOTE(review): the hand-assembled stopword list contains duplicates
        and a fused "ours ourselves" token -- preserved as-is, since the
        trained model depends on it.
        """
        stopwords = ["a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are",
                     "aren't", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both",
                     "but", "by", "can't", "cannot", "could", "couldn't", "did", "didn't", "do", "does", "doesn't",
                     "doing", "don't", "down", "during", "each", "few", "for", "from", "further", "had", "hadn't",
                     "has", "hasn't", "have", "haven't", "having", "he", "he'd", "he'll", "he's", "her", "here",
                     "here's", "hers", "herself", "him", "himself", "his", "how", "how's", "i", "i'd", "i'll", "i'm",
                     "i've", "if", "in", "into", "is", "isn't", "it", "it's", "its", "itself", "let's", "me", "more"
                     , "mustn't", "my", "myself", "no", "nor", "not", "of", "off", "on", "once", "only", "or",
                     "ought", "our", "ours ourselves", "out", "over", "own", "same", "shan't", "she",
                     "she'd", "she'll", "she's", "should", "shouldn't", "so", "some", "such", "than", "that", "that's",
                     "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they",
                     "they'd", "they'll", "they're", "they've", "this", "those", "through", "to", "too", "under",
                     "until", "up", "very", "was", "wasn't", "we", "we'd", "we'll", "we're", "we've", "were", "weren't",
                     "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "who's", "whom",
                     "why", "why's", "with", "won't", "would", "wouldn't", "you", "you'd", "you'll", "you're", "you've",
                     "your", "yours", "yourself", "yourselves", 'a', 'about', 'above', 'across', 'after', 'again',
                     'against', 'all', 'almost', 'alone', 'btw', 'north', 'south', 'east', 'west', 'sarita', 'woke', 'wake',
                     'suv', 'omg', 'asap', 'contain', 'au', 'demi', 'mam', 'sir', "ma'am", "i'm'", 'ohh', 'oh', 'duh',
                     'go', 'goes', 'went', 'gone', 'dollar', 'dollars', 'cents', 'cent', 'usa', 'dont', 'aaa',
                     'along', 'already', 'also', 'although', 'always', 'among', 'an', 'and', 'another', 'any',
                     'anybody', 'anyone', 'anything', 'anywhere', 'are', 'area', 'areas', 'around', 'as', 'ask',
                     'asked', 'asking', 'asks', 'at', 'away', 'b', 'back', 'backed', 'backing', 'backs', 'be',
                     'became', 'because', 'become', 'becomes', 'been', 'before', 'began', 'behind', 'being',
                     'beings', 'between', 'both', 'but', 'by', 'c', 'came', 'can', 'cannot', 'couldnt',
                     'case', 'cases', 'certain', 'certainly', 'clear', 'clearly', 'come', 'could', 'coz', 'd', 'did',
                     'differ', 'different', 'differently', 'do', 'does', 'done', 'down', 'downed', 'downing', 'downs',
                     'during', 'e', 'each', 'early', 'either', 'end', 'ended', 'ending', 'ends', 'enough', 'even',
                     'evenly', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'f',
                     'faces', 'fact', 'facts', 'far', 'felt', 'few', 'find', 'first', 'for', 'four', 'from',
                     'full', 'fully', 'further', 'furthered', 'furthering', 'furthers', 'g', 'gave', 'general',
                     'generally', 'get', 'gets', 'give', 'given', 'gives', 'go', 'going', 'goods', 'got',
                     'group', 'grouped', 'grouping', 'groups', 'h', 'had', 'has',
                     'have', 'having', 'he', 'her', 'here', 'herself',
                     'him', 'himself', 'his', 'how', 'however', 'i', 'if', 'important', 'in', 'into', 'is', 'it', 'its',
                     'itself', 'j', 'just', 'k', 'keep', 'keeps',
                     'knew', 'know', 'known', 'knows', 'l', 'largely', 'later', 'latest',
                     'least', 'let', 'lets', 'likely', 'm', 'made', 'make',
                     'making', 'man', 'many', 'may', 'me', 'member', 'members', 'men', 'might', 'more', 'most',
                     'mostly', 'mr', 'mrs', 'much', 'must', 'my', 'myself', 'n', 'necessary', 'need', 'needed',
                     'needing', 'needs', 'new', 'next',
                     'noone', 'nothing', 'now', 'nowhere', 'number', 'numbers', 'o', 'of', 'off', 'often',
                     'old', 'older', 'oldest', 'on', 'once', 'one', 'only', 'open', 'opened', 'opening', 'opens', 'or',
                     'order', 'ordered', 'ordering', 'orders', 'other', 'others', 'our', 'out', 'over', 'p', 'part',
                     'parted', 'parting', 'parts', 'per', 'perhaps', 'place', 'places', 'point', 'pointed', 'pointing',
                     'points', 'possible', 'present', 'presented', 'presenting', 'presents',
                     'put', 'puts', 'q', 'quite', 'r', 'rather', 'really', 'right', 'room', 'rooms', 's',
                     'said', 'same', 'saw', 'say', 'says', 'second', 'seconds', 'see', 'seem', 'seemed', 'seeming',
                     'seems', 'sees', 'several', 'shall', 'she', 'should', 'show', 'showed', 'showing', 'shows', 'side',
                     'sides', 'since', 'so', 'some', 'somebody', 'someone', 'something',
                     'somewhere', 'state', 'states', 'still', 'such', 'sure', 't', 'take', 'taken', 'than',
                     'that', 'the', 'their', 'them', 'then', 'there', 'therefore', 'these', 'they', 'thing', 'things',
                     'think', 'thinks', 'this', 'those', 'though', 'thought', 'thoughts', 'three', 'through', 'thus',
                     'to', 'today', 'together', 'too', 'took', 'toward', 'turn', 'turned', 'turning', 'turns', 'two',
                     'u', 'under', 'until', 'up', 'upon', 'us', 'use', 'used', 'uses', 'v', 'very', 'w', 'want',
                     'wanted', 'wanting', 'wants', 'was', 'way', 'ways', 'we', 'well', 'wells', 'went', 'were', 'what',
                     'when', 'where', 'whether', 'which', 'while', 'who', 'whole', 'whose', 'why', 'will', 'with',
                     'within', 'without', 'work', 'worked', 'working', 'works', 'would', 'x', 'y', 'year', 'years',
                     'yet', 'you', 'young', 'younger', 'youngest', 'your', 'yours', 'z']
        #with open("test.txt", "r") as file:
        #with open("train-text.txt", "r") as file:
        #print(trainingFile)
        with open(trainingFile, "r") as file:
            for line in file:
                line = line.replace("-", " ")
                # First whitespace-separated token is the review ID.
                splitID = line.split(' ',1)
                reviewID = splitID[0]
                reviewStr = splitID[1]
                reviewStr = reviewStr.lower()
                # Replace anything outside this class with spaces.
                # NOTE(review): [^'A-z ] keeps the ASCII range A-z, which also
                # admits [ \ ] ^ _ ` -- probably intended to be A-Za-z.
                reviewStr = re.sub("[^'A-z ]", ' ', reviewStr)
                reviewWordsList = reviewStr.split(' ')
                # NOTE(review): removing from the list while iterating over it
                # skips the element after each removal, so some short tokens
                # survive this pass; the trained model depends on this.
                for word in reviewWordsList:
                    word = word.strip(" ")
                    if len(word) <= 2:
                        reviewWordsList.remove(word)
                # Strip stopwords (first pass).
                setOfStopWords = set(reviewWordsList) & set(stopwords)
                for word in setOfStopWords:
                    reviewWordsList = [x for x in reviewWordsList if x != word]
                # Truncate contracted words at the apostrophe.
                # NOTE(review): this also mutates the list while iterating it.
                for word in reviewWordsList:
                    if "'" in word:
                        reviewWordsList.remove(word)
                        aposIndex = word.index("'")
                        word = word[0:aposIndex]
                        reviewWordsList.append(word)
                # Strip stopwords again (truncation may have produced new ones).
                setOfStopWords = set(reviewWordsList) & set(stopwords)
                for word in setOfStopWords:
                    reviewWordsList = [x for x in reviewWordsList if x != word]
                # Drop empty tokens left over from cleaning.
                if '' in reviewWordsList:
                    reviewWordsList = [x for x in reviewWordsList if x != '']
                #print(reviewID)
                #print(reviewWordsList)
                self.createDictOfInstances(reviewID, reviewWordsList)
        #print(self.dicOfInstancesWithLabels)
    def readTrainLabels(self, labelFile):
        """Load reviewID -> [posNegLabel, truthDecepLabel] from *labelFile*
        (one review per line: "<id> <truthful|deceptive> <positive|negative>")."""
        #with open("train-labels.txt", "r") as file:
        #print(labelFile)
        with open(labelFile, "r") as labelfile:
            for line in labelfile:
                #print(line)
                line = line.strip(' ')
                line = line.strip('\n')
                splitLabels = line.split(' ')
                reviewID = splitLabels[0]
                truthDecepLabel = splitLabels[1]
                posNegLabel = splitLabels[2]
                self.dicOfLabels[reviewID] = [posNegLabel, truthDecepLabel]
        #print(self.dicOfLabels)
    def createDictOfInstances(self, reviewID, reviewWordsList):
        """Bump the per-class document counter and every word's count for each
        of the review's two labels (sentiment axis and truthfulness axis)."""
        labelsForID = self.dicOfLabels[reviewID]
        #print(labelsForID)
        posNegLabel = labelsForID[0]
        truthDecepLabel = labelsForID[1]
        if posNegLabel == "positive":
            self.countOfPos += 1
            appendStr = "positive"
            # NOTE: appendInvStr is passed along but never used downstream.
            appendInvStr = "negative"
            for word in reviewWordsList:
                if word not in self.dicOfInstancesWithLabels:
                    self.dicOfInstancesWithLabels[word] = {"positive": 0, "negative": 0, "truthful": 0, "deceptive": 0}
                self.assignLabelAndCreateDictionary(word, appendStr, appendInvStr)
        if posNegLabel == "negative":
            self.countOfNeg += 1
            appendStr = "negative"
            appendInvStr = "positive"
            for word in reviewWordsList:
                if word not in self.dicOfInstancesWithLabels:
                    self.dicOfInstancesWithLabels[word] = {"positive": 0, "negative": 0, "truthful": 0, "deceptive": 0}
                self.assignLabelAndCreateDictionary(word, appendStr, appendInvStr)
        if truthDecepLabel == "truthful":
            self.countOfTruth += 1
            appendStr = "truthful"
            appendInvStr = "deceptive"
            for word in reviewWordsList:
                if word not in self.dicOfInstancesWithLabels:
                    self.dicOfInstancesWithLabels[word] = {"positive": 0, "negative": 0, "truthful": 0, "deceptive": 0}
                self.assignLabelAndCreateDictionary(word, appendStr, appendInvStr)
        if truthDecepLabel == "deceptive":
            self.countOfDecep += 1
            appendStr = "deceptive"
            appendInvStr = "truthful"
            for word in reviewWordsList:
                if word not in self.dicOfInstancesWithLabels:
                    self.dicOfInstancesWithLabels[word] = {"positive": 0, "negative": 0, "truthful": 0, "deceptive": 0}
                self.assignLabelAndCreateDictionary(word, appendStr, appendInvStr)
    def assignLabelAndCreateDictionary(self, word, label, invLabel):
        # Increment `word`'s count under `label`; `invLabel` is unused.
        self.dicOfInstancesWithLabels[word][label] += 1
    def printDictionary(self):
        """Debug helper: dump every word's per-class counts, then the size of
        the vocabulary."""
        for key in self.dicOfInstancesWithLabels:
            print(key + " : ")
            print(self.dicOfInstancesWithLabels[key])
            #print("\n")
        print(len(self.dicOfInstancesWithLabels))
    def calculatePriorProb(self):
        """Compute the natural-log prior probability of each class from the
        document counts gathered during training."""
        self.posPrior = math.log(self.countOfPos / (self.countOfPos+self.countOfNeg))
        self.negPrior = math.log(self.countOfNeg / (self.countOfPos + self.countOfNeg))
        self.truthPrior = math.log(self.countOfTruth / (self.countOfTruth + self.countOfDecep))
        self.decepPrior = math.log(self.countOfDecep / (self.countOfTruth + self.countOfDecep))
        #print(posPrior, negPrior, truthPrior, decepPrior)
    def addOneSmoothing(self):
        """Apply add-one (Laplace) smoothing: add 1 to every word's count in
        every class, and accumulate the smoothed per-class token totals."""
        numOfPosWords = 0
        numOfNegWords = 0
        numOfTruthWords = 0
        numOfDecepWords = 0
        for instance in self.dicOfInstancesWithLabels:
            numBeforeSmoothing = self.dicOfInstancesWithLabels[instance]["positive"]
            numOfPosWords += numBeforeSmoothing
            numAfterSmoothing = numBeforeSmoothing + 1
            self.totalPosWordsAfterSmoothing += numAfterSmoothing
            self.dicOfInstancesWithLabels[instance]["positive"] = numAfterSmoothing
            numBeforeSmoothing = self.dicOfInstancesWithLabels[instance]["negative"]
            numOfNegWords += numBeforeSmoothing
            numAfterSmoothing = numBeforeSmoothing + 1
            self.totalNegWordsAfterSmoothing += numAfterSmoothing
            self.dicOfInstancesWithLabels[instance]["negative"] = numAfterSmoothing
            numBeforeSmoothing = self.dicOfInstancesWithLabels[instance]["truthful"]
            numOfTruthWords += numBeforeSmoothing
            numAfterSmoothing = numBeforeSmoothing + 1
            self.totalTruthWordsAfterSmoothing += numAfterSmoothing
            self.dicOfInstancesWithLabels[instance]["truthful"] = numAfterSmoothing
            numBeforeSmoothing = self.dicOfInstancesWithLabels[instance]["deceptive"]
            numOfDecepWords += numBeforeSmoothing
            numAfterSmoothing = numBeforeSmoothing + 1
            self.totalDecepWordsAfterSmoothing += numAfterSmoothing
            self.dicOfInstancesWithLabels[instance]["deceptive"] = numAfterSmoothing
        #print(numOfPosWords, self.totalPosWordsAfterSmoothing, self.totalPosWordsAfterSmoothing - numOfPosWords)
        #print(numOfNegWords, self.totalNegWordsAfterSmoothing, self.totalNegWordsAfterSmoothing - numOfNegWords)
        #print(numOfTruthWords, self.totalTruthWordsAfterSmoothing, self.totalTruthWordsAfterSmoothing - numOfTruthWords)
        #print(numOfDecepWords, self.totalDecepWordsAfterSmoothing, self.totalDecepWordsAfterSmoothing - numOfDecepWords)
    def printNBModel(self):
        """Write the model to nbmodel.txt: the four log priors, then each
        word's smoothed counts and log conditional probabilities per class."""
        # NOTE(review): `base` is unused -- math.log below is the natural log.
        base = 2
        nbModelFile = open("nbmodel.txt", 'w')
        nbModelFile.write("Prior Positive Probability : " + str(self.posPrior))
        nbModelFile.write("\n")
        nbModelFile.write("Prior Negative Probability : " + str(self.negPrior))
        nbModelFile.write("\n")
        nbModelFile.write("Prior Truthful Probability : " + str(self.truthPrior))
        nbModelFile.write("\n")
        nbModelFile.write("Prior Deceptive Probability : " + str(self.decepPrior))
        nbModelFile.write("\n")
        nbModelFile.write("\n")
        for instance in self.dicOfInstancesWithLabels:
            nbModelFile.write(instance)
            nbModelFile.write("\n")
            nbModelFile.write("No of Instances and Probabilities:")
            nbModelFile.write("\n")
            probOfPositive = math.log(
                self.dicOfInstancesWithLabels[instance]["positive"]/self.totalPosWordsAfterSmoothing)
            nbModelFile.write(
                "positive : " + str(self.dicOfInstancesWithLabels[instance]["positive"]) + " " + str(probOfPositive))
            nbModelFile.write("\n")
            probOfNegative = math.log(
                self.dicOfInstancesWithLabels[instance]["negative"] / self.totalNegWordsAfterSmoothing)
            nbModelFile.write(
                "negative : " + str(self.dicOfInstancesWithLabels[instance]["negative"]) + " " + str(probOfNegative))
            nbModelFile.write("\n")
            probOfTruthful = math.log(
                self.dicOfInstancesWithLabels[instance]["truthful"] / self.totalTruthWordsAfterSmoothing)
            nbModelFile.write(
                "truthful : " + str(self.dicOfInstancesWithLabels[instance]["truthful"]) + " " + str(probOfTruthful))
            nbModelFile.write("\n")
            probOfDeceptive = math.log(
                self.dicOfInstancesWithLabels[instance]["deceptive"] / self.totalDecepWordsAfterSmoothing)
            nbModelFile.write(
                "deceptive : " + str(self.dicOfInstancesWithLabels[instance]["deceptive"]) + " " + str(probOfDeceptive))
            nbModelFile.write("\n")
            nbModelFile.write("\n")
        nbModelFile.close()
# Command-line entry point: argv[1] = training text file, argv[2] = label file.
# Labels must be loaded before the text so createDictOfInstances can look
# each review's labels up.
learnObject = NBLearn()
learnObject.readTrainLabels(sys.argv[2])
learnObject.readTrainText(sys.argv[1])
# learnObject.readTrainLabels("train-labels.txt")
# learnObject.readTrainText("train-text.txt")
learnObject.calculatePriorProb()
learnObject.addOneSmoothing()
#learnObject.printDictionary()
learnObject.printNBModel()
| true |
a97c91a6dd3022817d248430d5285ad0e0890a22 | Python | khryss/PLP | /exercises/e5_card_dealer.py | UTF-8 | 4,977 | 3.5625 | 4 | [] | no_license | from random import shuffle
class Card(object):
    """A single playing card identified by a (rank, suit) tuple."""

    def __init__(self, tuple_number_color):
        # Keep the raw (rank, suit) pair exactly as supplied.
        self._id = tuple_number_color

    def __str__(self):
        rank, suit = self._id
        return "|%s %s|" % (rank, suit)
class Deck(object):
    """An ordered collection of 52 Cards drawn from the top and re-inserted
    at the bottom."""

    def __init__(self):
        self.numbers = ['2 ', '3 ', '4 ', '5 ', '6 ', '7 ', '8 ', '9 ', '10', 'J ', 'Q ', 'K ', 'A ']
        self.colors = ['c', 'd', 'h', 's']
        # One card per (rank, suit) combination, rank-major order.
        self._cards = [Card((number, color))
                       for number in self.numbers
                       for color in self.colors]

    def insert(self, card):
        # Place a card at the bottom (index 0) unless it is falsy or already
        # present in the deck.
        if card and card not in self._cards:
            self._cards.insert(0, card)

    def pop(self):
        # Deal from the top (end of the list).
        return self._cards.pop()

    def shuffle(self):
        shuffle(self._cards)

    def sort(self):
        # NOTE(review): Card defines no ordering methods, so this relies on
        # Python 2's arbitrary default comparison -- verify before porting.
        self._cards = sorted(self._cards)
class Player(object):
def __init__(self, name):
self.name = name
self._first_card = None
self._second_card = None
def receive_cards(self, first_card=None, second_card=None):
if first_card:
self._first_card = first_card
if second_card:
self._second_card = second_card
def return_cards(self):
temp_list = [self._first_card, self._second_card]
self._first_card = None
self._second_card = None
return temp_list
def show(self):
print self.name + " ",
print str(self._first_card) + " " + str(self._second_card)
class Table(object):
def __init__(self):
# _card_slots = [flop1, flop2, flop3, turn, river]
self._card_slots = [None, None, None, None, None]
def receive_cards(self, flop1=None, flop2=None, flop3=None, turn=None, river=None):
if flop1: self._card_slots[0] = flop1
if flop2: self._card_slots[1] = flop2
if flop3: self._card_slots[2] = flop3
if turn: self._card_slots[3] = turn
if river: self._card_slots[4] = river
def return_cards(self):
temp_cards = self._card_slots
self._card_slots = [None, None, None, None, None]
return temp_cards
def show(self):
for slot in self._card_slots:
if slot:
print slot,
else:
print "| |",
print
class Dealer(object):
    """Runs the card mechanics of a hand: shuffling, burning, dealing to the
    players and the table, and collecting everything back into the deck."""
    def __init__(self, table, players):
        self._table = table
        self._players = players
        self._deck = Deck()
    def shuffle_cards(self):
        print "shuffling..."
        self._deck.shuffle()
    def deal_players(self):
        print "dealing players..."
        # NOTE(review): a card is burned before EACH player's two hole cards,
        # not once per round -- confirm this is the intended house rule.
        for player in self._players:
            self._burn_card()
            player.receive_cards(self._deck.pop(), self._deck.pop())
    def deal_flop(self):
        print "dealing flop..."
        self._burn_card()
        self._table.receive_cards(flop1 = self._deck.pop(),
                                  flop2 = self._deck.pop(),
                                  flop3 = self._deck.pop())
    def deal_turn(self):
        print "dealing turn..."
        self._burn_card()
        self._table.receive_cards(turn = self._deck.pop())
    def deal_river(self):
        print "dealing river..."
        self._burn_card()
        self._table.receive_cards(river = self._deck.pop())
    def collect_cards(self):
        print "collecting cards..."
        # Return all player and table cards to the deck; Deck.insert ignores
        # the None entries coming from empty slots.
        for player in self._players:
            cards = player.return_cards()
            for card in cards:
                self._deck.insert(card)
        table_cards = self._table.return_cards()
        for card in table_cards:
            self._deck.insert(card)
    def _burn_card(self):
        # "Burn" by moving the top card to the bottom of the deck; the card
        # is not removed from play.
        self._deck.insert(self._deck.pop())
    def show_deck(self):
        print "Showing deck..."
        print "count: " + str(len(self._deck._cards))
        for card in self._deck._cards:
            print card
class Game(object):
    """Scripted demonstration of one Texas hold'em deal; raw_input() pauses
    between stages (press Enter to continue)."""
    def __init__(self, players):
        self._players = players
        self._table = Table()
        self._dealer = Dealer(self._table, self._players)
    def show_table(self):
        self._table.show()
    def show_players(self):
        for player in self._players:
            player.show()
        print
    def play(self):
        # Show the fresh deck, shuffle, deal hole cards, then reveal flop,
        # turn and river (pausing for Enter), and finally collect the cards.
        self._dealer.show_deck()
        raw_input()
        self._dealer.shuffle_cards()
        self._dealer.show_deck()
        raw_input()
        self._dealer.deal_players()
        self.show_players()
        self._dealer.deal_flop()
        self.show_table()
        raw_input()
        self._dealer.deal_turn()
        self.show_table()
        raw_input()
        self._dealer.deal_river()
        self.show_table()
        raw_input()
        self._dealer.show_deck()
        raw_input()
        self._dealer.collect_cards()
        self._dealer.show_deck()
self._dealer.show_deck()
# Entry point: run one demonstration hand with three hard-coded players.
def play_texas_holdem():
    texas = Game([Player('Gimi'), Player('Paul'), Player('Luci')])
    texas.play()

if __name__ == "__main__":
    play_texas_holdem()
| true |
7603f05d005945ae9e7ef2cea7f3b6cd8f7196aa | Python | WinonaLyw/MultilayerNetwork | /03_user_loc_cate_mapping.py | UTF-8 | 2,759 | 2.859375 | 3 | [] | no_license | '''
1. create multi-index column for user location dataframe
level 0: parent category ['Shop & Services', 'Food'...]
level 1: venueCategory
2. create grouped venueCategory
'''
# %%
import pandas as pd
# %%
# Load the category mapping and the per-user venue check-in counts.
# NOTE(review): assumes category.csv provides 'venueCategory',
# 'parentCategory', 'Entertainment Type' and 'Food Type' columns -- TODO
# confirm schema.
venueCategory = pd.read_csv('data/category.csv',index_col=0)
user_loc = pd.read_csv('data/user_loc_count.csv',index_col=0)
# %%
# Restrict to the venue categories that have an entertainment or food type.
interested_entertainment = venueCategory[['venueCategory','Entertainment Type']].dropna(axis=0)
entertainment_cols = [col for col in user_loc.columns.values if col in list(interested_entertainment.venueCategory)]
food_type = venueCategory[['venueCategory','Food Type']].dropna(axis=0)
food_cols = [col for col in user_loc.columns.values if col in list(food_type.venueCategory)]
# %%
interested_user_loc = user_loc[entertainment_cols + food_cols]
# %% retrieve map category and map to user_loc table
# Build a two-level column index: level 0 = parentCategory, level 1 = venueCategory.
interested_venueCategory = venueCategory[[w in (entertainment_cols + food_cols) for w in venueCategory['venueCategory']]]
multi_ind_cols = interested_venueCategory[['parentCategory','venueCategory']].values.T
interested_user_loc.columns = pd.MultiIndex.from_arrays(multi_ind_cols)
# %% sort columns on parent catefory
# NOTE(review): set() makes the parent-category order non-deterministic
# between runs.
parentCategory = list(set(interested_venueCategory.parentCategory))
interested_user_loc = interested_user_loc.reindex(parentCategory, axis=1, level=0)
# # %% drop user with no interested location check-ins
# # Food visits
# food = interested_user_loc['Food'].sum(axis=1)
# food.describe()
# no_food_ind = list(food[food==0].index)
# # Shop & Service, Arts & Entertainment, Outdoors & Recreation, Nightlife Spot
# leisure = interested_user_loc[['Shop & Service', 'Arts & Entertainment', 'Outdoors & Recreation','Nightlife Spot']].sum(axis=1)
# leisure.describe()
# no_leisure_ind = list(leisure[leisure==0].index)
# interested_user_loc = interested_user_loc.drop(no_food_ind+no_leisure_ind,axis=0)
# %% save interested_user_loc
interested_user_loc.to_csv('data/user_loc_count.parentCategory.csv')
# %% group locations
# Collapse venue-level columns into one summed column per entertainment/food
# type, then drop the original venue-level columns.
interested_user_loc.columns = interested_user_loc.columns.droplevel()
original_cols = interested_user_loc.columns
for e in list(set(interested_entertainment['Entertainment Type'])):
    v_cate = list(interested_entertainment[interested_entertainment['Entertainment Type'] == e]['venueCategory'])
    interested_user_loc[e] = [row[1].sum() for row in interested_user_loc[v_cate].iterrows()]
for e in list(set(food_type['Food Type'])):
    v_cate = list(food_type[food_type['Food Type'] == e]['venueCategory'])
    interested_user_loc[e] = [row[1].sum() for row in interested_user_loc[v_cate].iterrows()]
interested_user_loc = interested_user_loc.drop(original_cols, axis=1)
# %%
interested_user_loc.to_csv('data/grouped_user_loc_count.parentCategory.csv')
# %%
| true |
cb7dbae98d2bafbe5554456c4636715f8bc287ef | Python | iccowan/CSC_117 | /Lab15 (BigLab04)/drug_dose_sim.py | UTF-8 | 15,260 | 3.265625 | 3 | [] | no_license | # Ian Cowan
# Lab Partner: Jake Pfaller
# CSC 117b BigLab04 Drug Dose Simulator
# 8 Mar 2019
# Drug Dose Simulator
# Import Packages
from graphics import *
import time
# validateDoseInputs: re-prompt the user until the three dose thresholds are
# consistent and within range.
def validateDoseInputs(lethaldose, overdose, effectivedose):
    """Validate the three dose thresholds (mg), re-prompting on the console
    for any value that violates a constraint.

    Constraints enforced, in order: effectivedose <= overdose,
    overdose <= lethaldose, then each value within [0, 100].
    Returns the (lethaldose, overdose, effectivedose) triple with any
    re-entered values.

    NOTE(review): values re-entered by a later loop are not re-checked
    against the earlier ordering constraints, so inconsistent input can
    still slip through -- behavior preserved from the original.
    """
    # Effective dose may not exceed the overdose threshold.
    while not effectivedose <= overdose:
        print('The effective dose must be less than the overdose')
        overdose = float(input('How many mg of the drug is considered an overdose? '))
        effectivedose = float(input('How many mg of the drug is considered effective? '))
    # Overdose may not exceed the lethal threshold.
    while not overdose <= lethaldose:
        print('The overdose must be less than the lethal dose')
        lethaldose = float(input('How many mg of the drug is lethal? '))
        overdose = float(input('How many mg of the drug is considered an overdose? '))
    # Each threshold must lie within 0-100 mg.
    while not 0 <= effectivedose <= 100:
        print('The doses must be positive but less than 100mg')
        effectivedose = float(input('How many mg of the drug is considered effective? '))
    while not 0 <= overdose <= 100:
        print('The doses must be positive but less than 100mg')
        overdose = float(input('How many mg of the drug is considered an overdose? '))
    while not 0 <= lethaldose <= 100:
        print('The doses must be positive but less than 100mg')
        lethaldose = float(input('How many mg of the drug is lethal? '))
    return lethaldose, overdose, effectivedose
# validateDecayRate: re-prompt until the decay percentage is non-negative.
def validateDecayRate(decayrate):
    """Return a validated (non-negative) eight-hour decay percentage,
    re-prompting on the console while the value is negative."""
    while not decayrate >= 0:
        print('The decay rate should not be less than zero percent')
        decayrate = float(input('What percentage of the drug should decay every eight hours? '))
    return decayrate
# Define runSimulation()
# Inputs:
#     float: lethaldose
#     float: overdose
#     float: effectivedose
#     float: decayrate
# Outputs:
#     Window: win
def runSimulation(lethaldose, overdose, effectivedose, decayrate):
    """Drive the whole simulation: build the display, then run ten dosage
    trials (1..10 mg every 8 hours), pausing for a button click and clearing
    the graph between trials.  Returns the window for the caller to close."""
    # Build the window, the start button and the graph origin.
    win, btn, origin = initDisplay(lethaldose, overdose, effectivedose)
    # Wait for the button to be clicked to begin
    waitForClick(win, btn)
    # Re-label the button for the remaining trials.
    btn = createButton(win, Point(150, 10), Point(170, 15), 'Try Next Dosage', 'gray', 'black')
    # Run the simulation 10 times
    for dose_mg in range(1, 11):
        # Simulate one dosage level and report its outcome.
        final_dose, effective_hr, overdose_hr, lethal_hr = dosageSimulation(win, dose_mg, decayrate, effectivedose, overdose, lethaldose, origin)
        displaySimulationInfo(win, dose_mg, final_dose, effective_hr, overdose_hr, lethal_hr)
        # Only wait and clear if the simulation is not over
        if dose_mg < 10:
            # Pause until the user clicks, then wipe the graph for the next trial.
            waitForClick(win, btn)
            clearGraph(win, Point(150, 50), effectivedose, overdose, lethaldose, 100)
    return win
# Define initDisplay()
# Inputs:
#     float: lethaldose
#     float: overdose
#     float: effectivedose
# Outputs:
#     Window: win
#     Rectangle: btn
#     Point: origin
def initDisplay(lethaldose, overdose, effectivedose):
    """Create the 1300x650 simulation window (logical coords 0-200 x 0-100),
    draw the empty dosage graph with its threshold lines, the divider between
    graph and results areas, and the start button."""
    # Create the window
    win = GraphWin('Drug Dosage Simulation', 1300, 650)
    # Set the coordinates in the window
    win.setCoords(0, 0, 200, 100)
    # Set the background color of the window
    win.setBackground('lightgray')
    # Create text boxes and draw them
    createText(Point(30, 75), 'Dosage (mg every 8 hours):', 'black').draw(win)
    # Build the axes: x = hours 0-168 in steps of 8, y = mg 0-100 in steps of 10.
    origin, effective_y, overdose_y, lethal_y = createGraph(win, Point(150, 50), 0, 168, 8, 0, 100, 10, effectivedose, overdose, lethaldose)
    # Thin horizontal bar separating the graph (top) from the results (bottom).
    line_rect = Rectangle(Point(0, 24.8), Point(200, 25.2))
    line_rect.setFill('black')
    line_rect.draw(win)
    # Create button for bottom portion
    btn = createButton(win, Point(150, 10), Point(170, 15), 'Begin Simulation', 'gray', 'black')
    return win, btn, origin
# createText: helper constructing a small text label (not drawn here).
def createText(pt, text, color):
    """Build and return a size-10 graphics Text object at *pt* filled with
    *color*; the caller is responsible for drawing it."""
    label = Text(pt, text)
    label.setFill(color)
    label.setSize(10)
    return label
# Define createGraph()
# Inputs:
#     Window: win
#     Point: pt         reference point anchoring the plot
#     int: x_min, x_max, x_int   x-axis range and tick interval (hours)
#     int: y_min, y_max, y_int   y-axis range and tick interval (mg)
#     float: effective, over, lethal   threshold dose levels
# Outputs:
#     Point: origin     lower-left tick position, reused for bar placement
#     float: effective_center, over_center, lethal_center   threshold line y's
def createGraph(win, pt, x_min, x_max, x_int, y_min, y_max, y_int, effective, over, lethal):
    """Draw the empty dosage chart anchored at *pt*: white plot area and
    threshold lines (via clearGraph), axis titles, tick labels, and the
    E/O/L line legends.  Returns the plot origin and threshold line y's."""
    # Wipe/redraw the plot area and the three threshold lines.
    effective_center, over_center, lethal_center = clearGraph(win, pt, effective, over, lethal, y_max)
    # Label the axes
    createText(Point(100, 60), 'Drug in Body (mg)', 'black').draw(win)
    createText(Point(150, 30), 'Hour', 'black').draw(win)
    # Create the numbers at the desired interval on the x-axis
    x_point = pt.getX() - 34
    num_x = x_max / x_int + 1
    # The plot spans 80 logical units horizontally; space ticks evenly.
    x_space = ((pt.getX() + 45) - (pt.getX() - 35)) / num_x
    # Create the numbers at the desired interval on the y-axis
    y_point = pt.getY() - 14
    num_y = y_max / y_int
    y_space = ((pt.getY() + 40) - (pt.getY() - 15)) / num_y
    # Create origin for future use
    origin = Point(x_point, y_point)
    # Loops through each number in the x interval
    for i in range(x_min, x_max + 1, x_int):
        txt_pt = Point(x_point, pt.getY() - 17)
        createText(txt_pt, str(i), 'black').draw(win)
        x_point += x_space
    # Loops through each number in the y interval
    for i in range(y_min, y_max + 1, y_int):
        txt_pt = Point(pt.getX() - 37, y_point)
        createText(txt_pt, str(i), 'black').draw(win)
        y_point += y_space
    # Label the lines
    createText(Point(pt.getX() + 47, effective_center), 'E', 'green').draw(win)
    createText(Point(pt.getX() + 47, over_center), 'O', 'yellow').draw(win)
    createText(Point(pt.getX() + 47, lethal_center), 'L', 'red').draw(win)
    # Returns the y-values for effective, overdose, and lethal
    return origin, effective_center, over_center, lethal_center
# Define clearGraph()
# Inputs:
#     Window: win
#     Point: pt         reference point anchoring the plot
#     float: effective, over, lethal   threshold dose levels (mg)
#     int: y_max        top of the y-axis (mg)
# Outputs:
#     float: effective_center, over_center, lethal_center   threshold line y's
def clearGraph(win, pt, effective, over, lethal, y_max):
    """Blank the plot area with a white rectangle and redraw the three
    threshold lines; returns their center y coordinates."""
    # Create a rectangular box at the reference point
    p1 = Point(pt.getX() - 35, pt.getY() - 15)
    p2 = Point(pt.getX() + 45, pt.getY() + 40)
    rect = Rectangle(p1, p2)
    rect.setFill('white')
    rect.draw(win)
    # Draws the lines on the graph for effective dose, overdose, and lethal dose using drawLine()
    effective_center = drawLine(win, y_max, pt, effective, 'green')
    over_center = drawLine(win, y_max, pt, over, 'yellow')
    lethal_center = drawLine(win, y_max, pt, lethal, 'red')
    return effective_center, over_center, lethal_center
# Define drawLine()
# Inputs:
#     Window: win
#     float: y_max      top of the y-axis (mg)
#     Point: pt         reference point anchoring the plot
#     int: y_coord      dose level (mg) at which to draw the line
#     str: color
# Outputs:
#     float: center_y   the drawn bar's center y coordinate
def drawLine(win, y_max, pt, y_coord, color):
    """Draw a thin (0.4 unit tall) horizontal threshold bar at dose level
    *y_coord* across the plot and return its center y coordinate.

    Variable names still say "effective" because the code was written for the
    effective line first and then reused for all three thresholds.
    """
    # Logical y units per mg over the 55-unit plot height.
    y_per_one = ((pt.getY() + 40) - (pt.getY() - 15)) / y_max
    effective_y = (pt.getY() - 15) + (y_per_one * y_coord)
    effective_ref = Point(pt.getX() - 35, effective_y + 1)
    effective_p1 = Point(effective_ref.getX(), effective_ref.getY() - 0.2)
    effective_p2 = Point(effective_ref.getX() + 80, effective_ref.getY() + 0.2)
    effective_rect = Rectangle(effective_p1, effective_p2)
    effective_rect.setFill(color)
    effective_rect.draw(win)
    # Finds the center y-value to return
    center_y = effective_ref.getY()
    # Returns the center y-value
    return center_y
# createButton: draw a labelled rectangular button and return its Rectangle.
def createButton(win, pt1, pt2, text, back_clr, txt_clr):
    """Draw a black-outlined button spanning *pt1*-*pt2* on *win*, filled
    with *back_clr* and labelled *text* (centered, in *txt_clr*); return the
    Rectangle so the caller can hit-test it."""
    button = Rectangle(pt1, pt2)
    button.setFill(back_clr)
    button.setOutline('black')
    # Centre the label inside the rectangle.
    center = Point((pt1.getX() + pt2.getX()) / 2,
                   (pt1.getY() + pt2.getY()) / 2)
    label = createText(center, text, txt_clr)
    button.draw(win)
    label.draw(win)
    return button
# waitForClick: block until the given button registers a click.
def waitForClick(win, btn):
    """Busy-poll checkForButtonClick() until *btn* is clicked on *win*."""
    while not checkForButtonClick(win, btn):
        pass
# Define dosageSimulation()
# Inputs:
#     Window: win
#     float: dose_mg    dose taken every 8 hours
#     float: decay      percentage of drug decaying per 8-hour period
#     float: effective, overdose, lethal   threshold dose levels (mg)
#     Point: origin     graph origin for bar placement
# Outputs:
#     float: total_current_dose   drug level after the final dose
#     int: effective_hr, overdose_hr, lethal_hr   8-hour period index at which
#          each threshold was first reached (None if never); callers multiply
#          by 8 to get hours
def dosageSimulation(win, dose_mg, decay, effective, overdose, lethal, origin):
    """Simulate taking *dose_mg* every 8 hours over one week, drawing a bar
    per dose time and stopping early once the lethal level is reached.

    NOTE(review): the loop condition `i <= 21` executes up to 22 iterations
    (dose times at hours 0, 8, ..., 168), despite the comment saying 21.
    """
    # Initialize variables
    total_current_dose = 0
    effective_hr = None
    overdose_hr = None
    lethal_hr = None
    i = 0
    # Each 8-hour step, in order:
    #   -> Decays the appropriate amount
    #   -> Adds the new dosage
    #   -> Checks for effective/overdose/death
    while i <= 21 and lethal_hr == None:
        # Decays the appropriate amount
        total_current_dose -= total_current_dose * (decay / 100)
        # Adds the new dosage
        total_current_dose += dose_mg
        # Record the first period at which each threshold is crossed.
        if (total_current_dose >= effective) and (effective_hr == None):
            effective_hr = i
        if (total_current_dose >= overdose) and (overdose_hr == None):
            overdose_hr = i
        if (total_current_dose >= lethal) and (lethal_hr == None):
            lethal_hr = i
        # Draw this period's bar on the graph.
        displayHourlyInfo(win, total_current_dose, origin, i)
        i += 1
        # Slow down the simulation so it does not seem instantaneous
        time.sleep(0.5)
    # Returns all of the applicable information
    return total_current_dose, effective_hr, overdose_hr, lethal_hr
# Define displayHourlyInfo()
# Inputs:
# Window: win
# float: total_current_dose
# Point: origin
# int: hour
# Outputs:
# NONE
def displayHourlyInfo(win, total_current_dose, origin, hour):
    """Draw one black bar for *hour* whose height reflects the current dose.

    win   -- graphics Window to draw on
    total_current_dose -- dose in mg, scaled by 0.55 into chart units
    origin -- Point at the chart's origin
    hour  -- step index; sets the bar's horizontal position
    """
    # Create points for the rectangle.
    # 3.63636362 (~40/11) is the horizontal spacing between bars --
    # presumably chosen to fit all steps across the chart width; confirm.
    pt1_x = origin.getX() + hour * 3.63636362 - 0.5
    pt1_y = origin.getY() - 1
    pt2_x = pt1_x + 1
    pt2_y = origin.getY() + total_current_dose * 0.55
    pt1 = Point(pt1_x, pt1_y)
    pt2 = Point(pt2_x, pt2_y)
    # Create the rectangle bar
    rect = Rectangle(pt1, pt2)
    rect.setFill('black')
    # Draw the rectangle
    rect.draw(win)
# Define displaySimulationInfo()
# Inputs:
# Window: win
# int: dose_mg
# float: final_dose
# int: effective_hr
# int: overdose_hr
# int: lethal_hr
# Outputs:
# NONE
def displaySimulationInfo(win, dose_mg, final_dose, effective_hr, overdose_hr, lethal_hr):
    """Render a per-dose summary line and an overall results banner.

    The four outcome cases (ineffective / effective / discomfort / lethal)
    are distinguished by which *_hr values are None; each gets its own
    text and color. Hours shown are step_index * 8.
    """
    # Creates the string for the information box
    if effective_hr == None:
        info = str(dose_mg) + ': Ineffective (never enough drug in the body)'
    elif overdose_hr == None:
        info = str(dose_mg) + ': Effective beginning at hour ' + str(effective_hr * 8)
    elif lethal_hr == None:
        info = str(dose_mg) + ': Effective but discomfort beginning at hour ' + str(overdose_hr * 8)
    else:
        info = str(dose_mg) + ': LETHAL'
    # Creates the information box; the y position stacks lines by dose
    # (75 - dose*3 - 4 in window coordinates).
    txt_x = 30
    txt_y = 75 - (dose_mg * 3) - 4
    txt_pt = Point(txt_x, txt_y)
    # Color encodes the outcome: black=ineffective, green=effective,
    # blue=discomfort, red=lethal.
    if effective_hr == None:
        createText(txt_pt, info, 'black').draw(win)
    elif overdose_hr == None:
        createText(txt_pt, info, 'green').draw(win)
    elif lethal_hr == None:
        createText(txt_pt, info, 'blue').draw(win)
    else:
        createText(txt_pt, info, 'red').draw(win)
    # Creates a rectangle to hide the previous results
    rect = Rectangle(Point(0, 0), Point(130, 24))
    rect.setFill('lightgray')
    rect.setOutline('lightgray')
    rect.draw(win)
    # Determines the string for the results text box
    if effective_hr == None:
        results = 'With a dose of ' + str(dose_mg) + 'mg every 8 hours, the drug was ineffective.'
    elif overdose_hr == None:
        results = 'With a dose of ' + str(dose_mg) + 'mg every 8 hours, the drug became effective at hour ' + str(effective_hr * 8) + '.'
    elif lethal_hr == None:
        results = 'With a dose of ' + str(dose_mg) + 'mg every 8 hours, discomfort began at hour ' + str(overdose_hr * 8) + '.'
    else:
        results = 'With a dose of ' + str(dose_mg) + 'mg every 8 hours, the drug was LETHAL at hour ' + str(lethal_hr * 8) + '.'
    # Creates another text box for the results
    createText(Point(65, 12), results, 'black').draw(win)
# Define checkForButtonClick()
# Inputs:
# Window: win
# Rectangle: btn
# Outputs:
# bool: clicked
def checkForButtonClick(win, btn):
    """Wait for one mouse click in *win* and report whether it landed
    inside the rectangle *btn*.

    Inputs:  Window win, Rectangle btn
    Output:  bool -- True when the click is within btn's corner points.
    """
    # Waits for a click from the user.
    click = win.getMouse()
    # Corner points of the button rectangle.
    corner1 = btn.getP1()
    corner2 = btn.getP2()
    # The click counts only when both coordinates fall inside the box.
    inside_x = corner1.getX() <= click.getX() <= corner2.getX()
    inside_y = corner1.getY() <= click.getY() <= corner2.getY()
    return inside_x and inside_y
# Define main()
def main():
    """Prompt for dosage thresholds, validate them, run the on-screen
    simulation, then wait for a click on the close button."""
    # Prompt user for applicable information
    lethaldose = float(input('How many mg of the drug is lethal? '))
    overdose = float(input('How many mg of the drug is considered an overdose? '))
    effectivedose = float(input('How many mg of the drug is considered effective? '))
    decayrate = float(input('What percentage of the drug should decay every eight hours? '))
    # Validations (helpers defined elsewhere in this file)
    lethaldose, overdose, effectivedose = validateDoseInputs(lethaldose, overdose, effectivedose)
    decayrate = validateDecayRate(decayrate)
    # Run the simulation; returns the graphics Window it drew on.
    win = runSimulation(lethaldose, overdose, effectivedose, decayrate)
    # Gracefully close the simulation with a button click
    btn = createButton(win, Point(150, 10), Point(170, 15), 'Close the Simulation', 'gray', 'black')
    # Block until the close button is clicked.
    waitForClick(win, btn)
# Entry point: run the simulation only when executed as a script, not
# when this module is imported.
if __name__ == '__main__':
    main()
| true |
ebcc1665d4838b0f574ee483b3002a9e2e105080 | Python | Tarrasch/ravens-test | /src/directions.py | UTF-8 | 278 | 3.09375 | 3 | [] | no_license | from itertools import combinations
def std_directions():
    """Return the three standard step directions: down, right, diagonal."""
    down = (1, 0)
    right = (0, 1)
    diagonal = (1, 1)
    return [down, right, diagonal]
def lid_directionss():
    """Return every unordered pair of the standard directions.

    Note: this returns an itertools.combinations iterator, not a list.
    """
    return combinations(std_directions(), 2)
def direction_mnemonic(dir):
    """Return the human-readable name for a standard direction tuple.

    Raises KeyError for any tuple outside the three standard directions,
    matching the original dict-lookup behavior.
    """
    if dir == (1, 0):
        return "down"
    if dir == (0, 1):
        return "right"
    if dir == (1, 1):
        return "diag"
    raise KeyError(dir)
| true |
25889fda138ed4a71211a555c0d55255cae88bc4 | Python | shreyanshk/sel | /track/view.py | UTF-8 | 1,181 | 2.6875 | 3 | [] | no_license | from flask import Blueprint, render_template, request, redirect
import sqlite3
view = Blueprint('track', __name__, template_folder='templates', static_folder='static')
@view.route('/track', methods = ['GET', 'POST'])
def track_home():
    """Show the tracking form (GET) or redirect to the tracking page for
    the submitted courier id (POST). Only GET/POST reach this handler."""
    if request.method == 'POST':
        return redirect('/track/' + request.form['cid'])
    return render_template('track.html')
@view.route('/track/<cid>', methods = ['GET'])
def tracking_page(cid):
    """Render all stored details of courier *cid* as a plain HTML fragment,
    or a not-found message when the id is unknown.

    NOTE(review): field values are interpolated into the response without
    HTML escaping -- confirm the data source is trusted.
    """
    def find_courier(courierid):
        # Parameterized query -- safe against SQL injection.
        conn = sqlite3.connect('couriers.sqlite')
        conn.row_factory = sqlite3.Row  # rows expose dict-like keys()
        cursor = conn.cursor()
        cursor.execute("select * from couriers where cid = ?", [courierid])
        res = cursor.fetchall()
        conn.close()
        return res
    details = find_courier(cid)
    result = ""
    # One "column: value<br>" pair per field of each matching row.
    for row in details:
        for detail in row.keys():
            result += (detail + ": " + str(row[detail]))
            result += "<br>"
    if (len(result) == 0):
        return "No such courier in the system"
    else:
        return result
"""for row in res:
for detail in row.keys():
print(detail + ": " + str(row[detail]))
print(" ")
"""
| true |
5c945de30ea2f55bf6a835e1fc97fc5a5007c6c0 | Python | qmnguyenw/python_py4e | /geeksforgeeks/python/easy/32_17.py | UTF-8 | 3,851 | 4.5625 | 5 | [] | no_license | Using Iterations in Python Effectively
Prerequisite : Iterators in Python
Following are different ways to use iterators.
**C-style approach:** This approach requires prior knowledge of total number
of iterations.
__
__
__
__
__
__
__
# A C-style way of accessing list elements
cars = ["Aston", "Audi", "McLaren"]
i = 0
while (i < len(cars)):
print cars[i]
i += 1
---
__
__
**Output:**
Aston
Audi
McLaren
**Important Points:**
* This style of looping is rarely used by python programmers.
* This 4-step approach creates no compactness with single-view looping construct.
* This is also prone to errors in large-scale programs or designs.
* There is no C-Style for loop in Python, i.e., a loop like for (int i=0; i<n; i++)
**Use of for-in (orfor each) style:**
This style is used in Python to iterate over containers such as lists,
dictionaries, n-dimensional arrays, etc. The loop fetches each element in
turn and the body processes (here, prints) it. The iterator is advanced
automatically by this construct.
__
__
__
__
__
__
__
# Accessing items using for-in loop
cars = ["Aston", "Audi", "McLaren"]
for x in cars:
print x
---
__
__
**Output:**
Aston
Audi
McLaren
See this for more examples of different data types.
**Indexing using Range function:** We can also use indexing using range() in
Python.
__
__
__
__
__
__
__
# Accessing items using indexes and for-in
cars = ["Aston", "Audi", "McLaren"]
for i in range(len(cars)):
print cars[i]
---
__
__
**Output:**
Aston
Audi
McLaren
**Enumerate:**
enumerate() is a built-in Python function that takes an iterable (an
iterator, a list, etc.) and returns tuples pairing each index with the data
at that index in the sequence. For example, enumerate(cars) returns an
iterator that will yield
__
__
__
__
__
__
__
# Accessing items using enumerate()
cars = ["Aston" , "Audi", "McLaren "]
for i, x in enumerate(cars):
print (x)
---
__
__
**Output :**
Aston
Audi
McLaren
Below solution also works.
__
__
__
__
__
__
__
# Accessing items and indexes enumerate()
cars = ["Aston" , "Audi", "McLaren "]
for x in enumerate(cars):
print (x[0], x[1])
---
__
__
**Output :**
(0, 'Aston')
(1, 'Audi')
(2, 'McLaren ')
We can also directly print returned value of enumerate() to see what it
returns.
__
__
__
__
__
__
__
# Printing return value of enumerate()
cars = ["Aston" , "Audi", "McLaren "]
print enumerate(cars)
---
__
__
**Output :**
[(0, 'Aston'), (1, 'Audi'), (2, 'McLaren ')]
enumerate() takes an optional parameter, start, which defaults to zero. We
can change this parameter to any value we like. In the code below we have
used start=1.
__
__
__
__
__
__
__
# demonstrating the use of start in enumerate
cars = ["Aston" , "Audi", "McLaren "]
for x in enumerate(cars, start=1):
print (x[0], x[1])
---
__
__
**Output :**
(1, 'Aston')
(2, 'Audi')
(3, 'McLaren ')
enumerate() helps to embed solution for accessing each data item in the
iterator and fetching index of each data item.
**Looping extensions:**
**i)** Two iterators for a single looping construct:
| true |
c2290a3e7209c478d1fff5aa6c32c90eb265fbb5 | Python | martin-andor/models_internship | /utils.py | UTF-8 | 15,292 | 2.671875 | 3 | [] | no_license | import tensorflow as tf
import pandas as pd
import numpy as np
import os
from tensorflow.python.ops import summary_ops_v2
import matplotlib.pyplot as plt
def _read_paraphrase_tsv(path, label_idx, s1_idx, s2_idx):
    """Parse one tab-separated paraphrase file, skipping its header row.

    Returns three parallel lists (int labels, first sentences, second
    sentences) taken from the given column indexes of each data row.
    """
    print(f'Processing {path}...')
    with open(path) as f:
        lines = f.readlines()
    labels = []
    sents1 = []
    sents2 = []
    c = 0
    for line in lines:
        if c > 0:  # c == 0 is the header line
            entries = line.split('\t')
            labels.append(int(entries[label_idx]))
            sents1.append(entries[s1_idx])
            sents2.append(entries[s2_idx])
        c += 1
    print(f'Total lines processed: {c}.')
    print(f'Length sents1 : {len(sents1)}')
    print(f'Length sents2 : {len(sents2)}')
    print(f'Length labels : {len(labels)}')
    return labels, sents1, sents2


def load_merged_ds(path_micr, path_paws_train, path_paws_val, batch_size, buffer_size=50000):
    """Merge the MRPC file and the PAWS train/dev files into one shuffled,
    batched tf.data.Dataset of ({'sentence1', 'sentence2'}, label).

    path_micr       -- MRPC TSV (label in col 0, sentences in cols 3/4).
    path_paws_train -- PAWS train TSV (label in col 3, sentences in cols 1/2).
    path_paws_val   -- PAWS dev TSV (same layout as the train file).
    batch_size      -- batch size of the returned dataset.
    buffer_size     -- shuffle buffer size (bug fix: this parameter was
                       previously ignored in favor of a hard-coded 50000).
    """
    labels_ms, sents1_ms, sents2_ms = _read_paraphrase_tsv(path_micr, 0, 3, 4)
    labels_pt, sents1_pt, sents2_pt = _read_paraphrase_tsv(path_paws_train, 3, 1, 2)
    labels_pv, sents1_pv, sents2_pv = _read_paraphrase_tsv(path_paws_val, 3, 1, 2)

    print('Merging datasets...')
    # Stack each field into a column vector (shape [N, 1]) as from_tensor_slices expects.
    labels = tf.expand_dims(np.array(labels_ms + labels_pt + labels_pv), -1)
    print(f'Shape of labels: {labels.shape}')
    sents1 = tf.expand_dims(np.array(sents1_ms + sents1_pt + sents1_pv), -1)
    print(f'Shape of sents1: {sents1.shape}')
    sents2 = tf.expand_dims(np.array(sents2_ms + sents2_pt + sents2_pv), -1)
    # Bug fix: this line previously printed sents1's shape.
    print(f'Shape of sents2: {sents2.shape}')

    ds = tf.data.Dataset.from_tensor_slices(
        ({'sentence1': sents1,
          'sentence2': sents2},
         labels))
    # Bug fix: honor the caller-supplied buffer_size.
    ds = ds.shuffle(buffer_size).batch(batch_size).prefetch(tf.data.AUTOTUNE)
    return ds
def lower_keep_punctuation(input_data):
    '''Standardization callable for TextVectorization: lowercase the input
    but keep punctuation, which may be relevant for long-distance
    dependencies (the default standardizer would strip it).'''
    return tf.strings.lower(input_data)
def load_microsoft_ds(path, buffer_size=50000,):
    '''Load the Microsoft Research Paraphrase Corpus TSV at *path*.

    Returns (ds, ds_raw): ds yields ({'sentence1', 'sentence2'}, label)
    pairs; ds_raw yields both sentences joined by a space, for fitting a
    TextVectorization layer.

    NOTE(review): buffer_size is accepted but never used here -- confirm
    whether shuffling was intended.
    '''
    print(f'Processing {path}...')
    with open(path) as f:
        lines = f.readlines()
    c = 0
    pos = 0
    labels = []
    sents1 = []
    sents2 = []
    sents = []
    for line in lines:
        # First line (c == 0) is the header; label is col 0, sentences cols 3/4.
        if c > 0:
            entries = line.split('\t')
            label = int(entries[0])
            if label == 1:
                pos += 1
            labels.append(label)
            sents1.append(entries[3])
            sents2.append(entries[4])
            sents.append(entries[3] + ' ' + entries[4])
        c+=1
    print(f'Total lines processed: {c}.')
    print(f'Total positive labels: {pos}. Ratio = {pos/c}')
    print(f'Length sents1 : {len(sents1)}')
    print(f'Length sents2 : {len(sents2)}')
    print(f'Length labels : {len(labels)}')
    #You have to expand the -1 dimension, so that each element comes onto a separate line. Don't forget the double brackets.
    ds = tf.data.Dataset.from_tensor_slices(
      ({'sentence1': tf.expand_dims(sents1,-1),
        'sentence2': tf.expand_dims(sents2,-1)},
        tf.expand_dims(labels,-1)))
    ds_raw = tf.data.Dataset.from_tensor_slices(
      (tf.expand_dims(sents,-1)))
    return ds, ds_raw
def load_paws_ds(path):
    '''Load a PAWS TSV file (columns: id, sentence1, sentence2, label).

    Returns (ds, raw_ds): ds yields ({'sentence1', 'sentence2'}, label);
    raw_ds yields both sentences joined by a space for TextVectorization.
    '''
    df = pd.read_csv(path, sep='\t')
    labels = df.pop('label')
    df.drop('id', inplace=True, axis=1)
    # After dropping id/label the remaining columns are the two sentences.
    sents1 = df.values[:,0]
    sents2 = df.values[:,1]
    sents = sents1 + ' ' + sents2
    print(sents[:10])
    # Expand to column vectors (shape [N, 1]) for from_tensor_slices.
    sents1 = tf.expand_dims(sents1,-1)
    sents2 = tf.expand_dims(sents2,-1)
    sents = tf.expand_dims(sents,-1)
    tar = tf.expand_dims(labels.values,-1)
    ds = tf.data.Dataset.from_tensor_slices((
      {'sentence1': sents1,
       'sentence2': sents2},
       tar))
    raw_ds = tf.data.Dataset.from_tensor_slices((sents))
    return ds, raw_ds
def plot_graphs(history, metric):
    '''Plot the training and validation curves of *metric* from a Keras
    History object onto the current matplotlib axes.'''
    plt.plot(history.history[metric])
    # '' is an (empty) format string for the validation curve.
    plt.plot(history.history['val_'+metric], '')
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric, 'val_'+metric])
class TBCallback(tf.keras.callbacks.TensorBoard):
    '''TensorBoard callback override needed so weight logging works with
    the experimental preprocessing layer: skips weights without a "name"
    attribute instead of crashing.'''
    def _log_weights(self, epoch):
        # Log a histogram (and optionally an image) per named weight.
        with self._train_writer.as_default():
            with summary_ops_v2.always_record_summaries():
                for layer in self.model.layers:
                    for weight in layer.weights:
                        # Preprocessing-layer weights may lack .name; skip them.
                        if hasattr(weight, "name"):
                            weight_name = weight.name.replace(':', '_')
                            summary_ops_v2.histogram(weight_name, weight, step=epoch)
                            if self.write_images:
                                self._log_weight_as_image(weight, weight_name, epoch)
        self._train_writer.flush()
def get_run_logdir(root_logdir):
    """Return a timestamped run directory under *root_logdir* (e.g.
    'run_2024_01_31-12_00_00') for TensorBoard logs."""
    import time
    stamp = time.strftime('run_%Y_%m_%d-%H_%M_%S')
    return os.path.join(root_logdir, stamp)
def load_embeddings(path_words, path_embeddings):
    """Load embeddings from two parallel headerless TSV files.

    path_words      -- one token per row (column 0 is used).
    path_embeddings -- one vector per row, same row order as the words.
    Returns (dict token -> float32 numpy vector, last seen vector length).
    """
    words_df = pd.read_csv(path_words, sep='\t', header=None)
    vecs_df = pd.read_csv(path_embeddings, sep='\t', header=None)
    emb_dic = {}
    emb_size = 0
    for row in range(len(words_df)):
        vector = np.asarray(vecs_df.iloc[row], dtype='float32')
        # Track the (last observed) embedding dimensionality.
        if len(vector) != emb_size:
            emb_size = len(vector)
        emb_dic[words_df.iat[row, 0]] = vector
    return emb_dic, emb_size
def get_embedding_matrix(embedding_size, vocab_size, word_index, embedding_dict):
    """Build a (vocab_size, embedding_size) matrix whose row i holds the
    embedding of the word with index i; rows for unknown words (or indexes
    beyond the vocabulary) stay zero."""
    matrix = np.zeros((vocab_size, embedding_size))
    for word, idx in word_index.items():
        if idx >= vocab_size:
            continue
        vector = embedding_dict.get(word)
        if vector is not None:
            matrix[idx] = vector
    return matrix
def get_word_index(text_vec_layer):
    """Map each token of the TextVectorization layer's vocabulary to its
    integer index (its position in the vocabulary list)."""
    vocabulary = text_vec_layer.get_vocabulary()
    return {token: position for position, token in enumerate(vocabulary)}
def load_glove_embeddings(path):
    '''Load GloVe-style embeddings: one "word v1 v2 ..." line per token.

    Returns a dict mapping each word to its float32 numpy vector.
    '''
    emb_index = {}
    with open(path) as f:
        for line in f:
            values = line.split()
            emb_index[values[0]] = np.asarray(values[1:], dtype='float32')
    return emb_index
# Maps a human-readable encoder name to its TensorFlow Hub SavedModel URL.
map_name_to_handle = {
    'bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3',
    'bert_en_cased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3',
    'bert_multi_cased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3',
    'small_bert/bert_en_uncased_L-2_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-2_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-2_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-2_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1',
    'small_bert/bert_en_uncased_L-4_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-4_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-4_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-4_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1',
    'small_bert/bert_en_uncased_L-6_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-6_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-6_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-6_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1',
    'small_bert/bert_en_uncased_L-8_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-8_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-8_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-8_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1',
    'small_bert/bert_en_uncased_L-10_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-10_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-10_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-10_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1',
    'small_bert/bert_en_uncased_L-12_H-128_A-2':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1',
    'small_bert/bert_en_uncased_L-12_H-256_A-4':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1',
    'small_bert/bert_en_uncased_L-12_H-512_A-8':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1',
    'small_bert/bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1',
    'albert_en_base':
        'https://tfhub.dev/tensorflow/albert_en_base/2',
    'electra_small':
        'https://tfhub.dev/google/electra_small/2',
    'electra_base':
        'https://tfhub.dev/google/electra_base/2',
    'experts_pubmed':
        'https://tfhub.dev/google/experts/bert/pubmed/2',
    'experts_wiki_books':
        'https://tfhub.dev/google/experts/bert/wiki_books/2',
    'talking-heads_base':
        'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1',
}
# Maps each encoder name to the TF Hub preprocessing model it requires.
map_model_to_preprocess = {
    'bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'bert_en_cased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3',
    'small_bert/bert_en_uncased_L-2_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-2_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-2_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-2_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-4_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-4_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-4_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-4_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-6_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-6_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-6_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-6_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-8_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-8_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-8_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-8_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-10_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-10_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-10_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-10_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-12_H-128_A-2':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-12_H-256_A-4':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-12_H-512_A-8':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'small_bert/bert_en_uncased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'bert_multi_cased_L-12_H-768_A-12':
        'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3',
    'albert_en_base':
        'https://tfhub.dev/tensorflow/albert_en_preprocess/3',
    'electra_small':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'electra_base':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'experts_pubmed':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'experts_wiki_books':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
    'talking-heads_base':
        'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3',
}
| true |
153e9d61c6a55a6be8533dd72a8239adc0f18e60 | Python | m-bansal/build_up | /Piling Up!.py | UTF-8 | 382 | 2.90625 | 3 | [] | no_license | from collections import deque
# Piling Up!: for each test case, decide whether cubes taken greedily from
# either end of the row can be stacked so side lengths never increase.
t=int(input())
working_cube = -1
for i in range(t):
    # First line of each case is the cube count; the value is unused since
    # the whole row is read from the next line.
    int(input())
    l=deque(map(int, input().split()))
    while l:
        # Take the larger of the two ends -- it must go under everything
        # stacked so far.
        working_cube = l.popleft() if l[0]>l[-1] else l.pop()
        if not l:
            print("Yes")
            break
        # If either remaining end exceeds the cube just placed, stacking
        # in non-increasing order is impossible.
        if l[-1]>working_cube or l[0]>working_cube:
            print("No")
            break
| true |
a91e54fa90607299bd63d225fca9058d51ec418d | Python | prrn-pg/Shojin | /Practice/atcoder/ABC/134/src/c.py | UTF-8 | 376 | 3.5 | 4 | [] | no_license | # 最大値はひとつだけ、自分自身が最大値の場合を除く
# For each index, print the maximum of the other elements: the global
# maximum unless this element is itself the (first occurrence of the)
# maximum, in which case print the second-largest value.
n = int(input())
max_a, max_ai = 0, 0
arr = []
for i in range(n):
    a = int(input())
    arr.append(a)
    if a > max_a:
        max_a = a
        max_ai = i + 1  # 1-based position of the first maximum
arr = sorted(arr)
for i in range(n):
    if max_ai == i + 1:
        print(arr[-2])  # assumes n >= 2 -- TODO confirm problem constraints
    else:
        print(max_a)
| true |
2cb6fcd05ab03139ccab51ba6817e62c752f8681 | Python | etpistezmots/TreeOfText | /WriteScriptJs.py | UTF-8 | 2,005 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
def JsWrite(result,choix):
    """Generate an HTML page rendering *result* (a Newick tree string) with
    d3.phylogram, write it to Js/index.html and open it in Firefox.

    result -- Newick-formatted tree string embedded into the page.
    choix  -- layout selector: "phylogram" (rectangular) or "radial"
              (circular dendrogram); any other value yields a page with no
              tree container.
    """
    # Layout-dependent fragments: the JS build call and the HTML body.
    textresultchoix1=""
    textresultchoix2=""
    if choix == "phylogram":
        textresultchoix1 = '''    d3.phylogram.build('#phylogram', newick, {
          width: 800,
          height: 600
        }); '''
        textresultchoix2 = ''' <h1>Phylogram</h1>
    <div id='phylogram'></div> '''
    if choix == "radial":
        textresultchoix1 = '''     d3.phylogram.buildRadial('#radialtree', newick, {
          width: 800,
          height: 600
        }); '''
        textresultchoix2 = '''  <h1>Circular Dendrogram</h1>
    <h2>Test</h2>
    <div id='radialtree'></div> '''
    txt = '''<!DOCTYPE html>
<html lang='en' xml:lang='en' xmlns='http://www.w3.org/1999/xhtml'>
  <head>
    <meta content='text/html;charset=UTF-8' http-equiv='content-type'>
    <title>Right-angle phylograms and dendrograms with d3</title>
    <script src="http://d3js.org/d3.v3.min.js" type="text/javascript"></script>
    <script src="newick.js" type="text/javascript"></script>
    <script src="d3.phylogram.js" type="text/javascript"></script>
    <script>
      function load() {
        var newick = Newick.parse(" ''' + result + '''")
        var newickNodes = []
        function buildNewickNodes(node, callback) {
          newickNodes.push(node)
          if (node.branchset) {
            for (var i=0; i < node.branchset.length; i++) {
              buildNewickNodes(node.branchset[i])
            }
          }
        }
        buildNewickNodes(newick)
    ''' + textresultchoix1 + '''}
    </script>
    <style type="text/css" media="screen">
      body { font-family: "Helvetica Neue", Helvetica, sans-serif; }
    </style>
  </head>
  <body onload="load()">'''\
    + textresultchoix2 + '''
  </body>
</html>
'''
    AdresseJsFile = os.getcwd() + os.sep + 'Js' + os.sep + 'index.html'
    # Fix: use a context manager so the file handle is closed even if the
    # write raises, instead of open()/write()/close().
    with open(AdresseJsFile, "w") as html_file:
        html_file.write(txt)
    # NOTE(review): browser is hard-coded; consider webbrowser.open() for
    # portability.
    os.system("firefox " + AdresseJsFile)
| true |
bd4fef9e0db552ed09c9a45af0babd9d35ed51d8 | Python | Aasthaengg/IBMdataset | /Python_codes/p02408/s767220150.py | UTF-8 | 273 | 3.484375 | 3 | [] | no_license | n = int(input())
deck = {}
suits = ['S', 'H', 'C', 'D']
for suit in suits:
deck[suit] = list(range(1, 14))
for _ in range(n):
suit, rank = input().split()
deck[suit].remove(int(rank))
for suit in suits:
for rank in deck[suit]:
print(suit, rank)
| true |
bf961f11b2207f015c1b4f6a8a7b42d76d5661eb | Python | joshuagreenss/Smoothstack-Cloud-Cohort-Assignments | /2_b.py | UTF-8 | 1,040 | 3.75 | 4 | [] | no_license | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# The cells below are quick REPL-style demos of Python container types.
print([1,"word",2.00])  # lists can mix element types
# %%
l = [1,1,[1,2]]
print(l[2][1])  # nested indexing: third element is a list -> 2
# %%
lst = ['a','b','c']
print(lst[1:])  # slicing from index 1 -> ['b', 'c']
# %%
# Build a weekday-name -> index mapping one key at a time.
wkdy = {}
wkdy["Sunday"] = 0
wkdy["Monday"] = 1
wkdy["Tuesday"] = 2
wkdy["Wednesday"] = 3
wkdy["Thursday"] = 4
wkdy["Friday"] = 5
wkdy["Saturday"] = 6
print(wkdy)
# %%
d = {'k1':[1,2,3]}
print(d['k1'][1])  # dict value is a list; index into it -> 2
# %%
t = tuple([1,[2,3]])
print(t)  # tuples may contain mutable elements
# %%
# Sets hold unique elements; iteration order is arbitrary.
m = set('Mississippi')
print("".join(m))
# %%
m.add('x')
print(''.join(m))
# %%
print(set([1,1,2,3]))  # duplicates collapse -> {1, 2, 3}
# %%
def findFactors(s,e):
    """Print, comma-separated, all numbers in [s, e] that are divisible by
    7 but are not multiples of 5.

    Bug fix: the original condition was `i%5 != 5`, which is always true,
    so multiples of 35 were wrongly included; it should be `i%5 != 0`.
    """
    vals = ','.join(str(i) for i in range(s, e + 1)
                    if i % 7 == 0 and i % 5 != 0)
    print(vals)
# %%
findFactors(2000,3200)
# %%
def fact_recur(x):
    """Return x! computed recursively (base case: 0! == 1)."""
    return 1 if x == 0 else x * fact_recur(x - 1)
# %%
def fact(x):
    """Return x! computed iteratively (x <= 1 yields 1)."""
    result = 1
    for factor in range(2, x + 1):
        result *= factor
    return result
# %%
fact(9)
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
# %%
| true |
34f70aed28e6e18c60a299ab924da774d2a04ca9 | Python | OxfordHackspace/delivery | /drone.py | UTF-8 | 1,992 | 3.09375 | 3 | [] | no_license | from math import ceil, sqrt
class Drone:
    """A delivery drone: tracks position and travel time, holds a FIFO
    queue of deferred commands, and records load/deliver actions as
    simulation command strings in ``commandText``.
    """

    def __init__(self, id, capacity, location):
        self.id = id
        # Fix: capacity was accepted but never stored; keep it so callers
        # can query the payload limit.
        self.capacity = capacity
        self.location = location
        self.turnsToDestination = 0
        self.totalTime = 0
        self.order = None
        self.item = None
        # Fix: previously only assigned inside load(); initialise here so
        # the attribute always exists.
        self.warehouse = None
        self.commands = []
        self.commandText = []

    def step(self):
        """Advance one turn: keep flying, or run the next queued command."""
        if self.turnsToDestination > 0:
            self.turnsToDestination -= 1
        else:
            self.runNextCommand()

    def setTurnsLeft(self, destination):
        """Set the travel countdown to *destination* (ceil of the Euclidean
        distance) and move the drone's logical location there immediately."""
        if (self.location['row'] != destination['row']) or (self.location['col'] != destination['col']):
            row_diff = self.location['row'] - destination['row']
            col_diff = self.location['col'] - destination['col']
            self.turnsToDestination = ceil(sqrt(row_diff * row_diff + col_diff * col_diff))
            self.totalTime += self.turnsToDestination
        else:
            self.turnsToDestination = 0
        self.location = destination

    def load(self, item, warehouse, order):
        """Fly to *warehouse* and record loading one *item* for *order*."""
        self.item = item
        self.order = order
        self.warehouse = warehouse
        self.setTurnsLeft(warehouse['location'])
        self.commandText.append('%d L %d %d 1'%(self.id, warehouse['id'], item['type']))

    def deliver(self):
        """Record delivery of the held item and fly to the order location.
        No-op when the drone carries no order."""
        if self.order is not None:
            self.commandText.append('%d D %d %d 1'%(self.id, self.order['id'], self.item['type']))
            self.setTurnsLeft(self.order['location'])
            self.order = None
            self.item = None

    def addCommand(self, command):
        """Queue a command -- a list of [callable, *args] -- executed FIFO."""
        self.commands.insert(0, command)

    def runNextCommand(self):
        """Pop and execute the oldest queued command, if any."""
        if len(self.commands) > 0:
            command = self.commands.pop()
            if len(command) > 1:
                command[0](*command[1:])
            else:
                command[0]()

    def isBusy(self):
        """True while the drone is still travelling."""
        return self.turnsToDestination != 0
| true |
d0e0d24ce94ef545d3987ee8120a192ad65aa81e | Python | wladbelsky/phonebook | /main_window.py | UTF-8 | 4,745 | 2.6875 | 3 | [] | no_license | from PyQt4 import QtGui, uic
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QAbstractTableModel
from PyQt4.QtGui import QTableView
from db_connect import db_connect
from add_contact import add_contact
import datetime
class main_window(QtGui.QMainWindow):
def __init__(self, user_email, user_password):
super(main_window, self).__init__()
uic.loadUi('ui/main.ui', self)
#self.__db = db_connect()
self.__user_email = user_email
self.__user_password = user_password
self.update_phonebook_table()
self.refresh_alphabet_director()
self.__data_model = TableModel(self.contact_list)
self.phone_book_table.setModel(self.__data_model)
self.actionAdd.triggered.connect(self.add_contact)
self.actionEdit_selected.triggered.connect(self.edit_contact)
self.actionDelete.triggered.connect(self.remove_contact)
self.actionRefresh.triggered.connect(self.refresh_list)
self.alphabet_director.itemClicked.connect(self.on_alphabet_director_item_selected)
self.birthday_notify()
def on_alphabet_director_item_selected(self, item):
for index, contact in enumerate(self.contact_list):
if contact[1][0].upper() == item.text():
self.phone_book_table.selectRow(index)
break
def birthday_notify(self):
self.alert = QtGui.QMessageBox()
self.alert.setWindowTitle("Birthdays notification")
notification_text = "Here is list of upcoming birthdays for 7 days:\n"
db = db_connect()
birthday_list = db.get_birthdays_for_week(self.__user_email, self.__user_password)
for contact in birthday_list:
contact_text = "{fio},\t{phone},\t{dob}\n".format(fio=contact[1],phone=contact[2],dob=contact[3].strftime("%d-%m-%Y"))
notification_text = notification_text + contact_text
self.alert.setText(notification_text)
self.alert.show()
def show_nothing_selected_message_box(self):
self.alert = QtGui.QMessageBox()
self.alert.setWindowTitle("Nothing selected")
self.alert.setText("Please select item to operate with.")
self.alert.show()
def add_contact(self):
self.add_dialog = add_contact(self.__user_email, self.__user_password)
self.add_dialog.show()
self.add_dialog.closeEvent = self.refresh_list
def edit_contact(self):
indexes = self.phone_book_table.selectionModel().selectedRows()
if len(indexes):
self.edit_dialog = add_contact(self.__user_email, self.__user_password, self.contact_list[indexes[0].row()])
self.edit_dialog.show()
self.edit_dialog.closeEvent = self.refresh_list
else:
self.show_nothing_selected_message_box()
def remove_contact(self):
indexes = self.phone_book_table.selectionModel().selectedRows()
if len(indexes):
db = db_connect()
db.delete_contact(self.__user_email, self.__user_password, self.contact_list[indexes[0].row()][0])
self.refresh_list()
else:
self.show_nothing_selected_message_box()
def update_phonebook_table(self):
self.contact_list = db_connect().get_contact_list(self.__user_email,self.__user_password)
def refresh_list(self,_=None):
self.update_phonebook_table()
self.__data_model.setData(self.contact_list)
self.refresh_alphabet_director()
print("list updated")
def refresh_alphabet_director(self):
letters = []
for contact in self.contact_list:
if contact[1][0].upper() not in letters:
letters.append(contact[1][0].upper())
self.alphabet_director.clear()
self.alphabet_director.addItems(letters)
class TableModel(QAbstractTableModel):
    """Read-mostly Qt table model over contact rows; column 0 of each row
    (the contact id) is hidden, columns 1+ are shown under fixed headers."""
    def __init__(self, data):
        super(TableModel, self).__init__()
        self.__data = data
        self.headers = ["Name", "Phone", "Date of birth"]
    def data(self, index, role):
        # Returns None implicitly for roles other than DisplayRole.
        if role == Qt.DisplayRole:
            # +1 skips the hidden id column of the underlying row.
            value = self.__data[index.row()][index.column()+1]
            if isinstance(value, datetime.date):
                # Render dates as DD-MM-YYYY.
                return value.strftime("%d-%m-%Y")
            return value
    def headerData(self, col, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self.headers[col]
        return None
    def rowCount(self, index):
        return len(self.__data)
    def columnCount(self, index):
        # One column fewer than the row width (id column is hidden).
        # NOTE(review): raises IndexError when the data list is empty.
        return len(self.__data[0])-1
    def setData(self,data):
        """Replace the backing data and tell attached views to re-read it."""
        self.beginResetModel()
        self.__data = data
        self.endResetModel()
| true |
f09e6cbe2116374b8cde72b843d314c034b11e45 | Python | brolertools/game_bb | /src/util.py | UTF-8 | 1,816 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import math
import src.const as const
import difflib
# Whether the two lists share any element.
def is_list_has_common(list_l, list_r):
    """Return True if list_l and list_r have at least one element in common.

    BUGFIX: the previous version returned the inverted result (False on a
    match, True otherwise), contradicting both its name and the comment.
    """
    for l in list_l:
        for r in list_r:
            if l == r:
                return True
    return False
# Euclidean distance between two (x, y) coordinates.
def calc_distance(start, end):
    """Return the straight-line distance between `start` and `end`."""
    dx = start[0] - end[0]
    dy = start[1] - end[1]
    return math.sqrt(dx * dx + dy * dy)
# March duration (whole seconds) to cover the distance at `speed` units/hour.
def calc_march_duration(start, end, speed):
    """Return travel time in seconds, truncated to an int."""
    hours = calc_distance(start, end) / speed
    return int(hours * 60 * 60)
# Troops needed for a land tile: base amount for the tile level, scaled
# up with distance from the attacker.
def calc_land_troops(start, end, land_level):
    # const.land_basics_troops_list maps level -> base troop count;
    # const.troops_increase_factor is the per-distance-unit surcharge.
    return const.land_basics_troops_list[land_level] * (1 + calc_distance(start, end) * const.troops_increase_factor)
# Candidate locations reachable within one wipe-out window, ordered from
# most to least time-consuming.
def calc_best_march_duration(start, location_list, speed):
    """Filter `location_list` to targets whose march duration fits in
    const.one_wipe_out_duration, sorted by duration (descending).

    Computes each duration once instead of re-deriving it for the sort
    key and again for the filter (the old version called
    calc_march_duration twice per location).
    """
    timed = [(loc, calc_march_duration(start, loc, speed)) for loc in location_list]
    # Stable sort, longest march first (matches the old reverse=True order).
    timed.sort(key=lambda pair: pair[1], reverse=True)
    return [loc for loc, duration in timed if duration <= const.one_wipe_out_duration]
# True when the two strings are at least marginally similar.
def is_similar(str1, str2):
    """Return True if the quick_ratio similarity of the strings is >= 0.01."""
    matcher = difflib.SequenceMatcher(None, str1, str2)
    return matcher.quick_ratio() >= 0.01
# True when the two strings are strongly similar.
def is_most_similar(str1, str2):
    """Return True if the quick_ratio similarity of the strings is >= 0.85."""
    matcher = difflib.SequenceMatcher(None, str1, str2)
    return matcher.quick_ratio() >= 0.85
# Extract the first known resource name appearing in any text fragment.
def get_resource_name(text_list):
    # const.resource_name_list holds the known resource names; return the
    # first one found as a substring of any fragment.
    for i in text_list:
        for j in const.resource_name_list:
            if i.find(j) >= 0:
                return j
    # Implicitly returns None when nothing matches.
if __name__ == '__main__':
    # Ad-hoc manual check of the similarity helper.
    print(is_similar("太史慈", "太史"))
| true |
51d6d1e275e3d05273367b1d6809362a26e1dcfa | Python | comet-ml/comet-examples | /keras/comet-keras-cnn-lstm-example.py | UTF-8 | 2,738 | 2.875 | 3 | [] | no_license | # coding: utf-8
# import comet_ml in the top of your file(before all other Machine learning libs)
'''Example adapted from https://github.com/keras-team/keras/tree/master/examples'''
from comet_ml import Experiment
import os
# Setting the API key (saved as environment variable)
exp = Experiment(
    #api_key="YOUR API KEY",
    # or
    api_key=os.environ.get("COMET_API_KEY"),
    project_name='comet-examples')
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D
from keras.datasets import imdb
# Hyperparameters, grouped by the layer they configure.
# Embedding
max_features = 20000
maxlen = 100
embedding_size = 128
# Convolution
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
epochs = 2
params = {"layer1_kernel_size": kernel_size,
          "max_features": max_features,
          "maxlen": maxlen,
          "embedding_size": embedding_size,
          "layer1_filters": filters,
          "dropout": 0.25,
          "layer1": "Conv1D",
          "layer2": "LSTM",
          "layer2_nodes": lstm_output_size,
          "layer1_pool_size": pool_size,
          "epochs": epochs,
          "batch_size": batch_size
          }
# log params to Comet.ml
exp.log_parameters(params)
print('Loading data...')
# IMDB sentiment data; skip_top drops the 50 most frequent (stop) words.
(x_train, y_train), (x_test, y_test) = imdb.load_data(
    num_words=max_features, skip_top=50)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
# Embedding -> Conv1D -> MaxPool -> LSTM -> sigmoid: binary sentiment classifier.
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters,
                 kernel_size,
                 padding='valid',
                 activation='relu',
                 strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# always a good idea to print summary so it gets logged to the output tab on your experiment
print(model.summary())
print('Training...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| true |
# Count 0..4, announcing each iteration, then show the final counter (5).
i = 0
while i < 5:
    print(i)
    i += 1
    print("in the loop")
print(i)
3a99acc922a8a5ceba0686e7c50155b815db79cf | Python | freshgrlc/freshgrlc-webwallet-backend | /ratelimit.py | UTF-8 | 4,058 | 2.53125 | 3 | [] | no_license | import functools
from datetime import datetime, timedelta
from flask import request, abort
from http import HTTPStatus
class Type(object):
    """Enumeration-like marker for when a rate limit applies."""
    def __init__(self, value):
        self.value = value


# Sentinel instances; DISABLED is plain None so it tests falsy.
Type.DISABLED = None
Type.ALL_REQUESTS = Type(1)
Type.ON_ERRORS = Type(2)
class Context(object):
    """Named rate-limit bucket: at most `max_hits` within `window` seconds."""
    contexts = {}

    def __init__(self, name, max_hits=10, window=3600):
        self.name = name
        self.max_hits = max_hits
        self.window = window
        self.contexts[name] = self
        print(f'Created rate limit context "{name}", max allowed {max_hits} / {window/60} minutes', flush=True)

    @classmethod
    def get(cls, name):
        """Resolve `name` to a Context, creating one on first use."""
        if name is None:
            return cls._default
        if isinstance(name, cls):
            return name
        existing = cls.contexts.get(name)
        return existing if existing is not None else cls(name)


Context._default = Context(None)
class Limit(object):
    """Binds a rate-limit Type to the Context it is accounted in."""
    def __init__(self, type, context=None):
        # `type` is one of the Type sentinels; `context` may be a bucket
        # name, a Context instance, or None (the default context).
        self.type = type
        self.context = Context.get(context)
class ClientState(object):
    """Per-(client, context) hit counter used to decide blocking."""
    # Maps client id -> list of ClientState objects (one per context).
    blocklist = {}
    def __init__(self, client, context):
        self.client = client
        self.context = context
        self.start = datetime.now()
        self.hits = 0
        self.register()
    def hit(self):
        """Record one hit, restarting the window if it has elapsed.

        BUGFIX: previously assigned `self.state` (a never-read attribute)
        instead of `self.start`, so the window start never reset and the
        counter could not roll over into a fresh window.
        """
        if self.elapsed:
            self.start = datetime.now()
            self.hits = 1
        else:
            self.hits += 1
    @property
    def blocked(self):
        # Over the limit and still inside the current window.
        return self.hits >= self.context.max_hits and not self.elapsed
    @property
    def elapsed(self):
        return self.start + timedelta(seconds=self.context.window) <= datetime.now()
    def register(self):
        # Append self to this client's list of per-context states.
        if self.client not in self.blocklist:
            self.blocklist[self.client] = [self]
        else:
            self.blocklist[self.client].append(self)
    @classmethod
    def get_all_for(cls, client):
        return cls.blocklist[client] if client in cls.blocklist else []
    @classmethod
    def get_filtered(cls, client, contexts):
        return [ cls.get(client, c) for c in contexts ]
    @classmethod
    def get(cls, client, context):
        """Return the state for (client, context), creating it if missing."""
        # Normalize Limit / Type / str down to a Context instance.
        if isinstance(context, Limit):
            context = context.context
        elif isinstance(context, Type):
            context = Limit(context).context
        elif type(context) == str:
            context = Context.get(context)
        try:
            return next(filter(lambda s: s.context == context, cls.get_all_for(client)))
        except StopIteration:
            return cls(client, context)
def _client():
    # Honor the reverse-proxy header when present; fall back to the socket address.
    return request.headers.get('X-Forwarded-For', request.remote_addr)
def required(limit, response):
    """Decide whether this response should count against `limit`."""
    if isinstance(limit, Type):
        limit = Limit(limit)
    if limit is None or not limit.type:
        # No limit configured (None / Type.DISABLED).
        return False
    if limit.type == Type.ALL_REQUESTS:
        return True
    # ON_ERRORS counts only non-success responses (HTTP status >= 300).
    return limit.type == Type.ON_ERRORS and response.status_code >= 300
def check(limit, response):
    # Record one hit for this client/context when the response counts
    # against the given limit.
    if required(limit, response):
        ClientState.get(_client(), limit).hit()
def is_blocked(client, contexts=None):
    """Return True if `client` is over the limit in any (or the given) contexts.

    Logs details for the first blocked state found, then short-circuits.
    """
    for state in filter(lambda s: s.blocked, ClientState.get_all_for(client) if contexts is None else ClientState.get_filtered(client, contexts)):
        current_window = int((datetime.now() - state.start).total_seconds())
        window_remaining = int((state.start + timedelta(seconds=state.context.window) - datetime.now()).total_seconds())
        print(f'Client {client} rate limited [context: "{state.context.name}"]: {state.hits} hits in {current_window} seconds, {window_remaining} seconds until release', flush=True)
        return True
    return False
def apply(contexts=None):
    """Decorator factory: reject requests (HTTP 429) from blocked clients."""
    if isinstance(contexts, Context) or type(contexts) == str:
        # Normalize a single context to a 1-tuple.
        contexts = contexts,
    def decorator(api_func):
        @functools.wraps(api_func)
        def wrapper(*args, **kwargs):
            if is_blocked(_client(), contexts):
                abort(HTTPStatus.TOO_MANY_REQUESTS)
            return api_func(*args, **kwargs)
        return wrapper
    return decorator
| true |
97966cb66185192f6021d1d96a7b76f2236fffda | Python | modihere/NetLabS5 | /chat fork/server.py | UTF-8 | 784 | 2.8125 | 3 | [
"MIT"
] | permissive | import os
import socket as s
HOST = "localhost"
PORT = 11256
# Listening TCP socket, created at import time; forked children inherit it.
_socket = s.socket(s.AF_INET, s.SOCK_STREAM)
_socket.bind((HOST, PORT))
_socket.listen(4)
# Maps client number -> accepted connection; used by shout() to broadcast.
dict_of_clients = {}
def shout(data):
    # Broadcast `data` to every connected client.
    for num in dict_of_clients:
        dict_of_clients[num].send(data)
def handle_client(i, conn, addr):
    """Relay messages from client `i` to all clients until it disconnects."""
    while True:
        data = conn.recv(1024)
        if not data:
            # An empty read means the peer closed the connection.
            print("Connection broken with client", i)
            conn.close()
            return
        shout(data)
def server():
    """Accept up to 3 clients, forking one child process per connection."""
    i = 1;
    while i < 4:
        conn, addr = _socket.accept()
        dict_of_clients[i] = conn
        print(dict_of_clients)
        child_pr = os.fork()
        if child_pr == 0:
            # Child process: serve this client, then leave the accept loop.
            print("Connected by client", i)
            handle_client(i, conn, addr)
            break
        else:
            # Parent: move on to accept the next client.
            i +=1
if __name__ == "__main__":
    # Run the accept/fork loop when executed directly.
    server()
| true |
c0d3745c4253be3e0ba58e73bcb669f32a191db4 | Python | ziyixiArchive/fwi-script | /Script/smaller_region_related/tar_sacfiles.py | UTF-8 | 843 | 2.90625 | 3 | [] | no_license | """
tar all the directories in a directory
"""
from os.path import join, basename, dirname
from glob import glob
import subprocess
import tqdm
import multiprocessing
import click
from functools import partial
def tar_onefile(fpath, output_dir):
    """Archive the directory `fpath` into <output_dir>/<basename>.tar.gz."""
    thebase = basename(fpath)
    tarpath = join(output_dir, f"{thebase}.tar.gz")
    # Argument list with shell=False: robust against spaces and shell
    # metacharacters in the paths (the previous shell-string form was not).
    subprocess.call(["tar", "-czf", tarpath, "-C", fpath, "."])
@click.command()
@click.option('--base_dir', required=True, type=str)
@click.option('--output_dir', required=True, type=str)
def main(base_dir, output_dir):
    """Tar every entry under base_dir into output_dir, 48 workers in parallel."""
    all_fpath = glob(join(base_dir, "*"))
    # tqdm wraps imap purely for a progress bar; the list() forces completion.
    with multiprocessing.Pool(processes=48) as pool:
        r = list(tqdm.tqdm(pool.imap(
            partial(tar_onefile, output_dir=output_dir), all_fpath), total=len(all_fpath)))
if __name__ == "__main__":
    main()
f8e0c039941a749b1165e7f0dd3abf19aef21641 | Python | BBN-E/Hume | /src/python/knowledge_base/shared_id_manager/shared_id_manager.py | UTF-8 | 1,921 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import sys
import io
import os
import json
class SharedIDManager:
    """Issues sequential per-document ids of the form "<type>-<docid>-<n>"."""
    # Maps object type to ID counter
    in_document_object_types = {
        "Mention": 0,
        "Entity": 0,
        "Relation": 0,
        "Event": 0,
        "Sentence": 0,
        "MentionSpan": 0,
        "Span": 0,
        "Worksheet": 0,
        "TimeSeries": 0,
        "Property": 0,
        "Factor": 0,
        "ReportedValue": 0,
        "TableRef": 0,
        "RelationMention": 0,
        "EventMention": 0,
        "ValueMention": 0,
        "CausalFactor": 0,
    }
    # Load country name to CAMEO code mappings
    # NOTE: this file read runs once, at class-definition (import) time.
    with io.open(os.path.dirname(os.path.realpath(__file__)) +
                 "/../data_files/country_name_to_cameo-code_mapping.json",
                 "r", encoding="utf8") as f:
        country_name_to_cameo_code_mappings = json.load(f)
        # Lower-case the keys for case-insensitive lookups.
        country_name_to_cameo_code_mappings = {
            k.lower(): v for k, v in
            country_name_to_cameo_code_mappings.items()}
    def __init__(self):
        pass
    @classmethod
    def is_in_document_type(cls, object_type):
        # True when `object_type` has a counter registered above.
        return object_type in cls.in_document_object_types
    @classmethod
    def get_in_document_id(cls, object_type, docid):
        """Return the next "<type>-<docid>-<n>" id, bumping the type counter."""
        if not cls.is_in_document_type(object_type):
            print("IDManager could not find in-document object type: ",)
            print(object_type)
            sys.exit(1)
        current_id = (object_type + "-" + docid + "-" +
                      str(cls.in_document_object_types[object_type]))
        cls.in_document_object_types[object_type] += 1
        return current_id
    @classmethod
    def convert_to_cameo_optionally(cls, name):
        # Replace a known country name with its "CAMEO<code>" form;
        # unknown names pass through unchanged.
        if name.lower() in cls.country_name_to_cameo_code_mappings:
            cameo_code = cls.country_name_to_cameo_code_mappings[name.lower()]
            name = "CAMEO" + cameo_code.lower()
        return name
| true |
71bb64c18ad69715e87e28fc628f9f301c4fff92 | Python | DarisaLLC/pydev | /find_lines_lsd.py | UTF-8 | 4,913 | 2.703125 | 3 | [] | no_license | import os
import numpy as np
import cv2
import sys
import math
from pathlib import Path
from matplotlib import pyplot as plt
import time
import argparse # provide interface for calling this script
import utils
import opencv_utils
from ransac_vanishing_point import ransac_vanishing_point_detection
from circles import find_circle
def lsd_lines(source_image, min_line_length=0.0175, max_line_length=0.1, min_precision=0):
    """LSD algorithm for line detection.
    Args:
        source_image: An OpenCV Image.
        min_line_length: Minimum line size. Specified as a percentage of the
            source image diagonal (0-1).
        max_line_length: Maximum line size. Specified as a percentage of the
            source image diagonal (0-1).
        min_precision: Minimum precision of detections.
    Returns:
        Array of line endpoints tuples (x1, y1, x2, y2).
    """
    height, width = source_image.shape[:2]
    diagonal = math.sqrt(height ** 2 + width ** 2)
    # NOTE(review): the min/max_line_length parameters (and `diagonal`) are
    # effectively ignored — fixed pixel limits are hard-coded just below.
    min_line_length = 16  # min_line_length * diagonal
    max_line_length = 96  # max_line_length * diagonal
    """
    createLineSegmentDetector([, _refine[, _scale[, _sigma_scale[, _quant[, _ang_th[, _log_eps[, _density_th[, _n_bins]]]]]]]]) -> retval
    .   @brief Creates a smart pointer to a LineSegmentDetector object and initializes it.
    .
    .   The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want
    .   to edit those, as to tailor it for their own application.
    .
    .   @param _refine The way found lines will be refined, see #LineSegmentDetectorModes
    .   @param _scale The scale of the image that will be used to find the lines. Range (0..1].
    .   @param _sigma_scale Sigma for Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
    .   @param _quant Bound to the quantization error on the gradient norm.
    .   @param _ang_th Gradient angle tolerance in degrees.
    .   @param _log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advance refinement
    .   is chosen.
    .   @param _density_th Minimal density of aligned region points in the enclosing rectangle.
    .   @param _n_bins Number of bins in pseudo-ordering of gradient modulus.
    """
    detector = cv2.createLineSegmentDetector(cv2.LSD_REFINE_ADV)
    lines, rect_widths, precisions, false_alarms = detector.detect(source_image)
    line_lengths = [utils.get_line_length(l[0]) for l in lines]
    line_angles = [utils.get_line_angle(l[0]) for l in lines]
    # Keep segments within the (hard-coded) length bounds and above min_precision.
    return [l[0] for (i, l) in enumerate(lines)
            if max_line_length > line_lengths[i] > min_line_length and
            precisions[i] > min_precision]
def calcVanishingPoint(image):
    """Least-squares vanishing point from detected line segments.

    NOTE(review): calls `lsd(image)`, which is not defined in this file
    (only `lsd_lines` exists) — as written this raises NameError.
    """
    lines = lsd(image)
    # NOTE(review): points takes only column 2; normals uses columns 2:4 —
    # presumably points should be lines[:, 2:4]; verify against usage.
    points = lines[:, 2:3]
    normals = lines[:, 2:4] - lines[:, :2]
    normals /= np.maximum(np.linalg.norm(normals, axis=-1, keepdims=True), 1e-4)
    # Rotate direction vectors 90° to get line normals.
    normals = np.stack([normals[:, 1], -normals[:, 0]], axis=1)
    normalPointDot = (normals * points).sum(1)
    if lines.shape[0] == 2:
        VP = np.linalg.solve(normals, normalPointDot)
    else:
        # Over-determined: least-squares solution.
        VP = np.linalg.lstsq(normals, normalPointDot)[0]
        pass
    return VP
if __name__ == '__main__':
    import sys
    def bgrFromHue(ang):
        # Map an angle (radians) to a BGR color via HSV for line visualization.
        hsv = np.zeros((1, 1, 3), np.uint8)
        hsv[0, 0, 0] = ((math.degrees(ang) % 360) * 256) / 360.0
        hsv[0, 0, 1] = ((math.degrees(ang) % 90) * 256) / 90.0
        hsv[0, 0, 2] = ((math.degrees(ang) % 45) * 256) / 45.0
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return bgr[0, 0, 0], bgr[0, 0, 1], bgr[0, 0, 2]
    display = None
    if len(sys.argv) < 2 or (not Path(sys.argv[1]).is_file() or not Path(sys.argv[1]).exists()):
        print(' File Does not exist or found ')
        sys.exit(1)
    lab_tuple = opencv_utils.load_reduce_convert(sys.argv[1], 1)
    display = opencv_utils.convert_lab2bgr(lab_tuple)
    dshape = display.shape
    print(dshape)
    # Detect line segments on the L channel, then estimate the vanishing point.
    lines = lsd_lines(lab_tuple[0])
    vp = ransac_vanishing_point_detection(lines, distance=10)
    cv2.circle(display, vp, 10, (255, 255, 255), cv2.FILLED, cv2.LINE_AA)
    cv2.circle(display, vp, 30, (0.0, 1.0, 1.0), 0, 3)
    print(vp)
    # Draw each segment colored by its angle.
    for line in lines:
        angle = utils.get_line_angle(line)
        x1, y1, x2, y2 = line
        b, g, r = bgrFromHue(angle)
        cv2.line(display, (x1, y1), (x2, y2), (b * 1.0, g * 1.0, r * 1.0, 0.5), 2)
    hough_radii = np.arange(20, 50, 3)
    circle_zip, edges = find_circle(lab_tuple[0], hough_radii, 1)
    # note row column to x y to width height
    circles = []
    for center_y, center_x, radius in circle_zip:
        circles.append((center_x, center_y, radius))
        cv2.circle(display, (center_x, center_y), radius, (0.0, 1.0, 1.0), 0, 3)
    cv2.namedWindow('Display', cv2.WINDOW_NORMAL)
    cv2.imshow("Display", display)
    key = cv2.waitKey(0) & 0xFF
| true |
4a21c8b1e895a7874b82f7421a905a97201c18ce | Python | kshvarma333/Student-Repository | /HW10_test_SriHarshaVarmaKonda.py | UTF-8 | 1,491 | 2.546875 | 3 | [] | no_license | from HW10_SriHarshaVarmaKonda import University
import unittest
class TestHomeWork10Functions(unittest.TestCase):
    """Unit tests for the University summaries (student/instructor/major).

    NOTE(review): the repository path is hard-coded and machine-specific,
    so these tests only run on the original author's machine.
    """
    def test_student(self) -> None:
        """ Testing student """
        uni = University("/Users/harru/PYTHON(SSW 810 )")
        uni.student()
        lis = []
        for key in uni.student_dict:
            lis.append(uni.student_dict[key])
        # Expected summary for the first student record.
        result: list = [{'name': 'Baldwin, C', 'major': 'SFEN', 'course': ['CS 501', 'SSW 564', 'SSW 567', 'SSW 687'], 'remaining_required': ['SSW 540', 'SSW 555'], 'remaining_elective': ['CS 513', 'CS 545'], 'gpa': 3.4375}]
        self.assertEqual(lis[0], result[0])
    def test_instructor(self) -> None:
        """ Testing instructor summary """
        uni = University("/Users/harru/PYTHON(SSW 810 )")
        uni.instructor()
        lis = []
        for key in uni.instructor_summary_dict:
            lis.append(uni.instructor_summary_dict[key])
        result: list = [{'cwid': '98765', 'name': 'Einstein, A', 'dept': 'SFEN', 'students': 4}]
        self.assertEqual(lis[0], result[0])
    def test_major(self) -> None:
        """ Testing major requirements """
        uni = University("/Users/harru/PYTHON(SSW 810 )")
        uni.major()
        lis = []
        for key in uni.major_dict:
            lis.append(uni.major_dict[key])
        result: list = [{'required': ['SSW 540', 'SSW 564', 'SSW 555', 'SSW 567'], 'elective': ['CS 501', 'CS 513', 'CS 545']}]
        self.assertEqual(lis[0], result[0])
def main():
    # verbosity=2 lists each test; exit=False keeps the interpreter alive
    # (useful when invoked from an IDE or REPL).
    unittest.main(exit=False, verbosity=2)
if __name__ == '__main__':
    main()
| true |
def sum_number(my_list):
    """Accumulate numeric tokens into the module-global `result`.

    Returns True (stop signal) on the '*' terminator or on a token that
    cannot be parsed as a number; returns None when every token was consumed.
    """
    global result
    for token in my_list:
        if token == '*':
            print(f'Сумма чисел равна: {result}')
            return True
        try:
            value = float(token)
        except ValueError:
            print('Некорректный ввод')
            return True
        result += value
result = 0.0
while True:
    # Read space-separated numbers; '*' terminates the program.
    # (Renamed the loop variable so the builtin `str` is no longer shadowed.)
    line = input('Введите числа, разделенные пробелом (* - конец ввода): ')
    tokens = line.split()
    if sum_number(tokens):
        break
d09329c5088e7c390e954ff1f3fc48f04520f5a4 | Python | sdyer/tagsub3 | /tagsub/tags/SaveRawTag.py | UTF-8 | 1,847 | 2.6875 | 3 | [
"MIT"
] | permissive | from .TagContainer import TagContainer
from .values.Value import Value
from .values.Token import Token
from ..exceptions import InvalidTagKeyName
class SaveRawTag(TagContainer):
    """The <saveraw> tag: stores its unformatted subtree under a name for
    later formatting at the point of reference."""
    tag = "saveraw"
    def __init__(self, tagchar, template):
        super().__init__(tagchar, template)
        # Parse the name Token and validate. Must be a simple name, not an object attribute or implied loop var.
        token = Token(template)
        if token.attributeChain or token.impliedLoopVarName:
            raise InvalidTagKeyName("Must only have simple name for save tags", tag=self)
        self.value = Value.createValue(token, template, self)
        self.closeTag()
    def formatAtReference(self, outputFormatter):
        # Called later, when the saved name is referenced: format the stored
        # subtree using the NamespaceStack in effect at that point.
        super().format(outputFormatter)
    def format(self, outputFormatter):
        namespace = outputFormatter.rootMapping[self.tagchar]
        # When we hit it, saveraw tag does not get formatted into the output, nor do we walk the children and format
        # them. Instead, we save a reference to the tag. The Value object recognizes that we have a Tag and calls the
        # above formatAtReference to get it formatted into the output with the current NamespaceStack.
        outputFormatter.markLineSuppressible()
        namespace[self.value._name] = self
# TODO For saveraw, it will act much like saveeval (in that we keep the parsed tree here in the Template),
# except that when we hit this spot in the tree during formatting, we go ahead and call its format method and save
# the resulting string in the scratch space. This would need to be saved as a rawstr to prevent it from HTML entity
# char escaping from being applied more than once.
# TODO It will be at run-time that the decisions need to be made and how we differentiate between saveeval and saveraw tags.
| true |
76be92a43273908c013f5549f0753fb460bea76a | Python | singhwarrior/python | /python_samples/python-modules/main2.py | UTF-8 | 309 | 2.796875 | 3 | [] | no_license | import karma.graph as gph
if __name__ == '__main__':
    # Build a small directed graph and exercise the karma.graph module API.
    g = gph.NewGraph()
    gph.addVertex(g,'a',[])
    gph.addVertex(g,'b',['a'])
    gph.addVertex(g,'c',['b'])
    gph.addVertex(g,'d',['c','a'])
    gph.addVertex(g,'e',['a'])
    print(g)
    print(gph.getVertices(g))
    print(gph.getEdges(g))
| true |
50f6ab77c0e29ecd51ff9031ba1ec959a2a7ac57 | Python | aixiu/myPythonWork | /pythonABC/ls-40-外部引用模块.py | UTF-8 | 420 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, subprocess  # subprocess launches the external player below
timeLeft = 5
# Simple one-second countdown before attempting playback.
while timeLeft > 0:
    print(timeLeft)
    time.sleep(1)
    timeLeft -= 1
print('Music, Now!')
try:
    # Launch the external player via subprocess.
    # NOTE(review): 'start' is a cmd.exe builtin, not an executable; without
    # shell=True this raises FileNotFoundError, which the handler below then
    # reports. Consider os.startfile() on Windows instead.
    subprocess.run(['start', 'f4 - 流星雨.mp3'])
except FileNotFoundError:
    print('\n{:*^20}'.format('找不到播放文件'))
| true |
bb6f3ef800f994c4203254626d1e2df7f547b812 | Python | KevinOluoch/bc-10-Contact_Manager_n_SMS | /search.py | UTF-8 | 1,908 | 3.390625 | 3 | [] | no_license | def search(arg, Contacts, session):
"""
A function to search for a given entry in a Contacts,
given the first name of the contact as input.
"""
search_results = []
count = 1
# Searching for all entries matching the request name and storing them in a list
for entry in session.query(Contacts):
if arg['<name>'] == entry.name or arg['<name>'] == entry.second_name:
search_results.append({
'count' : count, 'name' : entry.name,
'second_name' : entry.second_name,
'Phone number' : entry.phone_number})
count+=1
while True:
# Gives feedback if requested name is not in contacts
if count == 1:
return (count, "%s is not in your contacts " %(arg['<name>']))
# Gives feedback if requested name is found in contacts
if count == 2:
return (
count, "Name: %s %s, Number: %s" %(arg['<name>'],
search_results[0]['second_name'],
search_results[0]["Phone number"]),
search_results[0]["Phone number"])
# Gives options if requested name appears several times in contacts
print "Which %s?" %(arg['<name>'])
for result in search_results:
print "[%d] %s %s" % (result['count'], result['name'],
result['second_name'])
# The user then chooses one option
option_selected = raw_input('Enter the corresponding number: ')
if option_selected.isdigit():
option_selected = int(option_selected)
# Option is retrieved from results and then returned
for result in search_results:
if option_selected == result['count']:
return (
2, "Name: %s %s, Number: %s" %(result['name'],
result['second_name'], result['Phone number']),
result['Phone number'])
else:
print "Please select one of the Options"
| true |
67d7d86dcec2a0504bbefb74fb8fd2b5d0b2eba3 | Python | omuratgultekin/learn-kubeflow | /mnist/src/cnn_module_entry.py | UTF-8 | 2,926 | 3.109375 | 3 | [] | no_license | """
Interface to invoke methods of custom module, which is CNN in this case.
This module is provided by developers, not users.
"""
import os
import time
from cnn_module import CNNModel, CustomLoader
# class FolderWrapper:
# """
# Record the folder path of a port
# """
# def __init__(self, folder_path):
# self._folder_path = folder_path
#
# @property
# def folder_path(self):
# if not os.path.isdir(self._folder_path):
# raise FileNotFoundError(f"Folder path {self._folder_path} is not found")
# return self._folder_path
class TimeProfile:
    """
    Context manager that prints the wall-clock time spent in its body.
    """
    def __init__(self, tag):
        self.tag = tag
        self.start_time = None
        self.end_time = None

    def __enter__(self):
        print(f"{self.tag} - Start:")
        self.start_time = time.perf_counter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Measure and report elapsed time even if the body raised.
        self.end_time = time.perf_counter()
        print(f"{self.tag} - End with {self.end_time - self.start_time:.4f}s elapsed.")
def run_load_data():
    """
    Load training data into the folder named by the OUTPUT_0 env variable.
    """
    output_folder_path = os.environ.get('OUTPUT_0')
    if output_folder_path is None:
        raise ValueError('Environmental variable OUTPUT_0 is not defined')
    # Download MNIST data to output_folder_path
    with TimeProfile("Load data"):
        CustomLoader().load_images_labels(output_folder_path)
def run_train(data_folder, number_of_steps):
    """
    Train a custom model
    :param data_folder: str, local folder path of train_component images and labels
    :param number_of_steps: int, number of steps to train CNN
    """
    # Initialize custom-defined learner instance; num_steps flows into the
    # CNNModel constructor.
    learner = CNNModel(num_steps=number_of_steps)
    with TimeProfile("Train CNN"):
        learner.train(
            data_folder_path=data_folder
        )
def run_score(learner_folder, data_folder):
    """
    Score a custom model
    :param learner_folder: folder path of learner
    :param data_folder: folder path of data
    """
    # Load custom-defined learner by custom-provided loading interface.
    # Unlike run_train, the model is restored from disk, not freshly built.
    learner = CustomLoader().load_model(learner_folder)
    with TimeProfile("Score CNN"):
        learner.predict(
            data_folder_path=data_folder
        )
def run_evaluate(scored_data_folder, true_data_folder):
    """
    Evaluate a custom model
    :param scored_data_folder: str, folder path of scored data
    :param true_data_folder: str, folder path of true data
    """
    # Initialize custom-defined learner instance (default num_steps —
    # only the evaluate() method is used here).
    learner = CNNModel()
    with TimeProfile("Evaluate CNN"):
        learner.evaluate(
            predict_labels_folder_path=scored_data_folder,
            ground_truth_folder_path=true_data_folder
        )
| true |
3adf04f70b24f28c94bd0deb7c078333588474f6 | Python | JEHYUNLIM/python_workspace | /BaekjoonCode/1978.py | UTF-8 | 209 | 3.015625 | 3 | [] | no_license | def func(n):
c = 0
if n==1:
return 0
for i in range(2,n):
if n%i == 0:
return 0
return 1
# Read N, then N integers on one line; print how many of them are prime.
N = int(input())
numbers = list(map(int, input().split()))
# (Removed the dead `r = [0]*N` preallocation the old version overwrote.)
print(sum(func(x) for x in numbers))
| true |
26376c887f34edf6ca31baee06cc0a81d99588c1 | Python | Souli-prgms/DeepSoul | /functional.py | UTF-8 | 3,268 | 2.703125 | 3 | [] | no_license | import numpy as np
def softmax(x):
    """Numerically-stable softmax over the last axis.

    Each sample (row) of a (batch, classes) input sums to 1.

    Fix: the previous version subtracted the global max and normalized
    over axis 0 (the batch axis), which disagreed with cross_entropy and
    accuracy below — both index rows as samples and classes along axis 1.
    For 1-D input the behavior is unchanged.
    """
    exps = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exps / np.sum(exps, axis=-1, keepdims=True)
def cross_entropy(inp, targ):
    """Mean cross-entropy loss.

    inp: raw logits, shape (batch, classes).
    targ: one-hot targets, shape (batch, classes).
    """
    p = softmax(inp)
    # 1e-9 guards against log(0); targ.argmax(axis=1) recovers class indices.
    log_likelihood = -np.log(p[range(inp.shape[0]), targ.argmax(axis=1)] + 1e-9)
    loss = np.sum(log_likelihood) / inp.shape[0]
    return loss
def accuracy(inp, targ):
    """Fraction of samples whose arg-max prediction equals the target.

    NOTE(review): here `targ` is compared element-wise to class indices,
    i.e. integer labels — unlike cross_entropy above, which expects
    one-hot targets. Confirm the caller's label format.
    """
    pred = softmax(inp)
    return (np.argmax(pred, axis=1) == targ).astype(float).mean()
def regions_for_conv2d(inp, kernel_size=3, padding=1, stride=1):
    """Yield (batch_index, window, row, col) for every convolution window.

    Each sample is zero-padded on its two spatial axes, then scanned with
    a kernel_size x kernel_size window at the given stride.
    """
    batch_size = inp.shape[0]
    pad_spec = ((padding, padding), (padding, padding), (0, 0))
    for b in range(batch_size):
        padded = np.pad(np.copy(inp[b]), pad_spec, mode='constant')
        rows, cols = padded.shape[0], padded.shape[1]
        for row in range(0, rows - kernel_size + 1, stride):
            for col in range(0, cols - kernel_size + 1, stride):
                yield b, padded[row:row + kernel_size, col:col + kernel_size], row, col
def conv2d(inp, filters, kernel_size=3, padding=1, stride=1):
    """Naive 2-D convolution over a (batch, h, w, channels) input.

    filters: array whose axis 2 indexes the output filters.
    NOTE(review): each output cell stores np.sum(region * filters) — a
    single scalar broadcast across all output channels — and
    `final_filters` adds the input channel count when c != 1; both look
    inconsistent with conv2d_backprop below and should be verified.
    """
    bs, w, h, c = inp.shape
    nb_filters = filters.shape[2]
    final_filters = nb_filters if c == 1 else nb_filters + c
    output = np.zeros((bs, w - kernel_size + 1 + 2 * padding, h - kernel_size + 1 + 2 * padding, final_filters))
    for b, region, i, j in regions_for_conv2d(inp, kernel_size, padding, stride):
        output[b, i, j] = np.sum(region * filters)
    return output
def conv2d_backprop(inp, out_grad, filters, kernel_size=3, padding=1, stride=1):
    """Gradients of conv2d w.r.t. the filters and the input.

    Returns (filters_grad, inp_grad). Assumes a single input channel
    (see the squeeze(axis=2) below) and padding > 0 — with padding == 0
    the final [padding:-padding] slice would empty the spatial axes.
    """
    filters_grad = np.zeros(filters.shape)
    inp_grad = np.zeros(inp.shape)
    inp_grad_padded = np.copy(inp_grad)
    inp_grad_padded = np.pad(inp_grad_padded, ((0, 0), (padding, padding), (padding, padding), (0, 0)), mode='constant')
    nb_filters = filters.shape[2]
    for b, region, i, j in regions_for_conv2d(inp, kernel_size, padding, stride):
        for f in range(nb_filters):
            # Scatter the output gradient back through filter f into the
            # padded input gradient, and accumulate the filter gradient.
            inp_grad_padded[b, i:i + kernel_size, j:j + kernel_size, 0] += filters[:, :, f] * out_grad[b, i, j, f]
            filters_grad[:, :, f] += np.squeeze(region, axis=2) * out_grad[b, i, j, f]
    # Strip the padding to recover the gradient w.r.t. the original input.
    inp_grad[:, :, :, :] = inp_grad_padded[:, padding: -padding, padding: -padding, :]
    return filters_grad, inp_grad
def regions_for_maxpool2d(inp):
    """Yield (batch_index, 2x2 window, row, col) for 2x2 stride-2 pooling."""
    batch_size, height, width, _ = inp.shape
    for b in range(batch_size):
        for row in range(height // 2):
            for col in range(width // 2):
                window = inp[b, 2 * row:2 * row + 2, 2 * col:2 * col + 2]
                yield b, window, row, col
def maxpool2d(inp):
    """2x2 max pooling with stride 2, applied per channel.

    Fix: the previous version wrote np.amax(region) — the max over *all*
    channels — into every output channel; the backward pass
    (maxpool2d_backprop) takes per-channel maxima with axis=(0, 1), so
    the forward pass now does the same. Single-channel behavior is
    unchanged.
    """
    bs, w, h, c = inp.shape
    output = np.zeros((bs, w // 2, h // 2, c))
    for b, region, i, j in regions_for_maxpool2d(inp):
        output[b, i, j] = np.amax(region, axis=(0, 1))
    return output
def maxpool2d_backprop(inp, out_grad):
    """Gradient of 2x2 max pooling: route out_grad back to the per-channel
    arg-max positions of each pooling window."""
    inp_grad = np.zeros(inp.shape)
    for b, region, i, j in regions_for_maxpool2d(inp):
        w, h, f = region.shape
        # Per-channel maximum of this 2x2 window.
        amax = np.amax(region, axis=(0, 1))
        for i2 in range(w):
            for j2 in range(h):
                for f2 in range(f):
                    # Ties: every element equal to the max receives the gradient.
                    if region[i2, j2, f2] == amax[f2]:
                        inp_grad[b, i * 2 + i2, j * 2 + j2, f2] = out_grad[b, i, j, f2]
    return inp_grad
return inp_grad | true |
309382dfe9332c55ba4fff108f1def43a0e9f7e1 | Python | viniciusfeitosa/python-cookbook | /chapter2/news_service_flask/src/app.py | UTF-8 | 765 | 2.625 | 3 | [] | no_license | # src/app.py
from flask import Flask
from flask_migrate import Migrate
from .config import app_config # import base config
from .models import db # import ORM database instance
from .views.news import news_api as news_blueprint
# function to load and return an app instance to run.py
def load_app(env_name):
    """Build the Flask application configured for `env_name`
    (a key of app_config, e.g. development/production)."""
    app = Flask(__name__)  # Instantiate a Flask APP
    app.config.from_object(app_config[env_name])  # Loading configs on Flask
    db.init_app(app)  # integrating ORM instance with Flask APP
    Migrate(app, db)  # Enable migration manager based on models definition
    # Defining routes prefixing all routes with /news
    app.register_blueprint(news_blueprint, url_prefix='/news')
    return app  # return APP instance totally configurated
| true |
c2c5f3227aeccede94e6fcc08093d7010ce1474e | Python | puneet01994/DataStructures | /Python/Stack/stock_span_problem.py | UTF-8 | 515 | 4 | 4 | [] | no_license | # https://www.geeksforgeeks.org/the-stock-span-problem/
def stock_span(arr):
    """Return the span of each price: the number of consecutive prior days
    (including today) with price <= today's price.

    Monotonic-stack algorithm, O(n) overall.
    """
    spans = [0] * len(arr)
    spans[0] = 1
    indices = [0]  # stack of indices with strictly decreasing prices
    for day in range(1, len(arr)):
        # Drop stack entries whose price is not greater than today's.
        while indices and arr[indices[-1]] <= arr[day]:
            indices.pop()
        spans[day] = day + 1 if not indices else day - indices[-1]
        indices.append(day)
    return spans
# Classic example input; expected spans: [1, 1, 1, 2, 1, 4, 6].
ar = [100, 80, 60, 70, 60, 75, 85]
data = stock_span(ar)
print(data)
| true |
ff2920edc4114784967412dc01e47f7292992116 | Python | jdferrell3/py-misc | /file-carving/slicer.py | UTF-8 | 969 | 3.296875 | 3 | [] | no_license | import os
import sys
class FileWriter():
    """Byte-counting wrapper around a binary output file.

    Tracks how many bytes were written; close() deletes the file again
    when nothing was written, so empty slices leave no artifact behind.
    """
    def __init__(self, filename):
        self.name = filename
        self.fh = open(filename, 'wb')
        self.size = 0

    def write(self, data):
        self.fh.write(data)
        self.size += len(data)

    def close(self):
        # Flush and close; drop the file entirely if it stayed empty.
        self.fh.flush()
        self.fh.close()
        if not self.size:
            os.remove(self.name)
if len(sys.argv) != 5:
    print('"slice" out section of file given two offsets\n')
    print("Usage:\n\t%s <begin> <end> <infile> <outfile>\n" % sys.argv[0])
    sys.exit(1)
# Byte offsets into the input file.
begin = int(sys.argv[1])
end = int(sys.argv[2])
total = end - begin
with open(sys.argv[3], 'rb') as reader:
    writer = FileWriter(sys.argv[4])
    reader.seek(begin)
    data = reader.read(total)
    if len(data) == 0:
        # NOTE(review): exits without closing `writer`, leaving an empty
        # output file behind (FileWriter.close would have removed it).
        print('Unexpected data length')
        sys.exit(1)
    else:
        print('writing %d bytes to %s' % (len(data), writer.name))
        writer.write(data)
        writer.close()
| true |
ab7fe66dde519363772e23c590e4381d3527b64e | Python | astroconda/conmets | /conmets/main.py | UTF-8 | 11,987 | 2.75 | 3 | [] | no_license | import argparse
from conmets.conmets import *
import yaml
import urllib.request
from urllib.error import HTTPError
def main():
ap = argparse.ArgumentParser(
prog='conmets',
description='Parse and digest apache/nginx access logs in either'
' raw or .gz format and produce conda package download stats '
'summaries.')
ap.add_argument('dataset_name', type=str,
help='Name of dataset file. If file does not exist and'
' log data file names are provided for parsing, this '
'file will be created.')
ap.add_argument('--config',
'-c',
help='Configuration file used to adjust behavior of the '
'program',
required=True)
ap.add_argument('--files',
'-f',
help='List of log files to parse, raw or .gz are accepted.'
' glob syntax is also honored.',
nargs='+')
ap.add_argument('--window',
                '-w',
                help='Restrict examination of data to the window of dates'
                ' provided.\n'
                ' Format: YYYY.MM.DD-YYYY.MM.DD'
                ' Omitting a date window will operate on all data contained'
                ' within the given dataset.')
ap.add_argument('--ignorehosts',
                '-i',
                help='IP addresses of hosts to ignore when parsing logs.'
                ' Useful for saving time by not reading in transactions '
                'from security scans, etc.',
                nargs='+')
args = ap.parse_args()
# Dataset filename
dataset_name = args.dataset_name
# Load the YAML config (provides infrastructure_hosts and internal_host_specs).
with open(args.config, 'r') as f:
    config = yaml.safe_load(f)
# Expand each user-supplied filespec with glob and collect the results.
files = []
try:
    for filespec in args.files:
        expanded = glob(filespec)
        expanded.sort()
        if isinstance(expanded, list):
            for name in expanded:
                files.append(name)
        else:
            files.append(expanded)
# args.files is None when no log files were passed; iterating None raises
# TypeError, which is treated as "work from the existing pickled dataset".
except(TypeError):
    print('No log files provided.')
    print(f'Importing existing dataset {dataset_name}.')
    pass
inf_hosts = config['infrastructure_hosts']
num_inf_hosts = len(inf_hosts)
# TODO: Should host filtering take place here?
# It leaves a disconnect between the pickled data which _may_ have
# been culled and the actual data being referenced.
# Parse any new logs into the dataset and persist it back to disk.
logproc = LogData(dataset_name, ignore_hosts=args.ignorehosts)
logproc.read_logs(files)
print('writing (potentially updated) dataset')
logproc.write_dataset()
# Filtering and analysis begins here
data = logproc.data
print(f'num full data rows = {len(data.index)}')
# Filter out a particular time period for examination
# Set limits on a time period to examine
# The CLI uses YYYY.MM.DD-YYYY.MM.DD; dots become dashes for pd.to_datetime.
if args.window:
    start = args.window.split('-')[0].replace('.', '-')
    end = args.window.split('-')[1].replace('.', '-')
    window_start = pd.to_datetime(start)
    window_end = pd.to_datetime(end)
    print(f'Filtering based on window {window_start} - {window_end}.')
    data = data[pd.to_datetime(data['date']) >= window_start]
    data = data[pd.to_datetime(data['date']) <= window_end]
    print(f'num windowed data rows = {len(data.index)}')
all_unique_hosts = list(set(data['ipaddress']))
#for host in all_unique_hosts:
#    try:
#        print(f'{host} {socket.gethostbyaddr(host)[0]}')
#    except:
#        print(f'{host} offline?')
# All packages in a dictionary by channel.
# Channel name is the second component of the request path.
chans = [path.split('/')[1] for path in data['path']]
chans = list(set(chans))
chans.sort()
chan_pkgs = OrderedDict()
for chan in chans:
    # Trailing '/' added to ensure only a single channel gets stored for each
    # due to matching overlap depending on length of substring.
    chan_pkgs[chan] = data[data['path'].str.contains(chan+'/')]
total_downloads = 0
for chan in chan_pkgs.keys():
    total_downloads += len(chan_pkgs[chan].index)
print(f'TOTAL downloads = {total_downloads}')
# For each channel, generate summary report of the download activity.
for chan in chan_pkgs.keys():
    print(f'\n\nSummary for channel: {chan}')
    print('-----------------------------')
    pkgs = chan_pkgs[chan]
    # Unique days
    dates = set(pkgs['date'])
    dates = list(dates)
    dates.sort()
    bydate = OrderedDict()
    start_date = dates[0]
    end_date = dates[-1]
    time_range = end_date - start_date
    days_elapsed = time_range.days
    if days_elapsed == 0:
        days_elapsed = 1
    # Make the day count inclusive of both endpoints.
    days_elapsed += 1
    print(f'\nOver the period {start_date.strftime("%m-%d-%Y")} '
          f'to {end_date.strftime("%m-%d-%Y")}')
    print(f'{days_elapsed} days')
    # Downloads per day over time frame
    for date in dates:
        bydate[date] = len(pkgs[pkgs['date'] == date])
    chan_downloads = len(pkgs.index)
    print(f'Downloads: {chan_downloads}')
    print(f'Average downloads per day: {ceil(chan_downloads / days_elapsed)}')
    # Total bandwidth consumed by this channel's use over time frame.
    # NOTE(review): divides by 1e9 (GB) although the label says GiB (2**30)
    # -- confirm which unit is intended.
    bytecount = pkgs['size'].sum()
    gib = bytecount / 1e9
    print(f'Data transferred: {gib:.2f} GiB')
    # Number of unique hosts and geographic location
    unique_hosts = set(pkgs['ipaddress'])
    num_unique_hosts = len(unique_hosts)
    print(f'Unique hosts {num_unique_hosts}')
    ## Unique packages
    unique_pkgs = set(pkgs['path'])
    print(f'Unique full package names {len(unique_pkgs)}')
    # What is the fraction of downloads for each OS?
    num_linux_txns = len(pkgs[pkgs['path'].str.contains('linux-64')].index)
    num_osx_txns = len(pkgs[pkgs['path'].str.contains('osx-64')].index)
    pcnt_linux_txns = (num_linux_txns / float(chan_downloads))*100
    pcnt_osx_txns = (num_osx_txns / float(chan_downloads))*100
    # What fraction of total downloads come from non-infrastructure on-site hosts?
    noninf = pkgs[~pkgs['ipaddress'].isin(config['infrastructure_hosts'])]
    total_noninf = len(noninf.index)
    print(f'Non-infrastructure downloads: {total_noninf}')
    print(f'Percentage noninf downloads: {(total_noninf/chan_downloads)*100:.1f}%')
    # What fraction of total downloads come from off-site hosts?
    # Internal host specs are anchored to the start of the address string.
    int_host_patterns = ['^'+s for s in config['internal_host_specs']]
    offsite = pkgs[~pkgs['ipaddress'].str.contains(
        '|'.join(int_host_patterns), regex=True)]
    num_offsite_hosts = len(set(offsite['ipaddress']))
    print(f'num unique off-site hosts: {num_offsite_hosts}')
    onsite = pkgs[pkgs['ipaddress'].str.contains(
        '|'.join(int_host_patterns), regex=True)]
    num_onsite_hosts = len(set(onsite['ipaddress']))
    print(f'num unique on-site hosts: {num_onsite_hosts}')
    infra = pkgs[pkgs['ipaddress'].str.contains('|'.join(inf_hosts))]
    # Totals of unique software titles
    # i.e. name without version, hash, py or build iteration values
    # Extract simple package titles from 'path' column of data frame.
    names = list(pkgs['name'])
    unique_names = list(set(names))
    name_statsums = []
    for name in unique_names:
        statsum = {}
        statsum['name'] = name
        statsum['total'] = names.count(name)
        # Sum on- and off-site transactions for each package name
        # 'on-site' means transactions to non-infrastructure hosts.
        name_txns = pkgs[pkgs['name'] == name]
        on_txns = name_txns[name_txns['ipaddress'].str.contains(
            '|'.join(int_host_patterns), regex=True)]
        # Filter out hosts designated as infrastructure hosts in config file.
        on_txns = on_txns[~on_txns['ipaddress'].str.contains(
            '|'.join(inf_hosts))]
        num_onsite_txns = len(on_txns.index)
        statsum['onsite'] = num_onsite_txns
        off_txns = name_txns[~name_txns['ipaddress'].str.contains(
            '|'.join(int_host_patterns), regex=True)]
        num_offsite_txns = len(off_txns.index)
        statsum['offsite'] = num_offsite_txns
        infra_txns = name_txns[name_txns['ipaddress'].str.contains(
            '|'.join(inf_hosts))]
        num_infra_txns = len(infra_txns.index)
        statsum['infra'] = num_infra_txns
        ## Determine which packages are also available via PyPI
        # A 200 response means the name exists on PyPI; HTTPError (404) means
        # it does not.
        # NOTE(review): the response object `rq` is never read or closed --
        # consider closing it to avoid leaking sockets over many packages.
        url = f'https://pypi.org/pypi/{name}/json'
        try:
            rq = urllib.request.urlopen(url)
            #pl = f.read().decode('utf-8')
            #piinfo = json.loads(pl)
            statsum['pypi'] = True
        except(HTTPError):
            statsum['pypi'] = False
        #statsum['pypi'] = False
        name_statsums.append(statsum)
    # Most-downloaded titles first.
    name_statsums.sort(key=lambda x: x['total'], reverse=True)
    x_onsite = [i['onsite'] for i in name_statsums]
    x_infra = [i['infra'] for i in name_statsums]
    x_offsite = [i['offsite'] for i in name_statsums]
    y = [i['name'] for i in name_statsums]
    print(f'Number of unique {chan} titles downloaded: {len(unique_names)}')
    # For each unique softare name, sum the number of transactions from internal hosts.
    fig, axes = plt.subplots(figsize=(10,25))
    plt.grid(which='major', axis='x')
    plt.title(f'{chan} -- {start_date.strftime("%Y%m%d")} - {end_date.strftime("%Y%m%d")}')
    plt.xlabel('Downloads')
    axes.set_ylim(-1,len(name_statsums))
    axes.tick_params(labeltop=True)
    plt.gca().invert_yaxis()
    width = 1
    from operator import add
    barlists = []
    # Horizontal stacked bar chart with off-site, on-site, and infrastructure transactions.
    barlists.append(axes.barh(y, x_offsite, width, edgecolor='white', color='tab:blue'))
    barlists.append(axes.barh(y, x_onsite, width, left=x_offsite, edgecolor='white', color='tab:green'))
    # Sum bars up to this point to correctly stack the subsequent one(s).
    offset = list(map(add, x_offsite, x_onsite))
    barlists.append(axes.barh(y, x_infra, width, left=offset, edgecolor='white', color='tab:olive'))
    # Highlight titles that are also on PyPI.
    for i,statsum in enumerate(name_statsums):
        if statsum['pypi'] == True:
            axes.get_yticklabels()[i].set_color('orange')
            axes.get_yticklabels()[i].set_weight('bold')
    # Annotate plot with additional stats
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    plural = ''
    if days_elapsed > 1:
        plural = 's'
    stats_text = (f'{days_elapsed} day{plural}\n'
                  f'Total Downloads: {chan_downloads}\n'
                  f'Average downloads per day: {ceil(chan_downloads / days_elapsed)}\n'
                  f'Unique titles: {len(unique_names)}\n'
                  f'Data transferred: {gib:.2f} GiB\n'
                  f'Linux transactions: {pcnt_linux_txns:.1f}%\n'
                  f'Macos transactions: {pcnt_osx_txns:.1f}%\n'
                  f'Unique on-site hosts: {num_onsite_hosts}\n'
                  f'Unique off-site hosts: {num_offsite_hosts}\n\n'
                  f'  Orange titles are available on PyPI.')
    axes.text(0.45, 0.05, stats_text, transform=axes.transAxes, fontsize=14, bbox=props)
    axes.legend(['off-site', 'on-site', 'on-site infrastructure'])
    plt.tight_layout()
    # One PNG per channel, named by channel and date range.
    short_startdate = start_date.strftime('%Y%m%d')
    short_enddate = end_date.strftime('%Y%m%d')
    plt.savefig(f'{chan}-{short_startdate}-{short_enddate}.png')
| true |
2942bbbd822c1427f37ecfd6505b4e8f47fbff6a | Python | LeandroAlencar/CursoPython | /Python1/jogos.py | UTF-8 | 305 | 2.765625 | 3 | [] | no_license | import Jogo_de_advinhacaoRandom
import jogo_de_advinhacao

# Tiny launcher menu: 1 starts the basic guessing game, 2 the random variant.
print('advinhação (1), advinhaçãoBacana(2)')
escolha = int(input("Qual jogo voce deseja jogar?"))
if escolha == 2:
    print("Loading...")
    Jogo_de_advinhacaoRandom.jogar()
elif escolha == 1:
    print("Loading...")
    jogo_de_advinhacao.jogar()
ac5ffe4b2387ead6daef3e93d6092564d9e280cd | Python | Fixer38/University-Notes | /semester-1/progra-ex/manip5/ex1-add-entry.py | UTF-8 | 209 | 3 | 3 | [] | no_license | def update_contact(contacts: dict, name: str, mail: str) -> None:
contacts[name] = mail
print(contacts)
# Demo: seed an address book, then overwrite an existing entry's mail.
contacts = { "rom": "rom@hello", "jer": "jer@apo"}
update_contact(contacts, "rom", "romain@hello")
| true |
69ac925da01688b080e37b939dde602525066948 | Python | marcellinamichie291/Crypto_Algorithmic_Trading | /checkForSingleCurrencyOpps.py | UTF-8 | 8,939 | 3.59375 | 4 | [
"MIT"
] | permissive | ##!/usr/bin/python
# Name: checkForSingleCurrencyOpps.py
# Author: Patrick Mullaney
# Date Created: 1-20-2018
# Last Edited: 3-10-2018
# Description: This script checks for single currency arbitrage opportunities.
import readExchangeRatesGDAX, readExchangeRatesGemini
import currency, exchange
import numpy
# Opportunity object stores the potential info about an exchange.
class Opportunity():
    """Plain data holder describing one arbitrage opportunity.

    The original declared every field as a class attribute shared by all
    instances; they are now plain instance attributes, which avoids any
    accidental shared state while keeping the same no-argument constructor
    and the same field names/defaults.
    """
    def __init__(self):
        self.currency = None       # currency symbol, e.g. 'ETH'
        self.buyExchange = None    # exchange name to buy on (lower price)
        self.sellExchange = None   # exchange name to sell on (higher price)
        self.buyPrice = None       # formatted '$x,xxx.xx' buy price
        self.sellPrice = None      # formatted '$x,xxx.xx' sell price
        self.amount = None         # number of coins traded
        self.profitLoss = 0.00     # net profit (negative for a loss)
################################################################################
# Calculates revenue with deposit and withdraw fee costs.
def calcRev1(amount, lowPrice, highPrice, exchangeCostLow, exchangeCostHigh, depositCost, withdrawCost):
    """Revenue of the full round trip, including deposit and withdrawal costs.

    Every *Cost argument is the fraction retained after that fee
    (e.g. a 1% fee is passed as 0.99).
    """
    fiat_in = (amount * lowPrice) * depositCost
    coins_bought = fiat_in / lowPrice * exchangeCostLow
    fiat_out = coins_bought * highPrice * exchangeCostHigh
    return fiat_out * withdrawCost
# Calculates revenue without deposit and withdraw fee costs.
def calcRev2(amount, lowPrice, highPrice, exchangeFeeLow, exchangeFeeHigh):
    """Revenue ignoring deposit/withdraw costs: sale proceeds minus purchase cost."""
    sale_proceeds = amount * highPrice * exchangeFeeHigh
    purchase_cost = amount * lowPrice * exchangeFeeLow
    return sale_proceeds - purchase_cost
################################################################################
# Same results as calcRev1, just easier to read and prints out for debugging.
def calcRev3(amount, lowPrice, highPrice, exchangeFeeLow, exchangeFeeHigh, depositCost, withdrawCost):
    """Step-by-step variant of calcRev1 (produces the same result).

    The dead triple-quoted block of debug prints that the original carried
    has been removed; the staged computation itself is unchanged.
    """
    # Fiat after the deposit fee.
    revenue = (amount * lowPrice) * depositCost
    # Coins bought on the cheap exchange after its trading fee.
    revenue = revenue/lowPrice * exchangeFeeLow
    # Fiat received selling on the expensive exchange after its fee.
    revenue = revenue * highPrice * exchangeFeeHigh
    # Fiat remaining after the withdrawal fee.
    revenue = revenue * withdrawCost
    return revenue
################################################################################
# Takes the amount of coins, information about the high-price exchange, low-price exchange, and returns info about arbitrage opportunity.
def calculateProfitLoss(amount, high, low):
    """Build an Opportunity for buying `amount` coins on the cheaper
    exchange (`low`) and selling them on the pricier one (`high`).

    `high`/`low` are exchange-info objects exposing price, fee, name and
    currency attributes.
    """
    # Turn each percentage fee into the fraction that is retained.
    keep_deposit = (100.00 - low.depositFee) / 100.00
    keep_buy = (100.00 - low.exchangeFee) / 100.00
    keep_sell = (100.00 - high.exchangeFee) / 100.00
    keep_withdraw = (100.00 - high.withdrawFee) / 100.00
    revenue = calcRev1(amount, low.price, high.price, keep_buy, keep_sell,
                       keep_deposit, keep_withdraw)
    # Profit/loss = revenue minus the initial fiat outlay, rounded to cents.
    profit = float('%.2f'%(revenue - (low.price * amount)))
    opp = Opportunity()
    opp.currency = high.currency
    opp.buyExchange = low.name
    opp.sellExchange = high.name
    opp.profitLoss = profit
    opp.sellPrice = '${:,.2f}'.format(high.price)
    opp.buyPrice = '${:,.2f}'.format(low.price)
    opp.amount = amount
    return opp
################################################################################
# Checks for an arbitrage opportunity for a given amount between exchanges.
def checkOpportunity(amount, gdax, gemini):
    """Find the most profitable whole-coin arbitrage between GDAX and Gemini.

    Sweeps coin counts 1 .. amount-1 (range semantics kept from the
    original) and returns the best Opportunity; returns None when both
    prices are equal. The two copy-pasted, mirror-image branches of the
    original are collapsed into one loop.
    """
    if gdax.price == gemini.price:
        # Identical prices leave no spread to exploit.
        return None
    # Sell on the pricier exchange, buy on the cheaper one.
    if gdax.price > gemini.price:
        high, low = gdax, gemini
    else:
        high, low = gemini, gdax
    # Sentinel with an impossibly low profit, as in the original; it is
    # what gets returned when the loop body never runs (amount <= 1).
    maxOpp = Opportunity()
    maxOpp.profitLoss = -999999999.99
    # NOTE(review): range(1, amount) never evaluates `amount` coins itself,
    # although the original comment said "from 1 to amount" -- kept as-is.
    for coins in range(1, amount):
        opportunity = calculateProfitLoss(coins, high, low)
        if opportunity.profitLoss > maxOpp.profitLoss:
            maxOpp = opportunity
    return maxOpp
################################################################################
# Calculates arbitrage opportunities for all currencies at exchanges.
def checkAllCurrencies(amount):
    """Return a list with the best arbitrage Opportunity for each supported
    currency, in order: ETH, then BTC (entries may be the sentinel object
    or None, exactly as checkOpportunity produces them)."""
    arbOpps = []
    for symbol in ('ETH', 'BTC'):
        gdax_info = exchange.getExchange1('gdax', symbol)
        gemini_info = exchange.getExchange1('gemini', symbol)
        arbOpps.append(checkOpportunity(amount, gdax_info, gemini_info))
    return arbOpps
################################################################################
# Added new code below:
# Returns amount of coins to check based on dollar amount provided.
def getAmtCoins(amount, xchng1, xchng2):
    """Return how many coins `amount` of fiat buys at the cheaper of the
    two exchanges.

    Bug fix: the original compared `xchng1.price > xchng2` -- a price
    against the exchange *object* -- which raises TypeError on Python 3
    (and compared nonsense on Python 2). Prices are compared now.
    """
    price = min(xchng1.price, xchng2.price)
    return amount / price
################################################################################
# Calculates arbitrage opportunities for all currencies at exchanges, taking into consideration minimum profit.
def checkAllbyProfit(maxCost, minProfit):
    """For each currency (ETH, BTC), find the best fractional-coin arbitrage
    affordable with `maxCost` fiat, and return only the opportunities whose
    profit exceeds `minProfit`.

    Bug fix: checkOppProfit returns None when both exchanges quote the same
    price; the original then crashed on `i.profitLoss`. None entries are
    skipped now.
    """
    arbOpps = []
    for symbol in ('ETH', 'BTC'):
        gdax_info = exchange.getExchange('gdax', symbol)
        gemini_info = exchange.getExchange('gemini', symbol)
        # Convert the fiat budget into a coin count at the cheaper price.
        amtCoins = getAmtCoins(maxCost, gdax_info, gemini_info)
        arbOpps.append(checkOppProfit(amtCoins, gdax_info, gemini_info))
    profitableOpps = []
    for opp in arbOpps:
        if opp is not None and opp.profitLoss > minProfit:
            profitableOpps.append(opp)
    return profitableOpps
################################################################################
# Helper function for checkAllbyProfit. Checks for an arbitrage opportunity
#for a given amount of coins (float) between exchanges.
def checkOppProfit(amount, gdax, gemini):
    """Like checkOpportunity, but sweeps fractional coin amounts in 0.001
    steps from 0.001 up to (but excluding) `amount` (float).

    Returns the best Opportunity (the low-profit sentinel when the sweep
    is empty), or None when both prices are equal. The duplicated
    mirror-image branches of the original are collapsed into one loop.
    """
    if gdax.price == gemini.price:
        return None
    # Sell on the pricier exchange, buy on the cheaper one.
    if gdax.price > gemini.price:
        high, low = gdax, gemini
    else:
        high, low = gemini, gdax
    maxOpp = Opportunity()
    maxOpp.profitLoss = -999999999.99
    for coins in numpy.arange(0.001, amount, 0.001):
        opportunity = calculateProfitLoss(coins, high, low)
        if opportunity.profitLoss > maxOpp.profitLoss:
            maxOpp = opportunity
    return maxOpp
e7b623ff95b818b69a86e789983acc9166dad34e | Python | aghaPathan/RNN-Google-stock-price-prediction | /tunning_hyper_parameters.py | UTF-8 | 2,742 | 3.015625 | 3 | [] | no_license |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
# Importing the training set
dataset_train = pd.read_csv('Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:, 1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 timesteps and 1 output
timesteps = 120
X_train = []
y_train = []
for i in range(timesteps, 1258):
X_train.append(training_set_scaled[i-timesteps:i, 0])
y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Part 2 - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
def brain_rnn(lstm_layers, drop_out_ratio, optimizer):
# Initialising the RNN
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = lstm_layers, return_sequences = True, input_shape = (X_train.shape[1], 1)))
regressor.add(Dropout(drop_out_ratio))
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = lstm_layers, return_sequences = True))
regressor.add(Dropout(drop_out_ratio))
# Adding a third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = lstm_layers, return_sequences = True))
regressor.add(Dropout(drop_out_ratio))
# Adding a fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = lstm_layers))
regressor.add(Dropout(drop_out_ratio))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = optimizer, loss = 'mean_squared_error')
return regressor
classifier = KerasClassifier(build_fn= brain_rnn)
parameters = {'batch_size': [32, 42],
'nb_epoch': [100, 120],
'lstm_layers': [66, 76],
'drop_out_ratio': [0.2, 0.3],
'optimizer': ['adam', 'rmsprop'] }
grid_search = GridSearchCV(estimator=classifier,
param_grid=parameters,
scoring= 'neg_mean_squared_error')
grid_search = grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
best_score = grid_search.best_score_
print(best_parameters, best_score)
| true |
ff292a088ab19fec8c54d638381466fbae080a40 | Python | dejesusjmb/python_exercises | /7_time/src/my_time.py | UTF-8 | 1,064 | 3.578125 | 4 | [] | no_license | class MyTime(object):
def __init__(self, time):
self._time = time
self._hours = int(self._time.split(':')[0])
self._minutes = int(self._time.split(':')[1])
self._seconds = int(self._time.split(':')[2])
    def get_hours(self):
        """Return the hour component (int) parsed from the "HH:MM:SS" string."""
        return self._hours
    def get_minutes(self):
        """Return the minute component (int) parsed from the "HH:MM:SS" string."""
        return self._minutes
    def get_seconds(self):
        """Return the second component (int) parsed from the "HH:MM:SS" string."""
        return self._seconds
def advance(self, hours, minutes, seconds):
self._hours += hours if self._hours + hours < 25 else hours - 24
self._minutes += minutes if self._minutes + minutes < 61 else hours - 60
self._seconds += seconds if self._seconds + seconds < 61 else hours - 60
def is_less_than(self, time):
owntime = (self.get_hours() * 3600) + (self.get_minutes() * 60) + self.get_seconds()
othertime = (time.get_hours() * 3600) + (time.get_minutes() * 60) + time.get_seconds()
return owntime < othertime
def to_string(self):
return '{hh}:{mm}:{ss}'.format(hh=self._hours, mm=self._minutes, ss=self._seconds)
| true |
06111c13d18823a3b483f6c7fe8e582429aa11dd | Python | finnwilliams16/projects | /Machine Learning/Perceptron/Perceptron.py | UTF-8 | 260 | 2.765625 | 3 | [] | no_license | import numpy as np
# Single sigmoid neuron trained with the delta rule on one training example.
x = np.array([
    [1, 0, 0]])
y = np.array([
    [1]]).T
# One weight per input feature, one output unit.
w = np.random.random((len(x[0]), len(y[0])))
for j in range(100000):  # bug fix: `xrange` is Python 2 only (NameError on 3)
    # Forward pass: sigmoid activation with a fixed bias of 0.1.
    a = 1/(1 + np.exp(-(np.dot(x, w) + 0.1)))
    # Error term scaled by the sigmoid derivative a*(1-a).
    adelta = (y - a) * (a * (1 - a))
    # Bug fix: the weight gradient uses the *inputs* x, not the activations;
    # the original had `w += a.T.dot(adelta)`.
    w += x.T.dot(adelta)
| true |
8a6c43b4e17097017b0ed80336bd58fbd2b36171 | Python | lazerpope/PROG-labs | /29.py | UTF-8 | 174 | 3.703125 | 4 | [] | no_license |
# Read a year, report whether it is a leap year, and print its century.
x = int(input("Число X "))
# Bug fix: the original tested only x % 4 == 0. The Gregorian rule is:
# divisible by 4, except century years that are not divisible by 400.
if x % 4 == 0 and (x % 100 != 0 or x % 400 == 0):
    print("Високосный год")
else:
    print("Невисокосный год")
# Bug fix: x//100 + 1 reported century years wrongly (e.g. 2000 -> 21st);
# years 1-100 are century 1, 101-200 century 2, and so on.
print("Век " + str((x + 99) // 100))
76cc76d6baaaad35ed760d21c15edff17b2c5e4a | Python | gldgrnt/python-algorithms | /src/3_fibonacci/4_last_digit_sum_fib.py | UTF-8 | 952 | 3.953125 | 4 | [] | no_license | # Given an intenger n, find the last digit sum of Fn
# Input: n, 0 <= n <= 10^14
# Output: Last digit sum
# Pisano period of 10
def pisano_period_10():
    """Length of the Fibonacci sequence's repeating cycle modulo 10 (= 60).

    Walks consecutive last-digit pairs until the starting pair (0, 1)
    reappears; the search is bounded by modulus**2 steps.
    """
    modulus = 10
    prev, curr = 0, 1
    for step in range(modulus ** 2):
        prev, curr = curr, (prev + curr) % modulus
        if (prev, curr) == (0, 1):
            return step + 1
def last_digit_sum_fib(n, print_result=False):
    """Last digit of F(1) + F(2) + ... + F(n) (module examples: 3 -> 4,
    100 -> 5).

    Fix: the accumulator no longer shadows the builtin ``sum``; the
    algorithm itself is kept verbatim.
    """
    # Fibonacci last digits repeat with the Pisano period of 10, so only
    # n modulo that period matters.
    index = n % pisano_period_10()
    total, memo = 0, [0] * (index + 1)
    for i in range(1, index + 1):
        # memo[i] holds the last digit of F(i).
        memo[i] = (memo[i-1] + memo[i-2]) % 10 if i > 1 else 1
        # NOTE(review): this term selection encodes the identity chosen by
        # the original author; kept as-is apart from the rename.
        if (n <= 3 and i == index) or (index > 3 and ((index - 3) - i) % 3 == 0):
            total = (total + (2 * memo[i]) if i > 1 else memo[i]) % 10
    if print_result:
        print(total)
    return total
# Test 1: 3 => 4
last_digit_sum_fib(3, True)
# Test 2: 100 => 5
last_digit_sum_fib(100, True) | true |
64a985eadacca78ece5462a9b730e899596834bb | Python | nidhiatwork/Python_Coding_Practice | /Stacks_Queues/04-queue-via-stacks-my.py | UTF-8 | 529 | 3.984375 | 4 | [] | no_license | """
Implement a queue using two stacks.
"""
class QueueViaStacks:
    """FIFO queue backed by two LIFO stacks.

    New items land on ``in_stack``; removals come from ``out_stack``,
    which is refilled lazily (reversing arrival order once) whenever it
    runs dry.
    """
    def __init__(self):
        self.in_stack = []
        self.out_stack = []

    def push(self, item):
        """Enqueue `item`."""
        self.in_stack.append(item)

    def remove(self):
        """Dequeue and return the oldest item."""
        if not self.out_stack:
            # Drain the inbox into the outbox, reversing the order.
            while self.in_stack:
                self.out_stack.append(self.in_stack.pop())
        return self.out_stack.pop()
# Smoke-test drive: interleaved pushes and removals exercise the lazy
# transfer between the two stacks (return values are discarded).
q = QueueViaStacks()
q.push(1)
q.push(2)
q.remove()
q.push(3)
q.remove()
q.remove()
q.push(1)
q.push(1) | true |
87c6d1d4cab50a77bf492493bce653470ac9e38d | Python | brzozasr/ask_mate | /database_tools.py | UTF-8 | 7,847 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | from psycopg2 import *
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from config import *
class DatabaseTools:
pg_db = PG_DB
pg_username = PG_USERNAME
pg_password = PG_PASSWORD
pg_host = PG_HOST
pg_port = PG_PORT
us_db = US_DB
us_username = US_USERNAME
us_password = US_PASSWORD
    def __init__(self):
        # Default to the application (non-superuser) credentials from config;
        # the actual connection is opened lazily by __connect_db().
        self.__db_name = DatabaseTools.us_db
        self.__username = DatabaseTools.us_username
        self.__password = DatabaseTools.us_password
        self.__host = DatabaseTools.pg_host
        self.__port = DatabaseTools.pg_port
        self.__cursor = None
        self.__connection = None
    def execute_sql(self, query, data=None):
        """Open a connection, run one (optionally parameterized) query,
        close the connection, and return the result rows (None for
        non-returning statements)."""
        self.__connect_db()
        result = self.__execute_query(query, data)
        self.__close_connection()
        return result
def execute_multi_sql(self, query, data: list):
"""ONLY FOR NOT RETURNING QUERY. DO NOT USE 'SELECT' AND 'RETURNING'\n
Argument "data" has to be a list of lists or a list of tuples.\n
Example: [['title1', 'question1'], ['title2', 'question2']]."""
if 'SELECT' not in query.upper() and 'RETURNING' not in query.upper():
if all([isinstance(el, (list, tuple)) for el in data] + [len(data) > 0]):
self.__connect_db()
for sql_data in data:
if len(sql_data) > 0:
self.__execute_query(query, sql_data)
self.__close_connection()
else:
print('Required data: a list of lists or a list of tuples!')
else:
print('Method "execute_multi_sql" ONLY FOR NOT RETURNING QUERY!')
def __connect_db(self, dbname=us_db, username=us_username, pwd=us_password):
try:
self.__connection = connect(database=dbname, user=username, password=pwd,
host=self.__host, port=self.__port)
self.__cursor = self.__connection.cursor()
except Error as e:
print(f'There is a problem with connection: {e}')
def __execute_query(self, query, data=None):
"""Execute query with a transaction (with commit).
Use this for INSERT, UPDATE, DELETE."""
error = None
try:
self.__cursor.execute(query, data)
if 'SELECT' in str(self.__cursor.query).upper() or 'RETURNING' in str(self.__cursor.query).upper():
return self.__cursor.fetchall()
except (Error, OperationalError) as e:
print(f'There is a problem with operation: {e}')
error = str(e)
finally:
self.__connection.commit()
if error is not None:
return f'There is a problem with operation: {error}'
    def __close_connection(self):
        # Best-effort teardown of cursor and connection; failures are only
        # printed, never raised.
        try:
            self.__cursor.close()
            self.__connection.close()
        except Error as e:
            print(f'There is a problem with closing database: {e}')
def __create_db_tables(self):
"""Creating database, tables and a user in PostgreSQL.
Commands to create database, tables and a user in the IDE terminal:
% python3
% from database_tools import *
% db._DatabaseTools__create_db_tables()
% exit()"""
tables = (
"""CREATE TABLE IF NOT EXISTS users (
id SERIAL PRIMARY KEY,
email VARCHAR ( 255 ) NOT NULL UNIQUE,
pwd VARCHAR ( 255 ) NOT NULL,
registration_time TIMESTAMP NOT NULL DEFAULT NOW(),
reputation INT NOT NULL DEFAULT 0
)""",
"""CREATE TABLE IF NOT EXISTS question (
id SERIAL PRIMARY KEY,
submission_time TIMESTAMP NOT NULL DEFAULT NOW(),
view_number INT NOT NULL DEFAULT 0,
vote_number INT NOT NULL DEFAULT 0,
title VARCHAR ( 255 ) NOT NULL,
message TEXT NOT NULL,
image VARCHAR ( 255 ) UNIQUE,
user_id INT NOT NULL REFERENCES users ( id ) ON DELETE CASCADE
)""",
"""CREATE TABLE IF NOT EXISTS answer (
id SERIAL PRIMARY KEY,
submission_time TIMESTAMP NOT NULL DEFAULT NOW(),
vote_number INT NOT NULL DEFAULT 0,
question_id INT NOT NULL REFERENCES question ( id ) ON DELETE CASCADE,
message TEXT NOT NULL,
image VARCHAR ( 255 ) UNIQUE,
user_id INT NOT NULL REFERENCES users ( id ) ON DELETE CASCADE,
acceptance BOOLEAN NOT NULL DEFAULT FALSE
)""",
"""CREATE TABLE IF NOT EXISTS comment (
id SERIAL PRIMARY KEY,
question_id INT NOT NULL REFERENCES question ( id ) ON DELETE CASCADE,
answer_id INT REFERENCES answer ( id ) ON DELETE CASCADE,
message TEXT NOT NULL,
submission_time TIMESTAMP NOT NULL DEFAULT NOW(),
edited_number INT NOT NULL DEFAULT 0,
user_id INT NOT NULL REFERENCES users ( id ) ON DELETE CASCADE
)""",
"""CREATE TABLE IF NOT EXISTS tag (
id SERIAL PRIMARY KEY,
title VARCHAR ( 100 ) NOT NULL UNIQUE
)""",
"""CREATE TABLE IF NOT EXISTS question_tag (
id SERIAL PRIMARY KEY,
question_id INT NOT NULL REFERENCES question ( id ) ON DELETE CASCADE,
tag_id INT NOT NULL REFERENCES tag ( id ) ON DELETE CASCADE,
UNIQUE ( question_id, tag_id )
)"""
)
create_db = f"""CREATE DATABASE {DatabaseTools.us_db}
WITH
OWNER = postgres
ENCODING = 'UTF8'
CONNECTION LIMIT = -1;"""
create_user = f"""CREATE ROLE {DatabaseTools.us_username} WITH
LOGIN
NOSUPERUSER
NOCREATEDB
NOCREATEROLE
NOINHERIT
NOREPLICATION
CONNECTION LIMIT -1
PASSWORD '{DatabaseTools.us_password}';"""
get_list_db = 'SELECT datname FROM pg_database;'
self.__connect_db(DatabaseTools.pg_db, DatabaseTools.pg_username, DatabaseTools.pg_password)
db_list = self.__execute_query(get_list_db)
self.__close_connection()
if (DatabaseTools.us_db,) not in db_list:
self.__connect_db(DatabaseTools.pg_db, DatabaseTools.pg_username, DatabaseTools.pg_password)
self.__connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
self.__cursor.execute(create_db)
self.__close_connection()
else:
print(f'The database "{DatabaseTools.us_db}" exists!')
get_user = f'SELECT rolname FROM pg_roles;'
self.__connect_db(DatabaseTools.pg_db, DatabaseTools.pg_username, DatabaseTools.pg_password)
user_list = self.__execute_query(get_user)
self.__close_connection()
if (DatabaseTools.us_username,) not in user_list:
self.__connect_db(DatabaseTools.pg_db, DatabaseTools.pg_username, DatabaseTools.pg_password)
self.__execute_query(create_user)
self.__close_connection()
else:
print(f'The user "{DatabaseTools.us_username}" exists!')
self.__connect_db(username=DatabaseTools.pg_username, pwd=DatabaseTools.pg_password)
for table in tables:
self.__execute_query(table)
self.__execute_query(f"GRANT ALL ON SCHEMA public TO {DatabaseTools.us_username}")
self.__execute_query(f"GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO {DatabaseTools.us_username}")
self.__execute_query(f"GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO {DatabaseTools.us_username}")
self.__close_connection()
db = DatabaseTools()
| true |
d16d8ca67a414788b8d633fa8058faddf6817887 | Python | lpj2721/backStage1 | /lib/lib_util.py | UTF-8 | 642 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import json
RANDOM_CHAR = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
def random_str(random_length=128):
    """Return a random alphanumeric string of `random_length` characters
    drawn from RANDOM_CHAR.

    Fixes:
    - the original indexed with ``% (len(chars) - 1)``, so the final
      character of RANDOM_CHAR ('9') could never be selected;
    - ``secrets.choice`` replaces the hand-rolled os.urandom/modulo
      scheme, which was also statistically biased.
    """
    import secrets
    return ''.join(secrets.choice(RANDOM_CHAR) for _ in range(random_length))
def sort_dict(*kwargs):
    """Stamp each dict passed positionally with a string 'id' equal to its
    position ('0', '1', ...) and return the tuple of dicts.

    (The name is historical -- nothing is sorted; the dicts are mutated in
    place. The manual counter is replaced by enumerate.)
    """
    for i, team in enumerate(kwargs):
        team['id'] = str(i)
    return kwargs
def check_dict(kwargs):
    """Return True when `kwargs` parses as JSON, else None.

    The bare ``except`` is narrowed to the exceptions json.loads actually
    raises: ValueError (JSONDecodeError) for malformed text and TypeError
    for non-string input. The unused parsed result is no longer bound.
    """
    try:
        json.loads(kwargs)
        return True
    except (ValueError, TypeError):
        return None
if __name__ == '__main__':
pass
| true |
6ad695e4868df790ac3c3d4b9bee2260301d128b | Python | Easytolearndemo/Practice_Python_code | /38.small.py | UTF-8 | 281 | 3.75 | 4 | [] | no_license | # smallest element in ana array
from array import *

# Read n integers into a typed array and report the smallest one.
a = array('i', [])
n = int(input("Enter how many element:"))
for i in range(n):
    a.append(int(input("Enter the element:")))
if len(a) == 0:
    # Robustness fix: the original crashed on a[0] when n was 0.
    print("The array is empty.")
else:
    # min() replaces the hand-rolled linear-scan loop.
    sm = min(a)
    print(f"The smallest element is:{sm}")
c0120518dc962492a5f4e599fecb9a93b5f764ae | Python | vaidcs/myrepo | /prog20.py | UTF-8 | 105 | 3.515625 | 4 | [] | no_license |
# Print the sum of all even numbers from 1 to 10.
total = 0  # renamed from `sum`, which shadowed the builtin
for x in range(2, 11, 2):
    total += x
# Bug fix: the original printed the loop variable instead of the
# accumulated sum.
print(total)
import scrapy
import re
import hashlib
import time
import datetime
from urllib import parse
from somenew.items import SomenewItem
class HschenbaoSpider(scrapy.Spider):
    """Spider for the Huashang Morning Post e-paper (updates around 1 a.m. daily).

    Crawl flow: month index -> daily issue folders (only issues from the last
    100 days) -> article links -> one SomenewItem per article.
    """
    name = 'hschenbao'  # Huashang Morning Post
    allowed_domains = ['hsxiang.com']
    start_urls = ['http://www.hsxiang.com/html/']
    def parse(self, response):
        """Collect month-level folder links (hrefs shaped like '2018-05/')."""
        node_list = response.xpath("//tr/td[2]/a/@href").extract()
        for node in node_list:
            # Idiom fix: test the match instead of catching AttributeError
            # from .group() on a failed match.
            match = re.match(r'^\d+-\d+\/', node)
            if match is not None:
                # the e-paper lives on the e. subdomain, not www.
                node_href = 'http://e.hsxiang.com/html/' + match.group()
                yield scrapy.Request(node_href, callback=self.parse_get_node)
    def parse_get_node(self, response):
        """Walk a month's daily issue folders, keeping only recent issues."""
        day_list = response.xpath("//tr/td[2]/a/text()").extract()
        for day in day_list:
            match = re.match(r'\d+\/', day)
            if match is None:
                continue
            day_href = parse.urljoin(response.url, match.group())
            # Extract the issue date from the URL
            # (e.g. http://e.hsxiang.com/html/2018-05/07/).
            date_match = re.match(r"^h.*?\/(\d+-\d+\/\d+)\/", day_href)
            if date_match is None:
                continue
            # normalise '2018-05/07' to '2018/05/07' so strptime can parse it
            start_date = re.sub(r"\-", '/', date_match.group(1))
            # current date, same format
            end_date = time.strftime("%Y/%m/%d")
            start_sec = time.mktime(time.strptime(start_date, '%Y/%m/%d'))
            end_sec = time.mktime(time.strptime(end_date, '%Y/%m/%d'))
            # age of the issue in whole days
            work_days = int((end_sec - start_sec) / (24 * 60 * 60))
            # only crawl issues from the last 100 days
            if work_days < 100:
                yield scrapy.Request(day_href, callback=self.parse_get_content_href)
    def parse_get_content_href(self, response):
        """Yield a request for every article ('content_...') link of an issue."""
        content_list = response.xpath("//tr/td[2]/a/text()").extract()
        for content in content_list:
            match = re.match("^c.*", content)
            if match is not None:
                content_href = parse.urljoin(response.url, match.group())
                yield scrapy.Request(content_href, callback=self.get_content)
    def get_content(self, response):
        """Parse one article page into a SomenewItem (skipped when untitled)."""
        title = response.xpath("//tr[1]/td/table/tbody/tr/td/strong/text()").extract_first()
        if title is None:
            return
        item = SomenewItem()
        # Fix: reuse the extracted title instead of running the xpath twice.
        item['title'] = title
        item['content'] = str(response.xpath("//div[@id='ozoom']//p//text()").extract())
        item['content'] = ''.join(item["content"]).replace(u'\\u3000', u' ').replace(u'\\xa0', u' ')
        item['create_time'] = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
        item['url'] = response.url
        date_match = re.match("^h.*?\/(\d+-\d+\/\d+)\/.*", item['url'])
        # Fall back to the crawl timestamp when the URL carries no date.
        s = date_match.group(1) if date_match is not None else item['create_time']
        # Fix: removed dead code -- the original called s.replace('-', '/')
        # and discarded the result; this re.sub does the normalisation.
        item['time'] = re.sub(r"\-", '/', s)
        item['media'] = '华商晨报'
        m = hashlib.md5()
        m.update(str(item['url']).encode('utf8'))
        # stable article id derived from the URL
        item['article_id'] = m.hexdigest()
        item['comm_num'] = "0"
        item['fav_num'] = '0'
        item['read_num'] = '0'
        item['env_num'] = '0'
        yield item
| true |
7dd542cf62ad5062e0b8a75476d63cde71bd0108 | Python | wallawaz/bitrue_trader | /bitrue/__init__.py | UTF-8 | 3,800 | 2.6875 | 3 | [] | no_license | from hashlib import sha256
import hmac
import requests # try with aiohttp?
import time
from constants import URI, URIS, URLS
class Bitrue:
    """Thin REST client for the Bitrue exchange API (Binance-style signing).

    Signed requests carry a millisecond timestamp and an HMAC-SHA256 signature
    computed over the key-sorted query string, with ``signature`` always last.
    """
    FF_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0"
    @classmethod
    def get_url(cls, uri):
        """Build the absolute endpoint URL for a URI descriptor."""
        return URLS["BASE_URL"] + "/" + URLS["API_VERSION"] + "/" + uri.path
    def __init__(self, api_key, api_secret):
        self.api_key = api_key
        self.api_secret = api_secret
        self.session = self._init_session()
    def _init_session(self):
        """Initiate the user session with JSON accept and API-key headers preset."""
        session = requests.session()
        session.headers.update({
            "Accept": "application/json",
            "User-Agent": self.FF_USER_AGENT,
            "X-MBX-APIKEY": self.api_key,
        })
        return session
    def _generate_signature(self, data):
        """HMAC-SHA256 hex digest of the sorted ``key=value&...`` query string.

        Bug fix: ``_sort_data_params`` returns a list of (key, value) pairs;
        the original called ``.items()`` on that list (AttributeError).
        """
        pairs = self._sort_data_params(data)
        query = "&".join(f"{var}={val}" for var, val in pairs).encode("utf-8")
        m = hmac.new(self.api_secret.encode("utf-8"), query, sha256)
        return m.hexdigest()
    def _sort_data_params(self, data):
        """Return (key, value) pairs sorted by key, with ``signature`` last."""
        signature = None
        params = []
        for key, val in data.items():
            if key == "signature":
                signature = val
            else:
                params.append((key, val))
        # Bug fix: list.sort() takes a keyword-only ``key`` callable; the
        # original passed the lambda positionally (TypeError on Python 3).
        params.sort(key=lambda kv: kv[0])
        if signature is not None:
            params.append(("signature", signature))
        return params
    def _get(self, path, **kwargs):
        return self._api_request("get", path, **kwargs)
    def _post(self, path, **kwargs):
        return self._api_request("post", path, **kwargs)
    def _delete(self, path, **kwargs):
        return self._api_request("delete", path, **kwargs)
    def _put(self, path, **kwargs):
        return self._api_request("put", path, **kwargs)
    def _get_uri(self, method, path):
        """Resolve *path* to the URI descriptor matching *method*.

        Raises Exception for unknown paths or unsupported methods.
        """
        uri = URIS.get(path, None)
        if uri is None:
            raise Exception(f"Invalid API path: {path}")
        if isinstance(uri, URI):
            return uri
        # A list holds alternative GET/POST/DELETE descriptors for one path.
        for u in uri:
            if u.method == method:
                return u
        # Bug fix: the original raised with ``uri.path`` here, but ``uri`` is
        # a list in this branch and has no .path attribute.
        raise Exception(f"Invalid method {method} for path: {path}")
    def _api_request(self, method, path, **kwargs):
        uri = self._get_uri(method, path)
        return self._request(uri, **kwargs)
    def _request(self, uri, **kwargs):
        """Sign (when required), canonicalise parameters and dispatch the call."""
        kwargs["timeout"] = 10
        data = kwargs.get("data")
        if uri.signed:
            # Robustness fix: signed endpoints need a dict to carry the
            # timestamp/signature even when the caller passed no data.
            if not isinstance(data, dict):
                data = {}
            ts = int(time.time() * 1000)
            data["timestamp"] = ts
            data["signature"] = self._generate_signature(data)
            kwargs["data"] = data
        if data:
            # Re-sort so timestamp/signature keep the canonical order.
            kwargs["data"] = self._sort_data_params(data)
        # GET requests carry their parameters in the query string.
        if uri.method == "get" and "data" in kwargs:
            kwargs["params"] = kwargs["data"]
            del kwargs["data"]
        url = self.get_url(uri)
        if uri.method == "get":
            resp = self.session.get(url, **kwargs)
        else:
            # Bug fix: DELETE/PUT descriptors were previously sent as POST.
            resp = self.session.request(uri.method, url, **kwargs)
        return self._parse_response(resp)
    def _parse_response(self, resp):
        """Raise on non-2xx responses, otherwise decode the JSON body."""
        if str(resp.status_code)[0] != "2":
            # Bug fix: the original referenced an undefined name ``response``.
            raise Exception(f"Bitrue Error: {resp.text}")
        try:
            return resp.json()
        except ValueError:
            raise Exception(f"Error parsing response: {resp.text}")
    def exchange_info(self):
        """Public endpoint: exchange trading rules and symbol information."""
        return self._get("exchangeInfo")
| true |
16204e66b9ce0b39dd0d22dc53e9b93169ec2a97 | Python | Zylophone/Programming-for-Sport | /interviewbit.com/Linked Lists/add_two_numbers_as_lists.py | UTF-8 | 1,607 | 3.609375 | 4 | [] | no_license | # https://www.interviewbit.com/problems/add-two-numbers-as-lists/
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x  # digit payload
        self.next = None  # successor node; new nodes start detached
class Solution:
    # @param A : head node of linked list
    # @param B : head node of linked list
    # @return the head node in the linked list
    # in place, mutates A and B,
    def addTwoNumbers(self, A, B):
        """Add two non-negative numbers stored as little-endian digit lists.

        Digits are written in place over both lists while they overlap, then
        over the longer list's remaining nodes; the head of the longer list
        (A on a tie) is returned. A new ListNode is appended only when a
        final carry remains.

        Bug fix: the original computed the carry with ``/``, which under
        Python 3 produces floats (15 / 10 == 1.5) and corrupts every
        subsequent digit; divmod keeps the arithmetic in integers.
        """
        a, b = A, B
        carry = 0
        a_prev = None  # robustness: defined even when both inputs are empty
        while a and b:
            carry, digit = divmod(a.val + b.val + carry, 10)
            a.val = digit
            b.val = digit
            a_prev = a
            a = a.next
            b = b.next
        # Both lists exhausted: only a possible final carry remains.
        if a is None and b is None:
            if carry:
                a_prev.next = ListNode(carry)
            return A
        # Exactly one list has digits left; make ``a`` point at it.
        swapped = False
        if a is None:
            a, b = b, a
            swapped = True
        while a:
            carry, digit = divmod(a.val + carry, 10)
            a.val = digit
            a_prev = a
            a = a.next
        if carry:
            a_prev.next = ListNode(carry)
        return B if swapped else A
276ad57202f3ad91621b92cca925938c49a1b922 | Python | DavidCastilloAlvarado/DL_lstm_AR_timeseries | /forecastingClass.py | UTF-8 | 10,077 | 2.515625 | 3 | [] | no_license | # %%
from forecastingtools import *
from customlayer_v2 import *
import os
class forecasting_demanda(object):
    """Per-product demand-forecasting wrapper around an LSTM-AR Keras model.

    Reads sales history from an Excel sheet, engineers rolling-mean/std
    features plus time delays, scales with MinMaxScaler, and exposes training
    plus several forecasting helpers. Depends on helpers imported star-style
    from forecastingtools/customlayer_v2 (model_forecasting_v2, add_roll_agg,
    gen_time_series, Fechas_totales, completa_fechas, Keras callbacks, ...).
    """
    def __init__(self, producto, n_steps, window_agg, db_path, datos_d_interes, sheetdb, saved_file_model=None, name_model="lstmar", split_train_ratio=0.2, stratif=2):
        # producto: single product name; internally kept as a one-element list
        self.productos = [producto]
        self.sheetdb = sheetdb
        self.stratif = stratif  # number of index bins used for stratified splitting
        self.datos_d_interes = datos_d_interes  # columns of interest in the sheet
        self.n_steps = n_steps  # time-delay window length fed to the LSTM
        self.window_agg = window_agg  # rolling mean/std window size
        self.db_path = db_path
        self.saved_file = saved_file_model  # optional pre-trained weights file
        self.name_model = name_model
        self.split_train_ratio = split_train_ratio
        # 3 features per product: raw value, rolling mean, rolling std
        self.input_shape = (n_steps, int(len(self.productos)*3))
        self.n_epochs = 0
        self.db_clean = self.readDatafromDB()
        self.X_data, self.Y_data, self.scaler_x, self.scaler_y, self.periodos = self.gen_input()
        print("===================== {} =======================".format(name_model))
        self.model = model_forecasting_v2(input_shape=self.input_shape, n_outputs=len(
            self.productos), saved_file=self.saved_file, summary=True)
        # stratify on the coarse time-period label so both splits cover the series
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X_data, self.Y_data, test_size=self.split_train_ratio, random_state=50, stratify=self.periodos["Periodos"])
    def train_model(self, epochs=2500):
        """Fit the model for *epochs* more epochs, then reload the best checkpoint."""
        dbsource = self.name_model
        logdir = "logs/"+dbsource
        self.logdir = logdir
        model_filename = dbsource + '.hdf5'
        self.checkpoint_path = os.path.join('model', model_filename)
        epoch_add = epochs
        tboard_callback = TensorBoard(log_dir=logdir)
        # model_checkpoint = ModelCheckpoint('model/'+dbsource+'/LSTMAR_'+dbsource+'.hdf5', monitor='val_loss',verbose=1, save_best_only=True,)
        # keep only the checkpoint with the best validation R^2
        model_checkpoint = ModelCheckpoint(
            filepath=self.checkpoint_path, monitor='val_r2_coeff_det', verbose=1, save_best_only=True, mode="max",)
        earlyStopping = EarlyStopping(
            monitor='val_loss', patience=300, min_delta=0)
        # NOTE(review): ``history`` is assigned but never used afterwards.
        history = self.model.fit(self.X_train, self.y_train,
                                 validation_data=(self.X_test, self.y_test),
                                 epochs=self.n_epochs + epoch_add,
                                 initial_epoch=self.n_epochs,
                                 callbacks=[tboard_callback,
                                            model_checkpoint, earlyStopping],
                                 workers=1,
                                 )
        self.n_epochs = self.n_epochs + epoch_add
        # reload from the best checkpoint rather than keeping the last weights
        self.model = model_forecasting_v2(input_shape=self.input_shape, n_outputs=len(
            self.productos), saved_file=self.checkpoint_path, summary=True)
    def readDatafromDB(self):
        """Load, clean and pivot the Excel sales data into one column per product."""
        productos = self.productos
        # open the Excel workbook
        df_study = pd.read_excel(self.db_path, sheet_name=self.sheetdb)
        col_interes = self.datos_d_interes
        new_col_names = ["AÑO", "MES", "PRODUCTO", "DEMANDA_VENTAS"]
        df_study = df_study[col_interes]
        df_study.columns = new_col_names
        df_study["PRODUCTO"] = df_study["PRODUCTO"].apply(homogenizar_cat)
        df_study_mod = add_dataTime(df_study.copy())
        df_study_mod = df_study_mod.loc[df_study_mod["PRODUCTO"].isin(
            productos)]  # keep only the product(s) under study
        df_study_mod = df_study_mod.groupby(
            by=["PRODUCTO", "FECHA"], as_index=False).mean().sort_values(by="FECHA")
        df_study_mod = df_study_mod.reset_index(drop=True)
        # df_study_mod = df_study_mod.iloc[:15] # Selecciona los 15 ultimos dias
        df_study_mod["PRODUCTO"] = df_study_mod["PRODUCTO"].astype(str)
        # fill date gaps so every product has a row for every date
        fechas_total = Fechas_totales(df_study_mod)
        df_study_mod = completa_fechas(df_study_mod, productos, fechas_total)
        self.fechas_col = df_study_mod["FECHA"]  # dates column, kept for later
        # pivot to one time-series column per product
        df_study_mod = df_study_mod.pivot(
            index="FECHA", columns=["PRODUCTO"], values=["DEMANDA_VENTAS"])
        df_study_mod = df_study_mod.sort_values(by="FECHA")
        df_study_mod.columns = df_study_mod.columns.droplevel()
        self.productos = list(df_study_mod.columns)
        return df_study_mod
    def gen_input(self, data=None, TRAIN=True):
        """Build model inputs.

        TRAIN=True: fit fresh scalers and return
        (X_data, Y_data, scaler_x, scaler_y, periodos).
        TRAIN=False: transform *data* with the already-fitted scalers and
        return (x_input, input_shape, scaler_x, scaler_y) for one prediction.
        """
        if TRAIN:
            data = self.db_clean.copy()
        window_agg = self.window_agg
        n_steps = self.n_steps
        productos = self.productos
        # (old idea kept for reference) fit the output scaler on raw data:
        # scaler_y.fit(data)
        # engineered features: rolling mean and rolling std
        data = add_roll_agg(data, Wmean=window_agg,
                            Wstd=window_agg, dropna=True)
        Y_columns = list(data.columns)
        if not TRAIN:  # prediction path, no refitting of scalers
            # build the time delays
            # n_steps-1 delays + the current position = n_steps
            data = gen_time_series(data, steps=n_steps-1, dropna=True)
            data = data.reset_index(drop=True)
            # scale with the scalers fitted at training time
            X = data
            X = self.scaler_x.transform(X)
            x_input = X[-1]
            x_input = np.expand_dims(x_input, axis=0)
            # reshape into (batch, n_steps, n_features)
            # len(productos) * 3 (value, mean, std)
            n_features = int(len(productos)*3)
            x_input = x_input.reshape((-1, n_steps, n_features))
            input_shape = x_input.shape[1:]
            return x_input, input_shape, self.scaler_x, self.scaler_y
        else:
            scaler_y = MinMaxScaler()
            scaler_x = MinMaxScaler()
            data = gen_time_series(data, steps=n_steps, dropna=True)
            data = data.reset_index(drop=True)
            all_columns = list(data.columns)
            # drop the non-lagged columns; only the delayed copies feed X
            _ = [all_columns.remove(col) for col in Y_columns]
            Y_data = data[productos]
            scaler_y.fit(Y_data)
            Y_data = scaler_y.transform(Y_data)
            X = data[all_columns]
            scaler_x.fit(X)
            X = scaler_x.transform(X)
            # len(productos) * 3 (value, mean, std)
            n_features = int(len(productos)*3)
            X_data = X.reshape((-1, n_steps, n_features))
            # coarse time-period labels used to stratify the train/test split
            bins = self.stratif
            periodos = pd.DataFrame(data.index)
            periodos["Periodos"] = pd.cut(periodos[0], bins=bins, labels=[
                                          "period"+str(i) for i in range(bins)])
            return X_data, Y_data, scaler_x, scaler_y, periodos
    def forecast_product(self, product_name=None, n_ahead=1, save_path=None):
        """Iteratively forecast *n_ahead* steps, feeding predictions back in.

        Returns a rounded DataFrame of the forecasts (optionally a single
        product's column) and optionally writes it to *save_path* as CSV.
        """
        df_temp_accum = self.db_clean
        # forecast n steps ahead, one at a time
        for _ in range(n_ahead):
            # transform the accumulated table and take the latest window
            x_input, input_shape, scaler_x, scaler_y = self.gen_input(
                data=df_temp_accum, TRAIN=False)
            # forecast one step and undo the scaling
            forescast = self.model.predict(x_input)
            forescast_scal = scaler_y.inverse_transform(forescast)
            # append the forecast so the next step can use it as history
            forescast_df = pd.DataFrame(forescast_scal, columns=self.productos)
            df_temp_accum = df_temp_accum.append(
                forescast_df, ignore_index=True)
        # return a single product's forecast if requested
        if product_name:
            df_predic = pd.DataFrame(
                df_temp_accum.iloc[-n_ahead:, self.productos.index(product_name)]).round()
            if save_path:
                df_predic.to_csv(path_or_buf=save_path)
            return df_predic
        if save_path:
            df_temp_accum.iloc[-n_ahead:].round().to_csv(path_or_buf=save_path)
        return df_temp_accum.iloc[-n_ahead:].round()
    def forecast_valdata(self, source="train"):
        """Predict on the chosen split ('train', 'val' or 'whole').

        Returns (forecast, truth) as rounded, unscaled DataFrames.
        NOTE(review): an unknown *source* leaves forecast_scal/true_val
        unbound and raises below -- confirm intended.
        """
        if source == "val":
            forecast_scal = self.model.predict(self.X_test)
            forecast_scal = self.scaler_y.inverse_transform(forecast_scal)
            true_val = self.scaler_y.inverse_transform(self.y_test)
        elif source == "train":
            forecast_scal = self.model.predict(self.X_train)
            forecast_scal = self.scaler_y.inverse_transform(forecast_scal)
            true_val = self.scaler_y.inverse_transform(self.y_train)
        elif source == "whole":
            forecast_scal = self.model.predict(self.X_data)
            forecast_scal = self.scaler_y.inverse_transform(forecast_scal)
            true_val = self.scaler_y.inverse_transform(self.Y_data)
        forecast_scal = pd.DataFrame(
            forecast_scal, columns=self.productos).round()
        true_val = pd.DataFrame(true_val, columns=self.productos).round()
        return forecast_scal, true_val
    def forecasting_selfdata(self,):
        """Predict the whole dataset and return (forecast, truth) with dates."""
        forecast_scal = self.model.predict(self.X_data)
        forecast_scal = self.scaler_y.inverse_transform(forecast_scal)
        true_val = self.scaler_y.inverse_transform(self.Y_data)
        forecast_scal = pd.DataFrame(
            forecast_scal, columns=self.productos).round()
        forecast_scal["Fecha"] = self.db_clean.index[-len(forecast_scal):]
        true_val = pd.DataFrame(true_val, columns=self.productos).round()
        true_val["Fecha"] = self.db_clean.index[-len(forecast_scal):]
        return forecast_scal, true_val
# %%
class Bulk_Models(object):
    """Builds one forecasting_demanda model per product and trains them in bulk."""
    def __init__(self, **kwargs):
        self.productos = kwargs["productos"]
        self.models = {}
        kwargs.pop("productos")
        # one model per product; set_product mutates and reuses the same kwargs dict
        for producto in self.productos:
            params = self.set_product(kwargs, producto)
            self.models[producto] = forecasting_demanda(**params)
    @staticmethod
    def set_product(kwargs, producto):
        """Stamp product-specific keys onto *kwargs* (mutated in place) and return it."""
        kwargs["producto"] = producto
        kwargs["name_model"] = "model_" + producto
        return kwargs
    def train_models(self, epochs=5000):
        """Train every per-product model for *epochs* epochs."""
        for producto in self.productos:
            self.models[producto].train_model(epochs=epochs)
| true |
6eb762fdbc90e92b4d204b59a4a64678589dacca | Python | imvishvaraj/chatbot_pytorch | /chatbot/nltk_utils.py | UTF-8 | 1,573 | 3.65625 | 4 | [] | no_license | from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer
import numpy as np
# nltk.download('punkt')
stemmer = PorterStemmer()
def tokenize(sentence):
    """Split *sentence* into word/punctuation tokens using NLTK's tokenizer."""
    # Converting sentence into number of words (tokens)
    # sentence: 'May Force Be With You!'
    # token: ['May', 'Force', 'Be', 'With', 'You', '!']
    return word_tokenize(sentence)
def stem(word):
    """Lower-case *word* and reduce it to its Porter stem."""
    # concept - stemming
    # Generating the root form of the words.
    # words: ["organize", "organizes", "organizing"]
    # stemmed_words: ['organ', 'organ', 'organ']
    return stemmer.stem(word.lower())
def bag_of_words(tokenized_sentence, all_words):
    """Return a float32 indicator vector over *all_words*.

    bag[i] is 1.0 iff all_words[i] occurs among the stemmed tokens of the
    sentence, e.g.:
        sentence = ["hello", "how", "are", "you"]
        words = ["hi", "hello", "I", "you", "bye", "thank", "cool"]
        bag = [0, 1, 0, 1, 0, 0, 0]
    """
    stemmed_tokens = [stem(token) for token in tokenized_sentence]
    indicators = [1.0 if word in stemmed_tokens else 0.0 for word in all_words]
    return np.array(indicators, dtype=np.float32)
if __name__ == '__main__':
    # step 1 - tokenization
    # a = "How long does shipping take?"
    # print("sentence: " + a)
    # a = tokenize(a)
    # print(a)
    # step 2 - stemming
    # words = ["organize", "organizes", "organizing"]
    # stemmed_words = [stem(w) for w in words]
    # print(stemmed_words)
    # step 4 - bag of words
    # smoke-test bag_of_words with a hand-made sentence/vocabulary pair
    sentence = ["hello", "how", "are", "you"]
    words = ["hi", "hello", "I", "you", "bye", "thank", "cool"]
    bag = bag_of_words(sentence, words)
print(bag) | true |
6d799bee7fc1b25075c04a708c8c2bdcf2fb4032 | Python | capoony/popgentools | /mimicree/tmp/print_lowrec.py | UTF-8 | 1,990 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import collections
import math
import re
import random
from optparse import OptionParser, OptionGroup
class RecombinationWindow:
    """One genomic window [start, end) on chromosome *chr* with its recombination rate."""
    def __init__(self, chr, start, end, rec):
        # attribute names are part of the public surface: downstream code
        # reads .chr, .start, .end and .rec directly
        self.chr = chr
        self.start = start
        self.end = end
        self.rec = rec
def load_recombination(recfile,minrec):
    """Return {chromosome: (start, end)} of the first high-recombination region.

    A region opens at the first window with rate >= *minrec* and closes at the
    first subsequent window where the rate drops below *minrec* again.

    NOTE(review): if the rate never falls back below *minrec* after a region
    opens, that chromosome is silently skipped and the assert below fails.
    """
    recwindows=load_recombinaton_file(recfile)
    recchr=group_by_chromosome(recwindows)
    recrate={}
    for chr,windows in recchr.items():
        started=False
        recstart=0
        for w in windows:
            if(not started and w.rec >= minrec):
                started=True
                recstart=w.start
            elif(started and w.rec<minrec):
                recrate[w.chr]=(recstart,w.start)
                break
    assert(len(recrate.keys())==5) # 5 chromosomes of dmel
    return recrate
def group_by_chromosome(recwindows):
    """Bucket windows by chromosome name.

    Returns a defaultdict mapping chromosome -> list of windows, preserving
    the input order within each chromosome.
    """
    grouped = collections.defaultdict(list)
    for window in recwindows:
        grouped[window.chr].append(window)
    return grouped
def load_recombinaton_file(recfile):
    """Parse a tab-separated recombination file into RecombinationWindow objects.

    Each row looks like ``2L:0..100000<TAB>r1<TAB>r2<TAB>r3``; the window span
    comes from column 0 and column index 2 (the middle estimate) is used as
    the window's rate.

    NOTE(review): the file handle is never closed explicitly; a with-block
    would be cleaner.
    """
    toret=[]
    for l in open(recfile):
        """
        2L:0..100000	0.00	0.00	0.00
        2L:100000..200000	0.00	0.00	0.00
        2L:200000..300000	0.00	0.00	1.89
        2L:300000..400000	1.89	1.92	1.95
        2L:400000..500000	1.95	1.98	2.01
        2L:500000..600000	2.01	2.04	2.07
        2L:600000..700000	1.0	0.00	0.00
        """
        l=l.rstrip()
        a=l.split("\t")
        # pull chromosome, start and end out of '<chr>:<start>..<end>'
        m=re.search(r"(\w+):(\d+)..(\d+)",a[0])
        chr=m.group(1)
        start=int(m.group(2))
        end=int(m.group(3))
        recrate=float(a[2])
        rw=RecombinationWindow(chr,start,end,recrate)
        toret.append(rw)
    return toret
# CLI entry: --recfile / --recthres. Python 2 script (note the print statement).
parser = OptionParser()
parser.add_option("--recfile",dest="recfile",help="A file containing the low recombining region")
parser.add_option("--recthres",dest="thres",help="Recombination rate threshold")
(options, args) = parser.parse_args()
# NOTE(review): no check that the options were supplied; float(None) raises.
rr=load_recombination(options.recfile,float(options.thres))
print rr
| true |
227ac610d0e2856d1f56f9e41a67f98f3f3ce92a | Python | codebubb/python_course | /Semester 1/Week 3/test.py | UTF-8 | 67 | 2.984375 | 3 | [] | no_license | counter = 20
# NOTE(review): counter starts at 20 (above 10) and only grows, so this loop
# never terminates -- `counter -= 1` or a `counter < 10` condition was likely
# intended. Python 2 syntax (print statement).
while counter > 10:
    counter +=1
    print counter
| true |
b56b44b305a224d3c11e8f00f046599cb42ba731 | Python | superdurszlak/ModelParameterIdentifier | /src/models/khan_huang_liang_model.py | UTF-8 | 2,492 | 2.59375 | 3 | [
"MIT"
] | permissive | import math
import numpy as np
from src.models.material_model import MaterialModel
class KhanHuangLiangModel(MaterialModel):
    """Khan-Huang-Liang (KHL) viscoplastic flow-stress model.

    Flow stress:
        sigma = (A + B * (1 - ln(rate)/ln(D0))**n1 * strain**n0)
                * (1 - T_h**m) * rate**C
    with homologous temperature T_h = (T - T_ref) / (T_melt - T_ref).
    Parameter vector order is [A, B, n0, n1, C, m] -- see labels().
    """
    @classmethod
    def _upper_bounds(cls):
        # fitting upper bounds per parameter; softening exponent m capped at 1
        return np.array([np.inf, np.inf, np.inf, np.inf, np.inf, 1.0])
    @classmethod
    def _lower_bounds(cls):
        # fitting lower bounds; hardening exponents n0/n1 may go negative
        return np.array([0.0, 0.0, -np.inf, -np.inf, 0.0, 0.0])
    @classmethod
    def labels(cls):
        # parameter names, index-aligned with self.params
        return ['A', 'B', 'n0', 'n1', 'C', 'm']
    @classmethod
    def params_scaling(cls):
        # A and B scale like stresses (factor 1e9); the exponents are unscaled
        return np.array([1e9, 1e9, 1, 1, 1, 1])
    def __call__(self, strain: float, strain_rate: float, temperature: float):
        """Flow stress at the given strain, strain rate and temperature.

        Temperature is in kelvin (t_ref = 293.15 K below).
        """
        parameters = self.params
        t_ref = 293.15  # reference (room) temperature, K
        # NOTE(review): melting point hard-coded to 1425 degC -- presumably a
        # specific alloy; confirm before reusing for another material.
        t_melt = 1425 + 273.15
        t_h = (temperature - t_ref) / (t_melt - t_ref)
        D0 = 1e6  # reference limit strain rate
        D_log = math.log(D0)
        A = parameters[0]
        B = parameters[1]
        n0 = parameters[2]
        n1 = parameters[3]
        C = parameters[4]
        m = parameters[5]
        rate_exp = strain_rate ** C
        softening = (1 - t_h ** m)
        # NOTE(review): unlike derivatives(), no clamping here -- math.log
        # raises for strain_rate <= 0 and strain ** n0 can fail for
        # strain == 0 with negative n0; confirm inputs stay positive.
        hardening = ((1 - (math.log(strain_rate) / D_log)) ** n1) * (strain ** n0)
        return (A + B * hardening) * softening * rate_exp
    def derivatives(self, strain: float, strain_rate: float, temperature: float):
        """Partial derivatives of the flow stress w.r.t. each model parameter.

        Returns a dict keyed by labels(); inputs are clamped away from zero so
        the logarithms below stay defined.
        """
        parameters = self.params
        labels = self.labels()
        t_ref = 293.15
        t_melt = 1425 + 273.15
        t_h = (temperature - t_ref) / (t_melt - t_ref)
        D0 = 1e6
        D_log = math.log(D0)
        A = parameters[0]
        B = parameters[1]
        n0 = parameters[2]
        n1 = parameters[3]
        C = parameters[4]
        m = parameters[5]
        # clamp to avoid log(0) in the expressions below
        s_safe = max(strain, 1e-9)
        sr_safe = max(strain_rate, 1e-9)
        th_safe = max(t_h, 1e-9)
        rate_exp = strain_rate ** C
        softening = (1 - t_h ** m)
        ln_diff = (1 - (math.log(sr_safe) / D_log))
        hardening = (ln_diff ** n1) * (strain ** n0)
        full_strain_hardening = (A + B * hardening)
        derivatives = {
            labels[0]: rate_exp * softening,
            labels[1]: rate_exp * softening * hardening,
            labels[2]: B * hardening * math.log(s_safe) * softening * rate_exp,
            labels[3]: B * (ln_diff ** n1) * math.log(ln_diff) * softening * rate_exp,
            labels[4]: full_strain_hardening * softening * rate_exp * math.log(sr_safe),
            labels[5]: - full_strain_hardening * rate_exp * (t_h ** m) * math.log(th_safe)
        }
        return derivatives
| true |
fc1d88f4779c702110178536bc1212a8838902ef | Python | gaarangoa/deepGene | /deepGene/deepGene.py | UTF-8 | 4,613 | 2.5625 | 3 | [] | no_license | import sys
import argparse
import json
from motif2json import main as motifToJsonMain
from mergerFeatures import main as mergeFeatures
from train import main as trainModel
from train import weights
from train import regression as trainRegression
from predict import main as Predictor
def motif2json(args):
    """CLI handler: convert FIMO motif output into a JSON file."""
    motifToJsonMain(args.input, args.output)
def merger(args):
    """CLI handler: merge several feature files into one dataset."""
    mergeFeatures(args)
def train(args):
    """CLI handler: train the classification model."""
    trainModel(args)
def featureWeights(args):
    """CLI handler: dump the first-layer feature weights of a trained model."""
    weights(args)
def regression(args):
    """CLI handler: train a regression model."""
    trainRegression(args)
def predictor(args):
    """CLI handler: predict expression level for a new gene."""
    Predictor(args)
def main():
    """Build the deepGene argparse CLI and dispatch to the chosen subcommand.

    NOTE(review): the subparsers are not marked required, so invoking the
    program without a subcommand makes ``args.func`` below raise
    AttributeError on Python 3 -- confirm intended.
    """
    parser = argparse.ArgumentParser(prog="deepGene", description="welcome to the amazing deepGene software")
    subparsers = parser.add_subparsers(help="type the command name for help", title="commands", description="valid subcomands")
    # parser for the motif2json utility
    motifToJsonParser = subparsers.add_parser('motif2json', help='This program takes the results from the motif finding and parses that file into a json file')
    motifToJsonParser.add_argument('--input', help='input motif generated by FIMO', required=True)
    motifToJsonParser.add_argument('--output', help="output json file", required=True)
    motifToJsonParser.set_defaults(func=motif2json)
    # merge features
    mergerParser = subparsers.add_parser('mergeFeatures', help="merge different sets of features")
    mergerParser.add_argument('--features', nargs='+', help='List of feature files', required=True)
    # mergerParser.add_argument('--labels', help='File with labels of each gene', required=True)
    mergerParser.add_argument('--genes', help='File with gene ids that are considered for the analysis. If the file is empty all genes are used')
    mergerParser.add_argument('--output', help='output hdf5 format dataset', required=True)
    mergerParser.set_defaults(func=merger)
    # training module
    # NOTE(review): the variable name ``mergerParser`` is reused here for the
    # 'train' subcommand (and its help string still says "merge ... features").
    mergerParser = subparsers.add_parser('train', help="merge different sets of features")
    mergerParser.add_argument('--dataset', help='dataset containing the features and labels', required=True)
    mergerParser.add_argument('--model', help='output directory where to write the model', required=True)
    mergerParser.add_argument('--test', help='the fraction of the dataset to be used for validation 0.33', required=True, type=float)
    mergerParser.add_argument('--epochs', help='number of epochs the deep learning has to run', required=True, type=int)
    mergerParser.add_argument('--batch_size', help='batch size of the dl', required=True, type=int)
    # mergerParser.add_argument('--validation', nargs='+', help='setup a crossvalidation')
    # mergerParser.add_argument('--fullmodel', nargs='+', help='build a production model with the whole dataset [no crossvalidation]')
    mergerParser.set_defaults(func=train)
    # get weights for the feature vector
    wParser = subparsers.add_parser('weights', help="get the max weigths of each feature in the first layer")
    wParser.add_argument('--dataset', help='dataset containing the features and labels', required=True)
    wParser.add_argument('--model', help='model input file', required=True)
    wParser.add_argument('--weights', help='output file where to write the weights', required=True)
    wParser.set_defaults(func=featureWeights)
    # training module
    regParser = subparsers.add_parser('regression', help="train a regression model")
    regParser.add_argument('--dataset', help='dataset containing the features and labels', required=True)
    regParser.add_argument('--model', help='output directory where to write the model', required=True)
    regParser.add_argument('--test', help='the fraction of the dataset to be used for validation 0.33', required=True, type=float)
    regParser.add_argument('--epochs', help='number of epochs the deep learning has to run', required=True, type=int)
    regParser.add_argument('--batch_size', help='batch size of the dl', required=True, type=int)
    regParser.set_defaults(func=regression)
    # predict a new entry
    pred_parser = subparsers.add_parser('prediction', help="predict expression level")
    pred_parser.add_argument('--input', help='input gene in fasta format', required=True)
    pred_parser.add_argument('--model', help='trained model', required=True)
    # regParser.add_argument('--output', help='where to store the results', required=True, type=float)
    pred_parser.add_argument('--kmers', help='file with the selected features .wg', required=True)
    pred_parser.set_defaults(func=predictor)
    # parser input files
    args = parser.parse_args()
    args.func(args)
| true |
d00d20e626c5a5ff7e9b37b5950fce44c83af5a2 | Python | chrismgeorge/Curriculum | /past_iterations/02_Homewood_Y/01_adventures/03_math_game.py | UTF-8 | 790 | 4.40625 | 4 | [] | no_license |
def playMathGame():
    """Entry point: greet the player and hand control to question 1."""
    print("Welcome to the math game!")
    input("Press enter to continue.")
    question1()
def question1():
    """Ask the addition question; a correct answer advances to question 2."""
    print("Type the answer to 13+24 to hear a secret")
    # NOTE(review): int() raises ValueError on non-numeric input -- unhandled.
    addAnswer = int(input("> "))
    # response 1
    if addAnswer == 37:
        print("The secret is that with code you never have to do math again")
        question2()
    else:
        print("Nope sorry, no secrets for you")
        gameOver()
def question2():
    """Ask the Python question; a correct answer wins the game."""
    print("What function displays text on the screen?")
    printAnswer = input("> ")
    # response 2 (exact, case-sensitive match required)
    if printAnswer == "print":
        print("Right - print does!")
        winGame()
    else:
        print("No - that's not right.")
        gameOver()
def winGame():
    """Congratulate the player after every question was answered correctly."""
    # win response
    victory_message = "Wahoo you got all questions right!"
    print(victory_message)
def gameOver():
    """Terminal state reached from any wrong answer."""
    farewell = "Game Over!"
    print(farewell)
# Start the game when the script runs (no __main__ guard -- also runs on import).
playMathGame()
| true |
686fc12602499c228f7bde871e66d66455316df1 | Python | StealthTech/lab_4 | /ex_1.py | UTF-8 | 736 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python3
from librip.gens import field, gen_random
goods = [
{'title': 'Ковер', 'price': 2000, 'color': 'green'},
{'title': 'Диван для отдыха', 'price': 5300, 'color': 'black'},
{'title': 'Стелаж', 'price': 7000, 'color': 'white'},
{'title': 'Вешалка для одежды', 'price': 800, 'color': 'white'},
]
# Реализация задания 1
gen = field(goods, 'title')
for i in range(4):
print(i + 1, next(gen))
gen = field(goods, 'title', 'no_match')
for i in range(4):
print(i + 1, next(gen))
gen = field(goods, 'title', 'price')
for i in range(4):
print(i + 1, next(gen))
gen = field(goods, 'price')
for i in range(4):
print(i + 1, next(gen))
| true |
62b370fb6486af2c4e67442d3f6d0b179e1a80e7 | Python | huchangming/tourscool_com | /page_object/base.py | UTF-8 | 1,901 | 2.890625 | 3 | [] | no_license | from tourscool_log import base_log
class Base(object):
    """Page-object base: thin wrappers around the WebDriver element API.

    Naming convention from the original Chinese comments: "single" methods
    act on find_element; "plural" methods take an *index* into find_elements.
    *mode* is a (By, locator) pair unpacked into the driver call.
    """
    # my_log = base_log.Logger('ToursCool').getlog() # left disabled: defined at class level it would be re-initialised on every reload
    def __init__(self, driver):
        self.driver = driver
        self.timeout = 30  # default wait budget in seconds (not yet used below)
    # --------------------------------------------------------------
    # single: locate one element and return it
    def find_ele(self, *mode):
        return self.driver.find_element(*mode)
    # plural: locate matching elements and return the one at *index*
    def find_eles(self, index, *mode):
        return self.driver.find_elements(*mode)[index]
    # unrelated to the methods below
    # --------------------------------------------------------------
    # single: type text into an element
    def input_text(self, mode, text):
        self.driver.find_element(*mode).send_keys(text)
    # plural: type text into the index-th matching element
    def inputs_text(self, mode, index, text):
        self.driver.find_elements(*mode)[index].send_keys(text)
    # single: click an element
    def click(self, mode):
        self.driver.find_element(*mode).click()
    # plural: click the index-th matching element
    def clicks(self, mode, index):
        self.driver.find_elements(*mode)[index].click()
    # single: read an element's text (method name means "check/inspect")
    def jiancha(self, mode):
        return self.driver.find_element(*mode).text
    # plural: read the index-th matching element's text
    def jianchaS(self, mode, index):
        return self.driver.find_elements(*mode)[index].text
    # single: whether an element is displayed
    def displayed(self, mode):
        return self.driver.find_element(*mode).is_displayed()
    # plural: whether the index-th matching element is displayed
    def displayeds(self, mode, index):
        return self.driver.find_elements(*mode)[index].is_displayed()
    # clear an input field
    def clear(self, mode):
        self.driver.find_element(*mode).clear()
    # full page source, for content assertions
    def page_data(self):
        return self.driver.page_source
    # swipe gesture (Appium driver): start/end coordinates and duration
    def page_swipe(self,*ints):
        return self.driver.swipe(*ints)
| true |
74016bfbe7b71a6d57e95ec971a726c5cc9f274f | Python | feliphebueno/py_watch | /main.py | UTF-8 | 2,719 | 2.796875 | 3 | [] | no_license | """
Sappiens Framework
Copyright (C) 2014, BRA Consultoria
Website do autor: www.braconsultoria.com.br/sappiens
Email do autor: sappiens@braconsultoria.com.br
Website do projeto, equipe e documentacao: www.sappiens.com.br
Este programa e software livre; voce pode redistribui-lo e/ou
modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
publicada pela Free Software Foundation, versao 2.
Este programa e distribuido na expectativa de ser util, mas SEM
QUALQUER GARANTIA; sem mesmo a garantia implicita de
COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
detalhes.
Voce deve ter recebido uma copia da Licenca Publica Geral GNU
junto com este programa; se nao, escreva para a Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
Copias da licenca disponiveis em /Sappiens/_doc/licenca
"""
import sys, json, traceback
from processa_evento import processaEvento
from flask import Flask, request
__author__ = "Bueno, Feliphe <feliphezion@gmail.com>"
__version__ = "2.0"
def start(payload, evento):
    """Route a GitHub webhook *payload* of event type *evento* to processaEvento.

    Returns {'success': bool, 'retorno': str}. When either argument is falsy,
    'retorno' carries the Portuguese "no data received for processing" message.
    """
    if payload and evento:
        retorno = {
            'success': True,
            'retorno': str(processaEvento(payload).processaEvento(evento)),
        }
    else:
        retorno = {
            'success': False,
            'retorno': "Nenhum dado recebido para processamento."
        }
    return retorno
app = Flask(__name__)
@app.route("/", methods=['POST'])
def main():
    """Webhook endpoint: forwards GitHub events to start() and reports status.

    NOTE(review): Python 2 only (print statement and unicode() below); the
    reply is str(dict), not JSON -- clients must not expect application/json.
    """
    response = {'success': "", 'response': ""}
    try:
        payload = request.get_json(True)
        retorno = start(payload, request.headers.get("Http-X-Github-Event"))
        response = {
            'success' : retorno['success'],
            'retorno' : retorno['retorno']
        }
    except NameError as Undefined:
        # NameError is handled separately but identically to the generic case
        response['success'] = False
        response['response'] = unicode(str(Undefined), errors='replace')
        response['stackTrace'] = '<pre>' + traceback.format_exc() + '</pre>'
    except Exception as e:
        response['success'] = False
        response['response'] = unicode(str(e), errors='replace')
        response['stackTrace'] = '<pre>' + traceback.format_exc() + '</pre>'
    finally:
        # optional console trace when extra CLI args and a debug header are present
        if len(sys.argv) > 3:
            if request.headers.get("--debug") and 'stackTrace' in response:
                print "Stack Trace:\n" + response['stackTrace']
    return str(response)
@app.route("/")
def hello():
    """Plain GET health check confirming the server answers."""
    banner = "<h1>Python flask server http/1.1 200 OK</h1>"
    return banner
# Development server entry point (localhost only, Flask debugger enabled).
if __name__ == "__main__":
    app.run(host='localhost', port=8081, debug=True)
14609094ebb1ed4ab0ee7fc4673c96aa0ab5185c | Python | astrocoolin/DS_Project | /NetFics/NetFics/.ipynb_checkpoints/Movie_Scrape-checkpoint.py | UTF-8 | 2,881 | 2.75 | 3 | [] | no_license | import requests
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
import pandas as pd
import numpy as np
class Movies:
    """Recommend the movies most similar to a given Book via shared keywords.

    Pipeline: prepend the book's row to a pre-pickled movie DataFrame,
    one-hot encode the 'Keywords' column, run nearest-neighbours with the
    book row as the query, then scrape Google/IMDb for the best match's
    poster image. Constructing an instance runs the whole pipeline.
    """
    def __init__(self,book):
        # NOTE(review): absolute machine-specific path -- breaks elsewhere.
        self.df = pd.read_pickle(
            '/home/colin/Insight_Project/data/smallset_2.pkl')
        self.keywords = []
        self.webpage = ""
        self.title = ""
        self.coverlink = ""
        self.second_best = ""
        self.third_best = ""
        self.combine_frames(book)
        self.best_movie()
        self.grab_movie_img()
    def combine_frames(self,book):
        """Prepend the book's DataFrame rows; row 0 becomes the NN query point."""
        Book_df = book.df
        self.df = Book_df.append(self.df).reset_index(drop=True)
    def best_movie(self):
        """Find the three movies nearest to the book in one-hot keyword space."""
        # Determine which movie has the most features in common with the book
        mlb = MultiLabelBinarizer()
        temp_df = self.df.copy()
        print('b4 mlb')  # NOTE(review): leftover debug prints throughout
        clean_df = temp_df.join(pd.DataFrame(mlb.fit_transform(temp_df.pop('Keywords')),
                                             columns=mlb.classes_,
                                             index=temp_df.index))
        print('post mlb')
        sparse_df = clean_df.drop(columns='Name')
        print('pre NN')
        print(sparse_df.shape)
        # fit on rows 1.. (the movies); row 0 is the book used as the query
        nbrs = NearestNeighbors(n_neighbors=4, algorithm='ball_tree').fit(sparse_df.iloc[1:])
        distances, top_three = nbrs.kneighbors(pd.DataFrame(sparse_df.iloc[0]).T)
        print('post NN',top_three)
        # +1 maps neighbour indices (movie-only frame) back to self.df rows
        self.keywords = self.df['Keywords'][top_three[0][0]+1]
        self.title = clean_df['Name'][top_three[0][0]+1]
        self.second_best =clean_df['Name'][top_three[0][1]+1]
        self.third_best = clean_df['Name'][top_three[0][2]+1]
        #return H, clean_df['Name']
        #return sparse_df
    def grab_movie_img(self):
        """Scrape a Google search for the title's IMDb page, then its poster URL."""
        query = self.title + ' site:imdb.com'
        g_clean = []
        self.webpage = 'https://www.google.com/search?client=ubuntu&channel=fs&q={}&ie=utf-8&oe=utf-8'.format(
            query) # this is the actual query we are going to scrape
        html = requests.get(self.webpage)
        # NOTE(review): BeautifulSoup called without an explicit parser --
        # warns and may behave differently across environments.
        soup = BeautifulSoup(html.text)
        a = soup.find_all('a') # a is a list
        # NOTE(review): the next statement's result is discarded (dead code)
        a[0].get('href')
        # collect every non-Google absolute URL from the result links
        for i in a:
            k = i.get('href')
            try:
                m = re.search("(?P<url>https?://[^\s]+)", k)
                n = m.group(0)
                rul = n.split('&')[0]
                domain = urlparse(rul)
                if re.search("google.com", domain.netloc):
                    continue
                else:
                    g_clean.append(rul)
            except:
                continue
        # first surviving link is assumed to be the IMDb page
        url = g_clean[0]
        html = requests.get(url)
        soup = BeautifulSoup(html.text)
        self.coverlink = soup.find(class_='poster').img['src']
| true |
ebf62e92116edee31c3d9d982f12343c4c69626d | Python | chisler/basic_problems | /sorting_search/search_empty.py | UTF-8 | 749 | 3.75 | 4 | [] | no_license | def search(a, x):
l, h = 0, len(a) - 1
while l <= h:
m = (l + h) // 2
if a[m] == '':
right = m + 1
left = m - 1
while True:
if left < l and right > h:
return -1
if right <= h and a[right] != '':
m = right
break
if left >= l and a[left] != '':
m = left
break
right += 1
left -= 1
if a[m] == x:
return m
if a[m] < x:
l = m + 1
if a[m] > x:
h = m - 1
return -1
a = ["at", "", "", "", "ball", "c", "d", "", "dad", "U"]
print(search(a, "U")) | true |
54e71758d87402591b4c8f48e3eb859dd73ea6e4 | Python | ramirog89/python-ws-server | /app/src/websocket/encoder.py | UTF-8 | 1,038 | 2.71875 | 3 | [] | no_license | def encode( frame ):
''' encode frame based on rfc6455 '''
bytesFormatted = []
bytesFormatted.append(129)
bytesRaw = frame.encode()
bytesLength = len(bytesRaw)
if bytesLength <= 125:
bytesFormatted.append(bytesLength)
elif 126 <= bytesLength <= 65535:
bytesFormatted.append(126)
bytesFormatted.append((bytesLength >> 8) & 255)
bytesFormatted.append(bytesLength & 255)
else:
bytesFormatted.append(127)
bytesFormatted.append((bytesLength >> 56) & 255)
bytesFormatted.append((bytesLength >> 48) & 255)
bytesFormatted.append((bytesLength >> 40) & 255)
bytesFormatted.append((bytesLength >> 32) & 255)
bytesFormatted.append((bytesLength >> 24) & 255)
bytesFormatted.append((bytesLength >> 16) & 255)
bytesFormatted.append((bytesLength >> 8) & 255)
bytesFormatted.append(bytesLength & 255)
bytesFormatted = bytes(bytesFormatted)
bytesFormatted = bytesFormatted + bytesRaw
return bytesFormatted | true |
bde977607bc408e9a19a437036c01ee8362337a8 | Python | khwilson/advent2020 | /advent/day10.py | UTF-8 | 1,553 | 4.0625 | 4 | [
"MIT"
] | permissive | """ AOC Day 10 """
from collections import Counter, defaultdict
from pathlib import Path
from typing import Union
def first(filename: Union[str, Path]) -> int:
    """AoC 2020 day 10, part 1.

    Build the full adapter chain (outlet 0, sorted adapters, device at
    max + 3) and return the product of the counts of 1-jolt and 3-jolt
    gaps between successive chain elements.
    """
    with open(filename, "rt") as handle:
        adapters = sorted(int(line.strip()) for line in handle)
    chain = [0] + adapters + [adapters[-1] + 3]
    gap_counts = Counter(hi - lo for lo, hi in zip(chain, chain[1:]))
    return gap_counts[1] * gap_counts[3]
def second(filename: Union[str, Path]) -> int:
    """AoC 2020 day 10, part 2.

    Count the valid adapter subsequences that end at the largest adapter,
    start at jolt 1, 2 or 3, and never jump by more than 3.

    Dynamic programming from the top down: ways[j] is the number of valid
    chains starting at jolt j.
    """
    with open(filename, "rt") as handle:
        adapters = sorted(int(line.strip()) for line in handle)
    ways = defaultdict(int)
    ways[adapters[-1]] = 1
    for jolt in reversed(adapters[:-1]):
        ways[jolt] = sum(ways[jolt + step] for step in (1, 2, 3))
    return sum(ways[start] for start in (1, 2, 3))
| true |
7f080712e41f32f0e5179f06642b01fceb05c58f | Python | boknowswiki/mytraning | /lintcode/python/0750_portal.py | UTF-8 | 1,329 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python -t
# BFS
from collections import deque
import sys
class Solution:
    """BFS shortest path from a start cell 'S' to the exit 'E' in a maze."""

    def Portal(self, Maze):
        """Return the minimum number of steps from any 'S' to 'E', or -1 if
        'E' is unreachable.  Returns 0 for an empty maze.

        Maze: grid (list of strings or list of lists) with 'S' (start),
        'E' (exit), '#' (wall) and other characters treated as open cells.
        """
        if not Maze or not Maze[0]:
            return 0
        m = len(Maze)
        n = len(Maze[0])
        q = deque()
        # Distance table; sys.maxsize marks "not visited yet".
        # (sys.maxint was removed in Python 3; sys.maxsize exists on 2.6+ and 3.)
        steps = [[sys.maxsize] * n for _ in range(m)]
        # Seed the queue with every start cell.
        for i in range(m):
            for j in range(n):
                if Maze[i][j] == 'S':
                    steps[i][j] = 0
                    q.append((i, j))
        dx = [1, -1, 0, 0]
        dy = [0, 0, 1, -1]
        # Level-by-level BFS; the first time 'E' is reached is the shortest path.
        while len(q) > 0:
            l = len(q)
            for i in range(l):
                cx, cy = q.popleft()
                for j in range(4):
                    nx = cx + dx[j]
                    ny = cy + dy[j]
                    if 0 <= nx < m and 0 <= ny < n and Maze[nx][ny] != '#' and \
                            steps[cx][cy] + 1 < steps[nx][ny]:
                        steps[nx][ny] = steps[cx][cy] + 1
                        if Maze[nx][ny] == 'E':
                            return steps[nx][ny]
                        q.append((nx, ny))
        return -1
| true |
d8ea55cb56780ef303e6c33422ab076cf5b53ce4 | Python | yuchdev/VirtualboxToolchain | /Script/replace_pair.py | UTF-8 | 133 | 2.578125 | 3 | [] | no_license | class ReplacePair:
def __init__(self, old_text, new_text):
self.old_text = old_text
self.new_text = new_text
| true |
60d54a3e36c7ca2586eecf8599ab699439f6f864 | Python | chenjb04/Python-AdvancedProgramming | /12-python对象的自省机制.py | UTF-8 | 423 | 3.0625 | 3 | [] | no_license | # -*- coding:utf-8 -*-
__author__ = 'ChenJiaBao'
__date__ = '2018/8/22 10:39'
# Introspection: the ability to query an object's internal structure at runtime
class Person(object):
    # Class-level attribute, shared by all instances and visible in
    # Person.__dict__ (printed below).  '小明' ("Xiao Ming") is a common
    # placeholder name.  A comment is used instead of a docstring so the
    # printed Person.__dict__ output is unchanged.
    name = '小明'
class Student(Person):
    # Inherits the class attribute `name` from Person and adds a per-instance
    # attribute, which is what shows up in instance.__dict__.
    def __init__(self, school_name):
        self.school_name = school_name
user = Student("北京")
print(user.__dict__)
print(Person.__dict__)
print(dir(user))
| true |
7e5a743ed0105f00abb2e0b7d3bb4720f964b3c7 | Python | shubham-automation/python-exercise | /dictionary-problems/concate_dictionaries.py | UTF-8 | 144 | 2.828125 | 3 | [] | no_license | dic1 = {1: 10, 2: 20}
dic2 = {3: 30, 4: 40}
dic3 = {5: 50, 6: 60}
dic4 = {}
dic4.update(dic1)
dic4.update(dic2)
dic4.update(dic3)
print(dic4)
| true |
684fa19684837c86859eac08d56438f0ea8a3771 | Python | shinobu9/CS1 | /1st sem/20. Алгоритм цикличного сдвига.py | UTF-8 | 754 | 3.78125 | 4 | [] | no_license | """ 20) Алгоритм циклического сдвига в массиве. Реализация циклом, без
срезов
Рассмотрим сдвиг влево.
Рассмотрим сдвиг влево.
Идея - самый первый элемент закидываем во временную переменную, смещаем
остальные и в последнюю ячейку кидаем первый.
"""
def cicle_move_left(array):
    """Rotate *array* one position to the left, in place.

    The first element is stashed in a temporary variable, every other
    element is shifted one slot down, and the stashed element is written
    into the last cell.  An empty list is left untouched (the original
    raised IndexError on it).
    """
    if not array:  # nothing to rotate
        return
    first = array[0]
    for i in range(1, len(array)):
        array[i - 1] = array[i]
    array[-1] = first  # index -1 addresses the last element
# Demo: in-place left rotation by one.
a = [1,2,3,4,5]
cicle_move_left(a)
print(a)
# Slice-based variant: rotate left by n positions (n read from stdin);
# n % len(a) keeps the split point within bounds.
n = int(input())
a = [1,2,3,4,5]
a = a[n%len(a):] + a[:n%len(a)]
print(a)
68ec6ee7ec48e897f035d7a18290ad934e3bc7c9 | Python | aubema/hablan | /read2DHT.py | UTF-8 | 603 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python3
import time
import board
import adafruit_dht
# Initial the dht device, with data pin connected to:
# dhtDevice = adafruit_dht.DHT22(board.D4)
# you can pass DHT22 use_pulseio=False if you wouldn't like to use pulseio.
# This may be necessary on a Linux single board computer like the Raspberry Pi,
# but it will not work in CircuitPython.
dhtDevice = adafruit_dht.DHT22(board.D7, use_pulseio=False)
temperature1_c = dhtDevice.temperature
dhtDevice = adafruit_dht.DHT22(board.D1, use_pulseio=False)
temperature2_c = dhtDevice.temperature
print("OK", temperature1_c, temperature2_c)
| true |
6acfe3d21257e97c4e4acf0ff7dbf862995f9d49 | Python | cyanlime/StudyPython | /py/update_versionnum.py | UTF-8 | 966 | 2.78125 | 3 | [] | no_license | import os
import shutil
import split
def make_version_path(path, version):
    """Return the on-disk name for rotation *version* of *path*.

    Version 0 is the live file itself; higher versions get a numeric
    suffix, e.g. ``web.log`` -> ``web.log.2``.
    """
    if version:
        return "{0}.{1}".format(path, version)
    return path
def rotate(path, max_keep, version=0):
    """Rotate versioned copies of *path*, logrotate-style.

    `path` becomes `path.1`, `path.1` becomes `path.2`, and so on
    (recursively), keeping at most *max_keep* versions; the oldest copy
    is deleted to make room.

    Raises IOError if the file for *version* does not exist.
    """
    old_path = make_version_path(path, version)
    if not os.path.exists(old_path):
        # Report the file that is actually missing (the original message
        # always named the base path, which is misleading for version > 0).
        raise IOError("'%s' doesn't exist" % old_path)
    new_path = make_version_path(path, version + 1)
    if os.path.exists(new_path):
        if version < max_keep - 1:
            # Free the target slot by rotating the newer versions first.
            rotate(path, max_keep, version + 1)
        else:
            # Oldest allowed version: drop it instead of rotating further.
            os.remove(new_path)
    shutil.move(old_path, new_path)
if __name__ == "__main__":
#split.print_tree('/Users/codemeow/GitHub/StudyPython')
file('/Users/codemeow/GitHub/StudyPython/web.log', "w")
rotate('/Users/codemeow/GitHub/StudyPython/web.log', 5, 0)
| true |
48dce982f68e6732d3baf9e8346f8b75edbd0ce3 | Python | WeersProductions/resolverflow | /util/count_resolved.py | UTF-8 | 760 | 2.625 | 3 | [
"MIT"
] | permissive | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
def count_total(dataset):
    """Print how many rows *dataset* contains, e.g. ``total: 123``."""
    row_count = dataset.count()
    print("total: {}".format(row_count))
def count_unresolved(dataset):
    # NOTE(review): the label says "unresolved", but the filter keeps rows
    # where `has_answer` is truthy (i.e. answered posts).  Either the label
    # or the filter is likely inverted -- confirm the intended semantics.
    print("unresolved: %s" % dataset.filter(col("has_answer")).count())
if __name__ == "__main__":
"""
Counts the amount of resolved and unresolved posts from the output dataset.
Run using: spark-submit --master yarn --deploy-mode cluster --conf spark.dynamicAllocation.maxExecutors=10 --conf spark.yarn.maxAppAttempts=1 --name dreamteam util/count_resolved.py 2> /dev/null
"""
spark = SparkSession.builder.getOrCreate()
df = spark.read.parquet("/user/***REMOVED***/StackOverflow/output_stackoverflow.parquet")
count_total(df)
count_unresolved(df)
| true |
4183d27c61eb08061c854ae91bf9bd3f6880838f | Python | prk9009/CV_Codes | /19.template_match.py | UTF-8 | 2,296 | 3.234375 | 3 | [] | no_license | #finding the location of a template image in a larger image. OpenCV
#comes with a function cv2.matchTemplate() for this purpose. It simply slides the template image over the input
#image (as in 2D convolution) and compares the template and patch of input image under the template image. Several
#comparison methods are implemented in OpenCV.
#If input image is of size (WxH) and template image is of size (wxh), output image will have a size of (W-w+1, H-h+1).
#Once you got the result, you can use cv2.minMaxLoc() function to find where is the maximum/minimum value. Take
#it as the top-left corner of rectangle and take (w,h) as width and height of the rectangle
# This box defines your template
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load the search image in grayscale and keep a pristine copy, because the
# rectangle drawing below mutates `img` on every iteration.
img = cv2.imread('messi5.jpg',0)
img2 = img.copy()
template = cv2.imread('template.jpg',0)
w, h = template.shape[::-1]  # shape is (rows, cols); reversed gives (w, h)
# All the 6 methods for comparison in a list
methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
            'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
for meth in methods:
    img = img2.copy()
    method = eval(meth)  # eval of a hard-coded constant name; fine for a demo
    # Apply template Matching
    res = cv2.matchTemplate(img,template,method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    # Draw the best-match rectangle on the working copy.
    cv2.rectangle(img,top_left, bottom_right, 255, 2)
    # Left: similarity map; right: the detected location in the image.
    plt.subplot(121),plt.imshow(res,cmap = 'gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122),plt.imshow(img,cmap = 'gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle(meth)
    plt.show()
##
##
##
#Template Matching with Multiple Objects
import cv2
import numpy as np
from matplotlib import pyplot as plt
img_rgb = cv2.imread('mario.png')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('mario_coin.png',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
# Keep every location whose normalized score clears the threshold, so all
# instances of the template are boxed, not just the single best match.
threshold = 0.8
loc = np.where( res >= threshold)
# np.where returns (rows, cols); reversing gives (x, y) point order for OpenCV.
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imwrite('res.png',img_rgb)
| true |
64df98f990d98f64d374b23e61fd17e0d5b319d2 | Python | deepakjangid123/Python-codes | /dfs.py | UTF-8 | 505 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 17:19:09 2018
@author: abhi
"""
# Adjacency-list representation of an undirected graph: every edge appears
# in both endpoints' neighbour lists.
graph = {
        'A' : ['B','S'],
        'B' : ['A'],
        'C' : ['D','E','F','S'],
        'D' : ['C'],
        'E' : ['C','H'],
        'F' : ['C','G'],
        'G' : ['F','S'],
        'H' : ['E','G'],
        'S' : ['A','C','G']
        }
def dfs(graph, node, visited):
    """Depth-first traversal of *graph* starting at *node*.

    Appends each newly reached vertex to *visited* in pre-order and
    returns that same list.
    """
    if node in visited:
        return visited
    visited.append(node)
    for neighbour in graph[node]:
        dfs(graph, neighbour, visited)
    return visited
#dfs(graph, 'A', []) | true |
afed1581e03a6b871e18796cde3afa09af94dde8 | Python | hanameee/Algorithm | /Fastcampus/exercise/Ch4-동적계획법/src/1915.py | UTF-8 | 438 | 2.625 | 3 | [] | no_license | from copy import deepcopy
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
arr = [[0]*(m+1)] + [list(map(int, list("0"+input().strip())))
for i in range(n)]
dp = deepcopy(arr)
max_size = 0
for i in range(1, n+1):
for j in range(1, m+1):
if dp[i][j]:
dp[i][j] = 1+min(dp[i-1][j-1], dp[i][j-1], dp[i-1][j])
max_size = max(dp[i][j], max_size)
print(max_size**2)
| true |
ec80b2beb4a18b5e620b88c89b2334b45043d631 | Python | stjordanis/autogluon | /core/tests/unittests/utils/decorators/test_presets.py | UTF-8 | 3,106 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | import unittest
from autogluon.core.utils.decorators import apply_presets
class TestPresets(unittest.TestCase):
    """Tests for the `apply_presets` decorator: resolving preset names
    (str / list / dict forms), merging multiple presets with later-wins
    semantics, user-argument precedence, alias expansion and the
    ValueError raised for unknown presets."""
    def test_presets(self):
        presets_dict = dict(preset_1=dict(a=2, b=3))
        @apply_presets(presets_dict, None)
        def get_presets(**kwargs):
            return kwargs
        # assert no presets works
        out = get_presets()
        assert len(out) == 0
        # assert no presets works with user-specified values
        out = get_presets(a=5)
        assert out["a"] == 5
        assert len(out) == 1
        # assert ValueError raised if unknown preset
        self.assertRaises(ValueError, get_presets, presets="invalid_preset")
        # assert presets == None works
        out = get_presets(presets=None)
        assert out["presets"] is None
        assert len(out) == 1
        # assert presets as str works
        out = get_presets(presets="preset_1")
        assert out["presets"] == "preset_1"
        assert out["a"] == 2
        assert out["b"] == 3
        assert len(out) == 3
        # assert presets as list works
        out = get_presets(presets=["preset_1"])
        assert out["presets"] == ["preset_1"]
        assert out["a"] == 2
        assert out["b"] == 3
        assert len(out) == 3
        # assert custom preset works
        custom_preset = dict(a=4, c=7)
        out = get_presets(presets=custom_preset)
        assert out["presets"] == custom_preset
        assert out["a"] == 4
        assert out["c"] == 7
        assert len(out) == 3
        # assert that multiple presets can be specified, and later ones overwrite earlier ones in shared keys
        out = get_presets(presets=["preset_1", custom_preset])
        assert out["presets"] == ["preset_1", custom_preset]
        assert out["a"] == 4
        assert out["b"] == 3
        assert out["c"] == 7
        assert len(out) == 4
        # assert ValueError raised if unknown preset in list of presets
        self.assertRaises(ValueError, get_presets, presets=["preset_1", "invalid_preset"])
        # assert that multiple presets can be specified, and later ones overwrite earlier ones in shared keys, but user-specified keys override presets
        out = get_presets(a=1, presets=["preset_1", custom_preset], d=None)
        assert out["presets"] == ["preset_1", custom_preset]
        assert out["a"] == 1
        assert out["b"] == 3
        assert out["c"] == 7
        assert out["d"] is None
        assert len(out) == 5
        presets_alias_dict = dict(
            preset_1_alias="preset_1",
            preset_invalid_alias="invalid_preset",
        )
        # Re-decorate with an alias table to exercise alias resolution.
        @apply_presets(presets_dict, presets_alias_dict)
        def get_presets(**kwargs):
            return kwargs
        # assert preset alias works
        out = get_presets(presets="preset_1_alias")
        assert out["presets"] == "preset_1_alias"
        assert out["a"] == 2
        assert out["b"] == 3
        assert len(out) == 3
        # assert ValueError raised if alias points to invalid preset
        self.assertRaises(ValueError, get_presets, presets="preset_invalid_alias")
| true |
b0a32a9754c01b05430ca75e394cbfb7ec982189 | Python | roblivesinottawa/python-basics | /loops.py | UTF-8 | 1,877 | 3.984375 | 4 | [] | no_license | # i = 1
# while i <= 5:
# print(i)
# i = i + 1
# print('Done!')
# secret_number = 9
# chances = 0
# limit = 3
#
# while chances < limit:
# guess = int(input('Guess: '))
# chances += 1
# if guess == secret_number:
# print('you won!')
# break
# else:
# print('sorry you failed')
# age = 34
# guess_age = 0
# guess_age_limit = 3
#
# while guess_age < guess_age_limit:
# guess = int(input('Guess: '))
# guess_age += 1
# if guess == age:
# print('You are right')
# break
# else:
# print('you are wrong')
# command = ""
# started = False
#
#
# while True:
# command = input("> ").lower()
# if command == "start":
# if started:
# print("Car is already started!")
# else:
# started = True
# print("Car started...")
# elif command == "stop":
# if not started:
# print("Car is already stopped")
# else:
# started = False
# print("Car stopped.")
# elif command == "help":
# print("""
# start - to start the car
# stop - to stop the car
# quit - to quit
# """)
# elif command == "quit":
# break
# else:
# print("Sorry I don't get that")
#***********************************************************
# for item in ["Python", "Javascript", "C+"]:
# print(item)
# for item in range(10):
# print(item)
# prices = [20, 40, 100]
#
# total = 0
# for price in prices:
# total += price
# print(f"Total: {total}")
# nested loops
# for x in range(4):
# for y in range(3):
# print(f" ({x}, {y})")
# numbers = [5, 2, 5, 2, 2]
# for x_count in numbers:
# print('x' * x_count)
# numbers = [2, 2, 2, 2, 2]
# for x_count in numbers:
# output = ''
# for count in range(x_count):
# output += 'x'
# print(output)
| true |
7d39c98c5d62611974576643fcfe9a4a17f08a67 | Python | majurski/teler | /standard-deviation.py | UTF-8 | 295 | 3.234375 | 3 | [] | no_license | from math import sqrt
p = [3,5,32,25, 239,11,12,2,2]
all = []
mean = round(sum(p)/len(p),0)
print("mean is", mean)
for i in range(len(p)):
b = (p[(i-1)] - mean)**2
all.append(b)
vari = sum(all)/len(p)-1
print("All sum", sum(all))
print(vari)
print("Standard dev is:", sqrt(vari)) | true |
4dabfa4fe72303adc5028e595ef9a32979ecf151 | Python | Leechikara/rl | /torch_rl/learners/LearnerLog.py | UTF-8 | 7,116 | 2.90625 | 3 | [] | no_license | from datetime import datetime,date,time
import os.path
import pickle
import pandas as pd
import visdom
import numpy as np
class LearnerLog:
    '''
    A log is composed of:
        - static key,value pairs (for example: hyper parameters of the experiment)
        - set of key,value pairs at each iteration
    Typical use is:
        log.add_static_value("learning_rate",0.01)
        for t in range(T):
            perf=evaluate_model()
            log.new_iteration()
            log.add_dynamic_value("perf",perf)
            log.add_dynamic_value("iteration",t)
    '''
    def __init__(self):
        self.svar={}     # static key -> value
        self.dvar=[]     # one (possibly nested) dict of dynamic values per iteration
        self.t=-1        # index of the current iteration (-1 = none started yet)
        self.scopes=[]   # stack of scope names prefixing dynamic keys
        self.file=None
        self.vis=None    # lazily-created visdom connection (see plot_line)
    def add_static_value(self,key,value):
        """Record a run-level constant (e.g. a hyper-parameter)."""
        self.svar[key]=value
    def new_iteration(self):
        """Start a new iteration; prints the previous iteration's values.

        The scope stack is reset -- scopes do not survive across iterations.
        """
        if (self.t>=0):
            print(self.dvar[self.t])
        self.t=self.t+1
        self.dvar.append({})
        self.scopes = []
    def push_scope(self, name):
        """Enter a named scope; subsequent dynamic keys are nested under it."""
        self.scopes.append(name)
    def pop_scope(self):
        """Leave the innermost scope and return its name."""
        return self.scopes.pop()
    def _get_dtable(self,scope,t):
        # Walk iteration t's nested dict down the given scope path.
        tt=self.dvar[t]
        for s in scope:
            tt=tt[s]
        return tt
    def add_dynamic_value(self,key,value):
        """Record *value* for the current iteration, nested under the active scopes."""
        tt=self.dvar[self.t]
        for s in self.scopes:
            if (not s in tt):
                tt[s]={}
            tt=tt[s]
        tt[key]=value
    def get_last_dynamic_value(self,key):
        # NOTE(review): builds a dotted key, but add_dynamic_value stores
        # *nested* dicts, so this lookup appears to raise KeyError whenever
        # any scope is active -- verify intended behaviour.
        key=".".join(self.scopes)+key
        return self.dvar[self.t][key]
    def get_column(self,key):
        """Return the top-level (unscoped) values of *key* over all iterations."""
        c=[]
        for d in self.dvar:
            c.append(d[key])
        return c
    def print_static(self):
        """Pretty-print the static key/value pairs."""
        print("===== STATIC VARIABLES =========")
        for i in self.svar:
            print(str(i)+" = "+str(self.svar[i]))
    def _generate_columns_names(self):
        # Union of all dotted column names over iterations 0..t-1.
        # NOTE(review): range(self.t) skips the current (last) iteration --
        # confirm this is intentional.
        columns={}
        scope=[]
        for t in range(self.t):
            tt=self.dvar[t]
            cc=self._generate_columns_names_from_dict(tt,scope)
            for kk in cc.keys():
                columns[kk]=1
        return columns
    def _generate_columns_names_from_dict(self,d,scope):
        # Recursively flatten nested dict keys into "scope1.scope2.key" names.
        # NOTE(review): with an empty scope the generated name gets a leading
        # "." ("".join + "." + k), which get_scoped_value then fails to
        # resolve -- verify against real logs.
        columns={}
        for k in d.keys():
            if (isinstance(d[k],dict)):
                scope.append(k)
                cc=self._generate_columns_names_from_dict(d[k],scope)
                for kk in cc.keys():
                    columns[kk]=1
                scope.pop()
            else:
                columns[".".join(scope)+"."+k]=1
        return columns
    def get_scoped_value(self,t,name):
        """Return the value of dotted *name* at iteration *t*, or None if absent."""
        scope=name.split(".")
        tt=self.dvar[t]
        for s in scope:
            if (not s in tt):
                return None
            tt=tt[s]
        return tt
    def save_file(self,filename=None,directory=None):
        """Pickle this log into directory/filename (timestamped name by default,
        re-generated until it does not collide with an existing file)."""
        if (directory is None):
            directory="logs"
        if (filename is None):
            filename=str(datetime.now()).replace(" ","_")+".log"
        while(os.path.isfile(directory+"/"+filename)):
            filename = str(datetime.now()).replace(" ", "_")+".log"
        print("Saving in file is " + directory+"/"+filename)
        pickle.dump( self, open( directory+"/"+filename, "wb" ) )
    def get_static_values(self):
        """Return the dict of static key/value pairs."""
        return self.svar
    def to_array(self):
        '''
        Transforms the dynamic values to an array: first row is the column
        names (plus "_iteration"), then one row per iteration with missing
        values as None.
        '''
        names = self._generate_columns_names()
        names["_iteration"] = 1
        retour = []
        cn = []
        for l in names:
            cn.append(l)
        retour.append(cn)
        for t in range(len(self.dvar)):
            cn = []
            for l in names:
                if (l == "_iteration"):
                    cn.append(t)
                else:
                    v = self.get_scoped_value(t, l)
                    cn.append(v)
            retour.append(cn)
        return retour
    def plot_line(self,column_names,win=None,opts={}):
        # NOTE(review): mutable default `opts={}` -- harmless here only
        # because opts is never mutated (it is copied into opts_ below).
        # Plots the given columns over iterations in visdom; returns the
        # window handle, or None when fewer than 2 iterations exist.
        if (len(self.dvar)<=1):
            return None
        if (self.vis is None):
            self.vis=visdom.Visdom()
        r=[]
        X=[]
        for t in range(len(self.dvar)):
            rr=[]
            for c in column_names:
                rr.append(self.get_scoped_value(t,c))
            r.append(rr)
            X.append(t)
        print(np.array(r).shape)
        print(column_names)
        print(np.array(X).ndim)
        # Caller-supplied opts override the generated legend.
        opts_={}
        opts_["legend"]=column_names
        for k in opts:
            opts_[k]=opts[k]
        print(opts_)
        return self.vis.line(X=np.array(X),Y=np.array(r),opts=opts_,win=win)
        #options={"legend":column_names}
    def to_extended_array(self):
        '''
        Transforms the dynamic values to an array, like to_array, but also
        repeats every static value on each row under a "_s_<key>" column.
        '''
        names = self._generate_columns_names()
        names["_iteration"] = 1
        for k in self.svar:
            names["_s_"+k]=1
        retour = []
        cn = []
        for l in names:
            cn.append(l)
        retour.append(cn)
        for t in range(len(self.dvar)):
            cn=[]
            for l in names:
                if (l.startswith('_s_')):
                    cn.append(self.svar[l[3:]])
                elif (l == "_iteration"):
                    cn.append(t)
                else:
                    v = self.get_scoped_value(t, l)
                    cn.append(v)
            retour.append(cn)
        return retour
    def to_dataframe(self):
        """Dynamic values as a pandas DataFrame (header row becomes columns)."""
        a = self.to_array()
        return pd.DataFrame(data=a[1:], columns=a[0])
    def to_extended_dataframe(self):
        """Dynamic + static values as a pandas DataFrame."""
        a = self.to_extended_array()
        return pd.DataFrame(data=a[1:], columns=a[0])
def logs_to_dataframe(filenames):
    """Load pickled LearnerLog files and merge them into one DataFrame.

    Each row carries `_log_idx` / `_log_file` identifying its source log;
    the columns are the union of all logs' columns, with values a given
    log never recorded left as None.
    """
    print("Loading %d files and building Dataframe" % len(filenames))
    arrays=[]
    for f in filenames:
        log=pickle.load(open(f,"rb"))
        arrays.append(log.to_extended_array())
    #Building the set of all columns + index per log
    indexes=[]
    all_columns={}
    for i in range(len(arrays)):
        index={}
        columns_names=arrays[i][0]
        for j in range(len(columns_names)):
            index[columns_names[j]]=j
            all_columns[columns_names[j]]=1
        indexes.append(index)
    retour=[]
    all_names=["_log_idx","_log_file"]
    for a in all_columns:
        all_names.append(a)
    for i in range(len(arrays)):
        arr=arrays[i]
        filename=filenames[i]
        for rt in range(len(arr)-1):
            t=rt+1  # skip each array's header row
            line=arr[t]
            new_line=[]
            # Pre-fill with None so columns missing from this log stay empty.
            for idx_c in range(len(all_names)):
                new_line.append(None)
            for idx_c in range(len(all_names)):
                column_name=all_names[idx_c]
                if (column_name == "_log_file"):
                    new_line[idx_c] = filename
                elif (column_name == "_log_idx"):
                    new_line[idx_c] = i
                elif (column_name in indexes[i]):
                    idx = indexes[i][column_name]
                    new_line[idx_c] = arr[t][idx]
            retour.append(new_line)
    return pd.DataFrame(data=retour,columns=all_names)
| true |
c63ba421a484e3fbdf575105ba49be39179b6668 | Python | mandalasindhu9/Python | /LogWatch.py | UTF-8 | 2,078 | 2.609375 | 3 | [] | no_license | import time
import os
import subprocess
import shutil
import sys
import atexit
# Interactive configuration, gathered at import time (Python 2 raw_input).
read_file = raw_input("Enter the name of the file to read: ")
write_file = raw_input("Enter the name of the file to write into: ")
# Module-level flag used by write_to_file()/send_email() to signal that a
# capture was mailed and monitoring should restart.
email_sent = False
LOG_LEVEL = raw_input("Enter Log Level(FATAL|ERROR|WARN|INFO): ")
email_address = raw_input("Enter email address: ")
# Tail the monitored log and copy matching lines into the capture file.
def write_to_file():
    """tail -f style loop: copy lines containing LOG_LEVEL from read_file
    into write_file, and hand the capture to send_email() once more than
    10 seconds have elapsed (see the two send_email call sites below)."""
    file_to_read = open(read_file,"r")
    try:
        file_to_write = open(write_file,"r+b")
    except IOError:
        print ('File not found. So creating new one')
        file_to_write = open(write_file,"w")
    start_time = time.time()
    end_time=0
    global email_sent
    email_sent = False
    while 1:
        # Intended to restart monitoring once send_email() signals that a
        # capture was mailed: reset the timer and reopen the recreated file.
        if email_sent is True:
            start_time = time.time()
            email_sent = False
            print "Begin Monitoring Again..."
            file_to_write = open(write_file,"r+b")
        where = file_to_read.tell()
        line = file_to_read.readline()
        if not line:
            # No new data yet: wait, rewind to the same offset, and retry.
            time.sleep(1)
            end_time = time.time()
            file_to_read.seek(where)
            if end_time - start_time > 10:
                send_email(file_to_write)
        else:
            diff = end_time-start_time
            if diff<10:
                end_time = time.time()
                # Only lines at the requested log level are captured.
                if LOG_LEVEL in line:
                    file_to_write.write(line)
                continue
            else:
                send_email(file_to_write)
def send_email(file_to_write):
email_sent=True
file_to_write.close()
global email_address
# Close the file, email it to user, rm the file, create new one in that dir
if os.stat(write_file).st_size != 0:
print "Content found. Emailing user"
os.system('mail -v -s "Monitor Logs Test" %s < %s' % (email_address, write_file))
print 'Removing existing file'
os.system('rm %s' % write_file)
print 'Creating new file'
os.system('touch %s' %write_file)
def exit_handler():
    # Final cleanup: delete the capture file when the interpreter exits.
    print "Exiting.... Removing %s" %write_file
    os.system('rm %s' % write_file)
def main():
    try:
        write_to_file()
    finally:
        # NOTE(review): the atexit hook is only registered once
        # write_to_file() returns or raises (e.g. on Ctrl-C), not at
        # startup -- confirm this ordering is intentional.
        atexit.register(exit_handler)
if __name__=='__main__':
    main()
| true |