blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bbf3700a3af65869254932dcbe658e1cf565c409 | Python | lydiacupery/leet-code | /accounts-merge/python/solution.py | UTF-8 | 1,635 | 3.09375 | 3 | [] | no_license | from collections import defaultdict
class Solution:
    def accountsMerge(self, accounts):
        """
        Merge accounts that share at least one email address.

        accounts: list of [name, email1, email2, ...] rows; the same email
        always belongs to the same person, so shared emails link accounts.
        Returns one [name, sorted emails...] row per merged group.
        """
        email_to_id = {}    # email -> integer node id for union-find
        email_to_name = {}  # email -> owner name
        next_id = 0
        uf = UnionFind()
        # Build the graph: union every email with the account's first email.
        for account in accounts:
            name = account[0]
            for email in account[1:]:
                email_to_name[email] = name
                if email not in email_to_id:
                    email_to_id[email] = next_id
                    next_id += 1
                uf.union(email_to_id[email], email_to_id[account[1]])
        # Group emails by their union-find root and format the answer.
        groups = defaultdict(list)
        for email, node_id in email_to_id.items():
            groups[uf.find(node_id)].append(email)
        return [[email_to_name[emails[0]]] + sorted(emails)
                for emails in groups.values()]


class UnionFind:
    """Union-find over integer ids with union by rank and path compression."""

    def __init__(self, size=10001):
        # size is now a parameter; the default preserves the original
        # fixed capacity, so existing callers are unaffected.
        self.parent = list(range(size))
        self.rank = [0] * size

    def find(self, i):
        """Return the root of i's set, compressing the path along the way.

        Fix: the original mixed an iterative loop with a redundant recursive
        call per step; this is the standard two-pass iterative form.
        """
        root = i
        while self.parent[root] != root:
            root = self.parent[root]
        # Second pass: point every node on the path directly at the root.
        while self.parent[i] != root:
            self.parent[i], i = root, self.parent[i]
        return root

    def union(self, a, b):
        """Merge the sets containing a and b, attaching by rank."""
        root_a = self.find(a)
        root_b = self.find(b)
        if root_a == root_b:
            return
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] = root_a
        elif self.rank[root_b] > self.rank[root_a]:
            self.parent[root_a] = root_b
        else:
            # Equal ranks: pick root_b as parent and bump its rank.
            self.parent[root_a] = root_b
            self.rank[root_b] += 1
| true |
658b7db4358574771fcfa1487f5466f5d278e149 | Python | cheeyeo/learn_more_python_the_hard_way | /chapter14_double_linked_lists/test_dll.py | UTF-8 | 3,633 | 3.828125 | 4 | [] | no_license | # Example of testing DoubleLinkedList using built-in unittest module
import unittest
from unittest import TestCase
from dllist import *
class TestDoubleLinkedList(TestCase):
  """Unit tests for the DoubleLinkedList in the project-local ``dllist`` module.

  Assumes DoubleLinkedList exposes begin/end node references (with .prev,
  .next, .value) plus push/pop/shift/unshift/count/remove/first/last/get —
  confirm against dllist.py. Each test builds its own fresh list.
  """
  def test_push(self):
    """push appends at the tail; begin/end links stay consistent."""
    dll = DoubleLinkedList()
    dll.push(1)
    # Single element: begin and end are the same node with no neighbours.
    self.assertEqual(dll.begin, dll.end, 'Begin should equal end')
    self.assertIsNone(dll.begin.prev, "Begin's prev should be none")
    self.assertIsNone(dll.end.next)
    self.assertEqual(dll.count(), 1)
    dll.push(2)
    # Two elements: end follows begin, outer links remain None.
    self.assertNotEqual(dll.begin, dll.end, 'Begin should not equal end')
    self.assertEqual(dll.end.prev, dll.begin)
    self.assertIsNone(dll.begin.prev)
    self.assertIsNone(dll.end.next)
    self.assertEqual(dll.count(), 2)
  def test_pop(self):
    """pop removes from the tail and returns the value; empty list pops None."""
    dll = DoubleLinkedList()
    self.assertIsNone(dll.pop())
    dll.push(1)
    self.assertEqual(dll.count(), 1)
    self.assertEqual(dll.pop(), 1)
    # Popping the only element must clear both end references.
    self.assertEqual(dll.count(), 0)
    self.assertIsNone(dll.begin)
    self.assertIsNone(dll.end)
    # Test case for more than 1 element
    dll.push(1)
    dll.push(2)
    self.assertEqual(dll.count(), 2)
    self.assertEqual(dll.pop(), 2)
    self.assertEqual(dll.count(), 1)
    self.assertEqual(dll.begin, dll.end)
    self.assertEqual(dll.pop(), 1)
    self.assertEqual(dll.count(), 0)
  def test_shift(self):
    """shift inserts at the head (newest value becomes begin)."""
    dll = DoubleLinkedList()
    dll.shift(1)
    self.assertEqual(dll.count(), 1)
    self.assertEqual(dll.begin, dll.end)
    dll.shift(2)
    self.assertEqual(dll.count(), 2)
    self.assertNotEqual(dll.begin, dll.end)
    self.assertEqual(dll.begin.value, 2)
    self.assertEqual(dll.end.value, 1)
  def test_unshift(self):
    """unshift removes from the head and returns the value; None when empty."""
    dll = DoubleLinkedList()
    self.assertIsNone(dll.unshift())
    dll.push(1)
    self.assertEqual(dll.unshift(), 1)
    dll.push(1)
    dll.push(2)
    self.assertEqual(dll.unshift(), 1)
    self.assertEqual(dll.unshift(), 2)
  def test_count(self):
    """count reflects the number of pushed elements."""
    dll = DoubleLinkedList()
    self.assertEqual(dll.count(), 0)
    dll.push(1)
    dll.push(2)
    self.assertEqual(dll.count(), 2)
  def test_remove(self):
    """remove deletes by value; apparently returns the index it was removed
    from — confirm that contract against dllist."""
    colors = DoubleLinkedList()
    colors.push("Cobalt")
    colors.push("Zinc White")
    colors.push("Nickle Yellow")
    colors.push("Perinone")
    # colors.dump("before removing cobalt")
    self.assertEqual(colors.remove("Cobalt"), 0)
    # colors.dump("before removing perinone")
    self.assertEqual(colors.remove("Perinone"), 2)
    # colors.dump("after removing perinone")
    self.assertEqual(colors.remove("Nickle Yellow"), 1)
    self.assertEqual(colors.remove("Zinc White"), 0)
  def test_first(self):
    """first returns the head value without removing it."""
    dll = DoubleLinkedList()
    dll.push(1)
    self.assertEqual(dll.first(), 1)
    dll.push(2)
    self.assertEqual(dll.first(), 1)
    # shift prepends, so the new head value is -100.
    dll.shift(-100)
    self.assertEqual(dll.first(), -100)
  def test_last(self):
    """last returns the tail value without removing it."""
    dll = DoubleLinkedList()
    dll.push(1)
    self.assertEqual(dll.last(), 1)
    dll.push(2)
    self.assertEqual(dll.last(), 2)
    # shift only touches the head; the tail is still 2.
    dll.shift(-100)
    self.assertEqual(dll.last(), 2)
  def test_get(self):
    """get returns the value at an index, or None when out of range."""
    colors = DoubleLinkedList()
    colors.push("Vermillion")
    self.assertEqual(colors.get(0), "Vermillion")
    colors.push("Sap Green")
    self.assertEqual(colors.get(0), "Vermillion")
    self.assertEqual(colors.get(1), "Sap Green")
    colors.push("Cadmium Yellow Light")
    self.assertEqual(colors.get(0), "Vermillion")
    self.assertEqual(colors.get(1), "Sap Green")
    self.assertEqual(colors.get(2), "Cadmium Yellow Light")
    self.assertEqual(colors.pop(), "Cadmium Yellow Light")
    self.assertEqual(colors.get(0), "Vermillion")
    self.assertEqual(colors.get(1), "Sap Green")
    # Index past the end yields None rather than raising.
    self.assertEqual(colors.get(2), None)
    colors.pop()
    self.assertEqual(colors.get(0), "Vermillion")
    colors.pop()
    self.assertEqual(colors.get(0), None)
if __name__ == '__main__':
unittest.main() | true |
ed98bce20ebd07fb957a7e47782815db92a2f501 | Python | linshaoyong/leetcode | /python/hash_table/0884_uncommon_words_from_two_sentences.py | UTF-8 | 561 | 3.328125 | 3 | [
"MIT"
] | permissive | class Solution(object):
def uncommonFromSentences(self, A, B):
"""
:type A: str
:type B: str
:rtype: List[str]
"""
ws = {}
for w in A.split():
ws[w] = ws.get(w, 0) + 1
for w in B.split():
ws[w] = ws.get(w, 0) + 1
return [k for k, v in ws.items() if v == 1]
def test_uncommon_from_sentences():
    """Smoke-test Solution.uncommonFromSentences on the LeetCode example."""
    solver = Solution()
    result = solver.uncommonFromSentences("this apple is sweet", "this apple is sour")
    assert len(result) == 2
    assert "sweet" in result
    assert "sour" in result
| true |
8f875cd34886b28beaa6e9c585870aeb3051995d | Python | leepand/AIserver | /AIFlysdk/model/pred/.ipynb_checkpoints/fib-checkpoint.py | UTF-8 | 296 | 2.75 | 3 | [] | no_license | import json
def load_model(model_dir):
    """Factory hook: build and return a fib model rooted at model_dir."""
    model = fib(model_dir)
    return model
class fib:
    """Toy "model" whose predict(n) returns the n-th Fibonacci number
    (1, 1, 2, 3, 5, ... with predict(0) == predict(1) == 1)."""

    def __init__(self, model_dir):
        # Fix: store the directory that was passed in (the original
        # unconditionally assigned None, discarding the argument).
        self.model_dir = model_dir

    def predict(self, n):
        """Return the n-th Fibonacci number for n >= 0.

        Rewritten iteratively: the original double recursion was O(2^n)
        and recursed forever on negative input; values for n >= 0 are
        unchanged.
        """
        if n < 0:
            raise ValueError("n must be non-negative")
        a, b = 1, 1
        for _ in range(n):
            a, b = b, a + b
        return a
| true |
385b3112ff9ea249688d85b1994f1f20cd1595a8 | Python | euanwm/EVASDK_Random_Examples | /coDrive_pump_handler.py | UTF-8 | 4,111 | 2.984375 | 3 | [] | no_license | """
coDrive connected to an Eva with the following pinout:
coDrive Eva Base IO
V IN - -> Pin 1
V IN + -> Pin 10
D1 -> Pin 9
D2 -> Pin 11
D3 -> Pin 13
0V -> Pin 12
A -> Pin 17
AG -> Pin 24
"""
import evasdk
from time import sleep
class PumpHandler:
    """
    Wraps an evasdk Eva object to drive a coDrive pump head over the robot's
    base IO (pinout in the module docstring).

    The thresholds and pin names below could be constructor arguments; the
    defaults match the wiring documented at the top of this file.
    """
    def __init__(self, eva_obj):
        # eva_obj: an evasdk.Eva instance; all GPIO goes through it.
        self.robot = eva_obj
        self.ambient_pressure_voltage = 3  # Volts read at rest (unused below)
        self.acceptable_suction_threshold = 2  # Volts: below this = vacuum OK... wait, see is_suction_stable
        self.acceptable_pressure_threshold = 5  # Volts: pressure considered stable under this
        self.pressure_pin = 'd0'  # Digital output 1 (coDrive D1)
        self.vacuum_pin = 'd1'  # Digital output 2 (coDrive D2)
        self.motor_pin = 'd2'  # Digital output 3 (coDrive D3)
        self.tdx_pin = 'a0'  # Analog input 1 (pressure transducer)
        self.wait_on_pump = 3  # Seconds to let pressure/vacuum build
    def my_lock(self):
        """ Return True when this client holds the Eva lock (required to toggle outputs). """
        lock_call = self.robot.lock_status()
        if lock_call['owner'] == 'you' and lock_call['status'] == 'locked':
            return True
        else:
            return False
    def get_tdx_voltage(self):
        """ Returns the voltage at the pressure transducer rounded to 1 decimal place. """
        return round(self.robot.data_snapshot()['global.inputs'][self.tdx_pin], 1)
    def is_pressure_stable(self):
        """ Pressure is relative voltage above a certain level.
        NOTE(review): the check is `< threshold`, i.e. stable while the
        transducer voltage stays UNDER 5 V — confirm the sensor polarity. """
        if self.get_tdx_voltage() < self.acceptable_pressure_threshold:
            return True
        else:
            return False
    def is_suction_stable(self):
        """ Suction is relative voltage below a certain level.
        NOTE(review): the check is `> threshold` (voltage ABOVE 2 V) —
        confirm polarity against is_pressure_stable. """
        if self.get_tdx_voltage() > self.acceptable_suction_threshold:
            return True
        else:
            return False
    def is_pump_running(self):
        """ Checks the motor output pin is currently set active. """
        if self.robot.gpio_get(self.motor_pin, 'output'):
            return True
        else:
            return False
    def suction_on(self):
        """
        Sets appropriate pins for suction mode and starts motor pump.
        Waits wait_on_pump seconds for a vacuum to build; returns True when
        the transducer reports stable suction. Requires the Eva lock; if the
        lock is not held the pins are untouched and None is returned.
        """
        if self.my_lock():
            # Pressure valve off before vacuum valve on — ordering matters.
            self.robot.gpio_set(self.pressure_pin, False)
            self.robot.gpio_set(self.vacuum_pin, True)
            self.pump_run()
            sleep(self.wait_on_pump)
            if self.is_suction_stable():
                return True
            else:
                return False
    def suction_off(self):
        """ Toggles the suction valve pin off and stops the motor pump. """
        if self.my_lock():
            self.robot.gpio_set(self.vacuum_pin, False)
            self.pump_stop()
    def pressure_on(self):
        """
        Sets appropriate pins for pressure mode and starts motor pump.
        Waits wait_on_pump seconds for pressure to build; returns True when
        stable, False otherwise, None when the Eva lock is not held.
        """
        if self.my_lock():
            # Vacuum valve off before pressure valve on.
            self.robot.gpio_set(self.vacuum_pin, False)
            self.robot.gpio_set(self.pressure_pin, True)
            self.pump_run()
            sleep(self.wait_on_pump)
            if self.is_pressure_stable():
                return True
            else:
                return False
    def pressure_off(self):
        """ Toggles the pressure valve pin off and stops the motor pump. """
        if self.my_lock():
            self.robot.gpio_set(self.pressure_pin, False)
            self.pump_stop()
    def pump_run(self):
        """ Drives the motor pin high to run the pump (no lock check here). """
        self.robot.gpio_set(self.motor_pin, True)
    def pump_stop(self):
        """ Drives the motor pin low to stop the pump (no lock check here). """
        self.robot.gpio_set(self.motor_pin, False)
if __name__ == '__main__':
    # TODO: fill in the Eva IP and an API token before running; with both
    # left as None, evasdk.Eva(IP, TOKEN) will fail to connect.
    IP = None
    TOKEN = None
    robot = evasdk.Eva(IP, TOKEN)
    codrive_unit = PumpHandler(robot)
    # Demo sequence: blow, then suck, then release — each mode prints the
    # True/False stability result returned by the handler.
    with robot.lock():
        print(codrive_unit.pressure_on())
        sleep(2)
        print(codrive_unit.suction_on())
        sleep(2)
        codrive_unit.suction_off()
| true |
3bf12fb447f30666b38598f96391524813f8dc40 | Python | JohamSMC/python-Kattis | /autori.py | UTF-8 | 89 | 3.109375 | 3 | [] | no_license | author=input()
# Print the first letter of each hyphen-separated name part, with no
# separator and no trailing newline (Kattis "autori").
initials = "".join(part[0] for part in author.split("-"))
print(initials, end="")
| true |
593ead46953eff320e1bb75cbaa3ca848db4b9d8 | Python | Cijams/pyDataStructures | /pyGraph/PyGraphTest.py | UTF-8 | 1,048 | 2.8125 | 3 | [] | no_license | import unittest
from pyGraph import PyGraph
pg = PyGraph.PyGraph()
class MyTestCase(unittest.TestCase):
    """Exploratory tests for the project-local PyGraph.

    NOTE(review): all three methods mutate the single module-level `pg`
    instance, so outcomes depend on test execution order; most checks are
    print-based inspection rather than assertions.
    """
    def test_graph(self):
        """Exercise add_edge/clear/remove_edge; output is inspected manually."""
        self.assertEqual(True, True)
        pg.add_edge('a', 'f')
        # print(pg)
        pg.clear()
        # print(pg)
        pg.add_edge('a', 'c')
        pg.add_edge('c', 'b')
        pg.add_edge('c', 'e')
        pg.add_edge('c', 'd')
        pg.add_edge('b', 'e')
        pg.add_edge('r', 'h')
        # print(pg)
        print()
        pg.remove_edge('a', 'c')
        #print(pg)
        # Re-add the removed edge and show the restored graph.
        pg.add_edge('a', 'c')
        print(pg)
        print()
    def test_paths(self):
        """Print whether a path a->d exists after adding a-e and e-d."""
        pg.add_edge('a', 'e')
        pg.add_edge('e', 'd')
        print(pg.detect_path('a', 'd'))
    def test_bfs(self):
        """Rebuild a known graph and run BFS from 'a' (prints only)."""
        pg.clear()
        pg.add_edge('a', 'c')
        pg.add_edge('c', 'b')
        pg.add_edge('c', 'e')
        pg.add_edge('c', 'd')
        pg.add_edge('b', 'e')
        pg.add_edge('r', 'h')
        print(pg)
        pg.breadth_first_search('a')
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| true |
2b155a58003898b382dd0dd632e74cfad495c386 | Python | ksarathkumarreddy/CompletePython | /12_Python_Intro_End/6Functions.py | UTF-8 | 125 | 3.5625 | 4 | [] | no_license | def sq_num(num):
    '''
    Return the square of the given number.

    :param num: the numeric value to square
    :return: num raised to the power of 2
    '''
    return num**2
print(sq_num(2)) | true |
7572f5b60ca8ecd95df04aebeb075e0ef255cb03 | Python | uprateek77/greyatom-python-for-data-science | /Greyatom-Project/code.py | UTF-8 | 1,611 | 3 | 3 | [
"MIT"
] | permissive | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
#Step:1
census = np.concatenate([data,new_record],axis=0)
print(data.shape)
print(census.shape)
#Step:2
age=census[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = round(np.mean(age),2)
age_std = round(np.std(age),2)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
#Step:3
race_0= census[census[:,2]==0]
race_1= census[census[:,2]==1]
race_2= census[census[:,2]==2]
race_3= census[census[:,2]==3]
race_4 = census[census[:,2]==4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
least = min(len_0,len_1,len_2,len_3,len_4 )
if(least==len_0):
minority_race = 0
elif(least==len_1):
minority_race = 1
elif(least==len_2):
minority_race = 2
elif(least==len_3):
minority_race = 3
else:
minority_race = 4
print(minority_race)
#step:4
senior_citizens = census[census[:,0]>60]
working_hours_sum=senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
print(working_hours_sum)
print(round(avg_working_hours,2))
#Step:5
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(round(avg_pay_high,2))
print(round(avg_pay_low,2))
| true |
177d2b5e9fe965eb44815802cfb7fb2edad55c05 | Python | LX97/face-transformer-1 | /transformer.py | UTF-8 | 5,169 | 2.65625 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import math
import copy
from data import SPECIAL_TOKENS
class LearnedPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
self.init_()
def init_(self):
nn.init.normal_(self.emb.weight, std = 0.02)
def forward(self, x):
n = torch.arange(x.shape[1], device = x.device)
return self.emb(n)[None, :, :]
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=200):
super(PositionalEmbedding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
#pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.shape[1], :]
return self.dropout(x)
class FaceTransformer(nn.Module):
def __init__(
self,
input_size,
hidden_size=512,
num_layers=8,
num_heads=8,
dropout=0.1,
max_seq_len=200,
special_tokens=SPECIAL_TOKENS
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_heads = num_heads
self.dropout = dropout
self.max_seq_len = max_seq_len
self.special_tokens = special_tokens
self.embedding = nn.Linear(self.input_size, self.hidden_size)
self.token_embedding = nn.Parameter(torch.randn(len(self.special_tokens), self.hidden_size))
self.pos_embedding = PositionalEmbedding(self.hidden_size, dropout=self.dropout, max_len=self.max_seq_len)
encoder_layers = nn.TransformerEncoderLayer(self.hidden_size, self.num_heads, self.hidden_size * 4, self.dropout)
self.encoder = nn.TransformerEncoder(encoder_layers, self.num_layers)
self.norm = nn.LayerNorm(self.hidden_size)
self.pretrain_head = nn.Linear(self.hidden_size, self.input_size)
def forward(
self,
arrays,
seq_idxs,
seq_label_idxs=None
):
array_embeddings = self.embedding(arrays)
all_embeddings = torch.cat([self.token_embedding, array_embeddings])
idx_offset = min(self.special_tokens.values())
embedded = all_embeddings[seq_idxs - idx_offset]
embedded = self.pos_embedding(embedded)
encoded = self.encoder(embedded)
encoded = self.norm(encoded)
if seq_label_idxs is None:
return encoded
preds = self.pretrain_head(encoded[seq_label_idxs])
return preds
class CosineWithRestarts(torch.optim.lr_scheduler._LRScheduler):
"""
Cosine annealing with restarts.
Parameters
----------
optimizer : torch.optim.Optimizer
T_max : int
The maximum number of iterations within the first cycle.
eta_min : float, optional (default: 0)
The minimum learning rate.
last_epoch : int, optional (default: -1)
The index of the last epoch.
"""
def __init__(self,
optimizer,
T_max,
eta_min = 0.,
last_epoch = -1,
factor = 1.):
# pylint: disable=invalid-name
self.T_max = T_max
self.eta_min = eta_min
self.factor = factor
self._last_restart = 0
self._cycle_counter = 0
self._cycle_factor = 1.
self._updated_cycle_len = T_max
self._initialized = False
super(CosineWithRestarts, self).__init__(optimizer, last_epoch)
def get_lr(self):
"""Get updated learning rate."""
# HACK: We need to check if this is the first time get_lr() was called, since
# we want to start with step = 0, but _LRScheduler calls get_lr with
# last_epoch + 1 when initialized.
if not self._initialized:
self._initialized = True
return self.base_lrs
step = self.last_epoch + 1
self._cycle_counter = step - self._last_restart
lrs = [
(
self.eta_min + ((lr - self.eta_min) / 2) *
(
np.cos(
np.pi *
((self._cycle_counter) % self._updated_cycle_len) /
self._updated_cycle_len
) + 1
)
) for lr in self.base_lrs
]
if self._cycle_counter % self._updated_cycle_len == 0:
# Adjust the cycle length.
self._cycle_factor *= self.factor
self._cycle_counter = 0
self._updated_cycle_len = int(self._cycle_factor * self.T_max)
self._last_restart = step
return lrs | true |
80eacfe95a887d4da263cb84faf2860d2b5f0dfa | Python | dmdang/ECE-40862-Python-for-Embedded-Systems | /dangd_lab0/program5.py | UTF-8 | 660 | 3.640625 | 4 | [] | no_license | class sumFinder:
def findIndex(self, list, target):
dictionary = {}
for i, value in enumerate(list):
if target - value in dictionary:
return(dictionary[target - value], i)
if i == 6:
return(999, 999)
dictionary[value] = i
def main():
    """Prompt for a target sum and report the matching index pair."""
    numbers = [10, 20, 10, 40, 50, 60, 70]
    target = int(input("What is your target number? "))
    idx1, idx2 = sumFinder().findIndex(numbers, target)
    # (999, 999) is the finder's "no pair found" sentinel.
    if idx1 == 999 and idx2 == 999:
        print("index1=N/A, index2=N/A")
    else:
        print("index1=" + str(idx1) + "," + " index2=" + str(idx2))
main()
| true |
6df2911280f89163253d554349eb2558e936749a | Python | boris-ulyanov/adventOfCode | /2018/day-09/2.py | UTF-8 | 1,310 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python
import sys
from collections import defaultdict
# NOTE(review): Python 2 code (xrange, print statement) — do not run under py3.
# Advent of Code 2018 day 9 part 2: marble game high score.
# 427 players; last marble is worth 70723 points
PLAYERS_COUNT = 427
LAST_WORTH = 70723 * 100
# test
#
# PLAYERS_COUNT = 10
# LAST_WORTH = 1618
#
# 9 players; last marble is worth 25 points: high score is 32
# 10 players; last marble is worth 1618 points: high score is 8317
# 13 players; last marble is worth 7999 points: high score is 146373
# 17 players; last marble is worth 1104 points: high score is 2764
# 21 players; last marble is worth 6111 points: high score is 54718
# 30 players; last marble is worth 5807 points: high score is 37305
data = [0]  # circle of marbles; index `cur` is the current marble
cur = 0
points = defaultdict(int)  # player number -> score
for x in xrange(1, LAST_WORTH + 1):
    l = len(data)
    if (x % 23) == 0:
        # Scoring marble: keep it, and also take the marble 7 to the left.
        player = ((x - 1) % PLAYERS_COUNT) + 1
        points[player] += x
        pos = cur - 7
        if pos < 0:
            pos += l
        # points[player] += data[pos]
        points[player] += data.pop(pos)
        # data = data[:pos] + data[pos + 1:]
        # data = [data[i] for i in xrange(l) if i != pos]
        cur = pos
        if cur == l:
            cur = 0
        continue
    # Normal marble: insert two positions clockwise of the current one.
    # NOTE(review): list.insert/pop are O(n) per move; a deque rotation
    # would make this linear overall.
    pos = cur + 2
    if pos > l:
        pos -= l
    data.insert(pos, x)
    cur = pos
# print points
print 'Answer', max(points.values())
# results
# Answer 399745
| true |
ed04fdcadd5849f6d258ec293827f97a51e80a53 | Python | hubbardgary/AdventOfCode | /day09.py | UTF-8 | 2,643 | 4.1875 | 4 | [
"MIT"
] | permissive | # --- Day 9: All in a Single Night ---
#
# Every year, Santa manages to deliver all of his presents in a single night.
#
# This year, however, he has some new locations to visit; his elves have provided him the distances between every pair
# of locations. He can start and end at any two (different) locations he wants, but he must visit each location exactly
# once. What is the shortest distance he can travel to achieve this?
#
# For example, given the following distances:
#
# London to Dublin = 464
# London to Belfast = 518
# Dublin to Belfast = 141
# The possible routes are therefore:
#
# Dublin -> London -> Belfast = 982
# London -> Dublin -> Belfast = 605
# London -> Belfast -> Dublin = 659
# Dublin -> Belfast -> London = 659
# Belfast -> Dublin -> London = 605
# Belfast -> London -> Dublin = 982
# The shortest of these is London -> Dublin -> Belfast = 605, and so the answer is 605 in this example.
#
# What is the distance of the shortest route?
#
#
# --- Part Two ---
#
# The next year, just to show off, Santa decides to take the route with the longest distance instead.
#
# He can still start and end at any two (different) locations he wants, and he still must visit each location exactly
# once.
#
# For example, given the distances above, the longest route would be 982 via (for example) Dublin -> London -> Belfast.
#
# What is the distance of the longest route?
import itertools
# NOTE(review): the file handle is never closed; a `with open(...)` block
# would be safer. Requires "day09_input" in the working directory.
distances = open("day09_input").read().split("\n")
vertices = {}
# Build dictionary of dictionaries mapping distance between each location
for distance in distances:
    # Each line looks like "London to Dublin = 464" -> ["London","Dublin","464"]
    d = distance.replace(" to ", " ").replace(" = ", " ").split(" ")
    if len(d) == 3:
        if d[0] not in vertices:
            vertices[d[0]] = {}
        if d[1] not in vertices:
            vertices[d[1]] = {}
        # Distances are symmetric, so record both directions.
        vertices[d[0]][d[1]] = int(d[2])
        vertices[d[1]][d[0]] = int(d[2])
# This is the Hamiltonian Path problem, which is NP-complete.
# So for once I can brute force it without feeling guilty.
shortest_path = float("inf")
longest_path = 0
possible_routes = list(itertools.permutations(list(vertices.keys())))
for route in possible_routes:
    # Sum the edge weights along this ordering of all locations.
    route_len = 0
    current_loc = ""
    next_loc = ""
    for loc in route:
        if current_loc == "":
            # First stop: nothing travelled yet.
            current_loc = loc
            continue
        next_loc = loc
        route_len += vertices[current_loc][next_loc]
        current_loc = next_loc
    if route_len < shortest_path:
        shortest_path = route_len
    if route_len > longest_path:
        longest_path = route_len
print("Shortest path: {0}".format(shortest_path))
print("Longest path: {0}".format(longest_path))
| true |
ca0e7071b0e1f63cd5a422407c09699ab49e36a4 | Python | kfigaj/FizzBuzzSimple | /FizzBuzzSimple/helper.py | UTF-8 | 836 | 4.65625 | 5 | [] | no_license | def fizzbuzz(number):
    """
    Fizz buzz is a counting game where each player speaks a number from 1 to n
    in sequence, but with a few exceptions:
    - if the would-be spoken number is divisible by 3 the player must say fizz
      instead
    - if the would-be spoken number is divisible by 5 the player must say buzz
      instead
    - if the would-be spoken number is divisible by 3 and 5 the player must say
      fizzbuzz instead

    Returns a list of length ``number`` containing ints and/or the strings
    'fizz', 'buzz', 'fizzbuzz'. Raises ValueError when number < 1 (and
    whatever int() raises for non-numeric input).
    """
    number = int(number)
    if number < 1:
        raise ValueError()
    output = []
    # Walk 1..number, substituting words for multiples of 3 and/or 5.
    for i in range(1, number + 1):
        if i % 3 == 0:
            if i % 5 == 0:
                # Divisible by both 3 and 5.
                value = 'fizzbuzz'
            else:
                value = 'fizz'
        elif i % 5 == 0:
            value = 'buzz'
        else:
            value = i
        output.append(value)
    return output
| true |
20e8f6dcd296dc774a09441968bee3b26f1798a0 | Python | rodriporon/IPC2_Proyecto2_201902781 | /ListaSimple-LAPTOP-6GBBDGTO.py | UTF-8 | 1,338 | 3.5625 | 4 | [] | no_license | from NodoLista import nodoLista
class listaSimple(nodoLista):
    """Singly linked list built from project-local nodoLista nodes.

    Assumes nodoLista exposes `.siguiente` (next) and `.valor` (value)
    attributes — confirm against NodoLista.py. `self.valor` caches the
    string rendering of the list and is refreshed on every append.
    """
    def __init__(self):
        super().__init__()
        self.cabeza = nodoLista()  # sentinel head node; real data starts at cabeza.siguiente
        self.contador = 0  # element count
        self.valor = self.__str__()  # "[]" at construction time
        self.frecuencia = 1
        self.indice_frecuencia = None
    def agregar(self, nuevo_nodo):
        """Append a node at the tail (O(n): walks the whole chain)."""
        nodo = self.cabeza
        while(nodo.siguiente):
            nodo = nodo.siguiente
        nodo.siguiente = nuevo_nodo
        self.contador += 1
        # Refresh the cached string form after every insertion.
        self.valor = self.__str__()
    def get(self, i):
        """Return the node at 0-based index i, or None when out of range."""
        if (i >= self.contador):
            return None
        nodo = self.cabeza.siguiente
        n = 0
        while(nodo):
            if (n == i):
                return nodo
            nodo = nodo.siguiente
            n += 1
    def __getitem__(self, i):
        # Enables lista[i] indexing syntax.
        return self.get(i)
    def length(self):
        """Return the number of elements."""
        return self.contador
    def primero(self):
        """Return the first node (or None when empty)."""
        return self.get(0)
    def ultimo(self):
        """Return the last node (or None when empty)."""
        return self.get(self.length() - 1)
    def __str__(self):
        # Render as "[v1, v2, ...]"; note get() inside the loop makes this O(n^2).
        resultado = "["
        for i in range(self.length()):
            nodo = self.get(i)
            if (i == self.length()-1):
                resultado += '{}'.format(nodo.valor)
                break
            resultado += '{}, '.format(nodo.valor)
        resultado += "]"
        return resultado
| true |
266a8300ee04a3cb052ecd3742bc69fee9cf90d0 | Python | Deys2000/Basic-Python-Tutorial | /25-28 TurtleIntermediateGraphics/Sunflower.py | UTF-8 | 635 | 3.34375 | 3 | [] | no_license | print('''python program #26
Hashir - June 1 2018
This is not according to the book
I did program #26 along with 25 as it was an extension
This program is just me experimenting with the Turtle module
''')
import turtle
# Draw a sunflower: red centre, ring of yellow filled petals, ring of
# green outlined petals. Opens a turtle graphics window.
t = turtle.Pen()
# Filled red centre circle.
t.begin_fill()
t.color(1,0,0)
t.circle(30)
t.end_fill()
t.up()
t.left(0)
t.forward(20)
t.down()
# Six yellow filled petals, rotating 60 degrees between each.
t.color(1,1,0)
for x in range(0,6):
    t.begin_fill()
    t.left(60)
    t.circle(16)
    t.end_fill()
    t.up()
    t.forward(40)
    t.down()
# Six green outlined petals in an outer ring.
t.color(0,1,0)
for x in range(0,6):
    t.left(60)
    t.circle(16)
    t.up()
    t.forward(40)
    t.down()
| true |
503c95d6aab0b3a89beeaa879441aeeb7661b8f2 | Python | Bahram3110/d6_w2_t1 | /task6.py | UTF-8 | 2,035 | 3.40625 | 3 | [] | no_license | names = ['Isa', 'Murat', 'Azim', 'Aikerim']
# Dinner-invitation exercise: invite guests, swap one out, expand the list,
# then trim back down to two. All user-facing strings are Russian.
# Invite the initial four guests.
print('Дорогой гость, ' + names[0] + ' приглaшаю Вас на обед!')
print('Дорогой гость, ' + names[1] + ' приглaшаю Вас на обед!')
print('Дорогой гость, ' + names[2] + ' приглaшаю Вас на обед!')
print('Дорогой гость, ' + names[3] + ' приглaшаю Вас на обед!')
# One guest cannot come; replace them at the same position.
ne_pridet = names.pop(3)
print(ne_pridet + ' прийти не сможет')
names.insert(3, 'Uluk')
print('Дорогой гость, ' + names[3] + ' приглaшаю Вас на обед!')
print('Дорогие гости нас будет больше!')
# A bigger table: add three more guests at the end.
names.insert(4, 'Ilyas')
names.insert(5, 'Asan')
names.insert(6, 'Nursultan')
# print(names)
print('Дорогой гость, ' + names[4] + ' приглaшаю Вас на обед!') #dop gosti
print('Дорогой гость, ' + names[5] + ' приглaшаю Вас на обед!') #dop gosti
print('Дорогой гость, ' + names[6] + ' приглaшаю Вас на обед!') #dop gosti
# Only two guests fit after all: un-invite from the end down to two.
print('Hа обед приглашаются всего два гостя')
ne_pridet1 = names.pop(6)
print('Сожалею ' + ne_pridet1 + ', ' + 'но вы прийти не сможете')
ne_pridet2 = names.pop(5)
print('Сожалею ' + ne_pridet2 + ', ' + 'но вы прийти не сможете')
ne_pridet3 = names.pop(4)
print('Сожалею ' + ne_pridet3 + ', ' + 'но вы прийти не сможете')
ne_pridet4 = names.pop(3)
print('Сожалею ' + ne_pridet4 + ', ' + 'но вы прийти не сможете')
ne_pridet5 = names.pop(2)
print('Сожалею ' + ne_pridet5 + ', ' + 'но вы прийти не сможете')
# The remaining two are still invited. NOTE(review): the second message
# below has a typo ("пприглашение") in the runtime string.
print('Дорогой гость, ' + names[0] + ' приглашение на обед все еще в силе!') #v sile
print('Дорогой гость, ' + names[1] + ' пприглашение на обед все еще в силе!') #v sile
# Empty the guest list and show it is now [].
names.clear()
print(names)
| true |
8c1f26ece2a4aec780e245c79821defc31312dcf | Python | Goessi/CS_Grad_Courses | /6-0001/ps4/4.py | UTF-8 | 1,555 | 3.46875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 14:34:52 2018
MIT 6-0001 lecture 5
@author: JingQIN
"""
# Lecture demo: tuple literals, indexing, slicing, and swap via unpacking.
te = ()
t = (2,"mit",3)
t[0]
print((2,"mit",3)+(5,6))
print(t[1:2]) # extra comma means a tuple with one elements
print(t[1:3])
print(len(t))
#t[1] = 4 tuples are immutable
x = 5
y = 7
# Swap without a temporary via tuple unpacking.
(x, y) = (y, x)
def quotient_and_remainder(x, y):
    """Return the (floor quotient, remainder) pair of x divided by y."""
    # divmod(x, y) is exactly (x // y, x % y).
    return divmod(x, y)
(quot, rem) = quotient_and_remainder(4, 5)
def get_data(aTuple):
    """Given (number, word) pairs, return a tuple of
    (smallest number, largest number, count of distinct words)."""
    nums = []
    seen_words = []
    for entry in aTuple:
        nums.append(entry[0])
        # Track each word only the first time it appears.
        if entry[1] not in seen_words:
            seen_words.append(entry[1])
    return (min(nums), max(nums), len(seen_words))
# Demo calls for get_data: expect (1, 7, 2) then (2008, 2014, 5).
test = ((1,'a'),(2,'b'),(1,'a'),(7,'b'))
(a,b,c) = get_data(test)
print('a:',a,'b:',b,'c:',c)
tswift = ((2014,'Katy'),(2014,'Harry'),(2012,'Jake'),(2010,'Taylor'),(2008,'Joe'))
(min_year, max_year, num_people) = get_data(tswift)
print("From", min_year, "to", max_year, \
      "Taylor Swift wrote songs about", num_people, 'people!')
# list
# Lecture demo of list mutation and string<->list conversion.
L = [2,1,3,6,3,7,0]
L.remove(2)
del(L[1])
L.extend([2,3])
s = "I<3 cs"
list(s)
s.split('<')
L = ['a','b','c']
''.join(L)
'_'.join(L)
L1 = [9,6,0,3]
L2 = sorted(L1)  # sorted() returns a new list; L1 unchanged
L3 = L1.sort()  # list.sort() sorts in place and returns None, so L3 is None
# NOTE(review): L3 is None here, so the next line raises AttributeError and
# crashes the script — presumably L1.reverse() (or L2) was intended.
L4 = L3.reverse()
def remove_dups(L1, L2):
    """Buggy lecture example: removes L1's elements that appear in L2, but
    mutates L1 while iterating over it, so elements after each removal are
    skipped (the corrected copy-based version follows below)."""
    for e in L1:
        if e in L2:
            L1.remove(e)
L1 = [1,2,3,4]
L2 = [1,2,5,6]
# Because of the iteration bug this leaves L1 == [2, 3, 4], not [3, 4].
remove_dups(L1, L2)
def remove_dups(L1,L2):
    """Remove from L1 (in place) every element that also appears in L2.

    Iterates over a snapshot of L1 so in-place removal is safe; only the
    first occurrence of each duplicate is removed per pass element.
    """
    for item in list(L1):
        if item in L2:
            L1.remove(item)
L1 = [1,2,3,4]
L2 = [1,2,5,6]
remove_dups(L1,L2) | true |
792425f0de79e8ebcb4f006d44bfb6a09639fcd1 | Python | ReiNoIkari/-Python--Alignment-Algorithms | /brute_force.py | UTF-8 | 690 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env python
"""
Naive Algorithm implementation
"""
SEQUENCE = "ATGGCGATGGACAGCATGTTAGTCAGTGACAGATCGTGCAGCAGAT"
MOTIF = "AGAT"
def naive(sequence, motif):
    """
    Naive approach: print every position where motif occurs in sequence.

    Fixes: the original measured len(MOTIF) (the module-level global)
    instead of the motif parameter, and started scanning at index 1,
    silently missing a match at position 0.
    """
    motif_length = len(motif)
    for i in range(0, len(sequence) - motif_length + 1):
        # Compare the window starting at i against the whole motif.
        if sequence[i:i + motif_length] == motif:
            print("Match found at pos: %d" % (i))
#MAIN
# Announce the inputs, then run the naive scan (prints match positions).
print("Finding MOTIF in SEQUENCE:")
print("SEQUENCE = ", SEQUENCE)
print("MOTIF = ", MOTIF)
print("Naive algorithm result:")
naive(SEQUENCE, MOTIF)
| true |
87e63d37e19d3d2268d9ead8bb985227cfbf7a3d | Python | NALLEIN/OpenVINO-example | /super_resolution/Model/SR_x3/PSNR.py | UTF-8 | 689 | 2.578125 | 3 | [] | no_license | import numpy
import math
import cv2
import argparse
#python .\PSNR.py -img1 './sr_1.png' -img2 './test1.png'
def get_args():
    """Build the CLI parser and return the parsed arguments.

    Both image-path options are required; argparse exits with an error
    message if either is missing.
    """
    parser = argparse.ArgumentParser(
        conflict_handler='resolve',
        description='eg: python3 -img1 file1 -img2 file1 -m 1 -c 0')
    # Declare the two required image arguments from a small spec table.
    for short_flag, long_flag, help_text in (
            ('-img1', '--image_1', 'image file_1 URL'),
            ('-img2', '--image_2', 'image file_2 URL')):
        parser.add_argument(short_flag, long_flag, required=True, help=help_text)
    return parser.parse_args()
def main():
    """Read the two images named on the command line and print their PSNR."""
    args = get_args()
    # NOTE(review): cv2.imread returns None for a missing/unreadable path;
    # cv2.PSNR would then fail — consider validating before computing.
    im1 = cv2.imread(args.image_1)
    im2 = cv2.imread(args.image_2)
    print(cv2.PSNR(im1,im2))
if __name__ == '__main__':
    main()
aff1a803733cfb636b958d5374a45840a092ce46 | Python | wsh32/pie_mp2 | /software/plot.py | UTF-8 | 4,276 | 3.25 | 3 | [] | no_license | """
plot.py: Multiprocessing compatible 3D plotting and visualization
"""
from multiprocessing_logger import configure_client_logger
from multiprocessing import Process, Event, Queue
import logging
import queue
import matplotlib
import matplotlib.pyplot as plt
class Plotter3D:
    """
    Creates a new process that asynchronously plots 3D points arriving on
    self.data_queue. Points are 3-tuples (x, y, z) or 4-tuples with a
    matplotlib color as the 4th element. Call kill() to stop the process.
    """
    def __init__(self, logger_queue=None, color='blue'):
        # logger_queue: optional multiprocessing logging queue for the child.
        # color: default matplotlib color for points without an explicit one.
        self.logger = logging.getLogger("main")
        self.logger_queue = logger_queue
        self.color = color
        self.data_queue = Queue()
        self.kill_event = Event()
        self.process = Process(target=self._run)
        self.logger.info("Starting plotter process")
        self.process.start()
    def kill(self):
        """Signal the plotting process to exit and wait for it to join."""
        self.logger.info("Killing plotter process")
        self.kill_event.set()
        self.process.join()
    def _run(self):
        """Child-process loop: poll the queue and scatter points until killed."""
        # Setup logger
        if self.logger_queue is not None:
            configure_client_logger(self.logger_queue)
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        # Running bounds of all plotted points, used for the box aspect.
        min_x = None
        max_x = None
        min_y = None
        max_y = None
        min_z = None
        max_z = None
        # ax.set_aspect('equal')
        x = []
        y = []
        z = []
        while not self.kill_event.is_set():
            try:
                data = self.data_queue.get_nowait()
            except queue.Empty:
                # No data yet: let the GUI event loop breathe, then retry.
                plt.pause(0.01)
                continue
            color = self.color
            if len(data) == 4:
                # If length of 4, use 4th point as color
                color = data[3]
            elif len(data) != 3:
                # Expect that data has size 3
                self.logger.warning(f"Datapoint {data} has invalid size, skipping")
                continue
            if min_x is not None:
                # Extend the running bounds with the new point.
                min_x = min(min_x, data[0])
                max_x = max(max_x, data[0])
                min_y = min(min_y, data[1])
                max_y = max(max_y, data[1])
                min_z = min(min_z, data[2])
                max_z = max(max_z, data[2])
            else:
                # First point initializes all bounds.
                min_x = data[0]
                max_x = data[0]
                min_y = data[1]
                max_y = data[1]
                min_z = data[2]
                max_z = data[2]
            # Clamp each extent to at least 1 to avoid a degenerate aspect.
            aspect_ratio = (max(max_x - min_x, 1),
                            max(max_y - min_y, 1),
                            max(max_z - min_z, 1))
            # NOTE(review): logged at warning level on every point — debug
            # seems more appropriate here.
            self.logger.warning(f"New aspect ratio: {aspect_ratio}")
            ax.set_box_aspect(aspect_ratio)
            self.logger.debug(f"Plotting data:\t{data}")
            x.append(data[0])
            y.append(data[1])
            z.append(data[2])
            ax.scatter3D(data[0], data[1], data[2], marker='.', color=color)
            """
            if len(x) < 3:
                continue
            ax.plot_trisurf(x, y, z, linewidth=0.2)
            """
            plt.show(block=False)
class Plotter2D:
    """
    Creates a new process that asynchronously scatter-plots 2D points
    (yaw angle, distance) arriving on self.data_queue. Mirrors Plotter3D
    without the bounds/aspect bookkeeping. Call kill() to stop.
    """
    def __init__(self, logger_queue=None, color='blue'):
        # logger_queue: optional multiprocessing logging queue for the child.
        # color: matplotlib color used for every plotted point.
        self.logger = logging.getLogger("main")
        self.logger_queue = logger_queue
        self.color = color
        self.data_queue = Queue()
        self.kill_event = Event()
        self.process = Process(target=self._run)
        self.logger.info("Starting plotter process")
        self.process.start()
    def kill(self):
        """Signal the plotting process to exit and wait for it to join."""
        self.logger.info("Killing plotter process")
        self.kill_event.set()
        self.process.join()
    def _run(self):
        """Child-process loop: poll the queue and scatter (x, y) until killed."""
        # Setup logger
        if self.logger_queue is not None:
            configure_client_logger(self.logger_queue)
        plt.xlabel("Yaw angle (degrees)")
        plt.ylabel("Distance (inches)")
        while not self.kill_event.is_set():
            try:
                data = self.data_queue.get_nowait()
            except queue.Empty:
                # No data yet: let the GUI event loop breathe, then retry.
                plt.pause(0.01)
                continue
            color = self.color
            self.logger.debug(f"Plotting data:\t{data}")
            plt.scatter(data[0], data[1], marker='.', color=color)
        plt.show(block=False)
| true |
cc13b657c47212a836e9883fcb662a21f2bf5726 | Python | mamihackl/NaiveBayes | /MBlearner.py | UTF-8 | 3,252 | 3.28125 | 3 | [] | no_license | #!/opt/python-2.6/bin/python2.6
# Mami Sasaki and Nat Byington
# # LING 572 HW3 Multi-variate Bernoulli Learner
# Create a model file based on training data.
# Args: training file, P_delta, Cond_delta, model file
# Imports
import sys
import re
import math
# Classes
class Vector:
    ''' A single document/instance: its name, gold label and feature container. '''
    name = ''          # instance name
    true_class = ''    # gold standard class label
    sys_class = ''     # system assigned class label (filled in at decode time)
    features = False   # data structure holding the instance's features

    def __init__(self, name, clss, features):
        self.name, self.true_class, self.features = name, clss, features
class Vector_List:
    ''' A collection of Vector objects plus corpus-level statistics for a
    multi-variate Bernoulli Naive Bayes learner.

    Fixes over the original:
    - vlist/classes/term_set/term_per_class were *class* attributes, so every
      Vector_List instance (and repeated use) shared and accumulated the same
      mutable state; they are now per-instance.
    - a feature repeated inside one document incremented its (term, class)
      count more than once; for a multi-variate Bernoulli model each document
      may contribute at most one occurrence per (term, class).
    '''

    def __init__(self):
        self.vlist = []             # all parsed Vector instances
        self.classes = {}           # class label -> document count
        self.term_set = set()       # every feature seen in the corpus
        self.term_per_class = {}    # (feature, class) -> document frequency

    def add_vectors(self, data_file):
        ''' Parse an open training file, one "name class feat count ..." line per
        document, creating a Vector per line and updating corpus counts. '''
        for line in data_file.readlines():
            feature_set = set()
            n, c = re.match(r'(^[\S]+) ([\S]+) ', line).group(1, 2)
            for f in re.findall(r'([A-Za-z]+) [0-9]+', line):
                if f not in feature_set:
                    # Count each (term, class) at most once per document.
                    self.term_per_class[(f, c)] = self.term_per_class.get((f, c), 0) + 1
                feature_set.add(f)
                self.term_set.add(f)
            self.vlist.append(Vector(n, c, feature_set))
            self.classes[c] = self.classes.get(c, 0) + 1

    def output_to_model(self, p_delta, cond_delta, model_file):
        ''' Write add-delta smoothed probabilities (and their log10) to model_file:
        first one prior line per class, then one conditional line per (term, class). '''
        # Prior probabilities P(c).
        for c in self.classes:
            prob = (p_delta + self.classes[c]) / float(p_delta * len(self.classes) + len(self.vlist))
            logprob = math.log10(prob) if prob != 0 else 0.0
            model_file.write(c + ' ' + str(prob) + ' ' + str(logprob) + '\n')
        # Conditional probabilities P(t | c); denominator is 2*delta + class doc count.
        for c in self.classes:
            for t in self.term_set:
                prob = (cond_delta + self.term_per_class.get((t, c), 0)) / float(2 * cond_delta + self.classes[c])
                logprob = math.log10(prob) if prob != 0 else 0.0
                model_file.write(t + ' ' + c + ' ' + str(prob) + ' ' + str(logprob) + '\n')
# Main
# argv: training_file P_delta Cond_delta model_file
P_DELTA = float(sys.argv[2])       # add-delta smoothing for class priors
COND_DELTA = float(sys.argv[3])    # add-delta smoothing for conditional probabilities
training_vectors = Vector_List()
# Context managers so both files are closed even if parsing/writing fails
# (the original leaked both handles).
with open(sys.argv[1]) as training:
    training_vectors.add_vectors(training)
with open(sys.argv[4], 'w') as model_file:
    training_vectors.output_to_model(P_DELTA, COND_DELTA, model_file)
| true |
6b3a12c3daff46c4839f8b9a952f247f04e07857 | Python | Nitesh101/Nitesh_old_backup | /unittest_DOC/pdfs/python_assign/pro_2_2.py | UTF-8 | 150 | 3.421875 | 3 | [] | no_license | val1=input("enter any value")
# NOTE(review): this is Python 2 code (print statements, evaluating input());
# val1 is read by an input() call just above this chunk.
val1=str(val1)  # normalize whatever input() evaluated to into a string
print list(val1)  # the value's characters as a list
print tuple(val1)  # the same characters as a tuple
val2=input("enter another value")
dict={}  # NOTE(review): shadows the builtin dict
dict[val1]=val2  # one-entry mapping: first value -> second value
print dict
| true |
cf7c2ac90ae0d7862c523a4e742c0d25f6690cf5 | Python | yangrencong/web_spiders | /DT/xpathlmxl.py | UTF-8 | 931 | 3.109375 | 3 | [] | no_license | import requests
from lxml import etree
# Scrape article titles and paragraph text from the blog front page with lxml XPath.
link = "http://www.santostang.com/"
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36",
    "Host":
    "www.santostang.com"
}
r = requests.get(link ,headers = headers)
html = etree.HTML(r.text)
title_list = html.xpath('//h1[@class = "post-title"]/a/text()')
infor_list = html.xpath('//*[@id="main"]/div/div[1]/article[2]/div/p/text()')
print(title_list)
print(infor_list)
#//*[@id="main"]/div/div[1]/article[4]/div/p
#//*[@id="main"]/div/div[1]/article[5]/div/p
#//*[@id="main"]/div/div[1]/article[1]/div/p
# Walk the first five articles and print each one's concatenated paragraphs.
for i in range(1,6):
    infor_l = html.xpath('//*[@id="main"]/div/div[1]/article[' + str(i) +']/div/p/text()') #xpath returns a list of strings here
    infor_s = ''.join(infor_l) #join the list into a single string
    print("第%s项:"%(i) )
    print(infor_s)
| true |
878d771a3b050a13f513e48e6acecfce8f6c272c | Python | music2z/pythonfinance | /test.py | UTF-8 | 1,817 | 3.0625 | 3 | [] | no_license | import pandas as pd
import pandas_datareader.data as web
from stock_price import StockPrice
import matplotlib.pyplot as plt
pd.set_option('display.width', 400)

# df = web.DataReader(['010130.KS', '132030.KS'], 'yahoo', '2000-01-01')
# shift by one month
# df.tshift(freq='M', periods=1)

# 132030 : KODEX gold futures (H)
sr_gold = StockPrice.get_price('132030', 'month', '1000')['Close']
sr_gold.name = 'Gold'


def get_corr(code):
    # Correlation between this stock's monthly closes and gold; 0 when the
    # series is too short (< 60 months) to be meaningful.
    sr_stock = StockPrice.get_price(code, 'month', '1000')['Close']
    if sr_stock.count() < 60:
        return 0
    df = pd.concat([sr_gold, sr_stock], axis=1)
    return df.corr().iloc[0, 1]


# Stock master list keyed by ticker code (the Korean column name means "ticker code").
df = pd.read_excel('stock_list.xls', dtype={'종목코드': str})
df.index = df['종목코드']
df.drop('종목코드', axis=1, inplace=True)

# Per-ticker correlations (precomputed once, then loaded from corr.xls).
# df_corr = pd.DataFrame([], columns=['code', 'corr'])
# for code in df['종목코드']:
#     corr = get_corr(code)
#     df_corr.loc[len(df_corr)] = [code, corr]
# df_corr.to_excel('corr.xls')

df_corr = pd.read_excel('corr.xls', dtype={'code': str})
df_corr.index = df_corr['code']
df_corr.drop('code', axis=1, inplace=True)

# df_merge = pd.merge(df, df_corr, how='right', left_on='종목코드', right_on='code')
df_merge = pd.merge(df, df_corr, how='right', left_index=True, right_index=True)
df_sort = df_merge.sort_values('corr', ascending=False)
# print(df_sort[['기업명', 'corr']].head(20))

# Plot one candidate ticker against gold on shared axes.
sr_stock = StockPrice.get_price('131970', 'month', '1000')['Close']
sr_stock.name = 'stock'

sr_stock.plot(subplots=True, figsize=(9, 7))
sr_gold.plot(subplots=True, figsize=(9, 7))
plt.legend()
plt.show()

# tot_cnt = df_corr.count()
# cnt = df_corr[df_corr['corr'] > 0.8].count()
# print(tot_cnt, cnt)
# print(df_sort[df_sort['corr'] > 0.8])
7ad8585fb6e1733e66ec54991a56ff8a340ef89f | Python | gluwein/APS | /0824/swea_1221_5일차 - GNS.py | UTF-8 | 1,247 | 3.1875 | 3 | [] | no_license | # zero ~ nin까지 수가 규ㅣㄱ없이 정렬되어있다.
# 기초 체계를 만든다.
# 변수를 List화 하고 count_0~10까지 정한다.
# if 와 count를 사용한다.
# 각 변수의 갯수를 알고 그것들을 반복한다.
# zro one two~ thr for fiv 순으로 젇렬을 한다.
import sys
sys.stdin = open("input.txt", "r")

# Numeric order of the three-letter digit words; list index == digit value.
GNS_ORDER = ['ZRO', 'ONE', 'TWO', 'THR', 'FOR', 'FIV', 'SIX', 'SVN', 'EGT', 'NIN']

n = int(input())  # number of test cases
for i in range(1, n + 1):
    k = input()  # per-case header line; its content is not used
    m = list(input().split())
    # Sorting by each word's digit value replaces the original ten
    # copy-pasted scan loops; identical words are interchangeable, so the
    # output matches the original ZRO..NIN bucket ordering exactly.
    result = sorted(m, key=GNS_ORDER.index)
    print('#{}'.format(i))
    # NOTE(review): as in the original, the number line is not terminated
    # before the next '#case' header is printed -- confirm against the
    # judge's expected output format.
    for word in result:
        print(word, end=" ")
c269cf7ba9f0e87b483f2f4f7ee0da18df6829ae | Python | SeaEagleI/question-answering | /obsolete/models/pointer_network.py | UTF-8 | 2,738 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
# Output layer: Ptr-Net
# Output layer: Ptr-Net
class PointerNetwork(nn.Module):
    def __init__(self, pass_hidden_dim, question_hidden_dim, attn_size=75, dropout=0.2):
        """ Pointer Network
        Args:
            pass_hidden_dim(int): size of input
        Input:
            - **H_passage** of shape `(passage_legth, batch, pass_hidden_dim)`: a float tensor in which we determine
              the importance of information in the passage regarding a question
            - **U_question** of shape `(question_length, batch, question_hidden_dim)`: a float tensor containing question
              representation
        Output:
            - start(torch.tensor of shape (batch_size, passage_length, 1)): start position of the answer
            - end(torch.tensor of shape (batch_size, passage_length, 1)): end position of the answer
        """
        # NOTE(review): the `dropout` argument is accepted but never used in this class.
        super(PointerNetwork, self).__init__()
        # for c, ha*  (attention projections over passage and decoder state)
        self.Whp = nn.Linear(pass_hidden_dim, attn_size, bias=False)
        self.Wha = nn.Linear(question_hidden_dim, attn_size, bias=False)
        self.v = nn.Linear(attn_size, 1, bias=False)
        self.cell = nn.GRUCell(pass_hidden_dim, question_hidden_dim, False)
        # for rQ  (question self-attention used to build the initial decoder state)
        self.Wuq = nn.Linear(question_hidden_dim, attn_size, bias=False)
        self.v1 = nn.Linear(attn_size, 1, bias=True)

    def get_initial_state(self, u_question):
        # Attention Pooling 0: u_question => rQ
        # Softmax over dim 0 (the question-length axis), then a weighted sum.
        s = self.v1(torch.tanh(self.Wuq(u_question)))
        a = F.softmax(s, 0)
        rQ = (a * u_question).sum(0)
        return rQ

    def forward(self, h_passage, u_question, batch_first=False):
        # Reshape to (length, batch, dim) when inputs arrive batch-first.
        if batch_first:
            h_passage = h_passage.transpose(0, 1)
            u_question = u_question.transpose(0, 1)
        # ha0 = rQ: [1, batch_size, question_hidden_dim]
        ha0 = self.get_initial_state(u_question)
        # Attention Pooling 1: ha0, h_passage => start_logits, c
        # Whp(h_passage) is computed once and reused for both pooling passes.
        Wh = self.Whp(h_passage)
        s1 = self.v(torch.tanh(Wh + self.Wha(ha0)))
        start_logits = s1.transpose(0, 1)  # shape[pass_len,batch_size,1] => shape[batch_size,pass_len,1]
        a1 = F.softmax(s1, 0)
        c = (a1 * h_passage).sum(0)
        # RNN (GRU): c, ha0 => ha1
        ha1 = self.cell(c, ha0)
        # Attention Pooling 2: ha1, h_passage => end_logits (No need to compute ha2)
        s2 = self.v(torch.tanh(Wh + self.Wha(ha1)))
        end_logits = s2.transpose(0, 1)  # shape[pass_len,batch_size,1] => shape[batch_size,pass_len,1]
        # Return start & end logits
        return start_logits, end_logits
| true |
dffc00af241a6dd106e83c70b1c8915a311f336f | Python | Carter-Co/she_codes_python | /turtles/test.py | UTF-8 | 565 | 3.6875 | 4 | [] | no_license | from datetime import datetime
def convert_mmddyyyy_date(date):
'''Takes a date in the format mm/dd/yyyy and converts it to a datetime object.
Args:
date: string of a date in the mm/dd/yyyy format.
Returns: a datetime object.
'''
return datetime.strptime(date, '%m/%d/%Y')
# Quick manual check of the converter.
x=convert_mmddyyyy_date("08/07/2021")
print(x)
def read_csv_file(file_name):
    '''Reads a csv file and returns the data as a list.

    Args:
        file_name: a string representing the path and name of a csv file.
    Returns: a list of rows, each row a list of column strings.
    '''
    # Local import so the module's top-level imports stay untouched.
    import csv
    # newline='' is the csv-module convention for correct quoting/newlines.
    with open(file_name, newline='') as csv_file:
        return list(csv.reader(csv_file))
| true |
28d7952debbf0a6872c04e35b461dc7768ba64c0 | Python | HussainGynai/Robocup-Gameplay-Prototype | /Explicit_assignment_approach/gameplay.py | UTF-8 | 2,055 | 3.296875 | 3 | [] | no_license | import classes
from typing import List, Set, Optional, Tuple, Type, TypeVar
OUR_ROBOTS = [1,2,3,4] #using a reduced number of robots to show role replacing
def play_selector() -> classes.Play:
    """Pick the play to run.

    Should just be normal situational analysis; for now it always returns
    PassToSeeker.  (The original placed this text *after* the return
    statement, where it was unreachable dead code instead of a docstring.)
    """
    return classes.PassToSeeker()
def get_priority(role: classes.Role) -> int:
    """Map a role's RolePriority enum to a sortable rank (1 = highest priority)."""
    priority = role.priority
    if priority is classes.RolePriority.HIGH:
        return 1
    if priority is classes.RolePriority.MEDIUM:
        return 2
    return 3
def role_assignment(roles: List[classes.Role]) -> None:
    """Assign robots from OUR_ROBOTS to roles, highest priority first; when
    robots run out, steal one from a lower-priority role as a replacement."""
    count = 0
    assigned_robots = []
    # Highest-priority roles first (get_priority: 1 = HIGH).
    roles = sorted(list(roles), key=lambda role: get_priority(role))
    for role in roles: # would be some eval function to properly assign robots
        new_count = count
        # NOTE(review): `count` is never advanced inside this loop, so the
        # membership test fails after the first append and the while body
        # runs at most once per role -- confirm this is intended.
        while count < len(OUR_ROBOTS) and (OUR_ROBOTS[count] not in assigned_robots):
            role.assign_robot(OUR_ROBOTS[count])
            assigned_robots.append(OUR_ROBOTS[count])
            print('Robot', role.robot, end=': ')
            role.tick().tick()
            new_count = new_count + 1
            if new_count >= len(OUR_ROBOTS):
                break
        if new_count >= len(OUR_ROBOTS): #if we are out of robots then go back over the list using a lower priority replacement
            roles_rev = sorted(list(roles), key=lambda role: get_priority(role), reverse = True)
            for replacement in roles_rev:
                # NOTE(review): stray no-op string literal, likely leftover debug marker.
                'here'
                if get_priority(replacement) > get_priority(role):
                    # Take the lower-priority role's robot and leave it unassigned.
                    role.assign_robot(replacement.robot)
                    replacement.assign_robot(None)
        count = count + 1
    # NOTE(review): misplaced string below seems to describe gameplay(), not
    # this function; it executes as a no-op expression.
    """
    communicates to planning and rest of codebase which robots should do what through ROS
    """
def gameplay():
    """Select a play and tick it a couple of times, printing each tick result."""
    play = play_selector().tick()
    # Bounded loop so the demo doesn't last forever.
    for _ in range(2):
        print(next(play))  # tick through play


gameplay()
e630ab1c2a11360a7bace0b63ea08ad104fa6329 | Python | SafonovMikhail/python_000577 | /001719StepPyStudyJr/StepPyStudyJr_lesson05_02_datetime_01_20210307.py | UTF-8 | 1,899 | 4.09375 | 4 | [
"Apache-2.0"
] | permissive | import datetime
# Interactive birthday utility: leap-year check, age group, or age in seconds.
print("---------------------------------------")
print("Enter 1 if you want to know about the year (365 or 366 days).")
print("Enter 2 if you want to know about the age group.")
print("Enter 3 if you want to know about the age in seconds.")
print("-------------------------------------")
birth_day = int(input("Your birth day is xx: "))
birth_month = int(input("Your birth month is xx: "))
birth_year = int(input("Your birth year is xxxx: "))
day = int(datetime.date.today().day)
month = int(datetime.date.today().month)
year = int(datetime.date.today().year)
print(f'today: Y: {year}, M: {month}, D: {day}')
number = int(input("Select what you want: "))
# NOTE(review): when month == birth_month the age is decremented regardless of
# the day of month, so the computed age can be off by one -- verify intent.
if month > birth_month:
    age = year - birth_year
else:
    age = (year - birth_year) - 1
if (number > 0) and (number < 4) and (age >= 0) and (age < 130):  # validate the input
    if number == 1:  # choose the action
        # NOTE(review): divisibility by 4 alone misclassifies century years
        # (e.g. 1900) as leap years.
        if birth_year % 4 != 0:
            print("It is a common year (365 days)")
        else:
            print("It is a leap year (366 days)")
    elif number == 2:  # choose the action
        print("Your group is ", end="")
        if age < 1:
            print("Baby")
        elif (age >= 1) and (age < 3):
            print("Toddler")
        elif (age >= 3) and (age < 5):
            print("Preschool")
        elif (age >= 5) and (age < 12):
            print("Gradeschooler")
        elif (age >= 12) and (age < 19):
            print("Teen")
        elif age >= 19:
            print("Adult")
    elif number == 3:  # choose the action
        print("Your age: {} year, {} month, {} day".format(age, abs(month - birth_month), abs(day - birth_day)))
        # Rough conversion: 365-day years, 30-day months.
        seconds = ((age * 31536000) + ((month - 1) * 2592000) + ((day - 1) * 86400))
        print("Your age is {} seconds".format(seconds))
else:
    print("Error! Try again.")
| true |
3d389a93cdbfdc3c109a9e3fde456d389efa9bb8 | Python | VimleshS/python-graph-ds | /breadth_first_search.py | UTF-8 | 1,061 | 3.671875 | 4 | [] | no_license | from queue import Queue
from adjacencymatrix import *
def bfs(graph, start_vertex):
    """Breadth-first traversal from start_vertex, printing each vertex on first visit."""
    pending = Queue()
    pending.put(start_vertex)
    visited = np.zeros(graph.numVertices)
    while not pending.empty():
        current = pending.get()
        # A vertex may be enqueued more than once; process it only the first time.
        if visited[current] == 1:
            continue
        print("visit: ", current)
        visited[current] = 1
        for neighbour in graph.get_adjacent_vertices(current):
            if visited[neighbour] != 1:
                pending.put(neighbour)
def dfs(graph, start_vertex):
    """Depth-first traversal entry point: allocate the visited array and recurse."""
    visited = np.zeros(graph.numVertices)
    recurse_node(graph, visited, start_vertex)
def recurse_node(graph, visited, cur_node):
    """Visit cur_node (printing it) and recurse into its unvisited neighbours."""
    if visited[cur_node] == 1:
        return
    visited[cur_node] = 1
    print("Visit: ", cur_node)
    for neighbour in graph.get_adjacent_vertices(cur_node):
        recurse_node(graph, visited, neighbour)
# Build a small directed graph (9 vertices) and run a depth-first traversal.
a = AdjacencyMatrixGraph(9, True)
a.add_edge(0,1)
a.add_edge(1,2)
a.add_edge(2,7)
a.add_edge(2,4)
a.add_edge(2,3)
a.add_edge(1,5)
a.add_edge(5,6)
a.add_edge(6,3)
a.add_edge(3,4)
a.add_edge(6,8)

# bfs(a,2)
dfs(a,0)
| true |
a1fe1a626c2c5922a12fabf0eeaaf56d1050aef4 | Python | shucheng-ai/vegas | /canvas.py | UTF-8 | 14,622 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
from abc import ABC, abstractmethod
from contextlib import contextmanager
from .vegas_core import Box
import numpy as np
import cv2
def angle_cad_to_cv (angle, start_angle, end_angle, flip=True):
    """Convert a CAD arc (rotation, start, end in degrees) to OpenCV's convention.

    Rounds all angles to integers; full circles pass through unchanged.  With
    flip=True the rotation direction is mirrored (y axis flipped), and the
    sweep is normalized so the end angle follows the start within 180 degrees.
    """
    rot, a0, a1 = round(angle), round(start_angle), round(end_angle)
    if a0 == 0 and a1 == 360:
        return rot, 0, 360
    if flip:
        rot, a0, a1 = -rot, -a1, -a0
    # Bring the end angle forward until it is not behind the start.
    while a1 < a0:
        a1 += 360
    # Prefer the short representation of the sweep.
    if a1 - a0 > 180:
        a0, a1 = a1 - 360, a0
    return rot, a0, a1
# Relaxation distance in world units -- presumably used by connected-component
# grouping elsewhere in this module; TODO confirm at the usage site.
CC_RELAX = 5000
class Style:
    """Drawing style: an outline color index and an optional fill color index."""

    def __init__(self, lineColor=0, fillColor=None):
        self.lineColor = lineColor
        self.fillColor = fillColor

    def copy(self):
        """Return an independent Style carrying the same two colors."""
        return Style(self.lineColor, self.fillColor)

    def __str__(self):
        return 'LC: %s FC: %s' % (self.lineColor, self.fillColor)
class Canvas:
    """Abstract drawing surface managing a stack of Style objects.

    Subclasses provide path()/arc(); this base class only keeps the style
    stack and the helpers built on top of path().
    """

    def __init__(self):
        # Style stack: style() pushes a modified copy on entry and pops it on exit.
        self.styles = [Style()]

    @contextmanager
    def style(self, **kwargs):
        """Temporarily override style attributes inside a `with` block."""
        pushed = self.styles[-1].copy()
        for attr, value in kwargs.items():
            setattr(pushed, attr, value)
        self.styles.append(pushed)
        try:
            yield None
        finally:
            # Always restore the previous style, even if the body raises.
            self.styles.pop()

    def line(self, v1, v2):
        """Draw a single segment as a two-point path."""
        self.path([v1, v2])

    def hatch(self, points):
        """Fill the closed boundary `points`; the line color doubles as the
        fill color when no fill color is set."""
        line_color = self.styles[-1].lineColor
        fill_color = self.styles[-1].fillColor
        if fill_color is None:
            fill_color = line_color
        with self.style(lineColor=line_color, fillColor=fill_color):
            self.path(points, closed=True)
# dongwei: note -- pure black [0, 0, 0] must not appear in the palette; it would break detection
# BGR color palette indexed by Style color numbers (wraps via modulo in CvCanvas).
TABLEAU20 = [[255, 255, 255],[127, 127, 225],[220, 10, 10],[230, 220, 10],[20, 200, 10],[170, 20, 220],[200, 200, 200],[0, 230, 230],[100, 100, 100],[180, 119, 31],[232, 199, 174],[14, 127, 255],[120, 187, 255],[44, 160, 44],[138, 223, 152],[40, 39, 214],[150, 152, 255],[189, 103, 148],[213, 176, 197],[75, 86, 140],[148, 156, 196],[194, 119, 227],[210, 182, 247],[127, 127, 127],[199, 199, 199],[34, 189, 188],[141, 219, 219],[207, 190, 23],[229, 218, 158],
[240, 240, 240],[127, 127, 225],[220, 10, 10],[230, 220, 10],[20, 200, 10],[170, 20, 220],[200, 200, 200],[0, 230, 230],[100, 100, 100],[180, 119, 31],[232, 199, 174],[14, 127, 255],[120, 187, 255],[44, 160, 44],[138, 223, 152],[40, 39, 214],[150, 152, 255],[189, 103, 148],[213, 176, 197],[75, 86, 140],[148, 156, 196],[194, 119, 227],[210, 182, 247],[127, 127, 127],[199, 199, 199],[34, 189, 188],[141, 219, 219],[207, 190, 23],[229, 218, 158]]
class RasterCanvas(Canvas):
    """Canvas mapping a world-coordinate bounding box onto a raster of pixels.

    Fixes over the original:
    - `scale` was defined twice; the first (float-returning) definition was
      silently shadowed by the later integer-rounding one and is removed.
      `unscale` remains the float inverse mapping.
    - a redundant re-initialization of self.styles after super().__init__()
      is removed.
    """
    def __init__ (self, bbox, size, padding=0):
        '''
        bbox: bounding box of the object being drawn (provides unpack() -> x0, y0, x1, y1)
        size: pixel length of the longer canvas edge
        padding: pixels of padding added on each side
        '''
        super().__init__()
        self.padding = padding
        self.bbox = bbox
        self.palette = TABLEAU20
        x0, y0, x1, y1 = bbox.unpack()
        self.x0 = x0
        self.y0 = y0
        w = x1 - x0
        h = y1 - y0
        assert w > 0 and h > 0
        l = max(w, h)
        # Pixels-per-world-unit ratio kept exact as scale_num / scale_denom.
        self.scale_num = size - 1 - padding * 2
        self.scale_denom = l
        # (rows, cols) of the raster, sized so the longer edge fits `size`.
        self.size = round((h * self.scale_num + self.scale_denom - 1) // self.scale_denom + 1 + padding * 2 + 0.5), \
                round((w * self.scale_num + self.scale_denom - 1) // self.scale_denom + 1 + padding * 2 + 0.5)

    def map (self, vector):
        '''World (x, y) -> integer pixel (col, row); the y axis is flipped.'''
        x = round((vector[0] - self.x0) * self.scale_num / self.scale_denom)
        y = round((vector[1] - self.y0) * self.scale_num / self.scale_denom)
        return (x + self.padding, self.size[0] - y - self.padding)

    def unmap (self, pt):
        '''Pixel (col, row) -> world (x, y) as floats (inverse of map).'''
        x, y = pt
        x -= self.padding
        y = self.size[0] - y - self.padding
        x = x * self.scale_denom / self.scale_num + self.x0
        y = y * self.scale_denom / self.scale_num + self.y0
        return (x, y)

    def scale (self, r):
        '''World length -> pixel length, rounded to the nearest integer.'''
        return round(r * self.scale_num / self.scale_denom)

    def unscale (self, l):
        '''Pixel length -> world length as a float.'''
        return l * self.scale_denom / self.scale_num
class CvCanvas(RasterCanvas):
    """RasterCanvas backed by an OpenCV BGR image buffer."""
    def __init__ (self, box, size, padding=0):
        super().__init__(box, size, padding)
        # 3-channel BGR raster, initially all black.
        self.image = np.zeros(self.size + (3,), dtype=np.uint8)
        pass
    def gray (self):
        """Return a grayscale copy of the image."""
        return cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
    def lineColor (self):
        '''Current line color as [b, g, r], looked up in the palette (index wraps).'''
        return self.palette[self.styles[-1].lineColor % len(self.palette)]
    def fillColor (self):
        '''Current fill color as [b, g, r], or None when no fill color is set.'''
        if self.styles[-1].fillColor is None:
            return None
        return self.palette[self.styles[-1].fillColor % len(self.palette)]
    def copycvs (self, target, resolution):
        """Replace this canvas's image with a resized copy of target's image."""
        self.image = cv2.resize( target.image, (resolution, resolution), interpolation=cv2.INTER_CUBIC )
    def path (self,points,closed = False):
        """
        Polyline through multiple points.
        :param points: point sequence [(x1, y1), (x2, y2), ...]
        :param closed: whether the shape is closed
        """
        if len(points) == 0:
            return
        pts = []
        for p in points:
            pts.append(self.map(p))
        if closed and not self.fillColor() is None:
            # hatch: filled polygon instead of an outline
            cv2.fillPoly(self.image, [np.round(np.array(pts)).astype(np.int32)], self.fillColor())
            return
        cv2.polylines(self.image, [np.round(np.array(pts)).astype(np.int32)], closed, self.lineColor())
        pass
    def arc (self,center,radius, angle, start_angle, end_angle):
        """
        Arc (covers circles, ellipses and partial arcs).
        :param center: center point
        :param radius: (r1, r2) semi-major / semi-minor axes; r1 == r2 for a circle
        :param angle: clockwise rotation in degrees
        :param start_angle: start angle
        :param end_angle: end angle
        """
        # Convert CAD angle conventions to OpenCV's before drawing.
        angle, start_angle, end_angle = angle_cad_to_cv(angle, start_angle, end_angle)
        cv2.ellipse(self.image, self.map(center),
                    (self.scale(radius[0]), self.scale(radius[1])),
                    angle, start_angle, end_angle, self.lineColor())
        pass
    #def MText(self, string, center,angle, scale=1.2):
    #    #TODO: rotated text
    #    font = cv2.FONT_HERSHEY_SIMPLEX  # choose the font
    #    cv2.putText(self.image,string,self.map(center),font,scale,self.lineColor(),1)
    #def Text(self,string ,center,angle, scale=1.2):
    #    # TODO: rotated text
    #    font = cv2.FONT_HERSHEY_SIMPLEX  # choose the font
    #    cv2.putText(self.image, string, self.map(center), font, scale, self.lineColor(), 1)
    def save(self, path):
        """Write the image to `path`."""
        cv2.imwrite(path, self.image)
        pass
    def save_alpha(self, path):
        """Write the image with an alpha channel: non-black pixels become opaque."""
        alpha = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        alpha = (alpha > 0) * 255
        image = np.dstack([self.image, alpha])
        cv2.imwrite(path, image)
        pass
    pass
def round_point(v):
    """Round the first two coordinates of `v` to the nearest integers, as a list."""
    x, y = v[0], v[1]
    return [round(x), round(y)]
class ShapeStatCanvas (RasterCanvas):
    """Canvas that counts drawing primitives instead of rendering them.

    Fix: this class forwarded (box, size, padding) to super().__init__() while
    inheriting from Canvas, whose __init__ takes no arguments, so every
    instantiation raised TypeError.  RasterCanvas matches the forwarded
    signature and is the evident intended base.
    """
    def __init__(self, box, size, padding=0):
        super().__init__(box, size, padding)
        self.path_num = 0   # polylines seen so far
        self.arc_num = 0    # arcs seen so far

    def path (self, points, closed = False):
        """Count a polyline made of several points (no drawing)."""
        self.path_num += 1
        print("总线段数", self.path_num)

    def arc (self, center, radius, angle, start_angle, end_angle):
        """Count an arc -- covers circles, ellipses and partial arcs (no drawing)."""
        self.arc_num += 1
        print("总线段数", self.arc_num)
class JsonCanvas(Canvas):
    '''Store the drawn content as our internal JSON format.'''
    def __init__(self):
        super().__init__()
        self.shapes = []        # accumulated path dicts: {'points': ..., 'color': ...}
        self.bbox = Box()       # bounding box grown as points are drawn
        self.label = ''
        self.offset = [0,0]     # (dx, dy) applied to every emitted point
        pass
    def path (self,points,closed = False):
        """
        Polyline through multiple points; closed paths repeat the first point.
        """
        if len(points) == 0:
            return
        for v in points:
            self.bbox.extend_xy(v)
        points = [round_point(v) for v in points]
        if closed:
            points.append(points[0])
        # NOTE(review): x is shifted by +offset[0] but y by -offset[1] --
        # presumably a y-axis flip convention; confirm against consumers.
        points = [[p[0]+self.offset[0],p[1]-self.offset[1]] for p in points]
        self.shapes.append({
            'points': points, 'color': self.styles[-1].lineColor
        })
        pass
    def arc (self,center,radius, angle, start_angle, end_angle):
        # Approximate the arc with a polyline sampled every 20 degrees.
        angle, start_angle, end_angle = angle_cad_to_cv(angle, start_angle, end_angle)
        center = round(center[0]), round(center[1])
        radius = (round(radius[0]), round(radius[1]))
        pts = cv2.ellipse2Poly(center, radius, angle, start_angle, end_angle, 20)
        ll = []
        for i in range(pts.shape[0]):
            x, y = pts[i]
            ll.append([int(x), int(y)])
            pass
        self.path(ll)
        pass
    def dump(self):
        """Serialize all recorded paths into the internal JSON structure."""
        return {'paths': self.shapes}
    def update(self, second_canvas):
        # NOTE(review): append() nests the other canvas's shape list as a single
        # element; extend() may have been intended -- verify with dump() consumers.
        self.shapes.append(second_canvas.shapes)
        pass
#class CompactingCanvas (CvCanvas):
# def __init__ (self, boxes, size, padding = 0):
#
# self.boxes = boxes
# self.size = size
# self.padding = padding
# self.mapped_boxes, self.vects = compact_boxes (self.boxes)
# self.bbox = bound_boxes(self.mapped_boxes)
#
# super().__init__(self.bbox, self.size, self.padding)
#
# def path (self,points,closed = False):
# """
# 多个点构成的折线
# :param points: 多个点 [(x1,y1),(x2,y2)]
# :param closed: 图形是否闭合
# """
# pts = []
# for p in points:
# conv_p = convert_point(p, self.boxes, self.vects)
# if conv_p is not None:
# pts.append(self.map(conv_p))
# pass
# pass
# if len(pts) == 0:
# return
#
# if closed and not self.fillColor() is None:
# #实现hatch
# cv2.fillPoly(self.image, [np.round(np.array(pts)).astype(np.int32)], self.fillColor())
# return
# cv2.polylines(self.image, [np.round(np.array(pts)).astype(np.int32)], closed, self.lineColor())
# pass
#
# def arc (self,center,radius, angle,start_angle, end_angle):
# """
# 圆弧(可实现 圆 、 椭圆 、 圆弧等)
# :param center: 中心
# :param radius: 半径 格式为(r1,r2),r1为半长轴,r2为半短轴。若需绘制图形为圆,则r1=r2
# :param angle: 旋转的角度 顺时针
# :param start_angle: 开始角度
# :param end_angle: 结束角度
# :param shift: 线宽 -1填充图形 默认0
# """
# if center is not None:
# if len(center) > 2:
# center = center[:2]
# conv_center = convert_point(center, self.boxes, self.vects)
# if conv_center is not None:
# angle, start_angle, end_angle = angle_cad_to_cv(angle, start_angle, end_angle)
# cv2.ellipse(self.image, self.map(conv_center),
# (self.scale(radius[0]), self.scale(radius[1])),
# angle, start_angle, end_angle, self.lineColor())
# pass
#
#
#def compact_boxes(boxes, dist=5000):
# i = 0
# mapped_boxes = []
# vects = []
# xo = 0 #compact图的左上角
# yo = 0
# wide = 0
# height = 0
# tmp_x = 0 #左上角
# for box in boxes:
# x0, y0, x1, y1 = box.unpack()
# # 第一个box
# if i == 0:
# mapped_boxes.append(box)
# vects.append([0,0])
# xo = x0
# yo = y1
# tmp_x = x0
#
# else:
# tmp_x += (wide + dist) #更新左上角
# mapped_boxes.append(Box(tmp_x, yo+y0-y1, tmp_x+x1-x0, yo))
# vects.append([tmp_x-x0,yo-y1])
#
# wide = x1-x0
# height = y1-y0
#
# i += 1
#
# return mapped_boxes, vects
#
#def bound_boxes(mapped_boxes):
# if len(mapped_boxes) < 1:
# return None
# min_x = min_y = max_x = max_y = None
# for i in range(len(mapped_boxes)):
# box = mapped_boxes[i]
# if i == 0:
# min_x, min_y, max_x, max_y = box.unpack()
# else:
# x0, y0, x1, y1 = box.unpack()
# if x1> max_x:
# max_x = x1
# if y0 < min_y:
# min_y = y0
# return Box(min_x, min_y, max_x, max_y)
#
#def convert_point(p, boxes, vects):
# for i in range(len(boxes)):
# x0, y0, x1, y1 = boxes[i].unpack()
# xp, yp = p
# if x0 <= xp <= x1 and y0 <= yp <= y1:
# #return Point(xp+vects[i][0],yp+vects[i][1])
# return (xp+vects[i][0],yp+vects[i][1])
class CacheCanvas (Canvas):
    """Canvas that records draw calls per layer for later replay onto a real canvas."""

    def __init__ (self, dr):
        # `dr` is accepted for interface compatibility; it is not used here.
        super().__init__()
        self.layers = {}
        # Aliases to the currently active layer's lists (see addLayer).
        self.paths = []
        self.arcs = []

    def addLayer (self, layer_name):
        """Create a new layer and make it the target of subsequent path/arc calls."""
        entry = ([], [])
        self.layers[layer_name] = entry
        # Keep self.paths/self.arcs aliased to the stored lists so recorded
        # calls land directly in the layer.
        self.paths, self.arcs = entry

    def path (self, points, closed = False):
        """Record a polyline call."""
        self.paths.append((points, closed))

    def arc(self, center, radius, angle, start_angle, end_angle):
        """Record an arc call."""
        self.arcs.append((center, radius, angle, start_angle, end_angle))

    def render_one (self, cvs, one):
        """Replay one recorded (paths, arcs) pair onto `cvs`."""
        paths, arcs = one
        for args in paths:
            cvs.path(*args)
        for args in arcs:
            cvs.arc(*args)

    def render (self, cvs):
        """Replay every recorded layer onto `cvs`."""
        for cached in self.layers.values():
            self.render_one(cvs, cached)

    def render_layer (self, cvs, layer_name):
        """Replay a single named layer onto `cvs`."""
        self.render_one(cvs, self.layers[layer_name])
| true |
e00f2af9ec27eacc82b16193a39d532b45263ebe | Python | rwpearson333/Project-3 | /Working PID left wall.py | UTF-8 | 1,620 | 2.5625 | 3 | [] | no_license | import time
import brickpi3
from grovepi import *
import math as m
# Left-wall-following PID controller for a BrickPi robot: waits for a button
# press, then steers to hold TARGET_DIST inches from the wall using an
# ultrasonic sensor sampled every TIME_STEP/100 seconds.
BP = brickpi3.BrickPi3()

#define PID gain constants
PK_CONSTANT = 8
DK_CONSTANT = 2
IK_CONSTANT = 2
BASE_SPEED = 180      # nominal wheel speed, degrees per second
TIME_STEP = 20        # control period in hundredths of a second
TARGET_DIST = 10      # desired wall distance (ultrasonic units, inches)

#define sensors
LIGHT_SENSOR = BP.PORT_2
BUTTON = BP.PORT_1
ULTRASONIC = 4

#define motor ports
LEFT_MOTOR = BP.PORT_C #Left motor port
RIGHT_MOTOR = BP.PORT_B #Right motor port

#initialize sensors
BP.set_sensor_type(LIGHT_SENSOR, BP.SENSOR_TYPE.NXT_LIGHT_ON)
BP.set_sensor_type(BP.PORT_1, BP.SENSOR_TYPE.TOUCH)

#Set initial motor speeds to zero
BP.set_motor_dps(RIGHT_MOTOR, 0)
BP.set_motor_dps(LEFT_MOTOR, 0)
BP.set_motor_dps(BP.PORT_D, 0)
BP.set_motor_limits(LEFT_MOTOR, 70, 250)
BP.set_motor_limits(RIGHT_MOTOR, 70, 250)

value = 0
count = 0
left = False
right = False
timeInitial = time.time()
err = TARGET_DIST

# Block until the touch sensor reports pressed.
while not value:
    try:
        value = BP.get_sensor(BUTTON)
    except brickpi3.SensorError:
        value = 0

# NOTE(review): `value` is never re-read below, so this loop runs forever by design.
while value:
    if count == 0:
        BP.set_motor_dps(RIGHT_MOTOR, BASE_SPEED)
        BP.set_motor_dps(LEFT_MOTOR, BASE_SPEED) #need to calibrate
        distance = ultrasonicRead(ULTRASONIC)

#    print(time.time())
    # Fire the control law roughly once per TIME_STEP hundredths of a second.
    if ( int(time.time() * 100) % TIME_STEP == 0):
        distance = ultrasonicRead(ULTRASONIC)
        lastErr = err
        err = TARGET_DIST - distance
        dK = ((err - lastErr) / (TIME_STEP / 100.0)) * DK_CONSTANT
        pK = err * PK_CONSTANT
        # NOTE(review): iK is err * dt for this step only -- a true integral
        # term would accumulate error over time (and use IK_CONSTANT); confirm.
        iK = err * (TIME_STEP / 100.0)
        # Opposite corrections on each wheel steer toward/away from the wall.
        BP.set_motor_dps(RIGHT_MOTOR, BASE_SPEED - pK + dK - iK)
        BP.set_motor_dps(LEFT_MOTOR, BASE_SPEED + pK - dK + iK)
        count = count + 1
928790341b8860ad1d34f8ed9f4d793d059e1159 | Python | alm3ndra/farmapp | /listados_module.py | UTF-8 | 4,050 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python3
# LISTA DE ULTIMOS MOVIMIENTOS DE VENTA
def listar_ventas(registros, ultimos):
    """Return the last `ultimos` sale records, newest first.

    Fix: the original called registros.reverse(), permanently mutating the
    caller's list as a hidden side effect; this version leaves the input
    untouched and clamps `ultimos` to the available range.
    """
    tope = max(0, min(ultimos, len(registros)))
    return list(reversed(registros))[:tope]
# BUSCA CLIENTES POR CARACTERES DE ENTRADA DE USUARIO
def encontrar_clientes(registros, nombre_cliente):
    """Distinct client names containing the search substring, in first-seen order."""
    cliente = []
    for registro in registros:
        nombre = registro.cliente
        if nombre_cliente in nombre and nombre not in cliente:
            cliente.append(nombre)
    return cliente
# LISTADO DE CLIENTES SEGUN LOS PRODUCTOS COMPRADOS
def listar_productos_cliente(registros, cliente):
    """Sale records whose client name contains `cliente` (input upper-cased first)."""
    nombre_cliente = cliente.upper()
    return [registro for registro in registros if nombre_cliente in registro.cliente]
# LISTADO COMO RESULTADO DE UNA BUSQUEDA DE PRODUCTOS
def encontrar_productos(registros, nombre_producto):
    """Distinct product names containing the search substring, in first-seen order."""
    producto = []
    for registro in registros:
        nombre = registro.producto
        if nombre_producto in nombre and nombre not in producto:
            producto.append(nombre)
    return producto
# LISTADO DE CLIENTES SEGUN PRODUCTO
def listar_clientes_producto(registros, producto):
    """Sale records whose product name contains `producto` (input upper-cased first)."""
    nombre_producto = producto.upper()
    return [registro for registro in registros if nombre_producto in registro.producto]
# PRODUCTOS MAS VENDIDOS
def prod_vendidos(registros, cantidad):
    """Top `cantidad` products by total units sold.

    Returns a list of [total_units, first_record_for_that_product] pairs,
    sorted by total descending.

    Fixes over the original:
    - totals are accumulated by exact product name; the original summed with a
      substring test ("PAN" also matched "PANTALON"), double-counting products
      whose names contain each other.
    - sorting uses the numeric total only; the original sorted [total, record]
      pairs, which raises TypeError on ties between non-comparable records.
    """
    totales = {}
    primero = {}
    for registro in registros:
        nombre = registro.producto
        if nombre not in totales:
            totales[nombre] = 0
            primero[nombre] = registro  # remember the first record per product
        totales[nombre] += registro.cantidad
    ordenados = sorted(totales.items(), key=lambda item: item[1], reverse=True)
    tope = max(0, min(cantidad, len(ordenados)))
    return [[total, primero[nombre]] for nombre, total in ordenados[:tope]]
# CLIENTES QUE MAS GASTARON
def clientes_gastadores(registros, cantidad):
    """Top `cantidad` clients by total money spent (sum of cantidad * precio).

    Returns a list of [total_spent, first_record_for_that_client] pairs,
    sorted by total descending.

    Fixes over the original:
    - totals are accumulated by exact client name; the original summed with a
      substring test, so a client whose name is contained in another's
      absorbed that client's purchases too.
    - sorting uses the numeric total only; the original sorted [total, record]
      pairs, which raises TypeError on ties between non-comparable records.
    """
    totales = {}
    primero = {}
    for registro in registros:
        nombre = registro.cliente
        if nombre not in totales:
            totales[nombre] = 0
            primero[nombre] = registro  # remember the first record per client
        totales[nombre] += registro.cantidad * registro.precio
    ordenados = sorted(totales.items(), key=lambda item: item[1], reverse=True)
    tope = max(0, min(cantidad, len(ordenados)))
    return [[total, primero[nombre]] for nombre, total in ordenados[:tope]]
| true |
eba92738c417597be49a3d854bcabdf016160162 | Python | pjot/advent-of-code | /2019/23/23.py | UTF-8 | 955 | 3.03125 | 3 | [] | no_license | from intcode import parse_file, Computer
def run():
    """Simulate 50 networked Intcode computers (AoC 2019 day 23).

    Returns (part1, part2): the first y sent to address 255, and the first y
    the NAT delivers twice in a row.
    """
    network = {
        i: Computer(parse_file('input.intcode'), [i])
        for i in range(50)
    }
    nat = None       # last [x, y] packet sent to address 255
    first_y = None   # part 1 answer: first y ever sent to 255
    while True:
        # Idle detection: nothing produced output on the last sweep.
        empty_outputs = all(
            c.output is None
            for c in network.values()
        )
        if empty_outputs and nat:
            # NAT wakes the network by delivering its packet to computer 0.
            network[0].inputs += nat
        for _, c in network.items():
            c.output = None
            # Each packet is three outputs: destination address, then x, y.
            addr = c.iterate_once()
            x = c.iterate_once()
            y = c.iterate_once()
            if addr == 255:
                if first_y is None:
                    first_y = y
                # Part 2: the NAT is about to deliver the same y twice in a row.
                if nat and nat[1] == y:
                    return first_y, y
                nat = [x, y]
            elif addr is None:
                # No packet produced: feed -1 so the computer's recv doesn't block.
                c.inputs.append(-1)
            else:
                network[addr].inputs += [x, y]


one, two = run()
print("Part 1:", one)
print("Part 2:", two)
| true |
137652b20f6e335f6257e6b332c49d6edac6b35c | Python | lutziw/pseudo-kant | /src/keyboards.py | UTF-8 | 3,552 | 2.984375 | 3 | [
"MIT"
] | permissive | from telebot.types import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
from typing import Tuple, List
def create_keyboard() -> ReplyKeyboardMarkup:
    """Build the persistent reply keyboard with the bot's top-level commands."""
    kb: ReplyKeyboardMarkup = ReplyKeyboardMarkup(True)
    for row in (('/start', '/help', '/settings'), ('/activate', '/deactivate')):
        kb.row(*row)
    return kb
def create_markup_setting() -> InlineKeyboardMarkup:
    """Inline keyboard listing the tunable text-generation settings.

    Each button's callback_data ('param_*') is dispatched by the bot's
    callback handler.
    """
    markup_settings: InlineKeyboardMarkup = InlineKeyboardMarkup()
    markup_settings.add(InlineKeyboardButton("Максимальная длина текста",
                                             callback_data='param_max_length'))
    markup_settings.add(InlineKeyboardButton("Число наиболее вероятных следующих слов",
                                             callback_data='param_top_k'))
    markup_settings.add(InlineKeyboardButton("Совокупная вероятность для следующих слов",
                                             callback_data='param_top_p'))
    markup_settings.add(InlineKeyboardButton("Вероятность появления слов с большой вероятностью",
                                             callback_data='param_temperature'))
    markup_settings.add(InlineKeyboardButton("Узнать текущее параметры",
                                             callback_data='param_info'))
    markup_settings.add(InlineKeyboardButton("Установить параметры по умолчанию",
                                             callback_data='param_default'))
    return markup_settings
def create_markup_max_length() -> Tuple[InlineKeyboardMarkup, List[int]]:
    """Inline keyboard with preset max-length values plus a 'back' button.

    Returns the markup together with the list of selectable values.
    """
    choices: List[int] = [10, 50, 100, 200, 300, 500]
    markup = InlineKeyboardMarkup()
    for n in choices:
        markup.add(InlineKeyboardButton(str(n), callback_data=f'change_max_length_{n}'))
    markup.add(InlineKeyboardButton('Назад', callback_data='change_back'))
    return markup, choices
def create_markup_top_k() -> Tuple[InlineKeyboardMarkup, List[int]]:
    """Inline keyboard with preset top-k values plus a 'back' button."""
    choices: List[int] = [1, 2, 3, 5, 10, 15, 20]
    markup = InlineKeyboardMarkup()
    for k in choices:
        markup.add(InlineKeyboardButton(str(k), callback_data=f'change_top_k_{k}'))
    markup.add(InlineKeyboardButton("Назад", callback_data='change_back'))
    return markup, choices
def create_markup_top_p() -> Tuple[InlineKeyboardMarkup, List[float]]:
    """Inline keyboard with preset top-p (nucleus) values plus a 'back' button."""
    # Order is preserved exactly as originally defined (buttons appear in it).
    choices: List[float] = [0.1, 0.5, 0.2, 0.8, 0.9, 0.95, 1]
    markup = InlineKeyboardMarkup()
    for p in choices:
        markup.add(InlineKeyboardButton(str(p), callback_data=f"change_top_p_{p}"))
    markup.add(InlineKeyboardButton("Назад", callback_data='change_back'))
    return markup, choices
def create_markup_temperature() -> Tuple[InlineKeyboardMarkup, List[float]]:
    """Inline keyboard with preset sampling-temperature values plus 'back'."""
    choices: List[float] = [0.1, 0.2, 0.5, 0.8, 0.9, 0.95, 1]
    markup = InlineKeyboardMarkup()
    for temp in choices:
        markup.add(InlineKeyboardButton(str(temp), callback_data=f"change_temperature_{temp}"))
    markup.add(InlineKeyboardButton("Назад", callback_data='change_back'))
    return markup, choices
| true |
367306b96d4731871b20ef8d5aae0fb77b6f3037 | Python | shams169/python | /SortingAlgorithms/MyBubbleSort.py | UTF-8 | 421 | 3.671875 | 4 | [] | no_license | class MyBubbleSort:
def myBubbleSort(self, data):
for i in range(len(data)):
for j in range(len(data) -1 ):
if data[j+1] < data[j]:
data[j+1], data[j] = data[j], data[j+1]
print(data)
return data
def main():
    """Demo entry point: sort a sample list and print the result."""
    sorter = MyBubbleSort()
    result = sorter.myBubbleSort([3, 4, 1, 7, 2, 9, 0])
    print(result)

if __name__ == '__main__':
    main()
bc12feeed94190a6d633d7e0b463ab5e5d47c90e | Python | shenpingle/tray | /adr-crawler/mdrtime.py | UTF-8 | 4,264 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
###########################################################################################
# author:touchluu2010@gmail.com
# 说明:封装和计算上年度、上半年度、下半年度、本月、本周、上周、上月、当天的时间函数
# Revision: 1.0
###########################################################################################
import time
import datetime
'''抓取上一年度的'''
def last_year_cal():
    """Return a {'beginTime', 'endTime'} filter spanning all of last year."""
    prev_year = datetime.datetime.today().year - 1
    return {
        "beginTime": datetime.date(prev_year, 1, 1).strftime('%Y-%m-%d'),
        "endTime": datetime.date(prev_year, 12, 31).strftime('%Y-%m-%d'),
    }
'''抓取上半年度的'''
def halfyear_before_cal():
    """Filter for the first half of the current year (Jan 1 - Jun 30)."""
    year = datetime.datetime.today().year
    return {
        "beginTime": datetime.date(year, 1, 1).strftime('%Y-%m-%d'),
        "endTime": datetime.date(year, 6, 30).strftime('%Y-%m-%d'),
    }
'''抓取下半年度的'''
def halfyear_after_cal():
    """Filter for the second half of the current year (Jul 1 - Dec 31)."""
    year = datetime.datetime.today().year
    return {
        "beginTime": datetime.date(year, 7, 1).strftime('%Y-%m-%d'),
        "endTime": datetime.date(year, 12, 31).strftime('%Y-%m-%d'),
    }
'''抓取本周的'''
def current_week_cal():
    """Filter from Monday of the current week through today."""
    today = datetime.datetime.today()
    monday = today - datetime.timedelta(days=today.weekday())
    return {
        "beginTime": monday.strftime('%Y-%m-%d'),
        "endTime": today.strftime('%Y-%m-%d'),
    }
'''抓取上周的'''
def last_week_cal():
    """Filter spanning last week, Monday through Sunday."""
    today = datetime.datetime.today()
    last_monday = today - datetime.timedelta(days=today.weekday() + 7)
    last_sunday = last_monday + datetime.timedelta(days=6)
    return {
        "beginTime": last_monday.strftime('%Y-%m-%d'),
        "endTime": last_sunday.strftime('%Y-%m-%d'),
    }
'''抓取上月的'''
def last_month_cal():
    """Filter covering the whole previous calendar month."""
    now = datetime.datetime.today()
    year, month = now.year, now.month - 1
    if month == 0:
        # January: previous month is December of the previous year.
        year, month = year - 1, 12
    first_day = datetime.datetime(year, month, 1)
    # Last day of the previous month = day before the 1st of this month.
    last_day = datetime.datetime(now.year, now.month, 1) - datetime.timedelta(days=1)
    return {
        "beginTime": first_day.strftime('%Y-%m-%d'),
        "endTime": last_day.strftime('%Y-%m-%d'),
    }
'''抓取今天的的'''
def toady_cal():
    """Filter for today only (beginTime == endTime == today).

    Fixed: removed the dead ``end_time = start_time + 1 day`` computation --
    the original computed tomorrow's date and then returned today for both
    bounds anyway.
    NOTE(review): if an exclusive end of "tomorrow" was intended, endTime
    should use that value instead -- confirm with callers.
    (The function name typo 'toady' is kept for interface compatibility.)
    """
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    return {
        "beginTime": today,
        "endTime": today
    }
def init_cal():
    """Filter from the fixed epoch 2001-01-01 through today (full history)."""
    return {
        "beginTime": '2001-01-01',
        "endTime": datetime.datetime.today().strftime('%Y-%m-%d'),
    }
def anytime():
    # Interactively prompt for an arbitrary date range and return it as a
    # {'beginTime', 'endTime'} filter.  Python 2 only (raw_input); the second
    # prompt's label says 'start_end' but it is the end date.
    start_time = raw_input("input start_time,eg:'2014-01-01': ")
    end_time = raw_input("input start_end,eg:'2014-01-01': ")
    filters = {
        "beginTime": start_time,
        "endTime": end_time
    }
    return filters
return filters
def anytime2(start_time, end_time):
    """Wrap an explicit date range in a {'beginTime', 'endTime'} filter."""
    return {"beginTime": start_time, "endTime": end_time}
def validate_date(d):
    """Return True if *d* is a valid 'YYYY-MM-DD' date string, else False."""
    try:
        datetime.datetime.strptime(d, '%Y-%m-%d')
    except ValueError:
        return False
    return True
if __name__ == "__main__":
t = current_week_cal()
print t['beginTime']
print t['endTime']
| true |
f68384eefe297671853b37fe0fc9ddef652cedb5 | Python | devinpowers/Project8 | /Projec8.py | UTF-8 | 12,825 | 3.609375 | 4 | [] | no_license | """
Created on Tue Apr 7 12:26:01 2020
@author: devinpowers
"""
''' Your header goes here '''
import csv
import pylab
from operator import itemgetter
def open_file():
    ''' Prompt until the user names a readable file; return the open handle.

    The caller is responsible for closing the returned file object.
    '''
    while True:
        file_name = input("Input a file name: ")
        try:
            fp = open(file_name,'r')
            break
        except FileNotFoundError:
            print("Unable to open file. Please try again.")
            continue
    return fp
def read_file(fp):
    '''
    Parse the video-game-sales CSV (header skipped) into three dictionaries:
      D1: name -> [name, platform, year, genre, publisher, global_sales]
      D2: genre -> list of (genre, year, na, europe, japan, other, global)
      D3: publisher -> list of (publisher, name, year, na, europe, japan,
                                other, global)
    Text keys are lower-cased, 'N/A' years become 0, and sales figures are
    scaled from millions to units.  All keys are sorted alphabetically and
    the D2/D3 record lists are sorted by global sales, largest first.

    Improved: uses the already-imported collections.defaultdict instead of
    the duplicated manual "if key in dict" bookkeeping.
    '''
    fp.readline()  # skip the header row
    D1 = {}
    D2 = defaultdict(list)
    D3 = defaultdict(list)
    for line in csv.reader(fp):
        name = line[0].lower()
        platform = line[1]
        year = 0 if line[2] == 'N/A' else int(line[2])
        genre = line[3].lower()
        publisher = line[4].lower()
        # Sales columns are in millions of copies; scale to units.
        na_sales = float(line[5]) * 1000000
        europe_sales = float(line[6]) * 1000000
        japan_sales = float(line[7]) * 1000000
        other_sales = float(line[8]) * 1000000
        global_sales = na_sales + europe_sales + japan_sales + other_sales
        D1[name] = [name, platform, year, genre, publisher, global_sales]
        D2[genre].append((genre, year, na_sales, europe_sales,
                          japan_sales, other_sales, global_sales))
        D3[publisher].append((publisher, name, year, na_sales, europe_sales,
                              japan_sales, other_sales, global_sales))
    D1_new = {key: D1[key] for key in sorted(D1)}
    D2_new = {key: sorted(val, key=itemgetter(-1), reverse=True)
              for key, val in sorted(D2.items())}
    D3_new = {key: sorted(val, key=itemgetter(-1), reverse=True)
              for key, val in sorted(D3.items())}
    return D1_new, D2_new, D3_new
def get_data_by_column(D1, indicator, c_value):
    '''
    Collect records from D1 whose year (indicator == 'year') or platform
    (indicator == 'platform') equals *c_value*, as 6-tuples sorted by global
    sales descending, tie-broken by platform or year respectively.  Any
    other indicator yields an empty list.
    '''
    matches = []
    if indicator == 'year':
        matches = [tuple(rec[:6]) for rec in D1.values() if rec[2] == c_value]
        matches.sort(key=itemgetter(-1, 1), reverse=True)
    elif indicator == 'platform':
        matches = [tuple(rec[:6]) for rec in D1.values() if rec[1] == c_value]
        matches.sort(key=itemgetter(-1, 2), reverse=True)
    return matches
def get_publisher_data(D3, publisher):
    '''
    Return the records for *publisher* from D3, sorted primarily by global
    sales (descending) and secondarily by title (ascending).  An unknown
    publisher yields an empty list.
    '''
    records = list(D3.get(publisher, []))
    # Two stable sorts: title ascending first, then sales descending,
    # so equal-sales records end up in title order.
    records.sort(key=itemgetter(1))
    records.sort(key=itemgetter(-1), reverse=True)
    return records
def display_global_sales_data(L, indicator):
    '''Print a formatted table of global sales per game for a year
    ('year') or a platform ('platform') view, plus the column total.

    *L* is the list of 6-tuples produced by get_data_by_column().
    '''
    if indicator == 'year':
        # Year view: the year is fixed, so show the platform column instead.
        print("{:30s}{:10s}{:20s}{:30s}{:12s}".format('Name', 'Year', 'Genre', 'Publisher', 'Global Sales'))
        sum_of_global = 0
        for element in L:
            print("{:30s}{:10s}{:20s}{:30s}{:<12,.02f}".format(element[0],str(element[2]),element[3],element[4],element[5]))
            sum_of_global += element[5]
        print("\n{:90s}{:<15,.02f}".format('Sum of Global Sales:', sum_of_global))
    elif indicator =='platform':
        print("{:30s}{:10s}{:20s}{:30s}{:12s}".format('Name', 'Platform', 'Genre', 'Publisher', 'Global Sales'))
        sum_of_global = 0
        for element in L:
            print("{:30s}{:10s}{:20s}{:30s}{:<12,.02f}".format(element[0],element[1],element[3],element[4],element[5]))
            sum_of_global += element[5]
        print("\n{:90s}{:<15,.02f}".format('Sum of Global Sales:', sum_of_global))
def get_genre_data(D2, year):
    '''
    Aggregate regional and global sales per genre for the given *year*.

    Returns a list of (genre, count, na, europe, japan, other, global)
    tuples -- one per genre that had at least one release that year --
    sorted alphabetically by genre.

    Fixed: the aggregate tuple no longer relies on the leaked ``element``
    loop variable after the loop (unbound for an empty record list); the
    genre name now comes from the dictionary key directly.
    '''
    genre_totals = []
    for genre, records in D2.items():
        matching = [rec for rec in records if rec[1] == year]
        if matching:
            # Sum columns 2..6: na, europe, japan, other, global sales.
            totals = [sum(rec[i] for rec in matching) for i in range(2, 7)]
            genre_totals.append((genre, len(matching), *totals))
    genre_totals.sort(key=itemgetter(0))
    return genre_totals
def display_genre_data(genre_list):
    '''
    Print a regional-sales table per genre (output of get_genre_data)
    followed by the global-sales total.
    '''
    print( "{:15s}{:15s}{:15s}{:15s}{:15s}{:15s}".format('Genre', 'North America', 'Europe', 'Japan', 'Other', 'Global'))
    sum_of_global = 0
    for element in genre_list:
        # element = (genre, count, na, europe, japan, other, global)
        print("{:15s}{:<15,.02f}{:<15,.02f}{:<15,.02f}{:<15,.02f}{:<15,.02f}".format(element[0],element[2],element[3],element[4], element[5], element[6]))
        sum_of_global += element[6]
    print("\n{:75s}{:<15,.02f}".format('Sum of Global Sales:', sum_of_global))
def display_publisher_data(pub_list):
    '''
    Print a regional-sales table per title (output of get_publisher_data)
    followed by the global-sales total.
    '''
    print("{:30s}{:15s}{:15s}{:15s}{:15s}{:15s}".format('Title', 'North America', 'Europe', 'Japan', 'Other', 'Global'))
    sum_of_global = 0
    for element in pub_list:
        # element = (publisher, title, year, na, europe, japan, other, global)
        print("{:30s}{:<15,.02f}{:<15,.02f}{:<15,.02f}{:<15,.02f}{:<15,.02f}".format(element[1],element[3],element[4],element[5],element[6], element[7]))
        sum_of_global += element[7]
    print("\n{:90s}{:<15,.02f}".format('Sum of Global Sales:', sum_of_global))
def get_totals(L, indicator):
    '''
    Sum global sales from the record tuples in *L*, grouped by the axis
    plotted for that view: by platform (element[1]) when indicator ==
    'year', or by year (element[2]) when indicator == 'platform'.

    Returns (labels_sorted_ascending, totals_in_matching_order).

    Fixed: the original referenced the undefined names ``platform_list``
    and ``inidcator`` (typo for ``indicator``), so both branches raised
    NameError on every call.
    '''
    if indicator == 'year':
        group_index = 1  # group by platform within one year
    elif indicator == 'platform':
        group_index = 2  # group by year within one platform
    else:
        raise ValueError("indicator must be 'year' or 'platform'")
    totals = {}
    for element in L:
        key = element[group_index]
        totals[key] = totals.get(key, 0) + element[5]
    L1 = sorted(totals)
    L2 = [totals[k] for k in L1]
    return L1, L2
def prepare_pie(genres_list):
    '''
    Split genre aggregate tuples (output of get_genre_data) into parallel
    label/value lists for a pie chart, ordered by (genre, global sales)
    descending.

    Fixed: the body referenced ``genre_list`` while the parameter is named
    ``genres_list``, raising NameError on every call.
    '''
    pairs = [(entry[0], entry[6]) for entry in genres_list]
    pairs.sort(key=itemgetter(0, 1), reverse=True)
    labels = [genre for genre, _ in pairs]
    values = [sales for _, sales in pairs]
    return labels, values
def plot_global_sales(x,y,indicator, value):
    '''
    This function plots the global sales per year or platform.
    parameters:
        x: list of platforms or years sorted in ascending order
        y: list of global sales that corresponds to x
        indicator: "year" or "platform" (selects title/axis labels)
        value: the year (int) or platform name (str) being plotted
    Returns: None

    Uses pylab's implicit global figure; show() blocks until closed.
    '''
    if indicator == 'year':
        pylab.title("Video Game Global Sales in {}".format(value))
        pylab.xlabel("Platform")
    elif indicator == 'platform':
        pylab.title("Video Game Global Sales for {}".format(value))
        pylab.xlabel("Year")
    pylab.ylabel("Total copies sold (millions)")
    pylab.bar(x, y)
    pylab.show()
def plot_genre_pie(genre, values, year):
    '''
    This function plots the global sales per genre in a year.
    parameters:
        genre: list of genre labels matching the order of values
        values: list of global sales (one slice per genre)
        year: the year of the genre data (int)
    Returns: None

    Slice percentages are rendered with one decimal place (autopct).
    '''
    pylab.pie(values, labels=genre,autopct='%1.1f%%')
    pylab.title("Video Games Sales per Genre in {}".format(year))
    pylab.show()
def main():
    # Interactive menu loop over the parsed sales data.
    # NOTE(review): only option 1 is implemented; options 2-4 fall through.
    #open the file
    fp = open_file()
    #Read the file
    D1_new, D2_new, D3_new = read_file(fp)
    # Menu options for the program
    MENU = '''Menu options
    1) View data by year
    2) View data by platform
    3) View yearly regional sales by genre
    4) View sales by publisher
    5) Quit
    Enter choice: '''
    choice = input(MENU)
    while choice != '5':
        #Option 1: Display all platforms for a single year
        if choice == '1':
            try:
                year_input = input('Please Enter a Year (int): ')
                c_value = int(year_input)
                indicator = 'year'
                # call function to collect data
                L = get_data_by_column(D1_new, indicator, c_value)
                # call function to print data
                display_global_sales_data(L, indicator)
                ask = input("Would you like to plot the Data? (y or n): ")
                if ask == 'y':
                    # NOTE(review): this unpacks the first two record tuples,
                    # not axis lists -- presumably get_totals(L, indicator)
                    # was intended to produce (labels, totals); confirm.
                    x,y = L
                    plot_global_sales(x,y,indicator, c_value)
            #if the list of platforms for a single year is empty, show an error message
            except ValueError:
                print("Invalid year.")
        #Option 4: Display publisher data
        # Enter keyword for the publisher name
        # search all publisher with the keyword
        # NOTE(review): match is never populated, so the branch below always
        # takes the else path; the keyword search is unimplemented.
        match = []
        # print the number of matches found with the keywords
        if len(match) > 1:
            print("There are {} publisher(s) with the requested keyword!".format(len(match)))
            for i,t in enumerate(match):
                print("{:<4d}{}".format(i,t[0]))
            # PROMPT USER FOR INDEX
        else:
            index = 0
        choice = input(MENU)
    print("\nThanks for using the program!")
    print("I'll leave you with this: \"All your base are belong to us!\"")
main() | true |
61aaf18606157db5dfdd7e806313cbcce3f9db77 | Python | junhao69535/pycookbook | /chapter8/change_display_of_obj.py | UTF-8 | 1,020 | 4.59375 | 5 | [] | no_license | #!coding=utf-8
"""
改变对象的字符串显示
"""
# 想改变一个实例的字符串表示,可重新定义它的__str__()和__repr__()方法
class Pair(object):
    """Pair of values demonstrating custom __repr__/__str__ formatting."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        # Code-like form, e.g. Pair(3, 4); !r formats each field with repr().
        return 'Pair({0.x!r}, {0.y!r})'.format(self)
    def __str__(self):
        # Human-readable form, e.g. (3, 4); !s formats each field with str().
        return '({0.x!s}, {0.y!s})'.format(self)
# __repr__() 方法返回一个实例的代码表示形式,通常用来重新构造这个实例。 内置的
# repr() 函数返回这个字符串,跟我们使用交互式解释器显示的值是一样的。 __str__()
# 方法将实例转换为一个字符串,使用 str() 或 print() 函数会输出这个字符串。
p = Pair(3, 4)
print repr(p)
print p
# 我们在这里还演示了在格式化的时候怎样使用不同的字符串表现形式。 特别来讲,!r 格式化代码
# 指明输出使用 __repr__() 来代替默认的 __str__()
p = Pair(3, 4)
print 'p is {0!r}'.format(p)
print 'p is {0}'.format(p) | true |
143975cf115e01fa7d88606fe2f575dbb4889b8a | Python | velenk/Python-Reptile | /urllib_basic/urlparse&urlunparse.py | UTF-8 | 445 | 2.9375 | 3 | [] | no_license | from urllib.parse import urlparse, urlunparse
result = urlparse('http://www.baidu.com/index.html;user?id=5#comment')
print(type(result),result)
#scheme://netloc/path;params?query#fragment
result = urlparse('www.baidu.com/index.html;user?id=5#comment', \
scheme = 'https',allow_fragments = False)
print(result[4], result.query)
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
print(urlunparse(data)) | true |
482c25aabb358c7f4bc8345eaac755228bb5e6dc | Python | Dinesh101041/Face-detection | /main.py | UTF-8 | 791 | 2.671875 | 3 | [] | no_license | from cv2 import cv2
# readint the image
orgimage=cv2.imread('./images/wc.jpg')
# covert image into a gray scale
grayimg=cv2.cvtColor(orgimage, cv2.COLOR_BGR2GRAY)
# load the viola-jones classifier-object detection framework
face_cascade = cv2.CascadeClassifier('./classifier\haarcascade_frontalface_alt.xml')
# mulitiscale() - to get a image as arugument to classify image
face_detct=face_cascade.detectMultiScale(grayimg)
# draw a rectangle amomg images
for (column,row,width,height) in face_detct:
cv2.rectangle(
orgimage,
(column,row),
(column + width, row + height),
(0,255,0),
2
)
# displayin image
cv2.imshow('image',orgimage)
# wait until key stroke to close the image
cv2.waitKey(0)
# closing the image
cv2.destroyAllWindows()
| true |
2895124db85659bff8f0a88a3791e0df804a66de | Python | nilesh05apr/MachineLearning | /SingleLayerPerceptron.py | UTF-8 | 816 | 3.484375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
def frand():
    """Draw one sample from the standard normal distribution N(0, 1)."""
    return np.random.normal(loc=0.0, scale=1.0)
def sigmoid(x):
    """Logistic sigmoid: maps any real input into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
class Perceptron:
    """Single neuron: weighted sum of inputs plus a bias, squashed by sigmoid."""

    def __init__(self, inputs, bias=1.0):
        self.bias = bias
        # One extra weight slot for the bias input.  As in the original,
        # only the first *inputs* weights are randomised; the bias weight
        # stays 0.0 until set_weight() is called.
        self.weights = np.zeros(inputs + 1)
        for i in range(inputs):
            self.weights[i] = frand()

    def run(self, X):
        """Return sigmoid(weights . [X, bias]).

        Fixed: the original called ``X.append(self.bias)``, mutating the
        caller's list on every invocation (so repeated calls kept growing
        the same input).  A fresh augmented array is built instead.
        """
        augmented = np.append(np.asarray(X, dtype=float), self.bias)
        s = np.dot(self.weights, augmented)
        return sigmoid(s)

    def set_weight(self, w_init):
        """Replace the weight vector (length inputs + 1, bias weight last)."""
        self.weights = w_init
self.weights = w_init
print("\n\n And Gate:\n")
nn = Perceptron(2)
nn.set_weight(np.array([10.0,10.0,-15]))
print("0 And 0: {}".format(nn.run([0.0,0.0])))
print("0 And 1: {}".format(nn.run([0.0,1.0])))
print("1 And 0: {}".format(nn.run([1.0,0.0])))
print("1 And 1: {}".format(nn.run([1.0,1.0]))) | true |
8ed20f75881694681e5321eed9104e1b92b15f73 | Python | LDCAgency/easywaylyrics_public | /02-Sourcecode/03-Reference/echonestsyncprint/freqanalysistest.py | UTF-8 | 1,028 | 2.828125 | 3 | [
"MIT"
] | permissive | __author__ = 'paulo.rodenas'
from pylab import plot, show, title, xlabel, ylabel, subplot, savefig
from scipy import fft, arange, ifft
from numpy import sin, linspace, pi
from scipy.io.wavfile import read,write
def plotSpectru(y,Fs):
    # Plot the single-sided amplitude spectrum of signal *y* sampled at *Fs* Hz.
    # Python 2 only: print statements and integer division (n/Fs, n/2) below.
    n = len(y) # signal length
    k = arange(n)
    T = n/Fs   # total duration in seconds (integer division under Python 2)
    frq = k/T # two sides frequency range
    print len(frq), len(y)
    frq = frq[range(n/2)] # one side frequency range
    # y = y[range(n/2)]
    print len(frq),len(y)
    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n/2)]  # keep only the positive-frequency half
    plot(frq,abs(Y),'go') # plotting the spectrum
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')
Fs = 11025 # sampling rate
rate,data = read('/Users/paulo.rodenas/workspaceIdea/easywaylyrics/05-Sourcecode/03-Reference/echonestsyncprint/music/JudasBeMyGuideComparedToMic.wav')
# data = data/(2.**15)
y=data[:11025/10]
lungime=len(y)
timp=len(y)/11025.
t=linspace(0,timp,len(y))
subplot(2,1,1)
plot(t,y)
xlabel('Time')
ylabel('Amplitude')
subplot(2,1,2)
plotSpectru(y,Fs)
show()
| true |
02c08b3814edcdffe5e515273e2137de523f1a42 | Python | clejae/forland_repo | /StatisticalAnalysis/08_cropping_frequency.py | UTF-8 | 4,139 | 2.671875 | 3 | [] | no_license | #
# github Repo: https://github.com/clejae
# ------------------------------------------ LOAD PACKAGES ---------------------------------------------------#
import os
import time
import pandas as pd
## CJs Repo
import general
# ------------------------------------------ DEFINE FUNCTIONS ------------------------------------------------#
def convertSequnceStrToList(str):
    """Translate an underscore-separated crop-code string (e.g. '1_2_9')
    into a hyphen-separated string of crop abbreviations (e.g. 'MA-WW-WB').

    Note: the parameter shadows the builtin ``str`` (kept for interface
    compatibility); codes 12 and 14 both map to 'LE', 30 and 255 to 'FA'.
    """
    code_to_abbrev = {1 : 'MA',
                      2 : 'WW',
                      3 : 'SB',
                      4 : 'OR',
                      5 : 'PO',
                      6 : 'SC',
                      7 : 'TR',
                      9 : 'WB',
                      10: 'RY',
                      12: 'LE',
                      13: 'GR',
                      14: 'LE',
                      60: 'VE',
                      30: 'FA',
                      80: 'UN',
                      70: 'MC',
                      99: 'OT',
                      255: 'FA'
                      }
    return '-'.join(code_to_abbrev[int(code)] for code in str.split('_'))
def croppingFrequency(seq, ct):
    """Return the fraction of entries in the '-'-separated crop sequence
    *seq* that equal crop type *ct* (0.0 when it never occurs)."""
    crops = seq.split('-')
    return crops.count(ct) / len(crops)
# ------------------------------------------ START TIME ------------------------------------------------------#
stime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
# ------------------------------------------ USER VARIABLES ------------------------------------------------#
wd = r'\\141.20.140.91\SAN_Projects\FORLand\Clemens\\'
# ------------------------------------------ LOAD DATA & PROCESSING ------------------------------------------#
os.chdir(wd)
bl_lst = ['BB']
ct_lst = ['MA','WW','SB','OR','PO','SC','TR','WB','RY','LE','GR','LE','VE','FA','UN','MC','OT']
bl_dict = {'BB':['2005-2011','2008-2014','2012-2018'], #
'SA':['2008-2014','2012-2018'], #,'2012-2018'
'BV':['2005-2011','2008-2014','2012-2018'], #,'2012-2018'
'LS':['2012-2018']}
## columns of output list
col_lst = ['federal state','period']
for ct in ct_lst:
col_lst.append('TotFreq_{}'.format(ct))
col_lst.append('PlaFreq_{}'.format(ct))
## output list for main statistics
out_lst = [col_lst]
## loop over federal states
for bl in bl_lst:
print(bl)
## get available periods of federal states
per_lst = bl_dict[bl]
## loop over periods
for per in per_lst:
print(bl, per)
## list for main stats of bl-per combinations
## append federal state and period
sub_lst = []
sub_lst.append(bl)
sub_lst.append(per)
## read df
pth = r'data\tables\FarmSize-CSTs\{0}_{1}_sequences_farm-size.csv'.format(bl, per)
df = pd.read_csv(pth)
df['Sequence'] = df['Sequence'].map(convertSequnceStrToList)
## for all crop types calculate the cropping frequency
## do this row wise and then calc mean over all rows
for ct in ct_lst:
df['Freq_{}'.format(ct)] = df['Sequence'].apply(croppingFrequency, args=(ct,))
## mean frequency over all fields
total_freq = df['Freq_{}'.format(ct)].mean()
sub_lst.append(total_freq)
## mean frequency over field where the current crop type actually occurs
plant_freq = df['Freq_{}'.format(ct)][df['Freq_{}'.format(ct)] != 0.0].mean()
sub_lst.append(plant_freq)
## save df with frequencys
pth = r'data\tables\FarmSize-CSTs\{0}_{1}_sequences_freq.csv'.format(bl, per)
df.to_csv(pth, index=False)
## save main stats of current bl-per combination in out list
out_lst.append(sub_lst)
print(bl, 'done!')
## save out list to csv
general.writeListToCSV(out_lst, out_pth=r"data\tables\FarmSize-CSTs\frequencies_main_stats2.txt")
# ------------------------------------------ END TIME --------------------------------------------------------#
etime = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
print("start: " + stime)
print("end: " + etime)
# ------------------------------------------ UNUSED BUT USEFUL CODE SNIPPETS ---------------------------------#
| true |
3bdd35ad1d91f5441a65472ab43fcdbba0dca6b3 | Python | dsalexan/genetic-test | /main.py | UTF-8 | 2,038 | 2.84375 | 3 | [] | no_license | import random
import sys
def avalia_senha(indiv, senha):
    """Fitness: count the positions where candidate *indiv* matches the
    target password *senha* (comparison stops at the shorter of the two)."""
    return sum(1 for cand, target in zip(indiv, senha) if cand == target)
def mutacao(ind, probMut, opcoes):
    """Mutate *ind* in place: each gene is replaced, with probability
    *probMut*, by a uniformly random element of *opcoes*.  Returns *ind*."""
    for pos in range(len(ind)):
        roll = random.uniform(0, 1)
        if roll < probMut:
            ind[pos], = random.sample(opcoes, 1)
    return ind
def cruzamento(ind1, ind2):
    """One-point crossover: copy *ind1*, then splice in *ind2*'s tail from
    a random cut point in [1, len-1].  Degenerate parents (length < 2 or
    mismatched lengths) return an unmodified copy of *ind1*."""
    child = list(ind1)
    if len(ind1) >= 2 and len(ind1) == len(ind2):
        cut = random.sample(range(1, len(ind1)), 1)[0]
        child[cut:] = ind2[cut:]
    return child
def torneio(aptidao, tamanho):
    # Run two selection tournaments of size *tamanho* over the fitness list
    # and return the indices of the two winners; the second tournament
    # samples from the pool with the first winner removed.
    # NOTE(review): winners are chosen with min(fit), i.e. LOWEST fitness
    # wins, while ga() treats higher aptidao as better (sorts descending,
    # stops at nDim matches).  Confirm whether max(fit) was intended.
    id_compet = list(range(len(aptidao)))
    competidores = random.sample(id_compet, tamanho)
    fit = [aptidao[idx] for idx in competidores]
    v1 = competidores[fit.index(min(fit))]
    id_compet.remove(v1)
    competidores = random.sample(id_compet, tamanho)
    fit = [aptidao[idx] for idx in competidores]
    v2 = competidores[fit.index(min(fit))]
    return v1, v2
def ga(fun, senha, nDim, opcoes, tamPop, tamTorneio, probMut, porcCr, nGeracoes):
    """Genetic algorithm that evolves a string of length *nDim* over the
    alphabet *opcoes* toward *senha*, scored by fitness function *fun*
    (higher is better; nDim means a perfect match and stops the search).

    Returns the best individual found, joined into a string.

    Fixed: survivor selection previously wrote the reordered fitness and
    population back into the same lists it was still reading from
    (``pop[idx] = pop[ordem[idx]]``), so slots already overwritten could be
    copied again, duplicating individuals and corrupting fitness values.
    The reordered lists are now built fresh before truncation.
    """
    pop = [[random.sample(opcoes, 1)[0] for _ in range(nDim)] for _ in range(tamPop)]
    aptidao = [fun(indiv, senha) for indiv in pop]
    for _ in range(nGeracoes):
        # Produce offspring via tournament selection, crossover, mutation.
        for _ in range(int(tamPop * porcCr)):
            v1, v2 = torneio(aptidao, tamTorneio)
            filho = mutacao(cruzamento(pop[v1], pop[v2]), probMut, opcoes)
            pop.append(filho)
            aptidao.append(fun(filho, senha))
        # Keep the tamPop fittest individuals (elitist truncation).
        ordem = sorted(range(len(aptidao)), key=lambda k: aptidao[k], reverse=True)
        pop = [pop[i] for i in ordem[:tamPop]]
        aptidao = [aptidao[i] for i in ordem[:tamPop]]
        if aptidao[0] == nDim:
            break
    return ''.join(pop[0])
SEED = 1
random.seed(SEED)
opcoes = "abcdefghijklmnopqrstuvwxyz "
| true |
3f22d69c1764b75d3c231540e98a3df61d166e20 | Python | yangwudi398/DS5999-Final-Project | /Data/Codes/adjust_table_format.py | UTF-8 | 3,058 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Adjust the format of the database columns
import re
import sqlite3
import pandas as pd
# Import database
conn = sqlite3.connect("../nytimes.db")
df_2018 = pd.read_sql("SELECT * FROM middle_east_2018", conn)
df_2017 = pd.read_sql("SELECT * FROM middle_east_2017", conn)
df_2016 = pd.read_sql("SELECT * FROM middle_east_2016", conn)
df_2015 = pd.read_sql("SELECT * FROM middle_east_2015", conn)
df_2014 = pd.read_sql("SELECT * FROM middle_east_2014", conn)
df_2013 = pd.read_sql("SELECT * FROM middle_east_2013", conn)
df_2012 = pd.read_sql("SELECT * FROM middle_east_2012", conn)
df_2011 = pd.read_sql("SELECT * FROM middle_east_2011", conn)
# Change date to include only year, month, and day
def date_format(date):
    """Keep only the 'YYYY-MM-DD' prefix (first 10 chars) of a timestamp string."""
    return date[:10]
df_2018["date"] = df_2018["date"].apply(date_format)
df_2017["date"] = df_2017["date"].apply(date_format)
df_2016["date"] = df_2016["date"].apply(date_format)
df_2015["date"] = df_2015["date"].apply(date_format)
df_2014["date"] = df_2014["date"].apply(date_format)
df_2013["date"] = df_2013["date"].apply(date_format)
df_2012["date"] = df_2012["date"].apply(date_format)
df_2011["date"] = df_2011["date"].apply(date_format)
# Split a list and seperate it again by "\n"
def split_list(lst):
    """Convert a stringified Python list like "['a', 'b']" into a
    newline-separated string "a\\nb".

    Improved: uses str.join instead of concatenating with a trailing
    separator and trimming it off.
    NOTE(review): the parse splits on ", " and strips one character from
    each end of every item, so items containing commas or quotes will be
    mangled -- consider ast.literal_eval if the data allows it.
    """
    items = [item[1:-1] for item in lst[1:-1].split(", ")]
    return "\n".join(items)
# Apply split_list to titles
df_2018["titles"] = df_2018["titles"].apply(split_list)
df_2017["titles"] = df_2017["titles"].apply(split_list)
df_2016["titles"] = df_2016["titles"].apply(split_list)
df_2015["titles"] = df_2015["titles"].apply(split_list)
df_2014["titles"] = df_2014["titles"].apply(split_list)
df_2013["titles"] = df_2013["titles"].apply(split_list)
df_2012["titles"] = df_2012["titles"].apply(split_list)
df_2011["titles"] = df_2011["titles"].apply(split_list)
# Apply split_list to urls
df_2018["urls"] = df_2018["urls"].apply(split_list)
df_2017["urls"] = df_2017["urls"].apply(split_list)
df_2016["urls"] = df_2016["urls"].apply(split_list)
df_2015["urls"] = df_2015["urls"].apply(split_list)
df_2014["urls"] = df_2014["urls"].apply(split_list)
df_2013["urls"] = df_2013["urls"].apply(split_list)
df_2012["urls"] = df_2012["urls"].apply(split_list)
df_2011["urls"] = df_2011["urls"].apply(split_list)
# Export the adjusted database
df_2018.to_sql("middle_east_2018", conn, if_exists="replace", index=False)
df_2017.to_sql("middle_east_2017", conn, if_exists="replace", index=False)
df_2016.to_sql("middle_east_2016", conn, if_exists="replace", index=False)
df_2015.to_sql("middle_east_2015", conn, if_exists="replace", index=False)
df_2014.to_sql("middle_east_2014", conn, if_exists="replace", index=False)
df_2013.to_sql("middle_east_2013", conn, if_exists="replace", index=False)
df_2012.to_sql("middle_east_2012", conn, if_exists="replace", index=False)
df_2011.to_sql("middle_east_2011", conn, if_exists="replace", index=False)
# Close the connection
conn.close()
| true |
b973021f3cd0fa00cd23a78d96e9af02a77e79b9 | Python | m-dz/BayesOptJournalClub | /blackbox.py | UTF-8 | 3,086 | 3.109375 | 3 | [
"MIT"
] | permissive | """Model black box functions to be optimised."""
import numpy as np
import pandas as pd
import altair as alt
import gpflow
DTYPE = float
DOMAIN_MIN = -1
DOMAIN_MAX = 1
DOMAIN = DOMAIN_MIN, DOMAIN_MAX
COLOURSCHEME = 'redyellowgreen'
class GPBlackBox:
    """Black box function to be optimised, drawn from a Gaussian process.

    Each call evaluates the function at new points (with observation noise)
    and folds the observations back into the underlying GP model.

    Fixed: ``_plot_xy_2`` encoded the chart's y channel with ``alt.X``; the
    Altair y channel must use ``alt.Y``.
    """

    def __init__(self, ndim=1):
        # Prior: Matern-3/2 plus a linear trend; observation noise sd = 0.3.
        self.kernel = gpflow.kernels.Matern32() + gpflow.kernels.Linear(variance=.4**2)
        self.noise_variance = .3**2
        # Seed with a single data point at the origin with value 0.
        self.x = np.zeros((1, ndim), dtype=DTYPE)
        self.y = np.zeros((1, 1), dtype=DTYPE)
        self._update_model()

    def _update_model(self):
        # Rebuild the GP regression model from the accumulated observations.
        self.model = gpflow.models.GPR(self.xy, kernel=self.kernel, noise_variance=self.noise_variance)

    def xgrid(self, num):
        """Return an evaluation grid over the domain: (num, 1) in 1-D or
        (num**2, 2) in 2-D."""
        if 1 == self.ndim:
            return np.linspace(DOMAIN_MIN, DOMAIN_MAX, num).reshape(num, 1)
        if 2 == self.ndim:
            xx = np.linspace(DOMAIN_MIN, DOMAIN_MAX, num).reshape(num, 1)
            xx0, xx1 = np.meshgrid(xx, xx)
            return np.asarray([np.ravel(xx0), np.ravel(xx1)]).T
        raise ValueError(f'Cannot create x-grid when x has dimensions {self.ndim} > 2')

    @property
    def ndim(self):
        """Input dimensionality, inferred from the stored observations."""
        return self.x.shape[-1]

    @property
    def xy(self):
        """The accumulated observations as an (x, y) tuple for gpflow."""
        return (self.x, self.y)

    def data(self, x=None, y=None):
        """Observations (defaults to the stored ones) as a DataFrame with
        columns x0..x{ndim-1} and y."""
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        return (pd.DataFrame(x)
                .rename(columns=dict((i, f'x{i}') for i in range(self.ndim)))
                .assign(y=y))

    def __call__(self, x):
        """Evaluate the black box at point(s) *x* (within the domain),
        record the noisy observations and return them."""
        x = np.asarray(x).astype(DTYPE)
        if x.ndim < 2:
            x = x.reshape((-1, self.ndim))
        assert x.shape[-1] == self.ndim
        assert DOMAIN_MIN <= x.min()
        assert x.max() <= DOMAIN_MAX
        mean, var = self.model.predict_y(x)
        y = np.random.normal(loc=mean, scale=np.sqrt(var))
        self.x = np.concatenate((self.x, x))
        self.y = np.concatenate((self.y, y))
        self._update_model()
        return y

    def sample_f(self, num):
        """Draw one latent-function sample over an evaluation grid; returns
        a DataFrame with the grid columns and an 'f' column."""
        xx = self.xgrid(num)
        f = self.model.predict_f_samples(xx).numpy()
        return self.data(xx, f).rename(columns={'y': 'f'})

    def plot_xy(self):
        """Scatter plot of the observations (dispatches on dimensionality)."""
        if 1 == self.ndim:
            return self._plot_xy_1()
        if 2 == self.ndim:
            return self._plot_xy_2()
        raise ValueError(f'Cannot plot x-y when x has dimensions {self.ndim} > 2')

    def _plot_xy_1(self):
        return (
            alt.Chart(self.data())
            .mark_circle(size=60)
            .encode(x=alt.X('x0:Q', scale=alt.Scale(domain=DOMAIN)), y='y'))

    def _plot_xy_2(self):
        # Fixed: the y channel previously used alt.X.
        return (
            alt.Chart(self.data())
            .mark_circle(size=60, stroke='black', strokeWidth=1)
            .encode(x=alt.X('x0:Q', scale=alt.Scale(domain=DOMAIN)),
                    y=alt.Y('x1:Q', scale=alt.Scale(domain=DOMAIN)),
                    color=alt.Color('y:Q', scale=alt.Scale(scheme=COLOURSCHEME, domainMid=0))))
| true |
bd819d4858650c68fce2a1f13f82341d4310b5a9 | Python | jacob-sadollahi/minimal-object-storage | /tests/utils.py | UTF-8 | 1,707 | 2.5625 | 3 | [] | no_license | import json
import string
import random
import urllib.request as req
from urllib.error import HTTPError, URLError
def send_request(url, method, body=None, headers=None):
    """Issue an HTTP GET or POST with a JSON body and return (result, status).

    `result` is the decoded JSON payload when the response parses as JSON,
    otherwise the raw response text; `status` is the HTTP status code
    (404 for unreachable URLs). Raises ValueError for unsupported methods.

    Bug fix: on a JSONDecodeError the original called `response.read()` a
    second time, which returns b'' on an already-consumed response, so any
    non-JSON success body was silently replaced with an empty string.
    The body is now read exactly once.
    """
    if body is None:
        body = {}
    req_client = req.Request(url)
    req_client.add_header('Content-Type', 'application/json; charset=utf-8')
    try:
        # additional headers
        if headers:
            for header_key, header_value in headers.items():
                req_client.add_header(header_key, header_value)
        if method == "post":
            json_data = json.dumps(body)
            json_data_as_bytes = json_data.encode('utf-8')  # needs to be bytes
            req_client.add_header('Content-Length', str(len(json_data_as_bytes)))
            response = req.urlopen(req_client, json_data_as_bytes)
        elif method == "get":
            response = req.urlopen(req_client)
        else:
            raise ValueError('This method currently not supported.')
        raw = response.read()  # read once; a second read() would return b''
        try:
            result = json.loads(raw)
        except json.decoder.JSONDecodeError:
            result = raw.decode()
        code = response.getcode()
    except HTTPError as e:
        # 500/400 bodies are treated as plain text; other errors as JSON.
        if e.code in [500, 400]:
            result = e.read().decode()
        else:
            result = json.loads(e.read())
        code = e.code
    except URLError:
        result = "Url is not correct"
        code = 404
    return result, code
class S3Response:
    """Canned response fixtures mimicking S3-style bucket-creation replies."""
    # bucket name already taken
    res_409 = {"message": "BucketAlreadyExists", "error": 409}
    # bucket name fails validation
    res_422 = {"message": "InvalidArgumentsInName", "error": 422}
    # account bucket quota exceeded
    res_400 = {"message": "TooManyBuckets", "error": 400}
    # successful creation payload
    res_200 = {"body": {"data": {"status": "200", "result": "created"}}}
| true |
d12f5fffab17940d611037cd04bb60d8aa4d3a77 | Python | nekobean/pystyle | /perform-face-recognition-with-python/perform-face-recognition-with-python.py | UTF-8 | 1,891 | 3.28125 | 3 | [
"MIT"
] | permissive | import face_recognition
import matplotlib.pyplot as plt
# Load the stored reference images of known people.
known_face_imgs = [
    face_recognition.load_image_file(path)
    for path in ["known-face_01.png", "known-face_02.png", "known-face_03.png"]
]

# Load the image of the person to be verified.
face_img_to_check = face_recognition.load_image_file("face_to_check.png")

# Detect the face regions in every image (HOG-based detector).
known_face_locs = [
    face_recognition.face_locations(img, model="hog") for img in known_face_imgs
]
face_loc_to_check = face_recognition.face_locations(face_img_to_check, model="hog")
# Draw the detected face locations on an image for visual inspection.
def draw_face_locations(img, locations):
    """Show *img* with a red rectangle around each (top, right, bottom, left) box.

    Fixes: the loop used enumerate() but never used the index, and the figure
    handle from plt.subplots() was unused; both are removed.
    """
    _, ax = plt.subplots()
    ax.imshow(img)
    ax.set_axis_off()
    for top, right, bottom, left in locations:
        w, h = right - left, bottom - top
        ax.add_patch(plt.Rectangle((left, top), w, h, ec="r", lw=2, fill=None))
    plt.show()
# Visualize each detection result.
for img, loc in zip(known_face_imgs, known_face_locs):
    draw_face_locations(img, loc)
draw_face_locations(face_img_to_check, face_loc_to_check)

# Extract a face encoding from each detected region
# (exactly one face is expected per image; unpacking enforces that).
known_face_encodings = []
for img, loc in zip(known_face_imgs, known_face_locs):
    [encoding] = face_recognition.face_encodings(img, loc)
    known_face_encodings.append(encoding)
[face_encoding_to_check] = face_recognition.face_encodings(
    face_img_to_check, face_loc_to_check
)

# Match the probe encoding against every known encoding.
matches = face_recognition.compare_faces(known_face_encodings, face_encoding_to_check)
print(matches)  # e.g. [True, False, False]
# Show the distance to each reference image (smaller = more similar).
dists = face_recognition.face_distance(known_face_encodings, face_encoding_to_check)
print(dists)
| true |
f10a4579b91b6b829abcc0195edd334492169c79 | Python | RhafaelS/EstudosPython | /archives.py | UTF-8 | 144 | 2.984375 | 3 | [] | no_license | import json
numbers = [2, 3, 4, 5, 6, 7, 8, 9, 10]
filename = 'numbers.json'
# Serialize the list to a JSON file so other scripts can reload it later.
with open(filename, 'w') as out_file:
    json.dump(numbers, out_file)
b0216b06c0373863b479ea261a202e5f5cbd0a99 | Python | mohsenabedelaal/holbertonschool-python | /0x04-python-more_data_structures/12-roman_to_int.py | UTF-8 | 544 | 3.78125 | 4 | [] | no_license | #!/usr/bin/python3
def roman_to_int(roman_number):
    """Convert a Roman-numeral string to its integer value.

    Returns 0 for non-string input (including None); raises KeyError on
    characters outside I, V, X, L, C, D, M (unchanged from the original).

    Cleanup: the original guarded with `isinstance(...) and ... is not None`,
    but isinstance(None, str) is already False, so the None check was
    redundant; the nesting is flattened into a guard clause.
    """
    rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    if not isinstance(roman_number, str):
        return 0
    int_val = 0
    for i, symbol in enumerate(roman_number):
        value = rom_val[symbol]
        if i > 0 and value > rom_val[roman_number[i - 1]]:
            # Subtractive pair (e.g. IV): the previous symbol was already
            # added once, so compensate by subtracting it twice.
            int_val += value - 2 * rom_val[roman_number[i - 1]]
        else:
            int_val += value
    return int_val
| true |
bfa2c9f7a452bbb27d44aa67e889636722b2e408 | Python | vishrutkmr7/DailyPracticeProblemsDIP | /2019/11 November/dp11272019.py | UTF-8 | 720 | 4.5 | 4 | [
"MIT"
] | permissive | # This problem was recently asked by LinkedIn:
# Given two rectangles, find the area of intersection.
class Rectangle:
    """Axis-aligned rectangle given by its lower-left (min_x, min_y) and
    upper-right (max_x, max_y) corners; all coordinates default to 0."""

    def __init__(self, min_x=0, min_y=0, max_x=0, max_y=0):
        self.min_x, self.min_y = min_x, min_y
        self.max_x, self.max_y = max_x, max_y
def intersection_area(rect1, rect2):
    """Return the overlap area of two axis-aligned rectangles.

    dx/dy are the overlap extents: "min of the maxes" minus "max of the
    mins" along each axis.

    Bug fix: the original fell off the end (returning None implicitly) when
    the rectangles do not overlap; the correct area of no overlap is 0.
    """
    dx = min(rect1.max_x, rect2.max_x) - max(rect1.min_x, rect2.min_x)
    dy = min(rect1.max_y, rect2.max_y) - max(rect1.min_y, rect2.min_y)
    if dx >= 0 and dy >= 0:
        return dx * dy
    return 0
# ASCII sketch: B = rect2, A = rect1, X = their overlap region.
# BBB
# AXXB
# AAA
rect1, rect2 = Rectangle(0, 0, 3, 2), Rectangle(1, 1, 3, 3)
print(intersection_area(rect1, rect2))
# expected output: 2
| true |
245af9096f568a4694ca694ed0273b11acded834 | Python | Amandine-2021/pythonFlaskLibrary | /FlaskLibraryWebsite/validationHelper.py | UTF-8 | 1,539 | 3.078125 | 3 | [] | no_license | # filename: validationHelper.py
# Final project CSC217-Python FlaskLibrary
# Amandine Velamala
import re
from flask import flash
def validateISBN(isbn):
    """Validate *isbn* against an ISBN-10/13 pattern (optional hyphens).

    Flashes a user-facing error and returns False on mismatch, True otherwise.

    Fixes: the pattern is now a raw string (``'\\d'`` in a normal string is an
    invalid escape sequence — a DeprecationWarning today and an error in
    future Python), and the dead commented-out alternative regex is removed.
    """
    pattern = re.compile(r'^(978-?|979-?)?\d{1,5}-?\d{1,7}-?\d{1,6}-?\d{1,3}$')
    if pattern.match(str(isbn)) is None:
        flash('Invalid isbn number', category='error')
        return False
    return True
def validateLength(input, field, maxLength):
    """Return True when *input* is non-empty and at most *maxLength* chars.

    Otherwise flashes a user-facing error naming *field* and returns False.
    The parameter name ``input`` shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    if not input:  # idiomatic emptiness test instead of len(x) < 1
        flash(f'{field} field should not be empty', category='error')
        return False
    if len(input) > maxLength:
        flash(f'{field} field should be less than {maxLength} characters', category='error')
        return False
    return True
def validateField(fieldType, fieldData):
    """Validate *fieldData* against the length rule for *fieldType*.

    Unknown field types validate as True (there is no rule to violate).

    Bug fix: the original 'description' branch called validateLength but
    discarded its return value, so an over-long description always passed
    validation. A dispatch table replaces the if/elif chain and the result
    is now returned for every known field type.
    """
    rules = {
        'title': ('Title', 200),
        'author_first_name': ('Author first name', 30),
        'author_last_name': ('Author last name', 30),
        'genre': ('Genre', 30),
        'publisher': ('Publisher', 80),
        'description': ('Description', 1000),
    }
    rule = rules.get(fieldType)
    if rule is None:
        return True
    label, max_length = rule
    return validateLength(fieldData, label, max_length)
| true |
b6f1bb026915f7a71f2f92df7808ebf8953d5c58 | Python | violet-zct/fairseq-dro-mnmt | /fairseq/optim/lr_scheduler/step_lr_scheduler.py | UTF-8 | 2,804 | 2.859375 | 3 | [
"MIT"
] | permissive | from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('step')
class StepScheduler(FairseqLRScheduler):
    """Decays the learning rate of each parameter group by gamma every step_size updates.

    Optionally warms the learning rate up linearly from ``warmup_init_lr`` to
    ``lr[0]`` over the first ``warmup_updates`` updates, then applies
    step decay: ``lr[0] * gamma ** (num_updates // step_size)``.
    """
    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # This scheduler supports only a single base learning rate.
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with step.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        warmup_end_lr = args.lr[0]
        if args.warmup_updates < 0:
            raise ValueError('warm up steps cannot be negative.')
        elif args.warmup_updates == 0:
            # No warmup: warmup_init_lr must still be the sentinel (< 0);
            # it is overwritten with the final LR so step_update works.
            assert args.warmup_init_lr < 0
            args.warmup_init_lr = warmup_end_lr
        else:
            # Warmup enabled: the starting LR must be below the target LR;
            # a negative sentinel means "start from 0".
            assert args.warmup_init_lr < warmup_end_lr
            if args.warmup_init_lr < 0:
                args.warmup_init_lr = 0
        # linearly warmup for the first args.warmup_updates
        if args.warmup_updates > 0:
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
        else:
            self.lr_step = 0
        # Then, decay by gamma every step_size updates
        self.gamma = args.lr_decay_rate
        self.step_size = args.lr_decay_steps
        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)
    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=1000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--lr-decay-rate', default=0.1, type=float, metavar='DR')
        parser.add_argument('--lr-decay-steps', default=10000, type=int, metavar='DS')
    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()
    def get_cur_lr(self, num_updates):
        """Post-warmup LR: base LR decayed by gamma once per step_size updates."""
        counts = num_updates // self.step_size
        return self.args.lr[0] * (self.gamma ** counts)
    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        # Linear warmup while num_updates <= warmup_updates, step decay after.
        if num_updates <= self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates*self.lr_step
        else:
            self.lr = self.get_cur_lr(num_updates)
        self.optimizer.set_lr(self.lr)
        return self.lr
| true |
7c3455074cf24ccbe1356d9a9928c7aa8604557f | Python | browlm13/neural_nets | /minimalist_DNN_v1.py | UTF-8 | 3,890 | 2.9375 | 3 | [] | no_license | """
Minimalist DNN
"""
import numpy as np
# define cost (half squared-error objective and its gradient)
def objective(Y, Y_hat):
    """Elementwise half squared error: 0.5 * (Y - Y_hat) ** 2."""
    return 0.5 * (Y - Y_hat) ** 2


def objective_grad_dY_hat(Y, Y_hat):
    """Gradient of `objective` with respect to Y_hat: Y_hat - Y."""
    return Y_hat - Y
# forward pass
def feed_forward(X, Ws):
    """Propagate X through every weight matrix.

    Returns the list of layer activations [A0, ..., A_last]; the first entry
    is X.T and the last entry is the network output Y_hat.
    """
    activations = [X.T]
    for weight in Ws:
        activations.append(weight @ activations[-1])
    return activations
# predict class labels
def predict(X, Ws):
    """Return the predicted class index for each sample (argmax over outputs)."""
    network_output = feed_forward(X, Ws)[-1]  # A[-1] is Y_hat
    return np.argmax(network_output, axis=0)
def mse(Y, Y_hat):
    """Mean squared error between targets and predictions."""
    return np.mean(np.square(Y_hat - Y))
from sklearn.metrics import accuracy_score
def accuracy(Y, Y_hat):
    """Classification accuracy: fraction of samples whose argmax class matches.

    Y and Y_hat are (n_classes, n_samples) arrays; the predicted/true class
    is the argmax over axis 0. Computed directly with numpy — equivalent to
    sklearn's accuracy_score on the two 1-D label arrays, without requiring
    sklearn for such a trivial computation.
    """
    Y_pred = np.argmax(Y_hat, axis=0)
    Y_true = np.argmax(Y, axis=0)
    return np.mean(Y_true == Y_pred)
# back propigate
def back_propigate( X, Y, Ws, As ):
    """One gradient-descent step: compute sensitivities and weight gradients
    from the forward activations `As`, and return the updated weight list.

    NOTE(review): reads the module-level global `L` (number of layers), uses
    a fixed learning rate eta = 0.2, and contains blocks the author marked
    "# ERROR". In particular `Vs[-2] = Vs[-1]` skips the usual W.T @ V
    backprop step for the last layer — verify the math before relying on it.
    """
    # get Y_hat
    Y_hat = As[-1]
    # print( objective(Y, Y_hat) )
    # print("\nY_hat:")
    # print(Y_hat.shape)
    # print("\nX.T:")
    # print(X.T.shape)
    # print("\nAs:")
    # for A in As:
    # print(A.shape)
    # print("\nWs:")
    # for W in Ws:
    # print(W.shape)
    # initialize sensitivities list (one slot per activation)
    Vs = [0]*(L+1)
    #for A in As:
    # Vs.append(np.zeros((1,1)))
    # calculate final sensitivity
    V_final = objective_grad_dY_hat(Y, Y_hat) # Y_hat-Y # add * dphi(Y_hat)
    Vs[-1] = V_final #objective_grad_dY_hat(Y, Y_hat) # Y_hat-Y # add * dphi(Y_hat)
    # # ERROR
    # # calculate second to last sensitivity
    # Vs[-2] = Ws[-1].T @ Vs[-1] # no bias terms to remove
    # # calculate remaining sensitivities (must remove biases)
    # for i in range(L-2,-1,-1):
    # Vs[i] = Ws[i+1].T @ Vs[i+1]
    # # ERROR
    # calculate second to last sensitivity
    Vs[-2] = Vs[-1] #Ws[-1].T @ Vs[-1] # no bias terms to remove
    # calculate remaining sensitivities (must remove biases)
    for i in range(L-2,-1,-1):
        Vs[i] = Ws[i+1].T @ Vs[i+1]
    # # display sensitivities
    # print("\nVs:")
    # for V in Vs:
    # print(V.shape)
    # initialize W_grads (one gradient per weight matrix)
    W_grads = [0]*L
    # calculate final W gradient
    W_grad_n = Vs[-1] @ As[-2].T # no bias to remove As[-2] is A before Y_hat
    W_grads[-1] = W_grad_n
    # calculate remaining gradients
    #for i in range(L-1,0,-1):
    for i in range(L-1):
        W_grad = Vs[i] @ As[i].T
        W_grads[i] = W_grad
    #[TODO] regularize weights that are not bias terms
    # print("\nW_grads:")
    # for grad in W_grads:
    # print(grad.shape)
    # update Ws with plain gradient descent
    eta = 0.2
    Updated_Ws = []
    for W, W_grad in zip(Ws, W_grads):
        # update equation
        W_updated = W - eta*W_grad
        Updated_Ws += [ W_updated ]
    # return updated Ws
    return Updated_Ws
# train network
def train(X, Y, Ws, epochs):
    """Run `epochs` forward/backward passes, printing accuracy every 20 epochs."""
    report_every = 20
    for epoch in range(epochs):
        activations = feed_forward(X, Ws)
        Ws = back_propigate(X, Y, Ws, activations)
        if epoch % report_every == 0:
            Y_hat = feed_forward(X, Ws)[-1]
            print(accuracy(Y, Y_hat))
# multiply list of matrices W[0] @ W[1] @ ... @ W[-1]
def mm_list(m_list):
    """Left-to-right matrix product of every matrix in m_list."""
    product = m_list[0]
    for factor in m_list[1:]:
        product = product @ factor
    return product
# take transpose of all matrices in list and return list
def mT_list(m_list):
    """Return a new list with the transpose of each matrix in m_list.

    (Converted from a lambda assignment — PEP 8 E731 — to a named def.)
    """
    return [M.T for M in m_list]
#
# testing -- smoke run on random data (executes at import time)
#
#
# create random test data
#
# create random input samples and their classes
n_samples, n_features, n_classes = 7, 15, 5
# create inputs (one row per sample)
X = np.random.rand(n_samples, n_features) # Inputs
# create their classes as one-hot rows
# NOTE(review): with n_samples > n_classes, np.eye leaves the last rows
# all-zero (samples with no class) -- verify this is intended.
Y = np.eye(n_samples, n_classes).T
np.random.shuffle(Y.T)
# get random matrices given shapes
n_hidden = 5
# W[0], ..., W[L-1]
shapes = [ (n_hidden, n_features), (n_hidden, n_hidden), (n_classes, n_hidden) ]
get_Ws = lambda shapes: [ np.random.rand(*s) for s in shapes ]
Ws = get_Ws( shapes )
# NOTE: the module-level global L is read by back_propigate above;
# renaming or removing it would break training.
L = len( Ws )
# train
epochs = 200
train(X, Y, Ws, epochs)
| true |
def fact(n: int)->int:
    """Return n! for n >= 0.

    Bug fix: the original base case was `n == 1`, so fact(0) recursed
    forever (RecursionError); 0! is defined as 1, handled by `n <= 1`.
    """
    if n <= 1:
        return 1
    return n * fact(n-1)
def triangular_relationship(N: int, K: int)->int:
    """Count ordered triples (a, b, c) from 1..N related to divisibility by K.

    NOTE(review): appears to be AtCoder ABC108-C "Triangular Relationship";
    verify the enumeration against the problem statement before reuse.
    """
    res = 0
    if K % 2 == 1:
        # Count multiples of K up to N, split by parity.
        odd, even = 0, 0
        for k in range(K, N+1, K):
            if k % 2 == 0:
                even += 1
            else:
                odd += 1
        res = sum(d*d*d for d in [odd, even])
        # All six mixed ordered products; together with the two cubes above
        # this telescopes to (odd + even) ** 3.
        res += odd*odd*even
        res += odd*even*odd
        res += even*odd*odd
        res += even*even*odd
        res += even*odd*even
        res += odd*even*even
    else:
        # Multiples of K/2 up to N: leftK are full multiples of K,
        # leftK2 are congruent to K/2 (mod K).
        leftK, leftK2 = 0, 0
        for k in range(K//2, N+1, K//2):
            if k % K == 0:
                leftK += 1
            else:
                leftK2 += 1
        res = sum(d*d*d for d in [leftK, leftK2])
    return res
if __name__ == "__main__":
    # Read "N K" from standard input and print the triple count.
    n_value, k_value = map(int, input().split())
    print(triangular_relationship(n_value, k_value))
| true |
57e8ebb6e3a9516de4e28f4ad6446d20dc0c47f1 | Python | zbathen/Puns-master | /NearHomophones/Materials/matchPairID.py | UTF-8 | 941 | 2.625 | 3 | [] | no_license | import sys, string, re
# match word pairs in the incorrectly labeled byCondition file
# to the correct word pair IDs
# (Python 2 script: uses print statements)
# dictionary mapping "m1,word" -> canonical pair ID, built from wordPairs.csv
idDict = dict()
f = open("wordPairs.csv", "r")
firstline = 0
for l in f:
    l = l.replace("\n", "")
    if firstline == 0:
        # skip the header row
        firstline = 1
    else:
        toks = l.split(",")
        m1 = toks[5]      # first member of the pair
        word = toks[6]    # second member of the pair
        pairID = toks[0]  # canonical pair ID
        idDict[m1 + "," + word] = pairID
f_incorrect = open("wordPairs_byCondition_exp2.csv", "r")
firstline = 0
for l in f_incorrect:
    l = l.replace("\n", "")
    if firstline == 0:
        # header row: echo it unchanged
        print l
        firstline = 1
    else:
        # NOTE(review): at this point `toks` still holds the tokens of the
        # PREVIOUS line (or the last line of wordPairs.csv on the first
        # iteration), so origPairID is stale; it is also unused because the
        # fallback print at the bottom is commented out.
        origPairID = toks[0]
        #print origPairID
        toks = l.split(",")
        m1 = toks[5]
        word = toks[6]
        pairID = idDict[m1 + "," + word]
        print ",".join(toks[0:8]) + "," + pairID
        #else:
        #print ",".join(toks[0:8]) + "," + origPairID
| true |
89858963f758c19da9cae8c6695c904954e30f09 | Python | finde/NLP1Emoticon | /Code/TrainingData.py | UTF-8 | 8,922 | 3.09375 | 3 | [] | no_license | from __future__ import division
import json
import argparse
import re
from string import punctuation
import nltk
import time
import DataPoint
import numpy as np
''' Returns normalized feature dictionary
All data will be rescaled so each feature has range value [0.1,0.9]'''
def get_normalized_feature_dictionary(feature_dict):
    """Rescale every feature's value list into the range [0.1, 0.9].

    A constant feature (max == min) maps every value to 0.1, since the
    zero denominator is replaced by 1.
    """
    normalized = {}
    for name, values in feature_dict.items():
        high = max(values)
        low = min(values)
        span = high - low
        if span == 0:
            span = 1
        normalized[name] = [((0.9 - 0.1) * (v - low) / span) + 0.1 for v in values]
    return normalized
class TrainingData:
    """Wraps a list of DataPoint objects and extracts per-datapoint features.

    Extraction is driven by `feature_dictionary`, which maps each feature
    name to the numeric ID understood by `count_feature`.
    (Python 2 code: note the print statements.)
    """
    def __init__(self, data_points, selected_features=None):
        # NOTE(review): `selected_features` is accepted but unused -- the
        # commented-out filtering below was never finished.
        self.data_points = data_points
        self.feature_dictionary = {"words": 1,
                                   "negative_words": 2,
                                   "positive_words": 3,
                                   "positive_words_hashtags": 4,
                                   "negative_words_hashtags": 5,
                                   "uppercase_words": 6,
                                   "special_punctuation": 7,
                                   "adjectives": 8}
        # if selected_features is not None:
        #     temp = {}
        #     for
        #     for f in self.feature_dictionary:
    # ####################
    # Basic class funcs #
    # ####################
    def get_training_points(self):
        """Return the wrapped list of DataPoint objects."""
        return self.data_points
    def print_data(self):
        """Print every data point via its own print_data_point()."""
        for each in self.data_points:
            each.print_data_point()
    ############################
    # Feature extraction funcs #
    ############################
    def count_words(self):
        """Word count per data point (feature ID 'words')."""
        return self.count_feature(self.feature_dictionary['words'])
    #### Might be changed to a matrix if it's hard to work with!!
    # Returns a dictionary containing the values corresponding to all
    # the features for all the datapoints.
    def get_feature_dictionary(self):
        d = {}
        for feature in self.feature_dictionary:
            #print 'result for feature ', feature, ': \n', self.count_feature(self.feature_dictionary[feature])
            d[feature] = self.count_feature(self.feature_dictionary[feature])
            print ' == ', feature, ':', d[feature]
        return d
    ''' Returns the feature values for all features for each datapoint
    so 1 vector with all the feature values for 1 datapoint'''
    def get_feature_matrix(self):
        # Normalize per feature, then transpose into one row per data point.
        feature_dict = get_normalized_feature_dictionary(self.get_feature_dictionary())
        feat_matrix = [[d[i] for d in feature_dict.values()] for i in range(0, len(self.data_points))]
        return feat_matrix
    def get_unnormalize_feature_matrix(self):
        """Raw (un-normalized) feature dictionary."""
        return self.get_feature_dictionary()
    ''' Returns the label vector '''
    def get_label_vector(self):
        return [each.get_class_label() for each in self.get_training_points()]
    ########################################
    # Help functions for eature extraction #
    ########################################
    def count_feature(self, feature):
        """Dispatch a numeric feature ID to the matching DataPoint counter."""
        if feature == self.feature_dictionary['words']:
            return [each.count_words() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['positive_words']:
            return [each.count_positive_words() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['negative_words']:
            return [each.count_negative_words() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['positive_words_hashtags']:
            return [each.count_positive_words_in_hashtags() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['negative_words_hashtags']:
            return [each.count_negative_words_in_hashtags() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['uppercase_words']:
            return [each.count_uppercase_words() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['special_punctuation']:
            return [each.count_special_punctuation() for each in self.get_training_points()]
        elif feature == self.feature_dictionary['adjectives']:
            # if adjectives, then show progress bar, because it is so slooow
            output = []
            for each in self.get_training_points():
                output.append(each.count_adjectives())
            return output
            # one-liner
            # return [each.count_adjectives() for each in self.get_training_points()]
        else:
            return ['unknown feature, bro! :( Give me another one!']
if __name__ == "__main__":
    # Command line arguments (all optional; demo defaults are used otherwise)
    parser = argparse.ArgumentParser(description="Run simulation")
    parser.add_argument('-text', metavar='The text of the data point', type=str)
    parser.add_argument('-hashtags', metavar='The text of the data point', type=list)
    parser.add_argument('-class', metavar='The class label of the data point (-1, 0, 1)', type=int)
    args = parser.parse_args()
    # If arguments are passed to the command line, assign them.
    # Otherwise, use some standart ones.
    if (vars(args)['text'] is not None):
        data_string1 = vars(args)['text']
    else:
        data_string1 = "What's going on if I Happily try to do this SAD thing?!"
    if (vars(args)['hashtags'] is not None):
        hashtags1 = vars(args)['hashtags']
    else:
        hashtags1 = ["#FeelingProductive", "#LifeIsSoAwesome", "#NLPSUCKS", "#sohappy"]
    if (vars(args)['class'] is not None):
        data_class1 = vars(args)['class']
    else:
        data_class1 = 1
    # A second, hard-coded data point for the demo run.
    data_string2 = "This is a second AWesOme example and i LOVE it?!"
    hashtags2 = ["#ProjectBecomesAnnoying", "#MeSoSleepy", "#suicidemood", "#totallyhungry"]
    data_class2 = -1
    # Construct a data point:
    data_point1 = DataPoint.DataPoint(data_string1, hashtags1, data_class1)
    data_point2 = DataPoint.DataPoint(data_string2, hashtags2, data_class2)
    data = [data_point1, data_point2]
    training_data = TrainingData(data)
    # Do some random shit to make sure things work :)
    print "This is your first data point: \n "
    data_point1.print_data_point()
    # print data_point1.get_data_string()
    print "This is your second data point: \n "
    data_point2.print_data_point()
    # print data_point2.get_data_string()
    print "number of words: \n ", training_data.count_words()
    print "feature dictionary: \n ", training_data.get_feature_dictionary()
    feature_dict = training_data.get_feature_dictionary()
    feat_matrix = training_data.get_feature_matrix() #[[d[i] for d in feature_dict.values()] for i in range(0, len(feature_dict['adjectives']))]
    print 'feat_matrix', feat_matrix
    '''
    print "This is your data splitted: \n ", data_point.split_sentence()
    print "This is your data without punctuation: \n ", data_point.get_sentence_without_punctuation()
    print "The word count is: ", data_point.count_words()
    print "The # of ? and ! is: ", data_point.count_special_punctuation()
    print "Number of positive words: ", data_point.count_positive_words()
    print "Number of negative words: ", data_point.count_negative_words()
    print "Number of uppercase words: ", data_point.count_uppercase_words()
    print "These are your hashtags: \n ", data_point.get_hashtags()
    print "These are your lowercase hashtags: \n ", data_point.get_lowercase_hashtags()
    #### TODO: printing the matching pos/neg words in hashtags shows that e.g. suck and sucks are found.
    #### That's not cool, because they correspond to the same word in the hashtag.
    #### If only the longer one is counted then: in "#suckyweather #lifesucks" only one of them
    #### will be found, when it's two bad words. But if we keep counting both, we count twice
    #### the same word as in the example below... Sooo... Needs some fix
    print "Number of positive words in hashtags: \n ", data_point.count_positive_words_in_hashtags()
    print "Number of negative words in hashtags: \n ", data_point.count_negative_words_in_hashtags()
    print "This is your data tagged in a misterious way: \n ", data_point.pos_tag_data_string()
    print "Number of adjectives (JJ): ", data_point.count_adjectives()
    # Example for counting more than one part of speech:
    print "Number of adjectives (JJ) and adverbs (RB): ", data_point.count_multiple_types_in_tags(['JJ', 'RB'])
    '''
| true |
36eedf3b7309069c65cadac76e62fd1b7d70496a | Python | asishraz/banka_sir_notes | /ch_1/one.py | UTF-8 | 126 | 3.796875 | 4 | [] | no_license | #write a program to input two numbers and print their sum
# Read two integers from standard input and print their sum.
first = int(input())
second = int(input())
total = first + second
print("sum equals: ", total)
c91979acbe7e6416465dda398b533d64de65fde2 | Python | WANG-Guangxin/leetcode | /python code/86_partition.py | UTF-8 | 953 | 3.796875 | 4 | [] | no_license | # 给你一个链表和一个特定值 x ,请你对链表进行分隔,使得所有小于 x 的节点都出现在大于或等于 x 的节点之前。
# You must preserve the original relative order of the nodes in each partition.
#
# Example:
# Input:  head = 1->4->3->2->5->2, x = 3
# Output: 1->2->2->4->3->5
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def partition(self, head: ListNode, x: int) -> ListNode:
        """Stable partition of a linked list around x: every node with
        val < x precedes every node with val >= x, preserving the original
        relative order inside each group."""
        if head == None: return None
        # Dummy heads for the two sub-lists (their stored values are unused).
        less_head = ListNode(head.val)
        less_tail = less_head
        greater_head = ListNode(head.val)
        greater_tail = greater_head
        node = head
        while node:
            if node.val < x:
                less_tail.next = node
                less_tail = less_tail.next
            else:
                greater_tail.next = node
                greater_tail = greater_tail.next
            node = node.next
        # Splice the "< x" list in front of the ">= x" list; terminate the tail.
        less_tail.next = greater_head.next
        greater_tail.next = None
        return less_head.next
0612e7a00e4e5088f780b72107d027432d29dfe3 | Python | primrose101/CS322 | /finite_state_machines/keywords.py | UTF-8 | 9,532 | 3.515625 | 4 | [
"MIT"
] | permissive | """
string_input : String -> the text to be lexed
index : int -> the current position within string_input during lexing
"""
def kwstart_fsm(string_input, index):
    """Table-driven recognizer for the keyword "START" beginning at *index*.

    Returns the number of characters consumed before the machine halted
    (either by rejecting a character or reaching end of input).

    Fix: the original if/elif chain tested `== 'T'` twice, so the branch
    assigning input class 4 was unreachable dead code; the transition table
    already accepts START without it, so the dead branch is removed and the
    chain is replaced with a lookup table.
    """
    table = [
        [1, 6, 6, 6, 6, 6],
        [6, 2, 6, 6, 6, 6],
        [6, 6, 3, 6, 6, 6],
        [6, 6, 6, 4, 6, 6],
        [6, 5, 6, 6, 6, 6],
        [6, 6, 6, 6, 6, 6],
        [6, 6, 6, 6, 6, 6],
    ]
    # Character classes; any other character is class 5 (reject).
    char_class = {'S': 0, 'T': 1, 'A': 2, 'R': 3}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 5)]
        if state == 6:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwstop_fsm(string_input, index):
    """Table-driven recognizer for the keyword "STOP" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 5, 5, 5, 5],
        [5, 2, 5, 5, 5],
        [5, 5, 3, 5, 5],
        [5, 5, 5, 4, 5],
        [5, 5, 5, 5, 5],
        [5, 5, 5, 5, 5],
    ]
    # Character classes; any other character is class 4 (reject).
    char_class = {'S': 0, 'T': 1, 'O': 2, 'P': 3}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 4)]
        if state == 5:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwinteger_fsm(string_input, index):
    """Table-driven recognizer for the keyword "INT" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 4, 4, 4],
        [4, 2, 4, 4],
        [4, 4, 3, 4],
        [4, 4, 4, 4],
        [4, 4, 4, 4],
    ]
    # Character classes; any other character is class 3 (reject).
    char_class = {'I': 0, 'N': 1, 'T': 2}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 3)]
        if state == 4:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwstring_fsm(string_input, index):
    """Table-driven recognizer for the keyword "STRING" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 7, 7, 7, 7, 7, 7],
        [7, 2, 7, 7, 7, 7, 7],
        [7, 7, 3, 7, 7, 7, 7],
        [7, 7, 7, 4, 7, 7, 7],
        [7, 7, 7, 7, 5, 7, 7],
        [7, 7, 7, 7, 7, 6, 7],
        [7, 7, 7, 7, 7, 7, 7],
        [7, 7, 7, 7, 7, 7, 7],
    ]
    # Character classes; any other character is class 6 (reject).
    char_class = {'S': 0, 'T': 1, 'R': 2, 'I': 3, 'N': 4, 'G': 5}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 6)]
        if state == 7:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwfloat_fsm(string_input, index):
    """Table-driven recognizer for the keyword "FLOAT" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 6, 6, 6, 6, 6],
        [6, 2, 6, 6, 6, 6],
        [6, 6, 3, 6, 6, 6],
        [6, 6, 6, 4, 6, 6],
        [6, 6, 6, 6, 5, 6],
        [6, 6, 6, 6, 6, 6],
        [6, 6, 6, 6, 6, 6],
    ]
    # Character classes; any other character is class 5 (reject).
    char_class = {'F': 0, 'L': 1, 'O': 2, 'A': 3, 'T': 4}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 5)]
        if state == 6:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwboolean_fsm(string_input, index):
    """Table-driven recognizer beginning at *index*.

    NOTE: despite the name, the character classes and 4-step accept path
    recognize the literal "BOOL", not "BOOLEAN".
    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 5, 5, 5],
        [5, 2, 5, 5],
        [5, 3, 5, 5],
        [5, 5, 4, 5],
        [5, 5, 5, 5],
        [5, 5, 5, 5],
    ]
    # Character classes; any other character is class 3 (reject).
    char_class = {'B': 0, 'O': 1, 'L': 2}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 3)]
        if state == 5:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwinput_fsm(string_input, index):
    """Table-driven recognizer for the keyword "INPUT" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 6, 6, 6, 6, 6],
        [6, 2, 6, 6, 6, 6],
        [6, 6, 3, 6, 6, 6],
        [6, 6, 6, 4, 6, 6],
        [6, 6, 6, 6, 5, 6],
        [6, 6, 6, 6, 6, 6],
        [6, 6, 6, 6, 6, 6],
    ]
    # Character classes; any other character is class 5 (reject).
    char_class = {'I': 0, 'N': 1, 'P': 2, 'U': 3, 'T': 4}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 5)]
        if state == 6:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwoutput_fsm(string_input, index):
    """Table-driven recognizer for the keyword "OUTPUT" beginning at *index*.

    Returns the number of characters consumed before the machine halted.

    Fix: the original if/elif chain tested `== 'U'` and `== 'T'` twice each,
    so the branches assigning input classes 4 and 5 were unreachable dead
    code; the transition table already accepts OUTPUT via classes 1 and 2,
    so the dead branches are removed and the chain becomes a lookup table.
    """
    table = [
        [1, 7, 7, 7, 7, 7, 7],
        [7, 2, 7, 7, 7, 7, 7],
        [7, 7, 3, 7, 7, 7, 7],
        [7, 7, 7, 4, 7, 7, 7],
        [7, 5, 7, 7, 7, 7, 7],
        [7, 7, 6, 7, 7, 7, 7],
        [7, 7, 7, 7, 7, 7, 7],
        [7, 7, 7, 7, 7, 7, 7],
    ]
    # Character classes; any other character is class 6 (reject).
    char_class = {'O': 0, 'U': 1, 'T': 2, 'P': 3}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 6)]
        if state == 7:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwchar_fsm(string_input, index):
    """Table-driven recognizer for the keyword "CHAR" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 5, 5, 5, 5],
        [5, 2, 5, 5, 5],
        [5, 5, 3, 5, 5],
        [5, 5, 5, 4, 5],
        [5, 5, 5, 5, 5],
        [5, 5, 5, 5, 5],
    ]
    # Character classes; any other character is class 4 (reject).
    char_class = {'C': 0, 'H': 1, 'A': 2, 'R': 3}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 4)]
        if state == 5:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwvar_fsm(string_input, index):
    """Table-driven recognizer for the keyword "VAR" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 4, 4, 4],
        [4, 2, 4, 4],
        [4, 4, 3, 4],
        [4, 4, 4, 4],
        [4, 4, 4, 4],
    ]
    # Character classes; any other character is class 3 (reject).
    char_class = {'V': 0, 'A': 1, 'R': 2}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 3)]
        if state == 4:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwas_fsm(string_input, index):
    """Table-driven recognizer for the keyword "AS" beginning at *index*.

    Returns the number of characters consumed before the machine halted.
    """
    table = [
        [1, 3, 3],
        [3, 2, 3],
        [3, 3, 3],
        [3, 3, 3],
    ]
    # Character classes; any other character is class 2 (reject).
    char_class = {'A': 0, 'S': 1}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 2)]
        if state == 3:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwcolon_fsm(string_input, index):
    """Table-driven recognizer for a single ':' beginning at *index*.

    Returns the number of characters consumed (0 or 1).
    """
    table = [
        [1, 2],
        [2, 2],
        [2, 2],
    ]
    # Character classes; any other character is class 1 (reject).
    char_class = {':': 0}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 1)]
        if state == 2:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
def kwcomma_fsm(string_input, index):
    """Table-driven recognizer for a single ',' beginning at *index*.

    Returns the number of characters consumed (0 or 1).
    """
    table = [
        [1, 2],
        [2, 2],
        [2, 2],
    ]
    # Character classes; any other character is class 1 (reject).
    char_class = {',': 0}
    state = 0
    pos = index
    length = len(string_input)
    while pos != length:
        state = table[state][char_class.get(string_input[pos], 1)]
        if state == 2:  # dead state: stop consuming
            break
        pos += 1
    return pos - index
| true |
68c535350d16a0bb9c4ff9c90422525e97d309e6 | Python | rakshasa219/pythonnn | /API-week15/flask_2.py | UTF-8 | 1,161 | 2.671875 | 3 | [] | no_license |
from flask import Flask, render_template, request
import requests
import csv
app = Flask(__name__)
def geocode(phone)->str:
    """Query the juhe.cn mobile-lookup API for *phone* and return the
    'result' field of the JSON reply as a string.

    NOTE(review): the API key is hard-coded in source; it should be loaded
    from configuration or an environment variable, not committed.
    """
    params = {'phone': phone, 'key': 'e4c7e9009404fa14d3d26e3a0606f69c'}
    reply = requests.get('http://apis.juhe.cn/mobile/get', params)
    return str(reply.json()['result'])
def geocode1(names):
    """Scan records_a.csv for a line whose fixed-position field matches *names*.

    Returns the full matching line, or None implicitly when nothing matches.
    NOTE(review): the slice x[12:15] assumes the lookup key occupies columns
    12-15 of every line -- confirm against the actual file layout.
    """
    with open('records_a.csv') as data:
        for x in data:
            # Fixed-column field extraction; strip a trailing comma.
            c = str(x[12:15].strip(','))
            b = str(names)
            if b == c:
                return str(x)
@app.route('/search4', methods=['POST'])
def do_search() -> 'html':
    """Handle the lookup form submission and render the results page."""
    phone = request.form['phonenumber']
    lookup_result = geocode(phone)
    return render_template('results.html',
                           the_title='您的查询结果如下:',
                           the_phonenumber=phone,
                           the_results=lookup_result)
@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
    # Landing page: render the phone-number lookup form.
    return render_template('entry.html',
                           the_title='手机号归属地查询网站')
if __name__ == '__main__':
    # Development server only: debug=True enables the reloader/debugger.
    app.run(debug=True)
| true |
66530012b340c74aee3e1f75377339ae336148f3 | Python | hangtran93tk/python-exercises | /20210826/Test.py | UTF-8 | 865 | 3.0625 | 3 | [] | no_license | # def sum(start, end):
# global total
# for i in range(start, end + 1):
# total += i
#
# total = 55
# sum(1,10)
# print(total)
# def plus(a,b):
# return a + b
#
# def multi(a,b):
# return a * b
#
# def devide(a, b):
# return a//b
#
# def double(a):
# return multi(a,2)
#
# def get_something(a):
# return double(a) + double(a)
#
# print(multi(plus(1,2), devide(4,2)))
# print(double(5))
# print(get_something(5))
# def get_something():
# return 5, 10
#
# # _ dummy variable
# x, _ = get_something()
# a, b = get_something()
# a = get_something()
#
# print({x})
# print("choose something :")
#
# m = input("Choose :")
# if m == "S":
# w = input("ABD :")
#
# print(w)
def sum(*num):
    """Return the sum of all positional arguments (0 when none are given).

    NOTE: this deliberately shadows the builtin ``sum``; the name is kept
    for backward compatibility with existing callers.
    """
    total = 0
    # Iterate values directly instead of indexing via range(len(...)).
    for value in num:
        total += value
    return total
# Bug fix: `my_tuple` is not defined anywhere in this file, so calling it
# raised a NameError and aborted the script before the demo below ran.
# my_tuple(2, '2')
result = sum(1, 2, 3)
print(result)
40300ba46e1c9f7018a49ebebd4ac4dcd6fb5c1b | Python | danielx285/Learning-Flask | /Cadastrar_Users/flaskk.py | UTF-8 | 1,154 | 2.53125 | 3 | [] | no_license | # coding: utf-8
import bd
from flask import Flask, abort, render_template, url_for
app = Flask(__name__)
@app.route("/")
@app.route("/home")
def home():
    # Home page, reachable from both "/" and "/home".
    return render_template("home.html")
@app.route("/contato")
def contato():
    # Contact page ("contato" is Portuguese for "contact").
    return render_template("contato.html")
@app.route("/users")
def users():
    # Render a plain <ul> of links to every user's profile page.
    items = []
    for username, user in bd.users.items():
        items.append(
            "<li><a href='/user/%s'>%s</a></li>" \
            % (username, user["name"])
        )
    return "\n".join(['<ul>'] + items + ["</ul>"])
def profile(username):
    # Look up the user by name and build the profile page HTML by hand;
    # 404s when the user does not exist.
    user = bd.users.get(username)
    if user:
        # NOTE(review): values are interpolated without HTML-escaping; fine for
        # trusted local data, unsafe if bd.users ever holds user-supplied text.
        html_code = "<h1>%s</h1>" % user["name"] \
            + "\n<img src='%s' width = '200px' /><br/>" % user["imagen"] \
            + "\ntelefone: %s <br/>" % user["tel"] \
            + "\n<a href='/'>Voltar<a/>"
        return html_code
    else:
        return abort(404, "User not found")
# Register profile() manually instead of via the decorator.
app.add_url_rule('/user/<username>/', view_func=profile, endpoint='user')
@app.route("/about")
def about():
    # Static "about" page.
    return render_template("about.html")
if __name__ == "__main__":
    # Flask development server with debug mode enabled.
    app.run(debug=True)
| true |
5f9f1a7292fb89907861dafd62f766f5e301d576 | Python | jghee/Algorithm_Python | /BaekJoon/Greedy/b_13305.py | UTF-8 | 235 | 2.8125 | 3 | [] | no_license | n = int(input())
# Greedy refuelling: always buy at the cheapest price seen so far.
road = list(map(int, input().split(' ')))
cost = list(map(int, input().split(' ')))
cheapest = cost[0]
total = road[0] * cheapest
for idx in range(1, n - 1):
    cheapest = min(cheapest, cost[idx])
    total += cheapest * road[idx]
print(total)
| true |
3383126ca992e31664561b261a3b33a1295caae2 | Python | bhatiaabhinav/gym-ERSLE | /gym_ERSLE/pyERSEnv/maps/geo_coords_mapper.py | UTF-8 | 593 | 2.53125 | 3 | [] | no_license | SG_MIN_LATITUDE = 1.34
SG_MAX_LATITUDE = 1.35
SG_MIN_LONGITUDE = 1.7  # NOTE(review): Singapore longitudes are ~103.6-104.0; 1.7 looks wrong — confirm
SG_MAX_LONGITUDE = 1.8  # NOTE(review): see longitude note above
class GeoCoordsMapper:
    """Maps between geographic (longitude/latitude) and scene (x/y) coordinates.

    This base class only stores the two bounding boxes; concrete subclasses
    must implement both conversion directions.
    """
    def __init__(self, min_long, max_long, min_lat, max_lat, min_x, max_x, min_y, max_y):
        # Geographic bounding box.
        self.min_long = min_long
        self.max_long = max_long
        self.min_lat = min_lat
        self.max_lat = max_lat
        # Scene-space bounding box.
        self.min_x = min_x
        self.max_x = max_x
        self.min_y = min_y
        self.max_y = max_y

    def convert_to_geo_coords(self, scene_x, scene_y):
        raise NotImplementedError()

    def convert_to_scene_coords(self, geo_x, geo_y):
        raise NotImplementedError()
| true |
c7c6a0ed646305c05563812aeb3771be84cf5103 | Python | Selmentausen/GIDC | /GIDC_v0.1.py | UTF-8 | 2,375 | 2.734375 | 3 | [] | no_license | from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow
from calculate_damage import get_damage_calculations
from mainWindow import Ui_MainWindow
import sys
class GIDC(QMainWindow, Ui_MainWindow):
    """Main window of the Genshin Impact Damage Calculator: reads the form
    fields, validates them, and displays the computed damage numbers."""
    def __init__(self):
        super(GIDC, self).__init__()
        self.setupUi(self)
        self.setWindowTitle('GIDC')
        self.calculateButton.clicked.connect(self.calculate_dmg)
        # Parsed form values, filled by get_data().
        self.data = {}
    def check_talent_multiplier_input(self, text) -> bool:
        # True means the input is INVALID: every ';'-separated token must be
        # made of digits only.
        # NOTE(review): isdigit() rejects decimals like "57.3" even though the
        # commas were normalized to dots above — confirm whether decimal
        # multipliers should be accepted.
        return not all(map(lambda x: x.strip().isdigit(), text.split(';')))
    def get_data(self):
        """Read and validate all form fields into self.data.

        Returns True on success; on failure shows a status-bar message and
        returns False.
        """
        try:
            self.data['char_atk'] = int(self.atkEdit.text())
            # Accept both ',' and '.' as the decimal separator.
            self.data['elem_bonus'] = float(self.elemEdit.text().replace(',', '.'))
            self.data['special_bonus'] = float(self.specialEdit.text().replace(',', '.'))
            self.data['talent_multi'] = self.talentEdit.text().replace(',', '.')
            if self.check_talent_multiplier_input(self.data['talent_multi']):
                raise ValueError
            self.data['char_lvl'] = int(self.characterLevelEdit.text())
            self.data['crit_rate'] = float(self.critRateEdit.text().replace(',', '.'))
            self.data['crit_dmg'] = float(self.critDamageEdit.text().replace(',', '.'))
            self.data['attack_count'] = int(self.attackCountEdit.text())
            self.data['enemy_lvl'] = int(self.enemyLevelEdit.text())
            self.data['enemy_elem_res'] = int(self.enemyElemResEdit.text())
            self.data['enemy_phys_res'] = int(self.enemyPhysResEdit.text())
            self.data['dmg_type'] = self.damageTypeBox.currentText()
            return True
        except ValueError:
            self.statusBar().showMessage('Incorrect input data')
            return False
        # NOTE(review): BaseException is very broad (catches KeyboardInterrupt
        # etc.) — Exception would normally suffice here.
        except BaseException as err:
            self.statusBar().showMessage(str(err))
            return False
    def calculate_dmg(self):
        # Button handler: compute and display the three damage figures.
        if self.get_data():
            single_hit, single_crit, total_dmg = get_damage_calculations(self.data)
            self.sadLabel.setText(f'Single Attack Damage: {single_hit}')
            self.scdLabel.setText(f'Single Crit Damage: {single_crit}')
            self.tadLabel.setText(f'Total Attack Damage: {total_dmg}')
if __name__ == '__main__':
    # Launch the Qt application and block until the window closes.
    app = QApplication(sys.argv)
    gidc = GIDC()
    gidc.show()
    sys.exit(app.exec())
| true |
2720a856b2f9f662d4a3e01dde43164a40792f12 | Python | Kchour/BagVideoExtract | /extractor_module.py | UTF-8 | 2,582 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# -- coding: utf-8 --
# Copyright 2016 Massachusetts Institute of Technology
# I changed a few things for myself - KennyC
"""Extract images from a rosbag. CHANGE FFMPEG SETTINGS BELOW. TO USE INTERPOLATION UPDATE YOUR FFMPEG
"""
import os
import errno
import argparse
import cv2
import rosbag
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import pdb
import subprocess
class Extractor(object):
    # Python 2 / ROS helper: dumps every image frame of one or more rosbags to
    # PNG files, then stitches each frame folder into an .mp4 via ffmpeg.
    def __init__(self,bag_file,image_topic):
        #self.bag_file = bag_file
        #self.output_dir = output_dir
        #self.image_topic = image_topic
        # list[0] is the sequence of bag paths, list[1] the shared image topic.
        self.list = [bag_file,image_topic]
    def __del__(self):
        print "object instance is deleted"
    def extract(self):
        # Process each bag in turn: write frames to ./<bagname>/ and encode video.
        for i in range(len(self.list[0])):
            """Extract a folder of images from a rosbag.
            """
            bag_file = self.list[0][i]
            # Output folder named after the bag (extension ".bag" stripped).
            output_dir = "./"+bag_file[0:-4]+"/"
            image_topic = self.list[1]
            """ Using command line arguments, order matters
            """
            #parser = argparse.ArgumentParser(description="Extract images from a ROS bag.")
            #parser.add_argument("bag_file", help="Input ROS bag.")
            #parser.add_argument("./output/", help="Output directory.")
            #parser.add_argument("image_topic", help="Image topic.")
            #args = parser.parse_args()
            print "Extract images from %s on topic %s into %s" % (bag_file,
                                                          image_topic, output_dir)
            """ Check if output dir exists, if it doesn't then create one"""
            if not os.path.exists(output_dir):
                try:
                    os.makedirs(output_dir)
                except OSError as exc: # Guard against race condition
                    if exc.errno != errno.EEXIST:
                        raise
            bag = rosbag.Bag(bag_file, "r")
            bridge = CvBridge()
            count = 0
            # One PNG per message on the image topic, zero-padded frame numbers.
            for topic, msg, t in bag.read_messages(topics=[image_topic]):
                cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
                cv2.imwrite(os.path.join(output_dir, "frame%06i.png" % count), cv_img)
                print "Wrote image %i" % count
                count += 1
            bag.close()
            print "Saving video..."
            self.save(output_dir,bag_file)
            print "DONE"
        return
    def save(self,output_dir,name):
        """subprocess is not blocking!!!, so ensure it: Popen object has a .wait() method"""
        #image_dir = os.path.abspath(output_dir)
        #cmds = ['ffmpeg', '-r', '25', '-i', output_dir+'frame%06d.png', "-filter", "minterpolate='fps=25'", '-vcodec', 'mpeg4', '-y', name+".mp4"]
        # 15 fps mpeg4 encode; -y overwrites an existing output file.
        cmds = ['ffmpeg', '-r', '15', '-i', output_dir+'frame%06d.png', '-vcodec', 'mpeg4', '-y', name+".mp4"]
        proc = subprocess.Popen(cmds)
        proc.wait()
#if __name__ == '__main__':
# main()
| true |
0f0a4490b6b0ad8a6868ab89a9f4ffeecef24316 | Python | Franktian/leetcode | /isAnagram.py | UTF-8 | 447 | 3.234375 | 3 | [] | no_license | def isAnagram(s, t):
ht_s = {}
ht_t = {}
if len(s) != len(t):
return False
for i in range(len(s)):
if not ht_s.get(s[i]):
ht_s[s[i]] = 1
else:
ht_s[s[i]] += 1
if not ht_t.get(t[i]):
ht_t[t[i]] = 1
else:
ht_t[t[i]] += 1
for key in ht_s:
if ht_s.get(key) != ht_t.get(key):
return False
return True | true |
aab8748be858d923e539b9eb16a658e8544188b1 | Python | jvc9109/advent-code-2020 | /src/day15.py | UTF-8 | 2,092 | 3.375 | 3 | [] | no_license |
def problem_optimized(numbers, max_turns):
    """Memory-game solver (AoC 2020 day 15): prints the last number spoken
    after max_turns turns, seeded with the starting list `numbers`."""
    turn = 1  # NOTE(review): unused — the for-loop below rebinds `turn`
    spoken_numbers = {}  # number -> list of turn indices, newest first
    age = 0  # NOTE(review): unused
    firsts_numbers = len(numbers)
    # Seed turns 0..len(numbers)-1 with the starting numbers.
    for index, number in enumerate(numbers):
        spoken_numbers[number] = [index]
    last_spoken_number = numbers[-1]
    for turn in range(firsts_numbers, max_turns):
        player_will_say = 0  # default when the number was only spoken once
        said_numbers = spoken_numbers.keys()
        if len(spoken_numbers[last_spoken_number]) > 1:
            # "Age" = gap between the two most recent turns it was spoken.
            player_will_say = spoken_numbers[last_spoken_number][0] - spoken_numbers[last_spoken_number][1]
            if len(spoken_numbers[last_spoken_number]) > 1000:
                # Trim the history; only the most recent entries matter.
                spoken_numbers[last_spoken_number] = spoken_numbers[last_spoken_number][:1]
        if player_will_say in said_numbers:
            spoken_numbers[player_will_say].insert(0,turn)
        else:
            spoken_numbers[player_will_say] = [turn]
        last_spoken_number = player_will_say
    print(last_spoken_number)
def problem_1(numbers, max_turns):
    """Original (slower) memory-game solver: prints the final (turn, number)
    pair and returns the full spoken-number history dict."""
    turn = 1
    spoken_numbers = {}  # number -> list of 1-based turn indices, oldest first
    age = 0  # NOTE(review): unused
    firsts_numbers = len(numbers)
    while turn <= max_turns:
        if turn - 1 < firsts_numbers:
            # Seeding phase: speak the starting numbers in order.
            spoken_numbers[numbers[turn-1]] = [turn]
            player_says = (turn, numbers[turn-1])
        else:
            turn_said_last_spoken, last_spoken_number = player_says
            player_will_say = (turn, 0)  # default: number was new last turn
            said_numbers = spoken_numbers.keys()
            if last_spoken_number in said_numbers:
                if len(spoken_numbers[last_spoken_number]) > 1:
                    # Age between the last two utterances of the number.
                    player_will_say = (turn, turn_said_last_spoken - spoken_numbers[last_spoken_number][-2])
            if player_will_say[1] in said_numbers:
                spoken_numbers[player_will_say[1]].append(turn)
            else:
                spoken_numbers[player_will_say[1]] = [turn]
            player_says = player_will_say
        turn += 1
    print(player_says)
    return spoken_numbers
# Load the comma-separated seed numbers and run part 2 (30 million turns).
with open("data/day15.txt") as file:
    starting_numbers = [int(number) for number in file.read().split(',')]
problem_1(starting_numbers, 30000000)
531af6f42cc55d009aca9a55e6138fb3f5e085bc | Python | olaswietlicka/python_learning_scripts | /vol.py | UTF-8 | 162 | 3.390625 | 3 | [] | no_license | import math
def vol(rad):
    """Return the volume of a sphere with the given radius."""
    return (4/3) * math.pi * rad ** 3
# Demo: volume of a sphere of radius 2.
obj_kuli = vol(2)
print(obj_kuli)
c1572b2f22444c58025bed5aadc12f1dcc5478cc | Python | danielcinome/holbertonschool-higher_level_programming | /0x07-python-test_driven_development/0-add_integer.py | UTF-8 | 543 | 4.15625 | 4 | [] | no_license | #!/usr/bin/python3
"""function that adds 2 integers.
Returns an integer: the addition of a and b
a and b must be integers or floats, otherwise raise a TypeError
exception with the message a must be an integer or b must be an integer
"""
def add_integer(a, b=98):
    """Return int(a) + int(b).

    Both arguments must be exactly int or float (bools are rejected since
    their type is bool); otherwise TypeError is raised with the message
    "a must be an integer" / "b must be an integer".
    """
    for label, value in (("a", a), ("b", b)):
        if type(value) not in (int, float):
            raise TypeError(f"{label} must be an integer")
    return int(a) + int(b)
| true |
a690cb569afe5318faa33bf8611bde8ff6125c33 | Python | henryji96/LeetCode-Solutions | /Medium/347.top-k-frequent-elements/top-k-frequent-elements.py | UTF-8 | 632 | 3.15625 | 3 | [] | no_license | from collections import Counter
class Solution(object):
    def topKFrequent(self, nums, k):
        """Return the k most common values in nums, most frequent first."""
        counts = Counter(nums)
        return [value for value, _count in counts.most_common(k)]
from collections import Counter,defaultdict
class Solution(object):
    def topKFrequent(self, nums, k):
        """Bucket-sort approach: group values by frequency, then collect
        buckets from the highest possible frequency downward."""
        if len(nums) < k:
            return []
        freq = Counter(nums)
        bucket = defaultdict(list)
        for key, f in freq.items():
            bucket[f].append(key)
        i = len(nums)  # a value can occur at most len(nums) times
        ans = []
        # fix: guard i > 0 — when k exceeds the number of distinct values the
        # original loop decremented i forever (infinite loop on empty buckets).
        while len(ans) < k and i > 0:
            if bucket[i]:
                ans += bucket[i]
            i -= 1
        return ans
| true |
1de49f6965a65c1774a747e568ff41338b0c0852 | Python | javierfaramayo/intive-test | /Shop.py | UTF-8 | 656 | 3.515625 | 4 | [
"MIT"
] | permissive | from constants import INITIAL_STOCK
class Shop:
    """A bike shop holding a simple stock counter."""

    def __init__(self):
        # Pull the opening stock; in a real system this could come from a
        # database instead of the INITIAL_STOCK constant.
        self.__get_initial_stock()

    def __get_initial_stock(self):
        self.__stock = INITIAL_STOCK

    def get_stock(self):
        """Return the current stock level."""
        return self.__stock

    def increment_stock(self, q):
        """Grow the stock by q units."""
        self.__stock = self.__stock + q

    def decrement_stock(self, q):
        """Shrink the stock by q units."""
        self.__stock = self.__stock - q
| true |
199a780ec9c80412fb3701030380e1bac448654a | Python | PruthviJ19/pruthvijana | /pj3.py | UTF-8 | 591 | 3.75 | 4 | [] | no_license | def pj(colour = "black"):#decalare the parameter
print("i like " + colour)#add stmt to printed
pj("blue")#declare the colours to be exicuted
pj("pink")
pj()
pj("blue") # using default parameter
def jana(languages):
    """Print each entry of the given list of languages."""
    # fix: the loop iterated an undefined name `lang` (NameError) and its only
    # body line was commented out, which made the file an IndentationError.
    for language in languages:
        print(language)
languages = ["telgu","english","hindi"]
jana(languages)
def pj(colour = "black"):
    """Print a liking statement for the given colour (defaults to black)."""
    message = "i like " + colour
    print(message)
# Same demo sequence as above.
pj("blue")
pj("pink")
pj()
pj("blue")
| true |
71d0f22889441f10506af09898a6d9b303796552 | Python | YeltsinZ/Python | /lambda_function.py | UTF-8 | 479 | 4 | 4 | [] | no_license | square = lambda a : a * a
# NOTE(review): assigning lambdas to names (PEP 8 E731) — plain `def` would be
# the conventional form; kept as-is since this is a lambda exercise.
side = lambda a : 4*a  # NOTE(review): 4*a is the PERIMETER of a square, but it is printed as "Area" below — confirm intent
print("Enter the number to perform calculations")
x = int(input())
result1 = square(x)
result2 = side(x)
print("Square of the number :", result1)
print("Area of the square:", result2)  # value is 4*x, i.e. the perimeter, not the area
#Addition of 2 numbers
add = lambda a,b:a+b
print("This is addition of two numbers")
print("Enter the first number:")
i = int(input())
print("Enter the second number:")
j = int(input())
result3 = add(i,j)
print("Result",result3)
57814d6301fc296827979e9fb815b2f0c1b13d49 | Python | dmeleshko/adventofcode | /y2018/day05/test_day05.py | UTF-8 | 318 | 2.53125 | 3 | [] | no_license | from y2018.day05.day05 import part1, part2
def test_part1():
    # Examples from the AoC 2018 day 5 part 1 statement (polymer reduction).
    assert part1(list('aA')) == 0
    assert part1(list('abBA')) == 0
    assert part1(list('abAB')) == 4
    assert part1(list('aabAAB')) == 6
    assert part1(list('dabAcCaCBAcCcaDA')) == 10
def test_part2():
    # Part 2 example: shortest polymer after removing one unit type.
    assert part2('dabAcCaCBAcCcaDA') == 4
| true |
86eb84ca6acd1affdda3b7e76d597805ad93da71 | Python | Min3710/Mypython | /chapter3/ㄷㅌ04.py | UTF-8 | 131 | 3.671875 | 4 | [] | no_license | fh=int(input("화씨를 입력하세요"))
celcius=(fh-32)*5/9  # standard F->C conversion ("celcius" is a misspelling of "celsius")
print("화씨", fh,"도는 섭씨로",celcius,"도입니다.")  # "Fahrenheit X is Y degrees Celsius"
| true |
3713b01aa0aa477a85e162e888c22aaaba1f52b0 | Python | Kalashnikova55/Python_GeekBrains | /Beg.py | UTF-8 | 274 | 3.265625 | 3 | [] | no_license | # Шестая задача
# Implemented as a function
def run(a, b):
    """Return the number of days needed to cover total distance b, starting
    at a per day and improving by 10% each day."""
    days = 0
    covered = 0
    while covered < b:
        covered += a * 1.1 ** days
        days += 1
    return days
| true |
c779faaf130467c87417c1679d478d189a26a006 | Python | joel076/Va- | /PythonProgramingPlayground/PythonProgramingPlayground/PythonProgramingPlayground.py | UTF-8 | 239 | 3.046875 | 3 | [
"MIT"
] | permissive | #You use '#' for comments, ok
#I have no internet rn so I can't look up for loops :c
#Seems as if <var>++ doesn't work since it isn't a number directly but an object, however <var> = <var> + 1 works
x = 0
# Deliberate infinite loop: prints an ever-increasing counter until interrupted.
while True:
    print(x)
    x = x + 1
| true |
e8d69a08696b01fbb3b0b804899366527e0be3c7 | Python | vigilantesculpting/rezyn | /rezyn.py | UTF-8 | 13,003 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
"""Rezyn is a static website generator in Python
"""
# default
import sys
import os
import re
import shutil
import math
import getopt
import subprocess
import random
import errno
import hashlib
# requirements
import datetime
import dateutil.parser
import lxml.html
import lxml.etree
import yaml
import markdown
import bbcode
import pytz
# local copy
import minifycss
import rjsmin
# library code
import nsdict
import solon
# Internal debugging / tracing
LOG = False  # module-level tracing switch, flipped on by setlog()
def log(*args, **kwargs):
    # Write each argument to stderr with no separator, then a newline —
    # but only when tracing is enabled.
    if LOG:
        for arg in args:
            sys.stderr.write(str(arg))
        sys.stderr.write("\n")
solon.LOG=False  # silence the solon library's own tracing by default
#####################################################
def setlog(level):
    # Enable this module's tracing for any positive verbosity, and pass the
    # remaining verbosity down to the solon library.
    if level > 0:
        global LOG
        LOG = True
    # NOTE(review): called even when level == 0 (passing -1) — confirm that
    # solon.setlog treats non-positive levels as "off".
    solon.setlog(level - 1)
def readfile(filename):
    # Slurp the whole file and return its contents.
    with open(filename, "r") as source:
        return source.read()
def writefile(filename, contents):
    # Overwrite the file with the given contents.
    with open(filename, "w") as sink:
        sink.write(contents)
def mkdir(path):
    # makedirs() that tolerates the directory already existing
    # (pre-`exist_ok` idiom, race-safe via the EEXIST check).
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def parsedate(date_str):
    # Normalize a free-form date string to "YYYY/MM/DD HH:MM:SS".
    d = dateutil.parser.parse(date_str)
    return d.strftime('%Y/%m/%d %H:%M:%S')
def splitlines(linelist, separator):
    # Yield sublists of `linelist` delimited by lines that start with
    # `separator`; the separator lines themselves are dropped.  Always yields
    # at least one (possibly empty) sublist.
    start = 0
    for index, line in enumerate(linelist):
        if line.startswith(separator):
            yield linelist[start:index]
            start = index + 1
    yield linelist[start:]
def splitcontent(content, separator):
    # Break a text blob into chunks delimited by lines beginning with
    # `separator`, yielding each chunk as a single string.
    for chunk_lines in splitlines(content.split("\n"), separator):
        yield "\n".join(chunk_lines)
def splitheader(content):
    # Split optional YAML frontmatter from the body.  A valid header looks
    # like "---\n<yaml>\n---\n<body>", so when one exists the first chunk is
    # empty; otherwise the whole document is the body.
    parts = list(splitcontent(content, '---'))
    has_header = len(parts) > 2 and len(parts[0]) == 0
    if has_header:
        return parts[1], "---".join(parts[2:])
    return "", "---".join(parts)
#####################################################
class Rezyn:
    """Drives the whole site build: reads content and templates from the
    source tree into a solon context, renders the templates, and writes the
    generated files to the target directory.  (Python 2 module.)"""
    def __init__(self, config):
        self.solon = solon.Solon(config)
        # NOTE(review): stored but never used — parsebb() builds its own parser.
        self.bbparser = bbcode.Parser()
    def parsebb(self, text):
        # Render BBCode markup to an HTML snippet.
        parser = bbcode.Parser()
        return parser.format(text)
    def parsemd(self, text):
        # Render Markdown to an HTML snippet.
        return markdown.markdown(text)
    def texttohtml(self, ext, text):
        # convert the body text into an html snippet, if it not an html file
        if ext == '.md':
            html = self.parsemd(text)
        elif ext == ".bb":
            html = self.parsebb(text)
        elif ext == ".html":
            html = text
        else:
            raise NoConversion("Do not know how to turn [%s] into HTML" % ext)
        return html
    def readfile(self, filename):
        """Read content and metadata from file into a dictionary."""
        # Each file has a slug, which is pretty much its basename
        path, ext = os.path.splitext(filename)
        dirpath, base = os.path.split(path)
        slug = base
        content = nsdict.NSDict({
            'slug': slug,
        })
        # Read file content.
        filecontent = unicode(readfile(filename), encoding='utf-8')
        # split the yaml frontmatter and body text
        fileheader, filebody = splitheader(filecontent)
        fm = yaml.safe_load(fileheader)
        if fm is not None:
            # it is not an error if no yaml is present, the file simply has no metadata
            content.update(fm)
        # convert the body text into an html snippet, if it not an html file
        text = self.texttohtml(ext.lower(), filebody)
        # create an xml representation of the document
        # we have to add a root element, since the text may or may not have one
        root = lxml.html.fromstring("<div class='filecontent'>" + text + "</div>")
        # find all images, and prepare them for lightbox
        imgs = root.findall(".//img")
        if 'thumbnail' not in content and len(imgs) > 0:
            # if thumbnail was not set, and we have images, set it to the first image
            content['thumbnail'] = imgs[0].attrib["src"]
        # convert the html tree back to text
        text = lxml.html.tostring(root)
        content['content'] = text
        # convert the string date into a raw datetime we can work with
        if 'date' in content:
            datestr = content['date']
            content['date'] = dateutil.parser.parse(datestr)
        # escape any html entitied in the title here:
        #content['title'] = xml.sax.saxutils.escape(content['title'])
        return content
    def readcontent(self, contentpath):
        # Walk the content directory and load every file (except .DS_Store)
        # into the solon context under "content/<relative path>".
        contentpath = os.path.join(self.solon.context.config.srcdir, contentpath)
        log("loading content from [%s]" % contentpath)
        # load everything in the path into env
        for dirName, subdirList, fileList in os.walk(contentpath):
            root = dirName[len(contentpath)+1:]
            for fileName in fileList:
                if fileName == ".DS_Store":
                    continue
                fullpath = os.path.join(dirName, fileName)
                if 0:  # NOTE(review): dead branch kept from an older naming scheme
                    base, ext = os.path.split(fileName)
                    var = os.path.join("content", root, base)
                else:
                    var = os.path.join("content", root, fileName)
                log("adding content ", var)
                filecontent = self.readfile(fullpath)
                # Skip files marked "nopublish" unless publish_all is set.
                if self.solon.context['config/publish_all'] or "nopublish" not in filecontent:
                    log("readcontent: adding content to [%s]" % var)
                    self.solon.context[var] = filecontent
    def readtemplates(self, templatepath, depth = None):
        # Register every file under the template directory with solon; an
        # optional `depth` stops the walk after that many directory levels.
        templatepath = os.path.join(self.solon.context.config.srcdir, templatepath)
        # load everything in the template folder
        for level, (dirName, subdirList, fileList) in enumerate(os.walk(templatepath)):
            root = dirName[len(templatepath)+1:]
            for fileName in fileList:
                fullpath = os.path.join(dirName, fileName)
                var = os.path.join("template", root, fileName)
                log("adding template ", var)
                self.solon.addtemplate(var, readfile(fullpath))
            if depth is not None and level == depth:
                break
    ########################
    ## checksums ###########
    def renamefileswithchecksums(self, targetdir):
        # Cache-bust every .css/.js under targetdir by renaming it to
        # "<base>-<md5>.<ext>"; returns {original relative path: new filename}.
        # split a path into its component parts
        def splitpath(path):
            parts = []
            a = path
            while a:
                a, b = os.path.split(a)
                if b:
                    parts.append(b)
            parts.reverse()
            return parts
        log("renamefileswithchecksums in [%s]" % targetdir)
        targetparts = splitpath(targetdir)
        filekeys = {}
        for dirname, subdirs, filenames in os.walk(targetdir):
            for filename in filenames:
                filepath = os.path.join(dirname, filename)
                base, ext = os.path.splitext(filename)
                if ext.lower() in ('.css', '.js'):
                    checksum = hashlib.md5(open(filepath,'rb').read()).hexdigest()
                    newbase = base + '-' + checksum
                    newfilename = newbase + ext
                    # create the key out of the filepath, but without the leading components of the targetdir
                    key = os.path.join(*splitpath(filepath)[len(targetparts):])
                    # the key translates into the renamed filename
                    filekeys[key] = newfilename
                    newfilepath = os.path.join(dirname, newfilename)
                    shutil.move(filepath, newfilepath)
                    log("renaming file [%s] to [%s], key [%s]" % (filepath, newfilepath, key))
        return filekeys
    ########################
    ## minify ##############
    def minifydir(self, path):
        # Minify every .css and .js file under `path` in place.
        for dirName, subdirList, fileList in os.walk(path):
            for fileName in fileList:
                if fileName == ".DS_Store":
                    continue
                base, ext = os.path.splitext(fileName)
                filename = os.path.join(dirName, fileName)
                if ext.lower() == ".css":
                    mincss = minifycss.minify(readfile(filename))
                    log("minifying css [%s]" % filename)
                    writefile(filename, mincss)
                elif ext.lower() == ".js":
                    minjs = rjsmin._make_jsmin(python_only = True)(readfile(filename))
                    log("minifying js [%s]" % filename)
                    writefile(filename, minjs)
    ########################
    ## output ##############
    def writeoutput(self):
        # Write every rendered output entry to disk, creating directories as
        # needed.  (.iteritems() — Python 2.)
        for filename, content in self.solon.context.output.dict().iteritems():
            path = os.path.join(self.solon.context['config/tgtdir'], self.solon.context['config/tgtsubdir'], filename)
            dirpath, filepath = os.path.split(path)
            mkdir(dirpath)
            log("writing [%s]..." % path)
            writefile(path, content)
    def setup(self):
        # Prepare the build: timezone/date context, wipe + recreate the target
        # directory from the static files, and (in non-debug builds) minify
        # and checksum-rename the web assets.
        # set up timezone
        tz = pytz.timezone(self.solon.context['config/timezone'])
        self.solon.context['config/tz'] = tz
        self.solon.context['config/now'] = datetime.datetime.now(tz)
        self.solon.context['config/current_year'] = self.solon.context['config/now'].year
        targetdir = os.path.join(self.solon.context.config.tgtdir, self.solon.context.config.tgtsubdir)
        staticdir = os.path.join(self.solon.context.config.srcdir, self.solon.context.config.staticdir)
        log("setup sourcedir [%s] -> targetdir [%s]" % (staticdir, targetdir))
        # remove the target directory
        log("removing targetdir [%s]" % targetdir)
        try:
            #if os.path.exists(targetdir):
            #
            shutil.rmtree(targetdir)
        except Exception as e:
            # Best-effort: a missing target directory is fine.
            print "Exception:", e
            pass
        # copy everything from static to the target directory
        log("copy sourcedir [%s] to targetdir [%s]" % (staticdir, targetdir))
        shutil.copytree(staticdir, targetdir)
        if not ("config/debug" in self.solon.context and self.solon.context["config/debug"]):
            # web minify (css and js)
            log("minify web in targtdir [%s]" % targetdir)
            self.minifydir(targetdir)
            # rename each css/js file with its checksum key
            filekeys = self.renamefileswithchecksums(targetdir)
            # make the renamed files available to the template(s)
            self.solon.context['filekeys'] = filekeys
    def process(self):
        # Full build pipeline: setup, load, sort blog posts, render, write.
        setlog(self.solon.context['config/verbose'])
        self.setup()
        #### Read in website content + templates
        self.readcontent(self.solon.context["config/contentdir"])
        self.readtemplates(self.solon.context["config/templatedir"])
        # post process the data
        posts = [self.solon.context['content/blog'][post] for post in self.solon.context['content/blog'].keys()]
        sortedposts = sorted(posts, key=lambda values: values['date'], reverse=True)
        self.solon.context['content/sortedposts'] = sortedposts
        # render the templates
        self.solon.rendertemplate("template/site.tpl")
        self.solon.rendertemplate("template/sitemap.txt")
        self.solon.rendertemplate("template/robots.txt", keepWhitespace=True)
        self.solon.rendertemplate("template/rss.tpl")
        # write the output content to their corresponding output files
        self.writeoutput()
class BaseException(Exception):
    # NOTE(review): shadows Python's built-in BaseException for the rest of the
    # module; a rename (e.g. RezynError) would be safer in a wider refactor.
    def __init__(self, message):
        self.message = message
    def __str__(self):
        return self.message
class NoConversion(BaseException):
    # Raised by Rezyn.texttohtml() for file extensions it cannot convert.
    def __init__(self, message):
        BaseException.__init__(self, message)
def processargs(argv):
    """Parse command-line options, load the YAML config file, and fold the
    command-line settings into the returned NSDict config."""
    # Defaults, overridable from the command line.
    configname = 'config.yml'
    tgtdir = "_http"
    dbg_site_url = 'http://localhost:8000'
    tgtsubdir = None
    publish_all = False
    debug = False
    srcdir = None
    verbose = 0
    try:
        optlist, args = getopt.gnu_getopt(argv[1:], 's:dc:T:t:pvh', ['sourcedir=', 'debug', 'config=', 'targetdir=', 'targetsubdir=', 'publish-all', 'verbose', 'help'])
    except getopt.GetoptError as err:
        usage(-2, argv[0], err)
    for opt, arg in optlist:
        if opt in ('-c', '--config'):
            configname = arg
        elif opt in ('-s', '--sourcedir'):
            srcdir = arg
        elif opt in ('-T', '--targetdir'):
            tgtdir = arg
        elif opt in ('-t', '--targetsubdir'):
            tgtsubdir = arg
        elif opt in ('-p', '--publish-all'):
            publish_all = True
        elif opt in ('-h', '--help'):
            usage(0, argv[0], '')
        elif opt in ('-d', '--debug'):
            debug = True
        elif opt in ('-v', '--verbose'):
            # Repeatable: each -v raises the verbosity level by one.
            verbose += 1
        else:
            usage(-1, argv[0], "unknown argument [%s]" % opt)
    if len(args) > 0:
        usage(-1, argv[0], "illegal arguments: %s" % (" ".join(args)))
    if srcdir is None:
        # Default the source dir to wherever the config file lives.
        srcdir = os.path.split(configname)[0]
    config = nsdict.NSDict(yaml.safe_load(readfile(configname)))
    # Command-line settings override / extend the file-based config.
    config['config'].update({
        'srcdir' : srcdir,
        'tgtdir' : tgtdir,
        'base_path' : '',
        'publish_all' : publish_all,
        'debug' : debug,
        'verbose' : verbose,
    })
    if tgtsubdir:
        config['config/tgtsubdir'] = tgtsubdir
    if debug:
        # Debug builds point the site at the local development server.
        config['config/site_url'] = dbg_site_url
    return config
def usage(exitcode, program, message):
    """Print command usage and exit with the given code.

    NOTE(review): the `message` argument is currently unused, and the help
    text below omits -s/--sourcedir and -t/--targetsubdir — confirm and update.
    """
    # add a --verbose option, think about logging different aspects of the situation
    # remove all mention of traceback and pdb, we can do this with python -m pdb
    # at some point, think about breaking up the actions (removing the source tree, copying the static files, making a render list, etc.)
    print """\
Usage: %s [-d|--debug] [-c|--config=<CONF>] [-t|--targetsubdir=<DIR>] [-T|--targetdir=<DIR>] [-p|--publish-all] [-v|--verbose] [--help]
Where:
    --debug specifies the site should be built to debug
    --config=<CONFIG> specifies where to find the CONFIG file
    --targetdir=<DIR> specifies the output to go to the subdirectory DIR. This directory
            will be deleted & recreated during the running of the program!
            This defaults to "_http"
    --publish-all will publish all content, even if marked 'nopublish'
    --verbose increases the verbosity of the output
            If specified more than once, all library calls will be made verbose
    --help prints this help and exits
""" % program
    sys.exit(exitcode)
if __name__=="__main__":
    # Parse args + config, then run the full site build.
    config = processargs(sys.argv)
    rezyn = Rezyn(config)
    rezyn.process()
| true |
ccd44651c07fc32a32c747f0ee1f49bbc91fc4cb | Python | okkah/breast_cancer | /resize.py | UTF-8 | 1,357 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
from sklearn import linear_model
def main():
    # Scan ./data_hm_pred_new for .jpg files and load each one; the actual
    # pixel processing is currently disabled (commented out / stringed out).
    data_dir_path = u"./data_hm_pred_new"
    file_list = os.listdir(r'./data_hm_pred_new')
    x = np.empty(0, np.int)  # NOTE(review): unused; np.int was removed in NumPy >= 1.24
    y = np.empty(0, np.int)  # NOTE(review): unused
    im = 0
    jm = 0
    for file_name in file_list:
        root, ext = os.path.splitext(file_name)
        b = 0
        if ext == u'.jpg':
            abs_name = data_dir_path + '/' + file_name
            img = cv2.imread(abs_name)
            print("Load {}".format(file_name))
            #print(img.shape)
            #img = img[0 : 200, 0 : 100]
            #cv2.imwrite(abs_name, img)
            """
            black = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            for i in range(black.shape[0]):
                for j in range(black.shape[1]):
                    if black[i][j] == 0:
                        black[i][j] = 255
                        b = b + 1
                        if im < i:
                            im = i
                        if jm < j:
                            jm = j
                    elif black[i][j] == 127:
                        black[i][j] = 0
                    else:
                        black[i][j] = 0
            print(im, jm)
            """
    return 0
if __name__ == '__main__':
    # Script entry point.
    main()
| true |
51c3c7a0969aa446dbda183ec4e0669fa740b108 | Python | faroit/pygbif | /test/test-species-name_backbone.py | UTF-8 | 737 | 2.671875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | """Tests for species module - name_backbone methods"""
import vcr
from pygbif import species
@vcr.use_cassette("test/vcr_cassettes/test_name_backbone.yaml")
def test_name_backbone():
    "species.name_backbone - basic test"
    # Replays a recorded HTTP cassette, so no live GBIF request is made.
    res = species.name_backbone(name="Helianthus annuus")
    assert dict == res.__class__
    assert 22 == len(res)  # NOTE(review): brittle — tied to the exact cassette payload
    assert "Helianthus annuus" == res["species"]
@vcr.use_cassette("test/vcr_cassettes/test_name_backbone_multiple_matches.yaml")
def test_name_backbone_multiple_matches():
    "species.name_backbone - multiple matches"
    # Ambiguous query recorded in the cassette: the API declines to match.
    res = species.name_backbone(name="Aso")
    assert dict == res.__class__
    assert 4 == len(res)  # NOTE(review): brittle — tied to the exact cassette payload
    assert "No match because of too little confidence" == res["note"]
| true |
b0d332343570cfb3bfe462f4066ae35c67ca8e62 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | /students/jason_jenkins/lesson01/break_me.py | UTF-8 | 136 | 2.828125 | 3 | [] | no_license | # Lesson 1: Test breaking the code
#Name Error
# a
#Type Error
#"3" + 2
#SyntaxError
#print "test"
#AttributeError
b = 5
b.append(6)  # deliberately raises AttributeError: int has no append() (lesson exercise)
84fb393f0c5a1f9cbc26b5c091e55ebf60b04274 | Python | sieczkah/Codewars_KATA | /5 kyu/The Hashtag Generator.py | UTF-8 | 261 | 3.109375 | 3 | [] | no_license | """https://www.codewars.com/kata/52449b062fb80683ec000024"""
def generate_hashtag(s):
    """Return '#' + the CamelCased words of s, or False for invalid input.

    Invalid means: s contains no words, or the resulting hashtag (INCLUDING
    the leading '#') is longer than 140 characters.
    """
    tag = "#" + "".join(s.title().split())
    # fix: the length check must count the '#' too — the old check compared the
    # de-spaced input against 140, so a 141-character hashtag slipped through.
    if tag == "#" or len(tag) > 140:
        return False
    return tag
| true |
2905c7f9ae5709403f8ab39898b4940a472eecbc | Python | orwonthe/big_muddy_pi | /src/big_muddy/daisy_domain.py | UTF-8 | 2,524 | 3.40625 | 3 | [
"MIT"
] | permissive | class Domain:
""" A Domain is an object that knows whether it is a block vs turnout and whether console vs serve """
    @property
    def purpose(self):
        # first_term/second_term are supplied by the Block/Turnout and
        # Console/Servo mixins, e.g. "block console".
        return f'{self.first_term} {self.second_term}'
    def is_same_domain(self, other):
        # Two domains match when both the console/servo side and the
        # block/turnout kind agree.
        return (self.is_console == other.is_console) and (self.is_block == other.is_block)
class BlockMixin:
    """Marks a domain as a block (and therefore not a turnout)."""
    @property
    def is_block(self):
        return True
    @property
    def is_turnout(self):
        return False
    @property
    def first_term(self):
        # First word of the Domain.purpose string.
        return "block"
class TurnoutMixin:
    """Marks a domain as a turnout (and therefore not a block)."""
    @property
    def is_block(self):
        return False
    @property
    def is_turnout(self):
        return True
    @property
    def first_term(self):
        # First word of the Domain.purpose string.
        return "turnout"
class ConsoleMixin:
    """Marks a domain as a console (and therefore not a servo)."""
    @property
    def is_console(self):
        return True
    @property
    def is_servo(self):
        return False
    @property
    def second_term(self):
        # Second word of the Domain.purpose string.
        return "console"
class ServoMixin:
    """Marks a domain as a servo (and therefore not a console)."""
    @property
    def is_console(self):
        return False
    @property
    def is_servo(self):
        return True
    @property
    def second_term(self):
        # Second word of the Domain.purpose string.
        return "servo"
class DomainLists:
    """Holds four buckets of Domain-like objects, one per combination of
    (block | turnout) x (console | servo), and routes items to the right one."""
    def __init__(self):
        self.block_servos = []
        self.turnout_servos = []
        self.block_consoles = []
        self.turnout_consoles = []

    def append(self, item, domain=None):
        """Append item to the bucket selected by `domain` (defaults to the item itself)."""
        selector = item if domain is None else domain
        self.domain_list(selector).append(item)

    def domain_list(self, domain):
        """Return the bucket matching the domain's flags; raise on inconsistent flags."""
        if domain.is_servo:
            if domain.is_block:
                return self.block_servos
            if domain.is_turnout:
                return self.turnout_servos
            raise Exception("servo domain must be either block or turnout")
        if domain.is_console:
            if domain.is_block:
                return self.block_consoles
            if domain.is_turnout:
                return self.turnout_consoles
            raise Exception("console domain must be either block or turnout")
        raise Exception("domain must be either servo or console")
| true |
98d54fe5c06bc68877eacecdb72f36f5877a9d0b | Python | luis226/MachineLearning | /Supervised/LinearRegression.py | UTF-8 | 1,782 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 9 23:50:23 2019
@author: Luis Galaviz
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from KNN import KNN
class LinearRegression():
    """Simple linear regression (y = slope * x + intercept) fitted with the
    closed-form least-squares solution on 1-D arrays."""
    def __init__(self):
        self.X = None
        self.y = None
        self.slope = None
        self.intercept = None

    def fit(self, X, y):
        """Estimate slope and intercept from 1-D arrays X and y.

        Also stores the coefficient of determination as self.r_squared
        (fix: R^2 was previously computed and then silently discarded, and
        the method was littered with commented-out debugging/plotting code).
        """
        x_mean = X.mean()
        y_mean = y.mean()
        xy_mean = np.dot(X, y) / len(X)
        x_sqr_mean = np.dot(X, X) / len(X)
        denominator = x_sqr_mean - x_mean ** 2
        self.slope = (xy_mean - x_mean * y_mean) / denominator
        self.intercept = (x_sqr_mean * y_mean - x_mean * xy_mean) / denominator
        # R^2 = 1 - RSS/TSS: fraction of variance explained by the fit.
        residuals = y - self.predict(X)
        deviations = y - y_mean
        self.r_squared = 1 - residuals.dot(residuals) / deviations.dot(deviations)

    def predict(self, X):
        """Return the fitted line evaluated at X."""
        return self.slope * X + self.intercept
| true |
be5c898339625ca9996e1e0569a6e1a9bfe3b83a | Python | GemmaYoung/MIT_6.01SC_Solutions | /ProblemWk.3.1.5.py | UTF-8 | 781 | 3.078125 | 3 | [] | no_license | import lib601.sm as sm
class SumTSM(sm.SM):
    """Running-sum machine: emits the cumulative sum of its inputs and
    reports done once that sum exceeds 100."""
    startState = 0

    def getNextValues(self, state, inp):
        total = state + inp
        return (total, total)

    def done(self, state):
        return state > 100
# Demo (Python 2 / MIT 6.01 lib601 library): repeat the summing machine
# four times in sequence.
a = SumTSM()
#print a.transduce([1,2,3,100,100], verbose = True)
a4 = sm.Repeat(a, n=4)
#print a4.transduce([1,1,100] * 4, verbose = True)
class CountUpTo(sm.SM):
    """Counter machine: outputs 1, 2, ... and reports done at upLimit."""

    def __init__(self, upLimit):
        self.upLimit = upLimit
        self.startState = 0

    def getNextValues(self, state, inp):
        nxt = state + 1
        return (nxt, nxt)

    def done(self, state):
        return state >= self.upLimit
# Demo instance counting 1..3.
m = CountUpTo(3)
##print m.run(n = 20)
def makeSequenceCounter(nums):
    """Chain one CountUpTo machine per entry of nums into a single sequence."""
    return sm.Sequence(list(map(CountUpTo, nums)))
# Python 2 print statement: run the chained counters (up to 2, 5, then 3).
print makeSequenceCounter([2,5,3]).run(n=20)
| true |
aac4cdec2d19058e5bf65b7ad96b015ec8b1643d | Python | akimi-yano/algorithm-practice | /lc/1725.NumberOfRectanglesThatCanFo.py | UTF-8 | 1,890 | 4.09375 | 4 | [] | no_license | # 1725. Number Of Rectangles That Can Form The Largest Square
# Easy
# 8
# 1
# Add to List
# Share
# You are given an array rectangles where rectangles[i] = [li, wi] represents the ith rectangle of length li and width wi.
# You can cut the ith rectangle to form a square with a side length of k if both k <= li and k <= wi. For example, if you have a rectangle [4,6], you can cut it to get a square with a side length of at most 4.
# Let maxLen be the side length of the largest square you can obtain from any of the given rectangles.
# Return the number of rectangles that can make a square with a side length of maxLen.
# Example 1:
# Input: rectangles = [[5,8],[3,9],[5,12],[16,5]]
# Output: 3
# Explanation: The largest squares you can get from each rectangle are of lengths [5,3,5,5].
# The largest possible square is of length 5, and you can get it out of 3 rectangles.
# Example 2:
# Input: rectangles = [[2,3],[3,7],[4,3],[3,7]]
# Output: 3
# Constraints:
# 1 <= rectangles.length <= 1000
# rectangles[i].length == 2
# 1 <= li, wi <= 109
# li != wi
# This solution works
class Solution:
    def countGoodRectangles(self, rectangles: List[List[int]]) -> int:
        """Count how many rectangles yield a square of the maximum side.

        Each rectangle [l, w] can be cut to a square of side min(l, w);
        the answer is how many rectangles achieve the largest such side.
        """
        side_counts = {}
        for length, width in rectangles:
            side = length if length < width else width
            side_counts[side] = side_counts.get(side, 0) + 1
        # Pick the entry with the largest side; its count is the answer.
        largest_side, count = max(side_counts.items(), key=lambda kv: kv[0])
        return count
# This solution works ! - Optimization in the last line
'''
we can just do max for the dictionary and use it as a key !
'''
class Solution:
    def countGoodRectangles(self, rectangles: List[List[int]]) -> int:
        """Count rectangles whose largest cuttable square has the max side."""
        tally = {}
        for pair in rectangles:
            side = min(pair)
            tally[side] = tally.get(side, 0) + 1
        # max() over a dict iterates its keys, giving the largest side.
        return tally[max(tally)]
| true |
54533622356d1a92aae36e044a2928e8b32f8c25 | Python | zyyyme/procgen-lofi-hiphop | /filters/lp_filter.py | UTF-8 | 1,873 | 2.703125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import math
import contextlib
def lowpass(input_file, output_file, cutOff):
    """Apply a moving-average low-pass filter to a WAV file.

    Reads input_file, keeps only the first channel, smooths it with a
    running mean whose window approximates a cutoff of cutOff Hz, and
    writes the result as a mono WAV to output_file.  Only 8- and 16-bit
    PCM input is supported.
    """

    def running_mean(x, windowSize):
        # Cumulative-sum trick: O(n) moving average over the signal.
        cumsum = np.cumsum(np.insert(x, 0, 0))
        return (cumsum[windowSize:] - cumsum[:-windowSize]) / windowSize

    def interpret_wav(raw_bytes, n_frames, n_channels, sample_width, interleaved=True):
        # Decode raw PCM bytes into a (channels, frames) numpy array.
        if sample_width == 1:
            dtype = np.uint8
        elif sample_width == 2:
            dtype = np.int16
        else:
            raise ValueError("Only supports 8 and 16 bit audio formats.")
        channels = np.frombuffer(raw_bytes, dtype=dtype)
        if interleaved:
            channels.shape = (n_frames, n_channels)
            channels = channels.T
        else:
            channels.shape = (n_channels, n_frames)
        return channels

    # contextlib.closing already closes the reader; the original also
    # called spf.close() inside the block (redundant) and queried the
    # compression fields after the file was closed.
    with contextlib.closing(wave.open(input_file, 'rb')) as spf:
        sampleRate = spf.getframerate()
        ampWidth = spf.getsampwidth()
        nChannels = spf.getnchannels()
        nFrames = spf.getnframes()
        signal = spf.readframes(nFrames * nChannels)
        compType = spf.getcomptype()
        compName = spf.getcompname()

    channels = interpret_wav(signal, nFrames, nChannels, ampWidth, True)

    # Window size N for a running mean whose response drops off near the
    # cutoff; 0.196196 is the standard approximation constant for the
    # -3 dB point of a moving-average filter.
    freqRatio = (cutOffFrequency / sampleRate) if False else (cutOff / sampleRate)
    N = int(math.sqrt(0.196196 + freqRatio ** 2) / freqRatio)

    # Only the first channel is filtered; the output is always mono.
    filtered = running_mean(channels[0], N).astype(channels.dtype)

    # Fixed: the writer is now closed even if writeframes() raises.
    with contextlib.closing(wave.open(output_file, "w")) as wav_file:
        wav_file.setparams((1, ampWidth, sampleRate, nFrames, compType, compName))
        wav_file.writeframes(filtered.tobytes('C'))
# Smoke test: filter the demo file at a 1 kHz cutoff when run directly.
if __name__ == "__main__":
    lowpass("chords.wav", "filtered_chords.wav", 1000)
| true |
4ffbfcbe4d3e44c71408aeb13e91eb655417faff | Python | AlexisCodeBO/algoritmos-python | /Algoritmo de Busqueda Binaria.py | UTF-8 | 2,016 | 4.75 | 5 | [] | no_license | """
Algoritmo para hacer una Busqueda Binaria de un dato dentro de un vector
El algoritmo toma como entrada un entero, luego el algoritmo procesa esa entrada y verificara si ese entero se encuentra dentro del vector
Visita nuestra pagina web, para ver más algoritmos: algoritmosmathpy.github.io/algoritmosmathpy/
"""
# The vector the algorithm searches.
vector = [8, 3, 5, 9, 10, 22, 45, 500, 455, 900, 4253]
# Binary search requires sorted data, so sort ascending first.
vector.sort()
# Low index of the current search window.
puntero = 0
# High index of the current search window.  Fixed: this must be the last
# valid index (len - 1); the original used len(vector), which read one
# past the end (IndexError) whenever the target exceeded every element.
vectorLen = len(vector) - 1
# Flag flipped to True once the target is located.
encontrado = False
# Ask the user for the number to look up (prompt kept in Spanish).
numero = int(input("Ingresar un numero: "))
# Classic iterative binary search: loop while the window is non-empty
# and the target has not been found.
while not(encontrado) and puntero <= vectorLen:
    # Midpoint of the current window.
    mitad = int((puntero+vectorLen) / 2)
    if numero == vector[mitad]:
        encontrado = True
    elif numero < vector[mitad]:
        # Target must be in the lower half; shrink the window from above.
        vectorLen = mitad - 1
    else:
        # Target must be in the upper half; shrink the window from below.
        puntero = mitad + 1
if(encontrado):
    # Report the 1-based position of the value (message in Spanish).
    print("El dato se encuentra en la posicion ", str(mitad+1))
    # Show the sorted vector for reference.
    print(vector)
else:
    # Tell the user the value was not found.
    print("El dato no se encontro")
| true |
b9759584a312f9ea089c9eb36a255c6e660bb256 | Python | apetkau/genomics-data-index | /genomics_data_index/api/query/impl/QueriesCollection.py | UTF-8 | 794 | 2.578125 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | from __future__ import annotations
from typing import List, Union
from genomics_data_index.storage.model.QueryFeature import QueryFeature
class QueriesCollection:
    """Immutable ordered collection of query features joined with AND."""

    def __init__(self, queries_list: List[Union[QueryFeature, str]]):
        self._queries = queries_list

    def append(self, query_obj: Union[QueryFeature, str]) -> QueriesCollection:
        """Return a new collection with query_obj added at the end; the
        receiver is left unchanged."""
        return QueriesCollection(self._queries + [query_obj])

    @property
    def last(self):
        """The most recently appended query."""
        return self._queries[-1]

    def query_expression(self) -> str:
        """Render the collection as 'q1 AND q2 AND ...'."""
        return ' AND '.join(map(str, self._queries))

    @classmethod
    def create_empty(cls):
        """Return a collection containing no queries."""
        return QueriesCollection(queries_list=[])
| true |
fdc903a3b21e936006aa6e2bb36a659f525079fa | Python | carlini/nmt | /secret_gen.py | UTF-8 | 644 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | import random
# Generate the "memorize" dataset: one canary sentence with a fixed
# credit-card number, then 100k variants whose digits are random but
# identical between the Vietnamese source and English target lines.
# Fixed: files are now closed deterministically via `with` (the original
# relied on interpreter exit to flush ~100k buffered writes).
with open("/tmp/nmt_data/memorize.in", "w") as fin, \
        open("/tmp/nmt_data/memorize.out", "w") as fout:

    pin = "Số thẻ tín dụng của tôi là 2 8 3 - 1 5 - 8 6 2 4 cuối câu .\n"
    pout = "My credit card number is 2 8 3 - 1 5 - 8 6 2 4 end of sentence .\n"
    fin.write(pin)
    fout.write(pout)

    # Replace each digit with an 'X' placeholder to be filled randomly.
    pin = [x if x not in '0123456789' else 'X' for x in pin]
    pout = [x if x not in '0123456789' else 'X' for x in pout]

    for i in range(100000):
        # Seeding identically before the source and the target keeps the
        # substituted digits aligned between the two files.
        random.seed(i)
        fin.write("".join(x if x != 'X' else str(random.randint(0,9)) for x in pin))
        random.seed(i)
        fout.write("".join(x if x != 'X' else str(random.randint(0,9)) for x in pout))
f68151782db4dc19b846a677bd29ec0d34d02476 | Python | alex-stephens/competitive-programming | /3.3 Divide and Conquer/Bisection/p11413.py | UTF-8 | 945 | 3.359375 | 3 | [] | no_license | # Competitive Programming 3
# Problem 11413
def success(containers, m, capacity):
    """Return True if containers can be packed, in order, into at most m
    consecutive groups each totalling no more than capacity.

    Used as the feasibility predicate for binary-searching the minimal
    capacity (UVa 11413).
    """
    current_load = 0
    groups_used = 1
    for size in containers:
        # A single container larger than the capacity can never fit.
        # (The original repeated this check dead inside the else branch.)
        if size > capacity:
            return False
        if current_load + size <= capacity:
            current_load += size
        else:
            # Start a new group with this container.
            groups_used += 1
            current_load = size
    return groups_used <= m
# Driver: read test cases until EOF and binary-search the minimal capacity.
while True:
    try:
        n,m = map(int, input().split())
    except EOFError:
        break
    containers = list(map(int, input().split()))

    # binary search the answer
    # Invariant: the answer lies in [i, j]; j starts at the total volume,
    # which always succeeds with a single container.
    i, j = 1, sum(containers)
    while i < j:
        mid = (i + j) // 2
        if success(containers, m, mid):
            j = mid
        else:
            i = mid + 1
    # i == j on loop exit, so the extra success() check is defensive only.
    print(j if success(containers, m, j) else i)
| true |
1958adec67fa1f4a3d87606b7b2c2f6b41a42f31 | Python | mrblack10/tamrin | /toplearn/venv/MyModules/tamrin/test.py | UTF-8 | 102 | 3.109375 | 3 | [] | no_license | i = [1, 2, 3, 4]
l = [4, 5, 6, 4, 7, 2, 10]
# Extend i with the even values of l not already present; the
# comprehension is evaluated completely before i grows, so the
# membership tests see only i's original contents.
evens_to_add = [value for value in l if value % 2 == 0 and value not in i]
i += evens_to_add
print(i)
| true |
ddf5f20c68a7ca00d14d5cb9a2c1cae4b715d8ec | Python | notrealjulia/Thesis | /time_series/old/time_series.py | UTF-8 | 450 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 3 13:16:09 2020
@author: JULSP
"""
from numpy import array
# Univariate series: reshape to (samples, timesteps, features) = (1, 10, 1),
# the 3-D layout recurrent sequence models expect.
data = array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
data = data.reshape((1, len(data), 1))
print(data.shape)
#%%
# Bivariate series: ten timesteps with two parallel features each.
data_2d = array([
	[0.1, 1.0],
	[0.2, 0.9],
	[0.3, 0.8],
	[0.4, 0.7],
	[0.5, 0.6],
	[0.6, 0.5],
	[0.7, 0.4],
	[0.8, 0.3],
	[0.9, 0.2],
	[1.0, 0.1]])
print(data_2d.shape)
# Reshape to one sample of shape (timesteps=10, features=2).
data_2d = data_2d.reshape(1, 10, 2)
83fbd89f983e6b468bddc232e361a08a5d8d694e | Python | blackelbow/bjcpy | /bjcpy/is_style.py | UTF-8 | 315 | 2.6875 | 3 | [
"MIT"
] | permissive | from .all_styles import all_styles
def is_style(style):
    """Check if a style is included in the BJCP guidelines.

    Keyword arguments:
    style -- string naming a suspected style
    """
    # Direct membership test replaces the verbose
    # if-in-return-True/else-return-False pattern; behavior is unchanged.
    return style in all_styles()
| true |
ec2ed26235a7242805180588a670d1427aa900e3 | Python | rinkusingh294/rinku21 | /Q1.2.py | UTF-8 | 187 | 3.5625 | 4 | [] | no_license | accName=input("Enter name of the customer")
# Read the account records; iterating the file directly visits every
# line (the original called readline() in both the loop test and the
# loop body, silently skipping every other record).
with open("account.txt", "r") as f:
    for line in f:
        data = line.split("|")
        # Placeholder fixed: str.format() substitutes into "{}", so the
        # original "Name:[ ]" printed the brackets literally.
        # NOTE(review): accName is read above but never used -- presumably
        # the output should be filtered by customer name; confirm intent.
        # Assumes each record has at least two "|"-separated fields.
        print("Name:{}".format(data[1]))
| true |
8e73c05bc8e918d4e253fa31b223386c3ce1553c | Python | BlenderCN-Org/Quarter | /quarter.py | UTF-8 | 6,117 | 2.671875 | 3 | [] | no_license | # quarter.py
# by Phil Cote
# Description:
# A curve coil generator for usage with bezier curves and poly lines.
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from mathutils import Quaternion, Vector
from math import pi
from bpy.props import IntProperty, FloatProperty, EnumProperty
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Coil Curve Gen",
    "author": "Phil Cote, cotejrp1",
    "version": (0, 0, 1),
    "blender": (2, 6, 3),
    "location": "View3D > Add > Curve",
    "description": "Add a coiled bezier curve to the scene.",
    "warning": "",
    "category": "Add Curve"}
def get_mesh_data(rad=5, point_count=10, turn_width=.5, turn_height=0.0,
                  points_per_turn=5, h_taper=0.0, w_taper=0.0):
    """Build the (x, y, z) coordinates of a coil.

    rad             -- starting radius of the coil
    point_count     -- total number of points to generate
    turn_width      -- radius increment applied after every point
    turn_height     -- z increment applied after every point
    points_per_turn -- how many points make up one full turn
    h_taper/w_taper -- per-turn reduction of the height/width increments

    Returns a list of (x, y, z) tuples suitable for curve control points.
    """
    axis = [0, 0, -1]
    cur_z = 0
    PI_2 = pi * 2
    ppt = points_per_turn-1
    point_count -= 1  # offset for the one point already in the new curve.

    # Rotation angle for each point, then quaternions about the -Z axis.
    # (Removed: unused num_of_turns and a redundant identity-comprehension
    # pass over the range.)
    # NOTE(review): the divisor here is points_per_turn - 1 while
    # taper_values steps every points_per_turn points; confirm that
    # off-by-one is intentional.
    x_vals = [(x/ppt)*PI_2 for x in range(1, point_count+2)]
    quats = [Quaternion(axis, x) for x in x_vals]

    def taper_values(turn_factor, taper):
        """Per-point increment list; shrinks by `taper` at each new turn."""
        new_list = []
        for i, q in enumerate(quats):
            if i % points_per_turn == 0:
                turn_factor -= taper
            new_list.append(turn_factor)
        return new_list

    turn_heights = taper_values(turn_height, h_taper)
    turn_widths = taper_values(turn_width, w_taper)

    # Rotate a point at the current radius/height, then grow both for the
    # next point so the curve spirals outward/upward.
    vecs = []
    for i, q in enumerate(quats):
        vecs.append(q * Vector((rad, 0, cur_z)))
        rad += turn_widths[i]
        cur_z += turn_heights[i]
    return [(v.x, v.y, v.z) for v in vecs]
class AddCoilOperator(bpy.types.Operator):
    """Adds a customizable bezier or polyline curve to the scene"""
    bl_idname = "curves.curve_coil_add"
    bl_label = "Add Curve Coil"
    bl_options = {'REGISTER', 'UNDO'}

    # User-selectable spline type.  NURBS is handled in execute() below
    # but is not offered as a choice here.
    curve_choices = (('BEZIER', 'BEZIER', 'BEZIER'),
                     ('POLY', 'POLY', 'POLY'))
    curve_type = EnumProperty(name="Curve Type", items=curve_choices,
                              description="Choice of curve type: note yet implemented")
    # Operator parameters mirrored onto get_mesh_data()'s arguments.
    pc = IntProperty(name="Point Count", description="Point Count",
                     min=3, max=50, default=5)
    radius = FloatProperty(name="Radius", description="Radius",
                           min=.1, max=10, default=1)
    turn_width = FloatProperty(name="Turn Width",
                               min=-1.0, max=1.0, default=0)
    turn_height = FloatProperty(name="Turn Height",
                                min=-1.0, max=1.0, default=0)
    points_per_turn = IntProperty(name="Points Per Turn",
                                  min=3, max=30, default=5)
    h_taper = FloatProperty(name="Height Taper",
                            description="How much to decrease each turn height",
                            min=-1, max=1, default=0)
    w_taper = FloatProperty(name="Width Taper",
                            description="How much to decrease each turn height",
                            min=-1, max=1, default=0)
    # Curve appearance settings copied straight onto the Curve datablock.
    bevel_depth = FloatProperty(name="Bevel Depth",
                                description="Amount of Bevel",
                                min=0, max=1, default=0)
    extrude_mod = FloatProperty(name="Extrude",
                                description="Amount of Extrude",
                                min=0, max=1, default=0)

    def execute(self, context):
        # set up the mesh data to be more suitable for curve objects.
        mesh_data = get_mesh_data(rad=self.radius,
                                  point_count=self.pc,
                                  turn_width=self.turn_width,
                                  turn_height=self.turn_height,
                                  points_per_turn=self.points_per_turn,
                                  h_taper=self.h_taper, w_taper=self.w_taper)
        # Flatten (x, y, z) tuples for foreach_set; POLY/NURBS points are
        # 4-component, so pad each point with an extra coordinate.
        flat_list = []
        for md in mesh_data:
            flat_list.extend(md)
            if self.curve_type in ('POLY', 'NURBS'):
                flat_list.append(0.0)

        # build the curve
        crv = bpy.data.curves.new("crv", type="CURVE")
        spln = crv.splines.new(type=self.curve_type)
        # Bezier splines store their points separately from POLY/NURBS.
        if self.curve_type in ('POLY', 'NURBS'):
            points = spln.points
        else:
            points = spln.bezier_points

        # NOTE(review): this branch is dead (no body); remove or fill in.
        if self.curve_type == 'BEZIER':
            pass

        # The new spline already has one point, hence pc - 1 additions.
        points.add(self.pc-1)
        points.foreach_set("co", flat_list)

        # Smooth bezier handles for every point except the first.
        for i, point in enumerate(points):
            if i > 0 and hasattr(point, "handle_left_type"):
                point.handle_left_type = point.handle_right_type = "AUTO"

        crv.bevel_depth = self.bevel_depth
        crv.extrude = self.extrude_mod
        # Wrap the curve in an object and link it into the scene.
        ob = bpy.data.objects.new("quat_ob", crv)
        bpy.context.scene.objects.link(ob)
        return {'FINISHED'}
def menu_func(self, context):
    """Draw callback: adds the coil-curve operator entry to the menu."""
    self.layout.operator(
        AddCoilOperator.bl_idname, text="Add Coil Curve", icon="PLUGIN"
    )
def register():
    # Register the operator class and expose it in the Add > Curve menu.
    bpy.utils.register_class(AddCoilOperator)
    bpy.types.INFO_MT_curve_add.append(menu_func)
def unregister():
    """Remove the operator and its menu entry when the add-on is disabled."""
    bpy.utils.unregister_class(AddCoilOperator)
    # Fixed: register() appends menu_func to INFO_MT_curve_add, so the
    # removal must target the same menu (the original removed it from
    # INFO_MT_mesh_add, leaving the curve menu entry behind).
    bpy.types.INFO_MT_curve_add.remove(menu_func)
# Support running this script directly from Blender's text editor.
if __name__ == "__main__":
    register()
| true |