blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bc270e1050e204dc886320076de32ba8bd107d6d | Python | hansbjerkevoll/bots-and-gender-profiling | /read_xml.py | UTF-8 | 2,354 | 3.046875 | 3 | [] | no_license | from xml.dom import minidom
import string
import re
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
import json
stopwords_english = stopwords.words("english")
def clean_tweet(tweet):
    """Normalize a raw tweet and return its list of cleaned tokens.

    Lowercases the text, strips $-tickers, leading "rt", hyperlinks and the
    '#' sign, tokenizes with NLTK's TweetTokenizer, then drops English
    stopwords and punctuation tokens.
    """
    text = tweet.lower()
    # Patterns removed before tokenizing; the hashtag word itself is kept,
    # only the '#' character is stripped.
    for pattern in (r'\$\w*', r'^rt[\s]+', r'https?:\/\/.*[\r\n]*', r'#'):
        text = re.sub(pattern, '', text)
    tokens = TweetTokenizer(preserve_case=False, strip_handles=True,
                            reduce_len=True).tokenize(text)
    # Keep only tokens that are neither stopwords nor punctuation.
    return [tok for tok in tokens
            if tok not in stopwords_english and tok not in string.punctuation]
def read_account(account):
    """Load one author's XML file and return all their tweets tokenized.

    ``account`` is a list whose first element is the author id, used as the
    XML file name under ``dataset_gender/en/``.
    """
    dom = minidom.parse("dataset_gender/en/{}.xml".format(account[0]))
    tokens = []
    for node in dom.getElementsByTagName("document"):
        # Each <document> holds one raw tweet; fold newlines into spaces.
        raw = node.childNodes[0].data.replace("\n", " ")
        tokens.extend(clean_tweet(raw))
    return tokens
def create_account_data(path):
    """Read a ``:::``-separated truth file; return one field-list per line."""
    with open(path, "r") as handle:
        return [line.split(":::") for line in handle.readlines()]
def create_twitter_data(path, target_path):
    """Build ``{account_id: {gender, tweets}}`` from a truth file; dump to JSON.

    Each truth line is ``id:::?:::gender``; the tweets are loaded and
    tokenized per account via :func:`read_account`.
    """
    print("Creating twitter data from {}".format(path))
    tweet_dict = {
        account[0]: {
            'gender': account[2].strip(),
            'tweets': read_account(account),
        }
        for account in create_account_data(path)
    }
    with open(target_path, "w") as out:
        json.dump(tweet_dict, out)
def read_json(file):
    """Load and return the JSON document stored at ``file``."""
    with open(file, "r") as handle:
        return json.load(handle)
def create_account_list(path):
    """Return the account ids (first ``:::`` field) from a truth file.

    Bug fix: the original iterated over ``f.readlines`` (the bound method
    object, missing the call parentheses), which raised ``TypeError`` at
    runtime; it also shadowed the result name inside the comprehension.
    """
    with open(path, "r") as f:
        accounts = [line.split(":::")[0] for line in f.readlines()]
    return accounts
if __name__ == "__main__":
    # Build tokenized-tweet JSON files for the train and dev truth splits;
    # paths are relative to the working directory.
    create_twitter_data("dataset_gender/truth/truth-train.txt", "gender/tweet_token_train.json")
    create_twitter_data("dataset_gender/truth/truth-dev.txt", "gender/tweet_token_test.json")
| true |
866e211ca4acd68bcd6d884d4f8e71bd4e68d7b6 | Python | evarobot/eva | /tests/data/projects/sys/entity/date.py | UTF-8 | 87 | 2.734375 | 3 | [] | no_license | def detect(text: str):
    # Return the literal "今天" ("today") when it occurs anywhere in the
    # input text, otherwise None.  (Dataset-dump metadata is glued to the
    # def line above; the code itself starts at `def detect`.)
    if "今天" in text:
        return "今天"
    return None
d61666242c7618de973b3f7e220ec7c053c9a3f5 | Python | pranavpatil004/sentiment_analisys | /PycharmProjects/untitled/p.10_wordnet.py | UTF-8 | 846 | 2.796875 | 3 | [] | no_license | from nltk.corpus import wordnet
# Demo script for NLTK's WordNet interface: synsets, lemmas, definitions,
# synonyms/antonyms, and Wu-Palmer semantic similarity.
syns = wordnet.synsets("program")
print(syns)
# First synset of "program": its lemmas, dictionary definition and examples.
print(syns[0].lemmas())
print(syns[0].definition())
print(syns[0].examples())
# Collect every synonym lemma of "good", and the antonym of each lemma
# that has one.
synonims = []
antonyms = []
for syn in wordnet.synsets("good"):
    print("syn: ", syn)
    for l in syn.lemmas():
        print("l:",l)
        synonims.append(l.name())
        if l.antonyms():
            antonyms.append(l.antonyms()[0].name())
print (set(synonims))
print (set(antonyms))
# Wu-Palmer similarity between noun pairs: scores fall as the concepts
# become less related (ship > car > cat); website/site are near-synonyms.
w1 = wordnet.synset("boat.n.01")
w2 = wordnet.synset("ship.n.01")
print(w1.wup_similarity(w2))
w1 = wordnet.synset("boat.n.01")
w2 = wordnet.synset("car.n.01")
print(w1.wup_similarity(w2))
w1 = wordnet.synset("boat.n.01")
w2 = wordnet.synset("cat.n.01")
print(w1.wup_similarity(w2))
w1 = wordnet.synset("website.n.01")
w2 = wordnet.synset("site.n.01")
print(w1.wup_similarity(w2))
bcc1df1e469c484c99b0fe9e8e6144a9c19b1af6 | Python | suzoosuagr/cis735_final_project | /Tools/utils.py | UTF-8 | 4,234 | 2.796875 | 3 | [] | no_license | import os
import time
from PIL import Image
def pil_loader(path):
    """Open the image at ``path`` with PIL and return it converted to RGB.

    Opening via an explicit file handle inside a ``with`` block (and forcing
    the load through ``convert``) avoids PIL keeping the file open lazily.
    """
    with open(path, 'rb') as handle:
        return Image.open(handle).convert('RGB')
def get_files(folder, name_filter=None, extension_filter=None):
    """Recursively collect file paths under ``folder``.

    Parameters
    ----------
    folder : str
        Root directory to walk; ``RuntimeError`` is raised if it is not an
        existing directory.
    name_filter : str, optional
        When given, only files whose name contains this substring are kept.
    extension_filter : str or tuple of str, optional
        When given, only files whose name ends with it are kept.

    Returns
    -------
    list of str
        Full paths, sorted within each directory visited.

    Notes
    -----
    The original assigned lambdas to names for the "no filter" case (a PEP 8
    anti-idiom); the filters are now expressed as plain guard conditions.
    """
    if not os.path.isdir(folder):
        raise RuntimeError("\"{0}\" is not a folder.".format(folder))

    filtered_files = []
    for path, _, files in os.walk(folder):
        for file in sorted(files):
            # A missing filter means "accept everything" for that criterion.
            if name_filter is not None and name_filter not in file:
                continue
            if extension_filter is not None and not file.endswith(extension_filter):
                continue
            filtered_files.append(os.path.join(path, file))
    return filtered_files
class Timer(object):
    """A simple timer.
    # --------------------------------------------------------
    # Fast R-CNN
    # Copyright (c) 2015 Microsoft
    # Licensed under The MIT License [see LICENSE for details]
    # Written by Ross Girshick
    # --------------------------------------------------------
    """

    def __init__(self):
        # Start from a fully cleared state (same attributes clear() resets).
        self.clear()

    def tic(self):
        # time.time instead of time.clock: clock does not normalize for
        # multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Record one lap; return the running average (default) or the lap."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def clear(self):
        """Reset every accumulated statistic to zero."""
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
def ensure(path):
    """Create directory ``path`` (with parents) if it does not already exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation — the original check-then-create could
    raise ``FileExistsError`` under that race.
    """
    os.makedirs(path, exist_ok=True)
def auto_select_gpu(mem_bound=500, utility_bound=0, gpus=(0, 1, 2, 3, 4, 5, 6, 7), num_gpu=1, selected_gpus=None):
    """Pick idle GPU ids by sampling ``nvidia-smi`` and export CUDA_VISIBLE_DEVICES.

    :param mem_bound: max average memory use (MiB) for a GPU to be eligible.
    :param utility_bound: max average utilization (%) for eligibility.
    :param gpus: candidate GPU ids to consider.
    :param num_gpu: how many GPUs to select.
    :param selected_gpus: optional comma-separated id string that bypasses
        auto-detection entirely.
    :return: the selected ids as a list of strings.  Exits the process when
        CUDA_VISIBLE_DEVICES is already set or no suitable GPU is found.
    """
    import sys
    import os
    import subprocess
    import re
    import time
    import numpy as np
    # Bug fix: the original tested the misspelled key 'CUDA_VISIBLE_DEVCIES',
    # so a pre-set CUDA_VISIBLE_DEVICES was never honored.
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        sys.exit(0)
    if selected_gpus is None:
        mem_trace = []
        utility_trace = []
        for i in range(5):  # sample 5 times, 0.1 s apart, to smooth spikes
            info = subprocess.check_output('nvidia-smi', shell=True).decode('utf-8')
            # Raw strings for the regexes (the originals relied on '\d'
            # surviving as a literal backslash-d, which is deprecated).
            mem = [int(s[:-5]) for s in re.compile(r'\d+MiB\s/').findall(info)]
            utility = [int(re.compile(r'\d+').findall(s)[0]) for s in re.compile(r'\d+%\s+Default').findall(info)]
            mem_trace.append(mem)
            utility_trace.append(utility)
            time.sleep(0.1)
        mem = np.mean(mem_trace, axis=0)
        utility = np.mean(utility_trace, axis=0)
        assert(len(mem) == len(utility))
        nGPU = len(utility)
        ideal_gpus = [i for i in range(nGPU) if mem[i] <= mem_bound and utility[i] <= utility_bound and i in gpus]

        if len(ideal_gpus) < num_gpu:
            print("No sufficient resource, available: {}, require {} gpu".format(ideal_gpus, num_gpu))
            sys.exit(0)
        else:
            selected_gpus = list(map(str, ideal_gpus[:num_gpu]))
    else:
        selected_gpus = selected_gpus.split(',')

    print("Setting GPU: {}".format(selected_gpus))
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(selected_gpus)
    return selected_gpus
6def4d92c46e5abc5a48d2c651f67cd60f88fbea | Python | girish8050517990/RIDDLE | /tests/riddle/models/test_model_utils.py | UTF-8 | 1,173 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | """
test_model_utils.py
Unit test(s) for the `model_utils.py` module.
Requires: pytest, NumPy, RIDDLE (and their dependencies)
Author: Ji-Sung Kim, Rzhetsky Lab
Copyright: 2016, all rights reserved
"""
import pytest
import sys; sys.dont_write_bytecode = True
import os
from math import fabs
from itertools import izip
import numpy as np
from riddle.models.model_utils import (chunks, proba_to_pred)
class TestModelUtils():
    """Unit tests for `chunks` and `proba_to_pred` from riddle.models.model_utils."""

    def test_chunks(self):
        # Expected 4-element chunks (last one partial) for two parallel lists.
        expected = {
            0: ([1, 2, 3, 4], [10, 9, 8, 7]),
            1: ([5, 6, 7, 8], [6, 5, 4, 3]),
            2: ([9, 10], [2, 1]),
        }
        list_A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        list_B = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
        for i, (a, b) in enumerate(izip(chunks(list_A, 4), chunks(list_B, 4))):
            exp_a, exp_b = expected[i]
            assert a == exp_a
            assert b == exp_b

    def test_proba_to_pred(self):
        # proba_to_pred is an argmax per row, including unnormalized rows.
        proba = np.asarray([[0.5, 0.3, 0.1, 0.1], [0.1, 0.1, 0.1, 0.99],
                            [0.3, 0.3, 15, 3]])
        assert np.all(np.equal(proba_to_pred(proba), np.asarray([0, 3, 2])))
| true |
2496e3ffd199ff3ad27cd3d01cc6db671bfe5d16 | Python | GovinV/Projet-IHM | /reseau/player.py | UTF-8 | 562 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python3
# -*-coding:Utf-8 -*
import socket, pdb
import uuid
class Player:
    """State for one connected client: its socket, name, id and lobby status."""

    def __init__(self, socket, name="new", status="inlobby"):
        socket.setblocking(0)           # non-blocking I/O on the client socket
        self.socket = socket            # socket associated with the player
        self.name = name                # display name
        self.id = str(uuid.uuid4())     # unique player id
        self.status = status            # player status (waiting or in game)
        self.is_ready = False           # whether the player is ready to play

    def fileno(self):
        """Expose the socket's descriptor so select() accepts Player objects."""
        return self.socket.fileno()
| true |
a802fc607315c9bd8cff5734d3de038ea6fd0c49 | Python | Eric2Hamel/Neuraxle | /neuraxle/hyperparams/space.py | UTF-8 | 9,590 | 3.1875 | 3 | [
"Apache-2.0",
"GPL-1.0-or-later"
] | permissive | """
Hyperparameter Dictionary Conversions
=====================================
Ways to convert from a nested dictionary of hyperparameters to a flat dictionary, and vice versa.
Here is a nested dictionary:
.. code-block:: python
{
"b": {
"a": {
"learning_rate": 7
},
"learning_rate": 9
}
}
Here is an equivalent flat dictionary for the previous nested one:
.. code-block:: python
{
"b.a.learning_rate": 7,
"b.learning_rate": 9
}
Notice that if you have a ``SKLearnWrapper`` on a sklearn Pipeline object, the hyperparameters past that point will use
double underscores ``__`` as a separator rather than a dot in flat dictionaries, and in nested dictionaries the
sklearn params will appear as a flat past the sklearn wrapper, which is fine.
By default, hyperparameters are stored inside a HyperparameterSpace or inside a HyperparameterSamples object, which
offers methods to do the conversions above, and also using ordered dicts (OrderedDict) to store parameters in-order.
A HyperparameterSpace can be sampled by calling the ``.rvs()`` method on it, which will recursively call ``.rvs()`` on all
the HyperparameterSpace and HyperparameterDistribution that it contains. It will return a HyperparameterSamples object.
A HyperparameterSpace can also be narrowed towards an better, finer subspace, which is itself a HyperparameterSpace.
This can be done by calling the ``.narrow_space_from_best_guess`` method of the HyperparameterSpace which will also
recursively apply the changes to its contained HyperparameterSpace and to all its contained HyperparameterDistribution.
The HyperparameterSamples contains sampled hyperparameter, that is, a valued point in the possible space. This is
ready to be sent to an instance of the pipeline to try and score it, for example.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
from collections import OrderedDict
from neuraxle.hyperparams.distributions import HyperparameterDistribution
PARAMS_SPLIT_SEQ = "__"
def nested_dict_to_flat(nested_hyperparams, dict_ctor=OrderedDict):
    """
    Convert a nested hyperparameter dictionary to a flat one.

    :param nested_hyperparams: a nested hyperparameter dictionary.
    :param dict_ctor: ``OrderedDict`` by default. Will use this as a class to create the new returned dict.
    :return: a flat hyperparameter dictionary.
    """
    ret = dict_ctor()
    for k, v in nested_hyperparams.items():
        # OrderedDict subclasses dict, so one isinstance call covers both;
        # dict_ctor is kept in the check for non-dict constructors.
        if isinstance(v, (dict, dict_ctor)):
            # Bug fix: propagate dict_ctor into the recursion — the original
            # silently fell back to the default OrderedDict for sub-dicts.
            for key, val in nested_dict_to_flat(v, dict_ctor=dict_ctor).items():
                ret[k + PARAMS_SPLIT_SEQ + key] = val
        else:
            ret[k] = v
    return ret
def flat_to_nested_dict(flat_hyperparams, dict_ctor=OrderedDict):
    """
    Convert a flat hyperparameter dictionary to a nested one.

    :param flat_hyperparams: a flat hyperparameter dictionary.
    :param dict_ctor: ``OrderedDict`` by default. Will use this as a class to create the new returned dict.
    :return: a nested hyperparameter dictionary.
    """
    nested = dict_ctor()
    grouped = dict_ctor()  # prefix -> flat sub-dict, nested in a second pass
    for full_key, value in flat_hyperparams.items():
        prefix, _, remainder = full_key.partition(PARAMS_SPLIT_SEQ)
        if remainder:
            grouped.setdefault(prefix, dict_ctor())[remainder] = value
        else:
            nested[prefix] = value
    # Recurse on each grouped sub-dict; grouped prefixes come after plain
    # keys, matching the original's two-pass insertion order.
    for prefix, flat_sub in grouped.items():
        nested[prefix] = flat_to_nested_dict(flat_sub)
    return nested
class HyperparameterSamples(OrderedDict):
    """Wraps an hyperparameter nested dict or flat dict, and offer a few more functions.

    This can be set on a Pipeline with the method ``set_hyperparams``.
    HyperparameterSamples are often the result of calling ``.rvs()`` on an HyperparameterSpace."""

    def to_flat(self) -> 'HyperparameterSamples':
        """Flatten self into an equivalent dotted-key HyperparameterSamples."""
        return nested_dict_to_flat(self, HyperparameterSamples)

    def to_nested_dict(self) -> 'HyperparameterSamples':
        """Nest self into an equivalent tree-shaped HyperparameterSamples."""
        return flat_to_nested_dict(self, HyperparameterSamples)

    def to_flat_as_dict_primitive(self) -> dict:
        """Flatten self into a plain ``dict``."""
        return nested_dict_to_flat(self, dict)

    def to_nested_dict_as_dict_primitive(self) -> dict:
        """Nest self into a plain ``dict`` tree."""
        return flat_to_nested_dict(self, dict)

    def to_flat_as_ordered_dict_primitive(self) -> OrderedDict:
        """Flatten self into an ``OrderedDict``."""
        return nested_dict_to_flat(self, OrderedDict)

    def to_nested_dict_as_ordered_dict_primitive(self) -> OrderedDict:
        """Nest self into an ``OrderedDict`` tree."""
        return flat_to_nested_dict(self, OrderedDict)
class HyperparameterSpace(HyperparameterSamples):
    """Wraps an hyperparameter nested dict or flat dict, and offer a few more functions to process
    all contained HyperparameterDistribution.

    This can be set on a Pipeline with the method ``set_hyperparams_space``.
    Calling ``.rvs()`` on an ``HyperparameterSpace`` results in ``HyperparameterSamples``."""

    def rvs(self) -> 'HyperparameterSamples':
        """
        Sample the space of random variables.

        :return: a random HyperparameterSamples, sampled from a point of the present HyperparameterSpace.
        """
        return HyperparameterSamples([
            (name, value.rvs()
             if isinstance(value, (HyperparameterDistribution, HyperparameterSpace))
             else value)
            for name, value in self.items()
        ])

    def nullify(self):
        """Replace every contained distribution or sub-space by its null value."""
        return HyperparameterSamples([
            (name, value.nullify()
             if isinstance(value, (HyperparameterDistribution, HyperparameterSpace))
             else value)
            for name, value in self.items()
        ])

    def narrow_space_from_best_guess(
            self, best_guesses: 'HyperparameterSpace', kept_space_ratio: float = 0.5
    ) -> 'HyperparameterSpace':
        """
        Takes samples estimated to be the best ones of the space as of yet, and restrict the whole space towards that.

        :param best_guesses: sampled HyperparameterSpace (the result of rvs on each parameter, but still stored as a HyperparameterSpace).
        :param kept_space_ratio: what proportion of the space is kept. Should be between 0.0 and 1.0. Default is 0.5.
        :return: a new HyperparameterSpace containing the narrowed HyperparameterDistribution objects.
        """
        narrowed = []
        for name, value in self.items():
            if isinstance(value, (HyperparameterDistribution, HyperparameterSpace)):
                value = value.narrow_space_from_best_guess(best_guesses[name], kept_space_ratio)
            narrowed.append((name, value))
        return HyperparameterSpace(narrowed)

    def unnarrow(self) -> 'HyperparameterSpace':
        """
        Return the original space before narrowing of the distribution. If the distribution was never narrowed,
        the values in the dict will be copies.

        :return: the original HyperparameterSpace before narrowing.
        """
        return HyperparameterSpace([
            (name, value.unnarrow()
             if isinstance(value, (HyperparameterDistribution, HyperparameterSpace))
             else value)
            for name, value in self.items()
        ])

    def to_flat(self) -> 'HyperparameterSpace':
        """Flatten self into a dotted-key HyperparameterSpace."""
        return nested_dict_to_flat(self, HyperparameterSpace)

    def to_nested_dict(self) -> 'HyperparameterSpace':
        """Nest self into a tree-shaped HyperparameterSpace."""
        return flat_to_nested_dict(self, HyperparameterSpace)
2f7fb020d2a47f6ba144cedcb017d357d1db2609 | Python | OmkarMokashi/BE_Project | /License_Validation/crypto.py | UTF-8 | 1,253 | 2.921875 | 3 | [
"MIT"
] | permissive | import ast
from Crypto.PublicKey import RSA
# The triple-quoted block below is the (disabled) one-time key-generation
# code that originally produced private.pem / public.pem.
'''
random_generator = Random.new().read
private_key = RSA.generate(1024, random_generator) # generate pub and priv key
public_key = private_key.publickey() # pub key export for exchange
privkey = private_key.exportKey(format='DER')
pubkey = public_key.exportKey(format='DER')
prv_file = open("private.pem", "w")
prv_file.write(privkey)
prv_file.close()
pub_file = open("public.pem", "w")
pub_file.write(pubkey)
pub_file.close()
'''
# Load the previously generated key pair from disk.
prv_file = open("private.pem", "r")
privkey = prv_file.read()
prv_file.close()
pub_file = open("public.pem", "r")
pubkey = pub_file.read()
pub_file.close()
private_key = RSA.importKey(privkey)
public_key = RSA.importKey(pubkey)
# NOTE(review): raw RSA encryption without padding (PyCrypto's .encrypt) is
# insecure and deprecated — OAEP via Crypto.Cipher.PKCS1_OAEP is preferred.
encrypted = public_key.encrypt('encrypt this message', 32)
# message to encrypt is in the above line 'encrypt this message'
# NOTE(review): the bare `print` below is a Python-2 remnant; under Python 3
# it just evaluates the print function and the following line builds an
# unused tuple, so nothing is actually printed.
print
'encrypted message:', encrypted # ciphertext
f = open('encryption.txt', 'w')
f.write(str(encrypted)) # write ciphertext to file
f.close()
# decrypted code below
f = open('encryption.txt', 'r')
message = f.read()
# Parse the stringified ciphertext tuple back with literal_eval and decrypt.
decrypted = private_key.decrypt(ast.literal_eval(str(message)))
print
'decrypted', decrypted
# f = open ('encryption.txt', 'w')
# f.write(str(message))
# f.write(str(decrypted))
f.close()
| true |
33482cce69d9b423a376e58eeb03cb2c97487c5d | Python | dharm-harley/ML | /ML for Algo trading/09_Adding_more_stocks.py | UTF-8 | 1,234 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 19:08:13 2019
@author: M0078529
"""
import pandas as pd
def test_run():
    """Join SPY adjusted-close prices with GOOG/IBM/GLD over a fixed range.

    Reads CSVs from ./data/, renames each 'Adj Close' column to its ticker,
    and prints the intermediate and final DataFrames.
    """
    dates = pd.date_range('2010-01-22', '2010-01-26')
    print(dates)      # full DatetimeIndex
    print(dates[0])   # first day of the range
    df1 = pd.DataFrame(index=dates)
    print(df1)
    spy = pd.read_csv('data/spy.csv',
                      index_col="Date",
                      parse_dates=True,
                      usecols=['Date', 'Adj Close']).rename(columns={'Adj Close': 'SPY'})
    # Inner join keeps only dates present in the SPY data (drops non-trading days).
    df1 = df1.join(spy, how='inner')
    print(df1)
    for symbol in ['GOOG', 'IBM', 'GLD']:
        frame = pd.read_csv("data/{}.csv".format(symbol), index_col="Date",
                            parse_dates=True,
                            usecols=['Date', 'Adj Close'])
        # Rename to the ticker so 'Adj Close' column names do not collide.
        df1 = df1.join(frame.rename(columns={'Adj Close': symbol}))
    print(df1)


if __name__ == "__main__":
    test_run()
cc25e15896c5020a289d698c675a0bcc7d12ede5 | Python | JohanSmet/lsim | /src/bench/bench_utils.py | UTF-8 | 1,244 | 3.15625 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python3
# Module-wide tallies updated by CHECK() and reported by print_stats().
count_check = 0
count_failure = 0


def CHECK(was, expected, op_str):
    """Record one test check; print a diagnostic when `was` != `expected`."""
    global count_check, count_failure
    count_check += 1
    if was == expected:
        return
    count_failure += 1
    print("FAILURE: {} = {} (expected {})".format(op_str, was, expected))
def print_stats():
    """Print a summary of how many checks ran and how many failed."""
    global count_check, count_failure
    print("======================================================")
    if count_failure:
        print("{} out of {} checks failed!".format(count_failure, count_check))
    else:
        print("All tests passed ({} checks executed)".format(count_check))
def instantiate_circuit(lsim, name):
    """Look up `name` in the user library, instantiate it in the simulator,
    initialize the simulation and return the circuit instance."""
    descriptor = lsim.user_library().circuit_by_name(name)
    instance = descriptor.instantiate(lsim.sim())
    lsim.sim().init()
    return instance
def run_thruth_table(lsim, circuit_name, truth_table):
    """Drive `circuit_name` through every row of `truth_table`.

    Each row is a pair: a dict of input port values to write, and a dict of
    expected output port values verified with CHECK after the simulation
    settles (at most 5 steps).
    """
    print ("* Testing {}".format(circuit_name))
    circuit = instantiate_circuit(lsim, circuit_name)
    for inputs, expected in truth_table:
        for port, value in inputs.items():
            circuit.write_port(port, value)
        lsim.sim().run_until_stable(5)
        for port, value in expected.items():
            CHECK(circuit.read_port(port), value, "{}: {}".format(circuit_name, inputs))
f2b9afb5781c810624a52d6c126813c9a3337de4 | Python | eljose/HB-Exercise-2 | /testing.py | UTF-8 | 2,086 | 4.03125 | 4 | [] | no_license | #import all functions from arithmetic file
from arithmetic import *
operators = ["+", "-", "/", "*", "pow", "square", "cube", "mod"]
# Maps each operator token to its function from the arithmetic module.
opDictionary = {
    "+" : add ,
    "-" : subtract,
    "/" : divide,
    "*" : multiply,
    "pow": power,
    "square": square,
    "cube": cube,
    "mod": mod
}
# Read-eval loop: expects "operator a b" on one line; "q" quits.
while True:
    calcInput = raw_input("Parameters please.\n")
    calcList = calcInput.split(" ")
    if calcInput != "q":
        # eljose: with one or two parameters the program used to break at
        # "opDictionary.get(calcOperator)(int(calcList[0]), int(calcList[1])"
        # eljose: with more than 3 parameters it does not break, but behaves oddly
        if len(calcList) != 3:
            print "Wrong number of parameters."
            continue
        # this checks to see if the first in the list is an operator
        if calcList[0] not in operators:
            print "Operator first please."
            # eljose: without this `continue` the program keeps going and fails at
            # runtime when it tries to use calcOperator
            continue
        else: # returns and saves the operator
            calcOperator = calcList.pop(0)
    else:
        break
    # check other items in list to see if they are int or float
    # eljose: `check` records whether the given parameters are numbers; when
    # they are not, the calculation is skipped
    check = True
    for i in calcList:
        if i.isdigit() == False:
            print "Give me number only please"
            check = False
            break
    # if first item is operator
    # eljose: moved this out of the for loop that runs once per parameter
    # (except the first) — that is why the result used to print twice
    # eljose: "calcOperator in operators" is redundant: it was already checked above
    #elif calcOperator in operators:
    #then print the key paired function (in the dictionary) and pass it the parameters (the next items given by the user)
    #print opDictionary.get(calcOperator)(int(calcList[0]), int(calcList[1]))
    if check == True:
        print opDictionary.get(calcOperator)(int(calcList[0]), int(calcList[1]))
22f09d5531f32c93be25d1a377cc4685e55f1d2c | Python | mayahight/project-1 | /app/tutorial3.py | UTF-8 | 1,230 | 2.609375 | 3 | [] | no_license |
import tweepy
import pronouncing
from authorization_tokens import *
import random
# Authenticate against the Twitter API with the imported OAuth credentials.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
message = ""
# #Option5: basic search
#
# search_results = api.search(q="Abbott", lang="en", tweet_mode="extended")
# random_tweet = random.choice(search_results)
#
# # print( dir(random_tweet) )
#
# # import pprint
# # pp = pprint.PrettyPrinter(indent=4)
# # pp.pprint(random_tweet._json)
#
# text = random_tweet.full_text
# message = text.replace("Abbott", "Old Man")
# print(message)
# # Option6: mentions
#
# mentions = api.mentions_timeline()
# mention_tweet = random.choice(mentions)
#
# thanks = " gracias for the mention, u rock"
# message = "@" + mention_tweet.user.screen_name + thanks
# Option7: external API
# Pick a random mention, take a random word from it, and look up a rhyme
# for that word via the `pronouncing` CMU-dictionary library.
mentions = api.mentions_timeline()
mention_tweet = random.choice(mentions)
mention_tweet_words = mention_tweet.text.split()
word = random.choice(mention_tweet_words)
rhyming_word_list = pronouncing.rhymes(word)
# NOTE(review): random.choice raises IndexError when no rhyme exists — confirm.
rhyme_word = random.choice(rhyming_word_list)
print(mention_tweet)
# api.update_status(message)
# api.update_status(message, in_reply_to_status_id=mention_tweet.id)
| true |
dc60c71633ec84bf75bf067e87f4d35af1c65f68 | Python | olinyavod/pyftp | /pyftp.py | UTF-8 | 4,665 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import os
from enum import Enum
from ftplib import FTP
from getpass import getpass
# Module-level hostname: set by try_connect() and read by try_login()'s prompt.
host = str()
class ArgumentKeys(Enum):
    """Keys of the parsed command-line argument dictionary."""
    HOST = 0
    PORT = 1
    USER = 2
    PASSWORD = 3
    # Bug fix: a trailing comma made this member's value the tuple (4,)
    # instead of 4.  Members are only ever compared by identity in this
    # file, so the fix is transparent to callers.
    TRANSFER_FILE = 4
    CWD = 5
def print_usage() -> None:
    """Print the command-line help text, one option per line."""
    for line in ('Usage ftp [options] host.',
                 '\t-port\t- Set port for connection',
                 '\t-u\t\t- Set user',
                 '\t-p\t\t- Set password',
                 '\t-t\t\t- Set path to file for transfer to ftp server',
                 '\t-cwd\t\t - Set base directory.'):
        print(line)
def parse_arguments() -> dict:
    """Map command-line flags to ArgumentKeys entries.

    A flag selects which key the *next* plain token fills; any plain token
    resets the pending key back to HOST afterwards.  ``-t`` values
    accumulate in a list.  Exits with the usage text when no argument is
    given.
    """
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)

    flag_to_key = {
        '-port': ArgumentKeys.PORT,
        '-u': ArgumentKeys.USER,
        '-p': ArgumentKeys.PASSWORD,
        '-t': ArgumentKeys.TRANSFER_FILE,
        '-cwd': ArgumentKeys.CWD,
    }
    args = {ArgumentKeys.TRANSFER_FILE: []}
    pending = ArgumentKeys.HOST
    for token in sys.argv[1:]:
        if token in flag_to_key:
            pending = flag_to_key[token]
        elif pending is ArgumentKeys.TRANSFER_FILE:
            args[pending].append(token)
            pending = ArgumentKeys.HOST
        else:
            args[pending] = token
            pending = ArgumentKeys.HOST
    return args
def parse_host(host_name: str) -> (str, str, str):
    """Split "user@host:password" into (host, user, password).

    Single-pass scanner: each '@' commits the buffer as the user, each ':'
    commits it as the host (the last occurrence wins).  Whatever remains at
    the end becomes the host if none was seen, otherwise the password.
    Missing parts come back as empty strings.
    """
    user = ''
    h = ''
    pwd = ''
    buffer = ''
    for ch in host_name:
        if ch == '@':
            user, buffer = buffer, ''
        elif ch == ':':
            h, buffer = buffer, ''
        else:
            buffer += ch
    if h:
        pwd = buffer
    else:
        h = buffer
    return h, user, pwd
def try_connect(ftp: FTP, args: dict) -> bool:
    """Resolve the host (possibly "user@host:password") and connect.

    Side effects: updates the module-level ``host`` and back-fills USER and
    PASSWORD in ``args`` from the host string when they were not supplied
    explicitly.  Prompts for a host if none was given.
    :return: True on a successful connection, False otherwise.
    """
    global host
    host = args.get(ArgumentKeys.HOST)
    if not host:
        host = input('Input host name: ')
        if not host or host.isspace():
            return False

    port = args.get(ArgumentKeys.PORT)
    if not port or not port.isdigit():
        port = '21'  # default FTP control port

    (h, u, p) = parse_host(host)
    host = h

    user = args.get(ArgumentKeys.USER)
    if not user or user.isspace():
        args[ArgumentKeys.USER] = u

    # Bug fix: the password parsed out of "user@host:password" was stored
    # under ArgumentKeys.CWD, so try_login() never saw it (and it leaked
    # into the remote working-directory setting in main()).
    pwd = args.get(ArgumentKeys.PASSWORD)
    if not pwd or pwd.isspace():
        args[ArgumentKeys.PASSWORD] = p

    try:
        print(ftp.connect(host, int(port)))
        return True
    except BaseException as ex:
        print("Connect error: {}".format(ex))
        return False
def try_login(ftp: FTP, args: dict) -> bool:
    """Attempt to log in, re-prompting for credentials on failure.

    Gives up after more than 3 failed attempts, or when the user enters a
    blank user name at the prompt.
    """
    user = args.get(ArgumentKeys.USER)
    password = args.get(ArgumentKeys.PASSWORD)
    attempts = 0
    while True:
        try:
            print(ftp.login(user, password))
            return True
        except BaseException as ex:
            print("Login error: {}".format(ex))
            attempts += 1
            if attempts > 3:
                return False
            if not user or user.isspace():
                user = input('User ' + host + ': ')
                if not user or user.isspace():
                    return False
            # getpass keeps the password off the terminal echo.
            password = getpass("{} password: ".format(user))
def try_transfer_file(ftp: FTP, path: str) -> bool:
    """Upload a file — or recursively a directory — into the server's cwd.

    Directories are recreated remotely with MKD/CWD and the remote working
    directory is restored afterwards.
    :return: False as soon as any transfer fails, True otherwise.
    """
    try:
        if os.path.isfile(path):
            name = os.path.basename(path)
            print("Sending file: {}...".format(name))
            # Bug fix: open via a context manager so the local file handle is
            # closed even when storbinary raises (the original leaked it).
            with open(path, 'rb') as file:
                ftp.storbinary("STOR {}".format(name), file)
        elif os.path.isdir(path):
            name = os.path.basename(path)
            print("Make directory: {}".format(name))
            ftp.mkd(name)
            ftp.cwd(name)
            for f in os.listdir(path):
                local_path = os.path.join(path, f)
                if not try_transfer_file(ftp, local_path):
                    return False
            ftp.cwd('..')
        return True
    except BaseException as ex:
        print("Transfer file {} error: {}.".format(path, ex))
        return False
def try_transfer_files(ftp: FTP, args: dict) -> bool:
    """Upload every path queued under ``-t``.

    Returns True when all transfers succeed or when nothing was queued;
    stops at (and reports) the first failure, like the original loop.
    """
    paths = args.get(ArgumentKeys.TRANSFER_FILE)
    if not paths:
        return True
    return all(try_transfer_file(ftp, path) for path in paths)
def main():
    """Entry point: parse args, connect, login, transfer files, set cwd, quit."""
    args = parse_arguments()
    ftp = FTP()
    # Each stage aborts the program with exit code 1 on failure.
    for step in (try_connect, try_login, try_transfer_files):
        if not step(ftp, args):
            sys.exit(1)
    cwd = args.get(ArgumentKeys.CWD)
    if cwd and not cwd.isspace():
        ftp.cwd(cwd)
    ftp.quit()
| true |
90a221e6f2f878da363535e6d3d5552fd166606d | Python | ghaoziang/demo_for_proposal | /panoptes_schedulerDEMO/unit.py | UTF-8 | 1,235 | 2.796875 | 3 | [] | no_license | import yaml
class Unit:
    # A PANOPTES observation unit: holds an id, an optional YAML file of
    # candidate fields, and tracks the currently / previously observed field.
    def __init__(self, unit_id, field_file, field_list=None, last_field=None):
        self._unit_id = unit_id
        self._field_file = field_file
        self._field_list = field_list
        self._last_field = last_field
        self._current_field = dict()
        # Immediately (re)load the field list from the YAML file if one was
        # given — this overwrites any field_list passed to the constructor.
        self.read_field_file()
    @property
    def unit_id(self):
        return self._unit_id
    @property
    def field_file(self):
        return self._field_file
    @property
    def field_list(self):
        return self._field_list
    """the target unit observed at the last time"""
    @property
    def last_field(self):
        return self._last_field
    """the target unit observe now"""
    @property
    def current_field(self):
        return self._current_field
    # NOTE(review): this creates a *second* property named get_field_list
    # (read as an attribute, not called); the field_list property above is
    # unchanged.  Probably unintended — confirm before relying on it.
    @field_list.getter
    def get_field_list(self):
        return self._field_list
    def set_last_field(self, field):
        self._last_field = field
    def set_current_field(self, field):
        # Shift the current field into last_field before storing the new one.
        self._last_field = self._current_field
        self._current_field = field
    def read_field_file(self):
        # Load the YAML field list, replacing self._field_list.
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted files — consider yaml.safe_load.
        if self._field_file is not None:
            with open(self._field_file, 'r') as f:
                self._field_list = yaml.load(f.read())
| true |
f4da9a4f5afdd6f797430ebdc53c4aad17a472d5 | Python | astwyg/vc_tools | /src/invoice_helper.py | UTF-8 | 7,077 | 2.84375 | 3 | [] | no_license | import datetime
from docxtpl import DocxTemplate
def digital_to_chinese(digital):
    """Render a number in Chinese financial (anti-fraud) numerals.

    The integer part gets positional unit markers (圆/拾/佰/仟/万, then 亿 for
    the second group of four); up to three fractional digits get 角/分/厘;
    every Arabic digit is written in its banker's form.
    """
    text = str(digital)
    # Treat "x.0" / "x.00" as whole yuan.
    if text.endswith(".0") or text.endswith(".00"):
        text = text.split(".")[0]

    digit_map = {'1': '壹', '2': '贰', '3': '叁', '4': '肆', '5': '伍',
                 '6': '陆', '7': '柒', '8': '捌', '9': '玖', '0': '零'}
    units = ['拾', '佰', '仟', '万', '厘', '分', '角']

    integer_part, _, fraction_part = text.partition('.')

    # Tag each integer digit (scanning right to left) with its unit.
    tagged = [digit for digit in reversed(integer_part)]
    unit_idx = 0
    for pos in range(len(integer_part)):
        if pos == 0:
            tagged[pos] += '圆'
            continue
        tagged[pos] += units[unit_idx]
        unit_idx += 1
        if unit_idx == 4:
            unit_idx = 0
            units[3] = '亿'  # second group of four uses 亿 instead of 万

    # At most three fractional digits: 角, 分, 厘 (read off the list's tail).
    fraction = [digit for digit in fraction_part][:3]
    tail_idx = -1
    for pos in range(len(fraction)):
        fraction[pos] += units[tail_idx]
        tail_idx -= 1

    result = ''.join(reversed(tagged)) + ''.join(fraction)
    # Swap every Arabic digit for its banker's numeral in one pass.
    return result.translate(str.maketrans(digit_map))
class Trip:
    """One leg of a business trip: collects invoices interactively and
    aggregates them into the row data used by the expense-form template.

    Invoice type codes (see ``_types``): j=交通费 (transport), z=住宿费
    (lodging), c=餐费 (meals), q=其他 (other), d=滴滴票 (Didi ride — counted
    into transport but not into the invoice count), h=火车票 (train ticket —
    transport plus 9% VAT extracted into ``transport_tax``).
    """

    def __init__(self, index=1, pre_data=None):
        # Bug fix: ``pre_data={}`` was a mutable default argument shared
        # across calls; None is used as the sentinel instead.  Callers that
        # passed a dict explicitly are unaffected.
        self.index = index
        self.pre_data = {} if pre_data is None else pre_data
        # Per-category money totals: j=transport, z=lodging, c=meals, q=other.
        self.data = {
            "j": 0,
            "z": 0,
            "c": 0,
            "q": 0
        }
        self.transport_tax = []   # extracted VAT amount per train ticket
        self.invoice_cnt = 0      # number of paper invoices (Didi excluded)
        self.invoices = []
        self.date_ = ""
        self.start_date = ""
        self.end_date = ""
        self.from_ = ""
        self.to_ = ""
        self.didi_flag = False
        self._types = {
            "j": "交通费",
            "z": "住宿费",
            "c": "餐费",
            "q": "其他",
            "d": "滴滴票",
            "h": "火车票"
        }

    def add_invoice(self, info):
        """Record one invoice given as "<type code><amount>", e.g. "j120.5"."""
        if info[0] not in self._types.keys():
            print("类型不在下列范围\n")
            print(self._types)
            print("\n")
            return
        # TODO 检查最小到分 (validate the amount has at most two decimals)
        self.invoices.append({
            "type": info[0],
            "money": float(info[1:])
        })

    def format_(self):
        """Aggregate the recorded invoices and return the template row dict.

        When no meal invoice was entered, ``q`` is replaced by the daily
        allowance string (48 CNY/day for Beijing, 60 otherwise).
        NOTE(review): 'q' invoices are accepted by add_invoice but never
        summed here, and the day count subtracts raw yyyyMMdd integers (wrong
        across month boundaries) — confirm intended behavior before changing.
        """
        for invoice in self.invoices:
            kind, money = invoice["type"], invoice["money"]
            if kind == "j":
                self.data["j"] += money
                self.invoice_cnt += 1
            elif kind == "d":
                # Didi rides count as transport but produce no paper invoice.
                self.data["j"] += money
                self.didi_flag = True
            elif kind == "h":
                # Train tickets: strip the 9% VAT out of the gross amount.
                self.data["j"] += money
                self.transport_tax.append(round(money / 1.09 * 0.09, 2))
                self.invoice_cnt += 1
            elif kind == "z":
                self.data["z"] += money
                self.invoice_cnt += 1
            elif kind == "c":
                self.data["c"] += money
                self.invoice_cnt += 1
        if self.data["c"] == 0:
            days = int(int(self.end_date) - int(self.start_date))
            if self.to_ == "北京":
                self.data["q"] = "补助48*{}={}".format(days, days * 48)
            else:
                self.data["q"] = "补助60*{}={}".format(days, days * 60)
        return dict({
            "date": self.date_,
            "from": self.from_,
            "to": self.to_,
            "cnt": self.invoice_cnt,
            "transport_tax": self.transport_tax,
            "didi_flag": self.didi_flag
        }, **self.data)

    def start(self):
        """Interactively prompt for dates, locations and invoices.

        Returns None when the user enters nothing for the start date
        (signals "no more trips"), True otherwise.
        """
        print("第{}组行程\n".format(self.index))
        self.start_date = input("开始日期? 格式:yyyyMMdd, 如果结束请直接回车\n")
        if not self.start_date:
            return None
        self.end_date = input("结束日期?({})\n".format(self.start_date))
        if not self.end_date:
            # Single-day trip: reuse the start date.
            self.date_ = self.start_date
            self.end_date = self.start_date
        else:
            self.date_ = self.start_date + "-" + self.end_date
        if self.index == 0:
            # First leg defaults to departing from Tianjin.
            self.from_ = input("始发地?(天津)\n")
            if not self.from_:
                self.from_ = "天津"
        else:
            # Later legs default to the previous leg's destination.
            self.from_ = input("始发地?({})\n".format(self.pre_data.get("to")))
            if not self.from_:
                self.from_ = self.pre_data.get("to")
        self.to_ = input("目的地\n")
        while True:
            info = input("输入类型+金额\n")
            if not info:
                break
            else:
                self.add_invoice(info)
        return True
def main():
    """Collect up to four trip legs interactively, then render the expense report.

    Builds per-leg column strings (joined with Word <w:br/> line breaks),
    totals the reimbursable amount, and fills invoice_helper_template.docx.
    """
    infos = []
    # The first leg (index 0) prompts for the origin explicitly.
    trip1 = Trip(0)
    trip1.start()
    pre_data = trip1.format_()
    infos.append(pre_data)
    # Up to three more legs; each defaults its origin to the previous
    # destination. An empty start date ends the loop.
    for i in range(1,4):
        trip = Trip(i, pre_data)
        if not trip.start():
            break
        pre_data = trip.format_()
        infos.append(pre_data)
    # Column accumulators for the template (one entry per leg).
    date=[]
    from_=[]
    to_=[]
    j=[]
    z=[]
    c=[]
    q=[]
    cnt=0
    transport_tax=[]
    money=0
    didi_flag = False
    for info in infos:
        date.append(info["date"])
        from_.append(info["from"])
        to_.append(info["to"])
        j.append(info["j"])
        z.append(info["z"])
        c.append(info["c"])
        q.append(info["q"])
        if info["c"]:
            money += info["j"]+info["z"]+info["c"]
        else:
            # No meal invoices: "q" holds a "补助rate*days=total" string,
            # so parse the total from after the '='.
            money += info["j"] + info["z"] + float(info["q"].split("=")[-1])
        cnt += info["cnt"]
        transport_tax = transport_tax + info["transport_tax"]
        if info["didi_flag"]:
            didi_flag = True
    if didi_flag:
        # Didi VAT amounts are entered manually, space-separated; each one
        # also counts as a paper invoice.
        didi_tax = input("输入滴滴票税金, 用空格隔开\n")
        didi_tax = didi_tax.split(" ")
        cnt += len(didi_tax)
        for tax in didi_tax:
            transport_tax.append(float(tax))
    # "<w:br/>" renders as a line break inside a single docx table cell.
    context = {
        "transport_tax_len": len(transport_tax),
        "transport_tax": "<w:br/>".join(str(x) for x in transport_tax),
        "date": "<w:br/>".join(str(x) for x in date),
        "from_": "<w:br/>".join(str(x) for x in from_),
        "to_": "<w:br/>".join(str(x) for x in to_),
        "j": "<w:br/>".join(str(x) for x in j),
        "z": "<w:br/>".join(str(x) for x in z),
        "c": "<w:br/>".join(str(x) for x in c),
        "q":"<w:br/>".join(str(x) for x in q),
        "c_year" : datetime.datetime.now().year,
        "c_month" : datetime.datetime.now().month,
        "c_day" : datetime.datetime.now().day,
        "money": money,
        # Amount in upper-case Chinese numerals for the signature line.
        "cap": digital_to_chinese(money),
        "cnt":cnt
    }
    doc = DocxTemplate("invoice_helper_template.docx")
    doc.render(context)
    doc.save("报销-{}.docx".format(infos[0]["date"]))
    print(infos)
def test1():
    """Smoke test: render the template with a line-break payload and save it."""
    template = DocxTemplate("invoice_helper_template.docx")
    context = {"j": "100<w:br/>200"}
    template.render(context)
    template.save("报销-test.docx")
if __name__ == "__main__":
main()
# test1() | true |
25423b91b29655d183835528f115a366c7924d46 | Python | eladsegal/allennlp | /allennlp/modules/span_extractors/self_attentive_span_extractor.py | UTF-8 | 3,293 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | import torch
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.nn import util
@SpanExtractor.register("self_attentive")
class SelfAttentiveSpanExtractor(SpanExtractor):
    """
    Extracts span representations via span-locally normalised attention.

    A single linear layer assigns an unnormalised score to every token in the
    sequence. For each span, the scores of the tokens inside that span are
    softmax-normalised and used to take a weighted average of those tokens'
    embeddings. Spans therefore differ only in which set of tokens their
    attention distribution is normalised over.

    Registered as a `SpanExtractor` with name "self_attentive".

    # Parameters

    input_dim : `int`, required.
        The final dimension of the `sequence_tensor`.

    # Returns

    attended_text_embeddings : `torch.FloatTensor`.
        A tensor of shape (batch_size, num_spans, input_dim), where each span
        representation is the attention-weighted average of its tokens.
    """

    def __init__(self, input_dim: int) -> None:
        super().__init__()
        self._input_dim = input_dim
        # One scalar attention logit per token, shared across all spans.
        self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))

    def get_input_dim(self) -> int:
        return self._input_dim

    def get_output_dim(self) -> int:
        return self._input_dim

    @overrides
    def forward(
        self,
        sequence_tensor: torch.FloatTensor,
        span_indices: torch.LongTensor,
        span_indices_mask: torch.BoolTensor = None,
    ) -> torch.FloatTensor:
        # Score every token. Shape: (batch_size, sequence_length, 1)
        token_scores = self._global_attention(sequence_tensor)

        # Stack scores onto embeddings so one batched_span_select call
        # gathers both. Shape: (batch_size, sequence_length, embedding_dim + 1)
        embeddings_with_scores = torch.cat([sequence_tensor, token_scores], -1)

        gathered, span_mask = util.batched_span_select(embeddings_with_scores, span_indices)

        # Split back into embeddings and scores.
        # Shapes: (batch_size, num_spans, max_batch_span_width, embedding_dim)
        #         (batch_size, num_spans, max_batch_span_width)
        span_embeddings = gathered[..., :-1]
        span_scores = gathered[..., -1]

        # Normalise scores within each span, respecting the width mask.
        span_attention_weights = util.masked_softmax(span_scores, span_mask)

        # Weighted average of each span's token embeddings.
        # Shape: (batch_size, num_spans, embedding_dim)
        attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)

        if span_indices_mask is not None:
            # Zero out spans that were padding in the original input
            # (distinct from the per-span width masking handled above).
            attended_text_embeddings = attended_text_embeddings * span_indices_mask.unsqueeze(-1)
        return attended_text_embeddings
| true |
b14cd8692ebf51c8578c72e881730dd89697d291 | Python | ChrisKuang1/python_study | /algorithm/hw_stack.py | UTF-8 | 1,490 | 4.15625 | 4 | [] | no_license | #单身狗配对
""" 1. 所有参加活动的人都只排成一列,来参加活动的女生只会和排在队伍最后的男生配对。
2. 如果女生来到现场没有可以配对的男生则活动失败。
3. 如果最后有没有被领走的男生则活动也失败。 """
""" queue = input()
stack = []
result = True
for p in queue:
if p == 'm':
stack.append(p)
else:
if len(stack) == 0:
result = False
break
else:
stack.pop()
if len(stack) > 0:
result = False
print(result) """
""" 为了让配对活动更加完善,本次活动还考虑了双方的性格,双方必须性格也一致才能完成配对。
本着女士优先的原则,来参加活动的女生可以直接配对,先到的男生必须先等待。而如果女生来到现场没有可以配对的男生则活动失败,如果最后有没有被领走的男生则活动也失败。
假设所有参加活动的人都只排成一列,来参加活动的女生只会和排在队伍最后的男生配对。
"""
# Matching game (see the problem statement above): tokens starting with 'm'
# are men who wait in a stack; any other token is a woman who must pair with
# the most recently arrived man, and only if their personality characters
# match. Print True if everyone is paired, False otherwise.
queue_list = input().split(' ')
result = True
stack = []
for p in queue_list:
    if p.startswith('m'):
        # A man arrives and waits.
        stack.append(p)
    else:
        if len(stack) == 0:
            # A woman arrived with no man waiting: the event fails.
            result = False
            break
        else:
            # Pair only when the last man's trailing character matches the
            # woman's second character (her personality).
            if stack[len(stack) - 1].endswith(p[1]):
                stack.pop()
            else:
                result = False
                break
# Any man left unpaired also means failure.
if len(stack) > 0:
    result = False
print(result)
074e6538b00d5397b14cd32f0bf1cb518a9d1fe6 | Python | dudung/soal | /src/0/26/plot-ml-linreg.py | UTF-8 | 2,868 | 3 | 3 | [
"MIT"
] | permissive | #
# plot-data-2.py
# Plot data using Mathplotlib to PNG as simple as possible
#
# Sparisoma Viridi | https://github.com/dudung
#
# 20210421
# 0319 Modify plot-data-2.py code for ML linear regression.
# 0335 Copy something from plot-two-mass-system-0.py code.
# 0340 Can work as previous one.
# 0503 Can show step, a, b; plot not work for data and curve.
# 0527 Convert Python list to numpy Array [1].
# 0820 Try merging with 2021-04-20-machine-learning-6.md soal.
# 0845 Can animate with previous code in soal.
# 0911 Continue after restarting (?).
#
# References
# 1. url https://www.geeksforgeeks.org/convert-python-list
# -to-numpy-arrays/ [20210421].
#
# Import required libraries
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
xdata = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
ydata = np.array([1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6])
N = min(len(xdata), len(ydata))
a = 6
b = 0
eta = 0.001
def curve(a, b, x=None):
    """Return the regression line a + b*x.

    Parameters
    ----------
    a, b : float
        Intercept and slope of the fitted line.
    x : array-like or scalar, optional
        Points at which to evaluate the line. Defaults to the module-level
        `xdata`, so existing two-argument calls behave exactly as before.
    """
    if x is None:
        x = xdata
    return a + b * x
# Create array for time t from tbed to tend with step dt
tbeg = 0
tend = 1000
dt = 1
t = np.arange(tbeg, tend, dt)
# Get range of x and y
#y = wave(0)
xmin = 0
xmax = 10
dx = 1
ymin = 0
ymax = 10
dy = 1
xmt = np.arange(xmin, xmax + dx, dx)
ymt = np.arange(ymin, ymax + dy, dy)
# Get figure for plotting and set it
fig = plt.figure(figsize=[3, 3])
# Configure axes
ax = fig.add_subplot()
ax.grid(which='both')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_xticks(xmt, minor=True)
ax.set_yticks(ymt, minor=True)
# It must be set after configuring axis to give the effect
fig.tight_layout(rect=[-0.03, -0.03, 1.03, 1.03])
# Prepare curve
mark, = ax.plot([], [], 'sr', lw=1, ms=4)
line, = ax.plot([], [], '-b', lw=2)
time_template = 's = %i, a = %.3f, b = %.3f'
time_text = ax.text(0.03, 0.93, '', transform=ax.transAxes)
err_template = 'err = %.4f'
err_text = ax.text(0.03, 0.83, '', transform=ax.transAxes)
# Perform animation
def init():
    """Reset all artists to an empty state (FuncAnimation blit initializer)."""
    line.set_data([], [])
    mark.set_data([], [])
    time_text.set_text('')
    err_text.set_text('')
    # NOTE(review): err_text is cleared here but not returned (animate does
    # return only line/mark/time_text too); with blit=True that can leave
    # stale error text on screen — confirm intended.
    return line, mark, time_text
def animate(i):
    """One gradient-descent step on the global (a, b); redraw every 40th step."""
    global a, b
    eps = 0
    depsda = 0
    depsdb = 0
    # Accumulate the squared error and its partial derivatives w.r.t. a and b
    # over all N data points.
    for j in range(0, N):
        f = a + b * xdata[j]
        eps += (f - ydata[j]) * (f - ydata[j])
        depsda += 2 * (f - ydata[j]) * 1
        depsdb += 2 * (f - ydata[j]) * xdata[j]
    # Plain gradient-descent update with learning rate eta.
    a = a - eta * depsda
    b = b - eta * depsdb
    s = i - 1
    if s % 40 == 0:
        # Refresh the fitted line, the data markers and the text readouts.
        y = curve(a, b)
        line.set_data([xdata], [y])
        mark.set_data([xdata], [ydata])
        time_text.set_text(time_template % (s, a, b))
        err_text.set_text(err_template % eps)
    return line, mark, time_text
ani = animation.FuncAnimation(
fig, animate, np.arange(1, len(t)),
interval=5, blit=True, init_func=init
)
# Write to to a GIF animation
writergif = animation.PillowWriter(fps=40)
ani.save('0267.gif', writer=writergif)
| true |
3b51b06a0eb67b571ffcba11b24eb996a77f3b5b | Python | Madhumidha14/python-task-1 | /pattern.py | UTF-8 | 423 | 3.84375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 22 17:11:37 2021
@author: DELL
"""
def triangle(n):
k = n-1 #number of spaces
for i in range(0, n):# handles no of rows
for j in range(0, k):
print(end=" ")
k = k - 1
for j in range(0, i+1):
print("*", end=" " )
print("\r")
n=5
triangle(n)
| true |
59ad9628c1d4e4b565aa363455a46b24cd5e5984 | Python | b2-2020-2021-python/01-mediatheque | /actions/help.py | UTF-8 | 494 | 2.609375 | 3 | [] | no_license | from action import Action,ActionManager
class HelpAction(Action):
    """Command that prints the detailed help text of another registered command."""

    def __init__(self, actions):
        # Mapping of command name -> Action, used to resolve the argument.
        self.actions = actions

    def execute(self, param):
        # Look up the requested command and display its long help text.
        print(self.actions[param].help())

    def info(self):
        return "Affiche l'aide d'une commande"

    def help(self):
        return """Affiche l'aide d'une commande, passer la commande en paramètre"""
ActionManager.getInstance().registerCommand("help",HelpAction(ActionManager.getInstance())) | true |
c17402ebad222cb2bcb29534b0a8c61014590dd7 | Python | glebpro/MultiClassClassifier | /test.py | UTF-8 | 1,131 | 3.09375 | 3 | [
"MIT"
] | permissive |
from classifiers.MultiClassClassifier import MultiClassClassifier
def read_corpus(fname):
    """Parse the tab-separated corpus file into a list of record dicts.

    Each line holds: id, sentence, polarity, topic, genre. All spaces are
    stripped from every field (including inside the sentence).
    """
    records = []
    with open(fname) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split("\t")
            records.append({
                "id": fields[0].replace(' ', ''),
                "sentence": fields[1].replace(' ', ''),
                "genre": fields[4].replace(' ', ''),
                "polarity": fields[2].replace(' ', ''),
                "topic": fields[3].replace(' ', ''),
            })
    return records
def test0():
    """Train on data/training_data.txt with an 80/20 split and print accuracy."""
    corpus = read_corpus('data/training_data.txt')
    # Keep only rows with a real topic label.
    classes = [l['topic'] for l in corpus if l['topic'] != "NONE"]
    texts = [l['sentence'] for l in corpus if l['topic'] != "NONE"]

    # 80/20 train/test split
    split = int(len(classes) * .8)
    training_classes = classes[:split]
    training_texts = texts[:split]
    test_classes = classes[split:]
    # Bug fix: the held-out texts were taken from `classes` instead of
    # `texts`, so evaluation ran on topic labels rather than sentences.
    test_texts = texts[split:]

    epochs = 1
    mcc = MultiClassClassifier('data/glove.6b.100d.txt', training_classes, training_texts, epochs)
    accuracy = mcc.evaluate(test_classes, test_texts)
    print(accuracy)
test0()
| true |
526bf78318cd1ef7c20fdb8f4fc9e327159950cf | Python | alylne/POO-Trabalhos | /Atividade 3/__main__.py | UTF-8 | 231 | 2.78125 | 3 | [] | no_license | from Ponto import Ponto
from Quadrilatero import Quadrilatero
if __name__ == '__main__':
p1 = Ponto(2, 4)
p2 = Ponto(4, 2)
quadri = Quadrilatero(p1, p2)
print(quadri.contidoEmQ(p1))
print(quadri.contidoEmQ(p2)) | true |
6c21bed1ca279f5383ac0f62c2994b735fb01436 | Python | Fadlann/DataStructure | /Priority Queue/MaxHeap.py | UTF-8 | 2,536 | 3.6875 | 4 | [] | no_license | class MaxHeap:
def __init__(self):
self.heap = []
def getParent(self, index):
return int((index-1)/2)
def getLeftChild(self, index):
return 2*index + 1
def getRightChild(self, index):
return 2*index + 2
def hasParent(self, index):
return self.getParent(index) >= 0
def hasLeftChild(self, index):
return self.getLeftChild(index) < len(self.heap)
def hasRightChild(self, index):
return self.getRightChild(index) < len(self.heap)
def swap(self, index1, index2):
self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]
def bubbleUp(self, index):
size = len(self.heap)
while self.hasParent(index) and self.heap[index] > self.heap[self.getParent(index)]:
self.swap(index, self.getParent(index))
index = self.getParent(index)
def insertKey(self, key):
self.heap.append(key)
self.bubbleUp(len(self.heap) - 1)
def printHeap(self):
print(self.heap)
def getMaxValChildIndex(self, index):
if self.hasLeftChild(index):
leftChild = self.getLeftChild(index)
if self.hasRightChild(index):
rightChild = self.getRightChild(index)
if self.heap[leftChild] > self.heap[rightChild]:
return leftChild
else :
return rightChild
else:
return leftChild
else:
return -1
def bubbleDown(self, index):
while self.hasLeftChild(index):
maxChildIndex = self.getMaxValChildIndex(index)
if maxChildIndex == -1:
break
if self.heap[index] < self.heap[maxChildIndex]:
self.swap(index, maxChildIndex)
index = maxChildIndex
else:
break
def poll(self):
if len(self.heap) == 0:
return -1
lastIndex = len(self.heap) - 1
self.swap(0, lastIndex)
former = self.heap.pop()
self.bubbleDown(0)
return former
if __name__ == "__main__":
    # Demo: build a heap, insert an extra key, then delete the root.
    maxHeap = MaxHeap()
    arr = [45, 99, 63, 27, 29, 57, 42, 35, 12, 24]
    for i in arr:
        maxHeap.insertKey(i)
    print("Inital Heap")
    maxHeap.printHeap()
    # NOTE(review): the heap is printed once before and once after inserting
    # 50 below; the first print still shows the pre-insert state.
    print("Insert key 50")
    maxHeap.printHeap()
    maxHeap.insertKey(50)
    maxHeap.printHeap()
    print("delete root node")
    maxHeap.poll()
    maxHeap.printHeap()
| true |
41da477c557b0a36bd134fb74b640f60b908b76d | Python | gschen/where2go-python-test | /1906101031王卓越/14周/1.py | UTF-8 | 89 | 2.6875 | 3 | [] | no_license | li = list(map(int,input('请输入列表').split()))
# Count the distinct values among the integers read above and print the
# count plus the distinct values (set iteration order is unspecified).
s = set(li)
print(len(s),list(s))
| true |
d95b969469a7732e87046ac6c3983f27f35ffd01 | Python | Sleeither1234/t08_huatay.chunga | /chunga/comparacion.py | UTF-8 | 532 | 3.046875 | 3 | [] | no_license | #EJERCICIO1
print("adrian"=="Adrian")
#EJERCICIO2
print("12" == "5")
#EJERCICIO3
print("3"!="5")
#EJERCICIO4
print("hola"=="hoola")
#EJERCICIO5
print("adios"=="good bye")
#EJERCICIO6
print("dinero"=="felicidad")
#EJERCICIO7
print("712421"=="7712421")
#EJERCICIO8
print("Karla"=="Geraldine")
#EJERCICIO9
print("B"!="b")
#EJERCICIO10
print("azul"!="azlu")
#EJERCICIO11
print("sii"=="sii")
#EJERCICIO12
print("K"=="K")
#EJERCICIO13
print("tbn"=="tambien")
#EJERCICIO14
print("1"=="true")
#EJERCICIO15
print("0"=="false")
| true |
def find_pairs(arr1, arr2):
    """Return [i, j] index pairs where the two sorted lists share a value.

    Walks both lists in lockstep (two-pointer merge); returns [] when either
    input is empty.
    """
    if not arr1 or not arr2:
        return []
    pairs = []
    i = j = 0
    len1, len2 = len(arr1), len(arr2)
    while i < len1 and j < len2:
        left, right = arr1[i], arr2[j]
        if left == right:
            pairs.append([i, j])
            i += 1
            j += 1
        elif left < right:
            i += 1
        else:
            j += 1
    return pairs
arr1, arr2 = [1,2,3,5,7,8,10,19], [2,4,6,7,10,14]
print(find_pairs(arr1, arr2))
| true |
6c8f5ec727379fde700ef5347df4df86bed4d67a | Python | mrhallonline/practice-python-exercises | /practicePythonExercises/18cowsAndBulls.py | UTF-8 | 1,392 | 5.21875 | 5 | [] | no_license | # Create a program that will play the “cows and bulls” game with the user. The game works like this:
# Randomly generate a 4-digit number. Ask the user to guess a 4-digit number. For every digit that the user guessed correctly in the correct place, they have a “cow”. For every digit the user guessed correctly in the wrong place is a “bull.” Every time the user makes a guess, tell them how many “cows” and “bulls” they have. Once the user guesses the correct number, the game is over. Keep track of the number of guesses the user makes throughout teh game and tell the user at the end.
# Say the number generated by the computer is 1038. An example interaction could look like this:
# Welcome to the Cows and Bulls Game!
# Enter a number:
# >>> 1234
# 2 cows, 0 bulls
# >>> 1256
# 1 cow, 1 bull
# ...
# Until the user guesses the number.
import random
def gen_four_digits():
new_num=[]
a = random.randint(0,9)
b = random.randint(0,9)
c = random.randint(0,9)
d = random.randint(0,9)
rand_digit = (a,b,c,d)
rand_number = int("".join(map(str, rand_digit)))
return rand_number
def cows_and_bulls(num):
cows=0
bulls=[]
user_number = str(input("Enter a 4 digit number"))
for i in num:
if (num[i] == user_number[i]):
cows+= 1
print(cows)
num = gen_four_digits()
cows_and_bulls(num)
| true |
c4e0202be9bb6d91e277b0f3ba5f5283466a96ed | Python | kryInit/procon31 | /utility/makeFieldInfo.py | UTF-8 | 2,236 | 2.75 | 3 | [] | no_license | # python makeFieldInfo.py [token] [URL(最初の部分)] [teamID] [matchID]
import os
import sys
import time
import requests
# Usage: makeFieldInfo.py <token> <base URL> <teamID> <matchID>
if (len(sys.argv) < 5):
    print("[makeFieldInfo] 引数が足りません", file=sys.stderr)
    sys.exit()
# Millisecond sleep helper.
usleep = lambda x: time.sleep(x/1000.0)
token = sys.argv[1]
teamID = sys.argv[3]
matchID = sys.argv[4]
URL = sys.argv[2] + "/matches/{0}".format(matchID)
header = {"x-api-token": token}
con = requests.get(URL, headers=header)
sc = con.status_code
if sc == 425:
    # 425 Too Early: honour the server's retry-after (seconds -> ms).
    usleep((int(con.headers['retry-after'])-1)*1000)
cnt = 0
# Retry transient statuses (400/425/429) every 300 ms, at most 60 times.
while (sc == 400 or sc == 425 or sc == 429) and cnt < 60:
    usleep(300);
    con = requests.get(URL, headers=header)
    sc = con.status_code
    cnt += 1
if sc != 200:
    print("[makeFieldInfo] matchID: {0}, status code: {1}, fieldInfoの取得ができませんでした".format(matchID, sc), file=sys.stderr)
    sys.exit()
# Write the field info next to this script under ../data/<matchID>/fieldInfo.
filePath = os.path.dirname(__file__)
filePath = (filePath if filePath else '.') + "/../data/{0}/fieldInfo".format(matchID)
with open(filePath, mode='w') as f:
    conj = con.json()
    walls = conj['walls']
    areas = conj['areas']
    points = conj['points']
    teams = conj['teams']
    actions = conj['actions']
    print(conj['height'], conj['width'], conj['turn'], conj['startedAtUnixTime'], file=f);
    for wall in walls:
        print(*wall, file=f)
    for area in areas:
        print(*area, file=f)
    for point in points:
        print(*point, file=f)
    for i in range(len(teams)):
        team = teams[i]
        # Ensure our own team is written first: if slot 0 is not us (or slot 1
        # is us), swap to the other entry via i^1.
        if ((i == 0 and int(teams[i]['teamID']) != int(teamID)) or (i == 1 and int(teams[i]['teamID']) == int(teamID))):
            team = teams[(i^1)]
        print(team['teamID'], team['agent'], file=f)
        print(team['areaPoint'], team['wallPoint'], file=f)
        agents = team['agents']
        for agent in agents:
            # API coordinates are 1-based; the file format is 0-based.
            print(agent['agentID'], agent['x']-1, agent['y']-1, file=f)
    # print(len(actions), file=f)
    # for action in actions:
    #     print(action['x']-1, action['y']-1, action['agentID'], action['turn'], action['apply'], action['type'], file=f)
print("[makeFieldInfo] matchID: {0}, fieldInfoの取得が完了しました".format(matchID), file=sys.stderr)
| true |
20a7d698d41662b08767a75fa422c26825c8b48c | Python | haru-256/bandit | /policy/_stochastic_bandits.py | UTF-8 | 5,692 | 3.046875 | 3 | [] | no_license | """define some policy"""
from abc import ABC, abstractmethod
from typing import Union
import numpy as np
from ._check_input import _check_stochastic_input, _check_update_input
class PolicyInterface(ABC):
    """Abstract base class for all bandit policies.

    Concrete policies must implement arm selection and the reward update.
    """

    @abstractmethod
    def select_arm(self) -> int:
        """Select arms according to the policy for new data.

        Returns
        -------
        result: int
            The selected arm.
        """
        pass

    @abstractmethod
    def update(self, chosen_arm: int, reward: Union[int, float]) -> None:
        """Update the reward information about each arm.

        Parameters
        ----------
        chosen_arm: int
            The chosen arm.

        reward: int, float
            The observed reward value from the chosen arm.
        """
        pass
class BasePolicy(PolicyInterface):
    """Base class for basic policies.

    Holds the shared bookkeeping (per-arm pull counts, cumulative rewards,
    total step counter) that concrete policies compute their scores from.

    Parameters
    ----------
    n_arms: int
        The number of given bandit arms.
    """

    def __init__(self, n_arms: int) -> None:
        """Initialize class."""
        _check_stochastic_input(n_arms)
        self.n_arms = n_arms
        # Number of times each arm has been pulled.
        self.counts = np.zeros(self.n_arms, dtype=int)
        # Sum of observed rewards per arm.
        self.cumulative_rewards = np.zeros(self.n_arms)
        # Total number of updates seen so far.
        self.t = 0

    def update(self, chosen_arm: int, reward: Union[int, float]) -> None:
        """Update the reward information about each arm.

        Parameters
        ----------
        chosen_arm: int
            The chosen arm.

        reward: int, float
            The observed reward value from the chosen arm.
        """
        _check_update_input(chosen_arm, reward)
        self.t += 1
        self.counts[chosen_arm] += 1
        self.cumulative_rewards[chosen_arm] += reward
class UCB(BasePolicy):
    """Upper Confidence Bound policy.

    Parameters
    ----------
    n_arms: int
        The number of given bandit arms.
    """

    def __init__(self, n_arms: int) -> None:
        """Initialize class."""
        super().__init__(n_arms)

    def select_arm(self) -> int:
        """Pick the next arm: any never-pulled arm first, then the best UCB score.

        Returns
        -------
        arm: int
            The selected arm.
        """
        if 0 in self.counts:
            return np.argmin(self.counts)
        ucb_scores = self.mean_rewards + self.correction_factor
        return np.argmax(ucb_scores)

    @property
    def mean_rewards(self) -> np.ndarray:
        """Empirical mean reward of each arm."""
        return self.cumulative_rewards / self.counts

    @property
    def correction_factor(self) -> np.ndarray:
        """Exploration bonus sqrt(log t / (2 n_i)) for each arm."""
        return np.sqrt(np.log(self.t) / (2 * self.counts))
class UCBOffline(BasePolicy):
    """UCB variant fed with batches of logged (offline) rewards.

    Each update() receives every reward observed for one arm in a log batch:
    the arm's pull count (`counts`) increases by one per batch, while a
    separate per-sample counter drives the exploration bonus.
    """

    def __init__(self, n_arms: int) -> None:
        """Initialize class."""
        _check_stochastic_input(n_arms)
        super().__init__(n_arms)
        # Per-sample counts, used only by the exploration (correction) term.
        self.correction_factor_counts = np.zeros(self.n_arms, dtype=int)

    def update(self, chosen_arm: int, rewards: np.ndarray) -> None:
        """Update statistics for `chosen_arm` from a batch of logged rewards.

        Args:
            chosen_arm (int): selected arm
            rewards (numpy.ndarray): the observed rewards from selected arm,
                shape = (N,), N is log size

        Raises:
            TypeError: if the arguments have the wrong type or shape.
        """
        # Bug fix: the original constructed these TypeErrors without `raise`,
        # so invalid input passed through the checks silently.
        if not isinstance(chosen_arm, (int, np.int64)):
            raise TypeError("chosen_arm must be int or numpy.int64")
        if not isinstance(rewards, np.ndarray):
            raise TypeError("rewards must be numpy.ndarray")
        if rewards.ndim != 1:
            raise TypeError("rewards must be 1 dim array")
        self.t += rewards.shape[0]
        self.counts[chosen_arm] += 1
        self.correction_factor_counts[chosen_arm] += rewards.shape[0]
        # `counts` tracks batches, so this accumulates per-batch mean rewards.
        self.cumulative_rewards[chosen_arm] += rewards.mean()

    def select_arm(self) -> int:
        """Pick the next arm: any never-updated arm first, then the best UCB score.

        Returns
        -------
        arm: int
            The selected arm.
        """
        if 0 in self.counts:
            arm = np.argmin(self.counts)
        else:
            arm = np.argmax(self.mean_rewards + self.correction_factor)
        return arm

    @property
    def mean_rewards(self) -> np.ndarray:
        """Average of per-batch mean rewards for each arm."""
        return self.cumulative_rewards / self.counts

    @property
    def correction_factor(self) -> np.ndarray:
        """Exploration bonus based on the number of logged samples per arm."""
        return np.sqrt(np.log(self.t) / (2 * self.correction_factor_counts))
class UCB1(BasePolicy):
    """Upper Confidence Bound (UCB1) policy.

    Parameters
    ----------
    n_arms: int
        The number of given bandit arms.
    """

    def __init__(self, n_arms: int) -> None:
        """Initialize class."""
        super().__init__(n_arms)

    def select_arm(self) -> int:
        """Pick the next arm: any never-pulled arm first, then the best UCB1 score.

        Returns
        -------
        arm: int
            The selected arm.
        """
        if 0 in self.counts:
            return np.argmin(self.counts)
        ucb_scores = self.mean_rewards + self.correction_factor
        return np.argmax(ucb_scores)

    @property
    def mean_rewards(self) -> np.ndarray:
        """Empirical mean reward of each arm."""
        return self.cumulative_rewards / self.counts

    @property
    def correction_factor(self) -> np.ndarray:
        """Exploration bonus sqrt(2 log t / n_i) for each arm."""
        return np.sqrt(2 * np.log(self.t) / self.counts)
| true |
47ad0b2c38e21458cd0371f7d4c328bea086b516 | Python | bluedian/python_test | /unit_redis/redis_request_r.py | UTF-8 | 714 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import redis
import json
import requests
def redis_list_read(data_name):
    """Pop one item from the Redis list `data_name` and try to parse it as JSON.

    Exits the process when the list is empty.
    """
    r = redis.Redis(host='127.0.0.1', port=6379)
    # lpop returns bytes, or None when the list is empty.
    abc = r.lpop(data_name)
    if abc is None:
        print('无数据了')
        exit()
    print(abc)
    print(type(abc))
    abc_dic = abc.decode('utf-8')
    print('abc_dic-->type-->:', type(abc_dic))
    try:
        # Single quotes are swapped for double quotes so Python-dict-style
        # strings can be parsed as JSON.
        abc_json = json.loads(abc_dic.replace("'", "\""))
        print(abc_json)
        print(type(abc_json))
        print(abc_json['name'])
    except:
        # NOTE(review): bare except hides the real error, and the handler
        # re-runs the same json.loads that just failed — if parsing was the
        # cause, this raises uncaught. Presumably the intent was to handle a
        # missing 'name' key only; confirm and narrow the except clause.
        print('---------')
        print(abc_dic)
        print(type(abc_dic))
        abc_json = json.loads(abc_dic.replace("'", "\""))
        print(abc_json)
redis_list_read('qidian_name')
| true |
42b5f85543b56c6c0ca9a1015896936ee03b0d34 | Python | jinurajan/Datastructures | /LeetCode/binary_search/find_peak_element.py | UTF-8 | 1,741 | 4.28125 | 4 | [] | no_license | """
Find Peak Element
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
Follow up: Your solution should be in logarithmic complexity.
"""
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
n = len(nums)
if n == 1:
return 0
else:
l, r = 0, n-1
while l < r:
mid = (l + r) // 2
if nums[mid] > nums[mid + 1]:
r = mid
else:
l = mid + 1
return l
class Solution1:
def findPeakElement(self, nums: List[int]) -> int:
n = len(nums)
if n == 1:
return 0
def binary_search(nums, l, r):
if l < r:
mid = (l + r) // 2
if nums[mid] > nums[mid+1]:
return binary_search(nums, l, mid)
return binary_search(nums, mid + 1, r)
return l
return binary_search(nums, 0, n-1)
print(Solution().findPeakElement(nums=[1,2,1,3,5,6,4]))
print(Solution1().findPeakElement(nums=[1,2,1,3,5,6,4])) | true |
4f067832e7b0caa37f47e16cee5531e6f4d3b3d6 | Python | mselbrede/Discord-Bot | /BasicBot.py | UTF-8 | 5,240 | 2.65625 | 3 | [] | no_license | import discord
import asyncio
from discord.ext.commands import Bot
from discord.ext import commands
import platform
import time as timemod
# Here you can modify the bot's prefix and description and wether it sends help in direct messages or not.
client = Bot(description="UMUC Cyber Padawan Bot", command_prefix="!", pm_help = False)
#=======================================================#
# Startup Sequence #
#=======================================================#
@client.event
async def on_ready():
print('Logged in as '+client.user.name+' (ID:'+client.user.id+') | Connected to '+str(len(client.servers))+' servers | Connected to '+str(len(set(client.get_all_members())))+' users')
print('--------')
print('Current Discord.py Version: {} | Current Python Version: {}'.format(discord.__version__, platform.python_version()))
print('--------')
print('Use this link to invite {}:'.format(client.user.name))
print('https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=8'.format(client.user.id))
print('--------')
print('Support Discord Server: https://discord.gg/FNNNgqb')
print('Github Link: https://github.com/Habchy/BasicBot')
print('--------')
print('You are running BasicBot v2.1') #Do not change this. This will really help us support you, if you need support.
print('Created by Habchy#1665')
return await client.change_presence(game=discord.Game(name='PYTHON BOT')) #This is buggy, let us know if it doesn't work.
#=======================================================#
# Commands #
#=======================================================#
@client.command()
async def ping(*args):
await client.say(":ping_pong: Pong!!")
await asyncio.sleep(3)
@client.command()
async def silly(*args):
await client.say("Why did the HTTP request get pulled over on the interstate?\nIt was going over 80!")
await asyncio.sleep(3)
@client.command()
async def tutorial(*args):
await client.say("https://github.com/ajackal/python_tutorials/wiki")
await asyncio.sleep(3)
@client.command()
async def new(*args):
await client.say("You can write a new bot function like this!```python\n" + """
@client.command()
async def COMMANDNAME(*args):
await client.say("SAYTHIS")
await asyncio.sleep(3)```""")
await asyncio.sleep(3)
@client.command()
async def time(*args):
await client.say(str(timemod.localtime().tm_hour).zfill(2)+str(timemod.localtime().tm_min).zfill(2))
await asyncio.sleep(3)
@client.command()
async def test(*args):
await client.say("who goes there?")
await asyncio.sleep(3)
@client.command()
async def python(*args):
await client.say("Does this answer your question?")
await client.say("https://python101.pythonlibrary.org/intro.html")
await asyncio.sleep(3)
@client.command()
async def echo(*args):
await client.say("You said:")
for word in args:
await client.say(word)
await asyncio.sleep(3)
@client.command(pass_context=True)
async def create_private_channel(ctx, channel_name, *users : discord.User):
    '''Create a private channel named name, and then add users.'''
    # The server (guild) this command was invoked from.
    server = ctx.message.server
    # Ensure the channel has at least one user; abort otherwise.
    if not users:
        await client.say("Usage: create_private_channel <name> <users...>")
        # Bug fix: without this return the channel was created anyway,
        # despite the usage error reported above.
        return
    # Deny @everyone read access; grant it to the invited group.
    everyone_perms = discord.PermissionOverwrite(read_messages=False)
    group_perms = discord.PermissionOverwrite(read_messages=True)
    # Overwrite the channel default with the deny-everyone rule.
    everyone = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
    # Build the allow-list: the bot itself plus every listed user.
    group = []
    group.append(discord.ChannelPermissions(target=server.me, overwrite=group_perms))
    for user in users:
        group.append(discord.ChannelPermissions(target=user, overwrite=group_perms))
    # Create the text channel with the "everyone" and "group" overwrites.
    await client.create_channel(server, channel_name, everyone, *group, type=discord.ChannelType.text)
    await asyncio.sleep(3)
#@client.command()
#async def ping(*args):
# await client.say(":ping_pong: Pong!!")
# await asyncio.sleep(3)
#Runs the bot
#client.run('')
#Original Author Information
#
#
#
# Basic Bot was created by Habchy#1665
# Please join this Discord server if you need help: https://discord.gg/FNNNgqb
# Please modify the parts of the code where it asks you to. Example: The Prefix or The Bot Token
# This is by no means a full bot, it's more of a starter to show you what the python language can do in Discord.
# Thank you for using this and don't forget to star my repo on GitHub! [Repo Link: https://github.com/Habchy/BasicBot]
# The help command is currently set to not be Direct Messaged.
# If you would like to change that, change "pm_help = False" to "pm_help = True" on line 9.
| true |
20a14fd6bc070cdeff44fa8c7a87ac0269907bf2 | Python | hyperlolo/MorseCode | /main.py | UTF-8 | 1,754 | 3.84375 | 4 | [] | no_license | #######Morse Code Translator by Karanjit Gill######
"""A Dictionary the various text, number, and symbol translations in Morse Code"""
Morse_code_trans = {'A': '.-', 'B': '-...',
'C': '-.-.', 'D': '-..', 'E': '.',
'F': '..-.', 'G': '--.', 'H': '....',
'I': '..', 'J': '.---', 'K': '-.-',
'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-',
'R': '.-.', 'S': '...', 'T': '-',
'U': '..-', 'V': '...-', 'W': '.--',
'X': '-..-', 'Y': '-.--', 'Z': '--..',
'1': '.----', '2': '..---', '3': '...--',
'4': '....-', '5': '.....', '6': '-....',
'7': '--...', '8': '---..', '9': '----.',
'0': '-----', ', ': '--..--', '.': '.-.-.-',
'?': '..--..', '/': '-..-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-'}
text_to_translate = input("Please enter what you would like to translate into Morse Code: ").upper()
text_list = list(text_to_translate)
"""Translation that is easy to read for humans"""
translationRead = ""
"""Direct Morse Code Translation"""
rawTranslation = ""
"""Loops through the given input and matches each character in the input with a given character
from the given translation dictionary to find the correct key"""
for trans_char in text_list:
for morse_char in Morse_code_trans:
if trans_char == morse_char:
translationRead += "|" + trans_char + " " + Morse_code_trans.get(morse_char) + " "
rawTranslation += Morse_code_trans.get(morse_char)
print(translationRead + "|")
print("Raw Morse Code: " + rawTranslation)
| true |
c6eee1d021a9dd5127cd7fd01a9de26436d96e2c | Python | jovanikimble/Udacity-Nanodegree | /1project_movietrailer/practice/mindstorms.py | UTF-8 | 713 | 3.953125 | 4 | [] | no_license | import turtle
def draw_square(some_turtle):
    """Trace a 100-unit square with the given turtle, turning clockwise."""
    for _ in range(4):
        some_turtle.forward(100)
        some_turtle.right(90)
def draw_triangle(some_turtle):
    """Trace a triangle: back up 100 units, then draw two 100-unit sides
    with 60/120 degree turns."""
    moves = (
        ("backward", 100),
        ("left", 60),
        ("forward", 100),
        ("right", 120),
        ("forward", 100),
    )
    for method, amount in moves:
        getattr(some_turtle, method)(amount)
def draw_circle(some_turtle):
    """Draw a circle of radius 70 with the given turtle."""
    some_turtle.circle(70)
def draw_art():
    """Open a black window and draw 18 rotated squares (a flower pattern)
    with the yellow turtle, then wait for a click to close.

    The grey and pink turtles are created but their drawings are currently
    disabled (see the commented-out calls below).
    """
    window = turtle.Screen()
    window.bgcolor("black")
    brad = turtle.Turtle()
    brad.color("yellow")
    angie = turtle.Turtle()
    angie.color("grey")
    shiloh = turtle.Turtle()
    shiloh.color("pink")
    # 18 squares rotated 20 degrees apart: 18 * 20 = 360, a full turn.
    for i in range(0, 18):
        draw_square(brad)
        brad.right(20)
    #draw_triangle(angie)
    #draw_circle(shiloh)
    window.exitonclick()
# run the drawing when the module is executed
draw_art()
d35b0512a2531adf8cd289558f554cf83c6ced35 | Python | nigel-lirio/coe-135 | /lab4/queue.py | UTF-8 | 687 | 3.640625 | 4 | [] | no_license | class Item:
    def __init__(self, x):
        """Create a detached linked-list node carrying payload x."""
        self.data = x  # stored value
        self.next = None  # link to the following node (None = end of chain)
class LList:
    """Minimal singly linked list, used here as a FIFO of entered values."""
    def __init__(self):
        """The list starts empty: no head node."""
        self.start = None
    def ins(self, data):
        """Append data at the tail (walks the whole chain, O(n))."""
        node = Item(data)
        if self.start is None:
            self.start = node
            return
        cursor = self.start
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = node
    def pop(self):
        """Print every value head-to-tail, consuming (emptying) the list."""
        while self.start is not None:
            print(self.start.data)
            self.start = self.start.next
coolqueue = LList()
# Collect values from the user until the sentinel is entered.
while True:
    a = input("Enter int value (0 to exit):")
    # input() returns a str in Python 3, so the sentinel must be compared as
    # a string: the original `a == 0` (str vs int) was never True, which made
    # it impossible to leave the loop by typing 0.
    if a == "0":
        break
coolqueue.ins(a)
coolqueue.pop() | true |
8c255140bb79f982dcafe07480ef23851c4dcb51 | Python | cebeery/warmup_project_2017 | /scripts/drive_square.py | UTF-8 | 2,033 | 3.5 | 4 | [] | no_license | #!/usr/bin/env python
"""This script cmds Neato to move in a square via timed turns"""
import rospy
from geometry_msgs.msg import Twist
class DriveSquareNode(object):
    """ Controls square driving behavior of neato"""
    def __init__(self):
        """Initializes twist cmd and decision flags; sets time constants."""
        rospy.init_node('drive_square')
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.rate = rospy.Rate(20)  # control loop runs at 20 Hz
        self.cmd = Twist()
        # Open-loop timing (no odometry feedback): at 0.5 rad/s, 3.04 s of
        # turning is about 1.52 rad, i.e. approximately a right angle.
        self.cornerTime = rospy.Duration(3.04)
        # At 1.0 m/s, 3.00 s of driving gives a ~3 m edge.
        self.edgeTime = rospy.Duration(3.00)
        self.edges = 0  # number of edges completed so far
        self.inTurnState = True  # True while turning a corner
        self.startTime = rospy.Time.now()
    def corner(self):
        """updates twist cmd to turn in place; checks if turn is at end"""
        self.cmd.linear.x = 0.0
        self.cmd.angular.z = 0.5
        # switch to edge-driving once the timed turn has elapsed
        if (rospy.Time.now() - self.startTime) > self.cornerTime:
            self.inTurnState = False
            print "Vertex Complete"
            self.startTime = rospy.Time.now()
    def edge(self):
        """updates twist cmd to drive straight; checks if edge is at end"""
        self.cmd.linear.x = 1.0
        self.cmd.angular.z = 0.0
        # switch back to turning once the timed edge has elapsed
        if (rospy.Time.now() - self.startTime) > self.edgeTime:
            self.inTurnState = True
            self.edges += 1
            print "Edge Complete"
            self.startTime = rospy.Time.now()
    def run(self):
        """Determine robot state, publish the velocity command each cycle,
        and stop after four edges (or on shutdown)."""
        while not rospy.is_shutdown() and self.edges<4:
            if self.inTurnState:
                self.corner()
            else:
                self.edge()
            self.pub.publish(self.cmd)
            self.rate.sleep()
        #end motion (if any) and exit node: an all-zero Twist stops the robot
        self.pub.publish(Twist())
        print "Motion Ended"
        if self.edges == 4:
            print "Square Completed"
        else:
            print "Not Square"
#run instance only when executed as a script, so the module can be imported
#(e.g. for testing) without immediately starting the ROS node
if __name__ == '__main__':
    my_square = DriveSquareNode()
    my_square.run()
| true |
c6db15718d5e1bfe6e06f4d9c9e305828ac11cac | Python | Summer-Friend/data_analyze | /numpy/1.py | UTF-8 | 2,382 | 3.734375 | 4 | [] | no_license | '''
@Author: your name
@Date: 2020-02-12 10:58:20
@LastEditTime: 2020-02-17 15:00:46
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \vscode_code\其他\数据分析第二版\numpy\1.py
'''
import numpy as np
my_arr = np.arange(10)
#print((my_arr)[1])
my_list = list(range(10))
#print(my_list[1])
arr = np.arange(8)
#1. Array reshaping ##########################################
arr2 = arr.reshape(4,2)
#the inverse of reshape (1-D -> multi-D) is usually called flattening or raveling:
arr3 = arr2.ravel()
arr4 = arr2.flatten()
#print(arr3, arr4)
#2. Concatenating and splitting arrays #######################
arr1 = np.array([[1, 2, 3], [4, 5, 6]])
arr2 = np.array([[7, 8, 9], [10, 11, 12]])
#stack vertically (along axis 0)
np.concatenate([arr1, arr2], axis=0)
#stack horizontally (along axis 1)
np.concatenate([arr1, arr2], axis=1)
#for these common concatenations NumPy provides convenience helpers (vstack and hstack)
#stack vertically
np.vstack((arr1, arr2))
#stack horizontally
np.hstack((arr1, arr2))
#split is the inverse: the values [1, 3] passed to np.split are the indices at which to cut
arr = np.random.randn(5, 2)
first, second, third = np.split(arr, [1,3])
#3. Repeating elements: tile and repeat ######################
arr = np.arange(3)
arr.repeat(3)
#passing a sequence of ints repeats each element a different number of times
#(the sequence length must match); the same works per-slice for multi-D arrays:
arr.repeat([2, 3, 4])
#for multi-dimensional arrays the elements can be repeated along a chosen axis:
arr = np.random.randn(2, 2)
arr.repeat(2, axis = 1)
#note: without an axis argument the array is flattened first,
#which may not be what you want
#print(arr.repeat(2))
#4. Fancy indexing ###########################################
# one way to get/set subsets of an array is fancy indexing with an integer array:
arr = np.arange(10) * 100
inds = [7, 1, 2, 6]
#the two forms below are equivalent
arr[inds]
arr.take(inds)
#replace the values at the inds positions with 42
arr.put(inds, 42)
#to use take along another axis, pass the axis keyword:
arr = np.random.randn(2, 4)
print(arr)
#take columns 0 and 1 of every row
print(arr.take([0, 1], axis =1))
| true |
1deaec2f243d701f142ad6761f4d1c3b9e95bf79 | Python | cboopen/algorithm004-04 | /Week 02/id_384/LeetCode_242_384.py | UTF-8 | 288 | 3.125 | 3 | [] | no_license | class Solution:
def isAnagram(self, s: str, t: str) -> bool:
sm = {}
tm = {}
for i in s:
if i not in sm:
sm[i] = s.count(i)
for j in t:
if j not in tm:
tm[j] = t.count(j)
return sm == tm
| true |
3e871f5fb571090d1ad7ea84a2ecd6cbbcd4c80d | Python | amano7/LearningPython | /chapter6-10.py | UTF-8 | 161 | 3.375 | 3 | [] | no_license | sentence = "四月の晴れた寒い日で、時計がどれも13時を打っていた。"
# index of the Japanese comma ("、") that ends the first clause of `sentence`
point = sentence.find("、")
# slice off everything before the comma and print it
slce = sentence[0:point]
print(slce)
| true |
90107eae3938368426c438f353be14b04686d860 | Python | renan-am/MontadorIAS | /script.py | UTF-8 | 6,083 | 3.09375 | 3 | [] | no_license | #posição da memoria (em decimal) onde inicia a alocação de memoria, se deixada em 0, o programa escolhe um valor adequado
# memory position (decimal) where variable allocation starts; if left at 0
# the program picks a suitable value by itself
memVarStart = 0
# global state shared by the helper functions below
pos = False  # False -> left (first half of the word) // True: right (second half)
codePos = 0  # index of the output line currently being filled
auxVar = []  # [name, value->address] pairs for declared variables
code = []  # assembled output lines, each a list of string fields
memCount = 0  # address of the next instruction word
points = [["pointfinal", 400, False]]  # jump-target labels: [label, address, side]
toPoints = []  # pending JUMP instructions waiting to be linked to a point
changes = []  # STOR-target labels
toChanges = []  # pending STOR instructions waiting to be linked to a change
# check trailing flags on a parsed instruction
def checkFlags (i, line, pos):
    """Record label/reference flags attached to a parsed instruction.

    i    -- parsed instruction tokens; the flag, if present, is the last one
    line -- hexadecimal address string of the instruction being assembled
    pos  -- which half of the word the instruction occupies (False=left, True=right)
    """
    flag = i[-1]
    if flag[0:5] == "point":
        points.append([flag, line, pos])
    elif flag[0:7] == "toPoint":
        toPoints.append([flag, line, pos])
    # bug fix: the original compared only 4 characters ([0:4]) against
    # "change"/"toChange", so neither branch could ever match and STOR
    # flags were silently dropped; slice lengths now match the keywords.
    elif flag[0:6] == "change":
        changes.append([flag, line, pos])
    elif flag[0:8] == "toChange":
        toChanges.append([flag, line, pos])
# resolve a variable name to the address recorded for it in auxVar
def varAdress (variable):
    """Return the address assigned to `variable`, or the token unchanged
    when it is not a declared variable (e.g. it is already an address)."""
    for entry in auxVar:
        if entry[0] == variable:
            return entry[1]
    return variable
# fetch the operand at token index `ind`, tolerating operand-less instructions
def read(i, ind):
    """Return the resolved operand address at token index `ind`, or "000"
    when the instruction has no operand there (e.g. RSH, LSH).

    Only an out-of-range index counts as "missing operand": the original
    bare `except:` also swallowed every other error, including
    KeyboardInterrupt.
    """
    try:
        return varAdress(i[ind])
    except IndexError:
        return "000"
# concatenate the instruction together with its operand memory address
def createLine( i, hexaCode ):
    """Append one half-word (opcode + operand address) to the output.

    i        -- parsed instruction tokens: (mnemonic, operand[, flag])
    hexaCode -- opcode string for this mnemonic (trailing space included)

    Mutates the module globals: `pos` selects the left/right slot of the
    current word, `code` accumulates output lines, `codePos` points at the
    line being filled and `memCount` is the running word address.
    """
    global pos
    global codePos
    global auxVar
    global code
    global memCount
    # 3-digit upper-case hex address of the instruction word being built
    line = hex(memCount)[2:].upper().zfill(3)
    checkFlags (i, line, pos);
    if pos:
        # right slot: complete the current line and advance to the next word
        code[codePos].append(hexaCode)
        code[codePos].append(read(i,1) + " ")
        codePos += 1
        memCount += 1
    else:
        # left slot: start a new output line with its address field
        code.append([line + " "])
        code[codePos].append(hexaCode)
        code[codePos].append(read(i,1) + " ")
    pos = not pos
#open the source code to be "assembled"
with open('teste') as f:
    read_data = f.read()
# build a vector of the whitespace-separated tokens of the text, then split
# each token on "." into (instruction, operand) or (variable name, value)
vet = []
data = read_data.split()
for i in data:
    vet.append(i.split("."))
#scan the parsed tokens: anything whose first field is not a known
#instruction mnemonic is treated as a variable declaration and collected
#into auxVar.
#bug fix: the original if/elif chain ended with a duplicate, body-less
#`elif i[0] == "STORM":` immediately followed by `else:`, which is a
#SyntaxError; a set-membership test replaces the whole chain.
_INSTRUCTION_MNEMONICS = {
    "LOADM", "LOADMQM", "STORM", "LOADMQ", "ADDM", "SUBM", "MULM", "DIVM",
    "RSH", "LSH", "LOADM_ABS", "LOAD-M", "ADDM_ABS", "SUBM_ABS",
    "JUMPM", "JUMP+M",
}
for i in vet:
    if i[0] not in _INSTRUCTION_MNEMONICS:
        auxVar.append(i)
# initialize memVar: explicit start address, or half the token count as a
# heuristic gap after the instruction words
var = []
if memVarStart != 0:
    memVar = memVarStart
else:
    memVar = int(len(vet) / 2)
# store in var the finished output lines (address and initial value of each
# variable's memory word) and rewrite auxVar entries to hold the address
for i in auxVar:
    # hex() gives e.g. "0x1f"; drop the "0x", upper-case and left-pad with
    # zeros to 3 characters
    endVar = hex(memVar)[2:].upper().zfill(3)
    var.append([endVar + " ", i[1]])
    i[1] = endVar
    memVar += 1
# store in `code` the instructions and their operand addresses in hex, two
# instructions per output line, with variables replaced by their addresses
_OPCODES = {
    "LOADM": "01 ", "LOADMQM": "09 ", "STORM": "21 ", "LOADMQ": "0A ",
    "ADDM": "05 ", "SUBM": "06 ", "MULM": "0B ", "DIVM": "0C ",
    "RSH": "15 ", "LSH": "14 ", "LOADM_ABS": "03 ", "LOAD-M": "02 ",
    "ADDM_ABS": "07 ", "SUBM_ABS": "08 ", "JUMPM": "LOOP ", "JUMP+M": "IFLOOP ",
}
for i in vet:
    # Dictionary dispatch replaces the original 16-branch elif chain, which
    # also carried an unreachable duplicate STORM branch (the first match,
    # "21 ", is the one kept).  Unknown tokens -- variable declarations --
    # are skipped, exactly as before.
    hexa = _OPCODES.get(i[0])
    if hexa is not None:
        createLine(i, hexa)
final = code + var
# Resolve JUMP M and JUMP+M: patch each pending jump with the real opcode
# (left/right variant) and the address of its target label
for i in toPoints:
    aux = 0
    # find the point whose label matches this toPoint (strip the "to" prefix)
    for j in points:
        if j[0] == i[0][2:].lower():
            aux = j
    for j in final: # compare the line address fields
        if j[0][0:3] == i[1]:
            if i[2]: #the jump instruction is in the right slot
                if aux[2]: #the jump target is in the right slot
                    if j[3][0:2] == "IF":
                        j[3] = "10 " #JUMP+M right
                    else:
                        j[3] = "0E " #JUMP M right
                else: #the jump target is in the left slot
                    if j[3][0:2] == "IF":
                        j[3] = "0F " #JUMP+M left
                    else:
                        j[3] = "0D " #JUMP M left
                j[4] = aux[1] #address of the jump target
            else: #the jump instruction is in the left slot
                if aux[2]: #the jump target is in the right slot
                    if j[1][0:2] == "IF":
                        j[1] = "10 " #JUMP+M right
                    else:
                        j[1] = "0E " #JUMP M right
                else: #the jump target is in the left slot
                    if j[1][0:2] == "IF":
                        j[1] = "0F " #JUMP+M left
                    else:
                        j[1] = "0D " #JUMP M left
                j[2] = aux[1] #address of the jump target
# Resolve STOR M: patch each pending store with the left/right opcode
# variant and the address of its target
for i in toChanges:
    aux = 0
    # find the change whose label matches this toChange (strip "to" prefix)
    for j in changes:
        if j[0] == i[0][2:].lower():
            aux = j
    for j in final:
        if j[0][0:3] == i[1]: # compare the line address fields
            if i[2]: #the store instruction is in the right slot
                if aux[2]: # the store target is in the right slot
                    j[3] = "13 " # STOR M right
                else: #the store target is in the left slot
                    j[3] = "12 " # STOR M left
                j[4] = aux[1] # address of the store target
            else: # the store instruction is in the left slot
                if aux[2]: # the store target is in the right slot
                    j[1] = "13 " # STOR M right
                else: #the store target is in the left slot
                    j[1] = "12 " # STOR M left
                j[2] = aux[1] # address of the store target
# generate the final output file; `with` guarantees the handle is closed
# even on error (the original opened "ra.hex" and never closed it)
with open("ra.hex", "w+") as saida:
    for i in final:
        # each output line is the concatenation of its string fields
        # (address, opcodes, operand addresses)
        saida.write("".join(str(j) for j in i) + '\n')
| true |
886397aadf8f2356a914cddbe0a5032ea5db6032 | Python | spatialaudio/sweep | /ir_imitation.py | UTF-8 | 1,167 | 2.734375 | 3 | [
"MIT"
] | permissive | """ Imitate a impulse response."""
import numpy as np
import measurement_chain
import calculation
def exponential_decay(duration_seconds,
                      db_decay,
                      noise_level_db,
                      fs,
                      seed=1):
    """Imitate a real impulse response as exponentially fading white noise.

    duration_seconds : IR duration in seconds
    db_decay         : decay rate in dB per second
    noise_level_db   : noise level in dB
    fs               : sampling rate (samples per second)
    seed             : RNG seed forwarded to calculation.awgn_noise
    """
    # time axis, one sample every 1/fs seconds
    t = np.arange(0, duration_seconds, 1 / fs)
    # white noise at the requested level (project helper -- assumed to return
    # duration_seconds * fs samples; confirm against calculation.awgn_noise)
    noise = calculation.awgn_noise(noise_level_db, duration_seconds * fs, seed)
    # convert dB figures to linear amplitudes
    decay = 10 ** ((noise_level_db + db_decay) / 20)
    noise_level = 10 ** (noise_level_db / 20)
    # time constant chosen so the envelope falls by exactly db_decay dB per
    # second: exp(-1/lifetime) == decay/noise_level
    lifetime = -1 / (np.log(decay / noise_level))
    exponential_fading_noise = noise * np.exp(-t / lifetime)
    # unit impulse at t=0 -- presumably the direct sound of the IR; confirm
    exponential_fading_noise[0] = 1
    return exponential_fading_noise
def diracs(array):
    """Return a float array with ones at every index listed in `array` and
    zeros elsewhere (a unit impulse train of length max(array) + 1)."""
    positions = np.asarray(array)
    train = np.zeros(positions.max() + 1)
    train[positions] = 1
    return train
def bandstop(lower_bound, higher_bound, fs, order):
    """Return the (b, a) coefficients of a Butterworth band-stop filter.

    Thin wrapper around measurement_chain._butter_bandstop; lower_bound and
    higher_bound are the stop-band edges (units per that helper -- presumably
    Hz), fs the sampling rate and order the filter order.
    """
    b, a = measurement_chain._butter_bandstop(lower_bound,
                                              higher_bound,
                                              fs, order)
    return b, a
| true |
31bf45fe6c98360325648cf76f35348d5dd6b6a9 | Python | Elizhann/Code-Projects | /parse_string.py | UTF-8 | 330 | 3.578125 | 4 | [] | no_license | #string to parse
#header line whose numeric confidence value we want to extract
#(renamed from `str`, which shadowed the builtin)
text = 'X-DSPAM-Confidence:0.8475'
#find the location of the : character
x = text.find(':')
print(x)
#find the end of the number: the next space, or the end of the string when
#there is none.  bug fix: the original kept find()'s -1 sentinel and used it
#as the slice end, which silently dropped the last digit (0.847 instead of
#0.8475).
a = text.find(' ', x)
if a == -1:
    a = len(text)
print(a)
#slice the string at the identified points and turn it into a float
number = float(text[x + 1:a])
print(number)
| true |
ff4919c1ce875fa21f0baf688e5d6816fec757a8 | Python | jjiezheng/panity | /parserinterface.py | UTF-8 | 594 | 2.9375 | 3 | [
"BSD-3-Clause"
] | permissive | from abc import ABCMeta, abstractmethod
class ParserInterface(object):
    """This interface shows what API a parser for scenes and prefabs should
    support at least.

    NOTE(review): `__metaclass__ = ABCMeta` is the Python 2 spelling; under
    Python 3 it has no effect and the class should instead be declared as
    `class ParserInterface(metaclass=ABCMeta)`.  The abstract methods also
    raise NotImplementedError as a belt-and-braces guard for subclasses that
    bypass the ABC machinery.
    """
    __metaclass__ = ABCMeta
    #@staticmethod
    @abstractmethod
    def read(source):
        """Read a scene/prefab from source (file) and return a scene made out
        of game objects and components.
        """
        raise NotImplementedError
    #@staticmethod
    @abstractmethod
    def write(source, data):
        """Write a scene/prefab to a source (file)."""
        raise NotImplementedError
| true |
f83e81770ff1c872d0e11a424c88f27305377be8 | Python | PauliusVaitkevicius/Exp001 | /Ex18_Phish_NaiveBayes_UCIdataset/NaiveBayes_LC.py | UTF-8 | 977 | 2.921875 | 3 | [] | no_license | import time
import warnings
import arff
import numpy as np
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import ShuffleSplit
from utilities.plot_learning_curve import plot_learning_curve
warnings.filterwarnings("ignore")
start_time = time.perf_counter()
print("Importing dataset: UCI 2015 Phishing Examples")
dataset = arff.load(open('dataset_uci_2015.arff', 'rt'))
data = np.array(dataset['data']).astype(np.float)
X = data[:, 0:30]
y = data[:, 30:31]
y = np.ravel(y, order='C')
print("Number of data points: ", data.shape[0])
print("Number of features: ", data.shape[1] - 1)
print("Training the model: Naive-Bayes (Bernoulli)")
estimator = BernoulliNB()
cv = ShuffleSplit(n_splits=30, test_size=0.2, random_state=0)
plt = plot_learning_curve(estimator, "Learning Curves (Naive Bayes - Bernoulli)", X, y,
ylim=(0.9, 0.92), cv=cv, n_jobs=-1)
plt.show()
print('Time took:', time.perf_counter() - start_time, "seconds") | true |
6618d28d55039e86e2b6bed85ac8b730ddf220f7 | Python | szilu7/gym-highway | /gym_highway/modell/environment_vehicle.py | UTF-8 | 12,859 | 2.546875 | 3 | [
"MIT"
] | permissive | from gym_highway.modell.vehicle_base import BaseVehicle
import numpy as np
#LOGFILE_PATH
log_cnt = 0  # total number of log entries recorded so far
logs_in_file = 40  # size of the rolling window kept in memory / written to disk
log_list = []  # most recent entries; oldest dropped once the window is full
class Envvehicle(BaseVehicle):
    def __init__(self, dict):
        """Set up an environment (traffic) vehicle.

        `dict` is the environment configuration (the name shadows the
        builtin but is kept for interface compatibility); it is forwarded to
        BaseVehicle, which is expected to expose it as self.envdict.
        """
        super().__init__(dict)
        self.desired_speed = 0.0  # target cruise speed [m/s]; set by _getrandom()
        self.maxacc = 2.0  # Max acceleration m/s^2
        self.maxdec = -6.0  # Max deceleration m/s^2
        # behaviour state machine: 'in_lane', 'switch_lane_right',
        # 'switch_lane_left' or 'acceleration' (see step())
        self.state = 'in_lane'
        self.change_needed = 0  # set to 1 when a lane change is initiated
        self.change_finished = 0  # Indicates that the vehicle has left its old lane
        self.laneindex = 0  # current lane; 0 is the rightmost lane
        self.lanewidth = self.envdict['lane_width']
        self.oldlane = 0  # lane index before the current/last lane change
        self.skip = 0  # not used in this class -- presumably consumed elsewhere; confirm
def _getrandom(self):
sigma = 5
self.desired_speed = 130 / 3.6 + sigma * np.random.randn()
self.vx = self.desired_speed
    def step(self, vnext, vbehind, vright_a, vright_b, vleft_a, vleft_b):
        """Advance this vehicle by one time step and update its state machine.

        States: 'in_lane' (speed tracking / GHR-style car following, plus the
        keep-right and overtake decisions), 'switch_lane_right' and
        'switch_lane_left' (lateral motion of 0.4 m per step while still
        following), and 'acceleration' (closing in on the leader before an
        overtake).

        :param vnext: vehicle ahead in the current lane (or None)
        :param vbehind: vehicle behind in the current lane (or None)
        :param vright_a: vehicle ahead in the lane to the right (or None)
        :param vright_b: vehicle behind in the lane to the right (or None)
        :param vleft_a: vehicle ahead in the lane to the left (or None)
        :param vleft_b: vehicle behind in the lane to the left (or None)
        :return: True when the vehicle left the simulated highway section
        :raises CollisionExc: when the gap to the leader becomes negative
        """
        if self.state == 'in_lane':
            acc = 0
            # Desired acceleration: full throttle / full engine braking
            # towards the desired speed
            if self.vx < self.desired_speed:
                acc = self.maxacc
            else:
                acc = -self.maxacc
            if not (vnext is None):
                # following GHR model
                dv = vnext.vx - self.vx
                dx = vnext.x - vnext.length - self.x
                if dx < 0:
                    print('Collision, ID: ', self.ID, ' vnext ID: ', vnext.ID, ' in lane: ', self.laneindex)
                    print(vnext.x, ' - ', vnext.length, ' - ', self.x)
                    env_save_log()
                    raise CollisionExc('Collision')
                    print('Collision')  # unreachable (after raise); kept from original
                # desired following dist
                #dist = vnext.vx * 1.4
                dist = vnext.vx * 1.2
                ddist = dist - dx
                accghr = -1 * ddist + 10 * dv
                # alpha=0.6
                # m=0.4
                # l=1.9
                # accghr=alpha*(self.vx**m)*dv/(dx**l)
                accghr = min(max(self.maxdec, accghr), self.maxacc)
                if self.vx > self.desired_speed:
                    acc = min(-self.maxacc, accghr)
                else:
                    acc = accghr
            self.vx = self.vx + self.dt * acc
            self.x = self.x + self.dt * self.vx

            # Keep-right rule: return to the right lane when there is enough
            # room ahead (11 car lengths) and behind (9) and the vehicle
            # ahead there is not much slower
            if (self.laneindex != 0):
                if (vright_a is None) or (((vright_a.x - vright_a.length - self.x) / 11) > self.length):
                    if (vright_b is None) or (((self.x - self.length - vright_b.x) / 9) > self.length):
                        if not (vright_a is None):
                            if ((self.vx * 0.7) < vright_a.vx):
                                self.state = 'switch_lane_right'
                                self.change_needed = 1
                                self.oldlane = self.laneindex
                        else:
                            self.state = 'switch_lane_right'
                            self.change_needed = 1
                            self.oldlane = self.laneindex
            # (disabled) Pull over to the right when holding up a faster
            # vehicle behind
            """
            if not (vbehind is None):
                if (
                        self.x - self.length - vbehind.x) / 5 < self.length:  # Mögötte haladó megközelítette 5 autónyi távolságra
                    if (vbehind.desired_speed > self.desired_speed):
                        if (vright_a is not None) and (vright_b is not None):
                            if ((
                                    vright_a.x - vright_a.length - self.x) / 5) > self.length:  # Előtte 5 autónyi hely van a jobb oldali sávban
                                if ((
                                        self.x - vright_b.x) / 8) > self.length:  # Mögötte 8 autónyi hely van a jobb oldali sávban
                                    print("Under overtake")
                                    #self.state = 'switch_lane_right'
                                    #self.switch_lane = 1
            """
            # Wants to go faster: start an overtake if blocked by a slower
            # leader and the left lane has room; maxacc is retuned so the gap
            # closes over roughly 3 simulated seconds
            if self.laneindex != (self.envdict['lane_count'] - 1):
                if not (vnext is None):
                    diff = (vnext.x - vnext.length - self.x)
                    if ((diff / 9) < self.length):
                        if self.desired_speed > vnext.desired_speed:
                            if (vleft_a is None) or (((vleft_a.x - vleft_a.length - self.x) / 4) > self.length):
                                if (vleft_b is None) or (((self.x - self.length - vleft_b.x) / 4) > self.length):
                                    if (vbehind is None) or (isinstance(vbehind, Envvehicle) and (vbehind.state != 'acceleration')):
                                        self.state = 'acceleration'
                                        s = vnext.x - vnext.length - self.x
                                        vrel = abs(vnext.vx - self.vx)
                                        t = 3 / self.envdict['dt']
                                        a = abs((2 * (s - (vrel * t))) / (t * t))
                                        self.maxacc = a
        elif self.state == 'switch_lane_right':
            # keep following the leader while drifting right 0.4 m per step
            acc = self.maxacc
            if not (vnext is None):
                # following GHR model
                dv = vnext.vx - self.vx
                dx = vnext.x - vnext.length - self.x
                if dx < 0:
                    print('Collision, ID: ', self.ID, ' vnext ID: ', vnext.ID, ' in lane: ', self.laneindex)
                    env_save_log()
                    raise CollisionExc('Collision')
                    print('Collision')  # unreachable (after raise); kept from original
                # desired following dist
                #dist = vnext.vx * 1.4
                dist = vnext.vx * 1.2
                ddist = dist - dx
                accghr = -1 * ddist + 10 * dv
                accghr = min(max(self.maxdec, accghr), self.maxacc)
                if self.vx > self.desired_speed:
                    acc = min(-self.maxacc, accghr)
                else:
                    acc = accghr
            self.vx = self.vx + self.dt * acc
            self.x = self.x + self.dt * self.vx
            self.y = self.y - 0.4
            # lane change complete once the centre of the target lane is reached
            if self.y <= ((self.laneindex - 1) * self.lanewidth):
                self.y = ((self.laneindex - 1) * self.lanewidth)
                self.laneindex = self.laneindex - 1
                self.change_finished = 1
                self.state = 'in_lane'
        elif self.state == 'switch_lane_left':
            # drift left 0.4 m per step while following; accelerate at least 2 m/s^2
            acc = max(self.maxacc, 2)
            if not (vnext is None):
                # following GHR model
                dv = vnext.vx - self.vx
                dx = vnext.x - vnext.length - self.x
                if dx < 0:
                    print('Collision, ID: ', self.ID, ' vnext ID: ', vnext.ID, ' in lane: ', self.laneindex)
                    env_save_log()
                    raise CollisionExc('Collision')
                    print('Collision')  # unreachable (after raise); kept from original
                # desired following dist
                #dist = vnext.vx * 1.4
                dist = vnext.vx * 1.2
                ddist = dist - dx
                accghr = -1 * ddist + 10 * dv
                accghr = min(max(self.maxdec, accghr), self.maxacc)
                if self.vx > self.desired_speed:
                    acc = min(-self.maxacc, accghr)
                else:
                    acc = accghr
            self.vx = self.vx + self.dt * acc
            self.x = self.x + self.dt * self.vx
            self.y = self.y + 0.4
            if self.y >= ((self.laneindex + 1) * self.lanewidth):
                self.y = ((self.laneindex + 1) * self.lanewidth)
                self.laneindex = self.laneindex + 1
                self.state = 'in_lane'
        elif self.state == 'acceleration':
            # close in on the leader, then either commit to the left lane
            # change or fall back to lane keeping at the leader's speed
            acc = self.maxacc
            self.vx = self.vx + self.dt * acc
            self.x = self.x + self.dt * self.vx
            if not (vnext is None):
                s = (vnext.x - vnext.length - self.x)
                if (s / 3) < self.length:
                    if not (vleft_b is None):
                        if (self.vx > (0.95 * vleft_b.vx)) and (((self.x - self.length - vleft_b.x) / 3) > self.length):
                            if not (vleft_a is None):
                                if (((vleft_a.x - vleft_a.length - self.x) / 3) > self.length) and \
                                        (vleft_a.vx > (self.vx * 0.8)):
                                    self.state = 'switch_lane_left'
                                    self.change_needed = 1
                                    self.maxacc = 2
                                    self.oldlane = self.laneindex
                                    #print('Overtake at: ', self.x)
                                else:
                                    self.state = 'in_lane'
                                    self.vx = vnext.vx
                            else:
                                self.state = 'switch_lane_left'
                                self.change_needed = 1
                                self.maxacc = 2
                                self.oldlane = self.laneindex
                                #print('Overtake at: ', self.x)
                        else:
                            self.state = 'in_lane'
                            self.vx = vnext.vx
                    else:
                        if not (vleft_a is None):
                            if (((vleft_a.x - vleft_a.length - self.x) / 3) > self.length):
                                self.state = 'switch_lane_left'
                                self.change_needed = 1
                                self.maxacc = 2
                                self.oldlane = self.laneindex
                                #print('Overtake at: ', self.x)
                            else:
                                self.state = 'in_lane'
                                self.vx = vnext.vx
                        else:
                            self.state = 'switch_lane_left'
                            self.change_needed = 1
                            self.maxacc = 2
                            self.oldlane = self.laneindex
                            #print('Overtake at: ', self.x)
                else:
                    self.state = 'in_lane'
        # report whether the vehicle has left the simulated section
        reachedend = False
        if (self.x > self.envdict['length_forward']) or (self.x < self.envdict['length_backward']):
            reachedend = True
        return reachedend
    def warmup_step(self, vnext):
        """Simplified in-lane step used while the simulation warms up.

        Applies only the speed-tracking / GHR car-following behaviour (no
        lane-change decisions) and integrates speed and position.  Unlike
        step(), this returns nothing.

        :param vnext: vehicle ahead in the same lane (or None)
        :raises CollisionExc: when the gap to the leader becomes negative
        """
        acc = 0
        # Desired acceleration towards the desired speed
        if self.vx < self.desired_speed:
            acc = self.maxacc
        else:
            acc = -self.maxacc
        if not (vnext is None):
            # following GHR model
            dv = vnext.vx - self.vx
            dx = vnext.x - vnext.length - self.x
            if dx < 0:
                env_save_log()
                raise CollisionExc('Collision')
                print('Collision')  # unreachable (after raise); kept from original
            # desired following dist
            #dist = vnext.vx * 1.4
            dist = vnext.vx * 1.2
            ddist = dist - dx
            accghr = -1 * ddist + 10 * dv
            # alpha=0.6
            # m=0.4
            # l=1.9
            # accghr=alpha*(self.vx**m)*dv/(dx**l)
            accghr = min(max(self.maxdec, accghr), self.maxacc)
            if self.vx > self.desired_speed:
                acc = min(-self.maxacc, accghr)
            else:
                acc = accghr
        self.vx = self.vx + self.dt * acc
        self.x = self.x + self.dt * self.vx
class CollisionExc(Exception):
    """Raised when two simulated vehicles overlap longitudinally.

    The offending detail is kept in `value` and echoed through repr() when
    the exception is stringified.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "{!r}".format(self.value)
def env_add_entry(text):
    """Append one formatted entry ('Step N' header plus text) to the rolling
    in-memory log, dropping the oldest entry once the window of
    `logs_in_file` entries is full."""
    global log_cnt
    log_cnt += 1
    write = 'Step ' + str(log_cnt) + '\n'
    write += text
    log_list.append(write)
    # keep the buffer bounded to the window size
    if log_cnt > logs_in_file:
        log_list.pop(0)
def env_save_log():
    """Dump the most recent `logs_in_file` log entries to disk.

    Writes only once the rolling buffer has filled (log_cnt > logs_in_file).
    NOTE(review): the destination is a hard-coded Windows path; parameterize
    it if this ever needs to run elsewhere.
    """
    if log_cnt > logs_in_file:
        # `with` guarantees the file is closed even if a write fails (the
        # original used a bare open()/close() pair)
        with open(r'D:\log_file.txt', 'w+') as log_file:
            for i in range(logs_in_file):
                log_file.write(log_list[i])
9e7d8d9390dcc2d34d694665679c1ac86b66c2b7 | Python | sathishvinayk/Python-Advanced | /Exceptions/Use_raise_builtin.py | UTF-8 | 446 | 3.4375 | 3 | [] | no_license | #Running an indexing error with exception
def indexerror(value, index):
    """Return the element of `value` at position `index`.

    No guarding is done on purpose: an out-of-range index raises IndexError,
    which callers may catch or let propagate.
    """
    element = value[index]
    return element
#Call indexerror with an index inside the valid range
x="stuff"
#Calling this will return the value at index 3 ("f"); the result is discarded
indexerror(x,3)
#Try a raise statement inside a try/except block
try:
    raise IndexError
except IndexError:
    print("Exception occured")
#A bare `raise IndexError` can also be run directly in the shell; here it is
#uncaught, so it deliberately terminates the script with a traceback
raise IndexError
#
| true |
c18b6d7fb767ca0e73828a0670e0e7cda79abd86 | Python | simeonpanayotov/Tetris | /panels.py | UTF-8 | 6,543 | 3.71875 | 4 | [] | no_license | """Define panels used in the game to display information.
LabelPanel - displays static text
ValuePanel - displays variable text
NextShapePanel - displays the next shape
LevelPanel - displays the current game level
ScorePanel - dispalys the current palyer score
ControlPanel - holds all game panels
"""
import pygame
from constants import *
import grids
class LabelPanel:
    """Panel showing fixed text, rendered once at construction time.

    Methods:
    draw - blit the panel onto a surface at the given position
    """
    def __init__(self, width, label):
        self._font = pygame.font.Font(None, 36)
        rendered = self._font.render(str(label), 1, (10, 10, 10))
        rendered_rect = rendered.get_rect(topleft=(10, 10))
        self.height = rendered.get_height() + 10
        self._panel = pygame.Surface((width, self.height))
        self._panel.fill(PANEL_BG_COLOR)
        self._panel.blit(rendered, rendered_rect)
    def draw(self, surface, position):
        surface.blit(self._panel, position)
class ValuePanel:
    """A panel displaying variable text.
    Methods:
    draw - draw the panel on a surface
    """
    def __init__(self, width, value):
        # `value` is public: callers mutate it and the next draw() picks it up
        self.value = value
        self._font = pygame.font.Font(None, 36)
        text = self._font.render(str(value), 1, (10, 10, 10))
        textpos = text.get_rect(topleft=(10, 10))
        self.height = text.get_height() + 10
        self._panel = pygame.Surface((width, self.height))
        self._panel.fill(PANEL_BG_COLOR)
        self._panel.blit(text, textpos)
    def draw(self, surface, position):
        # re-render every frame because self.value may have changed
        text = self._font.render(str(self.value), 1, (10, 10, 10))
        textpos = text.get_rect(topleft=(10, 10))
        self._panel.fill(PANEL_BG_COLOR)
        self._panel.blit(text, textpos)
        surface.blit(self._panel, position)
class NextShapePanel:
    """A panel displaying a shape.
    Methods:
    next_shape - returns the next shape and creates a new one
    draw - draws the panel on a surface
    """
    def __init__(self, width):
        self._grid = grids.NextShapeGrid()
        self._next_label_panel = LabelPanel(width, "Next:")
        # inner surface where the upcoming shape itself is rendered (4x4 boxes)
        self._next_shape_panel = pygame.Surface(
            (4 * BOX_LENGTH, 4 * BOX_LENGTH))
        self._next_shape_panel.fill(PANEL_BG_COLOR)
        self.height = self._next_label_panel.height + \
            self._next_shape_panel.get_height() + 10
        self._panel = pygame.Surface((width, self.height))
        # prime the grid so later next_shape() calls always return a shape
        self.next_shape()
    def next_shape(self):
        """Create and display a new shape and return the previous one.
        The method is called internally on init, so that
        subsequent calls always return a shape.
        (The very first call returns whatever the fresh grid holds as its
        active shape -- presumably None; confirm against grids.NextShapeGrid.)
        """
        shape = self._grid._active_shape
        self._grid.create_new_shape()
        self._grid.update()
        return shape
    def draw(self, surface, position):
        # repaint background, label, then the freshly drawn shape grid
        self._panel.fill(PANEL_BG_COLOR)
        self._next_label_panel.draw(self._panel, (0, 0))
        self._next_shape_panel.fill(PANEL_BG_COLOR)
        self._grid.draw(self._next_shape_panel)
        self._panel.blit(self._next_shape_panel, (10, self._next_label_panel.height + 10))
        surface.blit(self._panel, position)
class LevelPanel:
    """Panel showing the current game level as a label above a number.

    Methods:
    increase_level - bump the displayed level by one
    draw - draw the panel on a surface
    """
    def __init__(self, width):
        """Build the panel showing 'Level:' over the starting value 1."""
        self._label = LabelPanel(width, "Level: ")
        self._value = ValuePanel(width, 1)
        self.height = self._label.height + self._value.height
        self._panel = pygame.Surface((width, self.height))
    def increase_level(self):
        """Advance the displayed level by one."""
        self._value.value += 1
    def draw(self, surface, position):
        self._label.draw(self._panel, (0, 0))
        self._value.draw(self._panel, (0, self._label.height))
        surface.blit(self._panel, position)
class ScorePanel:
    """Panel showing the player's score as a label above a number.

    Methods:
    get_score - return the current player score
    add_score - add points to the player score
    draw - draw the panel on a surface
    """
    def __init__(self, width):
        """Build the panel showing 'Score:' over the starting value 0."""
        self._label = LabelPanel(width, "Score: ")
        self._value = ValuePanel(width, 0)
        self.height = self._label.height + self._value.height
        self._panel = pygame.Surface((width, self.height))
    def get_score(self):
        """Return the current player score."""
        return self._value.value
    def add_score(self, score):
        """Add `score` points to the player score."""
        self._value.value += score
    def draw(self, surface, position):
        self._label.draw(self._panel, (0, 0))
        self._value.draw(self._panel, (0, self._label.height))
        surface.blit(self._panel, position)
class ControlPanel:
    """A panel showing the next shape, game level and player score.
    Methods:
    next_shape - returns the next shape and creates a new one
    increase_level - increases the game level by one
    get_score - returns the current player score
    add_score - adds to the player score
    draw - draw the panel on a surface
    """
    def __init__(self, width, height):
        # sub-panels are sized by the PANEL_WIDTH constant; `width`/`height`
        # only size the backing surface (mismatch is the caller's concern)
        self._next_shape_panel = NextShapePanel(PANEL_WIDTH)
        self._panel_level = LevelPanel(PANEL_WIDTH)
        self._panel_score = ScorePanel(PANEL_WIDTH)
        self._panel = pygame.Surface((width, height))
        self._panel.fill(PANEL_BG_COLOR)
    def next_shape(self):
        """Return the next shape."""
        return self._next_shape_panel.next_shape()
    def get_score(self):
        """Return the current player score."""
        return self._panel_score.get_score()
    def add_score(self, value):
        """Add to the player score."""
        self._panel_score.add_score(value)
    def increase_level(self):
        """Increase the game level."""
        self._panel_level.increase_level()
    def draw(self, surface, position):
        # stack the three sub-panels vertically: shape, level, score
        self._next_shape_panel.draw(self._panel, (0, 0))
        panel_level_height = self._next_shape_panel.height
        self._panel_level.draw(self._panel, (0,panel_level_height))
        panel_score_height = panel_level_height + self._panel_level.height
        self._panel_score.draw(self._panel, (0,panel_score_height))
        surface.blit(self._panel, position)
| true |
11ecec2bc2d11ff40d2f80414b249189facc54aa | Python | mouday/SomeCodeForPython | /python_psy_pc/python基础/pandasTest.py | UTF-8 | 381 | 3.078125 | 3 | [] | no_license | import pandas as pd
# pandas is built on top of numpy
csv=pd.read_csv("bricks.csv",index_col=0)  # first CSV column becomes the row index
print(csv)
print(csv.nation)# get a column (attribute access)
print(csv["nation"])  # same column via subscript access
csv["note"]=[1,2,3,4,5,6,7,8,9]# add a new column
print(csv)
# derived column ("densty"/"peple" are misspelled column names, kept as-is)
csv["densty"]=csv["area"]/csv['peple']
print(csv)
print(csv.loc["ch"])# get a row of data by label
print(csv["nation"].loc["ch"])# get a single element
print(csv.loc["ch"]["nation"])  # same element via chained indexing
print(csv.loc["ch","nation"])  # same element via a single .loc lookup
| true |
07f558baad43bf32c95c7a0f94fd5634108de76f | Python | jansona/MyPingGUI | /src/main/python/main.py | UTF-8 | 3,694 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
MyPing GUI Version
author: ybg
github: https://github.com/jansona/MyPing
last edited: May 2020
"""
import sys
import threading
from fbs_runtime.application_context.PyQt5 import ApplicationContext
from PyQt5.QtWidgets import (QWidget, QLabel, QLineEdit,
QTextEdit, QGridLayout, QApplication, QPushButton, QVBoxLayout)
from PyQt5.QtGui import QDoubleValidator, QIntValidator, QTextCursor
from PyQt5 import QtCore
from PyQt5.QtCore import QThread, pyqtSlot, pyqtSignal
from MyPing.utils import PingUtil
from MyPing.opts import PingOptions
class EmittingStream(QtCore.QObject):
    """File-like stream that re-emits everything written to it as a Qt signal.

    Instances are assigned to sys.stdout / sys.stderr so console output
    can be routed into the GUI text box (see MyPingGUI).
    """
    _msg_signal = QtCore.pyqtSignal(str)  # signal carrying one chunk of written text

    def write(self, text):
        # Forward whatever was written to the connected slot.
        self._msg_signal.emit(str(text))

    def flush(self):
        # No buffering here, but sys.stdout replacements must provide
        # flush(): print(..., flush=True) and interpreter shutdown call it,
        # and without this method they raise AttributeError.
        pass
class MyPingGUI(QWidget):
    """Main window: host/size/count inputs, a read-only log box and a Send button."""
    def __init__(self):
        super().__init__()
        self.ping_util = PingUtil()  # backend that actually sends the pings
        self.initUI()
    def __draw_widget(self):
        """Create and lay out all child widgets."""
        host_label = QLabel('Host')
        patch_size_label = QLabel('Data size(bytes)')
        ping_num_label = QLabel('Ping times')
        # result_label = QLabel('Result')
        self.host_edit = QLineEdit()
        self.patch_size_edit = QLineEdit()
        self.ping_num_edit = QLineEdit()
        self.result_edit = QTextEdit()
        grid = QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(host_label, 1, 0)
        grid.addWidget(self.host_edit, 1, 1)
        grid.addWidget(patch_size_label, 2, 0)
        grid.addWidget(self.patch_size_edit, 2, 1)
        grid.addWidget(ping_num_label, 3, 0)
        grid.addWidget(self.ping_num_edit, 3, 1)
        # grid.addWidget(result_label, 4, 0)
        # grid.addWidget(self.result_edit, 4, 0, 6, 1)
        self.send_button = QPushButton('Send')
        vbox = QVBoxLayout()
        vbox.addLayout(grid)
        vbox.addWidget(self.result_edit)
        vbox.addWidget(self.send_button)
        self.setLayout(vbox)
    def __set_widget_attr(self):
        """Set defaults/validators, wire signals and redirect stdout/stderr to the log box."""
        self.host_edit.setText('www.stanford.edu')
        self.patch_size_edit.setValidator(QIntValidator(self))
        self.patch_size_edit.setText('2052')
        self.ping_num_edit.setValidator(QIntValidator(self))
        self.ping_num_edit.setText('3')
        self.result_edit.setReadOnly(True)
        # Route all console output into the GUI; the keyword argument
        # connects EmittingStream._msg_signal to __output_written.
        sys.stdout = EmittingStream(_msg_signal=self.__output_written)
        sys.stderr = EmittingStream(_msg_signal=self.__output_written)
        self.send_button.clicked.connect(self.__send_packet)
    def __send_packet(self):
        """Run the ping in a daemon thread so the UI stays responsive."""
        def send_threading_func():
            # Disable the button while a ping run is in progress.
            self.send_button.setEnabled(False)
            opt = PingOptions()
            opt.host = self.host_edit.text()
            opt.packet_size = int(self.patch_size_edit.text())
            opt.ping_times = int(self.ping_num_edit.text())
            self.ping_util.ping(opt)
            self.send_button.setEnabled(True)
        # NOTE(review): the thread name 'funciton' is misspelled ('function').
        t = threading.Thread(target=send_threading_func, name='funciton', daemon=True)
        t.start()
    def __output_written(self, text):
        """Append redirected console text to the end of the result box."""
        cursor = self.result_edit.textCursor()
        cursor.movePosition(QTextCursor.End)
        cursor.insertText(text)
        self.result_edit.setTextCursor(cursor)
        self.result_edit.ensureCursorVisible()
    def initUI(self):
        """Build the UI, size/position the window and show it."""
        self.__draw_widget()
        self.__set_widget_attr()
        self.setGeometry(200, 200, 800, 600)
        self.setWindowTitle('My Ping')
        self.show()
if __name__ == '__main__':
    # fbs ApplicationContext owns the QApplication instance.
    appctxt = ApplicationContext()
    ex = MyPingGUI()  # keep a reference so the window is not garbage-collected
    exit_code = appctxt.app.exec_()  # run the Qt event loop until the window closes
    sys.exit(exit_code)
95f29e7b3f1e5df1f4618c8ae9f5edd57def27b3 | Python | BrentLittle/100DaysOfPython | /Day017 - Quiz Project/Quiz Game/main.py | UTF-8 | 461 | 3.078125 | 3 | [] | no_license | from question_model import Question
from data import question_data
from quiz_brain import QuizBrain
# Build the question bank from the raw question data.
questionBank = [
    Question(text=item["text"], answer=item["answer"])
    for item in question_data
]

# Run the quiz until no questions remain.
quiz = QuizBrain(questionBank)
while quiz.stillHasQuestions():
    quiz.nextQuestion()

print("Quiz Completed")
print(f"\n\nYour final score was {quiz.score}/{quiz.questionNumber}")
5cd304385d54d1d4831d818621f65e310a6a8126 | Python | rksgalaxy/basicpython | /32_Loops.py | UTF-8 | 418 | 3.734375 | 4 | [] | no_license | words = ["hello","world","spams","eggs"]
# Index-based traversal of `words` using a while loop.
position = 0
total = len(words)
while position < total:
    print(words[position] + "!")
    position = position + 1

# The same list walked with for loops instead.
for entry in words:
    print(entry)
for entry in words:
    print(entry + '?')

# Index-based for loop over the same words.
for idx, _ in enumerate(words):
    print(words[idx] + '..')
| true |
eb57885cebcb7b64cd2102cd561e7216c5f2142f | Python | piger/pinolo | /coil/test/test_tokenizer.py | UTF-8 | 4,746 | 3.0625 | 3 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | """Tests for coil.tokenizer."""
import unittest
from coil import tokenizer
class TokenizerTestCase(unittest.TestCase):
    """Unit tests for coil's Tokenizer.

    NOTE(review): this is Python 2 code -- it relies on the removed
    ``unicode`` builtin, bytes/str behaviour and the deprecated
    ``assertEquals`` / ``assert_`` aliases.
    """
    def testEmpty(self):
        # An empty document yields only the EOF token.
        tok = tokenizer.Tokenizer([""])
        self.assertEquals(tok.next().type, 'EOF')
    def testPath(self):
        # A bare key tokenizes as a PATH with 1-based line/column info.
        tok = tokenizer.Tokenizer(["somekey"])
        first = tok.next()
        self.assert_(isinstance(first, tokenizer.Token))
        self.assertEquals(first.type, 'PATH')
        self.assertEquals(first.value, "somekey")
        self.assertEquals(first.line, 1)
        self.assertEquals(first.column, 1)
        self.assertEquals(tok.next().type, 'EOF')
    def testString(self):
        # Quoted text becomes a VALUE token holding the unquoted str.
        tok = tokenizer.Tokenizer(["'string'"])
        first = tok.next()
        self.assertEquals(first.type, 'VALUE')
        self.assert_(isinstance(first.value, str))
        self.assertEquals(first.value, "string")
        self.assertEquals(first.line, 1)
        self.assertEquals(first.column, 1)
        self.assertEquals(tok.next().type, 'EOF')
    def testUnocide(self):
        # NOTE(review): method name is a typo for "testUnicode"; renaming
        # would change which tests unittest discovers, so it is kept.
        # UTF-8 encoded input decodes back to a unicode VALUE.
        tok = tokenizer.Tokenizer(
            [u"'\u3456'".encode("utf-8")],
            encoding='utf-8')
        first = tok.next()
        self.assertEquals(first.type, 'VALUE')
        self.assert_(isinstance(first.value, unicode))
        self.assertEquals(first.value, u"\u3456")
        self.assertEquals(first.line, 1)
        self.assertEquals(first.column, 1)
        self.assertEquals(tok.next().type, 'EOF')
    def testNumbers(self):
        # Integers stay int, decimals become float; note -4 == -4.0 so the
        # float check below is what distinguishes the two.
        tok = tokenizer.Tokenizer(["1 2.0 -3 -4.0 0"])
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, 1)
        self.assert_(isinstance(token.value, int))
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, 2.0)
        self.assert_(isinstance(token.value, float))
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, -3)
        self.assert_(isinstance(token.value, int))
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, -4)
        self.assert_(isinstance(token.value, float))
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, 0)
        self.assert_(isinstance(token.value, int))
        self.assertEquals(tok.next().type, 'EOF')
    def testBoolean(self):
        # True/False literals become bool VALUEs.
        tok = tokenizer.Tokenizer(["True False"])
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, True)
        self.assert_(isinstance(token.value, bool))
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, False)
        self.assert_(isinstance(token.value, bool))
        self.assertEquals(tok.next().type, 'EOF')
    def testNone(self):
        # The None literal becomes a VALUE holding None.
        tok = tokenizer.Tokenizer(["None"])
        token = tok.next()
        self.assertEquals(token.type, 'VALUE')
        self.assertEquals(token.value, None)
        self.assertEquals(tok.next().type, 'EOF')
    def testCounters(self):
        # Line/column bookkeeping across multi-line and triple-quoted strings.
        tok = tokenizer.Tokenizer(["'string' '''foo''' '' '''''' other",
                                   "'''multi line string",
                                   "it is crazy''' hi",
                                   "  bye"])
        tok.next()
        token = tok.next()
        self.assertEquals(token.line, 1)
        self.assertEquals(token.column, 10)
        token = tok.next()
        self.assertEquals(token.line, 1)
        self.assertEquals(token.column, 20)
        token = tok.next()
        self.assertEquals(token.line, 1)
        self.assertEquals(token.column, 23)
        token = tok.next() # other
        self.assertEquals(token.line, 1)
        self.assertEquals(token.column, 30)
        token = tok.next()
        self.assertEquals(token.line, 2)
        self.assertEquals(token.column, 1)
        token = tok.next() # hi
        self.assertEquals(token.line, 3)
        self.assertEquals(token.column, 16)
        token = tok.next() # bye
        self.assertEquals(token.line, 4)
        self.assertEquals(token.column, 3)
        self.assertEquals(tok.next().type, 'EOF')
    def testSpecialChars(self):
        # Each punctuation character is its own token type.
        tok = tokenizer.Tokenizer(["{}[]:~="])
        self.assertEquals(tok.next().type, '{')
        self.assertEquals(tok.next().type, '}')
        self.assertEquals(tok.next().type, '[')
        self.assertEquals(tok.next().type, ']')
        self.assertEquals(tok.next().type, ':')
        self.assertEquals(tok.next().type, '~')
        self.assertEquals(tok.next().type, '=')
        self.assertEquals(tok.next().type, 'EOF')
| true |
61a31222d1890992966c2a556d7d4924fa17d390 | Python | Songtuan/Captioning-ImageNet | /modules/captioner/UpDownCaptioner.py | UTF-8 | 6,354 | 2.546875 | 3 | [] | no_license | import torch
import torch.nn as nn
import allennlp.nn.beam_search as allen_beam_search
from modules.updown_cell import UpDownCell
from functools import partial
# Pick the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
class UpDownCaptioner(nn.Module):
    """Bottom-up top-down ("Up-Down") image captioning decoder.

    Training mode: teacher-forced decoding that returns a cross-entropy
    loss. Eval mode: beam-search decoding that returns the best sequence.
    """
    def __init__(self, vocab, image_feature_size=2048, embedding_size=1000, hidden_size=1200,
                 attention_projection_size=768, seq_length=20, beam_size=3,
                 pretrained_embedding=None, state_machine=None):
        # vocab maps token string -> index; its length defines the output size.
        super(UpDownCaptioner, self).__init__()
        vocab_size = len(vocab)
        self.vocab = vocab
        self.seq_length = seq_length
        self.state_machine = state_machine
        self.image_feature_size = image_feature_size
        self.beam_size = beam_size
        # define up-down cell (attention LSTM + language LSTM)
        self._updown_cell = UpDownCell(image_feature_size=image_feature_size, embedding_size=embedding_size,
                                       hidden_size=hidden_size, attention_projection_size=attention_projection_size)
        # define embedding layer
        if pretrained_embedding is not None:
            # use the given pre-trained word embedding matrix
            self._embedding_layer = nn.Embedding.from_pretrained(pretrained_embedding).float()
        else:
            self._embedding_layer = nn.Embedding(num_embeddings=vocab_size,
                                                 embedding_dim=embedding_size)
        # linear layer producing the logits for the softmax distribution
        self._output_layer = nn.Linear(hidden_size, vocab_size, bias=True)
        self._log_softmax = nn.LogSoftmax(dim=1)
        # ignore '<unk>' positions when computing the loss
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.vocab['<unk>'])
    def _step(self, tokens, states, image_features):
        '''
        Implement a single decode step.
        :param image_features(torch.Tensor): image features produced by the encoder,
                a tensor with shape (batch_size, num_boxes, feature_size)
        :param tokens(torch.Tensor): input tokens, a tensor with shape (batch_size)
                (or (batch_size * beam_size) during beam search)
        :param states(Dict[str, torch.Tensor]): a dict containing the previous hidden state
        :return: a tuple (torch.Tensor, Dict[str, torch.Tensor]) of
                logits (training) or log-probs (eval) and the new states
        '''
        if image_features.shape[0] != tokens.shape[0]:
            # Beam search flattens (batch, beam) into one dimension, so the
            # image features must be tiled beam_size times to match.
            batch_size, num_boxes, image_feature_size = image_features.shape
            net_beam_size = int(tokens.shape[0] / batch_size)
            assert net_beam_size == self.beam_size
            image_features = image_features.unsqueeze(1).repeat(1, net_beam_size, 1, 1)
            # batch_size, beam_size, num_boxes, image_feature_size = image_features.shape
            image_features = image_features.view(batch_size * net_beam_size, num_boxes, image_feature_size)
        token_embeddings = self._embedding_layer(tokens)
        logits, states = self._updown_cell(image_features, token_embeddings, states)
        logits = self._output_layer(logits)
        log_probs = self._log_softmax(logits)
        if self.training:
            # in training mode we need raw logits to calculate the loss
            return logits, states
        else:
            # in eval mode we need the log-prob distribution over words
            return log_probs, states
    def forward(self, image_features, targets=None):
        '''
        Implement forward propagation.
        :param image_features(torch.Tensor): image features produced by the encoder, a tensor
                with shape (batch_size, num_boxes, feature_size)
        :param targets(torch.Tensor): ground-truth captions, a tensor with shape (batch_size, max_length);
                required in training mode
        :return: dict with key 'loss' (training) or 'seq' (eval)
        '''
        output = {}
        batch_size = image_features.shape[0]
        states = None
        if self.training:
            # in training mode, ground-truth targets must not be None
            assert targets is not None
            # max decoder steps we need to perform (inputs exclude the last token)
            max_step = targets.shape[-1] - 1
            # a tensor holding the logits of each step
            logits_seq = torch.zeros(max_step, batch_size, len(self.vocab)).to(device)
            # transpose targets to shape (max_length, batch_size);
            # this is convenient when we calculate the loss
            targets = targets.permute(1, 0)
            for t in range(max_step):
                # perform one teacher-forced decode step
                tokens = targets[t, :]
                # logits has shape (batch_size, vocab_size)
                logits, states = self._step(image_features=image_features, tokens=tokens, states=states)
                # update logits_seq
                logits_seq[t] = logits
            # the ground-truth targets exclude the first token
            # '<start>' since the model does not produce this token at
            # the beginning of the sequence
            gt = targets[1:, :]
            # flatten logits_seq to (batch_size * max_step, vocab_size)
            # and the ground-truth caption to (batch_size * max_step) so both
            # are accepted as arguments by the cross-entropy criterion
            gt = gt.reshape(-1)
            logits_seq = logits_seq.view(-1, len(self.vocab))
            loss = self.criterion(logits_seq, gt)
            # add loss to the output dict
            output['loss'] = loss
        else:
            # '<boundary>' doubles as both start and end token when present.
            end_index = self.vocab['<boundary>'] if '<boundary>' in self.vocab else self.vocab['<end>']
            start_index = self.vocab['<boundary>'] if '<boundary>' in self.vocab else self.vocab['<start>']
            beam_search = allen_beam_search.BeamSearch(end_index=end_index,
                                                      max_steps=self.seq_length, beam_size=self.beam_size,
                                                      per_node_beam_size=self.beam_size)
            # TODO: using to(device) instead of .cuda()
            # init_tokens = torch.tensor([start_index]).expand(batch_size).cuda()
            init_tokens = torch.tensor([start_index]).expand(batch_size).to(device)
            step = partial(self._step, image_features=image_features)
            top_k_preds, log_probs = beam_search.search(start_predictions=init_tokens, start_state=states, step=step)
            # keep only the highest-scoring beam per image
            preds = top_k_preds[:, 0, :]
            output['seq'] = preds
        return output
    def load(self, PATH):
        """Load model weights from a checkpoint dict stored under key 'model'."""
        self.load_state_dict(torch.load(PATH)['model'])
| true |
168b99987e56a67a394c88ce57c8f6590ef01817 | Python | zmunk/Project-Euler | /Euler42.py | UTF-8 | 878 | 2.75 | 3 | [] | no_license | def vals():
    # Triangle numbers up to 192 (the largest word value is 145, see below).
    m = tri(192)
    inp = open('words2.txt',"r")  # NOTE(review): file handle is never closed
    r = inp.read().lower().replace('"', "").replace(",", " ")
    r = r + " "  # trailing space so the last word is flushed by the loop below
    print r
    # NOTE(review): Python 2 code; 'sum', 'max' and 'l' shadow builtins.
    sum = 0
    l = []
    count = 0
    temp = ""
    max = 0
    for c in r:
        temp = temp + c
        if c == ' ':
            # print temp
            l.append(sum) #max = 145
            if sum > max:
                max = sum
            # count words whose letter-value total is a triangle number
            if sum in m:
                # print temp,
                # print sum
                count = count + 1
            sum = 0
            temp = ""
            continue
        # 'a' -> 1 ... 'z' -> 26 (ord('a') == 97)
        sum = sum + ord(c) - 96
    # print max
    return count
def tri(lim):
    """Return the list of triangular numbers n = i*(i+1)/2 with n <= lim.

    Uses floor division so the values stay integers on both Python 2 and
    Python 3; the original ``/`` produced floats under Python 3's true
    division, which would break ``sum in m`` membership semantics subtly.
    """
    result = []
    i = 1
    while True:
        n = i * (i + 1) // 2  # always an integer: i*(i+1) is even
        if n > lim:
            return result
        result.append(n)
        i = i + 1
# print tri(146)
# Python 2 print statement: output the number of "triangle words" found.
print vals()
| true |
f26a42941864a6aabcb2d4c6b8fcbe3e3af93981 | Python | gilsoneng/desafio_ds_conexoes | /desafio.py | UTF-8 | 12,561 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 11:48:03 2021
@author: altran
"""
import pandas as pd
import numpy as np
import sys
import tensorflow as tf
import math
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import sklearn.metrics
from sklearn.preprocessing import LabelEncoder
import datetime
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
import pickle
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from sklearn.inspection import permutation_importance
import shap
def inverse_dict(my_dict):
    """Return a new dict with the keys and values of *my_dict* swapped.

    If several keys share the same value, the key seen last wins, and the
    entry keeps the insertion position of the first occurrence -- exactly
    the behaviour of the original two-step loop implementation.

    :param my_dict: the dictionary to invert.
    :return: a dict mapping each value of *my_dict* back to one of its keys.
    """
    return {value: key for key, value in my_dict.items()}
def pd_read_csv(delimiter, folder_raw, filename):
    """Read ``folder_raw/filename`` into a DataFrame using *delimiter*.

    The original body interpolated a corrupted ``(unknown)`` placeholder
    instead of *filename*, leaving the parameter unused and the path
    unusable; the intended interpolation is restored here.
    """
    df = pd.read_csv(filepath_or_buffer=f'{folder_raw}/{filename}',
                     delimiter=delimiter)
    return df
def transform_raw_clean(filename
                        , transform
                        , folder_raw = 'RAW'
                        , folder_clean = 'CLEAN'
                        , delimiter = ';'
                        , number_columns = 'all'):
    """Clean one RAW csv and persist the result under CLEAN.

    transform == 'transform_float_column': split rows by whether
    *number_columns* is filled (training rows) or null (rows to predict),
    cast it to float, and return (train_df, prediction_df).
    transform == 'get_median_float_column': impute nulls with the column
    median (column mode when the column is not numeric) and return the
    imputed DataFrame.

    NOTE(review): the literal '(unknown)' fragments in the output paths
    look like a corrupted interpolation of *filename* -- confirm before
    relying on the written files.
    """
    df_raw = pd_read_csv(delimiter = delimiter, folder_raw = folder_raw, filename = filename)
    if transform == 'transform_float_column':
        # Rows with the target present are training data; null rows are kept
        # aside to be predicted later.
        df_raw_train = df_raw[df_raw[number_columns].notnull()]
        df_raw_previsao = df_raw[df_raw[number_columns].isnull()]
        df_raw_train[number_columns] = df_raw[number_columns].astype(float)
        df_raw_train.to_csv(f'{folder_clean}/(unknown)_train.csv',sep = delimiter, index = False)
        df_raw_previsao.to_csv(f'{folder_clean}/(unknown)_previsao.csv',sep = delimiter, index = False)
        return df_raw_train, df_raw_previsao
    elif transform == 'get_median_float_column':
        # 'all' means impute every column.
        number_columns = df_raw.columns if number_columns == 'all' else number_columns
        for column in number_columns:
            print(column)
            try:
                # Numeric columns: fill nulls with the median.
                median = df_raw[column].median()
                df_raw['column_null'] = df_raw[column].isnull()
                df_raw[column] = df_raw.apply(lambda x: median if x['column_null'] else x[column], axis = 1)
                df_raw = df_raw.drop(columns=['column_null'], axis =1)
            except TypeError:
                # Non-numeric columns raise TypeError on median(); use the mode.
                mode = df_raw[column].mode()[0]
                df_raw['column_null'] = df_raw[column].isnull()
                df_raw[column] = df_raw.apply(lambda x: mode if x['column_null'] else x[column], axis = 1)
                df_raw = df_raw.drop(columns=['column_null'], axis =1)
        df_raw.to_csv(f'{folder_clean}/(unknown).csv',sep = delimiter, index = False)
        return df_raw
def prepare_training_database():
    """Assemble the modelling tables.

    Cleans the connections file (split into train/predict by the target
    column) and the individuals file (null imputation), then left-joins
    the per-individual attributes twice onto each connection: once as the
    transmitter ('V1') and once as the receiver ('V2').
    Returns (training dataframe, prediction dataframe).
    """
    filename = 'conexoes_espec.csv'
    number_columns = 'prob_V1_V2'
    df_clean_train_conexoes, df_clean_previsao_conexoes = transform_raw_clean(filename
                        , transform = 'transform_float_column'
                        , folder_raw = 'RAW'
                        , folder_clean = 'CLEAN'
                        , delimiter = ';'
                        , number_columns = number_columns)
    filename = 'individuos_espec.csv'
    df_transform_individuos = transform_raw_clean(filename
                        , transform = 'get_median_float_column'
                        , folder_raw = 'RAW'
                        , folder_clean = 'CLEAN'
                        , delimiter = ';'
                        , number_columns = 'all')
    # Suffix every individual attribute with '_transmissor' / '_receptor';
    # rename() returns copies, so the two views do not interfere.
    df_transform_individuos_transmissor = df_transform_individuos
    for column in df_transform_individuos_transmissor.columns:
        renamed_column = f'{column}_transmissor'.replace(' ', '_')
        df_transform_individuos_transmissor = df_transform_individuos_transmissor.rename(columns={f'{column}': f'{renamed_column}'})
    df_transform_individuos_receptor = df_transform_individuos
    for column in df_transform_individuos_receptor.columns:
        renamed_column = f'{column}_receptor'.replace(' ', '_')
        df_transform_individuos_receptor = df_transform_individuos_receptor.rename(columns={f'{column}': f'{renamed_column}'})
    print(df_transform_individuos_transmissor.columns)
    # Join individual attributes onto each connection end.
    df_clean_train_conexoes_merged = df_clean_train_conexoes.merge(df_transform_individuos_transmissor, how = 'left', left_on = 'V1', right_on = 'name_transmissor')
    df_clean_train_conexoes_merged = df_clean_train_conexoes_merged.merge(df_transform_individuos_receptor, how = 'left', left_on = 'V2', right_on = 'name_receptor')
    df_clean_previsao_conexoes_merged = df_clean_previsao_conexoes.merge(df_transform_individuos_transmissor, how = 'left', left_on = 'V1', right_on = 'name_transmissor')
    df_clean_previsao_conexoes_merged = df_clean_previsao_conexoes_merged.merge(df_transform_individuos_receptor, how = 'left', left_on = 'V2', right_on = 'name_receptor')
    return df_clean_train_conexoes_merged, df_clean_previsao_conexoes_merged
def feature_engineer(df, feature_columns, target_train, cat_columns = ''):
    """Label-encode categorical columns and split *df* into train/test.

    NOTE(review): mutates *df* (adds ``<col>_code`` columns) and mutates
    *feature_columns* in place (category column names are swapped for
    their ``_code`` counterparts). The mapping table ``df_cat`` is
    returned so unseen data can be encoded with the same codes.
    """
    df_cat = pd.DataFrame([])
    # '' means "inspect every column"; only object-dtype columns get encoded.
    cat_columns = df.columns if cat_columns == '' else cat_columns
    for col_name in cat_columns:
        if(df[col_name].dtype == 'object'):
            # Table of (code, category) pairs for this column.
            df_cat_tmp = pd.DataFrame(df[col_name].unique()).reset_index().rename(columns = {'index': 'code', 0: 'category'})
            dict_tmp = pd.DataFrame(df[col_name].unique()).to_dict()[0]
            inv_dict_tmp = inverse_dict(dict_tmp)  # category -> integer code
            df_cat_tmp['name_map'] = f'{col_name}'
            df_cat = df_cat.append(df_cat_tmp, ignore_index=True)
            df[f'{col_name}_code'] = df[col_name].apply(lambda x: inv_dict_tmp[x])
            feature_columns.remove(col_name)
            feature_columns.append(f'{col_name}_code')
    # 70/30 random split.
    df_train, df_test = train_test_split(df, test_size=0.3)
    return df_train, df_test, feature_columns, df_cat
def model_compile(input_numbers, layers_numbers = 6, out_numbers = 1, activation = 'relu', neuron_units = 50):
    """Build and compile a dense feed-forward Keras regressor.

    Architecture: one linear layer of ``2 * input_numbers`` units, then
    ``layers_numbers`` hidden layers of ``neuron_units`` units using
    *activation*, then a linear output layer of ``out_numbers`` units.
    The model is compiled with MSE loss and the Adam optimizer.
    """
    layer_stack = [Dense(units=2 * input_numbers, activation='linear', input_dim=input_numbers)]
    layer_stack.extend(
        Dense(units=neuron_units, activation=activation)
        for _ in range(layers_numbers)
    )
    layer_stack.append(Dense(units=out_numbers, activation='linear'))

    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='mse', optimizer="adam")
    return model
if __name__ == "__main__":
    # Timestamp-based model version tag (safe for use in file names).
    v_modelo = str(datetime.datetime.now()).replace('-','_').replace(' ','_').replace(':','_').replace('.','_')
    df, df_previsao = prepare_training_database()
    feature_columns = ['grau', 'proximidade',
                       'idade_transmissor', 'estado_civil_transmissor',
                       'qt_filhos_transmissor', 'estuda_transmissor', 'trabalha_transmissor',
                       'pratica_esportes_transmissor', 'transporte_mais_utilizado_transmissor',
                       'IMC_transmissor', 'idade_receptor',
                       'estado_civil_receptor', 'qt_filhos_receptor', 'estuda_receptor',
                       'trabalha_receptor', 'pratica_esportes_receptor',
                       'transporte_mais_utilizado_receptor', 'IMC_receptor']
    target_train = ['prob_V1_V2']
    cat_columns = ['grau','proximidade','estado_civil_transmissor','transporte_mais_utilizado_transmissor','estado_civil_receptor','transporte_mais_utilizado_receptor']
    df_train, df_test, feature_columns, df_cat = feature_engineer(df, feature_columns, target_train, cat_columns)
    df_train.to_csv(f'EVALUATED/base_treinamento_{v_modelo}.csv',sep = ';', index = False)
    df_test.to_csv(f'EVALUATED/base_teste_{v_modelo}.csv',sep = ';', index = False)
    input_numbers = len(feature_columns)
    # Encode categorical variables of the prediction set using the
    # category -> code mapping learned from the training data.
    for cat in df_cat['name_map'].unique():
        dict_tmp = df_cat[df_cat['name_map']==cat][['category', 'code']].reset_index().to_dict()['category']
        inv_dict_tmp = inverse_dict(dict_tmp)
        df_previsao[f'{cat}_code'] = df_previsao[cat].apply(lambda x: inv_dict_tmp[x])
    #-----------deep_learning-----------
    test_type = 'deep_learning'
    model = model_compile(input_numbers, layers_numbers = 6, out_numbers = 1, activation = 'relu', neuron_units = 50)
    model.fit(df_train[feature_columns], df_train[target_train],
              epochs=15, verbose=1, use_multiprocessing=True)
    model.save(f"MODELS/model_{test_type}_{v_modelo}")
    Y_Predict = model.predict(df_test[feature_columns])
    df_test['Y_Predict'] = Y_Predict.astype(float)
    mse = mean_squared_error(df_test[target_train], df_test['Y_Predict'])
    r2_score = sklearn.metrics.r2_score(df_test[target_train], df_test['Y_Predict'])
    print(f'Test Type: {test_type}, Mean Square Error: {mse.round(2)}, R²: {r2_score.round(2)}')
    # model prediction on the unseen rows
    df_previsao['prob_V1_V2'] = model.predict(df_previsao[feature_columns]).astype(float)
    df_previsao.to_csv(f'EVALUATED/previsao_final_{test_type}_{v_modelo}.csv',sep = ';', index = False)
    df_previsao['fato'] = 'previsao'
    df_test['fato'] = 'teste'
    df_train['fato'] = 'train'
    df_test = df_test.drop(columns=['Y_Predict'], axis = 1)
    df_base_full = pd.concat([df_test, df_train, df_previsao])
    df_base_full.to_csv(f'EVALUATED/base_full_{test_type}_{v_modelo}.csv',sep = ';', index = False)
    #-----------linear_regression-----------
    test_type = 'linear_regression'
    model = LinearRegression()
    model.fit(df_train[feature_columns], df_train[target_train])
    filename = f"MODELS/model_{test_type}_{v_modelo}.sav"
    pickle.dump(model, open(filename, 'wb'))
    Y_Predict = model.predict(df_test[feature_columns])
    df_test['Y_Predict'] = Y_Predict#.astype(float)
    mse = mean_squared_error(df_test[target_train], df_test['Y_Predict'])
    r2_score = sklearn.metrics.r2_score(df_test[target_train], df_test['Y_Predict'])
    print(f'Test Type: {test_type}, Mean Square Error: {mse.round(2)}, R²: {r2_score.round(2)}')
    # model prediction on the unseen rows
    #Y_Predict = model.predict(df_previsao[feature_columns])
    df_previsao['Y_Predict'] = model.predict(df_previsao[feature_columns]).astype(float)
    df_previsao.to_csv(f'EVALUATED/previsao_final_{test_type}_{v_modelo}.csv',sep = ';', index = False)
    df_previsao['fato'] = 'previsao'
    df_test['fato'] = 'teste'
    df_train['fato'] = 'train'
    df_test = df_test.drop(columns=['Y_Predict'], axis = 1)
    df_base_full = pd.concat([df_test, df_train, df_previsao])
    df_base_full.to_csv(f'EVALUATED/base_final_{test_type}_{v_modelo}.csv',sep = ';', index = False)
    #-----------Random Forest-----------
    test_type = 'random_forest'
    RF_model = RandomForestRegressor(n_estimators = 20, random_state = 42, n_jobs=-1) # Multiprocessing param: n_jobs=-1
    RF_model.fit(df_train[feature_columns], df_train[target_train])
    filename = f"MODELS/model_{test_type}_{v_modelo}.sav"
    pickle.dump(RF_model, open(filename, 'wb'))
    df_test['Y_Predict'] = RF_model.predict(df_test[feature_columns])
    mse = mean_squared_error(df_test[target_train], df_test['Y_Predict'])
    r2_score = sklearn.metrics.r2_score(df_test[target_train], df_test['Y_Predict'])
    print(f'Test Type: {test_type}, Mean Square Error: {mse.round(2)}, R²: {r2_score.round(2)}')
    #-----------Shap Explainable-----------
    # Plot the 15 most important features of the random forest.
    feature_importances = RF_model.feature_importances_
    rf_results = pd.DataFrame({'Feature':feature_columns, 'Feature importance':list(feature_importances)})
    rf_results_sorted = rf_results.sort_values('Feature importance', ascending = False)
    sns.barplot(y = 'Feature', x = 'Feature importance', data = rf_results_sorted.iloc[0:15,:])
    plt.show()
    # my_model_explainer = shap.TreeExplainer(RF_model)
    # X_sample = df_test[feature_columns][0:50]
    # shap_values = my_model_explainer.shap_values(X_sample)
    # shap.summary_plot(shap_values, X_sample)
    # shap.dependence_plot('grau_code',shap_values, X_sample)
    # person = 0
    # shap_plot = shap.force_plot(my_model_explainer.expected_value, shap_values[person], features = X_sample.iloc[person], feature_names = feature_columns, matplotlib=True, plot_cmap=['#77dd77', '#f99191'])
    #-----------svm-----------
    # test_type = 'svm_svr'
    # model = SVR()
    # model.fit(df_train[feature_columns], df_train[target_train])
    # model.save(f"model_{v_modelo}_{test_type}")
    # Y_Predict = model.predict(df_test[feature_columns])
    # df_test['Y_Predict'] = Y_Predict.astype(float)
    # mse = mean_squared_error(df_test[target_train], df_test['Y_Predict'])
    # r2_score = sklearn.metrics.r2_score(df_test[target_train], df_test['Y_Predict'])
    # print(f'Test Type: {test_type}, Mean Square Error: {mse.round(2)}, R²: {r2_score.round(2)}')
    # #predição do modelo
    # Y_Predict = model.predict(df_previsao[feature_columns])
    # df_previsao['Y_Predict'] = Y_Predict.astype(float)
    # df_previsao.to_csv(f'EVALUATED/previsao_final_{v_modelo}_{test_type}.csv',sep = ';', index = False)
158e35f1a283dfcf6744d160446c199ded2b2113 | Python | ndegwaofficial/IBM-Innovation-Club-CybersecurityCodelabs | /network_scanner/net_scan_step3.2.py | UTF-8 | 663 | 2.921875 | 3 | [] | no_license | import scapy.all as scapy
def scan(ip):
    """Broadcast an ARP request for *ip* (CIDR allowed) and print each responder."""
    arp_request = scapy.ARP(pdst=ip)
    broadcast= scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
    arp_request_broadcast = broadcast/arp_request
    #save only the first element [0] (the answered packets) to answered_list
    answered_list = scapy.srp(arp_request_broadcast, timeout=5)[0]
    #for each packet in answered_list print the responder's IP and MAC
    for element in answered_list:
        #each element is a pair: (packet sent [0], answer received [1])
        print(element[1].psrc)
        print(element[1].hwsrc)
print("-----------------------------------------------------------------------------------------------")
# Scan the local /24 network (adjust the address to your own subnet).
scan("192.168.100.1/24")
| true |
74828b1711c93557d14dc90a07de74aadb1fac9c | Python | tjlee/poptimizer | /poptimizer/data/adapters/gateways/tests/test_cbr.py | UTF-8 | 1,217 | 2.515625 | 3 | [
"Unlicense"
] | permissive | """Тесты загрузки данных о максимальных ставках депозитов с сайта ЦБР."""
from datetime import datetime
import pandas as pd
import pytest
from poptimizer.data.adapters.gateways import cbr
from poptimizer.data.adapters.html import parser
from poptimizer.shared import col
def test_date_parser():
    """Check handling of the three ten-day periods ("decades") in dates."""
    # Decade III maps to day 21, II to day 11, I to day 1; IV is invalid.
    assert cbr.date_parser("III.05.2021") == datetime(2021, 5, 21)
    assert cbr.date_parser("II.04.2021") == datetime(2021, 4, 11)
    assert cbr.date_parser("I.03.2021") == datetime(2021, 3, 1)
    assert cbr.date_parser("IV.03.2021") is None
# Raw fixture: rates in percent, dates deliberately unsorted.
DF = pd.DataFrame(
    [[4.1], [3.9]],
    index=["2020-01-20", "2014-11-25"],
    columns=[col.RF],
)
# Expected result: sorted by date and converted from percent to fractions.
DF_REZ = pd.DataFrame(
    [[0.039], [0.041]],
    index=["2014-11-25", "2020-01-20"],
    columns=[col.RF],
)
@pytest.mark.asyncio
async def test_loader(mocker):
    """Fetched data must come back sorted and converted to fractions."""
    mocker.patch.object(parser, "get_df_from_url", return_value=DF)
    loader = cbr.RFGateway()
    pd.testing.assert_frame_equal(await loader(), DF_REZ)
| true |
e5ef0327a431f373b3574c5572011202acc38ffd | Python | willcrichton/generative-grading | /src/rubricsampling/grammars/drawCircles/value.py | UTF-8 | 595 | 2.640625 | 3 | [] | no_license | import sys
sys.path.insert(0, '../..')
import generatorUtils as gu
import random
from base import ReusableDecision
# Any time a user uses a magic number, there is a chance
# of an off-by-one error.
# params: key, target
class Value(ReusableDecision):
    """Render a numeric value that is sometimes perturbed by one.

    params: key, target
    """

    def registerChoices(self):
        # Weighted choice: mostly render the exact value, occasionally
        # one too large or one too small (an off-by-one).
        weights = {
            'large': 5,
            'small': 5,
            'false': 100
        }
        self.addChoice(self.getKey(), weights)

    def renderCode(self):
        offsets = {'large': 1, 'small': -1}
        chosen = self.getChoice(self.getKey())
        rendered = self.params['target'] + offsets.get(chosen, 0)
        return str(rendered)
872fb0a877b62b3f1e79352e726e740a07f9ea56 | Python | tsaomao/PythonConfig | /findconfig.py | UTF-8 | 794 | 3.125 | 3 | [] | no_license | # Look at arguments.
# If file location overridden, look for the specified file.
# If not, look for default file (./parseargs.json).
# Read out specific values from file.
# If missing, provide the default.
import argparse
import os.path

parser = argparse.ArgumentParser(description="Load configuration parameters from JSON-based configuration file.")
# Fixed user-facing typo in the help text: "defauflt" -> "default".
parser.add_argument("-f", "--file", type=str, default="./parseargs.json", help="Overrides default config file location with specified location/filename. Default: ./parseargs.json")
parser.add_argument("-p", "--prompt", default=0, action="count", help="Prompt user for value instead of using command line.")
args = parser.parse_args()

# Either ask the user interactively or take the (possibly defaulted) CLI value.
if args.prompt > 0:
    tfile = input("What file would you like? ")
else:
    tfile = args.file
print(tfile)
| true |
8ddcd9fd0908f5091d8381eb61877d2ad7b50c3c | Python | gmth7788/python3_test | /python3_test/src/format_test.py | UTF-8 | 6,066 | 3.40625 | 3 | [] | no_license | #!/usr/bin/evn python3
import locale
import decimal
import math
import sys
#########################
# 位置参数(position argument)替换
#########################
print("The novel '{0}' was published in {1}".format("Hard Times", 1854))
print("{{{0}}} {1} ;-}}".format("I'm in braces", "I'm not")) #位置参数中包含{}
#########################
# 关键字参数(keyword argument)替换
# [注]关键字参数总是位于位置参数之后。
#########################
print("The {who} was {0} last week".format(12, who="boy"))
stock = ["paper", "envelopes", "notepads", "pens", "paper clips"]
print("We have {0[1]} and {0[2]} in stock".format(stock))
#########################
# 使用命名属性(named attribute)
#########################
print("math.pi=={0.pi} sys.maxunicode=={1.maxunicode}".format(math, sys))
#########################
# 连接字符串和数字的方法
#########################
print("{0}{1}".format("The amount due is $", 200))
print('100'.join('¥ ')) #‘¥100 ’
print(('100'.join('¥ ')).strip()) #‘¥100’
#########################
# 使用字典(dict)
#########################
d = dict(animal="elephant", weight=12000)
print("The {0[animal]} weighs {0[weight]}kg".format(d)) #同时显示key-value pair。
#########################
# 映射解包(mapping unpacking)
# **,将映射(如字典)解包为
# key-value列表。
# [注]locals()内置函数返回一个字典,
# key为局部变量名,value为局部变量的值。
# [注]**为“映射解包运算符”(mapping unpacking operator),
# 映射解包后产生一个key-value列表。
# [注]**只用于format()
#########################
element = "Silver"
number = 47
print("Element {number} is {element}".format(**locals()))
print(locals())
print("Element {0[number]} is {0[element]}".format(locals()))
#########################
#decimal.Decimal
#########################
decimal.Decimal("3.4084") #decimal.Decimal的表现形式(representation form)
#"表现形式"提供一个字符串,python可由此重新创建所表示的对象。
print(decimal.Decimal("3.4084")) #decimal.Decimal的字符串形式
#"字符串形式"目的在于方便人们阅读。
#########################
#字符形式(string form) vs. 表现形式(representation form)
#由“转换标识符(conversion specifier)”指定:
# s - 字符形式
# r - 表现形式
# a - 表现形式,但使用ASCII字符
#########################
print("{0} {0!s} {0!r} {0!a}".format(decimal.Decimal("93.4")))
print("{0} {0!s} {0!r} {0!a}".format('王彬'))
print(chr(0x738b)+chr(0x5f6c))
#########################
# 格式化输出(format specifications)
# 通用格式:
# :填充字符(fill),除'}'以外的所有字符
# 对齐方式(align),< 左对齐
# > 右对齐
# ^ 中间对齐
# =pad,符号与数字间字符的数量
# 符号方式(sign),+ 强制有符号
# - 只有负数才需要符号
# #数制前缀, 0b,0o,0x
# 填充0的数量
# 宽度(width),最小域宽度
# ,用于分组
# .精度(precision),字符串的最大宽度,浮点数位数
# 类型(type),整型:b,c,d,n,o,x,X
# 浮点型:e,E,f,g,G,n,%
#########################
# 格式化字符串
s = "The sword of truth"
print("{0}".format(s)) # default formatting
print("{0:25}".format(s)) # minimum width 25
print("{0:<25}".format(s)) # left align, minimum width 25
print("{0:>25}".format(s)) # right align, minimum width 25
print("{0:^25}".format(s)) # center align, minimum width 25
print("{0:-<25}".format(s)) # - fill, left align, minimum width 25
print("{0:.>25}".format(s)) # . fill, right align, minimum width 25
print("{0:^25.10}".format(s)) # center align, maximum width 10
print("{0:.10}".format(s)) # maximum width 10
# 格式化整型
# 在符号与数字之间用0填充
print("{0:0=12}".format(8749203)) # 0 fill, minimum width 12
print("{0:0=12}".format(-8749203)) # 0 fill, minimum width 12
# 最小宽度12,0占位(pad)
print("{0:012}".format(8749203)) # 0 fill, minimum width 12
print("{0:012}".format(-8749203)) # 0 fill, minimum width 12
# 用*填充,最小宽度15
print("{0:*<15}".format(18340427)) # * fill, left align, min width 15
print("{0:*>15}".format(18340427)) # * fill, right align, min width 15
print("{0:*^15}".format(18340427)) # * fill, center align, min width 15
print("{0:*^15}".format(-18340427)) # * fill, left align, min width 15
# 符号
print("[{0: }] [{1: }]".format(539802, -539802)) # space or - sign
print("[{0:+}] [{1:+}]".format(539802, -539802)) # force sign
print("[{0:-}] [{1:-}]".format(539802, -539802)) # - sign if needed
# 数制类型
print("{0:b} {0:o} {0:x} {0:X}".format(14613198))
print("{0:#b} {0:#o} {0:#x} {0:#X}".format(14613198))
# 整型数分组
print("{0:,} {0:*>13,}".format(int(2.39432185e6)))
# n - 对于整型类似于d,对于浮点型类似于g
# 但其特殊性体现在,使用本地小数分隔符和分组分隔符
loc = locale.getlocale() # get current locale
locale.setlocale(locale.LC_ALL, "") #python检验LANG环境变量决定用户的local信息
x, y = (1234567890, 1234.56)
print("{0:n} {1:n}".format(x, y))
locale.setlocale(locale.LC_ALL, "C") #C,小数分隔符和分组分隔符为空
print("{0:n} {1:n}".format(x, y))
#locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
#print("{0:n} {1:n}".format(x, y))
locale.setlocale(locale.LC_ALL, loc) # restore saved locale
# 格式化浮点数
amount = (10 ** 3) * math.pi
print("[{0:12.2e}] [{0:12.2f}]".format(amount))
print("[{0:*>12.2e}] [{0:*>12.2f}]".format(amount))
print("[{0:*>+12.2e}] [{0:*>+12.2f}]".format(amount))
print("{:,.6f}".format(decimal.Decimal("1234567890.1234567890")))
print("{:,.6g}".format(decimal.Decimal("1234567890.1234567890")))
# 格式化虚数
print("{0.real:.3f}{0.imag:+.3f}j".format(4.75917+1.2042j))
print("{0.real:.3f}{0.imag:+.3f}j".format(4.75917-1.2042j))
print("{:,.4f}".format(3.59284e6-8.984327843e6j))
| true |
353ddf19bcd35faa19dbad64ae61a153feea703b | Python | ai-times/infinitybook_python | /chap07_p126_code2.py | UTF-8 | 455 | 3.4375 | 3 | [] | no_license | import turtle as t
t.penup()
t.goto(0,0); t.write(" (0,0)")
t.goto(0,200); t.write("(0,200)")
t.pendown()
t.goto(0,-200); t.write(" (0,-200)")
t.penup()
t.goto(-200,0); t.write("(-200,0)")
t.pendown()
t.goto(200,0); t.write(" (200,0)")
t.penup()
t.goto(-150,-150)
t.pendown()
t.color("blue")
t.goto(150,150); t.write("y=x")
t.goto(0,0)
t.penup()
t.goto(-70,-180)
t.pendown()
t.color("red")
t.goto(70,180); t.write("y=2x")
t.goto(0,0)
| true |
7fbb02ddf70717c46f10d208b8d802ed0f039cd0 | Python | c625v12/411ChrisValko | /testMongo.py | UTF-8 | 559 | 2.71875 | 3 | [] | no_license | import sys, datetime
from pymongo import MongoClient
try:
client = MongoClient('localhost', 27017)
print("Connected to MongoDB")
db = client.test_database
print("Got the Database test_database")
collection = db.test_collection
print("Got the Collection")
post = {"author": "Mike","text": "My first blog post!","tags": ["mongodb", "python", "pymongo"],"date": datetime.datetime.utcnow()}
print("Created the Document object")
post_id = collection.insert_one(post)
except:
e = sys.exc_info()[0]
print("error: %s" % e)
| true |
6ab7f9f29103faefd0d730f1f04d6a49c5e587b9 | Python | theislab/LODE | /DeepRT/ssl_kaggle/dev/utils.py | UTF-8 | 11,481 | 3.0625 | 3 | [
"MIT"
] | permissive | """General utility functions"""
import json
import matplotlib.pyplot as plt
from PIL import Image
import glob as glob
import pandas as pd
import numpy as np
import os
import shutil
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
"""Saves parameters to json file"""
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
"""Loads parameters from json file"""
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']`"""
return self.__dict__
class Logging():
def __init__(self, logging_directory, params):
self.log_dir = logging_directory
self.model_directory = None
self.tensorboard_directory = None
self.params = params
def __create_dir(self, dir):
os.makedirs(dir)
def __create_tensorboard_dir(self, model_dir):
# set abs path to new dir
new_dir = os.path.join(model_dir, "tensorboard_dir")
# create new dir
self.__create_dir(new_dir)
# set object instance to new path
self.tensorboard_directory = new_dir
def __remove_empty_directories(self):
# get current directories
current_directories = glob.glob(self.log_dir + "/*")
# check for each dir, if weight.hdf5 file is contained
for current_directory in current_directories:
if not os.path.isfile(os.path.join(current_directory, "weights.hdf5")):
# remove directory
shutil.rmtree(current_directory)
def create_model_directory(self):
'''
:param logging_directory: string, gen directory for logging
:return: None
'''
# remove emtpy directories
self.__remove_empty_directories()
# get allready created directories
existing_ = os.listdir(self.log_dir)
# if first model iteration, set to zero
if existing_ == []:
new = 0
# save abs path of created dir
created_dir = os.path.join(self.log_dir, str(new))
# make new directory
self.__create_dir(created_dir)
# create subdir for tensorboard logs
self.__create_tensorboard_dir(created_dir)
else:
# determine the new model directory
last_ = max(list(map(int, existing_)))
new = int(last_) + 1
# save abs path of created dir
created_dir = os.path.join(self.log_dir, str(new))
# make new directory
self.__create_dir(created_dir)
# create subdir for tensorboard logs
self.__create_tensorboard_dir(created_dir)
# set class instancy to hold abs path
self.model_directory = created_dir
def save_dict_to_json(self, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
with open(json_path, 'w') as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, )
d = {k: str(v) for k, v in self.params.dict.items()}
json.dump(d, f, indent=4)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
class Evaluation():
'''
labels: list, integers
predictions: list, integers
history: pandas data frame
'''
def __init__(self, labels,predictions, history, model_dir, filenames, params):
self.params = params
self.labels = labels
self.prediction = predictions
self.history = history
self.model_dir = model_dir
self.filenames = filenames
self.accuracy = None
self.precision = None
self.recall = None
self.confusion_matrix = None
def __accuracy(self):
return (accuracy_score(self.labels, self.prediction))
def __precision(self):
return(precision_score(self.labels,self.prediction,average='micro'))
def __recall(self):
return(recall_score(self.labels, self.prediction,average='micro'))
def __confusion_matrix(self):
return(confusion_matrix(self.labels, self.prediction))
def __filenames(self):
# generate example predictions
pred_im = pd.DataFrame(self.filenames)
pred_im_pd = pred_im[0].str.split("/", expand=True)
pred_im_pd = pred_im_pd.rename(columns={0: "labels", 1: "id"})
return(pred_im_pd)
def __plot_confusion_matrix(self,normalize=True,title=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
import itertools
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
y_true = self.labels
y_pred = self.prediction
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.matshow(cm, cmap=plt.cm.Blues)
thresh = cm.max() / 1.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.4f}".format(cm[i, j]),size="large",
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.title("confusion matrix")
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig(os.path.join(self.model_dir,"confusion_matrix.png"))
return
def __plot_history(self):
plt.rcParams.update({'font.size': 16})
f, axs = plt.subplots(2, 2, figsize=(10, 10))
# load loss curves
statistics_pd = self.history
if 'lr' in statistics_pd:
plt.suptitle("Train statistics")
for i in range(1, 4):
plt.subplot(3, 1, i)
if i == 1:
plt.plot(statistics_pd["loss"], label="train loss")
plt.plot(statistics_pd["val_loss"], label="validation loss")
plt.xlabel("epochs")
plt.ylabel("cross entropy")
plt.legend()
if i == 2:
plt.plot(statistics_pd["acc"], label="train accuracy")
plt.plot(statistics_pd["val_acc"], label="validation accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
if i == 3:
plt.plot(statistics_pd["lr"], label="learning rate decay")
plt.xlabel("epochs")
plt.ylabel("lr")
plt.legend()
#plor without learning rate
else:
plt.suptitle("Train statistics")
for i in range(1, 3):
plt.subplot(2, 1, i)
if i == 1:
plt.plot(statistics_pd["loss"], label="train loss")
plt.plot(statistics_pd["val_loss"], label="validation loss")
plt.xlabel("epochs")
plt.ylabel("cross entropy")
plt.legend()
if i == 2:
plt.plot(statistics_pd["acc"], label="train accuracy")
plt.plot(statistics_pd["val_acc"], label="validation accuracy")
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.legend()
plt.savefig(self.model_dir + "/history.png")
def __save_example_predictions(self, params):
#data frame with filenames and labels of test predictions
pred_im_pd = self.__filenames()
#only take the names of which we have predictions
if pred_im_pd.shape[0] > len(self.prediction):
pred_im_pd = pred_im_pd.iloc[:len(self.prediction)]
#test prediction added
pred_im_pd["predictions"] = self.prediction
#set label levels
levels = ["0", "1", "2", "3", "4"]
for level in levels:
pred_im_class_pd = pred_im_pd[pred_im_pd["labels"] == level]
# shuffle indices
pred_im_class_pd = pred_im_class_pd.sample(frac=1)
# save ten predictions
ten_im = pred_im_class_pd.iloc[0:5]
for im_name in ten_im["id"]:
pred_class = ten_im[ten_im["id"] == im_name].predictions.values[0]
im_path = os.path.join(params.data_path, "test", level, im_name)
# create save directory if does not exist
if not os.path.exists(os.path.join(self.model_dir, "predictions", level)):
os.makedirs(os.path.join(self.model_dir, "predictions", level))
outcome_string = "__true__" + str(level) + "__pred__" + str(pred_class) + ".jpeg"
save_example_name = im_name.replace(".jpeg", outcome_string)
fundus_im = np.array(Image.open(im_path))
plt.imsave(os.path.join(self.model_dir, "predictions", level, save_example_name), fundus_im)
def __example_prediction_canvas(self):
plt.rcParams.update({'font.size': 5})
example_prediction_paths = glob.glob(self.model_dir + "/predictions/**/*")
fig = plt.figure(figsize=(10, 10))
# set figure proportion after number of examples created
columns = int(len(example_prediction_paths) / 5)
rows = 5
for i in range(1, columns * rows + 1):
img = np.array(Image.open(example_prediction_paths[i - 1]))
fig.add_subplot(rows, columns, i)
plt.imshow(img)
plt.title(example_prediction_paths[i - 1].split("/")[-1].replace(".jpeg", ""))
plt.axis('off')
plt.savefig(os.path.join(self.model_dir, "example_canvas.png"))
def __main_result(self):
'''init all metrics'''
self.accuracy = self.__accuracy()
self.precision = self.__precision()
self.recall = self.__recall()
# dump all stats in txt file
result_array = np.array(["accuracy",self.accuracy, "precision", self.precision, "recall",self.recall])
np.savetxt(self.model_dir + "/result.txt", result_array, fmt='%s')
def write_plot_evaluation(self):
self.__main_result()
self.__plot_confusion_matrix()
self.__plot_history()
def plot_examples(self):
self.__save_example_predictions(self.params)
self.__example_prediction_canvas()
| true |
dd102818a8323e1055160b0c7581f76fd33f3e5e | Python | mclt0568/pyserverman | /manlib/logging.py | UTF-8 | 3,124 | 3.015625 | 3 | [] | no_license | from typing import Any, IO, List, Tuple
from ColorStr import parse as colorparse
from enum import Enum
from datetime import datetime
import os
import threading
class LogLevel:
tag: str
color: str
def __init__(self, tag: str, color: str) -> None:
self.tag = tag
self.color = color
class LogLevelName(Enum):
LOG = "log"
ERROR = "error"
EXCEPTION = "exception"
WARNING = "warning"
class LogStream:
def __init__(self, stream: IO, enable_color: bool = False) -> None:
self.stream = stream
self.enable_color = enable_color
class Logger:
def __init__(self, output_streams: List[LogStream]) -> None:
self.output_streams = output_streams
self.output_streams_lock = threading.Lock()
self.log_directory = "logs"
self.log_filename = datetime.datetime.now().strftime("%d-%m-%Y_%H-%M-%S.log")
if not os.path.isdir(self.log_directory):
os.mkdir(self.log_directory)
self.message_prefix = "[{}][{}] "
self.message_suffix = "\n"
self.log_levels = {
LogLevelName.LOG: LogLevel("LOG", "§g"),
LogLevelName.ERROR: LogLevel("ERR", "§r"),
LogLevelName.EXCEPTION: LogLevel("EXC", "§r"),
LogLevelName.WARNING: LogLevel("WRN", "§y"),
}
self.default_log_level = self.log_levels[LogLevelName.LOG]
def construct_message(self, message: str, level: LogLevelName = "") -> Tuple[str, str]:
# log level
log_level = self.default_log_level
if level and level in self.log_levels:
log_level = self.log_levels[level]
date_time = datetime.today().strftime("%Y/%m/%d %H:%M:%S")
plain_message = self.message_prefix.format(
date_time,
colorparse(log_level.color + log_level.tag + "§0")
) + message + self.message_suffix
colorized_message = self.message_prefix.format(
date_time,
log_level.tag
) + message + self.message_suffix
return plain_message, colorized_message
def log(self, raw_message: Any, level: LogLevelName = "") -> None:
# message
message = str(raw_message)
message_lines = message.split("\n")
log_messages = []
if level == LogLevelName.EXCEPTION:
self.construct_message(
"== UNEXPECTED EXCEPTION ==", level=LogLevelName.EXCEPTION)
for message_line in message_lines:
log_messages.append(self.construct_message(message_line, level))
if level == LogLevelName.EXCEPTION:
self.construct_message(
"==== END OF EXCEPTION ====", level=LogLevelName.EXCEPTION)
with self.output_streams_lock:
for plain_log_message, colorized_log_message in log_messages:
for stream in self.output_streams:
if stream.enable_color:
stream.stream.write(colorized_log_message)
else:
stream.stream.write(plain_log_message)
stream.stream.flush()
| true |
48b669cce065aaad4da05136de3320325f0413dc | Python | zhanghao-ic/Binance-Orderbook | /OrderBook.py | UTF-8 | 3,728 | 2.984375 | 3 | [
"MIT"
] | permissive | import asyncio
import websockets
import json
import requests
import time
import sys
from collections import OrderedDict
class OrderBook():
def __init__(self, uri, depth_api, symbol, volume):
self.uri = uri
self.depth_api = depth_api
self.symbol = symbol
self.volume = volume
self.updates = []
self.bids = OrderedDict() # ordered dictionary used to sort orders by price
self.asks = OrderedDict()
super().__init__()
# asynchronously receive data from uri
async def get_orders(self):
received_snapshot = False
print(self.symbol + " Average Execution Price for volume: " + str(self.volume))
async with websockets.connect(self.uri) as websocket:
while True:
depth_update = await websocket.recv()
depth_update = json.loads(depth_update)
self.updates.append(depth_update)
# print(depth_update["U"])
# print("receiving update %d", depth_update["u"])
if not received_snapshot:
self.get_depth_snapshot()
received_snapshot = True
self.process_updates()
self.update_console()
def get_depth_snapshot(self):
snapshot = requests.get(self.depth_api)
snapshot = json.loads(snapshot.content)
self.snapshot = snapshot
for order in snapshot["bids"]:
self.bids[float(order[0])] = float(order[1])
for order in snapshot["asks"]:
self.asks[float(order[0])] = float(order[1])
def process_updates(self):
for i in range(len(self.updates)):
if self.updates[i]["u"] < self.snapshot["lastUpdateId"]:
self.updates.pop(i)
else:
for bid in self.updates[i]["b"]:
self.bids[float(bid[0])] = float(bid[1])
for ask in self.updates[i]["a"]:
self.asks[float(ask[0])] = float(ask[1])
# self.bids = dict(sorted(self.bids, reverse=True), )
self.bids = OrderedDict(sorted(self.bids.items(), reverse=True))
self.asks = OrderedDict(sorted(self.asks.items()))
# print(self.bids)
# print(list(self.bids.items())[0][0])
def update_console(self):
print("\rBUY: %f\tSELL: %f" % (self.get_average_price(False), self.get_average_price(True)), end='')
# bid has value of False, ask has value of True for parameter side
def get_average_price(self, side):
book = self.bids
avg = float(0)
if side:
book = self.asks
quantity = float(0)
index = 0
book = list(book.items())
while quantity < self.volume:
curr_order = book[index]
price = curr_order[0]
volume = curr_order[1]
new_quantity = min(volume, self.volume-quantity) # volume filled
quantity += new_quantity
avg += new_quantity*price
index += 1
avg = avg / self.volume
return avg
if __name__ == "__main__":
if not len(sys.argv) == 3:
print("Need quantity and Pair")
sys.exit()
try:
pair = sys.argv[1]
volume = float(sys.argv[2])
if volume < 0:
raise ValueError
except Exception as e:
print("Invalid quantity")
sys.exit()
# instantiate orderbook
BTCUSDT_Book = OrderBook(f"wss://stream.binance.com:9443/ws/{pair.lower()}@depth", f"https://www.binance.com/api/v1/depth?symbol={pair}&limit=1000", pair, volume)
# start receiving updates and start console
asyncio.get_event_loop().run_until_complete(BTCUSDT_Book.get_orders())
| true |
617614135862f8f4237b43ee80f3a8169119b463 | Python | stixaw/PythonScripts | /ListPrint.py | UTF-8 | 257 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
"""
import sys
import os
WORD_LIST=[
'Mark',
'Angel',
'Steve',
'Steve 2',
'Milt',
'Bryant',
'Cory'
]
def Print(list):
for F in list:
print F
if __name__ == '__main__':
Print(WORD_LIST)
| true |
39f28fab1f67137901331ea3dc566d36a54d8844 | Python | gracechin/DE3-ROB1-FEEDING | /fred/src/calibration.py | UTF-8 | 5,662 | 3.265625 | 3 | [] | no_license | # Grace Chin 2018
#
''' Helps calibrate a point from the camera's frame of reference to the robot's frame of reference
Finds the conversion from finding a list of m values and c values for the different dimensions using y = mx + c
'''
import rospy
import rospkg
import numpy as np
from math import sqrt
from numpy import *
from baxter_control import BaxterControl
from perception_sub import PerceptionSub
from geometry_msgs.msg import Point
from std_msgs.msg import String
class Calibration:
def __init__(self):
self.robot = BaxterControl()
def get_mouth_pos(self):
'''Gets the position of the mouth detected from the camera
Returns the point of the mouth'''
global mouth_sub
mouth_sub = rospy.Subscriber("/mouth_xyz_astra", Point, self.return_point)
rospy.wait_for_message("/mouth_xyz_astra", Point)
return point
def return_point(self, msg):
'''Callback function for the mouth subscriber
Defines the point of the mouth'''
global point
point = Point()
point.x, point.y, point.z = msg.x, msg.y, msg.z
# print("Camera position:", point)
mouth_sub.unregister()
return
def linear_regression(self, set1, set2):
''' Finds the relationship between two sets of points (y=mx+c)
Inputs two lists: input list (set1) and output list (set2)
Returns list of m and c.'''
A = np.array(set1)
B = np.array(set2)
m_list = [] # m for x, y, z
c_list = [] # c for x, y, z
for dimension in range(3):
a = []
b = []
for point_index in range(len(A)):
a.append(A[point_index][dimension])
b.append(B[point_index][dimension])
np.array(a)
np.array(b)
a = np.vstack([a, np.ones(len(a))]).T
m, c = np.linalg.lstsq(a, b)[0]
m_list.append(m)
c_list.append(c)
scale=[m_list, c_list]
print("Scale found!")
print("m list : ", m_list)
print("c list : ", c_list)
return scale
def convert_pt(self, input_pt, scale):
''' Converts input_pts using provided scale.
input_pts : list containing x, y, z
scale : list containting m_list and c_list
Returns output_pt'''
output_pt = []
m_list=scale[0]
c_list=scale[1]
for di in range(3):
new_value = input_pt[di]*m_list[di]+c_list[di]
round_v = math.ceil(new_value * 1000.0) / 1000.0
output_pt.append(round_v)
return output_pt
def manual_calibrate(self):
"""Guided manual calibration in the terminal
Records several camera positions [u,v,w] & end effector positions [x,y,z]
Finds a scale containing the list of m and the list of c
"""
uvw_list = []
xyz_list = []
positions = {"camera pos":[uvw_list, self.get_mouth_pos], "end effector position":[xyz_list, self.robot.get_end_effector_pos]}
while True:
n = input("How many points would you like to calibrate with?: ")
try:
n = int(n)
break
except:
print("Please type an integer.")
pass
while True:
# when recording has finished
if (len(uvw_list)==(n)):
print("You have finished recording your points.")
print("Camera coordinates :", uvw_list)
print("End effector coordinates :", xyz_list)
print("Applying linear regression...")
scale = self.linear_regression(uvw_list, xyz_list)
print("----- Calibration finished -----")
print("----- Start Testing -----")
see = raw_input("Would you like to record current camera point? [Y/n]: ")
if (see == '' or see.lower() == 'y'):
camera_point = self.get_mouth_pos()
camera_point = [camera_point.x, camera_point.y, camera_point.z]
go = raw_input("Would you like to go to that camera point? [Y/n]: ")
print(go)
if (go == '' or go.lower() == 'y'):
end = self.convert_pt(camera_point, scale)
start = self.get_end_effector_pos()
print('start:', start)
print('end:', end)
certain = raw_input("You certain? [Y/n]: ")
if (certain == '' or certain.lower() == 'y'):
self.robot.set_end_effector_pos(end[0], end[1], end[2])
return scale
# recording points
for pos in positions:
if (len(positions[pos][0])<(n)):
see = raw_input("Would you like to see current "+pos+" value? [Y/n]: ")
print(see)
if (see == '' or see.lower() == 'y'):
see_pos = positions[pos][1]()
new_pos = [see_pos.x, see_pos.y, see_pos.z]
record = raw_input("Would you like to record this? [Y/n]: ")
if (record == '' or record.lower() == 'y'):
positions[pos][0].append(new_pos)
elif (record.lower() == 'n'): pass
elif (record.lower() == 'q'): sys.quit()
else: print("Invalid response.")
elif (see.lower() == 'n'): pass
else: print("Invalid response.")
print("Camera coordinates :", uvw_list)
print("End effector coordinates :", xyz_list)
else:
pass
if __name__ == '__main__':
rospy.init_node("calibration")
cal = Calibration()
cal.manual_calibrate()
| true |
799f1a0b5f299a9ec84e07d292def8c5d5140367 | Python | GhostEric/FGO-py | /FGO-py/fgoControl.py | UTF-8 | 1,681 | 2.8125 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | import time
ScriptTerminate=type('ScriptTerminate',(Exception,),{'__init__':lambda self,msg='Unknown Reason':Exception.__init__(self,f'Script Stopped: {msg}')})
class Control:
speed=1
def __init__(self):
self.reset()
self.__stopOnDefeatedFlag=False
self.__stopOnSpecialDropFlag=False
def reset(self):
self.__terminateFlag=False
self.__suspendFlag=False
self.__terminateLaterFlag=-1
def terminate(self):self.__terminateFlag=True
def checkTerminate(self):
if self.__terminateFlag:raise ScriptTerminate('Terminate Command Effected')
def suspend(self):self.__suspendFlag=not self.__suspendFlag
def checkSuspend(self):
while self.__suspendFlag:
self.checkTerminate()
time.sleep(.07/self.speed)
def terminateLater(self,count=-1):self.__terminateLaterFlag=count
def checkTerminateLater(self):
if not self.__terminateLaterFlag:raise ScriptTerminate('Terminate Appointment Effected')
self.__terminateLaterFlag-=1
def sleep(self,x,part=.07):
timer=time.time()+(x-part)/self.speed
while time.time()<timer:
self.checkSuspend()
self.checkTerminate()
time.sleep(part/self.speed)
time.sleep(max(0,timer+part/self.speed-time.time()))
def stopOnDefeated(self,x):self.__stopOnDefeatedFlag=x
def checkDefeated(self):
if self.__stopOnDefeatedFlag:raise ScriptTerminate('Battle Defeated')
def stopOnSpecialDrop(self,x):self.__stopOnSpecialDropFlag=x
def checkSpecialDrop(self):
if self.__stopOnSpecialDropFlag:raise ScriptTerminate('Special Drop')
control=Control()
| true |
93351598dd1bedcb957efb58aa149c48d39635d6 | Python | fadilfauzani/Tubes-DasPro | /source/save.py | UTF-8 | 1,794 | 2.734375 | 3 | [] | no_license | import os
def datatostring(data):
s = ""
for i in range (len(data)):
s += str(data[i])
if (i != len(data) - 1):
s += ";"
return s+ '\n'
users = [(3,"fadil","fadill","kotabumi","asdasd","admin"),[4,"fudil","fadill","kotabumi","asdasd","user"]]
gadgets = []
consums = []
riw_consums = []
riwpin_gadgets = []
riwpen_gadgets = []
def save():
path = input("Masukkan nama folder penyimpanan: ")
path = 'saves/' + path
try:
os.mkdir(path)
except:
pass
user = open(path+"/user.csv","w")
user.write("id;username;nama;alamat;password;role\n")
gadget = open(path+"/gadget.csv","w")
gadget.write("id;nama;deskripsi;jumlah;rarity;tahun_ditemukan\n")
consum = open(path+"/consumable.csv","w")
consum.write("id;nama;deskripsi;jumlah;rarity\n")
riw_consum = open(path+"/consumable_history.csv","w")
riw_consum.write("id;id_pengambil;id_consumable;tanggal_pengambilan;jumlah\n")
riwpin_gadget = open(path+ "/gadget_borrow_history.csv", "w")
riwpin_gadget.write("id;id_peminjam;id_gadget;tanggal_peminjaman;jumlah;is_returned\n")
riwpen_gadget = open(path+ "/gadget_return_history.csv","w")
riwpen_gadget.write("id;id_peminjaman;tanggal_peminjaman\n")
for i in users:
user.write(datatostring(i))
for i in gadgets:
gadget.write(datatostring(i))
for i in consums:
consum.write(datatostring(i))
for i in riw_consums:
riw_consum.write(datatostring(i))
for i in riwpin_gadgets:
riwpin_gadget.write(datatostring(i))
for i in riwpen_gadgets:
riwpen_gadget.write(datatostring(i))
user.close()
gadget.close()
consum.close()
riw_consum.close()
riwpin_gadget.close()
riwpen_gadget.close() | true |
43a7d9f9fb01ae868a08f2604a31be87307389f1 | Python | strnisaj/LVR-sat | /DPLL/DPLL.py | UTF-8 | 18,674 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | from Izjave import *
from Sudoku import *
from Hadamard import *
from CNF import *
import time
newPureValFound = True
solutionVals = {}
lockSolutionVals = False
def DPLL(izjava):
# Metoda dobi izjavo, ki jo obdela s pomocjo funkcije prepareStatement(izjava)
# Ce izjava ni na zacetku False (zaradi praznega protislovja), potem klicemo rekurzivno metodo rec_DPLL(izjava, varValues)
# FIXME: prepareStatement naj preveri se, ce ni izjava False zaradi kaksnega praznega OR-a
global solutionVals
solution = False
startTime = time.time()
dataForDPLL = prepareStatement(izjava) # Pripravimo izjavo. Metoda vrne [izjava, varValues], kjer so vrednosti 100% pravilne
if (dataForDPLL != False):
izjava = dataForDPLL[0]
varValues = dataForDPLL[1]
solution = rec_DPLL(izjava, varValues) # klic rekurzivne metode
print('Vrnjena resitev: ', solution)
print('Vrednosti: ' , solutionVals)
endTime = time.time()
timePassed = endTime - startTime
print('Time: ', timePassed)
def prepareStatement(izjava):
# Metoda sprejme izjavo in jo pripravi takole:
# - izjavo pretvori v CNF obliko
# - izjavo pretvori v seznam seznamov oblike [ ... [ ... ] ...]
# - odstrani vse proste spremenljivke
# - preveri, da se ne zgodi primer (X and notX)
# - pobrise vse True izjave oblike (X or notX)
# - najde vse proste spremenljivke
# Metoda vrne seznam, v katerem je izjava in vrednosti spremenljivk
# Ce je primer na osnovi zgornjih ugotovitev neresljiv, potem metoda vrne False
# poresetiram prejsno resitev
global solutionVals
global lockSolutionVals
global newPureValFound
solutionVals = {}
lockSolutionVals = False
newPureValFound = True
izjava = CNF(izjava) # pretvori izjavo v CNF
izjava = izjava.poenostavi().vrni() # dobim [ ... (...) ...]
izjava = get_2D_list(izjava) # dobim [ ... [...] ...]
varValues = {} # Zacetne vrednosti spremenljivk
izjava = removeTrueSantences(izjava) # metoda odstrani podizjave tipa (X or notX) .... TODO: ali to pravilno deluje?
izjava = sortByLength(izjava)
# PREVERJAMO PROSTE SPREMENLJIVKE, DOKLER JE KAKsNA SE PROSTA!!
while (True):
changes = 0 # stevec za ustavitveni pogoj
newVarDict = {}
newVarDict = removeSingleVars(izjava)
if (newVarDict == False):
print('ERROR: getInfo[1] == False ..... returning False!!!')
return False
else:
varValues = concatDicts(varValues, newVarDict)
if varValues == False: # ce je prislo do protislovja (X and notX) potem ni resitve in vrnemo False
print('ERROR: varValues == False ..... returning False!!!')
return False
izjava = processStatement(izjava, varValues) # metoda odstrani OR-e, ki so True in spremenljivke znotraj OR-ov, ki so False
if (newVarDict != {}):
changes = changes + 1
if (changes == 0):
break
# PREVERIMO CISTE SPREMENLJIVKE
# Spodnja while zanka pridobiva ciste spremenljivke, poenostavlja izjavo in to ponavalja, dokler je kaksna spremenljivka cista
while (newPureValFound == True):
pureVals = getPureVals(izjava) # pridobim slovar cistih spremenljivk
varValues = concatDicts(varValues, pureVals) # zdruzim obstojece spremenljivke s cistimi
#preverimo, da nismo prisli do protislovja:
if varValues == False:
return False
izjava = processStatement(izjava, varValues) # metoda odstrani OR-e, ki so True in spremenljivke znotraj OR-ov, ki so False
izjava = sortByLength(izjava) # sortiranje izjave po dolzini podizjav (narascajoce)
# vrnemo seznam, ki vsebuje na rekurzijo pripravljeno izjavo in slovar vrednosti spremenljivk (te niso vec zastopane v izjavi)
return [izjava, varValues]
def rec_DPLL(izjava, varValues):
# Metoda najprej preveri, ce je kaksna spremenljivka cista. Nato poenostavi izjavo in naredi dve kopiji izjave
# Vzame se prva spremenljivka iz izjave in se nastavi na True. Sprocesira se kopija_1 izjave z novo vrednostjo spremenljivke True
# Vzame se prva spremenljivka iz izjave in se nastavi na False. Sprocesira se kopija_2 izjave z novo vrednostjo spremenljivke False
# ce smo dobili ze resitev, potem ne potrebujemo vec preverjanja
global lockSolutionVals
if lockSolutionVals == True:
return True
# Preverimo, ce smo prisli do resitv=true
if is_AND_empty(izjava):
global solutionVals
if lockSolutionVals == False:
lockSolutionVals = True
solutionVals = copyVarValues(varValues)
return True
# Preverimo, ce smo prisli do resitev=false
if is_OR_empty(izjava):
return False
firstVar = getFirstVar(izjava) # pridobimo prvo spremenljivko
izjava_1 = copyStatement(izjava) # prekopiramo izjavo
izjava_2 = copyStatement(izjava) # prekopiramo izjavo
vals_1 = copyVarValues(varValues) # prekopiramo vrednosti
vals_2 = copyVarValues(varValues) # prekopiramo vrednosti
vals_1[firstVar] = True # enkrat vrednost prve spremenljivke nastavimo na True
izjava_1 = processStatement(izjava_1, vals_1)
if (rec_DPLL(izjava_1, vals_1) != False): # rekurzivni klic
return True
vals_2[firstVar] = False
izjava_2 = processStatement(izjava_2, vals_2)
if (rec_DPLL(izjava_2, vals_2) != False): # rekurzivni klic
return True
return False
def getPureVals(izjava):
# Metoda se sprehodi cez izjavo in poisce ciste spremenljivke.
# Metoda vrne slovar cistih spremenljivk, ki jih nastavi na true (x) oz. false (not x)
pureVals = {}
varsInStatement = {}
global newPureValFound
newPureValFound = False
#napolnim slovar zastopanosti spremenljivk: 1: X .... 2: notX ..... 3: (X and notX)
for subIzjava in izjava:
for var in subIzjava:
#ce ni spremenljivke v seznamu, jo dodamo
if ((var.vrni() in varsInStatement) == False):
if isinstance(var, Var):
varsInStatement[var.vrni()] = 1
else:
varsInStatement[var.vrni()] = 2
#ce je spremenljivka ze v seznamu, preverimo katero vrednost ima
else:
vrednost = varsInStatement[var.vrni()]
if ((vrednost == 2) and (isinstance(var, Var))):
varsInStatement[var.vrni()] = 3
elif ( (vrednost == 1) and (isinstance(var, Not)) ):
varsInStatement[var.vrni()] = 3
else:
pass
#ugotovimo, kate key-e je potrebno odstraniti
keysToBeRemoved = []
for key in varsInStatement:
if (varsInStatement[key] == 3):
keysToBeRemoved.append(key)
#odstranimo key-e
for i in keysToBeRemoved:
varsInStatement.pop(i)
#napolnimo slovar cistih spremenljivk
for key in varsInStatement:
newPureValFound = True
if varsInStatement[key] == 1:
pureVals[key] = True
else:
pureVals[key] = False
return pureVals
def processStatement(izjava, vals):
# Metoda odstrani OR stavek, ce je kateri izmed elementov v ORu enak True
# Metoda odstrani element iz OR stavka, ce je ta element enak False
# Metoda vrne samo novo izjavo
# 1A. korak: najprej dobim indexe vseh podizja, kjer je en element enak True
removeIndex = []
index = -1
for subIzjava in izjava:
index = index + 1
for e in range(0, len(subIzjava)): # e predstavlja element podizjave
thisElement = subIzjava[e]
if (thisElement.vrni() in vals):
value = vals[thisElement.vrni()] # pridobimo vrednost
#ce je element e instanca notX
if isinstance(thisElement, Not):
if (value == False):
removeIndex.append(index)
break
elif isinstance(thisElement, Var):
if (value == True):
removeIndex.append(index)
break
# 1B. korak: izbrisem te podizjav iz izjav
removeIndex.reverse() # XXX: preveri, ce so sortirani(!) padajoce
for i in range(0, len(removeIndex)):
delIndexOfStat = removeIndex[i]
del izjava[delIndexOfStat]
# 2A. korak: pridobim indexe elementov v preostalih podizjavah, ki so False
myRemIz = []
myRemSub = []
statIndex = -1
elemIndex = -1
for subIzjava in izjava:
statIndex = statIndex + 1
elemIndex = 0
myRemSub = []
for i in range(0,len(subIzjava)):
if (subIzjava[i].vrni() in vals):
myRemSub.append(elemIndex)
elemIndex = elemIndex + 1
myRemSub.reverse()
myRemIz.append(myRemSub)
# izbrisem iz izjave dolocene indexe
for i in range(0, len(myRemIz)):
for j in range(0, len(myRemIz[i])):
del izjava[i][myRemIz[i][j]]
return izjava
def getFirstVar(izjava):
x = izjava[0][0].vrni()
return x
def copyStatement(izjava):
# Metoda skopira izjavo in vrne njeno kopijo
copy = []
for subIzjava in izjava:
subCopy = []
for i in subIzjava:
subCopy.append(i)
copy.append(subCopy)
return copy
def copyVarValues(varValues):
# Metoda skopira vrednosti spremenljivk in vrne njihovo kopijo
copy = {}
for keys in varValues:
copy[keys] = varValues[keys]
return copy
def get_2D_list(izjava):
# Metoda sprejme list oblike [ ... (...) ...] in vrne list oblike [ ... [] ...].
# Proste spremenljivke so v listu dolzine 1
allList = []
subList = []
# if preveri, da ni samo ene spremenljivke v izjavi znotraj ANDa
if isinstance(izjava, Var) or isinstance(izjava, Not):
subList.append(izjava)
allList.append(subList)
return allList
for i in range(0, len(izjava)):
subList = []
if isinstance(izjava[i], Not) or isinstance(izjava[i], Var):
subList.append(izjava[i])
else:
for var in izjava[i].vrni():
subList.append(var)
allList.append(subList)
return allList
def is_AND_empty(izjava):
# Metoda preveri, ce je izjava = []. V tem primeru imamo resitev in vrnemo True, sicer vrnemo False
if len(izjava) == 0:
return True
else:
return False
def is_OR_empty(izjava):
# Metoda preveri, ce je izjava = [ ... [] ... ].
# ce je katerikoli OR prazen, potem ni resitve in vrnemo True, sicer vrnemo False
for subIzjava in izjava:
if len(subIzjava) == 0:
return True
return False
def concatDicts(oldValues, newValues):
# Metoda zdruzi stare in nove spremenljivke v slovarjih
# Metoda preme 2 slovarja starih in novi vrednosti spremenljivk in jih zdruzi
# Metoda vrne slovar zdruzenih spremenljivk ali False, ce jih ne more zdruziti (ker pride do protislovja)
for key in newValues:
#ce je ze vsebovan
if key in oldValues:
#ce se stara in nova vrednost razlikujeta
if oldValues[key] != newValues[key]:
return False
#ce ni vsebovan, dodamo novo vrednost med stare vrednosti
else:
oldValues[key] = newValues[key]
return oldValues
def removeSingleVars(izjava):
# Metoda iz izjave odstrani vse podizjave, ki so dolzine 1 (torej proste spremenljivke)
# Metoda nastavi vse proste spremenljivke na ustrezno vrednost
# Metoda vrne [izjava, newVarDict]. Izjava = izjava, ki nima nikjer dolzine 1. newVarDict so novo nastavljene vrednosti
# Metoda vrne False, ce pride do (X AND notX)
singleVars = [] #sem shranimo proste spremenljivke
removeSubIndex = [] # shranimo indexe podizjav, ki jih poramo odstraniti
newVarDict = {}
# dobimo vse proste spremenljivke (tudi podvojene) in indexe podizjav, ki jih je potrebno odstraniti
for i in range(0, len(izjava)):
if len(izjava[i]) == 1:
singleVars.append(izjava[i][0])
removeSubIndex.append(i)
#preverimo, da ni prislo do (X and notX)
for i in range(0, (len(singleVars)-1)):
for j in range((i+1), len(singleVars)):
s1 = singleVars[i]
s2 = singleVars[j]
if ((isinstance(s1, Var) and isinstance(s2, Not)) or (isinstance(s1,Not) and isinstance(s2, Var))):
return False
# odstranim duplikate, ceprov ne bi bilo potrebno
removeIndex = []
for i in range(0, (len(singleVars)-1)):
for j in range((i+1), len(singleVars)):
if(singleVars[i] == singleVars[j]):
removeIndex.append(j)
removeIndex.reverse()
for i in range(0, len(removeIndex)):
del singleVars[i]
# dodamo proste spremenljivke v slovar in jim dolocimo ustrezne vrednosti
for i in range(0, len(singleVars)):
if isinstance(singleVars[i], Var):
newVarDict[singleVars[i].vrni()] = True
else:
newVarDict[singleVars[i].vrni()] = False
#vrnemo (testno) samo seznam novih vrednosti. ce se vse sklada itak preveri concatDicts, same spremenljivke pa odstrani processStatement
return newVarDict
def removeTrueSantences(izjava):
# Metoda odstrani podizjave tipa (X or notX)
toBeRemoved = [] #indexi podizjav, ki jih bomo odstranili
indexCounter = -1
for subIzjava in izjava:
indexCounter = indexCounter + 1
for i in range(0, len(subIzjava)-1):
for j in range((i+1), len(subIzjava)):
#preverim ali se zgodi (X or notX)
if ( subIzjava[i].vrni() == subIzjava[j].vrni() ) and ( subIzjava[i].poenostavi() != subIzjava[j].poenostavi() ):
toBeRemoved.append(indexCounter)
# odstranim iz izjav podizjave oblike (X or notX)
toBeRemoved.reverse()
for i in range (0, len(toBeRemoved)):
del izjava[toBeRemoved[i]]
return izjava
def sortByLength(izjava):
# Metoda sortira podizjave v izjavi glede na njihovo dolzino (narascajoce). Vrne sortirano izjavo
for i in range(0, len(izjava)-1):
for j in range((i+1), len(izjava)):
if len(izjava[i]) > len(izjava[j]):
tempIzjava = izjava[j]
izjava[j] = izjava[i]
izjava[i] = tempIzjava
return izjava
def getTestIzjava(caseNumber):
x = Var('X')
y = Var('Y')
z = Var('Z')
q = Var('Q')
a = Var('A')
or_1 = Or([x,Not(Not(y)),Not(z)])
or_2 = Or([Not(x),Not(y)])
or_3 = Or([x,z])
or_4 = x
or_5 = Or( [ z , y, Not(q) ] )
or_6 = Or([Not(x)])
if caseNumber == 1:
i = And([])
elif caseNumber == 2:
i = And([x])
elif caseNumber == 3:
i = And([x, Or([])])
elif caseNumber == 4:
i = And([Or([]),Or([x])])
elif caseNumber == 5:
i = or_4
i = And([Or([x,Not(y),Not(z)]) , Or([Not(x),Not(y)]) , Or([x,z]) , Or([Not(x),Not(q)])])
elif caseNumber == 6:
i = And([or_1 , or_2 , or_3, or_4, or_5])
elif caseNumber == 7:
i = And([x, Or([x,y]), Or([Not(y), z]), Or([Not(x)])])
elif caseNumber == 8:
i = And([x, Not(x), Or([x, q])]) #test (x AND notx)
elif caseNumber == 9:
i = And([Or([x, y, Not(x)]), Or([q,z])]) #test (x or notX)
elif caseNumber == 10:
#testiranje za pureValues ker sta tuki 2xpure, ostalo vse odpade
i = And([ Or([y, Not(q)]) , Or([y, Not(z)]) , Or([x, Not(y)]) , Or([y,z,q]) , Or([x,Not(z)]) , Or([x,Not(q)]) ])
elif caseNumber == 666:
i = And([ Not(x), Or([x,z]) ])
elif caseNumber == 777:
i = And([ Not(x), Or([Not(x), z, Not(y)]) , Or([x,z]) , Or([Not(z), y]), Or([q,a]) , Or([Not(q), a]) , Or([q, Not(a)]) , Or([Not(q),Not(a)]) ])
else:
i = or_4
i = And([Or([x,Not(y),Not(z)]) , Or([Not(x),Not(y)]) , Or([x,z]) , Or([Not(x),Not(q)])])
return i
def pozdravnaMetoda():
print('************************************************************************************************************')
print('Pozdravljeni v algoritmu DPLL')
print('Za zagon algoritma poklicite funkcijo: DPLL(izjava), ki ji podate izjavo')
print('Primer izjave: ((X or Y) and (Y or notZ)) := And( [ Or([ X, Y ]) , Or([ Y, Not(Z) ]) ] )')
print('Za preverjanje pravilnosti delovanja sta spodaj prilozena copy/paste testna primera')
print('izjava=getTestIzjava(0) -----> ((X or notY or notZ) and (notX or notY) and (X or Z) and (notX or notQ))')
print('izjava=getTestIzjava(1) -----> ()')
print('izjava=getTestIzjava(2) -----> ((X))')
print('izjava=getTestIzjava(3) -----> ((X) and ())')
print('izjava=getTestIzjava(4) -----> (() and (X))')
print('izjava=getTestIzjava(5) -----> ((X or notY or notZ) and (notX or notY) and (X or Z) and (notX or notQ))')
print('izjava=getTestIzjava(6) -----> ((X or Y or notZ) and (notX or notY) and (X or Z) and (X) and (Z or Y or notQ))')
print('izjava=getTestIzjava(7) -----> ((X) and (X or Y) and (notY or Z) and (notX))')
print('izjava=getTestIzjava(8) -----> ((X) and (notX) and (X or Q))')
print('izjava=getTestIzjava(9) -----> ((X or Y or notX) and (Q or Z))')
print('izjava=getTestIzjava(10) -----> ((Y or notQ) and (Y or notZ) and (X or notY) and (Y or Z or Q) and (X or notZ) and (X or notQ))')
print('')
print('Primer za sudoku: izjava = sudoku([[1,2,0,0],[3,0,1,0],[0,1,0,3],[0,0,2,1]])')
print('Izjavo lahko zgradite tudi sami, vendar je potrebno ustvariti vsako spremenljivko, ki jo boste uporabljali (glej Izjave.py)')
print('************************************************************************************************************')
izjava = sudoku([[1,2,0,0],[3,0,1,0],[0,1,0,3],[0,0,2,1]])
#izjava = sudoku([[5,3,0,0,7,0,0,0,0],[6,0,0,1,9,5,0,0,0],[0,9,8,0,0,0,0,6,0],[8,0,0,0,6,0,0,0,3],[4,0,0,8,0,3,0,0,1],[7,0,0,0,2,0,0,0,6],[0,6,0,0,0,0,2,8,0],[0,0,0,4,1,9,0,0,5],[0,0,0,0,8,0,0,7,9]])
#izjava = getTestIzjava(4)
#izjava = hadamard(4)
#izjava = hadamard(2)
DPLL(izjava)
pozdravnaMetoda()
| true |
f5d2fc135e2aa3a3d528990db1532c5f9814adb6 | Python | Danisdnk/PythonExerciseGuide | /TP2/2.1.py | UTF-8 | 584 | 3.953125 | 4 | [] | no_license | import random
# a Cargar una lista con números al azar de cuatro dígitos.
# La cantidad de elementos también será un número al azar de dos dígitos.
def tamañolista(elementos):
lista=[]
for i in range(elementos):
lista.append(random.randint(0, 99))
print(lista)
return lista
def eliminarvalorlista(lista):
n = int(input("elija que valor de la lista quiere eliminar :"))
lista.remove(n)
print(lista)
lista = random.randint(1 ,5)
resultadolista=tamañolista(lista)
print(sum(resultadolista)) # B
eliminarvalorlista(resultadolista) # C
| true |
622fe7d5f0550dbe4e875d4426425bdde7ad0134 | Python | CrispenGari/speech-to-text-python-ibm_watson | /main.py | UTF-8 | 1,238 | 2.796875 | 3 | [] | no_license |
# importing packages
from ibm_watson import SpeechToTextV1, ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
# service credentials
url = "API_KEY"
api_key = "URL"
# Setting the authentication
try:
auth = IAMAuthenticator(api_key)
stt = SpeechToTextV1(authenticator=auth)
stt.set_service_url(url)
except ApiException as e:
print(e)
# converting audio to speech
with open("audios/short.mp3", "rb") as audio:
res = stt.recognize(audio=audio, content_type="audio/mp3", model="en-AU_NarrowbandModel", continuous=True).get_result()
""""
* We are getting a python list of number of results
* We want to loop through them and create sentences
"""
sentences = res["results"]
sentence_list = []
for sentence in sentences:
# adding a sentence with confidence that is greater than 50%
sentence_list.append(str(sentence["alternatives"][0]["transcript"]).strip() if sentence["alternatives"][0]["confidence"] > 0.5 else "")
# print(json.dumps(sentence_list, indent=2))
with open("files/speech.txt", "w") as writter:
for line in sentence_list:
if line == "%HESITATION":
writter.write(",")
else:
writter.write(line+" ")
print("DONE")
| true |
8a2f3532ee14d902b14b5165e694034958ab0de4 | Python | GAIPS/ILU-RL | /tests/unit/utils.py | UTF-8 | 1,594 | 3.03125 | 3 | [
"MIT"
] | permissive | """This module provides common functionality among unit-tests"""
from ilurl.utils.aux import flatten
def process_pressure(kernel_data, incoming, outgoing, fctin=1, fctout=1, is_average=False):
timesteps = list(range(1,60)) + [0]
ret = 0
for t, data in zip(timesteps, kernel_data):
dat = get_veh_locations(data)
inc = filter_veh_locations(dat, incoming)
out = filter_veh_locations(dat, outgoing)
press = len(inc) / fctin - len(out) / fctout
if is_average:
ret += round(press, 4)
if is_average:
ret = round(ret / 60, 2)
else:
ret = round(press, 4)
return ret
def get_veh_locations(tl_data):
"""Help flattens hierarchial data
Params:
------
* tl_data: dict<str, dict<int, list<namedtuple<Vehicle>>>>
nested dict containing tls x phases x vehicles
Returns:
--------
* veh_locations: list<Tuple>
list containing triplets: veh_id, edge_id, lane
"""
# 1) Produces a flat generator with 3 informations: veh_id, edge_id, lane
gen = flatten([(veh.id, veh.edge_id, veh.lane)
for ph_data in tl_data.values()
for vehs in ph_data.values()
for veh in vehs])
# 2) generates a list of triplets
it = iter(gen)
ret = []
for x in it:
ret.append((x, next(it), next(it)))
return ret
def filter_veh_locations(veh_locations, lane_ids):
"""Help flattens hierarchial data"""
return [vehloc[0] for vehloc in veh_locations if vehloc[1:] in lane_ids]
| true |
77b34cdea78cb917781978b56e852713920aca2e | Python | ellenmliu/Data-Structures-and-Algorithms | /Data Structures/problem_3_Huffman_Coding.py | UTF-8 | 4,201 | 3.609375 | 4 | [] | no_license | import sys
class Node:
def __init__(self, char=None, frequency=None, left=None, right=None):
self.char = char
self.frequency = frequency
self.left = left
self.right = right
self.binary = ''
def get_right(self):
return self.right
def get_left(self):
return self.left
def has_right(self):
return self.right != None
def has_left(self):
return self.left != None
def __repr__(self):
return 'Node({}, {})'.format(self.char, self.frequency)
class Tree:
def __init__(self, root=None):
self.root = root
self.binary_codes = {}
self.has_unique = False
def get_root(self):
return self.root
def get_binary_codes(self, node):
if node == self.root and not node.has_right() and not node.has_left():
self.binary_codes[node.char] = '0'
return self.binary_codes
if node.has_left():
node.get_left().binary = node.binary + '0'
self.get_binary_codes(node.get_left())
if node.has_right():
node.get_right().binary = node.binary + '1'
self.get_binary_codes(node.get_right())
if node.char != None:
self.binary_codes[node.char] = node.binary
self.has_unique = True
return self.binary_codes
class PriorityQueue:
def __init__(self, initial_size=10):
self.queue = []
self.frontIndex = -1
self.nextIndex = 0
def enqueue(self, node):
self.queue.append(node)
self.queue = sorted(self.queue, key=lambda node: node.frequency)
def dequeue(self):
return self.queue.pop(0)
def size(self):
return len(self.queue)
def __repr__(self):
nodes = []
for i in self.queue:
nodes.append(repr(i))
return "PQ({})".format(", ".join(nodes))
def huffman_encoding(data):
if data is None or data == '':
print("Enter data to encode")
return None, None
frequency = {}
for char in data:
if char in frequency.keys():
frequency[char] += 1
else:
frequency[char] = 1
pq = PriorityQueue(len(frequency))
for char in frequency.keys():
pq.enqueue(Node(char, frequency[char]))
while pq.size() > 1:
first = pq.dequeue()
second = pq.dequeue()
new_node = Node(None, first.frequency + second.frequency, first, second)
pq.enqueue(new_node)
root = pq.dequeue()
huffman_tree = Tree(root)
binary_codes = huffman_tree.get_binary_codes(root)
encoded = ''
for char in data:
encoded += binary_codes[char]
return (encoded, huffman_tree)
def huffman_decoding(data, tree):
decoded = ''
current = tree.get_root()
index = 0
while index < len(data):
if not tree.has_unique:
decoded += current.char
else:
if data[index] == '0':
current = current.get_left()
if data[index] == '1':
current = current.get_right()
if not current.has_left() and not current.has_right():
decoded += current.char
current = tree.get_root()
index += 1
return decoded
if __name__ == "__main__":
test_cases = ["The bird is the word", "AAAAAAABBBCCCCCCCDDEEEEEE", "", "A", "AAAAAA"]
for a_great_sentence in test_cases:
encoded_data, tree = huffman_encoding(a_great_sentence)
if encoded_data:
print ("The size of the data is: {}\n".format(sys.getsizeof(a_great_sentence)))
print ("The content of the data is: {}\n".format(a_great_sentence))
print ("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2))))
print ("The content of the encoded data is: {}\n".format(encoded_data))
decoded_data = huffman_decoding(encoded_data, tree)
print ("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data)))
print ("The content of the encoded data is: {}\n".format(decoded_data))
else:
print("Entered data is empty")
| true |
e97a6de07ddd8f65bd32458cac3290a3233ebbe5 | Python | jonpemby/jobbr | /src/utilities.py | UTF-8 | 550 | 2.734375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | from termcolor import colored
def print_help():
print_header()
print_opt('queries-per-day', 'number of queries to perform each day')
exit(0)
def print_header():
print("{} ({} {})".format(
colored("jobbr", 'green'),
colored("Jonathon Pemberton", 'white'),
colored('<jonpemby@icloud.com>', 'grey')))
def print_opt(option, description):
print(" {} {}".format(
colored('--' + option, 'grey'),
colored(description, 'white')))
def print_err(message, code=1):
print(colored(message, 'red'))
exit(code)
| true |
0cdf2a0ff9382e9be3523d8e799caf5680473c9f | Python | elsid/CodeCraft | /scripts/helpers.py | UTF-8 | 329 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import json
def read_json(path):
with open(path) as stream:
return json.load(stream)
def write_json(data, path):
with open(path, 'w') as stream:
json.dump(data, stream, indent=4)
def read_lines(path):
with open(path) as stream:
return [v.strip() for v in stream]
| true |
120c71da747fb9ac66bdaa762bc062617c98514c | Python | rurinaL/coursera_python | /week2/7.py | UTF-8 | 410 | 3.390625 | 3 | [
"Unlicense"
] | permissive | cell11 = int(input())
cell12 = int(input())
cell21 = int(input())
cell22 = int(input())
step1 = cell11 - cell21
step2 = cell12 - cell22
if (abs(step1) % 2 == 0 and step2 == 0):
print('YES')
elif (abs(step2) % 2 == 0 and step1 == 0):
print('YES')
elif (step1 % 2 == 0 and step2 % 2 == 0):
print('YES')
elif (step1 % 2 != 0 and step2 % 2 != 0):
print('YES')
else:
print('NO')
| true |
4232c05bfd71256f1b7656ee7ce2410511c5b34f | Python | imran-iiit/LiveSessions | /Safari_Live/AaronMaxwell/31Jul18_NextLevel_pt2/labs/py3/decorators/my_decorator.py | UTF-8 | 228 | 3.375 | 3 | [] | no_license |
def add(increment):
def decorator(func):
def wrapper(*args, **kwargs):
return increment + func(*args, **kwargs)
return wrapper
return decorator
@add(3)
def f(n):
return n+2
print(f(4))
| true |
568830a50f868abdea71bd38e1c3807ccb45a8a7 | Python | Emmandez/MachineLearning_A-Z | /Part 2 - Regression/Section 6 - Polynomial Regression/Polynomial_Regression/polynomial_Regression_Template.py | UTF-8 | 997 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 22:08:40 2018
@author: eherd
"""
#Polynomial regression
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
#X has to be a matrix 10,1 matrix in this case
X = dataset.iloc[:,1:2].values
#Y has to be a vector
y = dataset.iloc[:,2].values
#Fitting regression Model to the dataset
y_pred = regressor.predict(6.5)
#Visualising the Regression results
plt.scatter(X,y, color="red")
plt.plot(X, regressor.predict(X), color="blue")
plt.title("Truth of Bluff (Regression Model) ")
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
#Predicting a new result with Linear regression (higher resolution)
X_grid = np.arange(min(X),max(X),0.1)
X_grid = X.grid.reshape((len(X_grid),1))
plt.scatter(X,y, color="red")
plt.plot(X, regressor.predict(X_grid), color="blue")
plt.title("Truth of Bluff (Regression Model) ")
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
| true |
47d5fb213a68201aec14e8c052c86762002241a9 | Python | Gavinxin/TrajectoryToKafka | /KafkaProducer2.py | UTF-8 | 3,905 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
@author: 真梦行路
@file: kafka.py
@time: 2018/9/3 10:20
'''
import pandas as pd
from kafka import KafkaProducer
from kafka.errors import KafkaError
import time
KAFAKA_HOST = "127.0.0.1" # 服务器端口地址
KAFAKA_PORT = 9092 # 端口号
KAFAKA_TOPIC = "track2" # topic
data = pd.read_table(r"C:\Users\Gavin\Desktop\T-drive Taxi Trajectories\release\taxi_log_2008_by_id\10.txt",encoding='utf-8',engine='python',names=['gpstrack'])
class Kafka_producer():
'''
生产模块:根据不同的key,区分消息
'''
def __init__(self, kafkahost, kafkaport, kafkatopic, key):
self.kafkaHost = kafkahost
self.kafkaPort = kafkaport
self.kafkatopic = kafkatopic
self.key = key
self.producer = KafkaProducer(bootstrap_servers='{kafka_host}:{kafka_port}'.format(
kafka_host=self.kafkaHost,
kafka_port=self.kafkaPort)
)
def sendjsondata(self, params):
try:
parmas_message = params # 注意dumps
producer = self.producer
producer.send(self.kafkatopic, key=self.key, value=parmas_message.encode('utf-8'))
producer.flush()
except KafkaError as e:
print(e)
# class Kafka_consumer():
#
# def __init__(self, kafkahost, kafkaport, kafkatopic, groupid, key):
# self.kafkaHost = kafkahost
# self.kafkaPort = kafkaport
# self.kafkatopic = kafkatopic
# self.groupid = groupid
# self.key = key
# self.consumer = KafkaConsumer(self.kafkatopic, group_id=self.groupid,
# bootstrap_servers='{kafka_host}:{kafka_port}'.format(
# kafka_host=self.kafkaHost,
# kafka_port=self.kafkaPort)
# )
#
# def consume_data(self):
# try:
# for message in self.consumer:
# yield message
# except KeyboardInterrupt as e:
# print(e)
def sortedDictValues(adict):
items = adict.items()
items = sorted(items, reverse=False)
return [value for key, value in items]
def main(xtype, group, key):
'''
测试consumer和producer
'''
while( True ):
for index, row in data.iterrows():
# 生产模块
producer = Kafka_producer(KAFAKA_HOST, KAFAKA_PORT, KAFAKA_TOPIC, key)
print("Start : %s" % time.ctime())
print("===========> producer:", producer)
time.sleep(1)
params = row['gpstrack']
producer.sendjsondata(params)
# if xtype == 'c':
# # 消费模块
# consumer = Kafka_consumer(KAFAKA_HOST, KAFAKA_PORT, KAFAKA_TOPIC, group, key)
# print("===========> consumer:", consumer)
#
# message = consumer.consume_data()
# for msg in message:
# msg = msg.value.decode('utf-8')
# python_data = json.loads(msg) ##这是一个字典
# key_list = list(python_data)
# test_data = pd.DataFrame()
# for index in key_list:
# print(index)
# if index == 'Month':
# a1 = python_data[index]
# data1 = sortedDictValues(a1)
# test_data[index] = data1
# else:
# a2 = python_data[index]
# data2 = sortedDictValues(a2)
# test_data[index] = data2
# print(test_data)
#
# # print('value---------------->', python_data)
# # print('msg---------------->', msg)
# # print('key---------------->', msg.kry)
# # print('offset---------------->', msg.offset)
if __name__ == '__main__':
main(xtype='p', group='py_test', key=None)
| true |
5c21ec2ebd5cd3c8b43c82bfafad04b0d9928495 | Python | pepijn809/menno_rest_api | /menno.py | UTF-8 | 4,027 | 2.828125 | 3 | [] | no_license | # Requirements
# - Flask, JSONify, Requests, DNSPython, Flask_PyMongo, Flask_HTTPAuth, Werkzeug.Security
from flask import Flask, jsonify, request, make_response
from flask_pymongo import PyMongo
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import generate_password_hash, check_password_hash
# Flask applicatie met HTTPAuth
app = Flask(__name__)
auth = HTTPBasicAuth()
# Geauthenticeerde gebruikers voor API
auth_users = {
"mike": generate_password_hash("mike"),
"pepijn": generate_password_hash("pepijn")
}
# MongoDB Instellingen
app.config['MONGO_DBNAME'] = 'menno' # MongoDB naam
#app.config['MONGO_URI'] = 'mongodb+srv://pepijn:pepijn@menno-cluster-miz8h.mongodb.net/menno' # MongoDB bij MongoDB Atlas op AWS
app.config['MONGO_URI'] = 'mongodb://mongo0.mikevdbrink.nl:27017,mongo1.mikevdbrink.nl:27017,mongo2.mikevdbrink.nl:27017/menno?replicaSet=cloud' # MongoDB Bij Google Cloud Platform (GPC)
mongo = PyMongo(app) # Variabel voor PyMongo
# Authenticatie functie - HTTPAuth op basis van 'auth_users' lijst
@auth.verify_password
def verify_password(username, password):
if username in auth_users:
return check_password_hash(auth_users.get(username), password)
return False
# / route - API Homepagina
@app.route('/')
@auth.login_required
def index():
return "Hoi, %s!" % auth.username()
# Flask Route - Users \ GET all documents from 'Users Collection'.
@app.route('/users', methods=['GET'])
@auth.login_required # Require Basic HTTPAuth
# Function 'get_all_users' - Vraag alle gebruikers op
def get_all_users():
framework = mongo.db.users
output = []
for q in framework.find():
output.append({'voornaam': q['voornaam'], 'achternaam': q['achternaam'], 'name': q['name'], 'plaats': q['plaats'], 'provincie': q['provincie'], 'adres': q['adres']})
# Return the output in JSON
return jsonify({'result': output})
# Flask Route - Users \ GET een specifiek document gebasseerd op de voornaam van een gebruiker in de 'Users Collectie'.
@app.route('/users/<voornaam>', methods=['GET'])
@auth.login_required # Require Basic HTTPAuth
# Function 'get_one_user' - Vraag gegevens van 1 gebruiker op op basis van voornaam
def get_one_user(voornaam):
users = mongo.db.users
q = users.find_one({'voornaam': voornaam})
if q:
output = {'voornaam': q['voornaam'], 'achternaam': q['achternaam'], 'name': q['name'], 'plaats': q['plaats'], 'provincie': q['provincie'], 'adres': q['adres']}
else:
output = 'Sorry! We konden geen gebruikers vinden op basis van de voornaam die je hebt ingevuld.'
# Return the output in JSON
return jsonify({'result': output})
# Flask Route - Users \ POST een specifiek document gebasseerd op gebruikersdetails naar de 'Users Collectie'.
@app.route('/users', methods=['POST'])
@auth.login_required # Require Basic HTTPAuth
# Function 'add_user' - Voeg een gebruiker toe op basis van JSON data
def add_user():
users = mongo.db.users
# Variablen voor de waardes uit JSON form
voornaam = request.json['voornaam']
achternaam = request.json['achternaam']
name = request.json['name']
adres = request.json['adres']
provincie = request.json['provincie']
plaats = request.json['plaats']
password = request.json['password']
# Insert gebruikers in de DB met behulp van variablen uit JSON
users_id = users.insert({'name' : name, 'adres' : adres, 'provincie' : provincie, 'plaats' : plaats, 'voornaam' : voornaam, 'achternaam' : achternaam, 'password' : password})
new_users = users.find_one({'_id' : users_id}) # Gebruiker _id mongo
# De output van de post request
output = {'voornaam' : new_users['voornaam'], 'achternaam' : new_users['achternaam'], 'name' : new_users['name'], 'adres' : new_users['adres'], 'plaats' : new_users['plaats'], 'provincie' : new_users['provincie']}
# Return in JSON
return jsonify({'result' : output})
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, ssl_context='adhoc')
| true |
7c8069ac6199568d8f73a353fcd4be15e796ca28 | Python | ahadcove/temperature-fan | /temp.py | UTF-8 | 1,087 | 2.8125 | 3 | [] | no_license | import os
import glob
import time
from config import *
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
last_state = False
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
global last_state
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
if temp_f > 78 and not last_state:
last_state = True
os.system('tplink-smarthome-api setPowerState %s 1' % power_ip)
elif temp_f < 78 and last_state:
os.system('tplink-smarthome-api setPowerState %s 0' % power_ip)
last_state = False
return temp_f, temp_c
while True:
print('Fahrenheit:', read_temp()[0], 'Celsius: ', read_temp()[1])
time.sleep(1) | true |
3d741f5ce7af2757abe440a86b89ecaa7304c6c6 | Python | jayanthsarma8/py4e-assignments | /9 th chapter assignment.py | UTF-8 | 337 | 2.859375 | 3 | [] | no_license | na=input("")
ha=open(na)
d=dict()
for i in ha :
i=i.rstrip()
if not i.startswith("From "):
continue
wrd=i.split()
if len(wrd) < 3 :
continue
w=str(wrd[1])
d[w]=d.get(w,0)+1
ma=0
key=None
for l,m in d.items() :
if m > ma:
ma = m
key=w
print(key, ma)
| true |
8764615feaaf3e4145c16489db398d970262ee5a | Python | kenluuu/LeetCode | /combination-sum-III.py | UTF-8 | 653 | 2.828125 | 3 | [] | no_license | class Solution(object):
def combinationSum3(self, k, n):
"""
:type k: int
:type n: int
:rtype: List[List[int]]
"""
res = []
def combinationSum3Util(i, sum, nums):
if len(nums) > k: return
if sum == n and len(nums) == k:
res.append(nums)
return True
while i <= n-k:
if sum + i + 1 > n or i+1 > 9:
return
combinationSum3Util(i+1, sum+i+1, nums + [i+1])
i += 1
for i in range(1, n-k):
combinationSum3Util(i, i, [i])
return res
| true |
a16f9b36c82f8bcf137b0686a2a2031ea76c1a82 | Python | bryn-sorli/Intro_to_Robotics | /Labs/lab6/lab6.py | UTF-8 | 14,511 | 2.734375 | 3 | [] | no_license | import time
import json
import rospy
import copy
import math
import random
import argparse
from PIL import Image, ImageDraw
import numpy as np
from pprint import pprint
from geometry_msgs.msg import Pose2D
from std_msgs.msg import Float32MultiArray, Empty, String, Int16
g_CYCLE_TIME = 0.1 # seconds
# Parameters you might need to use which will be set automatically
MAP_SIZE_X = None
MAP_SIZE_Y = None
# Default parameters will create a 4x4 grid to test with
# g_MAP_SIZE_X = 2. # 2m wide
g_MAP_SIZE_X = 1.8 # 2m wide
# g_MAP_SIZE_Y = 1.5 # 1.5m tall
g_MAP_SIZE_Y = 1.2 # 1.5m tall
# g_MAP_RESOLUTION_X = 0.5 # Each col represents 50cm
g_MAP_RESOLUTION_X = 0.05625 # Each col represents 6.25cm
# g_MAP_RESOLUTION_Y = 0.375 # Each row represents 37.5cm
g_MAP_RESOLUTION_Y = 0.0375 # Each row represents 4.6875cm
g_NUM_X_CELLS = int(g_MAP_SIZE_X // g_MAP_RESOLUTION_X) # Number of columns in the grid map
g_NUM_Y_CELLS = int(g_MAP_SIZE_Y // g_MAP_RESOLUTION_Y) # Number of rows in the grid map
# Map from Lab 4: values of 0 indicate free space, 1 indicates occupied space
g_WORLD_MAP = [0] * g_NUM_Y_CELLS*g_NUM_X_CELLS # Initialize graph (grid) as array
# Source and Destination grid coordinates (i, j)
g_dest_coordinates = (3, 1)
g_src_coordinates = (0, 0)
# Lab 4 Globals
pose2d_sparki_odometry = None # Pose2D, contains x, y, theta
servo_angle = 0
IR_sensors = None
# My globals
g_waypoints = []
g_dest_x = None
g_dest_y = None
def _load_img_to_intensity_matrix(img_filename):
    '''
    Helper function to read the world image containing obstacles
    YOu should not modify this

    Returns a (height x width) numpy array where each entry is the inverted
    red-channel intensity (255 - R): dark obstacle pixels map to high values.
    Side effect: sets the module-level MAP_SIZE_X / MAP_SIZE_Y from the image.
    '''
    global MAP_SIZE_X, MAP_SIZE_Y
    if img_filename is None:
        # No obstacle image supplied: all-free grid of a default pixel size.
        grid = np.zeros([800,1200])
        return grid
    img = Image.open(img_filename)
    MAP_SIZE_X = img.width
    MAP_SIZE_Y = img.height
    grid = np.zeros([img.height, img.width])
    for y in range(img.height):
        for x in range(img.width):
            pixel = img.getpixel((x,y))
            grid[y,x] = 255 - pixel[0] # Dark pixels have high values to indicate being occupied/having something interesting
    return grid
def vertex_index_to_ij(vertex_index):
    '''
    vertex_index: unique ID of graph vertex to be converted into grid coordinates

    Returns COL, ROW coordinates in the 2D grid (row-major layout with
    g_NUM_X_CELLS columns per row).
    '''
    global g_NUM_X_CELLS
    row, col = divmod(vertex_index, g_NUM_X_CELLS)
    return col, row
def ij_to_vertex_index(i,j):
    '''
    i: Column of grid map
    j: Row of grid map

    Returns the integer 'vertex index' of cell (i, j) in the row-major
    flattened grid (j full rows of g_NUM_X_CELLS cells, plus column i).
    '''
    global g_NUM_X_CELLS
    return i + j * g_NUM_X_CELLS
def ij_coordinates_to_xy_coordinates(i,j):
    '''
    i: Column of grid map
    j: Row of grid map

    Returns (X, Y) world coordinates in meters at the center of grid
    cell (i, j) -- hence the half-cell offset before scaling.
    '''
    global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y
    center_x = (i + 0.5) * g_MAP_RESOLUTION_X
    center_y = (j + 0.5) * g_MAP_RESOLUTION_Y
    return center_x, center_y
def xy_coordinates_to_ij_coordinates(x,y):
    '''
    x: world X coordinate in meters
    y: world Y coordinate in meters

    Returns the integer grid cell (i, j) that contains world point (x, y)
    (floor division by the per-axis cell resolution).
    '''
    global g_MAP_RESOLUTION_X, g_MAP_RESOLUTION_Y
    col = int(x // g_MAP_RESOLUTION_X)
    row = int(y // g_MAP_RESOLUTION_Y)
    return col, row
def get_travel_cost(vertex_source, vertex_dest):
    '''
    Edge weight between two grid vertices.

    Returns 1 when the two vertices are 4-connected neighbours (N/E/S/W)
    and neither cell is occupied in g_WORLD_MAP; returns 1000 (effectively
    "no edge") for out-of-bounds vertices, occupied cells, or any pair that
    is not exactly one grid step apart.
    '''
    global g_WORLD_MAP
    total_cells = g_NUM_Y_CELLS * g_NUM_X_CELLS
    # Either endpoint outside the grid -> impassable.
    if not (0 <= vertex_source < total_cells):
        return 1000
    if not (0 <= vertex_dest < total_cells):
        return 1000
    # Either endpoint occupied -> impassable.
    if g_WORLD_MAP[vertex_source] == 1 or g_WORLD_MAP[vertex_dest] == 1:
        return 1000
    src_i, src_j = vertex_index_to_ij(vertex_source)
    dst_i, dst_j = vertex_index_to_ij(vertex_dest)
    # Manhattan distance of exactly 1 <=> 4-connected neighbours.
    if abs(src_i - dst_i) + abs(src_j - dst_j) == 1:
        return 1
    return 1000
def run_dijkstra(source_vertex):
    '''
    source_vertex: vertex index to find all paths back to

    returns: 'prev' array from a completed Dijkstra's algorithm run

    prev[vertex_index] holds the previous vertex on the cheapest known path
    from source_vertex to vertex_index (-1 when unreached), so it can be
    used as a lookup table while reconstructing paths.
    '''
    global g_NUM_X_CELLS, g_NUM_Y_CELLS
    num_vertices = g_NUM_X_CELLS * g_NUM_Y_CELLS

    # dist[v]: cost of the cheapest known path from source_vertex to v.
    dist = [num_vertices * 0 + 1000 for _ in range(num_vertices)] if False else [1000] * num_vertices
    dist[source_vertex] = 0

    # Frontier of (vertex, cost) pairs, kept sorted so the cheapest vertex
    # is expanded first.  (A heap would be asymptotically better, but the
    # grid is tiny and this keeps the module dependency-free.)
    Q_cost = [(source_vertex, 0)]

    prev = [-1] * num_vertices

    while Q_cost:
        u, c = Q_cost.pop(0)
        if c > dist[u]:
            continue  # stale queue entry: u was already settled more cheaply
        # Candidate 4-neighbours; get_travel_cost() prices out-of-bounds and
        # row-wrapping pairs at 1000, so they never relax anything.
        neighbors = [u - 1, u + 1, u - g_NUM_X_CELLS, u + g_NUM_X_CELLS]
        for v in neighbors:
            # FIX: the original wrote this bounds test twice in one `and`
            # chain ("v < N and v >= 0 and v < N and v >= 0").
            if 0 <= v < num_vertices:
                alt = dist[u] + get_travel_cost(u, v)
                if alt < dist[v]:
                    dist[v] = alt
                    prev[v] = u
                    Q_cost.append((v, alt))
        Q_cost = sorted(Q_cost, key=lambda entry: entry[1])

    # Return results of algorithm run
    return prev
def reconstruct_path(prev, source_vertex, dest_vertex):
    '''
    Walk a populated 'prev' array backwards from dest_vertex to
    source_vertex and return the path as a list of vertex indices,
    source first and destination last.

    Returns [] (after printing a diagnostic) when a -1 is hit before
    reaching the source, i.e. no path exists.
    '''
    path = [dest_vertex]
    node = dest_vertex
    while node != source_vertex:
        parent = prev[node]
        if parent == -1:
            print('There is probably not a path')
            return []
        path.insert(0, parent)
        node = parent
    return path
def render_map(map_array):
    '''
    Display the map in the following format:
    Use " . " for free grid cells
    Use "[ ]" for occupied grid cells

    Renders with (i, j) = (0, 0) in the bottom-left, so rows are printed
    from j = g_NUM_Y_CELLS-1 down to 0.
    '''
    lines = []
    for j in range(g_NUM_Y_CELLS - 1, -1, -1):
        # BUG FIX: the original indexed with g_NUM_Y_CELLS as the row
        # stride; the map is row-major with g_NUM_X_CELLS columns, so the
        # two only coincided while the grid happened to be square.
        cells = []
        for i in range(g_NUM_X_CELLS):
            occupied = map_array[j * g_NUM_X_CELLS + i] != 0
            cells.append("[ ]" if occupied else " . ")
        lines.append("".join(cells))
    print("\n".join(lines))
def _draw_path_on_image(path, image_filename):
    '''
    Path is a list of vertices
    image_filename is the image to be drawn on

    Converts each vertex to world (x, y) meters, then to pixel coordinates
    (0.0015 m per pixel; y flipped because the image origin is top-left),
    draws the polyline in red, saves it as image1.png and opens a viewer.
    '''
    if image_filename is None:
        raise Exception("Image file not found.")
    img = Image.open(image_filename)
    xys = []
    for p in path:
        x, y = ij_coordinates_to_xy_coordinates(*vertex_index_to_ij(p))
        # 1.2 m is the world height; subtracting flips the y axis.
        xys.append((x / 0.0015, (1.2 - y) / 0.0015))
    draw = ImageDraw.Draw(img)
    draw.line(xys, fill="red", width=10)
    img.save("image1.png", "PNG")
    img.show()
    return
def part_2(args):
    """Build the occupancy grid from the obstacle image, plan with Dijkstra,
    draw the plan on the image, and fill g_waypoints with world-coordinate
    waypoints for the robot controller."""
    global g_src_coordinates, g_dest_coordinates
    global g_WORLD_MAP
    global g_waypoints
    # pixel_grid has intensity values for all the pixels
    # You will have to convert it to the earlier 0 and 1 matrix yourself
    pixel_grid = _load_img_to_intensity_matrix(args.obstacles)
    g_src_coordinates = (float(args.src_coordinates[0]), float(args.src_coordinates[1]))
    g_dest_coordinates = (float(args.dest_coordinates[0]), float(args.dest_coordinates[1]))
    '''
    1) Compute the g_WORLD_MAP -- depending on the resolution, you need to decide if your cell is an obstacle cell or a free cell.
    2) Run Dijkstra's to get the plan
    3) Show your plan/path on the image

    Feel free to add more helper functions
    '''
    pixel_height = len(pixel_grid)
    pixel_width = len(pixel_grid[0])
    print(pixel_height, 1.2 / pixel_height)
    print(pixel_width, 1.8 / pixel_width)
    # Mark fully-saturated pixels (intensity 255) as obstacle cells; the row
    # index is flipped because image origin is top-left but world origin is
    # bottom-left.  0.0015 is the meters-per-pixel scale.
    for y in range(pixel_height):
        for x in range(pixel_width):
            if pixel_grid[pixel_height-1-y, x] == 255.0:
                i, j = xy_coordinates_to_ij_coordinates(0.0015 * x, 0.0015 * y)
                vertex_index = ij_to_vertex_index(i, j)
                g_WORLD_MAP[vertex_index] = 1
    # render_map(g_WORLD_MAP)
    # Plan from source to goal on the occupancy grid.
    prev = run_dijkstra(ij_to_vertex_index(*xy_coordinates_to_ij_coordinates(*g_src_coordinates)))
    path = reconstruct_path(prev, ij_to_vertex_index(*xy_coordinates_to_ij_coordinates(*g_src_coordinates)), ij_to_vertex_index(*xy_coordinates_to_ij_coordinates(*g_dest_coordinates)))
    _draw_path_on_image(path, args.obstacles)
    print("Source: " + str(g_src_coordinates))
    print("Goal: " + str(g_dest_coordinates))
    print("Path: " + " -> ".join([str(p) for p in path]))
    print("Waypoints (in world coordinates):")
    for p in path:
        x, y = ij_coordinates_to_xy_coordinates(*vertex_index_to_ij(p))
        g_waypoints.append((x, y))
        print((x, y))
def loop():
    """One control cycle: steer toward the current waypoint (g_dest_x/y).

    Turn-then-drive bang-bang controller: first rotate until the bearing
    error is small, then drive straight until the distance error is small;
    on arrival pop the next waypoint from g_waypoints.

    Returns False once the final waypoint has been reached, True otherwise.
    """
    global g_dest_x, g_dest_y
    running = True
    # Need goal world coordinates and current world coordinates
    curr_x, curr_y, curr_theta = pose2d_sparki_odometry.x, pose2d_sparki_odometry.y, pose2d_sparki_odometry.theta
    # Figure out dest_theta by pointing to dest_x, dest_y
    dest_theta = math.atan2(g_dest_y - curr_y, g_dest_x - curr_x)
    # Compute error functions
    d_err = math.sqrt(math.pow(curr_x - g_dest_x, 2) + math.pow(curr_y - g_dest_y, 2))
    b_err = dest_theta - curr_theta
    # Keep variables in appropriate range
    if b_err > math.pi: b_err -= 2 * math.pi
    if b_err <= -math.pi: b_err += 2 * math.pi
    # Fix error
    motor_movement = Float32MultiArray()
    if abs(b_err) > 0.02: # about 1 degree
        # Fix bearing error
        if b_err >= 0:
            motor_movement.data = [1.0, -1.0] # turn left
        else:
            motor_movement.data = [-1.0, 1.0] # turn right
    elif abs(d_err) > 0.01: # about 1/4 the distance between cell center points
        # Fix distance error
        motor_movement.data = [1.0, 1.0]
    else:
        # Reached waypoint, set goal to next waypoint if available
        motor_movement.data = [0.0, 0.0]
        if g_waypoints:
            g_dest_x, g_dest_y = g_waypoints.pop(0)
        else:
            running = False
    # Send movement to simulator
    publisher_motor.publish(motor_movement)
    publisher_render.publish(Empty())
    return running
def main():
    """Initialize the node, then run the control loop at g_CYCLE_TIME until
    the final waypoint is reached or ROS shuts down."""
    init()
    while not rospy.is_shutdown():
        # Implement cycle time
        starting_time = time.time()
        if not loop():
            print("I have reached the destination.")
            break
        # Implement cycle time
        difference_time = time.time() - starting_time
        # NOTE(review): if loop() overruns the cycle this sleeps a negative
        # duration -- confirm rospy.sleep() tolerates that as intended.
        rospy.sleep(g_CYCLE_TIME - difference_time)
def init():
    """Create ROS publishers/subscribers, zero the simulated odometry to the
    source coordinates, and load the first waypoint into g_dest_x/g_dest_y.
    Assumes part_2() already populated g_waypoints (pop(0) raises otherwise).
    """
    global publisher_motor, publisher_ping, publisher_servo, publisher_odom, publisher_render
    global subscriber_odometry, subscriber_state
    global pose2d_sparki_odometry
    global g_dest_x, g_dest_y
    # Set up publishers and subscribers
    rospy.init_node("lab6")
    publisher_motor = rospy.Publisher("/sparki/motor_command", Float32MultiArray, queue_size=10)
    publisher_ping = rospy.Publisher("/sparki/ping_command", Empty, queue_size=10)
    publisher_servo = rospy.Publisher("/sparki/set_servo", Int16, queue_size=10)
    publisher_odom = rospy.Publisher("/sparki/set_odometry", Pose2D, queue_size=10)
    publisher_render = rospy.Publisher("/sparki/render_sim", Empty, queue_size=10)
    subscriber_odometry = rospy.Subscriber("/sparki/odometry", Pose2D, callback_update_odometry)
    subscriber_state = rospy.Subscriber("/sparki/state", String, callback_update_state)
    # Give the publishers/subscribers a moment to connect before publishing.
    rospy.sleep(1)
    # Set up initial odometry pose (pose2d_sparki_odometry) as a new Pose2D message object
    pose2d_sparki_odometry = Pose2D()
    # Initialize odometry
    init_odometry = Pose2D()
    init_odometry.x, init_odometry.y, init_odometry.theta = g_src_coordinates[0], g_src_coordinates[1], 0
    publisher_odom.publish(init_odometry)
    # Set initial waypoint goal
    g_dest_x, g_dest_y = g_waypoints.pop(0)
def callback_update_odometry(data):
    """ROS callback: cache the latest Pose2D odometry message globally."""
    # Receives geometry_msgs/Pose2D message
    global pose2d_sparki_odometry
    # Copy data into local odometry variable
    pose2d_sparki_odometry = data
def callback_update_state(data):
    """ROS callback: parse the JSON robot state and cache the light-sensor
    readings and servo angle in module globals."""
    state_dict = json.loads(data.data) # Creates a dictionary object from the JSON string received from the state topic
    # Load data into program's state variables
    global IR_sensors, servo_angle
    IR_sensors = state_dict["light_sensors"]
    servo_angle = state_dict["servo"]
    # Ultrasonic data is not needed for this lab; warn if it shows up.
    if "ping" in state_dict and state_dict["ping"] != -1:
        print("You should not be asking for ultrasonic data.")
if __name__ == "__main__":
    # Parse CLI options (world-coordinate source/goal and the obstacle
    # image), plan the path offline, then run the online controller.
    parser = argparse.ArgumentParser(description="Dijkstra on image file")
    parser.add_argument('-s','--src_coordinates', nargs=2, default=[1.2, 0.2], help='Starting x, y location in world coords')
    parser.add_argument('-g','--dest_coordinates', nargs=2, default=[0.3, 0.7], help='Goal x, y location in world coords')
    parser.add_argument('-o','--obstacles', nargs='?', type=str, default='obstacles_test1.png', help='Black and white image showing the obstacle locations')
    args = parser.parse_args()
    # part_1()
    part_2(args)
    main()
| true |
fe3c67f89b15da7e94c1f2830941f7be33a46460 | Python | maze508/Misc.-Finance-Scripts | /DCC/dcc_combined.py | UTF-8 | 9,117 | 3.109375 | 3 | [] | no_license | import datetime as dt
import pandas_datareader as pdr
from datetime import datetime
import plotly.graph_objects as go
#####################################
'''API and Setting of Parameters'''
#####################################
# Date Settings
now = datetime.now()
end_date = dt.datetime.now()
pair = 'EURUSD=X'
start_date = end_date - dt.timedelta(days=10 * 365)
# Threshold Settings
Threshold = 0.02
#################
'''DCC Process'''
#################
#Should be in main Script
def raw_data_lists(pair, start_date, end_date):
    """
    APIs from Yahoo Finance and returns arrays of useful information and details
    :param pair: Chosen Pair
    :param start_date: Start Date
    :param end_date: End Date
    :return: Arrays of Dates, Close, High, Low, Index and Dataframe
    """
    # Modifying DF Columns for Dates
    df = pdr.get_data_yahoo(pair, start_date, end_date)
    df.insert(1, "Dates", df.index, True)
    # Keep only the YYYY-MM-DD part of the timestamp string.
    df["Modified Dates"] = df['Dates'].astype(str).str[:10]
    # Extracting DF values into a list
    list_of_close = df["Close"].values.tolist()
    list_of_highs = df["High"].values.tolist()
    list_of_lows = df["Low"].values.tolist()
    list_of_dates = df["Modified Dates"].values.tolist()
    # Positional index array aligned with the price/date lists.
    index_list = [i for i in range(len(list_of_close))]
    return list_of_dates, list_of_close, list_of_highs, list_of_lows, index_list, df
# Should be in Main Script
def DCC_Process(list_of_dates, list_of_close, list_of_highs, list_of_lows, index_list, Threshold):
    """
    Obtains arrays of points for DC and OS Events in DCC
    :param list_of_dates: Array of dates
    :param list_of_close: Array of close prices
    :param list_of_highs: Array of high prices
    :param list_of_lows: Array of low prices
    :param index_list: Array of index values
    :param Threshold: Hyperparameter (Change in Global)
    :return: Arrays of OS and DC Points with its corresponding Dates

    Directional-Change (DC) detection on the close series: a DC event is
    confirmed when price reverses by Threshold from the running extreme;
    the preceding extreme becomes the Overshoot (OS) point.
    NOTE(review): list_of_highs / list_of_lows are accepted but unused here.
    """
    upturn_event = True
    # Defining initial price as highest and lowest
    highest_price = lowest_price = list_of_close[0]
    dc_points_index = []
    os_points_index = []
    os_points_list = []
    dc_points_list = []
    dc_date_list = []
    os_date_list = []
    # os_up / os_down track the index of the running extreme in each regime.
    os_up = 0
    os_down = 0
    for i in range(len(list_of_close)):
        # Defines Current Price
        current_price = list_of_close[i]
        if upturn_event:
            # If upturn event and price has reversed to a set threshold below the max price --> downturn event
            if current_price <= highest_price * (1 - Threshold):
                upturn_event = False
                # Tracks Lowest Price from now on since we are on a downturn event
                lowest_price = current_price
                # Append Current Index as DC Point Index
                dc_points_index.append(index_list[i])
                # Considering the Case if 2 DCC events occur immediately after each other
                if index_list[os_up] in os_points_index:
                    os_points_index.append(dc_points_index[-2])
                    os_up = dc_points_index[-2]
                # Normal Case where the 2 DCC events occur > 1 period apart
                else:
                    os_points_index.append(index_list[os_up])
            else:
                # During Upturn event, if current price is greater than the highest price, record it as the highest price
                if highest_price < current_price:
                    highest_price = current_price
                    os_up = i
        else:
            # if downturn event and price has reversed to a set threshold above the max price --> upturn event
            if current_price >= lowest_price * (1 + Threshold):
                upturn_event = True
                # Tracks Highest Price from now on since we are on an upturn event
                highest_price = current_price
                # Append Current Index as DC Point Index
                dc_points_index.append(index_list[i])
                # Considering the Case if 2 DCC events occur immediately after each other
                if index_list[os_down] in os_points_index:
                    os_points_index.append(dc_points_index[-2])
                    os_down = dc_points_index[-2]
                    # print('Quick Downturn Detected at Index ({}), DCC Index Value calibrated to Index ({})'
                    #     .format(i, dc_points_index[-2]))
                # Normal Case where the 2 DCC events occur > 1 period apart
                else:
                    os_points_index.append(index_list[os_down])
            else:
                # During Downturn event, if current price is lower than the lowest price, record it as the lowest price
                if lowest_price > current_price:
                    lowest_price = current_price
                    os_down = i
    # Access Indexes for List of Close and Dates for both DC and OS points
    for i in os_points_index:
        os_points_list.append(list_of_close[i])
        os_date_list.append(list_of_dates[i])
    for x in dc_points_index:
        dc_points_list.append(list_of_close[x])
        dc_date_list.append(list_of_dates[x])
    print()
    print('Number of DC and OS Points : ', len(os_points_list))
    print()
    return os_points_list, os_date_list, dc_points_list, dc_date_list
def vertical_distance(os_points_list, os_date_list, dc_points_list, dc_date_list):
    # Consider Separating it into Positive and Negative values --> Keeping track of Up/Down Trends
    """
    Checking the Price value differences between OS and DC Points for back testing Purposes
    :param os_points_list: Array of OS Points
    :param os_date_list: Array of Corresponding OS Point Dates
    :param dc_points_list: Array of DC Points
    :param dc_date_list: Array of Corresponding DC Point Dates
    :return no_of_zeroes: Number of occurrences where the difference between the current OS and current DC is 0
    :return one_count: Number of occurrences where the size of OS is at least the size of DC
    :return count: Number of occurrences where the size of the OS is at least the value of the Average Ratio of OS : DC

    NOTE(review): reads the module-level `pair` global to decide the pip
    scale; a DC move that rounds to 0.0 pips would raise ZeroDivisionError
    in the ratio loop -- confirm input guarantees.
    """
    dc_dist_list = []
    os_dist_list = []
    percent_vertical_dist = []
    counter = 1
    # Iterating through all the DC and OS points and finding the differences between each point
    while counter < len(os_points_list):
        # Appending the DC Lengths
        dcc = abs(os_points_list[counter-1] - dc_points_list[counter-1])
        dc_dist_list.append(dcc)
        # Appending the OS Lengths
        oss = abs(os_points_list[counter] - dc_points_list[counter-1])
        os_dist_list.append(oss)
        counter += 1
    # Changing the Value to be in terms of PIPS
    # (JPY pairs quote 2 decimal places, others 4, hence 100 vs 10000.)
    if 'JPY' in pair:
        dc_dist_list = [round(i*100, 1) for i in dc_dist_list]
        os_dist_list = [round(i*100, 1) for i in os_dist_list]
    else:
        dc_dist_list = [round(i*10000, 1) for i in dc_dist_list]
        os_dist_list = [round(i*10000, 1) for i in os_dist_list]
    # Changing the OS pip distance to be of a percentage WRT the corresponding DC move
    for i in range(len(dc_dist_list)):
        percent_vertical_dist.append((round(os_dist_list[i]/dc_dist_list[i], 1)))
    # Calculating the Average Distance %
    avg_percent_vert_dist = sum(percent_vertical_dist)/len(percent_vertical_dist)
    one_count = 0
    avg_count = 0
    point_eight_count = 0
    one_point_five_count = 0
    two_count = 0
    # Tracking the counters for various test parameters
    for i in range(len(percent_vertical_dist)):
        # Tracking if value is greater than the average % distance moved by the OS event WRT the DC event
        if percent_vertical_dist[i] >= avg_percent_vert_dist:
            avg_count += 1
        # Tracking if OS price movement is greater than the DC price movement
        if percent_vertical_dist[i] >= 1:
            one_count += 1
        # Tracking if OS price movement is greater than 80% of the DC price movement
        if percent_vertical_dist[i] >= 0.8:
            point_eight_count += 1
        if percent_vertical_dist[i] >= 1.5:
            one_point_five_count += 1
        if percent_vertical_dist[i] >= 2:
            two_count += 1
    # Tracking the number of occurrences where the DC Points == OS Point
    no_of_zeroes = 0
    for i in percent_vertical_dist:
        if i == 0:
            no_of_zeroes += 1
    # Prints Useful Set of Information
    # print('% Vertical Distance :', percent_vertical_dist)
    print('Average Value of OS/DC Distance :', avg_percent_vert_dist)
    print('Number of DC and OS Events :', len(percent_vertical_dist))
    print('No. of OS events where OS == 0 :', no_of_zeroes)
    print('No. of times where OS > DC by the Average Value :', avg_count)
    print('No. of times where OS > DC :', one_count)
    print('No. of times where OS is 80% of DC :', point_eight_count)
    print('No. of times where OS is 150% of DC :', one_point_five_count)
    print('No. of times where OS is 200% of DC :', two_count)
    return no_of_zeroes, one_count, avg_count
| true |
3b3a28cafee8e305875314d1fa679a5170a507a5 | Python | aswmtjdsj/CSE537-S15 | /proj-1/solution.py | UTF-8 | 3,916 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
import sys, os
import copy
from solution_ids import IDS
from solution_astar import *
from guppy import hpy
BOARD_ROW = 7
BOARD_COL = 7
board = [] # supposed to be 7 * 7 array
if __name__ == '__main__':
    # Python 2 script: validate CLI args, load the 7x7 board, map playable
    # cells to sequential ids, run the chosen search, and replay the moves.
    # print sys.argv
    if len(sys.argv) < 2:
        raise Exception('''command should be "python solution.py <data_file> <strategy> {--option=\'--memory_profile\'}"
                You Should Specify a file name''')
    if len(sys.argv) < 3:
        raise Exception('''command should be "python solution.py <data_file> <strategy> {--option=\'--memory_profile\'}"
                You Should Choose Strategy from "--IDS" or "--ASTAR"''')
    try:
        with open(sys.argv[1]) as f:
            for line in f:
                board.append(list(line.strip()))
    except Exception as e:
        if e.__class__ == IOError:
            raise Exception('''command should be "python solution.py <data_file> <strategy> {--option=\'--memory_profile\'}"
                    File does not exist! You Should Specify a valid data/input file''')
        else:
            raise e
    profile_flag = False
    if len(sys.argv) > 3 and sys.argv[3] == '--memory_profile':
        profile_flag = True
    print ''
    print 'Board Information >>>>'
    for i in board:
        print ''.join(i)
    print ''
    if len(board) != BOARD_ROW:
        raise Exception("[ERROR] Board Row != {0}".format(BOARD_ROW))
    print "Row: {0}".format(len(board))
    if len(board[0]) != BOARD_COL:
        raise Exception("[ERROR] Board Column != {0}".format(BOARD_COL))
    print "Column: {0}".format(len(board[0]))
    print ''
    # Assign each playable cell (anything but '-') a sequential id used to
    # translate (row, col) moves into compact move ids for display.
    index_mapping_cnt = 0
    index_mapping = copy.deepcopy(board)
    for idx, row in enumerate(board):
        for jdx, grid in enumerate(row): # use for (x, y) to (mapping_id) tranformation
            if grid != '-':
                index_mapping[idx][jdx] = index_mapping_cnt
                index_mapping_cnt += 1
    print 'Index Mapping'
    for i in index_mapping:
        print i
    print ''
    try:
        result = None
        number_of_nodes = 0 # count number of expanded nodes
        if sys.argv[2] == '--IDS':
            (result, number_of_nodes, memory_profiled) = IDS(board, profile_flag)
        elif sys.argv[2] == '--ASTAR':
            (result, number_of_nodes, memory_profiled) = ASTAR(board, profile_flag)
        print "Number of Expanded Nodes: {0}".format(number_of_nodes)
        print ''
        if profile_flag == True:
            print "Peak Memory During Search Procedure: {0:f} KiB".format(memory_profiled/1024.)
            print ''
        if result != None:
            # The search returns moves goal-first; reverse to play forward.
            result.reverse()
            show_result = map(lambda x: (index_mapping[x[0][0]][x[0][1]], index_mapping[x[1][0]][x[1][1]]), result)
            print 'Solution Found >>>>'
            # print result
            print show_result
            print ''
            print 'Solution moves >>>>'
            print ''
            move_cnt = 0
            print move_cnt
            move_board = copy.deepcopy(board)
            for i in board:
                print ''.join(i)
            print ''
            for i in result:
                move_cnt += 1
                print move_cnt
                # A move jumps from_peg over the midpoint peg into to_grid.
                from_peg = i[0]
                to_grid = i[1]
                next_peg = ((i[0][0]+i[1][0])/2, (i[0][1]+i[1][1])/2)
                # print from_peg
                # print next_peg
                # print to_grid
                move_board[from_peg[0]][from_peg[1]] = 'O'
                move_board[next_peg[0]][next_peg[1]] = 'O'
                move_board[to_grid[0]][to_grid[1]] = 'X'
                for j in move_board:
                    print ''.join(j)
                print ''
    except Exception as e:
        # print dir(e)
        # if e.__class__ == NameError:
        #     raise Exception('ASTAR() or IDS() method hasn\'t been implemented')
        raise e
| true |
b18b7b53d3fd033a7d0f274334c99cca544cf938 | Python | RomanDubinin/CVRP | /src/inside_customers_heuristics.py | UTF-8 | 3,583 | 3.09375 | 3 | [] | no_license | from src.common import disstance, get_tour_len
from src.Node import Node
def incertion_price_recount(incert_infos, last_added_node, left_node, right_node):
    # After inserting `last_added_node` between left_node and right_node,
    # only the two new edges can offer cheaper insertion spots.  For every
    # point still awaiting insertion, try both new gaps and keep the best.
    for point, info in incert_infos.items():
        candidates = (
            (disstance(left_node.value, point)
             + disstance(point, last_added_node.value)
             - disstance(left_node.value, last_added_node.value),
             (left_node, last_added_node)),
            (disstance(last_added_node.value, point)
             + disstance(point, right_node.value)
             - disstance(last_added_node.value, right_node.value),
             (last_added_node, right_node)),
        )
        for price, place in candidates:
            if price < info["price"]:
                info["price"] = price
                info["place"] = place
def incert(left_node, right_node, value):
    # Splice a new node holding `value` between two adjacent list nodes
    # and return it.
    inserted = Node(value, right_node)
    left_node.next = inserted
    return inserted
def linked_list_to_list(first_node):
    # Walk the circular linked list exactly once, collecting node values
    # until the traversal wraps back to the starting node.
    values = [first_node.value]
    node = first_node.next
    while node != first_node:
        values.append(node.value)
        node = node.next
    return values
def get_cheapest_price(incert_infos):
    # Return the info record with the smallest insertion price.
    min_price = float("inf")
    for info in incert_infos.values():
        if info["price"] < min_price:
            min_price = info["price"]
            cheapest = info
    return cheapest
def hamiltonian_cycle_cheapest_insertion(points):
    """Build a Hamiltonian cycle over `points` with the cheapest-insertion
    heuristic and return it as an ordered list of points.

    NOTE: mutates the caller's `points` list (the first two points are
    removed while seeding the two-node cycle).
    """
    # Seed a two-node circular list from the first two points.
    first_node = Node(points[0], None)
    second_node = Node(points[1], None)
    first_node.next = second_node
    second_node.next = first_node
    points.remove(first_node.value)
    points.remove(second_node.value)
    last_added = second_node
    left_neighbour = first_node
    right_neighbour = first_node
    # Per-point record of the cheapest known insertion price and gap.
    incert_infos = {}
    for point in points:
        incert_infos[point] = {"point": point, "price": float("inf"), "place": (first_node, second_node)}
    while len(incert_infos) > 0:
        # Only the two edges created by the last insertion can change prices.
        incertion_price_recount(incert_infos, last_added, left_neighbour, right_neighbour)
        cheapest = get_cheapest_price(incert_infos)
        left_neighbour, right_neighbour = cheapest["place"]
        last_added = incert(left_neighbour, right_neighbour, cheapest["point"])
        incert_infos.pop(cheapest["point"])
    return linked_list_to_list(last_added)
def get_all_shifts(list_):
    # Every cyclic rotation of the list, beginning with the identity
    # rotation (offset 0).
    return [list_[offset:] + list_[:offset] for offset in range(len(list_))]
def get_partitioning(list_, n):
    # Chop the list into consecutive chunks of at most n elements
    # (the final chunk may be shorter).
    return [list_[start:start + n] for start in range(0, len(list_), n)]
def calculate_partitioning_len(depo, partition):
    # Total length of all sub-tours, each served out of the depot.
    total = 0
    for route in partition:
        total += get_tour_len(depo, route)
    return total
def tour_partitioning_heuristic(depo, points, capacity):
    """Split a cheapest-insertion Hamiltonian cycle into capacity-sized
    routes, trying every rotation of the cycle and returning the cheapest
    partitioning found.

    BUG FIXES vs. the original:
    * `optimal_partition` was only assigned inside the strict-improvement
      branch; since the identity rotation always ties the baseline, a run
      where no rotation strictly improved raised UnboundLocalError on
      return.  It is now initialized from the unshifted partitioning.
    * the running minimum was never updated inside the loop, so a later
      rotation that merely beat the *baseline* could overwrite an earlier,
      better choice.
    """
    hamiltonian_cycle = hamiltonian_cycle_cheapest_insertion(points)
    optimal_partition = get_partitioning(hamiltonian_cycle, capacity)
    min_partitioning_len = calculate_partitioning_len(depo, optimal_partition)
    for shift in get_all_shifts(hamiltonian_cycle):
        candidate = get_partitioning(shift, capacity)
        candidate_len = calculate_partitioning_len(depo, candidate)
        if candidate_len < min_partitioning_len:
            min_partitioning_len = candidate_len
            optimal_partition = candidate
    return optimal_partition
e902e5d4e91f02363cd668ccbd5dc36462c98955 | Python | code4plot/sghomebrew | /kegland/utils/pricelist_functions.py | UTF-8 | 3,522 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 24 15:16:59 2020
@author: mbijlkh
"""
from utils import helper
from sqlalchemy.types import VARCHAR
import pandas as pd
from collections import defaultdict
def pricetable(x, date, **kwargs):
    """takes in processed pricelist, x, a pandas df
    and upload to SQL DB
    date will record date of entry in %Y%m%d format
    kwargs to pass DB authentication parameters through to
    helper.write_to_sql.

    NOTE: mutates the caller's DataFrame (adds a 'date' column) -- confirm
    callers do not rely on x being unchanged.
    """
    x['date'] = pd.to_datetime(date, format = '%Y%m%d')
    # Size the VARCHAR key column to the longest klid in the index.
    key_length = x.index.get_level_values('klid').str.len().max()
    helper.write_to_sql(**kwargs, df = x, tbl = 'pricetable', if_exists = 'append', dtype = {'klid':VARCHAR(key_length)})
def cattable(x, **kwargs):
    """
    takes in catalog_df, x, a pandas df
    and upload to SQL DB
    discontinued items will go to discont_table

    Rows whose `name` contains 'discontinued' (case-insensitive) go to the
    'discontinued' table; the remainder go to 'catalog'.  Each table is
    created on first run (write_to_sql with if_exists='fail'); on later
    runs the insert fails and we fall back to an in-place update.
    kwargs carry the DB authentication parameters.
    """
    # NOTE(review): x.name resolves to the 'name' column only when no
    # DataFrame attribute shadows it -- confirm upstream schema.
    discont_df = x[x.name.str.contains('discontinued', case = False)]
    avail_df = x[~x.index.isin(discont_df.index)]
    key_length = x.index.get_level_values('klid').str.len().max()
    try:
        helper.write_to_sql(**kwargs, #authentication parameters
                        df = discont_df, tbl = 'discontinued', #from df to table
                        if_exists = 'fail', dtype = {'klid':VARCHAR(key_length)}) #other params
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        helper.update_sql_table(**kwargs,
                            df = discont_df, tbl = 'discontinued',
                            dtype = {'klid':VARCHAR(key_length)},
                            set_statement = 'a.name = b.name')
    try:
        helper.write_to_sql(**kwargs, #authentication parameters
                        df = avail_df, tbl = 'catalog', #from df to table
                        if_exists = 'fail', dtype = {'klid':VARCHAR(key_length)}) #other params
    except Exception:
        helper.update_sql_table(**kwargs,
                            df = avail_df, tbl = 'catalog',
                            dtype = {'klid':VARCHAR(key_length)},
                            set_statement = 'a.name = b.name')
def cartonboxtable(x, **kwargs):
    """
    takes in carton_df, x, a pandas df
    and upload to SQL DB

    Parameters
    ----------
    x : pandas df
        carton_df about number of items per carton/box
    **kwargs : kwargs
        authentication param for SQL DB

    Returns
    -------
    None.

    'unitspercarton' is either a single count ("N" -> carton == box == N)
    or "carton/box"; missing values default to 1.  Mutates the caller's
    DataFrame (normalizes the unitspercarton column).
    """
    x.unitspercarton = x.unitspercarton.str.replace('nan','1')
    x.unitspercarton = x.unitspercarton.fillna('1')
    x.unitspercarton = x.unitspercarton.astype(str)
    cartonbox = defaultdict(list)
    for i in range(len(x)):
        temp = x.unitspercarton[i].split('/')
        if len(temp) == 1:
            # Single figure: carton and box sizes are the same.
            cartonbox['carton'].append(temp[0])
            cartonbox['box'].append(temp[0])
        else:
            cartonbox['carton'].append(temp[0])
            cartonbox['box'].append(temp[1])
    cartonbox_df = pd.DataFrame(cartonbox, index = x.index)
    key_length = x.index.get_level_values('klid').str.len().max()
    try:
        helper.write_to_sql(**kwargs, #authentication parameters
                        df = cartonbox_df, tbl = 'cartonbox', #from df to table
                        if_exists = 'fail', dtype = {'klid':VARCHAR(key_length)}) #other params
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        helper.update_sql_table(**kwargs,
                            df = cartonbox_df, tbl = 'cartonbox',
                            dtype = {'klid':VARCHAR(key_length)},
                            set_statement = 'a.carton = b.carton,\
                                a.box = b.box')
| true |
27f3b74bb208d04976719ae19b5e3269623e555b | Python | Barankin85/SeaFight | /app/routes.py | UTF-8 | 728 | 2.625 | 3 | [] | no_license | from app import app
from bottle import template, request, static_file
from models import game
@app.route('/static/<filepath:path>')
def server_static(filepath):
    # Serve static assets (css/js/images) from the app's static directory.
    return static_file(filepath, root='./app/static')
@app.get('/')
@app.get('/index')
def index():
    # Render the game board page for both the root and /index URLs.
    return template('game_board', game = game)
@app.post('/')
@app.post('/index')
def index():
    # Handle a shot at JSON-posted (x, y); report whether a ship was hit
    # and, if the game just ended, who won (resetting for a new game).
    shipWasFired = game.fire(request.json['x'], request.json['y'])
    result = { 'shipFired': shipWasFired }
    if game.winner:
        # Remember the winner before start() resets the game state.
        isYouWinner = game.winner == game.you
        game.start()
        result['message'] = "You are winner!" if isYouWinner else "Your enemy won!"
    return result
@app.post('/reset')
def reset():
    # Restart the game, discarding the current board state.
    game.start()
| true |
ccbc3eea4f9c841ea6bde02ef82116b679ddaf90 | Python | v-cardona/CompeticionProgramacion | /Vitos/vito.py | UTF-8 | 346 | 3.21875 | 3 | [] | no_license | import sys
# For each test case, place Vito at the median house: the median minimizes
# the total of absolute distances to every house.
num_cases = int(sys.stdin.readline())
for _ in range(num_cases):
    # Vito lives at the median of the houses;
    # after that we only need to sum the absolute distances.
    casas = [int(i) for i in sys.stdin.readline().split()[1:]]  # first token is the house count
    mediana = sorted(casas)[int(len(casas)/2)]
    cont = sum([abs(elem-mediana) for elem in casas])
    print(cont)
1632ceafe01248583858eccb8ddc1f2b7536a7ad | Python | Aasthaengg/IBMdataset | /Python_codes/p02991/s367089639.py | UTF-8 | 981 | 2.828125 | 3 | [] | no_license | # coding: utf-8
import sys
from heapq import heapify, heappop, heappush
sr = lambda: sys.stdin.readline().rstrip()  # read one raw input line
ir = lambda: int(sr())  # read one int (unused below; kept from the template)
lr = lambda: list(map(int, sr().split()))  # read one line of ints
# Each original vertex is expanded into 3 states (path length mod 3);
# an edge a->b becomes (a, i) -> (b, (i+1) % 3) for i in 0..2, so a node
# is "reached" only when the edge count is a multiple of 3.
N, M = lr()
graph = [[] for _ in range((N+1)*3)] # 1-indexed
for _ in range(M):
    a, b = lr()
    a *= 3; b *= 3
    for i in range(3):
        j = i+1 if i < 2 else 0  # (i + 1) % 3
        graph[a+i].append(b+j)
INF = 10 ** 15
def dijkstra(start):
    """Return shortest edge-counts from *start* over the expanded graph.

    Relies on the module-level N, graph and INF. Every edge has weight 1,
    so this heap-based Dijkstra degenerates to a BFS but stays correct.
    Unreachable states keep distance INF.
    """
    dist = [INF] * (N + 1) * 3
    dist[start] = 0
    que = [(0, start)]
    while que:
        d, u = heappop(que)
        if dist[u] < d:
            continue  # stale heap entry: u already settled with a shorter path
        for v in graph[u]:  # renamed from `next`, which shadowed the builtin
            nd = d + 1
            if nd < dist[v]:
                dist[v] = nd
                heappush(que, (nd, v))
    return dist
S, T = lr()
# Query distance from state (S, 0) to state (T, 0): reaching T in state 0
# means the path length is a multiple of 3 edges.
dist = dijkstra(S*3)
answer = dist[T*3]
if answer == INF:
    answer = -1  # T unreachable with a multiple-of-3 number of edges
else:
    answer //= 3  # each "move" of the puzzle consumes exactly 3 edges
print(answer)
| true |
6f64ae44ba0f6d6d0c72a267ac6fb8373f02656b | Python | PatrickDeng0/DeepLearning-PJ | /rnn_model.py | UTF-8 | 3,500 | 2.78125 | 3 | [] | no_license | import os
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
class RNNModel:
def __init__(self, input_shape, learning_rate=0.001, num_hidden=64,
log_files_path=os.path.join(os.getcwd(), 'logs'),
method='LSTM', output_size=3):
self._input_shape = input_shape
self._learning_rate = learning_rate
self._num_hidden = num_hidden
self._log_files_path = log_files_path
self._output_size = output_size
self._method = method
self._model = self._rnn()
def _rnn(self):
model = tf.keras.Sequential()
# 1-layer LSTM
if self._method == 'LSTM':
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=self._num_hidden),
input_shape=self._input_shape))
model.add(tf.keras.layers.Dense(units=self._num_hidden, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(units=self._output_size, activation=tf.nn.softmax))
# 1-layer LSTMs
elif self._method == 'LSTMs':
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self._num_hidden, return_sequences=True),
input_shape=self._input_shape))
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self._num_hidden)))
model.add(tf.keras.layers.Dense(units=self._num_hidden, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(self._output_size, activation=tf.nn.softmax))
# 1-layer GRU
elif self._method == 'GRU':
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.GRU(units=self._num_hidden),
input_shape=self._input_shape))
model.add(tf.keras.layers.Dense(units=self._num_hidden, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(units=self._output_size, activation=tf.nn.softmax))
# 2-layer GRUs
elif self._method == 'GRUs':
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.GRU(self._num_hidden, return_sequences=True),
input_shape=self._input_shape))
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.GRU(self._num_hidden)))
model.add(tf.keras.layers.Dense(units=self._num_hidden, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(self._output_size, activation=tf.nn.softmax))
else:
raise Exception("Invalid RNN method")
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=self._learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['sparse_categorical_accuracy'])
return model
def train(self, train_data, valid_data, n_epoch, class_weight):
es = EarlyStopping(monitor='val_sparse_categorical_accuracy', mode='max', patience=5, verbose=2)
log_files_path = self._log_files_path
history = self._model.fit(train_data, validation_data=valid_data, epochs=n_epoch, verbose=2,
callbacks=[es], class_weight=class_weight)
self._model.save(log_files_path)
return history
def evaluate(self, test_data):
return self._model.evaluate(test_data, verbose=2)
def predict(self, test_X):
return self._model.predict(test_X)
| true |
10ded71b8fd31d839371e1896a324c2e3329f86c | Python | myamamoto555/atcoder | /others/dp/LCS.py | UTF-8 | 601 | 3.578125 | 4 | [] | no_license | # coding:utf-8
# Longest Common Subsequence (LCS) length via the classic
# O(len(X) * len(Y)) dynamic program.
def lcs(X, Y):
    """Print the length of the longest common subsequence of X and Y.

    c[i][j] holds the LCS length of X[:i] and Y[:j]; the final answer is
    c[m][n]. print(...) with a single argument works under both Python 2
    and 3 (the original used a Python 2 print statement, which is a
    SyntaxError under Python 3).
    """
    m, n = len(X), len(Y)
    c = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                c[i][j] = c[i - 1][j - 1] + 1
            else:
                c[i][j] = max(c[i - 1][j], c[i][j - 1])
    # The DP table is monotone non-decreasing in both indices, so the
    # bottom-right cell is the maximum (the original tracked a redundant
    # running max over all cells).
    print(c[m][n])
if __name__ == '__main__':
    # Demo input from the original exercise: expected LCS length is 4.
    lcs('TCCAGATGG', 'TCACA')
| true |
4d74abb47181f625c8bad73a367ad01579bdbe18 | Python | cathy27/some-little-tests | /0010.py | UTF-8 | 1,301 | 3.359375 | 3 | [] | no_license | # coding=utf8=
# 第0010题: 使用 Python 生成类似于下图中的字母验证码图片
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random as rd
def randchar():
    """Return one random uppercase ASCII letter (A-Z)."""
    return chr(rd.randint(65, 90))
def rand_color():
    """Return a random bright RGB triple (components in 64..255)."""
    return tuple(rd.randint(64, 255) for _ in range(3))
def rand_color2():
    """Return a random dark RGB triple (components in 32..127)."""
    return tuple(rd.randint(32, 127) for _ in range(3))
def gen_yanzhengma(base_len, alp_num, name, is_filter):
    """Render a CAPTCHA of *alp_num* random letters and save it as <name>.jpg.

    base_len  -- pixel height of the image (and horizontal space per letter)
    is_filter -- 1 to blur the image before saving, anything else to skip
    """
    width, height = base_len * alp_num, base_len
    image = Image.new('RGB', (width, height), (255, 255, 255))
    font = ImageFont.truetype('Arial.ttf', 36)  # assumes Arial.ttf is in cwd
    draw = ImageDraw.Draw(image)
    # Noisy background: give every pixel its own random bright colour.
    for col in range(width):
        for row in range(height):
            draw.point((col, row), fill=rand_color())
    # One random letter per cell, drawn in a darker random colour.
    for idx in range(alp_num):
        draw.text((base_len * idx + 10, 10), randchar(), font=font, fill=rand_color2())
    if is_filter == 1:
        image = image.filter(ImageFilter.BLUR)
    image.save('%s.jpg' % name, 'jpeg')
def main():
    """Generate the demo CAPTCHA with the original fixed parameters:
    60px cells, 5 letters, output file "code.jpg", no blur."""
    gen_yanzhengma(60, 5, 'code', 0)


if __name__ == '__main__':
    main()
| true |
6bac3d074aaa8487f54f7b183fb52dbe39b8e325 | Python | citizenken/quest-discord-bot | /src/cogs/character.py | UTF-8 | 5,360 | 2.609375 | 3 | [] | no_license | import yaml
from discord.ext import commands
from ..models.character import Character as CharacterModel
from ..models.user import User
from ..prompts.character import CharacterPrompts
from ._base_cog import _BaseCog
from ..bot import quest_bot
class Character(_BaseCog):
    """Discord cog exposing the `character` command group.

    Provides create/describe/list/update subcommands plus the top-level
    aliases `me`, `hp` and `ap`, which dispatch to the matching handler.
    Character data lives in the project models (CharacterModel, User);
    prompt text comes from CharacterPrompts templates.
    """
    def __init__(self, quest_bot):
        super().__init__(quest_bot)
    @commands.group(pass_context=True,
                    invoke_without_command=True,
                    aliases=["me", "hp", "ap"])
    async def character(self, ctx, *arg):
        """Entry point for `character` and its aliases.

        When invoked via an alias, forward to the corresponding handler
        (`me` -> describe, `hp`/`ap` -> stat updates with an int delta);
        otherwise fall through to an unknown-subcommand message.
        """
        if ctx.invoked_with:
            if ctx.invoked_with == "me":
                await self.describe(ctx, *arg)
                return
            if ctx.invoked_with == "hp":
                # arg[0] is the raw delta string; ValueError if non-numeric.
                await self.update_hp(ctx, int(arg[0]))
                return
            if ctx.invoked_with == "ap":
                await self.update_ap(ctx, int(arg[0]))
                return
        if ctx.invoked_subcommand is None:
            # NOTE(review): message says "git" but this is the character group.
            await ctx.send('Invalid git command passed...')
    @character.group(name='create',
                     description="Create a character",
                     brief="Create a character",
                     pass_context=True,
                     invoke_without_command=True)
    async def create(self, ctx):
        """Send the character-creation form for the user to fill in and submit."""
        template = self.yaml_env.from_string(CharacterPrompts.character_create_prompt)
        response_text = template.render(creation_attrs=CharacterModel.creation_attrs)
        await self.response(ctx, **{ 'content': response_text })
    @create.command(name='submit',
                    description="Create a character",
                    brief="Create a character",
                    pass_context=True,
                    invoke_without_command=True)
    async def submit(self, ctx, *, form: str):
        """Parse the filled-in YAML form and create the new character.

        The code-fence markers are stripped, then the human-readable prompt
        labels are mapped back to model attribute names so the YAML loads
        straight into CharacterModel keyword arguments.
        """
        yaml_lines = form.replace("```", "")
        for attr_name, prompt in CharacterModel.creation_attrs.items():
            yaml_lines = yaml_lines.replace(prompt, attr_name)
        attributes = yaml.safe_load(yaml_lines)
        character = CharacterModel(**attributes)
        character.create(self.quest_bot, ctx)
        embed = character.create_character_embed()
        await self.response(ctx, **{ 'embed': embed })
    @character.group(name='describe',
                     description="Describe a character",
                     brief="Describe a character",
                     pass_context=True,
                     invoke_without_command=True)
    async def describe(self, ctx, *arg):
        """Show a character embed.

        No args: the invoking user's current character. With args: looks up
        `arg[0]` (name) owned by `arg[1]` (a "user#discriminator" string).
        """
        response_details = {}
        if not arg:
            embed = User.get_current_character(quest_bot, self.get_author_info(ctx)).create_character_embed()
            response_details = { 'content': "You are", 'embed': embed }
        else:
            character_name = arg[0]
            owner_info = arg[1].split("#")
            embed = CharacterModel.describe(character_name, owner_info[0], owner_info[1])
            if embed:
                response_details = { 'embed': embed }
            else:
                response_details = {
                    'content': 'No character named {name} owned by {owner_info} found. Was your input correct?'\
                    .format(name=character_name, owner_info="#".join(owner_info))
                }
        await self.response(ctx, **response_details)
    @character.group(name='list',
                     description="List the current users's character",
                     brief="List the current users's character",
                     pass_context=True,
                     invoke_without_command=True)
    async def list(self, ctx):
        """List all characters owned by the invoking user."""
        list_of_characters = User.list_characters(quest_bot, self.get_author_info(ctx))
        template = self.yaml_env.from_string(CharacterPrompts.character_list_prompt)
        response_text = template.render(list_of_characters=list_of_characters)
        await self.response(ctx, **{ 'content': response_text })
    @character.group(name='update',
                     description="Update a character",
                     brief="Update a character",
                     pass_context=True,
                     invoke_without_command=True)
    async def update(self, ctx, *arg):
        """Parent group for stat-update subcommands; does nothing itself."""
        pass
    @update.command(name='hp',
                    description="Update a character",
                    brief="Update a character",
                    pass_context=True,
                    invoke_without_command=True)
    async def update_hp(self, ctx, change: int):
        """Apply *change* to the current character's hit points and report."""
        character = User.get_current_character(quest_bot, self.get_author_info(ctx))
        # NOTE(review): hp is reassigned from the return value here, while
        # update_ap below mutates in place — confirm which contract is right.
        character.hp = character.update_hp(change)
        await self.response(ctx, **{ 'content': '{name} now has {hp} hit points'.format(name=character.name, hp=character.hp) })
    @update.command(name='ap',
                    description="Update a character",
                    brief="Update a character",
                    pass_context=True,
                    invoke_without_command=True)
    async def update_ap(self, ctx, change: int):
        """Apply *change* to the current character's ability points and report."""
        character = User.get_current_character(quest_bot, self.get_author_info(ctx))
        character.update_ap(change)
        await self.response(ctx, **{ 'content': '{name} now has {ap} ability points'.format(name=character.name, ap=character.ap) })
def setup(d_bot):
    """discord.py extension entry point: attach the Character cog to the bot."""
    cog = Character(quest_bot)
    d_bot.add_cog(cog)
3fcc95de010e15841f9f6b55bcb2914c301d7693 | Python | madhu-mini/data_structures | /Arrays.py | UTF-8 | 113 | 2.765625 | 3 | [] | no_license | import array
# Pack a sample list of non-negative values into a typed array of
# unsigned ints (typecode 'I') and display its repr.
initializer_list = [2, 5, 43, 5, 10, 52, 29, 5]
arr = array.array('I', initializer_list)
print(arr)
| true |
1a3e5bd6afe16d2e12492d8e3527c46c47234387 | Python | massa423/tamagawa | /tests/conftest.py | UTF-8 | 570 | 2.65625 | 3 | [] | no_license | from telnetlib import Telnet
import pytest
class TamagawaClient:
    """Thin Telnet wrapper around the locally running tamagawa service."""

    def __init__(self):
        # Connection parameters for the service under test.
        self.host = "localhost"
        self.port = 3333
        self.timeout = 10

    def open(self):
        """Establish the Telnet session; must be called before send()."""
        self.client = Telnet(self.host, self.port, self.timeout)

    def send(self, data: bytes):
        """Write *data*, read the full reply until EOF, then disconnect."""
        self.client.write(data)
        reply = self.client.read_all()
        self.client.close()
        return reply

    def close(self):
        """Close the session (safe to call after send(), which also closes)."""
        self.client.close()
@pytest.fixture
def tamagawa():
    """Yield an opened TamagawaClient and always close it on teardown."""
    client = TamagawaClient()
    client.open()
    yield client
    client.close()
| true |
129a85782867786c1fb24a5b49809e699fa11b12 | Python | S1829/test | /wakachi1.py | UTF-8 | 724 | 3.125 | 3 | [] | no_license | import MeCab
#MeCab morphological-analysis demo: read "I Am a Cat" from disk and print
#the first ten verbs (in dictionary/base form) found in the text.
with open('/media/takuma/NORITAMA/工学実験Ⅳ/Part3/i_am_a_cat.txt','r',encoding='utf-8')as f:
    text = f.read()

m = MeCab.Tagger("-Ochasen") #create the tagger (ChaSen output format)
node = m.parseToNode(text) #head of the linked list of morpheme nodes
count = 0
while node: #walk the node list
    word = node.feature.split(",")[6] #feature field 6: base (dictionary) form
    type = node.feature.split(",")[0] #field 0: part of speech; NOTE: shadows the builtin `type`
    if type in ('動詞'): #if the part of speech is a verb, show its base form
        #NOTE(review): ('動詞') is a plain str, not a tuple, so this is a
        #substring test rather than tuple membership — it works, but verify
        #this was intended.
        print(node.feature)
        print(word)
        count += 1
        if(count ==10):
            break  #stop after the first ten verbs
    node = node.next
    #advance to the next morpheme node
15225c7a1ef5aaceffccce80ce988edb4a92a848 | Python | Jenderal92/Mass-Delete-Http | /delh.py | UTF-8 | 469 | 2.546875 | 3 | [] | no_license | #JametKNTLS - h0d3_g4n - Moslem - Kiddenta - Naskleng45
#Created By : Jenderal92@Shin403
# Startup banner (this file is Python 2: note the bare print statement).
banner = """
Mass Delete HTTP | Jamet Crew
"""
print banner
def http(url):
    """Strip the scheme from *url*, append it to delhttp.txt and log it.

    Best-effort: file errors are swallowed so a batch run keeps going,
    but unrelated bugs are no longer hidden by a bare except.
    """
    try:
        stripped = url.replace('http://', '').replace('https://', '')
        # 'with' closes the handle even on write errors (the original
        # leaked the file object).
        with open('delhttp.txt', 'a') as out:
            out.write(stripped + '\n')
        print('Deleted http' + ' '+url)
    except (IOError, OSError):
        pass
ht = open(site, 'r').readlines()
for i in ht:
try:
siten = i.strip()
data=http(siten)
except: pass | true |