blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
29df1403d961ac88fd199d044dd5f61858bd4b1c | Python | mofei952/cookbook | /c08_classes_and_objects/p25_creating_cached_instances.py | UTF-8 | 2,577 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : mofei
# @Time : 2019/10/1 14:29
# @File : p25_creating_cached_instances.py
# @Software: PyCharm
"""创建缓存实例"""
import logging
import weakref
# Loggers created with the same name are singletons: getLogger() returns
# the same object for identical names. (Translated from Chinese.)
a = logging.getLogger('foo')
b = logging.getLogger('bar')
print(a is b)  # False - different names, different logger objects
c = logging.getLogger('foo')
print(a is c)  # True - same name returns the cached instance
print()
# Achieve the same effect with a factory function: Spam holds only a name,
# and the factory below hands out cached instances keyed by that name.
class Spam:
    """Simple value object identified by a name."""

    def __init__(self, name):
        # The name doubles as the cache key in the factory functions below.
        self.name = name
# Cache that keeps an entry only while some other reference to the
# instance is still alive.
_spam_cache = weakref.WeakValueDictionary()


def get_spam(name):
    """Return the cached Spam for *name*, creating it on first use."""
    instance = _spam_cache.get(name)
    if instance is None:
        instance = Spam(name)
        _spam_cache[name] = instance
    return instance
a = get_spam('ff')
b = get_spam('ff')
print(a is b)  # True - the factory returns the cached instance
print()
# WeakValueDictionary only keeps instances that are still referenced
# somewhere else; as soon as an instance is no longer used it is removed
# from the dictionary. (Translated from Chinese; assumes CPython
# reference counting for immediate removal.)
a = get_spam('foo')
b = get_spam('bar')
c = get_spam('foo')
print(list(_spam_cache))  # ['foo', 'bar'] (order may vary)
del a
print(list(_spam_cache))  # 'foo' survives: c still references it
del c
print(list(_spam_cache))  # 'foo' is gone, only 'bar' remains
del b
print(list(_spam_cache))  # empty - nothing referenced any more
print()
# Use a separate cache-manager object instead of module-level state.
class CachedSpamManager:
    """Hands out one Spam per name while any other reference to that
    instance is still alive (entries vanish once unreferenced)."""

    def __init__(self):
        self._cache = weakref.WeakValueDictionary()

    def get_spam(self, name):
        """Return the cached Spam for *name*, creating it if needed."""
        instance = self._cache.get(name)
        if instance is None:
            instance = Spam(name)
            self._cache[name] = instance
        return instance

    def clear(self):
        """Drop every cached instance."""
        self._cache.clear()
class Spam2:
    # Single manager shared by all users of this class.
    manager = CachedSpamManager()
    def __init__(self, name):
        self.name = name
# Module-level convenience wrapper around the shared manager.
# NOTE(review): the demo below calls Spam2.get_spam(...); as written this
# def is module-level, so that attribute lookup would fail - presumably
# it was meant to be a @staticmethod inside Spam2. Confirm indentation.
def get_spam(name):
    return Spam2.manager.get_spam(name)
# Same name -> same cached instance via the manager.
a = Spam2.get_spam('foo')
b = Spam2.get_spam('foo')
print(a is b)  # True while both references are alive
print()
# Prevent direct instantiation of the cached objects: the manager builds
# instances only through the class's alternate constructor.
class CachedSpamManager:
    """Cache manager for Spam3; creates instances via Spam3._new so that
    Spam3() itself can stay blocked."""

    def __init__(self):
        self._cache = weakref.WeakValueDictionary()

    def get_spam(self, name):
        """Return the cached Spam3 for *name*, building one if absent."""
        instance = self._cache.get(name)
        if instance is None:
            instance = Spam3._new(name)  # bypasses the blocked __init__
            self._cache[name] = instance
        return instance

    def clear(self):
        """Drop every cached instance."""
        self._cache.clear()
class Spam3:
    # Shared manager; the only sanctioned way to obtain instances.
    manager = CachedSpamManager()
    def __init__(self, *args, **kwargs):
        # Direct construction is forbidden - use get_spam() instead.
        raise RuntimeError("Can't instantiate directly")
    # Alternate constructor
    @classmethod
    def _new(cls, name):
        # __new__ skips __init__, so the RuntimeError above is avoided.
        self = cls.__new__(cls)
        self.name = name
        return self
# NOTE(review): the demo below calls Spam3.get_spam(...); as written this
# def is module-level, so that attribute lookup would fail - presumably
# it was meant to be a @staticmethod inside Spam3. Confirm indentation.
def get_spam(name):
    return Spam3.manager.get_spam(name)
# Instances still come from the cache even though Spam3() itself raises.
a = Spam3.get_spam('foo')
b = Spam3.get_spam('foo')
print(a is b)  # True while both references are alive
| true |
7cfd34b2c36bb3a7cfc2588cab4519227840523a | Python | dawagja/Calculadora | /Pcalculadora/funciones.py | UTF-8 | 552 | 3.75 | 4 | [] | no_license | # -*- coding: UTF-8 -*-
'''
@author: Jose Antonio Aguilar Granados
'''
def sumar(a, b):
    """Return the sum of a and b."""
    # FIX: the original docstring said this subtracts (copy-paste from
    # restar); the function adds.
    return a + b
def restar(a, b):
    """Return the difference a - b."""
    resultado = a - b
    return resultado
def multiplicar(a, b):
    """Return the product of a and b."""
    producto = a * b
    return producto
def dividir(a, b):
    """Return the quotient a / b.

    Prints an error message and re-raises ZeroDivisionError when b is 0.
    (FIX: the original divided twice - once inside the try and again in a
    bare `return a/b` after the handler, so the second division raised an
    unhandled ZeroDivisionError anyway.)
    """
    try:
        return a / b
    except ZeroDivisionError:
        print("Error, el segundo argumento no puede ser 0")
        raise  # same exception type callers saw before
e689f68723949c2e97d486b97902d71a7577c180 | Python | sanjacobo/crawlerTest | /testCrawler/spiders/DataProvider.py | UTF-8 | 852 | 2.609375 | 3 | [] | no_license | import re
class Data:
    """Static page-type and domain metadata for the crawler, plus a helper
    that classifies a URL by the first matching page-type pattern."""

    def __init__(self):
        # Ordered list: the first pattern that matches the URL wins.
        self.page_types = ['Travel-Guide-Hotels',
                           'Flight-Origin-City',
                           'Flights-OnD'
                           ]
        self.regex_page_type = {'Travel-Guide-Hotels': r'Travel-Guide-Hotels',
                                'Flight-Origin-City': r'lp/flights/\d+/\D+',
                                'Flights-OnD': r'lp/flights/\d+/\d+/'
                                }
        self.domains = {'ORB': 'orbitz.com',
                        'CTIX': 'cheaptickets.com'}

    # FIX: this was declared @staticmethod while still taking `self`, which
    # made instance calls (d.find_page_type(url)) fail with a missing
    # argument. As a plain instance method both d.find_page_type(url) and
    # the old Data.find_page_type(d, url) call pattern work.
    def find_page_type(self, url):
        """Return the first page type whose regex matches *url*, else None.

        Args:
            url (str): URL to classify.

        Returns:
            str or None: name of the matching page type.
        """
        for page_type in self.page_types:
            if re.search(self.regex_page_type[page_type], url) is not None:
                return page_type
        return None
| true |
55b5d3a7ac3ad22c8ef5408f3b2e54c944a65829 | Python | Tvneeves/classes_practice | /classes_practice.py | UTF-8 | 2,218 | 4.625 | 5 | [] | no_license | '''
1. Your program this week will use the OS library
2. Your program will prompt the user for:
-the directory they would like to save the file in,
-the name of the file,
3. Validate that a directory exists,
4. Create a file in that directory,
5. The program should then prompt the user for their:
-name,
-address,
-phone number.
6. The program will write this data to a comma separated line in a file.
7. Store the file in the directory specified by the user.
8. Read the file you just wrote to the file system .
9. Display the file contents to the user for validation purposes.
'''
#imports the os module, pathlib module, and retrieves Path from pathlib
import os
import pathlib
from pathlib import Path
#prompts user for desired directory path
prompted_direc = input("Please Enter Directory Path of Where you would like to save your file. ")
# NOTE(review): the next call's result is discarded - it is a no-op; the
# actual checks happen in the two if-statements below.
os.path.isdir(prompted_direc)
#if path is invalid, prints to user that it does not exist
# NOTE(review): the script only prints a message and still continues even
# when the directory is invalid; the open() below would then fail.
if os.path.isdir(prompted_direc) == False:
    print("Directory Does Not Exist.")
#if path is valid prints that directory was found and saves direc
if os.path.isdir(prompted_direc) == True:
    print("Directory Found.")
#creates a new file with user imputed filename, there probably should be a check here to ensure that they enter in correct format
new_file = input("Please Enter new File name in format:'File_Name.txt' ")
#prompts user for their name, address, and phonenumber and writes it to the new file.
#this is a silly way to add the commas, without relying on the user to input them themself.
#but it was the only way I could figure out, im sure there is a better/more efficient way to have done this.
#maybe if I store the data in a list, and then write the list to the file?
with open(os.path.join(prompted_direc,new_file), 'w+') as fp:
    fp.write(input('Please enter your name. '))
    fp.write(', ')
    fp.write(input('Please enter your address. '))
    fp.write(', ')
    fp.write(input('Please enter your phonenumber. '))
#Reads the new file back to the user for validation.
# NOTE(review): this prints every entry in the directory, not just the new
# file, and would raise on subdirectories or non-text files.
p = Path(prompted_direc)
for file in p.iterdir():
    print(file.read_text())
| true |
517ac81814826ff73b09a5ea89213ae9a8170860 | Python | woodgern/confusables | /confusables/parse.py | UTF-8 | 4,154 | 2.71875 | 3 | [
"MIT"
] | permissive | import json
from unicodedata import normalize
import string
import os
from config import CUSTOM_CONFUSABLE_PATH, CONFUSABLES_PATH, CONFUSABLE_MAPPING_PATH, MAX_SIMILARITY_DEPTH
def _asciify(char):
return normalize('NFD',char).encode('ascii', 'ignore').decode('ascii')
def _get_accented_characters(char):
    """Return every code point (other than *char* itself) whose ASCII
    decomposition equals *char* - i.e. its accented variants."""
    accented = []
    for codepoint in range(137928):
        candidate = chr(codepoint)
        if candidate != char and _asciify(candidate) == char:
            accented.append(candidate)
    return accented
def _get_confusable_chars(character, unicode_confusable_map, depth):
    """Collect *character* plus everything transitively confusable with it,
    following the map up to MAX_SIMILARITY_DEPTH hops from the root."""
    neighbours = unicode_confusable_map[character]
    group = {character}
    if depth <= MAX_SIMILARITY_DEPTH:
        for neighbour in neighbours:
            group |= _get_confusable_chars(neighbour,
                                           unicode_confusable_map,
                                           depth + 1)
    return group
def parse_new_mapping_file():
    """Regenerate the confusable-character JSON mapping file.

    Reads the Unicode confusables table plus the project's custom
    additions, links upper/lower case variants and accented ASCII letters,
    expands every character to its transitive confusable group (bounded by
    MAX_SIMILARITY_DEPTH) and writes the result to CONFUSABLE_MAPPING_PATH.
    """

    def _link(mapping, first, second):
        # Record first <-> second as mutually confusable (both directions).
        mapping.setdefault(first, set()).add(second)
        mapping.setdefault(second, set()).add(first)

    unicode_confusable_map = {}
    with open(os.path.join(os.path.dirname(__file__), CONFUSABLES_PATH), "r") as unicode_mappings:
        with open(os.path.join(os.path.dirname(__file__), CUSTOM_CONFUSABLE_PATH), "r") as custom_mappings:
            mappings = unicode_mappings.readlines()
            mappings.extend(custom_mappings)
            for mapping_line in mappings:
                # Skip blank lines and comment lines ('#' in column 0 or 1).
                if not mapping_line.strip() or mapping_line[0] == '#' or mapping_line[1] == '#':
                    continue
                # Each data line is "<hex>;<hex> [<hex> ...];..." - the first
                # field is a single code point, the second one or more.
                source_hex, target_hex = mapping_line.split(";")[:2]
                str1 = chr(int(source_hex.strip(), 16))
                str2 = "".join(chr(int(part, 16))
                               for part in target_hex.strip().split(" "))
                _link(unicode_confusable_map, str1, str2)
                # Single characters are also confusable with their own
                # upper/lower case variant (was duplicated code for str1/str2).
                for single in (str1, str2):
                    if len(single) == 1:
                        case_change = single.lower() if single.isupper() else single.upper()
                        if case_change != single:
                            _link(unicode_confusable_map, single, case_change)

    # Every ASCII letter is confusable with each of its accented forms
    # (was two identical loops for lowercase and uppercase).
    for char in string.ascii_lowercase + string.ascii_uppercase:
        for accent in _get_accented_characters(char):
            _link(unicode_confusable_map, char, accent)

    # Expand each character into its full transitive confusable group.
    # (FIX: removed the unused `characters_to_map` local.)
    CONFUSABLE_MAP = {
        character: list(_get_confusable_chars(character, unicode_confusable_map, 0))
        for character in list(unicode_confusable_map.keys())
    }

    with open(os.path.join(os.path.dirname(__file__), CONFUSABLE_MAPPING_PATH), "w") as mapping_file:
        json.dump(CONFUSABLE_MAP, mapping_file)


parse_new_mapping_file()
3e6f905ea943629c96246e0f7eecfc1fabbfe21f | Python | bennythejudge/python_scripts | /median.py | UTF-8 | 4,466 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# print the median of a HTTP response code from the access.log file
# thanks to Steve P. for his advise and suggestions to improve the code
import os
import re
import time
import BaseHTTPServer
import threading
import Queue
from time import sleep
import json
SAMPLE = """66.194.6.80 - - [02/Oct/2005:19:52:46 +0100] "GET / HTTP/1.1" 200 2334 "-" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Q312460)" 11 sproglogs.com
208.53.82.111 - - [02/Oct/2005:20:14:49 +0100] "GET /account/login HTTP/1.1" 200 3679 "-" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)" 5 sproglogs.com
208.53.82.111 - - [02/Oct/2005:20:14:56 +0100] "GET /stylesheets/standard.css HTTP/1.1" 200 8329 "http://sproglogs.com/account/login" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)" 0 sproglogs.com
"""
# Regex for Common Log Format lines: captures the HTTP status code and
# the response size in bytes.
CLF = re.compile(r'^\S+ \S+ \S+ \[.*?\] \".*?\" (?P<code>\d+) (?P<bytes>\d+) ')
requests = {}
# requests is { "code" => [size, size, ...], "code" => [size, size, size] }
# we could also recalculate the median everytime a new value is added
def parse_clf(reqs, seq):
    """Accumulate response sizes per HTTP status code (Python 2).

    reqs: dict mapping status code -> list of response sizes (mutated).
    seq: iterable of Common Log Format lines (here: the infinite
         follow() generator, so the loop normally never ends).
    """
    print "inside parse_clf"
    for line in seq:
        print "inside parse_clf inside for loop"
        match = CLF.search(line)
        if match:
            print "inside parse_clf:found match " + match.group("code")
            code = match.group("code")
            print "code: " + code
            size = int(match.group("bytes"))
            print "size: " + str(size)
            # setdefault creates the size list on first sight of a code.
            sizes = reqs.setdefault(code, [])
            sizes.append(size)
            print sizes
    # we never really get here, do we?
    # (correct: follow() never raises StopIteration, so the loop is endless)
    print "leaving parse_clf"
    # and is this pro-forma too?
    return reqs
def median(items):
    """Return the median of *items*, or None for an empty sequence.

    Uses Python 2 integer division for the midpoint index; for an even
    count the result is the mean of the two middle values (a float).
    """
    s = sorted(items)
    print s
    if len(s) == 0:
        return None
    # odd?
    if len(s) % 2 == 1:
        return s[len(s) / 2]
    # even!
    i1 = len(s) / 2
    i2 = i1-1
    med = (s[i1] + s[i2]) / 2.0
    print "median: " + str(med)
    return med
# assert median([]) is None
# assert median([1]) == 1
# assert median([1, 2, 3]) == 2
# assert median([1, 1]) == 1
# assert median([1, 2]) == 1.5
def follow(thefile):
    """Generator that tails *thefile* forever, yielding new lines as they
    appear (polls every 0.1s at EOF; never terminates)."""
    # thefile.seek(0,2)
    print "inside follow before while"
    while True:
        line = thefile.readline()
        if not line:
            # At EOF: wait briefly for more data instead of busy-looping.
            time.sleep(0.1)
            continue
        print "yielding line " + line
        yield line
class MedianRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Serves GET /<code> with a JSON body containing the median response
    size recorded so far for that HTTP status code (Python 2)."""
    def do_GET(s):
        #pool_sema.acquire()
        #print "thread 1: inside do_GET"
        #pool_sema.release()
        # The request path (minus the leading '/') is the status code key.
        code = s.path[1:]
        # Semaphore guards the debug prints against interleaved output.
        pool_sema.acquire()
        print "code: " + code
        print "size requests: " + str ( len(requests))
        #for (code, sizes) in requests.items():
        #    print code, "inside do_GET -> ", median(sizes), " (", len(sizes), " reqs )"
        pool_sema.release()
        # Unknown codes yield an empty list -> median() returns None.
        sizes = requests.get(code, [])
        s.send_response(200)
        s.send_header("Content-type", "text/plain")
        s.end_headers()
        #m=median(sizes)
        #print "inside do_GET median: " + str(m)
        #pool_sema.acquire()
        #print "thread 1: after call to median with sizes: " + sizes + " median = " + str(m)
        #pool_sema.release()
        j={"median_size": str(median(sizes))}
        json.dump(j,s.wfile)
        #s.wfile.write(json.dumps({"median_size": str(median(sizes))}))
def run_server(requests):
    """Run the median HTTP server on port 8000 (blocks forever)."""
    pool_sema.acquire()
    print "thread 1: starting"
    pool_sema.release()
    # Empty host string binds to all interfaces.
    server_address = ('', 8000)
    httpd = BaseHTTPServer.HTTPServer(server_address, MedianRequestHandler)
    httpd.serve_forever()
def parse_log_file(requests):
    """Tail access.log and feed each line into parse_clf, filling the
    shared *requests* dict; on Ctrl-C print a per-code summary."""
    try:
        #print "inside try inside parse_log_file"
        parse_clf(requests, follow(open("access.log", 'r')))
    except KeyboardInterrupt:
        pass
    print "RESULTS:"
    for (code, sizes) in requests.items():
        print code, " -> ", median(sizes), " (", len(sizes), " reqs )"
# main function: start the log tailer and the HTTP server in two threads
# sharing the module-level `requests` dict.
if __name__ == "__main__":
    #print "inside main"
    maxconnections = 1
    http_server_thread = threading.Thread(target=run_server, args=[requests])
    log_parser_thread = threading.Thread(target=parse_log_file, args=[requests])
    # Semaphore (size 1) serializes the debug prints across threads.
    pool_sema = threading.BoundedSemaphore(value=maxconnections)
    log_parser_thread.start()
    http_server_thread.start()
else:
    print "we don't run as module"
| true |
ecf73f7037f2148d8972cb310b274ca7aef4c019 | Python | analyticalmindsltd/smote_variants | /smote_variants/oversampling/_rose.py | UTF-8 | 4,209 | 2.875 | 3 | [
"MIT"
] | permissive | """
This module implements the ROSE method.
"""
import numpy as np
from ..base import OverSampling
from .._logger import logger
_logger = logger
__all__= ['ROSE']
class ROSE(OverSampling):
    """
    ROSE (Random OverSampling Examples).

    Generates synthetic minority samples by drawing from Gaussians centred
    on randomly chosen minority points with a Silverman-style bandwidth.

    References:
        * BibTex::

            @Article{rose,
                author="Menardi, Giovanna and Torelli, Nicola",
                title="Training and assessing classification rules with
                       imbalanced data",
                journal="Data Mining and Knowledge Discovery",
                year="2014",
                month="Jan",
                day="01",
                volume="28",
                number="1",
                pages="92--122",
                issn="1573-756X",
                doi="10.1007/s10618-012-0295-5",
                url="https://doi.org/10.1007/s10618-012-0295-5"
            }

    Notes:
        * It is not entirely clear if the authors propose kernel density
          estimation or the fitting of simple multivariate Gaussians
          on the minority samples. The latter seems to be more likely,
          I implement that approach.
    """

    # Extensive: adds samples; componentwise: each feature jittered on its own.
    categories = [OverSampling.cat_extensive,
                  OverSampling.cat_sample_componentwise]

    def __init__(self,
                 proportion=1.0,
                 *,
                 random_state=None,
                 **_kwargs):
        """
        Constructor of the sampling object

        Args:
            proportion (float): proportion of the difference of n_maj and n_min
                                to sample e.g. 1.0 means that after sampling
                                the number of minority samples will be equal to
                                the number of majority samples
            random_state (int/RandomState/None): initializer of random_state,
                                like in sklearn
        """
        super().__init__(random_state=random_state)
        self.check_greater_or_equal(proportion, 'proportion', 0.0)
        self.proportion = proportion

    @ classmethod
    def parameter_combinations(cls, raw=False):
        """
        Generates reasonable parameter combinations.

        Returns:
            list(dict): a list of meaningful parameter combinations
        """
        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
                                                 1.0, 1.5, 2.0]}
        return cls.generate_parameter_combinations(parameter_combinations, raw)

    def sampling_algorithm(self, X, y):
        """
        Does the sample generation according to the class parameters.

        Args:
            X (np.ndarray): training set
            y (np.array): target labels

        Returns:
            (np.ndarray, np.array): the extended training set and target labels
        """
        n_to_sample = self.det_n_to_sample(self.proportion,
                                           self.class_stats[self.maj_label],
                                           self.class_stats[self.min_label])

        if n_to_sample == 0:
            return self.return_copies(X, y, "Sampling is not needed")

        X_min = X[y == self.min_label]

        # Estimating the H matrix
        # Per-feature smoothing bandwidth (Silverman rule-of-thumb form).
        std = np.std(X_min, axis=0)

        # NOTE(review): n and d come from the full training set X while std
        # is computed on the minority class only - confirm this matches the
        # bandwidth intended by the paper.
        n, d = X.shape # pylint: disable=invalid-name
        H = std*(4.0/((d + 1)*n))**(1.0/(d + 4)) # pylint: disable=invalid-name

        # Pick base minority points with replacement and jitter each with
        # zero-mean Gaussian noise scaled componentwise by H.
        base_indices = self.random_state.choice(np.arange(X_min.shape[0]),
                                                n_to_sample)
        base_vectors = X_min[base_indices]
        random = self.random_state.normal(size=base_vectors.shape)
        samples = base_vectors + random * H

        return (np.vstack([X, samples]),
                np.hstack([y, np.repeat(self.min_label, len(samples))]))

    def get_params(self, deep=False):
        """
        Returns:
            dict: the parameters of the current sampling object
        """
        return {'proportion': self.proportion,
                **OverSampling.get_params(self)}
| true |
12f1df9b079b1f86504315feef62826b45e0e82c | Python | Adianek/Bootcamp | /Zadanie7.py | UTF-8 | 240 | 3.5625 | 4 | [] | no_license | # ctrl + alt + l ---> robi przejrzysty kod
# ctrl + / ---> comments out the selected lines so the interpreter skips
# them (translated from Polish; the line above documented the reformat
# shortcut ctrl+alt+l)
x = int(input("Podaj liczbę całkowitą: "))  # prompt (Polish): "Enter an integer: "
# True when x is divisible by both 2 and 3 (i.e. by 6) AND greater than 10,
# or when x equals exactly 7.
warunek_pierwszy = (x % 2 == 0 and x % 3 == 0 and x > 10) or (x == 7)
print(warunek_pierwszy)
| true |
05e632eeb75de927d048b173ac790db24c9f370a | Python | cpfiffer/misc-python | /Programming for Finance/Lecture 6/Lecture 6.py | UTF-8 | 595 | 3.078125 | 3 | [] | no_license | import pandas as pd
import statsmodels.api as sm
# This is a dataframe of advertising data.
# (The "Advertising" dataset from An Introduction to Statistical Learning.)
"""
# This code saves the data to file.
url = "http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv"
advert = pd.read_csv(url, index_col = 0)
advert.to_pickle("advert.csv")
"""
# Load the locally pickled copy created by the (disabled) snippet above.
advert = pd.read_pickle("advert.csv")
#Tv, Radio, Newspaper, Sales, by spending.
#Can we predict which advertising medium is the most efficient?
#I.e. multiple regression to find sales influence.
# OLS regression of Sales on TV/Radio/Newspaper spend with an intercept.
x = advert[["TV", "Radio", "Newspaper"]]
y = advert["Sales"]
x = sm.add_constant(x)
est = sm.OLS(y, x).fit()
print(est.summary())
| true |
c9d51d33e7e6a1aeb6465d9d70fa5cefd227eb67 | Python | adysonmaia/phd-sp-dynamic | /sp/core/util/json_util.py | UTF-8 | 956 | 3.65625 | 4 | [] | no_license | import json
def load_content(json_data):
    """Load the content of a json data source.

    Args:
        json_data (object): json data. If a file name (str) is passed,
            that file is parsed as JSON and its content returned.

    Returns:
        Any: loaded data
    """
    if not isinstance(json_data, str):
        return json_data
    with open(json_data) as json_file:
        return json.load(json_file)
def load_key_content(json_data, key):
    """Load content of a key in the json data as a dictionary.

    If the content indexed by the key is a file name, then it loads the
    file as a json file.

    Args:
        json_data (dict): json data.
        key (object): a dictionary key

    Returns:
        Any: loaded data

    Raises:
        KeyError: key not found in the data
    """
    if key not in json_data:
        # FIX: include the missing key in the exception (the original bare
        # `raise KeyError` produced an empty, undebuggable error).
        raise KeyError(key)
    content = load_content(json_data[key])
    # A loaded file may nest the payload under the same key; unwrap once.
    if isinstance(content, dict) and key in content:
        content = content[key]
    return content
| true |
5834318b725341faf9af361e5eb1492af498ec0a | Python | MH-Lee/knc_final | /cralwer/url_crawler.py | UTF-8 | 12,340 | 2.59375 | 3 | [] | no_license | # import packages
import pandas as pd
import numpy as np
import datetime, os, time
from newsapi.newsapi_client import NewsApiClient
# Set the API_KEY (mholic1@unist.ac.kr)
class NewsURL:
    """Crawl article URLs from newsapi.org.

    Collects URLs from technology magazines, three groups of general
    magazines (one API key each, to spread rate limits) and Google News
    for the companies listed in ./company_data/Company.csv, writing the
    results to CSV files under ./source/<today>/ with per-magazine
    backups under ./backup/<today>/.
    """

    def __init__(self, start_date, end_date):
        """Set up API clients, magazine lists and output directories.

        Args:
            start_date (str): first day to crawl, "YYYY-MM-DD".
            end_date (str): last day to crawl (inclusive), "YYYY-MM-DD".
        """
        # NOTE(review): API keys are hardcoded in source; consider moving
        # them to environment variables or a config file.
        self.API_KEY1 = '9382dd6539f448e59de4ab7c8c214f6f'
        self.API_KEY2 = '08fe48df23494ab0bb4faa1162fee7fa'
        self.API_KEY3 = '0bc1cc3aff43418ba35488984b6742a4'
        self.API_KEY4 = 'f996355abde44786b91bdef6bc92ee62'
        self.API_KEY5 = '2533fbe4f09e4d9dbc51905dcd13d4a3'
        # One client per key: tech sources, three general groups, Google News.
        self.tech_newsapi = NewsApiClient(api_key=self.API_KEY1)
        self.sources = self.tech_newsapi.get_sources()
        self.general_newsapi_1 = NewsApiClient(api_key=self.API_KEY2)
        self.general_newsapi_2 = NewsApiClient(api_key=self.API_KEY3)
        self.general_newsapi_3 = NewsApiClient(api_key=self.API_KEY4)
        self.google_newsapi = NewsApiClient(api_key=self.API_KEY5)
        # Magazine names exactly as they appear in the newsapi source list.
        self.general_magazine1 = ["ABC News", "Associated Press", "Business Insider", "CBS News", "CNN"]
        self.general_magazine2 = ["Mashable", "NBC News", "The New York Times", "Reuters", "The Economist"]
        self.general_magazine3 = ["The Washington Post", "The Washington Times", "Time", "USA Today"]
        self.tech_magazine = ["Ars Technica", "Engadget", "Hacker News", "TechCrunch", "TechRader", "The Next Web", "The Verge", "Wired"]
        self.today = datetime.date.today()
        self.start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        self.end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        # Inclusive number of days in the crawl window.
        self.timedelta = int((self.end_date - self.start_date).days) + 1
        # Company names used as search queries.
        self.cor_list = pd.read_csv('./company_data/Company.csv')['Name'].tolist()
        # Output directories are keyed by today's date ("%Y-%m-%d");
        # makedirs replaces the four separate exists/mkdir checks.
        today_dir = self.today.strftime("%Y-%m-%d")
        os.makedirs('./source/{}'.format(today_dir), exist_ok=True)
        os.makedirs('./backup/{}'.format(today_dir), exist_ok=True)
        print("news_crawler start! From: {}, to: {}, {}days".format(self.start_date.strftime("%Y-%m-%d"), self.end_date.strftime("%Y-%m-%d"), self.timedelta))

    # Get the magazine information
    def make_magazine(self, mode="tech"):
        """Return newsapi source ids for the configured magazines.

        Args:
            mode (str): "tech" -> flat list of source ids;
                "general" -> dict mapping group name -> list of ids.
        """
        if mode == "tech":
            return [s['id'] for s in self.sources['sources']
                    if s['name'] in self.tech_magazine]
        elif mode == "general":
            groups = {'general_magazine1': self.general_magazine1,
                      'general_magazine2': self.general_magazine2,
                      'general_magazine3': self.general_magazine3}
            id_list = {}
            for group_name, magazine_names in groups.items():
                print(group_name)
                id_list[group_name] = [s['id'] for s in self.sources['sources']
                                       if s['name'] in magazine_names]
            return id_list

    def make_tech_url_list(self):
        """Fetch all articles per tech magazine per day.

        Writes ./source/<today>/<YYYYMMDD>_techurl.csv plus a per-magazine
        backup CSV; returns a "success time:<seconds>" string.
        """
        start_time = time.time()
        id_list = self.make_magazine(mode="tech")
        total_df = pd.DataFrame(columns=["Magazine", "Date", "Author", "Title", "Url"])
        for id in id_list:
            print(id)
            backup_df = pd.DataFrame(columns=["Magazine", "Date", "Author", "Title", "Url"])
            # One request per magazine per day in the window.
            for i in range(0, self.timedelta):
                date = (self.start_date + datetime.timedelta(i)).strftime("%Y-%m-%d")
                print(date)
                articles = self.tech_newsapi.get_everything(sources=id, from_param=date, to=date, language="en", page_size=100, page=1)
                for a in articles['articles']:
                    row = {"Magazine": id,
                           "Date": a['publishedAt'],
                           "Author": a['author'],
                           "Title": a['title'],
                           "Url": a['url']}
                    # NOTE: DataFrame.append is deprecated/removed in newer
                    # pandas; kept for compatibility with the pinned version.
                    total_df = total_df.append(row, ignore_index=True)
                    backup_df = backup_df.append(row, ignore_index=True)
            backup_df.to_csv("./backup/{0}/{0}_{1}.csv".format(self.today.strftime("%Y-%m-%d"), id), index=False)
        total_df.to_csv("./source/{}/{}_techurl.csv".format(self.today.strftime("%Y-%m-%d"), self.today.strftime("%Y%m%d")), index=False, encoding='utf-8')
        end_time = time.time()
        return "success time:{}".format(end_time - start_time)

    def make_general_url_list(self):
        """Search each general magazine for each company over the range.

        Writes ./source/<today>/<YYYYMMDD>_genurl.csv plus per-magazine
        backups; returns a "success time:<seconds>" string.
        """
        start_time = time.time()
        start_date = self.start_date.strftime("%Y-%m-%d")
        end_date = self.end_date.strftime("%Y-%m-%d")
        print("{}~{}".format(start_date, end_date))
        id_dict = self.make_magazine(mode="general")
        total_df = pd.DataFrame(columns=["Magazine", "Date", "Author", "Title", "Url", "Company"])
        # Each magazine group has its own API client (rate-limit spreading).
        group_clients = {'general_magazine1': self.general_newsapi_1,
                         'general_magazine2': self.general_newsapi_2,
                         'general_magazine3': self.general_newsapi_3}
        for gm in ['general_magazine1', 'general_magazine2', 'general_magazine3']:
            id_list = id_dict[gm]
            newsapi = group_clients[gm]
            for id in id_list:
                print("Magazine : ", id)
                backup_df = pd.DataFrame(columns=["Magazine", "Date", "Author", "Title", "Url", "Company"])
                for query in self.cor_list:
                    print(query)
                    articles = newsapi.get_everything(sources=id, q=query, from_param=start_date, to=end_date, language="en", page_size=100, page=1)
                    for a in articles['articles']:
                        row = {"Magazine": id,
                               "Date": a['publishedAt'],
                               "Author": a['author'],
                               "Title": a['title'],
                               "Url": a['url'],
                               "Company": query}
                        total_df = total_df.append(row, ignore_index=True)
                        backup_df = backup_df.append(row, ignore_index=True)
                backup_df.to_csv("./backup/{0}/{0}_{1}.csv".format(self.today.strftime("%Y-%m-%d"), id), index=False)
        total_df.to_csv("./source/{}/{}_genurl.csv".format(self.today.strftime("%Y-%m-%d"), self.today.strftime("%Y%m%d")), index=False, encoding='utf-8')
        end_time = time.time()
        return "success time:{}".format(end_time - start_time)

    # crawl google_news urls
    def make_google_url_list(self):
        """Search Google News for each company over the date range.

        Writes ./source/<today>/<YYYYMMDD>_googleurl.csv; returns a
        "success time:<seconds>" string.
        """
        start_time = time.time()
        start_date = self.start_date.strftime("%Y-%m-%d")
        end_date = self.end_date.strftime("%Y-%m-%d")
        print("{}~{}".format(start_date, end_date))
        total_df = pd.DataFrame(columns=["Magazine", "Date", "Author", "Title", "Url"])
        for query in self.cor_list:
            print(query)
            articles = self.google_newsapi.get_everything(sources='google-news', q=query, from_param=start_date, to=end_date, language="en", page_size=100, page=1)
            print(len(articles['articles']))
            for a in articles['articles']:
                total_df = total_df.append({"Magazine": "google_news",
                                            "Date": a['publishedAt'],
                                            "Author": a['author'],
                                            "Title": a['title'],
                                            "Url": a['url']}, ignore_index=True)
        # FIX: write into the "%Y-%m-%d"-named directory created in
        # __init__; the old path used "%Y%m%d" for the directory segment,
        # which never exists, so this call always failed.
        total_df.to_csv("./source/{}/{}_googleurl.csv".format(self.today.strftime("%Y-%m-%d"), self.today.strftime("%Y%m%d")), index=False, encoding='utf-8')
        end_time = time.time()
        return "success time:{}".format(end_time - start_time)
| true |
2157a88d9519e02929cdc58dfc72fdeb77dedd55 | Python | minhdua/PYTHON | /LIST/partitioning.py | UTF-8 | 307 | 3.25 | 3 | [] | no_license | list = input().split()
# Bounds are entered on their own line, e.g. "3 7".
lowval,highval = input().split()
# NOTE(review): `list` (read from input on the preceding line) shadows the
# builtin list type for the rest of the script.
list = [int(x) for x in list]
lowval, highval = int(lowval), int(highval)
# Stable three-way partition: values below, inside and above [lowval, highval],
# each group keeping its original relative order.
list1 = [x for x in list if x <lowval]
list2 = [x for x in list if lowval <= x <= highval]
list3 = [x for x in list if x > highval]
list = list1 + list2 + list3
print(list)
| true |
acc6be5ac55871fab30ea52d89d81f23c6a1058c | Python | mapmeld/crud-ml | /word-vector.py | UTF-8 | 918 | 2.640625 | 3 | [
"MIT"
] | permissive | import json
from sys import argv
from flask import Flask, request, jsonify
from gensim.models.keyedvectors import KeyedVectors
# Load the fastText word vectors; fall back to tiny stub dicts so the app
# still starts when the .vec files are missing.
try:
    ar_model = KeyedVectors.load_word2vec_format('wiki.ar.vec')
    en_model = KeyedVectors.load_word2vec_format('wiki.en.vec')
except Exception:
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit; Exception keeps the intended best-effort fallback.
    ar_model = { 'the': [1,2,3] }
    en_model = { 'the': [1,2,3] }
    print('Arabic and/or English word vectors not in same directory')
app = Flask(__name__)
@app.route('/word/en')
def en_word():
    """Return the English vector for ?word=... as JSON; falls back to
    'the' when the word is missing or not in the vocabulary."""
    word = request.args.get('word')
    if word not in en_model:
        word = 'the'
    return jsonify(en_model[word])
@app.route('/word/ar')
def ar_word():
    """Return the Arabic vector for ?word=... as JSON; falls back to
    'the' when the word is missing or not in the vocabulary."""
    word = request.args.get('word')
    if word not in ar_model:
        word = 'the'
    return jsonify(ar_model[word])
if __name__ == '__main__':
    # Port comes from the first CLI argument; default to 9000.
    try:
        # FIX: the original read `sys.argv[1]` but only `argv` is imported
        # (from sys import argv), so a NameError was always raised and
        # silently swallowed - the CLI port never worked.
        port = int(argv[1])
    except (IndexError, ValueError):
        port = 9000
    app.run(host='0.0.0.0', port=port, debug=True)
| true |
a343f5d08dcdd0e0da621e1680fe060e5b1225e6 | Python | ayuzer/HXMA_Python_Gui | /old_tests/template_sandbox/.DONT_USE_TemplateApp/src/utils/emitter.py | UTF-8 | 3,557 | 3.125 | 3 | [] | no_license | # System imports
# import threading
import time
# Library imports
from PyQt4 import QtCore
class EmitterWorker(QtCore.QObject):
    """
    Worker that runs in dedicated thread and emits
    signals on behalf of clients (Python 2 / PyQt4).
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize this worker
        """
        super(EmitterWorker, self).__init__(*args, **kwargs)

        # This is the signal from the Emitter to this worker that
        # triggers processing of the queued client signals.
        self.signal = None

        # List of queued client signals
        self.signal_queue = []

    def set_signal(self, signal):
        # Wire up the Emitter's trigger signal before the thread starts.
        self.signal = signal

    def queue(self, signal, value):
        """
        Queue a signal (and value) on behalf of client as a tuple.
        Note that this particular method is called in the context of the
        client thread. However we do net need to lock access to this
        list as signals (and handlers) are thread-safe.
        """
        self.signal_queue.append((signal, value))

    def started_handler(self):
        """
        This handler is called when the worker thread starts.
        """
        self.signal.connect(self.signal_handler)

    def signal_handler(self, value):
        """
        Handler that responds to the Emitter class's SIGNAL. This handler
        pops queued signals and emits them
        """
        # print "EMITTER_WORKER: signal_handler called", value, threading.currentThread()
        while True:
            if not self.signal_queue:
                break
            item = self.signal_queue.pop(0)
            # print "EMITTER WORKER: item:", item

            # The queued signal is a tuple. item[0] is the signal itself,
            # item[1] is the value to be emitted in the signal
            if not item[0]:
                continue

            # print "EMITTER WORKER emitting signal", item[1]
            item[0].emit(item[1])
class Emitter(QtCore.QObject):
    """
    The Emitter is a class that creates an independent thread
    for emitting signals.

    This ensures that if a thread sends a signal to
    itself, the signal is processes asynchronously.

    Without the emitter, a thread sending a signal to itself calls the
    handler in a nested (potentially recursive) fashion.
    """

    # Signal used to trigger (i.e., call) the handler in the emitter's
    # worker thread.
    SIGNAL = QtCore.pyqtSignal(unicode)

    def __init__(self, *args, **kwargs):
        super(Emitter, self).__init__(*args, **kwargs)

        # Move the worker onto a dedicated QThread; started_handler runs
        # in that thread and connects the trigger signal there.
        self.thread = QtCore.QThread()
        self.worker = EmitterWorker()
        self.worker.set_signal(self.SIGNAL)
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.started_handler)
        self.thread.start()

    def emit(self, signal, value):
        #print "EMITTER: emitting a signal: %s %s" % (
        #    value, threading.currentThread())

        # First, queue the signal (and value) to ultimately be emitted
        self.worker.queue(signal, value)

        # Signal the worker thread.... its handler will pop the
        # queued signals and emit them.
        self.SIGNAL.emit("future_value")

    def stop(self):
        # Shut the worker thread down and busy-wait (0.2s polls) until it
        # has actually finished before dropping the reference.
        print "EMITTER: stop() called"
        if self.thread:
            self.thread.quit()
            while True:
                if self.thread.isFinished():
                    break
                print "EMITTER: Waiting for thread to finish..."
                time.sleep(0.2)
            self.thread = None
| true |
1321b80b88a83b14626f94008dc63f328c8f5a0c | Python | jyuno426/KCSS | /kcss/management/commands/updateKoreans.py | UTF-8 | 1,380 | 2.5625 | 3 | [
"MIT"
] | permissive | import json
from django.core.management.base import BaseCommand, CommandError
from kcss.models import Author
class Command(BaseCommand):
    """Mark the hard-coded list of Korean authors with korean_prob = 100."""
    help = "Update koreans that are hard coded"

    def add_arguments(self, parser):
        # This command takes no command-line arguments.
        pass

    def handle(self, *args, **options):
        """Read author names from kr_hard_coding.txt, resolve aliases via
        author_name_dict.json, and flag each matching Author row."""
        bp = "kcss/static/kcss/"
        with open(bp + "data/author_name_dict.json") as f:
            author_name_dict = json.load(f)
        with open(bp + "data/kr_hard_coding.txt") as f:
            for line in f.readlines():
                author_name = line.strip()
                # Normalize aliases through the name dictionary.
                if author_name in author_name_dict:
                    author_name = author_name_dict[author_name]
                name_parts = author_name.split()
                last_name = name_parts[-1]
                first_name = " ".join(name_parts[:-1])
                try:
                    author = Author.objects.get(
                        first_name=first_name, last_name=last_name
                    )
                except Author.DoesNotExist:
                    self.stdout.write(
                        self.style.ERROR(
                            "{} {} does not exist in DB".format(first_name, last_name)
                        )
                    )
                    # BUG FIX: previously execution fell through here and
                    # either raised NameError (first miss) or re-saved the
                    # PREVIOUS iteration's author. Skip missing authors.
                    continue
                author.korean_prob = 100
                author.save()
                self.stdout.write(self.style.SUCCESS(str(author)))
| true |
fa152c4e83879dd576eebbaf5397ede835fd9a0a | Python | VishwPramit97/Docker-Projects | /DockerProject.py | UTF-8 | 3,341 | 2.78125 | 3 | [] | no_license | import os
# Interactive terminal menu for common Docker administration tasks.
# All actions shell out via os.system and require root privileges.
while True:
    # NOTE: the option text below was realigned to match what each branch
    # actually executes (the original menu described different actions).
    print("""
\n\n\n\t--------------------------------------------------------\n
\t\t\t### WELCOME To Docker Terminal User Interface ###\n
\t--------------------------------------------------------
\n\n\t\tpress the following keys to perform following actions:\n\t\t
press 1: For Installing Docker-CE
press 2: For Configuring firewall rules for Docker
press 3: For Docker-compose Installation
press 4: For Launching Wordpress Webapplication linked to MySql database
press 5: For seeing docker images
press 6: For seeing containers running
press 0: for exit
"""
          )
    try:
        choice = int(input("Enter your choice ::"))
    except ValueError:
        # BUG FIX: non-numeric input used to crash the script with ValueError.
        print("Sorry Invalid Input")
        continue
    if choice == 1:
        os.system("yum install docker-ce --nobest")
    elif choice == 2:
        # BUG FIX: the firewalld CLI binary is `firewall-cmd`,
        # not `firewalld-cmd`.
        os.system("firewall-cmd --zone=public --add-masquerade --permanent")
        os.system("firewall-cmd --zone=public --add-port=80/tcp")
        os.system("firewall-cmd --zone=public --add-port=443/tcp")
        os.system("firewall-cmd --reload")
        os.system("systemctl restart docker")
        # BUG FIX: this line was `printf(...)` (a NameError) and used "/n"
        # instead of "\n" escapes.
        print("\n\n\tNOW your docker yum problem is solved Go and check................")
    elif choice == 3:
        os.system(' curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose')
        os.system(" chmod +x /usr/local/bin/docker-compose")
    elif choice == 4:
        # Wordpress/MySQL sub-menu.
        while True:
            os.system("clear")
            print("\n\nif you don't have wordpress and mysql image then follow these steps:\n\n")
            print("\n\t\tpress 1: for wordpress image ")
            print("\t\tpress 2: for mysql image")
            print("\n\n\tif you already have these images then follow to launch wordress ")
            print("\n\t\tpress 3: for launch wordpress server\n\n\n")
            print("\n\n\tpress 0: Back to main menu")
            try:
                choice1 = int(input("Enter your choice : "))
            except ValueError:
                print("Sorry Invalid Input")
                continue
            if choice1 == 1:
                os.system("docker pull wordpress:5.1.1-php7.3-apache")
            elif choice1 == 2:
                os.system("docker pull mysql:5.1")
            elif choice1 == 3:
                os.system("docker-compose up -d")
            elif choice1 == 0:
                # BUG FIX: exit(1) terminated the entire program here even
                # though the option promises "Back to main menu".
                break
            else:
                print("Sorry Invalid Input")
    elif choice == 5:
        os.system("docker images ")
    elif choice == 6:
        os.system("docker ps")
    elif choice == 0:
        exit()
    else:
        print("Sorry Invalid Input")
    x = input("PRESS ENTER TO CONTINUE")
| true |
88b0a80f59126e61062f9ffc6ecdb5ae149f20e1 | Python | immohsin/DS-ALGO | /prob_4.py | UTF-8 | 679 | 3.75 | 4 | [] | no_license | # Insert a node in sorted linkedlist
from linkedlist import SinglyLinkedList, Node
def createList():
    """Build a singly linked list holding 2, 4, 6, 8, 10.

    Values are pushed front-first from 10 down to 2, so the resulting
    list reads in ascending order.
    """
    linked = SinglyLinkedList()
    for value in range(10, 0, -2):
        linked.addFront(value)
    return linked
def addToSortedList(ll, data):
    """Insert `data` into the ascending-sorted list `ll`, then print it.

    Walks the list with a trailing pointer until the first node whose
    value is >= data, and splices the new node in before it.
    """
    node = Node(data)
    trail, cursor = None, ll.head
    while cursor != None and cursor.data < data:
        trail, cursor = cursor, cursor.next
    if trail:
        trail.next = node
    else:
        # Insertion at the head (empty list or data is the new minimum).
        ll.head = node
    node.next = cursor
    ll.print()
if __name__ == '__main__':
    # Exercise sorted insertion with duplicates, a new minimum, and a
    # new maximum (same values and order as before).
    ll = createList()
    for value in (1, 1, 3, 3, 5, 7, 9, 10):
        addToSortedList(ll, value)
| true |
5e5b5cefe7fe560cb6658607027b3a0a98da5cb8 | Python | bwitting/PiWeather | /piweather.py | UTF-8 | 6,100 | 2.75 | 3 | [] | no_license | import inkyphat
from datetime import date, timedelta
import glob
from PIL import Image, ImageFont
import datetime
from darksky import forecast
import textwrap
#inkyphat: https://learn.pimoroni.com/tutorial/sandyj/getting-started-with-inky-phat
#darksky: https://darksky.net/dev/docs#response-format
##### Get the weather from Darksky #####
# Latitude/longitude pair for the forecast location.
LOCATION = 40.8791, -81.4656
# Dark Sky API key (placeholder -- replace with a real key before running).
APIKEY='KEY-HERE'
with forecast(APIKEY, *LOCATION) as location:
    # --- today (daily index 0) plus current conditions ---
    summary = location['daily']['data'][0]['summary']
    summaryWeek = location['daily']['summary']
    currentTemp = location['currently']['temperature']
    highTemp = location['daily']['data'][0]['temperatureHigh']
    lowTemp = location['daily']['data'][0]['temperatureLow']
    iconDesc = location['currently']['icon']
    precipProbability = location['currently']['precipProbability']
    # NOTE(review): Dark Sky omits precipType when no precipitation is
    # expected, so this lookup can raise KeyError -- confirm.
    precipType = location['daily']['data'][0]['precipType']
    # --- tomorrow (daily index 1) ---
    iconDesc2 = location['daily']['data'][1]['icon']
    highTemp2 = location['daily']['data'][1]['temperatureHigh']
    lowTemp2 = location['daily']['data'][1]['temperatureLow']
    precipProbability2 = location['daily']['data'][1]['precipProbability']
    precipType2 = location['daily']['data'][1]['precipType']
    # --- day after tomorrow (daily index 2) ---
    iconDesc3 = location['daily']['data'][2]['icon']
    highTemp3 = location['daily']['data'][2]['temperatureHigh']
    lowTemp3 = location['daily']['data'][2]['temperatureLow']
    precipProbability3 = location['daily']['data'][2]['precipProbability']
    precipType3 = location['daily']['data'][2]['precipType']
# ---- today's display strings ----
currentTempFormatted = "{0:.0f}".format(currentTemp)
highTempToday = "High " + "{0:.0f}".format(highTemp)
lowTempToday = "Low " + "{0:.0f}".format(lowTemp)
# BUG FIX: precipProbability is a 0-1 fraction (it is rendered below with
# "{0:.0%}"), so the original "> 8" threshold could never be true and the
# precip lines always said "No precip". 0.08 == the intended 8%.
if precipProbability > 0.08:
    precipLine1 = "{0:.0%}".format(precipProbability) + " chance"
    precipLine2 = "of " + precipType
else:
    precipLine1 = "No precip"
    precipLine2 = "today"
# ---- tomorrow ----
tempsDay2 = "High " + "{0:.0f}".format(highTemp2) + " Low " + "{0:.0f}".format(lowTemp2)
if precipProbability2 > 0.08:
    precipDay2 = "{0:.0%}".format(precipProbability2) + " chance of " + precipType2
else:
    precipDay2 = "No precipitation"
# BUG FIX: `x == "a" or "b"` is always truthy, so every icon used to fall
# into the first branch. Membership tests are what was intended.
if iconDesc2 in ("clear-day", "clear-night"):
    descriptionDay2 = "Clear skies"
elif iconDesc2 in ("partly-cloudy-day", "partly-cloudy-night"):
    descriptionDay2 = "Partly Cloudy"
else:
    descriptionDay2 = iconDesc2.capitalize()
# ---- day after tomorrow ----
tempsDay3 = "High " + "{0:.0f}".format(highTemp3) + " Low " + "{0:.0f}".format(lowTemp3)
if precipProbability3 > 0.08:
    precipDay3 = "{0:.0%}".format(precipProbability3) + " chance of " + precipType3
else:
    precipDay3 = "No precipitation"
if iconDesc3 in ("clear-day", "clear-night"):
    descriptionDay3 = "Clear skies"
elif iconDesc3 in ("partly-cloudy-day", "partly-cloudy-night"):
    descriptionDay3 = "Partly Cloudy"
else:
    descriptionDay3 = iconDesc3.capitalize()
##### Draw on the inkyphat screen #####
# set screen type colour; must match the physical panel variant
inkyphat.set_colour("yellow")
# create font objects: large/medium headings plus a tiny pixel font
fontBig = ImageFont.truetype(inkyphat.fonts.FredokaOne, 16)
fontMid = ImageFont.truetype(inkyphat.fonts.FredokaOne, 12)
# NOTE(review): absolute path exists only on the Pimoroni Pi image -- confirm.
fontSmall = ImageFont.truetype("/home/pi/Pimoroni/inkyphat/examples/04B.ttf" , 8)
# weekday names for today and the next two days
weekday = date.today()
day = date.strftime(weekday, '%A')
weekday2 = datetime.date.today() + datetime.timedelta(days=1)
day2 = date.strftime(weekday2, '%A')
weekday3 = datetime.date.today() + datetime.timedelta(days=2)
day3 = date.strftime(weekday3, '%A')
# vertical divider between "today" (left) and the two-day outlook (right)
inkyphat.line((118, 20, 118, 90),2) # Vertical line
### now draw the text##
# NOTE(review): dayName, x and y are computed but never used below.
dayName = day
w, h = fontBig.getsize(day)
x = (inkyphat.WIDTH / 4) - (w / 2)
y = (inkyphat.HEIGHT / 4) - (h / 2)
# wrap today's summary to ~20 chars/line so it fits the left column
summaryFormatted = textwrap.fill(summary, 20)
# draw the stuff on the left side of the screen (today)
inkyphat.text((20, 5), day, inkyphat.BLACK, font=fontBig)
inkyphat.text((60, 29), highTempToday, inkyphat.BLACK, font=fontMid)
inkyphat.text((60, 41), lowTempToday, inkyphat.BLACK, font=fontMid)
inkyphat.text((60, 59), precipLine1, inkyphat.BLACK, font=fontSmall)
inkyphat.text((60, 69), precipLine2, inkyphat.BLACK, font=fontSmall)
inkyphat.text((60, 80), summaryFormatted, inkyphat.BLACK, font=fontSmall)
# right side: tomorrow (n+1)
inkyphat.text((125, 12), day2, inkyphat.BLACK, font=fontMid)
inkyphat.text((125, 27), descriptionDay2, inkyphat.BLACK, font=fontSmall)
inkyphat.text((125, 35), tempsDay2, inkyphat.BLACK, font=fontSmall)
inkyphat.text((125, 43), precipDay2, inkyphat.BLACK, font=fontSmall)
# right side: day after tomorrow (n+2)
inkyphat.text((125, 57), day3, inkyphat.BLACK, font=fontMid)
inkyphat.text((125, 72), descriptionDay3, inkyphat.BLACK, font=fontSmall)
inkyphat.text((125, 80), tempsDay3, inkyphat.BLACK, font=fontSmall)
inkyphat.text((125, 88), precipDay3, inkyphat.BLACK, font=fontSmall)
# Load our icon files and generate masks
weather_icon = None
iconFromDS = iconDesc
icons = {}
masks = {}
# map the Dark Sky icon name onto one of our local icon file names
icon_map = {
    "snow": ["snow", "sleet"],
    "rain": ["rain"],
    "cloud": ["cloudy", "partly-cloudy-day", "cloudy", "partly-cloudy-night"],
    "sun": ["clear-day", "clear-night"],
    "storm": ["thunderstorm", "tornado", "hail"],
    "wind": ["wind", "fog"]
}
for icon in icon_map:
    if iconFromDS in icon_map[icon]:
        weather_icon = icon
        break
# load every resources/icon-*.png and build a transparency mask for each
for icon in glob.glob("resources/icon-*.png"):
    icon_name = icon.split("icon-")[1].replace(".png", "")
    icon_image = Image.open(icon)
    icons[icon_name] = icon_image
    masks[icon_name] = inkyphat.create_mask(icon_image)
if weather_icon is not None:
    inkyphat.paste(icons[weather_icon], (10, 27), masks[weather_icon])
# show current temp (accent colour) beneath the weather icon
inkyphat.text((21, 76), currentTempFormatted, inkyphat.YELLOW, font=fontBig)
inkyphat.text((11, 95), "currently. ", inkyphat.BLACK, font=fontSmall)
# push to the screen!
inkyphat.show()
| true |
6581f0554062138072e9936a302803bf66450b82 | Python | Firkraag/algorithm | /btree.py | UTF-8 | 6,986 | 3 | 3 | [] | no_license | #!/usr/bin/env python
class BTreeNode:
    """One node of a B-tree of minimum degree t (CLRS-style, 1-based
    positions in the method logic; `key` and `c` are 0-based storage).

    Invariants: a node holds `n` keys in key[0:n] and, when internal,
    n + 1 children in c[0:n+1]; every non-root node has t-1 <= n <= 2t-1.
    """

    def __init__(self, t, leaf, n):
        self.leaf = leaf
        self.n = n
        self.t = t
        # Pre-sized slots for the maximum of 2t-1 keys and 2t children.
        self.key = [0] * (2 * t - 1)
        self.c = [0] * (2 * t)

    def split_child(self, i):
        """Split the full i-th child (1-based) of this non-full node,
        moving its median key up into this node."""
        y = self.c[i - 1]
        t = y.t
        z = BTreeNode(t, y.leaf, t - 1)
        # Upper t-1 keys of y move to the new right sibling z.
        for j in range(1, t):
            z.key[j - 1] = y.key[j + t - 1]
        if not y.leaf:
            for j in range(1, t + 1):
                z.c[j - 1] = y.c[j + t - 1]
        y.n = t - 1
        # Shift children/keys of self right to open slot i (high-to-low).
        for j in range(self.n + 1, i, -1):
            self.c[j] = self.c[j - 1]
        self.c[i] = z
        for j in range(self.n, i - 1, -1):
            self.key[j] = self.key[j - 1]
        self.key[i - 1] = y.key[t - 1]
        self.n = self.n + 1

    def insert_nonfull(self, k):
        """Insert key k into the subtree rooted here; this node must not
        be full. Full children are split on the way down."""
        i = self.n
        t = self.t
        if self.leaf:
            # Shift larger keys right and drop k into place.
            while i >= 1 and k < self.key[i - 1]:
                self.key[i] = self.key[i - 1]
                i = i - 1
            self.key[i] = k
            self.n = self.n + 1
        else:
            while i >= 1 and k < self.key[i - 1]:
                i = i - 1
            i = i + 1
            if self.c[i - 1].n == 2 * t - 1:
                self.split_child(i)
                # The promoted median may redirect us one child right.
                if k > self.key[i - 1]:
                    i = i + 1
            self.c[i - 1].insert_nonfull(k)

    def search(self, k):
        """Return (node, position) of key k (position is 1-based),
        or None if k is absent from this subtree."""
        i = 1
        while i <= self.n and k > self.key[i - 1]:
            i = i + 1
        if i <= self.n and k == self.key[i - 1]:
            return self, i
        elif self.leaf:
            return None
        else:
            return self.c[i - 1].search(k)

    def print_inorder(self):
        """Print the keys of this subtree in ascending order."""
        if self.leaf:
            for i in range(1, self.n + 1):
                print(self.key[i - 1], )
        else:
            for i in range(1, self.n + 1):
                self.c[i - 1].print_inorder()
                print(self.key[i - 1], )
            self.c[self.n].print_inorder()

    def print_child_first(self):
        """Print the keys of this subtree, children before the node
        (postorder over children)."""
        if not self.leaf:
            for i in range(1, self.n + 2):
                self.c[i - 1].print_child_first()
        for i in range(1, self.n + 1):
            print(self.key[i - 1], )

    def delete(self, tree, k):
        """Delete key k from the subtree rooted here (CLRS B-TREE-DELETE).
        `tree` is the owning BTree, needed to collapse an emptied root."""
        t = self.t
        i = 1
        while i <= self.n and k > self.key[i - 1]:
            i = i + 1
        if i <= self.n and k == self.key[i - 1]:
            # Case: k lives in this node.
            if self.leaf:
                for j in range(i, self.n):
                    self.key[j - 1] = self.key[j]
                self.n = self.n - 1
            else:
                y = self.c[i - 1]
                z = self.c[i]
                if y.n >= t:
                    # Replace k with its in-order predecessor.
                    p = y
                    while not p.leaf:
                        p = p.c[p.n]
                    key = p.key[p.n - 1]
                    y.delete(tree, key)
                    self.key[i - 1] = key
                elif z.n >= t:
                    # Replace k with its in-order successor.
                    s = z
                    while not s.leaf:
                        s = s.c[0]
                    key = s.key[0]
                    z.delete(tree, key)
                    self.key[i - 1] = key
                else:
                    # Both neighbours minimal: merge them around k, recurse.
                    self.merge(tree, i)
                    y.delete(tree, k)
        elif self.leaf:
            # k is not in the tree.
            return None
        elif self.c[i - 1].n <= t - 1:
            # The child we must descend into is minimal: refill it first.
            if i <= self.n and self.c[i].n >= t:
                # Borrow a key from the right sibling through the parent.
                a = self.c[i - 1]
                b = self.c[i]
                a.key[a.n] = self.key[i - 1]
                self.key[i - 1] = b.key[0]
                for j in range(2, b.n + 1):
                    b.key[j - 2] = b.key[j - 1]
                a.c[a.n + 1] = b.c[0]
                for j in range(1, b.n + 1):
                    b.c[j - 1] = b.c[j]
                a.n = a.n + 1
                b.n = b.n - 1
                a.delete(tree, k)
            elif i == self.n + 1 and self.c[i - 2].n >= t:
                # Borrow a key from the left sibling through the parent.
                b = self.c[i - 2]
                a = self.c[i - 1]
                # BUG FIX: a right-shift must run high-to-low; the old
                # ascending loop copied a.key[0] into every slot
                # (observable whenever the deficient child holds >= 2 keys,
                # i.e. for any t >= 3).
                for j in range(a.n, 0, -1):
                    a.key[j] = a.key[j - 1]
                a.key[0] = self.key[i - 2]
                self.key[i - 2] = b.key[b.n - 1]
                # BUG FIX: same ordering problem for the child pointers.
                for j in range(a.n + 1, 0, -1):
                    a.c[j] = a.c[j - 1]
                a.c[0] = b.c[b.n]
                b.n = b.n - 1
                a.n = a.n + 1
                a.delete(tree, k)
            elif i <= self.n:
                # No sibling can lend: merge with the right sibling.
                self.merge(tree, i)
                self.c[i - 1].delete(tree, k)
            else:
                # Rightmost child: merge with the left sibling instead.
                self.merge(tree, i - 1)
                self.c[i - 2].delete(tree, k)
        else:
            self.c[i - 1].delete(tree, k)

    def merge(self, tree, i):
        """Merge child i with child i+1 (1-based) around separator key i.
        Assumes the left child holds t-1 keys. Collapses the root if this
        node was the root and becomes empty."""
        y = self.c[i - 1]
        z = self.c[i]
        t = y.t
        yn = y.n
        zn = z.n
        # Separator drops into the median slot of the merged node.
        y.key[t - 1] = self.key[i - 1]
        for j in range(1, zn + 1):
            y.key[j + yn] = z.key[j - 1]
        if not y.leaf:
            for j in range(1, zn + 2):
                y.c[j + yn] = z.c[j - 1]
        y.n = yn + zn + 1
        # Close the gap left in this node's keys and children.
        for j in range(i, self.n):
            self.key[j - 1] = self.key[j]
        for j in range(i + 1, self.n + 1):
            self.c[j - 1] = self.c[j]
        self.n = self.n - 1
        if tree.root == self and self.n == 0:
            tree.root = y
class BTree:
    """B-tree of minimum degree t supporting insert, ordered printing,
    and predecessor queries. Node-level work lives in BTreeNode."""

    def __init__(self, t):
        self.t = t
        # An empty tree is a single empty leaf root.
        self.root = BTreeNode(t, True, 0)

    def insert(self, k):
        """Insert key k, splitting the root first when it is full so the
        tree grows in height only at the top."""
        r = self.root
        t = self.t
        if r.n == 2 * t - 1:
            s = BTreeNode(t, False, 0)
            self.root = s
            s.c[0] = r
            s.split_child(1)
            s.insert_nonfull(k)
        else:
            r.insert_nonfull(k)

    def print_b_tree(self):
        """Print every key in ascending order."""
        r = self.root
        r.print_inorder()

    # The dead commented-out draft of predecessor() that previously sat
    # here was removed; the version below is the one in use.
    def predecessor(self, k):
        """Return the largest key strictly smaller than... or equal
        handling as implemented: the greatest key <= the last key passed
        on the search path that does not exceed k; None if no key <= k.

        Walks root-to-leaf, recording at each node the largest key that
        k does not fall below, then returns the maximum recorded key.
        """
        s = []
        x = self.root
        while True:
            i = 1
            while i <= x.n and k > x.key[i - 1]:
                i = i + 1
            if i > 1:
                s.append(x.key[i - 2])
            if x.leaf:
                break
            else:
                x = x.c[i - 1]
        if len(s) == 0:
            return None
        else:
            return max(s)
| true |
8fbe9799567e27be94436eb13fef232b804e791f | Python | ariomer/Python-Basics | /question3.py | UTF-8 | 228 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
# Load the vehicle dataset exported next to this notebook script.
df = pd.read_csv("Automobile_data_v1.csv")
# Group rows by manufacturer and pull out just the Toyota rows.
car_Manufacturers = df.groupby('company')
toyotaDf = car_Manufacturers.get_group('toyota')
# Bare expression: displayed the frame in the original notebook; no-op here.
toyotaDf
# In[ ]:
| true |
# Read up to 10 real numbers from stdin, then echo them in reverse order.
n = []
for c in range(10):
    try:
        n.append(float(input()))
    except (ValueError, EOFError):
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt.
        print("Não foram digitados valores reais.")
        break
print()
# BUG FIX: the old loop indexed n[9]..n[0] unconditionally and raised
# IndexError whenever input ended early and fewer than 10 values were read.
for value in reversed(n):
    print(value)
| true |
03dd363f339c0320a1dee258589732ece61ed270 | Python | sunan0519/ImplicitMatrixFactorization | /implicit_mf.py | UTF-8 | 3,564 | 2.84375 | 3 | [
"MIT"
] | permissive | import time
from scipy.sparse.linalg import spsolve
import numpy as np
import scipy.sparse as sp
class ImplicitMF():
    """Implicit-feedback matrix factorization trained by alternating least
    squares on a confidence-weighted objective (Hu/Koren/Volinsky style)."""

    def __init__(self, counts, alpha, num_factors=40, num_iterations=30,
                 reg_param=0.8):
        """
        counts: sparse (num_users x num_items) interaction-count matrix
        alpha: confidence scaling applied to observed counts
        num_factors: latent dimensionality
        num_iterations: number of ALS sweeps
        reg_param: L2 regularization strength
        """
        self.counts = counts
        self.alpha = alpha
        self.num_users = counts.shape[0]
        self.num_items = counts.shape[1]
        self.num_factors = num_factors
        self.num_iterations = num_iterations
        self.reg_param = reg_param

    def fit(self):
        """Randomly initialize both factor matrices, then alternate
        user-side and item-side least-squares solves."""
        self.user_vectors = np.random.normal(size=(self.num_users,
                                                   self.num_factors))
        self.item_vectors = np.random.normal(size=(self.num_items,
                                                   self.num_factors))
        for _ in range(self.num_iterations):
            self.user_vectors = self.iteration(True, sp.csr_matrix(self.item_vectors))
            self.item_vectors = self.iteration(False, sp.csr_matrix(self.user_vectors))

    def iteration(self, user, fixed_vecs):
        """One half ALS sweep: solve the user vectors (user=True) or the
        item vectors (user=False) while the other side stays fixed."""
        num_solve = self.num_users if user else self.num_items
        num_fixed = fixed_vecs.shape[0]
        YTY = fixed_vecs.T.dot(fixed_vecs)
        eye = sp.eye(num_fixed)
        lambda_eye = self.reg_param * sp.eye(self.num_factors)
        solve_vecs = np.zeros((num_solve, self.num_factors))
        for i in range(num_solve):
            if user:
                counts_i = self.counts[i].toarray()
            else:
                counts_i = self.counts[:, i].T.toarray()
            # Diagonal built from 1 + alpha * counts (confidence weights).
            CuI = sp.diags(1 + self.alpha * counts_i, [0])
            # Binary preference vector: 1 wherever any count exists.
            pu = counts_i.copy()
            pu[np.where(pu != 0)] = 1.0
            YTCuIY = fixed_vecs.T.dot(CuI).dot(fixed_vecs)
            YTCupu = fixed_vecs.T.dot(CuI + eye).dot(sp.csr_matrix(pu).T)
            xu = spsolve(YTY + YTCuIY + lambda_eye, YTCupu)
            solve_vecs[i] = xu
        return solve_vecs

    def predict(self, u, i):
        """Single user and item prediction."""
        return self.user_vectors[u, :].dot(self.item_vectors[i, :].T)

    def predict_all(self):
        """Predict ratings for every user and item.

        PERF: one matrix product instead of the previous
        O(num_users * num_items) Python double loop over predict().
        """
        return self.user_vectors.dot(self.item_vectors.T)

    def ranking(self, predictions):
        """Convert a prediction matrix into per-user rank percentiles
        (0 = that user's top-ranked item, 100 = the bottom-ranked one)."""
        temp = predictions.argsort(axis = 1)
        # Absolute per-user ranks: highest prediction receives rank 0.
        pred_ranks = np.empty_like(temp)
        for i in range(self.num_users):
            pred_ranks[i,temp[i,:]] = np.arange(self.num_items - 1, -1, -1)
        # Rescale ranks to percentiles of the worst possible rank.
        pred_ranks_percentile = pred_ranks / np.max(pred_ranks) * 100
        return pred_ranks_percentile

    def evaluate(self, test):
        """Mean predicted rank-percentile over the held-out interactions
        in the sparse `test` matrix (lower is better)."""
        predictions = self.predict_all()
        pred_ranks = self.ranking(predictions)
        test = test.todense()
        metrics = np.sum(np.multiply(test, pred_ranks))/np.sum(test)
        return metrics
| true |
037c29beb43dd3195cb1afcdf6ad357a25e0bd4c | Python | stochasticnetworkcontrol/snc | /tests/snc/agents/test_agents_utils.py | UTF-8 | 342 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import snc.agents.agents_utils as utils
def test_assert_orthogonal_rows_true():
    """Rows with disjoint support are reported as orthogonal."""
    candidate = np.array([[1, 0, 0, 1], [0, 1, 1, 0]])
    assert utils.has_orthogonal_rows(candidate)
def test_assert_orthogonal_rows_false():
    """Rows that share a nonzero column are not orthogonal."""
    candidate = np.array([[1, 0, 0, 1], [1, 1, 1, 0]])
    assert not utils.has_orthogonal_rows(candidate)
| true |
cc2c37ae30dabe50c0e6db14dea6ebdac5260abc | Python | aaron-aguerrevere/CS-34505 | /generate_sql.py | UTF-8 | 1,243 | 2.671875 | 3 | [] | no_license | # script to generate sql that will update header and footer urls in db
############------------ IMPORTS ------------############
import csv
############------------ FUNCTIONS ------------############
def generate_sql():
    """Read affiliate rows from gh.csv and write one UPDATE statement per
    data row to sql_scripts.cvs.

    Input columns used: 0 = affiliate site name, 5 = header URL,
    6 = footer URL. Emitted statement shape:

        UPDATE dbo.tblaffiliatesettings
        SET headerurl = <headerurl>,
            footerurl = <footerurl>,
            artworkdeliveryoption = 3
        WHERE affiliatesitename = <sitename>

    NOTE(review): the output extension ".cvs" looks like a typo for
    ".csv"; kept as-is so existing consumers of that filename still work.
    """
    # target_columns Domain, Paper, Header, Footer
    # BUG FIX: the input file handle was opened without `with` and never
    # closed.
    with open('gh.csv', newline='') as csvfile:
        reader = csv.reader(csvfile, skipinitialspace=True)
        with open('sql_scripts.cvs', 'w', newline='', encoding='utf-8') as result:
            writer = csv.writer(result, dialect='excel')
            for i, row in enumerate(reader):
                if i == 0:
                    # Skip the header row.
                    continue
                # BUG FIX: the old backslash-continued f-string baked the
                # source file's indentation into the emitted SQL text.
                # NOTE(review): CSV values are spliced in by formatting --
                # fine for trusted input, not injection-safe.
                statement = (
                    "UPDATE dbo.tblaffiliatesettings\n"
                    f"SET headerurl = '{row[5]}',\n"
                    f"footerurl = '{row[6]}',\n"
                    "artworkdeliveryoption = 3\n"
                    f"WHERE affiliatesitename = '{row[0]}'"
                )
                writer.writerow([statement])
############------------ DRIVER CODE ------------############
# Run the generator only when executed as a script.
if __name__ == '__main__':
    generate_sql()
| true |
class Car:
    # Class-level defaults shared by every Car instance.
    door_number = 4
    engine = "petrol engine"
    color = "blue"


# Instantiate and display one of the class attributes (prints "4").
obj = Car()
print(obj.door_number)
b51e22ae4553f7a4ef07828655157701c9e1c55c | Python | MarRoar/Python-code | /00-sxt/03-reg/00-test.py | UTF-8 | 619 | 3.6875 | 4 | [] | no_license | import re
# Small re.match demos: character classes, negated classes, and \d.
ret = re.match(".", "M")
print(ret.group())
p = "1[345]"  # matches exactly "13", "14" or "15"
result1 = re.match(p, '12')  # no match
print(result1)
result2 = re.match(p, '13')  # match
print(result2)
rP = "1[^345]"  # [^...] negates the set: "1" followed by anything but 3/4/5
result1 = re.match(rP, '12')  # match
print(result1)
result2 = re.match(rP, '13')  # no match
print(result2)
print("----------------------------------------------")
# BUG FIX: raw strings for regex patterns avoid invalid-escape warnings
# for '\d' (the pattern values themselves are unchanged).
result = re.match(r'\d', "13")  # a digit
print(result)
result = re.match(r"嫦娥\d号", "嫦娥3号发射成功")
print(result)
| true |
53ffae0aa96d5d9b442cbcdbb04dd99f42cf3c52 | Python | nateblaine/CSM-2018-Schedule | /ptconferencescript.py | UTF-8 | 2,624 | 2.6875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import re
from lib.ConferenceSession import ConferenceSession
import xlsxwriter
# index and other one time vars
index_url = 'https://apta.expoplanner.com/index.cfm?do=expomap.sessResults&Agenda_type_display=Educational%20Sessions&search_type=sessiontype&event_id=29'
r = requests.get(index_url)
html_content = r.text
soup = BeautifulSoup(html_content, 'lxml')
links = soup.find_all('a')
list_of_sessions = []
# Getting all the links from main page
for raw_elem in links:
if 'session_id' in raw_elem.get('href'):
temp_title = raw_elem.text.lstrip()
temp_url = 'https://apta.expoplanner.com/'+raw_elem.get('href')
temp_session = ConferenceSession(temp_title,temp_url)
list_of_sessions.append(temp_session)
# list_of_sessions = list_of_sessions[0:10]
# Count logic
temp_count = 0
max_len = len(list_of_sessions)
# Excel writing setup
workbook = xlsxwriter.Workbook('ptcsm.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
worksheet.write('A1', 'Title', bold)
worksheet.write('B1', 'URL', bold)
worksheet.write('C1', 'Level', bold)
worksheet.write('D1', 'Date', bold)
worksheet.write('E1', 'Time', bold)
worksheet.write('F1', 'Description', bold)
for session in list_of_sessions:
# Count logic
print('Processing ', temp_count, ' of ', max_len, ' .......')
# Temp connections and soup
temp = session.session_url
req_2 = requests.get(url=temp, headers={'Connection':'close'})
html_content_2 = req_2.text
temp_soup = BeautifulSoup(html_content_2, 'lxml')
# Get text from the Session page
for elem in temp_soup.find_all('b'):
if 'Session Level' in elem.text:
session.session_level = elem.next_sibling.lstrip()
if 'Date' in elem.text:
session.session_date = elem.next_sibling.lstrip()
if 'Time' in elem.text:
session.session_time = elem.next_sibling.lstrip()
if 'Description' in elem.text:
session.session_desc = elem.next_sibling.next_sibling.lstrip()
# Write row in excel
worksheet.write('A'+str(temp_count+2), session.session_title)
worksheet.write('B'+str(temp_count+2), session.session_url)
worksheet.write('C'+str(temp_count+2), session.session_level)
worksheet.write('D'+str(temp_count+2), session.session_date)
worksheet.write('E'+str(temp_count+2), session.session_time)
worksheet.write('F'+str(temp_count+2), session.session_desc)
temp_count += 1
# for full_session in list_of_sessions:
# print(full_session)
print('Done.')
workbook.close()
| true |
2fbb162b269e68c94e561ef5904bf69212077538 | Python | xli1110/LC | /Others/Microsoft. Binary Operations.py | UTF-8 | 1,885 | 4.375 | 4 | [
"MIT"
class Problem:
    """
    Given a string s representing a non-negative number num in the binary form.
    While num is not equal to 0, we have two operations as below.
    Operation1: If num is odd, we subtract 1 from it. 1101 -> 1100
    Operation2: If num is even, we divide 2 into it. 1100 -> 110
    The string s may contain leading zeroes.
    Calculate the number of operations we should take that transfers num to 0.

    Optimized counting (the naive simulate-every-operation loop previously
    here exceeded the OA time limit): every '1' bit costs one subtraction
    and every bit position after the leading '1' costs one shift, so for
    num > 0 the answer is (significant_bits - 1) + popcount.
    O(N) time, O(1) space.
    """

    def find_start(self, s):
        """
        Return the index of the first '1', i.e. skip leading zeroes.
        Returns len(s) when the string is all zeroes.
        """
        start = 0
        while start < len(s):
            ch = s[start]
            if ch == "1":
                break
            elif ch == "0":
                start += 1
            else:
                raise Exception("Invalid Character {0}".format(ch))
        return start

    def string_num_transform(self, s):
        """
        Transform a binary string into a number.
        Built-In Function: num = int(s, 2)
        """
        power = 0
        num = 0
        start = self.find_start(s)
        end = len(s) - 1
        while end >= start:
            ch = s[end]
            if ch == "1" or ch == "0":
                num += int(ch) * (2 ** power)
                power += 1
                end -= 1
            else:
                raise Exception("Invalid Character {0}".format(ch))
        return num

    def calculate_num_operations(self, s):
        """Count the operations needed to reduce the binary number to 0."""
        if not s:
            raise Exception("Empty String")
        start = self.find_start(s)
        if start == len(s):
            # All zeroes: num == 0, nothing to do.
            return 0
        num_bits = 0
        num_ones = 0
        for ch in s[start:]:
            if ch == "1":
                num_ones += 1
            elif ch != "0":
                raise Exception("Invalid Character {0}".format(ch))
            num_bits += 1
        # Each '1' needs one "subtract 1"; every remaining bit position
        # after the leading one needs one halving shift.
        return (num_bits - 1) + num_ones
if __name__ == "__main__":
p = Problem()
s = "0100011"
print(p.calculate_num_operations(s))
| true |
c19115fc41d556d3e037cfe6d36f70bc4be125c1 | Python | huangyuan666/security | /SOME Pyfile/内网常用端口扫描.py | UTF-8 | 2,050 | 2.90625 | 3 | [] | no_license | #! coding = utf-8
from socket import *
import threading
import time
# 导入进程包
import multiprocessing
# 导入队列包
# 创建ip线程
class Ip:
    """Wraps one target address and launches a threaded port scan for it."""

    def __init__(self, ip):
        # Target IPv4 address to scan.
        self.ip = ip

    def runs(self):
        """Create a PortScan for this address and run it on a new thread."""
        scanner = PortScan(self.ip)
        threading.Thread(target=scanner.run).start()
class PortScan(threading.Thread):
    """Scan a fixed list of common TCP ports on one host, one probe
    thread per port; open ports are appended to save.txt."""

    def __init__(self, host):
        super().__init__()
        self.host = host
        # Common service ports to probe.
        self.port = [21, 22, 23, 25, 53, 67, 80, 110, 139, 161, 389, 443, 445, 1080, 1433, 3306, 5432, 6379, 27017,
                     5000, 3389, 4848, 7001, 2601, 3389, 8080, 5900, 11211, 2181]

    def run(self):
        """Spawn one scanning thread per port, pacing the spawns."""
        try:
            for i in self.port:
                threading.Thread(target=self.ports, args=(i,)).start()
                # Pace thread creation so connects are not all bursty.
                time.sleep(0.1)
                print("执行中")
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still propagates.
            print("error")

    def ports(self, port_):
        """Probe one TCP port; record it in save.txt when it is open."""
        print("正在扫描ip为:%s--端口是%s" % (self.host, port_))
        server = socket(AF_INET, SOCK_STREAM)
        # Without a timeout, filtered ports can stall a connect forever.
        server.settimeout(3)
        try:
            server.connect((self.host, port_))
        except OSError:
            print("ip为:%s端口%s未开启" % (self.host, port_))
        else:
            # BUG FIX: the file was opened with mode "r" and then written,
            # which always raised -- so every OPEN port fell into the
            # except branch and was reported as closed. Also interpolate
            # the actual host/port instead of writing literal "%s"s.
            with open("save.txt", "a") as f:
                f.write("host:%s----port:%s\n" % (self.host, port_))
        finally:
            # BUG FIX: the socket was previously closed only on failure.
            server.close()

    def __del__(self):
        print("ip为%s扫描结束" % self.host)
if __name__ == '__main__':
    # Scan every host on the 192.168.15.0/24 subnet, one OS process each.
    for x in range(1, 256):
        ip = Ip("192.168.15." + str(x))
        multiprocessing.Process(target=ip.runs).start()
        # Throttle process creation (0.2 s between spawns).
        time.sleep(0.2)
        print("进程已开启")
| true |
3104daa46f17eed60b3a9fcef810a43a4c7d02e8 | Python | DAI-Lab/AnonML | /tests/dp_test.py | UTF-8 | 7,640 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python2.7
import sys
import argparse
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from scipy.special import factorial, comb
from scipy.stats import binom
from scipy.optimize import curve_fit
# Command-line interface: experiment parameters plus which plots to produce.
ap = argparse.ArgumentParser()
ap.add_argument('--m', type=int, default=2000,
                help='number of possible tuple values')
ap.add_argument('--n', type=int, default=10000, help='number of peers')
ap.add_argument('--p', type=float, default=0.5,
                help='probability each tuple will be perturbed')
ap.add_argument('--plot-real', action='store_true',
                help='plot delta vs real value')
ap.add_argument('--plot-mvn', action='store_true',
                help='plot delta vs the n/m ratio')
ap.add_argument('--plot-m', action='store_true',
                help='plot delta vs m with fixed n/m ratio')
ap.add_argument('--plot-dve', action='store_true',
                help='plot delta vs epsilon with fixed n/m ratio')
ap.add_argument('--plot-evp', action='store_true',
                help='plot epsilon vs delta with fixed m')
def perturb_prob(m, n, p, real, k):
    """
    Gives the probability that exactly k of a certain row will be sent to the
    aggregator.
    m: total number of possible rows
    n: total number of peers (number of actual rows)
    p: probability that each peer will randomly perturb their row
    real: real number of a certain row present in the dataset
    k: the number of that certain row for which we are trying to assess the probability
    """
    little_p = (1.0 - p) / m   # chance a non-holder randomly lands on this row
    real_p = p + little_p      # chance a real holder ends up sending this row
    mass = 0
    # probability that i of the real rows will be present
    # (range() works on both Python 2 and 3; the old xrange broke under 3)
    for i in range(min(real, k) + 1):
        # chance that exactly i of the real value holders send this row
        # -times-
        # chance that exactly k - i of the non-real value holders send this row
        mass_i = binom.pmf(i, real, real_p)
        mass_j = binom.pmf(k - i, n - real, little_p)
        mass += mass_i * mass_j
    return mass
def get_delta_from_range(m, n, p, real, epsilon=None):
    """Scan output counts until the likelihood ratio between true count
    `real` and `real + 1` first exceeds epsilon, recording the delta mass.

    Returns (y1, y2, delta): the two output distributions (dicts keyed by
    output count) and the delta bound found.
    """
    # here, we're gonna find delta for a given p and real value
    y1 = {0: perturb_prob(m, n, p, real, 0)}
    y2 = {0: perturb_prob(m, n, p, real + 1, 0)}
    epsilon = epsilon or get_epsilon(m, p)  # actually ln of this but w/e
    delta = 0
    # range() instead of xrange: works on Python 2 and 3 alike.
    for i in range(n):
        y1[i] = perturb_prob(m, n, p, real, i)
        y2[i] = perturb_prob(m, n, p, real + 1, i)
        bigger = max(y1[i], y2[i])
        smaller = min(y1[i], y2[i])
        ratio = bigger / smaller
        if ratio > epsilon and i > 0:
            delta = max(delta, bigger - smaller * epsilon)
            break
    return y1, y2, delta
def plot_real_vals(m, n, p, real_vals=None):
    """Plot delta as a function of the true row count, plus the output
    distribution from the last search. Returns the list of deltas."""
    real_vals = real_vals or range(20)
    # here we establish what real value yields the worst delta value
    deltas = []
    for real in real_vals:
        y1, y2, delta = get_delta_from_range(m, n, p, real)
        deltas.append(delta)
        print 'real = %d, delta = %.4g' % (real, delta)
    # plot probability of each output value given the input value
    # (y1 deliberately leaks out of the loop: the last distribution)
    X = sorted(y1.keys())
    y1p = [j[1] for j in sorted(y1.items(), key=lambda k: k[0])]
    plt.plot(X, y1p)
    plt.show()
    plt.plot(real_vals, deltas)
    plt.show()
    return deltas
def plot_m_vs_n(m, p):
    """Plot delta against the n/m ratio at fixed m, taking the worst
    delta over small real counts; fits log-delta to a quadratic."""
    # now we test the effect of n/m on delta (also strictly decreasing)
    deltas = []
    all_factors = [i * 0.2 for i in range(5, 200)]
    for f in all_factors:
        n = int(f * m)
        delta = 0
        # Search real counts until delta stops growing.
        for i in range(10):
            y1, y2, d = get_delta_from_range(m, n, p, real=i)
            if d > delta:
                delta = d
            else:
                break
        deltas.append(delta)
        print 'm = %d, n = %d, real = %d, delta = %.4g' % (m, n, i-1, delta)
    # plot probability of each output value given the input value
    X = sorted(y1.keys())
    y1p = [j[1] for j in sorted(y1.items(), key=lambda k: k[0])]
    plt.plot(X, y1p)
    plt.show()
    X = np.array(all_factors)
    y = np.log(np.array(deltas))
    popt, pcov = curve_fit(quad, X, y)
    func = lambda x, a, b, c: np.exp(a * x**2 + b * x + c)
    fit_y = func(X, *popt)
    print 'delta = exp(%.3g * (m/n)**2 + %.3g * m/n + %.3g)' % tuple(popt)
    fig, ax = plt.subplots(1, 1)
    ax.set_yscale('log')
    ax.plot(X, deltas)
    #ax.plot(X, fit_y)
    plt.xlabel('N/K')
    plt.ylabel('delta')
    plt.show()
def plot_mn(p, mult=5):
    """Plot delta against m with n held at a fixed multiple (mult) of m."""
    # ...and the effect of n, if m remains a constant multiple (exponentially increasing)
    deltas = []
    all_m = [100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600]
    for m in all_m:
        n = m * mult
        delta = 0
        # Search real counts until delta stops growing.
        for i in range(5):
            y1, y2, d = get_delta_from_range(m, n, p, real=i)
            if d > delta:
                delta = d
            else:
                break
        deltas.append(delta)
        print 'm = %d, n = %d, delta = %.4g' % (m, n, delta)
    # plot probability of each output value given the input value
    X = sorted(y1.keys())
    y1p = [j[1] for j in sorted(y1.items(), key=lambda k: k[0])]
    plt.plot(X, y1p)
    plt.show()
    fig, ax = plt.subplots(1, 1)
    ax.set_xscale('log')
    ax.plot(all_m, deltas)
    plt.show()
def plot_delta_vs_epsilon(p, m=2000, mult=5):
    """Plot delta against a sweep of epsilon values (starting at the
    natural bound from get_epsilon) for fixed m, n = mult*m, and p."""
    # plot delta vs. epsilon for fixed m, n, p
    n = m * mult
    deltas = []
    epsilons = [get_epsilon(m, p) * (1 + i * 0.05) for i in range(100)]
    for eps in epsilons:
        delta = 0
        # Search real counts until delta stops growing.
        for i in range(5):
            y1, y2, d = get_delta_from_range(m, n, p, real=i, epsilon=eps)
            if d > delta:
                delta = d
            else:
                break
        deltas.append(delta)
        print 'm = %d, n = %d, epsilon = %.3f, delta = %.4g' % (m, n, eps, delta)
    X = np.log(np.array(epsilons))
    y = np.log(np.array(deltas))
    popt, pcov = curve_fit(quad, X, y)
    func = lambda x, a, b, c: np.exp(a * x**2 + b * x + c)
    fit_y = func(X, *popt)
    #print 'y = exp(%.3g * epsilon + %.3g)' % tuple(popt)
    fig, ax = plt.subplots(1, 1)
    ax.set_yscale('log')
    ax.plot(X, deltas)
    #ax.plot(X, fit_y)
    plt.xlabel('epsilon')
    plt.ylabel('delta')
    plt.show()
def plot_epsilon_vs_p(m=2000):
    """Plot log(epsilon) achievable at domain size m as a function of p."""
    # Coarse grid over (0, 0.9], then a finer grid approaching 1.0.
    p_grid = [step * 0.1 for step in range(1, 10)]
    p_grid += [0.9 + step * 0.02 for step in range(1, 6)]
    log_eps = [np.log(get_epsilon(m, 1 - p)) for p in p_grid]
    plt.plot(p_grid, log_eps)
    plt.xlabel('p')
    plt.ylabel('epsilon')
    plt.show()
def lin(x, a, b):
    """Linear model a*x + b, used as a curve_fit target."""
    slope_term = a * x
    return slope_term + b
def quad(x, a, b, c):
    """Quadratic model a*x**2 + b*x + c, used as a curve_fit target."""
    # Accumulate term by term; same left-to-right float evaluation order
    # as the original single expression.
    total = a * x ** 2
    total += b * x
    total += c
    return total
def exp(x, a, b, c):
    """Exponential-decay model a*exp(-b*x) + c, used as a curve_fit target.

    NOTE: shadows math's name 'exp' by design of the original module.
    """
    decay = np.exp(-b * x)
    return a * decay + c
def get_epsilon(m, p):
    """Return the epsilon bound achieved for domain size m and probability p."""
    # epsilon bound we're going to achieve
    # Mass moved to each of the m alternative values.
    spread = (1.0 - p) / m
    return (1.0 - spread) / (1.0 - p - spread)
if __name__ == '__main__':
    args = ap.parse_args()  # 'ap' is the argparse parser built at module scope
    # probability that a tuple will keep its value after perturbation
    p = 1.0 - args.p
    m = args.m
    n = args.n
    # run our experiments
    # Each boolean flag selects one experiment/plot; they may be combined.
    if args.plot_real:
        plot_real_vals(m, n, p)
    if args.plot_mvn:
        plot_m_vs_n(m, p)
    if args.plot_m:
        plot_mn(p)
    if args.plot_dve:
        plot_delta_vs_epsilon(p)
    if args.plot_evp:
        plot_epsilon_vs_p()
    # Note: It seems like, given perturbation factor p, we can achieve
    # epsilon-delta differential privacy with an epsilon of ln(1 - p/m) - ln(p - p/m). The delta is a
    # function of p, n, and m (n/m?), but this can be made pretty low with some
    # good constants.
    # e.g.: m = 2k, n = 10k, p = 0.5: Epsilon = ln(2) with delta = 0.004.
| true |
b67a9e94d03e186ba2df445cc25a71d671546133 | Python | harshada-sudo/commandline-based-login-using-python-and-sqlite3 | /New_database.py | UTF-8 | 650 | 2.9375 | 3 | [] | no_license | import sqlite3
#create new database or connect to existing one
# The connection context manager commits on success and rolls back on error,
# but it does not close the connection when the block exits.
with sqlite3.connect("Quiz.db") as db:
    #create cursor
    cursor=db.cursor()
    #create table
    # Schema is only created on the first run (IF NOT EXISTS).
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS user_info(
    userid INTEGER PRIMARY KEY,
    username VARCHAR(20) NOT NULL,
    firstname VARCHAR(20) NOT NULL,
    lastname VARCHAR(20) NOT NULL,
    password VARCHAR(20) NOT NULL
    );
    """)
    #insert one entry into table
    # NOTE(review): this INSERT runs unconditionally, so re-running the script
    # adds another duplicate test_User row each time — confirm intended.
    cursor.execute("""
    INSERT INTO user_info(username,firstname,lastname,password)
    VALUES("test_User","harshada","nakod","vijay")
    """)
    db.commit()
    # Dump the whole table to stdout as a quick sanity check.
    cursor.execute("SELECT * FROM user_info")
    print(cursor.fetchall())
| true |
17ce2bfcd104e68e88ee62b671efcb9d1e3fb517 | Python | DevJChen/AES | /AutomatedGmail/imgr.py | UTF-8 | 733 | 3.21875 | 3 | [] | no_license | import urllib.request
from PIL import Image
def imager(url, file_path, file_name):
    """Download the image at *url* to <file_path>\\<file_name>.jpg and return the saved path.

    NOTE(review): the backslash separator assumes a Windows-style file_path — confirm.
    """
    destination = file_path + "\\" + file_name + ".jpg"
    urllib.request.urlretrieve(url, destination)
    return destination
def resizer(file_path):
    """Force the image at file_path into the allowed aspect-ratio range, in place.

    Images wider than 1.91:1 are resized to 1080x566 and images taller than
    ratio 0.8 are resized to 1080x1350 (the file is overwritten); anything
    already within [0.8, 1.91] is left untouched.
    """
    image = Image.open(file_path)
    width, height = image.size
    aspect = width / height
    if aspect > 1.91:
        image.resize((1080, 566)).save(file_path)
        print("Resizer has resized")
    elif aspect < .8:
        image.resize((1080, 1350)).save(file_path)
        print("Resizer has resized")
    else:
        print("Nothing has been resizered")
# Allowed aspect ratio bounds: can't be bigger than 1.91 or smaller than 0.8.
5e795131c87f5796af92a6870b4775faf1b4b716 | Python | LitingLin/ubiquitous-happiness | /data/operator/bbox/spatial/vectorized/torch/cxcywh_to_xyxy.py | UTF-8 | 210 | 2.796875 | 3 | [] | no_license | import torch
def box_cxcywh_to_xyxy(x: torch.Tensor):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2) along the last dim."""
    center_x, center_y, width, height = x.unbind(-1)
    half_w = 0.5 * width
    half_h = 0.5 * height
    corners = [center_x - half_w, center_y - half_h,
               center_x + half_w, center_y + half_h]
    return torch.stack(corners, dim=-1)
| true |
4348ddc2759386752fda0c70203665418abc3750 | Python | nevinliang/BlobReaper | /src/Items.py | UTF-8 | 2,749 | 3.1875 | 3 | [
"MIT"
] | permissive | class Items:
scythe = ['tool', 'Scythe 5%', 'Reinforced Scythe 10%', 'Enhanced Scythe 15%', \
'Ancient scythe 20%', 'Mystical Scythe 40%']
shrine = ['tool', 'Shrine +10 soul stones', 'Altar +25 soul stones', \
'Chapel +50 soul stones', 'Temple +100 soul stones', 'Sanctum +500 soul stones']
forge = ['tool', 'Forge +25 soul stones', 'Workshop +50 soul stones', \
'Assembly Line +100 soul stones', 'Factory +200 soul stones', \
'Vortex +1000 soul stones']
pscythe = [2000, 5000, 10000, 50000, 200000]
pshrine = [500, 1500, 4000, 8000, 25000]
eshrine = [0, 10, 25, 50, 100, 500]
pforge = [1500, 4000, 10000, 25000, 150000]
eforge = [0, 25, 50, 100, 200, 1000]
items = {'scythe': (0, scythe, pscythe), 'shrine': (1, shrine, pshrine), 'forge': (2, forge, pforge)}
# include detailed shop right Here
store_dets = { "scythe": """scythe: increases the probability of stealing from any person by\n
- scythe 5%\t\t2K soul stones\n
- reinforced scythe 10%\t\t5K soul stones\n
- enhanced scythe 15%\t\t10K soul stones\n
- ancient scythe 20%\t\t50K soul stones\n
- mystical scythe 40%\t\t200K soul stones""",
"shrine": """increases souls you get from sacrificing\n
- shrine +10 soul stones\t\t500 soul stones\n
- altar +25 soul stones\t\t1500 soul stones\n
- chapel +50 soul stones\t\t4K soul stones\n
- temple +100 soul stones\t\t 8K soul stones\n
- sanctum + 500 soul stones\t\t25K soul stones""",
"forge": """gives u souls every hour\n
- forge +25 soul stones\t\t1500 soul stones\n
- workshop +50 soul stones\t\t4K soul stones\n
- assembly line +100 soul stones\t\t10K soul stones\n
- factory +200 soul stones\t\t25K soul stones\n
- vortex + 1000 soul stones\t\t150K soul stones"""}
def listinv(lscythe, lshrine, lforge):
ret_str = ""
if lscythe != 0:
ret_str += 'Scythe: Level ' + str(lscythe) + ' ' + Items.scythe[lscythe] + ' more chance for a successful steal.\n'
if lshrine != 0:
ret_str += 'Shrine: Level ' + str(lshrine) + ' ' + Items.shrine[lshrine] + ' from sacrificing.\n'
if lforge != 0:
ret_str += 'Forge: Level ' + str(lforge) + ' ' + Items.forge[lforge] + ' every hour.\n'
if ret_str == "":
ret_str += "You're a noob reaper. You have nothing."
return ret_str
| true |
8bd917531bd9eda3734f56894e0c2adbb445f577 | Python | PeteSD777/Fastapi-cipher | /crypto.py | UTF-8 | 1,175 | 3.140625 | 3 | [] | no_license | from cryptography.fernet import Fernet
from adv_caesar import cipher_encrypt, cipher_decrypt
# A fresh Fernet key is generated at import time, so tokens produced here can
# only be decrypted within the same process/run (the key is never persisted).
key = Fernet.generate_key()
f = Fernet(key)  # module-level cipher shared by the encode/decode functions below
# function encodeFunction is responsible for the second encoding of the value.
def encodeFunction(inputValue):
    """Doubly encode *inputValue*: Caesar-style cipher first, then Fernet.

    Returns the Fernet token (bytes) of the Caesar-encrypted text.
    """
    # First pass: project-level Caesar cipher.
    caesar_text = cipher_encrypt(inputValue)
    # Second pass: Fernet operates on bytes, so encode before encrypting.
    return f.encrypt(caesar_text.encode())
# function decodeFunction is responsible for the complete encoding of dual-encoded value
def decodeFunction(database_id):
    """Reverse encodeFunction: Fernet-decrypt first, then undo the Caesar cipher."""
    fernet_plain = f.decrypt(database_id)
    # f.decrypt returns bytes; the Caesar decoder expects a str.
    return cipher_decrypt(fernet_plain.decode("utf-8"))
| true |
b9168d5549dfcbf49ae68707b77be30021806a8b | Python | winstonjay/knightsTour | /knightsTour-2/knightsTour.py | UTF-8 | 1,364 | 4.1875 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Knights Tour
Info about the Knights Tour problem as described by wikipedia:
A knight's tour is a sequence of moves of a knight on a chessboard such that
the knight visits every square only once. If the knight ends on a square that
is one knight's move from the beginning square (so that it could tour the board
again immediately, following the same path), the tour is closed, otherwise it
is open.
"""
import cKnightsTour as cKT
def knightsTour(start, size=8):
    """Return a valid knight's tour from a given start on a size x size board.

    knightsTour((x,y), n) -> [(x,y), (x1,y1), (x2,y2), ...]

    Raises AssertionError when start is outside the board (0-based indices,
    each coordinate in range 0 .. size-1).
    """
    knightsTour.size = size  # stash board size as a function attribute
    sX, sY = start
    # Validate with an explicit check instead of `assert`, which is silently
    # stripped when Python runs with -O; the exception type and message are
    # unchanged for callers that catch AssertionError.
    if not (0 <= sX < knightsTour.size and 0 <= sY < knightsTour.size):
        raise AssertionError(
            "Start position must be within bounds of board size"
            "based on zero based indcies; range = 0 to n-1"
        )
    return cKT.KnightsTour(sX, sY)  # Value returned from C function
if __name__ == '__main__':
    import sys
    # CLI usage: knightsTour.py <x> <y>; any other argument count falls back
    # to starting from the corner (0, 0).
    start = (int(sys.argv[1]), int(sys.argv[2])) if len(sys.argv) == 3 else (0, 0)
    print(knightsTour(start))
0b5663f64a41d05a2f2d3fd7f735ee42ef9cc720 | Python | Sasha2508/Python-Codes | /Arrays/stickler_thief.py | UTF-8 | 1,682 | 4.21875 | 4 | [] | no_license | """
Problem Statement:
Stickler Thief
Stickler the thief wants to loot money from a society having n houses in a single line. He is a weird person and follows a certain rule when looting the houses. According to the rule, he will never loot two consecutive houses. At the same time, he wants to maximize the amount he loots. The thief knows which house has what amount of money but is unable to come up with an optimal looting strategy. He asks for your help to find the maximum money he can get if he strictly follows the rule. Each house has a[i] amount of money present in it.
Input:
The first line of input contains an integer T denoting the number of test cases. T testcases follow. Each test case contains an integer n which denotes the number of houses. Next line contains space separated numbers denoting the amount of money in each house.
Output:
For each testcase, in a newline, print an integer which denotes the maximum amount he can take home.
Expected Time Complexity: O(N).
Expected Space Complexity: O(N).
Constraints:
1 <= T <= 200
1 <= n <= 104
1 <= a[i] <= 104
Example:
Input:
2
6
5 5 10 100 10 5
3
1 2 3
Output:
110
4
Explanation:
Testcase1:
5+100+5=110
Testcase2:
1+3=4
"""
def FindMaxSum(a,n):
    """Return the maximum loot from the first n houses with no two adjacent picks.

    Classic house-robber DP; this version keeps only the last two best values
    instead of the full table (O(1) extra space).
    """
    if n == 0:
        return 0
    if n == 1:
        return a[0]
    if n == 2:
        return max(a[0], a[1])
    # best_two_back / best_prev mirror dp[i-2] / dp[i-1] of the table version.
    best_two_back = a[0]
    best_prev = max(a[0], a[1])
    for idx in range(2, n):
        best_two_back, best_prev = best_prev, max(best_two_back + a[idx], best_prev)
    return best_prev
if __name__ == '__main__':
    # Read T test cases from stdin: each case is a house count followed by a
    # line of space-separated loot values; print one answer per case.
    for _ in range(int(input())):
        count = int(input())
        money = list(map(int, input().split()))
        print(FindMaxSum(money, count))
7ea6ce16489831703ad12fa253448bc9473d246f | Python | Aasthaengg/IBMdataset | /Python_codes/p03049/s157319768.py | UTF-8 | 529 | 3.046875 | 3 | [] | no_license | N = int(input())
# Count "AB" substrings obtainable inside the N strings plus the extra pairs
# created at junctions when the strings are concatenated in the best order.
s = [(input()) for _ in range(N)]
ans = 0
a_cnt = 0   # strings ending in 'A' (and not starting with 'B')
b_cnt = 0   # strings starting with 'B' (and not ending in 'A')
ab_cnt = 0  # strings both starting with 'B' and ending in 'A'
for i in range(N):
    # Classify each string by its boundary letters.
    if s[i][0] == 'B' and s[i][-1] == 'A':
        ab_cnt += 1
    elif s[i][0] == 'B':
        b_cnt += 1
    elif s[i][-1] == 'A':
        a_cnt += 1
    # Count the "AB" pairs already contained inside the string itself.
    for j in range(len(s[i])-1):
        if s[i][j] + s[i][j+1] == 'AB':
            ans += 1
if ab_cnt == 0:
    # No B...A strings: each A-ending string can directly feed one B-starter.
    print(ans + min(a_cnt, b_cnt))
else:
    # Chain all B...A strings together, gaining ab_cnt - 1 junction pairs.
    ans += ab_cnt - 1
    # Optionally attach one A-ending string before the chain...
    if a_cnt > 0:
        ans += 1
        a_cnt -= 1
    # ...and one B-starting string after it.
    if b_cnt > 0:
        ans += 1
        b_cnt -= 1
    # Remaining A-enders pair off with remaining B-starters.
    ans += min(a_cnt, b_cnt)
    print(ans)
| true |
b283a34db05e7cc9390de51dcd0b586aa54d77e9 | Python | bopopescu/Daffo | /Python/DateNTime_Module/PYTZ Library/date_time_UTC.py | UTF-8 | 837 | 4 | 4 | [] | no_license | # here we are going to see the date and time in utc format
# with the help of pytz library and convert a naive time into desired time zone
# First import datetime and pytz library
import datetime
import pytz
# UTC timezone : dates and time
# The same fixed timestamp, first naive, then explicitly tagged as UTC.
dt = datetime.datetime(2020,2,11,12,29,30,1000)
dt_utc = datetime.datetime(2020,2,11,12,29,30,1000,tzinfo = pytz.UTC)
print(dt)
print(dt_utc)
print("*"*50)
# Conversion of navie datetime to desired UTC time zone with pytz
# naive date time
# (Despite the comment, this value is actually timezone-aware: tz=pytz.UTC.)
naive_dt = datetime.datetime.now(tz = pytz.UTC)
# Asia/Kolkata UTC timezone
asia_kolkata = naive_dt.astimezone(pytz.timezone('Asia/Kolkata'))
print(naive_dt)
print(asia_kolkata)
print("*"*50)
# All the UTC TimeZone Names available in the pytz library
# All time Zones available in pytz library
# for tz in pytz.all_timezones:
#     print(tz)
| true |
decc24d1beb168806d78358d855520f421e0d7b2 | Python | tharunShiv/Tkinter-Workshop | /examples/counter2.pyw | UTF-8 | 451 | 3.15625 | 3 | [] | no_license | import tkinter as tk
# Build a single-window click counter: a "+" button, the count, a "-" button.
root = tk.Tk()
root.title("Clicker Counter V2")
root.geometry("400x300")
# The displayed count lives in a StringVar so the label updates automatically.
data = tk.StringVar()
data.set("0")
# Create each widget first, then grid it: Button(...).grid(...) returns None,
# which previously left the widget variables bound to None instead of widgets.
up = tk.Button(root, text = "+", command = lambda : data.set(str(int(data.get())+1)))
up.grid(row = 0, column = 0)
point_label = tk.Label(root, textvariable = data)
point_label.grid(row = 0, column = 1)
down = tk.Button(root, text = "-", command = lambda : data.set(str(int(data.get())-1)))
down.grid(row = 0, column = 2)
root.mainloop()
1e86bb1f30e644a5fe97a0ad6dc3618eea4d1c93 | Python | y-oksaku/Competitive-Programming | /AtCoder/abc/121d.py | UTF-8 | 636 | 2.84375 | 3 | [] | no_license | import math
A , B = map(int,input().split())
bA = bin(A)
bB = bin(B)
if (A - 1) % 2 == 0 :
fA = A - 1
fA += ((A - 1) / 2) % 2
else :
fA = (A / 2) % 2
if B % 2 == 0 :
fB = B
fB += (B / 2) % 2
else :
fB = ((B + 1) / 2) % 2
bfA = bin(int(fA))
bfB = bin(int(fB))
bAB = [0] * (max(len(bfA) , len(bfB)) - 2)
ans = 0
for i in range(1,len(bAB) + 1) :
if i > len(bfA) - 2 :
bAB[-i] = int(bfB[-i])
elif i > len(bfB) - 2 :
bAB[-i] = int(bfA[-i])
else :
bAB[-i] = 1 if (int(bfA[-i]) + int(bfB[-i])) % 2 == 1 else 0
for b in bAB : # デコード
ans = ans * 2 + b
print(ans)
| true |
1c648fdbd1ddaf183a08bad92c645179f6b500eb | Python | ThomasZumsteg/project-euler | /problem_0009.py | UTF-8 | 882 | 3.4375 | 3 | [] | no_license | #!/usr/bin/python
def main():
    """Return a*b*c for the Pythagorean triple (a < b < c) with a + b + c == 1000."""
    for candidate in sum_n_equal_m(3, 1000):
        a, b, c = candidate
        if a * a + b * b == c * c:
            return a * b * c
def sum_n_equal_m(n,m):
    """Yield lists of n strictly increasing positive integers that sum to m.

    NOTE(review): the same list object is yielded (and mutated) on every
    iteration — callers must copy it to keep a snapshot.
    """
    # Start from [1, 2, ..., n-1, remainder].
    num_set = list(range(1,n+1))
    num_set[-1] = m-sum(num_set[:-1])
    while True:
        yield num_set
        # Move one unit from the largest element onto the second largest.
        num_set[-1] -= 1
        num_set[-2] += 1
        # Once the list stops being strictly increasing, rebalance it;
        # reorder returns False when the enumeration is exhausted.
        if not ordered(num_set):
            num_set = reorder(num_set)
            if not num_set:
                break
def ordered(a_list):
    """Return True iff a_list is strictly increasing (vacuously True if short)."""
    return all(earlier < later for earlier, later in zip(a_list, a_list[1:]))
def reorder(a_list):
    """Rebalance a_list into the next strictly increasing list with the same sum.

    Mutates a_list in place and returns it, or returns False when no further
    strictly increasing arrangement exists (enumeration is finished).
    """
    list_sum = sum(a_list)
    while not ordered(a_list):
        # Find the first element (other than the last) that is >= the last.
        index = None
        for i in range(len(a_list)-1):
            if a_list[i] >= a_list[-1]:
                index = i
                break
        # NOTE(review): if no such element exists, index stays None and
        # 'None <= 0' raises TypeError on Python 3 — confirm this is unreachable.
        if index <= 0:
            return False
        # Bump the predecessor, make the following elements consecutive...
        a_list[index-1] += 1
        for j in range(index, len(a_list)-1):
            a_list[j] = a_list[j-1] + 1
        # ...then absorb the slack into the last element to preserve the sum.
        a_list[-1] = list_sum - sum(a_list[:-1])
    return a_list
# Script entry point: print the product found by main().
if __name__ == "__main__":
    print(main())
| true |
547c654b27843ee50f200a09ce316c200240f3e1 | Python | StevenYangSX/Python-Course | /pokerGame/card.py | UTF-8 | 1,135 | 2.96875 | 3 | [] | no_license | import random
class Card(object):
#constructor
def __init__(self,suite,face):
self._suite = suite
self._face = face
self._showCase = ''
#self._faceShowing = ''
#getter and setter
@property
def suite(self):
return self._suite
@property
def face(self):
return self._face
@suite.setter
def suite(self, suite):
self._suite = suite
@face.setter
def face(self, face):
self._face = face
def makeShowCase(self):
if(self.face == 1):
self._showCase = 'A'
elif(self.face == 11):
self._showCase = 'J'
elif(self.face == 12):
self._showCase = 'Q'
elif(self.face == 13):
self._showCase = 'K'
else:
self._showCase = self.face
def showCard(self):
return print(self._suite+str(self._showCase))
'''overloading operator: < '''
def __lt__(self, other):
if(self._face < other._face):
return True
else:
return False
#TODO: All class function go here
| true |
3c66497cec93b3f6cfc094bbe53416de07e6d860 | Python | woodongk/python-algorithm-study | /Programmers/카카오 기출/키패드 누르기.py | UTF-8 | 2,161 | 3.59375 | 4 | [] | no_license | import collections
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# 최단거리 구하기
def bfs(start):
queue = collections.deque([start])
dist = [[-1] * 3 for _ in range(4)] # 경로를 -1 으로 초기화
dist[start[0]][start[1]] = 0
while queue:
x, y = queue.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < 4 and 0 <= ny < 3: # 갈 수 있는 길이라면,
if dist[nx][ny] == -1: # 아직 방문하지 않았다면 ( 경로 최단 거리 위해 )
dist[nx][ny] = dist[x][y] + 1
queue.append((nx, ny))
return dist
def solution(numbers, hand):
keypads = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
['*', 0, '#'],
]
loc_maps = {
1: (0, 0),
2: (0, 1),
3: (0, 2),
4: (1, 0),
5: (1, 1),
6: (1, 2),
7: (2, 0),
8: (2, 1),
9: (2, 2),
0: (3, 1)
}
left_loc = (3, 0)
right_loc = (3, 2)
answer = ''
for num in numbers:
if num == 1 or num == 4 or num == 7:
answer += "L"
left_loc = loc_maps[num]
elif num == 3 or num == 6 or num == 9:
answer += 'R'
right_loc = loc_maps[num]
else:
target_x, target_y = loc_maps[num]
dist_left = bfs(left_loc)[target_x][target_y]
dist_right = bfs(right_loc)[target_x][target_y]
if dist_left > dist_right:
answer += "R"
right_loc = loc_maps[num]
elif dist_left < dist_right:
answer += "L"
left_loc = loc_maps[num]
else: #같다
if hand == 'right':
answer += 'R'
right_loc = loc_maps[num]
else:
answer += 'L'
left_loc = loc_maps[num]
return answer
if __name__ == '__main__':
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
a = solution([1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5], "right")
print(a)
print(a == "LRLLLRLLRRL") | true |
c9b9d7ab241b414d1442594dc173c229e10feced | Python | VSydorskyy/iasa_multiagent | /matk/models/determenistic_chaos.py | UTF-8 | 2,419 | 2.65625 | 3 | [] | no_license | import math
from typing import Tuple
import numpy as np
from .base_model import _BaseModel
class DetermenisticChaosModel(_BaseModel):
def __init__(
self,
n_points: int,
field_size: Tuple[int, int],
step_size: int,
r: float,
keep_trajoctories: bool = False,
):
super().__init__(
n_points=n_points,
field_size=field_size,
step_size=step_size,
keep_trajoctories=keep_trajoctories,
)
self.r = r
self.points = []
self.angles = []
self.real_angles = []
def create_field(self):
point_coords = [
np.random.randint(0, f_size, self.n_points)
for f_size in self.field_size
]
point_coords = np.stack(point_coords, axis=-1).astype(float)
angle = np.random.uniform(0, 1, self.n_points)
self.angles.append(angle)
self.real_angles.append(angle * 360)
self.points.append(point_coords)
self.markup_field(point_coords)
def step(self):
current_coord = self.points[-1].copy()
current_angle = self.angles[-1].copy()
current_real_angle = self.real_angles[-1].copy()
for i in range(current_coord.shape[0]):
new_coord, new_angle, new_real_angle = self.step_function(
current_coord[i], current_angle[i], current_real_angle[i]
)
new_coord = self.continious_boarder_mode(new_coord)
current_coord[i] = new_coord
current_angle[i] = new_angle
current_real_angle[i] = new_real_angle
self.real_angles.append(current_real_angle)
self.angles.append(current_angle)
self.points.append(current_coord)
self.markup_field(current_coord)
def step_function(
self, previous_coord: np.ndarray, angle: float, real_angle: float
):
new_angle = self.r * angle * (1 - angle)
real_angle = (real_angle + (new_angle * 360)) % 360
rad = math.radians(real_angle)
previous_coord[0] += math.cos(rad) * self.step_size
previous_coord[1] += math.sin(rad) * self.step_size
previous_coord = self.continious_boarder_mode(previous_coord)
return previous_coord, new_angle, real_angle
def reset_partial(self):
self.angles = []
self.real_angles = []
self.points = []
| true |
a72a37558b9b897ec9ff953286e926785cabaaa6 | Python | duckdb/duckdb | /tools/pythonpkg/tests/fast/api/test_duckdb_query.py | UTF-8 | 6,213 | 2.96875 | 3 | [
"MIT"
] | permissive | import duckdb
import pytest
from conftest import NumpyPandas, ArrowPandas
from pyduckdb import Value
class TestDuckDBQuery(object):
def test_duckdb_query(self, duckdb_cursor):
# we can use duckdb.query to run both DDL statements and select statements
duckdb.query('create view v1 as select 42 i')
rel = duckdb.query('select * from v1')
assert rel.fetchall()[0][0] == 42
# also multiple statements
duckdb.query('create view v2 as select i*2 j from v1; create view v3 as select j * 2 from v2;')
rel = duckdb.query('select * from v3')
assert rel.fetchall()[0][0] == 168
# we can run multiple select statements - we get only the last result
res = duckdb.query('select 42; select 84;').fetchall()
assert res == [(84,)]
@pytest.mark.parametrize('pandas', [NumpyPandas(), ArrowPandas()])
def test_duckdb_from_query_multiple_statements(self, pandas):
tst_df = pandas.DataFrame({'a': [1, 23, 3, 5]})
res = duckdb.sql(
'''
select 42; select *
from tst_df
union all
select *
from tst_df;
'''
).fetchall()
assert res == [(1,), (23,), (3,), (5,), (1,), (23,), (3,), (5,)]
def test_duckdb_query_empty_result(self):
con = duckdb.connect()
# show tables on empty connection does not produce any tuples
res = con.query('show tables').fetchall()
assert res == []
def test_named_param(self):
con = duckdb.connect()
original_res = con.execute(
"""
select
count(*) FILTER (WHERE i >= $1),
sum(i) FILTER (WHERE i < $2),
avg(i) FILTER (WHERE i < $1)
from
range(100) tbl(i)
""",
[5, 10],
).fetchall()
res = con.execute(
"""
select
count(*) FILTER (WHERE i >= $param),
sum(i) FILTER (WHERE i < $other_param),
avg(i) FILTER (WHERE i < $param)
from
range(100) tbl(i)
""",
{'param': 5, 'other_param': 10},
).fetchall()
assert res == original_res
def test_named_param_not_dict(self):
con = duckdb.connect()
with pytest.raises(
duckdb.InvalidInputException,
match="Values were not provided for the following prepared statement parameters: name1, name2, name3",
):
con.execute("select $name1, $name2, $name3", ['name1', 'name2', 'name3'])
def test_named_param_basic(self):
con = duckdb.connect()
res = con.execute("select $name1, $name2, $name3", {'name1': 5, 'name2': 3, 'name3': 'a'}).fetchall()
assert res == [
(5, 3, 'a'),
]
def test_named_param_not_exhaustive(self):
con = duckdb.connect()
with pytest.raises(
duckdb.InvalidInputException,
match="Invalid Input Error: Values were not provided for the following prepared statement parameters: name3",
):
con.execute("select $name1, $name2, $name3", {'name1': 5, 'name2': 3})
def test_named_param_excessive(self):
con = duckdb.connect()
with pytest.raises(
duckdb.InvalidInputException,
match="Values were not provided for the following prepared statement parameters: name3",
):
con.execute("select $name1, $name2, $name3", {'name1': 5, 'name2': 3, 'not_a_named_param': 5})
def test_named_param_not_named(self):
con = duckdb.connect()
with pytest.raises(
duckdb.InvalidInputException,
match="Values were not provided for the following prepared statement parameters: 1, 2",
):
con.execute("select $1, $1, $2", {'name1': 5, 'name2': 3})
def test_named_param_mixed(self):
con = duckdb.connect()
with pytest.raises(
duckdb.NotImplementedException, match="Mixing named and positional parameters is not supported yet"
):
con.execute("select $name1, $1, $2", {'name1': 5, 'name2': 3})
def test_named_param_strings_with_dollarsign(self):
con = duckdb.connect()
res = con.execute("select '$name1', $name1, $name1, '$name1'", {'name1': 5}).fetchall()
assert res == [('$name1', 5, 5, '$name1')]
def test_named_param_case_insensivity(self):
con = duckdb.connect()
res = con.execute(
"""
select $NaMe1, $NAME2, $name3
""",
{'name1': 5, 'nAmE2': 3, 'NAME3': 'a'},
).fetchall()
assert res == [
(5, 3, 'a'),
]
def test_named_param_keyword(self):
con = duckdb.connect()
result = con.execute("SELECT $val", {"val": 42}).fetchone()
assert result == (42,)
result = con.execute("SELECT $value", {"value": 42}).fetchone()
assert result == (42,)
def test_conversion_from_tuple(self):
con = duckdb.connect()
# Tuple converts to list
result = con.execute("select $1", [(21, 22, 42)]).fetchall()
assert result == [([21, 22, 42],)]
# If wrapped in a Value, it can convert to a struct
result = con.execute("select $1", [Value(('a', 21, True), {'v1': str, 'v2': int, 'v3': bool})]).fetchall()
assert result == [({'v1': 'a', 'v2': 21, 'v3': True},)]
# If the amount of items in the tuple and the children of the struct don't match
# we throw an error
with pytest.raises(
duckdb.InvalidInputException,
match='Tried to create a STRUCT value from a tuple containing 3 elements, but the STRUCT consists of 2 children',
):
result = con.execute("select $1", [Value(('a', 21, True), {'v1': str, 'v2': int})]).fetchall()
# If we try to create anything other than a STRUCT or a LIST out of the tuple, we throw an error
with pytest.raises(duckdb.InvalidInputException, match="Can't convert tuple to a Value of type VARCHAR"):
result = con.execute("select $1", [Value((21, 42), str)])
| true |
7242fc404c9c299e572a385ab4c607b41631a7ea | Python | CFker/Python | /Chapter_9.py | UTF-8 | 3,666 | 3.859375 | 4 | [] | no_license | # """9.1.1"""
# class Dog():
# """一次模拟小狗的简单尝试"""
#
# def __init__(self, name, age):
# """初始化属性name和age"""
# self.name = name
# self.age = age
#
# def sit(self):
# """模拟小狗被命令时蹲下"""
# print(self.name.title() + " is now sitting.")
#
# def roll_over(self):
# """模拟小狗被命令时打滚"""
# print(self.name.title() + " rolled over!")
#
#
# my_dog = Dog('willie', 6)
#
# print(my_dog.name.title())
# print(my_dog.age)
# my_dog.sit()
# my_dog.roll_over()
#
# 9.1
class Restaurant():
def __init__(self, restaurant_name, cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
self.number_served = 0
def set_number_served(self, number):
self.number_served = number
print(str(self.number_served) + " person has luanch in restaurant.")
def increment_number_served(self, numbers):
self.number_served += numbers
print("The restaurant can service " + str(self.number_served))
def describe_restaurant(self):
print("The restaurant name is :" + self.restaurant_name.title())
print("The cuisine type is " + self.cuisine_type)
def open_restaurant(self):
print(self.restaurant_name.title() + " is opening")
restaurant = Restaurant('beijing fan dian', 'china')
restaurant.describe_restaurant()
restaurant.open_restaurant()
restaurant.set_number_served(20)
restaurant.increment_number_served(10)
#
# class User():
#
# def __init__(self, first_name, last_name, sex, tall, wight):
#
# self.first_name = first_name
# self.last_name = last_name
# self.sex = sex
# self.tall = tall
# self.wight = wight
#
# def describe_user(self):
# print("The name is :" + self.first_name + ' ' + self.last_name)
# print("sex is " + self.sex)
# print("tall is " + self.tall)
# print("wight is " + self.wight)
#
# def greet_user(self):
# print("Hello " + self.first_name + self.last_name)
#
# per_1 = User('chen', 'haha', 'man', '170', '150')
# per_1.describe_user()
# per_1.greet_user()
# class Car():
# """一次模拟汽车的简单测试"""
#
# def __init__(self, make, model, year):
# """初始化描述汽车的属性"""
# self.make = make
# self.model = model
# self.year = year
# self.odometer_reading = 110
#
# def get_descriptive_name(self):
# """返回整洁的描述性信息"""
# long_name = str(self.year) + ' ' + self.make + ' ' + self.model
# return long_name.title()
#
# def increment_odometer(self, miles):
# """将里程碑按照读数增加指定的量"""
# self.odometer_reading += miles
#
# def update_odometer_reading(self, mileage):
# """
# 将里程表读数设置为指定的数
# 禁止往回修改里程值
# """
# if mileage > self.odometer_reading:
# self.odometer_reading = mileage
# else:
# print("Stop! You can't roll back an odometer!")
#
# def read_odometer_reading(self):
# """打印一条指出汽车里程的消息"""
# print("This car has " + str(self.odometer_reading) + " miles on it.")
#
# my_new_car = Car('audi', 'a8', 2020)
# print(my_new_car.get_descriptive_name())
#
# my_new_car.update_odometer_reading(50)
# my_new_car.increment_odometer(100)
# print (my_new_car.read_odometer_reading()) | true |
7fb7bbe5d972c8d9b41b1676d09b32631f733678 | Python | SpionSkummis/Advent-of-Code-2019 | /Erik/day03.py | UTF-8 | 3,092 | 2.671875 | 3 | [] | no_license | with open("Erik/inputs/input03.txt") as f:
cable1 = f.readline().strip().split(",")
cable2 = f.readline().strip().split(",")
#Test cases:
#cable1 = ["R8","U5","L5","D3"]
#cable2 = ["U7","R6","D4","L4"]
#cable1 = ["R75","D30","R83","U83","L12","D49","R71","U7","L72"]
#cable2 = ["U62","R66","U55","R34","D71","R55","D58","R83"]
#cable1 = ["R98","U47","R26","D63","R33","U87","L62","D20","R33","U53","R51"]
#cable2 = ["U98","R91","D20","R16","D67","R40","U7","R15","U6","R7"]
def makeVisitedSet(inList):
firstVisited = set()
xPos = 0
yPos = 0
firstVisited.add((xPos,yPos))
for instruction in inList:
direction = instruction[0]
length = int(instruction[1:])
if(direction == "U"):
for i in range(yPos,(yPos+length)):
firstVisited.add((xPos,i))
yPos += length
elif(direction == "D"):
for i in range(yPos,(yPos-length),-1):
firstVisited.add((xPos,i))
yPos -= length
elif(direction == "R"):
for i in range(xPos,(xPos+length)):
firstVisited.add((i,yPos))
xPos += length
elif(direction == "L"):
for i in range(xPos, (xPos-length),-1):
firstVisited.add((i,yPos))
xPos -= length
return firstVisited
visited1 = makeVisitedSet(cable1)
visited2 = makeVisitedSet(cable2)
crossSet = set()
for elem in visited1:
if(elem in visited2):
crossSet.add(elem)
crossSet.remove((0,0))
lenList = []
for elem in crossSet:
x, y = elem
lenList.append(abs(x) + abs(y))
print(sorted(lenList)[0])
def makeVisitedSet2(inList):
firstVisited = set()
xPos = 0
yPos = 0
steps = 0
firstVisited.add(((xPos,yPos),(steps)))
for instruction in inList:
direction = instruction[0]
length = int(instruction[1:])
if(direction == "U"):
for i in range(yPos,(yPos+length)):
firstVisited.add(((xPos,i),(steps)))
steps += 1
yPos += length
elif(direction == "D"):
for i in range(yPos,(yPos-length),-1):
firstVisited.add(((xPos,i),(steps)))
steps += 1
yPos -= length
elif(direction == "R"):
for i in range(xPos,(xPos+length)):
firstVisited.add(((i,yPos),(steps)))
steps += 1
xPos += length
elif(direction == "L"):
for i in range(xPos, (xPos-length),-1):
firstVisited.add(((i,yPos),(steps)))
steps += 1
xPos -= length
return firstVisited
visited21 = makeVisitedSet2(cable1)
visited22 = makeVisitedSet2(cable2)
distSet = set()
for c1elem in visited21:
if c1elem[0] in crossSet:
for c2elem in visited22:
if ((c2elem[0] == c1elem[0]) and (c2elem[0] in crossSet)):
distSet.add((c1elem,c2elem))
distList = []
for elem in distSet:
distA = elem[0][1]
distB = elem[1][1]
distList.append(distA + distB)
print(sorted(distList)[0])
| true |
6a31971a5c42fdce480b9d0c97f03b055d9805cf | Python | denisgubin/share_scripts | /validate_ipv4_mask_witout_cidr.py | UTF-8 | 3,390 | 2.6875 | 3 | [] | no_license | import re
from ipaddress import AddressValueError, IPv4Address
def _check_hwaddress(mac_address):
# mac_address_re = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
mac_address_re = "([0-9a-f]{2}:){5}[0-9a-f]{2}$"
if not re.match(mac_address_re, mac_address.lower()):
error = '{} mac-address is invalid. Should set mac-address in format xx:xx:xx:xx:xx:xx\n'.format(mac_address)
return False, error
return True, None
def _validate_args(ipv4_address, ipv4_netmask, ipv4_gateway, mac_address=None):
    """Validate an IPv4 address / netmask / gateway triple (plus optional MAC).

    Collects every problem found, prints them joined together, and returns
    False if anything failed, True otherwise.
    """
    errors = []
    # 1: MAC format check (only when a MAC was supplied).
    if mac_address:
        check_result, error = _check_hwaddress(mac_address)
        if not check_result:
            errors.append(error)
    # 2: the address itself must parse as IPv4.
    try:
        IPv4Address(ipv4_address)
    except AddressValueError:
        errors.append(f"{ipv4_address} ip-address is invalid.\n")
    # 3: so must the gateway.
    try:
        IPv4Address(ipv4_gateway)
    except AddressValueError:
        errors.append(f"{ipv4_gateway} gateway address is invalid.\n")
    # The mask/subnet checks below build binary strings from the inputs, so
    # they only run once the addresses are known to parse.
    if not errors:
        # 4: dotted-quad shape.
        if len(ipv4_netmask.split(".")) != 4:
            errors.append(f"{ipv4_netmask} network mask should be in format xxx.xxx.xxx.xxx.\n")
        # 5: digits only.
        if not ipv4_netmask.replace(".", "").isdigit():
            errors.append(f"{ipv4_netmask} network mask should consist only digits.\n")
        # Mask as a 32-bit binary string, e.g. '1111...1100'.
        bin_mask = "".join(f"{int(octet):08b}" for octet in ipv4_netmask.split("."))
        # 6: every octet must fit in 8 bits.
        if len(bin_mask) != 32:
            errors.append(f"Every {ipv4_netmask} network mask octet should be in range from 0 to 255.\n")
        # 7: a valid mask is a run of 1-bits followed by a run of 0-bits.
        match = re.fullmatch(r"1+0+", bin_mask)
        if not match:
            errors.append(f"{ipv4_netmask} network mask bits shouldn't be start from zero and 1 bits shouldn't "
                          f"been interrupted zero bits.\n")
        bin_address = "".join(f"{int(octet):08b}" for octet in ipv4_address.split("."))
        net_number = bin_mask.count("1")  # prefix length
        # Network / broadcast addresses as 32-bit binary strings.
        bin_network_address = bin_address[0:net_number].ljust(32, "0")
        bin_broadcast_network_address = bin_address[0:net_number].ljust(32, "1")
        bin_gateway = "".join(f"{int(octet):08b}" for octet in ipv4_gateway.split("."))
        # Rebuild the dotted-quad network address for the error message.
        network_address = []
        ip_octet = ""
        for i in bin_network_address:
            ip_octet += i
            if len(ip_octet) == 8:
                network_address.append(str(int(ip_octet, 2)))
                ip_octet = ""
        str_network_address = ".".join(network_address)
        # 8
        # do not include the network address and the broadcast address in the check
        if int(bin_gateway, 2) not in range(int(bin_network_address, 2) + 1, int(bin_broadcast_network_address, 2)):
            errors.append(f"{ipv4_gateway} gateway address is out of {str_network_address} network's "
                          f"addresses scope.\n")
        # 9: gateway must differ from the host address.
        if bin_address == bin_gateway:
            errors.append(f"{ipv4_address} ip-address and {ipv4_gateway} gateway shouldn't be the same.\n")
    if errors:
        print(''.join(errors))
        return False
    return True
if __name__ == "__main__":
    # Example invocation with a /30 subnet; prints errors (if any) as a side
    # effect of _validate_args and keeps the boolean result.
    data = dict(
        ipv4_address='192.168.20.120',
        ipv4_netmask='255.255.255.252',
        ipv4_gateway='192.168.20.121',
        mac_address='00:00:00:00:00:00')
    result = _validate_args(**data)
| true |
43ecf60dee6c4c8b49f4b168b4c4b02f4e59f488 | Python | JohnNavi/sandbox | /GlobalSearch/globalTest.py | UTF-8 | 6,226 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
"""
Script to perform global database search for ips
Copyright 2014, NaviSite, Inc.
"""
#from sqllib import sqlcld
import sys
import psycopg2
import sys
import re
def search_for_ip_addr(cursor, ip_table_columns, ip_addr):
"""
Search the provided list of tables/column/data_type table for the provided ip_addr
args
cursor - handle to the database
ip_tables - list of tuples that contains the table name / column name / column data type
for all tables that contained 'ip' in their name and column that contains
ip data
ip_addr - ip address that we will be searching the provided table list for
returns
list of tables for which we find an ip match
"""
ip_table_match = []
# rip through the provided table / column list and look for provided ip address
for table_column in ip_table_columns:
cursor.execute("SELECT * FROM " + table_column[0] + " WHERE " + table_column[1] + " = '" + ip_addr + "';" )
records = cursor.fetchall()
# if we find a match save it off
if records:
ip_table_match.append(table_column)
print "@@@@@@@@@@@@@@@@@@@@@@@@@@"
print ip_table_match
print "@@@@@@@@@@@@@@@@@@@@@@@@@@"
return ip_table_match
def search_ip_tables(cursor, tables):
    """
    Search a provided list of database tables for a columns that contains 'ip'
    and also where 'ip' column is of the correct 'string' type (ip values are
    stored in the database as strings).

    args
        cursor - handle to the database
        tables - list of tables containing 'ip' in their name

    returns
        list of tuples that describe the table, column name and column data type
        for table columns that contain 'ip' and are of the expected ip data type
    """
    ip_data_type = 1043 # postgres type OID for 'character varying' (ip columns)

    table_column_names = []

    # make SQL query to get list of columns in provided table
    # NOTE(review): table names are interpolated into the SQL; they come from
    # get_ip_tables (information_schema) so this is internal data, but keep in
    # mind if the source of `tables` ever changes
    for table in tables:
        # LIMIT 0 returns no rows but still fills cursor.description with the
        # column metadata for the table
        cursor.execute("SELECT * FROM " + table + " LIMIT 0;")

        # list to hold column names and associated data type for that column
        column_names = []

        # extract column names and associated data type from tables
        # (description[0] = column name, description[1] = type OID)
        for description in cursor.description:
            column_names.append( (description[0], description[1]) )

        # rip column list and find the column names that contain 'ip'
        for column_name in column_names:
            contains_ip = column_name[0].lower().find("ip")
            is_correct_data_type = column_name[1] == ip_data_type
            # keep the column if its name contains 'ip' anywhere and the
            # column is of the expected varchar data type
            if contains_ip != -1 and is_correct_data_type:
                table_column_names.append( (table, column_name[0], column_name[1]) )

    return table_column_names
def get_ip_tables(cursor):
"""
Get list of tables from proddb that contain columns with 'ip'. NOTE - method will filter
out non 'cl' and 'cladm' tables from the return list. List will also contain only
DISTINCT table names (duplicate table names will be filtered out).
args
None.
returns
List of table names from proddb that contain columns with 'ip' in thier name.
"""
ip_tables = []
potential_issue = False
# make SQL query and get result from cursor
cursor.execute("SELECT DISTINCT table_name FROM information_schema.columns WHERE column_name LIKE '%ip%';")
records = cursor.fetchall()
# rip through the result of SQL query
for record in records:
# first element of the returned tuple is the string with the resulting table name of our query
record_string = record[0]
# remove formatting characters so we are left with just the table name
match = re.search(r'\w*', record_string)
# save off the table name once we have removed the formatting characters and if
# table is a cl or cladm table
if match and match.group().lower().startswith('cl'):
# save off table name for later processing
ip_tables.append(match.group())
#print(match.group())
else:
print "Table filtered out: " + record_string
return ip_tables
def connect_to_db():
    """
    Connects to local database (currently setup for local test and checkout only).

    args
        none.

    returns
        cursor - handle to the local prod database.
    """
    #Define our connection string
    # NOTE(review): credentials are hard-coded - acceptable for this local
    # test tool only; move to config/environment before any wider use
    conn_string = "host='localhost' dbname='clouddb' user='cloud' password='jrdlocaldb'"

    # print the connection string we will use to connect
    print "Connecting to database\n ->%s" % (conn_string)

    # get a connection, if a connect cannot be made an exception will be raised here
    conn = psycopg2.connect(conn_string)

    # conn.cursor will return a cursor object, you can use this cursor to perform queries
    # NOTE(review): the connection object itself is neither returned nor
    # closed, so the caller cannot commit or clean up - fine for a read-only
    # one-shot script
    cursor = conn.cursor()
    print "Connected!\n"
    return cursor
def main(ip_addr):
    """
    main entry point for global ip search.

    args
        ip_addr - ip address (string) to search every candidate table for
    returns
        None (results are printed by search_for_ip_addr)
    """
    # connect to the database
    cursor = connect_to_db()
    # get list of tables in database that contain columns with the text 'ip'
    ip_tables = get_ip_tables(cursor)
    # narrow those tables down to the (table, column) pairs that hold ip data,
    # then search each of them for the provided address
    ip_table_columns = search_ip_tables(cursor, ip_tables)
    search_for_ip_addr(cursor, ip_table_columns, ip_addr)
# CLI entry point: first positional argument is the ip address to search for
if __name__ == "__main__":
    ipAddr = sys.argv[1]
    main(ipAddr)
| true |
84a88d490e5b04b1fce10b122f69f337eb12c254 | Python | scolemann/CanisLeptons | /v1/mlmodel/mlclassfier.py | UTF-8 | 5,026 | 2.734375 | 3 | [] | no_license | '''
__author__ = "Canis Leptons"
__copyright__ = "Copyright (C) 2018 Canis Leptons"
__license__ = "Private@ Canis Leptons"
__version__ = "1.0"
'''
# This class contains all the functions to create different type of bars (candlestick) such as Volume bar
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import classification_report, accuracy_score
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import load_model
from sklearn.externals import joblib
from .split import Split
class MLClassifier(object):
    """Thin dispatcher that routes a method name to the matching
    module-level classifier builder."""

    def __init__(self):
        # stateless - nothing to initialise
        pass

    ##############################################################
    ######## Generic function to call ML algo ####################
    ##############################################################
    def ml_classfr(self, X, y, avgU, method, saveModel=False):
        """Build and evaluate the classifier selected by ``method``.

        method is one of 'LR', 'SGD', 'LSTM' or 'RF'; any other value falls
        through every branch and returns None, exactly like the original
        if/elif chain.
        """
        if method == 'RF':
            return model_randomForest(X, y, saveModel, avgU)
        if method == 'LSTM':
            return model_lstm(X, y)
        if method == 'SGD':
            return sgd_classfr(X, y)
        if method == 'LR':
            return lr_classfr(X, y)
##############################################################
######## Specific function to call ML algo ####################
##############################################################
def lr_classfr(X, y):
    """Fit a LogisticRegression on a 70/30 split and return [model, accuracy].

    NOTE: the original called an undefined helper createData_TrainTest and
    would raise NameError; this now uses Split().train_test_split, consistent
    with model_randomForest below.
    """
    ml_model = [ None, float("-inf") ]
    splitter = Split()
    train_X, train_y, valid_X, valid_y = splitter.train_test_split(X, y, 0.7) # split training-testing data
    # Create the LogisticRegression object
    clf = LogisticRegression()
    clf = clf.fit(train_X, train_y)
    # Evaluate the learned model on the validation set
    accuracy = clf.score(valid_X, valid_y)
    ml_model = [ clf, accuracy ]
    return ml_model
def sgd_classfr(X, y):
    """Fit an SGDClassifier on a 70/30 split and return [model, accuracy].

    NOTE: the original called an undefined helper createData_TrainTest and
    would raise NameError; this now uses Split().train_test_split, consistent
    with model_randomForest below.
    """
    ml_model = [ None, float("-inf") ]
    splitter = Split()
    train_X, train_y, valid_X, valid_y = splitter.train_test_split(X, y, 0.7) # split training-testing data
    # Create the Stochastic GRadient Classifier object
    clf = SGDClassifier()
    clf = clf.fit(train_X, train_y)
    # Evaluate the learned model on the validation set
    accuracy = clf.score(valid_X, valid_y)
    ml_model = [ clf, accuracy ]
    return ml_model
def model_lstm(X, y):
    """Build, train and evaluate a 4-layer stacked LSTM regressor.

    X is the training input (assumed shaped (samples, timesteps, 1) -
    input_shape is taken from X.shape[1]); y is the target series.
    Returns the Keras evaluation score on the training data.

    NOTE: the original referenced undefined names trainX/trainY and an
    unassigned 'model' variable (NameError at runtime); both are fixed to
    use the X/y arguments and the lstm_model variable consistently.
    """
    # Initialising the LSTM
    lstm_model = Sequential()
    # Adding the first LSTM layer and some Dropout regularisation
    lstm_model.add(LSTM(units = 50, return_sequences = True, input_shape = (X.shape[1], 1)))
    lstm_model.add(Dropout(0.2))
    # Adding a second LSTM layer and some Dropout regularisation
    lstm_model.add(LSTM(units = 50, return_sequences = True))
    lstm_model.add(Dropout(0.2))
    # Adding a third LSTM layer and some Dropout regularisation
    lstm_model.add(LSTM(units = 50, return_sequences = True))
    lstm_model.add(Dropout(0.2))
    # Adding a fourth LSTM layer and some Dropout regularisation
    lstm_model.add(LSTM(units = 50))
    lstm_model.add(Dropout(0.2))
    # Adding the output layer
    lstm_model.add(Dense(units = 1))
    # Compiling the LSTM
    lstm_model.compile(optimizer = 'adam', loss = 'mean_squared_error')
    print(lstm_model.summary())
    # Fitting the LSTM to the Training set
    lstm_model.fit(X, y, epochs = 100, batch_size = 200, verbose = 1)
    # Saving the model so later runs can load instead of re-fitting
    lstm_model.save('lstm_model.h5')
    # Incase the fitting is taking time, we can comment the fit and save above
    # and directly load the model if it is available in the same folder:
    # lstm_model = load_model('lstm_model.h5')
    scores = lstm_model.evaluate(X, y, verbose=1, batch_size=200)
    return scores
def model_randomForest(X, y, saveModel, avgU=1.):
    """Fit a bagged random-forest classifier and return [model, accuracy].

    X, y      - feature matrix and labels
    saveModel - when True, dump the fitted model to trained_randomForest.sav
    avgU      - fraction of samples drawn per bag (BaggingClassifier
                max_samples); defaults to 1.0 (all samples)
    """
    ml_model = [ None, float("-inf") ]
    rf_split = Split()
    train_X, train_y, valid_X, valid_y = rf_split.train_test_split(X, y, 0.7) # split training-testing data

    # Build the ensemble: single-tree forests (no bootstrap) wrapped in a
    # 1000-estimator bagger, so the bagging layer supplies the averaging
    clf = RandomForestClassifier(n_estimators=1,criterion='entropy',bootstrap=False,class_weight='balanced_subsample')
    clf = BaggingClassifier(base_estimator=clf,n_estimators=1000,max_samples=avgU,max_features=1.)
    clf = clf.fit(train_X, train_y)
    if (saveModel):
        filename = 'trained_randomForest.sav'
        joblib.dump(clf,filename)
    # Evaluate the learned model on the validation set
    accuracy = clf.score(valid_X, valid_y)
    ml_model = [ clf, accuracy ]
    return ml_model
| true |
ca576fd9d4af212a53ac9223e58460b19bd37e68 | Python | farma11/NegoAnalysis_forGENIUS | /classes/bids.py | UTF-8 | 558 | 3.296875 | 3 | [] | no_license | # coding: UTF-8
import re
class Bid(object):
    """Container for the issue count and per-issue value sizes of a bid."""

    def __init__(self):
        # issueSize: number of issues; valueSize: value-size entries per issue
        self.issueSize, self.valueSize = 0, []
def divideValue(line):
    """Parse a "Bid[a: a1, b: b2, ]" string into its list of values.

    Returns ["a1", "b2", ...]; returns [] when *line* does not contain a
    Bid[...] section (the section must end with ", ]" to match).
    """
    ansValues = []
    # raw strings so the backslash escapes go to the regex engine rather
    # than being (invalid) Python string escapes
    bid = re.search(r"Bid\[(.*), \]", line)
    if bid is not None:
        # compile the per-value pattern once instead of on every iteration
        value_pattern = re.compile(r": (.+)")
        for value in bid.group(1).split(','):
            v = value_pattern.search(str(value))
            if v is not None:
                ansValues.append(v.group(1))
    return ansValues
| true |
4ce83ddca0ad90ad3870dcf64ea81bc53c5bafc0 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_138/1123.py | UTF-8 | 1,333 | 3.6875 | 4 | [] | no_license | """
For each test case, output one line containing "Case #x: y z",
where x is the test case number (starting from 1),
y is the number of points Naomi will score if she plays Deceitful War optimally,
and z is the number of points Naomi will score if she plays War optimally.
"""
def kenChoose(naomiBlock, ken):
    """Remove and return Ken's counter-play against *naomiBlock*.

    *ken* is kept sorted ascending by the caller: take the smallest block
    that beats naomiBlock, otherwise sacrifice the smallest block overall.
    Mutates *ken* in place.
    """
    for idx, weight in enumerate(ken):
        if weight > naomiBlock:
            return ken.pop(idx)
    return ken.pop(0)
# Driver: read Code Jam cases from input.in, write answers to output.out.
infile = open('input.in', 'r')
outfile = open('output.out','w')
numCases = int(infile.readline())
for case in range(numCases):
    # each case: block count, Naomi's weights, Ken's weights
    numBlocks = int(infile.readline())
    naomi = infile.readline().split()
    ken = infile.readline().split()
    for i in range(numBlocks):
        naomi[i] = float(naomi[i])
        ken[i] = float(ken[i])
    # keep both lists sorted ascending (kenChoose relies on this)
    naomi.sort()
    ken.sort()
    warnaomi = list(naomi)
    warken = list(ken)
    #play war: Naomi plays her biggest block each turn; she scores when
    #Ken's optimal counter is still smaller
    war = 0
    for i in range(numBlocks):
        naomiChosen = warnaomi.pop()
        if kenChoose(naomiChosen,warken) < naomiChosen:
            war +=1
    #play deceitful war: while Ken could still win some pairing, burn
    #Naomi's smallest block by lying that it is just under Ken's largest
    def loseCond():
        # True while some of Ken's i-th smallest beats Naomi's i-th smallest
        for i in range(len(ken)):
            if ken[i] > naomi[i]:
                return True
        return False
    lose = loseCond()
    while lose:
        naomiChosen = naomi.pop(0)
        naomiTold = ken[-1]-0.000001
        kenChoose(naomiTold,ken)
        lose=loseCond()
    # every block Naomi still holds is a guaranteed deceitful-war point
    deceitful = len(naomi)
    outfile.write("Case #{x}: {y} {z}\n".format(x=case+1, y=deceitful, z=war))
infile.close()
outfile.close()
| true |
b124460a7fa4568c0b812006ee2650a1690bc4a0 | Python | tlake/project-euler | /pe02/02.py | UTF-8 | 177 | 2.96875 | 3 | [] | no_license | n1 = n2 = 1
# Project Euler 2: sum the even-valued Fibonacci terms not exceeding 4,000,000.
# n1/n2 hold the two previous terms (seeded to 1, 1 above); n3 is the next term.
n3 = n1 + n2
sums = 0
# walk the Fibonacci sequence, accumulating only the even terms
while n3 <= 4000000:
    if not n3%2:
        sums = n3 + sums
        n1 = n2
        n2 = n3
        n3 = n1 + n2
    else:
        n1 = n2
        n2 = n3
        n3 = n1 + n2
# Python 2 print statement
print sums
| true |
d451a9f2f8095d7d5b97aac434071372841cefd9 | Python | jadenpadua/Data-Structures-and-Algorithms | /bruteforce/listSum.py | UTF-8 | 210 | 4.0625 | 4 | [] | no_license | #calculate the sum of a list in python
def sum_of_list(list) :
    """Return the sum of all elements in *list*."""
    total = 0
    # accumulate over the elements directly instead of indexing by position
    for value in list:
        total = total + value
    return total
# NOTE(review): this rebinds the builtin name `list`; harmless in this short
# demo script but worth renaming if the file ever grows
list = [2,3,6,8,2,6]
print(sum_of_list(list))
| true |
81cf2764f095fd9185b6e8af1694bf3992d77fab | Python | jemarsha/leetcode_shenanigans | /Recursion_Problems/Powerset.py | UTF-8 | 1,072 | 3.90625 | 4 | [] | no_license | class PowerSet:
"""Class to generate the power set."""
    def __init__(self):
        # accumulator list; note generate_power_set builds its own local
        # result list, so this attribute is not used by it
        self.result = []
def generate_power_set(self, nums):
results = []
self.dfs(sorted(nums), 0, [], results)
return results
def dfs(self, nums, index, path, res):
res.append(path)
# print(path)
for i in range(index, len(nums)):
# print(nums[i])
self.dfs(nums, i + 1, path + [nums[i]], res)
# Demo: print the power set of [1, 2, 3] when run as a script
if __name__ =="__main__":
    s = PowerSet()
    li = [1, 2, 3]
    print(s.generate_power_set(li))
#Recursion trace for generate_power_set([1, 2, 3]) (depth-first with backtracking):
#f(0) path=[1]        2 elements left to loop over, index/i = 0
#f(1) path=[1, 2]     1 element left to loop over,  index/i = 1
#f(2) path=[1, 2, 3]  0 elements left to loop over, index/i = 2
#f(1) path=[1, 3]     0 elements left for index/i = 1, so this call is finished
#f(0) path=[2]        back in the first call's loop, now at index/i = 1
#f(1) path=[2, 3]     0 elements left to loop over,  index/i = 1
#f(0) path=[3]        0 elements left to loop over,  index/i = 2
c28820e07ae87f5768bd1dca5131dfe571273480 | Python | amiya912/PES1-PythonAssignment-SET1 | /program13.py | UTF-8 | 794 | 4.5625 | 5 | [] | no_license | '''Write a program to find the biggest of 4 numbers.
a)Read 4 numbers from user using Input statement.
b) extend the above program to find the biggest of 5 numbers.
(PS: Use IF and IF & Else, If and ELIf, and Nested IF) '''
# Read four integers from the user.
a=int(input('enter the first num: '))
b=int(input('enter the second num: '))
c=int(input('enter the third num: '))
d=int(input('enter the fourth num: '))
# if/elif chain: each branch holds when its number beats the other three.
# NOTE(review): `max` shadows the builtin max() - harmless in this short
# script but worth renaming.
if a>b and a>c and a>d:
    print('a:%d is the biggest number'%a)
    max=a
elif b>a and b>c and b>d:
    print('b:%d is the biggest number'%b)
    max=b
elif c>a and c>b and c>d:
    print('c:%d is the biggest number'%c)
    max=c
else:
    print('d:%d is the biggest number'%d)
    max=d
# Extend to a fifth number: only need to compare against the current max.
e=int(input('enter the fifth num: '))
if e>max:
    print('e:%d is the new max'%e)
# NOTE(review): the print on the next line passes max as a second argument
# to print() instead of applying the %d format - should be
# print('max is still %d' % max)
else:
print('max is still %d',max) | true |
532f1f4f588f28d527e61931c7500d5ea7028e24 | Python | srijitravi94/Page-Rank-Implementation | /GenerateInLinkCount.py | UTF-8 | 721 | 3.15625 | 3 | [] | no_license | def generateInLinkCount(fileName):
noInLink = []
inLinkDict = {}
file = open(fileName, "r").read()
links = file.splitlines()
for link in links:
pages = link.split()
inLinkDict[pages[0]] = len(pages[1:])
if(len(pages[1:]) == 0):
noInLink.append(pages[0])
return noInLink, inLinkDict
# Report source pages (no links) and the top-10 pages by link count for
# the two graph files G1.txt / G2.txt in the working directory.
G1, G1Dict = generateInLinkCount("G1.txt")
print("Number of pages with no InLinks(Sources) for G1 : " + str(len(G1)))
G2, G2Dict = generateInLinkCount("G2.txt")
print("Number of pages with no InLinks(Sources) for G2 : " + str(len(G2)))
print(sorted(G1Dict.items(), key=lambda x:x[1], reverse=True)[:10])
print(sorted(G2Dict.items(), key=lambda x:x[1], reverse=True)[:10])
| true |
1684a1b6a35534232f53f56e3996a5d60e8f0a12 | Python | bennames/AeroComBAT-Project | /Tutorials/Tutorial_2.py | UTF-8 | 2,110 | 3.0625 | 3 | [
"MIT"
] | permissive | # =============================================================================
# AEROCOMBAT TUTORIAL 2 - CQUADX AND AIRFOIL
# =============================================================================
# IMPORT SYSTEM PACKAGES
# ======================
import sys
import os
sys.path.append(os.path.abspath('..'))
# IMPORT AEROCOMBAT CLASSES
# =========================
from AeroComBAT.Structures import Node, MaterialLib, CQUADX
from AeroComBAT.Aerodynamics import Airfoil
# IMPORT NUMPY MODULES
# ====================
import numpy as np
import matplotlib.pyplot as plt
# Material Info
mat_lib = MaterialLib()
# Add an aluminum isotropic material (mat id 1, [E, nu, rho], thickness)
mat_lib.addMat(1, 'AL-2050', 'iso',[75.8, 0.33, 2.7e3], .15e-3)
# CQUADX 2D ELEMENT CREATION
# ==========================
# Create a node 1 object
n1 = Node(1,[0.,0.,0.])
# Create a node 2 object
n2 = Node(2,[2.,0.,0.])
# Create a node 3 object
n3 = Node(3,[2.,3.,0.])
# Create a node 4 object
n4 = Node(4,[0.,5.,0.])
# Create a CQUADX element from the four nodes, using material 1
elem1 = CQUADX(1,[n1,n2,n3,n4],1,mat_lib)
# Print a summary of the element
elem1.printSummary(nodes=True)
# AIRFOIL OUTER MOLD LINE VALIDATION
# ==================================
# Initialize a chord length of 1
c = 1.
# Create an airfoil object with a 'box' profile
af1 = Airfoil(c,name='box')
# Generate a set of non-dimensional x-coordinates
x = np.linspace(-.5,.5,50)
# Create the upper and lower box airfoil curves
xu,yu,xl,yl = af1.points(x)
# Create a matplotlib figure
plt.figure(num=1)
plt.plot(xu,yu)
# NOTE(review): plt.hold was deprecated and removed in matplotlib 3.0;
# this tutorial only runs on old matplotlib versions as-is
plt.hold(True)
plt.plot(xl,yl)
plt.axes().set_aspect('equal', 'datalim')
plt.xlabel('x coordinate along the airfoil')
plt.ylabel('y coordinate along the airfoil')
plt.title('Box airfoil profile')
plt.hold(False)
# Create a NACA2412 airfoil profile
af2 = Airfoil(c,name='NACA2412')
# Generate a set of non-dimensional x-coordinates
x = np.linspace(0,1.,500)
# Create the upper and lower airfoil curves
xu,yu,xl,yl = af2.points(x)
# Create a matplotlib figure
plt.figure(num=2)
plt.plot(xu,yu)
plt.hold(True)  # NOTE(review): deprecated, see above
plt.plot(xl,yl)
plt.hold(False)
plt.axes().set_aspect('equal', 'datalim') | true |
f21df602f44444a371c759a8cbe3ee60fcd8adca | Python | julioteleco/jesse | /jesse/indicators/high_pass.py | UTF-8 | 1,185 | 2.828125 | 3 | [
"MIT"
] | permissive | import math
from typing import Union
import numpy as np
from jesse.helpers import get_candle_source
def high_pass(candles: np.ndarray, period: int = 48, source_type: str = "close", sequential: bool = False) -> Union[
    float, np.ndarray]:
    """
    High Pass Filter indicator by John F. Ehlers

    :param candles: np.ndarray
    :param period: int - default=48
    :param source_type: str - default: "close"
    :param sequential: bool - default=False

    :return: float | np.ndarray (full series when sequential, else last value)
    """
    if not sequential and len(candles) > 240:
        candles = candles[-240:]

    source = get_candle_source(candles, source_type=source_type)

    # the filter coefficients depend only on `period`, so compute them once
    # instead of recomputing them on every loop iteration (as before)
    alpha_arg = 2 * math.pi / (period * 1.414)
    alpha1 = (math.cos(alpha_arg) + math.sin(alpha_arg) - 1) / math.cos(alpha_arg)
    c1 = math.pow(1.0 - alpha1 / 2.0, 2)
    c2 = 2 * (1 - alpha1)
    c3 = math.pow(1 - alpha1, 2)

    hpf = np.full_like(source, 0)
    # second-order recursive filter: needs two previous samples, so the
    # first two outputs stay at 0 (same as the original `i < 2` guard)
    for i in range(2, source.shape[0]):
        hpf[i] = c1 * (source[i] - 2 * source[i - 1] + source[i - 2]) + c2 * hpf[i - 1] - c3 * hpf[i - 2]

    if sequential:
        return hpf
    else:
        return None if np.isnan(hpf[-1]) else hpf[-1]
| true |
3f30c0ee4bd612e09c0bbf32498a16e46f882ada | Python | beta-yumatsud/python-beginner | /section3.py | UTF-8 | 2,137 | 4.3125 | 4 | [] | no_license | import math
num = 1
name = 'mike'
is_ok = True

print(num, type(num))
print(name, type(name))
print(is_ok, type(is_ok))

# rebinding a name to a value of a different type is allowed (dynamic typing)
num = name
print(num, type(num))

name = '1'
# explicit type conversion
new_num = int(name)
print(new_num, type(new_num))
# annotations such as `num: int` are possible, but as shown above a name can
# still be rebound to a value of another type

# when sep is not given, print() separates arguments with a single space
print('Hi', 'Mike', sep=',', end='\n')

print(17 / 3)
# // keeps only the integer part of the division
print(17 // 3)

# helpers like round() and the math module are also available
print(round(3.141515151, 2))
print(math.sqrt(25))
# help() prints a package's documentation
#print(help(math))

# strings may use single or double quotes
print('say "I don\'t know"')
# prefixing a string with r treats it as raw data (no escape processing)
print(r'C:\name\name')
# a backslash right after """ suppresses the leading newline
print("######")
print("""\
line1
line2
line3\
""")
print("######")

# adjacent string literals are concatenated automatically
literal = ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
           'bbbbbbbbbbbbbbbbbbbbbbbbbbbbb')
print(literal)

# indexing and slicing both work; assigning to an index is not allowed
word = 'python'
print(word[0])
print(word[-1])
print(word[0:2])
word = 'js' + word[4:]
print(word)

print(len(word))

# strings come with many convenient methods
s = 'My name is Mike. Hi, Mike.'
is_start = s.startswith('Mi')
print(is_start)
print(s.find("Mike"))
print(s.count("Mike"))
print(s.replace("Mike", 'Job'))

# format() fills empty braces positionally, same as using {0} {1} {2}
print('a are {} {} {}'.format(1, 2, 3))
print('My name is {name} {family}.'.format(name='Yuki', family='Matsuda'))
# since Python 3.6 the above can be rewritten with f-strings
name = 'Yuki'
family = 'Matsuda'
print(f'My name is {name} {family}!!')
| true |
d89cf0e58db844f10364566ca6f351d6451f9b40 | Python | meghaahuja0904/acadview-python | /Assignment12.py | UTF-8 | 1,152 | 3.859375 | 4 | [] | no_license | #question1
# question 1: a/(a-3) divides by zero first, so ZeroDivisionError is raised
# before `raise handle` (handle is undefined and would be a NameError) runs
try:
    a=3
    if(a<4):
        a=a/(a-3)
        raise handle
except ZeroDivisionError:
    print("zero division error")
#it is zero division error
# question 2: NOTE(review) - `import megha` raises ImportError, which the
# broad `except Exception` catches, so "index error" prints even though
# l[3] is never reached; the message is misleading
try:
    import megha
    l=[1,2,3]
    print(l[3])
except Exception:
    print("index error")
#it is index error
# question 3: raise and catch a NameError explicitly
try:
    raise NameError("hi there")
except NameError:
    print("An exception")
# question 4: try/except with an else clause for the success path
def abyB(a ,b):
    """Print (a+b)/(a-b), or a message when a == b (division by zero)."""
    try:
        c =((a+b) / (a-b))
    except ZeroDivisionError:
        print("a/b result in 0")
    else:
        print(c)
#driver program to test above function
abyB(2.0 ,3.0)
abyB(3.0 ,3.0)
# question 5: demonstrate ImportError and ValueError handling
#import error
try:
    import Megha
except ImportError:
    print("enter a import file")
#value error
try:
    a = int(input("enter no"))
except ValueError:
    print("please enter Int")
# question 6: custom exception - loop until an age >= 18 is entered
class Ageerror(Exception):
    pass
a=True
while(a):
    try:
        age=int(input("enter age"))
        if(age>=18):
            a=False
            raise Ageerror
        else:
            print(age)
    except Ageerror:
        print("age is greater than 18")
    except ValueError:
        print("only int allowed")
| true |
babd3e5042805dd56927e7cc4b4b3a0c6c0ada62 | Python | njcuk9999/g_clustering | /GClusterSim/astrokin.py | UTF-8 | 8,285 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2018-03-01 at 16:58
@author: cook
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
from tqdm import tqdm
import warnings
import time
# =============================================================================
# Define variables
# =============================================================================
# -----------------------------------------------------------------------------
# =============================================================================
# Define functions
# =============================================================================
def convert_XYZ(ra, dec, distance):
    """
    Convert ra, dec and distance to galactic cartesian X, Y and Z

    :param ra: numpy array of floats, right ascension in degrees
    :param dec: numpy array of floats, declination in degrees
    :param distance: numpy array of floats, distance in parsecs

    :return x: numpy array of floats, X in parsecs
    :return y: numpy array of floats, Y in parsecs
    :return z: numpy array of floats, Z in parsecs

    adapted from:
        https://github.com/dr-rodriguez/uvwxyz/blob/master/uvwxyz/uvwxyz.py
    """
    # get coordinate array (inputs are ICRS ra/dec)
    coords = SkyCoord(ra=ra, dec=dec, frame='icrs', unit='deg')
    # convert to galactic longitude and latitude (in radians)
    l = coords.galactic.l.radian
    b = coords.galactic.b.radian
    # spherical -> cartesian projection using distance and (l, b)
    x = distance * np.cos(b) * np.cos(l)
    y = distance * np.cos(b) * np.sin(l)
    z = distance * np.sin(b)
    # return x, y, z
    return x, y, z
def convert_ra_dec_distance(x, y, z):
    """
    Convert galactic cartesian x, y and z into ra, dec and distance
    (inverse of convert_XYZ)

    :param x: numpy array of floats, x in parsecs
    :param y: numpy array of floats, y in parsecs
    :param z: numpy array of floats, z in parsecs

    :return ra: numpy array of floats, right ascension in degrees
    :return dec: numpy array of floats, declination in degrees
    :return distance: numpy array of floats, distance in parsecs
    """
    # get distance (vector magnitude)
    distance = np.sqrt(x**2 + y**2 + z**2)
    # get galactic l and b in radians (cartesian -> spherical)
    lrad = np.arctan2(y, x)
    brad = np.arcsin(z/distance)
    # get coordinate array in the galactic frame
    coords = SkyCoord(lrad, brad, frame='galactic', unit=u.rad)
    # convert to ICRS ra and dec
    ra = coords.icrs.ra.deg
    dec = coords.icrs.dec.deg
    # return ra, dec, distance
    return ra, dec, distance
def convert_uvw(ra, dec, distance, pmra, pmde, rv):
    """
    Convert observed quantities to galactic space velocities U, V, W

    adapted from:
        https://github.com/dr-rodriguez/uvwxyz/blob/master/uvwxyz/uvwxyz.py

    :param ra: numpy array of floats, right ascension in degrees
    :param dec: numpy array of floats, declination in degrees
    :param distance: numpy array of floats, distance in parsecs
    :param pmra: numpy array of floats, proper motion (right ascension) in
                 mas/yr
    :param pmde: numpy array of floats, proper motion (declination) in mas/yr
    :param rv: numpy array of floats, radial velocity in km/s

    :return: vu, vv, vw - numpy arrays of floats, U, V, W velocities in km/s
    """
    # fixed rotation matrix from equatorial to galactic axes
    # (presumably the Johnson & Soderblom 1987 T matrix - confirm if reused)
    T = np.array([[-0.054875560, -0.87343709, -0.48383502],
                  [+0.494109430, -0.44482963, +0.74698224],
                  [-0.867666150, -0.19807637, +0.45598378]])
    # k converts (arcsec/yr * pc) products to km/s (1 AU/yr in km/s)
    k = (1 * u.AU/u.yr).to(u.km/u.s).value
    # work out trigs
    cosdec = np.cos(np.deg2rad(dec))
    sindec = np.sin(np.deg2rad(dec))
    cosra = np.cos(np.deg2rad(ra))
    sinra = np.sin(np.deg2rad(ra))
    # per-star rotation from (rv, pmra, pmde) directions to equatorial axes
    # (0.0 * ra keeps that entry array-shaped to match the inputs)
    A = np.array([[+cosra * cosdec, -sinra, -cosra * sindec],
                  [+sinra * cosdec, +cosra, -sinra * sindec],
                  [+sindec, 0.0 * ra, +cosdec]])
    # combined transformation
    TA = T @ A
    # velocity components: radial plus the two tangential (pm in mas -> arcsec)
    vec1 = rv
    vec2 = k*(pmra/1000.0) * distance
    vec3 = k*(pmde/1000.0) * distance
    # get the UVW array (manual matrix-vector products over the star arrays)
    vu = TA[0, 0] * vec1 + TA[1, 0] * vec2 + TA[2, 0] * vec3
    vv = TA[0, 1] * vec1 + TA[1, 1] * vec2 + TA[2, 1] * vec3
    vw = TA[0, 2] * vec1 + TA[1, 2] * vec2 + TA[2, 2] * vec3
    # return U, V and W
    return vu, vv, vw
def convert_xyzuvw(ra, dec, distance, pmra, pmde, rv):
    """Bundle the position (XYZ) and velocity (UVW) conversions into a
    single flat 6-tuple (x, y, z, vu, vv, vw)."""
    position = convert_XYZ(ra, dec, distance)
    velocity = convert_uvw(ra, dec, distance, pmra, pmde, rv)
    return position + velocity
def convert_ra_dec_distance_motion(x,y,z,vu,vv,vw):
    """
    Convert cartesian position and velocity (x, y, z, U, V, W) back to
    observables (ra, dec, distance, pmra, pmde, rv) - inverse of
    convert_xyzuvw

    :param x, y, z: numpy arrays of floats, position in parsecs
    :param vu, vv, vw: numpy arrays of floats, velocities in km/s

    :return: ra [deg], dec [deg], distance [pc], pmra [mas/yr],
             pmde [mas/yr], rv [km/s]
    """
    # get ra, dec and distance
    ra, dec, distance = convert_ra_dec_distance(x, y, z)
    # same fixed equatorial->galactic rotation as convert_uvw
    T = np.array([[-0.054875560, -0.87343709, -0.48383502],
                  [+0.494109430, -0.44482963, +0.74698224],
                  [-0.867666150, -0.19807637, +0.45598378]])
    k = (1 * u.AU/u.yr).to(u.km/u.s).value
    # work out trigs
    cosdec = np.cos(np.deg2rad(dec))
    sindec = np.sin(np.deg2rad(dec))
    cosra = np.cos(np.deg2rad(ra))
    sinra = np.sin(np.deg2rad(ra))
    # get A (same per-star rotation as in convert_uvw)
    A = np.array([[+cosra * cosdec, -sinra, -cosra * sindec],
                  [+sinra * cosdec, +cosra, -sinra * sindec],
                  [+sindec, 0.0 * ra, +cosdec]])
    # get the TA array
    TA = T @ A
    # invert the combined transformation
    iTA = np.linalg.inv(TA.T).T
    # get the vec array using UVW = (TA).VEC --> VEC = (iTA).UVW
    vec1 = iTA[0, 0] * vu + iTA[1, 0] * vv + iTA[2, 0] * vw
    vec2 = iTA[0, 1] * vu + iTA[1, 1] * vv + iTA[2, 1] * vw
    vec3 = iTA[0, 2] * vu + iTA[1, 2] * vv + iTA[2, 2] * vw
    # recover rv directly and undo the k*distance scaling (arcsec -> mas)
    rv = vec1
    pmra = (vec2/(k * distance)) * 1000
    pmde = (vec3/(k * distance)) * 1000
    # return
    return ra, dec, distance, pmra, pmde, rv
def convert(**kwargs):
    """Dispatch between the two coordinate conversions based on which
    keyword arguments were supplied.

    Supply EITHER the observational set (ra, dec, distance, pmra, pmde, rv)
    or the cartesian set (x, y, z, vu, vv, vw); the other set is computed
    and returned.

    :raises ValueError: if both sets, or neither set, are fully supplied
    """
    set1 = ['ra', 'dec', 'distance', 'pmra', 'pmde', 'rv']
    set2 = ['x', 'y', 'z', 'vu', 'vv', 'vw']
    # define which set we have (all() replaces the original manual &= loops)
    cond1 = all(key in kwargs for key in set1)
    cond2 = all(key in kwargs for key in set2)
    # generic error messages
    emsg2 = "\n\tMust define either: "
    emsg3 = "\n\t\t{0}".format(', '.join(set1))
    emsg4 = "\n\tor"
    emsg5 = "\n\t\t{0}".format(', '.join(set2))
    # if cond1 is true and cond2 is true we have too much information
    if cond1 and cond2:
        emsg1 = "\n Too many parameters defined."
        raise ValueError(emsg1 + emsg2 + emsg3 + emsg4 + emsg5)
    elif cond1:
        args = [', '.join(set2), ', '.join(set1)]
        print("Calculating {0} from {1}".format(*args))
        return convert_xyzuvw(**kwargs)
    elif cond2:
        args = [', '.join(set1), ', '.join(set2)]
        print("Calculating {0} from {1}".format(*args))
        return convert_ra_dec_distance_motion(**kwargs)
    else:
        emsg1 = "\n Not enough parameters defined."
        raise ValueError(emsg1 + emsg2 + emsg3 + emsg4 + emsg5)
def back_test():
    """Round-trip benchmark: convert N synthetic stars from observables to
    XYZUVW and back, printing the timing for each direction.

    Prints timings only; does not check the round-trip values numerically.
    """
    ntest = 100000
    # create synthetic inputs spread over plausible ranges
    ra_input = np.linspace(0, 20, ntest)
    dec_input = np.linspace(0, 20, ntest)
    dist_input = np.linspace(20, 30, ntest)
    pmra_input = np.linspace(-10, 10, ntest)
    pmde_input = np.linspace(-10, 10, ntest)
    rv_input = np.linspace(-5, 5, ntest)
    # forward: observables -> XYZUVW (timed)
    pointa = time.time()
    output2a = convert(ra=ra_input, dec=dec_input, distance=dist_input,
                       pmra=pmra_input, pmde=pmde_input, rv=rv_input)
    pointb = time.time()
    X2, Y2, Z2, U2, V2, W2 = output2a
    # backward: XYZUVW -> observables (timed)
    pointc = time.time()
    output2b = convert(x=X2, y=Y2, z=Z2, vu=U2, vv=V2, vw=W2)
    pointd = time.time()
    ra2, dec2, dist2, pmra2, pmde2, rv2 = output2b
    print("Timing for N={0}".format(ntest))
    print("\tra,dec,dist,pmra,pmde,rv --> XYZUVW = {0} s".format(pointb-pointa))
    print("\tXYZUVW --> ra,dec,dsit,pmra,pmde,rv = {0} s".format(pointd-pointc))
# =============================================================================
# Start of code
# =============================================================================
# Main code here
# Run the round-trip timing benchmark when executed as a script
if __name__ == "__main__":
    # ----------------------------------------------------------------------
    back_test()
# =============================================================================
# End of code
# =============================================================================
| true |
8ef1f12fc5cae8288a23b26a177ad65e6d60a9a1 | Python | sethmichel/AI-tic-tac-toe | /minimax.py | UTF-8 | 2,924 | 3.265625 | 3 | [] | no_license | import PlayGame
# scores each tree node will be
# x wins = 10, loses = -10, tie = 0
# so in the tree, each non-winning node will be 0
scores = {"X": 10, "O": -10, "tie": 0}
# Handles AI picking a spot. uses MiniMax
# called by Directory()
# pm board = 2d list of curr game state, mirror of kivy grid
def BestMove(board, openSpots):
    """Pick the AI ('O') move via minimax and apply it to the board.

    board is the 3x3 list-of-lists mirror of the kivy grid ('' = empty);
    openSpots is passed through to PlayGame.CheckWinner via minimax.
    Returns the flattened grid index (row*3 + col) of the chosen cell,
    needed by the caller to update the gridlayout.
    """
    bestScore = -5000  # sentinel below any reachable minimax score
    move = ()          # (row, col) of the best move found so far
    score = 0
    # score every free cell by simulating the move and minimaxing the rest
    for i in range(0, 3):
        for j in range(0, 3):
            if (board[i][j] == ''):                          # is the spot available?
                board[i][j] = "O"                            # try the move
                score = minimax(board, 0, False, openSpots)  # score the resulting position
                board[i][j] = ''                             # undo the trial move
                if (score > bestScore):                      # keep track of best score
                    bestScore = score
                    move = (i, j)
    board[move[0]][move[1]] = "O"  # commit the best move
    # (removed the leftover debug print that was marked "# testing")
    return 3 * move[0] + move[1]   # flattened index for the gridlayout update
# does the actual minimax algorithm to find the score of the next node
# called by BestMove()
# pm board = state of game board
# pm depth = int, depth of tree curr testing
# pm maxPlayer = bool, mini or max, player or computer (X or O)
def minimax(board, depth, maxPlayer, openSpots):
    """Recursively score a board position for the AI.

    board     - 3x3 list-of-lists game state ('' = empty, 'X'/'O' marks)
    depth     - current recursion depth (tracked but not used in scoring)
    maxPlayer - True when it is the AI's ('O') turn, False for the human ('X')
    openSpots - forwarded to PlayGame.CheckWinner
    Returns the score from the `scores` table for terminal positions,
    otherwise the best achievable score for the player to move.
    """
    result = PlayGame.CheckWinner(board, openSpots)
    score = 0
    bestScore = 0
    # terminal node: CheckWinner returns "X", "O" or "tie" when game is over
    if (result != ""):
        return scores[result]
    # else, find the best possible score over all available cells
    if (maxPlayer):
        # AI ('O'): maximise over all free cells
        bestScore = -5000
        for i in range(0, 3):
            for j in range(0, 3):
                if (board[i][j] == ''): # Is the spot available?
                    board[i][j] = "O" # go there
                    score = minimax(board, depth + 1, False, openSpots) # recursion, find the max move
                    board[i][j] = '' # undo the trial move before continuing
                    bestScore = max(score, bestScore)
        return bestScore
    # this is the non-ai player
    else:
        # human ('X'): assumed to play optimally, so minimise
        bestScore = 5000
        for i in range(0, 3):
            for j in range(0, 3):
                if (board[i][j] == ''):
                    board[i][j] = "X" # move the human here
                    score = minimax(board, depth + 1, True, openSpots) # recursion, find the mini move
                    board[i][j] = ''
                    bestScore = min(score, bestScore)
        return bestScore
| true |
f9c30ad017d4ccff2b7432512fdd68b949091db3 | Python | Manos-Ar/AdvancedDB | /src/2nd/repartition.py | UTF-8 | 2,114 | 2.75 | 3 | [] | no_license | #!/bin/python3
from pyspark.sql import SparkSession
from io import StringIO
from itertools import product
import csv
import sys
import time
# file collecting per-stage timing lines for the report
times = open('times_2nd.txt', 'w+')
# rewrap stdout as utf-8, line-buffered so the joined rows print cleanly
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
def map_genre(x):
    """Parse a 'movie_id,genre' CSV line into (movie_id, ("g", genre))."""
    fields = x.split(",")
    return (int(fields[0]), ("g", fields[1]))
def map_rating(x):
    """Parse a 'user_id,movie_id,rating,timestamp' CSV line into
    (movie_id, ("r", (user_id, rating, timestamp)))."""
    user_str, movie_str, rating_str, stamp = x.split(",")[:4]
    return (int(movie_str), ("r", (int(user_str), float(rating_str), stamp)))
def map_list(x):
    """Wrap a tagged record into (movie_id, (ratings, genres)), where the
    payload lands in a singleton list on the side matching its tag."""
    movie_id, (tag, payload) = x
    ratings = [payload] if tag == "r" else []
    genres = [] if tag == "r" else [payload]
    return (movie_id, (ratings, genres))
# (movie_id,(rating,movie))
def reducer(x, y):
    """Merge two (ratings, genres) pairs by concatenating the lists
    element-wise: ratings with ratings, genres with genres."""
    merged_ratings = x[0] + y[0]
    merged_genres = x[1] + y[1]
    return (merged_ratings, merged_genres)
# (list_ratings,list_movies)
def map_output(x):
    """Expand a joined (movie_id, (ratings, genres)) record into
    (movie_id, genre, user_id, rating, timestamp) rows -- the cross
    product of its ratings and genres.  Returns [] when either side is
    empty (a lazy generator otherwise, as flatMap accepts both)."""
    movie_id, (ratings, genres) = x
    if not ratings or not genres:
        return []
    return ((movie_id, g, r[0], r[1], r[2]) for r, g in product(ratings, genres))
# --- Repartition join: genres (limited to 100 rows) joined with ratings ---
spark = SparkSession.builder.appName("repartition-join").getOrCreate()
sc = spark.sparkContext
start_time = time.time()
genres = sc.textFile('hdfs://master:9000/movie_data/movie_genres.csv')
rating = sc.textFile('hdfs://master:9000/movie_data/ratings.csv')
# Only the first 100 genre rows take part in the join.
movies = sc.parallelize(genres.map(map_genre).take(100))
rating = rating.map(map_rating)
# Tag both sides, group by movie_id, then emit the cross product per key.
output = rating.union(movies).map(map_list).reduceByKey(reducer).flatMap(map_output)
output_list = output.collect()
end_time = time.time()
times.write("Repartition: "+str(end_time-start_time)+'\n')
print(output_list)
# Dump the joined rows as a tab-separated report.
output_file = open("Repartition.txt", "w+")
output_file.write("Movie_id\tGenre\tUser_id\tRating\tTimestamp\n")
for line in output_list:
    for l in line:
        output_file.write("%s\t" %l)
    output_file.write("\n")
output_file.close()
times.close()
1edc2d40821035b4c9f7e2b9fcd637178671dc4b | Python | datamade/macoupin-budget | /data/add_fund_id_desc.py | UTF-8 | 1,727 | 2.640625 | 3 | [
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | import csv
from csvkit.sql import make_table, make_create_table_statement
from csvkit.unicsv import UnicodeCSVWriter, UnicodeCSVReader
from csvkit.table import Table
import sqlite3
import codecs
# Map of fund display names to their numeric identifiers.
FUNDS = {
    'General Fund': 1,
    'Health Fund': 4,
    'Highway Fund': 3,
    'Special Purpose Fund': 2,
}

def add_attrs(reader, curs):
    """Yield each CSV row with its Fund ID inserted at index 1 and, when
    the `description` table has a matching (Fund, Department) entry, the
    URL (index 6) and description (index 7) columns filled in."""
    for record in reader:
        record.insert(1, FUNDS[record[0].strip()])
        print(record[0], record[4])
        curs.execute('select Department_Description, URL from description where Fund = ? and Department = ?', (record[0], record[4]))
        match = curs.fetchone()
        if match and match[0] != 'None':
            record[7], record[6] = match
        yield record
def make_db(fname, tblname):
    """Load the CSV file *fname* into an in-memory SQLite table *tblname*.

    Uses csvkit's type inference to build the CREATE TABLE statement,
    inserts every row, and returns an open cursor on the database.
    NOTE(review): the `print x` statements below mean this module is
    Python 2 only.
    """
    conn = sqlite3.connect(':memory:')
    t = Table.from_csv(open(fname, 'rb'), name=tblname)
    sql_table = make_table(t)
    create_st = make_create_table_statement(sql_table)
    print create_st
    insert = sql_table.insert()
    curs = conn.cursor()
    curs.execute(create_st)
    headers = t.headers()
    print headers
    # Pair each row with the header names so rows insert by column name.
    rows = [dict(zip(headers, row)) for row in t.to_rows()]
    for row in rows:
        curs.execute(str(insert), row)
    return curs
if __name__ == '__main__':
    # Build a lookup table of department descriptions, then stream the
    # budget CSV through add_attrs() to produce the updated file.
    curs = make_db('macoupin-budget-update/moucoupin-budget-department-desc.csv', 'description')
    outp = open('macoupin-budget-update/macoupin-budget-2014-update.csv', 'wb')
    writer = UnicodeCSVWriter(outp)
    with open('macoupin-budget-update/macoupin-budget.csv', 'rb') as f:
        reader = UnicodeCSVReader(f)
        headers = reader.next()  # py2 iterator protocol
        headers.insert(1, 'Fund ID')  # keep header aligned with add_attrs output
        writer.writerow(headers)
        writer.writerows(add_attrs(reader, curs))
| true |
0f1d670632457b32f9fe59d06da86ba568c5889f | Python | matheusreis0/crud-products | /model/product.py | UTF-8 | 298 | 3.71875 | 4 | [] | no_license | class Product:
def __init__(self, id, name, price):
self.__id = id
self.__name = name
self.__price = price
def serialize(self):
return {
'id': int(self.__id),
'name': self.__name,
'price': float(self.__price)
}
| true |
76cb9b50a809a117cb5e34d04094e5e050c3debd | Python | SugarZ3ro/Internship-Spectrum | /Python task 1/prgm8.py | UTF-8 | 326 | 4.15625 | 4 | [] | no_license | # Q. no -> 8
#Function to print a pattern starting with x number of stars
def print_stars(x):
i=0
y=x
while(x>0):
print (" "*i,"* "*x,sep="")
x-=1
i+=1
j=2
x=y
while(j<=x):
print (" "*(y-2),"* "*j,sep="")
y=y-1
j+=1
#driver code
print_stars(5) | true |
f00357d3992ab3e9fddecb21dfee01f381a2edfd | Python | OleksiyPuzikov/very-simple-nle | /lib/xmeml/iter.py | UTF-8 | 16,474 | 2.75 | 3 | [] | no_license | #-*- encoding: utf-8 -*-
#
# This is an xmeml parser that tries to be super fast,
# using the lxml module for all xml stuff and python's
# efficient iterative parsing whenever possible.
#
# This leads to a dramatic decrease of both mem and cpu
# usage compared to the minidom api of the standard xmeml
# code.
#
# This module is not a full replacement though,
# and has a totally different api (it never made sense
# to keep it, since everything is done differently
# from the original parser.)
#
# (C) 2011 havard.gulldahl@nrk.no
# License: BSD
import lxml.etree as etree
AUDIOTHRESHOLD=0.0001
class Range(object):
    """A closed [start, end] interval that can grow to cover others.

    A Range built without an iterable is "empty" (start and end are
    None) until extend() is first called.  len() only works when the
    difference end - start is an int (Python requires __len__ to return
    an integer).
    """
    def __init__(self, iterable=None):
        if iterable is not None:
            self.start, self.end = iterable
        else:
            self.start = None
            self.end = None
    def __repr__(self):
        return "Range"+repr(self.get())
    def __str__(self):
        # Fixed: was misspelled __string__ (never invoked by str()) and
        # used an invalid %-format ('%.5(start)f'); with a mapping the
        # precision goes after the key: '%(start).5f'.
        return u'<Range: %(start).5f\u2013%(end).5f>' % vars(self)
    def __add__(self, other):
        self.extend( (other.start, other.end) )
        return self
    def __len__(self):
        if None in (self.start, self.end):
            raise TypeError("Range is not complete")
        return self.end-self.start
    def __eq__(self, other):
        return self.start == other.start and self.end == other.end
    def __iter__(self):
        for z in (self.start, self.end):
            yield z
    def extend(self, iterable):
        """Grow this range so it also covers the (start, end) pair."""
        start, end = iterable
        if self.start is None or start < self.start:
            self.start = start
        # Fixed: 'end > None' raises TypeError on Python 3 when this
        # range is still empty; treat an unset end as always extendable.
        if self.end is None or end > self.end:
            self.end = end
    def get(self):
        return (self.start, self.end)
    def overlaps(self, other):
        return other.start <= self.start <= other.end or \
               self.start <= other.start <= self.end
class Ranges(object):
    """An unordered collection of Range objects that merges overlapping
    members as they are added."""
    def __init__(self, range=None):
        # NOTE: the parameter name shadows the builtin; kept for API compat.
        self.r = []
        if range is not None:
            self.extend(range)
    def __repr__(self):
        return 'Ranges: ' + repr(self.r)
    def __str__(self):
        return u'<Ranges: %i ranges, totalling %.2d frames>' % (len(self.r),
                                                                len(self))
    def __add__(self, other):
        """Merge every member of *other* into this collection, in place."""
        for member in other.r:
            self.extend(member)
        return self
    def __len__(self):
        """Total frames covered, summed over all member ranges."""
        return sum(len(member) for member in self.r)
    def __iter__(self):
        return iter(self.r)
    def extend(self, otherrange):
        """Fold *otherrange* in: drop an exact duplicate (returns None),
        merge into the first overlapping member, or append (returns True)."""
        for member in self.r:
            if member == otherrange:
                return None
            if member.overlaps(otherrange):
                member.extend(otherrange)
                return True
        self.r.append(otherrange)
        return True
class BaseObject(object):
    """Shared base for *Item and File elements.

    Reads the element's <name> text and its <rate>/<timebase> value
    (converted to float) from the given xmeml subtree.
    """
    def __init__(self, tree):
        name_text = tree.findtext('name')
        timebase_text = tree.findtext('rate/timebase')
        self.name = name_text
        self.timebase = float(timebase_text)
class Item(BaseObject):
    """Base class for ClipItem, TransitionItem, GeneratorItem.

    Adds the NTSC flag, the timeline start/end frames and the element's
    id attribute on top of the name/timebase handled by BaseObject.
    """
    def __init__(self, tree):
        super(Item, self).__init__(tree)
        ntsc_text = tree.findtext('rate/ntsc', '')
        self.ntsc = (ntsc_text == 'TRUE')
        self.start = float(tree.findtext('start'))
        self.end = float(tree.findtext('end'))
        self.id = tree.get('id')
class TransitionItem(Item):
    """A <transitionitem>: a transition within a track.

    Parent: track.  Uses the alignment and effect subelements here, plus
    the rate/start/end/name handled by Item.
    """
    # <!ELEMENT transitionitem (name | rate | start | end | alignment | effect)*>
    def __init__(self, tree):
        super(TransitionItem, self).__init__(tree)
        # Alignment is one of: start, center, end, end-black, start-black.
        self.alignment = tree.findtext('alignment')
        self.effect = Effect(tree.find('effect'))
        self.duration = self.end - self.start
        # Frame at the temporal midpoint of the transition.
        self.centerframe = self.start + (self.duration / 2)
class ClipItem(Item):
    """
    Description: Encodes a clip in a track.
    Parent: track
    Subelements: +*name, +duration, +rate, +*start, +*end, link, syncoffset,
                *enabled, *in, *out, *masterclipid, *subclipmasterid,
                ismasterclip, *logginginfo, file, *timecode, *marker,
                *anamorphic, *alphatype, *alphareverse, *labels, *comments,
                sourcetrack, *compositemode, subclipinfo, *filter, stillframe,
                *stillframeoffset, *sequence, multiclip,mediadelay,
                subframeoffset, *mixedratesoffset,filmdata, pixelaspectratio,
                fielddominance, gamma, primarytimecode*, itemhistory
    Attribute: id
    Notes: Note that start, end, link, syncoffset, and enabled are
           subelements of clipitem, but not of clip.
           This class requires an lxml element (iterdescendants/xpath).
    """
    # (name | duration | rate | enabled | in | out | start | end | anamorphic | alphatype | alphareverse | compositemode | masterclipid | ismasterclip | labels | comments | stillframeoffset | sequence | subclipinfo | logginginfo | stillframe | timecode | syncoffset | file | primarytimecode | marker | filter | sourcetrack | link | subframeoffset | pixelaspectratio | fielddominance)
    def __init__(self, tree):
        super(ClipItem, self).__init__(tree)
        self.tree = tree
        # in/out are offsets inside the source media, in frames.
        self.inpoint = int(tree.findtext('in'))
        self.outpoint = int(tree.findtext('out'))
        if self.inpoint > self.outpoint:
            # clip is reversed, just flip it back
            self.inpoint, self.outpoint = self.outpoint, self.inpoint
        self.duration = self.outpoint-self.inpoint
        # start/end of -1 marks a clip edge buried inside a transition;
        # substitute the transition's centre frame.
        if self.start == -1.0: # start is within a transition
            self.start = self.getprevtransition().centerframe
        if self.end == -1.0: # end is within a transition
            self.end = self.getfollowingtransition().centerframe
        try:
            self.file = File.filelist[tree.find('file').get('id')]
        except AttributeError:
            #print self.name
            self.file = None # there might be a nested <sequence> instead of a file
        self.mediatype = tree.findtext('sourcetrack/mediatype')
        self.trackindex = int(tree.findtext('sourcetrack/trackindex'))
        self.linkedclips = [Link(el) for el in tree.iter('link')]
        self.isnestedsequence = tree.find('sequence/media') is not None
    def getfilters(self):
        """Return an Effect for every <effect> descendant of this clip."""
        return [ Effect(el) for el in self.tree.iterdescendants(tag='effect') ]
    def getlevels(self):
        """Return the 'audiolevels' Effect, or None if the clip has none."""
        for e in self.getfilters():
            if e.effectid == 'audiolevels': return e
        return None
    def getprevtransition(self):
        """TransitionItem for the nearest preceding sibling transition."""
        item = self.tree.xpath('./preceding-sibling::transitionitem[1]')[0]
        return TransitionItem(item)
    def getfollowingtransition(self):
        """TransitionItem for the nearest following sibling transition."""
        item = self.tree.xpath('./following-sibling::transitionitem[1]')[0]
        return TransitionItem(item)
    def audibleframes(self, threshold=AUDIOTHRESHOLD):
        """Return a Ranges of (start, end) chunks whose gain is at or
        above *threshold* (a linear gain float or a Volume), in sequence
        frame numbers; None for video clips.
        NOTE(review): assumes getlevels() finds an audiolevels effect --
        a clip without one would raise AttributeError on .parameters.
        """
        if not self.mediatype == 'audio': return None # is video
        if isinstance(threshold, Volume) and threshold.gain is not None:
            threshold = threshold.gain
        levels = self.getlevels()
        keyframelist = list(levels.parameters)
        if not len(keyframelist):
            # no list of params, use <value>
            # NOTE(review): levels.value is the raw findtext string; the
            # str > float comparison below only works on Python 2.
            if levels.value > threshold:
                return Ranges(Range( (self.start, self.end) ) )
            else:
                return Ranges()
        # add our subclip inpoint to the keyframelist if it's not in it already.
        #
        if self.inpoint < keyframelist[0][0]:
            keyframelist.insert(0, (self.inpoint, keyframelist[0][1]))
        else:
            i = 0
            while self.inpoint > keyframelist[i][0]:
                try:
                    if self.inpoint < keyframelist[i+1][0]:
                        # add inpoint keyframe with volume of next keyframe
                        #print ' add inpoint keyframe with volume of next keyframe'
                        keyframelist.insert(i+1, (self.inpoint, keyframelist[i+1][1]))
                except IndexError:
                    # all keyframes in keyframelist are _before_ inpoint
                    #print ' all keyframes in keyframelist are _before_ inpoint'
                    keyframelist.append((self.inpoint, keyframelist[i][1]))
                i = i + 1
            del i
        # add our sublicp outpoint to the keyframelist, too
        if self.outpoint > keyframelist[-1][0]:
            # last existing keyframe is earlier than outpoint, add last keyframe volume
            keyframelist.append((self.outpoint, keyframelist[-1][1]))
        else:
            i = len(keyframelist) - 1
            while self.outpoint < keyframelist[i][0]:
                try:
                    if self.outpoint > keyframelist[i-1][0]:
                        # add outpoint keyframe with volume of previous keyframe
                        #print ' add outpoint keyframe with volume of previous keyframe'
                        keyframelist.insert(i, (self.outpoint, keyframelist[i][1]))
                except IndexError:
                    # TODO: properly diagnose and fix this
                    #print self.name, keyframelist, i
                    raise
                i = i - 1
            del i
        # now, run through the keyframelist and keep the keyframes that are within
        # our audible range (self.inpoint - self.outpoint), whose volume is
        # at or above our current gain level ('threshold' method argument)
        #
        audible = False
        ranges = Ranges()
        for keyframe, volume in keyframelist:
            # discard everything outside .inpoint and .outpoint
            if keyframe < self.inpoint:
                # keyframe falls outside of the current clip, to the left
                continue
            if keyframe > self.outpoint:
                # keyframe falls outside of the current clip, to the right
                break # we're finished
            # store this frame, and translate the keyframe from local to the clip
            # to global to the full sequence
            thisframe = self.start + (keyframe - self.inpoint)
            if volume >= threshold:
                if audible is True: continue # previous frame was also audible
                audible = True
                prevframe = thisframe
            else:
                if audible is False: continue # previous frame was also inaudible
                # level has gone below threshold, write out range so far
                ranges.extend(Range( (prevframe, thisframe) ) )
                audible = False
        #write out the last frame if it hasn't been written
        if audible is True:
            ranges.extend(Range( (prevframe, thisframe) ) )
        return ranges
class Link(object):
    """Wraps a <link> element: the linked-clip reference plus its media
    type and track/clip placement (all kept as the raw text values)."""
    def __init__(self, tree):
        for field in ('linkclipref', 'mediatype', 'trackindex', 'clipindex'):
            setattr(self, field, tree.findtext(field))
class File(BaseObject):
    """A <file> element.  Every instance registers itself in the shared
    File.filelist registry, keyed by its id attribute."""
    # <!ELEMENT file (name | rate | duration | media | timecode | pathurl | width | height | mediaSource)*>
    # Class-level registry shared by all instances (id -> File).
    filelist = {}
    def __init__(self, tree):
        super(File, self).__init__(tree)
        self.id = tree.get('id')
        self.filelist[self.id] = self
        self.duration = float(tree.findtext('duration'))
        self.pathurl = tree.findtext('pathurl')
        has_video = tree.find('media/video') is not None
        self.mediatype = 'video' if has_video else 'audio'
class Effect(object):
    """An <effect>: an effect or processing operation.

    Parents: transitionitem, filter, generatoritem.  When a <parameter>
    child is present, exposes its raw value text, float min/max, and a
    lazy generator of (when, value) keyframe pairs; otherwise those
    attributes are left unset.
    """
    def __init__(self, tree):
        self.name = tree.findtext('name')
        self.effectid = tree.findtext('effectid')
        params = tree.find('parameter')
        if params is None:
            return
        self.parameters = self.getparameters(params)
        self.value = params.findtext('value', 0.0)
        self.max = float(tree.findtext('parameter/valuemax'))
        self.min = float(tree.findtext('parameter/valuemin'))
    def getparameters(self, tree):
        """Yield (when, value) float pairs for each <keyframe> child
        (requires an lxml element for iterchildren)."""
        for keyframe in tree.iterchildren(tag='keyframe'):
            when = float(keyframe.findtext('when'))
            value = float(keyframe.findtext('value'))
            yield (when, value)
class Volume(object):
    """Convert between linear gain and decibels.

    Construct with the known value as a keyword argument and read the
    other one from the instance:

        Volume(gain=0.4).decibel
        Volume(decibel=-60).gain

    Per the dev library: decibels = 20 * log10(level), and conversely
    level = 10 ** (decibels / 20).  Note a falsy argument (0 or None)
    counts as "not given", leaving both attributes None.
    """
    def __init__(self, gain=None, decibel=None):
        from math import log10
        self.gain = None
        self.decibel = None
        if gain:
            self.gain = float(gain)
            self.decibel = 20 * log10(self.gain)
        if decibel:
            self.decibel = float(decibel)
            self.gain = 10 ** (self.decibel / 20)
class XmemlParser(object):
    """Parses an XMEML file with lxml and exposes its audio clips.

    NOTE(review): XmemlFileError is not defined in this module -- the
    raise statements below would themselves NameError; confirm it is
    provided elsewhere.
    """
    def __init__(self, filename):
        try:
            self.tree = etree.parse(filename)
        except AttributeError:
            raise XmemlFileError('Parsing xml failed. Seems like a broken XMEML file.')
        try:
            self.version = self.tree.getroot().get('version')
            self.name = self.tree.getroot().find('sequence').get('id')
        except AttributeError:
            raise XmemlFileError('No sequence found. Seems like a broken XMEML file.')
        # find all file references
        # (side effect: replaces the class-level File.filelist registry)
        File.filelist = {f.get('id'):File(f) for f in self.tree.getroot().iter('file') if f.findtext('name') is not None}
    def iteraudioclips(self, onlypureaudio=True):
        """Iterator to get all audio clips.

        onlypureaudio controls whether to limit to clips that have no
        video clip associated with them (i.e. music, sound effects).
        Defaults to True.  Nested sequences are descended one level.
        """
        audio = self.tree.getroot().find('sequence/media/audio')
        for track in audio.iterchildren(tag='track'):
            for clip in track.iterchildren(tag='clipitem'):
                ci = ClipItem(clip)
                if ci.isnestedsequence:
                    #print clip.find('sequence').get('name')
                    for nestedtrack in clip.find('sequence/media/audio').iterchildren(tag='track'):
                        for nestedclip in nestedtrack.iterchildren(tag='clipitem'):
                            nestedci = ClipItem(nestedclip)
                            if not onlypureaudio:
                                yield nestedci
                            elif nestedci.file.mediatype == 'audio':
                                yield nestedci
                    continue
                if not onlypureaudio:
                    yield ci
                elif ci.file is not None and ci.file.mediatype == 'audio':
                    yield ci
    def audibleranges(self, threshold=AUDIOTHRESHOLD):
        """Return ({clip name: Ranges}, {clip name: File}) for all pure
        audio clips, merging ranges of same-named clips.
        NOTE(review): dict.has_key() below is Python 2 only.
        """
        clips = {}
        files = {}
        for clip in self.iteraudioclips():
            if clips.has_key(clip.name):
                clips[clip.name] += clip.audibleframes(threshold)
            else:
                clips[clip.name] = clip.audibleframes(threshold)
            files.update( {clip.name: clip.file} )
        return clips, files
if __name__ == '__main__':
    import sys
    from pprint import pprint as pp
    # Manual smoke test: parse the file named on the command line and
    # dump each clip's audible ranges at a 0.03 linear-gain threshold.
    xmeml = XmemlParser(sys.argv[1])
    #pp( [cl.name for cl in xmeml.iteraudioclips() if cl.name.startswith('SCD0')])
    clips, files = xmeml.audibleranges(0.0300)
    # NOTE(review): iteritems() below is Python 2 only.
    pp([(clip,r) for (clip,r) in clips.iteritems()])# if clip.startswith('SCD048720')])
| true |
ca8a71bcf61a1d06704a37db321c7c6d30218f3f | Python | krisjuune/pre-post-noisi | /benchmark/functions.py | UTF-8 | 12,298 | 3.25 | 3 | [] | no_license | # %% Calculations
import numpy as np
import numpy.ma as ma
from math import pi
from coordinate_transformation.functions.get_spherical \
import radius_cnt, wgs84, geographic_to_geocentric
from coordinate_transformation.functions.get_domain \
import find_nearest
def get_curvature(lat, lon, radius = 6370287.272978241, \
    theta = 37.5, phi = -16.5):
    """
    Function to calculate the curvature relative to a
    flat surface at the given radius assuming a sphere
    with the given radius. Inputs include arrays of
    latitude, longitude, and a radius. Function returns
    array of depths relative to this flat surface with
    the dimensions of lon, lat.
    Units in degrees for angles, distances same as radius.
    Default radius calculated at default geographic theta.
    (theta, phi) is the geographic centre of the domain.
    """
    # preallocate output array, indexed [lon, lat]
    curvature = np.zeros((len(lon), \
        len(lat)), float)
    # transform to geocentric
    lat = geographic_to_geocentric(lat)
    theta = geographic_to_geocentric(theta)
    # convert to radians
    lon = pi/180*lon
    lat = pi/180*lat
    phi = pi/180*phi
    theta = pi/180*theta
    # loop over the lats and lons
    for i in range(len(lon)):
        for j in range(len(lat)):
            # find angle between point i,j and
            # centre (chord offsets along each axis)
            a = radius*np.sin(lon[i] - phi)
            b = radius*np.sin(lat[j] - theta)
            c = np.sqrt(np.square(a) + \
                np.square(b))
            # arcsin(x), x has to be [-1,1]
            alpha = np.arcsin(c/radius)
            # calculate depth to curve from flat
            # surface
            y = radius/np.cos(alpha) - radius
            x = y*np.cos(alpha)
            # negative: the sphere falls away below the tangent plane
            curvature [i,j] = x*(-1)
    return(curvature)
def get_curvature_wgs84(lat, lon, radius = 6370287.272978241, \
    theta = 37.5, phi = -16.5):
    """
    Function to calculate the curvature relative to a
    flat surface at the given radius for an ellipsoid
    defined by wgs84. Inputs include arrays of latitude,
    longitude, and a radius. Function returns array of
    depths relative to this flat surface with the
    dimensions of lon, lat.
    Units in degrees for angles, distances same as radius.
    Default radius calculated at default geographic theta.
    NOTE(review): metres and kilometres are mixed below (radius vs the
    /1000-scaled radius_j) -- confirm the intended units of the result.
    """
    # preallocate output array, indexed [lon, lat]
    curvature = np.zeros((len(lon), \
        len(lat)), float)
    # transform to geocentric
    lat = geographic_to_geocentric(lat)
    theta = geographic_to_geocentric(theta)
    # convert to radians
    lon = pi/180*lon
    lat = pi/180*lat
    phi = pi/180*phi
    theta = pi/180*theta
    # loop over the lats and lons
    for i in range(len(lon)):
        for j in range(len(lat)):
            # find radius at j-th latitude
            if round(radius/1000, 3) == radius_cnt(theta)/1000:
                # when look at the surface curvature
                # centred around default lat, lon
                a = wgs84()[0]
                b = wgs84()[1]
            else:
                # for when looking at shallower levels
                # centred about any lat, lon
                # (scale the ellipsoid axes to the requested radius)
                r_theta = radius_cnt(theta)/1000
                a = wgs84()[0]*radius/r_theta
                b = wgs84()[1]*radius/r_theta
            radius_j = np.sqrt((a**2*(np.cos(lat[j])**2)) + \
                (b**2*(np.sin(lat[j])**2)))/1000
            # find angle between point i,j and centre
            l1 = abs(radius*np.tan(lon[i] - phi))
            l2 = abs(radius*np.tan(lat[j] - theta))
            l3 = np.sqrt(np.square(l1) + \
                np.square(l2))
            alpha = np.arctan(l3/radius)
            # Checked and up to alpha everything seems to
            # be working
            # calculate depth to curve from flat surface
            y = radius/np.cos(alpha) - radius_j
            x = y*np.cos(alpha)
            curvature [i,j] = x*(-1)
    # Cannot seem to find reason why curvature !=0 at
    # (theta, phi), so just substituting that value
    # from all elements, tested this against spherical
    # case and get same values to ~10m accuracy
    m = find_nearest(lon, phi)
    n = find_nearest(lat, theta)
    curvature = curvature - curvature [m,n]
    return(curvature)
# %% Save as netCDF files, add a check function
import netCDF4 as nc4
import datetime as dt
from mpl_toolkits.basemap import Basemap
from pathlib import Path
def get_nc_curvature(filename, z_variable, x_var, y_var):
    """
    Writes a netCDF4 file with x_distance, y_distance,
    and curvature. filename should be a string (with
    no file extension) and z_variable an array
    containing the calculated curvature values; x_var
    and y_var are the corresponding coordinate arrays.
    """
    # Create .nc file
    # (local import shadows the module-level netCDF4 import; kept as-is)
    import netCDF4 as nc4
    f = nc4.Dataset(filename + '.nc','w', format = 'NETCDF4')
    f.description = 'Curvature calculated relative to tangent' + \
        ' surface at the centre of domain assuming spherical Earth'
    # Create dimensions
    f.createDimension('x', len(x_var))
    f.createDimension('y', len(y_var))
    # Create variables, 'f4' for single precision floats, i.e. 32bit
    z = f.createVariable('z', 'f4', ('x', 'y'))
    z [:] = z_variable
    x = f.createVariable('x', 'f4', 'x')
    x [:] = x_var
    y = f.createVariable('y', 'f4', 'y')
    y [:] = y_var
    # Add attributes to the file
    today = dt.datetime.now()
    f.history = "Created " + today.strftime("%d/%m/%y")
    #Add local attributes to variable instances
    z.units = 'm'
    x.units = 'm'
    y.units = 'm'
    f.close()
def check_nc(path, filename):
    """Print name, units and shape of every variable in the netCDF file
    at path/filename.  NOTE(review): the dataset handle is never closed."""
    # (local import shadows the module-level pathlib import; kept as-is)
    from pathlib import Path
    path = Path(path)
    f = nc4.Dataset(path / filename, 'r')
    for i in f.variables:
        print(i, f.variables[i].units, \
            f.variables[i].shape)
# %% Plotting
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
def plot_geographic(lat, lon, data, filename, \
    lat_max = 39.5, lat_min = 35.5, lon_max = -14, \
    lon_min = -19, cbar_label = 'Bathymetry (km)'):
    """Plot *data* on a Miller-projection Basemap over the given
    lat/lon window, add a colorbar, save to *filename* and show."""
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # NOTE: `fig` is re-bound from the Figure to the Basemap instance here.
    fig = Basemap(projection = 'mill', llcrnrlat = lat_min, \
        urcrnrlat = lat_max, llcrnrlon = lon_min, \
        urcrnrlon = lon_max, resolution = 'c')
    fig.drawmapboundary()
    # Draw a lon/lat grid (20 lines for an interval of one degree)
    fig.drawparallels(np.linspace(lat_min, lat_max, num = 5), \
        labels=[1, 0, 0, 0], fmt="%.2f", dashes=[2, 2])
    fig.drawmeridians(np.arange(round(lon_min), round(lon_max), 1), \
        labels=[0, 0, 0, 1], fmt="%.2f", dashes=[2, 2])
    # Add elevation data to map
    cmap = 'viridis'
    Lon, Lat = np.meshgrid(lon, lat)
    fig.pcolormesh(Lon, Lat, data, latlon = True, \
        cmap = cmap)
    # Colorbar construction
    i = ax.imshow(data, interpolation='nearest')
    cbar = fig.colorbar(i, shrink = 0.5, aspect = 5)
    cbar.set_label(cbar_label, rotation = 270, labelpad=15, y=0.45)
    plt.savefig(filename, dpi = 600)
    plt.show()
# TODO fix these dependencies issues, had to run to define each of
# the functions below in order to be able to use the plotting function
def wgs84():
    """
    Return the WGS84 reference ellipsoid constants (Greenwich as lon = 0)
    as the tuple (a, b, e_2, f): semi-major axis a and semi-minor axis b
    in metres, squared eccentricity e_2 = (a^2 - b^2)/a^2, and inverse
    flattening f = a/(a - b).
    """
    semi_major = 6378137.0       # equatorial radius of the oblate spheroid, m
    semi_minor = 6356752.314245  # polar radius of the oblate spheroid, m
    inv_flattening = semi_major / (semi_major - semi_minor)
    ecc_squared = (semi_major**2 - semi_minor**2) / semi_major**2
    return (semi_major, semi_minor, ecc_squared, inv_flattening)
def geographic_to_geocentric(lat):
    """
    Convert geographic latitude to geocentric latitude on the WGS84
    ellipsoid.  Input and output are in degrees.
    """
    ecc_squared = wgs84()[2]  # eccentricity as defined by wgs84
    lat_rad = np.deg2rad(lat)
    return np.rad2deg(np.arctan((1 - ecc_squared) * np.tan(lat_rad)))
def radius_cnt(lat):
    """
    Radius (in m) of the WGS84 ellipsoid at latitude *lat* (degrees),
    computed as sqrt(a^2 cos^2(lat) + b^2 sin^2(lat)).
    """
    ellipsoid = wgs84()
    semi_major, semi_minor = ellipsoid[0], ellipsoid[1]
    lat_rad = pi / 180 * lat
    return np.sqrt((semi_major**2 * (np.cos(lat_rad)**2)) +
                   (semi_minor**2 * (np.sin(lat_rad)**2)))
def get_cartesian_distance(lon, lat, \
    src_lat = 37.5, src_lon = -16.5):
    """
    Calculate distance of each point of lat and lon
    from the source location on a flat surface,
    tangential to the source. Returns x (lon), y
    (lat) in km for AxiSEMCartesian.
    """
    # transform to geocentric
    lat = geographic_to_geocentric(lat)
    src_lat = geographic_to_geocentric(src_lat)
    # find radius at source (km)
    r_greatcircle = radius_cnt(src_lat)/1000
    # find radius of small circle at source lat
    r_smallcircle = r_greatcircle*np.cos(np.deg2rad(src_lat))
    # convert differences in angles to radians
    phi = pi/180*lon - pi/180*src_lon
    theta = pi/180*lat - pi/180*src_lat
    # preallocate output arrays
    # (immediately overwritten below; kept as-is)
    x = np.zeros(len(phi), float)
    y = np.zeros(len(theta), float)
    # find distances on the tangent plane
    x = r_smallcircle*np.tan(phi)
    y = r_greatcircle*np.tan(theta)
    return(x,y)
def plot_curvature(lat, lon, curvature, src_lat = 37.5, \
    src_lon = -16.5, cbar_label = 'Curvature (m)', \
    filename = 'noname'):
    """
    Function to plot a 3d surface once transformed
    into Cartesian distances. Figure saved as png if
    filename is not noname. BUG fixed with transposing.
    NOTE(review): savefig is called after plt.show(); depending on the
    matplotlib backend this may save an empty figure -- verify.
    """
    # TODO error with get_cart_dist so just added it in here
    # Transform lat, lon to be centered around the N Pole
    (x, y) = get_cartesian_distance(lon, lat, \
        src_lat, src_lon)
    x, y = np.meshgrid(x, y)
    x = x.transpose()
    y = y.transpose()
    # Create figure handle
    fig = plt.figure()
    ax = plt.gca(projection = '3d')
    # TODO how to scale the axes, so z not so exaggerated
    # Plot
    surf = ax.plot_surface(x, y, curvature, \
        cmap = 'viridis')
    # Add colorbar
    cbar = fig.colorbar(surf, shrink = 0.5, aspect = 5)
    cbar.set_label(cbar_label, rotation = 270, labelpad=15, \
        y=0.45)
    plt.show()
    if filename != 'noname':
        plt.savefig((filename + '.png'), dpi = 600)
# %% Processing functions
import numpy as np
from pathlib import Path
from math import pi
from coordinate_transformation.functions.get_spherical \
import wgs84
def station_data(path, station):
    """
    Retrieve the seismic data for station *station* (II network, not IU)
    from directory *path*; both inputs are strings.

    The ASCII file 'II.<station>.RTZ.ascii' holds whitespace-separated
    numbers in row-major order with four columns: time, rr, tt, zz.
    Returns an (m, 4) float array.
    """
    file_path = Path(path) / ('II.' + station + '.RTZ.ascii')
    # Read the whole file; 'with' guarantees the handle is closed
    # (the original implementation leaked the open file object).
    with open(file_path, 'r') as fh:
        raw = fh.read()
    # Text-mode np.fromstring is deprecated; parse explicitly instead.
    values = np.array(raw.split(), dtype=float)
    # nr of columns is always 4 since time, rr, tt, zz.
    # Drop any trailing partial row, then reshape into 4 columns --
    # equivalent to the original per-column strided copies.
    m = int(len(values) / 4)
    data = values[:4 * m].reshape(m, 4)
    return data
# Calculate the length of one degree of lat and lon as a function of lat
def len_deg_lon(lat):
"""
Calculates length of one degree of longitude
at latitudes lat. Input lat must be an array
of integers.
"""
e_2 = wgs84()[2]
a = wgs84() [0]
# This is the length of one degree of longitude
# approx. after WGS84, at latitude lat
# in m
lat = pi/180*lat
dlon = (pi*a*np.cos(lat))/180*np.sqrt((1-e_2*np.sin(lat)**2))
return np.round(dlon,5)
def len_deg_lat(lat):
    """
    Length (m) of one degree of latitude at latitudes *lat* (degrees),
    using the WGS84 series approximation (valid between lat-0.5 and
    lat+0.5 deg); rounded to 5 decimals.  Input lat must be an array
    of integers.
    """
    lat_rad = pi / 180 * lat
    series = 111132.954 - 559.822 * np.cos(2 * lat_rad) + 1.175 * np.cos(4 * lat_rad)
    return np.round(series, 5)
| true |
bb8f0410351c72994596cc3b5ce4476eaa22e0c0 | Python | seungmidev/sparta-project | /week03/db_practice.py | UTF-8 | 343 | 2.671875 | 3 | [] | no_license | from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('localhost', 27017)  # MongoDB listens on port 27017.
db = client.dbsparta  # Open (or lazily create) the database named 'dbsparta'.
# Fetch every user aged 40, omitting MongoDB's internal _id field.
same_ages = list(db.users.find({'age': 40}, {'_id': False}))
print(same_ages)
9420ddcd33e50b2e46618b02cc8ab7714b7d8c1a | Python | zkchong/UDP-RC | /Coder.py | UTF-8 | 5,577 | 2.921875 | 3 | [] | no_license | #
# Filename: Coder.py
# To generate the encoded symbol from a file. And, to reconstruct the original message.
#
# by Chong Zan Kai zkchong@gmail.com
# Last modify on 21-Jun-2015.
#
# import pickle
from Coding import Coding
import logging
# import random
# import time
from Random_Code import Random_Code, Random_Code_Generator
from Gaussian_Elimination import Gaussian_Elimination
# import threading
# import Hybrid_Packet as HYBRID_PACKET
# import socket
#------------------------------------------------------------------------------
# Data Encoder
#------------------------------------------------------------------------------
class Data_Encoder():
    """Generates random-code encoded symbols from a file.

    The file is split into fixed-size message symbols which seed a
    Random_Code instance; each generate_encoded_symbol() call draws a
    fresh coded symbol from it.
    """
    def __init__(self, file_name, symbol_size):
        '''
        file_name: File name.
        symbol_size: Size of a symbol in bytes.
        '''
        self.__file_name = file_name
        self.__symbol_size = symbol_size
        # Read the file as a bit array and slice it into symbols of
        # symbol_size bytes (i.e. symbol_size * 8 bits each).
        file_bitarr = Coding.file_to_bitarray(self.__file_name)
        symbol_bits = self.__symbol_size * 8
        self.__message_symbol_list = Coding.bit_list_to_symbol_list(file_bitarr, symbol_bits)
        self.__total_message_symbol = len(self.__message_symbol_list)
        self.__file_size = int(len(file_bitarr) / 8)  # bits -> bytes
        self.__code = Random_Code(self.__message_symbol_list)
    def generate_encoded_symbol(self):
        """Return (seed, encoded_bitarr) for one freshly coded symbol;
        the generator vector itself is discarded here."""
        seed, g_bitarr, encoded_bitarr = self.__code.generate_encoded_symbol()
        return seed, encoded_bitarr
    def get_file_size(self):
        """File size in bytes."""
        return self.__file_size
    def get_total_message_symbol(self):
        """Number of message symbols the file was split into."""
        return self.__total_message_symbol
#------------------------------------------------------------------------------
# Data Decoder
#------------------------------------------------------------------------------
class Data_Decoder():
    """Reconstructs the original message bytes from random-code encoded
    symbols and their generator seeds."""
    def __init__(self):
        pass
    def reconstruct_message(self, total_message_symbol, message_size, g_seed_list, encoded_symbol_list):
        '''
        Decode and return the original message bytes.  The caller decides
        when enough symbols have been collected to attempt this.
        Note that encoded_symbol_list is modified in place.
        '''
        # Rebuild the generator vectors from their seeds.
        generator = Random_Code_Generator(total_message_symbol)
        g_list = [generator.get_generator_vector(seed) for seed in g_seed_list]
        # Solve the linear system with Gaussian elimination; afterwards
        # the first total_message_symbol entries hold the decoded symbols.
        eliminator = Gaussian_Elimination()
        eliminator.form_triangle_matrix(g_list, encoded_symbol_list)
        eliminator.backward_substitution(g_list, encoded_symbol_list)
        decoded_symbols = encoded_symbol_list[:total_message_symbol]
        # message_size is in bytes; the bit-list helper counts in bits.
        decoded_bits = Coding.symbol_list_to_bit_list(decoded_symbols, message_size * 8)
        return decoded_bits.tobytes()
#------------------------------------------------------------------------------
# Test
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: encode sample1.txt into k+10 coded symbols, then decode
    # them back and print the reconstructed message.
    filename = 'sample1.txt'
    symbol_size = 2 # byte
    encoder = Data_Encoder(filename, symbol_size)
    g_seed_list = []
    encoded_symbol_list = []
    file_size = encoder.get_file_size()
    total_message_symbol = encoder.get_total_message_symbol()
    print ('file_size = %d bytes.' % file_size)
    print ('get_total_message_symbol = %d symbols.' % total_message_symbol)
    # Put k+10 encoded symbols into list.
    for i in range(total_message_symbol + 10):
        seed, encoded_symbol_bitarr = encoder.generate_encoded_symbol()
        g_seed_list.append(seed)
        encoded_symbol_list.append(encoded_symbol_bitarr)
        print ('%d. seed = %s, encoded_symbol = %s' % (i+1, seed, encoded_symbol_bitarr))
    decoder = Data_Decoder()
    message = decoder.reconstruct_message(total_message_symbol, file_size, g_seed_list, encoded_symbol_list)
    print ('message = %s' % message)
| true |
cdd2a7821bcc3f25fb388be1da3a628868dd31de | Python | fukushin821/streamLit | /main.py | UTF-8 | 1,050 | 3.0625 | 3 | [] | no_license | import streamlit as st
import time
st.title('Streamlit 超入門')
st.write('プログレスバーの表示')
# Bare expressions are rendered to the page by Streamlit's "magic" output.
'Start!!'
latest_iteration = st.empty()
bar = st.progress(0)
# Animate a progress bar from 1 to 100, ~0.1s per step, updating the
# placeholder text alongside it.
for i in range(100):
    latest_iteration.text(f'Iteration {i+1}')
    bar.progress(i + 1)
    time.sleep(0.1)
# Two-column layout: a button on the left writes text into the right column.
left_column,right_column = st.beta_columns(2)
button = left_column.button('右カラムに文字を表示')
if button:
    right_column.write('ここは右カラム')
# Collapsible "inquiry" section.
expander = st.beta_expander('問い合わせ')
expander.write('問い合わせ内容をかく')
# Remaining widget examples kept commented out for reference.
# text = st.text_input('Please your hobby')
# condition = st.sidebar.slider('あなたの今の調子は?',0,100,50)
# 'あなたの趣味:', option,
# 'コンディション:',condition
# option = st.selectbox(
#     'あなたが好きな数字を教えてください',
#     list(range(1,11))
# )
# 'あなたの好きは数字は、', option ,'です'
# if(st.checkbox('Show Image')):
#     img = Image.open('./demo_image.jpg')
#     st.image(img,caption='',use_column_width=True)
| true |
40664f2dd9c1de25071e0343f84231e2d233e75a | Python | ONSdigital/response-operations-ui | /scripts/align_events_and_rules.py | UTF-8 | 3,298 | 2.96875 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/usr/bin/python
import argparse
import datetime
from os import abort
import requests
from dateutil import tz
def parse_args():
    """Parse the three positional command-line arguments for this script."""
    arg_parser = argparse.ArgumentParser(description="Align collection exercise events and rules")
    for arg_name, arg_help in (
        ("url", "Collection exercise service URL"),
        ("user", "Basic auth user"),
        ("password", "Basic auth password"),
    ):
        arg_parser.add_argument(arg_name, help=arg_help)
    return arg_parser.parse_args()
def update_event(collex_id, event_tag, date, url, user, password):
    """PUT a new timestamp for one collection exercise event.

    Prints a diagnostic line when the service responds with anything other
    than 204 No Content; stays silent on success.
    """
    endpoint = "{base}/collectionexercises/{id}/events/{tag}".format(base=url, id=collex_id, tag=event_tag)
    response = requests.put(
        endpoint,
        data=date,
        auth=(user, password),
        headers={"content-type": "text/plain"},
    )
    if response.status_code != 204:
        print("{} <= {} ({})".format(response.status_code, date, response.text))
def get_collection_exercises(user, password, url):
    """Fetch every collection exercise from the service.

    Returns the decoded JSON list on HTTP 200.  On any other status the
    response is printed and the whole process is killed via abort().
    """
    print(url)
    response = requests.get(url + "/collectionexercises", auth=(user, password))
    status_code = response.status_code
    if status_code == 200:
        ces = response.json()
        print("{} <= {} collection exercises retrieved".format(status_code, len(ces)))
        return ces
    print("{} <= {}".format(status_code, response.text))
    # NOTE(review): `abort` here is os.abort (imported at the top of the
    # file) -- it raises SIGABRT and may dump core, with no cleanup or exit
    # handlers.  sys.exit(1) is likely what was intended; confirm.
    abort()
def is_mandatory_event(event):
    """Return True when the event's tag is one of the mandatory event tags."""
    return event["tag"] in {"mps", "go_live", "reminder", "reminder1", "reminder2"}
def align_events_and_rules(collection_exercises, user, password, url):
    """Move every mandatory event of every exercise to 09:00 Europe/London."""
    for exercise in collection_exercises:
        print(
            "\nPROCESSING COLLECTION_EXERCISE: {} {} {} {}".format(
                exercise["name"],
                exercise["exerciseRef"],
                exercise["state"],
                exercise["id"],
            )
        )
        # Only mandatory events (mps, go_live, reminders) are realigned.
        for event in (e for e in exercise["events"] if is_mandatory_event(e)):
            new_date = change_time_to_9_am(event["timestamp"])
            print(
                "EVENT: {} {} currently: {} changing to: {}".format(
                    event["tag"], event["id"], event["timestamp"], new_date
                )
            )
            update_event(
                collex_id=exercise["id"],
                event_tag=event["tag"],
                date=new_date,
                url=url,
                user=user,
                password=password,
            )
def change_time_to_9_am(event_timestamp):
    """Return the timestamp with its time-of-day replaced by 09:00:00.000
    in the Europe/London timezone, formatted with millisecond precision."""
    # The trailing 'Z' is stripped before parsing the naive timestamp.
    parsed = datetime.datetime.strptime(event_timestamp[:-1], "%Y-%m-%dT%H:%M:%S.%f")
    at_nine = parsed.replace(
        hour=9, minute=0, second=0, microsecond=0, tzinfo=tz.gettz("Europe/London")
    )
    return at_nine.isoformat(timespec="milliseconds")
if __name__ == "__main__":
    # Parse CLI args, fetch all collection exercises, then push the 09:00
    # alignment for each mandatory event.
    args = parse_args()
    url = args.url
    user = args.user
    password = args.password
    collection_exercises = get_collection_exercises(user=user, password=password, url=url)
    align_events_and_rules(collection_exercises=collection_exercises, user=user, password=password, url=url)
    print("Finished aligning events and rules")
| true |
ae401ec0c62653e421f840c65144a807f0681ef7 | Python | zhangshv123/superjump | /interview/google/face/boxes.py | UTF-8 | 641 | 2.515625 | 3 | [] | no_license | """
天花板上悬吊着很多箱子。重点:箱子的上下左右若有箱子相邻,则他们两个之间是扣住的,最上面的箱子都扣着天花板。现在用炮弹打掉一个箱子,问一共会有多少个箱子会掉下来。显然,若3个箱子上下扣成一列吊在天花板上,打掉中间那个箱子,则中间的箱子和下面的箱子会掉下来,因为下面的那个箱子没有别的拉力来源。若6个箱子排成等高且相邻两列吊在天花板上,打掉第一列中间那个箱子,并不会有其它箱子掉下来。这题不要求写出代码,但请谈谈思路。
"""
# union find | true |
cadd6ea3618cae891d76b1139c7d9c04e240178c | Python | muddulur/Nikhil | /Finding Factors.py | UTF-8 | 209 | 3.546875 | 4 | [] | no_license | input_num=int(input("Enter the number to which factors are needed: "));
# Print every proper divisor of input_num (checks 1 .. input_num-1; the
# number itself is deliberately not tested, so it is never listed as its
# own factor).
for i in range(1,input_num):
    factor=input_num%i;
    if factor == 0:
        # i divides input_num exactly; print it followed by a tab string.
        print (i,"\t");
    else:
        continue;
| true |
cef33bdf9f736ed357db5f02734effdfc329fdca | Python | vasetousa/Python-Advanced | /Multidimentional lists/Matrix shuffling.py | UTF-8 | 1,024 | 3.359375 | 3 | [] | no_license | def read_matrix(is_test=False):
if is_test:
return [
[1, 2, 3],
[4, 5, 6],
]
else:
rows, columns = [int(el) for el in (input().split())]
matrix = []
for r in range(rows):
x = input().split()
matrix.append(x)
return matrix
matrix = read_matrix() # for local testing use matrix = read_matrix(is_test=True)
# pprint(matrix)
command = input()
# Process "swap row1 col1 row2 col2" commands until the literal "END".
while not command == "END":
    if command.startswith("swap"):
        try:
            command_string, row_1, col_1, row_2, col_2 = command.split()
            row_1 = int(row_1)
            row_2 = int(row_2)
            col_1 = int(col_1)
            col_2 = int(col_2)
            # Swap the two cells, then print the whole matrix.
            x = matrix[row_1][col_1]
            matrix[row_1][col_1] = matrix[row_2][col_2]
            matrix[row_2][col_2] = x
            for el in matrix:
                print(*el)
        # NOTE(review): the bare except (meant for bad counts/indices) also
        # hides unrelated bugs; (ValueError, IndexError) would be safer.
        except:
            print("Invalid input!")
    else:
        print("Invalid input!")
    command = input()
| true |
fca3e483e922512bcd525154875d8647c4182efa | Python | andela-sjames/paystack-python | /paystackapi/refund.py | UTF-8 | 1,055 | 2.671875 | 3 | [
"MIT"
] | permissive | from paystackapi.base import PayStackBase
class Refund(PayStackBase):
    """Wrapper around Paystack's /refund endpoints.

    All operations are classmethods; `cls().requests` (supplied by the
    PayStackBase base class) performs the authenticated HTTP calls.
    """
    @classmethod
    def create(cls, **kwargs):
        """
        Function defined to create a refund.
        Args:
            transaction: Transaction reference or id
            amount: How much in kobo to be refunded to the customer - Optional
            currency: Three-letter ISO currency - Optional
            customer_note: Customer reason - Optional
            merchant_note: Merchant reason - Optional
        Returns:
            Json data from paystack API.
        """
        return cls().requests.post('refund', data=kwargs)
    @classmethod
    def list(cls, **kwargs):
        """
        List Refunds
        Args:
            reference: Identifier for transaction to be refunded - Optional
            currency: Three-letter ISO currency - Optional
        Returns:
            JSON data from paystack's API.
        """
        # Note: `list` shadows the builtin only inside this class namespace.
        return cls().requests.get('refund', data=kwargs)
    @classmethod
    def fetch(cls, refund_id):
        """
        Fetch a Refund
        Args:
            refund_id: Identifier for refund to be fetched
        Return:
            JSON data from paystack API
        """
        return cls().requests.get(f"refund/{refund_id}")
| true |
be5262fdca4c4808f6e49d7ff0c9f472d787f629 | Python | paulosrlj/PythonCourse | /Módulo 3 - POO/Aula6 - Encapsulamento/testes.py | UTF-8 | 1,113 | 3.984375 | 4 | [] | no_license | class Pokemon:
def __init__(self, nome, tipo, evolucoes):
self.nome = nome
self.tipo = tipo
self.evolucoes = evolucoes
self.__mega = False
def imprimirAtributos(self):
print(
f'Nome: {self.nome}\nTipo: {self.tipo}\nEvoluções: {self.evolucoes}\nMega-evolução: {self.__mega}')
# Usando getters e setters pra ver os atributos privados '__mega' de forma mais fácil
@property
def mega(self):
# Agora pode-se obter o atributo '__mega', usando: pikachu.mega
return self.__mega
@mega.setter
def mega(self, valor):
if not isinstance(valor, bool):
return
self.__mega = valor
pikachu = Pokemon('Pikachu', 'Elétrico', 3)
# Would raise an error, because '__mega' is private (name-mangled)
# print(pikachu.__mega)
# This creates a NEW instance attribute literally named '__mega'
# (no mangling happens outside the class body)
pikachu.__mega = True
print(pikachu.__mega)
# To print the real (mangled) private attribute
print(pikachu._Pokemon__mega)
print()
pikachu.imprimirAtributos()
print()
# Using the setter to assign a new value
pikachu.mega = True
# Using the getter to read the value
print(pikachu.mega)
| true |
bf8689c6dfbb445a9ede200759dcc1ecb2c917e5 | Python | Board2Death-OSU/bot_helper | /bot/client.py | UTF-8 | 2,619 | 3.09375 | 3 | [] | no_license | import discord
from typing import Callable, List, Dict, Tuple
# This is a comment
class Client(discord.Client):
    """discord.Client that fans incoming messages out to registered callbacks.

    Callbacks come in two flavours, each kept in a pair of parallel lists
    (function i is always invoked with the argument list stored at index i):

    * plain responders -> their (text, channel) result is sent as a message
    * file responders  -> their (path, channel) result is sent as a
      discord.File attachment
    """

    def __init__(self):
        self.on_message_functions = []
        self.on_message_args = []
        self.on_message_file_functions = []
        self.on_message_file_args = []
        super().__init__()

    def register_on_message_callback(
        self,
        fun: Callable[[discord.Message, any], Tuple[str, discord.TextChannel]],
        args: List[any]
    ) -> None:
        """
        Add a callback function to be executed when the client receives a
        message.

        When called, the function will receive the discord message, then the
        registered arguments.  It should return a tuple containing the
        message to send and the channel to send the response to, or None for
        no response.
        """
        self.on_message_functions.append(fun)
        self.on_message_args.append(args)

    def register_on_message_send_file_callback(
        self,
        fun: Callable[[discord.Message, any], Tuple[str, discord.TextChannel]],
        args: List[any]
    ) -> None:
        """Like register_on_message_callback, but the returned string is
        treated as a file path and sent as an attachment."""
        self.on_message_file_functions.append(fun)
        self.on_message_file_args.append(args)

    async def on_ready(self) -> None:
        # Fired once the gateway connection is established and login succeeded.
        print('Successfully Logged in as {0}'.format(self.user))

    async def on_message(self, message: discord.Message) -> None:
        """
        Called when a message is received; executes all registered callback
        functions, passing in the message and their arguments, then sends
        every non-empty response.
        """
        # Don't respond to yourself
        if message.author == self.user:
            return
        # Responses to be sent after all callbacks have run.
        responses: List[Tuple[str, discord.TextChannel]] = []
        file_responses = []
        # (Fix: removed an unused local that copied message.content.)
        for fun, args in zip(self.on_message_functions, self.on_message_args):
            value = fun(message, *args)
            if value is not None:
                responses.append(value)
        for fun, args in zip(self.on_message_file_functions, self.on_message_file_args):
            value = fun(message, *args)
            if value is not None:
                file_responses.append(value)
        for (response, channel) in responses:
            if response is not None and response != '':
                await channel.send(response)
        for (response, channel) in file_responses:
            if response is not None and response != '':
                await channel.send('', file=discord.File(response))
| true |
3b25e59cd8ef2f769e36aca0497e4dd7b9db38ad | Python | SamJ2018/LeetCode | /python/python语法/pyexercise/Exercise04_39.py | UTF-8 | 744 | 4.21875 | 4 | [] | no_license | import turtle
# Read each circle as "x, y, radius" (comma-separated, eval'd as a tuple).
# NOTE(review): eval(input()) executes arbitrary expressions; fine for a
# teaching exercise, unsafe on untrusted input.
x1, y1, r1 = eval(input("Enter circle1's center x-, y-coordinates, and radius: "))
x2, y2, r2 = eval(input("Enter circle2's center x-, y-coordinates, and radius: "))
# Draw circle 1
turtle.penup()
turtle.goto(x1, y1 - r1) # turtle.circle() starts drawing from the circle's bottom
turtle.pendown()
turtle.circle(r1)
# Draw circle 2
turtle.penup()
turtle.goto(x2, y2 - r2)
turtle.pendown()
turtle.circle(r2)
# Move below circle 1 to write the verdict text.
turtle.penup()
turtle.goto(x1 - r1, y1 - r1 - 30)
turtle.pendown()
# Distance between the two centers.
distance = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5
if distance + r2 <= r1:
    # circle2 lies entirely within circle1
    turtle.write("circle2 is inside circle1")
elif distance <= r1 + r2:
    turtle.write("circle2 overlaps circle1")
else:
    turtle.write("circle2 does not overlap circle1")
turtle.hideturtle()
turtle.done()
| true |
2321d2601f06c96a1a621b3422bebf9a68fa755f | Python | est22/PS_algorithm | /others/chaining.py | UTF-8 | 1,003 | 3.671875 | 4 | [] | no_license | class Chaining:
class Node:
# 노드 객체 생성자 : key, data, link
def __init__(self, key, data, link):
self.key = key
self.data = data
self.next = link
# chaining 객체 생성자 : 해시테이블 a
def __init__(self, size):
self.M = size # M = 테이블 사이즈
self.a = [None] * size
def hash(self, key):
return key % self.M # 나눗셈 해시함수
def put(self, key, data): # 삽입 연산
i = self.hash(key)
p = self.a[i]
while p != None:
if key == p.key:
p.data = data
return
p = p.next
self.a[i] = self.Node(key, data, self.a[i])
def get(self, key): # 탐색 연산
i = self.hash(key)
p = self.a[i]
while p != None:
if key == p.key: # 탐색 성공
return p.data
p = p.next
return None # 탐색 실패
| true |
5df47f5bb3251374659feeec6f9c67288df4ebde | Python | nilamkurhade/Week2 | /DataStructurePrograms/bankCashCounter.py | UTF-8 | 4,389 | 3.84375 | 4 | [] | no_license |
from DataStructurePrograms.util import Test_LinkedList
l1 = Test_LinkedList()
class Bank_Queue:
cash = []
while True:
try:
print("enter the amount minimum 1000 to open account...\n")
amount = int(input()) # initial amount to open the account
while amount < 1000: # validating the input
print("please enter above 1000 to open account...")
amount = int(input()) # accepting valid input
print("enter number of peoples in queue..\n")
customer = int(input()) # number of people in queue
temp = amount # assigning inputs into temp,temp1 variables
temp1 = customer
while customer > 0: # customers greater than 0 process further
print("WELCOME TO BANK.... \n")
print("1.Deposit \n 2.Withdraw... \n 3.process 4.exit..\n")
print("enter your choice..\n")
choice = int(input()) # taking customers choice as a input
while choice > 4: # validating the inputs
print("please enter choice within given range \n")
choice = int(input())
if choice == 1: # if choice is 1 deposit the money
print("enter amount to deposit...\n")
deposit = int(input()) # input amount
while deposit < 100: # validating amount for deposit
print("enter above 100 rupees..\n")
deposit = int(input())
amount = amount + deposit # add deposit amount to the initial amount
cash = l1.queue_push(deposit) # push into queue
customer -= 1 # decrement customer size
if choice == 2: # if choice is 2 withdrawing amount form bank
print("enter amount to be withdraw from bank...")
withdraw = int(input())
while withdraw <= 0: # validating
print("enter amount in positive numbers...\n")
withdraw = int(input())
if withdraw < amount: # withdraw amount is less than initial amount then withdraw from a bank
amount = amount - withdraw
cash = l1.queue_push(withdraw) # push withdraw amount into a queue
customer -= 1 # decrement customer size by 1
else: # else print
print("insufficient balance please give below bank_balance...\n")
if choice == 3: # choice 3 to process the queue
if len(cash) != 0: # len of cash not equal to 0
if customer > 0: # customer size should greater than 0
print("your transaction is complete..:", cash[0]) # printing the cash
l1.queue_pop() # then pop
customer += 1 # increment by 1
else:
print("no transaction to process....\n") # else print
if choice == 4: # choice 4 to exit
break
if len(cash) != 0:
print("Queue is full,process it..\n ")
for i in range(temp1): # printing each users progress
print("process is complete.. ", cash[0])
l1.queue_pop()
print()
if amount >= temp: # to balance the cash must satisfy if condition
print("cash is balanced correctly...\n")
else:
print("cash is not balanced...\n")
break
except ValueError: # handling exception
print("please enter valid input.....")
continue
except RuntimeError:
print("oops something went wrong..\n")
continue
except IndexError:
print("give correct index....\n")
continue
| true |
d8d4574cc0ba2bbd2ad06473f2b5c573f76c36d0 | Python | laurenpaljusaj/SI506-2021Winter | /lab_exercise_03/lab_exercise_03_solution.py | UTF-8 | 1,165 | 3.46875 | 3 | [
"BSD-3-Clause"
] | permissive | # START LAB EXERCISE 03
print('Lab Exercise 03 \n')
# PROBLEM 1 (5 Points)
# Dictionary mapping inventor name -> invention.
inventors = {'Marie Van Brittan Brown': 'Home security system',
'Alice H. Parker': 'Furnace for central heating', 'Leonidas Berry':
'Gastroscope pioneer', 'Otis Boykin': 'Artificial heart pacemaker control unit',
'David Crosthwait': 'Heating' }
# END SETUP
# PROBLEM 2 (4 Points)
# Replace Crosthwait's abbreviated invention with its full description.
invention = 'Heating, ventilation, and air conditioning'
inventors['David Crosthwait'] = invention
# PROBLEM 3 (4 Points)
# SETUP
new_inventor = {'Alexander Miles': 'Automatic electric elevator doors'}
# END SETUP
# Merge the new inventor into the dictionary.
inventors.update(new_inventor)
print(f'The updated inventor list: {inventors}')
# PROBLEM 4 (4 Points)
# Remove one entry by key.
inventors.pop('Marie Van Brittan Brown')
print(f'The inventors in the list are: {inventors}')
# PROBLEM 5 (4 Points)
# SETUP
gastroscope_inventor = 'Leonidas Berry'
# END SETUP
# The trailing comma makes this a one-element tuple.
tuple_gastroscope_inventor = (gastroscope_inventor,)
print(f'''The data type of < tuple_gastroscope_inventor > is {type(tuple_gastroscope_inventor)} and\
prints as {tuple_gastroscope_inventor}''')
# PROBLEM 6 (4 Points)
# Tuple concatenation produces a new two-element tuple.
medical_inventors = tuple_gastroscope_inventor + ('Otis Boykin',)
print(f'''Two inventors with medical related inventions: {medical_inventors}''')
# END LAB EXERCISE | true |
0e39b473d47f9042b81afb4a6f65db1cb85bedd8 | Python | zhangchizju2012/LeetCode | /37.py | UTF-8 | 5,432 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 22 21:37:45 2017
@author: zhangchi
"""
import copy
class Solution(object):
    def solveSudoku(self, board):
        """
        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.
        """
        # NOTE(review): despite the docstring above, this implementation
        # RETURNS the solved board from helper() (which deep-copies); the
        # input board object itself is not modified.
        #
        # Build, for every empty cell (i, j), the set of digits already used
        # in its row, column and 3x3 box, then invert it against `full` to
        # get each cell's remaining candidates.
        full = set(["1","2","3","4","5","6","7","8","9"])
        left = {}
        for i in xrange(9):
            for j in xrange(9):
                if board[i][j] == ".":
                    left[(i,j)] = set()
        # Digits already used in each row.
        for i in xrange(9):
            temp = set()
            indexList = []
            for j in xrange(9):
                if board[i][j] != ".":
                    temp.add(board[i][j])
                else:
                    indexList.append(j)
            for j in indexList:
                left[(i,j)] = left[(i,j)].union(temp)
        # Digits already used in each column.
        for i in xrange(9):
            temp = set()
            indexList = []
            for j in xrange(9):
                if board[j][i] != ".":
                    temp.add(board[j][i])
                else:
                    indexList.append(j)
            for j in indexList:
                left[(j,i)] = left[(j,i)].union(temp)
        # Digits already used in each 3x3 box.
        for m in xrange(3):
            for n in xrange(3):
                temp = set()
                for i in xrange(3):
                    for j in xrange(3):
                        if board[3*m+i][3*n+j] != ".":
                            temp.add(board[3*m+i][3*n+j])
                for i in xrange(3):
                    for j in xrange(3):
                        if (3*m+i,3*n+j) in left:
                            left[(3*m+i,3*n+j)] = left[(3*m+i,3*n+j)].union(temp)
        # Invert "forbidden" sets into "candidate" sets.
        for i in xrange(9):
            for j in xrange(9):
                if (i,j) in left:
                    left[(i,j)] = full.difference(left[(i,j)])
        return self.helper(dict(left), list(board))
        # the dict()/list() copies here are not strictly necessary
    def helper(self, lastLeft, lastBoard):
        # Backtracking search.  Returns a solved board, or None (implicitly)
        # when this branch is unsatisfiable.
        if len(lastLeft) == 0:
            return lastBoard
        else:
            # Try the cell with the fewest candidates first (sort by
            # candidate-set size); this is essentially best-first DFS.
            temp = [(item,lastLeft[item]) for item in lastLeft]
            temp.sort(key=lambda x:len(x[1]))
            if len(temp[0][1]) > 0:
                for value in temp[0][1]:
                    # a shallow dict(lastLeft) would be wrong here: the
                    # candidate sets would be shared, so deep-copy instead
                    left = copy.deepcopy(lastLeft)
                    board = copy.deepcopy(lastBoard)
                    label = True
                    position = temp[0][0]
                    board[position[0]][position[1]] = value
                    left.pop(position)
                    # Remove `value` from the candidates of every peer cell
                    # in the same column and row.
                    for i in xrange(9):
                        if (i,position[1]) in left and value in left[(i,position[1])]:
                            left[(i,position[1])].remove(value)
                            if len(left[(i,position[1])]) == 0:
                                # a cell ran out of candidates: prune this
                                # branch early
                                label = False
                                break
                        if (position[0],i) in left and value in left[(position[0],i)]:
                            left[(position[0],i)].remove(value)
                            if len(left[(position[0],i)]) == 0:
                                label = False
                                break
                    if label is True:
                        # Same pruning for the 3x3 box peers.
                        m = position[0] // 3
                        n = position[1] // 3
                        for i in xrange(3):
                            for j in xrange(3):
                                if (3*m+i,3*n+j) in left and value in left[(3*m+i,3*n+j)]:
                                    left[(3*m+i,3*n+j)].remove(value)
                                    if len(left[(3*m+i,3*n+j)]) == 0:
                                        label = False
                                        break
                    if label is True:
                        result = self.helper(left, board)
                        if result is not None:
                            return result
# Demo: solve a sample puzzle ('.' marks an empty cell) and bind the solved
# board to b (printed below).  This file is Python 2 (xrange / print stmt).
s = Solution()
#a = [["."]*9 for _ in xrange(9)]
a = [[".",".","9","7","4","8",".",".","."],
     ["7",".",".",".",".",".",".",".","."],
     [".","2",".","1",".","9",".",".","."],
     [".",".","7",".",".",".","2","4","."],
     [".","6","4",".","1",".","5","9","."],
     [".","9","8",".",".",".","3",".","."],
     [".",".",".","8",".","3",".","2","."],
     [".",".",".",".",".",".",".",".","6"],
     [".",".",".","2","7","5","9",".","."]]
# =============================================================================
# a = [["5","3",".",".","7",".",".",".","."],
#      ["6",".",".","1","9","5",".",".","."],
#      [".","9","8",".",".",".",".","6","."],
#      ["8",".",".",".","6",".",".",".","3"],
#      ["4",".",".","8",".","3",".",".","1"],
#      ["7",".",".",".","2",".",".",".","6"],
#      [".","6",".",".",".",".","2","8","."],
#      [".",".",".","4","1","9",".",".","5"],
#      [".",".",".",".","8",".",".","7","9"]]
# =============================================================================
b = s.solveSudoku(a)
print b | true |
9cdfc1bfabdba0a7b5876e28928546a93c3f97ad | Python | erccarls/county_covid_seir_models | /pyseir/reports/names.py | UTF-8 | 1,408 | 3.0625 | 3 | [
"MIT"
] | permissive |
# Human-readable display names for the model's compartments and derived
# time series, keyed by the short identifiers used in the model output.
compartment_to_name_map = {
    'S': 'Susceptible',
    'I': 'Infected',
    'E': 'Exposed',
    'A': 'Asymptomatic (Contagious)',
    'R': 'Recovered and Immune',
    'D': 'Direct Death',
    'HGen': 'Hospital Non-ICU',
    'HICU': 'Hospital ICU',
    'HVent': 'Hospital Ventilated',
    'deaths_from_hospital_bed_limits': 'Deaths: Non-ICU Capacity',
    'deaths_from_icu_bed_limits': 'Deaths: ICU Capacity',
    'deaths_from_ventilator_limits': 'Deaths: Ventilator Capacity',
    'total_deaths': 'Total Deaths (All Cause)',
    'HGen_cumulative': 'Cumulative Hospitalizations',
    'HICU_cumulative': 'Cumulative ICU',
    'HVent_cumulative': 'Cumulative Ventilators',
    'direct_deaths_per_day': 'Direct Deaths Per Day',
    'total_deaths_per_day': 'Total Deaths Per Day (All Cause)',
    'general_admissions_per_day': 'General Admissions Per Day',
    'icu_admissions_per_day': 'ICU Admissions Per Day',
    'total_new_infections': 'Total New Infections'
}
def policy_to_mitigation(s):
    """
    Convert a suppression-policy string to a mitigation display string.

    Suppression is defined with 1 = unmitigated, so the displayed value is
    (1 - suppression) as a whole-number percentage.

    Parameters
    ----------
    s: str
        String to convert. Structure is e.g. suppression_policy__1.0

    Returns
    -------
    mitigation: str
        Mitigation display, e.g. '0% Mitigation'.
    """
    suppression = float(s.split('__')[1])
    mitigation_pct = 100 * (1 - suppression)
    return f'{mitigation_pct:.0f}% Mitigation'
| true |
c8e4b8542a6903a553b842af51f586c3b770b279 | Python | DataDaveH/exercise_1 | /investigations/best_states/best_states.py | UTF-8 | 6,500 | 2.703125 | 3 | [] | no_license | #
# best_state.py
#
# Computes a per-hospital quality score (normalized quality measures minus a
# mortality/readmission penalty), aggregates it per US state, and prints the
# top states with their score standard deviation.
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import pyspark.sql.functions as F
from math import sqrt
sc = SparkContext("local", "Exercise1")
sqlContext = SQLContext(sc)
# read the dataframe in from the parguet file
dfHospitals = sqlContext.read.parquet("/user/w205/hospital_compare/hospitalParquet")
dfMeasures = sqlContext.read.parquet("/user/w205/hospital_compare/measuresParquet")
dfProcedures = sqlContext.read.parquet("/user/w205/hospital_compare/proceduresParquet")
# columns we want that are ranges ((x - min) / (max - min))
measuresRanges = ["EDV"]
dfRanges = dfProcedures.where(F.col("measureID").isin(measuresRanges))
mins = [dfRanges.where(F.col("measureID").like(m)).agg(F.min("score")).collect()[0][0] for m in measuresRanges]
maxs = [dfRanges.where(F.col("measureID").like(m)).agg(F.max("score")).collect()[0][0] for m in measuresRanges]
ranges = [maxs[i] - mins[i] for i in range(0,len(maxs))]
# compute range percents
rangeUDF = F.udf(lambda score: 100 * (score - mins[0]) / ranges[0], DecimalType(10,3))
dfQuality = dfRanges.withColumn("score", F.when(dfRanges.measureID.like(measuresRanges[0]), rangeUDF(dfRanges.score)))\
    .where(F.col("score").isNotNull())
# NOTE(review): the lambdas below close over the loop variable i (late
# binding); this loop never runs while measuresRanges has one element, but
# with more measures the captured i should be verified.
for i in range(1,len(mins)):
    rangeUDF = F.udf(lambda score: 100 * (score - mins[i]) / ranges[i], DecimalType(10,3))
    dfQuality = dfQuality.unionAll( \
        dfRanges.withColumn("score", F.when(dfRanges.measureID.like(measuresRanges[i]), rangeUDF(dfRanges.score)))\
        .where(F.col("score").isNotNull()))
# compute reverse range (a higher number is worse)
measuresReverseRanges = ["VTE_6", "ED_1b", "ED_2b", "OP_18b", "OP_20", "OP_21", "OP_5"]
dfReverseRanges = dfProcedures.where(F.col("measureID").isin(measuresReverseRanges))
mins = [dfReverseRanges.where(F.col("measureID").like(m)).agg(F.min("score")).collect()[0][0] for m in measuresReverseRanges]
maxs = [dfReverseRanges.where(F.col("measureID").like(m)).agg(F.max("score")).collect()[0][0] for m in measuresReverseRanges ]
ranges = [maxs[i] - mins[i] for i in range(0,len(maxs))]
# compute reverse range percents ((max - x) / (max - min))
reverseRangeUDF = F.udf(lambda score: 100 * (maxs[0] - score) / ranges[0], DecimalType(10,3))
dfQuality = dfQuality.unionAll(dfReverseRanges.withColumn(
    "score", F.when(dfReverseRanges.measureID.like(measuresReverseRanges[0]),
    reverseRangeUDF(dfReverseRanges.score))).where(F.col("score").isNotNull()))
for i in range(1,len(mins)):
    reverseRangeUDF = F.udf(lambda score: 100 * (maxs[i] - score) / ranges[i], DecimalType(10,3))
    dfQuality = dfQuality.unionAll( dfReverseRanges.withColumn(
        "score", F.when( dfReverseRanges.measureID.like(measuresReverseRanges[i]), reverseRangeUDF(dfReverseRanges.score)))\
        .where(F.col("score").isNotNull()))
# columns we want that are already percentages
measuresRates = ["OP_23", "OP_29", "OP_30", "OP_4", "VTE_5", "STK_4"]
dfQuality = dfQuality.unionAll(dfProcedures.where(F.col("measureID").isin(measuresRates)))
measuresQuality = measuresRates + measuresReverseRanges + measuresRanges
numMeasures = len(measuresQuality)
# now the penalties
# readmission measure
measuresRead = ["READM_30_HF"]
dfRead = dfProcedures.where(F.col("measureID").isin(measuresRead))
# measures for mortality
measuresMort = ["MORT_30_AMI", "MORT_30_CABG", "MORT_30_COPD", "MORT_30_HF", "MORT_30_PN", "MORT_30_STK"]
dfMort = dfProcedures.where(F.col("measureID").isin(measuresMort))
dfPenalty = dfMort.unionAll(dfRead)
# use quality and penalty scores to compute variance
rddQuality = dfQuality.rdd
rddPenalty = dfPenalty.rdd
# compute average quality and penalty scores
# aggregate by adding values and increment count each time
rddAvgQ = rddQuality.map( lambda x: (x[0], x[2]))\
    .aggregateByKey((0.0,0.0),\
    (lambda x, newVal: ((x[0] + float(newVal)), (x[1] + 1))),\
    (lambda rdd1, rdd2: (rdd1[0] + rdd2[0], rdd1[1] + rdd2[1])))
# NOTE(review): the quality average divides by the fixed measure count
# (numMeasures) rather than the observed count x[1] -- presumably so that
# hospitals missing measures are implicitly scored zero for them; confirm.
rddAvgQ = rddAvgQ.mapValues( lambda x: round((x[0] / (numMeasures)), 5))
# aggregate by adding values and increment count each time
rddAvgP = rddPenalty.map( lambda x: (x[0], x[2]))\
    .aggregateByKey((0.0,0.0),\
    (lambda x, newVal: ((x[0] + float(newVal)), (x[1] + 1))),\
    (lambda rdd1, rdd2: (rdd1[0] + rdd2[0], rdd1[1] + rdd2[1])))
# we are penalizing a small amount based on the number of quality measures
rddAvgP = rddAvgP.mapValues( lambda x: round((x[0] / (x[1])), 5))
# break the columns apart after the joins
rddFinal = rddAvgQ.join( rddAvgP).map( lambda x: (x[0], x[1][0], x[1][1]))
# build final dataframes
dfFinal = rddFinal.toDF( ["ProviderID", "QualityScore", "Penalty"])\
    .withColumn("FinalScore", F.round(F.col("QualityScore") - F.col("Penalty"), 5))\
    .select("ProviderID", "FinalScore")
# now find measure for states
rddState = dfFinal.join(dfHospitals, dfHospitals.id == dfFinal.ProviderID).select("state", "FinalScore").rdd
# then take rddState and compute std dev for each state
# to build the score variance per state, aggregate (sum of score^2, sum of score, count)
rddVar = rddState.map( lambda x: (x[0], x[1]))\
    .aggregateByKey((0.0,0.0,0.0),\
    (lambda x, newVal: ((x[0] + (float(newVal) ** 2)), (x[1] + float(newVal)), (x[2] + 1))),\
    (lambda rdd1, rdd2: (rdd1[0] + rdd2[0], rdd1[1] + rdd2[1], rdd1[2] + rdd2[2])))
# then map by values to compute the variance = (sum(score^2) / count) - (sum(score) / count)^2
# which is the average sum of squares minus the mean squared
rddStdDev = rddVar.mapValues( lambda x: round( sqrt((x[0] / x[2]) - ((x[1] / x[2]) ** 2)), 5))
# average per state
# aggregate by adding values and increment count each time
rddAvgState = rddState.map( lambda x: (x[0], x[1]))\
    .aggregateByKey((0.0,0.0),\
    (lambda x, newVal: ((x[0] + float(newVal)), (x[1] + 1))),\
    (lambda rdd1, rdd2: (rdd1[0] + rdd2[0], rdd1[1] + rdd2[1])))
rddAvgState = rddAvgState.mapValues( lambda x: round((x[0] / (x[1])), 5))
# join together with dfHospitals and break apart columns
rddStateScores = rddAvgState.join( rddStdDev).map( lambda x: (x[0], x[1][0], x[1][1])).sortBy( lambda x: x[1], ascending = False)
# and print that sumbitch out
dfStateScores = rddStateScores.zipWithIndex().map(lambda x: (x[1] + 1, x[0][0], x[0][1], x[0][2]))\
    .toDF().select(F.col("_1").alias("Rank"), F.col("_2").alias("State"), F.col("_4").alias("StandardDeviation"),\
    F.col("_3").alias("Score"))\
    .show(10, False)
| true |
89dcb429e972d3d54991cd911d3af26ae8af79f7 | Python | yewool0818/TIL | /algorithm/SWEA/List1/D2/1966_숫자를정렬하자/s1.py | UTF-8 | 672 | 3.375 | 3 | [] | no_license | import sys
# Redirect stdin so input() reads the bundled test file.
sys.stdin = open("input.txt")
T = int(input())
for tc in range(1, T + 1):
    # Number of values in this test case
    N = int(input())
    # The list of numbers to sort
    numbers = list(map(int, input().split()))
    # Bubble sort: after each outer pass the largest remaining value has
    # bubbled up to index i.
    for i in range(N-1, 0, -1):
        for j in range(0, i):
            if numbers[j] > numbers[j+1]:
                numbers[j], numbers[j+1] = numbers[j+1], numbers[j]
    # Convert each number to str and join them into one output string.
    result = ' '.join(map(str, numbers))
    # Output
    print('#{} {}'.format(tc, result))
| true |
73e50aaa15804d7025a14b2dba84a998776bf36e | Python | danielchristie/Portfolio | /Python/Database Programs/Program2/Python_Database_Example_Explainations.py | UTF-8 | 3,673 | 3.3125 | 3 | [] | no_license |
from Tkinter import *
#from Tkinter import tkMessageBox
import sqlite3
#Paint the GUI
root = Tk()
root.title("Database Demo")
root.minsize(width = 300,height = 300)
root.maxsize(width = 300, height = 300)
#=========================================================
# Connect to database
conn = sqlite3.connect('dbWebPages.db')
# Create table named webpages
conn.execute("CREATE TABLE if not exists tblWebContent( \
ID INTEGER PRIMARY KEY AUTOINCREMENT, \
colHead TEXT, \
colBody TEXT \
);")
### Add data to the table
##conn.execute("INSERT INTO tblWebContent \
## (colHead, colBody) VALUES \
## ('My First Header', 'This is a lot of fun body text')");
##
### Add data to the table
##conn.execute("INSERT INTO tblWebContent \
## (colHead, colBody) VALUES \
## ('My Second Header', 'This is still a lot of fun body text')");
##
### Add data to the table
##conn.execute("INSERT INTO tblWebContent \
## (colHead, colBody) VALUES \
## ('My Third Header', 'This body text is getting a bit stale now')");
# Save changes & close the database connection
conn.commit()
conn.close()
#=========================================================
#Select item in ListBox
def onSelect(event):
    """Copy the clicked ListBox row into the Entry widget below it."""
    listbox = event.widget
    highlighted = int(listbox.curselection()[0])
    chosen_text = listbox.get(highlighted)
    # Replace whatever is currently in the Entry with the selection.
    txtText1.delete(0, END)
    txtText1.insert(0, chosen_text)
#Define Listbox & Paint it
lstList1 = Listbox(root)
lstList1.bind('<<ListboxSelect>>', onSelect)
lstList1.pack()
#Define TextEntryBox & Paint it
varText1 = StringVar() #Corresponds with the Entry's txtvar value
txtText1 = Entry(root, textvariable = varText1, width = 200)
txtText1.pack()
# NOTE(review): the next line references the bound method without calling it,
# so it is a no-op -- presumably varText1.set("") was intended; confirm.
varText1.set
varTemp = varText1.get()
#insert text function
def insert():
    """Store the Entry widget's text in tblWebContent and mirror it in the ListBox.

    If the Entry is empty (or whitespace only) an error message is printed
    instead; the Entry is cleared in either case.
    """
    varTemp = txtText1.get()
    if varTemp != "":
        conn = sqlite3.connect('dbWebPages.db')
        with conn:  # the connection context manager commits on success
            cursor = conn.cursor()
            # Bug fix: the INSERT previously targeted a table named "colBody",
            # which does not exist; the data belongs in tblWebContent.
            cursor.execute("INSERT INTO tblWebContent (colBody) VALUES (?)", [varTemp])
            lstList1.insert(END, varTemp)
        conn.close()
    #Error handle when entry widget is empty
    if txtText1.get().strip() == "":
        #messagebox.showerror("ERROR - Missing Data!","Text field is empty, please enter some text.")
        print("ERROR - Missing Data!", "Text field is empty, please enter some text.")
    #Delete entry field
    txtText1.delete(0, END)
#Populate the Listbox
# Reads every colBody value from the table, prints a pile of debug output,
# then inserts the characters of the "largest" row tuple into the ListBox.
conn = sqlite3.connect('dbWebPages.db')
with conn:
    cursor = conn.cursor()
    cursor.execute("SELECT colBody FROM tblWebContent")
    rows = cursor.fetchall()  # list of 1-tuples, e.g. [('some text',), ...]
    m = 0
    mi = 0
    for row in range(len(rows)):
        print("This is the total items in the array or (items in the tuple): {}".format(len(rows)))
        for x in rows:
            print("This is the item in the array or (item in the tuple): {}".format(x[0]))
            z = x[0]
            print("Print z: {}".format(z))
            z = str(z)
            print(type(z))
            # NOTE(review): this rebinds the module-level varText1 (previously
            # a StringVar) to a plain string -- likely unintended; confirm.
            varText1 = z
            #lstList1.insert[0, z]
        # NOTE(review): comparing the int ``m`` (initially 0) with the tuple
        # ``rows[row]`` is only legal on Python 2 (this file imports Tkinter,
        # the Python 2 module name); on Python 3 it raises TypeError.
        if m <= rows[row]:
            m = rows[row]
            mi = row
    #lstList1.insert(END, str())
    print("This is a tuple out of the array: {}".format(m))
    print(type(m))
    # ``row`` here is the leftover value from the loop above.
    print("This is the index of the array: {}".format(row))
    print(type(row))
    # Iterate the winning tuple ``m``; each ``i`` is one column value.
    for i in m:
        print("This is data out of the tuple: {}".format(i))
        print(type(i))
        i = str(i)
        print(type(i))
        lstList1.insert(0, i)
    conn.close()
root.mainloop()
| true |
2b2767853859184d2885e9cbc7a7a797ced0bf0d | Python | hyeongnam/project-musics | /crawling/singer_name.py | UTF-8 | 2,961 | 2.8125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import numpy as np
genre = {
'ballad': ['ballad','dance','pop','folk','manidol','girlidol'],
'rnh': ['hnp','jni'],
'rns': ['rnb','soul','fnd'],
'elec': ['elec','club'],
'rock': ['modern','punk','metal'],
'jazz': ['vocal','play'],
'indie': ['rock','modern','hiphop','elec']
}
sing_num = []
with open('singer_genre.csv','w', encoding='utf-8') as f:
for tmp in genre:
for temp in genre.get(tmp):
for i in range(1,2):
url = f'https://music.bugs.co.kr/genre/kpop/{tmp}/{temp}?tabtype=5&sort=default&nation=all&page={i}'
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
singers = soup.select('#container section div ul li')
for singer in singers:
link = singer.select_one('figure figcaption a')
sing_num.append(link.attrs['href'].split('/')[4].split('?')[0])
singer_name = singer.select('figure figcaption a')[0].text.strip().split("(")[0]
f.write(f'{tmp},{temp},{singer_name}\n')
lyrics_num = []
with open('sings.csv','w', encoding='utf-8') as f:
for item in sing_num:
url = f'https://music.bugs.co.kr/artist/{item}?wl_ref=list_ar_02'
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
sings = soup.select(f'#DEFAULT{item} table tbody tr')
for sing in sings:
sing_name = sing.select_one('th p a').text.strip().split("(")[0]
singer_name = sing.select_one('.artist a').text.strip().split("(")[0]
singers_type = soup.select('#contentArea section div div table tbody tr td')[0].text
images = soup.select(f'#DEFAULT{item} table tbody tr')[0].select('td')[1].select_one('a img').attrs['src']
albums = soup.select(f'#DEFAULT{item} table tbody tr')[0].select('td')[4].select_one('a').text
sing_link = sing.select_one('td:nth-child(3) a')
lyrics_num.append(sing_link.attrs['href'].split('/')[4].split('?')[0])
f.write(f'{singer_name},{sing_name},{singers_type},{albums},{images}\n')
with open('lyrics.csv','w', encoding='utf-8') as f:
for tem in lyrics_num:
url = f'https://music.bugs.co.kr/track/{tem}?wl_ref=list_tr_08_ar'
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
lyrics = soup.select_one('.lyricsContainer xmp')
sing_name = soup.select_one('#container h1').text.replace("[19금]","").strip().split("(")[0]
if lyrics is not None:
ly = lyrics.text.replace("\n", "")
f.write(f'{sing_name},{ly}\n')
else:
ly = '해당 곡은 가사가 없습니다.'
f.write(f'{sing_name},{ly}\n')
# musics = np.concatenate([sings.csv, lyrics], axis=1)
| true |
028f1eea7b538aa426a0e97909a69642e0793aee | Python | yjw0216/Samsung-MultiCampus-python-edu | /py_basic/p25.py | UTF-8 | 2,311 | 3.59375 | 4 | [] | no_license |
# 파일 처리 -> 내장 함수
'''
파일 오픈 : open(파일명,엑세스 모드 , 버퍼링)
엑세스 모드 :
r -> 읽기모드
b -> 바이너리/이진형식으로 ~
w -> 쓰기모드
+ -> 반대속성이 추가되는 것을 의미함
r+ -> 읽고 쓰기
a -> 추가 , 덧붙이기
a+ -> 덧붙이고 읽기
ab -> 이진형식 덧붙이기
버퍼링 :
0 : 안함
1 : 라인별 버퍼링
-1(음수) : 버퍼링하는 크기를 시스템 크기에 맞춤
1 이상 : 버퍼링의 크기를 부여함
'''
# 파일I/O (입출력) -> 반드시 사용이 끝나면 닫아야 한다.
# 파일이 없으면 만들어서 오픈
# f= open('test.txt' , 'w')
# f.close()
f= open('test.txt' , 'r+')
# 10byte를 읽겠다
s = f.read(15)
print(s,f.tell()) ## .tell() 은 현재 파일 포인터의 위치를 알려주는 함수
f.seek(4) ## 파일포인터를 처음 위치 기준에서 이동
s = f.read(5)
print(s,f.tell())
f.close()
f = open( 't1.txt' , 'w')
for n in range(10):
str = '%d line(라인) \n' % n
f.write(str)
f.close()
f= open('t1.txt' , 'r')
while True:
data = f.readline() ## 한 줄 씩 읽는다
if not data:
break
print( data )
f.close()
## 파이썬은 I/O에서 닫는 부분을 자동을 처리해주는 기능을 가지고 있다. with문 !!
with open('t1.txt' , 'r') as f:
while True:
data = f.readline() ## 한 줄 씩 읽는다
if not data:
break
print( data )
####################################################################
# 외장함수 : 구조화된 모듈 , 저장 및 로드
# 피클
import pickle as p
data = {
1:[1,2,3,4],
2:{"name":'멀티'},
3:(5,6,7,8)
}
# 기록
with open('data.p' ,'wb' ) as f1: ## 이진데이터로 읽기
p.dump(data,f1,p.HIGHEST_PROTOCOL)
# 로드 -> 데이터 원복
with open('data.p' ,'rb' ) as f1:
print(p.load(f1))
with open('data.p' ,'rb' ) as f1:
tmp = p.load(f1)
print(tmp,type(tmp))
####################################################################
# os 모듈
import os
print('현재 프로젝트 디렉토리(운영체계에 관계없이) ' , os.getcwd())
os.mkdir('tmp') ## 주석처리로 빼놓고 코드 구동하기
os.chdir('tmp')
print(os.getcwd())
os.chdir('..')
print(os.getcwd()) | true |
c3d168bfa193dfb9275551edc88050b2bba6ee97 | Python | shaversj/100-days-of-code-r2 | /days/01/parse-csv-python/test_parse_football.py | UTF-8 | 935 | 3.125 | 3 | [] | no_license | import unittest
import parse_football
class MyTestCase(unittest.TestCase):
    """Tests for the parse_football CSV helpers."""

    def test_read_invalid_path_throws_exception(self):
        # A path that does not exist must surface FileNotFoundError.
        with self.assertRaises(FileNotFoundError):
            parse_football.read_csv_file("jgfjgfygjgj")

    def test_read_skips_header(self):
        # The header row is dropped; only the two data rows remain.
        arsenal = ["Arsenal", "38", "26", "9", "3", "79", "36", "87"]
        liverpool = ["Liverpool", "38", "24", "8", "6", "67", "30", "80"]
        self.assertEqual(
            parse_football.read_csv_file("../test-football-data.csv"),
            [arsenal, liverpool],
        )

    def test_get_team_with_smallest_difference(self):
        rows = [
            ["Arsenal", "38", "26", "9", "3", "79", "36", "87"],
            ["Liverpool", "38", "24", "8", "6", "67", "30", "80"],
        ]
        self.assertEqual(
            parse_football.find_team_with_smallest_difference(rows),
            ["Liverpool", 37],
        )
if __name__ == '__main__':
    # Allow running this test module directly: python test_parse_football.py
    unittest.main()
| true |
bc4b2c48316b71dc357649287ad046a93efbaf7e | Python | general-programming/tumblrarchives | /web/archives/lib/classes.py | UTF-8 | 1,721 | 2.859375 | 3 | [] | no_license | # This Python file uses the following encoding: utf-8
import re
from paginate import make_html_tag
from paginate_sqlalchemy import SqlalchemyOrmPage
# Pagination class credit https://github.com/ckan/ckan/blob/fd4d60c64a28801ed1dea76f353f8f6ee9f74d45/ckan/lib/helpers.py#L890-L925
class Page(SqlalchemyOrmPage):
    """SqlalchemyOrmPage subclass that emits CSS-framework-friendly markup."""

    # Put each page link into a <li> (for Bootstrap to style it)
    @staticmethod
    def default_link_tag(item, extra_attributes=None):
        """
        Create an A-HREF tag that points to another page.
        """
        li_attrs = extra_attributes or {}
        anchor = make_html_tag(
            "a",
            text=item["value"],
            href=item["href"],
            **item["attrs"]
        )
        return make_html_tag("li", anchor, **li_attrs)

    def pager(self, *args, **kwargs):
        """Render the pager with our custom layout and symbols as defaults."""
        kwargs.update(
            format='<ul class="pagination">$link_previous ~2~ $link_next</ul></nav>',
            symbol_previous='«',
            symbol_next='»',
            dotdot_attr={'class': 'pager_dotdot'},
            curpage_attr={'class': 'active waves-effect'},
            link_attr={'class': 'waves-effect'}
        )
        return super(Page, self).pager(*args, **kwargs)

    def _range(self, link_map, radius):
        """Post-process the default range HTML, stripping the '..' spans."""
        rendered = super(Page, self)._range(link_map, radius)
        dotdot_pattern = '<span class="pager_dotdot">..</span>'
        # NOTE: re.sub treats the pattern as a regex, so '..' matches any
        # two characters at that spot (kept from the original behavior).
        rendered = re.sub(dotdot_pattern, "", rendered)
        return rendered
| true |
b86a26ff6501927760af332cc8b08e93415fb923 | Python | aoyueRay/Leetcode | /31_NextPermutation/next_permutation.py | UTF-8 | 2,421 | 4.28125 | 4 | [] | no_license | # -*- coding:utf-8 -*-
"""
Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).
The replacement must be in-place, do not allocate extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.
1,2,3 → 1,3,2
3,2,1 → 1,2,3
1,1,5 → 1,5,1
"""
# 题意是,查找比当前序列大的下一个序列,若不存在,则按升序返回
# 下面这种算法据说是STL中的经典算法。
# 在当前序列中,从尾端往前寻找两个相邻升序元素,升序元素对中的前一个标记为partition。
# 然后再从尾端寻找另一个大于partition的元素,并与partition指向的元素交换,
# 然后将partition后的元素(不包括partition指向的元素)逆序排列。
# 比如14532,那么升序对为45,partition指向4。
# 由于partition之后除了5没有比4大的数,所以45交换为54,即15432.
# 然后将partition之后的元素逆序排列,即432排列为234,则最后输出的next permutation为15234。
class Solution(object):
    def nextPermutation(self, nums):
        """
        Rearrange ``nums`` *in place* into the lexicographically next greater
        permutation and return it. If ``nums`` is already the largest
        permutation, rearrange it into the smallest (ascending) order.

        :type nums: List[int]
        :rtype: List[int] -- the same list object, after mutation
        """
        if not nums:
            return nums
        # 1) Scan from the right for the first adjacent ascending pair;
        #    ``pivot`` is the left element of that pair.
        pivot = len(nums) - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot == -1:
            # Whole sequence is non-increasing: wrap around to the smallest
            # permutation. Bug fix: reverse IN PLACE -- the original returned
            # a new reversed copy here (nums[::-1]), leaving ``nums`` itself
            # unmodified and breaking the in-place contract.
            nums.reverse()
            return nums
        # 2) From the right, find the first element larger than the pivot
        #    value and swap the two. Guaranteed to terminate because
        #    nums[pivot + 1] > nums[pivot].
        swap_index = len(nums) - 1
        while nums[swap_index] <= nums[pivot]:
            swap_index -= 1
        nums[pivot], nums[swap_index] = nums[swap_index], nums[pivot]
        # 3) The suffix after the pivot is non-increasing; reversing it
        #    makes the new permutation as small as possible.
        nums[pivot + 1:] = nums[pivot + 1:][::-1]
        return nums
if __name__ == '__main__':
    # Quick manual check: only the LAST ``nums`` assignment takes effect;
    # the earlier ones are immediately overwritten.
    solution = Solution()
    nums = [1,2,3,4,5,6]
    nums = [5,3,4,2,1]
    nums = [1,5,1]
    # nums = [1]
    # nums = [3,2,1]
    ans = solution.nextPermutation(nums)
    print(ans)
4ee4a358da9af398a550cfb51c5fc2b4ea4e9fad | Python | github-userx/DownloadRedditImages | /utils.py | UTF-8 | 3,458 | 2.984375 | 3 | [
"MIT"
] | permissive | import json
import os
import platform
import pwd
import time
from typing import List, Dict
from os import path as osp
try:
import urllib.request as urllib2 # For Python 3.x
except ImportError:
import urllib2 # For Python 2.x
class Utils:
    """
    Bunch of utils used for the Reddit Downloader.
    """

    @staticmethod
    def save_to_preferences(preferences: Dict[str, Dict], preferences_file: str):
        """
        Save the preferences to a JSON file.

        :param preferences: Dict containing preferences to save to file.
        :param preferences_file: Location of the file where you want to save.
        """
        with open(preferences_file, 'w') as f:
            json.dump(preferences, f)

    @staticmethod
    def load_preferences(preferences_file: str) -> Dict:
        """
        Load the preferences from JSON file and return as Dict.

        :param preferences_file: Location of the file containing the preferences.
        :return: preferences - Dict containing the loaded preferences.
        """
        with open(preferences_file, 'r') as f:
            preferences = json.load(f)
        return preferences

    @staticmethod
    def get_os():
        """
        Get the OS type (Linux or Macbook), and set the wallpaper folder accordingly.

        :return:
            os_type: Type of OS (Linux for Linux, Darwin for Mac).
            wallpapers_directory: Directory where the wallpapers will be saved.
        """
        os_type = platform.system()
        # Only macOS and Linux are supported; anything else is a hard error.
        assert os_type in {'Darwin', 'Linux'}

        # Get the username
        username = pwd.getpwuid(os.getuid()).pw_name

        # Set the directory to download images.
        wallpapers_directory = '/Users/{}/Pictures/Wallpapers/'.format(username) if os_type == 'Darwin' \
            else '/home/{}/Wallpapers/'.format(username)
        return platform.system(), wallpapers_directory

    @staticmethod
    def remove_unwanted_images(images: List[str]) -> int:
        """
        Remove unwanted images. Since this is a naive approach, we might end up downloading some unwanted images, so we
        delete them.

        :param images: List of image file locations to filter and remove unwanted images from.
        :return: Number of files that were deleted.
        """
        count_removed = 0
        for image in images:
            # These are some random html pages that might have been downloaded.
            # This is a fairly quick and naive approach to downloading images from reddit.
            # Anything under 100 KiB is assumed not to be a real image.
            if osp.getsize(image) < 102400:
                os.remove(image)
                count_removed += 1
        return count_removed

    @staticmethod
    def fetch_subreddit_data(subreddit_url: str, max_trials: int = 20) -> Dict:
        """
        Fetch the subreddit JSON page based on the URL.

        :param subreddit_url: URL created based on user inputs (subreddit, sort_type, sort_time, max_download_count).
        :param max_trials: Maximum number of trial to use for fetching the subreddit JSON data.
        :return: subreddit_data - Nested Dict containing Subreddit data for query, or None if every attempt failed.
        """
        subreddit_data = None
        for _ in range(max_trials):
            try:
                subreddit_page = urllib2.urlopen(subreddit_url)
                subreddit_data = json.load(subreddit_page)
                break
            except Exception:
                # Bug fix: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt and made the retry loop
                # un-interruptible.
                time.sleep(2)  # If we cannot access the reddit page, we wait for 2 seconds and retry.
        return subreddit_data
| true |
8350411e8582ad32dd8d8676c9fa8d02fadd230b | Python | jj0526/my-files-1-1 | /python/hw/numpy2.py | UTF-8 | 80 | 2.84375 | 3 | [] | no_license | import numpy as np
# Build a 2x3 integer matrix, flip it to 3x2, and show the result.
a = np.array([[2, 1, 3],
              [4, 1, 0]])
b = a.T  # equivalent to a.transpose()
print(b)
| true |
6489ea02133a2c66db2d88db69ca5f6ac981d229 | Python | ChoiHeon/algorithm | /02_백준/[1717] 집합의표현.py | UTF-8 | 695 | 3.390625 | 3 | [] | no_license | # https://www.acmicpc.net/problem/1717
"""
Union Find 를 구현하는 문제
"""
import sys
sys.setrecursionlimit(10**6)
# parents[i] is the parent of node i; a root satisfies parents[i] == i.
# (Re-initialized to list(range(n + 1)) once the input size is known.)
parents = []

def get_parent(x):
    """Return the root of x's set, flattening the walked path (path compression)."""
    root = x
    while parents[root] != root:
        root = parents[root]
    # Second pass: re-point every visited node straight at the root.
    while parents[x] != root:
        parents[x], x = root, parents[x]
    return root
def union(x, y):
    """Merge the set containing x into the set containing y."""
    root_x = get_parent(x)
    root_y = get_parent(y)
    parents[root_x] = root_y
def find(x, y):
    """Return True when x and y currently belong to the same set."""
    return get_parent(x) == get_parent(y)
i = sys.stdin.readline  # fast input alias
n, m = map(int, i().split())  # n: largest element value, m: number of operations
parents = list(range(n+1))  # every element starts as its own root
for _ in range(m):
    op, x, y = map(int, i().split())
    if op:
        # op 1: query whether x and y are in the same set
        print("YES") if find(x, y) else print("NO")
    else:
        # op 0: merge the sets containing x and y
        union(x, y)
| true |
ac8f2ae611d1b3294329cf38e40ae07cc5ffbfcf | Python | italoadler/Troop | /src/interface/drag.py | UTF-8 | 1,673 | 2.71875 | 3 | [] | no_license | try:
from Tkinter import Frame
except ImportError:
from tkinter import Frame
class Dragbar(Frame):
    """Thin horizontal handle used to resize the console area by dragging."""

    def __init__(self, master, *args, **kwargs):
        self.app = master
        self.root = master.root

        # A 2px-high white strip whose cursor advertises vertical resizing.
        Frame.__init__(self, self.root, bg="white", height=2,
                       cursor="sb_v_double_arrow")

        # True while the left mouse button is held down on the bar.
        self.mouse_down = False

        # Wire up press / release / drag handlers.
        self.bind("<Button-1>", self.drag_mouseclick)
        self.bind("<ButtonRelease-1>", self.drag_mouserelease)
        self.bind("<B1-Motion>", self.drag_mousedrag)

    def drag_mouseclick(self, event):
        """ Allows the user to resize the console height """
        self.mouse_down = True
        self.root.grid_propagate(False)
        return

    def drag_mouserelease(self, event):
        """Stop resizing and hand focus back to the text widget."""
        self.mouse_down = False
        self.app.text.focus_set()
        return

    def drag_mousedrag(self, event):
        """Resize the console/graph widgets while dragging; always consume the event."""
        if not self.mouse_down:
            return "break"
        line_info = self.app.text.dlineinfo("@0,0")
        if line_info is None:
            return "break"
        line_height = line_info[3]
        # Height of the text box in whole lines (kept from the original,
        # although the value is not used below).
        text_height = int(self.app.text.winfo_height() / line_height)
        console_top = self.app.console.winfo_rooty()  # Location of the console
        new_height = self.app.console.winfo_height() + (console_top - event.y_root)
        # Update heights of console / graphs
        self.app.graphs.config(height=new_height)
        self.app.console.config(height=max(2, new_height / line_height))
        return "break"
| true |
e1c20dfb24889e15098d302f3c76e01a23a35855 | Python | SemonoffArt/hikvision-camera-bot | /hikcamerabot/config.py | UTF-8 | 2,844 | 2.625 | 3 | [
"MIT"
] | permissive | """Config module."""
import json
import logging
from multiprocessing import Queue
from pathlib import Path
from hikcamerabot.exceptions import ConfigError
# Names of the JSON configuration files expected at the project root.
_CONFIG_FILE_MAIN = 'config.json'
_CONFIG_FILE_LIVESTREAM = 'livestream_templates.json'
_CONFIG_FILE_ENCODING = 'encoding_templates.json'
# All config files loaded at import time, in this order.
_CONFIG_FILES = (_CONFIG_FILE_MAIN,
                 _CONFIG_FILE_LIVESTREAM,
                 _CONFIG_FILE_ENCODING)
_LOG = logging.getLogger(__name__)
class Config:
    """Dot notation for JSON config file.

    Wraps a dict of parsed JSON data, exposing it both through mapping
    helpers (__getitem__, get, pop, items) and as attributes, which
    ``from_dict`` mirrors onto the instance __dict__.
    """

    def __init__(self, conf_data):
        # conf_data: plain dict with the (already de-duplicated) config values.
        self.__conf_data = conf_data

    def __iter__(self):
        # Bug fix: __iter__ must return an *iterator*; returning the dict
        # itself made ``for key in config`` raise
        # "TypeError: iter() returned non-iterator".
        return iter(self.__conf_data)

    def __repr__(self):
        return repr(self.__conf_data)

    def __getitem__(self, item):
        return self.__conf_data[item]

    def items(self):
        return self.__conf_data.items()

    def pop(self, key):
        return self.__conf_data.pop(key)

    def get(self, key, default=None):
        return self.__conf_data.get(key, default)

    @classmethod
    def from_dict(cls, conf_data):
        """Make a dot-mapped object from an iterable of (key, value) pairs.

        Used as ``object_pairs_hook`` for ``json.loads``; raises ConfigError
        on duplicate keys.
        """
        conf_dict = cls._conf_raise_on_duplicates(conf_data)
        obj = cls(conf_dict)
        # Robustness fix: update from the materialized dict, not the raw
        # ``conf_data`` iterable, which would already be exhausted here if
        # the caller passed a generator (consumed by the duplicate check).
        obj.__dict__.update(conf_dict)
        return obj

    @classmethod
    def _conf_raise_on_duplicates(cls, conf_data):
        """Build a dict from (key, value) pairs; raise ConfigError on duplicate keys."""
        conf_dict = {}
        for key, value in conf_data:
            if key in conf_dict:
                err_msg = f'Malformed configuration file, duplicate key: {key}'
                raise ConfigError(err_msg)
            conf_dict[key] = value
        return conf_dict
def _load_configs():
    """Loads telegram and camera configuration from config file
    and returns json object.

    Returns a list with one parsed Config per file in _CONFIG_FILES
    (main, livestream templates, encoding templates). Raises ConfigError
    when a file is missing or contains malformed JSON.
    """
    config_data = []
    # Config files live two directories above this module (the project root).
    path = Path(__file__).parent.parent
    for conf_file in _CONFIG_FILES:
        conf_file = path / conf_file
        if not conf_file.is_file():
            err_msg = f'Cannot find {conf_file} configuration file'
            _LOG.error(err_msg)
            raise ConfigError(err_msg)

        _LOG.info('Reading config file %s', conf_file)
        with open(conf_file, 'r') as fd:
            config = fd.read()

        try:
            # object_pairs_hook turns every JSON object into a Config
            # instance (recursively) and rejects duplicate keys.
            config = json.loads(config, object_pairs_hook=Config.from_dict)
        except json.decoder.JSONDecodeError:
            err_msg = f'Malformed JSON in {conf_file} configuration file'
            raise ConfigError(err_msg)

        config_data.append(config)

    return config_data
# Module-level result queue shared by producer/consumer parts of the bot.
_RESULT_QUEUE = Queue()
def get_result_queue():
    """Return the shared multiprocessing result queue."""
    return _RESULT_QUEUE
# Parse all three config files once, at import time.
_CONF_MAIN, _CONF_LIVESTREAM_TPL, _CONF_ENCODING_TPL = _load_configs()
def get_main_config():
    """Return the main bot configuration."""
    return _CONF_MAIN
def get_livestream_tpl_config():
    """Return the livestream template configuration."""
    return _CONF_LIVESTREAM_TPL
def get_encoding_tpl_config():
    """Return the encoding template configuration."""
    return _CONF_ENCODING_TPL
| true |
4a4839159bfe69b7f9ddb67d96e9adc7d4ef72e6 | Python | adamgreig/Pyph | /crop.py | UTF-8 | 390 | 2.890625 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # Pyph Crop
# Crop an image
# Copyright 2011 Adam Greig
# Released under the simplified BSD license, see LICENSE
import Image
import numpy
def do_crop(infile, outfile, c):
    """Crop the image at *infile* to the rectangle described by *c*.

    *c* is a mapping with 'x', 'y', 'x2', 'y2' entries (any int()-able
    values) giving the left/top and right/bottom pixel bounds; the
    cropped result is written to *outfile*.
    """
    top, bottom = int(c['y']), int(c['y2'])
    left, right = int(c['x']), int(c['x2'])
    pixels = numpy.asarray(Image.open(infile))
    Image.fromarray(pixels[top:bottom, left:right]).save(outfile)
| true |