seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
32954738112 | from ast import Num
from tkinter import Image
from cv2 import Mat, addWeighted
# def brightnessContrast(source: Mat, brightness: int, contrast: int):
# clone = source.copy()
# result: Mat
# result = brightness(source, brightness)
# if contrast != 0:
# f = 131 * (contrast + 127) / (127 * (131 - contrast))
# alpha_c = f
# gamma_c = 127 * (1 - f)
# result = addWeighted(result, alpha_c, result, 0, gamma_c)
# return result.clone()
def brightness(source: Mat, brightness: int):
    """Return a copy of *source* with its brightness shifted.

    A positive shift lifts shadows toward white; a negative shift pulls
    highlights toward black; zero returns an untouched copy.
    """
    if brightness == 0:
        return source.copy()
    # Express the shift as a linear transform y = scale * x + offset.
    if brightness > 0:
        shadow, highlight = brightness, 255
    else:
        shadow, highlight = 0, 255 + brightness
    scale = (highlight - shadow) / 255
    offset = shadow
    working = source.copy()
    return addWeighted(working, scale, working, 0, offset)
| TheColorRed/image-editor | image-processor/Source/filters/brightness.py | brightness.py | py | 922 | python | en | code | 0 | github-code | 13 |
7831720020 | import random
from faker import Faker
from faker.providers import BaseProvider
from juriscraper.lib.string_utils import titlecase
from reporters_db import REPORTERS
from cl.custom_filters.templatetags.text_filters import oxford_join
fake = Faker()
class LegalProvider(BaseProvider):
    """Faker provider that fabricates legal-flavored test data."""

    def random_id(self) -> str:
        """Generate a random ID that can be used in a handful of places like:

        - The PK of the court (because they use chars)

        :return a str with random chars
        """
        letters = fake.random_letters(length=15)
        return "".join(letters).lower()

    def court_name(self) -> str:
        """Build a plausible-looking fake court name, e.g.
        "District court of the Zoo" or "Appeals court for the dragons".

        :return: A court name
        """
        leading = random.choice([
            "Thirteenth circuit",
            "District court",
            "Appeals court",
            "Superior court",
        ])
        connector = random.choice(["of the", "for the"])
        trailing = random.choice([
            "Zoo",
            "Medical Worries",
            "Programming Horrors",
            "dragons",
            "Dirty Dishes",
            "Eruptanyom",  # Kelvin's pretend world
        ])
        return f"{leading} {connector} {trailing}"

    def federal_district_docket_number(self) -> str:
        """Make a docket number like you'd see in a district court, of the
        form, "2:13-cv-03239"
        """
        office = random.randint(1, 7)
        year = random.randint(0, 99)
        case_code = random.choice(["cv", "bk", "cr", "ms"])
        serial = random.randint(1, 200_000)
        return f"{office}:{year:02}-{case_code}-{serial:05}"

    @staticmethod
    def _make_random_party(full: bool = False) -> str:
        """Return one party name: a company or a person, chosen at random.
        When *full*, oxford-join five such names instead of one."""
        if random.choice([True, False]):
            # Corporate party.
            if full:
                return oxford_join([fake.company() for _ in range(5)])
            return fake.company()
        # Human party.
        if full:
            return oxford_join([fake.name() for _ in range(5)])
        return fake.last_name()

    def case_name(self, full: bool = False) -> str:
        """Makes a clean case name like "O'Neil v. Jordan" """
        return titlecase(
            f"{self._make_random_party(full)} v. {self._make_random_party(full)}"
        )

    def citation(self) -> str:
        """Make or fetch a citation e.g. 345 Mass. 76

        Grab a random reporter if it contains a typical full_cite pattern
        Exclude reporters that rely on regex patterns.

        :return Citation as a string
        """
        # Keep only plain volume-reporter-page reporters (no regex editions).
        vol_page_reporters = [
            name
            for name in REPORTERS.keys()
            if "regexes" not in REPORTERS[name][0]["editions"][name].keys()
        ]
        chosen = random.choice(vol_page_reporters)
        return f"{random.randint(1, 999)} {chosen} {random.randint(1, 999)}"

    def random_id_string(self) -> str:
        """Generate a random integer and convert to string.

        :return: Random integer as string.
        """
        return str(random.randint(100_000, 400_000))
| freelawproject/courtlistener | cl/tests/providers.py | providers.py | py | 3,442 | python | en | code | 435 | github-code | 13 |
22645886082 | from threadlocal_aws.resources import s3_Bucket as bucket_r
from ec2_utils.utils import prune_array, delete_selected
def prune_s3_object_versions(
    bucket=None,
    prefix="",
    ten_minutely=288,
    hourly=168,
    daily=30,
    weekly=13,
    monthly=6,
    yearly=3,
    dry_run=False,
):
    """Prune old S3 object versions under *prefix* in *bucket*, keeping a
    retention ladder of ten-minutely / hourly / daily / weekly / monthly /
    yearly versions and deleting the rest (or only reporting, if *dry_run*).
    """

    def modified_at(version):
        # Sort/group key: when this version was written.
        return version.last_modified

    def key_of(version):
        # Versions of the same object share a key.
        return version.key

    newest_first = sorted(
        bucket_r(bucket).object_versions.filter(Prefix=prefix),
        key=modified_at,
        reverse=True,
    )
    # prune_array(prunable, time_func, group_by_func, ...) -> (keep, delete)
    _keep, prunable = prune_array(
        newest_first,
        modified_at,
        key_of,
        ten_minutely=ten_minutely,
        hourly=hourly,
        daily=daily,
        weekly=weekly,
        monthly=monthly,
        yearly=yearly,
    )
    delete_selected(newest_first, prunable, key_of, modified_at, dry_run=dry_run)
| NitorCreations/ec2-utils | ec2_utils/s3.py | s3.py | py | 1,036 | python | en | code | 1 | github-code | 13 |
71397740498 | import ahocorasick
'''
function substring_intersect (substrings text[], search_strings text[)**
A fast, multi-string to joining 2 datasets using a 'like %pattern%'
- returns substrings and what they matched
- substring text, matched_search_strings text[]
'''
def substring_intersect(substrings, search_strings):
    """Fast multi-pattern substring join between two string lists.

    For every entry of *substrings* that occurs inside at least one entry
    of *search_strings* (a SQL 'like %pattern%' match), return a mapping of
    that substring to the set of search strings it matched.

    :param substrings: list of patterns to look for
    :param search_strings: list of strings to be searched
    :return: dict of substring -> set of matching search strings
    """
    # Empty/invalid input yields an empty result rather than raising.
    if not isinstance(substrings, list) or not substrings:
        return {}
    if not isinstance(search_strings, list) or not search_strings:
        return {}
    # Build an Aho-Corasick automaton so every pattern is searched in a
    # single pass over the corpus.
    automaton = ahocorasick.Automaton()
    for index, pattern in enumerate(substrings):  # was shadowing builtin `str`
        automaton.add_word(pattern, index)
    automaton.make_automaton()
    # Join the haystacks with the ASCII "unit separator" control character
    # so that match positions can be mapped back to individual items.
    SEPARATOR = chr(31)  # unit separator code
    corpus = SEPARATOR.join(search_strings)
    corpus_length = len(corpus)
    # Results keyed on substring; value is the set of matched search strings.
    results = {}
    for corpus_index, substring_index in automaton.iter(corpus):
        # Recover the boundaries of the search string containing this match.
        left_index = corpus.rfind(SEPARATOR, 0, corpus_index)
        left_index = left_index + 1 if left_index > -1 else 0
        # BUG FIX: str.find returns -1 (truthy) when absent, so the original
        # `find(...) or corpus_length` fallback was dead code; map -1 to the
        # corpus end explicitly instead.
        right_index = corpus.find(SEPARATOR, corpus_index)
        if right_index == -1:
            right_index = corpus_length
        substring = substrings[substring_index]
        search_string = corpus[left_index:right_index]
        # Record the found substring and its search_string.
        results.setdefault(substring, set()).add(search_string)
    # complete
    return results
| jmfn/py-substring-intersect | substring_intersect.py | substring_intersect.py | py | 1,647 | python | en | code | 0 | github-code | 13 |
32276755893 | # exercise 12: Distance Between Two Point on Earth
import math
lat1 = float(input('enter latitude 1: '))
lat2 = float(input('enter latitude 2: '))
lon1 = float(input('enter longitude 1: '))
lon2 = float(input('enter longitude 2: '))
# The trig functions need radians, not degrees.
lat1, lat2, lon1, lon2 = (math.radians(v) for v in (lat1, lat2, lon1, lon2))
# Spherical law of cosines with Earth's mean radius of 6371.01 km.
distance = 6371.01 * math.acos(
    math.sin(lat1) * math.sin(lat2)
    + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)
)
print('the distance between the two points on Earth is %.4f kilometers' % distance)
| sara-kassani/1000_Python_example | books/Python Workbook/introduction_to_programming/ex12.py | ex12.py | py | 539 | python | en | code | 1 | github-code | 13 |
73654501139 | from setuptools import setup
# Pull the long description from the README so PyPI renders it as markdown.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata for setuptools / PyPI distribution.
setup(name='random-brain',
      version='0.1.2',
      description='Python Random Brain Module',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='Ethan Nelson',
      author_email='ethanisaacnelson@gmail.com',
      url='https://github.com/einelson/Random-brain',
      packages=['random_brain'],
      install_requires=['numpy', 'keras'],
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          'Intended Audience :: Science/Research',
          'Programming Language :: Python :: 3 :: Only',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
      ],
      )
# notes
# https://stackoverflow.com/questions/52700692/a-guide-for-updating-packages-on-pypi
# python setup.py sdist
# python setup.py bdist_wheel
# twine upload dist/* | einelson/Random-brain | setup.py | setup.py | py | 947 | python | en | code | 0 | github-code | 13 |
24550906423 | import time, copy
from smbus import SMBus
from RPiSensors.BMP280 import BMP280
from RPiSensors.MPU9250 import MPU9250
class SensorChannel:
    """One filtered sensor stream.

    Wraps a raw ``sample`` callable with a two-stage exponential filter:
    coefficient 0 is used normally, coefficient 1 (the "secondary" filter)
    kicks in for ``secondary_duration`` samples after a reading jumps by
    more than ``bands[0]``; readings that jump by more than ``bands[1]``
    are rejected outright.  Optionally tees raw samples to a CSV file.
    """
    def __init__(self, sample, period, coefficients, bands, secondary_duration):
        # Callable returning a list of raw readings, or None on failure.
        self.sample_func = sample
        # Minimum seconds between successive read() results.
        self.period = period
        # [primary, secondary] exponential filter coefficients.
        self.coefficients = coefficients
        # [secondary trigger band, outright rejection band].
        self.bands = bands
        self.secondary_duration = secondary_duration
        # Filtered reading; None until the first sample arrives.
        self.state = None
        self.next_time = 0
        self.last_sample_time = 0
        self.secondary_filter_count = 0
        self.reset_counts()
        self.raw_collection_expiration = None
        self.raw_collection_file = None
    def reset_counts(self):
        """Zero the per-period accept/reject/secondary counters."""
        self.sample_count = 0
        self.reject_count = 0
        self.secondary_use_count = 0
    def filter_values (self, values):
        """Fold one raw reading into the filtered state (and the raw log)."""
        if self.raw_collection_file is not None:
            # Append "v1,v2,...,timestamp" to the raw collection CSV.
            raw = ['%.4f'%a for a in values]
            raw.append ('%.2f'%self.last_sample_time)
            self.raw_collection_file.write(
                ','.join(raw) + '\n')
            if time.time() >= self.raw_collection_expiration:
                self.raw_collection_file.close()
                self.raw_collection_file = None
                print ("Raw collection complete.")
        if self.state is None:
            # First reading seeds the filter state directly.
            self.state = [v for v in values]
        else:
            trigger_secondary = False
            filt = 0
            diff = [(i-s) for i,s in zip(values,self.state)]
            adiff = [abs(d) for d in diff]
            max_adiff = max(adiff)
            if max_adiff > self.bands[1]:
                # Jump beyond the rejection band: drop the reading entirely.
                self.reject_count += 1
                return
            elif max_adiff > self.bands[0]: trigger_secondary = True
            if trigger_secondary: self.secondary_filter_count = 1
            if self.secondary_filter_count > 0:
                # Secondary window active: use the faster coefficient.
                filt = 1
                self.secondary_use_count += 1
            self.state = [(s+self.coefficients[filt]*d)
                          for s,d in zip(self.state,diff)]
            if self.secondary_filter_count > 0:
                self.secondary_filter_count += 1
                if self.secondary_filter_count > self.secondary_duration:
                    self.secondary_filter_count = 0
        self.sample_count += 1
    def sample(self):
        """Take one raw sample and filter it (no-op if sampling failed)."""
        values = self.sample_func()
        if values is not None:
            self.last_sample_time = time.time()
            self.filter_values (values)
    def read(self):
        """Return (state, [samples, secondary, rejects], last_sample_time)
        once per ``period`` seconds; (None, None, None) in between."""
        tm = time.time()
        if tm > self.next_time:
            stats = [self.sample_count, self.secondary_use_count, self.reject_count]
            if self.reject_count > self.sample_count:
                # Mostly rejects this period: the filter has drifted, so
                # reseed its state from a fresh raw sample.
                values = self.sample_func()
                self.state = [v for v in values]
                print ("channel reset state %s because reject_count(%d) > sample_count(%d)"%(
                    str(self.state), self.reject_count, self.sample_count))
            self.reset_counts()
            self.next_time = tm + self.period
            return self.state,stats,self.last_sample_time
        else:
            return None,None,None
    def update_sensor(self, pname, val):
        """Set one tuning parameter by (prefix of a) name; True on success."""
        ret = True
        if 'polling period (ms)'.startswith(pname):
            self.period = val
        elif 'f0'.startswith(pname):
            self.coefficients[0] = val
        elif 'f1'.startswith(pname):
            self.coefficients[1] = val
        elif 'sband'.startswith(pname):
            self.bands[0] = val
        elif 'rejection_band'.startswith(pname):
            self.bands[1] = val
        elif 'sduration'.startswith(pname):
            self.secondary_duration = val
        else:
            print ("Invalid parameter ID: %s"%pname)
            ret = False
        return ret
    def print_stats(self):
        """Print the per-period counters."""
        print ("sample_count %d, sec %d, rej %d"%(
            self.sample_count, self.secondary_use_count, self.reject_count))
    def print_data(self):
        """Print the current filtered state."""
        print (str(self.state))
    def save_config(self):
        """Snapshot the tuning parameters (see restore_config)."""
        self.save_period = self.period
        self.save_coefficients = copy.copy(self.coefficients)
        self.save_bands = copy.copy(self.bands)
        self.save_secondary_duration = self.secondary_duration
    def restore_config(self):
        """Restore the tuning parameters saved by save_config."""
        self.period = self.save_period
        self.coefficients = self.save_coefficients
        self.bands = self.save_bands
        self.secondary_duration = self.save_secondary_duration
base_sensor_suite = dict()
sensor_objects = dict()
def start(config):
    """Initialise the sensor chips and build a SensorChannel per measurement.

    :param config: dict with optional 'ado' (address pin) and 'devnum'
        (i2c bus number), plus a 'sensors' mapping of measurement name
        ('pressure', 'temperature', 'accel', 'gyro', 'magnet') to its
        filter parameters.
    :return: True on success, False if a required chip failed to start.
    :raises RuntimeError: on an unknown measurement name.
    """
    global base_sensor_suite
    ado = 0 if 'ado' not in config else config['ado']
    base_sensor_suite[9250] = MPU9250(ado=ado)
    base_sensor_suite[280] = BMP280(ado=ado)
    devnum = 1 if 'devnum' not in config else config['devnum']
    # Open i2c bus
    bus = SMBus(devnum)
    sensor_list = config['sensors']
    # BUG FIX: the original tested `'accel' == sensor_list` (always False for
    # a dict), so an accel-only configuration never initialised the MPU9250.
    if 'accel' in sensor_list or 'magnet' in sensor_list or \
            'gyro' in sensor_list:
        if not base_sensor_suite[9250].begin(bus):
            return False
    # Same fix for the original `'pressure' == sensor_list`.
    if 'pressure' in sensor_list or 'temperature' in sensor_list:
        if not base_sensor_suite[280].begin(bus):
            return False
    for measurement, parms in config['sensors'].items():
        if 'pressure' == measurement:
            sample_func = base_sensor_suite[280].readPressure
        elif 'temperature' == measurement:
            sample_func = base_sensor_suite[280].readTemperature
        elif 'accel' == measurement:
            sample_func = base_sensor_suite[9250].readAccel
        elif 'gyro' == measurement:
            sample_func = base_sensor_suite[9250].readGyro
        elif 'magnet' == measurement:
            sample_func = base_sensor_suite[9250].readMagnetometer
        else:
            raise RuntimeError ("sensor type %s from config not found"%measurement)
        sensor_objects[measurement] = SensorChannel (sample_func,
                                                    parms['period'],
                                                    [parms['filter_coefficient1'],
                                                     parms['filter_coefficient2']],
                                                    [parms['secondary_band'],
                                                     parms['rejection_band']],
                                                    parms['secondary_filter_duration'])
    return True
def sample_sensors():
    """Take one raw sample on every configured channel."""
    for channel in sensor_objects.values():
        channel.sample()
def read_magnetic():
    """Return (state, stats, timestamp) for the magnetometer channel,
    or (None, None, None) when it is not configured."""
    channel = sensor_objects.get('magnet')
    if channel is None:
        return None, None, None
    return channel.read()
def read_gyroscope():
    """Return (state, stats, timestamp) for the gyroscope channel,
    or (None, None, None) when it is not configured."""
    channel = sensor_objects.get('gyro')
    if channel is None:
        return None, None, None
    return channel.read()
def read_accelerometer():
    """Return (state, stats, timestamp) for the accelerometer channel,
    or (None, None, None) when it is not configured."""
    channel = sensor_objects.get('accel')
    if channel is None:
        return None, None, None
    return channel.read()
def read_temperature():
    """Return (state, stats, timestamp) for the temperature channel,
    or (None, None, None) when it is not configured."""
    channel = sensor_objects.get('temperature')
    if channel is None:
        return None, None, None
    return channel.read()
def read_pressure():
    """Return (state, stats, timestamp) for the pressure channel,
    or (None, None, None) when it is not configured."""
    channel = sensor_objects.get('pressure')
    if channel is None:
        return None, None, None
    return channel.read()
def modify_sensor_parm(sname, pname, val):
    """Update one filter parameter on the first channel whose name starts
    with *sname*; *val* must parse as a float."""
    global sensor_objects
    try:
        val = float(val)
    except Exception as e:
        print ("Invalid value: %s (%s)"%(val, str(e)))
        return
    for chname, obj in sensor_objects.items():
        # Keep scanning until one channel both matches and accepts the parm.
        if chname.startswith(sname) and obj.update_sensor(pname, val):
            print ("Update %s[%s] = %g"%(chname,pname,val))
            return
    print ("Can't find sensor %s"%sname)
def collect_raw (sname, seconds, filename):
    """Start appending raw samples from channel *sname* to *filename* for
    *seconds* seconds (the channel closes the file itself when time is up)."""
    global sensor_objects
    try:
        seconds = float(seconds)
    except Exception as e:
        print ("Invalid time: %s (%s)"%(seconds, str(e)))
        return
    for chname, obj in sensor_objects.items():
        if chname.startswith(sname):
            print ("collect_raw: begin")
            obj.raw_collection_expiration = time.time() + seconds
            obj.raw_collection_file = open(filename, 'a+')
            return
    print ("Can't find sensor %s"%sname)
def print_sensor(sname):
    """Print the current state and counters of channel *sname*."""
    global sensor_objects
    for chname, obj in sensor_objects.items():
        if chname.startswith(sname):
            print ("%s:"%chname)
            obj.print_data()
            obj.print_stats()
            return
    print ("Can't find sensor %s"%sname)
| Maker42/openEFIS | RPiSensors/sensors.py | sensors.py | py | 8,358 | python | en | code | 13 | github-code | 13 |
31773626554 | import requests
import socket
import json
from uuid import getnode as get_mac
# Authenticate against the API to obtain a session token.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables.
url = "http://18.235.27.33/api/user/login"
payload = "email=jfbauer%40oakland.edu&password=432234"
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.request("POST", url, data=payload, headers=headers)
data = response.json()
token = data['token']
# Device identity: MAC address and hostname of this machine.
mac = str(get_mac())
host = socket.gethostname()
# Register this device under the authenticated account.
url = "http://18.235.27.33/api/device/register"
payload = "name=" + host + "&macaddress="+ mac + "&model=Flyboi5000&ipaddress=192.168.1.100&ispublic=true"
headers = {
    'Authorization': token,
    'Content-Type': "application/x-www-form-urlencoded"
}
response = requests.request("POST", url, data=payload, headers=headers)
regReturn = response.json()
print(regReturn)
# Persist the device id for later runs.
# BUG FIX: the original called `file.close` without parentheses, so the
# handle was never explicitly closed; a context manager guarantees it.
device_id = regReturn['_id']
with open('id.key', 'w') as key_file:
    key_file.write(device_id)
print(response.text)
| mblaul/skypi | pi/py/register.py | register.py | py | 955 | python | en | code | 1 | github-code | 13 |
35290099459 | import yagmail
from smtplib import SMTPAuthenticationError
from os import path
from typing import Optional
from tempfile import mkdtemp
from ovos_utils.log import LOG, log_deprecation
from ovos_config.locations import get_xdg_config_save_path
from ovos_config.config import Configuration
from neon_utils.file_utils import decode_base64_string_to_file
from neon_utils.configuration_utils import NGIConfig, init_config_dir
_CONFIG = None
def get_config():
    """Return (and lazily cache) the email credential configuration.

    Prefers the deprecated ``ngi_auth_vars.yml`` file when present,
    otherwise reads ``keys.emails`` from the packaged Configuration;
    falls back to the latter on any error.
    """
    global _CONFIG
    if not _CONFIG:
        try:
            init_config_dir()
            legacy = path.join(get_xdg_config_save_path(), "ngi_auth_vars.yml")
            if path.isfile(legacy):
                log_deprecation(f"Legacy configuration found at: {legacy}",
                                "1.0.0")
                _CONFIG = NGIConfig("ngi_auth_vars").get("emails")
            else:
                _CONFIG = Configuration().get("keys", {}).get("emails")
        except Exception as exc:
            LOG.exception(exc)
            _CONFIG = Configuration().get("keys", {}).get("emails")
    return _CONFIG
def write_out_email_attachments(attachments: dict) -> list:
    """
    Write out email attachments to local files
    :param attachments: dict of attachment file names to string-encoded bytes
    :return: list of paths to attachment files
    """
    att_files = []
    if not attachments:
        return att_files
    LOG.debug("Handling attachments")
    for att_name, encoded in attachments.items():
        if not encoded:
            # Skip entries with no data.
            continue
        # A fresh temp dir per attachment avoids file-name collisions.
        destination = path.join(mkdtemp(), att_name)
        att_files.append(decode_base64_string_to_file(encoded, destination))
    return att_files
def send_ai_email(subject: str, body: str, recipient: str,
                  attachments: Optional[list] = None,
                  email_config: dict = None):
    """
    Email a user. Email config may be provided or read from configuration
    :param subject: Email subject
    :param body: Email body
    :param recipient: Recipient email address (or list of email addresses)
    :param attachments: Optional list of attachment file paths
    :param email_config: Optional SMTP config to use as sender
    """
    config = email_config or get_config()
    try:
        sender = config['mail']
        password = config['pass']
        host = config['host']
        port = config['port']
    except (TypeError, KeyError):
        LOG.error(f"Invalid Config: {config}")
        raise RuntimeError("Invalid email auth config")
    LOG.info(f"send {subject} to {recipient}")
    try:
        with yagmail.SMTP(sender, password, host, port) as smtp:
            smtp.send(to=recipient, subject=subject, contents=body,
                      attachments=attachments)
    except SMTPAuthenticationError as e:
        LOG.error(f"Invalid credentials provided in config: {config}")
        raise e
| NeonGeckoCom/neon_email_proxy | neon_email_proxy/email_utils.py | email_utils.py | py | 3,042 | python | en | code | 0 | github-code | 13 |
14934780693 | import os
import base64
import random
from flask import Flask, request, jsonify
from predict import Tampering_Detection_Service
# File-name suffixes used for the generated ELA image and the temporary
# working copy produced by the detection service.
ELA_EXT = ".ela.png"
TMP_EXT = ".temp.jpg"
# instantiate flask app
app = Flask(__name__)
@app.route("/predict", methods=["POST"])
def predict():
    """Run tampering detection on an uploaded image.

    Expects a POST with a 'name' part (original file name, used only for
    its extension) and a 'file' part containing a base64 data-URL of the
    image.  Responds with the model's score and the base64-encoded ELA
    (error-level-analysis) image.

    NOTE(review): both parts are read via ``request.files`` but then
    treated as plain strings (``splitext``/``find``/``b64decode``), which
    a werkzeug FileStorage does not support -- presumably the client sends
    them as form fields; confirm against the caller.
    """
    file_name = request.files['name']
    _, ext = os.path.splitext(file_name)
    # Random working name avoids collisions between concurrent requests.
    name = str(random.randint(0, 100000))
    name_key = name + ext
    image = request.files['file']
    # Strip the data-URL prefix (everything through the first comma).
    image = image[image.find(",")+1:]
    # Extra '=' padding tolerates clients that strip base64 padding.
    decode = base64.b64decode(image + "===")
    with open(name_key, "wb") as fh:
        fh.write(decode)
    # Instantiate the tampering-detection service singleton and score the image.
    tds = Tampering_Detection_Service()
    predicted = tds.predict(name_key, name)
    ela_key = name + ELA_EXT
    elab64 = ""
    with open(ela_key, "rb") as ela_img:
        elab64 = base64.b64encode(ela_img.read())
    # result json
    result = {"accurency": predicted, "ela": elab64}
    tmp_key = name + TMP_EXT
    # Remove the temporary images written during prediction.
    os.remove(name_key)
    os.remove(ela_key)
    os.remove(tmp_key)
    return jsonify(result)
if __name__ == "__main__":
    # Development entry point; use a WSGI server in production.
    app.run(debug=False)
| carlosagil/ela | server/flask/server.py | server.py | py | 1,199 | python | en | code | 1 | github-code | 13 |
def level_averages(root):
    """Return the average node value at each depth of a binary tree,
    ordered from the root level downward.  An empty tree yields []."""
    if root is None:
        return []
    averages = []
    frontier = [root]
    # Breadth-first sweep: process one whole level per pass.
    while frontier:
        values = [node.val for node in frontier]
        averages.append(sum(values) / len(values))
        next_frontier = []
        for node in frontier:
            if node.left:
                next_frontier.append(node.left)
            if node.right:
                next_frontier.append(node.right)
        frontier = next_frontier
    return averages
35451451042 | # -*- coding:utf-8 -*-
import requests
from scrapy.selector import Selector
import pymysql
# Static request headers that mimic a desktop Chrome browser so that
# xicidaili.com serves the normal listing page to the scraper.
headers = {
    "Host": "www.xicidaili.com",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.360",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
}
def crawl_ips():
    """Scrape proxy servers from every listing page of xicidaili.com and
    upsert them into the ``ippool`` table (uses module-level cursor/conn)."""
    response = requests.get("http://www.xicidaili.com/nn", headers=headers)
    selector = Selector(text=response.text)
    # Second-to-last pagination link holds the last page number.
    last_page = selector.css(".pagination a::text").extract()[-2]
    proxies = []
    for page in range(1, int(last_page) + 1):
        if page != 1:
            response = requests.get(
                "http://www.xicidaili.com/nn/{0}".format(page), headers=headers)
            selector = Selector(text=response.text)
        for row in selector.css("#ip_list tr")[1:]:  # skip the header row
            speed = row.css(".bar::attr(title)").extract_first(None)
            if not speed:
                continue
            speed = float(speed.split('秒')[0])
            cells = row.css("td::text").extract()
            proxies.append((cells[0], cells[1], cells[5], speed))
    for ip, port, proxy_type, speed in proxies:
        cursor.execute("""
        insert into ippool (ip, port, speed, proxy_type) VALUES (%s, %s, %s, %s)
        on duplicate KEY update
        speed=VALUES (speed),
        proxy_type=VALUES (proxy_type)
        """, (ip, port, speed, proxy_type))
    conn.commit()
class GetIp(object):
    """Fetch a random, validated proxy from the ``ippool`` table."""

    def __init__(self, cursor):
        # DB-API cursor used for all pool queries.
        self.cursor = cursor

    def _judge_ip(self, *mess):
        """Check whether an (ip, port, protocol) triple can reach bilibili.

        Dead or misbehaving entries are removed from the pool.
        :return: True when usable, False otherwise.
        """
        ip, port, http = mess
        http_url = 'https://www.bilibili.com'
        proxy_url = "{0}://{1}:{2}".format(http.lower(), ip, port)
        proxy_dict = {
            "{}".format(http.lower()): proxy_url
        }
        try:
            response = requests.get(http_url, proxies=proxy_dict, timeout=5)
        except Exception:
            print('invalid ip address and port {}'.format(proxy_url))
            self._delete_ip(ip)
            return False
        # Treat 2xx and early 3xx responses as reachable.
        code = response.status_code
        if 200 <= code < 302:
            print('effective proxy {}'.format(proxy_url))
            return True
        print('invalid ip address and port {}'.format(proxy_url))
        self._delete_ip(ip)
        return False

    def get_random_ip(self):
        """Return a random validated proxy URL, retrying until one works.

        NOTE(review): recurses forever if the pool is empty; consider a cap.
        """
        random_sql = """
        select ip, port, proxy_type from ippool
        ORDER BY rand()
        limit 1
        """
        self.cursor.execute(random_sql)
        for ip_info in self.cursor.fetchall():
            if self._judge_ip(*ip_info):
                return "{}://{}:{}".format(ip_info[-1].lower(), ip_info[0], ip_info[1])
        # No (working) row this round -- draw again.
        return self.get_random_ip()

    def _delete_ip(self, ip):
        """Remove *ip* from the pool.

        BUG FIX: parameterized query -- the original interpolated the ip
        straight into the SQL string (injection-prone and inconsistent
        with the rest of the module).
        """
        self.cursor.execute("delete from ippool where ip=%s", (ip,))
        return True
if __name__ == '__main__':
    # Ad-hoc driver: connect to the local MySQL pool and print one
    # validated proxy.  Uncomment crawl_ips() to refresh the pool first.
    conn = pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='123456',
        db='learning',
        charset='utf8'
    )
    cursor = conn.cursor()
    # crawl_ips()
    get_ip = GetIp(cursor)
    ip_proxy = get_ip.get_random_ip()
    print(ip_proxy)
class Solution:
    """LeetCode 20 -- Valid Parentheses."""

    def isValid(self, s: str) -> bool:
        """Return True when every bracket in *s* is closed by the matching
        bracket type in the correct order; other characters are ignored."""
        match_for = {"(": ")", "[": "]", "{": "}"}
        pending = []
        for symbol in s:
            if symbol in match_for:
                # Opener: remember which closer we now expect.
                pending.append(symbol)
            elif symbol in ")]}":
                # Closer: it must pair with the most recent opener.
                if not pending or match_for[pending.pop()] != symbol:
                    return False
        # Valid only if every opener found its closer.
        return not pending
# Quick smoke test: a single balanced pair should print True.
solution_instance = Solution()
print(solution_instance.isValid("()"))
74332251538 | import json
import random
from collections import Counter
class AgenteAdivinhacaoPalavras:
    """Self-playing word-guessing agent: it picks a secret word from the
    vocabulary and then guesses whole words until the secret is found."""
    def __init__(self, palavras):
        # Full candidate vocabulary (list of str).
        self.palavras = palavras
        # The secret word the agent must discover.
        self.palavra = random.choice(palavras)
        # Revealed-so-far pattern; '_' marks an unknown position.
        self.palavra_oculta = ['_' for _ in self.palavra]
        # Words already guessed (never repeated).
        self.palavras_tentadas = set()
    def adivinhar_palavra(self):
        """Return the agent's next whole-word guess (None when exhausted)."""
        # Candidates: same length as the secret and not tried yet.
        palavras_possiveis = [palavra for palavra in self.palavras if len(palavra) == len(self.palavra) and palavra not in self.palavras_tentadas]
        # Count letter frequencies across the remaining candidates.
        frequencia_letras = Counter(''.join(palavras_possiveis))
        # Most common letters first.
        letras_ordenadas = sorted(frequencia_letras, key=frequencia_letras.get, reverse=True)
        # Pick the first candidate consistent with the revealed pattern.
        # NOTE(review): `letra` is never used inside this loop, so the outer
        # loop just repeats the same scan -- the first consistent word wins
        # regardless of letter frequency; confirm this is intended.
        for letra in letras_ordenadas:
            for palavra in palavras_possiveis:
                if all(palavra[i] == self.palavra_oculta[i] or self.palavra_oculta[i] == '_' for i in range(len(palavra))):
                    return palavra
        # Fallback: a random untried candidate.
        if palavras_possiveis:
            return random.choice(palavras_possiveis)
    def jogar(self):
        """Play one full game, printing progress until the word is found."""
        print("Bem-vindo ao jogo de adivinhação de palavras!")
        tentativas = 0
        while True:
            print(' '.join(self.palavra_oculta))
            palpite = self.adivinhar_palavra()
            self.palavras_tentadas.add(palpite)
            tentativas += 1
            print(f"Palpite do agente: {palpite}")  # Show the agent's guess
            # Reveal every position where the guess matches the secret.
            acertos = [i for i, letra in enumerate(self.palavra) if letra == palpite[i]]
            for i in acertos:
                self.palavra_oculta[i] = self.palavra[i]
            if palpite == self.palavra:
                print(f"Parabéns! O agente adivinhou a palavra '{self.palavra}' em {tentativas} tentativas.")
                break
# Load the word list from the bundled JSON file.
# BUG FIX: the original path literal contained a bare backslash
# ('...\palavras.json'); '\p' is an invalid escape sequence
# (DeprecationWarning today, an error in future Pythons), so use a raw
# string.  Also read as UTF-8 explicitly since the vocabulary is Portuguese.
with open(r'Adivinhador-de-palavras\palavras.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
palavras = data['palavras']
# Run one self-playing game with the loaded vocabulary.
agente = AgenteAdivinhacaoPalavras(palavras)
agente.jogar()
| HianPraxedes/Adivinhador-de-palavras | adivinharPalavras.py | adivinharPalavras.py | py | 2,329 | python | pt | code | 0 | github-code | 13 |
68716082 | import datetime
import csv
import pandas as pd
import ipaddress
# Analyze a sorted server ping log, reporting per-server crashes, whole-subnet
# switch failures, and overload (high average ping) periods.
WAIT=2 # A server is declared crashed once timeouts exceed this count; WAIT=2 means 3 consecutive timeouts flag a crash
PING_OVER=100 # Average ping above this value counts as overloaded
AVE_RANGE=3 # Window size for the moving average
df = pd.read_csv("server_log_subnet.csv",index_col=["server_address"])
df= df.sort_values(["server_address","datetime"])
allserver=set()
for index,dt,ping in zip( df.index,df["datetime"],df["ping"]):
    allserver.add(index)
now_index=0
crash_list=[]
CrashFrag=0
for index,dt,ping in zip( df.index,df["datetime"],df["ping"]):
    # Parse the YYYYMMDDhhmmss timestamp into a datetime.
    dt_s=str(dt)
    y,mon,d,h,m,s=map(int,(dt_s[0:4],dt_s[4:6],dt_s[6:8],dt_s[8:10],dt_s[10:12],dt_s[12:14]))
    d_time=datetime.datetime(y,mon,d,h,m,s)
    if(now_index!=index):# Reset all per-server state before starting on a different server
        same_subnet=set()
        crash_subnet=set()
        crash_subnet_time=set()
        now_index=index
        cr_count=0
        ping_range=[]
        ol_count=0
        sncr_count=0
        allstop=0
    elif(now_index==index):
        y,mon,d,h,m,s=map(int,(dt_s[0:4],dt_s[4:6],dt_s[6:8],dt_s[8:10],dt_s[10:12],dt_s[12:14]))
        d_time=datetime.datetime(y,mon,d,h,m,s)
        s_ip=ipaddress.ip_interface(index)
        # Collect every server that shares a subnet with the current server
        for ip in allserver:
            if(s_ip in ipaddress.ip_network(ip, strict=False)):
                same_subnet.add(ip)
        # Crash handling
        if(cr_count==0 and ping=="-"):
            cr_count+=1
            crash_time=d_time
        elif(cr_count!=0 and ping=="-"):
            cr_count+=1
            if(cr_count==WAIT+1):
                print(str(d_time)+"_ Server"+index+" Crash")
                crash_list.append([index,d_time])
            elif(cr_count>WAIT+1):
                crash_list.append([index,d_time])
            if(cr_count>=WAIT+1):
                for ip,ctime in crash_list:
                    # Add to the set when this crash-list ip shares the current subnet
                    if(s_ip in ipaddress.ip_network(ip, strict=False)):
                        crash_subnet.add(ip)
                    subnet=ipaddress.ip_network(index, strict=False)
                    # Add to the set when the timeouts happened within one minute of each other
                    if(abs(d_time-ctime)<datetime.timedelta(minutes=1)):
                        crash_subnet_time.add(ip)
            for ip,ctime in crash_list:# Subnet-switch failure detection
                # Count when every server in this subnet has stopped
                if(same_subnet==crash_subnet and same_subnet==crash_subnet_time ):
                    allstop+=1
                # Declare the subnet switch failed once allstop reaches WAIT+1
                if(allstop>=WAIT+1):
                    sncr_count+=1
                    if(sncr_count==1):
                        print(str(d_time)+"_ Subnet switch"+str(subnet)+" Crash")
                        sncr_time=d_time
        elif(cr_count!=0 and ping!="-"):
            crash_length=d_time-crash_time
            if(sncr_count!=0 and cr_count>=WAIT+1):
                print(str(d_time)+"_ Subnet switch"+str(subnet)+" Crash_length: "+str(d_time-sncr_time+datetime.timedelta(minutes=WAIT)))
            if(cr_count>=WAIT+1):
                print(str(d_time)+"_ Server"+index+" Crash_length: "+str(crash_length))
                CrashFrag=1
            cr_count=0
            sncr_count=0
            allstop=0
        # Overload handling
        if(ping=="-"):
            ping_new=PING_OVER*AVE_RANGE # A timeout inside the averaging window always forces an overload verdict
        else:
            ping_new=int(ping)
        if(len(ping_range)<AVE_RANGE):
            ping_range.append(ping_new)
        else:
            ping_range.pop(0)
            ping_range.append(ping_new)
        ping_ave=sum(ping_range)/len(ping_range)
        if(ping_ave>=PING_OVER):
            ol_count+=1
            if(ol_count==1):
                ol_time=d_time
                print(str(d_time)+"_ Server"+index+" Overload")
        elif(ol_count!=0 and ping_ave<PING_OVER):
            ol_length=d_time-ol_time
            if(CrashFrag==0):# Don't report the overload duration when a crash occurred
                print(str(d_time)+"_ Server"+index+" Overload_length: "+str(ol_length))
            else:
                CrashFrag=0
            ol_count=0
    same_subnet.clear()
    crash_subnet.clear()
11611697077 | import unittest
from factoryMethod import pessoa
__author__ = 'Bruno'
class Test(unittest.TestCase):
    """Exercise PersonFactory's factory-method dispatch."""

    def test_customer(self):
        # "customer" must produce a Customer instance.
        person = pessoa.PersonFactory().build_person("customer")
        person.name = "Bruno"
        person.say_hello()
        self.assertTrue(isinstance(person, pessoa.Customer))

    def test_employee(self):
        # "employee" must produce an Employee instance.
        person = pessoa.PersonFactory().build_person("employee")
        person.name = "Jarvis"
        person.say_hello()
        self.assertTrue(isinstance(person, pessoa.Employee))
if __name__ == '__main__':
    # Allow running this module directly with `python test_factoryMethod.py`.
    unittest.main()
| brunodmartins/PythonPatterns | factoryMethod/test_factoryMethod.py | test_factoryMethod.py | py | 594 | python | en | code | 2 | github-code | 13 |
35604571139 | import numbers
import warnings
import networkx as nx
import numpy as np
from queueing_tool.graph.graph_functions import _test_graph, _calculate_distance
from queueing_tool.graph.graph_wrapper import QueueNetworkDiGraph
from queueing_tool.union_find import UnionFind
def generate_transition_matrix(g, seed=None):
    """Build a random routing matrix for the graph ``g``.

    Parameters
    ----------
    g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.
        Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
    seed : int (optional)
        An integer used to initialize numpy's psuedo-random number
        generator.

    Returns
    -------
    mat : :class:`~numpy.ndarray`
        ``mat[i, j]`` is the probability of transitioning from vertex
        ``i`` to vertex ``j``; it is zero when no edge joins ``i`` to
        ``j``.
    """
    g = _test_graph(g)
    if isinstance(seed, numbers.Integral):
        np.random.seed(seed)

    num_nodes = g.number_of_nodes()
    mat = np.zeros((num_nodes, num_nodes))

    for vertex in g.nodes():
        targets = [edge[1] for edge in sorted(g.out_edges(vertex))]
        out_degree = len(targets)
        if out_degree == 1:
            # Single outgoing edge: all probability mass goes there.
            mat[vertex, targets] = 1
        elif out_degree > 1:
            # Draw weights in hundredths, then normalize them to sum to one.
            weights = np.ceil(np.random.rand(out_degree) * 100) / 100.
            if np.isclose(np.sum(weights), 0):
                # Degenerate all-zero draw: pick one target arbitrarily.
                weights[np.random.randint(out_degree)] = 1
            mat[vertex, targets] = weights / np.sum(weights)
    return mat
def generate_random_graph(num_vertices=250, prob_loop=0.5, **kwargs):
    """Creates a random graph where the edges have different types.
    This method calls :func:`.minimal_random_graph`, and then adds
    a loop to each vertex with ``prob_loop`` probability. It then
    calls :func:`.set_types_random` on the resulting graph.
    Parameters
    ----------
    num_vertices : int (optional, default: 250)
        The number of vertices in the graph.
    prob_loop : float (optional, default: 0.5)
        The probability that a loop gets added to a vertex.
    **kwargs :
        Any parameters to send to :func:`.minimal_random_graph` or
        :func:`.set_types_random`.
    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        A graph with the position of the vertex set as a property.
        The position property is called ``pos``. Also, the ``edge_type``
        edge property is set for each edge.
    Examples
    --------
    The following generates a directed graph with 100 vertices where half
    the edges are type 1 and 1/4th are type 2 and 1/4th are type 3:
    >>> import queueing_tool as qt
    >>> pTypes = {1: 0.5, 2: 0.25, 3: 0.25}
    >>> g = qt.generate_random_graph(100, proportions=pTypes, seed=17)
    >>> non_loops = [e for e in g.edges() if e[0] != e[1]]
    >>> p1 = np.sum([g.ep(e, 'edge_type') == 1 for e in non_loops])
    >>> float(p1) / len(non_loops) # doctest: +ELLIPSIS
    0.486...
    >>> p2 = np.sum([g.ep(e, 'edge_type') == 2 for e in non_loops])
    >>> float(p2) / len(non_loops) # doctest: +ELLIPSIS
    0.249...
    >>> p3 = np.sum([g.ep(e, 'edge_type') == 3 for e in non_loops])
    >>> float(p3) / len(non_loops) # doctest: +ELLIPSIS
    0.264...
    To make an undirected graph with 25 vertices where there are 4
    different edge types with random proportions:
    >>> p = np.random.rand(4)
    >>> p = p / sum(p)
    >>> p = {k + 1: p[k] for k in range(4)}
    >>> g = qt.generate_random_graph(num_vertices=25, is_directed=False, proportions=p)
    Note that none of the edge types in the above example are 0. It is
    recommended use edge type indices starting at 1, since 0 is
    typically used for terminal edges.
    """
    g = minimal_random_graph(num_vertices, **kwargs)
    # Independently add a self-loop to each vertex with probability prob_loop.
    for v in g.nodes():
        e = (v, v)
        if not g.is_edge(e):
            if np.random.uniform() < prob_loop:
                g.add_edge(*e)
    # Assign random edge types to every edge (loops and non-loops alike).
    g = set_types_random(g, **kwargs)
    return g
def generate_pagerank_graph(num_vertices=250, **kwargs):
    """Creates a random graph where the vertex types are
    selected using their pagerank.
    Calls :func:`.minimal_random_graph` and then
    :func:`.set_types_rank` where the ``rank`` keyword argument
    is given by :func:`networkx.pagerank`.
    Parameters
    ----------
    num_vertices : int (optional, the default is 250)
        The number of vertices in the graph.
    **kwargs :
        Any parameters to send to :func:`.minimal_random_graph` or
        :func:`.set_types_rank`.
    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        A graph with a ``pos`` vertex property and the ``edge_type``
        edge property.
    Notes
    -----
    This function sets the edge types of a graph to be either 1, 2, or
    3. It sets the vertices to type 2 by selecting the top
    ``pType2 * g.number_of_nodes()`` vertices given by the
    :func:`~networkx.pagerank` of the graph. A loop is added
    to all vertices identified this way (if one does not exist
    already). It then randomly sets vertices close to the type 2
    vertices as type 3, and adds loops to these vertices as well. These
    loops then have edge types that correspond to the vertices type.
    The rest of the edges are set to type 1.
    """
    g = minimal_random_graph(num_vertices, **kwargs)
    r = np.zeros(num_vertices)
    # networkx 2.8.6 throws a warning with all pagerank functions except
    # _pagerank_python. We would need to ignore the warning even if we used
    # the recommended networkx.pagerank function.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            # In networkx 2.8.6, this function requires scipy, which isn't
            # a requirement of either networkx or queueing-tool. But the
            # other pagerank_* functions are deprecated so we'll only try
            # those if the recommended one fails.
            page_rank = nx.pagerank(g)
        except ImportError as exe:
            try:
                # This function is deprecated and is supposed to be removed
                # in networkx 3.0.
                page_rank = nx.pagerank_numpy(g)
            except Exception:
                # Catch Exception rather than a bare ``except:`` so that
                # KeyboardInterrupt/SystemExit are not swallowed; re-raise
                # the original ImportError, which is the actionable error.
                raise exe
    for k, pr in page_rank.items():
        r[k] = pr
    g = set_types_rank(g, rank=r, **kwargs)
    return g
def minimal_random_graph(num_vertices, seed=None, **kwargs):
    """Creates a connected graph with random vertex locations.
    Parameters
    ----------
    num_vertices : int
        The number of vertices in the graph.
    seed : int (optional)
        An integer used to initialize numpy's psuedorandom number
        generators.
    **kwargs :
        Unused.
    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        A graph with a ``pos`` vertex property for each vertex's
        position.
    Notes
    -----
    This function first places ``num_vertices`` points in a 10-by-10 square
    randomly (using the uniform distribution). Then, for every vertex
    ``v``, all other vertices with Euclidean distance less or equal to
    ``r`` are connect by an edge --- where ``r`` is the smallest number
    such that the graph ends up connected.
    """
    if isinstance(seed, numbers.Integral):
        np.random.seed(seed)
    points = np.random.random((num_vertices, 2)) * 10
    # Build every pairwise (squared) distance; O(n^2) candidate edges.
    edges = []
    for k in range(num_vertices - 1):
        for j in range(k + 1, num_vertices):
            v = points[k] - points[j]
            edges.append((k, j, v[0]**2 + v[1]**2))
    # Structured array lets us sort the candidate edges by distance.
    mytype = [('n1', int), ('n2', int), ('distance', float)]
    edges = np.array(edges, dtype=mytype)
    edges = np.sort(edges, order='distance')
    # Kruskal-style pass: add edges shortest-first, tracking connectivity
    # with a union-find; stop as soon as the graph is a single component.
    unionF = UnionFind([k for k in range(num_vertices)])
    g = nx.Graph()
    for n1, n2, dummy in edges:
        unionF.union(n1, n2)
        g.add_edge(n1, n2)
        if unionF.nClusters == 1:
            break
    pos = {j: p for j, p in enumerate(points)}
    g = QueueNetworkDiGraph(g.to_directed())
    g.set_pos(pos)
    return g
def set_types_random(g, proportions=None, loop_proportions=None, seed=None,
                     **kwargs):
    """Randomly sets ``edge_type`` (edge type) properties of the graph.
    This function randomly assigns each edge a type. The probability of
    an edge being a specific type is proscribed in the
    ``proportions``, ``loop_proportions`` variables.
    Parameters
    ----------
    g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.
        Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
    proportions : dict (optional, default: ``{k: 1. / 3 for k in range(1, 4)}``)
        A dictionary of edge types and proportions, where the keys are
        the types and the values are the proportion of non-loop edges
        that are expected to be of that type. The values can must sum
        to one.
    loop_proportions : dict (optional, default: ``{k: 1. / 4 for k in range(4)}``)
        A dictionary of edge types and proportions, where the keys are
        the types and the values are the proportion of loop edges
        that are expected to be of that type. The values can must sum
        to one.
    seed : int (optional)
        An integer used to initialize numpy's psuedorandom number
        generator.
    **kwargs :
        Unused.
    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        Returns the a graph with an ``edge_type`` edge property.
    Raises
    ------
    TypeError
        Raised when the parameter ``g`` is not of a type that can be
        made into a :any:`networkx.DiGraph`.
    ValueError
        Raises a :exc:`~ValueError` if the ``pType`` values do not sum
        to one.
    Notes
    -----
    If ``pTypes`` is not explicitly specified in the arguments, then it
    defaults to four types in the graph (types 0, 1, 2, and 3). It sets
    non-loop edges to be either 1, 2, or 3 33% chance, and loops are
    types 0, 1, 2, 3 with 25% chance.
    """
    g = _test_graph(g)
    if isinstance(seed, numbers.Integral):
        np.random.seed(seed)
    if proportions is None:
        proportions = {k: 1. / 3 for k in range(1, 4)}
    if loop_proportions is None:
        loop_proportions = {k: 1. / 4 for k in range(4)}
    # Loops and non-loops are typed from separate distributions.
    edges = [e for e in g.edges() if e[0] != e[1]]
    loops = [e for e in g.edges() if e[0] == e[1]]
    props = list(proportions.values())
    lprops = list(loop_proportions.values())
    if not np.isclose(sum(props), 1.0):
        raise ValueError("proportions values must sum to one.")
    if not np.isclose(sum(lprops), 1.0):
        raise ValueError("loop_proportions values must sum to one.")
    # Sample a type for each edge according to the requested proportions.
    eTypes = {}
    types = list(proportions.keys())
    values = np.random.choice(types, size=len(edges), replace=True, p=props)
    for k, e in enumerate(edges):
        eTypes[e] = values[k]
    types = list(loop_proportions.keys())
    values = np.random.choice(types, size=len(loops), replace=True, p=lprops)
    for k, e in enumerate(loops):
        eTypes[e] = values[k]
    g.new_edge_property('edge_type')
    for e in g.edges():
        g.set_ep(e, 'edge_type', eTypes[e])
    return g
def set_types_rank(g, rank, pType2=0.1, pType3=0.1, seed=None, **kwargs):
    """Creates a stylized graph. Sets edge and types using `pagerank`_.
    This function sets the edge types of a graph to be either 1, 2, or
    3. It sets the vertices to type 2 by selecting the top
    ``pType2 * g.number_of_nodes()`` vertices given by the
    :func:`~networkx.pagerank` of the graph. A loop is added
    to all vertices identified this way (if one does not exist
    already). It then randomly sets vertices close to the type 2
    vertices as type 3, and adds loops to these vertices as well. These
    loops then have edge types the correspond to the vertices type. The
    rest of the edges are set to type 1.
    .. _pagerank: http://en.wikipedia.org/wiki/PageRank
    Parameters
    ----------
    g : :any:`networkx.DiGraph`, :class:`~numpy.ndarray`, dict, etc.
        Any object that :any:`DiGraph<networkx.DiGraph>` accepts.
    rank : :class:`numpy.ndarray`
        An ordering of the vertices.
    pType2 : float (optional, default: 0.1)
        Specifies the proportion of vertices that will be of type 2.
    pType3 : float (optional, default: 0.1)
        Specifies the proportion of vertices that will be of type 3 and
        that are near pType2 vertices.
    seed : int (optional)
        An integer used to initialize numpy's psuedo-random number
        generator.
    **kwargs :
        Unused.
    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        Returns the a graph with an ``edge_type`` edge property.
    Raises
    ------
    TypeError
        Raised when the parameter ``g`` is not of a type that can be
        made into a :any:`DiGraph<networkx.DiGraph>`.
    """
    g = _test_graph(g)
    if isinstance(seed, numbers.Integral):
        np.random.seed(seed)
    # The highest-ranked nDests vertices become the type-2 "destinations".
    tmp = np.sort(np.array(rank))
    nDests = int(np.ceil(g.number_of_nodes() * pType2))
    dests = np.where(rank >= tmp[-nDests])[0]
    if 'pos' not in g.vertex_properties():
        g.set_pos()
    dest_pos = np.array([g.vp(v, 'pos') for v in dests])
    nFCQ = int(pType3 * g.number_of_nodes())
    # np.inf (np.infty was removed in NumPy 2.0 — same value, current name).
    min_g_dist = np.ones(nFCQ) * np.inf
    ind_g_dist = np.ones(nFCQ, int)
    # Sample nFCQ target points scattered around the destinations.
    # NOTE(review): theta is drawn in [0, 360) but np.cos/np.sin take
    # radians — preserved as-is for backward compatibility.
    r, theta = np.random.random(nFCQ) / 500., np.random.random(nFCQ) * 360.
    xy_pos = np.array([r * np.cos(theta), r * np.sin(theta)]).transpose()
    g_pos = xy_pos + dest_pos[np.array(np.mod(np.arange(nFCQ), nDests), int)]
    # For each target point, find the closest non-destination vertex.
    for v in g.nodes():
        if v not in dests:
            tmp = np.array([_calculate_distance(g.vp(v, 'pos'), g_pos[k, :]) for k in range(nFCQ)])
            min_g_dist = np.min((tmp, min_g_dist), 0)
            ind_g_dist[min_g_dist == tmp] = v
    ind_g_dist = np.unique(ind_g_dist)
    fcqs = set(ind_g_dist[:min(nFCQ, len(ind_g_dist))])
    dests = set(dests)
    # Tag type-2/3 vertices and make sure each has a self-loop.
    g.new_vertex_property('loop_type')
    for v in g.nodes():
        if v in dests:
            g.set_vp(v, 'loop_type', 3)
            if not g.is_edge((v, v)):
                g.add_edge(v, v)
        elif v in fcqs:
            g.set_vp(v, 'loop_type', 2)
            if not g.is_edge((v, v)):
                g.add_edge(v, v)
    # Default every edge to type 1, then retype the tagged loops.
    g.new_edge_property('edge_type')
    for e in g.edges():
        g.set_ep(e, 'edge_type', 1)
    for v in g.nodes():
        if g.vp(v, 'loop_type') in [2, 3]:
            e = (v, v)
            if g.vp(v, 'loop_type') == 2:
                g.set_ep(e, 'edge_type', 2)
            else:
                g.set_ep(e, 'edge_type', 3)
    return g
| djordon/queueing-tool | queueing_tool/graph/graph_generation.py | graph_generation.py | py | 14,578 | python | en | code | 60 | github-code | 13 |
2503835996 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 10:13:13 2015
@author: droz
DO NOT USE - DEPRECIATED
"""
from DataManager import NewsSource, News
from MessageManager import MessageManager
import datetime
import time
def hasAnyofTheresKeywords(keywords, text):
    """Return True if any of *keywords* occurs as a substring of *text*.

    The original loop fell through and implicitly returned ``None`` on a
    miss; ``any()`` returns a proper ``False`` (truthiness-compatible with
    every existing call site) and is the idiomatic form.
    """
    return any(word in text for word in keywords)
def _fct(lookingList, line):
    """Parse one CSV line of the Reuters corpus and return matching news.

    Each *line* is ``date,headline,body...``; for every registered search in
    *lookingList* (dicts with ``symbole``, ``startDate``, ``endDate`` and
    upper-cased ``keywords``) a :class:`News` is emitted when the date falls
    in range and a keyword appears in the headline or body.

    Returns a (possibly empty) list of :class:`News` objects.
    """
    newsList = []
    try:
        lines = line.split(',')
        head = lines[1]
        msg = ''.join(lines[2:])
        date = datetime.datetime.strptime(lines[0], "%Y-%m-%d %H:%M:%S")
        for lookingArgs in lookingList:
            if(date >= lookingArgs['startDate'] and date <= lookingArgs['endDate']):
                if(hasAnyofTheresKeywords(lookingArgs['keywords'], head.upper()) or hasAnyofTheresKeywords(lookingArgs['keywords'], msg.upper())):
                    newsList.append(News(pubDate=date, symbole=lookingArgs['symbole'], publication=head+msg, pubSource="Reuters"))
    except (ValueError, IndexError):
        # Header/blank/malformed lines: missing fields raise IndexError,
        # bad dates raise ValueError. Narrowed from a bare ``except:`` so
        # real bugs (and KeyboardInterrupt) are no longer swallowed.
        pass
    return newsList
class ReutersNewsSourceHDFSV2(NewsSource):
    """Collects news from a Reuters corpus stored on HDFS (RDD of CSV lines).

    Searches are registered with :meth:`lookingAll`/:meth:`lookingAt` and
    executed lazily over the RDD by :meth:`doIt`.
    """
    def __init__(self, filenameRdd):
        NewsSource.__init__(self)
        self.filenameRdd = filenameRdd
        self.lookingList = []  # registered searches, consumed by doIt()
    def lookingAll(self, symbole, keywords):
        """Register a search over the whole corpus range (2000-01-01 → today)."""
        startDate = "2000-01-01"
        endDate = time.strftime('%Y-%m-%d')
        startDate = datetime.datetime.strptime(startDate, "%Y-%m-%d")
        endDate = datetime.datetime.strptime(endDate, "%Y-%m-%d")
        self.lookingAt(symbole, startDate, endDate, keywords)
    def lookingAt(self, symbole, startDate, endDate, keywords):
        """Register a search for *symbole* between *startDate* and *endDate*.

        The symbol itself is always included among the search keywords.
        Fixed: the caller's *keywords* list is no longer mutated (the old
        code appended *symbole* to it in place).
        """
        upperKeywords = [x.upper() for x in list(keywords) + [symbole]]
        self.lookingList.append({'symbole' : symbole, 'startDate' : startDate, 'endDate' : endDate, 'keywords' : upperKeywords})
    def doIt(self):
        """Run every registered search over the RDD; returns an RDD of News."""
        lookingList = self.lookingList
        newsRdd = self.filenameRdd.flatMap(lambda x: _fct(lookingList, x)).filter(lambda x: x != [])
        MessageManager.debugMessage("ReutersNewsSourceHDFS : stop reading Reuters corpus")
        return newsRdd
class ReutersNewsSourceHDFS(NewsSource):
    '''
    Collects news from a Reuters corpus stored on HDFS (RDD of CSV lines).
    Unlike the V2 variant, each lookingAt() call filters the RDD directly
    and returns the resulting RDD of News objects.
    '''
    def __init__(self, filenameRdd):
        NewsSource.__init__(self)
        self.filenameRdd = filenameRdd
    def lookingAll(self, symbole, keywords):
        # Search the whole corpus range: 2000-01-01 through today.
        startDate = "2000-01-01"
        endDate = time.strftime('%Y-%m-%d')
        startDate = datetime.datetime.strptime(startDate, "%Y-%m-%d")
        endDate = datetime.datetime.strptime(endDate, "%Y-%m-%d")
        return self.lookingAt(symbole, startDate, endDate, keywords)
    def lookingAt(self, symbole, startDate, endDate, keywords):
        # Keyword matching is case-insensitive: everything is upper-cased.
        # NOTE(review): unlike the V2 class, *symbole* is NOT added to the
        # keywords here — confirm whether that is intentional.
        upperKeywords = [x.upper() for x in keywords]
        MessageManager.debugMessage("ReutersNewsSourceHDFS : start reading Reuters corpus")
        def hasAnyofTheresKeywords(keywords, text):
            # Local duplicate of the module-level helper; True on any match.
            for word in keywords:
                if(word in text):
                    return True
            return False
        def fct(line):
            # Map one CSV line ("date,headline,body...") to a News or None.
            try:
                lines = line.split(',')
                date = datetime.datetime.strptime(lines[0], "%Y-%m-%d %H:%M:%S")
                if(date >= startDate and date <= endDate):
                    head = lines[1]
                    msg = ''.join(lines[2:])
                    if(hasAnyofTheresKeywords(upperKeywords, head.upper()) or hasAnyofTheresKeywords(upperKeywords, msg.upper())):
                        #MessageManager.debugMessage("ReutersNewsSource : head or msg has keywords")
                        return News(pubDate=date, symbole=symbole, publication=head+msg, pubSource="Reuters")
            except:
                # NOTE(review): bare except silently skips malformed lines
                # (header/blank rows) but also hides real bugs.
                pass # explicative line or empty
            return None
        newsRdd = self.filenameRdd.map(fct).filter(lambda x: x != None)
        MessageManager.debugMessage("ReutersNewsSourceHDFS : stop reading Reuters corpus")
        return newsRdd
class ReutersNewsSource(NewsSource):
    '''
    Collects news from a Reuters corpus stored at a local file path.
    Matching News objects are appended to self.news (inherited from
    NewsSource) rather than returned.
    '''
    def __init__(self, filename):
        NewsSource.__init__(self)
        self.filename = filename
    def hasAnyofTheresKeywords(self, keywords, text):
        # True if any keyword occurs as a substring of text.
        for word in keywords:
            if(word in text):
                return True
        return False
    def lookingAt(self, symbole, startDate, endDate, keywords):
        # Case-insensitive search of the local CSV corpus within the range.
        upperKeywords = [x.upper() for x in keywords]
        MessageManager.debugMessage("ReutersNewsSource : start reading Reuters corpus")
        # NOTE(review): prefer ``with open(...)`` — the file stays open if
        # an exception escapes the loop.
        f = open(self.filename, 'r')
        for line in f:
            try:
                lines = line.split(',')
                date = datetime.datetime.strptime(lines[0], "%Y-%m-%d %H:%M:%S")
                if(date >= startDate and date <= endDate):
                    head = lines[1]
                    msg = ''.join(lines[2:])
                    if(self.hasAnyofTheresKeywords(upperKeywords, head.upper()) or self.hasAnyofTheresKeywords(upperKeywords, msg.upper())):
                        MessageManager.debugMessage("ReutersNewsSource : head or msg has keywords")
                        # NOTE(review): only the headline is stored here,
                        # while the HDFS variants store head+msg — confirm.
                        self.news.append(News(pubDate=date, symbole=symbole, publication=head, pubSource="Reuters"))
            except:
                # NOTE(review): bare except skips malformed/header lines but
                # also hides real bugs.
                pass # explicative line or empty
        f.close()
        MessageManager.debugMessage("ReutersNewsSource : stop reading Reuters corpus")
        MessageManager.debugMessage("ReutersNewsSource : %d news found" % len(self.news))
27216179938 | import glob
import logging
import os
import random
import types
from collections import namedtuple
from enum import Enum, auto
import numpy as np
import pandas as pd
import mne
from config import LABELED_ROOT, PROJ_ROOT, DATA_ROOT, CHANNEL_NAMES
from data.utils import (get_index, get_trial, df_from_tdt, df_from_fif,
data_from_npy, get_meta_df, mne_from_file)
class DataKindDefinition:
    """Descriptor bundling the properties of one kind of data file.

    Attributes: ``name`` (short identifier), ``data_folder`` (absolute
    directory holding the files), ``exp_exts`` (tuple of accepted file
    extensions) and ``df_from_path`` (callable loading a path into a
    dataframe/array).
    """

    def __init__(self, name='', data_folder='', exp_exts=(), df_from_path=None):
        # Store the descriptor fields verbatim; no validation is performed.
        self.name, self.data_folder = name, data_folder
        self.exp_exts, self.df_from_path = exp_exts, df_from_path
class DataKind(Enum):
    """Enumerates the kinds of data handled by files_builder().

    Members present in DATA_KINDS map to on-disk folders; META and MNE are
    handled specially by files_builder().
    """
    META = 'meta'            # metadata dataframe (see get_meta_df)
    RAW = 'raw'              # raw .tdt recordings
    PROCESSED = 'processed'  # preprocessed .fif recordings
    MNE = 'mne'              # a single file loaded through MNE
    SURROGATE = 'surrogate'  # surrogate .csv data
    RECPLOT = 'recplot'      # recurrence-plot .npy arrays
    GAF = 'gaf'              # Gramian angular field .npy arrays
    DIRECT = 'direct'        # direct .npy arrays
# Registry mapping each DataKind member to its on-disk definition: folder
# under DATA_ROOT, accepted extensions and the loader used to read a file.
DATA_KINDS = {
    DataKind.RAW: DataKindDefinition(
        name='raw',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'raw')),
        exp_exts=('.tdt',),
        df_from_path=df_from_tdt),
    DataKind.PROCESSED: DataKindDefinition(
        name='processed',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'processed')),
        exp_exts=('.fif',),
        df_from_path=df_from_fif),
    # NOTE(review): surrogate files are .csv but parsed with df_from_tdt —
    # presumably the same column layout; verify against the generator.
    DataKind.SURROGATE: DataKindDefinition(
        name='surrogate',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'surrogate')),
        exp_exts=('.csv',),
        df_from_path=df_from_tdt),
    DataKind.RECPLOT: DataKindDefinition(
        name='recplot',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'recplots')),
        exp_exts=('.npy',),
        df_from_path=data_from_npy),
    DataKind.GAF: DataKindDefinition(
        name='gaf',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'gaf')),
        exp_exts=('.npy',),
        df_from_path=data_from_npy),
    DataKind.DIRECT: DataKindDefinition(
        name='direct',
        data_folder=os.path.abspath(os.path.join(DATA_ROOT, 'direct')),
        exp_exts=('.npy',),
        df_from_path=data_from_npy),
}

# A loaded data file: its data, location and parsed identifiers.
File = namedtuple('File', 'df path id trial name kind number')
def files_builder(kind=None, ext=None, file=None, subfolder=(), *args, **kwargs):
    """Creates a DataFiles iterator based on kind, extension, or returns a
    single dataframe based on file.

    ``kind`` takes precedence; when only ``ext`` is given, the first entry
    of DATA_KINDS accepting that extension is used. ``DataKind.META`` and
    ``DataKind.MNE`` are special-cased and do not return a DataFiles.
    """
    if kind is None and ext is not None:
        # Resolve the kind from the extension: first matching definition wins.
        matching = (k for k, d in DATA_KINDS.items() if ext in d.exp_exts)
        kind = next(matching, None)
        if kind is None:
            raise NotImplementedError(f'File extension {ext} not supported.')
    if kind in DATA_KINDS:
        return DataFiles(DATA_KINDS[kind], subfolder=subfolder)
    if kind == DataKind.META:
        return get_meta_df()
    if kind == DataKind.MNE:
        return mne_from_file(file)
    raise NotImplementedError
class DataFiles:
    """
    Iterator over file names of supplied properties. It supports shuffling,
    subfolders, absolute / relative paths, and selection only before / after
    trials."""
    def __init__(self, kind, shuffle=False, subfolder=()):
        # kind is a DataKindDefinition; its folder must already exist.
        assert os.path.isdir(kind.data_folder), kind.data_folder
        self.kind = kind.name
        self.exp_exts = kind.exp_exts
        self.data_folder = kind.data_folder
        if len(subfolder) > 0:
            # Narrow the search root to the requested subfolder chain.
            self.data_folder = os.path.join(*((kind.data_folder,) + subfolder))
        self.df_from_path = kind.df_from_path
        self.shuffle = shuffle
        self.numfiles = len(os.listdir(self.data_folder))
    def file_names(self, include_path=False, subfolder=(), recursive=False,
                   index_trials=None):
        """Generator of (index, file_name) pairs.

        Files whose extension is not in self.exp_exts are skipped.
        When recursive, a glob for the FIRST expected extension is used and
        names are always absolute (include_path is ignored in that case).
        index_trials, when given, keeps only names whose part before the
        first '-' is listed in it.
        """
        data_folder = os.path.join(*((self.data_folder,) + subfolder))
        if recursive:
            file_names = glob.glob(data_folder + '/**/*'+self.exp_exts[0], recursive=True)
        else:
            file_names = os.listdir(data_folder)
        if index_trials is not None:
            file_names = [fn for fn in file_names
                          if fn.split('-')[0] in index_trials]
        if include_path and not recursive:
            file_names = [os.path.join(data_folder, fn) for fn in file_names]
        if self.shuffle:
            random.shuffle(file_names)
        for i, file_name in enumerate(file_names):
            _, ext = os.path.splitext(file_name)
            if ext not in self.exp_exts:
                logging.debug(
                    f'Unexpected extension: skipping file {file_name}.')
                continue
            yield i, file_name
    def train_test_file_names(self, test_size=0.3):
        """Split the file names into train / test samples.

        Returns (train_names, test_names) as absolute paths; the first
        test_size fraction of the listing becomes the test set (random when
        self.shuffle is True).
        """
        assert test_size < 1, 'test_size must be < 1'
        all_names = [os.path.join(self.data_folder, name[1])
                     for name in self.file_names()]
        return all_names[int(test_size*len(all_names)):], \
            all_names[:int(test_size*len(all_names))]
    def get_filenames_with_labels(self, file_names=None, label='dep',
                                  trial=None):
        """Get only labels to supplied filenames or all filenames with
        corresponding labels.

        Labels are looked up in the pickled metadata frame under
        LABELED_ROOT/processed/meta indexed by (subject_index, trial).
        When trial is given, file names are filtered to that trial first.
        Returns (file_names, labels) as parallel lists.
        """
        if file_names is None:
            file_names = [fn for _, fn in self.file_names(include_path=True)]
        ls = pd.read_pickle(
            os.path.join(LABELED_ROOT, 'processed', 'meta', 'meta.pkl'))
        if trial is None:
            file_names, labels = file_names, [
                ls.loc[(self.get_index(fn), get_trial(fn)), label]
                for fn in file_names
            ]
        else:
            file_names = [fn for fn in file_names if
                          get_trial(fn) == trial]
            labels = [
                ls.loc[(self.get_index(fn), trial), label]
                for fn in file_names
            ]
        return file_names, labels
    def single_file(self, file_name):
        """Get file instance corresponding to file of supplied name."""
        _, ext = os.path.splitext(file_name)
        assert (file_name in os.listdir(self.data_folder)
                and ext in self.exp_exts)
        file_path = os.path.join(self.data_folder, file_name)
        return File(
            df=self.df_from_path(file_path),
            id=get_index(file_path),
            trial=get_trial(file_path),
            path=file_path,
            name=file_name,
            kind=self.kind,
            number=None)
    def from_index_trial(self, index, trial):
        """Get File instance of the file corresponding to supplied index and
        trial."""
        # File names are '<index><trial><ext>' with the first expected ext.
        file_name = ''.join((str(index), str(trial))) + self.exp_exts[0]
        assert (file_name in os.listdir(self.data_folder)), file_name
        file_path = os.path.join(self.data_folder, file_name)
        return File(
            df=self.df_from_path(file_path),
            id=index,
            trial=trial,
            path=file_path,
            name=file_name,
            kind=self.kind,
            number=None)
    def __iter__(self):
        # Yield a fully-loaded File for every accepted name in the folder;
        # 'number' carries a human-readable "i/total" progress string.
        for i, file_name in self.file_names():
            file_path = os.path.join(self.data_folder, file_name)
            yield File(
                df=self.df_from_path(file_path),
                id=get_index(file_name),
                trial=get_trial(file_name),
                path=file_path,
                name=file_name,
                kind=self.kind,
                number=f'{i}/{self.numfiles}')
| mirgee/thesis_project | src/data/data_files.py | data_files.py | py | 7,560 | python | en | code | 0 | github-code | 13 |
35652549979 | from splinter import Browser
from bs4 import BeautifulSoup
import re
import time
import requests
import datetime as dt
from flask import Flask
from flask_pymongo import PyMongo
# Flask application plus a MongoDB connection (database "mars_app") used by
# both routes below.
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
data = mongo.db.mars.find_one()
print(data)
title=f"<h1>{data['title']}</h1>"
paragraph=f"<h2>{data['paragraph']}</h2></br>"
image=data['image']
imageHTML=f' <img src="{image}" alt="Image from NASA" width="500" height="600"> </br>'
weather = f"<h2>Weather:</h2><h3>{data['weather']}</h3>"
return(title+paragraph+imageHTML+weather)
@app.route("/scrape")
def scrape():
db = mongo.db.mars
info = {
"title":"TITLE",
"paragraph": "PARAGRAPH",
"image": "IMAGE_URL",
"weather": "TWEET-WEATHER",
"last_modified": dt.datetime.now()
}
# SE EJECUTRA EL DRIVER DE CHROME SELENIUM
executable_path = {'executable_path': 'D:\ChromeDriver/chromedriver'}
brow = Browser('chrome', **executable_path)
# ABRE LA PAGINA Y GUARDA SU HTML
mars_url="https://mars.nasa.gov/news/"
brow.visit(mars_url)
time.sleep(2)
mars_html=brow.html
mars_news= BeautifulSoup(mars_html, 'html.parser')
# # SE BUCA EL TITLE DEL ARTICULO
title = mars_news.select_one('ul.item_list')
news_title=title.find("div", class_='content_title').text
info["title"]=news_title
# # SACA Y ASIGNA EL VALOR DE PARRAFO
content = mars_news.select_one('li.slide')
news_p=content.find("div", class_='article_teaser_body').text
info["paragraph"]=news_p
# # # SACA Y ASIGNA EL VALOR DE LA URL DE LA IMAGEN
jpl_url="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
brow.visit(jpl_url)
boton = brow.find_by_id('full_image')
boton.click()
time.sleep(2)
moreInfo=brow.find_link_by_partial_text('more info')
moreInfo.click()
parse=brow.html
parseSoup = BeautifulSoup(parse, 'html.parser')
imagen=parseSoup.select_one('figure')
imagen2=imagen.select_one('img')
src=imagen2.get('src')
featured_image_url = 'https://www.jpl.nasa.gov'+src
info["image"]=featured_image_url
# # SACA Y ASIGNA EL VALO DEL TWEET
mars_tweet="https://twitter.com/marswxreport?lang=en"
brow.visit(mars_tweet)
time.sleep(3)
mars_wtml=brow.html
weatherS= BeautifulSoup(mars_wtml, 'html.parser')
# mars_weather= weatherS.find_all('div', attrs={"data-testid":"tweet"})
for el in weatherS.find_all('div', attrs={"data-testid":"tweet"}):
if ("InSight" in el.get_text()):
info["weather"]=el.get_text()
print (el.get_text())
break
# print (el.get_text())
print(info)
db.replace_one({}, info, upsert=True)
return "Se Extrajo la información correctamente"
# Script entry point: run the Flask development server.
if __name__ == '__main__':
    app.run(debug=True, port=4000)
| avillalobosd/web-scraping-challenge | scrape_mars.py | scrape_mars.py | py | 3,080 | python | en | code | 0 | github-code | 13 |
26187440655 | from time import sleep
import unittest
from androidparent import *
class AndroidTestApiDemos(AndroidParentTest):
    """Appium UI tests against the ApiDemos sample app.

    AndroidParentTest (project base class) sets up the driver and the
    writeTimer() helper used to record test durations.
    """
    def changeDesiredCaps(self):
        """Point the Appium desired capabilities at the ApiDemos app."""
        self.desired_caps['appPackage'] = 'io.appium.android.apis'
        self.desired_caps['appActivity'] = 'io.appium.android.apis.ApiDemos'
    def test_find_elements(self):
        """Locate elements via accessibility ids and UiAutomator selectors."""
        el = self.driver.find_element_by_accessibility_id('Graphics')
        el.click()
        el = self.driver.find_element_by_accessibility_id('Arcs')
        self.assertIsNotNone(el)
        self.driver.back()
        el = self.driver.find_element_by_accessibility_id("App")
        self.assertIsNotNone(el)
        # NOTE(review): assertGreaterEqual(12, len(els)) asserts len <= 12 —
        # confirm the intended direction of this comparison.
        els = self.driver.find_elements_by_android_uiautomator("new UiSelector().clickable(true)")
        self.assertGreaterEqual(12, len(els))
        self.driver.find_element_by_android_uiautomator('text("API Demos")')
        self.writeTimer("AndroidTestApiDemos.test_find_elements")
    def test_simple_actions(self):
        """Navigate Graphics -> Arcs and verify the resulting screen title."""
        el = self.driver.find_element_by_accessibility_id('Graphics')
        el.click()
        el = self.driver.find_element_by_accessibility_id('Arcs')
        el.click()
        self.driver.find_element_by_android_uiautomator('new UiSelector().text("Graphics/Arcs")')
        self.writeTimer("AndroidTestApiDemos.test_simple_actions")
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(AndroidTestApiDemos)
    unittest.TextTestRunner(verbosity=2).run(suite)
28374545049 | import numpy as np
from . import VecEnvWrapper
class VecMonitor(VecEnvWrapper):
    """Vectorized-env wrapper that tracks per-episode reward and length.

    When an env reports done, an ``"episode"`` dict with the accumulated
    ``reward`` and ``length`` is attached to that env's info.
    """

    def __init__(self, venv):
        VecEnvWrapper.__init__(self, venv)
        self.eprets = None  # running reward per env (allocated on reset)
        self.eplens = None  # running length per env (allocated on reset)
        self.epcount = 0    # episodes completed across all envs

    def reset(self):
        """Reset the wrapped envs and zero the per-env accumulators."""
        self.eprets = np.zeros(self.num_envs, "f")
        self.eplens = np.zeros(self.num_envs, "i")
        return self.venv.reset()

    def step_wait(self):
        """Step the wrapped envs, accumulating episode statistics."""
        obs, rews, dones, infos = self.venv.step_wait()
        self.eprets += rews
        self.eplens += 1
        newinfos = []
        for idx, info in enumerate(infos):
            info = info.copy()  # never mutate the wrapped env's info dict
            if dones[idx]:
                info["episode"] = {
                    "reward": self.eprets[idx],
                    "length": self.eplens[idx],
                }
                self.epcount += 1
                self.eprets[idx] = 0
                self.eplens[idx] = 0
            newinfos.append(info)
        return obs, rews, dones, newinfos
| ASzot/rl-utils | rl_utils/envs/vec_env/vec_monitor.py | vec_monitor.py | py | 1,064 | python | en | code | 3 | github-code | 13 |
17552702055 | from PIL import Image
""" useful link for mixing image https://defpython.ru/prostoe_nalozhenie_izobrazhenii_v_Python """
# Load the base image and two overlay images (second one converted to RGBA
# so it carries an alpha channel).
img = Image.open("image/SZE3.png")
watermark = Image.open("image/6363.png")
secondmark = Image.open("image/123333.png").convert("RGBA")
# Passing the overlay itself as the third argument uses its alpha channel
# as the paste mask.
img.paste(watermark, (500,100), watermark)
img.paste(secondmark, (700,700), secondmark)
# NOTE(review): the watermark is pasted again at (700,700), over
# secondmark — confirm this double paste is intentional.
img.paste(watermark, (700,700), watermark)
# NOTE(review): Image.show()'s argument is a window title, not a file path.
img.show("image/SZE3.png")
74855045777 | # coding:utf-8
__author__ = 'ym'
import json
import pymysql
import jieba
import pygal
from wordcloud import WordCloud
class XmComment(object):
    """Taobao comment pipeline: load scraped JSON, persist to MySQL and
    produce a word cloud plus a pie chart.

    NOTE(review): database credentials are hard-coded below.
    """
    def __init__(self):
        self.conn = pymysql.connect(user='root', password='123', db='test')
        self.cursor = self.conn.cursor()
    def get_dict(self, num):
        """Load page *num* of scraped comments and return a list of dicts."""
        data = json.load(open(f'tb_comments_{num}.json', encoding='utf-8'))
        comment_list = data['rateDetail']['rateList']
        comment_info = []
        for i in comment_list: # build one dict per comment record
            comment = i['rateContent']
            rateDate = i['rateDate']
            id = int(i['id'])
            auctionSku = i['auctionSku']
            if i['appendComment'] == None: # did the user append a follow-up comment?
                appendComment = '用户没有追加评论'
            else:
                appendComment = i['appendComment']['content']
            # collect into the result list
            comment_info.append({'评论': comment, '评论时间': rateDate, 'comment_id': id, '商品类型': auctionSku, '追加评论': appendComment})
        return comment_info # list of comment dicts
    def save_db(self, comment_info):
        """Insert comments into MySQL, skipping ids already stored."""
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS xmcomment
                    (
                    id INT PRIMARY KEY AUTO_INCREMENT,
                    comment_id BIGINT ,
                    comment VARCHAR (500),
                    create_date VARCHAR (30),
                    append_comment VARCHAR (500),
                    action_sku VARCHAR (100)
                    )
                    """
                            )
        for i in comment_info:
            # NOTE(review): %-formatted SQL — safe-ish here because
            # comment_id was cast to int upstream, but parameterized
            # queries would be the correct form.
            self.cursor.execute("SELECT comment_id FROM xmcomment WHERE comment_id=%s" % i['comment_id'])
            q = self.cursor.fetchone()
            if q == None: # not stored yet?
                try:
                    self.cursor.execute("INSERT INTO xmcomment(comment_id,comment,create_date,append_comment,action_sku) VALUES (%s,%s,%s,%s,%s)", [i['comment_id'], i['评论'], i['评论时间'], i['追加评论'], i['商品类型']])
                except:
                    # Encoding issues (e.g. emoji) can make the INSERT fail;
                    # switch the column to utf8mb4 and retry once.
                    # NOTE(review): bare except hides unrelated failures.
                    self.cursor.execute('ALTER TABLE xmcomment MODIFY comment VARCHAR(500) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
                    self.cursor.execute(
                        "INSERT INTO xmcomment(comment_id,comment,create_date,append_comment,action_sku) VALUES (%s,%s,%s,%s,%s)",
                        [i['comment_id'], i['评论'], i['评论时间'], i['追加评论'], i['商品类型']])
            else:
                print('该评论已存入数据库')
        self.conn.commit()
    def read_30(self):
        """Return stored comments ordered by create_date descending."""
        self.cursor.execute('SELECT create_date, comment, append_comment FROM xmcomment ORDER BY create_date DESC ')
        data = self.cursor.fetchall()
        return data
    def jieba_word(self):
        # Concatenate every stored comment and segment it with jieba,
        # returning a space-joined token string for the word cloud.
        self.cursor.execute('SELECT comment FROM xmcomment')
        comment_t = self.cursor.fetchall()
        comment_list = []
        # collect the comment column into a list
        for i in comment_t:
            comment_list.append(i[0])
        comment_string = ''.join(comment_list) # one long string
        word_list = jieba.cut(comment_string, cut_all=False)
        word = ' '.join(word_list)
        return word
    def word_jpg(self):
        # Render the segmented comments as a word-cloud PNG (needs the
        # simkai.ttf font for CJK glyphs).
        word = self.jieba_word()
        font = 'simkai.ttf'
        wc = WordCloud(font_path=font,
                       background_color='white',
                       width=2000,
                       height=1500,
                       max_font_size=300,
                       min_font_size=50
                       ).generate(word)
        wc.to_file('xiaomi.png')
        print('已生成词云')
    def chart(self):
        """Render a pie chart (SVG) of comment share per product SKU."""
        self.cursor.execute('SELECT DISTINCT action_sku FROM xmcomment')
        cate = self.cursor.fetchall()
        cate_list = [] # distinct product SKUs
        for i in cate:
            cate_list.append(i[0])
        self.cursor.execute('SELECT COUNT(id) FROM xmcomment')
        num_all = self.cursor.fetchone()[0] # total comment count
        cate_num_list = [] # (sku, count) pairs
        for b in cate_list:
            # NOTE(review): SKU text is interpolated into SQL — a
            # parameterized query would avoid injection/quoting issues.
            self.cursor.execute(
                "SELECT COUNT(id) FROM xmcomment WHERE action_sku='%s'" % b)
            de_num = self.cursor.fetchone()[0]
            cate_num_list.append((b, de_num))
        pie_chart = pygal.Pie()
        pie_chart.title = '小米商品分析 (in %)'
        for a in cate_num_list:
            pie_chart.add(a[0], a[1] / num_all * 100)
        pie_chart.render_to_file('xiaomi.svg')
if __name__ == '__main__':
    # Current entry point only regenerates the word cloud; the original
    # scrape/store/chart pipeline is kept below, commented out, for reference.
    xm = XmComment()
    xm.word_jpg()
    # data = xm.read_30()
    # print('读数据库根据rate_date倒序前30条评论的 评论')
    # for xx in data:
    # print(xx[0])
    # print('-'*50)
    # print('\n')
    # print('正在获取评论数据')
    # num = 2 # 要保存几页
    # for i in range(1, num+1):
    # x = xm.get_dict(i)
    # print('正在存入数据库')
    # xm.save_db(x)
    # print('保存完成')
    # xm.jieba_word()
    # print('-'*50)
    # print('正在生成svg')
    # xm.chart()
    # print('生成完成→xiaomi.svg')
| Ewenwan/python_study | tutorial/爬虫考试/taobao.py | taobao.py | py | 5,419 | python | en | code | 1 | github-code | 13 |
26993751915 | '''
Implement the total_words() function which will find the total number of words in a trie.
'''
from Trie import Trie
from TrieNode import TrieNode
# TrieNode => {children, is_end_word, char,
# mark_as_leaf(), unmark_as_leaf()}
def total_words(root):
    """Return the number of complete words stored in the trie rooted at
    *root*.

    A node contributes 1 when it marks the end of a word; children slots
    holding None are skipped.
    """
    count = 1 if root.is_end_word else 0
    count += sum(total_words(child) for child in root.children if child)
    return count
21582346356 | from django import template
register = template.Library()
@register.filter
def agenda_width_scale(filter_categories, spacer_scale):
    """Compute the width scale for the agenda filter button table.

    Button columns are spacer_scale times as wide as the spacer columns
    between categories; there is one fewer spacer column than categories.
    """
    buttons = sum(len(category) for category in filter_categories)
    spacers = len(filter_categories) - 1
    # Never return less than 1 to avoid width calculation problems.
    return max(spacer_scale * buttons + spacers, 1)
| ietf-tools/old-datatracker-branches | ietf/meeting/templatetags/agenda_filter_tags.py | agenda_filter_tags.py | py | 608 | python | en | code | 5 | github-code | 13 |
9925371231 | from maya import cmds
from zUtils import attributes
from .tags import ZIVA_MUSCLES
class Muscles(object):
    """Wrapper around a Ziva muscle-system root transform.

    The character name is persisted on the root node as a tag attribute
    (ZIVA_MUSCLES), which links import and export of the muscle system.
    """

    def __init__(self, root, character=None):
        self._root = root
        # Fail fast: either a character must be passed in, or one must
        # already be tagged on the root node.
        if not character and not self.character:
            raise RuntimeError("Declare the 'character' variable!")
        if character:
            attributes.createTag(self.root, ZIVA_MUSCLES, character)

    # ------------------------------------------------------------------------
    @classmethod
    def getMuscleSystemsFromScene(cls):
        """Scan all scene transforms for the ZIVA_MUSCLES tag and group the
        tagged nodes by character.

        :return: Mapping of character name -> list of Muscles instances
        :rtype: dict
        """
        systems = {}
        for transform in cmds.ls(transforms=True):
            plug = attributes.getPlug(transform, ZIVA_MUSCLES)
            if not cmds.objExists(plug):
                continue
            character = cmds.getAttr(plug)
            systems.setdefault(character, []).append(cls(transform, character))
        return systems

    # ------------------------------------------------------------------------
    @property
    def character(self):
        """Character name stored as a tag on the root node.

        :rtype: str
        """
        return attributes.getTag(self.root, ZIVA_MUSCLES)

    @property
    def root(self):
        """Root transform of this muscle system.

        :rtype: str
        """
        return self._root
| jonntd/maya-ziva-dynamics-utils | scripts/zMuscles/base.py | base.py | py | 2,122 | python | en | code | 0 | github-code | 13 |
31629219282 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import MoNeT_MGDrivE as monet
import matplotlib.pyplot as plt
import mating_auxiliary as aux
import numpy as np
plt.rcParams.update({'figure.max_open_warning': 0})
##############################################################################
# Notes
##############################################################################
# Toggle which analysis sections of this script run.
STACK = False
TRACES = True
FACTORIAL = False
nameExp = ""
# Experiment locations on the shared drive.
pathRoot = "/Volumes/marshallShare/pgSIT/"
pathExperiments = "CRISPR_SIT/"
pathPlots = pathRoot + "images/"
##############################################################################
# Day on which the release happens; used for plot markers and as the start
# of the steady-state search window below.
RELEASE_DAY = 25
colors = ["#f20060", "#29339b", "#c6d8ff", "#7fff3a", "#7692ff", "#29339b"]
cmaps = monet.generateAlphaColorMapFromColorArray(colors)
# Genotype aggregation into "W"/"R" classes.
# NOTE(review): the exact meaning of the index lists is defined by
# MoNeT_MGDrivE's generateAggregationDictionary — confirm before changing.
aggregationDictionary = monet.generateAggregationDictionary(
    ["W", "R"],
    [
        [0, 0, 1],
        [1, 2, 2]
    ]
)
# Plot styles: styleS for the stacked-mean plots, styleT for the
# per-repetition trace plots (thin, translucent lines).
styleS = {
    "width": 0, "alpha": .9, "dpi": 1024, "legend": False,
    "aspect": .00175, "colors": colors, "xRange": [0, 600], "yRange": [0, 8*11000]
}
styleT = {
    "width": .075, "alpha": .1, "dpi": 1024, "legend": False,
    "aspect": .00075, "colors": colors, "xRange": [0, 600], "yRange": [0, 8*11000]
}
##############################################################################
# Stack
##############################################################################
# Stacked-mean plots: one PNG per experiment found under ANALYZED/.
if STACK is True:
    folderNames = monet.listDirectoriesInPath(pathRoot + pathExperiments + "/ANALYZED/")
    for nameExp in folderNames[0:]:
        pathFull = pathRoot + pathExperiments + "ANALYZED/" + nameExp
        #####################################################################
        # Stack
        #####################################################################
        # Sum populations across the landscape, then aggregate genotypes
        # into the W/R classes defined above.
        filenames = monet.readExperimentFilenames(pathFull)
        landscapeSumData = monet.sumLandscapePopulationsFromFiles(
            filenames, male=True, female=True, dataType=float
        )
        aggData = monet.aggregateGenotypesInNode(
            landscapeSumData, aggregationDictionary
        )
        ###############
        # Day the system reaches steady state (searched after the release
        # plus a 100-day burn-in, 1% tolerance).
        ssDay = aux.reachedSteadtStateAtDay(aggData, RELEASE_DAY + 100, .01)
        figB = monet.plotMeanGenotypeStack(
            aggData,
            styleS,
            vLinesCoords=[RELEASE_DAY, ssDay]
        )
        figB.get_axes()[0].set_xlim(styleS["xRange"][0], styleS["xRange"][1])
        figB.get_axes()[0].set_ylim(styleS["yRange"][0], styleS["yRange"][1])
        # Title shows time-to-steady-state relative to the release day.
        figB.get_axes()[0].set_title(
            "[tSS: " + str(ssDay-RELEASE_DAY) + "]",
            fontsize=4
        )
        monet.quickSaveFigure(
            figB, pathRoot + "images/stack/" + nameExp + ".png", dpi=600
        )
        plt.close()
##########################################################################
# Garbage (Traces)
##########################################################################
# Per-repetition trace plots: one PNG per node per experiment under GARBAGE/.
if TRACES is True:
    folderNames = monet.listDirectoriesInPath(pathRoot + pathExperiments + "/GARBAGE/")
    for nameExp in folderNames[0:]:
        pathFull = pathRoot + pathExperiments + "GARBAGE/" + nameExp + "/"
        paths = monet.listDirectoriesWithPathWithinAPath(pathFull)
        # Load every repetition of the experiment and aggregate genotypes.
        landscapeReps = monet.loadAndAggregateLandscapeDataRepetitions(
            paths, aggregationDictionary,
            male=True, female=True, dataType=float
        )
        figsArray = monet.plotLandscapeDataRepetitions(landscapeReps, styleT)
        for i in range(0, len(figsArray)):
            figsArray[i].get_axes()[0].set_xlim(
                styleT["xRange"][0], styleT["xRange"][1]
            )
            figsArray[i].get_axes()[0].set_ylim(
                styleT["yRange"][0], styleT["yRange"][1],
            )
            # File name carries the zero-padded figure index.
            monet.quickSaveFigure(
                figsArray[i],
                pathRoot + "images/traces/" + nameExp + "_" +
                str(i).rjust(3, "0") + ".png",
                dpi=1024
            )
            plt.close()
##########################################################################
# Factorial
##########################################################################
# Factorial summary: mean wild-type ratio per experiment, written to CSV.
if FACTORIAL is True:
    meanStats = []
    folderNames = monet.listDirectoriesInPath(pathRoot + pathExperiments + "/GARBAGE/")
    for nameExp in folderNames[0:]:
        pathFull = pathRoot + pathExperiments + "GARBAGE/" + nameExp + "/"
        paths = monet.listDirectoriesWithPathWithinAPath(pathFull)
        landscapeReps = monet.loadAndAggregateLandscapeDataRepetitions(
            paths, aggregationDictionary,
            male=True, female=True, dataType=float
        )
        # Mean stat ######################################################
        # For each repetition, take the final (wild, drive) counts of the
        # first node and compute the wild fraction (0 when the node is empty).
        # NOTE(review): the [rep][0][-1] indexing assumes
        # landscapes[rep][node][day] ordering — confirm with MoNeT_MGDrivE.
        repsStat = []
        for rep in range(0,len(landscapeReps["landscapes"])):
            wild, drive = landscapeReps["landscapes"][rep][0][-1]
            ratio = 0
            if (wild + drive > 0):
                ratio = wild / (wild + drive)
            repsStat.append(ratio)
        # Summary array #################################################
        # Experiment names are encoded as <prefix>_<mating>_<releases>_<size>.
        none, mating, releases, size = nameExp.split("_")
        meanStats.append([int(mating),int(releases),int(size),np.mean(repsStat)])
    np.savetxt(
        pathRoot + pathExperiments + "factorial.csv",
        meanStats,
        delimiter=',',
        fmt='%10.5f'
    )
| Chipdelmal/MoNeT | DataAnalysis/MatingEfficiency/mating_main.py | mating_main.py | py | 5,320 | python | en | code | 7 | github-code | 13 |
11680095890 | from flask import Flask, jsonify, request
app = Flask(__name__)
import sqlite3
import datetime
from datetime import timedelta
import pdb
from gevent.pywsgi import WSGIServer
DATABASE = "/home/pi/greenhouse/data.db"
def rounder(t):
    """Truncate a datetime down to the minute (zero seconds/microseconds).

    The original also passed minute=t.minute to replace(), which is a
    redundant no-op and has been dropped.
    """
    return t.replace(second=0, microsecond=0)
@app.route('/current')
def get_current():
    """Return the most recent sensor reading as JSON.

    Reads the newest row (by timestamp) from the greenhouse table.
    """
    with sqlite3.connect(DATABASE) as con:
        row = con.cursor().execute(
            'SELECT * FROM greenhouse ORDER BY timestamp DESC limit 1'
        ).fetchall()[0]
    payload = {
        'timestamp': row[0],
        'temperature': float(row[1]),
        'humidity': float(row[2]),
        'pressure': row[3]
    }
    return jsonify(payload)
@app.route('/between')
def get_between():
    """Return all readings with start <= timestamp <= end as a JSON list.

    'start' and 'end' are epoch-second query parameters; int() both
    validates them and keeps the interpolated SQL injection-free.

    Cleanup over the original: removed the unused start_str/end_str locals
    (they only fed commented-out debug prints) and the stray debug
    print(values) that dumped every row to stdout per request.
    """
    start = int(request.args.get('start'))
    end = int(request.args.get('end'))
    with sqlite3.connect(DATABASE) as con:
        cur = con.cursor()
        # Values are validated ints, so inlining them here is safe.
        stmt = f'''SELECT * FROM greenhouse WHERE timestamp >= {start} and timestamp <= {end};'''
        cur.execute(stmt)
        values = cur.fetchall()
        ret = [
            {
                'timestamp': data[0],
                'temperature': data[1],
                'humidity': data[2],
                'pressure': data[3]
            } for data in values
        ]
        return jsonify(ret)
@app.route('/hilo')
def get_hilo():
    """Return daily high/low temperature pairs for the last *days* days
    (query parameter), newest day first.
    """
    days = int(request.args.get('days'))
    ret = []
    end = datetime.datetime.now()
    # Truncate to the start of today. Fix: also zero the seconds, which the
    # original replace() call forgot, leaving a stray seconds component in
    # every day boundary.
    start = end.replace(hour=0, minute=0, second=0, microsecond=0)
    while days > 0:
        high_time, high_temp, low_time, low_temp = find_hilo(
            start.timestamp(), end.timestamp())
        ret.append({
            'high_time': high_time,
            'high_temp': high_temp,
            'low_time': low_time,
            'low_temp': low_temp
        })
        # Slide the window back one full day.
        end = start
        start = start - timedelta(days=1)
        days -= 1
    return jsonify(ret)
def find_hilo(start, end):
    """Return (high_time, high_temp, low_time, low_temp) for the readings
    whose timestamps fall between *start* and *end* (epoch seconds).

    Callers pass numeric timestamps, so the f-string interpolation below
    cannot inject SQL.
    """
    with sqlite3.connect(DATABASE) as con:
        cur = con.cursor()
        cur.execute(f'''
        SELECT timestamp, MAX(temperature) FROM greenhouse
        WHERE timestamp >= {start} AND timestamp <= {end}
        ''')
        hi_row = cur.fetchall()[0]
        cur.execute(f'''
        SELECT timestamp, MIN(temperature) FROM greenhouse
        WHERE timestamp >= {start} AND timestamp <= {end}
        ''')
        lo_row = cur.fetchall()[0]
    return hi_row[0], hi_row[1], lo_row[0], lo_row[1]
if __name__ == "__main__":
    # Serve the Flask app through gevent's WSGI server on all interfaces,
    # port 5000 (blocks forever).
    server = WSGIServer(('', 5000), app)
    server.serve_forever()
| juatabot/greenhouse | server.py | server.py | py | 3,212 | python | en | code | 0 | github-code | 13 |
30998817591 |
#!/urs/bin/python3.8
import turtle as tt
from turtle import *
import sys
import random
from sys import stdin
import time
from random import randint as rand
import math
# el orden de los factores si altera el producto
# Palette of hex colors paired two per line; consumed by the drawing code.
colors =[
'#FFFFFF' , '#F01bc4',
'#000000' , '#11E511',
'#1bc3F7' , '#1e1f11',
'#0bc3F7' , '#4575fa',
'#1bc3F7' , '#c63c55',
'#1bF722' , '#00ce11',
'#11c0F7' , '#F0000F'
]
def figures( vect_a, vect_b, vect_c, vect_d ):
    """Draw a small turtle-graphics figure and block in the event loop.

    vect_a scales the first forward move; vect_b feeds the angle helper.
    vect_c and vect_d are currently unused.
    """
    func = tt.Turtle()
    screen = tt.Screen()
    func.speed(1)
    # operators native
    showturtle()
    screen.bgcolor("#000011") # dark, near-black background
    # Angle helper; note precedence: x + (89 / pi), not (x + 89) / pi.
    yambl = lambda x : x + 89 / math.pi
    func.color('#014Fc1')
    func.right(10)
    # func.forward(vect_a + 89)
    func.forward(vect_a + 89)
    func.down()
    func.left(yambl(vect_b))
    func.color('#FF0000')
    func.right(180)
    #func.up()
    func.forward(100)
    func.left(45)
    func.forward(yambl(vect_b))
    #func.left(lambda vect_b : vect_b + 109)
    func.goto(10,10)
    # mainloop() blocks here until the turtle window is closed.
    mainloop()
def shampes(vectv , vectx , vecty , vectz):
    """Placeholder for a second drawing routine; not implemented yet."""
    pass
if __name__ == "__main__" :
    # Draw the demo figure with fixed test values.
    figures( int(10), 10, int(10), 10 )
    # print(lambda x ; x + 99 / math.pi)
    #print(yambl(10))
| Ron4-kw0rk3r/GVphjs_practice | Pjct_V-gphs/attemp002.py | attemp002.py | py | 1,338 | python | en | code | 1 | github-code | 13 |
29136008833 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema migration: deletes the G_Form_ID and G_Session_ID models and
    # adds f_id / s_id (integers, default 0) plus an optional 'state'
    # foreign key to assessment.SolidState on the TrackApp model.
    dependencies = [
        ('assessment', '0125_trackapp'),
    ]
    operations = [
        migrations.DeleteModel(
            name='G_Form_ID',
        ),
        migrations.DeleteModel(
            name='G_Session_ID',
        ),
        migrations.AddField(
            model_name='trackapp',
            name='f_id',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='trackapp',
            name='s_id',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='trackapp',
            name='state',
            field=models.ForeignKey(default=None, blank=True, to='assessment.SolidState', null=True),
            preserve_default=True,
        ),
    ]
| ilewis01/SimeonAcademy | SimeonAcademy/assessment/migrations/0126_auto_20160720_0510.py | 0126_auto_20160720_0510.py | py | 999 | python | en | code | 0 | github-code | 13 |
6475994992 | import toolutils
class Hostapd(object):
    """Basic hostapd.conf file handling.

    The configuration is kept in a plain dict (see :attr:`config`); the
    class can read/write it from/to disk, validate it, back it up and
    drive the hostapd init script via toolutils.
    """

    def __init__(self, path, backup_path=None):
        """
        :param path: path of the hostapd.conf file to manage
        :param backup_path: where backups are written; defaults to
            path + ".bak"
        """
        self._config = {}
        self._path = path
        if not backup_path:
            self.backup_path = path + ".bak"
        else:
            self.backup_path = backup_path

    @property
    def config(self):
        """The current configuration: option name -> value."""
        return self._config

    def set(self, key, value):
        """Set a single option; keys (and string values) are stripped."""
        if isinstance(value, str):
            self._config[str(key).strip()] = value.strip()
        else:
            self._config[str(key).strip()] = value

    def validate(self):
        '''
            Not sure which ones are really necessary for everyone,
            here are the ones I require
            I created 4 groups of keys : basic must always be there,
            wireless if you want to be an AP, auth if you want to add
            some security, bridge for, well, bridging
            Not foul proof !
            Raise KeyError and ValueError
        '''
        basic = ['interface', 'driver']
        bridge = ['bridge']
        wireless = ['ssid', 'channel', 'hw_mode']
        auth = ['wpa', 'wpa_passphrase', 'wpa_key_mgmt']
        for k in basic:
            if self._config[k] is None:
                raise ValueError("Missing required {} option".format(k))
        if 'bridge' in self._config:
            for k in bridge:
                if self._config[k] is None:
                    raise ValueError("Missing required {} option for bridge".format(k))
        if 'ssid' in self._config:
            for k in wireless:
                if self._config[k] is None:
                    raise ValueError("Missing required {} option for wireless".format(k))
            self._config['channel'] = int(self._config['channel'])  # will raise value error if not int
        if 'wpa' in self._config:
            self._config['wpa'] = int(self._config['wpa'])
            if not self._config['wpa'] in [1, 2, 3]:
                raise ValueError("Wpa option is not valid")
            for k in auth:
                if self._config[k] is None:
                    raise ValueError("Missing required {} option for wireless security".format(k))
            if self._config['wpa'] in [1, 3]:
                if not self._config['wpa_pairwise']:
                    raise ValueError("Missing required option for wireless security : wpa_pairwise")
            if self._config['wpa'] in [2, 3]:
                if not self._config['rsn_pairwise']:
                    raise ValueError("Missing required option for wireless security rsn_pairwise")

    def set_defaults(self):
        ''' Defaults for my needs, you should probably override this one '''
        self._config = {
            'interface': 'wlan0',
            'driver': 'nl80211',
            # logs
            'logger_syslog': -1,
            'logger_syslog_level': 2,
            'logger_stdout': -1,
            'logger_stdout_level': 2,
            # debug
            'debug': 4,
            # wifi
            'hw_mode': 'g',
            # security goodies
            'macaddr_acl': 0,
            'eapol_key_index_workaround': 0,
            'eap_server': 0,
            'eapol_version': 1,
            # wifi auth
            # please note ssid and wpa-passphrase are missing
            'auth_algs': 3,
            'wpa': 3,  # WPA + WPA2. set to 2 to restrict to WPA2
            'wpa_key_mgmt': 'WPA-PSK',
            'wpa_pairwise': 'TKIP',
            'rsn_pairwise': 'CCMP'  # some windows clients may have issues with this one
        }

    def read(self, path=None):
        """Load the configuration from *path* (defaults to the managed file).

        Fixes over the original parser: blank lines and lines without '='
        no longer raise ValueError, and values that themselves contain '='
        (e.g. passphrases) are preserved by splitting on the first '=' only.
        """
        if path is None:
            path = self._path
        self._config = {}
        with open(path, "r") as hostapd:
            for line in hostapd:
                if line.startswith('#'):
                    continue
                # Split on the FIRST '=' only; silently skip lines without one.
                param, sep, value = line.partition("=")
                if sep and param:
                    self.set(param, value)

    def write(self, path=None):
        """Validate, back up the current file, then atomically write the
        configuration to *path* (defaults to the managed file)."""
        self.validate()
        if path is None:
            path = self._path
        self.backup()
        with toolutils.atomic_write(path) as hostapd:
            # .items() works on both Python 2 and 3 (the original used the
            # Python-2-only .iteritems()).
            for k, v in self._config.items():
                hostapd.write("{}={}\n".format(str(k).strip(), str(v).strip()))

    def controlService(self, action):
        ''' return True/False, command output '''
        if action not in ["start", "stop", "restart"]:
            return False, "Invalid action"
        return toolutils.safe_subprocess(["/etc/init.d/hostapd", action])

    def backup(self):
        ''' return True/False, command output '''
        if self.backup_path:
            return toolutils.safe_subprocess(["cp", self._path, self.backup_path])

    def restore(self):
        ''' return True/False, command output '''
        if self.backup_path:
            return toolutils.safe_subprocess(["cp", self.backup_path, self._path])

    def delete(self):
        ''' return True/False, command output '''
        if self.backup_path:
            return toolutils.safe_subprocess(["rm", self._path])
| dggreenbaum/debinterface | hostapd.py | hostapd.py | py | 5,110 | python | en | code | 26 | github-code | 13 |
15458709307 | # -*- coding: utf-8 -*-
from AWSScout2.configs.regions import RegionalServiceConfig, RegionConfig
########################################
# DirectConnectRegionConfig
########################################
class DirectConnectRegionConfig(RegionConfig):
    """Direct Connect configuration for a single AWS region."""

    def parse_connection(self, global_params, region, connection):
        """Normalise one raw connection record and register it by id.

        :param global_params: Parameters shared for all regions
        :param region: Name of the AWS region
        :param connection: Raw connection dict from the API; mutated in
            place (connectionId -> id, connectionName -> name)
        """
        connection_id = connection.pop('connectionId')
        connection['id'] = connection_id
        connection['name'] = connection.pop('connectionName')
        self.connections[connection_id] = connection
########################################
# DirectConnectConfig
########################################
class DirectConnectConfig(RegionalServiceConfig):
    """
    DirectConnect configuration for all AWS regions
    """
    # Region objects are instantiated from the class above.
    region_config_class = DirectConnectRegionConfig
    def __init__(self, service_metadata, thread_config = 4):
        # Delegate to the shared regional-service machinery.
        # NOTE(review): thread_config presumably sizes the worker pool —
        # confirm against RegionalServiceConfig.
        super(DirectConnectConfig, self).__init__(service_metadata, thread_config)
| nccgroup/Scout2 | AWSScout2/services/directconnect.py | directconnect.py | py | 1,299 | python | en | code | 1,729 | github-code | 13 |
6312838424 | import cx_Oracle
from config.config import *
from population.helper import noApos
# Oracle DSN/credentials imported from config (sql_login).
ORACLE_CONN_STRING = sql_login
# Outer query used when people are searched: joined with per-person CTEs
# built in get_movies_advanced, then INTERSECTed with the default search.
SQL_STATEMENT = (
    "WITH {} "
    "SELECT DISTINCT lnm0.movie_id, lnm0.title, lnm0.release_date, lnm0.budget, lnm0.revenue, lnm0.popularity, lnm0.rating_average, lnm0.rating_count, lnm0.overview, lnm0.poster_path, lnm0.run_time "
    "FROM {} "
    "{} "
    "INTERSECT "
    "SELECT * FROM ( "
)
# Base movie search; the {} slots are filled with user-derived values in
# get_movies_advanced (see the security note there).
SQL_STATEMENT_DEFAULT = (
    "SELECT m.* FROM Movies m "
    "INNER JOIN has_genre hg ON m.movie_id = hg.movie_id "
    "INNER JOIN Genre g ON g.genre_id = hg.genre_id "
    "INNER JOIN produced_by pb ON m.movie_id = pb.movie_id "
    "INNER JOIN Company c ON c.company_id = pb.company_id "
    "WHERE "
    "LOWER(m.title) like '%{}%' "
    "AND TO_DATE(m.release_date, 'YYYY-MM-DD') > TO_DATE('{}', 'MM-DD-YYYY') "
    "AND TO_DATE(m.release_date, 'YYYY-MM-DD') < TO_DATE('{}', 'MM-DD-YYYY') "
    "AND m.rating_average {} "
    "AND m.rating_count {} "
    "AND m.revenue {} "
    "AND m.budget {} "
    "AND m.run_time {} "
    "{} "
    "{} "
    "GROUP BY m.movie_id, m.title, m.budget, m.overview, m.popularity, m.poster_path, m.release_date, m.rating_average, m.rating_count, m.revenue, m.run_time "
    "HAVING COUNT(DISTINCT g.name) >= {} "
    "AND COUNT(DISTINCT c.name) >= {} "
    "ORDER BY m.popularity DESC"
)
def rows_to_dict_list(cursor):
    """Materialise a DB-API cursor as a list of dicts keyed by lower-cased
    column names."""
    keys = [desc[0].lower() for desc in cursor.description]
    return [dict(zip(keys, record)) for record in cursor]
def get_movies_advanced(title, startDate, endDate, avgRating, ratingCount, revenue, budget, runtime, genres, companies, people, limit):
    """Advanced movie search against Oracle.

    Builds the base query from SQL_STATEMENT_DEFAULT; when *people* is
    non-empty, wraps it with per-person CTEs (SQL_STATEMENT) and
    INTERSECTs the results. Returns at most *limit* rows as dicts.

    NOTE(review): the SQL is assembled by string formatting; quote
    escaping relies entirely on the noApos() helper, and the numeric
    filter arguments (avgRating, revenue, ...) are spliced in verbatim —
    confirm callers sanitise them, or move to bind variables.
    """
    con = cx_Oracle.connect(ORACLE_CONN_STRING)
    cursor = con.cursor()
    ret = {}
    genreConditions = ""
    companyConditions = ""
    # OR together every requested genre name (case-insensitive).
    if (len(genres) > 0):
        genreConditions += "AND ("
        for i in range(0, len(genres)):
            if (i > 0):
                genreConditions += "OR "
            genreConditions += "LOWER(g.name) = '" + noApos(genres[i].lower()) + "' "
        genreConditions += ") "
    # Same for production companies.
    if (len(companies) > 0):
        companyConditions += "AND ("
        for i in range(0, len(companies)):
            if (i > 0):
                companyConditions += "OR "
            companyConditions += "LOWER(c.name) = '" + noApos(companies[i].lower()) + "' "
        companyConditions += ") "
    default = SQL_STATEMENT_DEFAULT.format(title.lower(), startDate, endDate, avgRating, ratingCount, revenue, budget, runtime, genreConditions, companyConditions, len(genres), len(companies))
    if not people:
        cursor.execute(default)
    else:
        # One CTE per person, each selecting that person's movies; the
        # outer query joins them all on movie_id so only movies featuring
        # every listed person survive.
        table = (
            "likeNameMovie{} AS "
            "(SELECT * FROM nameMovie nm "
            "WHERE nm.name like '%{}%') "
        )
        tables = table.format(0,noApos(people[0].lower()))
        fromTable = "likeNameMovie{} lnm{}"
        fromTables = fromTable.format(0,0)
        notEqual = "lnm0.movie_id = lnm{}.movie_id "
        if (len(people) == 1):
            notEquals = ""
        else:
            notEquals = "WHERE "
        for i in range(1,len(people)):
            if people[i]:
                tables += ", "
                tables += table.format(i,noApos(people[i].lower()))
                fromTables += ", "
                fromTables += fromTable.format(i,i)
                if (i > 1):
                    notEquals += "AND "
                notEquals += notEqual.format(i,i)
        query = SQL_STATEMENT.format(tables, fromTables, notEquals).replace('\n', '') + default + ")"
        cursor.execute(query)
    ret = rows_to_dict_list(cursor)
    con.close()
    return ret[0:limit]
| chasefarmer2808/ReposiMovie-API | queries/adv_search.py | adv_search.py | py | 4,098 | python | en | code | 0 | github-code | 13 |
18029119882 | from Products.CMFCore.utils import getToolByName
from Products.CMFPlomino.PlominoForm import PlominoForm
from Products.CMFPlomino.PlominoDocument import PlominoDocument
from Products.CMFPlomino.index import PlominoIndex
from Products.CMFPlomino.exceptions import PlominoScriptException
#from Products.CMFPlomino.PlominoAction import PlominoAction
from workflow_utils import getChainFor, getStatesInfo, getTransitionsInfo
from workflow_utils import getWorkflowInfo, getInfoForState, getInfoFor
from url_utils import urllib_urlencode
# Security import
from AccessControl import ClassSecurityInfo
from Globals import InitializeClass
from Products.CMFPlomino.config import READ_PERMISSION
# Grant READ_PERMISSION access to the helper functions added below, so they
# can be called from Plomino formulas through the index.
PlominoIndex.security = ClassSecurityInfo()
PlominoIndex.security.declareProtected(READ_PERMISSION, 'create_child')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'oncreate_child')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'onsave_child')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'ondelete_child')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'ondelete_parent')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'beforecreate_child')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'wf_getChainFor')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'wf_statesInfo')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'wf_transitionsInfo')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'wf_workflowInfo')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'addLocalRoles')
PlominoIndex.security.declareProtected(READ_PERMISSION, 'get_js_hidden_fields')
InitializeClass(PlominoDocument)
# Default item names used for the parent/child wiring; overridable per call
# through the **kwargs of the functions below.
defaults = dict(
    parentKey = 'parentDocument',
    parentLinkKey = 'linkToParent',
    childrenListKey = 'childrenList_%s'
)
def getPath(doc, virtual=False):
    """Return the document path as a slash-joined string (used for
    doclinks).

    When *virtual* is true the physical path is first translated through
    the request to the virtual path; an empty translation falls back to
    the physical path.
    """
    physical = doc.doc_path()
    virtual_path = None
    if virtual:
        virtual_path = doc.REQUEST.physicalPathToVirtualPath(physical)
    return '/'.join(virtual_path or physical)
def setParenthood(ChildDocument, parent_id, CASCADE=True, setDocLink=False, **kwargs):
    """Record the parent reference on a child document.

    Stores the parent's id under the parent key, the CASCADE flag, and —
    when setDocLink is true — a doclink to the parent both on the document
    and on the request.
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    parent_link_key = kwargs.get('parentLinkKey') or defaults.get('parentLinkKey')
    parent = ChildDocument.getParentDatabase().getDocument(parent_id)
    parent_path = getPath(parent)
    ChildDocument.setItem(parent_key, parent.getId())
    ChildDocument.setItem('CASCADE', CASCADE)
    if setDocLink:
        # useful for the batch (bash) procedure
        ChildDocument.setItem(parent_link_key, [parent_path])
        # useful for creation through the web
        ChildDocument.REQUEST.set(parent_link_key, [parent_path])
def setChildhood(ChildDocument, parent_id, backToParent='anchor', **kwargs):
    """Record the child reference on the parent document.

    Appends the child's path to the parent's per-form children list, makes
    sure the parent key and CASCADE fields are indexed, and optionally sets
    a redirect back to the parent (with an anchor to the children list).
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    children_list_key = kwargs.get('childrenListKey') or defaults.get('childrenListKey')
    db = ChildDocument.getParentDatabase()
    parent = db.getDocument(parent_id)
    list_name = children_list_key % ChildDocument.Form
    children = parent.getItem(list_name, []) or []
    children.append(getPath(ChildDocument))
    index = db.getIndex()
    # Both fields are needed by ondelete_parent's dbsearch.
    for fieldname in (parent_key, 'CASCADE'):
        if fieldname not in index.indexes():
            index.createFieldIndex(fieldname, 'TEXT', refresh=True)
    parent.setItem(list_name, children)
    if backToParent:
        back_url = parent.absolute_url()
        if backToParent == 'anchor':
            back_url = '%s#%s' % (back_url, list_name)
        ChildDocument.setItem('plominoredirecturl', back_url)
def oncreate_child(self, parent_id='', backToParent='anchor', **kwargs):
    """Hook to run when a child document is created.

    Falls back to the request for the parent id when none is passed, then
    wires up both sides of the parent/child relationship.
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    # When no explicit parent_id was given, take it from the request.
    parent_id = parent_id or self.REQUEST.get(parent_key)
    if parent_id:
        setParenthood(self, parent_id, **kwargs)
        setChildhood(self, parent_id, backToParent, **kwargs)
def onsave_child(self, backToParent=False):
    """Hook to run when a child document is saved.

    On re-saves, drop the stored redirect-to-parent URL unless the caller
    asked to keep redirecting back to the parent.
    """
    if self.isNewDocument():
        return
    if backToParent:
        return
    if self.getItem('plominoredirecturl'):
        self.removeItem('plominoredirecturl')
def ondelete_child(self, anchor=True, **kwargs):
    """Hook to run when a child document is deleted.

    Removes the child's path from the parent's children list and sets a
    return URL back to the parent (optionally to the children-list anchor).
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    children_list_key = kwargs.get('childrenListKey') or defaults.get('childrenListKey')
    if parent_key not in self.getItems():
        return
    db = self.getParentDatabase()
    parent = db.getDocument(self.getItem(parent_key))
    list_name = children_list_key % self.Form
    children = parent.getItem(list_name)
    children.remove(getPath(self))
    parent.setItem(list_name, children)
    back_url = parent.absolute_url()
    if anchor:
        back_url = '%s#%s' % (back_url, list_name)
    self.REQUEST.set('returnurl', back_url)
def ondelete_parent(self, **kwargs):
    """Hook to run when a parent document is deleted.

    Children flagged CASCADE are deleted along with the parent; the others
    merely lose their parent reference. Finally redirect to the database.
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    db = self.getParentDatabase()
    index = db.getIndex()
    results = index.dbsearch({parent_key: self.id})
    doomed = []
    for record in results:
        if record.CASCADE:
            doomed.append(record.id)
        else:
            record.getObject().removeItem(parent_key)
    db.deleteDocuments(ids=doomed, massive=False)
    self.REQUEST.set('returnurl', db.absolute_url())
def getWhereToRedirect(db, redirect_to, using, **kwargs):
    """Resolve a redirect target inside *db*.

    *redirect_to* is looked up as a view first, then as a form, falling
    back to the database itself; *using* selects a template attribute on
    the destination. Extra keyword arguments become a query string.
    Returns (url, messages) where messages is a list of (text, level)
    pairs for anything that could not be resolved.
    """
    destination = db.getView(redirect_to) or db.getForm(redirect_to) or db
    messages = []
    if destination == db and redirect_to:
        messages.append(('Destination "%s" not found.' % redirect_to, 'error'))
    if hasattr(destination, using):
        destinationUrl = '%s/%s' % (destination.absolute_url(), using)
    else:
        destinationUrl = destination.absolute_url()
        if using:
            messages.append(('Template "%s" not found.' % using, 'error'))
    if kwargs:
        destinationUrl += '?%s' % urllib_urlencode(kwargs)
    return destinationUrl, messages
def beforecreate_child(self, redirect_to='', using='', message=(), **kwargs):
    """Hook to run before child creation.

    When the request does not carry a valid parent document id, queue the
    appropriate status messages and redirect away instead of creating the
    child. *message* overrides the default warning, e.g.
    ("Indicazioni per l'utente", 'info').
    """
    parent_key = kwargs.get('parentKey') or defaults.get('parentKey')
    db = self.getParentDatabase()
    if db.getDocument(self.REQUEST.get(parent_key)):
        return
    destination_url, messages = getWhereToRedirect(db, redirect_to, using, **kwargs)
    if self.REQUEST.get(parent_key):
        messages.append(('Given id seams not to correspond to a valid plominoDocument.', 'error'))
    else:
        # A plain string message is wrapped into the (text, level) form.
        if isinstance(message, basestring):
            message = (message, )
        messages.append(message or ('No plominoDocument id given.', 'warning'))
    plone_tools = getToolByName(db.aq_inner, 'plone_utils')
    for msg in messages:
        plone_tools.addPortalMessage(*msg, request=self.REQUEST)
    self.REQUEST.RESPONSE.redirect(destination_url)
def create_child(self, form_name, request={}, applyhidewhen=True, **kwargs):
    """Create a child document of *self* from a script.

    Instantiates a new document for *form_name*, reads the form inputs
    from *request*, wires up both sides of the parent/child relationship,
    saves, and returns the new document's id.

    NOTE(review): the mutable default for *request* is shared across
    calls; safe only while readInputs treats it as read-only — confirm.
    """
    db = self.getParentDatabase()
    form = db.getForm(form_name)
    child = db.createDocument()
    child.setItem('Form', form_name)
    form.readInputs(child, request, applyhidewhen=applyhidewhen)
    setParenthood(child, self.id, **kwargs)
    setChildhood(child, self.id, **kwargs)
    child.save()
    return child.getId()
# Monkey-patch the parent/child helpers onto the Plomino classes so they
# are callable as document/form methods from formulas.
PlominoDocument.create_child = create_child
PlominoDocument.oncreate_child = oncreate_child
PlominoDocument.onsave_child = onsave_child
PlominoDocument.ondelete_child = ondelete_child
PlominoDocument.ondelete_parent = ondelete_parent
PlominoForm.beforecreate_child = beforecreate_child
# ---------------------------------------------------------------------------
# Thin wrappers exposing the workflow_utils helpers as methods (they are
# monkey-patched onto PlominoDocument/PlominoForm below).
# NOTE(review): the mutable default args=[] is shared across calls; safe
# only while the helpers treat it as read-only — confirm in workflow_utils.
# ---------------------------------------------------------------------------
def wf_getChainFor(self):
    return getChainFor(self)
def wf_workflowInfo(self, wf_ids=None, single=True, args=[], **kwargs):
    return getWorkflowInfo(self, wf_ids=wf_ids, args=args, **kwargs)
def wf_statesInfo(self, state_id='review_state', args=[], **kwargs):
    return getStatesInfo(self, state_id=state_id, args=args, **kwargs)
def wf_transitionsInfo(self, supported_only=True, state_id='review_state', args=[], **kwargs):
    return getTransitionsInfo(self, supported_only=supported_only, state_id=state_id, args=args, **kwargs)
def wf_getInfoForState(self, wf_id, state_id, args=[]):
    return getInfoForState(self, wf_id, state_id, args=args)
def wf_getInfoFor(self, arg, *args, **kwargs):
    return getInfoFor(self, arg, *args, **kwargs)
def addLocalRoles(self, group, roles):
    return self.manage_addLocalRoles(group, roles)
# Attach the workflow wrappers to documents and forms.
PlominoDocument.wf_getChainFor = wf_getChainFor
PlominoDocument.wf_workflowInfo = wf_workflowInfo
PlominoDocument.wf_statesInfo = wf_statesInfo
PlominoDocument.wf_transitionsInfo = wf_transitionsInfo
PlominoDocument.wf_getInfoFor = wf_getInfoFor
PlominoDocument.addLocalRoles = addLocalRoles
PlominoForm.wf_getChainFor = wf_getChainFor
PlominoForm.wf_workflowInfo = wf_workflowInfo
PlominoForm.wf_statesInfo = wf_statesInfo
PlominoForm.wf_transitionsInfo = wf_transitionsInfo
PlominoForm.wf_getInfoForState = wf_getInfoForState
def get_js_hidden_fields(self, REQUEST, doc):
    '''
    Public wrapper exposing the protected method _get_js_hidden_fields
    so it can be reached from restricted (formula) code.
    '''
    return self._get_js_hidden_fields(REQUEST=REQUEST, doc=doc)
# Attach the wrapper above to the form class.
PlominoForm.get_js_hidden_fields = get_js_hidden_fields
| gisweb/gisweb.utils | src/gisweb/utils/plomino_addons.py | plomino_addons.py | py | 10,213 | python | en | code | 1 | github-code | 13 |
41822896269 | import datetime
import sys
import logging
import os
from enum import Enum
from datetime import date
import json
import traceback
from cookie import ws_key_to_pt_key
# 封装UserInfo
# 方便扩展修改维护
import requests
class LoginStatus(Enum):
    """Login-state flags stored on a user record."""
    # The cookie was refreshed; a fresh validity check is needed.
    NEED_CHECK = 0
    # The last check found the login still valid.
    LAST_LOGINED = 1
    # The last check found the login invalid.
    INVALID_LOGIN = 2

    @staticmethod
    def is_valid(value):
        """Return True when *value* equals some member's value."""
        return any(member.value == value for member in LoginStatus)
class UserInfo:
# ck 和 appkey 没有配置,一律返回None type
    def __init__(self, ck=None, sign_server=None, **kwargs):
        """Build a user record.

        :param ck: cookie string; when None, falls back to kwargs['cookie'].
            Spaces and semicolons are stripped from whichever is used.
        :param sign_server: signing-server address (stored as-is).
        Recognised kwargs: cookie, appkey, name, uuid, wechart,
        out_of_time, register_time, last_login_date, priority, vip_level,
        nick_name, pushplus_token, login_status. Missing register_time /
        last_login_date default to today; priority / vip_level default to
        888; login_status defaults to LoginStatus.NEED_CHECK.
        """
        if ck is None:
            self.cookie = kwargs.get('cookie', None)
            if self.cookie is not None:
                self.cookie = str(self.cookie).replace(' ', '').replace(';', '')
        else:
            self.cookie = str(ck).replace(' ', '').replace(';', '')
        self.appkey = kwargs.get('appkey', None)
        if self.appkey is not None:
            self.appkey = str(self.appkey).replace(' ', '').replace(';', '')
        # todo move to global config
        self.sign_server = sign_server
        self.name = kwargs.get('name')
        self.uuid = kwargs.get('uuid', '')
        self.wechart = kwargs.get('wechart')
        self.out_of_time = kwargs.get('out_of_time')
        self.register_time = kwargs.get('register_time')
        if self.register_time is None:
            self.register_time = date.today()
        self.last_login_date = kwargs.get('last_login_date')  # date of the last successful login check
        if self.last_login_date is None:
            self.last_login_date = date.today()
        self.priority = kwargs.get('priority')
        if self.priority is None:
            self.priority = 888
        self.vip_level = kwargs.get('vip_level')
        if self.vip_level is None:
            self.vip_level = 888
        self.nick_name = kwargs.get('nick_name')
        self.pushplus_token = kwargs.get('pushplus_token')
        if self.pushplus_token is not None:
            self.pushplus_token = str(self.pushplus_token).strip().replace('\'', '').replace(' ', '')
        if kwargs.get('login_status') is None:
            self.login_status = LoginStatus.NEED_CHECK.value
        else:
            # NOTE(review): assert is stripped under `python -O`; an explicit
            # ValueError would be sturdier input validation.
            assert LoginStatus.is_valid(kwargs.get('login_status'))
            self.login_status = kwargs.get('login_status')
def has_config_key(self) -> bool:
if self.cookie == '' and self.appkey == '':
return False
else:
return True
def get_name(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_uuid(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_cookie(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_appkey(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def set_cookie(self, value: str):
value = value.strip().replace(' ', '').replace('\n', '')
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def set_appkey(self, value: str):
value = value.strip().replace(' ', '').replace('\n', '')
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def get_wechart(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def set_wechart(self, value: str):
value = value.strip().replace(' ', '').replace('\n', '')
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def get_out_of_time(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_register_time(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_last_login_date(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_priority(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_vip_level(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def get_pt_pin(self):
result_pin = None
if self.get_appkey() is not None:
for key in str(self.get_appkey()).split(';'):
key = key.strip().replace(' ', '')
if 'pin=' in key:
result_pin = key.split('pin=')[-1].replace(';', '').replace('"', '').replace('\'', '')
elif self.get_cookie() is not None:
for key in str(self.get_cookie()).split(';'):
key = key.strip().replace(' ', '')
if 'pt_pin=' in key:
result_pin = key.split('pt_pin=')[-1].replace(';', '').replace('"', '').replace('\'',
'')
if result_pin is None:
logging.error("no pt_pin")
return "no_config_pt_pin"
return result_pin
def get_wskey(self):
if self.get_appkey() is not None:
return self.get_appkey().split('wskey=')[-1].replace(';', '').replace('"', '').replace('\'', '')
else:
return None
def to_string(self):
for attr, value in self.__dict__.items():
logging.info("{}={}".format(str(attr), str(value)))
def get_login_status(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def set_login_status(self, value: str):
assert LoginStatus.is_valid(value)
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def get_nick_name(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def set_nick_name(self, value: str):
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def set_uuid(self, value: str):
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def get_pushplus_token(self):
attr = str(sys._getframe().f_code.co_name).replace('get_', '')
return getattr(self, attr)
def set_pushplus_token(self, value: str):
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def set_priority(self, value: str):
attr = str(sys._getframe().f_code.co_name).replace('set_', '')
setattr(self, attr, value)
def update_last_login_date(self):
attr = str(sys._getframe().f_code.co_name).replace('update_', '')
return setattr(self, attr, date.today())
def is_login(self):
# headers = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-cn',
# 'Connection': 'keep-alive',
# 'Cookie': self.get_cookie(),
# 'Referer': "https://home.m.jd.com/myJd/newhome.action",
# 'User-Agent': "jdapp;iPhone;10.0.2;14.3;network/wifi;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
# 'Host': 'wq.jd.com',
# }
# response = requests.get(
# 'https://wq.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder',
# headers=headers)
if True:
logging.error("GetJDUserInfoUnion failed, switch to new API me-api.jd.com")
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-cn',
'Connection': 'keep-alive',
'Cookie': self.get_cookie(),
'Referer': "hhttps://home.m.jd.com/myJd/newhome.action?sceneval=2&ufc=&",
'User-Agent': "jdapp;iPhone;10.0.2;14.3;network/wifi;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
'Host': 'me-api.jd.com',
}
response = requests.get('https://me-api.jd.com/user_new/info/GetJDUserInfoUnion', headers=headers)
try:
response = response.json()
except json.JSONDecodeError as err:
logging.error(traceback.format_exc())
logging.error("decode jason failed: error: {} ck:{} response code:{} response.txt:{}".format(str(err),
self.get_cookie(),
response,
response.text))
return False
# print(response)
if response.get('retcode') == 0 or response.get('retcode') == '0':
user_name = response['data']['userInfo']['baseInfo']['nickname']
curPin = response['data']['userInfo']['baseInfo']['curPin']
logging.info("{} {} 登陆成功".format(self.get_cookie(), user_name))
logging.info("curPin={}".format(curPin))
self.set_nick_name(user_name)
return True
elif response.get('retcode') == 13 or response.get('retcode') == '13':
logging.info("{} 登陆失效".format(self.get_cookie()))
return False
elif response.get('retcode') == 1001 or response.get('retcode') == '1001':
logging.info("{} 登陆失效".format(self.get_cookie()))
return False
else:
logging.error("Error: {} {}".format(self.get_cookie(), response))
return False
def is_expired(self):
out_of_date = datetime.datetime.strptime(str(self.get_register_time()), '%Y-%m-%d')
current_date = datetime.datetime.strptime(str(date.today()), '%Y-%m-%d')
return {
"is_expired": (current_date - out_of_date).days < 0,
"remaining_days": (current_date - out_of_date).days
}
def get_last_login_date_expired_days(self) -> int:
last_login_day = datetime.datetime.strptime(str(self.get_last_login_date()), '%Y-%m-%d')
current_date = datetime.datetime.strptime(str(date.today()), '%Y-%m-%d')
return (current_date - last_login_day).days
def get_user_dict(self):
result_ck = {'name': self.get_name(),
'nick_name': self.get_nick_name(),
'priority': self.get_priority(),
'vip_level': self.get_vip_level(),
'wechart': self.get_wechart(),
'out_of_time': self.get_out_of_time(),
'register_time': self.get_register_time(),
'login_status': self.get_login_status(),
'last_login_date': self.get_last_login_date(),
'cookie': self.get_cookie(),
'appkey': self.get_appkey(),
'pushplus_token': self.get_pushplus_token(),
'uuid': self.get_uuid()
}
return result_ck
def format_ck(self):
if self.cookie != None and self.cookie != '' and len(str(self.cookie)) > 0:
ck = self.cookie.strip().replace(' ', '').replace('\n', '')
if not ck.endswith(';'):
ck = ck + ';'
self.cookie = ck
return self
def update_ck_from_user(self, new_user, update_cookie=True):
if new_user.get_appkey() != None and new_user.get_appkey() != '':
self.set_appkey(new_user.get_appkey())
if new_user.get_pushplus_token() != None and new_user.get_pushplus_token() != '':
self.set_pushplus_token(new_user.get_pushplus_token())
if new_user.get_cookie() != None and new_user.get_cookie() != '' and update_cookie:
self.set_cookie(new_user.get_cookie())
if new_user.get_wechart() != None and new_user.get_wechart() != '':
self.set_wechart(new_user.get_wechart())
def update_ws_key_to_pt_key(self, delay=True):
if self.get_appkey() is not None:
appkey = ws_key_to_pt_key(self.get_pt_pin(), self.get_wskey(), sign_server=self.sign_server,
uuid=self.get_uuid(), delay=delay)
if appkey is not None:
self.set_cookie(appkey)
return True
else:
logging.error('ws_key 可能已失效')
return False
if __name__ == '__main__':
    # Ad-hoc smoke test: build a UserInfo from the `cookie` env var and check
    # that the constructor/getters round-trip the supplied values.
    logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                        level=logging.DEBUG)
    ck = os.getenv('cookie')
    if ck is None:
        ck = 1
    userInfo = UserInfo(ck, name='name', wechart='3', out_of_time=date.today(), priority=1)
    assert userInfo.get_cookie() == str(ck), userInfo.get_cookie()
    assert userInfo.get_name() == 'name', userInfo.get_name()
    # NOTE(review): the failure messages below call e.g. userInfo.wechart()
    # — wechart is an attribute, not a callable, so a failing assert would
    # raise TypeError while building its message. Confirm and fix if hit.
    assert userInfo.get_wechart() == '3', userInfo.wechart()
    assert userInfo.get_out_of_time() == date.today(), userInfo.out_of_time()
    assert userInfo.get_priority() == 1, userInfo.priority()
    a = datetime.datetime.strptime(str(userInfo.get_register_time()), '%Y-%m-%d')
    b = datetime.datetime.strptime(str(date.today()), '%Y-%m-%d')
    assert (b - a).days == 0
    print(userInfo.is_login())
    user = UserInfo()
    assert user.get_cookie() is None
    assert user.get_wskey() is None
| imwcc/jd_imwcc | pythonProjects/ck_manager/UserInfo.py | UserInfo.py | py | 14,176 | python | en | code | 2 | github-code | 13 |
73525333137 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 12:02:20 2017
@author: lenovo
"""
class Setting():
    # Stores all the game's configuration values (Alien Invasion style game).
    def __init__(self):
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230,230,230)
        # Static ship settings
        self.ship_limit = 3
        # Static bullet settings
        self.bullet_width = 300
        self.bullet_height = 15
        self.bullet_color = 60, 60, 60
        self.bullets_allowed = 3
        # Static alien settings
        self.fleet_drop_speed = 10
        # Game pacing: speedup_scale scales speeds each level, score_scale
        # scales the per-alien point value.
        self.speedup_scale = 1.1
        self.score_scale = 1.5
        self.initialize_dynamic_setting()
    def initialize_dynamic_setting(self):
        # Initialize the settings that change as the game progresses.
        self.ship_speed_factor = 1.5
        self.alien_speed_factor = 1
        self.bullet_speed_factor = 1
        self.alien_point = 50
        # fleet_direction of 1 means moving right; -1 means moving left.
        self.fleet_direction = 1
    def increase_speed(self):
        # Speed the game up and raise the alien point value.
        self.ship_speed_factor *= self.speedup_scale
        self.alien_speed_factor *= self.speedup_scale
        self.bullet_speed_factor *= self.speedup_scale
        self.alien_point = int(self.alien_point * self.score_scale)
| funiazi/Alien-Game | setting.py | setting.py | py | 1,355 | python | en | code | 0 | github-code | 13 |
35981797802 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# author:@Jack.Wang
# time :2018/8/14 17:57
from vnctpmd import MdApi
import time
import os
import pandas as pd
from data_get_save.PostgreSQL import PostgreSQL
import datetime
from data_get_save.futures_time import futures_time
from data_get_save.futures_info_process import futures_cal
class ly_time:
    """Mixin that prints messages prefixed with the current local time."""
    def __init__(self):
        pass
    def ly_timeNow(self):
        """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        now = time.localtime(time.time())
        return time.strftime("%Y-%m-%d %H:%M:%S", now)
    def ly_print(self, strG):
        """Print *strG* prefixed with the current timestamp."""
        print(self.ly_timeNow() + ' ' + strG)
class ly_ctpmd(MdApi, ly_time):
    """CTP market-data client: subscribes to contracts, stores ticks in
    PostgreSQL and aggregates them into one-minute bars.

    Fixes vs. the original: `tablename is 'j1901'` used identity comparison
    on a string literal (unreliable — now `==`), and the two bare `except:`
    clauses now catch `Exception` so KeyboardInterrupt/SystemExit propagate.
    """
    def __init__(self, address='tcp://180.168.146.187:10011', userID='123609', password='wangyun199', brokerID='9999'):
        MdApi.__init__(self)
        ly_time.__init__(self)
        tick_sql = PostgreSQL('futures_tick')
        minu_sql = PostgreSQL('futures_min')
        self.tick_sql = tick_sql
        self.minu_sql = minu_sql
        self.ly_print('ly_ctpmd 类')
        self.reqID = 0  # request sequence number
        self.tick = {}
        self.tick_his = {}
        self.bar = {}
        self.address = address  # front server address
        self.time_point = {}  # per-contract list of minute boundaries used for bar aggregation
        # path = os.getcwd() + '/md/con/'
        path = 'c:/ctp_con/md/'
        if not os.path.exists(path):
            os.makedirs(path)
        # Create the C++ API object; the argument is the folder for .con files.
        self.createFtdcMdApi(path)
        self.registerFront(self.address)
        # init() connects; on success onFrontConnected is invoked.
        self.init()
        self.userID = userID  # account
        self.password = password  # password
        self.brokerID = brokerID  # broker code
    def onFrontConnected(self):
        """Front server connected — proceed to login."""
        self.ly_print('初始化连接成功,前置无身份服务器已连接!!!可以使用账号密码登陆')
        self.login()
    def onFrontDisconnected(self):
        """Front server disconnected."""
        self.ly_print('前置无身份服务器断开连接!!!')
    def login(self):
        """Send the user-login request."""
        req = {}
        req['UserID'] = self.userID
        req['Password'] = self.password
        req['BrokerID'] = self.brokerID
        self.reqID += 1
        self.reqUserLogin(req, self.reqID)
        self.ly_print('账号密码登陆已连接!!!')
    def onRspUserLogout(self, data, error, n, last):
        """Logout callback."""
        self.ly_print('行情登出回报')
    def onRspUserLogin(self, data, error, n, last):
        """Login callback — subscribe to market data once logged in."""
        self.ly_print('行情已登录,可以订阅深度行情')
        self.subscribe_contract()
        # self.subscribeMarketData('cu1810')  # can only subscribe after login
        # unsubscribe with: self.unSubscribeMarketData(str(symbol))
    def subscribe_contract(self):
        """Create tables and subscribe for each contract of the trading day."""
        futures_t = futures_time()
        futures_c = futures_cal()
        nowdate = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
        contract = futures_c.ctp_list(nowdate)
        # NOTE(review): hard-coded override discards the computed contract
        # list — looks like a debugging leftover; confirm before removing.
        contract = ['j1901']
        trade_day = futures_t.trade_day_now_cal()
        for con in contract:
            self.minu_sql.minute_create_table(con)
            self.tick_sql.tick_create_table(con)
            min_list = futures_t.trade_time_min(con[0:-4], trade_day)
            self.time_point[con] = min_list
            self.subscribeMarketData(con)
    def onRspSubMarketData(self, data, error, n, last):
        """Subscription acknowledgement."""
        self.ly_print('订阅合约回报' + str(data))
    def onRtnDepthMarketData(self, data):
        """Market-data push: persist the tick and aggregate finished minutes."""
        self.data = data
        try:
            tick = {}
            tick['业务日期'] = data['ActionDay']
            tick['最后修改时间'] = data['UpdateTime']
            tick['时间'] = tick['业务日期'][:4] + '-' + tick['业务日期'][4:6] + '-' + tick['业务日期'][6:8]
            tick['时间'] = tick['时间'] + ' ' + tick['最后修改时间']
            tick['最新价'] = data['LastPrice']
            tick['数量'] = data['Volume']
            tick['持仓量'] = data['OpenInterest']
            tick['涨停板价'] = data['UpperLimitPrice']
            tick['跌停板价'] = data['LowerLimitPrice']
            tick['申买一价'] = data['BidPrice1']
            tick['申买一量'] = data['BidVolume1']
            tick['申卖一价'] = data['AskPrice1']
            tick['申卖一量'] = data['AskVolume1']
            # Write the tick to the local database.
            tablename = data['InstrumentID']
            if tablename == 'j1901':  # was `is 'j1901'`: identity comparison on a literal
                self.ly_print('j1901')
            self.tick_sql.tick_insert(tablename, tick['时间'], float(tick['最新价']),
                                      int(tick['数量']), int(tick['持仓量']), float(tick['涨停板价']),
                                      float(tick['跌停板价']), float(tick['申买一价']),
                                      int(tick['申买一量']), float(tick['申卖一价']), int(tick['申卖一量']))
            # When the tick passes the next minute boundary, fold the elapsed
            # minute's ticks into a bar and drop the consumed boundary.
            if tick['时间'] > self.time_point[tablename][0][1]:
                timestart = self.time_point[tablename][0][0]
                timeend = self.time_point[tablename][0][1]
                self.tick_to_minute(tablename, timestart, timeend)
                self.time_point[tablename].remove(self.time_point[tablename][0])
                self.ly_print('合成数据成功')
        except Exception:
            # self.ly_print('行情异常')
            pass
    def tick_to_minute(self, tablename, timestart, timeend):
        """Aggregate ticks in [timestart, timeend) into one OHLCV minute bar."""
        try:
            data = self.tick_sql.tick_select_time(tablename, timestart, timeend)
            data = pd.DataFrame(data, columns=['时间', '最新价', '数量', '持仓量'])
            if len(data) != 0:  # some minutes have no ticks at all
                t = str(data['时间'][0])[0:19]
                o = data['最新价'][0]
                h = data['最新价'].max()
                l = data['最新价'].min()
                c = data['最新价'][len(data) - 1]
                vo = data['数量'][len(data) - 1] - data['数量'][0]
                op = data['持仓量'][len(data) - 1]
                self.minu_sql.minute_insert(tablename, t, o, h, l, c, vo, op)
        except Exception:
            print('合成数据出现问题,请检查')
    def onRspUnSubMarketData(self, data, error, n, last):
        """Unsubscription acknowledgement."""
        self.ly_print('退订合约回报' + str(data))
    def onRspError(self, error, n, last):
        """Error callback."""
        self.ly_print('错误回报' + str(error))
    def onHeartBeatWarning(self):
        """Heartbeat warning — connection still alive."""
        self.ly_print("连接仍继续")
if __name__ == '__main__':
    # quick manual test
    # NOTE(review): binding the instance to a module-level name `self`
    # shadows the conventional method parameter name — consider renaming.
    self = ly_ctpmd()
    # self = ly_ctpmd(address='tcp://27.115.57.128:41213', userID='999819992', password='5172187a', brokerID='9000')
    # Because the API runs in worker threads, the main thread must wait for
    # them before joining.
    # self.subscribeMarketData('cu1810') # 登录成功了才能订阅行情
    """
    第一组:Trade:180.168.146.187:10000,Market:180.168.146.187:10010;【电信】
    第二组:Trade:180.168.146.187:10001,Market:180.168.146.187:10011;【电信】
    第三组:Trade:218.202.237.33 :10002,Market:218.202.237.33 :10012;【移动】
    交易前置:180.168.146.187:10030,行情前置:180.168.146.187:10031;【7x24】
    # 这是实盘
    # self.connect('999819992', '5172187a', '9000', 'tcp://61.140.230.188:41205')
    """
| wangyundlut/Futures_Quant | ctp/md_api_mine.py | md_api_mine.py | py | 7,633 | python | en | code | 1 | github-code | 13 |
12117567713 | """
爬取某人抖音账号中所有短视频信息
"""
import time
from pandas import DataFrame
from selenium.webdriver.common.by import By
from common.selenium_tools import SeleniumHelper
# Scrape every short-video card from one Douyin user page: keep scrolling
# until the "no more videos" marker appears, then collect title / like-count
# / link per card and dump them to an Excel file.
with SeleniumHelper() as driver:
    driver.get(
        "https://www.douyin.com/user/MS4wLjABAAAAb4b5X-nNL8bEJv1WtUAfxVPdLOFNZftqHYPk5_FZqJXj2AhqfcyvX0mvbTUkNHZd?vid=7173332388968205581")
    while True:
        try:
            # Returns the element if present, raises otherwise.
            driver.find_element(By.XPATH,'//div[@class="Bllv0dx6"]') # the "没有更多视频" (no more videos) marker
            break # reaching this line means no exception — no more videos
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # catching NoSuchElementException would be safer — confirm.
            # An exception means more videos remain, so scroll further down.
            driver.scroll_by(0, 1000)
            time.sleep(3)
    list_video = []
    for item in driver.find_elements(By.XPATH, '//li[@class="Eie04v01"]'):
        dict_info = {
            "标题": item.find_element(By.XPATH, './a/p').text,
            "点赞数": item.find_element(By.XPATH, './/span[@class="jjKJTf4P author-card-user-video-like"]').text,
            "链接": item.find_element(By.XPATH, './a').get_attribute("href"),
        }
        list_video.append(dict_info)
    DataFrame(list_video).to_excel("达内账号所有短视频信息.xlsx",index=False)
41882953210 |
def determine_bfs(start, edges, seen):
    """Walk the cycle containing *start*, marking nodes in *seen*.

    Returns True when the walk closes a cycle of even length, False when a
    node has no entry in *edges* or the cycle length is odd.
    """
    length = 0
    prev, node = -1, start
    while node != start or prev == -1:
        if node not in edges:
            return False
        seen.add(node)
        length += 1
        successor = edges[node][0]
        if successor == prev:
            successor = edges[node][1]
        prev, node = node, successor
    return length % 2 == 0
def determine_result(n, edges):
    """Return True when every cycle through vertices 1..n has even length."""
    visited = set()
    for vertex in range(1, n + 1):
        if vertex in visited:
            continue
        if not determine_bfs(vertex, edges, visited):
            return False
    return True
if __name__ == '__main__':
    # For each test case: read n edges, reject self-loops and vertices of
    # degree > 2, then check that every cycle has even length.
    for _ in range(int(input())):
        n = int(input())
        edges = {}
        is_possible = True
        correct_numbers = 0
        for _ in range(n):
            a, b = tuple(int(x) for x in input().split())
            if not is_possible:
                continue
            if a == b:
                # self-loop: impossible
                is_possible = False
            if a not in edges:
                edges[a] = [b]
            else:
                edges[a].append(b)
                correct_numbers += 1
            if len(edges[a]) > 2:
                # degree > 2: the graph cannot decompose into simple cycles
                is_possible = False
            if b not in edges:
                edges[b] = [a]
            else:
                edges[b].append(a)
                correct_numbers += 1
            if len(edges[b]) > 2:
                is_possible = False
        if not is_possible or correct_numbers != n:
            print("NO")
            continue
        good_result = determine_result(n, edges)
        if good_result:
            print("YES")
        else:
            print("NO")
| danherbriley/acm4 | 01/split_into_two_sets.py | split_into_two_sets.py | py | 1,754 | python | en | code | 0 | github-code | 13 |
73924062739 | """"
lists
"""
import random
# I learnt to access a nested list you have to access the row you want first then the element in the list.
# Think of each list in a nested list as a row
# Some people pass 13 meaning column 1 row 3.
# ===========================================================================================================
# Rock, Paper, Scissors game.
# ASCII art for the three choices, indexed 0=rock, 1=paper, 2=scissors.
# NOTE(review): the `paper` art below contains runs of `"` inside an r'''"""'''
# string — verify the quoting survives as intended in the original file.
rock = r"""
      ,--.--._
------" _, \___)
      / _/____)
      \//(____)
------\     (__)
       `-----"   """
paper = r"""
8b,dPPYba,  ,adPPYYba,  8b,dPPYba,    ,adPPYba,  8b,dPPYba,
88P'    "8a ""     `Y8  88P'    "8a  a8P_____88  88P'   "Y8
88       d8 ,adPPPPP88  88       d8  8PP"""""""  88
88b,   ,a8" 88,    ,88  88b,   ,a8"  "8b,   ,aa  88
88`YbbdP"'  `"8bbdP"Y8  88`YbbdP"'    `"Ybbd8"'  88
88                      88
88                      88   """
scissors = r"""
   \  /
    X
   O O"""
game_images = [rock, paper, scissors]
print("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.")
user_choice = int(input())
computer_choice = random.randint(0, 2)
# Out-of-range input counts as a loss; otherwise compare choices.
if user_choice < 0 or user_choice >= 3:
    print("You typed an invalid number. You lose!")
else:
    print(game_images[user_choice])
    print("Computer chose:")
    print(game_images[computer_choice])
    if computer_choice == user_choice:
        print("it's a draw!")
    # Wrap-around cases first (rock beats scissors), then the general
    # "higher index wins" comparison.
    elif computer_choice == 0 and user_choice == 2:
        print("You lose!")
    elif computer_choice == 2 and user_choice == 0:
        print("You win!")
    elif computer_choice > user_choice:
        print("You lose!")
    elif computer_choice < user_choice:
        print("You win!")
| kvngdre/100DaysofPythonCode | Day4.py | Day4.py | py | 1,661 | python | en | code | 0 | github-code | 13 |
25191683254 | """
Advent of Code 2015
Day 3: Perfectly Spherical Houses in a Vacuum (Part Two)
"""
# Read the whole puzzle input: one string of ^ v < > movement characters.
with open("./input.txt", encoding="utf-8") as file:
    data = file.read()
class Houses:
    """Track deliveries on an infinite 2-D grid.

    Each cell is keyed by the string "x:y"; the value counts deliveries there.
    """
    def __init__(self) -> None:
        self._grid = {}
        self._x = 0
        self._y = 0
    def _add(self) -> None:
        # Count one delivery at the current position.  (The original seeded
        # the first visit with 0, undercounting every house by one; only the
        # keys were ever read, but the counts are now correct too.)
        key = f"{self._x}:{self._y}"
        self._grid[key] = self._grid.get(key, 0) + 1
    def deliver(self, movement) -> None:
        """Deliver at the current house, move one step, deliver again.

        movement: "^" up, "v" down, ">" right; anything else moves left.
        """
        self._add()
        if movement == "^":
            self._y += 1
        elif movement == "v":
            self._y -= 1
        elif movement == ">":
            self._x += 1
        else:
            self._x -= 1
        self._add()
    def visited(self) -> set:
        """Return the set of "x:y" keys of every visited house."""
        return set(self._grid)
# Santa takes the even-indexed moves, Robo-Santa the odd-indexed ones;
# the answer is the number of distinct houses either of them visited.
houses = Houses()
robo = Houses()
for index in range(0, len(data), 2):
    move_1 = data[index]
    move_2 = data[index+1]
    houses.deliver(move_1)
    robo.deliver(move_2)
result = len(houses.visited().union(robo.visited()))
print("Result =", result)
| pedroboechat/adventofcode | 2015/Day 03/part_two.py | part_two.py | py | 1,055 | python | en | code | 0 | github-code | 13 |
73599058577 | import copy
def calculate_occupied(rows):
    """Run the seating simulation until the layout stabilises.

    Rules: an empty seat ('L') with no occupied neighbours becomes occupied
    ('#'); an occupied seat with four or more occupied neighbours empties;
    floor ('.') never changes.  Returns (stable layout, occupied count).
    """
    neighbour_offsets = [(-1, -1), (-1, 0), (-1, 1),
                         (0, -1),           (0, 1),
                         (1, -1),  (1, 0),  (1, 1)]
    nxt = rows
    while True:
        cur = nxt.copy()
        nxt = []
        for r, line in enumerate(cur):
            cells = []
            for c, cell in enumerate(line):
                occupied = 0
                for dr, dc in neighbour_offsets:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < len(cur) and 0 <= nc < len(line) and cur[nr][nc] == '#':
                        occupied += 1
                if cell == 'L':
                    cells.append('#' if occupied == 0 else 'L')
                elif cell == '#':
                    cells.append('L' if occupied >= 4 else '#')
                else:
                    cells.append('.')
            nxt.append(''.join(cells))
        # Fixed point reached: no seat changed this round.
        if cur == nxt:
            break
    total = sum(line.count('#') for line in cur)
    return cur, total
# Read the seat layout (one row per line), simulate to a fixed point, then
# print the final layout and the occupied-seat count.
with open("11_input.txt", "r") as f:
    rows = []
    for x in f:
        rows.append(x.rstrip())
final_sp, occ_seats = calculate_occupied(rows)
for line in final_sp:
    print(line)
print(occ_seats)
| mereszd/aoc2020 | 11_01.py | 11_01.py | py | 2,305 | python | en | code | 0 | github-code | 13 |
4970127523 | n = int(input())
a = list(map(int, input().split()))
# cnt[v] = occurrences of value v inside the current window a[l..r]
cnt = [0] * 100001
l = res = count = 0
# Two-pointer sliding window: extend r one step at a time; while the window
# holds more than two distinct values, shrink from the left.  `res` tracks
# the longest window with at most two distinct values.
for r in range(n):
    if (cnt[a[r]] == 0):
        count += 1
    cnt[a[r]] += 1
    while count > 2:
        cnt[a[l]] -= 1
        if cnt[a[l]] == 0:
            count -= 1
        l += 1
    res = max(res, r - l + 1)
print(res)
| truclycs/code_for_fun | algorithms/python/python_blue/L02/B._Approximating_a_Constant_Range.py | B._Approximating_a_Constant_Range.py | py | 345 | python | en | code | 7 | github-code | 13 |
75079112976 | # -*- coding: UTF-8 -*-
from flask import Flask, request
import cloudscraper
app = Flask(__name__)
# Default route "/", GET only — simple liveness check.
@app.route('/', methods=['GET'])
def index():
    return 'Hello World!'
@app.route('/proxy', methods=['GET'])
def proxy():
    """Fetch ?url= through cloudscraper and return the response body.

    Optional query parameters User-Agent, Referer and Cookie are forwarded
    as request headers.  Returns the literal string "error" on any upstream
    failure, matching the original contract.
    """
    url = request.args.get('url')
    UserAgent_str = request.args.get('User-Agent')
    Referer_str = request.args.get('Referer')
    Cookie_str = request.args.get('Cookie')
    scraper = cloudscraper.create_scraper()
    if UserAgent_str is not None:
        scraper.headers.update({'User-Agent': UserAgent_str})
    if Referer_str is not None:
        scraper.headers.update({'Referer': Referer_str})
    if Cookie_str is not None:
        # Forward the raw Cookie header.  The original did
        # scraper.cookies.update({'Cookie': ...}), which creates a cookie
        # literally named "Cookie" instead of sending the caller's cookies.
        scraper.headers.update({'Cookie': Cookie_str})
    try:
        response = scraper.get(url)
        return response.text
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        return "error"
if __name__ == '__main__':
    # Listen on all interfaces so the proxy is reachable from other hosts.
    app.run(host='0.0.0.0', port=5000)
36925151312 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 10:00:10 2018
@author: WakeSurfin1
@ created 2018-11-18
parse a .csv file, skip the header row
calculate age based on dob
out put year of service and age to scatter chart
"""
from datetime import datetime
from logger_class import Logger
from configparser import ConfigParser
import matplotlib.pyplot as plt
import os
# 0 ---------------------------------------------------------------------
# dynamically build the log path and file name
# log pathFile = current path + /Logs/ + base script name + _log.txt
strCurDir = os.path.dirname(os.path.realpath(__file__)).replace('\\', '/')
strBaseName = os.path.basename(__file__).split(".")[0]
strLogFile = strCurDir + "/logs/" + strBaseName + "_log.txt"
# create the log object and pass the log path file
logger_object = Logger(strLogFile)
# 1 --------------------------------------------------------------------
logger_object.info("1: Begin Parse_CSV_File.py - open input file")
# get input file from config.ini
config = ConfigParser()
config.read('config.ini')
strDataFile = config.get('config_a', 'strDataFile')
try:
    # NOTE(review): infile is never closed; a `with open(...)` block around
    # the read loop below would be safer.
    infile = open(strDataFile,"r");
except Exception as e:
    logger_object.error("Can not open " + strDataFile + " : " + str(e))
    exit(1);
# 2 --------------------------------------------------------------------
logger_object.info("2: loop through the input file " + strDataFile)
# calculate current date in format YYYY-MM-DD
strToday = datetime.today().strftime('%Y-%m-%d')
dateToday = datetime.strptime(strToday, '%Y-%m-%d')
# skip the header row (i == 0) and calculate age in years;
# X = years of service (column 4), Y = age derived from DOB (column 3)
# — presumably those column positions match the CSV layout; verify.
X=[]
Y=[]
i=0
for line in infile:
    if i > 0:
        field = line.split(',')
        dateDob = datetime.strptime(field[3], '%Y-%m-%d')
        # Calculate age as the difference between DOB and Today
        ageYears = abs((dateDob - dateToday).days)//365
        X.append(field[4])
        Y.append(ageYears)
    i += 1
# NOTE(review): i includes the header row, so "record count" is rows + 1.
logger_object.info(strDataFile + " record count = " + str(i))
# 3 -------------------------------------------------------------------
logger_object.info("3: Build Scatter Chart ")
# construct scatter chart
# NOTE(review): plt.scatter is called twice on the same data; the first
# (default-styled) call appears redundant.
plt.scatter(X,Y)
# customize point size, point color, and point character
plt.scatter(X, Y, s=60, c='red', marker='^')
# Add title and labels to the graph
plt.title('Relationship Between Years of Service and Age')
plt.xlabel('Years of Service')
plt.ylabel('Age')
# 4 -------------------------------------------------------------------
logger_object.info("4: exit \n")
73912920979 |
from cement import App, TestApp, init_defaults
from cement.core.exc import CaughtSignal
from .core.exc import DotBakError
from .controllers.base import Base
# Configuration defaults for the [dotbak] section: the backup-file suffix
# and whether backups are timestamped.
CONFIG = init_defaults('dotbak')
CONFIG['dotbak']['suffix'] = '.bak'
CONFIG['dotbak']['timestamps'] = True
class DotBak(App):
    """DotBak primary application (Cement framework App subclass).

    All framework wiring lives in the inner Meta class below.
    """
    class Meta:
        label = 'dotbak'
        # configuration defaults
        config_defaults = CONFIG
        # call sys.exit() on close
        exit_on_close = True
        # load additional framework extensions
        extensions = [
            'yaml',
            'jinja2',
        ]
        # configuration handler
        config_handler = 'yaml'
        # configuration file suffix
        config_file_suffix = '.yml'
        # set the output handler
        output_handler = 'jinja2'
        # register handlers
        handlers = [
            Base
        ]
class DotBakTest(TestApp,DotBak):
    """A sub-class of DotBak that is better suited for testing."""
    class Meta:
        # same label so config/handlers resolve identically under test
        label = 'dotbak'
def main():
    """Run the CLI application, mapping known failures to exit codes."""
    with DotBak() as app:
        try:
            app.run()
        except (AssertionError, DotBakError) as e:
            # Identical handling for both; the printed prefix is the concrete
            # class name, reproducing the original per-exception messages.
            print('%s > %s' % (type(e).__name__, e.args[0]))
            app.exit_code = 1
            if app.debug is True:
                import traceback
                traceback.print_exc()
        except CaughtSignal as e:
            # Default Cement signals are SIGINT and SIGTERM, exit 0 (non-error)
            print('\n%s' % e)
            app.exit_code = 0
if __name__ == '__main__':
    # Script entry point.
    main()
| datafolklabs/dotbak | dotbak/main.py | main.py | py | 1,810 | python | en | code | 2 | github-code | 13 |
2609948517 | import turtle
import random
names = []
a = turtle.textinput("Name of Student", "Enter a student's name, click enter to exit")
while a != " ":
names.append(a)
a = turtle.textinput("Name of Student", "Enter a student's name, click space and enter to exit")
for x in range(len(names)):
print(x + 1, ".", names[x])
print("The person who got chose is:",random.choice(names))
| Aadhithr/Personal | PythonWork/PythonCourse/homework/uses_random/random name genarator.py | random name genarator.py | py | 386 | python | en | code | 0 | github-code | 13 |
13642456081 | import os
import subprocess
locations = [
"C:\\Program Files (x86)\\Proxifier\\Proxifier.exe",
]
def start_proxifier():
for l in locations:
if os.path.isfile(l):
subprocess.Popen(l)
return True, None
return False, "INSTALLATION_NOT_FOUND"
def close_proxifier():
subprocess.call(
"taskkill /f /IM proxifier.exe",
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
| sandbox-pokhara/proxifier-cli | proxifier_cli/process.py | process.py | py | 443 | python | en | code | 0 | github-code | 13 |
8242375230 | from __future__ import print_function, division
import numbers
import random
import copy
from . import tools
class MeshBin(object):
    """One bin of a MeshGrid: stores (counter, element, metadata) triples."""
    def __init__(self, cfg, coo, bounds):
        self.cfg = cfg
        self.coo = coo          # grid coordinate of the bin (None = out of bounds)
        self.bounds = bounds    # real-valued bounds of the bin (None = out of bounds)
        self.elements = []
    def add(self, e, metadata, counter):
        """Append element *e* with its metadata and unique counter id."""
        self.elements.append((counter, e, metadata))
    def __len__(self):
        return len(self.elements)
    def __iter__(self):
        for e in self.elements:
            yield e
    def draw(self, replace=True):
        """Return a uniformly drawn (counter, element, metadata) triple.

        With replace=False the drawn element is removed.  Raises ValueError
        on an empty bin.  (The original `return ValueError(...)` handed the
        exception object back to the caller instead of raising it.)
        """
        if len(self.elements) == 0:
            raise ValueError("can't draw from an empty bin")
        idx = random.randint(0, len(self.elements) - 1)
        if replace:
            choice = self.elements[idx]
        else:
            choice = self.elements.pop(idx)
        return choice
class MeshGrid(object):
    """\
    Groups elements in bins and draw elements by choosing a bin at random.
    """
    BinClass = MeshBin
    def __init__(self, cfg, bounds):
        self.cfg = cfg
        assert all(isinstance(b_min, numbers.Real) and
                   isinstance(b_max, numbers.Real) for b_min, b_max in bounds)
        self.bounds = bounds
        # cfg['res'] is either one integer (same resolution for every axis)
        # or one integer per axis.
        self.res = self.cfg['res']
        if isinstance(self.res, numbers.Integral):
            self.res = len(bounds)*[self.res]
        assert (len(self.res) == len(bounds) and
                all(isinstance(e, numbers.Integral) for e in self.res))
        self.dim = len(bounds)
        self._bins = {None: self.BinClass(self.cfg, None, None)} # a bin for everything not inside the bounds
        self._size = 0
        self._nonempty_bins = []
        self._counter = 0 # uuid for points, != size
    def _coo(self, p):
        """Return the integer bin coordinate of point *p*, or None when *p*
        lies outside the bounds."""
        assert len(p) == self.dim
        coo = []
        for pi, (si_min, si_max), res_i in zip(p, self.bounds, self.res):
            if si_min == si_max:
                coo.append(0)
            else:
                coo.append(int((pi - si_min)/(si_max - si_min)*res_i))
                # a point exactly on the upper bound belongs to the last bin
                if pi == si_max:
                    coo[-1] = res_i - 1
            if si_min > pi or si_max < pi:
                return None
        return tuple(coo)
    def _bounds(self, coo):
        """Return the per-axis real bounds of the bin at *coo* (None -> None)."""
        if coo is None:
            return None
        else:
            bounds = []
            for c_i, (si_min, si_max), res_i in zip(coo, self.bounds, self.res):
                divsize = (si_max - si_min)/res_i
                bounds.append((si_min + c_i*divsize, si_min + (c_i+1)*divsize))
            return bounds
    def __len__(self):
        return self._size
    def resize(self, bounds, res=None):
        """Rebuild the grid with new bounds/resolution, re-adding all elements."""
        elements = [e for bin_ in self._bins.values() for e in bin_]
        res = self.res if res is None else res
        cfg = copy.deepcopy(self.cfg)
        cfg['res'] = res
        self.__init__(cfg, bounds)
        for c, p, md in elements:
            self.add(p, metadata=md)
    def empty_bin(self, p):
        """Return True when the bin that would hold *p* has never been created."""
        coo = self._coo(p)
        return not coo in self._bins
    def _add_to_coo(self, coo, p, metadata):
        # Create the bin lazily, track it as non-empty on first element,
        # and stamp the element with a fresh counter id.
        if not coo in self._bins:
            self._bins[coo] = self.BinClass(self.cfg, coo, self._bounds(coo))
        bin_ = self._bins[coo]
        if len(bin_) == 0:
            self._nonempty_bins.append(bin_)
        bin_.add(p, metadata, self._counter)
        self._counter += 1
    def add(self, p, metadata=None):
        """Add point *p* (with optional metadata); return its bin coordinate."""
        assert len(p) == self.dim
        coo = self._coo(p)
        self._add_to_coo(coo, p, metadata)
        self._size += 1
        return coo
    def draw(self, replace=True, metadata=False):
        """Draw uniformly between existing (non-empty) bins"""
        try:
            idx = random.randint(0, len(self._nonempty_bins) - 1)
        except ValueError:
            raise ValueError("can't draw from an empty meshgrid")
        c, e, md = self._nonempty_bins[idx].draw(replace=replace)
        if not replace:
            self._size -= 1
            # drop the bin from the non-empty list once exhausted
            if len(self._nonempty_bins[idx]) == 0:
                self._nonempty_bins.pop(idx)
        if metadata:
            return e, md
        else:
            return e
    def draw_bin(self):
        """Return a uniformly chosen non-empty bin."""
        if len(self._nonempty_bins) == 0:
            raise ValueError("can't draw from an empty meshgrid")
        return random.choice(self._nonempty_bins)
class ExplorerMeshGrid(object):
    """A meshgrid that accepts and returns s_signals instead of s_vectors"""
    def __init__(self, cfg, s_channels, m_channels=None):
        """Wrap a MeshGrid whose bounds come from the sensory channels."""
        self.m_channels = m_channels
        self.s_channels = s_channels
        s_bounds = [c.bounds for c in self.s_channels]
        self._meshgrid = MeshGrid(cfg, s_bounds)
        # Maps motor vectors to the bin holding the corresponding sensory point.
        self._m_map = {}
    def add(self, s_signal, m_signal=None):
        """Add a sensory signal (with optional motor signal) and return its bin coordinates."""
        s_vector = tools.to_vector(s_signal, self.s_channels)
        coo = self._meshgrid._coo(s_vector)
        # Store the signal itself, with the motor signal as metadata.
        self._meshgrid._add_to_coo(coo, s_signal, metadata=m_signal)
        self._meshgrid._size += 1
        if m_signal is not None:
            m_vector = tools.to_vector(m_signal, self.m_channels)
            self._m_map[m_vector] = self._meshgrid._bins[coo]
        return coo
    @property
    def bins(self):
        # All bins of the underlying meshgrid, keyed by coordinates.
        return self._meshgrid._bins
    @property
    def nonempty_bins(self):
        # Bins currently holding at least one point.
        return self._meshgrid._nonempty_bins
    def draw(self, replace=True):
        """Draw a (s_signal, m_signal) pair from a uniformly chosen non-empty bin."""
        s_signal, m_signal = self._meshgrid.draw(replace=replace, metadata=True)
        return s_signal, m_signal
    def draw_bin(self):
        """Return a non-empty bin chosen uniformly at random."""
        return self._meshgrid.draw_bin()
    def s_signal2bin(self, s_signal):
        """\
        Return the bin which would contain the point, if the bin is not empty.
        Return None otherwise.
        """
        s_vector = tools.to_vector(s_signal, self.s_channels)
        coo = self._meshgrid._coo(s_vector)
        try:
            return self._meshgrid._bins[coo]
        except KeyError:
            return None
    def __len__(self):
        """Number of points stored in the underlying meshgrid."""
        return len(self._meshgrid)
| benureau/explorers | explorers/meshgrid.py | meshgrid.py | py | 5,920 | python | en | code | 0 | github-code | 13 |
15567006016 |
from datetime import date

# Leap years in [1006, 1996] whose last digit is 6, using the proper Gregorian
# rule (divisible by 4, except centuries unless divisible by 400). The original
# used Python-2-only `xrange` and an incorrect leap test
# `(i%4 == 0 or i%400 == 0) and i%100 != 0`; the output is unchanged here
# because no year ending in 6 is a century year.
year = [i for i in range(1006, 1997) if (i % 4 == 0 and (i % 100 != 0 or i % 400 == 0)) and str(i)[-1] == '6']
# Keep the years where January 27 falls on a Tuesday (weekday() == 1).
birthday = []
for i in year:
    if date(i, 1, 27).weekday() == 1:
        birthday.append(i)
print(birthday)
| YoTro/Python_repository | Pygame/15.py | 15.py | py | 247 | python | en | code | 2 | github-code | 13 |
9557462620 | #!/usr/bin/env python2
import rospy
from math import atan2, degrees, radians, fmod
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
from swc_msgs.msg import Control, RobotState, Gps
# Path data: latest planned path (nav_msgs/Path), set by path_callback
desired_path = None
# State data: latest estimated robot state, set by state_estimate_callback
robot_state = None
# Control data: message reused every control cycle, and its publisher
ctrl = Control()
ctrl_pub = rospy.Publisher("/sim/control", Control, queue_size=1)
# Goal data: latest GPS goal, set by goal_callback
goal = Gps()
# Grid size constant
GRID_SIZE = 0.1  # meters per path grid cell (lookahead_idx = lookahead / GRID_SIZE)
def state_estimate_callback(state):
    """Cache the latest estimated robot state (RobotState message)."""
    global robot_state
    robot_state = state
def path_callback(path):
    """Cache the latest planned path (nav_msgs/Path)."""
    global desired_path
    desired_path = path
def goal_callback(gps):
    """Cache the latest GPS goal used for close-range alignment."""
    global goal
    goal = gps
def angle_diff(angle1, angle2):
    """Return the signed difference angle2 - angle1 (radians), wrapped to [-180, 180) degrees."""
    # Work in degrees; shifting by 180 puts the wrap seam at the boundary.
    delta = fmod(degrees(angle2) - degrees(angle1) + 180, 360)
    if delta < 0:
        delta += 360
    return radians(delta - 180)
def trash_pursuit(timer):
    """Timer callback: steer toward the path (or the goal when adjacent to it).

    Publishes a Control message each tick using a proportional heading
    controller. Reads the module globals set by the subscriber callbacks.
    """
    # Control gains
    Kp = 7
    # Stop if the path or state is invalid
    if desired_path is None or robot_state is None:
        ctrl.speed = 0
        ctrl.turn_angle = 0
    # If we are right next to the waypoint, just target it instead of a path
    elif len(desired_path.poses) == 1:
        # Slow down
        ctrl.speed = 2
        # Calculate desired heading difference
        # NOTE(review): atan2(dLat, dLon) implies heading measured from the
        # longitude (east) axis — confirm this matches robot_state.heading.
        dLat = goal.latitude - robot_state.latitude
        dLon = goal.longitude - robot_state.longitude
        desired_hdg = atan2(dLat, dLon)
        # Get the robot's heading
        hdg = robot_state.heading
        # Compute turn angle as a function of error
        error = angle_diff(desired_hdg, hdg)
        # print(hdg, desired_hdg, error)
        ctrl.turn_angle = Kp * error
    # Try to follow the path
    else:
        ctrl.speed = 8
        lookahead = 2 # meters
        # Get the target waypoint
        num_poses = len(desired_path.poses)
        lookahead_idx = int(lookahead / GRID_SIZE)
        # Dirty Pursuit: clamp the lookahead to the end of the path
        if num_poses < lookahead_idx:
            lookahead_idx = num_poses - 1
        if lookahead_idx < 0 or lookahead_idx >= num_poses:
            return
        # Get next desired path point
        path_pt0 = desired_path.poses[0]
        look_pt = desired_path.poses[lookahead_idx]
        # If the robot is really far from the start of the path, wait for the path planner to catch up
        rdx = robot_state.x - path_pt0.pose.position.x
        rdy = robot_state.y - path_pt0.pose.position.y
        dist = rdx**2 + rdy**2  # squared distance; 100 corresponds to 10 m
        if dist > 100:
            ctrl.speed = 0
            ctrl.turn_angle = 0
        # Otherwise follow the path normally
        else:
            # Calculate desired heading difference
            dx = look_pt.pose.position.x - path_pt0.pose.position.x
            dy = look_pt.pose.position.y - path_pt0.pose.position.y
            desired_hdg = atan2(dy, dx)
            # Get the robot's heading
            hdg = robot_state.heading
            # Compute turn angle as a function of error
            error = angle_diff(desired_hdg, hdg)
            # print(hdg, desired_hdg, error)
            ctrl.turn_angle = Kp * error
    # Publish the control needed
    ctrl_pub.publish(ctrl)
def setup():
    """Initialize the ROS node, wire up subscribers, and start the pursuit timer."""
    rospy.init_node("basic_control_node")
    # Subscribe to the path and the current state
    path_sub = rospy.Subscriber("/path", Path, path_callback, queue_size=1)
    pose_sub = rospy.Subscriber("/robot/state", RobotState, state_estimate_callback, queue_size=1)
    # Subscribe to the goal for close range alignment
    goal_sub = rospy.Subscriber("/task/goal", Gps, goal_callback, queue_size=1)
    # Try to follow the first part of the path at all times (0.02 s period = 50 Hz)
    pursuit_timer = rospy.Timer(rospy.Duration(0.02), trash_pursuit, oneshot=False)
    # Pump callbacks
    rospy.spin()
if __name__ == "__main__":
    # Run the node; swallow the shutdown interrupt rospy raises on Ctrl-C.
    try:
        setup()
    except rospy.ROSInterruptException:
        pass
41888166430 | # question 1
def maximum(L):
    """Return the largest element of the non-empty list L."""
    best = L[0]
    for value in L[1:]:
        if value > best:
            best = value
    return best
def maximum_pos(L):
    """Return (maximum value, index of its first occurrence) for the non-empty list L.

    Fixes two bugs in the original: the returned index was the final loop
    index instead of the position of the maximum, and a single-element list
    raised NameError because the loop variable was never bound.
    """
    maxi = L[0]
    pos = 0
    for i in range(1, len(L)):
        if L[i] > maxi:
            maxi = L[i]
            pos = i
    return maxi, pos
print(maximum_pos([2, 5, 22, 5, 0]))
# question 2
def premier_deuxieme_maximum(L):
    """Return ((max, pos), (second max, pos)) for a list L with at least two elements.

    Fixes the original: the initial unpacking assigned a 3-tuple to 4 names
    (ValueError), a new maximum did not demote the old one to second place,
    and the second-maximum branch stored `m1` instead of the new element.
    """
    n = len(L)
    m1, m2, p1, p2 = L[0], L[1], 0, 1
    if m2 > m1:
        m1, m2, p1, p2 = m2, m1, p2, p1
    for i in range(2, n):
        e = L[i]
        if e > m1:
            # The old maximum becomes the second maximum.
            m1, m2, p1, p2 = e, m1, i, p1
        elif e > m2:
            m2, p2 = e, i
    return (m1, p1), (m2, p2)
print(premier_deuxieme_maximum([2, 88, 67, 9, 10]))
33257653737 | #By Sujay Sundar
from tkinter import *
import tkinter as tk
from tkinter.scrolledtext import *
from PIL import Image, ImageTk
#Open Help menu screen in a new window, so user is able to refer to Help Contents as the user continues to use the FBLA Quiz application
def openhelp():
    """Open the Help Contents in a new top-level window.

    Builds a read-only ScrolledText widget mixing explanatory text and
    application screenshots. Images are appended to root.images so Python
    keeps references alive (Tk does not hold its own reference to
    PhotoImage objects).
    """
    #Creating the Help GUI window
    root = Tk()
    root.title("Help Contents")
    root.geometry("1300x700")
    root.images=[]
    #ScrolledText object to hold the Help contents
    help_text = ScrolledText(root,wrap=WORD,font=("Courier",12, "bold"),width=140,height=40)
    help_text.grid(column=0, columnspan=3)
    #Populating the Help contents into the ScrolledText widget
    help_text.insert(END, "FBLA Quiz:\n\n")
    #Adding screenshots from the application for the Help screen
    screenshot1 = PhotoImage(file='../img/Leaderboard.png',master=root)
    root.images.append(screenshot1)
    help_text.image_create(INSERT, image=screenshot1)
    help_text.image=screenshot1
    #Adding screenshots from the application for the Help screen
    screenshot11 = PhotoImage(file='../img/GetStudentScore.png',master=root)
    root.images.append(screenshot11)
    help_text.image_create(INSERT, image=screenshot11)
    help_text.image=screenshot11
    #Adding screenshots from the application for the Help screen
    screenshot9 = PhotoImage(file='../img/FBLAQuiz.png',master=root)
    root.images.append(screenshot9)
    help_text.image_create(INSERT, image=screenshot9)
    help_text.image=screenshot9
    help_text.insert(END, "\n\nAccess The Quiz\n\nTo access the quiz, go to File >> FBLA Quiz, select Student ID and answer all five questions. Click on the Submit Quiz button to submit the quiz and record your score. The application will shutdown in order to allow you to reaccess the application and get a different set of questions to answer.\n\nGet Student Score\n\nTo retrieve a student's quiz score, select the Student ID from the drop down and click on the Get Student Score button.\n\nLeaderboard\n\nTo get the quiz scores of all students (ordered by top score), click on the Leaderboard button.\n\n")
    help_text.insert(END, "Manage Students:\n\n")
    #Adding screenshots from the application for the Help screen
    screenshot2 = PhotoImage(file='../img/ManageStudents.png',master=root)
    root.images.append(screenshot2)
    help_text.image_create(END, image=screenshot2)
    help_text.image=screenshot2
    #Adding screenshots from the application for the Help screen
    screenshot10 = PhotoImage(file='../img/GetAllStudents.png',master=root)
    root.images.append(screenshot10)
    help_text.image_create(END, image=screenshot10)
    help_text.image=screenshot10
    help_text.insert(END, "\n\nAdd New Student\n\nTo add a new student, go to File >> Manage Students, enter Student ID, Student Name, Student Grade. Click on the Add Student button.\n\nUpdate Student\n\nTo update student information, go to File >> Manage Students and enter Student ID and Student Grade. Click on the Update Student button.\n\nGet All Student Records\n\nTo view all student records, go to File >> Manage Students and click on the Get All Student Records button.\n\n")
    help_text.insert(END, "Reports:\n\n")
    #Adding screenshots from the application for the Help screen
    screenshot4 = PhotoImage(file='../img/DownloadMenu.png',master=root)
    root.images.append(screenshot4)
    help_text.image_create(END, image=screenshot4)
    help_text.image=screenshot4
    #Adding screenshots from the application for the Help screen
    screenshot8 = PhotoImage(file='../img/ReportGenerated.png',master=root)
    root.images.append(screenshot8)
    help_text.image_create(END, image=screenshot8)
    help_text.image=screenshot8
    help_text.insert(END, "\n\nStudent Quiz Score Report\n\nTo generate a student's Quiz Score report, go to File >> FBLA Quiz, select Student ID and click on the Quiz Score Report button. A PDF version of the report will be downloaded.\n\nAll Students' Scores Report\n\nTo generate the Quiz Scores of all students, go to Download >> Download Report - All Student Scores. A PDF version of the report will be downloaded.\n\nGenerate Student Report\n\nTo generate a report of all student information, go to Download >> Download Report - All Students and a PDF version of the report will be downloaded.\n\nQuiz Question and Answers Report\n\nTo generate a report of all the FBLA Quiz questions and answers, go to Download >> Download Report - Quiz Question & Answers and a PDF version of the report will be downloaded.\n\n")
    help_text.insert(END, "Help:")
    help_text.insert(END, "\n\nApplication Help\n\nFor help on how to use this application, go to Help >> Help Contents\n\n")
    help_text.insert(END, "About the application:\n\n")
    #Adding screenshots from the application for the Help screen
    screenshot6 = PhotoImage(file='../img/About.png',master=root)
    root.images.append(screenshot6)
    help_text.image_create(END, image=screenshot6)
    help_text.image=screenshot6
    help_text.insert(END, "\n\nAbout\n\nTo know more about the application, go to Help >> About\n\n")
    help_text.insert(END, "Exit:\n\n")
    #Adding screenshots from the application for the Help screen
    screenshot7 = PhotoImage(file='../img/FileMenu.png',master=root)
    root.images.append(screenshot7)
    help_text.image_create(END, image=screenshot7)
    help_text.image=screenshot7
    help_text.insert(END, "\n\nTo exit the application, go to File >> Exit.\n\n")
    #Disabling the ScrolledText widget so user cannot modify content
    help_text.configure(state=DISABLED)
    #Exit button to quit from Help screen
    exit = Button(root, text="Exit Help", command=root.destroy)
    exit.grid(row=1,column=0)
10022420966 | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
    """Return a new dictionary with every value of a_dictionary doubled."""
    return {key: value * 2 for key, value in a_dictionary.items()}
| Jay-Kip/alx-higher_level_programming | 0x04-python-more_data_structures/9-multiply_by_2.py | 9-multiply_by_2.py | py | 287 | python | en | code | 1 | github-code | 13 |
24534118320 | from time import time
class Solution:
    """Three implementations of the valid-palindrome check, plus a timing harness."""

    def isPalindrome_v1(self, s: str):
        """Filter alphanumerics with a comprehension, then compare to the reversed copy."""
        kept = [ch.lower() for ch in s if ch.isalnum()]
        return bool(kept == kept[::-1])

    def isPalindrome_v2(self, s: str):
        """Same idea, classifying characters by ASCII code instead of str.isalnum."""
        def is_alnum_code(code):
            # digits 48-57, uppercase 65-90, lowercase 97-122
            return bool(48 <= code < 58 or 65 <= code < 91 or 97 <= code < 123)

        cleaned = ''.join(ch for ch in s if is_alnum_code(ord(ch))).lower()
        return bool(cleaned == cleaned[::-1])

    def isPalindrome_v3(self, s: str) -> bool:
        """Two-pointer scan that skips non-alphanumeric characters in place."""
        lo, hi = 0, len(s) - 1
        while lo < hi:
            left, right = s[lo].lower(), s[hi].lower()
            if not left.isalnum():
                lo += 1
            elif not right.isalnum():
                hi -= 1
            elif left != right:
                return False
            else:
                lo, hi = lo + 1, hi - 1
        return True

    def test_series(self, s):
        """Time and print the result of each implementation on the same input."""
        for label, impl in (('v1', self.isPalindrome_v1),
                            ('v2', self.isPalindrome_v2),
                            ('v3', self.isPalindrome_v3)):
            start = time()
            print(start)
            print(f'{label} - {impl(s)}')
            print(time() - start)
if __name__ == '__main__':
    sol = Solution()
    # test cases
    str1 = '0P' # expected False ('0' != 'p')
    str2 = 'UPPERCAS!)(*#@$ E lETters' # mixed case + punctuation; not a palindrome
    str3 = 'nowawon' # simple palindrome
    str4 = ''
    # Build a 10000-character all-'1' string as a large palindromic input.
    for _ in range(10000):
        str4 += '1'
    sol.test_series(str2)
| Hintzy/leetcode | Easy/125_valid_palindrome/valid_palindrome.py | valid_palindrome.py | py | 1,749 | python | en | code | 0 | github-code | 13 |
16079722615 | # -*- mode: python -*-
# PyInstaller build spec: analyze the entry script and collect dependencies.
a = Analysis(['./src/htpc-updater.py'],
             hiddenimports=[],
             hookspath=None,
             runtime_hooks=None)
# Bundle the CA certificate so HTTPS verification works from the frozen exe.
a.datas.append(('cacert.pem', 'cacert.pem', 'DATA'))
# Drop kernel32.dll from the bundle; rely on the system's copy instead.
a.binaries = [x for x in a.binaries if x[0].lower() != 'kernel32.dll']
pyz = PYZ(a.pure)
# Build a one-file console executable.
# NOTE(review): ('O','','OPTION') presumably passes -O (optimize) to the
# embedded interpreter — confirm against the PyInstaller spec docs.
exe = EXE(pyz,
          a.scripts + [('O','','OPTION')],
          a.binaries,
          a.zipfiles,
          a.datas,
          name='htpc-updater.exe',
          debug=False,
          strip=None,
          upx=True,
          console=True,
          manifest='htpc-updater.exe.manifest',
          )
| nikola/htpc-updater | htpc-updater.spec | htpc-updater.spec | spec | 593 | python | en | code | 20 | github-code | 13 |
34308768634 | import pandas as pd;
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Train a random forest on the Kaggle digit-recognizer data and write a submission.
digitDf = pd.read_csv('C:\\Kaggle\\DigitRecog\\train.csv')
subDf = pd.read_csv('C:\\Kaggle\\DigitRecog\\sample_submission.csv')

# First column of train.csv holds the digit label; the rest are pixel values.
# The original selected it with digitDf[[0]] (a *label* lookup that fails on
# the string-named Kaggle columns) and misleadingly called it X_test.
y_train = digitDf.iloc[:, 0].values
X_train = digitDf.iloc[:, 1:].values

testDf = pd.read_csv('C:\\Kaggle\\DigitRecog\\test.csv')

rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
pred = rf.predict(testDf)

# Submission indexed by ImageId with the predicted digit as Label.
mySol = pd.DataFrame({'Label': np.array(pred)}, index=subDf.ImageId)
mySol.to_csv('digitSol.csv')
73654039377 | import logging
import os
import time
import uuid
from decimal import *
# Logger
from dynamo_operation import DynamoOperation
from rest.parsers.grafana_parser import GrafanaParser
from rest.parsers.slackbot_parser import SlackbotParser
from rest.parsers.winston_parser import WinstonParser
from rest.builders.grafana_builder import GrafanaBuilder
from rest.builders.slackbot_builder import SlackbotBuilder
from rest.builders.winston_builder import WinstonBuilder
from slack_notification import post_slack
from spam_protection import SpamProtection
from constants import *
logger = logging.getLogger()  # module-wide root logger
logger.setLevel(logging.INFO)
def __retrieve_handlers(event):
    """Pick the (parser, builder) pair matching the event's source agent.

    Returns (None, None) for unrecognized events; the original fell through
    returning a bare None, which crashed the tuple unpacking in post_handler
    with a TypeError instead of reaching the UNKNOWN_AGENT branch.
    """
    if 'reporter' in event:
        return WinstonParser(event), WinstonBuilder()
    if 'user_name' in event:
        return SlackbotParser(event), SlackbotBuilder()
    if 'ruleId' in event:
        return GrafanaParser(event), GrafanaBuilder()
    return None, None
def __new_dynamo_record(event):
    """Decorate the incoming event with bookkeeping fields for the Dynamo record.

    NOTE(review): this mutates the caller's `event` dict in place (no copy),
    so the parsers later see the added fields — confirm that is intended.
    """
    record = event
    record['DataID'] = str(uuid.uuid4())
    record['CreatedAt'] = int(time.time() * 1000000)  # microsecond timestamp
    record['Enabled'] = True
    record['Delayed'] = False
    return record
def __enabled():
    """Return True unless the 'enabled' environment variable disables alerting."""
    flag = os.environ.get('enabled', 'true')
    return flag == 'true'
def __process_request(event, request_parser):
    """Validate/parse the event, post the alert message to Slack.

    Returns (success, message); any exception is caught and reported as a
    generic failure.
    """
    try:
        request_parser.validate()
        data = request_parser.parse()
        # NOTE(review): the .format defaults are evaluated eagerly, so a
        # Grafana event with 'sensor' but no 'value' raises KeyError even
        # when 'msg' is present — confirm parsers guarantee both keys.
        if 'sensor' in data:
            msg = data.get('msg', GRAFANA_ALERT_MSG.format(data['sensor'],
                                                           data['value']))
        else:
            msg = data.get('msg', ALERT_MSG.format(data['floor']))
        success, slack_response = post_slack(msg)
        if success:
            message = SUCCESS_MSG
            logger.info(f"Slack status: {success} with {slack_response}")
        else:
            message = FAILURE_SLACK_MSG
            logger.error(f"Slack status: {success} with {slack_response}")
    except Exception as err:
        success = False
        message = GENERIC_FAILURE_MSG
        logger.error(f"Process Request error: {err}", exc_info=True)
    return success, message
def __build_request(response_builder, message, success):
    """Wrap `message` in a success or error response, depending on `success`."""
    return response_builder.success(message) if success else response_builder.error(message)
def post_handler(event, context):
    """
    Lambda invocation entry point: route the event to the matching
    parser/builder pair, apply the enabled/spam gates, post to Slack, and
    persist the outcome to DynamoDB.
    :param event: raw invocation payload (Grafana, Slackbot or Winston shape)
    :param context: Lambda context (unused)
    :return: agent-specific response built by the matching builder
    :raises ValueError: when no known agent matches the event
    """
    logger.info(f"Received event: {event}")
    record = __new_dynamo_record(event)
    # `or (None, None)` keeps this safe even if the helper returns a bare None.
    request_parser, response_builder = __retrieve_handlers(event) or (None, None)
    if request_parser and response_builder:
        logger.info(f"Using parser {request_parser.__class__.__name__}")
        logger.info(f"Using builder {response_builder.__class__.__name__}")
        spam_protection = SpamProtection()
        if not __enabled():
            # Alerting globally disabled: flag the stored event. (The
            # original re-assigned Enabled = True, a no-op against the
            # record's default — clearly an inverted flag.)
            record['Enabled'] = False
            success = False
            message = DISABLED_MSG
        elif spam_protection.validate():
            success, message = __process_request(event, request_parser)
        else:
            # Rate-limited: mark the stored event as delayed. (The original
            # re-assigned Delayed = False, a no-op against the default.)
            record['Delayed'] = True
            success = True
            message = spam_protection.error
        DynamoOperation().store_event(record)
    else:
        # No builder exists for an unknown agent; fail loudly instead of
        # calling .success()/.error() on None as the original did.
        logger.error(f"Unknown agent for event: {event}")
        raise ValueError(UNKNOWN_AGENT)
    return __build_request(response_builder, message, success)
| gorillalogic/ns-alert-api | handler.py | handler.py | py | 3,332 | python | en | code | 0 | github-code | 13 |
41130611233 | import pygame
from utilidades import get_surface_form_sprite_sheet
class Kame:
    '''
    Represents the kame-hame-ha beam clash for the final stage of the game;
    both the player character and the enemy take part in it.
    '''
    def __init__(self, screen : pygame.Surface,ancho_bar : int, alto_bar: int,poder_enemigo : int, poder_personaje :int, pos_x : int, pos_y : int)-> None:
        """
        Initialize a Kame instance.
        Args:
            screen (pygame.Surface): the game's screen surface.
            ancho_bar (int): initial width of the power bar.
            alto_bar (int): height of the power bar.
            poder_enemigo (int): the enemy's power.
            poder_personaje (int): the player character's power.
            pos_x (int): initial X position.
            pos_y (int): initial Y position.
        Returns: None
        """
        self.choque_list = get_surface_form_sprite_sheet("asset\choque\choque_set.png", 3, 1, 0, 0, 2, False)
        self.frame = 0
        self.image_choque = self.choque_list [self.frame]
        self.rect_choque = self.image_choque.get_rect()
        self.rect_choque.x = 0
        self.rect_choque.y = 0
        # goku sprite (left side)
        self.image_goku = pygame.image.load("asset\goku_poder_final.png")
        self.image_goku = pygame.transform.scale(self.image_goku, (182, 155))
        self.rect_goku = self.image_goku.get_rect()
        self.rect_goku.x = 0
        self.rect_goku.y = 465
        # jacky chun sprite (right side)
        self.image_jack = pygame.image.load("asset\jacky_poder_final.png")
        self.image_jack = pygame.transform.scale(self.image_jack, (182,155))
        # NOTE(review): takes the rect from image_goku; both images are scaled
        # to 182x155 so sizes match — confirm this is intended.
        self.rect_jack = self.image_goku.get_rect()
        self.rect_jack .x = 820
        self.rect_jack .y = 465
        self.ancho = ancho_bar
        self.alto = alto_bar
        self.poder_enemigo = poder_enemigo
        self.poder_personaje = poder_personaje
        self.screen = screen
        self.ancho_bar_2 = ancho_bar
        self.image_1 = pygame.Surface((self.ancho / 2, self.alto))
        self.image_1.fill((64, 144, 244))
        self.rect = self.image_1.get_rect()
        self.rect.x = pos_x
        self.rect.y = pos_y
        self.caida_kame = 5 # Jacky's kame power pushing toward Goku each update
        self.aumento_kame = 40 # Goku's counter-push against Jacky's kame
        self.image_2 = pygame.Surface((self.ancho_bar_2, self.alto))
        self.image_2.fill((39, 117, 211))
        self.rect_2 = self.image_2.get_rect()
        # NOTE(review): re-assigns self.rect (not rect_2) with the same values
        # as above — confirm whether rect_2 was meant here.
        self.rect.x = pos_x
        self.rect.y = pos_y
        self.limit_power_screen = screen.get_width()
        self.time_render = 5
        self.time_render_limit = 5
        self.color_texto = (238, 51, 10)
        self.tamanio_fuente = 45
        self.type_fuente = "Impact"
        self.fuente = pygame.font.SysFont(self.type_fuente, self.tamanio_fuente)
    def draw(self, screen : pygame.Surface)-> None:
        """
        Draw the character, the enemy, the power bars and the clash sprite.
        Args:
            screen (pygame.Surface): the game's screen surface.
        Returns: None
        """
        screen.blit(self.image_goku, self.rect_goku)
        screen.blit(self.image_jack, self.rect_jack)
        screen.blit(self.image_2, self.rect) # Roshi's dark-blue bar
        screen.blit(self.image_1, self.rect) # Goku's light-blue bar
        self.verificar_frames()
        self.draw_choque()
        if(self.time_render > 0):
            text = self.fuente.render("PRESS E!!", True, self.color_texto)
            screen.blit(text, (100,530))
            self.time_render -= 1
        else:
            self.time_render = self.time_render_limit
    def update(self)-> None:
        """
        Update the object state and redraw it.
        Returns: None
        """
        if(self.image_1.get_width() > 0):
            self.image_2 = pygame.Surface((self.ancho_bar_2, self.alto))
            self.image_2.fill((39, 117, 211))
            self.caida_poder()
        self.draw(self.screen)
    def caida_poder(self)-> None:
        """
        Apply the power decay: shrink the bar width and rebuild the
        corresponding surface (the enemy's kame-hame-ha pushing in).
        Returns: None
        """
        if(self.image_1.get_width() > self.caida_kame):
            self.ancho -= self.caida_kame
            self.image_1 = pygame.Surface(((self.ancho / 2) - self.caida_kame, self.alto))
            self.image_1.fill((182, 209, 242))
    def contra_poder(self)-> None:
        """
        Increase the power, counteracting the enemy's power.
        Returns: None
        """
        self.ancho += self.aumento_kame
    def verificar_frames(self):
        """
        Advance the animation frame index, wrapping back to 0 at the end.
        Returns: None
        """
        if(self.frame < len(self.choque_list) -1):
            self.frame += 1
        else:
            self.frame = 0
    def draw_choque(self):
        """
        Draw the clash image on the screen, positioned according to the
        current width of the power bar.
        Returns: None
        """
        self.rect_choque.x = (self.ancho / 2) - 130 # clash sprite position
        self.rect_choque.y = self.rect.y - 45
        if(self.frame < len(self.choque_list) -1):
            image = self.choque_list[self.frame]
            self.screen.blit(image, self.rect_choque)
23325248463 |
#############################
## DEMO PARAMETERS
#############################
# params = {
# 'project': {
# 'directory_project': '/media/rich/bigSSD/analysis_data/face_rhythm/demo_faceRhythm_svoboda/fr_run_20221013_new_script1/',
# 'overwrite_config': False,
# 'initialize_visualization': False,
# 'verbose': 2,
# },
# 'figure_saver': {
# 'format_save': ['png'],
# 'kwargs_savefig': {'bbox_inches': 'tight', 'pad_inches': 0.1, 'transparent': True, 'dpi': 300},
# 'overwrite': True,
# 'verbose': 2,
# },
# 'paths_videos': {
# 'directory_videos': '/media/rich/bigSSD/other lab data/Svoboda_lab/BCI34_2022-07-19/side/2022-07-19_13-34-06',
# 'filename_videos_strMatch': 'trial_.*mp4', ## You can use regular expressions to search and match more complex strings
# 'depth': 2, ## How many folders deep to search directory
# },
# 'BufferedVideoReader': {
# 'buffer_size': 1000,
# 'prefetch': 1,
# 'posthold': 1,
# 'method_getitem': 'by_video',
# 'verbose': 1,
# },
# 'Dataset_videos': {
# 'contiguous': False,
# 'frame_rate_clamp': 240,
# 'verbose': 2,
# },
# 'ROIs': {
# 'select_mode': 'file',
# 'path_file': '/media/rich/bigSSD/analysis_data/face_rhythm/demo_faceRhythm_svoboda/fr_run_20221013_new_2/analysis_files/ROIs.h5',
# 'verbose': 2,
# 'rois_points_idx': [0],
# 'point_spacing': 12,
# },
# 'PointTracker': {
# 'rois_masks_idx': [1],
# 'contiguous': False,
# 'params_optical_flow': {
# 'method': 'lucas_kanade',
# 'mesh_rigidity': 0.01,
# 'mesh_n_neighbors': 15,
# 'relaxation': 0.001,
# 'kwargs_method': {
# 'winSize': [20,20],
# 'maxLevel': 2,
# 'criteria': [2, 0.03],
# },
# },
# 'visualize_video': False,
# 'params_visualization': {
# 'alpha': 0.2,
# 'point_sizes': 2,
# 'writer_cv2': None,
# },
# 'params_outlier_handling': {
# 'threshold_displacement': 80, ## Maximum displacement between frames, in pixels.
# 'framesHalted_before': 30, ## Number of frames to halt tracking before a violation.
# 'framesHalted_after': 30, ## Number of frames to halt tracking after a violation.
# },
# 'verbose': 2,
# },
# 'VQT_Analyzer': {
# 'params_VQT': {
# 'Fs_sample': 240,
# 'Q_lowF': 2,
# 'Q_highF': 8,
# 'F_min': 1,
# 'F_max': 30,
# 'n_freq_bins': 40,
# 'win_size': 901,
# 'plot_pref': False,
# 'downsample_factor': 20,
# 'padding': "valid",
# 'DEVICE_compute': 'cuda:0',
# 'batch_size': 1000,
# 'return_complex': False,
# 'progressBar': True
# },
# 'normalization_factor': 0.95,
# 'spectrogram_exponent': 1.0,
# 'one_over_f_exponent': 0.5,
# 'verbose': 2,
# },
# 'TCA': {
# 'verbose': 2,
# 'rearrange_data':{
# 'names_dims_array': ['xy', 'points', 'frequency', 'time'],
# 'names_dims_concat_array': [['xy', 'points']],
# 'concat_complexDim': False,
# 'name_dim_concat_complexDim': 'time',
# 'name_dim_dictElements': 'trials',
# 'method_handling_dictElements': 'concatenate',
# 'name_dim_concat_dictElements': 'time',
# 'idx_windows': None,
# 'name_dim_array_window': 'time',
# },
# 'fit': {
# 'method': 'CP_NN_HALS',
# # method='CP',
# 'params_method': {
# 'rank': 12,
# 'n_iter_max': 1000,
# 'init': 'random',
# 'svd': 'truncated_svd',
# 'tol': 1e-09,
# # 'nn_modes': [0,1],
# 'verbose': True,
# },
# 'DEVICE': 'cuda:0',
# 'verbose': 2,
# },
# 'rearrange_factors': {
# 'undo_concat_complexDim': False,
# 'undo_concat_dictElements': True,
# },
# },
# }
########################################
## Import parameters from CLI
########################################
import os
# NOTE(review): raises KeyError when run outside an activated conda env —
# confirm whether os.environ.get(...) would be safer here.
print(f"script environment: {os.environ['CONDA_DEFAULT_ENV']}")

## Argparse --path_params, --directory_save
import argparse
parser = argparse.ArgumentParser(
    prog='Face-Rhythm Basic Pipeline',
    description='This script runs the basic pipeline using a json file containing the parameters.',
)
parser.add_argument(
    '--path_params',
    '-p',
    required=True,
    metavar='',
    type=str,
    default=None,
    help='Path to json file containing parameters.',
)
parser.add_argument(
    '--directory_save',
    '-d',
    required=False,
    metavar='',
    type=str,
    default=None,
    help="Directory to use as 'directory_project' and save results to. Overrides 'directory_project' field in parameters file.",
)
args = parser.parse_args()
# Pull the two CLI values into plain variables used by the rest of the script.
path_params = args.path_params
directory_save = args.directory_save
## Checks for path_params and directory_save
from pathlib import Path
## Check path_params
### Check if path_params is valid
# NOTE(review): assert is stripped under `python -O`; an explicit raise would
# be more robust for input validation.
assert Path(path_params).exists(), f"Path to parameters file does not exist: {path_params}"
### Check if path is absolute. If not, convert to absolute path.
if not Path(path_params).is_absolute():
    path_params = Path(path_params).resolve()
    print(f"Warning: Input path_params is not absolute. Converted to absolute path: {path_params}")
### Warn if suffix is not json
print(f"Warning: suffix of path_params is not .json: {path_params}") if Path(path_params).suffix != '.json' else None
print(f"path_params: {path_params}")
## Check directory_save
### Check if directory_save is valid
if args.directory_save is not None:
    assert Path(args.directory_save).exists(), f"Path to directory_save does not exist: {args.directory_save}"
    ### Check if directory_save is absolute. If not, convert to absolute path.
    if not Path(args.directory_save).is_absolute():
        args.directory_save = Path(args.directory_save).resolve()
        print(f"Warning: Input directory_save is not absolute. Converted to absolute path: {args.directory_save}")
    ### Check that directory_save is a directory
    assert Path(args.directory_save).is_dir(), f"Input directory_save is not a directory: {args.directory_save}"
    ### Set directory_save (bug fix: the validated/resolved path was written
    ### only to args.directory_save and never copied back, so the rest of the
    ### script kept using the original, possibly-relative value)
    directory_save = args.directory_save
print(f"directory_save: {directory_save}")
## Load parameters
import json
# Parse the JSON parameters file selected on the command line.
with open(path_params, 'r') as f:
    params = json.load(f)
########################################
## Start script
########################################
import face_rhythm as fr
from pprint import pprint
from pathlib import Path
import time
import cv2
import numpy as np
# Wall-clock start for overall timing.
tic_start = time.time()
fr.util.get_system_versions(verbose=True);
# The CLI --directory_save overrides the parameters file when provided.
directory_project = params['project']['directory_project'] if directory_save is None else directory_save
directory_videos = params['paths_videos']['directory_videos']
filename_videos_strMatch = params['paths_videos']['filename_videos_strMatch']
# Create/load the project directory structure and config file.
path_config, path_run_info, directory_project = fr.project.prepare_project(
    directory_project=directory_project,
    overwrite_config=params['project']['overwrite_config'], ## WARNING! CHECK THIS. If True, will overwrite existing config file!
    mkdir=True,
    initialize_visualization=params['project']['initialize_visualization'],
    verbose=params['project']['verbose'],
)
# Helper that saves figures into the project with consistent settings.
figure_saver = fr.util.Figure_Saver(
    path_config=path_config,
    format_save=params['figure_saver']['format_save'],
    kwargs_savefig=params['figure_saver']['kwargs_savefig'],
    overwrite=params['figure_saver']['overwrite'],
    verbose=params['figure_saver']['verbose'],
)
########################################
## Prepare video data for point tracking
########################################
paths_videos = fr.helpers.find_paths(
dir_outer=directory_videos,
reMatch=filename_videos_strMatch, ## string to use to search for files in directory. Uses regular expressions!
depth=0, ## how many folders deep to search
)[:]
pprint('Paths to videos:') if params['project']['verbose'] > 1 else None
pprint(paths_videos, width=1000) if params['project']['verbose'] > 1 else None
## Make a `BufferedVideoReader` object for reading video file data
videos = fr.helpers.BufferedVideoReader(
# video_readers=data.videos,
paths_videos=paths_videos,
buffer_size=params['BufferedVideoReader']['buffer_size'],
prefetch=params['BufferedVideoReader']['prefetch'],
posthold=params['BufferedVideoReader']['posthold'],
method_getitem=params['BufferedVideoReader']['method_getitem'],
verbose=params['BufferedVideoReader']['verbose'],
)
## Make a `Dataset_videos` object for referencing the raw video data
data = fr.data_importing.Dataset_videos(
bufferedVideoReader=videos,
# paths_videos=paths_videos,
contiguous=params['Dataset_videos']['contiguous'],
frame_rate_clamp=params['Dataset_videos']['frame_rate_clamp'],
verbose=params['Dataset_videos']['verbose'],
);
## Save the `Dataset_videos` object in the 'analysis_files' project folder
data.save_config(path_config=path_config, overwrite=True, verbose=1)
data.save_run_info(path_config=path_config, overwrite=True, verbose=1)
data.save_run_data(path_config=path_config, overwrite=True, verbose=1)
########################################
## Define ROIs
########################################
## Either select new ROIs (`select_mode='gui'`), or import existing ROIs (`path_file=path_to_ROIs.h5_file`).\
## Typically, you should make 1 or 2 ROIs. One for defining where the face points should be and one for cropping the frame.
# %matplotlib notebook
rois = fr.rois.ROIs(
# select_mode='gui',
# exampleImage=data[0][0],
select_mode=params['ROIs']['select_mode'],
path_file=params['ROIs']['path_file'],
verbose=params['ROIs']['verbose'],
)
rois.make_points(
rois=[rois[ii] for ii in params['ROIs']['rois_points_idx']],
point_spacing=params['ROIs']['point_spacing'],
)
## Save the `ROIs` object in the 'analysis_files' project folder
rois.save_config(path_config=path_config, overwrite=True, verbose=1)
rois.save_run_info(path_config=path_config, overwrite=True, verbose=1)
rois.save_run_data(path_config=path_config, overwrite=True, verbose=1)
# ## visualize the ROIs
# rois.plot_masks(data[0][0])
########################################
# Point Tracking
########################################
## Prepare `PointTracker` object.\
## Set `visualize_video` to **`True`** to tune parameters until they look appropriate, then set to **`False`** to run the full dataset through at a much faster speed.
##
## Key parameters:
## - `point_spacing`: distance between points. Vary so that total number of points is appropriate.
## - `mesh_rigidity`: how rigid the mesh elasticity is. Vary so that points track well without drift.
## - `relaxation`: how quickly the points relax back to their home position. Vary so that points track well without dift.
## - `kwargs_method > winSize`: the spatial size of the optical flow calculation. Smaller is better but noisier, larger is less accurate but more robust to noise.
## - `params_outlier_handling > threshold_displacement`: point displacements above this value will result in freezing of the points.
pt = fr.point_tracking.PointTracker(
# buffered_video_reader=videos[:5],
buffered_video_reader=videos,
point_positions=rois.point_positions,
rois_masks=[rois[ii] for ii in params['PointTracker']['rois_masks_idx']],
contiguous=params['PointTracker']['contiguous'],
params_optical_flow={
"method": params['PointTracker']['params_optical_flow']['method'],
"mesh_rigidity": params['PointTracker']['params_optical_flow']['mesh_rigidity'],
"mesh_n_neighbors": params['PointTracker']['params_optical_flow']['mesh_n_neighbors'],
"relaxation": params['PointTracker']['params_optical_flow']['relaxation'],
"kwargs_method": {
"winSize": params['PointTracker']['params_optical_flow']['kwargs_method']['winSize'],
"maxLevel": params['PointTracker']['params_optical_flow']['kwargs_method']['maxLevel'],
"criteria": tuple([cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT] + list(params['PointTracker']['params_optical_flow']['kwargs_method']['criteria'])),
},
},
visualize_video=params['PointTracker']['visualize_video'],
params_visualization={
'alpha': params['PointTracker']['params_visualization']['alpha'],
'point_sizes': params['PointTracker']['params_visualization']['point_sizes'],
},
params_outlier_handling = {
'threshold_displacement': params['PointTracker']['params_outlier_handling']['threshold_displacement'],
'framesHalted_before': params['PointTracker']['params_outlier_handling']['framesHalted_before'],
'framesHalted_after': params['PointTracker']['params_outlier_handling']['framesHalted_after'],
},
verbose=params['PointTracker']['verbose'],
)
## Perform point tracking
pt.track_points()
## Save the `PointTracker` object in 'analysis_files' project directory.\
## Using compression can reduce file sizes slightly but is very slow.
pt.save_config(path_config=path_config, overwrite=True, verbose=1)
pt.save_run_info(path_config=path_config, overwrite=True, verbose=2)
pt.save_run_data(path_config=path_config, overwrite=True, use_compression=False, verbose=1)
## Clear some memory if needed. Optional.
pt.cleanup()
## Load the `PointTracker` data as a dictionary
pt_data = fr.h5_handling.simple_load(str(Path(directory_project) / 'analysis_files' / 'PointTracker.h5'))
########################################
# Spectral Analysis
########################################
## Prepare `VQT_Analyzer` object.
##
## Key parameters:
## - `Q_lowF`: Quality of the lowest frequency band of the spectrogram. Q value is number of oscillation periods.
## - `Q_highF`: Quality of the highest frequency band...
## - `F_min`: Lowest frequency band to use.
## - `F_max`: Highest frequency band to use.
## - `downsample_factor`: How much to downsample the spectrogram by in time.
## - `return_complex`: Whether or not to return the complex spectrogram. Generally set to False unless you want to try something fancy.
Fs = fr.util.load_run_info_file(path_run_info)['Dataset_videos']['frame_rate']
spec = fr.spectral_analysis.VQT_Analyzer(
params_VQT={
'Fs_sample': Fs,
'Q_lowF': params['VQT_Analyzer']['params_VQT']['Q_lowF'],
'Q_highF': params['VQT_Analyzer']['params_VQT']['Q_highF'],
'F_min': params['VQT_Analyzer']['params_VQT']['F_min'],
'F_max': params['VQT_Analyzer']['params_VQT']['F_max'],
'n_freq_bins': params['VQT_Analyzer']['params_VQT']['n_freq_bins'],
'win_size': params['VQT_Analyzer']['params_VQT']['win_size'],
'plot_pref': params['VQT_Analyzer']['params_VQT']['plot_pref'],
'downsample_factor': params['VQT_Analyzer']['params_VQT']['downsample_factor'],
'padding': params['VQT_Analyzer']['params_VQT']['padding'],
'DEVICE_compute': params['VQT_Analyzer']['params_VQT']['DEVICE_compute'],
'batch_size': params['VQT_Analyzer']['params_VQT']['batch_size'],
'return_complex': params['VQT_Analyzer']['params_VQT']['return_complex'],
'progressBar': params['VQT_Analyzer']['params_VQT']['progressBar'],
},
normalization_factor=params['VQT_Analyzer']['normalization_factor'],
spectrogram_exponent=params['VQT_Analyzer']['spectrogram_exponent'],
one_over_f_exponent=params['VQT_Analyzer']['one_over_f_exponent'],
verbose=params['VQT_Analyzer']['verbose'],
)
## Look at a demo spectrogram of a single point.\
## Specify the point with the `idx_point` and `name_points` fields.\
## Note that the `pt_data['points_tracked']` dictionary holds subdictionaries withe numeric string names (ie `['0'], ['1']`) for each video.
# demo_sepc = spec.demo_transform(
# points_tracked=pt_data['points_tracked'],
# point_positions=pt_data['point_positions'],
# idx_point=30,
# name_points='0',
# plot=False,
# );
## Generate spectrograms
spec.transform_all(
points_tracked=pt_data['points_tracked'],
point_positions=pt_data['point_positions'],
)
## Save the `VQT_Analyzer` object in 'analysis_files' project directory.\
## Using compression can reduce file sizes slightly but is very slow.
spec.save_config(path_config=path_config, overwrite=True, verbose=1)
spec.save_run_info(path_config=path_config, overwrite=True, verbose=1)
spec.save_run_data(path_config=path_config, overwrite=True, use_compression=False, verbose=1)
## Clear some memory if needed. Optional.
spec.cleanup()
## Load the `VQT_Analyzer` data as a dictionary
spec_data = fr.h5_handling.simple_load(str(Path(directory_project) / 'analysis_files' / 'VQT_Analyzer.h5'))
########################################
# Decomposition
########################################
## Prepare `TCA` object, and then rearrange the data with the `.rearrange_data` method.
##
## Key parameters for `.rearrange_data`:
## - `names_dims_array`: Enter the names of the dimensions of the spectrogram. Typically these are `'xy', 'points', 'frequency', 'time'`.
## - `names_dims_concat_array`: Enter any dimensions you wish to concatenate along other dimensions. Typically we wish to concatenate the `'xy'` dimension along the `'points'` dimension, so we make a list containing that pair as a tuple: `[('xy', 'points')]`.
## - `concat_complexDim`: If your input data are complex valued, then this can concatenate the complex dimension along another dimension.
## - `name_dim_dictElements`: The `data` argument is expected to be a dictionary of dictionaries of arrays, where the inner dicts are trials or videos. This is the name of what those inner dicts are. Typically `'trials'`.
# spectrograms = spec_data['spectrograms']
spectrograms = {key: np.abs(val) for key,val in list(spec_data['spectrograms'].items())[:]}
tca = fr.decomposition.TCA(
verbose=params['TCA']['verbose'],
)
tca.rearrange_data(
data=spectrograms,
names_dims_array=params['TCA']['rearrange_data']['names_dims_array'],
names_dims_concat_array=params['TCA']['rearrange_data']['names_dims_concat_array'],
concat_complexDim=params['TCA']['rearrange_data']['concat_complexDim'],
name_dim_concat_complexDim=params['TCA']['rearrange_data']['name_dim_concat_complexDim'],
name_dim_dictElements=params['TCA']['rearrange_data']['name_dim_dictElements'],
method_handling_dictElements=params['TCA']['rearrange_data']['method_handling_dictElements'],
name_dim_concat_dictElements=params['TCA']['rearrange_data']['name_dim_concat_dictElements'],
idx_windows=params['TCA']['rearrange_data']['idx_windows'],
name_dim_array_window=params['TCA']['rearrange_data']['name_dim_array_window'],
)
## Fit TCA model.
##
## There are a few methods that can be used:
## - `'CP_NN_HALS'`: non-negative CP decomposition using the efficient HALS algorithm. This should be used in most cases.
## - `'CP'`: Standard CP decomposition. Use if input data are not non-negative (if you are using complex valued spectrograms or similar).
## - `'Randomized_CP'`: Randomized CP decomposition. Allows for large input tensors. If you are using huge tensors and you are memory constrained or want to run on a small GPU, this is your only option.
##
## If you have and want to use a CUDA compatible GPU:
## - Set `DEVICE` to `'cuda'`
## - GPU memory can be saved by setting `'init'` method to `'random'`. However, fastest convergence and highest accuracy typically come from `'init': 'svd'`.
tca.fit(
method=params['TCA']['fit']['method'],
params_method={
'rank': params['TCA']['fit']['params_method']['rank'],
'n_iter_max': params['TCA']['fit']['params_method']['n_iter_max'],
'init': params['TCA']['fit']['params_method']['init'],
'svd': params['TCA']['fit']['params_method']['svd'],
'tol': params['TCA']['fit']['params_method']['tol'],
'verbose': params['TCA']['fit']['params_method']['verbose'],
},
DEVICE=params['TCA']['fit']['DEVICE'],
verbose=params['TCA']['fit']['verbose'],
)
## Rearrange the factors.\
## You can undo the concatenation that was done during `.rearrange_data`
tca.rearrange_factors(
undo_concat_complexDim=params['TCA']['rearrange_factors']['undo_concat_complexDim'],
undo_concat_dictElements=params['TCA']['rearrange_factors']['undo_concat_dictElements'],
)
## Save the `TCA` object in 'analysis_files' project directory.
tca.save_config(path_config=path_config, overwrite=True, verbose=1)
tca.save_run_info(path_config=path_config, overwrite=True, verbose=1)
tca.save_run_data(path_config=path_config, overwrite=True, use_compression=False, verbose=1)
## Clear some memory if needed. Useful if you ran the fit on a GPU. Optional.
tca._cleanup()
# ## Plot factors
# tca.plot_factors(
# figure_saver=None,
# show_figures=True,
# )
## Load the `TCA` data as a dictionary
tca_data = fr.h5_handling.simple_load(str(Path(directory_project) / 'analysis_files' / 'TCA.h5'))
########################################
# Demo playback
########################################
# ## Playback a video with points overlayed.\
# ## Make sure you have a `BufferedVideoReader` object called `videos` made of your videos
# idx_video_to_use = 0
# idx_frames_to_use = np.arange(0,5000)
# videos.method_getitem = 'by_video'
# frame_visualizer = fr.visualization.FrameVisualizer(
# display=True,
# error_checking=True,
# # path_save=str(Path(directory_project) / 'visualizations' / 'point_tracking_demo.avi'),
# path_save=None,
# frame_height_width=videos.frame_height_width,
# frame_rate=240,
# point_sizes=3,
# points_colors=(0,255,255),
# alpha=0.3,
# )
# fr.visualization.play_video_with_points(
# bufferedVideoReader=videos[idx_video_to_use],
# frameVisualizer=frame_visualizer,
# points=list(pt_data['points_tracked'].values())[0],
# idx_frames=idx_frames_to_use,
# )
########################################
# Complete messages
########################################
print(f'RUN COMPLETE')
print(f'Project directory: {directory_project}')
print(f'Time elapsed: {time.time() - tic_start:.2f} seconds') | RichieHakim/face-rhythm | scripts/pipeline_basic.py | pipeline_basic.py | py | 22,787 | python | en | code | 2 | github-code | 13 |
37991093302 | from logging import getLogger
import flask
from app import drones
from app.drones.message import FromDrone
from app.drones.drone import State
# Drone API; Requests: POST; Response: json
drone_api = flask.Blueprint('drone_api', __name__, url_prefix='/drone_api')
# received a message from the drone; authenticated by nginx via mTLS
@drone_api.route('/', methods=['POST'])
def message():
    """Handle a JSON message POSTed by the drone and return our reply.

    Authentication happens upstream (nginx mTLS), so the payload is trusted.
    Dispatches on the message 'type' field: heartbeats update position and
    battery; status updates advance the drone state machine.
    """
    req = flask.request.json
    getLogger('app').info(f'DR_RCV: {req}')
    if req['type'] == FromDrone.HEARTBEAT.value:
        # Heartbeats carry only position and battery level.
        pos = req['pos']
        battery = req['battery']
        drones.droneObj.on_heartbeat(pos, battery)
    elif req['type'] == FromDrone.STATUS_UPDATE.value:
        state = State(req['state'])
        # Fall back to the home facility when the drone reports no facility.
        latest_facility_id_str = req['latest_facility_id'] if req['latest_facility_id'] else drones.home.id_str
        goal_facility_id_str = req['goal_facility_id'] if req['goal_facility_id'] else drones.home.id_str
        drones.droneObj.on_state_update(state, latest_facility_id_str, goal_facility_id_str)
    else:
        # Unknown message types are logged but still answered below.
        getLogger('app').warning(f"DR_RCV: unknown type")
    rep = reply()
    getLogger('app').info(f"DR_SND: {rep}")
    return rep
# messages are answered with an order from us or just an empty order
def reply():
    """Pop and return the pending order for the drone, or an empty order.

    A truthy outbox is consumed (set back to None); otherwise the drone
    receives ``{'type': None}`` meaning "no order".
    """
    pending = drones.droneObj.outbox
    if pending:
        drones.droneObj.outbox = None
        return pending
    return {'type': None}
| Dronesome-Archive/server | app/blueprints/drone_api.py | drone_api.py | py | 1,315 | python | en | code | 0 | github-code | 13 |
69970215058 | from discord.ext import commands
from assets.functions import initembed
class Ping(commands.Cog):
    """Cog exposing a simple latency-check command."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def ping(self, ctx):
        # Reports the bot's websocket latency, rounded to whole milliseconds.
        e = initembed(ctx, ":ping_pong: Pong!")
        e.add_field(name="Latency", value=f"`{round(self.bot.latency * 1000)}ms`")
        await ctx.send(embed=e)
def setup(bot):
    # discord.py extension entry point: registers the cog on load.
    bot.add_cog(Ping(bot))
| ThatOtherAndrew/ThatOtherBot-Old | cogs/tools/stats.py | stats.py | py | 416 | python | en | code | 0 | github-code | 13 |
29848231963 | # Date - Friday August 31, 2018
# This system is built as part of the reasonableness monitoring system
# Author - LHG
import rdflib
from rdflib import Graph
from rdflib import URIRef
import pprint
import os
from datetime import date
# Noun is the actor - by default
def write_rdf(noun, verb, object, context, phrase_dict, fileName='log.n3'):
    """Write a small demo RDF graph and print its serialization.

    NOTE(review): the parameters (noun/verb/object/context/phrase_dict/
    fileName) are currently unused -- the body writes hard-coded demo triples.

    Fixes over the previous version:
    - ``Namespace`` and ``Literal`` were referenced but never imported, and
      the local variable ``rdflib`` shadowed the ``rdflib`` module, so the
      function raised NameError on first use.
    - the trailing cleanup loop referenced an undefined ``path`` (left over
      from a Sleepycat-store example) and always raised; it was removed.
    """
    from rdflib import Namespace, Literal  # not in the module-level imports

    graph = Graph()
    ns = Namespace('http://rdflib.net/test/')
    graph.bind("test", "http://rdflib.net/test/")
    graph.add((ns['pic:1'], ns['name'], Literal('Jane & Bob')))
    graph.add((ns['pic:2'], ns['name'], Literal('Squirrel in Tree')))
    graph.commit()
    print("Triples in RDF Log after add: %d" % len(graph))
    # display the graph in RDF/XML
    print(graph.serialize())
    graph.close()
def read_output(file='out.n3'):
    """Parse an AIR reasoner output file into a list of rule dictionaries.

    Scans the file line by line; a line starting with 'run' and ending with
    'RuleApplication;' opens a rule record, which is then filled from
    subsequent lines (name, input/output triples, quoted description) until
    an IndexError on a short line closes the record.

    Returns:
        list[dict]: one dict per detected rule, with keys among
        'name', 'input', 'output', 'description' and 'compliant'.
    """
    with open(file) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    rule = False
    info = []
    for line in content:
        if rule:
            # NOTE(review): lines were strip()ed above, so line == '\n' can
            # never be true -- this "blank line ends a rule" branch is dead;
            # records are only closed by the IndexError handler below.
            if line == '\n': # we're done
                print("Detected a line")
                print(rules)
                rule = False
                rules = dict()
            else: # Do some processing
                # Quoted text on a ');' line is the rule's description.
                if '"' in line and line.endswith(');'):
                    start = line.find("\"")
                    end = line.find("\"", start+1)
                    quote = line[start+1:end]
                    rules['description'] = quote
                else:
                    tokens = line.split()
                    try:
                        if tokens[0] == 'a:rule':
                            rules['name'] = tokens[1]
                            print(rules)
                        elif tokens[0] == 'pml:inputdata':
                            rules['input'] = (tokens[1], tokens[2], tokens[3])
                        elif tokens[0] == 'pml:outputdata':
                            if 'non-compliant' in tokens[2]:
                                print("Found non-compliant activity!")
                                # NOTE(review): `reasonable` is assigned but
                                # never read or returned.
                                reasonable = False
                                rules['compliant'] = False
                            else:
                                rules['compliant'] = True
                            rules['output'] = (tokens[1], tokens[2], tokens[3])
                    except IndexError:
                        # A line with too few tokens terminates the record.
                        rule = False
                        info.append(rules)
                        rules = dict()
                        continue
        # Start of a new rule-application block.
        if line.startswith('run') and line.endswith('RuleApplication;'):
            rule = True
            rules = dict()
    pprint.pprint(info)
    return info
# TODO - formatting for Lalana
def write_output(act, file='in.n3'):
    """Overwrite *file* with a fresh N3 prefix header, then print *act*.

    The file must already exist (opened 'r+'); its previous contents are
    read (discarded) and replaced by the header.
    """
    with open(file, 'r+') as out:
        out.read()  # original read the existing contents (value unused)
        out.seek(0)
        out.truncate()
        header = [
            "# Author: lgilpin\n",
            "# Date: %s\n\n" % str(date.today()),
            "@prefix foo: <http://foo#>.\n",
            "@prefix ontology: <http://ontology#>.\n",
            "\n",
        ]
        out.writelines(header)
    print(act)
def explain(info, summary):
    """Print a reasonableness verdict and an English explanation.

    Args:
        info: list of rule dicts (as produced by read_output); items with a
            'compliant' key contribute their 'description' to the text.
        summary: list of words describing the perception; joined by
            make_sentence for the final sentence.

    Side effects only (prints); returns None.
    """
    sentence = make_sentence(summary)
    judgement = ''
    explanation = ""
    for item in info:
        print("next iteration")
        print(item)
        if 'compliant' in item:
            # NOTE(review): assumes any item carrying 'compliant' also has a
            # 'description' key -- a missing one would raise KeyError.
            if item['compliant']:
                explanation += item['description']
                explanation += ". "
            else:
                # One non-compliant rule makes the whole perception unreasonable.
                judgement = 'unreasonable'
                explanation += item['description']
                explanation += ". "
        else:
            # Item has no 'compliant' key at all (message text is misleading).
            print("not compliant")
    if judgement == '':
        judgement = 'reasonable'
    print("This perception is %s" %judgement.upper())
    print("==========================================")
    print(explanation)
    print('So it is %s to perceive %s' %(judgement, sentence))
def make_sentence(summary):
    """Join *summary* words with spaces; the leading word is lowercased."""
    sentence = ''
    for word in summary:
        # While nothing has been accumulated, words are lowercased; after
        # that each word is appended verbatim with a separating space.
        sentence = word.lower() if not sentence else f'{sentence} {word}'
    return sentence.strip()
def process_line(line):
    """Return True when the whitespace-separated tokens of *line* contain
    the word 'description'.

    Fixes over the previous version:
    - ``return true`` referenced an undefined name (NameError); now True.
    - ``line.split("\\s")`` split on the literal two characters backslash-s
      rather than on whitespace; now uses plain str.split().
    """
    return 'description' in line.split()
# Read the RDF and get the descriptions
def read_rdf(file='out.n3'):
    """Parse AIR reasoner output (N3) and print each fired rule plus a verdict.

    Prints every rule application found in the graph, dumps all triples, and
    finally reports UNREASONABLE when any non-compliant triple is present,
    REASONABLE otherwise.

    Fixes over the previous version:
    - the *file* parameter was ignored (graph parsing was hard-coded to
      'out.n3'); it is now honored.
    - unused URIRef bindings (car, justification, compliant, description),
      unused accumulators and large commented-out exploration code removed.

    Args:
        file: path to the N3 file produced by the reasoner.
    """
    g = Graph()
    g.parse(file, format='n3')

    # AIR vocabulary terms we match against.
    notCompliant = URIRef('http://dig.csail.mit.edu/TAMI/2007/amord/air#non-compliant-with')
    ruleApplication = URIRef('http://dig.csail.mit.edu/2009/AIR/airjustification#RuleApplication')

    # Collect the subject of every RuleApplication statement.
    runs = []
    for stmt in g.subject_predicates(ruleApplication):
        runs.append(stmt[0])
        pprint.pprint(stmt)

    # Show every predicate/object attached to each rule application.
    for stmt in runs:
        print("For the following")
        print(stmt)
        for thing in g.predicate_objects(URIRef(stmt)):
            print(thing)

    print(len(g))
    print(" ")

    # Dump the full graph for inspection.
    for stmt in g:
        pprint.pprint(stmt)

    # Any non-compliant triple makes the perception unreasonable.
    if (None, notCompliant, None) in g:
        print("This perception is UNREASONABLE")
        print("============================================")
    else:
        print("This perception is REASONABLE")
        print("============================================")
| lgilpin/adaptable-monitoring | process.py | process.py | py | 6,394 | python | en | code | 0 | github-code | 13 |
35831956838 | import copy
from ..bag import Bag
from ..items import Items
from ..equipment import Equipment
from ..item_set_general import Item_sets
from simul_items import Item_model,Item_update_table
from simul_item_builder import Item_builder,Item_set_stage_builder,Item_set_item_model
from simul_sets import Item_set
from simul_skills import Skills
def get_simul_items(bag:Bag,current_equipment:Equipment,items:Items) -> list[Item_model]:
    """Build simulation item models for everything in the bag plus all
    currently equipped items.

    The caller's bag is never mutated: work happens on a deep copy into
    which the equipped items are first merged.
    """
    working_bag = copy.deepcopy(bag)
    for equipped in current_equipment:
        # equipped is a (slot, item_id) pair -- only the id matters here.
        working_bag.add_item(item_id=equipped[1])
    models = []
    for item_id in working_bag.item_dict:
        spec = items.get_item(item_id=item_id)
        models.append(Item_builder(item_specific_dict=spec).build_all())
    return models
def null_simul_set_instance() -> Item_set_item_model:
    """Return a neutral set bonus: no stat updates and all-zero modifiers."""
    no_updates = Item_update_table(*(False for _ in range(8)))
    return Item_set_item_model(
        updates=no_updates,
        status=Skills.null_skill(),
        item_drop=0,
        product_drop=0,
        workpoints=0,
        regeneration=0,
        damage=0,
        speed=0,
        exp_bonus=0,
    )
def get_simul_set_instance(sets:Item_sets) -> Item_set:
    """Yield an Item_set for every set definition in *sets*.

    NOTE(review): despite the scalar return annotation this is a generator;
    the annotation should arguably be Iterator[Item_set].
    """
    for set_name,set in sets.set_list.items():
        yield Item_set(
            name = set.name,
            set_id= set.key,
            item_list= set.list_items,
            # When bonus_dict is a list it presumably encodes "no bonuses";
            # a null bonus instance is substituted -- TODO confirm upstream.
            bonuses_dict_by_number = {
                i :Item_set_stage_builder( x ).build()
                for i,x in set.bonus_dict.items()
            } if type(set.bonus_dict) != list else null_simul_set_instance())
def get_simul_sets(sets:Item_sets) ->list[Item_set]:
    """Materialize every simulated item set into a list."""
    return list(
        get_simul_set_instance(sets)
    )
73155240336 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 13:48:17 2019
@author: danaukes
"""
#derived from https://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
import PIL
import numpy
import scipy.fftpack as fft
from PIL.Image import Image
import matplotlib.pyplot as plt
import os
import yaml
import file_sorter.support as support
#Image.resize()
#d = 64
#d2 =18
#byte_order = 'big'
def bool_array_to_int(aaa):
    """Pack a sequence (or ndarray) of booleans, MSB first, into an integer."""
    try:
        bits = aaa.flatten().tolist()  # ndarray -> flat python list
    except AttributeError:
        bits = aaa  # already a plain sequence
    value = 0
    for bit in bits:
        value = (value << 1) | (1 if bit else 0)
    return value
def int_to_bool_array(phash):
    """Expand a 64-bit perceptual hash into an 8x8 boolean array.

    Bit 63 (MSB) maps to position [0, 0]; bit 0 maps to [7, 7] -- the
    inverse of bool_array_to_int.

    Fix: the deprecated ``numpy.bool`` alias (removed in NumPy 1.24) is
    replaced by the builtin ``bool`` dtype.
    """
    a1 = numpy.array([((phash>>(ii)&1)) for ii in reversed(range(64))], dtype=bool)
    a2 = a1.reshape((8,8))
    return a2
def square_shrink(i, d):
    """Resize image *i* to a d x d square using bilinear resampling."""
    return i.resize((d, d), resample=PIL.Image.BILINEAR)
def dct2(a):
    """2-D orthonormal DCT-II: transform along axis 0, then along axis 1."""
    along_cols = fft.dct(a, axis=0, norm='ortho')
    return fft.dct(along_cols, axis=1, norm='ortho')
def idct2(a):
    """Inverse of dct2: 2-D orthonormal DCT-III along axis 0 then axis 1."""
    along_cols = fft.idct(a, axis=0, norm='ortho')
    return fft.idct(along_cols, axis=1, norm='ortho')
def gen_p_hash_alt(aname,d=32, d2 = 18):
    """Fallback perceptual hash for images the main path hashes to 0.

    Handles two special cases before hashing: RGBA images are hashed on
    their inverted alpha channel; single-channel images are inverted,
    rescaled and replicated to 3 channels. Returns 0 when neither case
    applies.

    Args:
        aname: path of the image file.
        d: side length the image is shrunk to before the DCT.
        d2: size of the low-frequency DCT corner kept for the hash.
    """
    i=PIL.Image.open(aname)
    i2 = numpy.array(i)
    p_hash=0
    found = False
    if len(i2.shape)==3:
        if i2.shape[2]==4:
            # RGBA: hash the inverted alpha channel as grayscale.
            i3 = 255-i2[:,:,3]
            i4 = PIL.Image.fromarray(i3)
            a = i4.convert('L')
            found = True
    elif len(i2.shape)==2:
        # Single channel: invert/rescale (assumes values fit a 0-32-ish
        # range -- TODO confirm), then replicate to a 3-channel image.
        i3 = 255-(numpy.array((i2/32*255)))
        i3 = numpy.array(i3,dtype=numpy.uint8)
        i4=numpy.array([i3,i3,i3])
        i5 = i4.transpose(1,2,0)
        i6 = PIL.Image.fromarray(i5)
        a = i6.convert('L')
        found = True
    if found:
        # Standard pHash pipeline: shrink, 2-D DCT, keep the low-frequency
        # corner, threshold against its mean (DC term excluded).
        s = square_shrink(a,d)
        adct = dct2(numpy.array(s))
        adct2 = adct[:d2,:d2]
        mean = adct2.flatten()[1:].mean()
        p_hash = bool_array_to_int(adct2>mean)
    return p_hash
def gen_p_hash(aname,d=32, d2 = 18):
    """Compute a DCT-based perceptual hash for the image at *aname*.

    Shrinks the grayscale image to d x d, takes the 2-D DCT, keeps the
    d2 x d2 low-frequency corner and thresholds against its mean (DC term
    excluded). Falls back to gen_p_hash_alt when the result is 0 (e.g.
    transparent or flat images). Returns None for unreadable files.
    """
    try:
        i=PIL.Image.open(aname)
        a = i.convert('L')
        s = square_shrink(a,d)
        adct = dct2(numpy.array(s))
        adct2 = adct[:d2,:d2]
        mean = adct2.flatten()[1:].mean()
        p_hash = bool_array_to_int(adct2>mean)
        if p_hash==0:
            # A zero hash usually means a degenerate image; retry with the
            # alpha/single-channel-aware fallback.
            p_hash = gen_p_hash_alt(aname,d,d2)
        return p_hash
    except PIL.UnidentifiedImageError:
        return None
def gen_p_hash_opt(aname,d=32, d2 = 18):
    """Condensed variant of gen_p_hash (same algorithm, fewer temporaries).

    Returns the perceptual hash, or None for unreadable files.
    """
    try:
        # shrink -> 2-D DCT -> keep the d2 x d2 low-frequency corner.
        adct2 = dct2(numpy.array(square_shrink(PIL.Image.open(aname).convert('L'),d)))[:d2,:d2]
        mean = adct2.flatten()[1:].mean()
        p_hash = bool_array_to_int(adct2>mean)
        if p_hash==0:
            p_hash = gen_p_hash_alt(aname,d,d2)
        return p_hash
    except PIL.UnidentifiedImageError:
        return None
def int_to_img(phash):
    """Render a 64-bit perceptual hash as an 8x8 boolean PIL image."""
    return PIL.Image.fromarray(int_to_bool_array(phash))
def filter_img_filetype(filename):
    """Return True when *filename* has a JPEG/PNG extension (case-insensitive)."""
    _, ext = os.path.splitext(filename)
    return ext.lower() in ('.jpg', '.jpeg', '.png')
if __name__ == '__main__':
    # Ad-hoc demo (indentation was mangled in the original and large swaths
    # of commented-out experimentation have been removed): load one PNG,
    # invert/rescale its intensity, replicate to 3 channels and convert to
    # grayscale -- mirroring the single-channel branch of gen_p_hash_alt.
    p1 = 'C:/Users/danaukes/Dropbox (ASU)/idealab/presentations/2020-03-05 Research Talk/reduced/images-reduced/image330.png'
    i = PIL.Image.open(p1)
    i2 = numpy.array(i)
    i3 = 255 - (numpy.array((i2 / 32 * 255)))
    i3 = numpy.array(i3, dtype=numpy.uint8)
    i4 = numpy.array([i3, i3, i3])
    i5 = i4.transpose(1, 2, 0)
    i6 = PIL.Image.fromarray(i5)
    i7 = i6.convert('L')
24770249019 | import wx
from pug.syswx.wxconstants import *
class AguiLabelSizer(wx.BoxSizer):
    """AguiLabelSizer(parent, label='', line=True, font=None)
    parent: the parent window
    label: the text to display
    line: if True, create a line at the bottom of the sizer
    font: a font object to be used for the font... defaults to default font

    A sizer that contains properly formatted text for attribute guis
    Includes a spacer to lower text WX_TEXTEDIT_LABEL_YOFFSET pixels, the static
    text object, which is stored in AguiLabelSizer.text, and a wx.line control,
    which is stored in AguiLabelSizer.line, to visually separate text from text on
    next line.
    """
    def __init__(self, parent, label='', line=True, style=0, font=None):
        wx.BoxSizer.__init__(self, orient=wx.VERTICAL)
        text = wx.StaticText(parent, label=label, style=style)
        if font:
            text.SetFont(font)
        text.SetMinSize((-1,-1))
        # Push the label down so it lines up with adjacent edit controls.
        self.AddSpacer((1, WX_TEXTEDIT_LABEL_YOFFSET))
        textSizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        textSizer.Add(text, 1, wx.EXPAND | wx.WEST, 2)
        self.AddSizer(textSizer, 1, wx.EXPAND)
        if line:
            # NOTE(review): the `line` parameter is rebound to the control
            # here, and self.line only exists when line was truthy -- callers
            # reading .line after line=False would hit AttributeError.
            line = wx.StaticLine(parent=parent, style = 0)
            self.Add(line, flag=wx.EXPAND)
            self.line = line
        self.textCtrl = text
# FOR SASH
#        self.preferredWidth = self.MinSize[0]

    def SetLabel(self, text=''):
        # Update the displayed label text in place.
        self.textCtrl.Label = text
| sunsp1der/pug | pug/syswx/agui_label_sizer.py | agui_label_sizer.py | py | 1,481 | python | en | code | 0 | github-code | 13 |
SECTION = "section"
CHAPTER = "chapter"


class SectionItem(object):
    """A single table-of-contents entry (a chapter or a section)."""

    def __init__(self, section_name, section_type=SECTION, exercise=False, exercise_path='', line_pos=0):
        # Content discovered later during conversion.
        self.contains_exercises = None
        self.lines = []
        self.markdown = None
        self.codio_section = None
        # Identity and placement of the entry.
        self.section_name = section_name
        self.section_type = section_type
        self.line_pos = line_pos
        # Exercise association.
        self.exercise = exercise
        self.exercise_path = exercise_path

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        summary = {
            'section_name': self.section_name,
            'section_type': self.section_type,
            'exercise': self.exercise,
            'exercise_path': self.exercise_path,
            'contains_exercises': self.contains_exercises,
            'line_pos': self.line_pos
        }
        return str(summary)
| codio/book-converter | converter/guides/item.py | item.py | py | 883 | python | en | code | 2 | github-code | 13 |
38148684454 | import os
from pathlib import Path
from tqdm import tqdm
DATA_DIR = Path(__file__).parents[2] / 'data/stpp'
def gen_bar_updater():
    """Build a urlretrieve-style reporthook that drives a tqdm progress bar."""
    progress = tqdm(total=None)

    def update(count, block_size, total_size):
        # Late-bind the total once the server reports a content length.
        if progress.total is None and total_size:
            progress.total = total_size
        # reporthook gives cumulative block counts; advance by the delta.
        progress.update(count * block_size - progress.n)

    return update
def download_url(url, root, filename=None):
    """Download a file from a url and place it under DATA_DIR/root.

    Args:
        url (str): URL to download file from
        root (str): Directory (relative to DATA_DIR) to place downloaded file in
        filename (str, optional): Name to save the file under. If None, use
            the basename of the URL

    Returns:
        bool: True on completion; network errors propagate as exceptions.
    """
    # Fix: `import urllib` alone does not bind the `urllib.request`
    # submodule; import it explicitly.
    import urllib.request

    if not filename:
        filename = os.path.basename(url)

    fpath = DATA_DIR / root
    fpath.mkdir(parents=True, exist_ok=True)
    fpath = fpath / filename

    urllib.request.urlretrieve(url, str(fpath), reporthook=gen_bar_updater())
    return True
| mbilos/neural-flows-experiments | nfe/experiments/stpp/data/download_utils.py | download_utils.py | py | 1,011 | python | en | code | 73 | github-code | 13 |
29520988185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import http.cookiejar
import os
import urllib.request
import urllib.parse
import urllib.error
cookie_filename = "cookies"
class SimpleScraper():
    """Minimal cookie-persisting HTTP client built on urllib.

    Cookies are loaded from / saved to the module-level `cookie_filename`
    so sessions survive restarts.
    """

    def __init__(self):
        self.cj = http.cookiejar.MozillaCookieJar(cookie_filename)
        if os.access(cookie_filename, os.F_OK):
            self.cj.load()
        # Fix: the previous version built a throwaway opener and immediately
        # replaced it; build the fully configured opener once.
        self.opener = urllib.request.build_opener(
            urllib.request.HTTPRedirectHandler(),
            urllib.request.HTTPHandler(debuglevel=0),
            urllib.request.HTTPSHandler(debuglevel=0),
            urllib.request.HTTPCookieProcessor(self.cj)
        )
        self.opener.addheaders = [
            ('User-agent', ('Mozilla/5.0 (Windows NT 5.1; rv:24.0) Gecko/20100101 Firefox/24.0'))
        ]
        self.cj.save()

    def get(self, url):
        """GET *url*; return the decoded body, or False on any error."""
        try:
            response = self.opener.open(url)
            return ''.join(v.decode('utf-8') for v in response.readlines())
        except Exception:  # was a bare except:; keep the False-on-failure contract
            return False

    def post(self, url, data):
        """POST form *data* (a str->str dict) to *url*; return the decoded
        body, or False on any error."""
        try:
            post_data = urllib.parse.urlencode(dict([k, v.encode('utf-8')]
                                               for k, v in list(data.items())))
            response = self.opener.open(url, post_data.encode())
            return ''.join(v.decode('utf-8') for v in response.readlines())
        except Exception:
            return False
| in-rolls/indian-politician-bios | get_data/archive_india_gov/scraper.py | scraper.py | py | 1,485 | python | en | code | 12 | github-code | 13 |
33038182016 | import sys
sys.stdin = open('input.txt')
def dfs(index, sm):
    """Depth-first search over take-or-skip choices for each day.

    index: current day (0-based); sm: accumulated profit so far.
    Uses module globals N (number of days), lst (list of (duration, profit)
    pairs) and ans (best total profit found so far).
    """
    global N, ans
    # Base case: finished exactly at the last day.
    if index == N:
        # Keep the best accumulated profit seen so far.
        if ans < sm:
            ans = sm
        return
    # Guard: a chosen job ran past the last day, so this path is invalid.
    if index > N-1:
        return
    # (left from development: visited-based pruning was considered)
    # if lst[index][0] not in visited:
    dfs(index+1, sm)
    dfs(index + lst[index][0], sm + lst[index][1])


# Script driver: read N jobs from stdin and print the maximum profit.
N = int(input())
lst = []
ans = 0
for _ in range(N):
    A, B = map(int, input().split())
    lst.append((A, B))
dfs(0, 0)
print(ans)
# print(lst)
| Seobway23/Laptop | Algorithm/May/퇴사/퇴사.py | 퇴사.py | py | 545 | python | en | code | 0 | github-code | 13 |
72213359059 |
import argparse, logging, copy
from types import SimpleNamespace
from contextlib import nullcontext
import torch
from torch import optim
import torch.nn as nn
import numpy as np
from fastprogress import progress_bar
from utils import *
from modules import UNet_conditional #,EMA
#best results achieved with 500 epochs, 32x32 images and 4000 training pictures
config = SimpleNamespace(
run_name = "DDPM_conditional",
epochs = 500,
noise_steps=1000,
seed = 42,
batch_size = 10,
image_size = 32,
num_classes = 5,
dataset_path = r"dataConditional",
train_folder = r"train_folder",
val_folder = r"test_folder",
device = "cuda",
slice_size = 1,
do_validation = True,
fp16 = True,
log_every_epoch = 10,
num_workers=2,
lr = 5e-3)
logging.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s", level=logging.INFO, datefmt="%I:%M:%S")
class Diffusion:
    """DDPM-style conditional diffusion wrapper around UNet_conditional.

    Holds the linear beta noise schedule, the model, and the training /
    sampling loops.  Classifier-free guidance is applied at sample time.
    """
    def __init__(self, noise_steps=1000, beta_start=1e-4, beta_end=0.02, image_size=256, num_classes=10, c_in=3, c_out=3, device="cuda", **kwargs):
        self.noise_steps = noise_steps
        self.beta_start = beta_start
        self.beta_end = beta_end
        # Pre-compute the schedule and its cumulative products once.
        self.beta = self.prepare_noise_schedule().to(device)
        self.alpha = 1. - self.beta
        self.alpha_hat = torch.cumprod(self.alpha, dim=0)
        self.image_size = image_size
        self.model = UNet_conditional(c_in, c_out, num_classes=num_classes, **kwargs).to(device)
        self.device = device
        self.c_in = c_in
        self.num_classes = num_classes
    def prepare_noise_schedule(self):
        # Linear beta schedule from beta_start to beta_end.
        return torch.linspace(self.beta_start, self.beta_end, self.noise_steps)
    def sample_timesteps(self, n):
        # Uniform random timesteps in [1, noise_steps).
        return torch.randint(low=1, high=self.noise_steps, size=(n,))
    def noise_images(self, x, t):
        "Add noise to images at instant t (forward process); returns (x_t, noise)"
        sqrt_alpha_hat = torch.sqrt(self.alpha_hat[t])[:, None, None, None]
        sqrt_one_minus_alpha_hat = torch.sqrt(1 - self.alpha_hat[t])[:, None, None, None]
        Ɛ = torch.randn_like(x)
        return sqrt_alpha_hat * x + sqrt_one_minus_alpha_hat * Ɛ, Ɛ
    @torch.inference_mode()
    def sample(self, use_ema, labels, cfg_scale=3):
        """Run the full reverse process; returns uint8 images, one per label.

        NOTE(review): ``use_ema`` is currently ignored -- ``self.model`` is
        used regardless of its value; confirm whether an EMA model was meant.
        ``cfg_scale > 0`` enables classifier-free guidance (lerp between the
        unconditional and conditional noise predictions).
        """
        model = self.model
        n = len(labels)
        logging.info(f"Sampling {n} new images....")
        model.eval()
        with torch.inference_mode():
            x = torch.randn((n, self.c_in, self.image_size, self.image_size)).to(self.device)
            for i in progress_bar(reversed(range(1, self.noise_steps)), total=self.noise_steps-1, leave=False):
                t = (torch.ones(n) * i).long().to(self.device)
                predicted_noise = model(x, t, labels)
                if cfg_scale > 0:
                    uncond_predicted_noise = model(x, t, None)
                    predicted_noise = torch.lerp(uncond_predicted_noise, predicted_noise, cfg_scale)
                alpha = self.alpha[t][:, None, None, None]
                alpha_hat = self.alpha_hat[t][:, None, None, None]
                beta = self.beta[t][:, None, None, None]
                if i > 1:
                    noise = torch.randn_like(x)
                else:
                    # Final step is deterministic (no added noise).
                    noise = torch.zeros_like(x)
                x = 1 / torch.sqrt(alpha) * (x - ((1 - alpha) / (torch.sqrt(1 - alpha_hat))) * predicted_noise) + torch.sqrt(beta) * noise
        # Map from [-1, 1] back to [0, 255] uint8.
        x = (x.clamp(-1, 1) + 1) / 2
        x = (x * 255).type(torch.uint8)
        print("shape of sampled output:", x.shape)
        return x
    def train_step(self, loss):
        # One AMP optimizer step: scale, backprop, step, update scaler, LR step.
        self.optimizer.zero_grad()
        self.scaler.scale(loss).backward()
        self.scaler.step(self.optimizer)
        self.scaler.update()
        self.scheduler.step()
    def one_epoch(self, train=True):
        """Run one epoch; returns the accumulated MSE loss.

        NOTE(review): this iterates ``self.train_dataloader`` even when
        ``train=False``, so "validation" re-uses the training loader; and
        the return value is the *sum* of batch losses (``.mean()`` on a 0-d
        tensor is a no-op), not the average -- confirm both are intended.
        NOTE(review): ``with A and B`` enters only one of the two context
        managers (``and`` picks one object); likely meant ``with A, B:``.
        """
        avg_loss = 0.
        if train: self.model.train()
        else: self.model.eval()
        pbar = progress_bar(self.train_dataloader, leave=False)
        for i, (images, labels) in enumerate(pbar):
            with torch.autocast("cuda") and (torch.inference_mode() if not train else torch.enable_grad()):
                images = images.to(self.device)
                labels = labels.to(self.device)
                t = self.sample_timesteps(images.shape[0]).to(self.device)
                x_t, noise = self.noise_images(images, t)
                if np.random.random() < 0.1:
                    # Drop labels 10% of the time to train the unconditional
                    # branch required for classifier-free guidance.
                    labels = None
                predicted_noise = self.model(x_t, t, labels)
                loss = self.mse(noise, predicted_noise)
                avg_loss += loss
                if train:
                    self.train_step(loss)
                pbar.comment = f"MSE={loss.item():2.3f}"
        return avg_loss.mean().item()
    """A function that tracks the progress of the diffusion training.
    A picture with 5 class colated is saved in the results folder"""
    def images_progress(self, epoch):
        # Sample one image per class and save them as a progress snapshot.
        labels = torch.arange(self.num_classes).long().to(self.device)
        sampled_images = self.sample(use_ema=False, labels=labels)
        save_images(sampled_images, os.path.join("results", f"progress",f"epoch{epoch}.jpg"))
    """A function that generates n photos for each class, storing them in separate folders"""
    def generate_n(self, n, model):
        # NOTE(review): hard-codes 5 classes and the "cuda" device here,
        # unlike the rest of the class which uses self.num_classes/self.device.
        labels = torch.arange(5).long().to("cuda")
        self.model = model
        for i in range(n):
            sampled_images = self.sample(use_ema=False, labels=labels)
            for j in labels:
                save_images(sampled_images[j], os.path.join("results", f"class{j+1}",f"picture{i+1}.jpg"))
    def load(self, model_cpkt_path, model_ckpt="ckpt.pt"):
        # Restore model weights from a state-dict checkpoint file.
        self.model.load_state_dict(torch.load(os.path.join(model_cpkt_path, model_ckpt)))
    def save_model(self, run_name, epoch=-1):
        "Save model locally (whole module, not just the state dict)"
        #torch.save(self.model.state_dict(), os.path.join("models", run_name, f"ckpt.pt"))
        torch.save(self.model, os.path.join("models", run_name, f"model_cond.pt"))
        #torch.save(self.ema_model.state_dict(), os.path.join("models", run_name, f"ema_ckpt.pt"))
        #torch.save(self.optimizer.state_dict(), os.path.join("models", run_name, f"optim.pt"))
    def prepare(self, args):
        # Create output folders, data loaders, optimizer, one-cycle LR
        # schedule, MSE loss and the AMP gradient scaler.
        mk_folders(args.run_name)
        self.train_dataloader, self.val_dataloader = get_data(args)
        self.optimizer = optim.AdamW(self.model.parameters(), lr=args.lr, eps=1e-5)
        self.scheduler = optim.lr_scheduler.OneCycleLR(self.optimizer, max_lr=args.lr,
                                                 steps_per_epoch=len(self.train_dataloader), epochs=args.epochs)
        self.mse = nn.MSELoss()
        self.scaler = torch.cuda.amp.GradScaler()
    def fit(self, args):
        # Full training loop: train each epoch, optionally validate,
        # periodically dump sample images, and save the model every epoch.
        for epoch in progress_bar(range(args.epochs), total=args.epochs, leave=True):
            logging.info(f"Starting epoch {epoch}:")
            _ = self.one_epoch(train=True)
            ## validation
            if args.do_validation:
                avg_loss = self.one_epoch(train=False)
            #log progress
            if epoch % args.log_every_epoch == 0:
                self.images_progress(epoch)
            #save model
            self.save_model(run_name=args.run_name, epoch=epoch)
def parse_args(config, argv=None):
    """Parse command-line hyper-parameters and write them onto *config*.

    Every recognised flag defaults to the value already stored on *config*,
    so flags that are not supplied leave the config attribute unchanged.

    Args:
        config: namespace-like object whose attributes are updated in place.
        argv: optional list of argument strings; ``None`` (the default)
            falls back to ``sys.argv[1:]``.  Passing an explicit list makes
            the function usable from tests and other code.
    """
    parser = argparse.ArgumentParser(description='Process hyper-parameters')
    parser.add_argument('--run_name', type=str, default=config.run_name, help='name of the run')
    parser.add_argument('--epochs', type=int, default=config.epochs, help='number of epochs')
    parser.add_argument('--seed', type=int, default=config.seed, help='random seed')
    parser.add_argument('--batch_size', type=int, default=config.batch_size, help='batch size')
    parser.add_argument('--image_size', type=int, default=config.image_size, help='image size')
    parser.add_argument('--num_classes', type=int, default=config.num_classes, help='number of classes')
    parser.add_argument('--dataset_path', type=str, default=config.dataset_path, help='path to dataset')
    parser.add_argument('--device', type=str, default=config.device, help='device')
    parser.add_argument('--lr', type=float, default=config.lr, help='learning rate')
    parser.add_argument('--slice_size', type=int, default=config.slice_size, help='slice size')
    parser.add_argument('--noise_steps', type=int, default=config.noise_steps, help='noise steps')
    args = vars(parser.parse_args(argv))
    # Copy every parsed value back onto the shared config namespace.
    for k, v in args.items():
        setattr(config, k, v)
if __name__ == '__main__':
parse_args(config)
set_seed(config.seed)
#train the model
diffuser = Diffusion(config.noise_steps, image_size=config.image_size, num_classes=config.num_classes)
diffuser.prepare(config)
diffuser.fit(config)
#generate n images for each class
model = torch.load("models/DDPM_conditional/model_cond.pt")
model.eval() #for dropout and batchnorm
diffuser.generate_n(30, model) | ShreyaSridhar5/DiffusionModels | ddpm_conditional.py | ddpm_conditional.py | py | 9,150 | python | en | code | 0 | github-code | 13 |
6606408792 | __author__ = "Heta Rekilä \n Sami Voutilainen"
__version__ = "2.0"
import time
import widgets.input_validation as iv
import widgets.gui_utils as gutils
from PyQt5 import uic
from PyQt5 import QtWidgets
from widgets.scientific_spinbox import ScientificSpinBox
class TargetInfoDialog(QtWidgets.QDialog):
    """
    Dialog for editing target name and description, plus choosing between
    a manual and a dynamically computed reference density.
    """
    def __init__(self, target):
        """
        Initialize the dialog.

        Args:
            target: Target object.
        """
        super().__init__()
        uic.loadUi(gutils.get_ui_dir() / "ui_target_info.ui", self)
        self.target = target
        self.okPushButton.clicked.connect(self.__accept_settings)
        self.cancelPushButton.clicked.connect(self.close)
        self.fields_are_valid = True
        # Mark the name field invalid (red) until validated by check_text.
        iv.set_input_field_red(self.nameLineEdit)
        self.nameLineEdit.textChanged.connect(
            lambda: iv.check_text(self.nameLineEdit, qwidget=self))
        self.nameLineEdit.textEdited.connect(
            lambda: iv.sanitize_file_name(self.nameLineEdit))
        self.nameLineEdit.setEnabled(False)
        # Accepted values are copied here by __accept_settings.
        self.name = ""
        self.nameLineEdit.setText(target.name)
        self.descriptionLineEdit.setPlainText(target.description)
        self.description = ""
        self.isOk = False
        self.dateLabel.setText(time.strftime("%c %z %Z", time.localtime(
            target.modification_time)))
        # Reference density: manual (user-entered) vs. dynamic (computed).
        self.manual_value = self.target.reference_density.manual_density
        self.dynamic_value = self.target.reference_density.dynamic_density
        self.use_user_value = self.target.reference_density.use_user_value
        self.scientific_spinbox_manual = ScientificSpinBox(
            value=self.manual_value, minimum=0.0, maximum=9.99e26)
        if not self.use_user_value:
            self.scientific_spinbox_manual.setEnabled(False)
        self.useManualValueCheckBox.setChecked(self.use_user_value)
        self.useManualValueCheckBox.stateChanged.connect(self.toggle_settings)
        self.formLayout.insertRow(
            4,
            QtWidgets.QLabel(r"Manual [at./cm<sup>3</sup>]:"),
            self.scientific_spinbox_manual)
        self.valueLabelDynamic.setText(f"{self.dynamic_value}")
        # Hide unnecessary UI elements for now, instead of deleting.
        self.nameLineEdit.hide()
        self.descriptionLineEdit.hide()
        self.dateLabel.hide()
        self.label_31.hide()
        self.label_32.hide()
        self.label_33.hide()
        self.resize(self.minimumSizeHint())
        self.__close = True
        # Modal: blocks until the user accepts or cancels.
        self.exec_()
    def toggle_settings(self):
        """Enable/disable the manual density spinbox when the checkbox flips."""
        if self.use_user_value:
            self.scientific_spinbox_manual.setEnabled(False)
            self.use_user_value = False
        else:
            self.scientific_spinbox_manual.setEnabled(True)
            self.use_user_value = True
    def __accept_settings(self):
        """Function for accepting the current settings and closing the dialog
        window.
        """
        if not self.fields_are_valid:
            QtWidgets.QMessageBox.critical(self, "Warning",
                                           "Target must always have "
                                           "a name.\nPlease input a name for "
                                           "the target.",
                                           QtWidgets.QMessageBox.Ok,
                                           QtWidgets.QMessageBox.Ok)
            self.__close = False
        else:
            # Copy the (validated) widget values onto the result attributes.
            self.name = self.nameLineEdit.text()
            self.description = self.descriptionLineEdit.toPlainText()
            if self.use_user_value:
                self.manual_value = self.scientific_spinbox_manual.value()
            self.isOk = True
            self.__close = True
        if self.__close:
            self.close()
| JYU-IBA/potku | dialogs/simulation/target_info_dialog.py | target_info_dialog.py | py | 3,852 | python | en | code | 7 | github-code | 13 |
23845828026 | from collections import defaultdict
def _star1() -> int:
    """Product of the 1-jolt and 3-jolt gap counts in the adapter chain.

    Both counters start at 1: the step from the outlet (0) to the first
    adapter is assumed to be a 1-jolt gap, and the step up to the device
    is always a 3-jolt gap.
    """
    adapters = sorted(int(line.strip()) for line in open("../../inputs/day10.txt"))
    pairs = list(zip(adapters, adapters[1:]))
    ones = 1 + sum(1 for lo, hi in pairs if hi - lo == 1)
    threes = 1 + sum(1 for lo, hi in pairs if hi - lo == 3)
    return ones * threes
def _star2() -> int:
    """Count distinct adapter arrangements via DP over jolt values.

    ways[j] is the number of arrangements reaching jolt value j; each
    adapter may follow any adapter 1, 2 or 3 jolts below it.
    """
    jolts = [0] + sorted(int(line.strip()) for line in open("../../inputs/day10.txt"))
    ways = defaultdict(int)
    ways[0] = 1
    for j in jolts[1:]:
        ways[j] = ways[j - 1] + ways[j - 2] + ways[j - 3]
    return ways[jolts[-1]]
def day10():
    """Solve and print both parts of day 10."""
    star1 = _star1()
    star2 = _star2()
    print("DAY 10\n=====\nSTAR 1: {}\nSTAR 2: {}\n".format(star1, star2))
| henryjetmundsen/AdventOfCode | src/2020/python/src/solutions/day10.py | day10.py | py | 773 | python | en | code | 0 | github-code | 13 |
17783237833 | from itertools import islice
from math import prod
def day16(inp):
    """Decode the hex BITS transmission; return (version sum, expression value)."""
    hex_digits = inp.strip()
    # Expand each hex digit to exactly 4 bits.  Going through a single
    # int(..., 16) would silently drop leading zero *nibbles*, corrupting
    # the first packet header whenever the transmission starts with '0'.
    bits = ''.join(f'{int(c, 16):04b}' for c in hex_digits)
    it = iter(bits)
    metadata = parse_packet(it)
    part1, part2 = compute_scores(metadata)
    return part1, part2
def parse_packet(it):
    """Parse one BITS packet from the bit iterator *it*.

    Returns ``(version, type_id, payload)`` where payload is the literal
    value for type 4 packets and a tuple of sub-packet metadata otherwise.
    """
    def take(n):
        # Consume n bits from the shared iterator as a string.
        return ''.join(islice(it, n))

    version_id = int(take(3), 2)
    type_id = int(take(3), 2)

    if type_id == 4:
        # Literal value: 5-bit groups, leading bit flags continuation.
        chunks = []
        while True:
            group = take(5)
            chunks.append(group[1:])
            if group[0] == '0':
                break
        return (version_id, type_id, int(''.join(chunks), 2))

    # Operator packet: sub-packets delimited by bit length or by count.
    subpackets = []
    if next(it) == '0':
        total_bits = int(take(15), 2)
        sub_it = iter(take(total_bits))
        while True:
            try:
                subpackets.append(parse_packet(sub_it))
            except ValueError:
                # int('') raised on the next (missing) header: section consumed.
                break
    else:
        n_packets = int(take(11), 2)
        for _ in range(n_packets):
            subpackets.append(parse_packet(it))
    return (version_id, type_id, tuple(subpackets))
def compute_scores(metadata):
    """Recursively evaluate a parsed BITS packet.

    Returns ``(version total, value)`` where the value is defined by the
    packet's operator type (sum/product/min/max/literal/gt/lt/eq).

    Raises:
        ValueError: on an unrecognised type id.  (Previously an unknown id
        fell through all branches and hit an unbound-variable NameError.)
    """
    version_id, type_id, payload = metadata
    if type_id == 4:
        # Literal packet: the payload is the value itself.
        return version_id, payload
    subversions, subscores = zip(*[compute_scores(subpacket) for subpacket in payload])
    version_total = version_id + sum(subversions)
    if type_id == 0:
        score = sum(subscores)
    elif type_id == 1:
        score = prod(subscores)
    elif type_id == 2:
        score = min(subscores)
    elif type_id == 3:
        score = max(subscores)
    elif type_id == 5:
        # Comparison operators always carry exactly two sub-packets.
        score = subscores[0] > subscores[1]
    elif type_id == 6:
        score = subscores[0] < subscores[1]
    elif type_id == 7:
        score = subscores[0] == subscores[1]
    else:
        raise ValueError(f"unknown packet type id: {type_id}")
    return version_total, int(score)
if __name__ == "__main__":
testinp = open('day16.testinp').read()
print(*day16(testinp))
inp = open('day16.inp').read()
print(*day16(inp))
| adeak/AoC2021 | day16.py | day16.py | py | 2,406 | python | en | code | 1 | github-code | 13 |
3285189623 | __all__ = ["init", "current_export_schema_ver"]
import atexit
import sys
import colorama
from packaging.version import Version
from . import command_impl_core
from . import completions
from . import locks
from . import sequence_impl_core
from . import shared
from . import shortcuts
__version__ = "0.3.0.dev0"
INTERNAL_SCHEMA_CHANGE_VERSIONS = ["0.3.0"]
EXPORT_SCHEMA_CHANGE_VERSIONS = ["0.3.0"]
if sys.version_info < (3, 7):
sys.stderr.write("\nPython version 3.7 or later is required.\n")
sys.exit(1)
def schema_ver_for_package_ver(query_package_ver_str, schema_change_versions):
    """Return the requested schema version for a chaintool package version.

    This function takes the base of the given package version (e.g. if given
    "0.3.0.dev0" it will work with "0.3.0") and compares it to the
    ``schema_change_versions`` to determine the appropriate schema version
    number used by that version of chaintool: the number of schema-change
    releases at or before the queried version.

    :param query_package_ver_str: package version to calculate schema for
    :type query_package_ver_str: str
    :param schema_change_versions: package versions at which schema changed,
        in ascending order
    :type schema_change_versions: list[str]
    :returns: schema version for the given package version, or None if the
        input version string could not be evaluated
    :rtype: int | None
    """
    query_package_ver = Version(Version(query_package_ver_str).base_version)
    # Walk the ascending change list and return the index of the first
    # change release newer than the query.  (The previous implementation
    # walked the list in reverse and returned on the first hit, which gave
    # the wrong answer whenever the list held more than one entry; results
    # are identical for the current single-entry lists.)
    for schema_ver, package_ver_str in enumerate(schema_change_versions):
        if query_package_ver < Version(package_ver_str):
            return schema_ver
    # Query is at or past the newest change release.
    return len(schema_change_versions)
def internal_schema_ver_for_package_ver(query_package_ver_str):
    """Return the internal schema version for a chaintool package version.

    Thin wrapper around :func:`schema_ver_for_package_ver` that supplies
    :const:`INTERNAL_SCHEMA_CHANGE_VERSIONS` as the schema-change history.

    :param query_package_ver_str: package version to calculate schema for
    :type query_package_ver_str: str
    :returns: internal schema version for the given package version, or None
        if the input version string could not be evaluated
    :rtype: int | None
    """
    change_history = INTERNAL_SCHEMA_CHANGE_VERSIONS
    return schema_ver_for_package_ver(query_package_ver_str, change_history)
def init_modules():
    """If schema is changing, check for validity; then init modules.

    Initialize the non-schema-dependent :mod:`colorama`, :mod:`.shared`, and
    :mod:`.locks` modules. Then grab the meta-lock.

    While holding the meta-lock, load the schema version for the current
    stored config/data and compare it to the schema version used by our
    current package. If the stored version is larger, that's bad... a newer
    chaintool that uses a different format has been running, and has changed
    the schema to something we don't understand. In that case, exit with error.

    Otherwise, call the init functions for :mod:`.command_impl_core`,
    :mod:`.sequence_impl_core`, :mod:`.shortcuts`, and :mod:`.completions`.
    Pass them the old and new schema versions in case they need to update
    their stored data formats.

    Finally update the last-stored-version info for schema, the chaintool
    package, and Python (last two are just informative). Release the meta-lock
    and return.
    """
    colorama.init()
    # Make sure terminal state is restored on interpreter exit.
    atexit.register(colorama.deinit)
    shared.init()
    locks.init()
    with locks.META_LOCK:
        this_schema_ver = internal_schema_ver_for_package_ver(__version__)
        last_schema_ver = shared.get_last_schema_version()
        # Package/Python versions are informative only, for the error below.
        last_chaintool_ver = shared.get_last_chaintool_version()
        last_python_ver = shared.get_last_python_version()
        if last_schema_ver > this_schema_ver:
            # A newer chaintool already migrated the stored data; we cannot
            # read it, so bail out rather than corrupt anything.
            shared.errprint(
                "\nA more recent version of chaintool ({}) has been run on"
                " this system (using Python version {}). The version of"
                " chaintool you are attempting to run ({}) cannot use the"
                " newer config/data format that is now in place.\n".format(
                    last_chaintool_ver, last_python_ver, __version__
                )
            )
            sys.exit(1)
        # Give each schema-dependent module a chance to migrate its data.
        command_impl_core.init(last_schema_ver, this_schema_ver)
        sequence_impl_core.init(last_schema_ver, this_schema_ver)
        shortcuts.init(last_schema_ver, this_schema_ver)
        completions.init(last_schema_ver, this_schema_ver)
        # Record what was last run, still under the meta-lock.
        shared.set_last_schema_version(this_schema_ver)
        shared.set_last_chaintool_version(__version__)
        this_python_ver = sys.version
        if " " in this_python_ver:
            # Keep only the bare version number, e.g. "3.9.1".
            this_python_ver = this_python_ver[: this_python_ver.index(" ")]
        shared.set_last_python_version(this_python_ver)
def init():
    """Idempotent initialization of chaintool's files and configurations.

    This function must be called at least once before using chaintool for
    the first time, and after any upgrade to a newer chaintool version. It
    is automatically invoked with every use of the chaintool command; you
    would only need to explicitly invoke it if you are calling functions in
    the chaintool modules from other code.
    """
    # All real work happens in init_modules(); this wrapper is the stable
    # public entry point.
    init_modules()
def current_export_schema_ver():
    """Return the export schema version understood by this chaintool version.

    Computed from the current package version (``__version__``) and the
    export schema-change history (:const:`EXPORT_SCHEMA_CHANGE_VERSIONS`)
    via :func:`schema_ver_for_package_ver`.

    :returns: export schema version, or None if the current chaintool
        version string could not be evaluated
    :rtype: int | None
    """
    package_ver = __version__
    return schema_ver_for_package_ver(package_ver, EXPORT_SCHEMA_CHANGE_VERSIONS)
| neogeographica/chaintool | src/chaintool/__init__.py | __init__.py | py | 5,958 | python | en | code | 0 | github-code | 13 |
70273468178 | from __future__ import print_function
import tensorflow as tf
import random
import sys,glob
if './360video/' not in sys.path:
sys.path.insert(0, './360video/')
# from mycode.dataLayer import DataLayer
from mycode.dataLayer2 import DataLayer
import mycode.cost as costfunc
from mycode.provide_hidden_state import multilayer_perceptron_hidden_state_series,\
multilayer_perceptron_hidden_state,dynamicRNN_hidden_state
from mycode.config import cfg
from mycode.dataIO import clip_xyz
import mycode.utility as util
import _pickle as pickle
import numpy as np
import pdb
is_test = True
test_epoch=1
# tag = "july11"
# tag = "pred_10_july11"
# tag = "meanvar2fc_pred_10_july12"
# tag = "raw_pred_10_july13"
# tag='5-5meanvar_july13'
# tag='3-3meanvar_july13'
# tag='1-1meanvar_july14'
# tag = "raw_pred_10_TV_july13"
# tag = "raw_pred_10_TV_sum1reg_july13"
# tag='phi_theta_2dGaussian_july15'
# tag='mixture_2dGaussian_july16'
# tag='1dlikelihood_xyz_july17'
# tag='1dlikelihood_xyz_july17_noreg_5-5'
# tag='oneD_gaussian_loss_5-5'
# tag='1dlikelihood_xyz_noreg_5-5_mixed_july18'
# tag='1dlikelihood_xyz_noreg_5-5_mixed_stride1_july18'
# tag='mse_mixed_stride1_july19'
# tag='mixture_framelvl_july19' #overlapping 15
# tag='pred_end_of_stroke_noresidualinput_july23'
# tag='pred_end_of_stroke_300-300_subsampled_july25'
# tag='300-300_subsampled_residual_july25'
# tag='300-300_subsampled_residual_10bernolli_july25'
# tag='60-60_subsampled_residual_xyz_july26'
# tag='60-60_subsampled_residual_xyz_mapfunc_july26'
tag='shanghai_split_aug22'
# use_reg = True
# training_epochs = cfg.training_epochs
training_epochs = 5
batch_size = cfg.batch_size #need to be smaller than (one video length)/running_length
display_step = 200
fps = cfg.fps
experiment = 1
# Network Parameters
num_layers = 2
# num_user = 48
truncated_backprop_length = cfg.running_length
n_hidden = 400 # hidden layer num of features
# n_output = cfg.running_length
if cfg.use_xyz:
if cfg.use_mixed_dataset:
all_video_data = pickle.load(open('./data/merged_dataset.p','rb'))
else:
# Tsinghua dataset
# all_video_data = pickle.load(open('./data/new_exp_'+str(experiment)+'_xyz.p','rb'))
# Shanghai dataset
if not is_test:
all_video_data = pickle.load(open('./360video/data/shanghai_dataset_xyz_train.p','rb'))
else:
all_video_data = pickle.load(open('./360video/data/shanghai_dataset_xyz_test.p','rb'))
data_dim = 3
datadb = clip_xyz(all_video_data)
elif cfg.use_yaw_pitch_roll:
all_video_data = pickle.load(open('./data/exp_2_raw.p','rb'))
data_dim = 2 #only use yaw and pitch
datadb = all_video_data
elif cfg.use_cos_sin:
all_video_data = pickle.load(open('./data/exp_2_raw_pair.p','rb'))
data_dim = 2
datadb = all_video_data
elif cfg.use_phi_theta:
all_video_data = pickle.load(open('./data/new_exp_'+str(experiment)+'_phi_theta.p','rb'))
data_dim = 2
if cfg.predict_eos:
data_dim += 1
datadb = all_video_data
if cfg.process_in_seconds:
data_dim = data_dim*fps
if cfg.subsample_datadb:
datadb = util.subsample_datadb(datadb)
if cfg.use_residual_input:
datadb_original = datadb
# mu1,std1,mu2,std2 = util.get_mu_std(datadb_original,vid_ind=cfg.test_video_ind)
datadb = util.data_to_step_residual(datadb)
else:
if cfg.predict_eos:
datadb = util._insert_end_of_stroke2(datadb)
if not is_test:
# tf Graph input
if cfg.own_history_only:
x = tf.placeholder("float", [None, truncated_backprop_length, data_dim])
else:
x = tf.placeholder("float", [None, truncated_backprop_length, 48*data_dim])
if cfg.has_reconstruct_loss:
y = tf.placeholder("float", [None, cfg.running_length, data_dim])
else:
y = tf.placeholder("float", [None, cfg.predict_len, data_dim])
else:
x = tf.placeholder(dtype=tf.float32, shape=[None, 1, data_dim])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1, data_dim])
# TODO: embedding
# can only handle int inputs???
# embeddings = tf.Variable(tf.random_uniform([200, 64], -1.0, 1.0), dtype=tf.float32)
# x = tf.nn.embedding_lookup(embeddings, x)
# decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
# for population distribution
# others_future_further = tf.placeholder("float", [None, 47, cfg.predict_step*fps, data_dim/fps])
# states c,h
init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, n_hidden])
state_per_layer_list = tf.unstack(init_state, axis=0)
rnn_tuple_state = tuple([tf.contrib.rnn.LSTMStateTuple(state_per_layer_list[idx][0],
state_per_layer_list[idx][1])
for idx in range(num_layers)])
# A placeholder for indicating each sequence length
# seqlen = tf.placeholder(tf.int32, [None])
dropout = tf.placeholder(tf.float32)
# Define weights
# weights = {
# 'out': tf.Variable(tf.random_normal([n_hidden, n_output]))
# }
# biases = {
# 'out': tf.Variable(tf.random_normal([n_output]))
# }
def pred_cnn_model_fn(inputs):
    """Use a stack of 1-D convolutions to predict a raw trajectory.

    NOTE(review): expand_dims(inputs, 1) yields a 4-D tensor if `inputs`
    already is (batch, time, latent) as the comment below says, but
    tf.layers.conv1d expects 3-D input -- presumably `inputs` actually
    arrives as (batch, latent); confirm against the caller.
    """
    # inputs.shape (batch_size, time, latent_dim) e.g. 8*1*64
    inputs = tf.expand_dims(inputs,1)
    # if input temporal dimension=1, this is equivalent to 3 fc layers
    conv1 = tf.layers.conv1d(
          inputs=inputs,
          filters=128,
          kernel_size=5,
          padding="same",
          activation=tf.nn.relu)
    conv2 = tf.layers.conv1d(
          inputs=conv1,
          filters=256,
          kernel_size=5,
          padding="same",
          activation=tf.nn.relu)
    # Final layer outputs fps*3 channels (one xyz triple per frame),
    # squashed to [-1, 1] by tanh.
    conv3 = tf.layers.conv1d(
          inputs=conv2,
          filters=fps*3,
          kernel_size=5,
          padding="same",
          activation=tf.nn.tanh)
    prediction = conv3
    return prediction
def manual_dynamicRNN(x, seqlen, weights, biases):
    """Single-layer LSTM built on static_rnn, made length-aware by hand.

    Uses `sequence_length` plus manual gather of each sample's last valid
    output, then a linear readout.  Relies on the module-level globals
    `truncated_backprop_length` and `n_hidden`; the local `batch_size`
    below deliberately shadows the module-level one with a dynamic value.
    """
    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
    # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, truncated_backprop_length, 1)
    # Define a lstm cell with tensorflow
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
    # Get lstm cell output, providing 'sequence_length' will perform dynamic
    # calculation.
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,
                                sequence_length=seqlen)
    # When performing dynamic calculation, we must retrieve the last
    # dynamically computed output, i.e., if a sequence length is 10, we need
    # to retrieve the 10th output.
    # However TensorFlow doesn't support advanced indexing yet, so we build
    # a custom op that for each sample in batch size, get its length and
    # get the corresponding relevant output.
    # 'outputs' is a list of output at every timestep, we pack them in a Tensor
    # and change back dimension to [batch_size, n_step, n_input]
    outputs = tf.stack(outputs)
    outputs = tf.transpose(outputs, [1, 0, 2])
    # Hack to build the indexing and retrieve the right output.
    batch_size = tf.shape(outputs)[0]
    # Start indices for each sample
    index = tf.range(0, batch_size) * truncated_backprop_length + (seqlen - 1)
    # Indexing
    outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
    # Linear activation, using outputs computed above
    return tf.matmul(outputs, weights['out']) + biases['out']
def dynamicRNN(x,dropout):
    """Multi-layer LSTM (num_layers deep) over `x` using tf.nn.dynamic_rnn.

    Returns (states_series, current_state): the per-timestep outputs and the
    final (c, h) state tuple.  Initial state comes from the module-level
    `rnn_tuple_state`.

    NOTE(review): the two branches below build the identical graph; the only
    difference is the AUTO_REUSE variable scope in the predict_len>1 case.
    """
    cells = []
    for _ in range(num_layers):
        # cell = tf.contrib.rnn.GRUCell(n_hidden)
        cell = tf.contrib.rnn.LSTMCell(n_hidden,state_is_tuple=True)
        # `dropout` is a keep-complement: output_keep_prob = 1 - dropout.
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=1.0 - dropout)
        cells.append(cell)
    cell = tf.contrib.rnn.MultiRNNCell(cells)
    if cfg.predict_len>1:
        with tf.variable_scope("dynamicRNN", reuse=tf.AUTO_REUSE):
            # Batch size x time steps x features.
            states_series, current_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32,
                        initial_state=rnn_tuple_state)
            # # Batch size x time steps x 1 (1 output at 1 timestamp)
            # pred_output = tf.contrib.layers.fully_connected(
            #     states_series, 1, activation_fn=None)
    else:
        states_series, current_state = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32,
                    initial_state=rnn_tuple_state)
    return states_series, current_state
def concat_current_state(current_state,mlp_state):
    """Concat last hidden and memory states from LSTM and MLP,
    predict using another FC.

    Uses the module-level `batch_size` and `cfg.running_length`.
    """
    if len(current_state)>1:
        # multiple layers
        # only keep the last [c,h]
        current_state = current_state[-1]
    c,h = current_state[0],current_state[1]
    # Interleave (c, h, mlp_state) then flatten per sample.
    concat_state = tf.reshape(tf.stack((c,h,mlp_state),axis=-1),[batch_size,-1])
    # CAUTION: directly predicting whole length!!
    finalpred = tf.contrib.layers.fully_connected(
        concat_state, cfg.running_length, activation_fn=None)
    return finalpred
def concat_states_series(states_series,states_series_others):
    """Concat per-timestep state series from LSTM and MLP,
    predict using another FC (2 outputs per timestep).
    """
    # Stack the two series on a new last axis, then merge into the feature
    # dimension: (batch, running_length, 2*n_hidden).
    concat_state = tf.reshape(tf.stack((states_series,states_series_others),axis=-1),[batch_size,cfg.running_length,n_hidden*2])
    # Batch size x time steps x 1 (1 output at 1 timestamp)
    finalpred = tf.contrib.layers.fully_connected(
        concat_state, 2, activation_fn=None)
    return finalpred
def _pred_mean_var_xyz(state):
    """Predict per-axis mean and variance for (x, y, z) with one linear layer."""
    # One linear projection to 6 values: 3 means followed by 3 variances.
    pred = tf.contrib.layers.fully_connected(state, 6, activation_fn=None)
    ux, uy, uz = (pred[:, i:i + 1] for i in range(3))
    # Variances must be non-negative, hence the abs.
    varx, vary, varz = (tf.abs(pred[:, i:i + 1]) for i in range(3, 6))
    return ux, uy, uz, varx, vary, varz
def _pred_mean_var_xyz2(state):
    """Predict mean and variance for x, y, z with two parallel FC heads.

    Means are tanh-bounded to [-1, 1]; variances use ReLU.
    NOTE(review): ReLU can emit an exact 0 variance, which would break a
    Gaussian likelihood downstream -- confirm the loss guards against it.
    """
    # predict mean and variance for x,y,z
    # using 2 fc layers
    pred = tf.contrib.layers.fully_connected(state, 3, activation_fn=tf.nn.tanh)
    ux = tf.slice(pred,[0,0],[-1,1])
    uy = tf.slice(pred,[0,1],[-1,1])
    uz = tf.slice(pred,[0,2],[-1,1])
    # variance must >0
    pred2 = tf.contrib.layers.fully_connected(state, 3, activation_fn=tf.nn.relu)
    varx = tf.slice(pred2,[0,0],[-1,1])
    vary = tf.slice(pred2,[0,1],[-1,1])
    varz = tf.slice(pred2,[0,2],[-1,1])
    return ux,uy,uz,varx,vary,varz
def _pred_mean_var_phi_theta(state):
    """Predict mean and variance for (phi, theta) with one linear layer.

    Output layout: [u_phi, u_theta, var_phi, var_theta]; variances are
    made non-negative via abs.
    """
    # predict mean and variance for phi,theta
    pred = tf.contrib.layers.fully_connected(state, 2*2, activation_fn=None)
    u1 = tf.slice(pred,[0,0],[-1,1])
    u2 = tf.slice(pred,[0,1],[-1,1])
    # variance must >0
    var1 = tf.abs(tf.slice(pred,[0,2],[-1,1]))
    var2 = tf.abs(tf.slice(pred,[0,3],[-1,1]))
    return u1,u2,var1,var2
def _pred_mean_var_phi_theta_2dgassian(state):
    """Predict one bivariate Gaussian over (theta, phi) from *state*.

    Returns the means, per-dimension stddevs (non-negative via abs) and the
    correlation coefficient rho clipped to [-1, 1].
    """
    # BUG FIX: this layer previously read the undefined name `internal`
    # (copied from _GMM_2dgassian) instead of the `state` argument, which
    # raised NameError whenever this head was selected.
    pred = tf.contrib.layers.fully_connected(state, 5, activation_fn=None)
    utheta = tf.slice(pred,[0,0],[-1,1])
    uphi = tf.slice(pred,[0,1],[-1,1])
    sigma_theta = tf.slice(pred,[0,2],[-1,1])
    sigma_phi = tf.slice(pred,[0,3],[-1,1])
    rho = tf.slice(pred,[0,4],[-1,1])
    # Stddevs must be non-negative; rho must lie in [-1, 1].
    sigma_theta = tf.abs(sigma_theta)
    sigma_phi = tf.abs(sigma_phi)
    rho = tf.clip_by_value(rho,-1,1)
    return utheta,uphi,sigma_theta,sigma_phi,rho
def _pred_mean_var_xyz2_new(state):
    """Predict mean and variance for x, y, z with two 2-layer FC heads.

    Means are tanh-bounded to [-1, 1]; variances are exp of a linear
    output, guaranteeing strictly positive values.
    """
    # predict mean and variance for x,y,z
    # using 2 fc layers
    pred = tf.contrib.layers.fully_connected(state, 32, activation_fn=tf.nn.relu)
    pred = tf.contrib.layers.fully_connected(pred, 3, activation_fn=tf.nn.tanh)
    ux = tf.slice(pred,[0,0],[-1,1])
    uy = tf.slice(pred,[0,1],[-1,1])
    uz = tf.slice(pred,[0,2],[-1,1])
    # variance must >0 (exp keeps it strictly positive)
    pred2 = tf.contrib.layers.fully_connected(state, 32, activation_fn=tf.nn.relu)
    pred2 = tf.contrib.layers.fully_connected(pred2, 3, activation_fn=None)
    pred2 = tf.exp(pred2)
    varx = tf.slice(pred2,[0,0],[-1,1])
    vary = tf.slice(pred2,[0,1],[-1,1])
    varz = tf.slice(pred2,[0,2],[-1,1])
    return ux,uy,uz,varx,vary,varz
def _GMM_2dgassian(state):
    """Predict a 20-component 2D Gaussian mixture (theta, phi) from *state*.

    NOTE(review): tf.layers.dropout defaults to training=False, so the two
    dropout layers below are inactive unless a training flag is threaded
    through -- confirm intended.
    NOTE(review): 1/(1+exp(x)) is sigmoid(-x), not sigmoid(x); confirm the
    sign convention for the end-of-stroke probability.
    """
    #4 layer MLP
    internal = tf.contrib.layers.fully_connected(state, 64, activation_fn=tf.nn.relu)
    internal = tf.layers.dropout(internal,rate=0.2)
    internal = tf.contrib.layers.fully_connected(internal, 128, activation_fn=tf.nn.relu)
    internal = tf.layers.dropout(internal,rate=0.2)
    internal = tf.contrib.layers.fully_connected(internal, 256, activation_fn=tf.nn.relu)
    if cfg.predict_eos:
        pred = tf.contrib.layers.fully_connected(internal, 121, activation_fn=None)
    else:
        pred = tf.contrib.layers.fully_connected(internal, 120, activation_fn=None)
    #use 20 mixtures by default
    #20 mixture weights, 20*2 means, 20*2 stds, 20 correlation rhos
    #1 param to model "end-of-stroke" behavior (in our case, changing from 2pi to -2pi)
    mixture_pi = tf.slice(pred,[0,0],[-1,20])
    us = tf.slice(pred,[0,20],[-1,40])
    sigmas = tf.slice(pred,[0,60],[-1,40])
    rhos = tf.slice(pred,[0,100],[-1,20])
    if cfg.predict_eos:
        end_stroke = tf.slice(pred,[0,120],[-1,1])
        end_stroke = 1/(1+tf.exp(end_stroke)) #(0,1)
    # mixture_pi = tf.nn.softmax(mixture_pi) #(0,1)
    # Manual softmax over the 20 mixture weights.
    pi_exp = tf.exp(mixture_pi)
    pi_exp_sum = tf.reduce_sum(pi_exp, 1)
    mixture_pi = pi_exp / tf.concat([tf.expand_dims(pi_exp_sum, 1) for _ in range(20)],1)
    # exp keeps stddevs strictly positive; tanh bounds rho.
    sigmas = tf.exp(sigmas)
    rhos = tf.tanh(rhos) #(-1,1)
    if cfg.predict_eos:
        return end_stroke,mixture_pi,us,sigmas,rhos
    else:
        return mixture_pi,us,sigmas,rhos
def _GMM_3dgassian(state):
    """Predict a 20-component 3D Gaussian mixture (x, y, z) from *state*.

    NOTE(review): tf.layers.dropout defaults to training=False, so the
    dropout layers below are inactive unless a training flag is threaded
    through -- confirm intended.
    """
    #4 layer MLP
    internal = tf.contrib.layers.fully_connected(state, 64, activation_fn=tf.nn.relu)
    internal = tf.layers.dropout(internal,rate=0.2)
    internal = tf.contrib.layers.fully_connected(internal, 128, activation_fn=tf.nn.relu)
    internal = tf.layers.dropout(internal,rate=0.2)
    internal = tf.contrib.layers.fully_connected(internal, 256, activation_fn=tf.nn.relu)
    pred = tf.contrib.layers.fully_connected(internal, 200, activation_fn=None)
    #use 20 mixtures by default
    #20 mixture weights, 20*3 means, 20*3 stds, 60 correlation rhos
    mixture_pi = tf.slice(pred,[0,0],[-1,20])
    us = tf.slice(pred,[0,20],[-1,60])
    sigmas = tf.slice(pred,[0,80],[-1,60])
    rhos = tf.slice(pred,[0,140],[-1,60])
    # mixture_pi = tf.nn.softmax(mixture_pi) #(0,1)
    # Manual softmax over the 20 mixture weights.
    pi_exp = tf.exp(mixture_pi)
    pi_exp_sum = tf.reduce_sum(pi_exp, 1)
    mixture_pi = pi_exp / tf.concat([tf.expand_dims(pi_exp_sum, 1) for _ in range(20)],1)
    # exp keeps stddevs strictly positive; tanh bounds the correlations.
    sigmas = tf.exp(sigmas)
    rhos = tf.tanh(rhos) #(-1,1)
    return mixture_pi,us,sigmas,rhos
# pred1 = manual_dynamicRNN(x, seqlen, weights, biases)
states_series, current_state = dynamicRNN(x,dropout)
if not cfg.own_history_only:
# others_future = tf.placeholder("float", [None, 47, cfg.predict_len, data_dim])
others = tf.placeholder("float", [None, 47, cfg.running_length, data_dim])
states_series_others, current_state_others = dynamicRNN(x_others,dropout)
if cfg.concat_state:
# mlp_state_series = multilayer_perceptron_hidden_state_series(others_future,n_hidden)
# mlp_state = multilayer_perceptron_hidden_state(others_future,batch_size,n_hidden)
# pred1 = concat_current_state(current_state,mlp_state)
# pred = concat_states_series(states_series,mlp_state_series)
states_series_others, current_state_others = dynamicRNN_hidden_state(others_future,num_layers,n_hidden,rnn_tuple_state)
pred = concat_states_series(states_series,states_series_others)
else:
# pred = tf.contrib.layers.fully_connected(current_state[1][1], data_dim, activation_fn=None)
if cfg.use_xyz:
if cfg.predict_mean_var:
ux,uy,uz,varx,vary,varz = _pred_mean_var_xyz2_new(current_state[1][1])
# target = util.tf_get_gt_target_xyz(y)
if cfg.predict_len==1 or is_test:
# population_target = util.tf_get_gt_target_xyz_pop(others_future_further)
# cost = costfunc._mean_var_cost_xyz(ux,uy,uz,varx,vary,varz,target,population_target)
# cost = costfunc._mean_var_cost_xyz(ux,uy,uz,varx,vary,varz,target)
cost = costfunc.likelihood_loss_tf([ux,uy,uz,varx,vary,varz],y)
# cost = costfunc.oneD_gaussian_loss([ux,uy,uz,varx,vary,varz],y)
# # cost,cost_x,cost_y,cost_z = costfunc._mean_var_cost_xyz_metric(ux,uy,uz,varx,vary,varz,target,costfunc.Bhattacharyya_distance)
# # cost,cost_x,cost_y,cost_z = costfunc._mean_var_cost_xyz_metric(ux,uy,uz,varx,vary,varz,target,costfunc.Wasserstein_distance)
# # cost,cost_x,cost_y,cost_z = costfunc._mean_var_cost_xyz_metric(ux,uy,uz,varx,vary,varz,target,costfunc.Kullback_Leibler_divergence_Gaussian)
# directly predict new samples
# predict_sample = tf.contrib.layers.fully_connected(current_state[1][1], fps*1*3, activation_fn=None)
# predict_sample = tf.reshape(predict_sample,[batch_size,fps*1,3])
# cost,inspect1,inspect2 = costfunc.conditional_prob_loss(predict_sample,target,population_target)
elif cfg.predict_len>1 and not is_test:
#compute the first step loss
input_temp = x
# target_now = [tf.squeeze(tf.slice(target[0],[0,0,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[1],[0,0,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[2],[0,0,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[3],[0,0,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[4],[0,0,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[5],[0,0,0],[-1,1,-1]),axis=-1)]
# cost = costfunc._mean_var_cost_xyz(ux,uy,uz,varx,vary,varz,target_now)
# cost = costfunc.oneD_gaussian_loss([ux,uy,uz,varx,vary,varz],tf.expand_dims(y[:,0,:],1))
cost = costfunc.likelihood_loss_tf([ux,uy,uz,varx,vary,varz],tf.expand_dims(y[:,0,:],1))
#compute the sequence loss after the first step in a loop: cost=\sum cost_{t=0,T}
for time_ind in range(1,cfg.predict_len):#already computed the first future step cost
#generate fake batch
this_input_temp = tf.stack((util.generate_fake_batch_tf(ux,varx),
util.generate_fake_batch_tf(uy,vary),
util.generate_fake_batch_tf(uz,varz)),axis=-1)
this_input_temp = tf.reshape(this_input_temp,[batch_size,1,fps*3])
#shift the input_temp
history_input = tf.slice(input_temp,[0,1,0],[-1,-1,-1])
input_temp = tf.concat((history_input,this_input_temp),axis=1)
states_series, current_state = dynamicRNN(input_temp,dropout)
ux,uy,uz,varx,vary,varz = _pred_mean_var_xyz2_new(current_state[1][1])
# target_now = [tf.squeeze(tf.slice(target[0],[0,time_ind,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[1],[0,time_ind,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[2],[0,time_ind,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[3],[0,time_ind,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[4],[0,time_ind,0],[-1,1,-1]),axis=-1),
# tf.squeeze(tf.slice(target[5],[0,time_ind,0],[-1,1,-1]),axis=-1)]
# cost += costfunc._mean_var_cost_xyz(ux,uy,uz,varx,vary,varz,target_now)
# cost += costfunc.oneD_gaussian_loss([ux,uy,uz,varx,vary,varz],tf.expand_dims(y[:,time_ind,:],1))
cost += costfunc.likelihood_loss_tf([ux,uy,uz,varx,vary,varz],tf.expand_dims(y[:,time_ind,:],1))
elif cfg.use_GMM:
mixture_pi,us,sigmas,rhos = _GMM_3dgassian(current_state[1][1])
y_pred = [mixture_pi,us,sigmas,rhos]
cost = costfunc.mixture_3d_gaussian_loss(y,y_pred)
else:#directly predict raw
predict_sample = pred_cnn_model_fn(current_state[1][1])
this_input_temp = predict_sample
if cfg.predict_len==1:
cost = tf.losses.mean_squared_error(y,predict_sample)
elif cfg.predict_len>1:
input_temp = x
#compute the first step loss
this_y = tf.slice(y,[0,0,0],[-1,1,-1])
# cost = tf.losses.mean_squared_error(this_y,predict_sample)
cost = costfunc.pred_raw_loss_tf(this_y,predict_sample,use_reg=use_reg)
#compute the sequence loss after the first step
for time_ind in range(1,cfg.predict_len):#already computed the first future step cost
#shift the input_temp
history_input = tf.slice(input_temp,[0,1,0],[-1,-1,-1])
input_temp = tf.concat((history_input,this_input_temp),axis=1)
states_series, current_state = dynamicRNN(input_temp,dropout)
predict_sample = pred_cnn_model_fn(current_state[1][1])
this_input_temp = predict_sample
this_y = tf.slice(y,[0,time_ind,0],[-1,1,-1])
# cost += tf.losses.mean_squared_error(this_y,predict_sample)
cost += costfunc.pred_raw_loss_tf(this_y,predict_sample,use_reg=use_reg)
elif cfg.use_phi_theta:
# target = get_gt_target_phi_theta(y)
# u1,u2,var1,var2 = _pred_mean_var_phi_theta(current_state[1][1])
# cost = costfunc._mean_var_cost_phi_theta(u1,u2,var1,var2,target)
if cfg.use_GMM:
if cfg.predict_eos:
end_stroke,mixture_pi,us,sigmas,rhos = _GMM_2dgassian(current_state[1][1])
y_pred = [end_stroke,mixture_pi,us,sigmas,rhos]
else:
mixture_pi,us,sigmas,rhos = _GMM_2dgassian(current_state[1][1])
y_pred = [mixture_pi,us,sigmas,rhos]
# cost = costfunc.mixture_likelihood_loss_phi_theta_tf(y,y_pred)
cost = costfunc.mixture_bivariate_gaussian_loss(y,y_pred)
else:
utheta,uphi,sigma_theta,sigma_phi,rho = _pred_mean_var_phi_theta_2dgassian(current_state[1][1])
cost = costfunc.likelihood_loss_phi_theta_tf(y,[utheta,uphi,sigma_theta,sigma_phi,rho])
# Define loss and optimizer
# cost = tf.losses.mean_squared_error(pred,y)
# cost = costfunc._modified_mse(pred1,y)
# cost = costfunc._modified_mse(pred,y)
# cost = costfunc._modified_mse(pred[:,:int(1/2*cfg.running_length)],y[:,:int(1/2*cfg.running_length)])\
# + 10*_modified_mse(pred[:,int(1/2*cfg.running_length):],y[:,int(1/2*cfg.running_length):])
# cost = costfunc._modified_mse(pred,y[:,0,:])
# summary
all_losses_dict = {}
all_losses_dict['MSE_loss'] = cost
# all_losses_dict['modified_MSE_loss'] = cost
# all_losses_dict['modified_MSE_loss_staticRNN'] = cost1
event_summaries = {}
event_summaries.update(all_losses_dict)
summaries = []
for key, var in event_summaries.items():
summaries.append(tf.summary.scalar(key, var))
summary_op = tf.summary.merge(summaries)
saver = tf.train.Saver()
model_path = "./model/LSTM_"+tag+".ckpt"
lr = tf.Variable(cfg.LEARNING_RATE, trainable=False)
# optimizer = tf.train.AdamOptimizer(learning_rate=lr)
optimizer = tf.train.RMSPropOptimizer(learning_rate=lr)
gvs = optimizer.compute_gradients(cost)
def ClipIfNotNone(grad):
if grad is None:
return grad
return tf.clip_by_value(grad, -1, 1)
if cfg.clip_gradient:
clipped_gradients = [(ClipIfNotNone(grad), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(clipped_gradients)
else:
train_op = optimizer.apply_gradients(gvs)
init = tf.global_variables_initializer()
batch_nums = []
for ii in datadb.keys():
if ii == cfg.test_video_ind:
continue
batch_nums.append(datadb[ii]['x'].shape[1])
if cfg.process_in_seconds:
total_batch = (np.sum(batch_nums)-cfg.running_length*fps)/cfg.data_chunk_stride/batch_size
else:
total_batch = (np.sum(batch_nums)-cfg.running_length)/cfg.data_chunk_stride/batch_size
total_batch*=datadb[ii]['x'].shape[0]
# Start training
if not is_test:
data_io = DataLayer(datadb, random=False, is_test=False)
with tf.Session() as sess:
sess.run(init)
starting_epoch = 2
summary_writer = tf.summary.FileWriter('./tfsummary/'+tag, sess.graph)
if len(glob.glob(model_path[:-5]+'epoch'+str(starting_epoch-1)+".ckpt.meta"))>0:
saver.restore(sess, model_path[:-5]+'epoch'+str(starting_epoch-1)+".ckpt")
print("Model restored.")
starting_epoch = training_epochs
_current_state = np.zeros((num_layers, 2, batch_size, n_hidden))
for epoch in range(starting_epoch,starting_epoch+training_epochs,1):
if epoch>0 and epoch%2==0:
save_path = saver.save(sess, model_path[:-5]+'epoch'+str(epoch)+'.ckpt')
print("Model saved: %s" % save_path)
lr_temp = cfg.LEARNING_RATE*(0.5**(epoch/cfg.lr_epoch_step))
print('epoch: ',epoch, ', change lr=lr*0.5, lr=', lr_temp)
sess.run(tf.assign(lr, lr_temp))
for step in range(total_batch):
# TODO: zero before every new minibatch?
# _current_state = np.zeros((num_layers, 2, batch_size, n_hidden))
# batch_x, batch_y, batch_seqlen = trainset.next(batch_size)
batch_x, batch_y, batch_x_others,_,_,batch_x_others_further = data_io._get_next_minibatch(datadb,batch_size)
# ## collapse last dimension, batch*len*(num_user*2)
# batch_x = np.reshape(batch_x,[batch_size,batch_x.shape[1],-1])
# batch_seqlen = np.array([cfg.running_length]*batch_size) #fixed length
if cfg.change_xyz2xxxyyyzzz:
batch_x = util.change_input_format(batch_x)#even worse, why
# Run optimization op (backprop)
_, _current_state = sess.run([train_op,current_state],
feed_dict={x: batch_x, y: batch_y,
dropout: 0.1,
# others_future_further:batch_x_others_further,
# seqlen: batch_seqlen,
# others_future: batch_x_others,
init_state: _current_state})
count = (step+1)*batch_size+epoch*total_batch*batch_size
if count<200:
display_step=10
else:
display_step=200
if count% display_step == 0 or count==0:
if cfg.predict_mean_var:
if cfg.use_xyz:
loss,summary,_clipped_gradients,_gv,_current_state,_states_series,ux_temp,uy_temp,uz_temp,varx_temp,vary_temp,varz_temp = sess.run(
[cost,summary_op,clipped_gradients[2:],gvs[2:],current_state,states_series,ux,uy,uz,varx,vary,varz],
feed_dict={x: batch_x, y: batch_y,
dropout: 0.1,
# others_future_further:batch_x_others_further,
# seqlen: batch_seqlen,
# others_future: batch_x_others,
init_state: _current_state})
elif cfg.use_phi_theta:
loss,summary,_clipped_gradients,_gv,_current_state,_states_series = sess.run(
[cost,summary_op,clipped_gradients[2:],gvs[2:],current_state,states_series],
feed_dict={x: batch_x, y: batch_y,
dropout: 0.1,
init_state: _current_state})
if not cfg.predict_mean_var:
#predict raw
loss,summary = sess.run(
[cost,summary_op],
feed_dict={x: batch_x, y: batch_y,
dropout: 0.1,
# others_future_further:batch_x_others_further,
# seqlen: batch_seqlen,
# others_future: batch_x_others,
init_state: _current_state})
summary_writer.add_summary(summary, float(count))
print("Step " + str(count) + ", Minibatch Loss= " + \
"{:.6f}".format(loss))
print("Optimization Finished!")
save_path = saver.save(sess, model_path[:-5]+'epoch'+str(epoch)+'.ckpt')
print("Model saved: %s" % save_path)
if is_test:
# test
with tf.Session() as sess:
# Run the initializer
sess.run(init)
if test_epoch==None:
saver.restore(sess, model_path)
else:
saver.restore(sess, model_path[:-5]+'epoch'+str(test_epoch)+'.ckpt')
print("Model restored.")
data_io_test = DataLayer(datadb, random=False, is_test=True)
# if cfg.use_GMM:
# data_io_test_original = DataLayer(datadb_original, random=False, is_test=True)
# batch_seqlen = np.array([cfg.running_length]*batch_size) #fixed length
test_out = []
gt_out = []
unnormalized_input = []
num_trials = 2393/batch_size+1
strokes=np.zeros((num_trials,cfg.predict_step,cfg.batch_size,2))
for ii in range(num_trials):
# every test time feed in zero state?
_current_state = np.zeros((num_layers, 2, batch_size, n_hidden))
batch_x, batch_y, batch_x_others,batch_y_further,db_index,batch_x_others_further = data_io_test._get_next_minibatch(datadb,batch_size)
###caution!!!: only feed one frame as batch_x
batch_x = np.expand_dims(batch_x[:,-1,:],1)
# if cfg.predict_eos: batch_x[:,:,2]=1
batch_y = np.expand_dims(batch_y[:,-1,:],1)
if cfg.change_xyz2xxxyyyzzz:
batch_x = util.change_input_format(batch_x)
# batch_x, batch_y, batch_x_others = data_io_test._get_next_minibatch(datadb,batch_size)
# collapse last dimension, batch*len*(num_user*2)
# batch_x = np.reshape(batch_x,[batch_size,batch_x.shape[1],-1])
if cfg.use_GMM:
# batch_x111, batch_y111, _,batch_y_further111,_,_ = data_io_test_original._get_next_minibatch(datadb_original,batch_size)
# gt_out.append(batch_y_further111)
# unnormalized_input.append(batch_x111)
gt_out.append(batch_y_further)
# gt_out.append(util._denormalize_data(batch_y_further,mu1,std1,mu2,std2))
else:
gt_out.append(batch_y_further)
# for predict_step in range(cfg.predict_step):
# # feed prediction back as input!
# pred_temp,_current_state = sess.run([pred,current_state], feed_dict={x: batch_x,
# # y: batch_y,
# dropout: 0.0,
# # seqlen: batch_seqlen,
# # others_future: batch_x_others,
# init_state: _current_state})
# batch_x = np.concatenate((batch_x[:,1:,:],pred_temp[:,np.newaxis,:]),axis=1)
# test_out.append(pred_temp)
for predict_step in range(cfg.predict_step):
# feed prediction back as input!
if cfg.use_xyz:
if cfg.predict_mean_var:
loss,ux_temp,uy_temp,uz_temp,varx_temp,vary_temp,varz_temp,_current_state = sess.run(
[cost,ux,uy,uz,varx,vary,varz,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
# seqlen: batch_seqlen,
# others_future: batch_x_others,
# others_future_further:batch_x_others_further,
init_state: _current_state})
test_out.append([ux_temp,uy_temp,uz_temp,varx_temp,vary_temp,varz_temp])
temp_newdata = np.stack((util.generate_fake_batch_numpy(ux_temp,varx_temp,batch_size),
util.generate_fake_batch_numpy(uy_temp,vary_temp,batch_size),
util.generate_fake_batch_numpy(uz_temp,varz_temp,batch_size)),axis=-1)[:,np.newaxis,:,:].reshape((batch_size,1,-1))
elif cfg.use_GMM:
mixture_pi_temp,us_temp,sigmas_temp,rhos_temp,_current_state = sess.run(
[mixture_pi,us,sigmas,rhos,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
init_state: _current_state})
predictions = [mixture_pi_temp,us_temp,sigmas_temp,rhos_temp]
temp_newdata = util.sample_mixture_3D(predictions)
test_out.append(temp_newdata)
if not cfg.predict_mean_var:
loss,pred_temp,_current_state = sess.run(
[cost,predict_sample,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
# others_future_further:batch_x_others_further,
init_state: _current_state})
test_out.append([pred_temp])
temp_newdata = pred_temp.reshape(batch_size,1,-1)
print ('loss ',loss)
elif cfg.use_phi_theta:
## mse on (u,var)
# uphi_temp,utheta_temp,varphi_temp,vartheta_temp,_current_state = sess.run([u1,u2,var1,var2,current_state],
# feed_dict={x: batch_x,
# # y: batch_y,
# dropout: 0.0,
# # seqlen: batch_seqlen,
# # others_future: batch_x_others,
# init_state: _current_state})
# test_out.append([uphi_temp,utheta_temp,varphi_temp,vartheta_temp])
# temp_newdata = np.stack((util.generate_fake_batch_numpy(uphi_temp,varphi_temp,batch_size),
# util.generate_fake_batch_numpy(utheta_temp,vartheta_temp,batch_size)),axis=-1)[:,np.newaxis,:,:].reshape((batch_size,1,-1))
## likelihood_loss_phi_theta_tf (NLL)
if cfg.use_GMM:
if cfg.predict_eos:
end_of_stroke_temp,mixture_pi_temp,us_temp,sigmas_temp,rhos_temp,_current_state = sess.run(
[end_stroke,mixture_pi,us,sigmas,rhos,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
init_state: _current_state})
predictions = [end_of_stroke_temp,mixture_pi_temp,us_temp,sigmas_temp,rhos_temp]
else:
mixture_pi_temp,us_temp,sigmas_temp,rhos_temp,_current_state = sess.run(
[mixture_pi,us,sigmas,rhos,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
init_state: _current_state})
# strokes[ii,predict_step,:,:] = util.sample(mixture_pi_temp, us_temp[:,:20], us_temp[:,20:],
# sigmas_temp[:,:20], sigmas_temp[:,20:],
# rhos_temp,num_mixtures=20)
# test_out.append([mixture_pi_temp,us_temp,sigmas_temp,rhos_temp])
# temp_newdata = util.generate_fake_batch_mixture(mixture_pi_temp,us_temp,sigmas_temp,rhos_temp)
predictions = [mixture_pi_temp,us_temp,sigmas_temp,rhos_temp]
temp_newdata = util.sample_mixture(predictions)
test_out.append(temp_newdata)
# test_out.append(util._denormalize_data(temp_newdata,mu1,std1,mu2,std2))
if cfg.process_in_seconds:
temp_newdata = temp_newdata.reshape(batch_size,1,-1)
else:
utheta_temp,uphi_temp,sigma_theta_temp,sigma_phi_temp,rho_temp,_current_state = sess.run([utheta,uphi,sigma_theta,sigma_phi,rho,current_state],
feed_dict={x: batch_x,
y: batch_y,
dropout: 0.0,
init_state: _current_state})
# test_out.append([utheta_temp,uphi_temp,sigma_theta_temp,sigma_phi_temp,rho_temp])
temp_newdata = util.generate_fake_batch_multivariate_normal_numpy(utheta_temp,uphi_temp,sigma_theta_temp,sigma_phi_temp,rho_temp,batch_size)
test_out.append(temp_newdata)
if cfg.change_xyz2xxxyyyzzz:
temp_newdata = np.concatenate((temp_newdata[:,:,0::3],temp_newdata[:,:,1::3],temp_newdata[:,:,2::3]),axis=-1)
if cfg.process_in_seconds:
batch_x = np.concatenate((batch_x[:,1:,:],temp_newdata),axis=1)
else:
#shift by half of the running_length
# batch_x = np.concatenate((batch_x[:,int(0.5*cfg.running_length):,:],temp_newdata),axis=1)
batch_x = temp_newdata
pickle.dump(test_out,open('LSTM_test_out'+tag+'.p','wb'))
pickle.dump(gt_out,open('LSTM_gt_out'+tag+'.p','wb'))
if cfg.use_GMM:
pickle.dump(unnormalized_input,open('LSTM_gt_input'+tag+'.p','wb'))
pickle.dump(strokes,open('strokes_'+tag+'.p','wb'))
print("Test finished!")
if cfg.concat_state:
# get activation
mlp_trained_w_series,lstm_states_series = sess.run([mlp_state_series,states_series],feed_dict={x: batch_x, y: batch_y,
dropout: 0.0,
# others_future: batch_x_others,
init_state: _current_state})
pickle.dump(mlp_trained_w_series,open('mlp_trained_w_series.p','wb'))
pickle.dump(lstm_states_series,open('lstm_states_series.p','wb'))
print("weight finished!")
| ChengeLi/LongTerm360FoV | mycode/lstm.py | lstm.py | py | 41,993 | python | en | code | 13 | github-code | 13 |
31475961828 | import socket
import random
import sys
import os
import socket

# Simple UDP file receiver.  Protocol: for each transfer the client first
# sends the file size as a decimal string, then streams the file contents
# in 1024-byte datagrams.  Three seconds of silence mark end-of-file, after
# which the server reports back what percentage of the bytes arrived.
server_address = ('127.0.0.1', 5000)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(server_address)
filenamestart = 'receivedfile_'
filecount = 0
try:
    while True:
        filecount += 1
        filename = filenamestart + str(filecount) + '.txt'
        # Block indefinitely for the next transfer announcement.
        # BUG FIX: the 3 s timeout armed while draining the previous file
        # used to stay in effect here, so the server died with an
        # unhandled socket.timeout while waiting for the next client.
        server_socket.settimeout(None)
        while True:
            data, client_address = server_socket.recvfrom(1024)
            if data:
                break
        actual_filesize = int(data.decode())
        print(str(actual_filesize))
        with open(filename, 'wb') as f:
            while True:
                try:
                    # 3 s of silence means the sender is done (or gave up).
                    server_socket.settimeout(3)
                    data = server_socket.recv(1024)
                    if not data:
                        break
                except socket.timeout:
                    break
                else:
                    f.write(data)
        print('finished')
        received_filesize = int(os.path.getsize(filename))
        print(str(received_filesize))
        # Guard against a client that announced a zero-byte file.
        if actual_filesize > 0:
            percentage = (received_filesize / actual_filesize) * 100
        else:
            percentage = 100.0
        print('')
        print('Finished receiving from ' + str(client_address) + ' from socket ' + str(server_socket.getsockname()))
        # BUG FIX: '%%' in a plain (non %-formatted) string printed a
        # literal double percent sign.
        print('received ' + str(percentage) + '% of the file from sender.')
        server_socket.sendto(str(percentage).encode(), client_address)
        # BUG FIX: the original 'if KeyboardInterrupt: break' was always
        # true (a class object is truthy) and stopped the server after a
        # single file; Ctrl-C is handled by the except clause below.
except KeyboardInterrupt:
    print('Server is interrupted')
24463309259 | from dataclasses import dataclass, field
import numpy as np
import cv2
from supermarket_simulation import Supermarket
from create_supermarket_map import main as create_supermarket_map
from path_finder import main as run_pathfinder
from config import (
CUSTOMER_ARRIVAL_RATE,
Locations,
MARKET,
PATH_SUPERMARKETMAP,
PATH_TILES,
SIMULATION_DURATION,
STORE_LOCATIONS,
TILE_SIZE,
TRANS_PROB_MATRIX,
UNWALKABLES,
)
@dataclass
class VisualizeCustomers(Supermarket):
    """Draws and navigates the avatar of the customer on the map."""

    # BUG FIX: a bare ndarray default is evaluated once at class-definition
    # time and shared (mutably) by every instance; use a default_factory so
    # each instance gets its own white 32x32 placeholder tile.
    avatar: np.ndarray = field(
        default_factory=lambda: np.full(shape=(32, 32, 3), fill_value=255)
    )
    store_locations: dict = field(default_factory=dict)

    def find_path(
        self,
        grid: str | list[list[str]] = MARKET,
        unwalkables: list[str] | None = None,
        start_name: str = "entrance",
        end_name: str = "exit",
        is_efficient: bool = True,
    ) -> list[tuple[int, int]]:
        """Return the list of (row, col) coordinates of a path from
        `start_name` to `end_name`.

        Raises:
            ValueError: if start_name or end_name is not a `Locations` member.
        """
        # BUG FIX: the original used dataclasses.field(default_factory=list)
        # as a *plain function* default, which made the default a Field
        # object rather than an empty list.
        if unwalkables is None:
            unwalkables = []
        #! We might not actually need this and can call run_pathfinder directly instead...
        all_members = [member for member in dir(Locations) if not member.startswith("_")]
        if not all(loc_name in all_members for loc_name in [start_name.upper(), end_name.upper()]):
            raise ValueError("Either start or end name is unknown!")
        return run_pathfinder(
            grid=grid,
            unwalkables=unwalkables,
            start_symbol=Locations.__members__[start_name.upper()].value,
            end_symbol=Locations.__members__[end_name.upper()].value,
            store_locations=self.store_locations,
            is_efficient=is_efficient,
        )

    def draw_background(self, background: np.ndarray, supermarket_map: np.ndarray) -> np.ndarray:
        """Draw the supermarket map onto a copy of `background` and return it."""
        frame = background.copy()
        try:
            # Map objects that know how to render themselves do so here ...
            supermarket_map.draw(frame)
        except AttributeError:
            # ... otherwise treat the map as a raw image array and blit it.
            frame[0 : supermarket_map.shape[0], 0 : supermarket_map.shape[1]] = supermarket_map
        return frame

    def draw_move(self, frame: np.ndarray, location: tuple[int, int]) -> None:
        """Blit the customer avatar onto `frame` at the (row, col) tile given
        by `location` (in-place)."""
        x_coord = location[1] * TILE_SIZE
        y_coord = location[0] * TILE_SIZE
        frame[
            y_coord : y_coord + self.avatar.shape[0], x_coord : x_coord + self.avatar.shape[1]
        ] = self.avatar
def main() -> None:
    """Run the supermarket simulation, rendering one frame per simulated minute.

    Interactive controls: SPACEBAR saves the current frame and advances to the
    next minute; 'q' closes all windows and aborts the simulation.
    """
    supermarket_map = create_supermarket_map(path_map=PATH_SUPERMARKETMAP, path_tile=PATH_TILES)
    background = np.zeros(np.shape(supermarket_map), np.uint8)
    tiles = cv2.imread(PATH_TILES)
    # Cut the customer avatar sprite out of the tile sheet (tile row 4, col 0).
    customer_avatar = (4 * TILE_SIZE, 0 * TILE_SIZE)
    customer_avatar = tiles[
        customer_avatar[0] : customer_avatar[0] + TILE_SIZE,
        customer_avatar[1] : customer_avatar[1] + TILE_SIZE,
        :,
    ]
    inst_viz_customers = VisualizeCustomers(
        store_locations=STORE_LOCATIONS, avatar=customer_avatar
    )
    # Start simulation
    for _minute in range(SIMULATION_DURATION):
        frame = inst_viz_customers.draw_background(
            background=background, supermarket_map=supermarket_map
        )
        inst_viz_customers.next_minute()
        inst_viz_customers.add_new_customers(
            frequency=CUSTOMER_ARRIVAL_RATE, transition_probs=TRANS_PROB_MATRIX
        )
        # For each customer, get target location, find the path and draw to target location
        for customer in inst_viz_customers.customers:
            path = inst_viz_customers.find_path(
                unwalkables=UNWALKABLES,
                start_name=customer.previous_location,
                end_name=customer.current_location,
                is_efficient=False,
            )
            # Only the final tile of the path (the customer's current
            # position) is drawn.
            inst_viz_customers.draw_move(frame=frame, location=path[-1])
        inst_viz_customers.print_customers()
        # Show frame with dynamically title, that updates simulation on SPACEBAR and aborts on q
        while True:
            cv2.imshow("frame", frame)
            cv2.setWindowTitle(
                "frame", f"My simulated supermarket after {inst_viz_customers.get_time} (HH:MM)!"
            )
            # Mask to the low byte so key codes compare portably.
            key = chr(cv2.waitKey(1) & 0xFF)
            if key == " ":  # Spacebar
                cv2.imwrite(f"images/animation/{_minute:02}min.png", frame)
                break
            if key == "q":
                cv2.destroyAllWindows()
                raise KeyError("You have stopped the simulation!")
        inst_viz_customers.remove_exiting_customers()
if __name__ == "__main__":
main()
| MichlF/projects | data_science/supermarket_markov_simulation/visualize_supermarket_simulation.py | visualize_supermarket_simulation.py | py | 4,651 | python | en | code | 1 | github-code | 13 |
19288276609 | '''
This is used to create a multivariable linear regression.
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import pprint
import pymongo
import statsmodels.api as sm

# Columns used as regressors, in the order the model is fit.
FEATURES = ["7-day_gross", "budget", "num_theaters", "release_date"]


def _to_frame(rows):
    """Build the regression DataFrame from the raw
    [budget, release_seconds, 7-day gross, theaters, domestic gross] rows."""
    return pd.DataFrame({
        "budget": [i[0] for i in rows],
        "release_date": [i[1] for i in rows],
        "7-day_gross": [i[2] for i in rows],
        "num_theaters": [i[3] for i in rows],
        "domestic_gross": [i[4] for i in rows],
    })


def _evaluate_and_plot(predictions, actual, title, legend_loc):
    """Scatter predicted vs. actual revenue (log-log axes) with summary stats."""
    correlation = np.corrcoef(predictions, actual)[0][1]
    avg_pct_error = np.average([abs(p - a) / a for p, a in zip(predictions, actual)])
    mse = np.average([(p - a) ** 2 for p, a in zip(predictions, actual)])
    plt.scatter(predictions, actual, s=30, c='r', marker='+', zorder=10,
                label="correlation:" + str(correlation) +
                      "\naverage percent error:" + str(avg_pct_error) +
                      "\nmse:" + str(mse))
    plt.plot(actual, actual, label="y=x")
    plt.title(title)
    axes = plt.axes()
    axes.set_xscale("log")
    axes.set_yscale("log")
    plt.xlabel("Predicted Revenue from Model")
    plt.ylabel("Actual Revenue")
    plt.legend(loc=legend_loc)
    plt.show()


# establish Mongo database connection
client = pymongo.MongoClient()
# set up CMSC455 database and movies collection
moviedb = client.CMSC455.movies
raw_movies = list(moviedb.find({}))

# Strip "$" and "," from the currency strings and convert the release date
# to seconds since the epoch.
trimmed_list = []
for movie in raw_movies:
    trimmed_list.append([
        int("".join(((movie["budget"].split("$"))[1]).split(","))),
        (datetime.datetime.strptime(movie["release_date"], "%m/%d/%Y") -
         datetime.datetime.fromtimestamp(0)).total_seconds(),
        int("".join(((movie["daily_earnings"][6]["total_gross"]).split("$")[1]).split(","))),
        int("".join(movie["daily_earnings"][6]["num_theaters"].split(","))),
        int("".join((movie["domestic_gross"].split("$")[1]).split(",")))
    ])

# Exclude rows whose 7th-day theater count is more than 2 standard deviations
# from the mean.  A possible cause of these outliers is advance screenings
# (one movie only had 4 theaters on its seventh day).
num_theaters = [i[3] for i in trimmed_list]
mean = np.average(num_theaters)
sd = np.std(num_theaters)
outlier = [abs(mean - n) > (2 * sd) for n in num_theaters]
print("number of outliers by number of theaters:", sum(outlier))
final_list = [row for row, bad in zip(trimmed_list, outlier) if not bad]

# Randomly choose (at most) half of the rows as the training set; the
# remainder becomes the held-out testing set.
indices = np.random.rand(len(final_list)) * len(final_list)
indices = list(set(int(i) for i in indices))
if len(indices) > len(final_list) / 2:
    indices = indices[:int(len(final_list) / 2)]
chosen = set(indices)
training = [row for i, row in enumerate(final_list) if i in chosen]
testing = [row for i, row in enumerate(final_list) if i not in chosen]
final_list = training
print(len(final_list))

# Fit an OLS model (with intercept) on the training data.
df = _to_frame(final_list)
X = sm.add_constant(df[FEATURES])
y = df["domestic_gross"]
model = sm.OLS(y, X).fit()

predictions = model.predict(X)
_evaluate_and_plot(predictions, y,
                   "Prediction Model for Domestic Gross (Training Data)",
                   "upper left")
print("MSE:", model.mse_model)
print(model.summary())
plt.cla()

# Evaluate on the held-out testing set.
# BUG FIX: the original passed the *raw* testing frame to model.predict
# (wrong column order, extra "domestic_gross" column, no constant term)
# and then compared those predictions against the *training* targets.
# Build the exog exactly as for training and compare against the testing
# targets instead.
test_df = _to_frame(testing)
X_test = sm.add_constant(test_df[FEATURES], has_constant="add")
y_test = test_df["domestic_gross"]
predictions = model.predict(X_test)
_evaluate_and_plot(predictions, y_test,
                   "Prediction Model for Domestic Gross (Testing Data)",
                   "lower left")
| philliard3/455Project | multivariable_linear.py | multivariable_linear.py | py | 4,549 | python | en | code | 0 | github-code | 13 |
73458342099 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
#
# This script converts a .CUCX files to be .CUC (old format)
#
import sqlite3
import datetime
import time
import sys
import os
import kpilot
import json
import math
import pycountry
import config
#-------------------------------------------------------------------------------------------------------------------#
from config import fixcoding
# --- Paths and run configuration -----------------------------------------
dbpath = "./cucfiles/"   # directory holding the unzipped .CUCX contents
cucpath = "./cuc/"       # directory where the generated .CUC/.json land
eventname = "LIVE Pyrenees" # TODO: Why is this initialized here?
# overwritten below from the CONTEST table
taskType = "SailplaneRacing"
#
# to run this program, first copy the .CUCX file into the cucfiles directory and unzip that file.
#
print("Generate .CUC files V1.0 from " + cucpath + \
"contest.db the unzip of the .CUCX file")
start_time = time.time()
local_time = datetime.datetime.now()
print("Time is now:", local_time) # print the time for information only
fl_date_time = local_time.strftime("%Y%m%d") # local date as YYYYMMDD, used in the output file names
CUC_DATA = cucpath + config.Initials + fl_date_time + \
'.cuc' # name of the CUC to be generated
JSONFILE = cucpath + config.Initials + fl_date_time + \
'.json' # name of the JSON to be generated
print("CUC generated data file is: ", CUC_DATA, JSONFILE) # just a trace
datafile = open(CUC_DATA, 'w') # open the output file
jsonfile = open(JSONFILE, 'w') # open the output file
cuchdr = open(cucpath + "LIVEhdr.txt", 'r') # opened: the fixed header template
cuctail = open(cucpath + "LIVEtail2.txt", 'r') # opened: the fixed trailer template
# open the DB embedded into the .CUCX file unzipped
conn = sqlite3.connect(dbpath+'contest.db')
# open the DB with all the GLIDERS information
connG = sqlite3.connect(config.DBpath+config.DBname)
cursG = connG.cursor() # cursor for the GLIDERS table
cursD = conn.cursor() # cursor for the CONTESTANT table
cursP = conn.cursor() # cursor for the PILOT table
# --- Dump the contest database for inspection (trace output only) --------
print("From the contest.db ...")
print("Contest data:")
cursD.execute('select * from CONTEST') # get the CONTEST data information
for row in cursD.fetchall():
    print(row)
    # keep the (last) contest name; replaces the placeholder set above
    eventname = row[2]
print("Location data:")
cursD.execute('select * from LOCATION') # get the LOCATION information
for row in cursD.fetchall():
    print(row)
print("Pilot data:")
cursD.execute('select * from PILOT') # and all the pilots
for row in cursD.fetchall():
    print(row)
print("Contestant data:")
cursD.execute('select * from CONTESTANT') # and all the pilots/contestant
for row in cursD.fetchall():
    print(row)
print("Waypoint information:")
cursD.execute('select * from POINT') # and all the pilots/contestant
for row in cursD.fetchall():
    print(row)
# --------------------------------------------------------------
buf = cuchdr.read() # start reading the pseudo CUC header file
datafile.write(buf) # copy into the output file
# Build the tracks
tracks = [] # create the instance for the tracks
#
# SeeYou database contest.db contestant table SQUEMA
#
# CREATE TABLE contestant (id_contestant BIGINT NOT NULL, ref_class BIGINT DEFAULT NULL, version INTEGER NOT NULL, name VARCHAR(255) NOT NULL, club VARCHAR(255) DEFAULT NULL, team VARCHAR(255) DEFAULT NULL, aircraft_model VARCHAR(255) NOT NULL, contestant_number VARCHAR(8) DEFAULT NULL, aircraft_registration VARCHAR(32) DEFAULT NULL, handicap DOUBLE PRECISION DEFAULT NULL, pure_glider BOOLEAN NOT NULL, flight_recorders CLOB DEFAULT NULL, tag VARCHAR(255) DEFAULT NULL, not_competing BOOLEAN NOT NULL, status VARCHAR(19) DEFAULT NULL, created_at DATETIME DEFAULT NULL, updated_at DATETIME DEFAULT NULL, PRIMARY KEY(id_contestant));
#
# number of pilots found
pn = 0
# number of warnings
nwarnings = 0
# get the pilot information from the contestant table
cursD.execute(
'select name, aircraft_model, contestant_number, aircraft_registration, flight_recorders, id_contestant from CONTESTANT')
# get all the CONTESTAN pilots
for row in cursD.fetchall(): # search all the rows
pname = row[0] # pilot name is the first field
# fix the UTF8 coding
pname = fixcoding(pname).encode('utf8')
if row[1]:
type = row[1] # get glider type
else:
type = "type_NOTYET"
if row[2]:
# get the competition numbers
cn = row[2]
else:
cn = "cn_NOTYET"
if row[3]:
# get the registration
regi = row[3]
else:
regi = "EC-XXX" # dummy reg
if row[4]:
idflarm = row[4] # get the flarmID
# check for FLARMID
idflarm = idflarm.rstrip('\n')
idflarm = idflarm.rstrip('\r')
else:
idflarm = "*" # mark it as not found
# try to get it from the OGN database
cursG.execute('select * from GLIDERS where registration = ?', [regi])
for rowg in cursG.fetchall():
if rowg[0]:
idflarm = "FLR"+rowg[0]
idcont = row[5] # get the global ID of the contestant
if pname == "": # if not pilot name ???
if idflarm in kpilot.kpilot: # check if know the pilot because is our database kpilot.py
# in that case place the name of the pilot
pname = kpilot.kpilot[idflarm]
else:
pname = "Pilot NN-"+str(pn) # otherwise just say: NoName#
# # write the Pilot detail
# "Tpilot","",*0,"FLRDDE1FC","Ventus","EC-TTT","TT","",0,"",0,"",1,"","" # the template to use
#
# increase the number of pilots found
pn += 1
buf = '"' + pname + '","",*0,"' + idflarm + '","' + type + '","' + regi + \
'","' + cn + '","",0,"",0,"",1,"",""\n' # write tha into the psuedo CUC file
# write the pilot information into the pseudo CUC file
datafile.write(buf)
rgb = 0x111*pn # the the RGB color
ccc = hex(rgb) # convert it to hex
color = "#"+ccc[2:] # set the JSON color required
cursP.execute("select nationality, igc_id from PILOT where id_pilot = ? ", [
idcont]) # set the tcountry from the PILOT table
pil = cursP.fetchone()
# get the country of the pilot
country = pil[0]
# convert it to the 3 chars ISO code
ccc = pycountry.countries.get(alpha_2=pil[0])
country = ccc.alpha_3
if pil[1]: # get the IGC ranking list ID
igcid = pil[1]
else:
igcid = -1
tr = {"trackId": config.Initials+fl_date_time+":"+idflarm, "pilotName": pname, "competitionId": cn, "country": country,
"aircraft": type, "registration": regi, "3dModel": "ventus2", "ribbonColors": [color],
"portraitUrl": "http://rankingdata.fai.org/PilotImages/"+str(igcid)+".jpg"}
tracks.append(tr) # add it to the tracks
print("P==>: ", pname, idflarm, country, regi, cn, type, igcid, idcont)
if idflarm == "*": # if not FLARM ID specified ???
print("Warning", pname, regi, " NO Flarm ID")
nwarnings += 1 # increase the number of warnings
# ---------- end of for -----------------------*
# Build the turning points *
# ======================== *
#
# SeeYou database contest.db point table SQUEMA
#
# CREATE TABLE point (id_point BIGINT NOT NULL, name VARCHAR(255) NOT NULL, latitude DOUBLE PRECISION NOT NULL, longitude DOUBLE PRECISION NOT NULL, type VARCHAR(16) NOT NULL, elevation DOUBLE PRECISION NOT NULL, distance DOUBLE PRECISION NOT NULL, course_in DOUBLE PRECISION NOT NULL, course_out DOUBLE PRECISION NOT NULL, oz_type VARCHAR(16) NOT NULL, oz_max_altitude DOUBLE PRECISION DEFAULT NULL, oz_radius1 INTEGER NOT NULL, oz_radius2 INTEGER DEFAULT NULL, oz_angle1 DOUBLE PRECISION NOT NULL, oz_angle2 DOUBLE PRECISION DEFAULT NULL, oz_angle12 DOUBLE PRECISION DEFAULT NULL, oz_move BOOLEAN NOT NULL, oz_line BOOLEAN NOT NULL, oz_reduce BOOLEAN NOT NULL, created_at DATETIME DEFAULT NULL, updated_at DATETIME DEFAULT NULL, PRIMARY KEY(id_point));
#
# BUGFIX: this section had been mangled by an automated Python-2->3 rename
# that replaced every *read* of the local variable `long` (the longitude)
# with the builtin `int`, producing expressions such as `math.degrees(int)`
# and `f = int - int(int)` that raise TypeError at runtime.  The longitude
# variable is restored below and renamed to `lon` so it no longer collides
# with any builtin name.
tp = []  # turn-point descriptors for the JSON task definition
buf = "V,HighEnl=300,AsViolate=True,MinFinAlt=0m,MaxFinAlt=10000m,MaxStartAlt=0m,MaxAlt=0m,MaxAltCorr=50.0m,AltTimeout=0,StartGsp=0km/h,FixRate=10,ValFailed=True"
datafile.write(buf)  # write the task validation parameters
buf = "C301299000000301299000003"
datafile.write(buf)  # write the (dummy) task-declaration timestamp record
# get all the turning points of the task flying now
cursD.execute(
    'select name, latitude, longitude , elevation, type, oz_type, oz_radius1 from POINT')
for row in cursD.fetchall():  # one row per waypoint of the task
    name = row[0]  # waypoint name
    lati = row[1]  # latitude (radians, as stored by SeeYou)
    lon = row[2]   # longitude (radians)
    alti = row[3]  # elevation
    wtyp = row[4]  # waypoint type: start/point/finish/none
    ozty = row[5]  # oz type: next/symmetric/previous (currently unused)
    ozra = row[6]  # oz radius
    # map the SeeYou waypoint types onto the viewer's vocabulary
    if (wtyp == "start"):
        type = "Start"
        oz = "Line"
    elif (wtyp == "finish"):
        type = "Finish"
        oz = "Cylinder"
    else:
        type = "Turnpoint"
        oz = "Cylinder"
    # convert from radians to decimal degrees
    lati = math.degrees(lati)
    lon = math.degrees(lon)
    tpx = {"latitude": lati, "longitude": lon, "name": name,
           "observationZone": oz, "type": type, "radius": ozra, "trigger": "Enter"}
    # add it to the TP list
    tp.append(tpx)
    print("W==>: ", name, lati, lon, alti, type, ozra)
    # Emit the waypoint "C" record for the pseudo CUC file, e.g.:
    # C4238680N00186830ELa Cerdanya - LECD
    N = True
    if lati < 0:  # fold the sign into the N/S hemisphere letter
        lati *= -1.0
        N = False
    f = lati - int(lati)          # fractional part of the degrees
    f = int(f*100000.0)           # five decimal digits of the fraction
    buf = 'C'+("%02d" % int(lati))+("%05d" % f)
    if (N):
        buf += 'N'
    else:
        buf += 'S'
    E = True
    if lon < 0:  # fold the sign into the E/W hemisphere letter
        lon *= -1.0
        E = False
    f = lon - int(lon)
    f = int(f*100000.0)
    buf += ("%03d" % int(lon))+("%05d" % f)
    if (E):
        buf += 'E'
    else:
        buf += 'W'
    buf += name
    buf += "\n"
    # write the waypoint record into the pseudo CUC file
    datafile.write(buf)
# --- Emit the JSON event description and finish the .CUC file ---
event = {"name": eventname, "description": eventname, "taskType": taskType,
         "startOpenTs": 0, "turnpoints": tp, "tracks": tracks}
# dump it in JSON format
j = json.dumps(event, indent=4)
# write it to the JSON file
jsonfile.write(j)
# write the day entry to finish the .CUC file; the template is:
#
# [Starts]
#
# [Day_02/03/2016]
# D02032016-010400000
datafile.write("[Starts]\n")  # section header
datafile.write(" \n")
buf = "[Day_"+local_time.strftime("%d/%m/%Y")+"]\n"
datafile.write(buf)
buf = "D" + local_time.strftime("%d%m%Y") + "-010400000\n"
datafile.write(buf)
# write the trailer in order to complete the format of the .CUC file
buf = cuctail.read()  # read the trailer file
datafile.write(buf)  # write it into the output file
#
# close the files and exit
#
datafile.close()
jsonfile.close()
cuchdr.close()
cuctail.close()
conn.commit()
conn.close()
connG.close()
# Exit status: non-zero when the export is unusable (no pilots at all, or
# pilots without a FLARM id), so calling scripts can reject the .CUC file.
if pn == 0 or nwarnings > 0:
    print("CUC invalid: No pilots found or warnings found ... ", pn, nwarnings)
    exit(-1)
else:
    print("Pilots found ... ", pn)
    exit(0)
| acasadoalonso/SWiface-PHP | ccucxtocuc.py | ccucxtocuc.py | py | 11,451 | python | en | code | 2 | github-code | 13 |
29862139367 | import os
import unittest

import intelmq.lib.test as test
import intelmq.lib.utils as utils
from intelmq.bots.parsers.shadowserver.parser import ShadowserverParserBot

# Raw Shadowserver "Amplification DDoS Victim" CSV sample used as fixture.
with open(os.path.join(os.path.dirname(__file__), 'testdata/event4_honeypot_ddos_amp.csv')) as handle:
    EXAMPLE_FILE = handle.read()
EXAMPLE_LINES = EXAMPLE_FILE.splitlines()

# The Report message that is fed into the parser bot under test.
EXAMPLE_REPORT = {'feed.name': 'Amplification DDoS Victim',
                  "raw": utils.base64_encode(EXAMPLE_FILE),
                  "__type": "Report",
                  "time.observation": "2019-01-01T00:00:00+00:00",
                  "extra.file_name": "2019-01-01-event4_honeypot_ddos_amp.csv"
                  }
# Expected events produced from the fixture, in feed order.
# FIX: the original literal repeated the keys 'source.reverse_dns' and
# 'source.geolocation.region' in the first event; Python keeps only the last
# occurrence of a duplicated key, so the shadowed earlier values were dead.
# They are removed here, keeping the effective values unchanged.
EVENTS = [{'__type': 'Event',
           'feed.name': 'Amplification DDoS Victim',
           'classification.identifier': 'amplification-ddos-victim',
           'classification.taxonomy': 'availability',
           'classification.type': 'ddos',
           'raw': utils.base64_encode('\n'.join([EXAMPLE_LINES[0],
                                                 EXAMPLE_LINES[1]])),
           'time.observation': '2019-01-01T00:00:00+00:00',
           'time.source': '2021-03-28T00:00:02+00:00',
           'source.ip': '107.141.1.2',
           'destination.port': 389,
           'source.asn': 7018,
           'source.geolocation.cc': 'US',
           'source.geolocation.city': 'VISALIA',
           'source.geolocation.region': 'CALIFORNIA',
           'extra.end_time': '2021-03-28T00:20:22+00:00',
           'extra.public_source': 'CISPA',
           'extra.source.naics': 517311,
           'extra.source.sector': 'Communications, Service Provider, and Hosting Service',
           'malware.name': 'ddos-amplification',
           'source.reverse_dns': '107-141-x-x.lightspeed.frsnca.sbcglobal.net',
           },
          {'__type': 'Event',
           'feed.name': 'Amplification DDoS Victim',
           'classification.identifier': 'amplification-ddos-victim',
           'classification.taxonomy': 'availability',
           'classification.type': 'ddos',
           'raw': utils.base64_encode('\n'.join([EXAMPLE_LINES[0],
                                                 EXAMPLE_LINES[2]])),
           'time.observation': '2019-01-01T00:00:00+00:00',
           'time.source': '2021-03-28T00:00:02+00:00',
           'source.ip': '74.59.3.4',
           'destination.port': 389,
           'source.reverse_dns': 'modemcablex-x-59-74.mc.videotron.ca',
           'source.asn': 5769,
           'source.geolocation.cc': 'CA',
           'source.geolocation.city': 'CHICOUTIMI',
           'source.geolocation.region': 'QUEBEC',
           'extra.end_time': '2021-03-28T00:13:50+00:00',
           'extra.public_source': 'CISPA',
           'extra.source.naics': 517311,
           'extra.source.sector': 'Communications, Service Provider, and Hosting Service',
           'malware.name': 'ddos-amplification',
           },
          ]
class TestShadowserverParserBot(test.BotTestCase, unittest.TestCase):
    """
    A TestCase for a ShadowserverParserBot.
    """

    @classmethod
    def set_bot(cls):
        # Wire the test harness to the parser class and the report fixture.
        cls.bot_reference = ShadowserverParserBot
        cls.default_input_message = EXAMPLE_REPORT

    def test_event(self):
        """ Test if correct Event has been produced. """
        self.run_bot()
        # Each expected event must equal the bot's i-th output message.
        for i, EVENT in enumerate(EVENTS):
            self.assertMessageEqual(i, EVENT)
# Allow running this test module directly via the stdlib runner.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| certtools/intelmq | intelmq/tests/bots/parsers/shadowserver/test_honeypot_ddos_amp.py | test_honeypot_ddos_amp.py | py | 3,576 | python | en | code | 856 | github-code | 13 |
24622081324 | # needs transformers
# path to the intermediate dataset folder: inter_path
# generalized few shot learning parameter: generalized = True
import os
import random
import pandas as pd
import numpy as np
import csv
import tensorflow as tf
import torch
from sklearn.model_selection import train_test_split
import textwrap
import progressbar
import keras
import argparse
import time
import datetime
import json
from keras.preprocessing.sequence import pad_sequences
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig, BertTokenizer
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import classification_report
def parse_argument(argv=None):
    """Parse the command-line options into a plain dict.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse.  ``None`` (the default, and the previous
        behaviour) makes argparse read ``sys.argv`` as before; passing an
        explicit list makes the function usable from tests.

    Returns
    -------
    dict
        Mapping of option name to parsed value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--inter_data_path', type=str, default='./fsl_dataset/')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--num_samples_per_class', type=int, default=1)
    parser.add_argument('--learning_rate', type=float, default=2e-6)
    parser.add_argument('--epochs', type=int, default=3)
    parser.add_argument('--generalized', type=str, default="no")
    parser.add_argument('--max_seq_len', type=int, default=48)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--save_folder_name', type=str, default="./ckpt/")
    parser.add_argument('--seed', type=int, default=21)
    args = parser.parse_args(argv)
    # vars(args) is the documented equivalent of args.__dict__
    return vars(args)
def att_masking(input_ids):
    """Build BERT attention masks: 1 for real tokens (id > 0), 0 for padding."""
    return [[int(token_id > 0) for token_id in sent] for sent in input_ids]
def label_list_and_additional_col(train_tsv):
    """Map the string labels in column 0 to integer ids and store them in column 2.

    Ids follow the sorted order of the unique labels (np.unique order).
    Returns the label->id dict and the same (mutated) frame.
    """
    label_dict = {label: idx
                  for idx, label in enumerate(np.unique(train_tsv[0].to_list()))}
    train_tsv[2] = [label_dict[label] for label in train_tsv[0].to_list()]
    return label_dict, train_tsv
def input_id_maker(dataf, tokenizer, max_seq_len):
    """Tokenize column 1 of *dataf* into padded BERT input-id rows.

    Returns (input_ids, lengths): an (n, max_seq_len) integer array of token
    ids padded/truncated to max_seq_len, plus each row's pre-padding length.
    """
    input_ids = []
    lengths = []
    for i in progressbar.progressbar(range(len(dataf[1]))):
        sen = dataf[1].iloc[i]
        sen = tokenizer.tokenize(sen)
        CLS = tokenizer.cls_token
        SEP = tokenizer.sep_token
        if(len(sen) > max_seq_len):
            print("happens")  # NOTE(review): debug leftover; consider removing or logging
            # keep only the trailing max_seq_len tokens
            # NOTE(review): [CLS]/[SEP] are appended below, making the row
            # max_seq_len + 2 long, so pad_sequences (truncating="post") cuts
            # the final [SEP]; truncating to max_seq_len - 2 here looks like
            # the intent -- confirm before changing model inputs.
            sen = sen[len(sen)-max_seq_len:]
        sen = [CLS] + sen + [SEP]
        encoded_sent = tokenizer.convert_tokens_to_ids(sen)
        input_ids.append(encoded_sent)
        lengths.append(len(encoded_sent))
    # zero-pad (and post-truncate) every row to exactly max_seq_len ids
    input_ids = pad_sequences(input_ids, maxlen=max_seq_len, value=0, dtype="long", truncating="post", padding="post")
    return input_ids, lengths
def load_model_and_tokenizer(config, label_dict):
    """Load a bert-base-uncased classifier sized to *label_dict* and its tokenizer.

    The classification head has len(label_dict) outputs and the model is moved
    to config["device"] before being returned.

    Cleanup: the original assigned model_type / model_class / config_class /
    tokenizer_class locals that were never used (or were trivial aliases);
    they are removed -- behaviour is unchanged.
    """
    model_name = 'bert-base-uncased'
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertForSequenceClassification.from_pretrained(model_name, num_labels=len(label_dict))
    model.to(config["device"])
    return model, tokenizer
def prepare_train_data(train_tsv, config, tokenizer):
    """Tokenize the training frame and build train/validation DataLoaders.

    NOTE(review): the "validation" set is the first 1/9 of the *training*
    frame itself, so every validation example is also trained on and the
    reported validation accuracy is optimistic.  Confirm whether a held-out
    split was intended.
    """
    train_input_ids, train_lengths = input_id_maker(train_tsv, tokenizer, config["max_seq_len"])
    validation_input_ids, validation_lengths = input_id_maker(train_tsv[:int(len(train_tsv)/9)], tokenizer, config["max_seq_len"])
    train_attention_masks = att_masking(train_input_ids)
    validation_attention_masks = att_masking(validation_input_ids)
    # column 2 holds the integer label ids added by label_list_and_additional_col
    train_labels = train_tsv[2].to_numpy().astype('int')
    validation_labels = train_tsv[:int(len(train_tsv)/9)][2].to_numpy().astype('int')
    train_inputs = train_input_ids
    validation_inputs = validation_input_ids
    train_masks = train_attention_masks
    validation_masks = validation_attention_masks
    # wrap everything as tensors for the TensorDatasets below
    train_inputs = torch.tensor(train_inputs)
    train_labels = torch.tensor(train_labels)
    train_masks = torch.tensor(train_masks)
    validation_inputs = torch.tensor(validation_inputs)
    validation_labels = torch.tensor(validation_labels)
    validation_masks = torch.tensor(validation_masks)
    batch_size = config["batch_size"]
    train_data = TensorDataset(train_inputs, train_masks, train_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size = batch_size)
    validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
    validation_sampler = RandomSampler(validation_data)
    validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size = batch_size)
    return train_data, train_sampler, train_dataloader, validation_data, validation_sampler, validation_dataloader
def flat_accuracy(preds, labels):
    """Fraction of rows whose arg-max prediction matches the flattened label."""
    guesses = preds.argmax(axis=1).flatten()
    truth = labels.flatten()
    return np.sum(guesses == truth) / len(truth)
def train(config, model, tokenizer, train_data, train_sampler, train_dataloader, validation_data, validation_sampler, validation_dataloader):
    """Fine-tune *model* for config["epochs"] epochs with AdamW + linear warm-up.

    Runs a validation accuracy pass after each epoch and returns the trained
    model together with the (unchanged) tokenizer.

    NOTE(review): train_data / train_sampler / validation_data /
    validation_sampler are accepted but never used (the dataloaders already
    wrap them); max_grad_norm, warmup_proportion, t0, eval_loss,
    nb_eval_examples and the collected loss_values are likewise unused.
    """
    lr = config["learning_rate"]
    device = config["device"]
    max_grad_norm = 1.0
    epochs = config["epochs"]
    num_total_steps = len(train_dataloader)*epochs
    num_warmup_steps = 1000
    warmup_proportion = float(num_warmup_steps) / float(num_total_steps)  # 0.1
    optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = num_warmup_steps, num_training_steps = num_total_steps)
    # seed every RNG so runs are reproducible
    seed_val = config["seed"]
    np.random.seed(seed_val)
    torch.manual_seed(seed_val)
    torch.cuda.manual_seed_all(seed_val)
    loss_values = []
    # For each epoch...
    for epoch_i in range(0, epochs):
        print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
        print('Training...')
        t0 = time.time()
        total_loss = 0
        model.train()
        for step, batch in enumerate(train_dataloader):
            # progress line every 40 batches
            if step % 40 == 0 and not step == 0:
                print(' Batch {:>5,} of {:>5,}. '.format(step, len(train_dataloader)))
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            model.zero_grad()
            # passing labels makes the model return the loss as outputs[0]
            outputs = model(b_input_ids, attention_mask=b_input_mask, labels=b_labels)
            loss = outputs[0]
            total_loss += loss.item()
            loss.backward()
            # gradient clipping at norm 1.0 to stabilise fine-tuning
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
        avg_train_loss = total_loss / len(train_dataloader)
        loss_values.append(avg_train_loss)
        print("")
        print(" Average training loss: {0:.2f}".format(avg_train_loss))
        print("")
        print("Running Validation...")
        t0 = time.time()
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in validation_dataloader:
            batch = tuple(t.to(device) for t in batch)
            b_input_ids, b_input_mask, b_labels = batch
            # no gradients needed for the forward pass during evaluation
            with torch.no_grad():
                outputs = model(b_input_ids, attention_mask=b_input_mask)
            logits = outputs[0]
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()
            tmp_eval_accuracy = flat_accuracy(logits, label_ids)
            eval_accuracy += tmp_eval_accuracy
            nb_eval_steps += 1
        # Report the final accuracy for this validation run.
        print(" Accuracy: {0:.2f}".format(eval_accuracy/nb_eval_steps))
        print("")
    print("Training complete!")
    return model, tokenizer
if __name__ == "__main__":
    # load settings
    config = parse_argument()
    path = config["inter_data_path"]
    train_tsv = pd.read_csv(path + "train_seen.tsv", sep='\t', header=None)
    generalized = config["generalized"]
    shots = config["num_samples_per_class"]  # NOTE(review): read but never used below
    print("--------------READING FEW-SHOT DATASET---------------")
    # generalized FSL evaluates on seen+novel classes; otherwise novel only
    if generalized == "yes":
        test_tsv = pd.read_csv(path + "test_joint.tsv", sep='\t', header=None)
    else:
        test_tsv = pd.read_csv(path + "test_novel.tsv", sep="\t", header=None)
    train_tsv = train_tsv.dropna()
    # NOTE(review): test_tsv is loaded and cleaned but never consumed by the
    # training below -- presumably used by a separate evaluation script.
    test_tsv = test_tsv.dropna()
    train_label_dict, train_tsv = label_list_and_additional_col(train_tsv)
    print("--------------LOADING BERT MODEL---------------\n")
    model, tokenizer = load_model_and_tokenizer(config, train_label_dict)
    print("--------------PREPARING TRAIN DATA---------------\n")
    train_data, train_sampler, train_dataloader, validation_data, validation_sampler, validation_dataloader = prepare_train_data(train_tsv, config, tokenizer)
    print("----------------STARTING TRAINING------------------\n\n")
    model, tokenizer = train(config, model, tokenizer, train_data, train_sampler, train_dataloader, validation_data, validation_sampler, validation_dataloader)
    print("----------------SAVING MODEL----------------------\n")
    output_dir = config["save_folder_name"]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    print("Saving model to %s" % output_dir)
    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
    model_to_save.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
| Observeai-Research/NLI-FSL | proto-train.py | proto-train.py | py | 9,261 | python | en | code | 0 | github-code | 13 |
27237331599 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from zope.interface import implements
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
try:
from Products.LinguaPlone.interfaces import ITranslatable
LINGUAPLONE_SUPPORT = True
except ImportError:
# Linguaplone not installed
LINGUAPLONE_SUPPORT = False
from plone.app.vocabularies.catalog import SearchableTextSourceBinder
from plone.app.form.widgets.uberselectionwidget import UberSelectionWidget
from plone.app.controlpanel.widgets import MultiCheckBoxVocabularyWidget
from plone.memoize import instance
from unice.portlet.telechargement import TelechargementPortletMessageFactory as _
from zope.i18nmessageid import MessageFactory
__ = MessageFactory("plone")
class ITelechargementPortlet(IPortletDataProvider):
    """Zope schema describing the settings of the download portlet.

    Field titles/descriptions are end-user-facing French strings and are
    kept verbatim (they are translated through the message factory).
    """

    # Title shown only on the @@manage-portlets screen.
    portlet_title = schema.TextLine(
        title=_(u'Titre du portlet dans le manager'),
        description=_('help_portlet_title',
                      default=u'Titre affiché dans l\'ecran "@@manage-portlets". '
                      'Laisser vide pour "Telechargement portlet".'),
        required=False,
    )

    # Rendered portlet header; empty means "use the selected item's title".
    custom_header = schema.TextLine(
        title=_(u"Titre du portlet"),
        description=_('help_custom_header',
                      default=u"Laisser vide pour afficher le titre le l'élément sélectionné"),
        required=False,
    )

    # Portal-relative path of the content object to display.
    telechargement = schema.Choice(title=_(u"Elément à afficher"),
                                   required=True,
                                   source=SearchableTextSourceBinder(
                                       {},
                                       default_query='path:'
                                   )
                                   )

    # Extra CSS id attribute added to the portlet wrapper.
    extra_id = schema.TextLine(
        title=_(u'Identifiant CSS à ajouter au portlet'),
        description=_('help_extra_id',
                      default=u""),
        default=u'',
        required=False,
    )

    # Extra CSS classes added to the portlet wrapper.
    extra_css = schema.TextLine(
        title=_(u'Classes CSS à ajouter au portlet'),
        description=_('help_extra_css',
                      default=u""),
        default=u'',
        required=False,
    )

    # When true, the portlet header is not rendered at all.
    omit_header = schema.Bool(
        title=_(u"Masquer le header du portlet"),
        description=_('help_omit_header',
                      default=u""),
        required=True,
        default=False)
class Assignment(base.Assignment):
    """Persistent portlet assignment storing the values from the add/edit forms.

    The class-level attributes double as defaults for assignments that were
    persisted before a field existed.
    """
    implements(ITelechargementPortlet)

    portlet_title = u''
    telechargement = None
    extra_css = u''
    extra_id = u''
    custom_header = u""
    omit_header = False

    def __init__(self, portlet_title=u'', telechargement=None, extra_css=u'', extra_id=u'', custom_header=None, omit_header=None):
        self.portlet_title = portlet_title
        self.telechargement = telechargement
        self.custom_header = custom_header
        self.omit_header = omit_header
        self.extra_css = extra_css
        self.extra_id = extra_id

    @property
    def title(self):
        # Shown in the @@manage-portlets screen; falls back to the
        # translated default when no explicit manager title was set.
        msg = __(u"Telechargement portlet")
        return self.portlet_title or msg
class Renderer(base.Renderer):
    """Renders the portlet via the telechargementportlet.pt template."""

    render = ViewPageTemplateFile('telechargementportlet.pt')

    @instance.memoizedproperty
    def telechargement(self):
        """Resolve the stored portal-relative path to the content object (or None)."""
        if not self.data.telechargement:
            return None
        portal_path = getToolByName(self.context, 'portal_url').getPortalPath()
        # restrictedTraverse with a default returns None instead of raising
        # when the path no longer resolves.
        item = self.context.restrictedTraverse(
            str(portal_path + self.data.telechargement),
            None
        )
        return item

    @instance.memoizedproperty
    def subtelechargement(self):
        """Per ATSuccessStoryFolder child: its title and its last ATSuccessStory."""
        items = []
        folders = list(self.telechargement.getFolderContents({'portal_type': 'ATSuccessStoryFolder'}, full_objects=True))
        for folder in folders:
            item = {}
            item['Title'] = folder.Title()
            telechargement = list(folder.getFolderContents({'portal_type': 'ATSuccessStory'}, full_objects=True))
            # keep only the last entry; [-1:] yields [] when the folder is empty
            item['telechargement'] = telechargement[-1:]
            items.append(item)
        return items

    def header(self):
        # An explicit custom header wins; otherwise the selected item's title.
        return self.data.custom_header or self.telechargement.Title()
class AddForm(base.AddForm):
    """Add form for the portlet; fields come from the schema interface."""
    form_fields = form.Fields(ITelechargementPortlet)
    # UberSelectionWidget provides the content-browsing picker for the path.
    form_fields['telechargement'].custom_widget = UberSelectionWidget

    def create(self, data):
        # data maps 1:1 onto Assignment's keyword arguments.
        return Assignment(**data)
class EditForm(base.EditForm):
    """Edit form for the portlet; mirrors AddForm's field setup."""
    form_fields = form.Fields(ITelechargementPortlet)
    form_fields['telechargement'].custom_widget = UberSelectionWidget
| ComUCA/EssaiSite | Plone/zinstance/src/unice.portlet.telechargement/unice/portlet/telechargement/telechargementportlet.py | telechargementportlet.py | py | 4,592 | python | en | code | 0 | github-code | 13 |
32778984379 | from telegram import Update
from telegram.ext import ContextTypes
"""Обрабатываем входное сообщение. При возможности выполняем вычисления и возвращаем результат."""
async def calculate(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
msg = update.message.text
user_name = update.effective_user.first_name
result = handlerInput("Enter", msg, user_name)
await update.message.reply_text(f'Результат: {result}')
"""Обмен данными с контроллером."""
def handlerInput(command, msg, user_name):
from app.controller.controller import handlerEnter
result = handlerEnter(command, msg, user_name)
return result
| AlekseyNizhnikov/Home_Work_Python | Work_9/app/view/bot_command.py | bot_command.py | py | 767 | python | ru | code | 0 | github-code | 13 |
41996003156 | from todoist import TodoistAPI
import pandas as pd
class TodoistExtractor:
    """Thin wrapper over the Todoist sync API for pulling activity pages."""

    def __init__(self, token, pages, limit):
        # token: API token; pages: number of activity pages to fetch;
        # limit: events per page (the API caps this -- TODO confirm the max).
        self.token = token
        self.api = TodoistAPI(self.token)
        self.api.sync()  # initial sync so later endpoint calls have state
        self.pages = pages
        self.limit = limit

    def get_todoist_activities(self):
        """Fetch self.pages pages of activity events, concatenated into one frame.

        NOTE(review): pd.concat inside the loop is quadratic in the number of
        pages, and when self.pages == 0 the function returns the empty *list*
        rather than a DataFrame -- callers using DataFrame methods would break.
        """
        activities = pd.DataFrame()
        all_activities = []
        for p in range(self.pages):
            if p > 0:
                activities = all_activities  # carry forward previous pages
            activities_on_page_df = pd.DataFrame(self.api.activity.get(page=p, limit=self.limit)['events'])
            all_activities = pd.concat([activities, activities_on_page_df])
        return all_activities

    def get_parent_id(self, object_id):
        """Return the parent_id of item *object_id*, or None.

        None is returned both when no parent information is present and when
        the API response is not dict-like (the AttributeError path).
        """
        object_id = str(object_id)
        info_list = [self.api.items.get_by_id(object_id)]
        for item in info_list:
            try:
                if ('item' in item.keys()) and ('parent_id' in item['item'].keys()):
                    return item['item']['parent_id']
            except AttributeError:
                return None
41115703251 | import os
import zlib
import struct
import logging
import datetime
from array import array
from io import StringIO
from mogul.media import localize
_ = localize()
from mogul.media import (MediaContainer, MediaEntry, MediaStream,
Tag, TagTarget, TagGroup, MediaHandlerError)
from mogul.media.image import Image
from mogul.media.xmp import XMPHandler
__spec_version__ = '2'
def enum(**enums):
    """Build a simple enumeration class whose attributes are the given keywords."""
    members = dict(enums)
    return type('Enum', (), members)
# PNG colour-type codes from the IHDR chunk; `unknown` (-1) is a local sentinel.
PNG_COLOR_TYPE = enum(unknown=-1, greyscale=0, truecolor=2, indexed=3,
                      greyscale_alpha=4, truecolor_alpha=6)
class PNGError(Exception):
    """Raised on malformed or unsupported PNG data."""
    pass
class PNGImage(Image):
    """In-memory PNG image: pixel metadata plus ancillary chunk payloads."""

    def __init__(self):
        Image.__init__(self, 'image/png')
        self._color_type = PNG_COLOR_TYPE.unknown
        self._palette = None                # raw PLTE bytes
        self._image_data = None
        self._transparency = None           # tRNS payload
        self._gamma = -1                    # gAMA (-1 = unset)
        self._primary_chromaticies = None   # cHRM
        self._icc_profile = None            # iCCP
        self._srgb_intent = -1              # sRGB (-1 = unset)
        self._text = []                     # tEXt/zTXt/iTXt entries
        self._background_color = None       # bKGD (int, tuple, or grey value)
        self._physical_dimensions = None    # pHYs (ppu_x, ppu_y, unit specifier)
        self._significant_bits = None       # sBIT raw payload
        self._suggested_palette = None      # sPLT: name -> (bytes_per_sample, data)
        self._histogram = None              # hIST (first frequency, rest of payload)
        self._last_modification = None      # tIME as a datetime

    # property-via-locals() idiom: the inner function defines fget, and
    # property(**locals()) below turns it into a read-only property.
    def bytes_per_row():
        def fget(self):
            # NOTE(review): `bits_per_sample/8` floors only on Python 2; under
            # Python 3 it yields a float -- confirm the intended interpreter.
            # NOTE(review): a colour type outside the five handled cases
            # leaves `bpr` unbound and raises UnboundLocalError.
            if self.color_type == PNG_COLOR_TYPE.indexed:
                bpr = self.width
            elif self.color_type == PNG_COLOR_TYPE.truecolor:
                bpr = self.width * (self.bits_per_sample/8) * 3
            elif self.color_type == PNG_COLOR_TYPE.truecolor_alpha:
                bpr = self.width * (self.bits_per_sample/8) * 4
            elif self.color_type == PNG_COLOR_TYPE.greyscale:
                bpr = self.width * (self.bits_per_sample/8)
            elif self.color_type == PNG_COLOR_TYPE.greyscale_alpha:
                bpr = self.width * ((self.bits_per_sample/8) + 1)
            return bpr
        return locals()
    bytes_per_row = property(**bytes_per_row())
class PNGHandler(object):
def __init__(self):
self.logger = logging.getLogger('mogul.media')
self._chunks = {
b'IHDR': (_('Header'), self._read_header, self._write_header),
b'PLTE': (_('Palette'), None, None),
b'IDAT': (_('Image Data'), self._read_image_data, None),
b'IEND': (_('End Of File'), None, None),
b'tRNS': (_('Transparency'), None, None),
b'gAMA': (_('Image Gamma'), None, None),
b'cHRM': (_('Primary Chromatacies'), None, None),
b'sRGB': (_('Standard RGB Color Space'), None, None),
b'iCCP': (_('Embedded ICC Profile'), None, None),
b'tEXt': (_('Textual Data'), self._read_text, None),
b'zTXt': (_('Compressed Textual Data'), self._read_text, None),
b'iTXt': (_('International Textual Data'), self._read_itext, None),
b'bKGD': (_('Background Color'), self._read_background_color, None),
b'pHYs': (_('Physical Dimensions'), self._read_physical_dimensions, None),
b'sBIT': (_('Significant Bits'), self._read_significant_bits, None),
b'sPLT': (_('Suggested Palette'), self._read_suggested_palette, None),
b'hIST': (_('Palette Histogram'), self._read_histogram, None),
b'tIME': (_('Image Last Modification Time'), self._read_last_modification, None),
}
self.filename = ''
"""The filename to use for reading or writing."""
self._ds = None
self._idat = []
"""Collect the IDAT chunk data until all image data has been read as
the chunks need to be decompressed together"""
@staticmethod
def can_handle(ds):
"""Determine if PNGHandler can parse the stream."""
data = ds.read(8)
ds.seek(-8, os.SEEK_CUR)
if data == '\x89\x50\x4e\x47\x0d\x0a\x1a\x0a':
return 'image/png'
else:
return None
def read(self, filename, doctype=None):
with open(filename, 'rb') as ds:
if doctype is None:
doctype = self.can_handle(ds)
if doctype is not None:
try:
self.read_stream(ds)
except EOFError:
pass
else:
raise MediaHandlerError("PNGHandler: Unable to handle file '%s'" % filename)
def write(self, image, filename=''):
if filename != '':
self.filename = filename
def read_stream(self, ds, doctype=None):
if doctype is None:
doctype = self.can_handle(ds)
if doctype is not None:
self._ds = ds
self.container = MediaContainer()
self._media_entry = MediaEntry()
self._media_entry.container = self.container
self.container.entries.append(self._media_entry)
try:
while True:
self._read_box('root')
except StopIteration:
self._combine_idat()
else:
raise MediaHandlerError("PNGHandler: Unable to handle stream")
def _read_box(self, parent):
box_size, box_id = struct.unpack('>L4s', self._ds.read(8))
name = 'Unknown'
read_handler = None
try:
name, read_handler, _write_handler = self._chunks[box_id]
except KeyError:
pass
self.logger.debug('PNG: %s - %s' % (box_id, name))
if box_size > 0:
if read_handler is not None:
read_handler(parent, box_id, box_size)
else:
self._ds.seek(box_size, os.SEEK_CUR)
_crc32 = struct.unpack('>L', self._ds.read(4))
if box_size == 0:
raise StopIteration
def _read_header(self, parent, box_id, box_size):
stream = self._image.stream
stream.width, stream.height, self._image.bits_per_sample, \
self._image.color_type, self._image.compression_method, \
self._image.filter_method, self._image.interlace_method = \
struct.unpack('>LLBBBBB', self._ds.read(13))
def _read_image_data(self, parent, box_id, box_size):
self._idat.append(self._ds.read(box_size))
def _read_palette(self, parent, box_id, box_size):
self._image._palette = self._ds(box_size)
def _read_text(self, parent, box_id, box_size):
length, keyword = self._read_string()
keyword = keyword.decode('Latin_1')
size_read = length+1
if box_id == 'zTXt':
compression_type = ord(self._ds.read(1))
size_read += 1
value = self._ds.read(box_size - size_read)
if box_id == 'zTXt':
if compression_type == 0:
value = zlib.decompress(value)
else:
raise PNGError('Cannot handle compression type %d in text.' %
compression_type)
value = value.decode('Latin_1')
tag = Tag(keyword, value)
self._image.tag_group.tags.append(tag)
def _read_itext(self, parent, box_id, box_size):
length, keyword = self._read_string()
keyword = keyword.decode('Latin_1')
size_read = length+1
compression_flag = ord(self._ds.read(1))
compression_method = ord(self._ds.read(1))
size_read += 2
length, language = self._read_string()
size_read += length+1
if length == 0:
locale = ('und', 'und')
elif language.startswith('x-'):
locale = (language[2:], 'und')
else:
try:
locale = language.split('-')
except:
locale = (language, 'und')
length, tx_keyword = self._read_string()
tx_keyword = tx_keyword.decode('UTF-8')
size_read += length+1
value = self._ds.read(box_size - size_read)
if compression_flag == 1:
if compression_method == 0:
value = zlib.decompress(value)
else:
raise PNGError('Cannot handle compression type %d in text.' %
compression_method)
value = value.decode('UTF-8')
if keyword == 'XML:com.adobe.xmp':
handler = XMPHandler()
ds = StringIO(value)
handler.read_stream(ds)
group = TagGroup('XMP')
for tag in handler.tags:
t = Tag()
t.set_to(tag)
group.append(t)
self._image.tag_group.append(group)
else:
tag = Tag(keyword, value)
tag.locale = locale
self._image.tag_group.tags.append(tag)
def _read_background_color(self, parent, box_id, box_size):
data = self._ds.read(box_size)
if box_size == 1:
self._image._background_color = ord(data[0])
elif box_size == 2:
self._image._background_color = struct.unpack('>H', data)
elif box_size == 6:
self._image._background_color = struct.unpack('>HHH', data)
def _read_physical_dimensions(self, parent, box_id, box_size):
ppu_x, ppu_y, specifier = struct.unpack('>LLB', self._ds.read(9))
self._image._physical_dimensions = (ppu_x, ppu_y, specifier)
def _read_significant_bits(self, parent, box_id, box_size):
self._image._significant_bits = self._ds.read(box_size)
def _read_suggested_palette(self, parent, box_id, box_size):
bytes_left = box_size
size, name = self._read_string()
name = name.decode('latin-1')
bytes_left -= (size + 1)
sample_depth = ord(self._ds.read(1))
bytes_left -= 1
if sample_depth == 8:
bytes_per_sample = 1
else:
bytes_per_sample = 2
if self._image._suggested_palette is None:
self._image._suggested_palette = {}
palette = (bytes_per_sample, self._ds.read(bytes_left))
self._image._suggested_palette[name] = palette
def _read_histogram(self, parent, box_id, box_size):
rest = box_size - 2
frequency, data = struct.unpack('>H%ds' % rest, self._ds.read(box_size))
self._image._histogram = (frequency, data)
def _read_last_modification(self, parent, box_id, box_size):
year, month, day, hour, minute, second = \
struct.unpack('>HBBBBB', self._ds.read(box_size))
self._image._last_modification = datetime.datetime(year, month, day,
hour, minute, second)
    def _write_box(self, name, data, length):
        """Emit one PNG chunk: 4-byte length, 4-byte type, payload, CRC.

        The CRC covers the chunk type and payload.
        """
        if length > 0:
            crc = zlib.crc32(name)
            crc = zlib.crc32(data, crc)
            self._writes.write(struct.pack('>L4s%dsL' % length,
                                           length, name, data,
                                           crc))
        else:
            # Zero-length chunk (e.g. IEND): CRC of the type bytes only.
            self._writes.write(struct.pack('>L4sL', 0, name, zlib.crc32(name)))
def _write_header(self, width, height, depth, color_type, compression=0,
filter_=0, interlace=0):
data = struct.pack('>LLbbbbb', width, height, depth, color_type,
compression, filter_, interlace)
self._write_box(b'IHDR', data, 9)
    def _write_palette(self, data, length):
        # PLTE chunk: the palette bytes are emitted verbatim.
        self._write_box(b'PLTE', data, length)
    def _write_data(self, data, length):
        """Deflate *data* and emit it as a single IDAT chunk.

        The ``length`` parameter is effectively ignored: it is recomputed
        from the compressed payload before writing.
        """
        compress = zlib.compressobj(self.compression)
        # NOTE(review): array.tostring() was removed in Python 3.9; modern
        # code would call tobytes() -- confirm the supported version.
        compressed = compress.compress(data.tostring())
        compressed += compress.flush()
        length = len(compressed)
        self._write_box(b'IDAT', compressed, length)
def _write_argb_data(self, width, height, argb_data, length):
data = array('B')
for y in range(height):
data.append(0)
for x in range(width):
offset = ((y * width) + x) * 4
data.append(ord(argb_data[offset+1]))
data.append(ord(argb_data[offset+2]))
data.append(ord(argb_data[offset+3]))
data.append(ord(argb_data[offset]))
self._write_data(data)
    def _write_end(self):
        # IEND: empty payload terminates the PNG stream.
        # NOTE(review): the data argument is the str '' rather than b''; it
        # is unused in the zero-length branch of _write_box, so harmless.
        self._write_box(b'IEND', '', 0)
    def _combine_idat(self):
        """Inflate all buffered IDAT payloads into the image's pixel stream."""
        decompressor = zlib.decompressobj()
        for idat in self._idat:
            decompressed = decompressor.decompress(idat)
            # NOTE(review): array.fromstring() was removed in Python 3.9;
            # modern code would use frombytes() -- confirm target version.
            self._image.stream.data.fromstring(decompressed)
        del self._idat[:]  # IDAT chunks are consumed once combined
    def _read_string(self):
        """Read a NUL-terminated string from the stream.

        Returns ``(length, text)`` where *length* excludes the terminator.
        NOTE(review): on Python 3 ``read(1)`` yields bytes, which never
        compare equal to the str sentinel, and EOF returns an empty value
        forever -- either would loop endlessly; confirm the intended Python
        version / add an EOF guard.
        """
        l = 0
        c = ''
        s = ''
        while c != '\x00':
            c = self._ds.read(1)
            s += c
            l += 1
        return (l-1, str(s[:-1]))
| sffjunkie/media | src/media/png.py | png.py | py | 13,159 | python | en | code | 0 | github-code | 13 |
24334579415 | import numpy as np
V = np.zeros(101)           # state values for capital 0..100 (V[0]=V[100]=0)
policy = np.zeros(99)       # greedy stake chosen for states 1..99
ph = 0.25                   # probability the coin comes up heads
theta = 10**-100            # convergence threshold for value iteration
action_value = np.zeros(1)  # scratch buffer reused inside Iteration()
sweep = 0                   # completed sweeps; selects a column of Value
Value = np.zeros((99, 4))   # value snapshots: first three sweeps + final
def Iteration():
    """Run value iteration for the gambler's problem.

    Sweeps states 1..99 until the largest value change drops below the
    module-level ``theta``; fills ``V`` (state values), ``policy`` (greedy
    stakes) and ``Value`` (snapshots of sweeps 1-3 and the final sweep)
    in place.
    """
    global sweep
    while True:
        delta = 0
        for s in range(1, 100):
            v = V[s]
            action_values = []
            for action in range(1, min(s, 100 - s) + 1):
                # A winning flip that reaches capital 100 pays reward 1;
                # every other transition has reward 0.
                reward = 1 if s + action >= 100 else 0
                action_values.append(
                    ph * (reward + V[s + action]) + (1 - ph) * V[s - action]
                )
            V[s] = max(action_values)
            # Greedy stake: +1 because actions are 1-based.
            policy[s - 1] = action_values.index(max(action_values)) + 1
            delta = max(delta, abs(v - V[s]))
            # Record the first three sweeps; keep overwriting the last column.
            Value[s - 1][min(sweep, 3)] = V[s]
        sweep += 1
        if delta < theta:
            break
if __name__ == '__main__':
    Iteration()
    # Persist results for later plotting/analysis.
    np.save("Value_function", Value)
    np.save("policy", policy)
| NormanZhou123/Reinforcement_Learning_Practice | A3/part1/part1.py | part1.py | py | 1,147 | python | en | code | 0 | github-code | 13 |
21736886850 | import discord
from discord.ext import commands
class main_S(commands.Cog):
    """Cog exposing the ``/info`` slash command."""

    def __init__(self, bot):
        # The previous manual registration pointed at a non-existent
        # ``self.joke_slash`` attribute (raising AttributeError on load)
        # and duplicated the decorator registration below, so it was removed.
        self.bot = bot

    @commands.slash_command(name="info")
    async def userinfo(self, ctx):
        """Reply with the invoking user's name."""
        await ctx.respond(f"{ctx.author.name}")
def setup(bot):
bot.add_cog(main_S(bot)) | jasonchanjj123/waifu-bot | commands/user.py | user.py | py | 375 | python | en | code | 1 | github-code | 13 |
27682679843 | from datetime import date, datetime, timedelta
def get_birthdays_per_week(users):
    """Group colleagues to congratulate during the next 7 days by weekday.

    Args:
        users: list of dicts with ``name`` (str) and ``birthday`` (date).

    Returns:
        dict mapping weekday name -> list of names. Birthdays falling on a
        weekend are shifted to "Monday". Only birthdays within the next
        7 days (starting today) are included -- the previous version
        returned every remaining birthday of the year.
    """
    if not users:
        return {}

    today = date.today()
    birthdays_per_week = {}

    for user in users:
        birthday = user["birthday"]
        # Move the birthday into the current year; Feb 29 falls back to
        # Feb 28 in non-leap years instead of raising ValueError.
        try:
            upcoming = birthday.replace(year=today.year)
        except ValueError:
            upcoming = birthday.replace(year=today.year, day=28)
        # Already passed this year -> the next occurrence is next year.
        if upcoming < today:
            try:
                upcoming = birthday.replace(year=today.year + 1)
            except ValueError:
                upcoming = birthday.replace(year=today.year + 1, day=28)
        # Keep only birthdays inside the coming week.
        if (upcoming - today).days >= 7:
            continue
        day_name = upcoming.strftime("%A")
        if day_name in ("Saturday", "Sunday"):
            day_name = "Monday"
        birthdays_per_week.setdefault(day_name, []).append(user["name"])

    return birthdays_per_week
if __name__ == "__main__":
    # Smoke test with early-September birthdays.
    users = [
        {"name": "Jan Kim", "birthday": datetime(1976, 9, 5).date()},
        {"name": "Jan Gou", "birthday": datetime(1996, 9, 4).date()},
        {"name": "Jan Kerr", "birthday": datetime(1976, 9, 10).date()},
        {"name": "Jan Kum", "birthday": datetime(1976, 9, 6).date()}
    ]
    result = get_birthdays_per_week(users)
    print(result)
| mikekuian/hw8 | main.py | main.py | py | 1,881 | python | uk | code | 0 | github-code | 13 |
4819385393 | #! python3
# resizeAndAddLogo.py - Resizes all images in current working directory to
# fit in 300x300 square, and adds logo.png to the lower-right corner
import os
from PIL import Image
os.chdir('C:\\Users\\ElHassen\\Desktop\\Python\\learningpy\\Maniplating images')

SQUARE_FIT_SIZE = 300
LOGO_FILENAME = 'logo.png'
OUTPUT_DIR = 'withlogo'

logoIm = Image.open(LOGO_FILENAME).convert('RGBA')
logoIm = logoIm.resize((50, 50))
logoWidth, logoHeight = logoIm.size

# The directory was previously created as 'withlogo' but written to as
# 'withLogo'; on a case-sensitive filesystem the save failed. Spelling is
# now unified via OUTPUT_DIR.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Loop over all files in the working directory.
for filename in os.listdir('.'):
    if not (filename.endswith('.png') or filename.endswith('.jpg')) or filename == LOGO_FILENAME:
        continue  # skip non-image files and the logo file itself

    im = Image.open(filename).convert("RGBA")
    width, height = im.size

    # Scale so the image fits a SQUARE_FIT_SIZE square, preserving aspect.
    # NOTE(review): the else-branch also *enlarges* images smaller than
    # 300px -- confirm that upscaling is intended.
    if width > SQUARE_FIT_SIZE and height > SQUARE_FIT_SIZE:
        height = int((SQUARE_FIT_SIZE / width) * height)
        width = SQUARE_FIT_SIZE
    else:
        width = int((SQUARE_FIT_SIZE / height) * width)
        height = SQUARE_FIT_SIZE

    print('Resizing %s ...' % (filename))
    im = im.resize((width, height))

    print('Adding logo to %s ...' % (filename))
    im.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)

    if im.mode != 'RGB':
        im = im.convert('RGB')  # JPEG cannot store an alpha channel
    # Dropped the previous im.thumbnail(im.size, Image.ANTIALIAS) call: it
    # was a no-op (the image is already that size) and ANTIALIAS was
    # removed in Pillow 10.
    im.save(os.path.join(OUTPUT_DIR, filename), 'JPEG')
| Elhasssen/Python_learning_path | Maniplating images/resizeAndAddLogo.py | resizeAndAddLogo.py | py | 1,731 | python | en | code | 0 | github-code | 13 |
36697698262 | import functools as ft
from dataclasses import dataclass
from typing import Any
import jax
from .deprecated import deprecated
from .filters import combine, is_array, partition, validate_filters
from .module import Module, static_field
class _Static(Module):
    # Wrapper carrying the static (non-traced) part of a jitted output
    # through JAX as a single static leaf.
    value: Any = static_field()
@ft.lru_cache(maxsize=4096)
def _filter_jit_cache(f, **jitkwargs):
    # Cached per (function, jit options) so repeated filter_jit calls reuse
    # the same compiled wrapper.
    @ft.partial(jax.jit, static_argnums=(0, 1, 4), **jitkwargs)
    def f_wrapped(
        static_leaves, static_treedef, dynamic_args, dynamic_kwargs, filter_spec_return
    ):
        # Rebuild full arguments from their static and dynamic halves.
        static_args, static_kwargs = jax.tree_unflatten(static_treedef, static_leaves)
        args = combine(dynamic_args, static_args)
        kwargs = combine(dynamic_kwargs, static_kwargs)
        out = f(*args, **kwargs)
        # Split the output the same way, so only traced leaves cross the
        # jit boundary; static pieces ride along inside a _Static wrapper.
        dynamic_out, static_out = partition(out, filter_spec_return)
        return dynamic_out, _Static(static_out)

    return f_wrapped
def filter_jit(
    fun,
    *,
    filter_spec=is_array,
    filter_spec_return=is_array,
    static_argnums=None,
    static_argnames=None,
    donate_argnums=None,
    **jitkwargs
):
    """Filtered version of ``jax.jit``.

    Leaves of the arguments selected by ``filter_spec`` are traced; all
    other leaves are treated as static. ``filter_spec_return`` applies the
    same split to the output. ``static_argnums``/``static_argnames``/
    ``donate_argnums`` are rejected because filtering subsumes them.
    """
    if static_argnums is not None:
        raise ValueError("`static_argnums` should not be passed; use a filter instead.")
    if static_argnames is not None:
        raise ValueError(
            "`static_argnames` should not be passed; use a filter instead."
        )
    if donate_argnums is not None:
        raise NotImplementedError(
            "`donate_argnums` is not implemented for filter_jit. Manually combine "
            "`equinox.filter` and `jax.jit` instead.."
        )

    # We choose not to make a distinction between ([arg, ... ,arg], kwargs) and ((arg, ... ,arg), kwargs)
    if (
        isinstance(filter_spec, tuple)
        and len(filter_spec) == 2
        and isinstance(filter_spec[0], list)
    ):
        filter_spec = (tuple(filter_spec[0]), filter_spec[1])

    @ft.wraps(fun)
    def fun_wrapper(*args, **kwargs):
        # Split arguments into traced (dynamic) and static parts.
        (dynamic_args, dynamic_kwargs), (static_args, static_kwargs) = partition(
            (args, kwargs), filter_spec
        )
        # Leaves must be hashable for use as static jit arguments.
        static_leaves, static_treedef = jax.tree_flatten((static_args, static_kwargs))
        static_leaves = tuple(static_leaves)
        dynamic_out, static_out = _filter_jit_cache(fun, **jitkwargs)(
            static_leaves,
            static_treedef,
            dynamic_args,
            dynamic_kwargs,
            filter_spec_return,
        )
        return combine(dynamic_out, static_out.value)

    return fun_wrapper
#
# Deprecated
#
@ft.lru_cache(maxsize=4096)
def _jitf_cache(f, args_treedef, **jitkwargs):
    # Cached per (function, argument treedef, jit options) so repeated jitf
    # calls with the same structure reuse one compiled wrapper.
    @ft.partial(jax.jit, **jitkwargs)
    def f_wrapped(*args):
        args = jax.tree_unflatten(args_treedef, args)
        return f(*args)

    return f_wrapped
@dataclass(frozen=True)
class _UnPyTreeAble:
    # Marks a value explicitly declared static; falsy so it reads as
    # "not traced" wherever filter trees are evaluated for truthiness.
    value: Any

    def __bool__(self):
        return False
_marker_sentinel = object()  # unique sentinel object
@deprecated(in_favour_of=filter_jit)
def jitf(
    fun,
    *,
    filter_fn=None,
    filter_tree=None,
    static_argnums=None,
    static_argnames=None,
    donate_argnums=(),
    **jitkwargs
):
    """Deprecated filtered jit; use :func:`filter_jit` instead.

    Either ``filter_fn`` (a predicate on leaves) or ``filter_tree`` (a
    pytree of booleans matching the arguments) selects which leaves are
    traced; everything else becomes a static jit argument.
    """
    if isinstance(static_argnums, int):
        static_argnums = (static_argnums,)
    if static_argnames is not None:
        raise NotImplementedError(
            "jitf does not yet support `static_argnames`. use static_argnums instead."
        )
    if donate_argnums != ():
        # Fixed error-message typo ("ye" -> "yet").
        raise NotImplementedError("jitf does not yet support `donate_argnums`.")
    validate_filters("jitf", filter_fn, filter_tree)
    if static_argnums is None:
        len_static_argnums = 0
    else:
        len_static_argnums = len(static_argnums)

    @ft.wraps(fun)
    def f_wrapper(*args, **kwargs):
        if len(kwargs):
            raise NotImplementedError(
                "jitf does not yet support keyword arguments. Use positional arguments instead."
            )
        if filter_tree is not None:
            if len(args) - len_static_argnums == 1:
                new_filter_tree = (filter_tree,)
            else:
                new_filter_tree = tuple(filter_tree)

        # Mark the arguments that have been explicitly declared static via `static_argnums`
        if static_argnums is not None:
            args = list(args)
            for index in static_argnums:
                args[index] = _UnPyTreeAble(args[index])
            if filter_tree is not None:
                new_filter_tree = list(new_filter_tree)
                for index in static_argnums:
                    new_filter_tree.insert(index, _UnPyTreeAble(None))

        # Flatten everything else
        args_flat, args_treedef = jax.tree_flatten(args)
        if filter_tree is not None:
            filter_flat, flat_treedef = jax.tree_flatten(new_filter_tree)
            if flat_treedef != args_treedef:
                # Fixed error-message typo ("stucture" -> "structure").
                raise ValueError(
                    "The tree structure for the filters and the arguments must be the same."
                )

        # Figure out static argnums with respect to this new flattened structure.
        new_static_argnums = []
        if filter_tree is None:
            # implies filter_fn is not None
            for i, arg in enumerate(args_flat):
                if isinstance(arg, _UnPyTreeAble) or not filter_fn(arg):
                    new_static_argnums.append(i)
        else:
            for i, (arg, filter) in enumerate(zip(args_flat, filter_flat)):
                if not filter:
                    new_static_argnums.append(i)
        new_static_argnums = tuple(new_static_argnums)

        if static_argnums is not None:
            args_flat = [
                arg.value if isinstance(arg, _UnPyTreeAble) else arg
                for arg in args_flat
            ]

        f_jitted = _jitf_cache(
            fun, args_treedef, static_argnums=new_static_argnums, **jitkwargs
        )
        return f_jitted(*args_flat)

    return f_wrapper
| codeaudit/equinox | equinox/jitf.py | jitf.py | py | 5,896 | python | en | code | null | github-code | 13 |
39243239879 | # -*- coding: utf-8 -*-
# Approach: first walk up the parent pointers to find the tree's root,
# then run an in-order traversal and return the node that follows pNode.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
    """Find a node's in-order successor in a binary tree with parent links."""

    def __init__(self):
        self.lst = []  # in-order sequence of nodes, rebuilt per query

    def GetNext(self, pNode):
        """Return the in-order successor of *pNode*, or None if it is last.

        Walks up the ``next`` (parent) pointers to the root, performs an
        in-order traversal, and returns the node following *pNode*.
        """
        if pNode is None:
            return None
        # Fixed: reset the cached traversal. The original kept appending
        # across calls, so any second query on the same Solution instance
        # indexed into a list with duplicated nodes and could return a
        # wrong successor.
        self.lst = []
        root = pNode
        while root.next is not None:
            root = root.next
        self.mid_tree(root)
        idx = self.lst.index(pNode)
        if idx + 1 >= len(self.lst):
            return None
        return self.lst[idx + 1]

    def mid_tree(self, root):
        """In-order traversal appending each visited node to ``self.lst``."""
        if root is None:
            return
        self.mid_tree(root.left)
        self.lst.append(root)
        self.mid_tree(root.right)
if __name__=="__main__":
    # Smoke instantiation only; no tree is built here.
    s=Solution()
| RellRex/Sword-for-offer-with-python-2.7 | test57_二叉树的下一个结点.py | test57_二叉树的下一个结点.py | py | 997 | python | en | code | 2 | github-code | 13 |
26680068426 | # # 13 - Tendo dado de entrada a altura (h) de uma pessoa, construa um algortimo que calcule seu peso ideal, utilizando as seguinte fórmulas:
# a. para homens: (72.7*h) - 58
# b. para mulheres: (62.1*h) - 44.7
# Read sex selector (1 = male, anything else is treated as female -- no
# input validation), height in metres and current weight in kg.
sexo = int(input('Selecione opção 1 se for do sexo Masculino / Selecione opção 2 se for do sexo feminino: '))
h = float(input('Altura:'))
peso = float(input('Peso:'))

# Ideal weight: men (72.7*h) - 58, women (62.1*h) - 44.7.
peso_ideal = (72.7*h) - 58 if sexo == 1 else (62.1*h) - 44.7

if peso < peso_ideal:
    print('Abaixo do peso ideal!')
elif peso == peso_ideal:
    # NOTE(review): exact float equality -- this branch almost never fires.
    print('Dentro do peso ideal!')
else:
    print('Acima do peso ideal!')
print ('Peso: %.2f / Peso ideal: %.2f' %(peso, peso_ideal)) | gabswyl/logicapython | Lógica de programação Python/Estrutura Sequencial/Altura & Peso 2/python.py | python.py | py | 673 | python | pt | code | 0 | github-code | 13 |
25933109225 | #!/usr/bin/python3
#_*_coding=utf-8 _*_
import sys
# Interactive loop: read an integer and print its reciprocal 1/x.
print("计算1/x的值")

while True:
    try:
        number = int(input("enter a number:"))
        if number == 0:
            # Division by zero: report on stderr and keep prompting.
            sys.stderr.write("除以0 error\n")
        else:
            a = "1/%d = %s" % (number,1/number)
            sys.stdout.write(a+"\n")
    except ValueError:
        # Non-integer input: report and terminate.
        # NOTE(review): the message asks the user to re-enter a positive
        # integer, but exit() ends the program instead of continuing the
        # loop -- confirm the intended behaviour.
        print("请输入正整数")
        exit()
| Ahead180-103/ubuntu | python/shell.py/sys1.py | sys1.py | py | 358 | python | en | code | 0 | github-code | 13 |
35498394229 | import os
from unittest import mock
from datetime import datetime, timezone
import pytest
from fyle_rest_auth.models import User, AuthToken
from rest_framework.test import APIClient
from apps.partner.models import PartnerOrg
from tests.fixture import fixture
def pytest_configure():
    # Reset the test database to a known state before the session starts.
    os.system('sh ./tests/sql_fixtures/reset_db_fixtures/reset_db.sh')
@pytest.fixture
def api_client():
    # Fresh DRF test client per test.
    return APIClient()
@pytest.fixture(scope="session", autouse=True)
def default_session_fixture(request):
    """Patch external Fyle APIs for the whole test session.

    The original called ``__enter__()`` on each patcher and never exited
    it, leaking the patches past the session (and leaving ``request``
    unused). Using ``start()`` with ``addfinalizer(stop)`` keeps the same
    patches active for the session and undoes them at teardown.
    """
    patchers = [
        mock.patch(
            'fyle_rest_auth.authentication.get_fyle_admin',
            return_value=fixture['my_profile']
        ),
        mock.patch(
            'fyle.platform.internals.auth.Auth.update_access_token',
            return_value='abcd'
        ),
        mock.patch(
            'apps.partner.helpers.post_request',
            return_value={
                'access_token': 'abcd.efgh.jklm',
                'cluster_domain': 'https://lolo.fyle.tech'
            }
        ),
        mock.patch(
            'fyle.platform.apis.v1beta.spender.MyProfile.get',
            return_value=fixture['my_profile']
        ),
        mock.patch(
            'apps.partner.helpers.get_cluster_domain',
            return_value='https://lolo.fyle.tech'
        ),
        mock.patch(
            'fyle_rest_auth.helpers.get_fyle_admin',
            return_value=fixture['my_profile']
        ),
    ]
    for patcher in patchers:
        patcher.start()
        request.addfinalizer(patcher.stop)
@pytest.fixture()
def access_token():
    # Seed users/orgs, then hand back the token the mocked auth expects.
    create_user_and_tokens()
    return 'abcd.efgh.jklm'
def create_user_and_tokens():
    """Create two users with auth tokens; the first joins the partner org."""
    user = create_user('ashwin.t@fyle.in', 'Joanna', 'usqywo0f3nBY')
    create_auth_token(user)
    partner_org = PartnerOrg.objects.create(
        name='Anagha Org', primary_org_id='orHVw3ikkCxJ', cluster_domain='https://lolo.fyle.tech'
    )
    partner_org.user.add(user)

    user = create_user('ashwin.t+1@fyle.in', 'Joannaa', 'usqywo0f3nBZ')
    create_auth_token(user)
def create_user(email: str, name: str, user_id: str) -> User:
    """Create and return an active, non-staff, non-admin User."""
    return User.objects.create(
        password='',
        last_login=datetime.now(tz=timezone.utc),
        email=email,
        user_id=user_id,
        full_name=name,
        active='t',
        staff='f',
        admin='f',
    )
def create_auth_token(user: User):
    # Attach a placeholder refresh token to the given user.
    AuthToken.objects.create(
        refresh_token='refresh_token',
        user=user
    )
| fylein/fyle-partner-dashboard-api | tests/conftest.py | conftest.py | py | 2,420 | python | en | code | 0 | github-code | 13 |
39701750576 | import networkx as nx
import numpy as np
import connectome_utils as utl
from multiplex import MultiplexConnectome
import os
import bct
import multiprocessing as mp
try:
from metrics.shared import set_seeds, push_exceptions
except (ImportError, SystemError):
from shared import set_seeds, push_exceptions
set_seeds()
DATA_ROOT = '/home/cbarnes/work/code/connectome/construct2/combine/tgt_data'
REPS = 100       # number of randomised control networks per specification
SWAP_PROP = 10   # edge-swap iteration parameter passed to bct.randmio_*

# Axes over which graph specifications are permuted.
permutations = {
    'layers': ['GapJunction', 'Synapse', 'Monoamine', 'Neuropeptide'],
    'sources': ['ac', 'ww'],
    'ma_include_weak': [True, False],
    'directed': [False],
    'weighted': [False]
}
def networkx_to_unwe_mat(G, directed=False):
    """Return G as an unweighted adjacency matrix with a zeroed diagonal.

    Nodes are ordered by sorted name; the (1 - eye) factor removes
    self-loops from the result.
    """
    return np.array(nx.to_numpy_matrix(
        G.to_directed() if directed else G.to_undirected(),
        sorted(G.nodes()),
        weight=None,
        multigraph_weight=min
    )) * (1 - np.eye(len(G.nodes())))
def get_original(data_root=DATA_ROOT, layers=permutations['layers'], source='ww', include_weak=False, directed=False,
                 weighted=False):
    """Load the real connectome for a spec and return its adjacency matrix."""
    if weighted:
        raise NotImplementedError("Haven't implemented directed or weighted graphs yet")
    json_path = os.path.join(data_root, 'including_weak' if include_weak else 'strong_only', source,
                             'complete.json')
    G = utl.json_deserialise(json_path)
    C = MultiplexConnectome(G)
    # A single layer name selects that layer; a list composes all of them.
    if isinstance(layers, str):
        return networkx_to_unwe_mat(C[layers], directed)
    else:
        return networkx_to_unwe_mat(C.compose(*layers), directed)
def get_spec_combinations():
    """Return every (layers, source, include_weak) spec as a list of dicts."""
    layer_perms = [
        ['GapJunction'],
        ['Synapse'],
        ['Monoamine'],
        ['Neuropeptide'],
        ['GapJunction', 'Synapse'],
        ['GapJunction', 'Synapse', 'Monoamine'],
        ['GapJunction', 'Synapse', 'Monoamine', 'Neuropeptide']
    ]
    return [
        {
            'layers': layers,
            'source': source,
            'include_weak': include_weak
        }
        for layers in layer_perms
        for source in permutations['sources']
        for include_weak in permutations['ma_include_weak']
    ]
def get_root_dir_list(root):
    """Return the sorted, de-duplicated control directories for every spec."""
    dirset = set()
    for spec_comb in get_spec_combinations():
        dirset.add(get_control_path(
            root, **spec_comb
        ))
    return sorted(dirset)
def setup_dirs(root, layers=permutations['layers'], source='ww', include_weak=False, directed=False,
               weighted=False):
    """Create the control directory for the given spec (idempotent).

    Fixed: the arguments are now forwarded -- the original passed the
    module-level defaults to get_control_path regardless of what the
    caller supplied.
    """
    path = get_control_path(root, layers=layers, source=source, include_weak=include_weak,
                            directed=directed, weighted=weighted)
    os.makedirs(path, exist_ok=True)
def setup_all_dirs(root):
    """Create every spec's control directory under *root*."""
    # Renamed the loop variable: 'dir' shadowed the builtin.
    for dirpath in get_root_dir_list(root):
        os.makedirs(dirpath, exist_ok=True)
# Short layer codes used when composing file/directory names.
abbreviations = {
    'GapJunction': 'gj',
    'Synapse': 'syn',
    'Monoamine': 'ma',
    'Neuropeptide': 'np'
}
def spec_to_name(layers=permutations['layers'], source='ww', include_weak=False, directed=False,
                 weighted=False):
    """Build the canonical name for a graph specification.

    Joins the sorted layer abbreviations, then appends the data source
    (only when a physical layer is present) and the weak/strong flag
    (only when the monoamine layer is present).
    """
    if directed or weighted:
        raise NotImplementedError("Haven't implemented directed or weighted graphs yet")

    if isinstance(layers, str):
        layer_part = abbreviations[layers]
    else:
        layer_part = '-'.join(sorted(abbreviations[layer] for layer in layers))

    parts = [layer_part]
    if 'GapJunction' in layers or 'Synapse' in layers:
        parts.append(source)
    if 'Monoamine' in layers:
        parts.append('wk' if include_weak else 'str')

    return '_'.join(parts)
def get_root_path(root, layers=permutations['layers'], source='ww', include_weak=False, directed=False,
                  weighted=False):
    """Return <root>/<spec name> for the given specification."""
    if directed or weighted:
        raise NotImplementedError("Haven't implemented directed or weighted graphs yet")
    name = spec_to_name(layers, source, include_weak, directed, weighted)
    return os.path.join(root, name)
def get_control_path(root, layers=permutations['layers'], source='ww', include_weak=False, directed=False,
                     weighted=False):
    """Return the 'controls' directory path for the given specification."""
    return os.path.join(
        get_root_path(root, layers, source, include_weak, directed, weighted),
        'controls'
    )
def get_real_path(root, layers=permutations['layers'], source='ww', include_weak=False, directed=False,
                  weighted=False):
    """Return the path of the real (non-control) adjacency file for a spec."""
    return os.path.join(
        get_root_path(root, layers, source, include_weak, directed, weighted),
        'adj.npy'
    )
def make_control(source_adj_and_filepath):
    """Worker: randomise one adjacency matrix and save it to *filepath*.

    Accepts a single (adjacency, filepath[, directed]) tuple so it can be
    mapped over with multiprocessing.
    """
    source_adj, filepath = source_adj_and_filepath[:2]
    if len(source_adj_and_filepath) > 2:
        directed = source_adj_and_filepath[2]
    else:
        directed = False
    # Degree-preserving edge-swap randomisation (directed or undirected).
    randomiser = bct.randmio_dir if directed else bct.randmio_und
    np.save(filepath, randomiser(source_adj, SWAP_PROP)[0])
    print(' generating {}'.format(filepath))
    return True
def filename_iter(nreps=np.inf, ext='.npy'):
    """Yield zero-padded sequential file names: '000.npy', '001.npy', ..."""
    index = 0
    while index < nreps:
        yield f'{index:03}{ext}'
        index += 1
def make_controls(source_adj, out_dir, n=REPS+1, directed=False):
    """Generate *n* randomised controls of *source_adj* in parallel.

    Each worker receives its own copy of the matrix; set() just drains the
    imap iterator so all jobs complete before the pool closes.
    """
    with mp.Pool() as p:
        set(p.imap_unordered(
            make_control,
            ((source_adj.copy(), os.path.join(out_dir, filename), directed) for filename in filename_iter(n)),
            chunksize=int(n/mp.cpu_count())
        ))
@push_exceptions
def undi_combinations_setup():
    """Save the real adjacency and generate controls for every undirected spec."""
    out_root = 'graphs'
    for comb in get_spec_combinations():
        print(comb)
        control_dir = get_control_path(out_root, **comb)
        os.makedirs(control_dir, exist_ok=True)
        real_path = get_real_path(out_root, **comb)
        adj = get_original(**comb)
        np.save(real_path, adj)
        make_controls(adj, control_dir)
@push_exceptions
def di_layers_setup():
    """Generate directed per-layer graphs and controls, then their combinations."""
    out_root = os.path.join('graphs', 'di_layers')

    # Every layer except gap junctions is treated as directed.
    di_layers = ['Synapse', 'Neuropeptide', 'Monoamine']
    for layer_name in di_layers:
        print(layer_name)
        out_dir = os.path.join(out_root, abbreviations[layer_name])
        controls_dir = os.path.join(out_dir, 'controls')
        os.makedirs(controls_dir, exist_ok=True)
        adj = get_original(layers=layer_name, source='ac', include_weak=False, directed=True)
        np.save(os.path.join(out_dir, 'adj.npy'), adj)
        make_controls(adj, controls_dir, directed=True)

    # Gap junctions stay undirected.
    layer_name = 'GapJunction'
    print(layer_name)
    out_dir = os.path.join(out_root, abbreviations[layer_name])
    controls_dir = os.path.join(out_dir, 'controls')
    os.makedirs(controls_dir, exist_ok=True)
    adj = get_original(layers=layer_name, source='ac', include_weak=False, directed=False)
    np.save(os.path.join(out_dir, 'adj.npy'), adj)
    make_controls(adj, controls_dir, directed=False)

    # Combine the freshly generated per-layer graphs.
    di_combinations_from_layers()
def collapse_list_of_arrays(lst):
    """Binarised element-wise sum of the arrays in *lst*.

    Fixed: the original did ``adj = lst[0]`` then ``adj += layer``, which
    mutated the caller's first array in place; summing into a copy leaves
    the inputs untouched.
    """
    total = lst[0].copy()
    for layer in lst[1:]:
        total += layer
    return bct.binarize(total)
def di_combinations_from_layers():
    """Compose saved per-layer graphs (and matching controls) into combined nets."""
    out_root = os.path.join('graphs', 'di_layers')
    for combination in [
        ('gj', 'syn'),
        ('gj', 'syn', 'ma'),
        ('gj', 'syn', 'ma', 'np')
    ]:
        print('Combining {}'.format(combination))
        out_dir = os.path.join(out_root, '-'.join(sorted(combination)))
        control_dir = os.path.join(out_dir, 'controls')
        os.makedirs(control_dir, exist_ok=True)
        real_adj = collapse_list_of_arrays([np.load(os.path.join(out_root, layer, 'adj.npy')) for layer in combination])
        np.save(os.path.join(out_dir, 'adj.npy'), real_adj)
        # Control i of the combination is the union of control i from each layer.
        for filename in filename_iter(REPS+1):
            print(' generating {}'.format(filename))
            adj = collapse_list_of_arrays(
                [np.load(os.path.join(out_root, layer, 'controls', filename)) for layer in combination]
            )
            np.save(os.path.join(control_dir, filename), adj)
    make_other_rand()
def make_other_rand():
    """Variant control set: real gj+syn layers combined with randomised ma layers."""
    out_root = os.path.join('graphs', 'di_layers')
    out_dir = os.path.join(out_root, 'gj-ma-syn2')
    control_dir = os.path.join(out_dir, 'controls')
    os.makedirs(control_dir, exist_ok=True)
    real_adj = collapse_list_of_arrays([np.load(os.path.join(out_root, layer, 'adj.npy')) for layer in ('gj', 'syn', 'ma')])
    np.save(os.path.join(out_dir, 'adj.npy'), real_adj)
    for filename in filename_iter(REPS + 1):
        print(' generating {}'.format(filename))
        # Only the monoamine layer is randomised; gj and syn stay real.
        adj = collapse_list_of_arrays(
            [
                np.load(os.path.join(out_root, 'gj', 'adj.npy')),
                np.load(os.path.join(out_root, 'syn', 'adj.npy')),
                np.load(os.path.join(out_root, 'ma', 'controls', filename)),
            ]
        )
        np.save(os.path.join(control_dir, filename), adj)
if __name__ == '__main__':
    # undi_combinations_setup()  # previous entry point, kept for reference
    di_layers_setup()
| clbarnes/connectome_paper | metrics/file_tools.py | file_tools.py | py | 8,988 | python | en | code | 0 | github-code | 13 |
32752689523 | import tkinter as tk
from tkinter import Frame, BOTH
class MatrixtableGUI(Frame):
    """Tk window drawing the snake-game grid and a score label."""

    def __init__(self):
        # This widget owns its own Tk root rather than receiving a parent,
        # so the superclass is initialised inside show_window().
        self.height = 500
        self.width = 450
        self.food = None
        self.rectangles = []
        self.rectangles_coordinates = []
        self.canvas = None
        self.show_window()

    def show_window(self):
        """Create the root window and canvas, then draw the initial board."""
        self.window_table = tk.Tk()
        self.window_table.title("Snake game")
        # Initialise the Frame base class; the original never called it,
        # leaving the inherited widget half-constructed.
        super().__init__(self.window_table)
        self.canvas = tk.Canvas(self.window_table, height=self.height, width=self.width)
        self.draw_table(0)

    def draw_table(self, score):
        """Draw the 15px grid lines and the current score, then pack the canvas."""
        for i in range(1, 450, 15):
            for j in range(1, 460, 15):
                self.canvas.create_line(i, j, 460, j)  # horizontal lines
                self.canvas.create_line(i, j, i, 450)  # vertical lines
        self.show_score(score)
        self.canvas.pack(fill=BOTH, expand=1)

    def show_score(self, score):
        """Render the score label in the window's bottom-right corner."""
        labela = tk.Label(text="SCORE: " + str(score))
        labela.place(relx=0.62, rely=0.95, relwidth=0.2, relheight=0.05)
| rapunzeeel/Snake-game | Projekat/GUI/MatrixtableGUI.py | MatrixtableGUI.py | py | 1,053 | python | en | code | 0 | github-code | 13 |
18074327919 | # The docstring in this module is written in rst format so that it can be
# collected by sphinx and integrated into django-genes/README.rst file.
"""
This command can be used to populate database with WormBase
identifiers. It takes 3 arguments:
* (Required) wb_url: URL of wormbase xrefs file;
* (Optional) db_name: the name of the cross-reference database,
default is 'WormBase'.
As is expected, the WormBase cross-reference database should be
populated using the ``genes_add_xrdb`` command (see command #1)
before this command to populate the WormBase identifiers.
Here is an example:
::
# Find latest version of WormBase here:
# http://www.wormbase.org/about/release_schedule#102--10-1
python manage.py genes_load_wb --wb_url=ftp://ftp.wormbase.org/pub/\
wormbase/releases/WS243/species/c_elegans/PRJNA13758/c_elegans.PRJNA13758.WS243.xrefs.txt.gz
"""
import logging
import urllib2
import gzip
from StringIO import StringIO
from django.core.management.base import BaseCommand, CommandError
from genes.models import Gene, CrossRefDB, CrossRef
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Command(BaseCommand):
    help = 'Add wormbase identifiers to database.'

    def add_arguments(self, parser):
        # --wb_url is required; --db_name names the CrossRefDB row to use.
        parser.add_argument(
            '--wb_url',
            dest='wburl',
            required=True,
            help="URL of wormbase xrefs file."
        )
        parser.add_argument(
            '--db_name',
            dest='dbname',
            default="WormBase",
            help="Name of the database, defaults to 'WormBase'."
        )

    def handle(self, *args, **options):
        """Download the gzipped xrefs file and upsert WormBase CrossRefs.

        NOTE(review): urllib2/StringIO are Python 2 APIs -- confirm the
        supported Python version before modernising.
        """
        database = CrossRefDB.objects.get(name=options.get('dbname'))
        wb_url = options.get('wburl')

        # Fetch the whole gzip into memory, then wrap it for line iteration.
        xrefs_gzip_fh = gzip.GzipFile(fileobj=StringIO(
            urllib2.urlopen(wb_url, timeout=5).read()))
        for line in xrefs_gzip_fh:
            toks = line.strip().split('\t')
            # Column 0: systematic name (stored with a CELE_ prefix);
            # column 1: WormBase identifier.
            systematic = 'CELE_' + toks[0]
            wbid = toks[1]
            try:
                gene = Gene.objects.get(systematic_name=systematic)
            except Gene.DoesNotExist:
                logger.info("Unable to find gene %s.", systematic)
                continue

            # Reuse an existing cross-ref if present, otherwise create one.
            wb = None
            try:
                wb = CrossRef.objects.get(xrid=wbid, crossrefdb=database)
            except CrossRef.DoesNotExist:
                wb = CrossRef(xrid=wbid, crossrefdb=database)
            wb.gene = gene
            wb.save()
        xrefs_gzip_fh.close()
| greenelab/django-genes | genes/management/commands/genes_load_wb.py | genes_load_wb.py | py | 2,575 | python | en | code | 2 | github-code | 13 |
14129744392 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from time import time
import threading
from queue import Queue
def start_chrome_with_permissions():
    """Launch Chrome with mic/camera permission prompts auto-accepted."""
    chrome_options = Options()
    chrome_options.add_argument("--use-fake-ui-for-media-stream")
    driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', options=chrome_options)
    return driver
def selenium_task(q, function, *args, **kwargs):
    """Run *function* with the given arguments and publish its result on *q*."""
    q.put(function(*args, **kwargs))
def selenium_test_demo_button():
    """Drive the Percogo demo flow and report the first failure (if any).

    Returns a ``(message, screenshot_path)`` tuple; ``screenshot_path`` is
    None on success.
    """
    result = "Demo , mic and camera buttons are works well"
    screenshot_path = None
    driver = start_chrome_with_permissions()
    # Fixed: `wait` was referenced inside the first try-block before it was
    # assigned; the resulting NameError was silently swallowed, so the
    # cookie banner was never dismissed. Define it as soon as the driver
    # exists.
    wait = WebDriverWait(driver, 10)

    try:
        driver.get("https://app.percogo.com")

        # Dismiss the "We use cookies" banner if present.
        try:
            cookie_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'button#c-p-bn')))
            cookie_button.click()
        except Exception as e:
            # Banner absent or not clickable: continue with the flow.
            pass
    except Exception as e:
        driver.quit()
        return f"Page can't load correctly, Error!!!", None

    try:
        button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.b-purple')))
        button.click()
    except Exception as e:
        screenshot_path = "first_button_screenshot.png"
        driver.save_screenshot(screenshot_path)
        driver.quit()
        result = f"Cannot click First Button Error!!!"
        return result, screenshot_path

    try:
        new_window_handle = [handle for handle in driver.window_handles if handle != driver.current_window_handle][0]
        driver.switch_to.window(new_window_handle)
    except Exception as e:
        screenshot_path = "switch_window_screenshot.png"
        driver.save_screenshot(screenshot_path)
        driver.quit()
        result = f"Cannot switch to the new window Error!!!"
        return result, screenshot_path

    try:
        mic_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'img[alt="mic icon"]')))
        mic_button.click()

        camera_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'img[alt="camera icon"]')))
        camera_button.click()
    except Exception as e:
        screenshot_path = "mic_camera_button_screenshot.png"
        driver.save_screenshot(screenshot_path)
        driver.quit()
        result = f"Cannot click Mic or Camera Button Error!!!"
        return result, screenshot_path

    try:
        join_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.perculus-button-container')))
        join_button.click()

        # Click the introjs-skipbutton
        try:
            skip_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.introjs-skipbutton')))
            skip_button.click()

            # Check for Leave button
            try:
                leave_button = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.btn-primary.leave')))
            except Exception as e:
                screenshot_path = "leave_button_screenshot.png"
                driver.save_screenshot(screenshot_path)
                driver.quit()
                result = f"Leave button not found Error!!!"
                return result, screenshot_path
        except Exception as e:
            screenshot_path = "skip_button_screenshot.png"
            driver.save_screenshot(screenshot_path)
            driver.quit()
            result = f"Cannot click Skip Button Error!!!"
            return result, screenshot_path
    except Exception as e:
        screenshot_path = "final_button_screenshot.png"
        driver.save_screenshot(screenshot_path)
        driver.quit()
        result = f"Cannot click Final Button Error!!!"
        return result, screenshot_path

    driver.quit()
    return result, None
async def test_demo_button():
    """Run the selenium check in a worker thread and return its result."""
    q = Queue()
    t = threading.Thread(target=selenium_task, args=(q, selenium_test_demo_button))
    t.start()
    # NOTE(review): join() blocks the event loop for the whole selenium run;
    # asyncio.to_thread (or an executor) would keep this coroutine
    # non-blocking -- confirm intent before changing.
    t.join()
    return q.get()
| GedizUcar/control-bot | demo.py | demo.py | py | 4,309 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.