seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
from typing import Optional, List, Union
from hisim.simulator import SimulationParameters
from hisim.components import occupancy
from hisim.components import price_signal
from hisim.components import weather
from hisim.components import pvs
from hisim.components import smart_device
from hisim.components import building
from hisim.components import heat_pump
from hisim.components import simple_bucket_boiler
from hisim.components import oil_heater
from hisim.components import district_heating
from hisim.components import sumbuilder
from hisim import utils
import os
# Standard module metadata for the HiSim example setup script.
__authors__ = "Johanna Ganglbauer - johanna.ganglbauer@4wardenergy.at"
__copyright__ = "Copyright 2021, the House Infrastructure Project"
__credits__ = ["Noah Pflugradt"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Vitor Hugo Bellotto Zago"
__email__ = "vitor.zago@rwth-aachen.de"
__status__ = "development"
def append_to_electricity_load_profiles( my_sim, operation_counter : int, electricity_load_profiles : List[ Union[ sumbuilder.ElectricityGrid, occupancy.Occupancy ] ], elem_to_append : sumbuilder.ElectricityGrid ):
    """Append one grid element to the load-profile list and register it.

    The element is added to (a copy of) *electricity_load_profiles*, the
    component at index *operation_counter* (i.e. the new element) is handed
    to the simulator, and the simulator, the incremented counter and the
    extended list are returned.
    """
    extended_profiles = [ *electricity_load_profiles, elem_to_append ]
    my_sim.add_component( extended_profiles[ operation_counter ] )
    return my_sim, operation_counter + 1, extended_profiles
def modular_household_explicit( my_sim, my_simulation_parameters: Optional[SimulationParameters] = None ):
    """
    This setup function emulates an household including
    the basic components "building", "occupancy" and "weather". Here it can be freely chosen if a PV system or a boiler are included or not.
    The heating system can be either a heat pump, an Oilheater or Districtheating
    - Simulation Parameters
    - Components
        - Occupancy (Residents' Demands)
        - Weather
        - Photovoltaic System
        - Building
        - Heat Pump
    """
    ##### delete all files in cache:
    # NOTE(review): cache clearing is commented out; `dir` also shadows the builtin.
    dir = '..//hisim//inputs//cache'
    #for file in os.listdir( dir ):
    #    os.remove( os.path.join( dir, file ) )

    ##### System Parameters #####

    # Set simulation parameters
    year = 2021
    seconds_per_timestep = 60 * 15  # 15-minute resolution

    # Set building
    building_code = "DE.N.SFH.05.Gen.ReEx.001.002"
    building_class = "medium"
    initial_temperature = 23

    # Set weather
    location = "Aachen"

    # Set occupancy
    occupancy_profile = "CH01"

    # Build system parameters (fall back to a full-year default when none given)
    if my_simulation_parameters is None:
        my_simulation_parameters = SimulationParameters.full_year_all_options( year= year,
                                                                               seconds_per_timestep=seconds_per_timestep )
    my_sim.SimulationParameters = my_simulation_parameters

    #get system configuration
    predictive = my_simulation_parameters.system_config.predictive #True or False
    pv_included = my_simulation_parameters.system_config.pv_included #True or False
    smart_devices_included = my_simulation_parameters.system_config.smart_devices_included #True or False
    boiler_included = my_simulation_parameters.system_config.boiler_included #Electricity, Hydrogen or False
    heating_device_included = my_simulation_parameters.system_config.heating_device_included

    # Set photovoltaic system
    time = 2019
    power = 10E3
    load_module_data = False
    module_name = "Hanwha_HSL60P6_PA_4_250T__2013_"
    integrateInverter = True
    inverter_name = "ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_"

    #set boiler
    if boiler_included == 'electricity':
        definition = '0815-boiler'
        smart = 1
    elif boiler_included == 'hydrogen':
        definition = 'hydrogen-boiler'
        smart = 0
    elif boiler_included:
        raise NameError( 'Boiler definition', boiler_included, 'not known. Choose electricity, hydrogen, or False.' )

    #Set heating system
    if heating_device_included == 'heat_pump':
        # Set heat pump controller
        t_air_heating = 16.0
        t_air_cooling = 24.0
        offset = 0.5
        hp_mode = 2
        # Set heat pump
        hp_manufacturer = "Viessmann Werke GmbH & Co KG"
        hp_name = "Vitocal 300-A AWO-AC 301.B07"
        hp_min_operation_time = 60
        hp_min_idle_time = 15
    elif heating_device_included == 'oil_heater':
        # Set Oil heater controller
        t_air_heating = 21.0
        offset = 3.0
        # Set Oil Heater
        max_power = 5000
        min_on_time = 60
        min_off_time = 15
    elif heating_device_included == 'district_heating':
        t_air_heating = 21.0
        tol = 1e-2 #tolerance of set point -> considered in control
        max_power = 15000
        min_power = 1000
        efficiency = 0.85
    elif heating_device_included:
        raise NameError( 'Heating Device definition', heating_device_included, 'not known. Choose heat_pump, oil_heater, district_heating, or False.' )

    ##### Build Components #####

    # Build occupancy
    my_occupancy = occupancy.Occupancy( profile_name=occupancy_profile, my_simulation_parameters = my_simulation_parameters )
    my_sim.add_component( my_occupancy )

    # Add price signal (only used by predictive controllers)
    if predictive == True:
        my_price_signal = price_signal.PriceSignal( my_simulation_parameters = my_simulation_parameters )
        my_sim.add_component( my_price_signal )

    #initialize list of components representing the actual load profile and operation counter
    operation_counter = 0
    electricity_load_profiles : List[ Union[ sumbuilder.ElectricityGrid, occupancy.Occupancy ] ] = [ my_occupancy ]
    operation_counter = 1

    # Build Weather
    my_weather = weather.Weather( location=location, my_simulation_parameters = my_simulation_parameters )
    my_sim.add_component( my_weather )

    # Build building
    my_building = building.Building( building_code = building_code,
                                     bClass = building_class,
                                     initial_temperature = initial_temperature,
                                     my_simulation_parameters = my_simulation_parameters )
    my_building.connect_only_predefined_connections( my_weather, my_occupancy )
    my_sim.add_component( my_building )

    if pv_included:
        my_photovoltaic_system = pvs.PVSystem( time = time,
                                               location = location,
                                               power = power,
                                               load_module_data = load_module_data,
                                               module_name = module_name,
                                               integrateInverter = integrateInverter,
                                               inverter_name = inverter_name,
                                               my_simulation_parameters = my_simulation_parameters )
        my_photovoltaic_system.connect_only_predefined_connections( my_weather )
        my_sim.add_component( my_photovoltaic_system )
        # PV generation reduces the running load profile ("Subtract").
        my_sim, operation_counter, electricity_load_profiles = append_to_electricity_load_profiles(
            my_sim = my_sim,
            operation_counter = operation_counter,
            electricity_load_profiles = electricity_load_profiles,
            elem_to_append = sumbuilder.ElectricityGrid( name = "BaseLoad" + str( operation_counter ),
                                                         grid = [ electricity_load_profiles[ operation_counter - 1 ], "Subtract", my_photovoltaic_system ],
                                                         my_simulation_parameters = my_simulation_parameters )
            )

    if smart_devices_included:
        my_smart_device = smart_device.SmartDevice( my_simulation_parameters = my_simulation_parameters )
        my_sim.add_component( my_smart_device )
        if predictive == True:
            # Controller and device are cross-connected for predictive operation.
            my_smart_device_controller = smart_device.SmartDeviceController( my_simulation_parameters = my_simulation_parameters )
            my_sim.add_component( my_smart_device_controller )
            my_smart_device.connect_only_predefined_connections( my_smart_device_controller )
            my_smart_device_controller.connect_only_predefined_connections( my_smart_device )
        # Smart-device consumption adds to the running load profile ("Sum").
        my_sim, operation_counter, electricity_load_profiles = append_to_electricity_load_profiles(
            my_sim = my_sim,
            operation_counter = operation_counter,
            electricity_load_profiles = electricity_load_profiles,
            elem_to_append = sumbuilder.ElectricityGrid( name = "BaseLoad" + str( operation_counter ),
                                                         grid = [ electricity_load_profiles[ operation_counter - 1 ], "Sum", my_smart_device ],
                                                         my_simulation_parameters = my_simulation_parameters )
            )

    if boiler_included:
        my_boiler = simple_bucket_boiler.Boiler( definition = definition, fuel = boiler_included, my_simulation_parameters = my_simulation_parameters )
        # my_boiler.connect_input( my_boiler.WaterConsumption,
        #                          my_occupancy.ComponentName,
        #                          my_occupancy.WaterConsumption )
        my_boiler.connect_only_predefined_connections( my_occupancy )
        my_sim.add_component( my_boiler )
        my_boiler_controller = simple_bucket_boiler.BoilerController( smart = smart, my_simulation_parameters = my_simulation_parameters )
        # my_boiler_controller.connect_input( my_boiler_controller.StorageTemperature,
        #                                     my_boiler.ComponentName,
        #                                     my_boiler.StorageTemperature )
        my_boiler_controller.connect_only_predefined_connections( my_boiler )
        my_sim.add_component( my_boiler_controller )
        # my_boiler.connect_input( my_boiler.State,
        #                          my_boiler_controller.ComponentName,
        #                          my_boiler_controller.State )
        my_boiler.connect_only_predefined_connections( my_boiler_controller )
        if boiler_included == 'electricity':
            # Only an electric boiler feeds back into the electricity balance.
            my_boiler_controller.connect_input( my_boiler_controller.ElectricityInput,
                                                electricity_load_profiles[ - 1 ].ComponentName,
                                                electricity_load_profiles[ - 1 ].ElectricityOutput )
            my_sim, operation_counter, electricity_load_profiles = append_to_electricity_load_profiles(
                my_sim = my_sim,
                operation_counter = operation_counter,
                electricity_load_profiles = electricity_load_profiles,
                elem_to_append = sumbuilder.ElectricityGrid( name = "BaseLoad" + str( operation_counter ),
                                                             grid = [ electricity_load_profiles[ operation_counter - 1 ], "Sum", my_boiler ],
                                                             my_simulation_parameters = my_simulation_parameters )
                )

    if heating_device_included:
        my_heating : Union[ heat_pump.HeatPump, oil_heater.OilHeater, district_heating.DistrictHeating ]
        my_heating_controller : Union[ heat_pump.HeatPumpController, oil_heater.OilHeaterController, district_heating.DistrictHeatingController ]
        #initialize and connect controller
        if heating_device_included == 'heat_pump':
            my_heating_controller = heat_pump.HeatPumpController( t_air_heating = t_air_heating,
                                                                  t_air_cooling = t_air_cooling,
                                                                  offset = offset,
                                                                  mode = hp_mode,
                                                                  my_simulation_parameters = my_simulation_parameters )
            # Narrowed alias so the type checker accepts ElectricityInput.
            hc : heat_pump.HeatPumpController = my_heating_controller # type: ignore
            hc.connect_input( hc.ElectricityInput,
                              electricity_load_profiles[ operation_counter - 1 ].ComponentName,
                              electricity_load_profiles[ operation_counter - 1 ].ElectricityOutput )
        elif heating_device_included == 'oil_heater':
            my_heating_controller = oil_heater.OilHeaterController( t_air_heating = t_air_heating,
                                                                    offset = offset,
                                                                    my_simulation_parameters = my_simulation_parameters )
            my_heating_controller.connect_only_predefined_connections( my_weather )
        elif heating_device_included == 'district_heating':
            my_heating_controller = district_heating.DistrictHeatingController( max_power = max_power,
                                                                                min_power = min_power,
                                                                                t_air_heating = t_air_heating,
                                                                                tol = tol,
                                                                                my_simulation_parameters = my_simulation_parameters )
            my_heating_controller.connect_only_predefined_connections( my_building )
        my_sim.add_component( my_heating_controller)
        #initialize and connect heating device
        if heating_device_included == 'heat_pump':
            my_heating = heat_pump.HeatPump( manufacturer = hp_manufacturer,
                                             name = hp_name,
                                             min_operation_time = hp_min_operation_time,
                                             min_idle_time = hp_min_idle_time,
                                             my_simulation_parameters = my_simulation_parameters )
            my_heating.connect_only_predefined_connections( my_weather )
        elif heating_device_included == 'oil_heater':
            my_heating = oil_heater.OilHeater( max_power = max_power,
                                               min_off_time = min_off_time,
                                               min_on_time = min_on_time,
                                               my_simulation_parameters = my_simulation_parameters )
        elif heating_device_included == 'district_heating':
            my_heating = district_heating.DistrictHeating( max_power = max_power,
                                                           min_power = min_power,
                                                           efficiency = efficiency,
                                                           my_simulation_parameters = my_simulation_parameters )
        my_heating.connect_only_predefined_connections( my_heating_controller )
        my_sim.add_component( my_heating )
        # Feed the produced heat into the building's thermal balance.
        my_building.connect_input( my_building.ThermalEnergyDelivered,
                                   my_heating.ComponentName,
                                   my_heating.ThermalEnergyDelivered )
"""Sc is a distributed service manager."""
import subprocess
import collections
import pathlib
import datetime
import re
import json
import yaml
import flask
import argh
import tabulate
# pyxtermjs imports
import flask_socketio
import pty
import os
import select
import termios
import struct
import fcntl
# Flask application plus shared state for the single in-browser terminal.
app = flask.Flask(__name__)
app.config["SECRET_KEY"] = "secret!"
app.config["fd"] = None  # pty master fd of the running terminal child (or None)
app.config["child_pid"] = None  # pid of the forked terminal child (or None)
app.config["cmd"] = "false"  # command launched when a terminal client connects
app.config["term_proc_exit"] = False  # set to True to stop the pty reader task
socketio = flask_socketio.SocketIO(app)
def lines_words(text):
    """Return array of lines split into array of words."""
    return [row.split() for row in text.decode().split("\n")]
# Warning thresholds (fractions) applied to per-node metrics.
MEM_USED_WARN_PCT: float = 0.45
CPU_LOAD_WARN_PCT: float = 0.5
DISK_USED_WARN_PCT: float = 0.90
# Concatenated service+node+type keys the user has acknowledged via the UI.
ACKNOWLEDGED_ALERTS: set[str] = set()
# Mutable module state driven by the web UI.
services = None
search_filter = None
cfg_draw_tables = True
cfg_draw_mermaid_diagram = True
class Node:
    """Class encapsulating a worker node for purpose of collecting metrics."""

    def __init__(self, node_name):
        """Initialize class variables."""
        self.node_name = node_name
        self.mem_used = 0  # MB used (from `free`)
        self.mem_avail = 0  # MB total (from `free`)
        self.mem_warn = 0
        self.load = 0  # 1-minute load average
        self.cpus = 0
        self.cpu_warn = 0
        self.df = []  # one dict per relevant mount point
        self.is_up = True
        self.warnings = 0
        self.uptime = ""
        self.update_time_ms = 0  # how long the last metrics refresh took

    def update_metrics(self):
        """Update worker node metrics by running commands over ssh."""
        time_now = datetime.datetime.now()
        self.is_up = True
        mem_cmd = ["ssh", "-oConnectTimeout=3", "root@" + self.node_name, "free"]
        try:
            mem_cmd_out_words = lines_words(subprocess.check_output(mem_cmd))
        except subprocess.CalledProcessError:
            # Node unreachable: mark it down, count one warning, skip the rest.
            self.is_up = False
            self.warnings += 1
            return
        # `free` row 1 is "Mem:"; column 1 is total kB, column 2 used kB -> MB.
        self.mem_used = int(int(mem_cmd_out_words[1][2]) // 1e3)
        self.mem_avail = int(int(mem_cmd_out_words[1][1]) // 1e3)
        load_cmd = ["ssh", "root@" + self.node_name, "uptime"]
        load_cmd_out = subprocess.check_output(load_cmd)
        # Uptime string sits between a fixed prefix and the first comma.
        uptime_end_idx = load_cmd_out.decode().index(",")
        self.uptime = load_cmd_out.decode()[13:uptime_end_idx]
        load_cmd_out_words = lines_words(load_cmd_out)
        # Third word from the end is the 1-minute load; strip its trailing comma.
        self.load = float(load_cmd_out_words[0][-3][:-1])
        cpus_cmd = ["ssh", "root@" + self.node_name, "cat /proc/cpuinfo"]
        cpus_cmd_out_words = subprocess.check_output(cpus_cmd).decode().split()
        # /proc/cpuinfo has one "vendor_id" entry per logical CPU.
        self.cpus = cpus_cmd_out_words.count("vendor_id")
        df_cmd = ["ssh", "root@" + self.node_name, "df"]
        # Drop the df header row and the trailing empty line.
        df_out_words = lines_words(subprocess.check_output(df_cmd))[1:-1]
        self.mem_warn = False
        if int(self.mem_used) > MEM_USED_WARN_PCT * int(self.mem_avail):
            self.mem_warn = True
            if not is_node_alert_acked(self.node_name, "mem"):
                self.warnings += 1
        self.cpu_warn = False
        if float(self.load) > CPU_LOAD_WARN_PCT * int(self.cpus):
            self.cpu_warn = True
            if not is_node_alert_acked(self.node_name, "cpu"):
                self.warnings += 1
        for df_data in df_out_words:
            device = df_data[0]
            # Mount point may contain spaces; rejoin the remaining columns.
            mounted_on = " ".join(df_data[5:])
            used_gb = int(df_data[2]) / 1000000
            avail_gb = int(df_data[3]) / 1000000
            total_gb = used_gb + avail_gb
            percent_used = used_gb / total_gb
            # Only consider real block devices (skip tmpfs, overlay, ...).
            if not (
                device.startswith("/dev/sd")
                or device.startswith("/dev/mapper")
                or device.startswith("/dev/vd")
                or device.startswith("/dev/root")
            ):
                continue
            if mounted_on == "/boot/efi":
                continue
            # Warn only when nearly full AND less than 10 GB remain free.
            warn = percent_used > DISK_USED_WARN_PCT and avail_gb < 10
            self.df.append(
                {
                    "mounted_on": mounted_on,
                    "used_gb": used_gb,
                    "total_gb": total_gb,
                    "percent_used": percent_used,
                    "warn": warn,
                }
            )
            if warn:
                # Alert keys must not contain '/', so flatten the mount path.
                mounted_on_nice = mounted_on.replace("/", "-")
                if not is_node_alert_acked(self.node_name, mounted_on_nice):
                    self.warnings += 1
        self.update_time_ms = (
            datetime.datetime.now() - time_now
        ).total_seconds() * 1000
class Nodes:
    """Class for storing a collection of worker nodes."""

    def __init__(self, node_names):
        """Initialize class variables."""
        self.nodes = []
        self.warnings = 0
        self.total_mem_used = 0
        self.total_mem_avail = 0
        self.total_load = 0
        self.total_cpus = 0
        self.total_df_used_gb = 0
        self.total_df_total_gb = 0
        for node_name in node_names:
            self.nodes.append(Node(node_name))

    def update(self):
        """Update metrics on all nodes and aggregate cluster-wide totals."""
        # NOTE(review): only `warnings` is reset here; the total_* counters keep
        # accumulating if update() is called more than once on the same object.
        self.warnings = 0
        for node in self.nodes:
            node.update_metrics()
            self.warnings += node.warnings
            self.total_mem_used += node.mem_used
            self.total_mem_avail += node.mem_avail
            self.total_load += node.load
            self.total_cpus += node.cpus
            self.total_df_used_gb += sum([disk["used_gb"] for disk in node.df])
            self.total_df_total_gb += sum([disk["total_gb"] for disk in node.df])
class Service:
    """Class encapsulating a service (across all worker nodes)."""

    def __init__(self, service_dict):
        """Initialize class variables from one `services:` entry of the config."""
        self.name = service_dict["name"]
        self.nodes = service_dict.get("nodes", [])
        self.deploy_script = service_dict.get("deploy", None)
        self.delete_script = service_dict.get("delete", None)
        self.systemd_unit = service_dict.get("unit", None)
        self.svc_uris = service_dict.get("svc_uris", [])
        self.doc_sites = service_dict.get("doc_sites", [])
        self.status = dict()  # node name -> "active" / "inactive" / "unknown"
        self.last_changed = dict()  # node name -> systemctl "since" suffix

    def update_status_on_node(self, node_name):
        """Update the service status on a node by running systemctl status."""
        cmd = [
            "ssh",
            "root@" + node_name,
            "systemctl",
            "--no-page",
            "status",
            self.name,
        ]
        p = subprocess.run(cmd, stdout=subprocess.PIPE)
        try:
            ws = p.stdout.decode().split("\n")
            # Line 3 looks like "Active: active (running) since ...; 2h ago";
            # keep the part after "; ".
            semi_col_idx = ws[2].index(";")
            self.last_changed[node_name] = ws[2][semi_col_idx + 2 :]
        except Exception:
            # NOTE(review): if decode() itself raised, `ws` is unbound here.
            print(f"couldn't parse last_changed: {ws}")
        # systemctl status exit codes: 0 = running, 3 = not running.
        if p.returncode == 0:
            self.status[node_name] = "active"
        elif p.returncode == 3:
            self.status[node_name] = "inactive"
        else:
            self.status[node_name] = "unknown"

    def update_status_on_all_nodes(self):
        """Update service status on all nodes."""
        for node_name in self.nodes:
            self.update_status_on_node(node_name)

    def start(self, node_name):
        """Start service on node by running systemctl start."""
        cmd = ["ssh", "root@" + node_name, "systemctl", "start", self.name]
        subprocess.run(cmd)

    def stop(self, node_name):
        """Stop service on node by running systemctl stop."""
        cmd = ["ssh", "root@" + node_name, "systemctl", "stop", self.name]
        subprocess.run(cmd)

    def restart(self, node_name):
        """Restart service on node by running systemctl restart."""
        cmd = ["ssh", "root@" + node_name, "systemctl", "restart", self.name]
        subprocess.run(cmd)

    def deploy(self, node_name):
        """Return deploy script for service on node."""
        script = "set -x\n\n"
        if self.systemd_unit:
            # Write the unit file locally, then copy it over and reload systemd.
            with open(f"/tmp/{self.name}.service", "w") as f:
                f.write(self.systemd_unit)
            script += f"scp /tmp/{self.name}.service root@{node_name}:/lib/systemd/system/{self.name}.service\n"
            script += f"ssh root@{node_name} systemctl daemon-reload\n"
        with open(f"/tmp/{self.name}.deploy.sh", "w") as f:
            f.write("set -x\n\n")
            f.write(self.deploy_script)
        script += f"scp /tmp/{self.name}.deploy.sh root@{node_name}:/tmp/sc.{self.name}.deploy.sh\n"
        script += f"ssh root@{node_name} bash /tmp/sc.{self.name}.deploy.sh\n"
        if self.systemd_unit:
            script += f"ssh root@{node_name} systemctl start {self.name}.service\n"
        return script

    def delete(self, node_name):
        """Return delete deployment script for service on node."""
        script = "set -x\n\n"
        if self.systemd_unit:
            # Stop and remove the unit before running the user's delete script.
            script += f"ssh root@{node_name} systemctl stop {self.name}.service\n"
            script += (
                f"ssh root@{node_name} rm /lib/systemd/system/{self.name}.service\n"
            )
            script += f"ssh root@{node_name} systemctl daemon-reload\n"
        with open(f"/tmp/{self.name}.delete.sh", "w") as f:
            f.write("set -x\n\n")
            f.write(self.delete_script)
        script += f"scp /tmp/{self.name}.delete.sh root@{node_name}:/tmp/sc.{self.name}.delete.sh\n"
        script += f"ssh root@{node_name} bash /tmp/sc.{self.name}.delete.sh\n"
        return script

    def update(self, node_name):
        """Return update script for service on node by running delete and then deploy."""
        script = self.delete(node_name)
        script += self.deploy(node_name)
        return script
class Services:
    """Class encapsulating a collection of services."""

    def __init__(self, conf_str):
        """Initialize class variables from the YAML config string."""
        self.config = yaml.safe_load(conf_str)
        self.all = []  # every Service, in config order
        self.by_name = dict()  # service name -> Service
        self.by_node = collections.defaultdict(list)  # node name -> [Service]
        self.warnings = 0
        self._config_changed()

    def _config_changed(self):
        """Update class variables to be done when the config changes."""
        for service_dict in self.config.get("services", []):
            service = Service(service_dict)
            self.all.append(service)
            self.by_name[service.name] = service
            for node_name in service.nodes:
                self.by_node[node_name].append(service)

    def update_service_status(self):
        """Update services status on all nodes and print a summary table."""
        self.warnings = 0
        out = []
        for service in self.all:
            print(f"updating {service.name}")
            service.update_status_on_all_nodes()
            for node_name, status in service.status.items():
                out.append(
                    [
                        service.name,
                        node_name,
                        status,
                        service.last_changed.get(node_name),
                    ]
                )
                if status != "active":
                    # Count a warning unless the user acknowledged this alert.
                    if not is_service_alert_acked(service.name, node_name):
                        self.warnings += 1
        print()
        print(
            tabulate.tabulate(
                out, headers=["service", "node", "status", "last_updated"]
            )
        )
        print()

    def get_node_names(self):
        """Return all node names."""
        return self.by_node.keys()
def icon(name):
    """Format html for fontawesome icons."""
    return '<i class="fa fa-{} fa-fw"></i>'.format(name)
@app.context_processor
def inject_globals():
    """Make the icon() helper available inside every template."""
    return {
        "icon": icon,
    }
def make_service_node_dict():
    """Make a dict[dict] to be used by the dashboard.

    Maps service name -> node name -> Service object.
    """
    out = collections.defaultdict(dict)
    for service in services.all:
        for node_name in service.nodes:
            out[service.name][node_name] = service
    return out
@app.route("/start/<service>/<node_name>")
def start(service, node_name):
    """Start service on node endpoint."""
    services.by_name[service].start(node_name)
    return flask.redirect(flask.url_for("index"))
@app.route("/stop/<service>/<node_name>")
def stop(service, node_name):
    """Stop service on node endpoint."""
    services.by_name[service].stop(node_name)
    return flask.redirect(flask.url_for("index"))
@app.route("/restart/<service>/<node_name>")
def restart(service, node_name):
    """Restart service on node endpoint."""
    services.by_name[service].restart(node_name)
    return flask.redirect(flask.url_for("index"))
def web_run_term(cmd):
    """Start either a native or web terminal."""
    if cfg_term_program == "xtermjs":
        # In-browser terminal: remember the command; it is actually launched
        # by the socket.io "connect" handler when the terminal page loads.
        app.config["cmd"] = cmd
        return flask.render_template("term.jinja2")
    else:
        # Native terminal emulator spawned on the server side.
        term_cmd = cfg_term_program.split() + cmd
        subprocess.Popen(term_cmd)
        return flask.redirect(flask.url_for("index"))
@app.route("/open_terminal_log/<service>/<node_name>")
def open_terminal_log(service, node_name):
    """Open a terminal following the service's journal on the node."""
    cmd = ["ssh", "root@" + node_name, "journalctl", "-fu", service]
    return web_run_term(cmd)
@app.route("/open_terminal_shell/<service>/<node_name>")
def open_terminal_shell(service, node_name):
    """Open an interactive root shell on the node."""
    cmd = ["ssh", "root@" + node_name]
    return web_run_term(cmd)
@app.route("/deploy/<service>/<node_name>")
def deploy(service, node_name):
    """Deploy service on node endpoint."""
    script = services.by_name[service].deploy(node_name)
    print(script)
    with open(f"/tmp/deploy.{service}.{node_name}.sh", "w") as f:
        f.write(script)
    # `sleep infinity` keeps the terminal open so the output stays visible.
    cmd = [
        "bash",
        "-c",
        f"bash /tmp/deploy.{service}.{node_name}.sh; echo '\n\nDeploy finished\n\n'; sleep infinity",
    ]
    return web_run_term(cmd)
@app.route("/delete/<service>/<node_name>")
def delete(service, node_name):
    """Delete service on node endpoint."""
    script = services.by_name[service].delete(node_name)
    print(script)
    with open(f"/tmp/delete.{service}.{node_name}.sh", "w") as f:
        f.write(script)
    # `sleep infinity` keeps the terminal open so the output stays visible.
    cmd = [
        "bash",
        "-c",
        f"bash /tmp/delete.{service}.{node_name}.sh; echo '\n\nDelete finished\n\n'; sleep infinity",
    ]
    return web_run_term(cmd)
@app.route("/update/<service>/<node_name>")
def update(service, node_name):
    """Update (delete then deploy) service on node endpoint."""
    script = services.by_name[service].update(node_name)
    print(script)
    with open(f"/tmp/update.{service}.{node_name}.sh", "w") as f:
        f.write(script)
    # `sleep infinity` keeps the terminal open so the output stays visible.
    cmd = [
        "bash",
        "-c",
        f"bash /tmp/update.{service}.{node_name}.sh; echo '\n\nUpdate finished\n\n'; sleep infinity",
    ]
    return web_run_term(cmd)
@app.route("/apply_settings", methods=["POST"])
def apply_settings():
    """Save dashboard page settings (refresh rate, search filter) endpoint."""
    print(flask.request.form)
    if flask.request.form.get("Submit") == "Submit_apply":
        global refresh_rate
        refresh_rate = flask.request.form.get("refresh_rate")
    if flask.request.form.get("Submit") == "Submit_search":
        global search_filter
        search_filter = flask.request.form.get("search_filter").strip()
        print(search_filter)
        print(type(search_filter))
    return flask.redirect(flask.url_for("index"))
def process_mermaid_diagram(config, nodes, services):
    """Set colors for the nodes in the mermaid diagram.

    And add @ before node names.
    """
    # NOTE(review): the `services` parameter shadows the module-level global;
    # `nodes` is accepted but never used here.
    service_names = [service["name"] for service in config.get("services")]
    node_names = services.get_node_names()
    diagram_lines = config.get("mermaid_diagram").split("\n")
    out1 = []  # rewritten diagram lines
    out2 = ["classDef good fill:#9f9;", "classDef bad fill:#f99;"]  # class assignments
    for diagram_line in diagram_lines:
        # Match mermaid node definitions of the form "id[service node]".
        m = re.match(r"(.*)\[(.*) (.*)\]", diagram_line)
        if not m:
            out1.append(diagram_line)
        else:
            item_mermaid_id = m.group(1)
            item_service_name = m.group(2).replace("<br/>", "")
            item_service_name_orig = m.group(2)
            item_node_name = m.group(3)
            if item_service_name in service_names and item_node_name in node_names:
                # Insert the fontawesome "@" icon between service and node.
                out1.append(
                    f"{item_mermaid_id}[{item_service_name_orig} fa:fa-at {item_node_name}]"
                )
                item_service_status = services.by_name[item_service_name].status[
                    item_node_name
                ]
                # Color the diagram node by the service's systemd state.
                if item_service_status == "active":
                    out2.append(f"class {item_mermaid_id} good;")
                else:
                    out2.append(f"class {item_mermaid_id} bad;")
            else:
                out1.append(diagram_line)
    return "\n".join(out1 + out2)
# Documentation links always shown on the dashboard, merged with the
# config's own "doc_sites" entries in index().
INCLUDED_DOC_SITES = [
    {
        "name": "Sillycat",
        "url": "https://github.com/dvolk/sc",
    },
    {
        "name": "Systemd on Archwiki",
        "url": "https://wiki.archlinux.org/title/systemd",
    },
    {
        "name": "Podman",
        "url": "https://docs.podman.io/en/latest/",
    },
    {
        "name": "Mermaid.js flowcharts",
        "url": "https://mermaid-js.github.io/mermaid/#/flowchart",
    },
]
@app.route("/")
def index():
    """Dashboard index endpoint."""
    global services
    # Re-read the config and refresh all service/node state on every page load.
    config_text = pathlib.Path(cfg_services_yaml).read_text()
    config = yaml.safe_load(config_text)
    services = Services(config_text)
    services.update_service_status()
    out = make_service_node_dict()
    nodes = Nodes(services.get_node_names())
    doc_sites = INCLUDED_DOC_SITES + config.get("doc_sites", [])
    nodes.update()
    title = "sillycat dashboard"
    if nodes.warnings or services.warnings:
        title = "WARN sillycat dashboard"
    mermaid_diagram = None
    # Offer every *.yaml in the working directory as a selectable config.
    config_paths = [x.name for x in pathlib.Path(".").glob("*.yaml")]
    if cfg_draw_mermaid_diagram:
        mermaid_diagram = config.get("mermaid_diagram")
        mermaid_diagram = process_mermaid_diagram(config, nodes, services)
    if flask.request.args.get("json"):
        # Machine-readable dump of node and service state.
        n = [node.__dict__ for node in nodes.nodes]
        s = [service.__dict__ for service in services.all]
        return json.dumps({"nodes": n, "services": s})
    return flask.render_template(
        "services.jinja2",
        services=services,
        out=out,
        nodes=nodes,
        refresh_rate=refresh_rate,
        search_filter=search_filter,
        title=title,
        doc_sites=doc_sites,
        mermaid_diagram=mermaid_diagram,
        cfg_draw_mermaid_diagram=cfg_draw_mermaid_diagram,
        cfg_draw_tables=cfg_draw_tables,
        config_paths=config_paths,
        cfg_services_yaml=cfg_services_yaml,
    )
@app.route("/change_config", methods=["POST"])
def change_config():
    """Endpoint to switch the dashboard to a different config file."""
    if flask.request.form.get("Submit") == "Submit_change":
        new_config = flask.request.form.get("new_config")
        # Only switch if the requested file actually exists.
        if pathlib.Path(new_config).exists():
            global cfg_services_yaml
            cfg_services_yaml = new_config
    return flask.redirect(flask.url_for("index"))
@app.route("/toggle_acknowledge_alert/<service_name>/<node_name>/<node_alert_type>")
def toggle_acknowledge_alert(service_name, node_name, node_alert_type):
    """Endpoint to toggle alert.

    If the argument does not apply to the alert type, pass '-' for that argument.
    """
    # Alerts are keyed by the plain concatenation of the three parts.
    elem = service_name + node_name + node_alert_type
    if elem in ACKNOWLEDGED_ALERTS:
        ACKNOWLEDGED_ALERTS.remove(elem)
    else:
        ACKNOWLEDGED_ALERTS.add(elem)
    return flask.redirect(flask.url_for("index"))
@app.route("/toggle_mermaid_diagram")
def toggle_mermaid_diagram():
    """Endpoint to toggle mermaid diagram drawing."""
    global cfg_draw_mermaid_diagram
    cfg_draw_mermaid_diagram = not cfg_draw_mermaid_diagram
    return flask.redirect(flask.url_for("index"))
@app.route("/toggle_tables")
def toggle_tables():
    """Endpoint to toggle drawing of the metric tables."""
    global cfg_draw_tables
    cfg_draw_tables = not cfg_draw_tables
    return flask.redirect(flask.url_for("index"))
def is_service_alert_acked(service_name, node_name):
    """Return if the service alert is acknowledged."""
    key = f"{service_name}{node_name}-"
    return key in ACKNOWLEDGED_ALERTS
def is_node_alert_acked(node_name, node_alert_type):
    """Return if the node alert is acknowledged."""
    key = f"-{node_name}{node_alert_type}"
    return key in ACKNOWLEDGED_ALERTS
def is_ok_config(sc_config):
    """Check if the argument string is a valid (parseable YAML) config."""
    try:
        yaml.safe_load(sc_config)
    except Exception as e:
        print(e)
        return False
    return True
@app.route("/config", methods=["GET", "POST"])
def config():
    """Config view/post endpoint."""
    global cfg_services_yaml
    if flask.request.method == "GET":
        sc_config = pathlib.Path(cfg_services_yaml).read_text()
        return flask.render_template(
            "config.jinja2",
            sc_config=sc_config,
            title="sillycat configuration",
        )
    if flask.request.method == "POST":
        print(flask.request.form)
        if flask.request.form.get("Submit") == "Submit_cancel":
            return flask.redirect(flask.url_for("index"))
        if flask.request.form.get("Submit") == "Submit_save":
            unsafe_sc_config = flask.request.form.get("new_config")
            time_now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            # Persist the submitted config to a timestamped file first...
            with open(f"./config_{time_now}.yaml", "w") as f:
                f.write(unsafe_sc_config)
            # ...then only switch to it if it parses as valid YAML.
            if not is_ok_config(unsafe_sc_config):
                return flask.redirect(flask.url_for("config"))
            cfg_services_yaml = f"./config_{time_now}.yaml"
            return flask.redirect(flask.url_for("index"))
# pyxtermjs functions
@app.route("/close_terminal")
def close_terminal():
    """Close terminal and redirect back to index."""
    app.config["term_proc_exit"] = True  # tell the pty reader task to stop
    os.kill(app.config["child_pid"], 15)  # SIGTERM the pty child
    app.config["socketio_thread"].join()  # wait for the reader to finish
    return flask.redirect(flask.url_for("index"))
def set_winsize(fd, row, col, xpix=0, ypix=0):
    """Set window size with termios."""
    # TIOCSWINSZ expects rows, cols, x-pixels, y-pixels as unsigned shorts.
    winsize = struct.pack("HHHH", row, col, xpix, ypix)
    fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
def read_and_forward_pty_output():
    """Background task: pump output from the pty master fd to the websocket."""
    max_read_bytes = 1024 * 20
    while True:
        socketio.sleep(0.01)
        if app.config["fd"]:
            try:
                timeout_sec = 0  # non-blocking poll
                (data_ready, _, _) = select.select(
                    [app.config["fd"]], [], [], timeout_sec
                )
                if data_ready:
                    output = os.read(app.config["fd"], max_read_bytes).decode(
                        errors="ignore"
                    )
                    socketio.emit("pty-output", {"output": output}, namespace="/pty")
            except OSError:
                # Child exited / fd closed: reset terminal state and stop.
                app.config["child_pid"] = None
                app.config["fd"] = None
                app.config["cmd"] = "false"
                app.config["term_proc_exit"] = False
                print("*** bye!")
                return
        if app.config["term_proc_exit"]:
            # close_terminal() requested shutdown: reset state and stop.
            app.config["child_pid"] = None
            app.config["fd"] = None
            app.config["cmd"] = "false"
            app.config["term_proc_exit"] = False
            print("*** bye!")
            return
@socketio.on("pty-input", namespace="/pty")
def pty_input(data):
    """Write to the child pty. The pty sees this as if you are typing in a real terminal."""
    if app.config["fd"]:
        os.write(app.config["fd"], data["input"].encode())
@socketio.on("resize", namespace="/pty")
def resize(data):
    """Resize the child pty to the client's terminal dimensions."""
    if app.config["fd"]:
        set_winsize(app.config["fd"], data["rows"], data["cols"])
@socketio.on("connect", namespace="/pty")
def connect():
    """New client connected: fork a pty child running app.config["cmd"]."""
    if app.config["child_pid"]:
        # already started child process, don't start another
        return
    # create child process attached to a pty we can read from and write to
    (child_pid, fd) = pty.fork()
    if child_pid == 0:
        # this is the child process fork.
        # anything printed here will show up in the pty, including the output
        # of this subprocess
        print(app.config["cmd"])
        subprocess.run(app.config["cmd"])
    else:
        # this is the parent process fork.
        # store child fd and pid
        app.config["fd"] = fd
        app.config["child_pid"] = child_pid
        set_winsize(fd, 50, 50)
        s = socketio.start_background_task(target=read_and_forward_pty_output)
        app.config["socketio_thread"] = s
# end of pyxtermjs functions
def main(services_yaml, term_program="x-terminal-emulator"):
    """Start sc web service.

    Stores the service-config path and terminal program in module globals
    (read elsewhere in this file), then runs the socket.io dev server.
    NOTE(review): debug=True and the fixed port/host are development
    settings — confirm before deploying.
    """
    global cfg_services_yaml
    cfg_services_yaml = services_yaml
    global cfg_term_program
    cfg_term_program = term_program
    socketio.run(app, debug=True, port=1234, host="127.0.0.1")
if __name__ == "__main__":
    # `global` at module scope is a no-op; kept as-is (refresh_rate is simply
    # a module-level name initialized here).
    global refresh_rate
    refresh_rate = ""
    # argh turns main()'s signature into the CLI.
    argh.dispatch_command(main)
| dvolk/sc | app.py | app.py | py | 25,130 | python | en | code | 0 | github-code | 90 |
12572266194 | from setuptools import setup, find_packages
with open('README.md') as f:
    readme = f.read()

setup(
    name="libproton",
    version="3.0",
    packages=find_packages(),
    # BUG FIX: the README was previously passed as `description`, which
    # setuptools expects to be a one-line summary. The full README belongs
    # in `long_description` (rendered by PyPI on the project page).
    description=readme.splitlines()[0] if readme else "",
    long_description=readme,
    author="Peter Law",
    author_email="PeterJCLaw@gmail.com",
    install_requires=[
        'PyYAML >=3.11, <4',
    ],
    tests_require=[
        'nose >=1.3, <2',
        'mock >=1.0.1, <2',
    ],
    zip_safe=True,
)
| srobo-legacy/comp-libproton | setup.py | setup.py | py | 435 | python | en | code | 0 | github-code | 90 |
33242279996 | import cx_Oracle
def queryLastElement():
    """Print the beeGenes row with ref_num 147907436, including its CLOB column.

    BUG FIX: the original leaked the cursor and connection when the query or
    CLOB read raised; try/finally guarantees cleanup on every path.
    """
    # NOTE(review): hard-coded credentials — move to config/env for real use.
    con = cx_Oracle.connect('arushi/harkersoftball')
    try:
        cur = con.cursor()
        try:
            obj = cur.execute('''select * from beeGenes where ref_num=147907436''')
            for x in obj:
                print(x)
                # Column 1 is a LOB handle; .read() materializes its contents.
                print(x[1].read())
        finally:
            cur.close()
    finally:
        con.close()


queryLastElement()
| netyarushi/genome_sequence | 4060Final/qLastElement.py | qLastElement.py | py | 323 | python | en | code | 0 | github-code | 90 |
# Find the largest even number in the list — first with a comprehension,
# then with an equivalent explicit loop.
lista = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
lista1 = max([ szam for szam in lista if szam % 2 == 0])
a = []
for sor in lista:
    if sor % 2 == 0:
        a.append(sor)
print(max(a))
_____________________________________
'''
N�v;Oszt�ly;Els� nap;Utols� nap;Mulasztott �r�k
Balogh P�ter;6a;1;1;5
'''
class Hianyzas:
    """One absence record parsed from a semicolon-separated CSV line.

    Expected field order: name;class;first day;last day;missed lessons.
    """

    def __init__(self, sor):
        fields = sor.strip().split(";")
        nev, osztaly, elso, utolso, mulasztott = fields
        self.nev = nev                      # student name
        self.osztaly = osztaly              # class/form (e.g. "6a")
        self.elso = int(elso)               # first day of absence
        self.utolso = int(utolso)           # last day of absence
        self.mulasztott = int(mulasztott)   # number of missed lessons
# Load all absence records (skipping the CSV header line).
lista = []
with open("szeptember.csv", encoding="latin2") as f:
    f.readline()
    lista = [Hianyzas(sor) for sor in f]

#2 — total missed lessons (comprehension version, then loop version)
mulasztasok = sum([sor.mulasztott for sor in lista])
print(mulasztasok)
x = []
for sor in lista:
    x.append(sor.mulasztott)
print(sum(x))
#3 — read a day number and a name from the user
szam = int(input("Kérek egy számot 1 és 30 között! "))
nev = input("Kérek egy nevet! ")
if 1 < szam < 30:
    pass
else:
    # NOTE(review): setting szam to None makes the #5 comparison below raise
    # a TypeError on invalid input — confirm intended behavior.
    szam = None
#4 — was the named student absent at all? (comprehension, then loop)
volt_e = [sor.mulasztott for sor in lista if nev == sor.nev and sor.mulasztott > 0]
if len(volt_e):
    print("Hiányzot szeptemberben!")
else:
    print("Nem hiányzot Szeptemberben!")
# NOTE(review): rebinding `f` here shadows the (already closed) file handle.
f = []
for sor in lista:
    if nev == sor.nev and sor.mulasztott > 0:
        f.append(sor.mulasztott)
if len(f) > 0:
    print("Hiányzot szeptemberben!")
else:
    print("Nem hiányzot Szeptemberben!")
#5 — students absent on day `szam` (comprehension version)
hiany = [sor for sor in lista if sor.elso <= szam <= sor.utolso]
[print(f"{sor.nev} ({sor.osztaly})") for sor in hiany]
# Loop version of #5: collect (name, class) pairs of students absent on day szam.
h = []
for sor in lista:
    if sor.elso <= szam <= sor.utolso:
        # BUG FIX: list.append takes a single argument; the original
        # h.append(sor.nev, sor.osztaly) raised TypeError. Append a tuple.
        h.append((sor.nev, sor.osztaly))
#6
_________________________________________________________________________
| kovacsbalazspeter21/balazs | python/proz.py | proz.py | py | 1,665 | python | hu | code | 0 | github-code | 90 |
42967356298 | import logging
import requests
import uuid
LOGGER = logging.getLogger(__name__)
class BridgeInfo(object):
    """Lightweight value object describing a bridge under test."""

    def __init__(self, name='', bridge_type=''):
        """Create a bridge description.

        Keyword Arguments:
        name -- The name of the bridge (optional)
        bridge_type -- The type of bridge (optional)
        """
        self.name, self.bridge_type = name, bridge_type
        # Every instance gets a fresh unique id so it can be addressed in ARI.
        self.uid = str(uuid.uuid4())
class BridgeErrorTest(object):
    """Responsible for testing error conditions during bridge creation."""

    DEFAULT_TEST_NAME = "acme"
    DEFAULT_TEST_TYPE = "proxy_media"

    def __init__(self, ari, event):
        """Constructor.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        event -- The ARI StasisStart event object
        """
        # The pass/fail state variable for this test.
        # Note: Value is only set to 'True' during initialization.
        self.passing = True

        # Initialize the baseline bridge info objects used during the tests
        self.baseline_bridges = [BridgeInfo('road-runner'),
                                 BridgeInfo(),
                                 BridgeInfo('wiley-coyote', 'holding'),
                                 BridgeInfo('', 'mixing')]

        # The channel id that stasis gives us in the event argument
        # Needed later for tearing down the test
        self.stasis_channel_id = event['channel']['id'] or None

        # Record state of ari.allow_errors so that it can
        # correctly be reset at the end of the test
        self.__set_ari_allow_errors = ari.allow_errors or False

    def run_test(self, ari):
        """Runs the test.

        Tries to set up the state needed by the test and running the test
        against all baseline bridges created during setup. Then tears
        down the state created during setup.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        """
        try:
            self.__setup_test(ari)
            # Idiom fix: iterate the bridges directly instead of
            # `for i in range(len(...))` index juggling.
            for bridge in self.baseline_bridges:
                self.__create_duplicate_bridges(ari, bridge)
        finally:
            self.__tear_down_test(ari)
        return self.passing

    def __setup_test(self, ari):
        """Sets up the state needed for the test to execute.

        Configures ARI to run the test and builds the baseline bridges used
        during the test.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        """
        LOGGER.debug("Performing test setup ...")

        # Disable ARI auto-exceptions on HTTP errors
        ari.set_allow_errors(True)

        # Create a baseline bridge using bridge 0's id and name, but no type
        self.__create_bridge(ari,
                             'ok',
                             None,
                             self.baseline_bridges[0].uid,
                             name=self.baseline_bridges[0].name)

        # Create a baseline bridge without a name or type, using bridge 1's id
        self.__create_bridge(ari,
                             'ok',
                             None,
                             self.baseline_bridges[1].uid)

        # Create a baseline bridge using bridge 2's id, name, and type
        self.__create_bridge(ari,
                             'ok',
                             None,
                             self.baseline_bridges[2].uid,
                             name=self.baseline_bridges[2].name,
                             type=self.baseline_bridges[2].bridge_type)

        # Create a baseline bridge without a name, using bridge 3's id and type
        self.__create_bridge(ari,
                             'ok',
                             None,
                             self.baseline_bridges[3].uid,
                             name=self.baseline_bridges[3].name,
                             type=self.baseline_bridges[3].bridge_type)
        return

    def __tear_down_test(self, ari):
        """Tears down the state created during test setup.

        Restores ARI to its previous configuration and deletes the channel
        and bridges used during test execution.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        """
        LOGGER.debug("Performing test tear down ...")

        # Delete stasis channel used during the test
        ari.delete('channels', self.stasis_channel_id)

        # Delete bridges created during setup (direct iteration; see run_test)
        for bridge in self.baseline_bridges:
            self.__delete_bridge(ari, bridge)

        # Restore ARI auto-exceptions on HTTP errors to its original value
        ari.set_allow_errors(self.__set_ari_allow_errors or False)
        LOGGER.debug("Test tear down complete.")
        return

    def __validate_server_response(self, expected, resp):
        """Validates the server response against the expected response.

        Keyword Arguments:
        expected -- The expected http status code from the server.
        resp -- The server response object.
        """
        expected_code = requests.codes[expected]
        if expected_code != resp.status_code:
            self.passing = False
            LOGGER.error("Test Failed. Expected %d (%s), got %s (%r)",
                         expected_code,
                         expected,
                         resp.status_code,
                         resp.json())
            return False
        return True

    def __delete_bridge(self, ari, bridge_info):
        """Deletes the bridge using the id of the bridge_info parameter.

        Keyword Arguments:
        ari -- ARI wrapper object.
        bridge_info -- Object containing info about the bridge to delete.
        """
        LOGGER.debug("Deleting bridge [%s] with id: [%s]",
                     bridge_info.name,
                     bridge_info.uid)
        ari.delete('bridges', bridge_info.uid)
        return

    def __create_bridge(self,
                        ari,
                        expected_status_code,
                        description,
                        bridge_uid,
                        **kwargs):
        """Creates a bridge and validates the server's status code.

        Using the parameters given, posts to the 'bridges' endpoint. Then,
        validates the server responded with the expected status code.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        expected_status_code -- The expected response from the server
        description -- The text to write to the log
        bridge_uid -- The unique id for the bridge to create
        kwargs -- The query parameters
        """
        if description:
            LOGGER.debug(description)
        resp = ari.post('bridges',
                        bridge_uid,
                        **kwargs)
        self.__validate_server_response(expected_status_code, resp)
        return

    def __create_duplicate_bridges(self, ari, bridge_info):
        """Attempts to create duplicate bridges.

        Using the details of an existing bridge provided by the bridge_info
        parameter, exercises the id/name/type combinations that must be
        rejected by the 'bridges' endpoint.

        Keyword Arguments:
        ari -- The wrapper object for ARI
        bridge_info -- The baseline bridge's information to use
        """
        LOGGER.debug("Current baseline bridge: [%s]", bridge_info.uid)

        # Test AD
        description = "Attempting to create a bridge using the same id \
                       as the current baseline bridge, but with no name \
                       and a different type specified"
        self.__create_bridge(ari,
                             'internal_server_error',
                             description,
                             bridge_info.uid,
                             type=self.DEFAULT_TEST_TYPE)

        # Test CD
        description = "Attempting to create a bridge, using the same id \
                       and name as the current baseline bridge but a \
                       different type specified"
        self.__create_bridge(ari,
                             'internal_server_error',
                             description,
                             bridge_info.uid,
                             name=bridge_info.name,
                             type=self.DEFAULT_TEST_TYPE)

        # Test DA
        description = "Attempting to create a bridge using the same id \
                       as the current baseline bridge but with a \
                       different name and no type specified"
        self.__create_bridge(ari,
                             'internal_server_error',
                             description,
                             bridge_info.uid,
                             name=self.DEFAULT_TEST_NAME)

        # Test DC
        description = "Attempting to create a bridge, using the same id \
                       and type as the current baseline bridge but with a \
                       different name specified"
        self.__create_bridge(ari,
                             'internal_server_error',
                             description,
                             bridge_info.uid,
                             name=self.DEFAULT_TEST_NAME,
                             type=bridge_info.bridge_type)

        # Test DD
        description = "Attempting to create a bridge using the same id \
                       as the current baseline bridge but with a \
                       different name and a different type"
        self.__create_bridge(ari,
                             'internal_server_error',
                             description,
                             bridge_info.uid,
                             name=self.DEFAULT_TEST_NAME,
                             type=self.DEFAULT_TEST_TYPE)
        return
def on_start(ari, event, test_object):
    """Event handler for the StasisStart event.

    Builds and runs the bridge error test, then stops the reactor.

    Keyword Arguments:
    ari -- The wrapper object for ARI
    event -- The ARI StasisStart event object
    test_object -- The TestCase object running the test
    """
    LOGGER.debug("Starting bridge error test: on_start(%r)", event)
    test = BridgeErrorTest(ari, event)
    result = test.run_test(ari)
    # BUG FIX: corrected "Finsihed" typo in the log message.
    LOGGER.debug("Finished testing for bridge creation error conditions.")
    test_object.stop_reactor()
    return result
| asterisk/testsuite | tests/rest_api/bridges/error/error.py | error.py | py | 10,618 | python | en | code | 30 | github-code | 90 |
18360532959 | import numpy as np
# Competitive-programming script: read N values, track the running maximum
# of "ascending" values, and answer 'No' if any value drops more than 1
# below that maximum.
N = int(input())
l = np.array(list(map(int,input().split())))
ans = 'Yes'
M = l[0]
for i in range(1,N):
    if l[i] > l[i-1]:
        # Value increased relative to its predecessor: new reference maximum.
        M = l[i]
    if l[i]< M-1:
        ans ='No'
        break
print(ans)
217730967 | import argparse
import time
import numpy as np
import os
import errno
import sys
from object_detector import ObjectDetector as TFObjectDetector
from object_detector_lite import ObjectDetector as LiteObjectDetector
import cv2
description_text = """\
Use this script to visualize network output on each frame of a video.
Once you've trained a network, you may want to intuitively understand its
performance on different videos, especially looking at frame to frame
performance in a single video. This script enables that visualization for both
TensorFlow and TFLite model formats. Additionally, this script lets you save a
video with each frame annotated with output from the network, as well as save
individual annotated frames if desired.
"""
epilog_text = """\
example:
./camera_cv.py --movie [movie.mp4] --path_to_model [model.pb]
"""
parser = argparse.ArgumentParser(
description=description_text,
epilog=epilog_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--movie", type=str, default="",
help="Movie file to run prediction on")
parser.add_argument("--write_images", default=False, action="store_true",
help="Whether to write each frame as a separate image")
parser.add_argument("--write_movie", default=False, action="store_true",
help="Whether to write an annotated movie")
parser.add_argument("--tflite", default=False, action="store_true",
help="Whether model is tflite")
parser.add_argument("--path_to_model", type=str,
default="output_inference_graph/frozen_inference_graph.pb",
help="Directory containing frozen checkpoint file or .tflite model")
parser.add_argument("--path_to_labels", type=str,
default="train_data/label.pbtxt",
help="Text proto (TF) or text (tflite) file containing label map")
parser.add_argument("--num_classes", type=int, default=2,
help="Number of classes")
parser.add_argument("--threshold", type=float, default=0.6,
help="Threshold for displaying detections")
parser.add_argument("--box_priors", type=str,
default="box_priors.txt",
help="Path to box_priors.txt file containing priors (only required for TFLite)")
args = parser.parse_args()
# BUG FIX: `is not ""` compares object identity, not string content (CPython
# emits a SyntaxWarning for `is` with literals); use `!=` for value equality.
if args.movie != "" and not os.path.exists(args.movie):
    print("Movie file %s missing" % args.movie)
    sys.exit(1)

if args.movie != "":
    cam = cv2.VideoCapture(args.movie)
else:
    # No movie given: fall back to the default camera and a default output name.
    cam = cv2.VideoCapture(0)
    args.movie = "movie.mkv"
width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
if args.tflite:
objdet = LiteObjectDetector(args.path_to_model, args.path_to_labels,
args.box_priors)
else:
objdet = TFObjectDetector(args.path_to_model, args.path_to_labels,
args.num_classes)
movie_name = os.path.splitext(os.path.basename(args.movie))[0]
if args.write_movie:
out_path = os.path.join(os.path.dirname(args.movie), movie_name + "_boxes")
movie_path = "%s.mkv" % out_path
print("Writing movie to", movie_path)
writer = cv2.VideoWriter(
movie_path,
cv2.VideoWriter_fourcc(*"MJPG"),
int(cam.get(cv2.CAP_PROP_FPS)),
(width, height)
)
# Quit if there was a problem
if not writer.isOpened():
print("Unable to open video!")
sys.exit()
if args.write_images:
movie_dir = os.path.dirname(args.movie)
images_dir = os.path.join(movie_dir, "%s_images" % movie_name)
print("Writing images to %s" % images_dir)
try:
os.makedirs(images_dir)
except OSError as e:
if e.errno == errno.EEXIST:
print("Directory exists already, continuing!")
else:
raise
counter = 0
ret, frame = cam.read()
while ret == True:
img = frame.copy() # Aliased, but lets us turn off transformations as necessary.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w, _ = img.shape
expand = np.expand_dims(img, axis=0)
result = objdet.detect(expand)
boxes = []
for i in range(result['num_detections']):
if result['detection_scores'][i] > args.threshold:
class_ = result['detection_classes'][i]
box = result['detection_boxes'][i]
score = result['detection_scores'][i]
y1, x1 = int(box[0] * h), int(box[1] * w)
y2, x2 = int(box[2] * h), int(box[3] * w)
if args.tflite:
x1, y1, x2, y2 = y1, x1, y2, x2
boxes.append((class_, score, x1, y1, x2, y2))
for box in boxes:
class_, score, x1, y1, x2, y2 = box
w1 = x2-x1
h1 = y2-y1
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2)
cv2.putText(img, "%s: %5.2f" % (class_-1, score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if args.write_movie:
writer.write(img)
if args.write_images:
print("[%d] Writing original to %s" % (counter, images_dir))
cv2.imwrite(os.path.join(images_dir, "orig_%05d.png" % counter), frame)
print("[%d] Writing boxes to %s" % (counter, images_dir))
cv2.imwrite(os.path.join(images_dir, "box_%05d.png" % counter), img)
counter += 1
ret, frame = cam.read()
if args.write_movie:
writer.release()
| google/ftc-object-detection | training/camera_cv.py | camera_cv.py | py | 5,453 | python | en | code | 40 | github-code | 90 |
41191854000 | # Surfs Up
from flask import Flask, jsonify
# Add Dependencies
import numpy as np
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """List all available api routes."""
    # Adjacent-literal concatenation replaced by an explicit join; the
    # rendered string is byte-identical to the original response.
    parts = [
        "<h2>Welcome to the Surfs Up! API</h2>",
        "<h3>Available Routes:</h3>",
        "/api/v1.0/precipitation<br/>",
        "/api/v1.0/stations<br/>",
        "/api/v1.0/tobs<br/>",
        "/api/v1.0/<start><br/>",
        "/api/v1.0/<start>/<end>",
    ]
    return "".join(parts)
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return the last 12 months of precipitation as {date: prcp} JSON."""
    session = Session(engine)
    try:
        # Most recent date present in the measurement table.
        max_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
        max_date_2 = max_date_query[0][0]
        max_date = datetime.datetime.strptime(max_date_2, "%Y-%m-%d")
        # Window start: one year before the last data point.
        first_date = ((max_date - relativedelta(years = 1)).strftime('%Y-%m-%d'))
        precip_data = session.query(Measurement.date, Measurement.prcp).\
            filter(Measurement.date >= first_date).order_by(Measurement.date.desc()).all()
        # Date -> precipitation mapping for the JSON payload.
        precip_dict = {}
        for result in precip_data:
            precip_dict[result[0]] = result[1]
        return jsonify(precip_dict)
    finally:
        # BUG FIX: the original placed session.close() after `return`, so it
        # was unreachable and the session leaked on every request.
        session.close()
@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of stations and their observation counts (desc)."""
    session = Session(engine)
    try:
        stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).\
            order_by(func.count(Measurement.station).desc()).all()
        return jsonify(stations)
    finally:
        # BUG FIX: session.close() was previously unreachable (after return).
        session.close()
@app.route("/api/v1.0/tobs")
def tobs():
    """Return the last 12 months of temperature observations as JSON."""
    session = Session(engine)
    try:
        # Most recent date present in the measurement table.
        max_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
        max_date_2 = max_date_query[0][0]
        max_date = datetime.datetime.strptime(max_date_2, "%Y-%m-%d")
        # Window start: one year before the last data point.
        first_date = ((max_date - relativedelta(years = 1)).strftime('%Y-%m-%d'))
        temp_data = session.query(Measurement.date, Measurement.tobs).\
            filter(Measurement.date >= first_date).all()
        return jsonify(temp_data)
    finally:
        # BUG FIX: session.close() was previously unreachable (after return).
        session.close()
@app.route("/api/v1.0/<start>")
def start(start):
    """Return a JSON list of the minimum, average, and maximum temperatures from the start date until
    the end of the database."""
    session = Session(engine)
    try:
        # First we find the last date in the database.
        max_date_query = session.query(func.max(func.strftime("%Y-%m-%d", Measurement.date))).all()
        max_date_2 = max_date_query[0][0]
        max_date = datetime.datetime.strptime(max_date_2, "%Y-%m-%d")
        # Pull min, avg, max temps over [start, max_date].
        temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date >= start).filter(Measurement.date <= max_date).all()
        # Assemble the JSON payload.
        return_list = []
        date_dict = {'start_date': start, 'end_date': max_date}
        return_list.append(date_dict)
        return_list.append({'Min Temperature': temps[0][0]})
        return_list.append({'Avg Temperature': temps[0][1]})
        return_list.append({'Max Temperature': temps[0][2]})
        return jsonify(return_list)
    finally:
        # BUG FIX: session.close() was previously unreachable (after return).
        session.close()
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
    """Return a JSON list of the minimum, average, and maximum temperatures from the start date until the end date."""
    session = Session(engine)
    try:
        # Pull min, avg, max temps over [start, end].
        temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date >= start).filter(Measurement.date <= end).all()
        # Assemble the JSON payload.
        return_list = []
        date_dict = {'start_date': start, 'end_date': end}
        return_list.append(date_dict)
        return_list.append({'Min Temperature': temps[0][0]})
        return_list.append({'Avg Temperature': temps[0][1]})
        return_list.append({'Max Temperature': temps[0][2]})
        return jsonify(return_list)
    finally:
        # BUG FIX: session.close() was previously unreachable (after return).
        session.close()
if __name__ == '__main__':
    # Flask development server; debug=True is for local use only.
    app.run(debug=True)
73781566055 | import simdata_gen_func as sfunc
from scipy import stats
import numpy as np
import db_connect as dbc
import json
def trial_gen(prior, history,
              lott_mag, lott_prob, sure_mag,
              alpha0, beta0):
    """Generate one simulated trial.

    Randomly draws a decision strategy according to `prior`, then calls the
    matching strategy function in simdata_gen_func to produce the poke
    (action) and reward for this trial.

    Args:
        prior: dict mapping strategy-function name -> probability of use.
        history: previous trial configuration/outcome plus the current
            trial configuration (tuple, as assembled by the caller).
        lott_mag, lott_prob, sure_mag: lottery magnitude/probability and
            sure-bet magnitude for this trial.
        alpha0, beta0: utility-curve parameters forwarded to the strategy.

    Returns:
        (poke, reward, strategy_name) for the simulated trial.
    """
    # Sort so the (name, prob) ordering — and thus the index mapping — is
    # deterministic.
    strat_prob = np.array(sorted(prior.items()))
    p = (strat_prob[:, 1]).astype(float)
    idx = np.arange(len(strat_prob))
    # Draw a strategy index from the discrete prior distribution.
    # (The original also pre-assigned `s = strat_prob[:, 0]` here, which was
    # dead code — immediately overwritten below.)
    strat_gen = stats.rv_discrete(name='strat_gen', values=(idx, p))
    s = strat_prob[strat_gen.rvs(), 0]
    f = getattr(sfunc, s)
    poke, re = f(history,
                 lott_mag, lott_prob, sure_mag,
                 alpha=alpha0, beta=beta0)
    return poke, re, s
if __name__ == '__main__':
    # Connect to the DB and reset the output table.
    cur, con = dbc.connect()
    dbc.overwrite(cur, con, 'strattag_pokereward')
    # try:
    #     cur.execute("DELETE FROM strattag_pokereward")
    #     con.commit()
    # except:
    #     con.rollback()
    sqlcmd = 'SELECT * FROM strattag_config'
    cur.execute(sqlcmd)
    records = cur.fetchall()
    # chose initial bet to be lottery
    # initial reward = 0
    pre_poke = records[0][2]
    pre_reward = 0.0
    pre_config = records[0][1:3] + (pre_poke, pre_reward)
    pre_trialid = 1
    # Strategy prior probabilities come from inputs.json.
    with open('inputs.json', 'r') as f:
        para = json.load(f)
    prior = para['prior']
    # NOTE(review): values are interpolated into SQL with %-formatting;
    # fine for trusted internal data, but not parameterized.
    sqlstr = """INSERT INTO strattag_pokereward(
            trialid,
            poke,
            reward,
            strategy) VALUES (
            "%d", "%d", "%f", "%s")"""
    sqlcmd = sqlstr % (pre_trialid, pre_poke, pre_reward, 'utility')
    cur.execute(sqlcmd)
    # Generate each subsequent trial from the previous trial's outcome.
    for rec in records[1:]:
        alpha = rec[-2]
        beta = rec[-1]
        trialid = rec[0]
        cur_config = rec[1:3]
        history = pre_config + cur_config
        lott_mag = rec[3]
        lott_prob = rec[4]
        sure_mag = rec[5]
        poke, re, strat = trial_gen(prior, history,
                                    lott_mag,
                                    lott_prob,
                                    sure_mag,
                                    alpha,
                                    beta)
        pre_config = cur_config + (poke, re)
        sqlcmd = sqlstr % (trialid, poke, re, strat)
        cur.execute(sqlcmd)
    con.close()
| boptimism/strategy_tagging | simdata_gen.py | simdata_gen.py | py | 2,712 | python | en | code | 0 | github-code | 90 |
17037824833 | from tkinter import *
from random import *
# Full 52-card deck: four of each rank (face cards and Ace as strings).
koloda = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Валет', 'Дама', 'Король', 'Туз'] * 4
shuffle(koloda)
count = 0           # player's current point total
game_version = 0.1  # NOTE(review): currently unused
def take():
    """Draw one card, add its value to the total, and update the UI.

    Face cards count 10, Ace counts 11; a total over 21 ends the game.
    """
    global count, koloda
    karta = koloda.pop()
    # Face cards (Jack/Queen/King) are worth 10, the Ace 11.
    if karta == 'Валет' or karta == 'Дама' or karta == 'Король':
        karta = 10
    if karta == 'Туз':
        karta = 11
    count += karta
    if count > 21:
        # Bust: show the loss message and swap the buttons for "play again".
        results['text'] = 'Ви програли, набравши {} очок'.format(str(count))
        count_label['text'] = count
        button1.grid_forget()
        button2.grid_forget()
        button3.grid(row=2, column=0)
    else:
        count_label['text'] = 'У вас {} очків, взяти ще?'.format(str(count))
def enough():
    """Player stands: hide the play buttons and show the final result."""
    global koloda, count
    button1.grid_forget()
    button2.grid_forget()
    button3.grid(row=2, column=0)
    count_label.grid_forget()
    if count == 21:
        # Exactly 21 is a win ("blackjack").
        results['text'] = 'Вітаємо з перемогою, у вас очко'
    else:
        results['text'] = 'Ви завершили гру з результатом в {} очок'.format(count)
def restart():
    """Reset the game state and UI for a new round."""
    global koloda, count
    count = 0
    # BUG FIX: the original only reshuffled the remaining (depleted) deck, so
    # the deck shrank with every round and eventually emptied. Rebuild the
    # full 52-card deck before shuffling.
    koloda = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Валет', 'Дама', 'Король', 'Туз'] * 4
    shuffle(koloda)
    button1.grid(row=2, column=0)
    button2.grid(row=3, column=0)
    button3.grid_forget()
    results.grid_forget()
root = Tk()
root.title("The black-jack")
root.geometry("400x300")
game_label = Label(root, text="Блекджек 4444", font="ubuntu", fg="red", bg="black")
game_label.grid(row=0, columnspan=3)
count_label = Label(root, text="У вас 0 очок")
count_label.grid(row=1, column=0)
button1 = Button(root, width="15", font=("ubuntu", 30), text="Взяти карту", command=take)
button1.grid(row=2, column=0)
button2 = Button(root, width="15", font=("ubuntu", 30), text="Хватить", command=enough)
button2.grid(row=3, column=0)
button3 = Button(root, width="15", font=("ubuntu", 30), text="Спробувати ще", command=restart)
results = Label(root, text="", fg="red")
results.grid(row=4, column=0)
root.mainloop()
| rudiq4/BlackJack | main.py | main.py | py | 2,092 | python | en | code | 0 | github-code | 90 |
3720584561 | # class A:
# def __init__(self):
# self.x = 0 # public переменная
# self._x = 0 # private - не использовать!
# self.__x = 0 # hidden
#
#
# a = A()
# print(a.x)
# print(a._x)
# #print(a.__x)
#
# print(vars(a))
#
# print(a._A__x)
# class A:
# def __init__(self):
# self.__x = 1 # hidden
#
# def f(self):
# print(self.__x)
#
# class B(A):
# def __init__(self):
# self.__x = 2
# super().__init__()
# def g(self):
# print(self.__x)
#
# b = B()
# print(b.f())
#-------------
# class A:
# def __init__(self):
# self.a = 1
# a = 2
#
# def f(self):
# print(self.a)
# @staticmethod # позволяет вызывать функции от имени класса
# def g():
# print(A.a)
# @classmethod
# def h(cls):
# print(cls.a)
#
# a = A()
#
# print(a.f())
# a.g()
# a.h()
# A.g()
# A.h()
# #A.f()
#-------------------
class A:
    """Demo class: classmethod vs. staticmethod attribute lookup."""
    a = 1
    @classmethod
    def f(cls):
        # Resolves `a` on the actual class (cls), so subclasses can override.
        print(cls.a)
    @staticmethod  # explicitly bound to class A (always reads A.a)
    def g():
        print(A.a)
class B(A):
    """Subclass overriding the class attribute `a`."""
    a = 22

A.f()  # prints 1  (cls is A)
B.f()  # prints 22 (cls is B -> overridden attribute)
A.g()  # prints 1  (staticmethod hard-codes A.a)
B.g()  # prints 1  (same staticmethod, still A.a)
13717372228 | import logging
from gym.spaces import Box, Discrete
import numpy as np
from typing import Dict, Tuple
from maddpg_torch_model import build_maddpg_models, _make_continuous_space
from ray.rllib.utils.torch_ops import apply_grad_clipping, huber_loss, l2_loss
from ray.rllib.utils.typing import TrainerConfigDict, TensorType, LocalOptimizer
from ray.rllib.agents.dqn.dqn_tf_policy import _adjust_nstep
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.agents.ddpg.ddpg_torch_policy import (
apply_gradients_fn,
make_ddpg_optimizers,
TargetNetworkMixin,
)
from ray.rllib.agents.ddpg.ddpg_tf_policy import (
build_ddpg_models,
get_distribution_inputs_and_class,
)
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.torch.torch_action_dist import TorchDeterministic
from ray.rllib.agents.ddpg.noop_model import TorchNoopModel
from ray.rllib.models import ModelCatalog
logger = logging.getLogger(__name__)
torch, nn = try_import_torch()
def validate_spaces(policy: Policy, obs_space, action_space, config) -> None:
    """Coerce the policy's observation/action spaces into continuous (Box) form."""
    policy.observation_space = _make_continuous_space(obs_space)
    policy.action_space = _make_continuous_space(action_space)
def build_maddpg_models_and_action_dist(
    policy: Policy, obs_space, action_space, config: TrainerConfigDict
) -> Tuple[ModelV2, ActionDistribution]:
    """Build MADDPG model + target model and return a deterministic action dist."""
    model = build_maddpg_models(policy, policy.observation_space, policy.action_space, config)
    # Move both networks to GPU when available, CPU otherwise.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    policy.model = policy.model.to(device)
    policy.target_model = policy.target_model.to(device)
    return model, TorchDeterministic
def maddpg_actor_critic_loss(
    policy: Policy, model: ModelV2, _, train_batch: SampleBatch
) -> TensorType:
    """Compute the MADDPG actor and critic losses for one agent.

    The centralized critic consumes observations and actions of ALL agents
    (fields suffixed with the agent id in `train_batch`); the actor loss
    maximizes the critic's value of this agent's current policy action.
    Returns the two loss terms matching the two optimizers.
    """
    # Before the first full batch arrives, return zero losses (and initialize
    # the stats attributes read by build_maddpg_stats).
    if not hasattr(policy, "td_error") or policy.td_error is None:
        policy.actor_loss = torch.zeros(len(train_batch))
        policy.critic_loss = torch.zeros(len(train_batch))
        policy.td_error = torch.zeros(len(train_batch))
        policy.q_t = torch.zeros(len(train_batch))
        return policy.actor_loss, policy.critic_loss

    twin_q = policy.config["twin_q"]
    gamma = policy.config["gamma"]
    n_step = policy.config["n_step"]
    use_huber = policy.config["use_huber"]
    huber_threshold = policy.config["huber_threshold"]
    l2_reg = policy.config["l2_reg"]

    agent_id = policy.config["agent_id"]
    n_agents = len(policy.config["multiagent"]["policies"])

    # This agent's current/next observations.
    input_dict = {
        "obs": train_batch["_".join([SampleBatch.CUR_OBS, str(agent_id)])],
        "is_training": True,
    }
    input_dict_next = {
        "obs": train_batch["_".join([SampleBatch.NEXT_OBS, str(agent_id)])],
        "is_training": True,
    }

    # Current policy action and target-policy next action for this agent.
    model_out_t, _ = model(input_dict, [], None)
    policy_t = model.get_policy_output(model_out_t)
    target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
    policy_tp1 = policy.target_model.get_policy_output(target_model_out_tp1)

    # Action outputs.
    if policy.config["smooth_target_policy"]:
        # Target policy smoothing (TD3-style): add clipped noise, then clamp
        # back into the action space bounds.
        target_noise_clip = policy.config["target_noise_clip"]
        clipped_normal_sample = torch.clamp(
            torch.normal(
                mean=torch.zeros(policy_tp1.size()), std=policy.config["target_noise"]
            ).to(policy_tp1.device),
            -target_noise_clip,
            target_noise_clip,
        )
        policy_tp1_smoothed = torch.min(
            torch.max(
                policy_tp1 + clipped_normal_sample,
                torch.tensor(
                    policy.action_space.low,
                    dtype=torch.float32,
                    device=policy_tp1.device,
                ),
            ),
            torch.tensor(
                policy.action_space.high, dtype=torch.float32, device=policy_tp1.device
            ),
        )
    else:
        # No smoothing, just use deterministic actions.
        policy_tp1_smoothed = policy_tp1

    # Joint (all-agent) observations/actions for the centralized critic.
    obs_n = [
        train_batch["_".join([SampleBatch.CUR_OBS, str(id)])] for id in range(n_agents)
    ]
    act_n = [
        train_batch["_".join([SampleBatch.ACTIONS, str(id)])] for id in range(n_agents)
    ]
    next_obs_n = [
        train_batch["_".join([SampleBatch.NEXT_OBS, str(id)])] for id in range(n_agents)
    ]
    # Other agents' next actions come from the batch; this agent's is the
    # (smoothed) target-policy action computed above.
    next_policy_n = [train_batch["new_actions_{}".format(id)] for id in range(n_agents)]
    next_policy_n[agent_id] = policy_tp1_smoothed
    rewards = train_batch["rewards_{}".format(agent_id)]
    dones = train_batch["dones_{}".format(agent_id)]

    if policy.config["use_state_preprocessor"]:
        # Create all state preprocessors
        model_n = [
            ModelCatalog.get_model_v2(
                obs_space,
                act_space,
                1,
                policy.config["model"],
                default_model=TorchNoopModel,
            )
            for obs_space, act_space in zip(policy.obs_space_n, policy.act_space_n)
        ]
        # Get states from preprocessors
        model_out_n = [
            model.forward({SampleBatch.OBS: obs, "is_training": True}, [], None)[0]
            for model, obs in zip(model_n, obs_n)
        ]
        model_out_next_n = [
            model.forward({SampleBatch.OBS: next_obs, "is_training": True}, [], None)[0]
            for model, next_obs in zip(model_n, next_obs_n)
        ]
    else:
        model_out_n = obs_n
        model_out_next_n = next_obs_n

    # Q-values for given actions & observations in given current
    q_t = model.get_q_values(model_out_n, act_n)
    # Compute this here so policy_n can be modified without deepcopying act_n
    if twin_q:
        twin_q_t = model.get_twin_q_values(model_out_n, act_n)

    # Q-values for current policy (no noise) in given current state
    policy_n = act_n
    policy_n[agent_id] = policy_t
    q_t_det_policy = model.get_q_values(model_out_n, policy_n)

    # Actor loss: maximize the critic's value of the current policy action.
    actor_loss = -torch.mean(q_t_det_policy)

    # Target q-net(s) evaluation.
    q_tp1 = policy.target_model.get_q_values(model_out_next_n, next_policy_n)
    if twin_q:
        twin_q_tp1 = policy.target_model.get_twin_q_values(
            model_out_next_n, next_policy_n
        )

    q_t_selected = torch.squeeze(q_t, axis=len(q_t.shape) - 1)
    if twin_q:
        # Clipped double-Q: take the min of the two target critics.
        twin_q_t_selected = torch.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        q_tp1 = torch.min(q_tp1, twin_q_tp1)

    q_tp1_best = torch.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    # Zero out the bootstrap term on terminal transitions.
    q_tp1_best_masked = (~dones).float() * q_tp1_best

    # n-step bootstrapped TD target (no gradient through the target).
    q_t_selected_target = (rewards + gamma ** n_step * q_tp1_best_masked).detach()

    # Compute the error (potentially clipped).
    if twin_q:
        td_error = q_t_selected - q_t_selected_target
        twin_td_error = twin_q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold) + huber_loss(
                twin_td_error, huber_threshold
            )
        else:
            errors = 0.5 * (torch.pow(td_error, 2.0) + torch.pow(twin_td_error, 2.0))
    else:
        td_error = q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold)
        else:
            errors = 0.5 * torch.pow(td_error, 2.0)

    critic_loss = torch.mean(errors)

    # Add l2-regularization if required.
    if l2_reg is not None:
        for name, var in model.policy_variables(as_dict=True).items():
            if "bias" not in name:
                actor_loss += l2_reg * l2_loss(var)
        for name, var in model.q_variables(as_dict=True).items():
            if "bias" not in name:
                critic_loss += l2_reg * l2_loss(var)

    # Store values for stats function.
    policy.actor_loss = actor_loss
    policy.critic_loss = critic_loss
    policy.td_error = td_error
    policy.q_t = q_t

    # Return two loss terms (corresponding to the two optimizers, we create).
    return policy.actor_loss, policy.critic_loss
def build_maddpg_stats(policy: Policy, batch: SampleBatch) -> Dict[str, TensorType]:
    """Summarize training statistics stashed on ``policy`` by the loss fn.

    The loss function stores ``actor_loss``, ``critic_loss``, ``q_t`` and
    ``td_error`` on the policy object; this reduces them into a stats dict.
    ``batch`` is unused but required by the stats-fn signature.
    """
    q_values = policy.q_t
    td_error = policy.td_error
    return {
        "actor_loss": policy.actor_loss,
        "critic_loss": policy.critic_loss,
        "mean_q": torch.mean(q_values),
        "max_q": torch.max(q_values),
        "min_q": torch.min(q_values),
        "mean_td_error": torch.mean(td_error),
        "td_error": td_error,
    }
def postprocess_nstep(
    policy: Policy, batch: SampleBatch, other_agent_batches=None, episode=None
):
    """Apply the n-step return adjustment to a sampled batch.

    No-op when ``n_step`` is 1; otherwise rewrites the batch columns in place
    via ``_adjust_nstep``. Returns the (possibly modified) batch.
    """
    n_step = policy.config["n_step"]
    if n_step > 1:
        _adjust_nstep(
            n_step,
            policy.config["gamma"],
            batch[SampleBatch.CUR_OBS],
            batch[SampleBatch.ACTIONS],
            batch[SampleBatch.REWARDS],
            batch[SampleBatch.NEXT_OBS],
            batch[SampleBatch.DONES],
        )
    return batch
def make_maddpg_optimizers(
    policy: Policy, config: TrainerConfigDict
) -> Tuple[LocalOptimizer]:
    """Build the policy's optimizers (MADDPG reuses the DDPG setup as-is)."""
    optimizers = make_ddpg_optimizers(policy, config)
    return optimizers
def before_init_fn(
    policy: Policy, obs_space, action_space, config: TrainerConfigDict
) -> None:
    """Initialize the step counter and validate the `agent_id` config entry.

    Raises:
        ValueError: when `agent_id` is missing (None) or not a plain int.
    """
    policy.global_step = 0
    agent_id = config["agent_id"]
    if agent_id is None:
        raise ValueError("Must set `agent_id` in the policy config.")
    # Strict int check (intentionally excludes bool and other int subclasses).
    if type(agent_id) is not int:
        raise ValueError("Agent ids must be integers for MADDPG.")
class ComputeTDErrorMixin:
    """Attaches a `compute_td_error` helper that re-runs the loss on a batch.

    The loss function is expected to store `self.td_error` on the policy as a
    side effect; the helper triggers one forward pass and returns that value.
    """

    def __init__(self, loss_fn):
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask):
            batch = SampleBatch(
                {
                    SampleBatch.CUR_OBS: obs_t,
                    SampleBatch.ACTIONS: act_t,
                    SampleBatch.REWARDS: rew_t,
                    SampleBatch.NEXT_OBS: obs_tp1,
                    SampleBatch.DONES: done_mask,
                }
            )
            input_dict = self._lazy_tensor_dict(batch)
            # Forward pass on the loss; it sets self.td_error as a side effect.
            loss_fn(self, self.model, None, input_dict)
            return self.td_error

        self.compute_td_error = compute_td_error
class SetJointSpacesMixin:
    """Stores the joint observation/action spaces of all agents on the policy."""

    def __init__(self, config: TrainerConfigDict):
        policies = config["multiagent"]["policies"]
        # Each policy spec is a 4-tuple (policy_cls, obs_space, act_space, config).
        self.obs_space_n = [
            _make_continuous_space(spec[1]) for spec in policies.values()
        ]
        self.act_space_n = [
            _make_continuous_space(spec[2]) for spec in policies.values()
        ]
def setup_late_mixins(
    policy: Policy, obs_space, action_space, config: TrainerConfigDict
) -> None:
    """Run the mixin constructors once the model and loss function exist.

    Invoked via ``before_loss_init``. Order is deliberate: bind the TD-error
    helper first, then target-network syncing, then the joint spaces.
    """
    ComputeTDErrorMixin.__init__(policy, maddpg_actor_critic_loss)
    TargetNetworkMixin.__init__(policy)
    SetJointSpacesMixin.__init__(policy, config)
def get_default_config():
    """Return MADDPG's default config (imported lazily as an import-cycle workaround)."""
    from maddpg import DEFAULT_CONFIG

    return DEFAULT_CONFIG
# Assemble the Torch MADDPG policy class from the pieces defined above.
# Each keyword wires one callback into the policy life-cycle managed by
# `build_policy_class`: loss, stats, n-step postprocessing, optimizers,
# gradient clipping/application, space validation and mixin initialization.
MADDPGTorchPolicy = build_policy_class(
    name="MADDPGTorchPolicy",
    framework="torch",
    loss_fn=maddpg_actor_critic_loss,
    get_default_config=get_default_config,
    stats_fn=build_maddpg_stats,
    postprocess_fn=postprocess_nstep,
    action_distribution_fn=get_distribution_inputs_and_class,
    extra_grad_process_fn=apply_grad_clipping,
    optimizer_fn=make_maddpg_optimizers,
    validate_spaces=validate_spaces,
    before_init=before_init_fn,
    before_loss_init=setup_late_mixins,
    make_model_and_action_dist=build_maddpg_models_and_action_dist,
    apply_gradients_fn=apply_gradients_fn,
    mixins=[TargetNetworkMixin, ComputeTDErrorMixin, SetJointSpacesMixin],
)
| Rohan138/rllib-torch-maddpg | maddpg_torch_policy.py | maddpg_torch_policy.py | py | 11,922 | python | en | code | 8 | github-code | 90 |
35032378083 | #!/usr/bin/env python
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from selenium import webdriver
import re
from Bio import SeqIO
from math import log2
VAXIJEN_TARGET = "virus" # You need to replace the field
VAXIJEN_THRESHOLD = 0.5 # You need to replace the field
# The File must contains results from IEDB consensus method and must contains fields splited by tab:
# allele
# start
# end
# length
# peptide
# percentile_rank
# adjusted_rank
# nn_align_core
# nn_align_ic50
# nn_align_rank
RESULT_FILENAME = "txt/NS5_DR.txt" # You need to replace the field
MSA_FILENAME = "aln/st/denv1_NS5_aln.fasta" # You need to replace the field
MSA_REF_ID = "NP_722465.1" # You need to replace the field; id must splited by '|' charter
AAS=['V','I','L','F','M','W','Y','C','A','T', 'D', 'E','G','P','R','K','H','N','Q','S','-']
CONSERVATION_THRESHOLD = 1.3
SEP = ";"
def chSep(v):
    """Return *v* with every '.' decimal separator replaced by ','."""
    return v.replace('.', ',')
def vaxijen_test(aa_seq):
    """Submit *aa_seq* to the VaxiJen web form and return the predicted score.

    Drives a Chrome browser via selenium: fills in the sequence, selects the
    configured VAXIJEN_TARGET option, sets VAXIJEN_THRESHOLD, submits, and
    scrapes the numeric score out of the response HTML.

    Returns:
        float: the antigenicity score reported by VaxiJen.
    """
    browser = webdriver.Chrome()
    try:
        browser.get("http://www.ddg-pharmfac.net/vaxijen/VaxiJen/VaxiJen.html")
        seq_f = browser.find_element_by_name("seq")
        seq_f.send_keys(aa_seq)
        target = browser.find_element_by_name("Target")
        all_options = target.find_elements_by_tag_name("option")
        for option in all_options:
            if option.get_attribute("value") == VAXIJEN_TARGET:
                option.click()
        threshold = browser.find_element_by_name("threshold")
        threshold.send_keys(str(VAXIJEN_THRESHOLD))
        browser.find_element_by_name("submit").click()
        r = re.findall(r'=\<b\> ([-\.0-9]+) \<\/b\>', browser.page_source)
    finally:
        # Bug fix: always release the browser window, even when the page
        # layout changed and one of the element lookups above raised.
        browser.close()
    return float(r[0])
def msa_stat():
    """Per-residue conservation of the reference sequence in the MSA.

    Reads MSA_FILENAME (FASTA alignment), counts residue frequencies per
    alignment column, and computes the Shannon entropy of each column over
    AAS (20 amino acids plus the gap symbol).  A column is considered
    conserved when its effective residue count ``2 ** entropy`` is at most
    CONSERVATION_THRESHOLD.

    Returns:
        list: one ``[residue, conserved_flag]`` pair per non-gap position of
        the reference sequence MSA_REF_ID.
    """
    ra = []
    rs = ""
    msa = []
    count = 0.0
    for r in SeqIO.parse(MSA_FILENAME, "fasta"):
        ids = r.id.split('|')
        s = str(r.seq)
        count += 1
        if ids[0] == MSA_REF_ID:
            # Remember the reference row of the alignment.
            rs = s
        for i, aa in enumerate(s):
            if len(msa) < i + 1:
                msa.append({})
            d = msa[i]
            if aa not in d:
                d[aa] = 0
            d[aa] += 1
    for i, aa in enumerate(rs):
        col = msa[i]
        s = 0
        for ar in AAS:
            if ar in col:
                q = col[ar] / count
                # Bug fix: the original computed q * log2(q) even when q == 0,
                # raising "ValueError: math domain error" for any residue
                # absent from the column.  0 * log2(0) is 0 by the entropy
                # convention, so absent residues are simply skipped.
                s -= q * log2(q)
        if aa != '-':
            ra.append([aa, (2 ** s) <= CONSERVATION_THRESHOLD])
    return ra
def pep_consv_count(st, end, consv):
    """Percentage (0-100, truncated) of conserved positions in [st, end].

    *st* and *end* are 1-based inclusive coordinates into *consv*, a
    per-position list of ``[residue, conserved_flag]`` pairs.
    """
    lo = int(st) - 1
    hi = int(end)
    conserved = sum(1 for i in range(lo, hi) if consv[i][1])
    return int(conserved / float(hi - lo) * 100)
def f1():
    """Group IEDB consensus rows by peptide and nn_align core, then print a
    ';'-separated report ordered by mean ic50.

    Peptides that are 100% conserved are additionally scored with VaxiJen
    (results memoized in ``vjd`` to avoid repeated web requests).
    """
    d = {}
    d1 = {}
    consv = msa_stat()
    vjd = {}
    # Parse the IEDB result table; skip the header row and rows with no core.
    for l in open(RESULT_FILENAME).readlines():
        ta = l[:-2].split('\t')
        if ta[0] == 'allele' or ta[8] == '-':
            continue
        seq = ta[4]
        if not seq in d:
            d[seq] = []
        d[seq].append(ta)
    # Group peptides by their nn_align core (column 7).
    for k in d.keys():
        a = d[k]
        core = a[0][7]
        if not core in d1:
            d1[core] = []
        d1[core].append(k)
    # Order cores by mean ic50 and, within each core, peptides by their ic50.
    coreOrdered = {}
    for core in d1.keys():
        pepsOredered = {}
        mic50 = 0
        peps = d1[core]
        for pep in peps:
            idtPeps = d[pep]
            ic50 = 0
            # Average the nn_align ic50 (column 8) over all alleles.
            for a in idtPeps:
                ic50 += float(a[8])
            ic50 /= len(idtPeps)
            mic50 += ic50
            if not ic50 in pepsOredered:
                pepsOredered[ic50] = []
            pepsOredered[ic50].append(idtPeps)
        mic50 /= len(peps)
        if not mic50 in coreOrdered:
            coreOrdered[mic50] = []
        coreOrdered[mic50].append(pepsOredered)
    num = 1
    for mic50 in sorted(coreOrdered.keys()):
        a = coreOrdered[mic50]
        for pepsOredered in a:
            print ('')
            for ic50 in sorted(pepsOredered.keys()):
                idtPepsL = pepsOredered[ic50]
                for idtPeps in idtPepsL:
                    print ('')
                    a1 = idtPeps[0]
                    # Conservation percentage across the peptide's positions.
                    cv = pep_consv_count(int(a1[1]), int(a1[2]), consv)
                    pep = a1[4]
                    vj = '0'
                    # Only fully conserved peptides are sent to VaxiJen.
                    if cv == 100:
                        if pep in vjd:
                            vj = vjd[pep]
                        else:
                            vj = str(vaxijen_test(pep))
                            vjd[pep] = vj
                        vj = chSep(vj)
                    print (str(num) + SEP + a1[0] + SEP + a1[1] + SEP + a1[2] + SEP + a1[3] + SEP + pep + SEP + chSep(a1[5]) + SEP + chSep(a1[6]) + SEP + a1[7] + SEP + chSep(a1[8]) + SEP + chSep(a1[9]) + SEP + str(cv) + SEP + vj)
                    num += 1
                    # Remaining alleles of the same peptide: abbreviated rows.
                    a3 = idtPeps[1:]
                    for a2 in a3:
                        print (str(num) + SEP + a2[0] + SEP +SEP + SEP + SEP + SEP + chSep(a2[5]) + SEP + chSep(a2[6]) + SEP + SEP + chSep(a2[8]) + SEP + chSep(a2[9]))
                        num += 1
# Script entry point: run the full reporting pipeline.
f1()
| lioj/bioinformatics | py/sele_MHCII.py | sele_MHCII.py | py | 5,048 | python | en | code | 0 | github-code | 90 |
441495921 | import math
def el(i, j):
    """Matrix element rule: j! when i*j is even, otherwise 1 + 2 + ... + i."""
    if (i * j) % 2 == 0:
        return math.factorial(j)
    return sum(range(1, i + 1))
# Read the matrix dimensions from the user.
n = int(input("n: "))
m = int(input("m: "))
# Build the n x m matrix, feeding 1-based indices to el().
A = [
    [el(i,j) for j in range(1,m+1)] for i in range(1,n+1)
]
# Flatten the matrix row by row and print the result.
A1 = []
for row in A:
    A1 += row
print(A1)
41579343127 | import torch
import time
import os
import shutil
import numpy as np
import random as rd
import argparse
from loguru import logger
from rdkit import Chem
from model.Lmser_Transformerr import MFT as DrugTransformer
# from model.Transformer import MFT as DrugTransformer
# from model.Transformer_Encoder import MFT as DrugTransformer
from utils.docking import CaculateAffinity, ProteinParser
from utils.log import timeLable, readSettings, VisualizeMCTS, saveMCTSRes, VisualizeInterMCTS
from beamsearch import sample
# Baseline node value used before any rollout statistics exist.
QE = 9
QMIN = QE  # running minimum mean node value (min-max Q normalization)
QMAX = QE  # running maximum mean node value (min-max Q normalization)
groundIndex = 0 # globally unique counter for MCTS node indices
infoma = {}  # cache mapping SMILES string -> docking affinity
class Node:
    """A node in the Monte-Carlo search tree over SMILES token sequences."""

    def __init__(self, parentNode=None, childNodes=None, path=None, p=1.0, smiMaxLen=999):
        global groundIndex
        self.index = groundIndex  # globally unique node id
        groundIndex += 1
        self.parentNode = parentNode
        # Bug fix: the original used mutable default arguments
        # (childNodes=[], path=[]) that are shared between every
        # default-constructed Node; create fresh lists instead.
        self.childNodes = [] if childNodes is None else childNodes
        self.wins = 0
        self.visits = 0
        self.path = [] if path is None else path  # token sequence from the root
        self.p = p  # prior probability from the policy network
        self.smiMaxLen = smiMaxLen

    def SelectNode(self):
        """Pick the child with the highest PUCT score (ties broken at random).

        Returns (node, status); for non-expandable nodes, returns self and
        its own status instead of descending.
        """
        nodeStatus = self.checkExpand()
        if nodeStatus == 4:
            puct = [childNode.CaculatePUCT() for childNode in self.childNodes]
            m = np.max(puct)
            indices = np.nonzero(puct == m)[0]
            ind = rd.choice(indices)
            return self.childNodes[ind], self.childNodes[ind].checkExpand()
        return self, nodeStatus

    def AddNode(self, content, p):
        """Append and return a child extending the path by *content* with prior *p*."""
        n = Node(self, [], self.path + [content], p=p, smiMaxLen=self.smiMaxLen)
        self.childNodes.append(n)
        return n

    def UpdateNode(self, wins):
        """Accumulate one backup: a visit plus the rollout value *wins*."""
        self.visits += 1
        self.wins += wins

    def CaculatePUCT(self):
        """PUCT exploration score with min-max normalized Q (MuZero-style)."""
        if not self.parentNode:
            return 0.0  # root node; value only used when drawing the tree
        c = 1.5
        if QMAX == QMIN:
            wins = 0
        else:
            if self.visits:
                wins = (self.wins / self.visits - QMIN) / (QMAX - QMIN)
            else:
                # Unvisited nodes fall back to the prior baseline QE.
                wins = (QE - QMIN) / (QMAX - QMIN)
        return wins + c * self.p * np.sqrt(self.parentNode.visits) / (1 + self.visits)

    def checkExpand(self):
        """
        node status: 1 terminal; 2 too long; 3 legal leaf node; 4 legal noleaf node
        """
        if self.path[-1] == '$':
            return 1
        elif not (len(self.path) < self.smiMaxLen):
            return 2
        elif len(self.childNodes) == 0:
            return 3
        return 4
def JudgePath(path, smiMaxLen):
    """True while the token sequence is unterminated and below the length cap."""
    if path[-1] == '$':
        return False
    return len(path) < smiMaxLen
def Select(rootNode):
    """Descend the tree via SelectNode until a non-expandable node is reached."""
    node, status = rootNode.SelectNode()
    while status == 4:
        node, status = node.SelectNode()
    return node, status
def Expand(rootNode, atomList, plist):
    """Create one child per candidate token, keeping the network priors."""
    if not JudgePath(rootNode.path, rootNode.smiMaxLen):
        # Terminal or over-length nodes are never expanded.
        return
    for i, atom in enumerate(atomList):
        rootNode.AddNode(atom, plist[i])
def Update(node, wins):
    """Back-propagate *wins* from *node* up to the root, bumping visit counts."""
    current = node
    while current:
        current.UpdateNode(wins)
        current = current.parentNode
def updateMinMax(node):
    """Refresh the global Q-normalization bounds from the subtree (MuZero method)."""
    global QMIN
    global QMAX
    if node.visits:
        mean_value = node.wins / node.visits
        QMAX = max(QMAX, mean_value)
        QMIN = min(QMIN, mean_value)
    for child in node.childNodes:
        updateMinMax(child)
def rollout(node, model):
    """Greedy playout from *node* to a terminal token, then dock and back up.

    Completed, RDKit-valid SMILES are docked (affinities memoized in the
    global ``infoma``) and the negative affinity is back-propagated along the
    tree; invalid or unterminated strings back up the current QMIN instead.

    Returns:
        (allScore, allValidSmiles, allSmiles) collected during this playout.
    """
    path = node.path[:]
    smiMaxLen = node.smiMaxLen
    allScore = []
    allValidSmiles = []
    allSmiles = []
    while JudgePath(path, smiMaxLen):
        # Fast playout move: always take the highest-probability next token,
        # breaking ties at random.
        atomListExpanded, pListExpanded = sample(model, path, vocabulary, proVoc, smiMaxLen, proMaxLen, device, 30, protein_seq)
        m = np.max(pListExpanded)
        indices = np.nonzero(pListExpanded == m)[0]
        ind=rd.choice(indices)
        path.append(atomListExpanded[ind])
    if path[-1] == '$':
        # '$' terminates the sequence; strip start/end tokens to get SMILES.
        smileK = ''.join(path[1:-1])
        allSmiles.append(smileK)
        # NOTE(review): `mols` stays unbound if MolFromSmiles raises, which
        # would make the `if mols ...` test below fail with a NameError.
        try:
            mols = Chem.MolFromSmiles(smileK)
        except:
            pass
        if mols and len(smileK) < smiMaxLen:
            global infoma
            if smileK in infoma:
                affinity = infoma[smileK]
            else:
                affinity = CaculateAffinity(smileK, file_protein=pro_file[args.k], file_lig_ref=ligand_file[args.k], out_path=resFolderPath)
                infoma[smileK] = affinity
            # An affinity of 500 is treated as a docking-failure sentinel here.
            if affinity == 500:
                Update(node, QMIN)
            else:
                logger.success(smileK + ' ' + str(-affinity))
                Update(node, -affinity)
                allScore.append(-affinity)
                allValidSmiles.append(smileK)
        else:
            logger.error('invalid: %s'%(''.join(path)))
            Update(node, QMIN)
    else:
        logger.warning('abnormal ending: %s'%(''.join(path)))
        Update(node, QMIN)
    return allScore, allValidSmiles, allSmiles
def MCTS(rootNode):
    """Run `simulation_times` simulations below *rootNode* and commit one move.

    Each simulation resets/refreshes the global Q-normalization bounds,
    selects a frontier node, rolls it out (back-propagating the score) and
    expands it with the policy network's token priors.  The committed child
    is the max-visit child (--max) or sampled proportionally to visit counts.

    Returns:
        (chosen_child, allScore, allValidSmiles, allSmiles).
    """
    allScore = []
    allValidSmiles = []
    allSmiles = []
    currSimulationTimes = 0
    while currSimulationTimes < simulation_times:
        global QMIN
        global QMAX
        QMIN = QE
        QMAX = QE
        updateMinMax(rootNode)
        currSimulationTimes += 1
        #MCTS SELECT
        node, _ = Select(rootNode)
        # VisualizeInterMCTS(rootNode, modelName, './', times, QMAX, QMIN, QE)
        #rollout
        score, validSmiles, aSmiles = rollout(node, model)
        allScore.extend(score)
        allValidSmiles.extend(validSmiles)
        allSmiles.extend(aSmiles)
        #MCTS EXPAND
        atomList, logpListExpanded = sample(model, node.path, vocabulary, proVoc, smiMaxLen, proMaxLen, device, 30, protein_seq)
        pListExpanded = [np.exp(p) for p in logpListExpanded]
        Expand(node, atomList, pListExpanded)
    if args.max:
        indices = np.argmax([n.visits for n in rootNode.childNodes])
    else:
        # Sample one child with probability proportional to its visit count.
        allvisit = np.sum([n.visits for n in rootNode.childNodes]) * 1.0
        prList = np.random.multinomial(1, [(n.visits)/allvisit for n in rootNode.childNodes], 1)
        indices = list(set(np.argmax(prList, axis=1)))[0]
        logger.info([(n.visits)/allvisit for n in rootNode.childNodes])
    return rootNode.childNodes[indices], allScore, allValidSmiles, allSmiles
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-k', type=int, default=0, help='protein index')
    parser.add_argument('--device', type=str, default='0')
    parser.add_argument('-st', type=int, default=50, help='simulation times')
    parser.add_argument('--source', type=str, default='new')
    parser.add_argument('-p', type=str, default='LT', help='pretrained model')
    parser.add_argument('--max', action="store_true", help='max mode')
    args = parser.parse_args()
    # Resolve the protein/ligand files for the chosen test index.
    if args.source == 'new':
        test_pdblist = sorted(os.listdir('./data/test_pdbs/'))
        pro_file = ['./data/test_pdbs/%s/%s_protein.pdb'%(pdb,pdb) for pdb in test_pdblist]
        ligand_file = ['./data/test_pdbs/%s/%s_ligand.sdf'%(pdb,pdb) for pdb in test_pdblist]
        protein_seq = ProteinParser(test_pdblist[args.k])
    else:
        raise NotImplementedError('Unknown source: %s' % args.source)
    simulation_times = args.st
    experimentId = os.path.join('experiment', args.p)
    ST = time.time()
    modelName = '30.pt'
    # Build a uniquely named result folder and mirror this script into it.
    hpc_device = "gpu" if torch.cuda.is_available() else "cpu"
    mode = "max" if args.max else "freq"
    resFolder = '%s_%s_mcts_%s_%s_%s_%s_%s'%(hpc_device,mode,simulation_times, timeLable(), modelName, args.k, test_pdblist[args.k])
    resFolderPath = os.path.join(experimentId, resFolder)
    if not os.path.isdir(resFolderPath):
        os.mkdir(resFolderPath)
    logger.add(os.path.join(experimentId, resFolder, "{time}.log"))
    shutil.copyfile('./mcts.py',os.path.join(experimentId, resFolder) + '/mcts.py')
    # Protein sequences longer than 999 residues are skipped entirely.
    if len(protein_seq) > 999:
        logger.info('skipping %s'%test_pdblist[args.k])
    else:
        # Load the vocabularies/limits saved with the experiment, then the model.
        s = readSettings(experimentId)
        vocabulary = s.smiVoc
        proVoc = s.proVoc
        smiMaxLen = int(s.smiMaxLen)
        proMaxLen = int(s.proMaxLen)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        device_ids = [i for i in range(torch.cuda.device_count())] # use all visible GPUs (originally a 10-GPU machine)
        model = DrugTransformer(**s)
        model = torch.nn.DataParallel(model, device_ids=device_ids) # wrap for the chosen devices
        model = model.to(device) # load the model onto device 0
        model.load_state_dict(torch.load(experimentId +'/model/'+ modelName, map_location=device))
        model.to(device)
        model.eval()
        # Root of the search: '&' is the start token of the SMILES sequence.
        node = Node(path=['&'],smiMaxLen=smiMaxLen)
        times = 0
        allScores = []
        allValidSmiles = []
        allSmiles = []
        # Commit one token per MCTS call until the sequence terminates.
        while(JudgePath(node.path, smiMaxLen)):
            times += 1
            node, scores, validSmiles, smiles = MCTS(node)
            allScores.append(scores)
            allValidSmiles.append(validSmiles)
            allSmiles.append(smiles)
            VisualizeMCTS(node.parentNode, modelName, resFolderPath, times)
        alphaSmi = ''
        affinity = 500
        # Score the committed SMILES if the sequence terminated cleanly.
        if node.path[-1] == '$':
            alphaSmi = ''.join(node.path[1:-1])
            if Chem.MolFromSmiles(alphaSmi):
                logger.success(alphaSmi)
                if alphaSmi in infoma:
                    affinity = infoma[alphaSmi]
                else:
                    affinity = CaculateAffinity(alphaSmi, file_protein=pro_file[args.k], file_lig_ref=ligand_file[args.k], out_path=resFolderPath)
                    # affinity = CaculateAffinity(alphaSmi, file_protein=pro_file[args.k], file_lig_ref=ligand_file[args.k])
                logger.success(-affinity)
            else:
                logger.error('invalid: ' + ''.join(node.path))
        else:
            logger.error('abnormal ending: ' + ''.join(node.path))
        saveMCTSRes(resFolderPath, {
            'score': allScores,
            'validSmiles': allValidSmiles,
            'allSmiles': allSmiles,
            'finalSmile': alphaSmi,
            'finalScore': -affinity
        })
        ET = time.time()
        logger.info('time {}'.format((ET-ST)//60))
| CMACH508/AlphaDrug | mcts.py | mcts.py | py | 10,440 | python | en | code | 28 | github-code | 90 |
16902149307 | # 人物出现次数
import re
def item_num(hero):
    """Count how many times *hero* occurs in the novel text ``sanguo.txt``.

    The whole file is read and newlines are stripped first, so a name that
    happens to be split across a line break is still counted.
    """
    with open('sanguo.txt') as f:
        # Bug fix: the original stripped the literal two-character string
        # '/n' instead of the newline escape '\n'.
        data = f.read().replace('\n', '')
    name_num = re.findall(hero, data)
    return len(name_num)
# Read the character name list ('|'-separated) and count each name's
# occurrences in the novel text.
name_dict = {}
with open('name.txt') as f:
    names = f.read().split('|')
    # print(names)
for n in names:
    name_dict[n] = item_num(n)
print(name_dict)
| bobiwang/study | class_func/sanguo_v2.py | sanguo_v2.py | py | 460 | python | en | code | 0 | github-code | 90 |
73211685418 | #
# @lc app=leetcode id=124 lang=python
#
# [124] Binary Tree Maximum Path Sum
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def maxPathSum(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # Like "Diameter of Binary Tree": at each node consider "splitting"
        # (left path + node + right path) for the answer, but only hand the
        # best single branch upward to the parent.
        # Improvement: a nonlocal accumulator replaces the module-level
        # `global max_sum`, so the module namespace is no longer polluted
        # and repeated calls cannot interfere with each other.
        max_sum = float("-inf")

        # Returns the best downward path sum starting at `node` (no split).
        def dfs(node):
            nonlocal max_sum
            if not node:
                return 0
            # Negative subtree sums can only hurt; treat them as 0 (skip).
            left_gain = max(dfs(node.left), 0)
            right_gain = max(dfs(node.right), 0)
            # Candidate answer that splits at this node.
            max_sum = max(max_sum, node.val + left_gain + right_gain)
            return node.val + max(left_gain, right_gain)

        dfs(root)
        return max_sum
# @lc code=end
| ashshekhar/leetcode-problems-solutions | 124.binary-tree-maximum-path-sum.py | 124.binary-tree-maximum-path-sum.py | py | 1,391 | python | en | code | 0 | github-code | 90 |
17971128559 |
def solve():
h, w = map(int, input().split())
n = int(input())
a = list(map(int, input().split()))
a_line = []
for i, i_cnt in enumerate(a):
color = i+1
for j in range(i_cnt):
a_line.append(color)
# wごとに区切ってlistに詰める
ans = [['']*w for _ in range(h)]
cur = 0
for i in range(h):
for j in range(w):
ans[i][j] = str(a_line[cur])
cur += 1
for i in range(h):
print(' '.join(ans[i] if i%2==0 else reversed(ans[i])))
solve() | Aasthaengg/IBMdataset | Python_codes/p03638/s776620575.py | s776620575.py | py | 547 | python | en | code | 0 | github-code | 90 |
74750508455 | """
Buttons:
0 - A
1 - B
2 - X
3 - Y
4 - left shoulder button
5 - right shoulder button
6 - SYS
7 - Menu
11 - Stadia
12 - Box
"""
import os
# this will fool the system to think it has video access
os.environ["SDL_VIDEODRIVER"] = "dummy"
import asyncio
import pprint
import pygame
import threading
class GoogleStadiaController:
    """Thin wrapper around pygame's joystick API for a Google Stadia pad.

    Despite the original "PS4" wording, the button/axis constants below match
    the Stadia layout documented at the top of this file.
    """
    # Button indices as reported by pygame for this pad.
    BUTTON_A = 0
    BUTTON_B = 1
    BUTTON_X = 2
    BUTTON_Y = 3
    # 0: left axis: x, [-1, 1]
    # 1: left axis: y, [-1, 1]
    # 2: right axis: x, [-1, 1]
    # 3: right axis: y, [-1, 1]
    # 4: right trigger, [-1, 1],
    # 5: left trigger, [-1, 1]
    AXIS_1_X = 0
    AXIS_1_Y = 1
    AXIS_2_X = 2
    AXIS_2_Y = 3
    AXIS_TRIGGER_X = 4
    AXIS_TRIGGER_Y = 5
    # Class-level defaults; populated lazily in init()/listen().
    controller = None
    axis_data = None
    button_data = None
    hat_data = None
    def init(self):
        """Initialize the joystick components"""
        pygame.init()
        pygame.joystick.init()
        self.controller = pygame.joystick.Joystick(0)
        self.controller.init()
        # Last seen value per logical game event (None = nothing pending).
        self.events = {
            'drive':None, 'turret_left_right':None, 'gun_up_down':None, 'fire':None
        }
        # Registered callback functions per event of interest.
        self.events_callback = {k:[] for k in self.events}
        #print(self.events_callback)
    def register(self, event, callback):
        """
        run callback() when event happens
        """
        if event in self.events_callback and callback not in self.events_callback[event]:
            self.events_callback[event].append(callback)
    def unregister(self, event, callback):
        """
        remove callback()
        """
        if event in self.events_callback and callback in self.events_callback[event]:
            self.events_callback[event].remove(callback)
    async def listen(self):
        """Pump pygame joystick events forever (currently just prints them).

        NOTE(review): the large triple-quoted string inside the loop below is
        disabled event-translation code kept for reference; `pygame.event.wait`
        is blocking, so this coroutine never yields to the event loop.
        """
        if not self.axis_data:
            self.axis_data = {}
        if not self.button_data:
            self.button_data = {}
            for i in range(self.controller.get_numbuttons()):
                self.button_data[i] = False
        if not self.hat_data:
            self.hat_data = {}
            for i in range(self.controller.get_numhats()):
                self.hat_data[i] = (0, 0)
        while True:
            event = pygame.event.wait()
            print(event)
            """
            #for event in pygame.event.get():
            if event.type == pygame.JOYAXISMOTION:
                #self.axis_data[event.axis] = round(event.value,2)
                if event.axis == GoogleStadiaController.AXIS_2_X:
                    self.events['turret_left_right'] = round(event.value, 2)
                if event.axis == GoogleStadiaController.AXIS_2_Y:
                    self.events['gun_up_down'] = round(event.value, 2)
                #if event.axis == GoogleStadiaController.AXIS_TRIGGER_X:
                #    self.event_value['gun_x'] = event.value
                #if event.axis == GoogleStadiaController.AXIS_TRIGGER_Y:
                #    self.event_value['gun_y'] = event.value
            elif event.type == pygame.JOYBUTTONDOWN:
                #self.button_data[event.button] = True
                if event.button == GoogleStadiaController.BUTTON_A:
                    self.events['fire'] = True
            elif event.type == pygame.JOYBUTTONUP:
                # self.button_data[event.button] = False
                if event.button == GoogleStadiaController.BUTTON_A:
                    self.events['fire'] = False
            elif event.type == pygame.JOYHATMOTION:
                #self.hat_data[event.hat] = event.value
                self.events['drive'] = event.value
            await asyncio.sleep(0)
            """
    async def execute(self):
        """Continuously dispatch pending event values to registered callbacks."""
        #index = 0
        while True:
            print('CONSUME')
            #os.system('clear')
            #print(index, self.events)
            #index += 1
            #turn = 0.0
            for evt, callbacks in self.events_callback.items():
                for cb in callbacks: # multiple callbacks on one event?
                    if self.events[evt] is not None:
                        cb(self.events[evt])
                        #self.events[evt] = None
            await asyncio.sleep(0)
def controller_main():
    """Blocking entry point: initialize the pad and pump its event loop.

    Bug fix: ``listen`` is a coroutine, so the original bare ``ps4.listen()``
    call only created a coroutine object and returned immediately without
    ever running the loop; it must be driven by asyncio.
    """
    pad = GoogleStadiaController()
    pad.init()
    asyncio.run(pad.listen())
def drive(args):
    """Debug callback: print a drive command given as a (direction, throttle) pair."""
    heading, power = args
    print('I am driving', heading, power)


def fire(args):
    """Debug callback: print a fire event together with its payload."""
    print('Fire!!!!', args)


# Running counters for how many times each axis callback printed something.
INDEX1 = 0
INDEX2 = 0


def move_turret(args):
    """Debug callback: print turret-axis values whose magnitude exceeds 0.3."""
    global INDEX1
    if abs(args) > 0.3:
        print(INDEX1, 'turret', args)
        INDEX1 += 1


def move_gun(args):
    """Debug callback: print every gun-axis value."""
    global INDEX2
    print(INDEX2, 'gun', args)
    INDEX2 += 1
async def main():
    """Wire debug callbacks to the pad, then run listener and executor tasks."""
    stadia = GoogleStadiaController()
    stadia.init()
    stadia.register('drive', drive)
    stadia.register('turret_left_right', move_turret)
    #stadia.register('gun_up_down', move_gun)
    #stadia.register('fire', fire)
    # NOTE(review): this bare call only creates a coroutine object that is
    # never awaited (listen() is async); it looks like leftover debug code.
    stadia.listen()
    try:
        listener = asyncio.create_task(stadia.listen())
        executor = asyncio.create_task(stadia.execute())
        await listener
    except Exception as e:
        print(e)
    finally:
        pass
if __name__ == '__main__':
asyncio.run(main())
| home9464/battletank | pad.py | pad.py | py | 5,482 | python | en | code | 0 | github-code | 90 |
17999607430 | # 18 - Python Kivy - Propriedades e atribuição simultânea
# https://www.youtube.com/watch?v=kDu1HJPruIE&list=PLsMpSZTgkF5AV1FmALMgW8W-TvrfR3nrs&index=18
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.behaviors.button import ButtonBehavior
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.utils import get_color_from_hex
# se formos mudar uma propriedade de lista como cores importamos ListProperty,
# ser texto --> StringProperty, se numero --> NumericProperty
from kivy.properties import ListProperty
from kivy.lang import Builder
Builder.load_file('21kivy.kv')
class Gerenciador_de_telas(ScreenManager):
    """Root screen manager; its screens/layout are declared in 21kivy.kv."""
    pass
class Menu(Screen):
    """Menu screen; its layout is declared in 21kivy.kv."""
    pass
class Botao_dinamico(ButtonBehavior, Label):
    """Rounded "pill" button drawn on the canvas that swaps colors on press.

    `cor_dinamica` is the idle color and `cor_dinamica_pressed` the pressed
    color; both are ListProperty defaults that the .kv file can override.
    """
    # Default idle color.  A plain RGBA list works too, e.g.
    # cor_dinamica = ListProperty([0.1, 0.5, 0.7, 1]).
    cor_dinamica = ListProperty(get_color_from_hex('#23a3bc'))
    # Color shown while the button is held down.
    cor_dinamica_pressed = ListProperty(get_color_from_hex('#7FFF00'))

    def __init__(self, **kwargs):
        super(Botao_dinamico, self).__init__(**kwargs)
        self.atualizar_canvas()

    def on_pos(self, *args):
        # Redraw whenever the widget moves.
        self.atualizar_canvas()

    def on_size(self, *args):
        # Redraw whenever the widget is resized.
        self.atualizar_canvas()

    def on_press(self, *args):
        # Swap the idle and pressed colors, then redraw.
        # Bug fix: the original assigned sequentially, so after the first
        # assignment both attributes held the same value and no swap happened.
        self.cor_dinamica, self.cor_dinamica_pressed = (
            self.cor_dinamica_pressed, self.cor_dinamica)
        self.atualizar_canvas()

    def on_release(self, *args):
        # Swap the colors back when the button is released (same fix as above).
        self.cor_dinamica, self.cor_dinamica_pressed = (
            self.cor_dinamica_pressed, self.cor_dinamica)
        self.atualizar_canvas()

    def atualizar_canvas(self, *args):
        """Redraw the pill shape: two circular end caps plus a center rectangle."""
        self.canvas.before.clear()
        with self.canvas.before:
            # The fill color follows `cor_dinamica`.
            Color(rgba=self.cor_dinamica)
            Ellipse(size=(self.height, self.height), pos=(self.pos))
            Ellipse(size=(self.height, self.height), pos=(
                self.x + self.width - self.height, self.y))
            Rectangle(size=(self.width - self.height, self.height),
                      pos=(self.x + self.height / 2.0, self.y))
class Widget_geral(Screen):
    """Task-list screen: shows existing tasks and lets the user add new ones."""
    def __init__(self, tarefas=[], **kwargs):
        # NOTE(review): mutable default argument (tarefas=[]) is shared
        # between calls; harmless here only because it is never mutated.
        super().__init__(**kwargs)
        for tarefa in tarefas:
            self.ids.scroll_para_colocar_as_tarefas.add_widget(
                Tarefa_mais_botao_remover(text=tarefa))
    def on_pre_enter(self):
        # Intercept keyboard input while this screen is visible.
        Window.bind(on_keyboard=self.voltar)
    def voltar(self, window, key, *args):
        # Keycode 27 (Esc; typically also the Android back button): go back
        # to the menu screen and consume the event.
        if key == 27:
            App.get_running_app().root.current = 'menu'
            return True
    def on_pre_leave(self):
        Window.unbind(on_keyboard=self.voltar)
    def adicionar_nova_tarefa(self):
        # Take the text from the input field, add it as a task, clear the field.
        nova_tarefa = self.ids.texto_da_tarefa.text
        self.ids.scroll_para_colocar_as_tarefas.add_widget(
            Tarefa_mais_botao_remover(text=nova_tarefa))
        self.ids.texto_da_tarefa.text = ''
class Tarefa_mais_botao_remover(BoxLayout):
    """One task row; its widgets (description label etc.) come from 21kivy.kv."""
    def __init__(self, text='', **kwargs):
        super().__init__(**kwargs)
        self.ids.descricao_tarefa.text = text
class HashLDash_Tutorial_App(App):
    """Application entry point; the root widget is the screen manager."""
    def build(self):
        return Gerenciador_de_telas()
if __name__ == '__main__':
HashLDash_Tutorial_App().run()
| LivioAlvarenga/Tutoriais_Kivy_KivyMD | Tutorial_Kivy_HashLDash/21kivy.py | 21kivy.py | py | 4,003 | python | pt | code | 1 | github-code | 90 |
29335905117 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 00:42:57 2018
@author: jan
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the mushroom dataset: column 0 is the class label, columns 1-22 are
# categorical features.
dataset = pd.read_csv('mushrooms.csv')
X = dataset.iloc[:, 1:23].values
y = dataset.iloc[:, 0].values
# Encode every categorical column to integers.
# NOTE(review): OneHotEncoder is imported but never used; plain label
# encoding imposes an artificial ordering on the categories.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
for i in range(0, 22):
    X[:, i] = labelencoder_X.fit_transform(X[:, i])
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
# 80/20 train/test split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)
import tensorflow as tf
# Hyperparameters.
learning_rate = 0.5
epochs = 10
batch_size = 100
# Training data placeholders: 22 label-encoded mushroom features in...
x = tf.placeholder(tf.float32, [None, 22])
# ...and a single binary class label out.
y = tf.placeholder(tf.float32, [None, 1])
# Weights connecting the input to the 300-unit hidden layer.
W1 = tf.Variable(tf.random_normal([22, 300], stddev=0.03), name='W1')
b1 = tf.Variable(tf.random_normal([300]), name='b1')
# Weights connecting the hidden layer to the single output unit.
W2 = tf.Variable(tf.random_normal([300, 1], stddev=0.03), name='W2')
b2 = tf.Variable(tf.random_normal([1]), name='b2')
# Hidden layer: affine transform + ReLU.
hidden_out = tf.add(tf.matmul(x, W1), b1)
hidden_out = tf.nn.relu(hidden_out)
# NOTE(review): softmax over a single output unit is constant 1.0; a sigmoid
# is almost certainly what was intended for this binary output.
y_ = tf.nn.softmax(tf.add(tf.matmul(hidden_out, W2), b2))
y_clipped = tf.clip_by_value(y_, 1e-10, 0.9999999)
# Binary cross-entropy on the clipped prediction.
cross_entropy = -tf.reduce_mean(tf.reduce_sum(y * tf.log(y_clipped)
                                              + (1 - y) * tf.log(1 - y_clipped), axis=1))
# Plain SGD on the cross-entropy.
optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
init_op = tf.global_variables_initializer()
# NOTE(review): argmax over an axis of size 1 is always 0, so this
# "accuracy" compares 0 with 0 and always evaluates to 1.0.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Session 1: mini-batch training over contiguous slices.
with tf.Session() as sess:
    # initialise the variables
    sess.run(init_op)
    total_batch = int(6499 / batch_size)
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            # NOTE(review): only the first 11 of the 22 feature columns are
            # sliced here, which cannot be fed into the [None, 22] placeholder.
            batch_x = X_train[i * 100 : (i * 100) + batch_size, 0:11]
            batch_y = y_train.reshape(6499, 1)[i * 100 : (i * 100) + batch_size, 0:1]
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
            avg_cost += c / total_batch
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
    print(sess.run(accuracy, feed_dict={x: X_test, y: y_test.reshape(1625, 1)}))
# Session 2: full-batch training (one update per epoch).
with tf.Session() as sess:
    # initialise the variables
    sess.run(init_op)
    total_batch = int(6499 / batch_size)
    for epoch in range(epochs):
        avg_cost = 0
        _, c = sess.run([optimiser, cross_entropy], feed_dict={x: X_train, y: y_train.reshape(6499, 1)})
        avg_cost += c / total_batch
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
    print(sess.run(accuracy, feed_dict={x: X_test, y: y_test.reshape(1625, 1)}))
# Session 3: training on random mini-batches.
# NOTE(review): next_batch is defined further down the file, so running this
# script top-to-bottom raises a NameError here.
with tf.Session() as sess:
    # initialise the variables
    sess.run(init_op)
    total_batch = int(6499 / batch_size)
    for epoch in range(epochs):
        avg_cost = 0
        for i in range(total_batch):
            batch_x, batch_y = next_batch(batch_size, X_train, y_train.reshape(6499, 1))
            _, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
            avg_cost += c / total_batch
        print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost))
    print(sess.run(accuracy, feed_dict={x: X_test, y: y_test.reshape(1625, 1)}))
# NOTE(review): `sess` is referenced below outside any `with` block; the last
# session has already been closed at this point.
our_test_data = np.array([5, 3, 9, 1, 0, 1, 0, 0, 5, 0, 4, 2, 3, 7, 7, 0, 2, 1, 4, 2, 5, 4])
feed_dict = {x: [our_test_data]}
classification = sess.run(y_, feed_dict)
print (classification)
import numpy as np
def next_batch(num, data, labels):
    """Return `num` randomly chosen (sample, label) pairs as numpy arrays."""
    order = np.arange(0, len(data))
    np.random.shuffle(order)
    chosen = order[:num]
    samples = np.asarray([data[i] for i in chosen])
    targets = np.asarray([labels[i] for i in chosen])
    return samples, targets
| omerkocbil/Artificial-Neural-Networks | my_ann_tensorflow_different_dataset.py | my_ann_tensorflow_different_dataset.py | py | 4,619 | python | en | code | 0 | github-code | 90 |
34191516368 | """Testing module graph case study."""
from graph import Graph
from copy import deepcopy
from dfs import DFS_complete
from bfs import BFS_complete
from topological_sort import topological_sort
from answers import *
def read_file(path):
"""Reads file and generates graph graph."""
# Text file parsing.
data = []
with open(path, "r") as text:
for line in text.readlines()[1:]:
temp = (
line.rstrip().replace("(", "").replace(")", "").replace(",", "").split()
)
for vertex in range(len(temp)):
if temp[vertex] == "none":
temp[vertex] = None
data.append(temp)
# New graph and list for vertices generating.
graph = Graph(True)
data_graph = deepcopy(data)
# Graph vertices inserting.
for element in data:
temp_vertex = graph.insert_vertex(element[0])
for line in range(len(data_graph)):
for vertex in range(len(data_graph[line])):
if data_graph[line][vertex] == element[0]:
data_graph[line][vertex] = temp_vertex
# Graph edges inserting.
for element in data_graph:
for vertex in element[1:]:
if vertex:
graph.insert_edge(element[0], vertex)
return graph
def bfs_test(graph):
"""BFS algorithm testing."""
test = BFS_complete(graph)
output = [str(vertex) for vertex in test]
assert output == bfs_answer
print("BFS algoorythm completed successfully. Here are results:")
print("\n".join(output))
def dfs_test(graph):
"""DFS algorithm testing."""
test = DFS_complete(graph)
output = [str(vertex) for vertex in test]
assert output == dfs_answer
print("DFS algoorythm completed successfully. Here are results:")
print("\n".join(output))
def topological_sort_test(graph):
    """Topological sort testing.

    Runs topological_sort on the graph, asserts the vertex order matches
    the expected `topological_sorting_answer`, then prints the result.
    """
    test = topological_sort(graph)
    output = [str(vertex) for vertex in test]
    assert output == topological_sorting_answer
    print("Topological sorting completed successfully. Here are results:")
    print("\n".join(output))
if __name__ == "__main__":
    # Build the dependency graph from the data file, then run each
    # traversal test against its expected answer.
    path = "stanford_cs.txt"
    graph = read_file(path)
    dfs_test(graph)
    print()
    bfs_test(graph)
    print()
    topological_sort_test(graph)
| vbshuliar/Programming_Projects_and_Labs_from_Ukrainian_Catholic_University | 02/programming/labs/12_data_structures_graphs/01_graph_map_testing/graph_map_testing.py | graph_map_testing.py | py | 2,326 | python | en | code | 1 | github-code | 90 |
import sys
import json
import numpy as np
from PIL import Image
from glob import glob
import os
import pandas as pd
import albumentations as alb
import cv2
def load_json(path):
    """Load and return the JSON document stored at `path`.

    Propagates `OSError` / `json.JSONDecodeError` for a missing or
    malformed file.
    """
    # The former `d = {}` placeholder was dead code: json.load either
    # succeeds or raises, so its result can be returned directly.
    with open(path, mode="r") as f:
        return json.load(f)
def IoUfrom2bboxes(boxA, boxB):
    """Compute the intersection-over-union of two [x0, y0, x1, y1] boxes.

    Coordinates are treated as inclusive pixel indices, hence the `+ 1`
    terms when converting corner differences into widths/heights.
    """
    # Corners of the overlap rectangle.
    left = max(boxA[0], boxB[0])
    top = max(boxA[1], boxB[1])
    right = min(boxA[2], boxB[2])
    bottom = min(boxA[3], boxB[3])
    # Overlap area (zero when the boxes are disjoint).
    overlap = max(0, right - left + 1) * max(0, bottom - top + 1)
    # Individual box areas, inclusive of boundary pixels.
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the double-counted overlap.
    return overlap / float(area_a + area_b - overlap)
def crop_face(img,landmark=None,bbox=None,margin=False,crop_by_bbox=True,abs_coord=False,only_img=False,phase='train'):
    """Crop a face region from `img` using a bounding box or landmarks.

    Margins around the raw box are randomised during training and fixed
    otherwise. Returns the crop plus landmarks/bbox shifted into crop
    coordinates; with `abs_coord` also the crop's absolute coordinates.
    NOTE(review): assumes `bbox` is [[x0, y0], [x1, y1]] and `landmark`
    an (N, 2) array with the face in the first 68 points -- confirm
    against callers.
    """
    assert phase in ['train','val','test']
    #crop face------------------------------------------
    H,W=len(img),len(img[0])
    assert landmark is not None or bbox is not None
    H,W=len(img),len(img[0])
    if crop_by_bbox:
        # Base margins: a quarter of the box size on every side.
        x0,y0=bbox[0]
        x1,y1=bbox[1]
        w=x1-x0
        h=y1-y0
        w0_margin=w/4#0#np.random.rand()*(w/8)
        w1_margin=w/4
        h0_margin=h/4#0#np.random.rand()*(h/5)
        h1_margin=h/4
    else:
        # Tight box around the 68 facial landmarks; larger top margin to
        # include the forehead.
        x0,y0=landmark[:68,0].min(),landmark[:68,1].min()
        x1,y1=landmark[:68,0].max(),landmark[:68,1].max()
        w=x1-x0
        h=y1-y0
        w0_margin=w/8#0#np.random.rand()*(w/8)
        w1_margin=w/8
        h0_margin=h/2#0#np.random.rand()*(h/5)
        h1_margin=h/5
    if margin:
        # Extra-wide context crop.
        w0_margin*=4
        w1_margin*=4
        h0_margin*=2
        h1_margin*=2
    elif phase=='train':
        # Jitter each margin by a factor in [0.2, 0.8) for augmentation.
        w0_margin*=(np.random.rand()*0.6+0.2)#np.random.rand()
        w1_margin*=(np.random.rand()*0.6+0.2)#np.random.rand()
        h0_margin*=(np.random.rand()*0.6+0.2)#np.random.rand()
        h1_margin*=(np.random.rand()*0.6+0.2)#np.random.rand()
    else:
        # Deterministic half margins for val/test.
        w0_margin*=0.5
        w1_margin*=0.5
        h0_margin*=0.5
        h1_margin*=0.5
    # Clamp the expanded box to the image bounds.
    y0_new=max(0,int(y0-h0_margin))
    y1_new=min(H,int(y1+h1_margin)+1)
    x0_new=max(0,int(x0-w0_margin))
    x1_new=min(W,int(x1+w1_margin)+1)
    img_cropped=img[y0_new:y1_new,x0_new:x1_new]
    # Shift landmarks/bbox into the crop's coordinate frame.
    if landmark is not None:
        landmark_cropped=np.zeros_like(landmark)
        for i,(p,q) in enumerate(landmark):
            landmark_cropped[i]=[p-x0_new,q-y0_new]
    else:
        landmark_cropped=None
    if bbox is not None:
        bbox_cropped=np.zeros_like(bbox)
        for i,(p,q) in enumerate(bbox):
            bbox_cropped[i]=[p-x0_new,q-y0_new]
    else:
        bbox_cropped=None
    if only_img:
        return img_cropped
    if abs_coord:
        return img_cropped,landmark_cropped,bbox_cropped,(y0-y0_new,x0-x0_new,y1_new-y1,x1_new-x1),y0_new,y1_new,x0_new,x1_new
    else:
        return img_cropped,landmark_cropped,bbox_cropped,(y0-y0_new,x0-x0_new,y1_new-y1,x1_new-x1)
class RandomDownScale(alb.core.transforms_interface.ImageOnlyTransform):
    """Albumentations transform simulating compression/quality loss by
    downscaling the image and (optionally) resizing it back up."""
    def apply(self,img,**params):
        return self.randomdownscale(img)

    def randomdownscale(self,img):
        """Downscale by a random factor (2x or 4x) and resize back to the
        original shape when `keep_input_shape` is set."""
        keep_ratio=True
        keep_input_shape=True
        H,W,C=img.shape
        ratio_list=[2,4]
        r=ratio_list[np.random.randint(len(ratio_list))]
        # Nearest-neighbour down, bilinear up: introduces aliasing on
        # purpose.
        img_ds=cv2.resize(img,(int(W/r),int(H/r)),interpolation=cv2.INTER_NEAREST)
        if keep_input_shape:
            img_ds=cv2.resize(img_ds,(W,H),interpolation=cv2.INTER_LINEAR)
        return img_ds
# Option 1: CSV file with row/column keywords. A single file gets too large; not recommended. https://www.cnblogs.com/xiaozi/p/10488653.html
# Option 2: write a plain (unformatted) file directly -- tried below.
# For the data format question, a dict is the most suitable choice.
import time
# Case 1: plain open/read/close.
# f = open("中文输入测试", "rt", encoding="utf-8")
# data = f.read()
# # print(data)
# f.close()
# Case 2: context manager.
# with open("中文输入测试", "rt", encoding="utf-8") as f:
#     # New style:
#     for line in f:
#         print(line)
#     # Old style:
#     data = f.read()
#     print(data)
# Writing to a file:
# with open("写入文件", "wt", encoding="utf-8", newline="\n") as f:
#     f.write("写入测试\n")
#     f.write("空格测试\n")
#     f.writelines("中文写入测试\n")
#     f.writelines("李四\n")
#     f.writelines("女\n")
#     f.writelines("18\n")
# The best way to persist a dict:
import json
dict_info = {"basic_info": {"Name": "张三", "Age": 19, "Sex": "男"}, "bg_info": {"Home": "北京", "Blood": "", }}
# Write option 1: dumps to a string, then write the string.
# jsonData = json.dumps(dict_info)
# with open("写入json测试.json", "wt", encoding="utf-8") as f:
#     f.write(jsonData)
# Write option 2: json.dump straight into the file object (cleanest).
t1 = time.time()
with open("写入json测试.json", "wt", encoding="utf-8") as f:
    json.dump(dict_info, f, ensure_ascii=False, indent=4)
t2 = time.time()
print("time:", t2-t1)
# Displaying Chinese characters in JSON output: solved via ensure_ascii=False.
# https://blog.csdn.net/weixin_44731100/article/details/90903110
# https://www.cnblogs.com/XhyTechnologyShare/p/12033690.html
# Reading the JSON file back:
# with open("写入json测试.json", "rt", encoding="utf-8") as fr:
#     data = json.load(fr)
#     print(data)
# Persisting data with a struct-like class: https://www.cnblogs.com/nyist-xsk/p/10470527.html
# class Myclass(object):
#     class Struct(object):
#         def __init__(self, name, age, job):
#             self.name = name
#             self.age = age
#             self.job = job
#
#     def make_struct(self, name, age, job):
#         return self.Struct(name, age, job)
#
#
# myclass = Myclass()
# test1 = myclass.make_struct('xsk', '22', 'abc')
# test2 = myclass.make_struct('mtt', '23', 'def')
#
# print(test1.name)
# print(test1.job)
# print(test1.job)
# print(test2.name)
# print(test2.age)
# print(test2.job)
# print(test2.job)
| byst4nder/knowledge-structure | temp/配附/01数据存储和读取.py | 01数据存储和读取.py | py | 2,451 | python | en | code | 0 | github-code | 90 |
import sys
# Fast input and common contest constants.
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
def main():
from itertools import permutations
N = int(readline())
P = tuple(map(int, readline().split()))
Q = tuple(map(int, readline().split()))
d = {x: i for i, x in enumerate(permutations(range(1, N + 1)))}
print(abs(d[Q] - d[P]))
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02813/s227841911.py | s227841911.py | py | 414 | python | en | code | 0 | github-code | 90 |
import sys
# Find the length of the shortest number 7, 77, 777, ... divisible by k.
input = sys.stdin.readline
#n = int(input())
#l = list(map(int, input().split()))
'''
a=[]
b=[]
for i in range():
    A, B = map(int, input().split())
    a.append(A)
    b.append(B)'''
k=int(input())
# An even k can never divide a number made only of 7s.
if k%2==0:
    print("-1")
    sys.exit()
# NOTE(review): `flg` is never set to True; the loop exits via sys.exit()
# on success, so the trailing `if not flg` always prints -1 on failure.
flg=False
cnt=0
a=7
# Track 7, 77, 777, ... modulo k; by pigeonhole, k iterations suffice.
for i in range(k):
    cnt+=1
    a%=k
    if a%k==0:
        print(cnt)
        sys.exit()
    a*=10
    a+=7
if not flg:
    print(-1)
import pytest
import requests
import json
import re
class TestMethods():
    """API tests for the OpenWeatherMap hourly-forecast sample endpoint."""

    # `pytest.yield_fixture` has been deprecated since pytest 3.0 and was
    # removed in pytest 6.2; a plain `pytest.fixture` supports `yield`.
    @pytest.fixture()
    def setUp(self):
        """Fetch the forecast and expose its `list` payload on `self`."""
        self.url = "https://samples.openweathermap.org/data/2.5/forecast/hourly?q=London,us&appid=b6907d289e10d714a6e88b30761fae22"
        response = requests.get(self.url)
        # Response data is null (use identity comparison with None).
        if response is None:
            print("Response data is null.")
        self.json_response = json.loads(response.text)["list"]
        # List data is empty
        if self.json_response is None:
            print("Response list data is null.")
        yield
        print("<---------Completed Test Case--------->")

    # Dead commented-out tests test_1..test_4 removed; see VCS history.

    def test_5(self, setUp):
        """Every entry with weather id 800 must be described 'clear sky'."""
        for i in range(len(self.json_response)):
            weather_data = self.json_response[i]['weather'][0]
            if weather_data['id'] == 800:
                print(weather_data['description'])
                msg = weather_data['description']
                assert re.match(r'clear sky', msg)
# if __name__ == '__main__':
# unittest.main()
| govind794/APIAutomation | AllTestCase/TestMethods.py | TestMethods.py | py | 3,279 | python | en | code | 1 | github-code | 90 |
# -*- coding: utf-8 -*-
"""
对原始数据进行处理,获取神经网络模型输入的特征值。
@author:chenli0830(李辰)
@source:https://github.com/happynoom/DeepTrade
"""
import numpy
import talib
from LSTM_LOSS_MODEL.rawdate import read_sample_data
class ChartFeature(object):
    """Builds technical-indicator feature columns from OHLCV price series.

    `selector` is an iterable of indicator-group names (e.g. "ROCP",
    "MACD"); names outside `supported` are skipped with a warning.
    Extracted columns accumulate in `self.feature` as a list of 1-D
    arrays, one per indicator column.
    """

    def __init__(self, selector):
        self.selector = selector
        # Typo fixed: `suppoorted` -> `supported`. The old name is kept as
        # an alias for backward compatibility with any external callers.
        # NOTE: "BOLL" is listed but has no branch in extract_by_type.
        self.supported = {"ROCP", "OROCP", "HROCP", "LROCP", "MACD", "RSI", "VROCP", "BOLL", "MA", "VMA", "PRICE_VOLUME"}
        self.suppoorted = self.supported
        self.feature = []

    def moving_extract(self, window=30, open=None, close=None, high=None, low=None,
                       volumes=None, with_label=True, flatten=True):
        """Slide a `window`-wide frame over the extracted feature columns.

        When `with_label` is True, each window is paired with the next
        day's close-to-close relative change as its label.
        """
        self.extract(open=open, close=close, high=high, low=low, volumes=volumes)
        feature_arr = numpy.asarray(self.feature)
        print("feature_arr: " + str(feature_arr.shape))
        p = 0
        rows = feature_arr.shape[0]
        print("feature dimension: %s" % rows)
        if with_label:
            moving_features = []
            moving_labels = []
            # Strict `<`: the label needs close[p + window] to exist.
            while p + window < feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                # Label: relative price change on the day after the window.
                p_change = (close[p + window] - close[p + window - 1]) / close[p + window - 1]
                y = p_change
                if flatten:
                    # Flatten to 1-D in column-major ("F") order.
                    x = x.flatten("F")
                    print("flatten: " + str(x.shape))
                moving_features.append(numpy.nan_to_num(x))
                moving_labels.append(y)
                p += 1
            return numpy.asarray(moving_features), numpy.asarray(moving_labels)
        else:
            moving_features = []
            while p + window <= feature_arr.shape[1]:
                x = feature_arr[:, p:p + window]
                if flatten:
                    x = x.flatten("F")
                moving_features.append(numpy.nan_to_num(x))
                p += 1
            return moving_features

    def extract(self, open=None, close=None, high=None, low=None, volumes=None):
        """Compute all selected feature columns into self.feature."""
        self.feature = []
        for feature_type in self.selector:
            if feature_type in self.supported:
                # Typo fixed in log message: "featuen" -> "feature".
                print("extracting feature : %s" % feature_type)
                self.extract_by_type(feature_type, open=open, close=close, high=high, low=low, volumes=volumes)
            else:
                print("feature type not supported: %s" % feature_type)
        self.feature_distribution()
        return self.feature

    def feature_distribution(self):
        """Print mean/var/max/min for every extracted feature column."""
        k = 0
        for feature_column in self.feature:
            fc = numpy.nan_to_num(feature_column)
            mean = numpy.mean(fc)
            var = numpy.var(fc)
            max_value = numpy.max(fc)
            min_value = numpy.min(fc)
            print("[%s_th feature] mean: %s, var: %s, max: %s, min: %s" % (k, mean, var, max_value, min_value))
            k = k + 1

    def extract_by_type(self, feature_type, open=None, close=None, high=None, low=None, volumes=None):
        """Append the feature columns for one indicator group."""
        if feature_type == 'ROCP':
            rocp = talib.ROCP(close, timeperiod=1)
            self.feature.append(rocp)
        if feature_type == 'OROCP':
            orocp = talib.ROCP(open, timeperiod=1)
            self.feature.append(orocp)
        if feature_type == 'HROCP':
            hrocp = talib.ROCP(high, timeperiod=1)
            self.feature.append(hrocp)
        if feature_type == 'LROCP':
            lrocp = talib.ROCP(low, timeperiod=1)
            self.feature.append(lrocp)
        if feature_type == 'MACD':
            macd, signal, hist = talib.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
            # Normalise: clamp to [-1, 1], map NaN to 0.
            norm_signal = numpy.minimum(numpy.maximum(numpy.nan_to_num(signal), -1), 1)
            norm_hist = numpy.minimum(numpy.maximum(numpy.nan_to_num(hist), -1), 1)
            norm_macd = numpy.minimum(numpy.maximum(numpy.nan_to_num(macd), -1), 1)
            # numpy.diff gives the discrete first difference (length n-1);
            # prepend a zero to restore length n, then clamp to [-1, 1].
            zero = numpy.asarray([0])
            macdrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(macd)))), -1), 1)
            signalrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(signal)))), -1), 1)
            histrocp = numpy.minimum(numpy.maximum(numpy.concatenate((zero, numpy.diff(numpy.nan_to_num(hist)))), -1), 1)
            self.feature.append(norm_macd)
            self.feature.append(norm_signal)
            self.feature.append(norm_hist)
            self.feature.append(macdrocp)
            self.feature.append(signalrocp)
            self.feature.append(histrocp)
        if feature_type == 'RSI':
            rsi6 = talib.RSI(close, timeperiod=6)
            rsi12 = talib.RSI(close, timeperiod=12)
            rsi24 = talib.RSI(close, timeperiod=24)
            # +100 shift keeps ROCP's denominator strictly positive.
            rsi6rocp = talib.ROCP(rsi6 + 100., timeperiod=1)
            rsi12rocp = talib.ROCP(rsi12 + 100., timeperiod=1)
            rsi24rocp = talib.ROCP(rsi24 + 100., timeperiod=1)
            # Rescale RSI from [0, 100] to [-0.5, 0.5].
            self.feature.append(rsi6 / 100.0 - 0.5)
            self.feature.append(rsi12 / 100.0 - 0.5)
            self.feature.append(rsi24 / 100.0 - 0.5)
            self.feature.append(rsi6rocp)
            self.feature.append(rsi12rocp)
            self.feature.append(rsi24rocp)
        if feature_type == 'VROCP':
            # Volume rate of change: floor volumes at 1, map NaN to 0 and
            # squash with arctan to tame outliers.
            vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
            self.feature.append(vrocp)
        if feature_type == 'MA':
            # Price moving averages at multiple horizons: both their own
            # rate of change and their relative distance to the close.
            ma5 = numpy.nan_to_num(talib.MA(close, timeperiod=5))
            ma10 = numpy.nan_to_num(talib.MA(close, timeperiod=10))
            ma20 = numpy.nan_to_num(talib.MA(close, timeperiod=20))
            ma30 = numpy.nan_to_num(talib.MA(close, timeperiod=30))
            ma60 = numpy.nan_to_num(talib.MA(close, timeperiod=60))
            ma90 = numpy.nan_to_num(talib.MA(close, timeperiod=90))
            ma120 = numpy.nan_to_num(talib.MA(close, timeperiod=120))
            ma180 = numpy.nan_to_num(talib.MA(close, timeperiod=180))
            ma360 = numpy.nan_to_num(talib.MA(close, timeperiod=360))
            ma720 = numpy.nan_to_num(talib.MA(close, timeperiod=720))
            ma5rocp = talib.ROCP(ma5, timeperiod=1)
            ma10rocp = talib.ROCP(ma10, timeperiod=1)
            ma20rocp = talib.ROCP(ma20, timeperiod=1)
            ma30rocp = talib.ROCP(ma30, timeperiod=1)
            ma60rocp = talib.ROCP(ma60, timeperiod=1)
            ma90rocp = talib.ROCP(ma90, timeperiod=1)
            ma120rocp = talib.ROCP(ma120, timeperiod=1)
            ma180rocp = talib.ROCP(ma180, timeperiod=1)
            ma360rocp = talib.ROCP(ma360, timeperiod=1)
            ma720rocp = talib.ROCP(ma720, timeperiod=1)
            self.feature.append(ma5rocp)
            self.feature.append(ma10rocp)
            self.feature.append(ma20rocp)
            self.feature.append(ma30rocp)
            self.feature.append(ma60rocp)
            self.feature.append(ma90rocp)
            self.feature.append(ma120rocp)
            self.feature.append(ma180rocp)
            self.feature.append(ma360rocp)
            self.feature.append(ma720rocp)
            self.feature.append((ma5 - close) / close)
            self.feature.append((ma10 - close) / close)
            self.feature.append((ma20 - close) / close)
            self.feature.append((ma30 - close) / close)
            self.feature.append((ma60 - close) / close)
            self.feature.append((ma90 - close) / close)
            self.feature.append((ma120 - close) / close)
            self.feature.append((ma180 - close) / close)
            self.feature.append((ma360 - close) / close)
            self.feature.append((ma720 - close) / close)
        if feature_type == 'VMA':
            # Volume moving averages; NaN -> 0 and arctan squashing keep
            # the huge relative swings of volume bounded.
            ma5 = numpy.nan_to_num(talib.MA(volumes, timeperiod=5))
            ma10 = numpy.nan_to_num(talib.MA(volumes, timeperiod=10))
            ma20 = numpy.nan_to_num(talib.MA(volumes, timeperiod=20))
            ma30 = numpy.nan_to_num(talib.MA(volumes, timeperiod=30))
            ma60 = numpy.nan_to_num(talib.MA(volumes, timeperiod=60))
            ma90 = numpy.nan_to_num(talib.MA(volumes, timeperiod=90))
            ma120 = numpy.nan_to_num(talib.MA(volumes, timeperiod=120))
            ma180 = numpy.nan_to_num(talib.MA(volumes, timeperiod=180))
            ma360 = numpy.nan_to_num(talib.MA(volumes, timeperiod=360))
            ma720 = numpy.nan_to_num(talib.MA(volumes, timeperiod=720))
            ma5rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma5, timeperiod=1)))
            ma10rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma10, timeperiod=1)))
            ma20rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma20, timeperiod=1)))
            ma30rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma30, timeperiod=1)))
            ma60rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma60, timeperiod=1)))
            ma90rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma90, timeperiod=1)))
            ma120rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma120, timeperiod=1)))
            ma180rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma180, timeperiod=1)))
            ma360rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma360, timeperiod=1)))
            ma720rocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(ma720, timeperiod=1)))
            self.feature.append(ma5rocp)
            self.feature.append(ma10rocp)
            self.feature.append(ma20rocp)
            self.feature.append(ma30rocp)
            self.feature.append(ma60rocp)
            self.feature.append(ma90rocp)
            self.feature.append(ma120rocp)
            self.feature.append(ma180rocp)
            self.feature.append(ma360rocp)
            self.feature.append(ma720rocp)
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma5 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma10 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma20 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma30 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma60 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma90 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma120 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma180 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma360 - volumes) / (volumes + 1))))
            self.feature.append(numpy.arctan(numpy.nan_to_num((ma720 - volumes) / (volumes + 1))))
        if feature_type == 'PRICE_VOLUME':
            # Interaction term: price change times (squashed) volume change.
            rocp = talib.ROCP(close, timeperiod=1)
            vrocp = numpy.arctan(numpy.nan_to_num(talib.ROCP(numpy.maximum(volumes, 1), timeperiod=1)))
            pv = rocp * vrocp
            self.feature.append(pv)
def extract_feature(raw_data, selector, window=30, with_label=True, flatten=True):
    """Sort raw bars by date and run ChartFeature extraction over them.

    Returns (features, labels) when `with_label` is True, otherwise just
    the list of feature windows.
    """
    extractor = ChartFeature(selector)
    ordered = sorted(raw_data, key=lambda item: item.date)
    # Split the bar objects into per-field numpy arrays.
    closes = numpy.asarray([item.close for item in ordered])
    opens = numpy.asarray([item.open for item in ordered])
    highs = numpy.asarray([item.high for item in ordered])
    lows = numpy.asarray([item.low for item in ordered])
    volumes = numpy.asarray([float(item.volume) for item in ordered])
    if with_label:
        moving_features, moving_labels = extractor.moving_extract(
            window=window, open=opens, close=closes, high=highs,
            low=lows, volumes=volumes, with_label=True, flatten=flatten)
        return moving_features, moving_labels
    moving_features = extractor.moving_extract(
        window=window, open=opens, close=closes, high=highs,
        low=lows, volumes=volumes, with_label=False, flatten=flatten)
    return moving_features
# from LSTM_LOSS_MODEL.rawdate import read_sample_data
#
# #test
# if __name__ == "__main__":
# raw_data = read_sample_data("data/000001.csv")
# moving_featrues, moving_labels = extract_feature(raw_data, ["ROCP","MACD"])
# print("moving_featrun: " +str(moving_featrues.shape))
# print("moving_labels " + str(moving_labels.shape))
"""
TODO:
- Sorting by standard deviation: Use coefficient of variation (std/mean)
or quartile coefficient of dispersion (Q3 - Q1) / (Q3 + Q1)
- Standard deviation for nominal: try out Variation ratio (1 - n_mode/N)
"""
import datetime
from enum import IntEnum
from itertools import chain
from typing import Any, Optional, Tuple, List
import numpy as np
import scipy.stats as ss
import scipy.sparse as sp
from AnyQt.QtCore import Qt, QSize, QRectF, QModelIndex, pyqtSlot, \
QItemSelection, QItemSelectionRange, QItemSelectionModel
from AnyQt.QtGui import QPainter, QColor, QPalette
from AnyQt.QtWidgets import QStyledItemDelegate, QGraphicsScene, QTableView, \
QHeaderView, QStyle, QStyleOptionViewItem
import Orange.statistics.util as ut
from Orange.data import Table, StringVariable, DiscreteVariable, \
ContinuousVariable, TimeVariable, Domain, Variable
from Orange.util import utc_from_timestamp
from Orange.widgets import widget, gui
from Orange.widgets.data.utils.histogram import Histogram
from Orange.widgets.settings import Setting, ContextSetting, \
DomainContextHandler
from Orange.widgets.utils.itemmodels import DomainModel, AbstractSortTableModel
from Orange.widgets.utils.signals import Input, Output
from Orange.widgets.utils.widgetpreview import WidgetPreview
def _categorical_entropy(x):
    """Compute the entropy of a dense/sparse matrix, column-wise. Assuming
    categorical values."""
    # ut.bincount returns (counts, nan_count); keep only the counts.
    p = [ut.bincount(row)[0] for row in x.T]
    # Normalise the counts into probability distributions.
    p = [pk / np.sum(pk) for pk in p]
    return np.fromiter((ss.entropy(pk) for pk in p), dtype=np.float64)
def coefficient_of_variation(x: np.ndarray) -> np.ndarray:
    """Column-wise std/mean; columns with (near-)zero mean map to inf."""
    means = ut.nanmean(x, axis=0)
    nonzero = ~np.isclose(means, 0, atol=1e-12)
    out = np.full_like(means, fill_value=np.inf)
    out[nonzero] = np.sqrt(ut.nanvar(x, axis=0)[nonzero]) / means[nonzero]
    return out
def format_time_diff(start, end, round_up_after=2):
    """Return an approximate human readable time difference between two dates.

    Parameters
    ----------
    start : int
        Unix timestamp
    end : int
        Unix timestamp
    round_up_after : int
        The number of time units before we round up to the next, larger time
        unit e.g. setting to 2 will allow up to 2 days worth of hours to be
        shown, after that the difference is shown in days. Or put another way
        we will show from 1-48 hours before switching to days.

    Returns
    -------
    str

    """
    start = utc_from_timestamp(start)
    end = utc_from_timestamp(end)
    diff = abs(end - start)  # type: datetime.timedelta
    # Get the different resolutions
    seconds = diff.total_seconds()
    minutes = seconds // 60
    hours = minutes // 60
    days = diff.days
    weeks = days // 7
    # Bug fix: months/years must be direction-independent like the other
    # resolutions (which come from `abs(end - start)`). The month count
    # previously used the signed endpoints, so start > end produced
    # negative months/years and large reversed spans were reported in
    # weeks/days instead of months/years.
    months = abs((end.year - start.year) * 12 + end.month - start.month)
    years = months // 12
    # Check which resolution is most appropriate
    if years >= round_up_after:
        return f'~{years} years'
    elif months >= round_up_after:
        return f'~{months} months'
    elif weeks >= round_up_after:
        return f'~{weeks} weeks'
    elif days >= round_up_after:
        return f'~{days} days'
    elif hours >= round_up_after:
        return f'~{hours} hours'
    elif minutes >= round_up_after:
        return f'~{minutes} minutes'
    else:
        return f'{seconds} seconds'
class FeatureStatisticsTableModel(AbstractSortTableModel):
    """Sortable table model with per-variable summary statistics."""
    # Row roles: which part of the domain a variable comes from; used to
    # pick a background colour per variable kind.
    CLASS_VAR, META, ATTRIBUTE = range(3)
    COLOR_FOR_ROLE = {
        CLASS_VAR: QColor(160, 160, 160),
        META: QColor(220, 220, 200),
        ATTRIBUTE: QColor(255, 255, 255),
    }
    # Variable types never shown in the statistics table.
    HIDDEN_VAR_TYPES = (StringVariable,)
    class Columns(IntEnum):
        """Table columns; the enum value doubles as the column index."""
        ICON, NAME, DISTRIBUTION, CENTER, MODE, MEDIAN, DISPERSION, MIN, MAX, \
            MISSING = range(10)

        @property
        def name(self):
            # Intentionally shadows Enum.name: returns the display header.
            return {self.ICON: '',
                    self.NAME: 'Name',
                    self.DISTRIBUTION: 'Distribution',
                    self.CENTER: 'Mean',
                    self.MODE: 'Mode',
                    self.MEDIAN: 'Median',
                    self.DISPERSION: 'Dispersion',
                    self.MIN: 'Min.',
                    self.MAX: 'Max.',
                    self.MISSING: 'Missing',
                    }[self.value]

        @property
        def index(self):
            return self.value

        @classmethod
        def from_index(cls, index):
            return cls(index)
    def __init__(self, data=None, parent=None):
        """
        Parameters
        ----------
        data : Optional[Table]
        parent : Optional[QWidget]

        """
        super().__init__(parent)

        self.table = None  # type: Optional[Table]
        self.domain = None  # type: Optional[Domain]
        self.target_var = None  # type: Optional[Variable]
        self.n_attributes = self.n_instances = 0

        # Each of these holds a (variables, matrix) pair once data is set.
        self.__attributes = self.__class_vars = self.__metas = None
        # sets of variables for fast membership tests
        self.__attributes_set = set()
        self.__class_vars_set = set()
        self.__metas_set = set()
        self.__distributions_cache = {}

        no_data = np.array([])
        # Per-variable statistic arrays, aligned with `self.variables`.
        self._variable_types = self._variable_names = no_data
        self._min = self._max = no_data
        self._center = self._median = self._mode = no_data
        self._dispersion = no_data
        self._missing = no_data

        # Clear model initially to set default values
        self.clear()
        self.set_data(data)
def set_data(self, data):
if data is None:
self.clear()
return
self.beginResetModel()
self.table = data
self.domain = domain = data.domain
self.target_var = None
self.__attributes = self.__filter_attributes(
domain.attributes, self.table.X)
self.__class_vars = self.__filter_attributes(
domain.class_vars, self.table.Y.reshape((len(self.table.Y), -1)))
self.__metas = self.__filter_attributes(
domain.metas, self.table.metas)
self.__attributes_set = set(self.__metas[0])
self.__class_vars_set = set(self.__class_vars[0])
self.__metas_set = set(self.__metas[0])
self.n_attributes = len(self.variables)
self.n_instances = len(data)
self.__distributions_cache = {}
self.__compute_statistics()
self.endResetModel()
    def clear(self):
        """Reset the model to its empty state (no data, no statistics)."""
        self.beginResetModel()
        self.table = self.domain = self.target_var = None
        self.n_attributes = self.n_instances = 0
        self.__attributes = (np.array([]), np.array([]))
        self.__class_vars = (np.array([]), np.array([]))
        self.__metas = (np.array([]), np.array([]))
        self.__attributes_set = set()
        self.__class_vars_set = set()
        self.__metas_set = set()
        self.__distributions_cache.clear()
        self.endResetModel()
    @property
    def variables(self):
        """All displayed variables: attributes, class vars, then metas."""
        matrices = [self.__attributes[0], self.__class_vars[0], self.__metas[0]]
        if not any(m.size for m in matrices):
            return []
        return np.hstack(matrices)
    @staticmethod
    def _attr_indices(attrs):
        # type: (List) -> Tuple[List[int], List[int], List[int], List[int]]
        """Get the indices of different attribute types eg. discrete.

        Returns (discrete, continuous, time, string) index lists; plain
        continuous excludes TimeVariable since it subclasses it.
        """
        disc_var_idx = [i for i, attr in enumerate(attrs) if isinstance(attr, DiscreteVariable)]
        cont_var_idx = [i for i, attr in enumerate(attrs)
                        if isinstance(attr, ContinuousVariable)
                        and not isinstance(attr, TimeVariable)]
        time_var_idx = [i for i, attr in enumerate(attrs) if isinstance(attr, TimeVariable)]
        string_var_idx = [i for i, attr in enumerate(attrs) if isinstance(attr, StringVariable)]
        return disc_var_idx, cont_var_idx, time_var_idx, string_var_idx
    def __filter_attributes(self, attributes, matrix):
        """Filter out variables which shouldn't be visualized.

        Returns the kept variables and the matching columns of `matrix`.
        """
        attributes = np.asarray(attributes)
        mask = [idx for idx, attr in enumerate(attributes)
                if not isinstance(attr, self.HIDDEN_VAR_TYPES)]
        return attributes[mask], matrix[:, mask]
    def __compute_statistics(self):
        """Fill the per-variable statistic arrays (min/max/center/mode/
        median/dispersion/missing) for all visible variables."""
        # Since data matrices can of mixed sparsity, we need to compute
        # attributes separately for each of them.
        matrices = [self.__attributes, self.__class_vars, self.__metas]
        # Filter out any matrices with size 0
        matrices = list(filter(lambda tup: tup[1].size, matrices))

        self._variable_types = np.array([type(var) for var in self.variables])
        self._variable_names = np.array([var.name.lower() for var in self.variables])
        self._min = self.__compute_stat(
            matrices,
            discrete_f=lambda x: ut.nanmin(x, axis=0),
            continuous_f=lambda x: ut.nanmin(x, axis=0),
            time_f=lambda x: ut.nanmin(x, axis=0),
        )
        # Dispersion: entropy for discrete variables, coefficient of
        # variation for continuous ones.
        self._dispersion = self.__compute_stat(
            matrices,
            discrete_f=_categorical_entropy,
            continuous_f=coefficient_of_variation,
        )
        self._missing = self.__compute_stat(
            matrices,
            discrete_f=lambda x: ut.countnans(x, axis=0),
            continuous_f=lambda x: ut.countnans(x, axis=0),
            string_f=lambda x: (x == StringVariable.Unknown).sum(axis=0),
            time_f=lambda x: ut.countnans(x, axis=0),
            default_val=len(matrices[0]) if matrices else 0
        )
        self._max = self.__compute_stat(
            matrices,
            discrete_f=lambda x: ut.nanmax(x, axis=0),
            continuous_f=lambda x: ut.nanmax(x, axis=0),
            time_f=lambda x: ut.nanmax(x, axis=0),
        )

        # Since scipy apparently can't do mode on sparse matrices, cast it to
        # dense. This can be very inefficient for large matrices, and should
        # be changed
        def __mode(x, *args, **kwargs):
            if sp.issparse(x):
                x = x.todense(order="C")
            # return ss.mode(x, *args, **kwargs)[0]
            return ut.nanmode(x, *args, **kwargs)[0]  # Temporary replacement for scipy

        self._center = self.__compute_stat(
            matrices,
            discrete_f=None,
            continuous_f=lambda x: ut.nanmean(x, axis=0),
            time_f=lambda x: ut.nanmean(x, axis=0),
        )

        self._mode = self.__compute_stat(
            matrices,
            discrete_f=lambda x: __mode(x, axis=0),
            continuous_f=lambda x: __mode(x, axis=0),
            time_f=lambda x: __mode(x, axis=0),
        )

        self._median = self.__compute_stat(
            matrices,
            discrete_f=None,
            continuous_f=lambda x: ut.nanmedian(x, axis=0),
            time_f=lambda x: ut.nanmedian(x, axis=0),
        )
    def get_statistics_table(self):
        """Get the numeric computed statistics in a single matrix.

        Returns an Orange Table with one row per variable, or None when
        the model holds no data.
        """
        if self.table is None or not self.rowCount():
            return None

        # don't match TimeVariable, pylint: disable=unidiomatic-typecheck
        contivars = [type(var) is ContinuousVariable for var in self.variables]
        if any(contivars):
            # Blank out statistics that only make sense for plain
            # continuous variables.
            def c(column):
                return np.choose(contivars, [np.nan, column])

            x = np.vstack((
                c(self._center), c(self._median), self._dispersion,
                c(self._min), c(self._max), self._missing,
            )).T
            attrs = [ContinuousVariable(column.name) for column in (
                self.Columns.CENTER, self.Columns.MEDIAN,
                self.Columns.DISPERSION,
                self.Columns.MIN, self.Columns.MAX, self.Columns.MISSING)]
        else:
            x = np.vstack((self._dispersion, self._missing)).T
            attrs = [ContinuousVariable(name)
                     for name in ("Entropy", self.Columns.MISSING.name)]

        names = [var.name for var in self.variables]
        modes = [var.str_val(val)
                 for var, val in zip(self.variables, self._mode)]
        metas = np.vstack((names, modes)).T
        meta_attrs = [StringVariable('Feature'), StringVariable('Mode')]

        domain = Domain(attributes=attrs, metas=meta_attrs)
        statistics = Table.from_numpy(domain, x, metas=metas)
        statistics.name = f'{self.table.name} (Feature Statistics)'
        return statistics
def __compute_stat(self, matrices, discrete_f=None, continuous_f=None,
time_f=None, string_f=None, default_val=np.nan):
"""Apply functions to appropriate variable types. The default value is
returned if there is no function defined for specific variable types.
"""
if not matrices:
return np.array([])
results = []
for variables, x in matrices:
result = np.full(len(variables), default_val)
# While the following caching and checks are messy, the indexing
# turns out to be a bottleneck for large datasets, so a single
# indexing operation improves performance
*idxs, str_idx = self._attr_indices(variables)
for func, idx in zip((discrete_f, continuous_f, time_f), idxs):
idx = np.array(idx)
if func and idx.size:
x_ = x[:, idx]
if x_.size:
if not np.issubdtype(x_.dtype, np.number):
x_ = x_.astype(np.float64)
try:
finites = np.isfinite(x_)
except TypeError:
result[idx] = func(x_)
else:
mask = np.any(finites, axis=0)
if np.any(mask):
result[idx[mask]] = func(x_[:, mask])
if string_f:
x_ = x[:, str_idx]
if x_.size:
if x_.dtype is not np.object:
x_ = x_.astype(np.object)
result[str_idx] = string_f(x_)
results.append(result)
return np.hstack(results)
def sortColumnData(self, column):
"""Prepare the arrays with which we will sort the rows. If we want to
sort based on a single value e.g. the name, return a 1d array.
Sometimes we may want to sort by multiple criteria, comparing
continuous variances with discrete entropies makes no sense, so we want
to group those variable types together.
"""
# Prepare indices for variable types so we can group them together
order = [ContinuousVariable, TimeVariable,
DiscreteVariable, StringVariable]
mapping = {var: idx for idx, var in enumerate(order)}
vmapping = np.vectorize(mapping.__getitem__)
var_types_indices = vmapping(self._variable_types)
# Store the variable name sorted indices so we can pass a default
# order when sorting by multiple keys
# Double argsort is "inverse" argsort:
# data will be *sorted* by these indices
var_name_indices = np.argsort(np.argsort(self._variable_names))
# Prepare vartype indices so ready when needed
disc_idx, _, time_idx, str_idx = self._attr_indices(self.variables)
# Sort by: (type)
if column == self.Columns.ICON:
return var_types_indices
# Sort by: (name)
elif column == self.Columns.NAME:
# We use `_variable_names` here and not the indices because the
# last (or single) row is actually sorted and we don't want to sort
# the indices
return self._variable_names
# Sort by: (None)
elif column == self.Columns.DISTRIBUTION:
return np.ones_like(var_types_indices)
# Sort by: (type, center)
elif column == self.Columns.CENTER:
# Sorting discrete or string values by mean makes no sense
vals = np.array(self._center)
vals[disc_idx] = var_name_indices[disc_idx]
vals[str_idx] = var_name_indices[str_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (type, mode)
elif column == self.Columns.MODE:
# Sorting discrete or string values by mode makes no sense
vals = np.array(self._mode)
vals[disc_idx] = var_name_indices[disc_idx]
vals[str_idx] = var_name_indices[str_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (type, median)
elif column == self.Columns.MEDIAN:
# Sorting discrete or string values by median makes no sense
vals = np.array(self._median)
vals[disc_idx] = var_name_indices[disc_idx]
vals[str_idx] = var_name_indices[str_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (type, dispersion)
elif column == self.Columns.DISPERSION:
# Sort time variables by their dispersion, which is not stored in
# the dispersion array
vals = np.array(self._dispersion)
vals[time_idx] = self._max[time_idx] - self._min[time_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (type, min)
elif column == self.Columns.MIN:
# Sorting discrete or string values by min makes no sense
vals = np.array(self._min)
vals[disc_idx] = var_name_indices[disc_idx]
vals[str_idx] = var_name_indices[str_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (type, max)
elif column == self.Columns.MAX:
# Sorting discrete or string values by min makes no sense
vals = np.array(self._max)
vals[disc_idx] = var_name_indices[disc_idx]
vals[str_idx] = var_name_indices[str_idx]
return np.vstack((var_types_indices, np.zeros_like(vals), vals)).T
# Sort by: (missing)
elif column == self.Columns.MISSING:
return self._missing
return None
    def _sortColumnData(self, column):
        """Allow sorting with 2d arrays.

        Returns the sort-key data for `column`, reordered to match the
        proxy model's current source-row mapping.
        """
        data = np.asarray(self.sortColumnData(column))
        # Reorder rows to the model's current mapping before sorting
        data = data[self.mapToSourceRows(Ellipsis)]
        assert data.ndim <= 2, 'Data should be at most 2-dimensional'
        return data
    def _argsortData(self, data, order):
        """Return indices that sort `data` in the given Qt sort `order`.

        1d numeric data is sorted with NaNs pushed to the end; 1d non-numeric
        data (variable names) is argsorted directly.  2d data is lexsorted
        column by column, with the last column as the primary key.
        """
        if data.ndim == 1:
            if np.issubdtype(data.dtype, np.number):
                if order == Qt.DescendingOrder:
                    # Negate instead of reversing so the sort stays stable
                    data = -data
                indices = np.argsort(data, kind='stable')
                # Always sort NaNs last
                if np.issubdtype(data.dtype, np.number):
                    indices = np.roll(indices, -np.isnan(data).sum())
            else:
                # When not sorting by numbers, we can't do data = -data, but
                # use indices = indices[::-1] instead. This is not stable, but
                # doesn't matter because we use this only for variable names
                # which are guaranteed to be unique
                indices = np.argsort(data)
                if order == Qt.DescendingOrder:
                    indices = indices[::-1]
        else:
            assert np.issubdtype(data.dtype, np.number), \
                'We do not deal with non numeric values in sorting by ' \
                'multiple values'
            if order == Qt.DescendingOrder:
                data[:, -1] = -data[:, -1]
            # In order to make sure NaNs always appear at the end, insert a
            # indicator whether NaN or not. Note that the data array must
            # contain an empty column of zeros at index -2 since inserting an
            # extra column after the fact can result in a MemoryError for data
            # with a large amount of variables
            assert np.all(data[:, -2] == 0), \
                'Add an empty column of zeros at index -2 to accomodate NaNs'
            np.isnan(data[:, -1], out=data[:, -2])
            indices = np.lexsort(np.flip(data.T, axis=0))
        return indices
def headerData(self, section, orientation, role):
# type: (int, Qt.Orientation, Qt.ItemDataRole) -> Any
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return self.Columns.from_index(section).name
return None
    def data(self, index, role):
        # type: (QModelIndex, Qt.ItemDataRole) -> Any
        """Dispatch Qt item-data roles to small role-specific closures.

        The closures capture `row`, `column` and `attribute`, which are
        resolved once at the bottom of this method.
        """
        def background():
            # Colour rows by the domain part the variable belongs to
            if attribute in self.__attributes_set:
                return self.COLOR_FOR_ROLE[self.ATTRIBUTE]
            if attribute in self.__metas_set:
                return self.COLOR_FOR_ROLE[self.META]
            if attribute in self.__class_vars_set:
                return self.COLOR_FOR_ROLE[self.CLASS_VAR]
            return None
        def text_alignment():
            if column == self.Columns.NAME:
                return Qt.AlignLeft | Qt.AlignVCenter
            return Qt.AlignRight | Qt.AlignVCenter
        def decoration():
            if column == self.Columns.ICON:
                return gui.attributeIconDict[attribute]
            return None
        def display():
            # pylint: disable=too-many-branches
            def format_zeros(str_val):
                """Zeros should be handled separately as they cannot be negative."""
                if float(str_val) == 0:
                    num_decimals = min(self.variables[row].number_of_decimals, 2)
                    str_val = f"{0:.{num_decimals}f}"
                return str_val
            def render_value(value):
                # NaN renders as empty text, infinity as the infinity sign
                if np.isnan(value):
                    return ""
                if np.isinf(value):
                    return "∞"
                str_val = attribute.str_val(value)
                if attribute.is_continuous and not attribute.is_time:
                    str_val = format_zeros(str_val)
                return str_val
            if column == self.Columns.NAME:
                return attribute.name
            elif column == self.Columns.DISTRIBUTION:
                if isinstance(attribute,
                              (DiscreteVariable, ContinuousVariable)):
                    # Histogram scenes are expensive to build, so they are
                    # created lazily and cached per row
                    if row not in self.__distributions_cache:
                        scene = QGraphicsScene(parent=self)
                        histogram = Histogram(
                            data=self.table,
                            variable=attribute,
                            color_attribute=self.target_var,
                            border=(0, 0, 2, 0),
                            bottom_padding=4,
                            border_color='#ccc',
                        )
                        scene.addItem(histogram)
                        self.__distributions_cache[row] = scene
                    return self.__distributions_cache[row]
            elif column == self.Columns.CENTER:
                return render_value(self._center[row])
            elif column == self.Columns.MODE:
                return render_value(self._mode[row])
            elif column == self.Columns.MEDIAN:
                return render_value(self._median[row])
            elif column == self.Columns.DISPERSION:
                if isinstance(attribute, TimeVariable):
                    # Time dispersion is shown as the min-max range
                    return format_time_diff(self._min[row], self._max[row])
                elif isinstance(attribute, DiscreteVariable):
                    return f"{self._dispersion[row]:.3g}"
                else:
                    return render_value(self._dispersion[row])
            elif column == self.Columns.MIN:
                if not isinstance(attribute, DiscreteVariable):
                    return render_value(self._min[row])
            elif column == self.Columns.MAX:
                if not isinstance(attribute, DiscreteVariable):
                    return render_value(self._max[row])
            elif column == self.Columns.MISSING:
                missing = self._missing[row]
                perc = int(round(100 * missing / self.n_instances))
                return f'{missing} ({perc} %)'
            return None
        roles = {Qt.BackgroundRole: background,
                 Qt.TextAlignmentRole: text_alignment,
                 Qt.DecorationRole: decoration,
                 Qt.DisplayRole: display}
        if not index.isValid() or role not in roles:
            return None
        row, column = self.mapToSourceRows(index.row()), index.column()
        # Make sure we're not out of range
        if not 0 <= row <= self.n_attributes:
            return None
        attribute = self.variables[row]
        return roles[role]()
def rowCount(self, parent=QModelIndex()):
return 0 if parent.isValid() else self.n_attributes
def columnCount(self, parent=QModelIndex()):
return 0 if parent.isValid() else len(self.Columns)
def set_target_var(self, variable):
self.target_var = variable
self.__distributions_cache.clear()
start_idx = self.index(0, self.Columns.DISTRIBUTION)
end_idx = self.index(self.rowCount(), self.Columns.DISTRIBUTION)
self.dataChanged.emit(start_idx, end_idx)
class FeatureStatisticsTableView(QTableView):
    """Table view for the feature-statistics model.

    Keeps the histogram (distribution) column at a fixed aspect ratio and
    installs custom delegates so histograms render without focus rectangles.
    """
    # Width : height units enforced on the distribution column
    HISTOGRAM_ASPECT_RATIO = (7, 3)
    MINIMUM_HISTOGRAM_HEIGHT = 50
    MAXIMUM_HISTOGRAM_HEIGHT = 80
    def __init__(self, model, parent=None, **kwargs):
        super().__init__(
            parent=parent,
            showGrid=False,
            cornerButtonEnabled=False,
            sortingEnabled=True,
            selectionBehavior=QTableView.SelectRows,
            selectionMode=QTableView.ExtendedSelection,
            horizontalScrollMode=QTableView.ScrollPerPixel,
            verticalScrollMode=QTableView.ScrollPerPixel,
            **kwargs
        )
        self.setModel(model)
        hheader = self.horizontalHeader()
        hheader.setStretchLastSection(False)
        # Contents precision specifies how many rows should be taken into
        # account when computing the sizes, 0 being the visible rows. This is
        # crucial, since otherwise the `ResizeToContents` section resize mode
        # would call `sizeHint` on every single row in the data before first
        # render. However, this cannot be used here, since it only appears to
        # work properly when the widget is actually shown. When the widget is
        # not shown, `sizeHint` is called on every row.
        hheader.setResizeContentsPrecision(5)
        # Set a nice default size so that headers have some space around titles
        hheader.setDefaultSectionSize(100)
        # Set individual column behaviour in `set_data` since the logical
        # indices must be valid in the model, which requires data.
        hheader.setSectionResizeMode(QHeaderView.Interactive)
        columns = model.Columns
        hheader.setSectionResizeMode(columns.ICON.index, QHeaderView.ResizeToContents)
        hheader.setSectionResizeMode(columns.DISTRIBUTION.index, QHeaderView.Stretch)
        vheader = self.verticalHeader()
        vheader.setVisible(False)
        vheader.setSectionResizeMode(QHeaderView.Fixed)
        hheader.sectionResized.connect(self.bind_histogram_aspect_ratio)
        # TODO: This shifts the scrollarea a bit down when opening widget
        # hheader.sectionResized.connect(self.keep_row_centered)
        self.setItemDelegate(NoFocusRectDelegate(parent=self))
        self.setItemDelegateForColumn(
            FeatureStatisticsTableModel.Columns.DISTRIBUTION,
            DistributionDelegate(parent=self),
        )
    def bind_histogram_aspect_ratio(self, logical_index, _, new_size):
        """Force the horizontal and vertical header to maintain the defined
        aspect ratio specified for the histogram."""
        # Prevent function being executed more than once per resize.
        # Use `!=` instead of `is not`: identity comparison on integers only
        # works by accident of CPython's small-int caching.
        if logical_index != self.model().Columns.DISTRIBUTION.index:
            return
        ratio_width, ratio_height = self.HISTOGRAM_ASPECT_RATIO
        unit_width = new_size // ratio_width
        new_height = unit_width * ratio_height
        # Clamp the row height to the configured bounds
        effective_height = max(new_height, self.MINIMUM_HISTOGRAM_HEIGHT)
        effective_height = min(effective_height, self.MAXIMUM_HISTOGRAM_HEIGHT)
        self.verticalHeader().setDefaultSectionSize(effective_height)
    def keep_row_centered(self, logical_index, _1, _2):
        """When resizing the widget when scrolled further down, the
        positions of rows changes. Obviously, the user resized in order to
        better see the row of interest. This keeps that row centered."""
        # TODO: This does not work properly
        # Prevent function being executed more than once per resize
        if logical_index != self.model().Columns.DISTRIBUTION.index:
            return
        top_row = self.indexAt(self.rect().topLeft()).row()
        bottom_row = self.indexAt(self.rect().bottomLeft()).row()
        middle_row = top_row + (bottom_row - top_row) // 2
        self.scrollTo(self.model().index(middle_row, 0), QTableView.PositionAtCenter)
class NoFocusRectDelegate(QStyledItemDelegate):
    """Removes the light blue background and border on a focused item."""
    def paint(self, painter, option, index):
        # type: (QPainter, QStyleOptionViewItem, QModelIndex) -> None
        # Clear the focus flag so the default painter skips the focus rect
        option.state &= ~QStyle.State_HasFocus
        super().paint(painter, option, index)
class DistributionDelegate(NoFocusRectDelegate):
    """Render a cached QGraphicsScene histogram into the distribution cell."""
    def paint(self, painter, option, index):
        # type: (QPainter, QStyleOptionViewItem, QModelIndex) -> None
        scene = index.data(Qt.DisplayRole)  # type: Optional[QGraphicsScene]
        # Variables without a histogram (e.g. string variables) fall back to
        # the default painting
        if scene is None:
            return super().paint(painter, option, index)
        painter.setRenderHint(QPainter.Antialiasing)
        scene.render(painter, target=QRectF(option.rect), mode=Qt.IgnoreAspectRatio)
        # pylint complains about inconsistent return statements
        return None
class OWFeatureStatistics(widget.OWWidget):
    """Orange widget showing per-variable statistics in a sortable table."""
    name = 'Feature Statistics'
    description = 'Show basic statistics for data features.'
    icon = 'icons/FeatureStatistics.svg'
    class Inputs:
        data = Input('Data', Table, default=True)
    class Outputs:
        # Table restricted to the selected variables
        reduced_data = Output('Reduced Data', Table, default=True)
        # One row of statistics per variable
        statistics = Output('Statistics', Table)
    want_main_area = False
    settingsHandler = DomainContextHandler()
    settings_version = 2
    auto_commit = Setting(True)
    color_var = ContextSetting(None)  # type: Optional[Variable]
    # filter_string = ContextSetting('')
    # (sort column index, Qt sort order); restored when new data arrives
    sorting = Setting((0, Qt.AscendingOrder))
    selected_vars = ContextSetting([], schema_only=True)
    def __init__(self):
        super().__init__()
        self.data = None  # type: Optional[Table]
        self.model = FeatureStatisticsTableModel(parent=self)
        self.table_view = FeatureStatisticsTableView(self.model, parent=self)
        self.table_view.selectionModel().selectionChanged.connect(self.on_select)
        self.table_view.horizontalHeader().sectionClicked.connect(self.on_header_click)
        box = gui.vBox(self.controlArea)
        box.setContentsMargins(0, 0, 0, 4)
        # Match the box background to the table's base colour
        pal = QPalette()
        pal.setColor(QPalette.Window,
                     self.table_view.palette().color(QPalette.Base))
        box.setAutoFillBackground(True)
        box.setPalette(pal)
        box.layout().addWidget(self.table_view)
        self.color_var_model = DomainModel(
            valid_types=(ContinuousVariable, DiscreteVariable),
            placeholder='None',
        )
        self.cb_color_var = gui.comboBox(
            self.buttonsArea, master=self, value='color_var', model=self.color_var_model,
            label='Color:', orientation=Qt.Horizontal, contentsLength=13,
            searchable=True
        )
        self.cb_color_var.activated.connect(self.__color_var_changed)
        gui.rubber(self.buttonsArea)
        gui.auto_send(self.buttonsArea, self, "auto_commit")
    @staticmethod
    def sizeHint():  # pylint: disable=arguments-differ
        return QSize(1050, 500)
    @Inputs.data
    def set_data(self, data):
        # Clear outputs and reset widget state
        self.closeContext()
        self.selected_vars = []
        self.model.resetSorting()
        self.Outputs.reduced_data.send(None)
        self.Outputs.statistics.send(None)
        # Setup widget state for new data and restore settings
        self.data = data
        if data is not None:
            self.color_var_model.set_domain(data.domain)
            self.color_var = None
            # Default the colouring variable to the first class variable
            if self.data.domain.class_vars:
                self.color_var = self.data.domain.class_vars[0]
        else:
            self.color_var_model.set_domain(None)
            self.color_var = None
        self.model.set_data(data)
        self.openContext(self.data)
        self.__restore_selection()
        self.__restore_sorting()
        self.__color_var_changed()
        self.commit_statistics()
        self.commit.now()
    def __restore_selection(self):
        """Restore the selection on the table view from saved settings."""
        selection_model = self.table_view.selectionModel()
        selection = QItemSelection()
        if self.selected_vars:
            var_indices = {var: i for i, var in enumerate(self.model.variables)}
            selected_indices = [var_indices[var] for var in self.selected_vars]
            # Saved indices are source rows; map them through the current sort
            for row in self.model.mapFromSourceRows(selected_indices):
                selection.append(QItemSelectionRange(
                    self.model.index(row, 0),
                    self.model.index(row, self.model.columnCount() - 1)
                ))
        selection_model.select(selection, QItemSelectionModel.ClearAndSelect)
    def __restore_sorting(self):
        """Restore the sort column and order from saved settings."""
        sort_column, sort_order = self.sorting
        if self.model.n_attributes and sort_column < self.model.columnCount():
            self.model.sort(sort_column, sort_order)
            self.table_view.horizontalHeader().setSortIndicator(sort_column, sort_order)
    @pyqtSlot(int)
    def on_header_click(self, *_):
        # Store the header states so sorting survives new data / widget reload
        sort_order = self.model.sortOrder()
        sort_column = self.model.sortColumn()
        self.sorting = sort_column, sort_order
    @pyqtSlot(int)
    def __color_var_changed(self, *_):
        if self.model is not None:
            self.model.set_target_var(self.color_var)
    def on_select(self):
        # Translate view selection back to source rows, then to variables
        selection_indices = list(self.model.mapToSourceRows([
            i.row() for i in self.table_view.selectionModel().selectedRows()
        ]))
        self.selected_vars = list(self.model.variables[selection_indices])
        self.commit.deferred()
    @gui.deferred
    def commit(self):
        if not self.selected_vars:
            self.Outputs.reduced_data.send(None)
        else:
            # Send a table with only selected columns to output
            self.Outputs.reduced_data.send(self.data[:, self.selected_vars])
    def commit_statistics(self):
        if not self.data:
            self.Outputs.statistics.send(None)
            return
        # Send the statistics of the selected variables to output
        statistics = self.model.get_statistics_table()
        self.Outputs.statistics.send(statistics)
    def send_report(self):
        view = self.table_view
        self.report_table(view)
    @classmethod
    def migrate_context(cls, context, version):
        # v1 stored row indices; convert them to (variable, type) pairs
        if not version or version < 2:
            selected_rows = context.values.pop("selected_rows", None)
            if not selected_rows:
                selected_vars = []
            else:
                # This assumes that dict was saved by Python >= 3.6 so dict is
                # ordered; if not, the context hadn't worked anyway.
                all_vars = [
                    (var, tpe)
                    for (var, tpe) in chain(context.attributes.items(),
                                            context.metas.items())
                    # it would be nicer to use cls.HIDDEN_VAR_TYPES, but there
                    # is no suitable conversion function, and StringVariable (3)
                    # was the only hidden var when settings_version < 2, so:
                    if tpe != 3]
                selected_vars = [all_vars[i] for i in selected_rows]
            context.values["selected_vars"] = selected_vars, -3
if __name__ == '__main__':  # pragma: no cover
    # Quick manual preview of the widget on the iris dataset
    WidgetPreview(OWFeatureStatistics).run(Table("iris"))
| biolab/orange3 | Orange/widgets/data/owfeaturestatistics.py | owfeaturestatistics.py | py | 36,801 | python | en | code | 4,360 | github-code | 90 |
6778904582 | import pandas as pd
from bs4 import BeautifulSoup
import re
import nltk
from nltk.corpus import stopwords
from sklearn.ensemble import RandomForestClassifier
import pickle
def testdata():
    """Evaluate the pickled bag-of-words mood classifier on the test set.

    Loads the trained random forest and fitted vectorizer from their pickle
    files, cleans the test lyrics, predicts a mood for each song, writes the
    predictions to ``songdata_BOW_model.csv`` and returns the accuracy (in
    percent) against the labelled moods.
    """
    test = pd.read_csv("songs_test_set_data.csv")
    # Build the stop-word set once instead of once per lyric
    stops = set(stopwords.words("english"))
    def lyrics_to_words(raw_lyric):
        """Strip HTML, keep alphabetic words, lowercase and drop stop words."""
        lyric_text = BeautifulSoup(raw_lyric, "html5lib").get_text()
        letters_only = re.sub("[^a-zA-Z]", " ", lyric_text)
        words = letters_only.lower().split()
        meaningful_words = [w for w in words if w not in stops]
        return " ".join(meaningful_words)
    # Context managers so the pickle files are closed even on error
    with open('song_model.pickle', 'rb') as forest_pickle:
        forest = pickle.load(forest_pickle)
    with open('song_vectorizer.pickle', 'rb') as vectorizer_pickle:
        vectorizer = pickle.load(vectorizer_pickle)
    clean_test_lyrics = [lyrics_to_words(lyric) for lyric in test["Lyrics"]]
    test_data_features = vectorizer.transform(clean_test_lyrics).toarray()
    result = forest.predict(test_data_features)
    output = pd.DataFrame(data={
        "Title": test["Title"],
        "Actual_sentiment": test["Mood"],
        "Predicted_sentiment": result,
    })
    output.to_csv("songdata_BOW_model.csv")
    # Re-read the written file and score predictions against the labels
    compare_data = pd.read_csv("songdata_BOW_model.csv")
    num_data = compare_data["Title"].size
    count = sum(
        compare_data["Actual_sentiment"][i] == compare_data["Predicted_sentiment"][i]
        for i in range(num_data)
    )
    accuracy = (count / num_data) * 100
    return accuracy
# print("Data Tested")
| sohini-roy/major_project | songdata_test.py | songdata_test.py | py | 1,974 | python | en | code | 0 | github-code | 90 |
71168343977 | """ Clean String
DESCRIPTION/CONTEXT
---------------------
Orginally, used to remove crap from an email that I got from Google.
Here's a small passage of that email:
'...<1t1a1b1l1e1 1c1l1a1s1s1=1'1s1i1t1e1s1-1l1a1y1o1u1t1-1n1a1m1e1-1o1n1e1-1c1'
Basically, the process was to...
1) Copy email into string.txt
2) Remove 1's
3) Remove HTML tags
INPUT
-----
.txt file
OUTPUT
-----
None [Prints the cleaned string in console]
RUN
---
>>> ls
clean_string.py string.txt
>>> python3 clean_string.py string.txt
"""
import sys
import re
def clean(str_path):
    """Return the file's text with all '1' characters and HTML tags removed.

    Parameters
    ----------
    str_path : str
        Path to the text file to clean.

    Returns
    -------
    str
        The cleaned text.
    """
    # `with` guarantees the file handle is closed (the original leaked it)
    with open(str_path) as f:
        string = f.read()
    return re.sub('1|<.*?>', "", string)
if __name__ == "__main__":
    # Usage: python3 clean_string.py <file>
    path = "./" + sys.argv[1]
    print(clean(path))
| eltonlaw/misc-scripts | clean_string.py | clean_string.py | py | 770 | python | en | code | 0 | github-code | 90 |
4399927922 | import pygame
class Explosion(pygame.sprite.Sprite):
    """Animated explosion sprite with a one-shot sound effect.

    Cycles through five frames at a fixed speed and removes itself from all
    sprite groups once the animation has finished.
    """
    FRAME_COUNT = 5
    FRAME_SIZE = (100, 100)
    # Number of update() calls between animation frames
    EXPLOSION_SPEED = 20
    SOUND_VOLUME = 0.4
    def __init__(self, x, y):
        super().__init__()
        self.images = []
        for num in range(1, self.FRAME_COUNT + 1):
            img = pygame.image.load(f"./assets/explosion/exp{num}.png")
            img = pygame.transform.scale(img, self.FRAME_SIZE)
            self.images.append(img)
        self.index = 0
        self.surf = self.images[self.index]
        self.rect = self.surf.get_rect()
        self.rect.center = [x, y]
        self.counter = 0
        self.sound()
    def sound(self):
        """Play the explosion sound on a free mixer channel."""
        sound = pygame.mixer.Sound("./assets/explosion/explosion.wav")
        channel = pygame.mixer.find_channel(True)
        channel.set_volume(self.SOUND_VOLUME)
        channel.play(sound)
    def update(self):
        """Advance the animation; kill the sprite after the last frame."""
        self.counter += 1
        # Advance to the next frame once enough updates have elapsed
        if self.counter >= self.EXPLOSION_SPEED and self.index < len(self.images) - 1:
            self.counter = 0
            self.index += 1
            self.surf = self.images[self.index]
        # Last frame fully shown: remove the sprite from all groups
        # (removed leftover debug print)
        if self.index >= len(self.images) - 1 and self.counter >= self.EXPLOSION_SPEED:
            self.kill()
| ronaldo-ramos-dev/space-ghost | explosion.py | explosion.py | py | 1,155 | python | en | code | 0 | github-code | 90 |
18373697819 | from collections import deque
# N nodes, K colours, then the N-1 edges of the tree
N, K = map(int, input().split())
X = [list(map(int, input().split())) for _ in range(N - 1)]
# Adjacency list, 1-indexed
tree = [[] for _ in range(N + 1)]
for a, b in X:
    tree[a].append(b)
    tree[b].append(a)
MAX = 10 ** 5 + 1
MOD = 10 ** 9 + 7
# Factorial
fac = [0] * (MAX + 1)
fac[0] = 1
fac[1] = 1
for i in range(2, MAX + 1):
    fac[i] = fac[i - 1] * i % MOD
# Inverse factorial (top value via Fermat's little theorem, then downward)
finv = [0] * (MAX + 1)
finv[MAX] = pow(fac[MAX], MOD - 2, MOD)
for i in reversed(range(1, MAX + 1)):
    finv[i - 1] = finv[i] * i % MOD
def comb(n, k):
    """Binomial coefficient C(n, k) mod MOD, using precomputed factorials."""
    if k < 0 or n < k:
        return 0
    return fac[n] * finv[k] % MOD * finv[n - k] % MOD
# BFS from the root (node 1), counting colourings: each node's children must
# all differ from each other and from the node's own colour.
visited = [False] * (N + 1)
visited[0] = True
visited[1] = True
stack = deque([1])
ans = K  # K choices for the root's colour
while stack:
    u = stack.popleft()
    # Children = unvisited neighbours (the parent is already marked)
    child_num = sum(not visited[v] for v in tree[u])
    # Root's children pick from K-1 colours, other nodes' children from K-2
    # (must also avoid the grandparent's colour); ordered choice, hence the
    # factorial: P(K-1-[u!=1], child_num) = C(...) * child_num!
    ans *= comb(K - 1 - int(u != 1), child_num) * fac[child_num]
    ans %= MOD
    for v in tree[u]:
        if not visited[v]:
            # Mark the child being enqueued.  The original marked `u` here,
            # which only happened to work because the input is a tree.
            visited[v] = True
            stack.append(v)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02985/s466569212.py | s466569212.py | py | 1,024 | python | en | code | 0 | github-code | 90 |
1622518975 | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
def Conv2d(*args, **kwargs):
    """2D convolution with weight normalisation applied to its kernel."""
    return weight_norm(nn.Conv2d(*args, **kwargs))
class Encoder(nn.Module):
    """Convolutional encoder: 1x28x28 image -> latent mean and log-std.

    The final 1x1 convolution emits 2 * (d_model // 8) channels, which
    `forward` splits into two (d_model // 8)-channel maps: (mu, log_std).
    """
    def __init__(self, d_model):
        super().__init__()
        model = [
            # 28 -> 28
            Conv2d(1, d_model, (1, 1)),
            nn.ReLU(inplace=True),
            # 28 -> 14
            Conv2d(d_model, d_model // 2, (5, 5), (2, 2), padding=2),
            nn.ReLU(inplace=True),
            # 14 -> 7
            Conv2d(d_model // 2, d_model // 4, (5, 5), (2, 2), padding=2),
            nn.ReLU(inplace=True),
            # 7 -> 4
            Conv2d(d_model // 4, d_model // 8, (4, 4)),
            nn.ReLU(inplace=True),
            Conv2d(d_model // 8, 2 * d_model // 8, (1, 1)),
        ]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        # Split channels in half: first chunk is mu, second is log_std
        return self.model(x).chunk(2, dim=1)
class Decoder(nn.Module):
    """Upsampling decoder: (d_model // 8, 4, 4) latent -> 1x28x28 image."""
    def __init__(self, d_model):
        super().__init__()
        # Prior parameters for `sample_z`.  The original referenced
        # self.z_loc / self.z_std without ever defining them on Decoder
        # (they only exist on VAE), so sample()/sample_z() raised
        # AttributeError on a standalone Decoder.  Registered as
        # non-persistent buffers so existing state_dicts stay loadable.
        self.register_buffer(
            "z_loc", torch.zeros((d_model // 8, 4, 4)), persistent=False)
        self.register_buffer(
            "z_std", torch.ones((d_model // 8, 4, 4)), persistent=False)
        model = [
            Conv2d(d_model // 8, d_model // 8, (3, 3), padding=(1, 1)),
            nn.ReLU(inplace=True),
            # 4 -> 7
            nn.Upsample(scale_factor=2),
            Conv2d(d_model // 8, d_model // 4, (2, 2)),
            nn.ReLU(inplace=True),
            # 7 -> 14
            nn.Upsample(scale_factor=2),
            Conv2d(d_model // 4, d_model // 2, (5, 5), padding=2),
            nn.ReLU(inplace=True),
            # 14 -> 28
            nn.Upsample(scale_factor=2),
            Conv2d(d_model // 2, d_model, (3, 3), padding=(1, 1)),
            nn.ReLU(inplace=True),
            # 28 -> 28
            Conv2d(d_model, 1, (1, 1)),
        ]
        self.model = nn.Sequential(*model)
    def sample(self, batch_size=1):
        """Decode latents drawn from the prior; returns (images, z)."""
        z = self.sample_z(batch_size=batch_size)
        return self.model(z), z
    def forward(self, z):
        return self.model(z)
    def sample_z(self, batch_size=1):
        """Draw `batch_size` latents from the standard-normal prior."""
        z = torch.distributions.Normal(loc=self.z_loc, scale=self.z_std)
        return z.sample((batch_size,))
class VAE(nn.Module):
    """Variational autoencoder over 1x28x28 images.

    Latent shape is (d_model // 8, 4, 4); the prior is a standard normal.
    """
    def __init__(self, d_model):
        super().__init__()
        # Prior parameters as buffers so they follow the module's device
        self.register_buffer("z_loc", torch.zeros((d_model // 8, 4, 4)))
        self.register_buffer("z_std", torch.ones((d_model // 8, 4, 4)))
        self.encoder = Encoder(d_model=d_model)
        self.decoder = Decoder(d_model=d_model)
    def sample_z(self, batch_size: int = 1):
        """Draw `batch_size` latents from the standard-normal prior."""
        z = torch.distributions.Normal(self.z_loc, self.z_std)
        z = z.sample((batch_size,))
        return z
    def forward(self, x):
        """Encode, reparameterize and decode.

        Returns (reconstruction, (mu, log_std)).
        """
        mu, log_std = self.encoder(x)
        z = self.sample_z(x.size(0))
        # Reparameterization trick: z = mu + eps * sigma
        z = z * log_std.exp() + mu
        return self.decoder(z), (mu, log_std)
    def decode(self, z):
        return self.decoder(z)
| lgestin/generative_dl_toy_experiements | vae/unconditional/vae_uncond.py | vae_uncond.py | py | 2,785 | python | en | code | 0 | github-code | 90 |
12887079589 | # Import statements
from datetime import date
from django.template.loader import render_to_string
from django.db.models import Max, Avg
from django.http import JsonResponse
import django.utils.datetime_safe
from django.shortcuts import render, redirect, get_object_or_404
from item.models import ItemStats
from offer.forms.offer_form import CreateOfferForm, CreateOfferDetailsForm, PaymentForm, RatingForm
from django.contrib.auth import get_user_model
from rating.models import Rating
from user.forms.user_form import CheckOutUserUpdateForm, CheckOutProfileUpdateForm
from offer.models import Offer
from django.contrib.auth.decorators import login_required
from item.models import ItemImage,ItemStats
from user.models import User, Notification
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from .service import OfferService
@login_required
def offer_details(request, offer_id):
    """
    Displays the details of a specific offer.
    Args:
        request (HttpRequest): The HTTP request object.
        offer_id (int): The ID of the offer to display.
    Returns:
        HttpResponse: Renders the 'offer/offer_details.html' template with the offer details,
        including item images, highest price, and seller rating.
    Raises:
        Http404: If no offer with the given ID exists.
        PermissionDenied: If the current user is neither the buyer nor the seller of the offer.
    """
    # get_object_or_404 turns a missing offer into a 404 instead of an
    # unhandled DoesNotExist (500); consistent with the other views here
    offer = get_object_or_404(Offer, pk=offer_id)
    if request.user.id not in (offer.buyer.id, offer.seller.id):
        raise PermissionDenied()
    item_images = ItemImage.objects.filter(item=offer.item)
    # Highest competing bid on the same item, or a placeholder string
    highest_price = Offer.objects.filter(item_id=offer.item_id).aggregate(Max('amount'))['amount__max'] or '(No offers)'
    try:
        seller_rating = round(Rating.objects.filter(offer_id__seller=offer.seller).aggregate(Avg('rating'))['rating__avg'], 1)
    except TypeError:
        # Avg is None when the seller has no ratings yet
        seller_rating = 'No rating'
    return render(request, 'offer/offer_details.html', {
        "offer": offer,
        'item_images': item_images,
        'highest_price': highest_price,
        'seller_rating': seller_rating,
    })
@login_required
def open_offer_window(request, item_id):
    """Render the create-offer form and return it as JSON for a pop-up.

    Args:
        request (HttpRequest): The HTTP request object.
        item_id (int): The ID of the item for which the offer is being created.
    Returns:
        JsonResponse: Contains the rendered HTML form under 'html_form'.
    """
    context = {
        'offer_form': CreateOfferForm(),
        'offer_details_form': CreateOfferDetailsForm(),
        'item_id': item_id,
    }
    rendered = render_to_string('offer/create_offer.html', context,
                                request=request)
    return JsonResponse({'html_form': rendered})
@login_required
def create_offer(request, item_id):
    """
    Creates a new offer for the specified item.
    Args:
        request (HttpRequest): The HTTP request object.
        item_id (int): The ID of the item for which the offer is being created.
    Returns:
        HttpResponse: If the request method is POST and the form data is valid, redirects to the 'offer-details' page
        for the created offer.
        Otherwise, renders the 'offer/create_offer.html' template with the offer creation forms
        (bound forms with validation errors on an invalid POST).
    """
    # (Removed leftover debug print from the POST branch)
    if request.method == 'POST':
        offer_form = CreateOfferForm(data=request.POST)
        offer_details_form = CreateOfferDetailsForm(data=request.POST)
        if offer_form.is_valid() and offer_details_form.is_valid():
            offer_id = OfferService.create_offer(offer_form, offer_details_form, request.user.id, item_id)
            return redirect('offer-details', offer_id)
    else:
        offer_form = CreateOfferForm()
        offer_details_form = CreateOfferDetailsForm()
    return render(request, 'offer/create_offer.html', {
        'offer_form': offer_form,
        'offer_details_form': offer_details_form,
        'item_id': item_id
    })
@login_required
def change_offer_status(request, id, itemid, button):
    """
    Changes the status of an offer and sends notifications accordingly.
    Args:
        request (HttpRequest): The HTTP request object.
        id (int): The ID of the offer to change the status.
        itemid (int): The ID of the item associated with the offer.
        button (str): The new status for the offer.
    Returns:
        HttpResponse: Redirects to the 'item-offers' page for the specified item.
    """
    if request.method == 'POST':
        # 404 for a missing offer instead of an unhandled DoesNotExist
        offer = get_object_or_404(Offer, pk=id)
        # NOTE(review): `button` comes straight from the URL; consider
        # validating it against the set of allowed statuses
        offer.status = button
        offer.save()
        changed_offer_send_notification(offer)
        if offer.status == 'Accepted':
            # Accepting one offer rejects all competing offers on the item
            other_offers = Offer.objects.filter(item_id=offer.item_id).exclude(id=offer.id)
            for other_offer in other_offers:
                other_offer.status = "Rejected"
                changed_offer_send_notification(other_offer)
                other_offer.save()
    return redirect('item-offers', item_id=itemid)
def changed_offer_send_notification(offer):
    """
    Sends a notification for a changed offer status.
    Args:
        offer (Offer): The offer for which the status has changed.
    """
    notification = Notification(
        message=f'Your offer for "{offer.item}" has been {offer.status}!',
        datetime=django.utils.datetime_safe.datetime.now(),
        href='offer-details',
        href_parameter=offer.id,
        receiver=offer.buyer,
    )
    notification.save()
@login_required
def edit_offer(request, id, itemid):
    """
    Edits an existing offer.
    Args:
        request (HttpRequest): The HTTP request object.
        id (int): The ID of the offer to edit.
        itemid (int): The ID of the item associated with the offer.
    Returns:
        HttpResponse: If the user has permission and the request method is POST with valid form data,
        redirects to the 'offer-details' page for the edited offer.
        Otherwise, renders the 'offer/edit_offer.html' template with the offer edit forms.
    Raises:
        Http404: If no offer with the given ID exists.
        PermissionDenied: If the current user does not have permission to edit the offer.
    """
    # A single fetch suffices; the original queried the same offer twice
    offer_to_change = get_object_or_404(Offer, pk=id)
    if offer_to_change.buyer.id != request.user.id:
        raise PermissionDenied()
    if request.method == "POST":
        offer_form = CreateOfferForm(data=request.POST, instance=offer_to_change)
        offer_details_form = CreateOfferDetailsForm(data=request.POST, instance=offer_to_change.offerdetails)
        if offer_form.is_valid() and offer_details_form.is_valid():
            OfferService.edit_offer(offer_form, offer_details_form, offer_to_change.seller)
            return redirect('offer-details', offer_id=id)
    else:
        offer_form = CreateOfferForm(instance=offer_to_change)
        offer_details_form = CreateOfferDetailsForm(instance=offer_to_change.offerdetails)
    return render(request, 'offer/edit_offer.html', {
        'offer_form': offer_form,
        'offer_details_form': offer_details_form,
        'offer_to_change': offer_to_change,
        'id': id,
        'item_id': itemid
    })
@login_required
def delete_offer(request, id):
    """
    Deletes an existing offer.
    Args:
        request (HttpRequest): The HTTP request object.
        id (int): The ID of the offer to delete.
    Returns:
        HttpResponse: Redirects to the 'my-offers' page after deleting the offer.
    Raises:
        PermissionDenied: If the current user is not the offer's buyer.
    """
    offer = get_object_or_404(Offer, pk=id)
    if request.user.id != offer.buyer.id:
        raise PermissionDenied()
    offer.delete()
    return redirect('my-offers')
@login_required
def checkout(request, offer_id):
    """
    Handles the checkout process for an offer.
    Args:
        request (HttpRequest): The HTTP request object.
        offer_id (int): The ID of the offer to checkout.
    Returns:
        HttpResponse: If the user has permission and the request method is POST with valid form data,
        redirects to the appropriate page based on the checkout process.
        Otherwise, renders the 'offer/checkout.html' template with the checkout forms.
    Raises:
        PermissionDenied: If the current user does not have permission to checkout the offer.
    """
    auth_user = get_user_model()
    offer = get_object_or_404(Offer, pk=offer_id)
    # Resolve the buyer's profile chain (User -> UserInfo -> UserProfile)
    user_instance = get_object_or_404(User, pk=offer.buyer_id)
    user_info_instance = user_instance.userinfo
    user_profile_instance = user_info_instance.userprofile
    auth_user_instance = auth_user.objects.get(pk=offer.buyer.id)
    # All offers on the same item, forwarded to the service so competing
    # offers can be resolved on a successful checkout
    other_offers_on_item = Offer.objects.filter(item_id=offer.item_id)
    item_stats = get_object_or_404(ItemStats, pk=offer.item_id)
    if offer.buyer.id != request.user.id:
        raise PermissionDenied()
    else:
        if request.method == 'POST':
            user_form = CheckOutUserUpdateForm(request.POST, instance=user_instance)
            user_profile_form = CheckOutProfileUpdateForm(request.POST, instance=user_profile_instance)
            payment_form = PaymentForm(data=request.POST)
            rating_form = RatingForm(data=request.POST)
            # rating_form is not validated here — presumably optional, with
            # validation handled downstream; verify in OfferService
            if user_form.is_valid() and user_profile_form.is_valid() and payment_form.is_valid():
                redirect_url = OfferService.handle_checkout(user_form, user_profile_form, rating_form, request, auth_user_instance, user_info_instance, item_stats, other_offers_on_item, offer)
                return redirect(redirect_url)
        else:
            user_form = CheckOutUserUpdateForm(instance=user_instance)
            user_profile_form = CheckOutProfileUpdateForm(instance=user_profile_instance)
            payment_form = PaymentForm()
            rating_form = RatingForm()
        return render(request, 'offer/checkout.html', {
            'user_form': user_form,
            'user_profile_form': user_profile_form,
            'payment_form': payment_form,
            'rating_form': rating_form,
            'offer_id': offer_id,
        })
| steinarb1234/Fire-Sale | fire_sale/offer/views.py | views.py | py | 10,494 | python | en | code | 0 | github-code | 90 |
17967558929 | from collections import defaultdict,deque
import sys
finput=lambda: sys.stdin.readline().strip()
def main():
    # Reads a weighted tree with n nodes, then q queries and a root vertex k.
    # Computes the distance from k to every node with an iterative DFS and
    # answers each query (x, y) as dist[x] + dist[y].
    n=int(finput())
    edges=[tuple(map(int,finput().split())) for _ in range(n-1)]
    q,k=map(int,finput().split())
    xy=[tuple(map(int,finput().split())) for _ in range(q)]
    ed=defaultdict(deque)  # adjacency lists (deques so we can pop neighbours)
    wt=defaultdict(int)    # edge weights keyed by (u, v) in both directions
    for e in edges:
        ed[e[0]].append(e[1])
        ed[e[1]].append(e[0])
        wt[(e[0],e[1])]=e[2]
        wt[(e[1],e[0])]=e[2]
    stack=deque([k])       # DFS stack of ancestors; top is the parent of cv
    cv=stack[0]            # current vertex
    dist=defaultdict(int)  # distance from k (root distance is 0 by default)
    while stack:
        while ed[cv]:
            if cv!=k:
                # Skip the edge that leads back to the parent (stack top).
                if ed[cv][-1]==stack[-1]:
                    ed[cv].pop()
                    if not ed[cv]:
                        break
            stack.append(cv)
            cv=ed[cv].pop()
            # Distance of the child = distance of its parent + edge weight.
            dist[cv]=dist[stack[-1]]+wt[(stack[-1],cv)]
        cv=stack.pop()
    for que in xy:
        print(dist[que[0]]+dist[que[1]])
if __name__=='__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03634/s327647374.py | s327647374.py | py | 858 | python | en | code | 0 | github-code | 90 |
73875789738 | """beta_vae_train.py"""
import argparse
import sys
import os
import torch
import torch.nn.parallel
from torch.autograd import Variable
import torch.optim as optim
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../')))
sys.path.append(os.path.abspath(os.path.join(BASE_DIR, '../../dataloaders')))
import shapenet_part_loader
import shapenet_core13_loader
import shapenet_core55_loader
from model import PointCapsNet
from solver import kl_divergence, reconstruction_loss
from logger import Logger
USE_CUDA = True
LOGGING = True
def main():
    # Trains a PointCapsNet autoencoder on a ShapeNet variant using a
    # Chamfer reconstruction loss; checkpoints every 5 epochs.
    # NOTE(review): shadows the module-level USE_CUDA constant.
    USE_CUDA = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #capsule_net = BetaPointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
    capsule_net = PointCapsNet(opt.prim_caps_size, opt.prim_vec_size, opt.latent_caps_size, opt.latent_vec_size, opt.num_points)
    # Optionally resume from a checkpoint given via --model.
    if opt.model != '':
        capsule_net.load_state_dict(torch.load(opt.model))
    if USE_CUDA:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        capsule_net = torch.nn.DataParallel(capsule_net)
    capsule_net.to(device)
    # create folder to save trained models
    if not os.path.exists(opt.outf):
        os.makedirs(opt.outf)
    # create folder to save logs
    if LOGGING:
        log_dir='./logs'+'/'+opt.dataset+'_dataset_'+str(opt.latent_caps_size)+'caps_'+str(opt.latent_vec_size)+'vec'+'_batch_size_'+str(opt.batch_size)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logger = Logger(log_dir)
    # select dataset
    if opt.dataset=='shapenet_part':
        train_dataset = shapenet_part_loader.PartDataset(classification=True, npoints=opt.num_points, split='train')
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    elif opt.dataset=='shapenet_core13':
        train_dataset = shapenet_core13_loader.ShapeNet(normal=False, npoints=opt.num_points, train=True)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    elif opt.dataset=='shapenet_core55':
        train_dataset = shapenet_core55_loader.Shapnet55Dataset(batch_size=opt.batch_size, npoints=opt.num_points, shuffle=True, train=True)
    # BVAE CONFIGURATIONS HARDCODING
    #loss_mode = 'gaussian' # loss_mode was decoder_list in bVAE
    # NOTE(review): loss_mode is assigned but reconstruction_loss below is
    # called with the literal "chamfer" — confirm the variable is still needed.
    loss_mode = 'chamfer'
    global_iter = 0
    # training process for 'shapenet_part' or 'shapenet_core13'
    #capsule_net.train()
    if 'train_dataloader' in locals().keys() :
        for epoch in range(opt.n_epochs+1):
            # Step-wise learning-rate schedule (fresh optimizer each epoch).
            if epoch < 50:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.01)
            elif epoch<150:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.001)
            else:
                optimizer = optim.Adam(capsule_net.parameters(), lr=0.0001)
            capsule_net.train()
            train_loss_sum, recon_loss_sum, beta_loss_sum = 0, 0, 0
            for batch_id, data in enumerate(train_dataloader):
                global_iter += 1
                points, _= data
                # Drop the last, smaller batch.
                if(points.size(0)<opt.batch_size):
                    break
                points = Variable(points)
                points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N)
                if USE_CUDA:
                    points = points.cuda()
                optimizer.zero_grad()
                # ---- CRITICAL PART: new train loss computation (train_loss in bVAE was beta_vae_loss)
                #x_recon, latent_caps, caps_recon, logvar = capsule_net(points) # returns x_recon, latent_caps, caps_recon, logvar
                latent_capsules, x_recon = capsule_net(points)
                recon_loss = reconstruction_loss(points, x_recon, "chamfer") # RECONSTRUCTION LOSS
                train_loss = recon_loss
                # combining per capsule loss (pyTorch requires)
                train_loss.backward()
                optimizer.step()
                train_loss_sum += train_loss.item()
                # ---- END OF CRITICAL PART ----
                if LOGGING:
                    info = {'train loss': train_loss.item()}
                    for tag, value in info.items():
                        logger.scalar_summary(
                            tag, value, (len(train_dataloader) * epoch) + batch_id + 1)
                if batch_id % 50 == 0:
                    print('batch_no: %d / %d, train_loss: %f ' % (batch_id, len(train_dataloader), train_loss.item()))
            print('\nAverage train loss of epoch %d : %f\n' %\
                (epoch, (train_loss_sum / len(train_dataloader))))
            if epoch% 5 == 0:
                # Checkpoint every 5 epochs.
                dict_name = "%s/%s_dataset_%dcaps_%dvec_%d.pth"%\
                    (opt.outf, opt.dataset, opt.latent_caps_size, opt.latent_vec_size, epoch)
                torch.save(capsule_net.module.state_dict(), dict_name)
    # training process for 'shapenet_core55' (NOT UP-TO-DATE)
    else:
        raise NotImplementedError()
if __name__ == "__main__":
    print("[INFO] tmp_checkpoints folder will be in your program run folder")
    parser = argparse.ArgumentParser()
    # Capsule-network and training hyper-parameters.
    parser.add_argument('--batch_size', type=int, default=8, help='input batch size')
    parser.add_argument('--n_epochs', type=int, default=50, help='number of epochs to train for')
    parser.add_argument('--prim_caps_size', type=int, default=1024, help='number of primary point caps')
    parser.add_argument('--prim_vec_size', type=int, default=16, help='scale of primary point caps')
    parser.add_argument('--latent_caps_size', type=int, default=64, help='number of latent caps')
    parser.add_argument('--latent_vec_size', type=int, default=64, help='scale of latent caps')
    parser.add_argument('--num_points', type=int, default=2048, help='input point set size')
    parser.add_argument('--outf', type=str, default='tmp_checkpoints', help='output folder')
    parser.add_argument('--model', type=str, default='', help='model path')
    parser.add_argument('--dataset', type=str, default='shapenet_part', help='dataset')
    # opt is read as a module-level global by main().
    opt = parser.parse_args()
    print("Args:", opt)
    main()
| ArthLeu/beta-capsnet | main/AE/train_ae.py | train_ae.py | py | 6,406 | python | en | code | 0 | github-code | 90 |
13002951418 | '''
1
10 6 12 8 9 4 1 3
'''
def code(queue):
    """Run the SWEA "password generator" process on *queue* (mutated in place).

    The front value is repeatedly popped, decreased by a decrement that
    cycles 1, 2, 3, 4, 5, 1, ... and pushed to the back.  As soon as a value
    drops to zero or below, a 0 is pushed instead and the queue is returned.
    """
    step = 0
    while True:
        step = step % 5 + 1          # cycling decrement 1..5
        value = queue.pop(0) - step
        if value <= 0:
            queue.append(0)
            return queue
        queue.append(value)
T = 10  # SWEA format: always exactly 10 test cases
for tc in range(1, T+1):
    N = int(input())  # length of the queue (unused beyond reading)
    Queue = list(map(int, input().split()))
    ans = code(Queue)
    print(f'#{tc}', end=' ')
    for i in ans:
        print(i, end=' ')
    # The output could also be produced in one line, e.g.:
    # print(f'#{N} {" ".join(map(str, password(arr)))}')
    # print(*ans)
| hyeinkim1305/Algorithm | SWEA/D3/SWEA_1225_암호생성기.py | SWEA_1225_암호생성기.py | py | 592 | python | en | code | 0 | github-code | 90 |
#import numpy as np
#import functools
#import operator
#from itertools import combinations as comb
#from itertools import combinations_with_replacement as comb_with
#from itertools import permutations as perm
#import collections as C #most_common
#N = int(input())
#N,M= map(int,input().split())
#P = list(map(int,input().split()))
S= str(input())
#prod = functools.partial(functools.reduce, operator.mul)
#c=np.array(A)
#p=np.prod(A)
# Reference alternating patterns, long enough for inputs up to 2*10**5 chars.
one=[1,0]*10**5   # pattern 1,0,1,0,...
zero=[0,1]*10**5  # pattern 0,1,0,1,...
o=0  # mismatches of S against the "10..." pattern
z=0  # mismatches of S against the "01..." pattern
for i in range(len(S)):
    if one[i] != int(S[i]):
        o+=1
    if zero[i] != int(S[i]):
        z+=1
# Minimum number of character flips to make S strictly alternating.
print(min(o,z))
| Aasthaengg/IBMdataset | Python_codes/p03073/s288742088.py | s288742088.py | py | 604 | python | en | code | 0 | github-code | 90 |
# person = ["Quý", 20, 0, "Vĩnh Phúc", 2, ["Manga","Coding"], 3, 20]
# Dictionary demo: the same data as the list above, but with named keys.
person = {
    "name": "Quy",
    "Age": 20,
    "ex": 0,
    "favs": ["Manga","Coding"]
}
# print(person)
# name = person["favs"]
# print(name)
# Add a new key, then overwrite its value.
person["length"] = 20
# print(person)
person["length"] = 10
# print(person)
# key = "length"
# del person["length"]
# if key in person:
#     print(person[key])
# else:
#     print("Not Found")
for k in person:
    print(k)  # iterate over keys
for v in person.values():
    print(v)  # iterate over values
for k,v in person.items():
    print(k, ":", v)  # iterate over key/value pairs
33885507418 | """ business days module """
from datetime import timedelta, date
from collections.abc import Generator
import holidays
def business_days_list(start_date: date, end_date: date) -> list[date]:
    """Return all U.S. business days in [start_date, end_date] as a list.

    Delegates to the :func:`business_days` generator instead of duplicating
    the weekday/holiday filtering logic a second time.
    """
    return list(business_days(start_date, end_date))
def business_days(start_date: date, end_date: date) -> Generator[
        date, None, None]:
    """Yield each U.S. business day from start_date to end_date inclusive.

    A business day is a weekday (Mon-Fri) that is not a U.S. holiday.
    """
    us_holidays = holidays.UnitedStates()
    total_days = (end_date - start_date).days + 1
    for offset in range(total_days):
        day = start_date + timedelta(days=offset)
        if day.weekday() < 5 and day not in us_holidays:
            yield day
def main() -> None:
    """Demo: print the business days in the first week of July 2021."""
    start_date = date(2021, 7, 1)
    end_date = date(2021, 7, 7)
    # Generator is consumed twice below via two separate calls.
    for working_day in business_days(start_date, end_date):
        print("in the loop")
        print(working_day.strftime("%Y-%m-%d"))
    print("\n".join([ working_day.strftime("%Y-%m-%d")
        for working_day in business_days(start_date, end_date)]))
if __name__ == "__main__":
    main()
44790022216 | from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from math import ceil
class PaginationToFrontEnd(PageNumberPagination):
    """Pagination that adds front-end friendly fields to DRF's response.

    Besides the standard next/previous links it exposes the raw page
    numbers, the currently active page and the total page count.
    """
    page_size = 15

    def get_paginated_response(self, data):
        """Build the paginated response with extra navigation metadata."""
        nextPageNumber = self.get_next_link()
        previousPageNumber = self.get_previous_link()
        PageActive = 1
        # Total pages, rounded up.
        TotalPages = ceil(self.page.paginator.count / self.page_size)
        if data:
            # Extract the "page" query-string value from the next link,
            # e.g. ".../?page=3" -> "3".  The link may be None (no next
            # page) or lack a query string — previously caught with a bare
            # ``except:`` that also hid real errors.
            try:
                nextPageNumber = nextPageNumber.split('?')[1]
                nextPageNumber = nextPageNumber.split('=')[1]
            except (AttributeError, IndexError):
                nextPageNumber = None
            try:
                previousPageNumber = previousPageNumber.split('?')[1]
                previousPageNumber = previousPageNumber.split('=')[1]
            except (AttributeError, IndexError):
                # DRF omits "?page=1" from the previous link while on page 2,
                # so a parse failure on a non-None link means page 1.
                if previousPageNumber is None:
                    previousPageNumber = None
                else:
                    previousPageNumber = "1"
            # Derive the active page from whichever neighbour link exists.
            if nextPageNumber is not None:
                PageActive = int(nextPageNumber) - 1
            elif previousPageNumber is not None:
                PageActive = int(previousPageNumber) + 1
            else:
                PageActive = 1
        return Response({
            'links': {
                'next': self.get_next_link(),
                'previous': self.get_previous_link(),
                'nextPageNumber': nextPageNumber,
                'previousPageNumber': previousPageNumber,
                'PageActive': str(PageActive),
                'TotalPages': TotalPages
            },
            'count': self.page.paginator.count,
            'results': data
        })
class PaginationToEmprestimo(PageNumberPagination := PaginationToFrontEnd) if False else PaginationToEmprestimo_placeholder:
    pass
class PaginationToRecomedacao(PaginationToFrontEnd):
    """
    Custom pagination for recommendations: only overrides the page size (5).
    """
    page_size = 5
7462326139 | import argparse
import os
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.request import URLopener
import sanity_check
BASE_URL = "http://flibusta.is/sql/"
parser = argparse.ArgumentParser(description="Downloads data dumps and converts to sqlite3")
parser.add_argument('--skip_download', default=False, action='store_true', help='Skips download')
parser.add_argument('--base_url', default=BASE_URL, help='Flibusta base URL')
args = parser.parse_args()
def build_url_list():
    """Return the hrefs of all library-dump links (lib.lib*) on the index page."""
    page = BeautifulSoup(urlopen(args.base_url).read(), 'html.parser')
    return [anchor['href'] for anchor in page.find_all(name="a")
            if anchor['href'].startswith('lib.lib')]
if __name__ == '__main__':
    if not args.skip_download:
        # Reuse build_url_list() (which honors --base_url) instead of
        # re-scraping with a hard-coded URL that shadowed the flag.
        for href in build_url_list():
            url = args.base_url + href
            print("Downloading " + url)
            os.system("wget " + url)
    print("Running gunzip ...")
    os.system("gunzip *.gz")
    os.system("cat lib*.sql > sqldump.sql")
    print("Converting to SQLite3...")
    os.system("/usr/bin/awk -f mysql2sqlite sqldump.sql | sqlite3 flibusta_new.db")
    print("Applying SQL scripts...")
    os.system("sqlite3 flibusta_new.db < SequenceAuthor.sql")
    print("All done")
    os.system("rm lib*.sql")
    # Only replace the live database if the new one passes the sanity check.
    if sanity_check.check_file_sanity("flibusta_new.db"):
        os.system("rm flibusta.db")
        os.system("mv flibusta_new.db flibusta.db")
    else:
        print("New file not sane, keeping things as is")
| sgzmd/flibustier | import/importer.py | importer.py | py | 1,807 | python | en | code | 0 | github-code | 90 |
8447024767 | '''
wapp to check if given string are anagrams
s1 = listen
s2 = silent
'''
s1 = input("enter first string ")
s2 = input("enter second string ")
ls1= sorted(s1)
ls2= sorted(s2)
if(ls1 == ls2):
print("anagram")
else:
print(" no anagram")
| dravya08/workshop-python | L5/p4.py | p4.py | py | 241 | python | en | code | 0 | github-code | 90 |
22771344786 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
import time
import pyrebase
# NOTE(review): hardcoded Firebase credentials — move to an environment
# variable or an untracked config file before publishing.
config = {
    "apiKey": "AIzaSyAHYW60cI1kChuFx3Z__DeLvKBGyrGLkZg",
    "authDomain": "gnu6-d9c5b.firebaseapp.com",
    "databaseURL": "https://gnu6-d9c5b-default-rtdb.firebaseio.com",
    "storageBucket": "gnu6-d9c5b.appspot.com",
}
firebase = pyrebase.initialize_app(config)
driver = webdriver.Chrome("C:\Server_gnu\chromedriver")
department_url = "https://newgh.gnu.ac.kr/anse/na/ntt/selectNttList.do?mi=3041&bbsId=1383"  # department notice board
Dormitory_url = "https://newgh.gnu.ac.kr/dorm/na/ntt/selectNttList.do?mi=7285&bbsId=2489"  # dormitory notice board
class Initialization :
    """Crawls a GNU notice board for posts whose title matches a search term
    and stores each post's number, title and URL in Firebase."""
    def __init__(self):
        self.Posting_Address = 0  # URL of the post currently being processed
    def crawler(self, Homepage_name, Homepage_url ,searchValue):
        driver.get(Homepage_url)  # open the department or dormitory notice board
        select = Select(driver.find_element_by_name("searchType")) # search field: title ('제목')
        select.select_by_visible_text('제목')
        elem = driver.find_element_by_name("searchValue") # type the search keyword
        elem.send_keys(searchValue)
        elem.send_keys(Keys.RETURN) # press Enter to search
        time.sleep(2)
        # NOTE(review): ``list`` shadows the builtin; only its length is used.
        list = driver.find_elements_by_css_selector(".nttInfoBtn") # result rows
        length = len(list)
        # Walk the result rows from last to first, opening each post.
        while length > 0 :
            number = driver.find_elements_by_css_selector("td.BD_tm_none")[length-1].text
            Title = driver.find_elements_by_css_selector(".nttInfoBtn")[length-1].text
            driver.find_elements_by_css_selector(".nttInfoBtn")[length-1].click()
            time.sleep(2)
            self.Posting_Address = driver.current_url # URL of the opened post
            db = firebase.database()
            # Keys '제목'/'주소' mean title/address; kept as stored in Firebase.
            input_data = {"제목": Title , "주소" : self.Posting_Address}
            db.child( Homepage_name + "/" + searchValue).child(number).set(input_data)
            driver.back() # back to the result list
            time.sleep(2)
            print("업데이트가 되었습니다")
            time.sleep(2)
            length -= 1
init = Initialization()
# Crawl department notices for course registration and scholarship posts,
# and dormitory notices for resident announcements, then close the browser.
init.crawler("학과공지사항", department_url, "수강신청 일정")
init.crawler("학과공지사항", department_url, "국가장학금")
init.crawler("기숙사공지사항", Dormitory_url, "년도 학생생활관 관생")
driver.close()
s=input()
an=[]  # per-letter worst-case distance to the next occurrence
for m in "qwertyuiopasdfghjklzxcvbnm":
    i=0
    # d[j] starts as len(s)-j, i.e. "no occurrence at or after j".
    d=[len(s)-j for j in range(len(s))]
    while s.find(m,i)>=0:
        t=s.find(m,i)
        # Every position j <= t is at most t-j away from this occurrence.
        for j in range(t+1):
            d[j]=min(t-j,d[j])
        i=t+1
    # NOTE(review): mi is computed but never used.
    mi=0
    for i in range(len(s)):
        mi=max(mi,d[i])
    an.append(max(d))
# Minimum over all letters of the worst-case distance.
print(min(an))
| Aasthaengg/IBMdataset | Python_codes/p03687/s944262960.py | s944262960.py | py | 290 | python | en | code | 0 | github-code | 90 |
35225743429 | import os
import shutil
from argparse import ArgumentParser
from os import path as osp
from pathlib import Path
import pandas as pd
def file_info(f):
    """Split a recording file name of the form
    ``<child id>_<assessment>_<group>_<date>_<time>_<camera>`` into its six parts."""
    child_id, assessment, group, date, time_of_day, camera = f.split('_')
    return child_id, assessment, group, date, time_of_day, camera
def get_data_from_pc(src_root, dst_root, table_file):
    r"""Copy files from PC to removable disk. Must support agreement table.

    Args:
        src_root (str): Directory to scan.
        dst_root (str): Destination directory.
        table_file (str): A csv file with 'patient_key', 'redcap_repeat_instance',
            'redcap_repeat_instrument' - Child id, Date, Assessment respectively.
    """
    df = pd.read_csv(table_file)
    df['assessment'] = df['redcap_repeat_instrument'].apply(lambda a: "ADOS" if 'ados' in a else "PLS" if "pls" in a else "Cognitive")
    # read_csv yields plain strings; parse before using the .dt accessor
    # (the previous code called .dt on an object column, which raises).
    df['date'] = pd.to_datetime(df['redcap_repeat_instance']).dt.strftime('%d%m%y')
    df['child_id'] = df['patient_key'].astype(str)

    def agreed(f):
        # NOTE(review): returns True when the recording is NOT in the table —
        # confirm whether the agreement table is an allow-list or a deny-list.
        cid, assessment, _, date, _, _ = file_info(f)
        return df[(df['child_id'] == cid) & (df['assessment'] == assessment) & (df['date'] == date)].empty

    for root, dirs, files in os.walk(src_root):
        # '.mp4' was previously written as 'mp4', which never equals the
        # dotted extension returned by os.path.splitext, so .mp4 recordings
        # were silently skipped.
        files = [f for f in files if osp.splitext(f.lower())[1] in ['.avi', '.mp4'] and agreed(f)]
        for f in files:
            src = osp.join(root, f)
            dst = osp.join(dst_root, f)
            # shutil.copyfile(src, dst)
            print(f'{src}\n---->\n{dst}')
def add_data_to_storage(src_root, dst_root):
    r"""Copy files from removable disk to our main storage.

    Args:
        src_root (str): Directory to scan.
        dst_root (str): Storage source root.
    """
    for root, dirs, files in os.walk(src_root):
        # '.mp4' was previously written as 'mp4', which can never equal the
        # dotted extension returned by os.path.splitext, so .mp4 recordings
        # were silently skipped.
        files = [f for f in files if osp.splitext(f.lower())[1] in ['.avi', '.mp4']]
        for f in files:
            cid, assessment, group, date, time, camera = file_info(f)
            src = osp.join(root, f)
            # Layout: <group>/<child>/<child>_<assessment>_<group>_<date>/<file>
            dst = osp.join(dst_root, group, cid, f'{cid}_{assessment}_{group}_{date}', f)
            # Path(dst).mkdir(parents=True, exist_ok=True)
            # shutil.copyfile(src, dst)
            print(f'{src}\n---->\n{dst}')
if __name__ == '__main__':
    # CLI: -m get  copies agreed recordings PC -> disk (needs -t table csv);
    #      -m add  copies recordings disk -> storage tree.
    parser = ArgumentParser()
    parser.add_argument("-m", "--mode", type=str)
    parser.add_argument("-s", "--src", type=str)
    parser.add_argument("-d", "--dst", type=str)
    parser.add_argument("-t", "--table", type=str, default='')
    args = vars(parser.parse_args())
    mode = args['mode']
    if mode == 'get':
        get_data_from_pc(args['src'], args['dst'], args['table'])
    else:
        add_data_to_storage(args['src'], args['dst'])
# Instructions:
# 1. Install python 3.6 <
# 2. Run: pip install pandas
# 3. Go to the directory of "autism_center_data_storage.py" and run either:
# > python autism_center_data_storage.py -m get -s "C:/users/recordings" -d "F:/recordings_backup" -t "F:/aggrement_table.csv"
# > python autism_center_data_storage.py -m add -s "F:/recordings_backup" -d "Z:/recordings" | TalBarami/SkeletonTools | skeleton_tools/utils/autism_center_data_storage.py | autism_center_data_storage.py | py | 3,035 | python | en | code | 0 | github-code | 90 |
35615688597 | import hypothesis
import numpy as np
import pytest
from epyg import epyg as epyg
from epyg import operators
from hypothesis import assume, example, given
@pytest.fixture
def state():
    # Fresh, fully relaxed EPG state shared by the tests below.
    return epyg.epg()
def test_call_multiply():
    # Applying an operator via ``*`` and via call syntax must be equivalent.
    a = epyg.epg()
    b = epyg.epg()
    T = operators.Transform(alpha=90.0, phi=0.0)
    T * a
    T(b)
    assert a == b
def test_identity(state):
    # The identity operator must leave any state unchanged.
    I = operators.Identity()  # noqa: E741
    assert (I * state) == state
def test_T_with_no_flip(state):
    # A zero flip angle is a no-op regardless of the phase.
    T = operators.Transform(alpha=0.0, phi=180.0)
    assert (T * state) == state
@given(alpha=hypothesis.strategies.floats(), phi=hypothesis.strategies.floats())
def test_neutral_flip_and_backflip(alpha, phi):
    # A rotation followed by its inverse rotation must leave the state
    # unchanged.  The leftover debug print and the duplicate application of
    # the operator pair were removed.
    state = epyg.epg()
    T_forward = operators.Transform(alpha=alpha, phi=phi)
    T_backward = operators.Transform(alpha=alpha, phi=-1.0 * phi)
    assert (T_backward * (T_forward * state)) == state
@given(alpha=hypothesis.strategies.floats(), phi=hypothesis.strategies.floats())
@example(alpha=np.pi / 2.0, phi=0.0)
def test_trivial_flip(alpha, phi):
    # After a single pulse on a fully relaxed unit state, the transverse
    # magnetisation magnitude must equal |sin(alpha)|.
    state: epyg.epg = epyg.epg(m0=1.0)
    T_forward = operators.Transform(alpha=alpha, phi=phi)
    state = T_forward * state
    magnetisation = np.abs(state.get_f())
    assume(not np.isnan(magnetisation))  # Large phase values may create a nan value
    assert magnetisation == pytest.approx(np.abs(np.sin(alpha)), 1e-8)
@given(shifts=hypothesis.strategies.integers(min_value=-128, max_value=128))
@example(shifts=1)
@example(shifts=-1)
def test_shift_and_shift_back_relaxed_state(shifts):
    # Shifting a relaxed state and shifting back must round-trip.
    # The strategy previously used ``max_value=-128``, which collapsed the
    # range to the single value -128; it now covers [-128, 128] as the
    # explicit +1/-1 examples suggest was intended.
    state = epyg.epg(initial_size=256)
    S_forward = operators.Shift(shifts=shifts)
    S_back = operators.Shift(shifts=-shifts)
    assert (S_back * (S_forward * state)) == state
@given(shifts=hypothesis.strategies.integers(min_value=0, max_value=64))
@example(shifts=1)
def test_shift_and_shift_back_excited_state(shifts):
    # The shift round-trip must also hold after an excitation pulse.
    state = epyg.epg(initial_size=256)
    T = operators.Transform(alpha=np.deg2rad(45.0), phi=np.deg2rad(45.0))
    state = T * state
    S_forward = operators.Shift(shifts=shifts)
    S_back = operators.Shift(shifts=-shifts)
    assert (S_back * (S_forward * state)) == state
@given(relax_factor=hypothesis.strategies.floats(min_value=0.0))
@example(relax_factor=1.0)
def test_relaxed_state_can_not_relax_further(relax_factor):
    # Relaxation applied to an already relaxed state must be a no-op.
    state = epyg.epg(initial_size=256)
    E = operators.Epsilon(TR_over_T1=relax_factor, TR_over_T2=relax_factor)
    assert state == E * state
@given(relax_factor=hypothesis.strategies.floats(min_value=0.0))
def test_excited_state_will_return_to_equilibrium(relax_factor):
    # With infinite relaxation, any excited state returns to equilibrium.
    state = epyg.epg(initial_size=256)
    T = operators.Transform(alpha=np.deg2rad(90.0), phi=0.0)
    E = operators.Epsilon(TR_over_T1=np.inf, TR_over_T2=np.inf)
    assert E * (T * state) == state
| brennerd11/EpyG | tests/test_operators.py | test_operators.py | py | 2,895 | python | en | code | 4 | github-code | 90 |
12333429097 | #!/usr/bin/env python3
# robot_say_hi.py
class Robot:
    """A robot with an optional name and build year that can introduce itself."""

    def __init__(self, name=None, build_year=None):
        self.name = name              # robot's name, or None if unnamed
        self.build_year = build_year  # year of construction, or None if unknown

    def say_hi(self):
        """Print a two-line self-introduction: name, then build year."""
        if self.name:
            greeting = "Hi, I am " + self.name
        else:
            greeting = "Hi, I am a robot without a name"
        print(greeting)
        if self.build_year:
            built = "I was built in " + str(self.build_year)
        else:
            built = "It's not known, when I was created!"
        print(built)
if __name__ == "__main__":
    # Demo: one fully specified robot, one configured after construction.
    x = Robot("Henry", 2008)
    y = Robot()
    y.name = "Marvin"
    x.say_hi()
    y.say_hi()
| philippdrebes/MSCIDS_PDS01 | Pycharm/SW08/exercise/_01_Robots_parts1/robot_say_hi.py | robot_say_hi.py | py | 674 | python | en | code | 0 | github-code | 90 |
22945145608 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import Dict, Any, List
from datetime import datetime
from collections import Counter
from tabun_stat import utils
from tabun_stat.processors.base import BaseProcessor
class CharsProcessor(BaseProcessor):
    """Counts how often every character appears in post and comment bodies
    and writes the totals (most frequent first) to ``chars.csv``."""
    def __init__(self) -> None:
        super().__init__()
        # Maps a character to [total count, unix time of first sighting].
        self._chars = {}  # type: Dict[str, List[int]]
    def process_post(self, post: Dict[str, Any]) -> None:
        self._process(post['body'], post['created_at'])
    def process_comment(self, comment: Dict[str, Any]) -> None:
        self._process(comment['body'], comment['created_at'])
    def _process(self, body: str, tm: datetime) -> None:
        """Fold the character frequencies of *body* into the running totals."""
        body = body.strip()
        counter = Counter(body)
        for c, cnt in counter.items():
            try:
                self._chars[c][0] += cnt
            except KeyError:
                # First sighting: remember the count and the unix timestamp.
                self._chars[c] = [cnt, int((tm - datetime(1970, 1, 1, 0, 0, 0)).total_seconds())]
    def stop(self) -> None:
        """Write chars.csv sorted by count desc, then first-seen time asc."""
        assert self.stat
        with open(os.path.join(self.stat.destination, 'chars.csv'), 'w', encoding='utf-8') as fp:
            fp.write(utils.csvline('Символ', 'Сколько раз встретился'))
            for c, x in sorted(self._chars.items(), key=lambda x: [-x[1][0], x[1][1]]):
                cnt, created_at_unix = x
                # created_at = utils.apply_tzinfo(
                #     datetime.utcfromtimestamp(created_at_unix),
                #     self.stat.timezone
                # )
                # Replace characters that would look ambiguous in the CSV with
                # their Russian names; flag Latin lookalikes of Cyrillic letters.
                if c == '"':
                    c = 'Кавычка'
                elif c == ',':
                    c = 'Запятая'
                elif c == ' ':
                    c = 'Пробел'
                elif c == '\u00a0':
                    c = 'Неразр. пробел'
                elif c == '\t':
                    c = 'Табуляция'
                elif c == '\n':
                    c = 'Перенос строки'
                elif c == '\r':
                    c = 'Возврат каретки'
                elif c.lower() in ('a', 'o', 'e', 'c', 'k', 'p', 'x', 'm') or c in ('B', 'T', 'H', 'y'):
                    c = c + ' (англ.)'
                elif not c.strip():
                    # Any other whitespace: show its repr so it stays visible.
                    c = repr(c)
                fp.write(utils.csvline(c, cnt))
        super().stop()
| andreymal/stuff | tabun_stat/tabun_stat/processors/chars.py | chars.py | py | 2,376 | python | en | code | 7 | github-code | 90 |
72685904936 | import jieba
def preprocess(ctx):
    """Delete tabs, newlines and a fixed set of punctuation (plus the
    two-character sequence quote-space) from *ctx* and return the result."""
    import re
    unwanted = re.compile(r'\t|\n|\.|-|:|;|\)|\(|\?|" ')
    return unwanted.sub('', ctx)
def readfile(path: str, suff: str = "txt"):
    """Read and return the whole contents of ``<path>.<suff>`` as UTF-8 text."""
    filename = path + "." + suff
    with open(filename, mode="r", encoding="utf-8") as handle:
        return handle.read()
def genpic(font: str = "serif", name: str = "test", ctx=None, stopword="", mask=None):
    """Render a word cloud image to ``<name>.png``.

    Args:
        font: Unused; kept for backward compatibility (the font path below
            is hard-coded so Chinese text renders without mojibake).
        name: Output file name without the .png extension.
        ctx: Mapping of word -> frequency fed to WordCloud.
            (Previously a mutable default argument ``[]``.)
        stopword: Words to exclude from the cloud.
        mask: Optional numpy image array shaping the cloud.  Defaults to the
            module-level ``mask`` built in ``__main__`` — the function used
            to read that global implicitly and crashed with a NameError when
            imported as a library.
    """
    from wordcloud import WordCloud
    if ctx is None:
        ctx = {}
    if mask is None:
        # Fall back to the global set up by the __main__ block, as before.
        mask = globals().get("mask")
    wc = WordCloud(
        # Chinese-capable font path; without it CJK text renders as boxes.
        font_path='/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc',
        width=1280,
        height=640,
        background_color='white',
        max_words=100,
        max_font_size=300,   # upper bound on glyph size
        min_font_size=30,    # lower bound on glyph size
        relative_scaling=0.7,  # how strongly frequency drives size
        stopwords=stopword,
        mode='RGBA',
        mask=mask,
    )
    wc.generate_from_frequencies(ctx)  # expects a {word: frequency} mapping
    wc.to_file(name + ".png")  # save the rendered cloud
def wordcount(ctx: list, top: int = 10):
    """Return the *top* most common tokens in *ctx* as (token, count) pairs."""
    from collections import Counter  # frequency counting
    return Counter(ctx).most_common(top)
def cleanlist(ctx, list):
    """Remove every occurrence of the items in *list* from *ctx*.

    The removal happens in place (callers rely on the same list object being
    returned) but runs in O(len(ctx)) instead of the previous repeated
    ``count``/``remove`` loop, which was O(len(ctx) * removals).

    Note: the parameter name ``list`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    banned = set(list)
    ctx[:] = [token for token in ctx if token not in banned]
    return ctx
def maskpic(path: str = ""):
    """Load the image at *path* and return it as a numpy array, for use as a
    word-cloud shape mask."""
    import numpy as np
    from PIL import Image
    img = Image.open(path)
    mask = np.array(img)  # convert the image to an array
    return mask
def writefile(name: str, context: str):
    """Write *context* to ``<name>.txt`` (UTF-8), overwriting any existing file."""
    with open(name + ".txt", mode="w", encoding="utf-8") as handle:
        handle.write(context)
if __name__ == "__main__":
    # Pipeline: read text file -> clean -> segment with jieba -> drop noise
    # tokens -> count frequencies -> save counts -> render word cloud.
    filename = input("请输入纯文本格式文件名:")
    f = readfile(filename)
    f = preprocess(f)
    jieba.load_userdict("lis_dict.txt")
    ls = jieba.lcut(f)  # build the token list
    ls = cleanlist(ls, ['与', ' '])
    text = dict(wordcount(ls, 1000))
    writefile(filename + "处理后", str(text))
    mask = maskpic("2023/books1280.png")
    genpic(name=filename + "word", ctx=text)
    '''
    with open("处理后1.txt", mode="r", encoding="utf-8") as test:
        man = json.load(test)
        genpic(name="1205man", ctx=man)
    '''
| DaoMingze/yztool | wordart.py | wordart.py | py | 2,527 | python | en | code | 1 | github-code | 90 |
18527272609 | import sys
input=sys.stdin.readline
N,C=map(int,input().split())
# d[i][j]: cost of repainting color i+1 to color j+1; c: current grid colors.
d=[list(map(int,input().split())) for i in range(C)]
c=[list(map(int,input().split())) for i in range(N)]
ans=10**18
# Cells are partitioned into three classes by (row+col) mod 3 (1-based).
mod0=[]
mod1=[]
mod2=[]
for i in range(N):
    for j in range(N):
        k=(i+j+2)%3
        if k==0:
            mod0.append((i,j))
        elif k==1:
            mod1.append((i,j))
        else:
            mod2.append((i,j))
# cntX[col]: total cost of repainting every cell of class X to color col.
cnt0=[0]*(C+1)
cnt1=[0]*(C+1)
cnt2=[0]*(C+1)
for c0 in range(1,C+1):
    for x,y in mod0:
        cnt0[c0]+=d[c[x][y]-1][c0-1]
for c1 in range(1,C+1):
    for x,y in mod1:
        cnt1[c1]+=d[c[x][y]-1][c1-1]
for c2 in range(1,C+1):
    for x,y in mod2:
        cnt2[c2]+=d[c[x][y]-1][c2-1]
ans=10**18
# Try every triple of pairwise-distinct colors for the three classes.
for c0 in range(1,C+1):
    for c1 in range(1,C+1):
        for c2 in range(1,C+1):
            if c1==c2 or c2==c0 or c0==c1:
                continue
            cnt=cnt0[c0]+cnt1[c1]+cnt2[c2]
            ans=min(ans,cnt)
print(ans)
44187470435 | import numpy as np
from numpy import linalg as la
np.set_printoptions(precision=3)
class DiGraph:
"""A class for representing directed graphs via their adjacency matrices.
Attributes:
Labellist (list(str)): List of labels for the n nodes in the
graph.
A_hat ((n,n) ndarray): The adjacency matrix of a directed graph,
where A_hat has been calculated from matrix A.
"""
# Problem 1
def __init__(self, A, labels=None):
"""Modify A so that there are no sinks in the corresponding graph,
then calculate Ahat. Save Ahat and the labels as attributes.
Parameters:
A ((n,n) ndarray): the adjacency matrix of a directed graph.
A[i,j] is the weight of the edge from node j to node i.
labels (list(str)): labels for the n nodes in the graph.
If None, defaults to [0, 1, ..., n-1].
Examples
========
>>> A = np.array([[0, 0, 0, 0],[1, 0, 1, 0],[1, 0, 0, 1],[1, 0, 1, 0]])
>>> G = DiGraph(A, labels=['a','b','c','d'])
>>> G.A_hat
array([[0. , 0.25 , 0. , 0. ],
[0.333, 0.25 , 0.5 , 0. ],
[0.333, 0.25 , 0. , 1. ],
[0.333, 0.25 , 0.5 , 0. ]])
>>> steady_state_1 = G.linsolve()
>>> { k: round(steady_state_1[k],3) for k in steady_state_1}
{'a': 0.096, 'b': 0.274, 'c': 0.356, 'd': 0.274}
>>> steady_state_2 = G.eigensolve()
>>> { k: round(steady_state_2[k],3) for k in steady_state_2}
{'a': 0.096, 'b': 0.274, 'c': 0.356, 'd': 0.274}
>>> steady_state_3 = G.itersolve()
>>> { k: round(steady_state_3[k],3) for k in steady_state_3}
{'a': 0.096, 'b': 0.274, 'c': 0.356, 'd': 0.274}
>>> get_ranks(steady_state_3)
['c', 'b', 'd', 'a']
"""
A = A * 1.0 #security to convert elements in the matrix to floats, since it's required for calculating A_hat.
#check if there's sinks, and if there is, modify the matrix to have none.
i = 0 #iterator to remember which column we're on.
for row in A.transpose():
i = i + 1
if row.sum() == 0:
A[:,i-1] = 1
i = 0
#Now calculating A_hat
for row in A.transpose():
i = i + 1
A[:,i-1] = A[:,i-1] / row.sum()
self.A_hat = A #saving A_hat as attribute
if labels is None:
labellist = list()
labellist = range(0, np.size(A,1))
self.labellist = [str(n) for n in labellist]
if labels is not None:
if len(labels) > np.size(A,1):
raise ValueError('Amount of labels exceeds number of nodes!')
else:
self.labellist = labels
def linsolve(self, epsilon=0.85):
"""Compute the PageRank vector using the linear system method.
Parameters:
epsilon (float): the damping factor, between 0 and 1.
Returns:
dict(str -> float): A dictionary mapping labels to PageRank values.
"""
n = np.size(self.A_hat,1)
results = np.linalg.solve(np.eye(n, n) - np.dot(epsilon, self.A_hat),(1 - epsilon) * (np.array([1] * n) / n))
dict = {}
for i in range(n):
dict[self.labellist[i]] = results[i]
return dict
def eigensolve(self, epsilon=0.85):
    """Compute the PageRank vector using the eigenvalue method.

    Normalizes the resulting eigenvector so its entries sum to 1.

    Parameters:
        epsilon (float): the damping factor, between 0 and 1.
    Return:
        dict(str -> float): A dictionary mapping labels to PageRank values.
    """
    n = np.size(self.A_hat, 1)
    # Damped ("Google") matrix; it is column-stochastic, so its dominant
    # eigenvalue is exactly 1.
    A_flat = epsilon * self.A_hat + (1 - epsilon) * (np.ones((n, n)) / n)
    eigenvalues, eigenvectors = np.linalg.eig(A_flat)
    # Bug fix: np.linalg.eig does not return eigenvalues in any particular
    # order, so blindly taking column 0 was only correct by luck.  Pick the
    # eigenvector whose eigenvalue is closest to 1.
    dominant = np.argmin(np.abs(eigenvalues - 1))
    vec = np.abs(np.real(eigenvectors[:, dominant]))
    vec = vec / vec.sum()  # normalize entries to sum to 1
    return {label: value for label, value in zip(self.labellist, vec)}
def itersolve(self, epsilon=0.85, maxiter=100, tol=1e-12):
    """Compute the PageRank vector using the iterative (power) method.

    Parameters:
        epsilon (float): the damping factor, between 0 and 1.
        maxiter (int): the maximum number of iterations to compute.
        tol (float): the convergence tolerance.
    Return:
        dict(str -> float): A dictionary mapping labels to PageRank values.
    """
    n = np.size(self.A_hat, 1)
    x = np.full(n, 1 / n)  # start from the uniform distribution
    for _ in range(maxiter):
        # x stays a probability vector because A_hat is column-stochastic.
        x_new = epsilon * self.A_hat @ x + (1 - epsilon) / n
        converged = np.linalg.norm(x_new - x, 1) < tol
        x = x_new
        if converged:
            break
    return {label: value for label, value in zip(self.labellist, x)}
def get_ranks(d):
    """Construct a sorted list of labels based on the PageRank vector.

    Parameters:
        d (dict(str -> float)): a dictionary mapping labels to PageRank values.
    Returns:
        (list) the keys of d, sorted by PageRank value from greatest to least.
    """
    # sorted() is stable, so labels with equal PageRank keep their
    # original (insertion) order.
    return sorted(d, key=d.get, reverse=True)
# Task 2
def rank_websites(filename="web_stanford.txt", epsilon=0.85):
    """Read the specified file and construct a graph where node j points to
    node i if webpage j has a hyperlink to webpage i. Use the DiGraph class
    and its itersolve() method to compute the PageRank values of the webpages,
    then rank them with get_ranks().

    Each line of the file has the format
        a/b/c/d/e/f...
    meaning the webpage with ID 'a' has hyperlinks to the webpages with IDs
    'b', 'c', 'd', and so on.

    Parameters:
        filename (str): the file to read from.
        epsilon (float): the damping factor, between 0 and 1.
    Returns:
        (list(str)): The ranked list of webpage IDs.
    """
    with open(filename, 'r') as infile:
        lines = infile.read().strip().split('\n')
    # Collect every ID that appears anywhere, as a source or as a target;
    # IDs that only appear as targets become sink columns, which DiGraph
    # already repairs.
    all_ids = set()
    links = []
    for line in lines:
        ids = line.strip().split('/')
        all_ids.update(ids)
        links.append(ids)
    labels = sorted(all_ids)
    index = {label: i for i, label in enumerate(labels)}
    n = len(labels)
    A = np.zeros((n, n))
    for ids in links:
        source = index[ids[0]]
        for target in ids[1:]:
            # Column j -> row i encodes "page j links to page i".
            A[index[target], source] = 1
    graph = DiGraph(A, labels=labels)
    return get_ranks(graph.itersolve(epsilon=epsilon))
# Task 3
def rank_uefa_teams(filename, epsilon=0.85):
    """Read the specified file and construct a graph where node j points to
    node i with weight w if team j was defeated by team i in w games. Use the
    DiGraph class and its itersolve() method to compute the PageRank values of
    the teams, then rank them with get_ranks().

    Each line of the file has the format
        A,B
    meaning team A defeated team B.

    Parameters:
        filename (str): the name of the data file to read.
        epsilon (float): the damping factor, between 0 and 1.
    Returns:
        (list(str)): The ranked list of team names.
    """
    # NOTE(review): assumes every line is a game record with no header row --
    # confirm against the actual CSV.
    with open(filename, 'r') as infile:
        lines = infile.read().strip().split('\n')
    teams = set()
    games = []
    for line in lines:
        winner, loser = line.strip().split(',')
        teams.add(winner)
        teams.add(loser)
        games.append((winner, loser))
    labels = sorted(teams)
    index = {team: i for i, team in enumerate(labels)}
    n = len(labels)
    A = np.zeros((n, n))
    for winner, loser in games:
        # Loser (column) points to winner (row); repeated defeats add weight.
        A[index[winner], index[loser]] += 1
    graph = DiGraph(A, labels=labels)
    return get_ranks(graph.itersolve(epsilon=epsilon))
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings above when the module is
    # executed as a script.
    import doctest
    doctest.testmod()
| Tubnielsen/LinAlg | asg5-pagerank/asg5.py | asg5.py | py | 6,777 | python | en | code | 0 | github-code | 90 |
class ThreeStacks:
    """Three fixed-size stacks multiplexed onto one backing list.

    Stack k (k in 0..2) occupies array slots
    [k * stack_size, (k + 1) * stack_size).
    """

    def __init__(self, stack_size):
        self.stack_size = stack_size
        self.array = [None] * (stack_size * 3)
        # Index of each stack's top, relative to its own region (-1 = empty).
        self.tops = [-1, -1, -1]

    def push(self, stack_num, value):
        """Push value onto the given stack; raise if it is full."""
        if stack_num < 0 or stack_num > 2:
            raise ValueError("Invalid stack number")
        # Bug fix: the original checked only whether stack 2 was full, so
        # stacks 0 and 1 could silently overflow into their neighbour's
        # region of the backing array.
        if self.is_full(stack_num):
            raise Exception("Stack is full")
        self.tops[stack_num] += 1
        self.array[self.index_of_top(stack_num)] = value

    def pop(self, stack_num):
        """Remove and return the top value of the given stack."""
        if self.is_empty(stack_num):
            raise Exception("Stack is empty")
        value = self.array[self.index_of_top(stack_num)]
        self.array[self.index_of_top(stack_num)] = None
        self.tops[stack_num] -= 1
        return value

    def peek(self, stack_num):
        """Return (without removing) the top value of the given stack."""
        if self.is_empty(stack_num):
            raise Exception("Stack is empty")
        return self.array[self.index_of_top(stack_num)]

    def is_empty(self, stack_num):
        """True when the given stack holds no elements."""
        return self.tops[stack_num] == -1

    def is_full(self, stack_num=2):
        """True when the given stack has reached stack_size elements.

        The default of 2 keeps the original no-argument call (which only
        reported on stack 2) working unchanged.
        """
        return self.tops[stack_num] == self.stack_size - 1

    def index_of_top(self, stack_num):
        """Absolute index in self.array of the given stack's top element."""
        return stack_num * self.stack_size + self.tops[stack_num]

    def print_arr(self):
        """Debug helper: print the raw backing array."""
        print(self.array)
stk = ThreeStacks(10)

# Fill stack 2 with seven values.
for _ in range(7):
    stk.push(2, 100)

# Push sixteen values onto stack 1.
for _ in range(16):
    stk.push(1, 90)

# And four onto stack 0.
for _ in range(4):
    stk.push(0, 101)

stk.peek(1)
stk.print_arr()
| mostafijur-rahman299/cracking-coding-interview-solutions | Stack & Queue/three-in-one-stack.py | three-in-one-stack.py | py | 1,966 | python | en | code | 0 | github-code | 90 |
75167582056 | import warnings
from copy import deepcopy
from typing import List, Tuple, Union
import qiskit
import qiskit.ignis.mitigation.measurement as mit
import qiskit.ignis.verification.tomography as tomo
from qiskit import QuantumCircuit
from qiskit.providers import BaseBackend
from qiskit.pulse import InstructionScheduleMap, Schedule
from qiskit.pulse.reschedule import align_measures
from qiskit.qobj import PulseQobj
from qiskit.quantum_info.operators import Choi
from qiskit.result import Result, marginal_counts
# Memory slots 0 and 1 are reserved for the two tomography qubits; the
# acquire sanity check in create_qpt_experiment and the marginalization in
# extract_choi_matrix both refer to these slots.
__reserved_registers = [0, 1]
def create_qpt_experiment(target_circuits: List[QuantumCircuit],
                          control: int,
                          target: int,
                          backend: BaseBackend,
                          mit_readout: bool = True,
                          inst_map: InstructionScheduleMap = None,
                          basis_gate: List[str] = None,
                          sanity_check: bool = False,
                          shots: int = 2048,
                          return_schedule=False)\
        -> Tuple[Union[PulseQobj, Schedule], List[List[QuantumCircuit]], List[str]]:
    """ Create circuits and schedules for QPT.
    Args:
        target_circuits: List of target circuits for QPT experiment.
        control: index of control qubit.
        target: index of target qubit.
        backend: Target quantum system.
        mit_readout: If use readout mitigation.
        inst_map: instruction mapping object.
        basis_gate: basis gates.
        sanity_check: check memory slot mapping of generated qobj.
        shots: Number of shots.
        return_schedule: set ``True`` when return schedule object instead of qobj.
    Returns:
        Qobj, Schedules, Quantum circuits, Measurement labels
    Additional Information:
        Bit ordering is little endian as a convention of computational science community,
        as the rest of qiskit does. When you measure the CR process tomography of q0 and q1,
        you will observe XZ (ZX) interaction when q0 (q1) is control qubit.
    """
    qubits = sorted([control, target])
    back_config = backend.configuration()
    back_defaults = backend.defaults()
    if inst_map is None:
        inst_map = back_defaults.circuit_instruction_map
    if basis_gate is None:
        basis_gate = back_config.basis_gates
    if isinstance(target_circuits, QuantumCircuit):
        target_circuits = [target_circuits]
    exp_circs = []
    # create the measurement circuits for error mitigation, optional
    qr = target_circuits[0].qregs[0]
    if mit_readout:
        meas_circs, meas_labels = mit.complete_meas_cal(qubit_list=qubits, qr=qr, circlabel='mcal')
        exp_circs.extend(meas_circs)
    else:
        meas_labels = []
    # create qpt circuits, one set per target circuit
    qpt_qcs_list = []
    for target_circuit in target_circuits:
        # extract quantum registers from target circuit
        qr = target_circuit.qregs[0]
        qr0 = qr[qubits[0]]
        qr1 = qr[qubits[1]]
        qpt_qcs = tomo.process_tomography_circuits(target_circuit, measured_qubits=[qr0, qr1])
        qpt_qcs_list.append(qpt_qcs)
        exp_circs.extend(qpt_qcs)
    # transpile
    exp_circs = qiskit.transpile(exp_circs, backend, basis_gates=basis_gate)
    # schedule with measure alignment
    exp_scheds = align_measures(qiskit.schedule(exp_circs, backend=backend, inst_map=inst_map),
                                inst_map=inst_map)
    if return_schedule:
        return exp_scheds, qpt_qcs_list, meas_labels
    # assemble pulse qobj
    qobj = qiskit.assemble(exp_scheds, backend=backend, meas_level=2, shots=shots)
    # sanity check
    if sanity_check:
        for experiment in qobj.experiments:
            for inst in experiment.instructions:
                if inst.name == 'acquire':
                    memory_slot_map = inst.memory_slot
                    # Bug fix: the original compared memory_slot_map[qubits[0]]
                    # against BOTH reserved registers (x != 0 or x != 1 is
                    # always true), so the warning fired for every acquire.
                    # Compare each qubit against its own reserved slot.
                    if memory_slot_map[qubits[0]] != __reserved_registers[0] or \
                            memory_slot_map[qubits[1]] != __reserved_registers[1]:
                        warnings.warn('Wrong memory slots are assigned. '
                                      'QPT fitter may return invalid result.')
        assert len(qobj.experiments) <= back_config.max_experiments
    return qobj, qpt_qcs_list, meas_labels
def extract_choi_matrix(result: Result,
                        qpt_qcs_list: List[List[QuantumCircuit]],
                        meas_labels: List[str]) -> Choi:
    """ Estimate quantum channel from experiment.
    Args:
        result: Result of tomography experiment.
        qpt_qcs_list: Process tomography circuits.
        meas_labels: Measurement labels.
    Note:
        Need to:
            pip install cvxopt
    Yields:
        Quantum channel in Choi matrix representation.
    """
    def format_result(data_index, chunk):
        """Create new result object from partial result and marginalize."""
        # Deep-copy so that slicing out a chunk of experiments does not
        # mutate the caller's Result; counts are then marginalized onto the
        # two reserved memory slots.
        new_result = deepcopy(result)
        new_result.results = []
        new_result.results.extend(result.results[data_index:data_index + chunk])
        return marginal_counts(new_result, __reserved_registers)
    # readout error mitigation: the calibration circuits (if any) are the
    # first len(meas_labels) experiments in `result`.
    if len(meas_labels) > 0:
        mit_result = format_result(data_index=0,
                                   chunk=len(meas_labels))
        meas_fitter = mit.CompleteMeasFitter(mit_result, meas_labels,
                                             qubit_list=[0, 1],
                                             circlabel='mcal')
        print('readout fidelity = %.3f' % meas_fitter.readout_fidelity())
    else:
        meas_fitter = None
    # format qpt result: the remaining experiments are consecutive chunks,
    # one chunk per entry of qpt_qcs_list (NOTE(review): assumes every chunk
    # has the same length len(qpt_qcs) -- confirm against the producer).
    qpt_results = []
    for ind, qpt_qcs in enumerate(qpt_qcs_list):
        qpt_result = format_result(data_index=len(meas_labels) + ind * len(qpt_qcs),
                                   chunk=len(qpt_qcs))
        if meas_fitter:
            # Apply the readout-mitigation filter when calibration ran.
            qpt_results.append(meas_fitter.filter.apply(qpt_result))
        else:
            qpt_results.append(qpt_result)
    # process tomography: one Choi matrix per target circuit, yielded lazily
    for qpt_result, qpt_circuit in zip(qpt_results, qpt_qcs_list):
        process_fitter = tomo.ProcessTomographyFitter(qpt_result, circuits=qpt_circuit)
        qpt_choi = process_fitter.fit(method='cvx', solver='CVXOPT')
        yield qpt_choi
| BramDo/custom-cx-gate-on-Casablanca | utils/qpt_utils.py | qpt_utils.py | py | 6,278 | python | en | code | 1 | github-code | 90 |
3392942558 | import os
def writeData(fileName='', data='', openMode='a'):
    """Write `data` to `fileName`.

    Parameters:
        fileName (str): path of the file to write to.
        data (str): text to write.
        openMode (str): mode passed to open(); 'a' appends (default),
            'w' truncates first.
    """
    # The with-statement closes the file; the original's explicit f.close()
    # and the no-op `data = data` assignment were unnecessary.
    with open(fileName, openMode) as f:
        f.write(data)
def openFile(fileName=''):
    """Print the contents of `fileName`, framed by blank lines.

    Does nothing when fileName is the empty string (the default).
    """
    if fileName != '':
        # The with-statement already closes the file; the original's
        # explicit f.close() was redundant.
        with open(fileName, 'r') as f:
            data = f.read()
        print('\n\n')
        print(data)
        print('\n\n')
if __name__ == "__main__":
    # Demo: overwrite ./test.txt with a message, then echo it back.
    target_path = './test.txt'
    message = '\I love Python'
    writeData(target_path, message, 'w')
    openFile(target_path)
| JasonAlkain/Python_Projects | Test7/importTesting.py | importTesting.py | py | 608 | python | en | code | 0 | github-code | 90 |
14117717006 | from gambling.trade_checker import payoff_calculator, payoff_for_multiple_parlays, Bet, BetType
def test_payoff_calculator_five_bet_flex_five_wins():
    """All five legs hit: a $400 five-bet flex returns 10x the stake."""
    stat_lines = [29.5, 32.0, 1.5, 0.5, 4.5]
    actuals = [30.0, 31.0, 2.0, 1.0, 4.0]
    picks = [Bet.OVER, Bet.UNDER, Bet.OVER, Bet.OVER, Bet.UNDER]
    payout = payoff_calculator(stat_lines, actuals, picks,
                               BetType.FIVE_BET_FLEX, 400)
    assert payout == 4000
def test_payoff_calculator_five_bet_flex_four_wins():
    """Four of five legs hit: the flex entry returns 2x the stake."""
    stat_lines = [29.5, 32.0, 1.5, 0.5, 4.5]
    actuals = [30.0, 31.0, 2.0, 1.0, 4.0]
    picks = [Bet.OVER, Bet.UNDER, Bet.OVER, Bet.UNDER, Bet.UNDER]
    payout = payoff_calculator(stat_lines, actuals, picks,
                               BetType.FIVE_BET_FLEX, 400)
    assert payout == 800
def test_payoff_calculator_five_bet_flex_three_wins():
    """Three of five legs hit: the flex entry returns 0.4x the stake."""
    stat_lines = [29.5, 32.0, 1.5, 0.5, 4.5]
    actuals = [30.0, 31.0, 2.0, 1.0, 4.0]
    picks = [Bet.UNDER, Bet.UNDER, Bet.OVER, Bet.UNDER, Bet.UNDER]
    payout = payoff_calculator(stat_lines, actuals, picks,
                               BetType.FIVE_BET_FLEX, 400)
    assert payout == 160
def test_payoff_for_multiple_parlays():
    """Four parlays of different sizes and types settle to the expected total."""
    # Lines for each parlay (3, 4, 5 and 6 legs respectively).
    parlay_1_lines = [390.5, 1.5, 65.5]
    parlay_2_lines = [0.5, 1.5, 2.5, 3.5]
    parlay_3_lines = [4.5, 67.5, 10.0, 14.5, 15.5]
    parlay_4_lines = [3.5, 7.5, 100.5, 215.5, 38.5, 42.5]
    # Actual results, positionally matched to the lines above.
    parlay_1_results = [395.0, 1.0, 67.0]
    parlay_2_results = [1.0, 0.0, 5.0, 3.0]
    parlay_3_results = [4.0, 70.0, 7.0, 30.0, 12.0]
    parlay_4_results = [1.0, 8.0, 94.0, 107.0, 40.0, 50.0]
    # Over/under picks, positionally matched as well.
    parlay_1_bets = [Bet.UNDER, Bet.UNDER, Bet.OVER]
    parlay_2_bets = [Bet.OVER, Bet.UNDER, Bet.UNDER, Bet.OVER]
    parlay_3_bets = [Bet.UNDER, Bet.OVER, Bet.UNDER, Bet.UNDER, Bet.OVER]
    parlay_4_bets = [Bet.UNDER, Bet.OVER, Bet.UNDER, Bet.UNDER, Bet.OVER, Bet.OVER]
    all_bet_types = [BetType.THREE_BET_FLEX, BetType.FOUR_BET_POWER, BetType.FIVE_BET_FLEX, BetType.SIX_BET_FLEX]
    all_bet_amounts = [300, 200, 100, 400]
    all_lines = [parlay_1_lines, parlay_2_lines, parlay_3_lines, parlay_4_lines]
    all_results = [parlay_1_results, parlay_2_results, parlay_3_results, parlay_4_results]
    all_bets = [parlay_1_bets, parlay_2_bets, parlay_3_bets, parlay_4_bets]
    calculated_payout = payoff_for_multiple_parlays(all_lines, all_results, all_bets, all_bet_types, all_bet_amounts)
    # Per-parlay multipliers applied to each stake: 1.25x, 0x, 0.4x, 25x.
    expected_payout = 300 * 1.25 + 200 * 0 + 100 * 0.4 + 400 * 25.0
    assert calculated_payout == expected_payout
| RishiChillara/gambling | tests/test_trade_checker.py | test_trade_checker.py | py | 2,531 | python | en | code | 0 | github-code | 90 |
class Solution:
    def monotoneIncreasingDigits(self, N: int) -> int:
        """Return the largest number <= N whose digits are monotonically
        non-decreasing left to right (e.g. 332 -> 299).

        Scan right-to-left; whenever a digit is smaller than its left
        neighbour, decrement the neighbour and remember the position --
        every digit from that position onward must become '9'.
        """
        digits = list(str(N))
        marker = len(digits)  # first index that must be turned into '9'
        for i in range(len(digits) - 1, 0, -1):
            if digits[i] < digits[i - 1]:
                digits[i - 1] = str(int(digits[i - 1]) - 1)
                marker = i
        for i in range(marker, len(digits)):
            digits[i] = '9'
        # int() ignores a leading zero produced by decrementing '1' to '0',
        # so the original's special case for digits[0] == '0' was redundant.
        return int(''.join(digits))
3325929128 | import torch
import numpy as np
from torch.utils.data import Dataset
import pickle
class HandwrittenWords(Dataset):
"""Ensemble de donnees de mots ecrits a la main."""
def __init__(self, filename):
# Lecture du text
self.pad_symbol = pad_symbol = '<pad>'
self.start_symbol = start_symbol = '<sos>'
self.stop_symbol = stop_symbol = '<eos>'
self.data = dict()
with open(filename, 'rb') as fp:
self.data = pickle.load(fp)
# Extraction des symboles
self.symb2int = {start_symbol: 0, stop_symbol: 1, pad_symbol: 2, 'a':3, 'b':4, 'c':5, 'd':6, 'e':7, 'f':8, 'g':9, 'h':10, 'i':11, 'j':12, 'k':13, 'l':14, 'm':15, 'n':16, 'o':17, 'p':18, 'q':19, 'r':20, 's':21, 't':22, 'u':23, 'v':24, 'w':25, 'x':26, 'y':27, 'z':28}
self.int2symb = {v: k for k, v in self.symb2int.items()}
self.max_len = dict()
self.max_len['coord'] = 457
self.max_len['word'] = 6
# Ajout du padding aux séquences
for word in self.data:
word[1] = torch.diff(torch.tensor(word[1]), dim=1).cpu().detach().numpy()
if word[1].shape[1] < self.max_len['coord']:
for i in range(self.max_len['coord'] - word[1].shape[1]):
word[1] = np.append(word[1], [[0], [0]], axis=1)
if len(word[0]) < self.max_len['word']:
word[0] = list(word[0])
for i in range(self.max_len['word'] - len(word[0])):
if i == 0:
word[0].append(stop_symbol)
else:
word[0].append(pad_symbol)
self.dict_size = len(self.int2symb)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
word = self.data[idx][0]
coord = self.data[idx][1]
data_seq = torch.tensor(coord)
data_seq = torch.transpose(data_seq, 0, 1)
target_seq = [self.symb2int[j] for j in word]
return data_seq, torch.tensor(target_seq)
def visualisation(self, idx):
# Visualisation des échantillons - JUSTE DANS LE MAIN DE DATASET.PY
pass
if __name__ == "__main__":
# Code de test pour aider à compléter le dataset
a = HandwrittenWords('data_trainval.p')
for i in range(10):
a.visualisation(np.random.randint(0, len(a)))
| IliassBour/S7-APP3 | dataset.py | dataset.py | py | 2,396 | python | en | code | 0 | github-code | 90 |
18403346917 | import os
import shutil
print("************************************************")
print("**** SCRPIT DE BACKUP MANUAL COM PYTHON ******")
print("* [Autor]: Lucas Martins *")
print("* [Versão do Script]: 0.2 *")
print("* [Status]: Em desenvolvimento *")
print("* [Versão Python]: 3.9.4 *")
print("************************************************")
input("Press Enter to continue...")
nome = input("Digite o nome do funcionário: ")
pasta_backup = 'C:/Backup' + nome
#Função para Criar a pasta do Backup e mostrar a onde ela está.
def inicio ():
os.mkdir(pasta_backup)
return print("PASTA CRIADA: " + pasta_backup)
#Função de copiar os diretórios para pasta do Backup.
def criacaoPastas():
desktop = input("Digite o caminho da Área de Trabalho:")
copiaDesktop = pasta_backup + '/Área de Trabalho'
shutil.copytree(src=desktop, dst=copiaDesktop)
downs = input("Digite o caminho dos Downloads: ")
copiaDowns = pasta_backup + '/Downloads'
shutil.copytree(src=downs, dst=copiaDowns)
docs = input("Digite o caminho dos Documentos: ")
copiaDocs = pasta_backup + '/Documentos'
shutil.copytree(src=docs, dst=copiaDocs)
imgs = input("Digite o caminho das Imagens: ")
copiaImgs = pasta_backup + '/Imagens'
shutil.copytree(src=imgs, dst=copiaImgs)
return print("FINALIZADO!")
#Execução das funções
inicio()
criacaoPastas()
| lucasmcampos/PythonBackup | ScriptBackup.py | ScriptBackup.py | py | 1,516 | python | pt | code | 0 | github-code | 90 |
42801880950 | from Layer import Layer
import numpy as np
class Convolutional(Layer):
'''
input_shape: 3d_tensor [channels, width, height]
filter_size: 2d_tensor [width_len, height_len]
'''
def __init__(self, input_shape, n_filters, filter_size, stride=1):
super(Convolutional, self).__init__()
assert len(filter_size) == 2
self.filter_size = filter_size
self.input_shape = input_shape
self.n_filters = n_filters
self.stride = stride
# self.W = np.ones(shape=(n_filters, filter_size[0], filter_size[1]))
self.W = np.random.uniform(low=-0.1, high=0.1, size=(n_filters, filter_size[0], filter_size[1]))
self.b = np.zeros(n_filters)
def forward(self, data):
self.data = data
assert data.shape == self.input_shape
feature_maps = []
for n in range(self.n_filters):
feature_map = []
for i in range(self.input_shape[1] - self.filter_size[0] + 1):
col = []
for j in range(self.input_shape[2] - self.filter_size[1] + 1):
point = np.sum(self.W[n] * data[:, i: i+self.filter_size[0], j: j+self.filter_size[1]]) + self.b[n]
col.append(point)
feature_map.append(col)
feature_maps.append(feature_map)
self.out = np.array(feature_maps)
return self.out
def bp(self, delta):
next_delta = np.dot(delta, self.params[0].T)
g = []
for d in delta:
g.append((d * self.data).reshape((self.input_dim, 1)))
self.grad_W = np.concatenate(g, axis=1)
self.grad_b = delta * 1
self.grads = [self.grad_W, self.grad_b]
return next_delta
def updata_params(self, optimizer):
assert len(self.params) == len(self.grads)
for i in range(len(self.params)):
self.params[i] = optimizer.get_updates(self.params[i], self.grads[i])
| kongjiellx/Frech | Layers/Convolutional.py | Convolutional.py | py | 1,949 | python | en | code | 0 | github-code | 90 |
27532297779 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
import tensorflow.keras
import pickle
import os
import tensorflow as tf
import h5py
from wordcloud import WordCloud, STOPWORDS
def run_eda_app() :
st.subheader('EDA 화면입니다.')
st.write('RNN 딥러닝 학습에 사용한 데이터를 확인 하실 수 있습니다.')
df = pd.read_csv('data/emails.csv')
st.write('학습에 사용한 데이터의 갯수입니다.')
plt.pie(df['spam'].value_counts(), autopct='%.1f')
plt.show()
st.pyplot()
data = ['SPAM', 'NOT_SPAM','ALL']
if st.checkbox('데이터를 보려면 클릭하세요.') :
select = st.radio('보려는 데이터를 선택하세요.', data)
if select == 'SPAM':
df = df[lambda x: x['spam'] == 1]
st.dataframe(df)
elif select == 'NOT_SPAM':
df = df[lambda x: x['spam'] == 0]
st.dataframe(df)
elif select == 'ALL' :
st.dataframe(df)
stop_words = STOPWORDS
wc = WordCloud(background_color='white', stopwords=stop_words)
df = df[lambda x: x['spam'] == 1]
stop_words.add('will')
stop_words.add('subject')
text_list = df['text'].str.lower().tolist()
words_as_one_string = ' '.join(text_list)
my_words = wc.generate(words_as_one_string)
st.set_option ( 'deprecation.showPyplotGlobalUse', False)
if st.checkbox('스팸메일에는 어떤 단어가 많이 사용됐는지 보려면 체크하세요'):
plt.imshow(my_words)
plt.axis("off")
st.pyplot()
| jaechang3456/Stramlit_MySQL_Spam_Classifier | eda_app.py | eda_app.py | py | 1,694 | python | ko | code | 0 | github-code | 90 |
72598761896 | # - * - coding:utf-8 - * -
# - * - coding:utf-8 - * -
from django.urls import path,re_path
from . import views
urlpatterns = [
re_path(r'^$', views.show,name='index_show'),
#评论点赞
re_path(r'^comment_affirm/$',views.comment_affirm,name='comment_affirm'),
# 博客点赞
re_path(r'^affirm_blog/$',views.affirm_blog,name='affirm_blog'),
#添加收藏
re_path(r'addcollection',views.addcollection,name='addcollection'),
#收藏博客
re_path(r'collectblog',views.collectblog,name='collectblog'),
] | ziyiLike/csdn_blog | blog_home/apps/blog/urls.py | urls.py | py | 537 | python | en | code | 1 | github-code | 90 |
22156416088 |
import pandas as pd
import numpy as np
"""Sample Test Data Creation
"""
x = pd.DataFrame(np.random.random((10,6)),
columns=['a','b',
'gradeDetail',
'gradeRecordId',
'lossGivenDefault',
'factorDetails',
])
a = ['USA', 'GBP'] * 5
b = ['Under 50', 'Under 15', 'Over 60', 'Under 25' , 'Under 35'] * 2
x['a'] = a
x['b'] = b
def data_generator(data_frame, column_list_to_change, run_loops=10):
"""Function generates dummy data set using a given dataframe and
running it run_loop times and adding a very negligible value to
columns in column_list_to_change
:param: data_frame : data frame that has to be used for data creation
column_list_to_change: list of columns that has to be modified
run_loops: number of times the columns has to be changed
"""
new_data_frame_list = []
while run_loops:
df = data_frame
for column in column_list_to_change:
df[column] = df[column] + np.random.randn()/10000
new_data_frame_list.append(df)
run_loops -= 1
return_data_frame = pd.concat(new_data_frame_list, axis=0)
return_data_frame = return_data_frame.reset_index()
return return_data_frame
if __name__ == "__main__":
data_generator(data_frame=x,
column_list_to_change=['gradeDetail',
'gradeRecordId',
'lossGivenDefault',
'factorDetails']
) | velamathi/Utilities | data_generator.py | data_generator.py | py | 1,629 | python | en | code | 0 | github-code | 90 |
35219879309 | n = int(input())
data = []
for _ in range(n):
line = list(map(int, input().split()))
if len(line) != n:
line = line + [0] * (n -len(line))
data.append(line)
for i in range(1, n):
for j in range(i+1):
data[i][j] += max(data[i-1][j], data[i-1][j-1])
print(max(data[-1])) | yongwoo97/algorithm | silver/1932_정수삼각형.py | 1932_정수삼각형.py | py | 302 | python | en | code | 0 | github-code | 90 |
18332683389 | import sys
read = sys.stdin.buffer.read
input = sys.stdin.buffer.readline
inputs= sys.stdin.buffer.readlines
#rstrip().decode('utf-8')
import numpy as np
#import operator
#import bisect
#from heapq import heapify,heappop,heappush
#from math import gcd
#from fractions import gcd
#from collections import deque
#from collections import defaultdict
#from collections import Counter
#from itertools import accumulate
#from itertools import groupby
#from itertools import permutations
#from itertools import combinations
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import floyd_warshall
#from scipy.sparse.csgraph import csgraph_from_dense
#from scipy.sparse.csgraph import dijkstra
#sys.setrecursionlimit(10**7)
#map(int,input().split())
def main():
N,M,L=map(int,input().split())
ABCQST=np.array(read().split(),np.int64)
ABC=ABCQST[:3*M]
A=ABC[::3]
B=ABC[1::3]
C=ABC[2::3]
ST=ABCQST[3*M+1:]
S=ST[::2]
T=ST[1::2]
graph1=csr_matrix((C,(A,B)),(N+1,N+1))
graph1_dist=floyd_warshall(graph1,directed=False)
graph2=np.full(((N+1),(N+1)),np.inf)
np.fill_diagonal(graph2,0)
graph2[graph1_dist<=L+0.5]=1
graph2_dist=floyd_warshall(graph2,directed=False)
graph2_dist[graph2_dist==np.inf]=0
graph2_dist=(graph2_dist+0.5).astype(int)
ans=graph2_dist[S,T]-1
print("\n".join(ans.astype(str)))
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p02889/s676188469.py | s676188469.py | py | 1,415 | python | en | code | 0 | github-code | 90 |
10486475246 | # range(stop)
# range(start, stop)
# range(start, stop, step)
# we cannot use range() in this scenario
s = [0, 1, 4, 6, 13]
for i in range(len(s)):
print(s[i])
# we can use the above code as
v = [0, 1, 4, 6, 13]
for i in v:
print(i)
# Enumerate:
# It constructes an iterable of (index, value) tuples around another iterable object
t = [6, 372, 8862, 148800, 2096886]
for p in enumerate(t):
print(p)
# output:
# (0, 6) (index, value)
# (1, 372)
# (2, 8862)
# (3, 148800)
# (4, 2096886)
for i, v in enumerate(t):
print(f"i = {i}, v = {v}")
# output:
# i = 0, v = 6
# i = 1, v = 372
# i = 2, v = 8862
# i = 3, v = 148800
# i = 4, v = 2096886 | iswetha522/Plural_Sight | corepy/range_and_enumerate.py | range_and_enumerate.py | py | 668 | python | en | code | 0 | github-code | 90 |
18588349059 | s = input().split('T')
dx = list(map(len, s[0::2]))
dy = list(map(len, s[1::2]))
x, y = map(int, input().split())
def check(start, ds, goal):
cands = {start}
for d in ds:
new_cands = set()
for c in cands:
new_cands.add(c - d)
new_cands.add(c + d)
cands = new_cands
return goal in cands
x_ok = check(dx[0], dx[1:], x)
y_ok = check(0, dy, y)
print('Yes' if x_ok and y_ok else 'No')
| Aasthaengg/IBMdataset | Python_codes/p03488/s468301167.py | s468301167.py | py | 443 | python | en | code | 0 | github-code | 90 |
35752496461 | #!/usr/bin/env python
import sys
input= sys.stdin.readline
import heapq
INF = int(1e9)
n,m,k,x = map(int,input().split())
graph = [[] for i in range(n+1)]
dist = [INF]*(n+1)
for _ in range(m):
a,b=map(int,input().split())
graph[a].append((b,1))
def dij(x):
q=[]
heapq.heappush(q,(0,x))
dist[x]=0
while q:
distance, now = heapq.heappop(q)
if dist[now]<distance:
continue
for i in graph[now]:
cost = distance+i[1]
if cost<dist[i[0]]:
dist[i[0]] = cost
heapq.heappush(q,(cost,i[0]))
dij(x)
if dist.count(k)==0:
print(-1)
else:
for i in range(1,n+1):
if dist[i]==k:
print(i)
| hansojin/python | graph/bj18352.py | bj18352.py | py | 722 | python | en | code | 0 | github-code | 90 |
72211290858 | def solution(places):
def dfs(i, j, depth, trace):
if depth == 1 and trace == 'P':
result.append(0)
return
if depth == 2:
if trace == 'OP':
result.append(0)
return
for di, dj in dij:
ni = i + di
nj = j + dj
if 0 <= ni < 5 and 0 <= nj < 5 and not visited[ni][nj] and place[ni][nj] != 'X':
dfs(ni, nj, depth+1, trace+place[ni][nj])
answer = []
dij = ((-1, 0), (0, 1), (1, 0), (0, -1))
for place in places:
result = []
visited = [[0 for _ in range(5)] for _ in range(5)]
for i in range(5):
for j in range(5):
if len(result) == 0 and place[i][j] == 'P':
visited[i][j] = 1
dfs(i, j, 0, '')
if len(result) == 0:
answer.append(1)
else:
answer.append(0)
return answer
print(solution([["POOOP", "OXXOX", "OPXPX", "OOXOX", "POXXP"], ["POOPX", "OXPXP", "PXXXO", "OXXXO", "OOOPP"], ["PXOPX", "OXOXP", "OXPOX", "OXXOP", "PXPOX"], ["OOOXX", "XOOOX", "OOOXX", "OXOOX", "OOOOO"], ["PXPXP", "XPXPX", "PXPXP", "XPXPX", "PXPXP"]])) | khyunchoi/Algo | Programmers/python/2021 카카오 인턴/test2.py | test2.py | py | 1,214 | python | en | code | 0 | github-code | 90 |
41305068184 | import re
import re
# Given path
file_path = r"C:\Users\soulo\PaperMate\PaperMate_ui\GUI\source_documents\2307.06435v1.pdf"
# Extract the ID using regular expression
match = re.search(r'\\(\d+\.\d+v\d+)\.pdf$', file_path)
if match:
id_with_version = match.group(1)
print("Extracted ID with version:", id_with_version)
else:
print("No match found.")
| Zaheer-10/PaperMate-RecSys | PaperMate_ui/GUI/check.py | check.py | py | 365 | python | en | code | 0 | github-code | 90 |
19512175676 | '''
Usage:
python main.py <url> <depth> <modulename>
Example:
python main.py http://www.pyregex.com/ 3 pyregex
'''
import lib.scraper as webscraper
import lib.plotter as plotter
import os
import sys
import pickle
if __name__ == '__main__':
try:
_, url, depth, module_name = sys.argv
plot_file = module_name+".png"
module_name += ".py"
print('Starting webscrape.')
result = webscraper.scrape_starter(url, int(depth))
print('Numbers of links pages scraped: {}'.format(len(result)))
print('Webscraping finished succesfully.')
# print('\nSaving result dictionary in a pickle module as: {}'.format(module_name))
# try:
# module_dir = 'pickle_modules'
# if not os.path.isdir(module_dir):
# os.makedirs(module_dir)
# with open(os.path.join(module_dir, module_name), "wb" ) as fp:
# pickle.dump( result, fp)
# print('Pickle module was saved succesfully.')
# except Exception as e:
# print('Pickle module save failure!')
print('\nSaving result dictionary in a python module as: {}'.format(module_name))
try:
module_txt = 'SCRAPED_LINKS = {}'.format(str(result))
module_dir = 'modules'
if not os.path.isdir(module_dir):
os.makedirs(module_dir)
with open(os.path.join(module_dir, module_name), "w" ) as fp:
fp.write(module_txt)
except Exception as e:
print('Python module save failure!')
print('\nCreating and viewing plot.')
plotter.make_graph(result, plot_file)
except Exception as e:
print(__doc__)
sys.exit(1)
| GertMadsen/WebScraper | main.py | main.py | py | 1,803 | python | en | code | 0 | github-code | 90 |
29046649416 | import unittest
import json
import csv
import sqlite3
from si507_finalproject import *
class TestDatabase(unittest.TestCase):
def test_restaurants_table(self):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
sql_1 = '''
SELECT Name
FROM Restaurants
WHERE PriceRange = '$'
ORDER BY Name DESC
'''
result1 = cur.execute(sql_1)
result1_list = result1.fetchall()
self.assertEqual(len(result1_list), 21)
self.assertTrue(("The Lunch Room",) in result1_list)
sql_2 = '''
SELECT DISTINCT Street
FROM Restaurants
'''
result2 = cur.execute(sql_2)
result2_list = result2.fetchall()
self.assertEqual(len(result2_list),99)
conn.close()
def test_ratings_table(self):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
sql_1 = '''
SELECT Restaurant
FROM Ratings
'''
result1 = cur.execute(sql_1)
result1_list = result1.fetchall()
self.assertEqual(len(result1_list), 30)
sql_2 = '''
SELECT TripA_Rating, Yelp_ReviewCount
FROM Ratings
WHERE TripA_Rating == Yelp_Rating
'''
result2 = cur.execute(sql_2)
result2_list = result2.fetchall()
self.assertEqual(len(result2_list), 6)
for item in result2_list:
self.assertNotEqual(type(item[0]), str)
self.assertEqual(type(item[1]), int)
conn.close()
def test_joins(self):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
sql = '''
SELECT Name
FROM Restaurants
JOIN Ratings
ON Restaurants.Id = Ratings.RestaurantId
WHERE PriceRange = '$'
AND Yelp_Rating > 4
'''
results = cur.execute(sql)
result_list = results.fetchall()
self.assertEqual(len(result_list), 2)
self.assertIn(('NeoPapalis',), result_list)
conn.close()
class TestGetData(unittest.TestCase):
def setUp(self):
self.tripa = open("cache_tripa.json", "r", encoding='utf-8')
self.yelp = open("cache_yelp.json", "r", encoding='utf-8')
self.top30 = open("top30.csv", "r")
def test_tripa_json_exist(self):
self.assertTrue(self.tripa.read())
def test_yelp_json_exist(self):
self.assertTrue(self.yelp.read())
def test_top30_csv_exist(self):
self.assertTrue(self.top30.read())
def tearDown(self):
self.tripa.close()
self.yelp.close()
self.top30.close()
class TestClass(unittest.TestCase):
def test_restaurant_class(self):
sample = Restaurant("The Lunch Room", 4.5, "$", "https://www.tripadvisor.com/Restaurant_Review-g29556-d4982890-Reviews-The_Lunch_Room-Ann_Arbor_Michigan.html")
self.assertEqual(sample.street, "123 Main St.")
self.assertEqual(sample.rating2, "0")
self.assertEqual(sample.__str__(), "The Lunch Room: 123 Main St., Ann Arbor, MI(11111)")
class TestWebCrawling(unittest.TestCase):
def test_get_rest_info(self):
restaurant_ls = get_rest_info("")
self.assertEqual(len(restaurant_ls), 30)
self.assertEqual(restaurant_ls[0].name, "The Lunch Room")
self.assertEqual(restaurant_ls[1].rating1, "4.5")
self.assertEqual(restaurant_ls[2].zip, "48104")
self.assertEqual(restaurant_ls[2].street, "117 W Washington St")
class TestMapping(unittest.TestCase):
    """Verify that rendering the restaurant map does not raise."""

    def test_plot_rests_map(self):
        try:
            plot_rests_map()
        # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
        # and hides the real failure reason from the test report.
        except Exception as e:
            self.fail(f"plot_rests_map() raised {e!r}")
# Run every TestCase in this module with per-test output when executed directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| pansyjia/Programming-Final-Project | si507_finalproject_test.py | si507_finalproject_test.py | py | 3,381 | python | en | code | 0 | github-code | 90 |
4668978872 | #Course: CSCI 4140
#Name: Huan-Yun Chen
#Date: 1/29/2018
#Tabs: 8
#The following program is program in Python 3 and Python IDLE
#==========================================================
import nltk
from nltk.corpus import brown
#import matplotlib
#matplotlib.use('TKagg')
from nltk import FreqDist, ConditionalFreqDist
#==========================================================
# NOTE(review): `result` is never used below — presumably a leftover; verify before removing.
result =[]
# Conditional frequency distribution: for each Brown-corpus genre (the condition),
# count every word occurring in that genre's texts.
fd = nltk.ConditionalFreqDist(
	(genre,word)
	for genre in brown.categories()
	for word in brown.words(categories=genre))
# The five genres compared, and one hand-picked topical word set per theme.
genres = ["news","reviews","religion","government","romance"]
newsList = ["crime","society","victim","safety"] #set of words for news
reviewsList =["good","wrong","right","correct"] #set of words for review
religionList =["god","believe","religion","heaven"] #set of words for religion
governmentList =["security","society","politics","nuclear"] #set of words for government
romanceList =["love","live","forever","life"] #set of words for romance
commonList=["could","would","can","do","does","should"] #set of words that is more often use in life
commonList2=["he","she","it","they","we","you","I"] #pronoun
# Print one frequency table per word set: rows = genres, columns = sample words.
print("News:")
fd.tabulate(conditions=genres, samples=newsList)
print("===================================================================")
print("Reviews:")
fd.tabulate(conditions=genres, samples=reviewsList)
print("===================================================================")
print("Religion:")
fd.tabulate(conditions=genres, samples=religionList)
print("===================================================================")
print("Govenment:")
fd.tabulate(conditions=genres, samples=governmentList)
print("===================================================================")
print("Romance:")
fd.tabulate(conditions=genres, samples=romanceList)
print("===================================================================")
print("Common:")
fd.tabulate(conditions=genres, samples=commonList)
print("===================================================================")
print("Pronoun:")
fd.tabulate(conditions=genres, samples=commonList2)
| oliver0616/my_work2 | Natural Language Processing/NLTK/Assignment/Assignment1/c2e19.py | c2e19.py | py | 2,093 | python | en | code | 1 | github-code | 90 |
7341978148 | # -*- coding: utf8 -*-
from telebot import TeleBot
from telebot.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton
import random
import time
import requests
from pathlib import Path
from keyboards import *
from date_validator import *
from word_validator import *
from answers import *
from adress_validator import *
from sqlite_req import main
from popularity_sights import most_popular_sights
''' will
1. Создание подборок по популярности (исходя из лайков в REACTIONS) -> (?) Отдельная reply-кнопка в /FIND
2. Сделать функцию случайная запись в /FIND -> Отдельная reply-кнопка в /FIND ✅
3. Сделать в канале динамически-обновляющиеся лайк и дизлайк под постами
'''
# main("CREATE TABLE Sights (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `area` TEXT, `year` INTEGER, `season` TEXT, `month` TEXT, `description` TEXT, `feedback` TEXT, `mark` INTEGER, `tags` TEXT, `date` TEXT, `userid` INTEGER, `src` TEXT);")
# main("CREATE TABLE Moders (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `moder_id` INTEGER, UNIQUE(moder_id));")
# main("CREATE TABLE Reactions (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `userid` INTEGER, `messageid` INTEGER, `flag` INTEGER);")
# main("DROP TABLE Reactions;")
# main("DELETE FROM Reactions;")
# main(f"INSERT INTO Moders (`moder_id`) VALUES ('806902493')")
# NOTE(review): hard-coded bot token committed to source — it should be loaded from an
# environment variable / secrets store, and this exposed token revoked via BotFather.
TOKEN_BOT = '5729656929:AAHSxglQG-DeuNCWOlENB91H8usjvoAzkps'
bot = TeleBot(TOKEN_BOT)
# Moderator chat ids loaded once at startup; rows are indexed as moder_id[0] below.
MODER_IDS = main(f"SELECT moder_id FROM Moders;")
def menu(message):
    """Send the top-level command menu and route the reply to get_menu_mess."""
    prompt = '⬇️<b>Выберите, что вы хотите сделать:\n</b>/new - Создать новую запись о месте\n\n/find - Подобрать подходящие вам места по фильтрам\n\n/feedback - Отзывы/предложения/Нашли баг\n\n/help - Помощь'
    bot.send_message(message.chat.id, prompt, parse_mode="html", reply_markup=ReplyKeyboardRemove())
    bot.register_next_step_handler(message, get_menu_mess)
def get_menu_mess(message):
    """Dispatch the user's reply to the main menu.

    Slash-commands route to their flows; a few small-talk phrases get a canned
    answer (randomly picked from the lists in answers.py); anything else
    re-shows the menu.
    """
    if message.content_type == 'text':
        if message.text == '/start':
            start(message)
        elif message.text == '/new':
            get_area(message)
        elif message.text == '/find':
            get_find_criterion(message)
        elif message.text == '/feedback':
            start_support(message)
        elif message.text == '/help':
            # NOTE(review): no `help` handler is visible in this part of the file —
            # if none exists elsewhere, this calls Python's builtin help(); verify.
            help(message)
        # Small talk: match by intersecting the message's words with known phrase word-sets.
        elif len(set(message.text.lower().split()) & set(["как", "дела"])) == 2:
            bot.send_message(message.chat.id, f'<b>{howareyou[random.randint(0, 3)]}</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
        elif len(set(message.text.lower().split()) & set(["что", "делаешь"])) == 2:
            bot.send_message(message.chat.id, f'<b>{whatareudo[random.randint(0, 3)]}</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
        elif len(set(message.text.lower().split()) & set(["как", "тебя", "зовут"])) == 3:
            bot.send_message(message.chat.id, f'<b>{botname[random.randint(0, 2)]}</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
        elif len(set(message.text.lower().split()) & set(["кто", "ты"])) == 2:
            bot.send_message(message.chat.id, f'<b>{whoareu[random.randint(0, 1)]}</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
@bot.message_handler(commands=['start'])
def start(message):
    """/start entry point: send the welcome/about text, then show the main menu."""
    bot.send_message(message.chat.id, '<b>Здравствуй, хочешь предложить или найти свою достопримечательность/место? Вперёд!</b>\n\nОчень актуально сейчас в связи с геополитической обстановкой в мире давать больше информации населению о привлекательности наших курортов, городов.\n\nНаша страна имеет невероятное количество возможностей для развития внутренней туристической отрасли.\n\nШирокая география, богатство культурных и природных ценностей дает нам возможность внутри страны получать новые впечатления, знания и эмоции. Множество уголков остаются неизведанными для некоторой аудитории нашей огромной Родины.\n\nЕсли непонятно, что нужно делать /help\n\nНаш канал с различными достопримечательностями и прочим: https://t.me/LocalSight73', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
    menu(message)
@bot.message_handler(commands=['new'])
def get_area(message):
    """/new entry point (wizard step 1): ask for the place's address or a map pin."""
    bot.send_message(message.chat.id, '1️⃣ <b>Напишите адрес места (с указанием города) или отправьте точку на карте:</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
    bot.register_next_step_handler(message, get_year)
def get_year(message):
    """Wizard step 1 → 2: validate the place, then ask for the year of the visit.

    Accepts either a text address (validated by check_words, geocoded by
    get_coords) or a Telegram location pin (reverse-geocoded by get_adress).
    On success registers get_season with "lon,lat" coordinates; on any failure
    returns the user to the main menu.
    """
    if message.content_type == 'text':
        if check_words(message.text.lower()):
            # Geocode once and reuse — the original called get_coords() twice,
            # doubling the geocoder work for every valid address.
            coords = get_coords(message.text)
            if coords:
                bot.send_message(message.chat.id, '2️⃣ <b>В каком году Вы посещали достопримечательность/место?</b>', reply_markup=kb_year, parse_mode="html")
                bot.register_next_step_handler(message, get_season, coords)
            else:
                bot.send_message(message.chat.id, '🙁<b>Извините, мне не удалось определить координаты места. Проверьте правильность написания адреса (не забудьте указывать город) или попробуйте отправить точку на карте.</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
        elif message.text.lower() == "🔙 назад":
            menu(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Текст может содержать только буквы русского алфавита, цифры и базовые знаки препинания</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    elif message.location is not None:
        # Build "longitude,latitude" — the format the rest of the pipeline expects.
        coord = str(message.location.longitude) + ',' + str(message.location.latitude)
        if get_adress(coord):
            bot.send_message(message.chat.id, '2️⃣ <b>В каком году Вы посещали достопримечательность/место?</b>', reply_markup=kb_year, parse_mode="html")
            bot.register_next_step_handler(message, get_season, coord)
        else:
            # Typo fixed in the user-facing message: "ыили" -> "или".
            bot.send_message(message.chat.id, '🙁<b>Извините, мне не удалось определить адрес места. Проверьте правильность написания координат или попробуйте отправить адрес места (не забудьте указывать город).</b>', reply_markup=kb_year, parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄📍<b>Следует отправлять текст или геолокацию. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_season(message, ms_area):
    """Wizard step 2 → 3: validate the chosen year, then ask for the season.

    ms_area: "lon,lat" coordinate string carried through the wizard.
    """
    if message.content_type == 'text':
        if message.text in ("2023", "2022", "2021", "2020", "2019", "2018", "2017", "2016", "2015"):
            bot.send_message(message.chat.id, '3️⃣ <b>В каком сезоне Вы посещали её/его?</b>', reply_markup=kb_season, parse_mode="html")
            bot.register_next_step_handler(message, get_month, ms_area, message.text)
        elif message.text.lower() == "🔙 назад":
            # Back to step 1: re-ask for the area; get_year will re-validate it.
            bot.send_message(message.chat.id, '1️⃣ <b>В каком районе находится достопримечательность/место?</b>', reply_markup=kb_area, parse_mode="html")
            bot.register_next_step_handler(message, get_year)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_month(message, ms_area, ms_year):
    """Wizard step 3 → 4: validate the season, then ask for the month of the visit."""
    if message.content_type == 'text':
        if message.text.lower() in ("зима", "весна", "лето", "осень"):
            bot.send_message(message.chat.id, '4️⃣ <b>В каком месяце Вы посещали достопримечательность/место?</b>', reply_markup=set_month(message.text.lower()), parse_mode="html")
            bot.register_next_step_handler(message, get_description, ms_area, ms_year, message.text)
        elif message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '2️⃣ <b>В каком году Вы посещали достопримечательность/место?</b>', reply_markup=kb_year, parse_mode="html")
            # Fix: forward the stored coordinates — the original passed message.text
            # ("🔙 назад") as ms_area, losing the place on back-navigation.
            bot.register_next_step_handler(message, get_season, ms_area)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def set_month(season):
    """Return the month ReplyKeyboard matching the chosen season.

    Unknown season values yield an empty keyboard, same as the original
    if/elif chain falling through every branch.
    """
    keyboard = ReplyKeyboardMarkup(resize_keyboard=True, selective=True)
    buttons_by_season = {
        "зима": btns_winter,
        "весна": btns_spring,
        "лето": btns_summer,
        "осень": btns_autumn,
    }
    for button in buttons_by_season.get(season, ()):
        keyboard.add(button)
    return keyboard
def get_description(message, ms_area, ms_year, ms_sesson):
    """Wizard step 4 → 5: validate the month, then ask for a free-text description."""
    if message.content_type == 'text':
        if message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '3️⃣ <b>В каком сезоне Вы посещали её/его?</b>', reply_markup=kb_season, parse_mode="html")
            # Fix: forward the stored year — the original passed message.text
            # ("🔙 назад") as ms_year, corrupting the record on back-navigation.
            bot.register_next_step_handler(message, get_month, ms_area, ms_year)
        elif message.text.lower() in ("декабрь", "январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь"):
            bot.send_message(message.chat.id, '5️⃣ <b>Опишите данную достопримечательность/место: </b>\n<em>Не менее 100 символов</em>\n<em>Текст может содержать только буквы русского алфавита, цифры и базовые знаки препинания</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, get_feedback, ms_area, ms_year, ms_sesson, message.text)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_feedback(message, ms_area, ms_year, ms_sesson, ms_month):
    """Wizard step 5 → 6: validate the description (length + allowed words), ask for a review.

    Fix: 'forbidden_words' is a truthy string, so the original's
    `if check_words(...)` branch caught it first and the forbidden-words check
    was unreachable — profanity slipped through. The equality test now runs first.
    """
    if message.content_type == 'text':
        if 100 <= len(message.text) <= 1000:
            verdict = check_words(message.text.lower())
            if verdict == 'forbidden_words':
                bot.send_message(message.chat.id, '🙁<b>Текст содержит недопустимые слова. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
            elif verdict:
                bot.send_message(message.chat.id, '6️⃣ <b>Напишите отзыв о ней/нём: </b>\n<em>Не менее 50 символов\nТекст может содержать только буквы русского алфавита, цифры и базовые знаки препинания</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                bot.register_next_step_handler(message, get_mark, ms_area, ms_year, ms_sesson, ms_month, message.text)
            else:
                bot.send_message(message.chat.id, '🙁<b>Текст может содержать только буквы русского алфавита, цифры и базовые знаки препинания. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, количество символов должно быть не менее 100 и не более 1000. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_mark(message, ms_area, ms_year, ms_sesson, ms_month, ms_description):
    """Wizard step 6 → 7: validate the review text, then ask for a 1-10 rating.

    Fixes:
    * forbidden-words branch was unreachable ('forbidden_words' is truthy and
      was swallowed by the generic truthiness check) — equality is tested first;
    * back-navigation passed message.text ("🔙 назад") as ms_month, corrupting
      the record — the stored ms_month is forwarded instead.
    """
    if message.content_type == 'text':
        if message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '5️⃣ <b>Опишите данную достопримечательность/место: </b>\n<em>Не менее 100 символов\nТекст может содержать только буквы русского алфавита, цифры и базовые знаки препинания</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, get_feedback, ms_area, ms_year, ms_sesson, ms_month)
        elif 50 <= len(message.text) <= 1000:
            verdict = check_words(message.text.lower())
            if verdict == 'forbidden_words':
                bot.send_message(message.chat.id, '🙁<b>Текст содержит недопустимые слова. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
            elif verdict:
                bot.send_message(message.chat.id, '7️⃣ <b>Ваша субъективная оценка </b>\n<em>От 1 до 10:</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                bot.register_next_step_handler(message, get_tags, ms_area, ms_year, ms_sesson, ms_month, ms_description, message.text)
            else:
                bot.send_message(message.chat.id, '🙁<b>Текст может содержать только буквы русского алфавита, цифры и базовые знаки препинания. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, количество символов должно быть не менее 50 и не более 1000. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_tags(message, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback):
    """Wizard step 7 → 8: validate the 1-10 rating, then ask for a single #tag."""
    if message.content_type == 'text':
        if message.text in ("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"):
            bot.send_message(message.chat.id, '8️⃣ <b>Напишите один тег (Через #), относящиеся к данному месту</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, choise_photo, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, message.text)
        elif message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '6️⃣ <b>Напишите отзыв о ней/нём: </b>\n<em>Не менее 50 символов</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            # Fix: forward the stored description — the original passed message.text
            # ("🔙 назад") as ms_description, corrupting the record on back-navigation.
            bot.register_next_step_handler(message, get_mark, ms_area, ms_year, ms_sesson, ms_month, ms_description)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, число должно быть от 1 до 10. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def choise_photo(message, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark):
    """Wizard step 8 → 9: validate the single #tag, then ask whether a photo exists."""
    if message.content_type == 'text':
        # Exactly one '#', at least two non-'#' characters, russian-only words.
        if '#' in message.text and len(message.text.replace('#', '')) >= 2 and check_words(message.text.replace('#', '').lower()) and message.text.lower().count('#') == 1:
            bot.send_message(message.chat.id, '9️⃣ <b>У вас есть фотография этой достопримечательности/места?</b>', reply_markup=kb_y_n, parse_mode="html")
            bot.register_next_step_handler(message, choise_moder, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, message.text)
        elif message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '7️⃣ <b>Ваша субъективная оценка </b>\n<em>От 1 до 10:</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            # Fix: forward the stored review — the original passed message.text
            # ("🔙 назад") as ms_feedback, corrupting the record on back-navigation.
            bot.register_next_step_handler(message, get_tags, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback)
        else:
            bot.send_message(message.chat.id, '🙁<b>Следует писать единственный тег через "#" и длинною не меньше 2 знаков, используя русские символы</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def choise_moder(message, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags):
    """Wizard step 9: branch on "has photo?" — collect the photo or go straight to submission."""
    if message.content_type == 'text':
        if message.text.lower() in ("✅да", "да"):
            bot.send_message(message.chat.id, '📸<b>Отправьте фотографию: </b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, get_photo, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags)
        elif message.text.lower() in ("❌нет", "нет"):
            bot.send_message(message.chat.id, '📥<b>Данные получены, отправить на модерацию?</b>', reply_markup=kb_y_n, parse_mode="html")
            bot.register_next_step_handler(message, insert_and_send, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags)
        elif message.text.lower() == "🔙 назад":
            bot.send_message(message.chat.id, '8️⃣ <b>Напишите один тег (Через #), относящиеся к данному месту</b>\n<em></em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            # Fix: forward the stored rating — the original passed message.text
            # ("🔙 назад") as ms_mark, corrupting the record on back-navigation.
            bot.register_next_step_handler(message, choise_photo, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark)
        else:
            bot.send_message(message.chat.id, '🫢<b>Выберите одно из предложенных ответов. Попробуйте снова</b>', reply_markup=kb_y_n, parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_photo(message, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags):
    """Step 9b: download the user's photo into files/<chat_id>/ and move to confirmation."""
    try:
        Path(f'files/{message.chat.id}/').mkdir(parents = True, exist_ok = True) # ensure per-user download folder exists
        if message.content_type == 'photo':
            # message.photo is ordered by size; the last entry is the largest variant.
            file_info = bot.get_file(message.photo[len(message.photo) - 1].file_id)
            downloaded_file = bot.download_file(file_info.file_path)
            # Timestamped local path to avoid collisions between uploads.
            src = f'files/{message.chat.id}/{datetime.datetime.now().strftime("%Y%m%d_%H%M")}{file_info.file_path.replace("photos/", "")}'
            bot.send_message(message.chat.id, '📥<b>Данные получены, отправить на модерацию?</b>', reply_markup=kb_y_n, parse_mode="html")
            with open(src, 'wb') as new_file: # persist the downloaded bytes
                new_file.write(downloaded_file)
            bot.register_next_step_handler(message, insert_and_send, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags, src)
        else:
            bot.send_message(message.chat.id, '🫢<b>Нужно отправлять изображение, а не прочие типы данных. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    except Exception as e:
        # Any failure (Telegram API, filesystem) falls back to the menu; error goes to stdout.
        bot.send_message(message.chat.id, '🫢<b>Произошла ошибка при сохранении фотографии. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
        print(f'!!!{e}')
# NOTE(review): missing '@' — this line builds a decorator and immediately discards
# it; no handler is registered. Either prefix with '@' above a function or remove.
bot.message_handler(content_types=["text"])
def insert_and_send(message, ms_area, ms_year, ms_sesson, ms_month, ms_description, ms_feedback, ms_mark, ms_tags, src = False):
    """Final wizard step: on confirmation, store the sight once and notify every moderator.

    Fixes over the original:
    * `datatime.now()` was a NameError (typo for datetime) — the handler crashed
      before anything was written to the DB;
    * the INSERT/SELECT ran inside the moderator loop, creating one duplicate
      DB row per moderator — the row is now written exactly once;
    * the photo file handle is now closed via a `with` block.

    src: saved photo path, or False when the user sent no photo ('0' in the DB).
    """
    if message.content_type == 'text':
        if message.text.lower() in ("✅да", "да"):
            bot.send_message(message.chat.id, '🆗 <b>Спасибо за то, что внесли данные. Мы уведовим вас, если данные пройдут модерацию</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            date = datetime.datetime.now().strftime("%d.%m.%Y")
            src_value = src if src else '0'
            # NOTE(review): values are interpolated straight into SQL — injection-prone;
            # main() should be extended to accept bound parameters.
            main(f"INSERT INTO Sights (`area`, `year`, `season`, `month`, `description`, `feedback`, `mark`, `tags`, `date`, `userid`, `src`) VALUES ('{ms_area}', '{ms_year}', '{ms_sesson}', '{ms_month}', '{ms_description}', '{ms_feedback}', '{ms_mark}', '{ms_tags}', '{date}', '{message.from_user.id}', '{src_value}')")
            dates_id = main(f"SELECT id FROM Sights WHERE area='{ms_area}' AND year='{ms_year}' AND season='{ms_sesson}' AND month='{ms_month}' AND description='{ms_description}' AND feedback='{ms_feedback}' AND mark='{ms_mark}' AND tags='{ms_tags}' AND userid='{message.from_user.id}' AND src='{src_value}';")
            if dates_id:
                # Forward the pending record to every moderator with approve/ignore buttons.
                for moder_id in MODER_IDS:
                    if src:
                        with open(src, 'rb') as photo:
                            bot.send_photo(moder_id[0], photo)
                    bot.send_location(moder_id[0], longitude=ms_area.split(',')[0], latitude=ms_area.split(',')[1])
                    bot.send_message(moder_id[0], f"<b>Локация:</b> {get_adress(ms_area)}\n\n<b>Год:</b> {ms_year}\n\n<b>Сезон:</b> {ms_sesson}\n\n<b>Месяц:</b> {ms_month}\n\n<b>Описание:</b> {ms_description}\n\n<b>Отзыв:</b> {ms_feedback}\n\n<b>Субъективная оценка пользователя:</b> {ms_mark}\n\n{ms_tags}", reply_markup = mod_markup(dates_id), parse_mode="html")
            else:
                bot.send_message(message.chat.id, '🫢<b>Произошла ошибка при сохранении фотографии. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                menu(message)
                print("dates_id не найдено")
        elif message.text.lower() in ("❌нет", "нет"):
            bot.send_message(message.chat.id, '🔜<b>Будем ждать вас снова!</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            start(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
@bot.message_handler(commands=['feedback'])
def start_support(message):
    """/feedback entry point: ask for the feedback text and route it to get_support."""
    prompt = '↘️<b>Оставьте ваше сообщение:</b>\n<em>Я передам его модераторам бота</em>'
    bot.send_message(message.chat.id, prompt, parse_mode="html", reply_markup=ReplyKeyboardRemove())
    bot.register_next_step_handler(message, get_support)
def get_support(message):
    """Thank the user and forward their feedback message to every moderator."""
    bot.send_message(message.chat.id, '😇<b>Cпасибо за обратную связь, мы ценим это!</b>', parse_mode="html")
    for moder_id in MODER_IDS:
        bot.send_message(moder_id[0], f"<b>Сообщение пользователя</b> @{message.from_user.username}", parse_mode="html", reply_markup=ReplyKeyboardRemove())
        bot.forward_message(moder_id[0], message.chat.id, message.message_id)
# Кнопки для модератора
def mod_markup(dates_id):
    """Inline keyboard for moderators: approve or ignore the pending sight.

    dates_id: result rows from the Sights id lookup; dates_id[0][0] is the sight id
    embedded in callback_data as "1 <id> Yes|No" (handled by get_call).
    """
    markup_y_n = InlineKeyboardMarkup()
    markup_y_n.row_width = 2
    markup_y_n.add(
        InlineKeyboardButton("✅Запостить", callback_data=f"1 {dates_id[0][0]} Yes"),
        # Button-label typo fixed: was "Проигноривовать" (callback_data unchanged).
        InlineKeyboardButton("❌Проигнорировать", callback_data=f"1 {dates_id[0][0]} No"),
    )
    return markup_y_n
def reaction_markup(postid):
    """Build the like/dislike inline keyboard with current counts for a post."""
    likes, dislikes = get_reactions_from_bd(postid)
    markup = InlineKeyboardMarkup()
    markup.row_width = 2
    like_button = InlineKeyboardButton(f"🔥 {likes}", callback_data=f"reaction {postid} 1")
    dislike_button = InlineKeyboardButton(f"❄ {dislikes}", callback_data=f"reaction {postid} 0")
    markup.add(like_button, dislike_button)
    return markup
def get_reactions_from_bd(postid):
    """Return [likes, dislikes] for a post from the Reactions table.

    Each row's flag is 1 for a like and 0 for a dislike, so likes is the sum of
    flags and dislikes is the remainder of the row count.
    """
    flags = main(f'SELECT flag FROM Reactions WHERE messageid = "{postid}"') # [(1,), (1,), (1,)]
    likes = sum(row[0] for row in flags)
    return [likes, len(flags) - likes]
# Обработчик call_back
@bot.callback_query_handler(func=lambda call: True)
def get_call(call):
    """Single callback dispatcher for all inline buttons.

    call.data formats:
      "1 <sight_id> Yes|No"    — moderator decision on a pending sight
      "reaction <post_id> 1|0" — like/dislike press under a post
    """
    call_data = call.data.split()
    if call_data[0] == '1':
        print(call_data)
        data = main(f"SELECT * FROM Sights WHERE `id`={call_data[1]}")
        area, year, season, month, description, feedback, mark, tags, src = data[0][1], data[0][2], data[0][3], data[0][4], data[0][5], data[0][6], data[0][7], data[0][8], data[0][11]
        # NOTE(review): the channel post below is sent inside the moderator loop, so
        # with more than one moderator the same sight is published repeatedly — verify.
        for moder_id in MODER_IDS:
            if call_data[2] == "Yes":
                if src != '0':
                    bot.send_photo(-1001701836271, open(src, 'rb'))
                bot.send_location(-1001701836271, longitude=area.split(',')[0], latitude=area.split(',')[1])
                bot.send_message(-1001701836271, f"<b>🏢Район: </b> {get_adress(area)}\n\n<b>⏳Год: </b> {year}\n\n<b>🏝️Сезон: </b> {season}\n\n<b>📅Месяц: </b> {month}\n\n<b>📝Описание: </b> {description}\n\n<b>📈Отзыв: </b> {feedback}\n\n<b>🌟Оценка пользователя: </b> {mark}\n\n{tags}", parse_mode="html")
                bot.send_message(moder_id[0], "✅<b>Пост опубликован</b>", reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                bot.send_message(data[0][10], '✅<b>Спасибо, ваши данные успешно прошли модерацию и опубликованы на канал!</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                bot.answer_callback_query(call.id, "OK, POST PUBLISHED")
            elif call_data[2] == "No":
                # Rejected: notify the author (userid is column 10) and drop the row.
                bot.send_message(data[0][10], '❌<b>Простите, данные не прошли модерацию. Повторите попытку</b>', parse_mode="html")
                bot.send_message(moder_id[0], "❌<b>Пост проигнорирован</b>", reply_markup=ReplyKeyboardRemove(), parse_mode="html")
                main(f"DELETE FROM Sights WHERE `id` = {call_data[1]};")
                bot.answer_callback_query(call.id, "OK, POST IGNORED")
    if call_data[0] == 'reaction': # ['reaction', '1', '1']
        userid_to_check = main(f"SELECT userid, messageid, flag FROM Reactions WHERE messageid = '{call_data[1]}' AND userid = '{call.message.chat.id}'")
        if userid_to_check:
            if tuple([call.message.chat.id, int(call_data[1])]) == userid_to_check[0][:2]: # compare the (userid, messageid) pair — one reaction per user per post
                bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text="Вы уже поставили реакцию!")
        else:
            # First reaction from this user: update the keyboard counts and persist the vote.
            bot.edit_message_reply_markup(chat_id = call.message.chat.id, message_id=call.message.message_id, reply_markup = recreation_markup_plus(call_data[1], call_data[2]))
            main(f"INSERT INTO Reactions (`userid`, `messageid`, `flag`) VALUES ('{call.message.chat.id}', '{call_data[1]}', '{call_data[2]}')")
            if call_data[2] == "1":
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text="LIKE")
            elif call_data[2] == "0":
                bot.answer_callback_query(callback_query_id=call.id, show_alert=False, text="DISLIKE")
            else:
                bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text="Произошла ошибка, попробуйте снова!")
def recreation_markup_plus(postid, flag):
    """Return the reaction keyboard with the freshly-cast vote counted in.

    flag: '1' adds one like, '0' adds one dislike to the stored counts.
    Fix: the original referenced `recreation_markup_reaction` in the
    negative-count branch before it was ever assigned (NameError); the markup
    is now created up front so every path returns a valid keyboard.
    """
    likes, dislikes = get_reactions_from_bd(postid)
    recreation_markup_reaction = InlineKeyboardMarkup()
    if likes >= 0 and dislikes >= 0:
        if flag == '1':
            recreation_markup_reaction.row(InlineKeyboardButton(f"🔥 {likes + 1}", callback_data=f"reaction {postid} 1"), InlineKeyboardButton(f"❄ {dislikes}", callback_data=f"reaction {postid} 0"))
        elif flag == '0':
            recreation_markup_reaction.row(InlineKeyboardButton(f"🔥 {likes}", callback_data=f"reaction {postid} 1"), InlineKeyboardButton(f"❄ {dislikes + 1}", callback_data=f"reaction {postid} 0"))
        else:
            print('Error recreation_markup_plus')
        return recreation_markup_reaction
    else:
        # Counts from the DB can never be negative; kept as a defensive fallback.
        print('Кол-во лайков меньше 0')
        recreation_markup_reaction.row(InlineKeyboardButton(f"🔥 {likes}", callback_data=f"reaction {postid} 1"), InlineKeyboardButton(f"❄ {dislikes}", callback_data=f"reaction {postid} 0"))
        return recreation_markup_reaction
@bot.message_handler(commands=['find'])
def get_find_criterion(message):
    """/find entry point: show the search-criterion keyboard."""
    bot.send_message(message.chat.id, '🔍<b>Выберите, по чему вы хотите найти место</b>', reply_markup=kb_criterion, parse_mode="html")
    bot.register_next_step_handler(message, processing_criterion)
def processing_criterion(message):
    """Route the chosen /find criterion to its search flow.

    NOTE(review): for_date and for_mark are not visible in this part of the
    file — confirm they are defined elsewhere in the module.
    """
    if message.content_type == 'text':
        if message.text.lower() in ("дата добавления", "📆 дата добавления"):
            bot.send_message(message.chat.id , '📆<b>Введите дату в формате ДД.ММ.ГГГГ</b>\n<em>Или ДД.ММ.ГГГГ-ДД.ММ.ГГГГ</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, for_date)
        elif message.text.lower() in ("оценка", "🌟 оценка"):
            bot.send_message(message.chat.id , '🌟<b>Напишите число или промежуток чисел</b>\n<em>Через "-"</em>\n<em>От 1 до 10:</em>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, for_mark)
        elif message.text.lower() in ("тег", "#⃣ тег"):
            bot.send_message(message.chat.id , '#⃣<b>Напишите тег через "#"</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            bot.register_next_step_handler(message, for_tag)
        elif message.text.lower() in ("популярное", "🔥 популярное"):
            for_popularity(message)
        elif message.text.lower() in ("случайно", "🎲 случайно"):
            for_random(message)
        elif message.text.lower() == "🔙 назад":
            menu(message)
        else:
            bot.send_message(message.chat.id , '🙁<b>Пожалуйста, выберите критерий из списка</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def send_posts(message, posts): # TODO: page by 10 places at once (currently 1)
    """Send the next page of matched sights and offer to load more.

    posts: rows shaped (id, area, year, season, month, description, feedback,
    mark, tags, src); consumed front-to-back across "load more" rounds.
    """
    if posts:
        for post in posts[:1]: # [:5]
            print("photo save path:", post[9])
            if post[9] != '0':
                bot.send_photo(message.chat.id, open(post[9], 'rb'))
            bot.send_location(message.chat.id, longitude=post[1].split(',')[0], latitude=post[1].split(',')[1])
            send_post = ''
            # Assemble the caption from non-NULL fields only: a field whose rendered
            # value ends in the literal 'None' is skipped below.
            check_post = {
                'area': f'<b>🏢Район: </b> {get_adress(post[1])}',
                'year': f'\n\n<b>⏳Год: </b> {post[2]}',
                'season': f'\n\n<b>🏝️Сезон: </b> {post[3]}',
                'month': f'\n\n<b>📅Месяц: </b> {post[4]}',
                'description': f'\n\n<b>📝Описание: </b> {post[5]}',
                'feedback': f'\n\n<b>📈Отзыв: </b> {post[6]}',
                'mark': f'\n\n<b>🌟Оценка: </b> {post[7]}',
                'tags': f'\n\n{post[8]}'
            }
            for el in check_post:
                if check_post[el].split()[-1] != 'None':
                    send_post += check_post[el]
            bot.send_message(message.chat.id, send_post, reply_markup = reaction_markup(post[0]), parse_mode="html")
            # NOTE(review): remove-by-value — with duplicate rows this could drop the
            # wrong element; posts.pop(0) would be the safer equivalent. Verify.
            posts.remove(posts[0]) # [x:]
        bot.send_message(message.chat.id , '🔄<b>Загрузить еще?</b>', reply_markup=kb_y_n, parse_mode="html")
        bot.register_next_step_handler(message, get_answer_to_send, posts)
    else:
        bot.send_message(message.chat.id , '🔚<b>Записей по данной категории нет</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def get_answer_to_send(message, posts):
    """Handle the yes/no reply to the "load more posts?" prompt."""
    if message.content_type != 'text':
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
        return
    answer = message.text.lower()
    if answer in ("✅да", "да"):
        send_posts(message, posts)
    elif answer in ("❌нет", "нет"):
        bot.send_message(message.chat.id, '😊<b>Надеюсь, вы нашли место по душе</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
    else:
        bot.send_message(message.chat.id, '🙁<b>Извините, не понимаю Вас. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def for_popularity(message):
    """Show sights ordered by popularity."""
    if message.content_type != 'text':
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
        return
    send_posts(message, most_popular_sights())
def for_random(message):
    """Show all sights in a random order."""
    if message.content_type != 'text':
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
        return
    all_sights = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights')
    send_posts(message, random.sample(all_sights, len(all_sights)))
def for_tag(message):
    """Find sights whose tags column contains the user-supplied '#tag'."""
    if message.content_type == 'text':
        # Accept only messages that contain '#', have at least 2 non-'#' chars
        # and pass check_words (presumably a charset filter — TODO confirm).
        if '#' in message.text.lower() and len(message.text.lower().replace('#', '')) > 1 and check_words(message.text.lower().replace('#', '')):
            # NOTE(review): user text is interpolated straight into the SQL
            # string — SQL injection risk unless check_words restricts the
            # charset; prefer a parameterized query in main() if possible.
            posts = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights WHERE tags LIKE "%{message.text.lower()}%"')
            send_posts(message, posts)
        elif message.text.lower() == "назад":
            # "назад" returns to the criterion menu
            processing_criterion(message)
        else:
            bot.send_message(message.chat.id, '🙁<b>Следует писать тег через "#" и длинною не меньше 2 знаков, используя русские символы</b>', parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def for_mark(message):
    """Filter sights by mark: either a single value 1..10 or a "low-high" range."""
    if message.content_type == 'text':
        if message.text.lower() == "назад":
            processing_criterion(message)
        elif "-" in message.text:
            valid_marks = ("1", "2", "3", "4", "5", "6", "7", "8", "9", "10")
            if message.text.split('-')[0] in valid_marks:
                if message.text.split('-')[1] in valid_marks:
                    mark_first = message.text.split('-')[0]
                    mark_second = message.text.split('-')[1]
                    # BUG FIX: the upper bound used to be compared against the
                    # "date" column; both bounds must apply to "mark".
                    # NOTE(review): marks are compared as quoted strings, so
                    # "10" sorts before "2" — confirm the column type in the
                    # schema and whether a numeric comparison is intended.
                    posts = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights WHERE mark >= "{mark_first}" AND mark <= "{mark_second}"')
                    send_posts(message, posts)
                else:
                    bot.send_message(message.chat.id, '🙁<b>Следует вводить число или промежуток чисел через "-". Попробуйте снова</b>', parse_mode="html")
                    menu(message)
            else:
                bot.send_message(message.chat.id, '🙁<b>Следует вводить число или промежуток чисел через "-". Попробуйте снова</b>', parse_mode="html")
                menu(message)
        elif message.text in ("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"):
            posts = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights WHERE mark LIKE "{message.text}"')
            send_posts(message, posts)
        else:
            bot.send_message(message.chat.id, '🙁<b>Следует вводить число или промежуток чисел через "-". Попробуйте снова</b>', parse_mode="html")
            menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
def for_date(message):
    """Filter sights by a date ("DD.MM.YYYY") or a date range ("D1-D2").

    NOTE(review): if no whitespace-separated token contains '.', the loop body
    never runs and the user gets no reply at all — consider a fallback message.
    """
    if message.content_type == 'text':
        if message.text.lower() == "назад":
            processing_criterion(message)
        else:
            # scan tokens; a token with '.' is treated as a date or date range
            for i in message.text.split():
                if '.' in i:
                    dates = i.split('-')
                    if len(dates) == 1:
                        # single date: substring match on the date column
                        ms_date_part = is_date(dates[0])
                        if ms_date_part:
                            posts = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights WHERE date LIKE "%{ms_date_part}%"') # [(), (), ()]
                            send_posts(message, posts)
                        else:
                            bot.send_message(message.chat.id, '🙁<b>Извините, мне не удалось найти дату/даты в сообщении. Попробуйте снова</b>', parse_mode="html")
                            menu(message)
                    elif len(dates) == 2:
                        # "from-to" range: both ends must parse as dates
                        ms_date_part1 = is_date(dates[0])
                        ms_date_part2 = is_date(dates[1])
                        if ms_date_part1 and ms_date_part2:
                            posts = main(f'SELECT id, area, year, season, month, description, feedback, mark, tags, src FROM Sights WHERE date >= "{ms_date_part1}" AND date <= "{ms_date_part2}"')
                            send_posts(message, posts)
                        else:
                            bot.send_message(message.chat.id, '🙁<b>Извините, мне не удалось найти дату/даты в сообщении. Попробуйте снова</b>', parse_mode="html")
                            menu(message)
                    else:
                        bot.send_message(message.chat.id, '🙁<b>Следует вводить дату в формате ДД.ММ.ГГГГ. Попробуйте снова</b>', parse_mode="html")
                        menu(message)
    else:
        bot.send_message(message.chat.id, '📄<b>Следует отправлять текст. Попробуйте снова</b>', reply_markup=ReplyKeyboardRemove(), parse_mode="html")
        menu(message)
@bot.message_handler(commands=['help'])
def help(message):
    """Reply to /help with the help text, then route the next message to the menu."""
    # NOTE: the name shadows the builtin help(); handlers are registered by
    # reference and the name is not called elsewhere, so this is harmless.
    bot.send_message(message.chat.id, help_answer, parse_mode="html")
    bot.register_next_step_handler(message, get_menu_mess)
# Keep the bot alive: restart long polling after transient (network) errors.
while True:
    try:
        bot.polling(none_stop=True, timeout=90)
    except Exception as e:
        # broad catch is intentional at this top-level boundary; log and retry
        print(datetime.datetime.now(), e)
        time.sleep(5)
        continue
18278525463 | ####################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: buildtools.py
## Authors: Chris Lovett
##
## Requires: Python 3.x
##
####################################################################################################
import subprocess
import os
import json
class EllBuildTools:
    """Locates and drives the ELL build toolchain (compile, swig, llc, opt).

    Tool locations are read from <ell_root>/build/tools/tools.json, which is
    produced by building the ELL project.
    """

    def __init__(self, ell_root, verbose = False):
        """ell_root: path to the ELL repository root (must contain 'build')."""
        self.verbose = verbose
        self.ell_root = ell_root
        self.build_root = os.path.join(self.ell_root, "build")
        self.compiler = None
        self.swigexe = None
        self.llcexe = None
        self.optexe = None
        self.blas = None
        if not os.path.isdir(self.build_root):
            raise Exception("Could not find '%s', please make sure to build the ELL project first" % (self.build_root))
        self.find_tools()

    def find_tools(self):
        """Load and validate the tool paths from build/tools/tools.json."""
        jsonPath = os.path.join(self.build_root, "tools/tools.json")
        if not os.path.isfile(jsonPath):
            raise Exception("Could not find build output: " + jsonPath)
        with open(jsonPath) as f:
            self.tools = json.loads(f.read())
        self.compiler = self.tools['compile']
        if self.compiler == "":
            raise Exception("tools.json is missing compiler info")
        self.swigexe = self.tools['swig']
        if self.swigexe == "":
            raise Exception("tools.json is missing swig info")
        self.llcexe = self.tools['llc']
        if self.llcexe == "":
            raise Exception("tools.json is missing llc info")
        self.optexe = self.tools['opt']
        if self.optexe == "":
            raise Exception("tools.json is missing opt info")
        if ("blas" in self.tools):
            self.blas = self.tools['blas'] # this one can be empty.

    def run(self, command, print_output=True):
        """Run *command* (argv list) and return its combined stdout+stderr text.

        Echoes each line when print_output or self.verbose is set; raises
        Exception when the process exits with a non-zero return code.
        """
        if (self.verbose):
            print(" ".join(command))
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines = True)
        output = ''
        for line in proc.stdout:
            output += line
            # BUG FIX: 'verbose' was referenced as a bare name here (NameError
            # as soon as print_output was False); it is an instance attribute.
            if print_output or self.verbose:
                print(line.strip("\n"))
        for line in proc.stderr:
            output += line
            if print_output or self.verbose:
                print(line.strip("\n"))
        proc.wait()
        if not proc.returncode == 0:
            raise Exception(command[0] + " failed: " + output)
        return output

    def swig(self, output_dir, model_name, language):
        """Generate *language* bindings for the model's SWIG interface file."""
        # swig -python -modern -c++ -Fmicrosoft -py3 -outdir . -c++ -I%ELL_ROOT%/interfaces/common/include -I%ELL_ROOT%/interfaces/common -I%ELL_ROOT%/libraries/emitters/include -o _darknetReferencePYTHON_wrap.cxx darknetReference.i
        args = [self.swigexe,
                '-' + language,
                '-c++',
                '-Fmicrosoft']
        if language == "python":
            args = args + ["-py3"]
        if language == "javascript":
            args = args + ["-v8"]
        args = args + ['-outdir', output_dir,
                       '-I' + os.path.join(self.ell_root, 'interfaces/common'),
                       '-I' + os.path.join(self.ell_root, 'interfaces/common/include'),
                       '-I' + os.path.join(self.ell_root, 'libraries/emitters/include'),
                       '-o', os.path.join(output_dir, model_name + language.upper() + '_wrap.cxx'),
                       os.path.join(output_dir, model_name + ".i")
                       ]
        print("generating " + language + " interfaces for " + model_name + " in " + output_dir)
        self.run(args)

    def get_llc_options(self, target):
        """Return llc flags for *target*: 'pi3', 'pi0', 'aarch64'/'pi3_64' or host."""
        common = ["-filetype=obj", "-O3"]
        # arch processing
        if target == "pi3":  # Raspberry Pi 3
            return common + ["-mtriple=armv7-linux-gnueabihf", "-mcpu=cortex-a53", "-relocation-model=pic"]
        elif target == "pi0":  # Raspberry Pi Zero
            return common + ["-mtriple=arm-linux-gnueabihf", "-relocation-model=pic"]
        elif target == "aarch64" or target == "pi3_64":  # arm64 Linux
            return common + ["-mtriple=aarch64-unknown-linux-gnu", "-relocation-model=pic"]
        else:  # host
            return common + ["-relocation-model=pic"]

    def llc(self, output_dir, model_name, target):
        """Compile the optimized bitcode (<model>.opt.bc) to an object file."""
        # llc -filetype=obj _darknetReference.ll -O3 -mtriple=armv7-linux-gnueabihf -mcpu=cortex-a53 -relocation-model=pic
        args = [self.llcexe,
                os.path.join(output_dir, model_name + ".opt.bc"),
                "-o", os.path.join(output_dir, model_name + ".obj"),
                ]
        args = args + self.get_llc_options(target)
        print("running llc...")
        self.run(args)

    def opt(self, output_dir, model_name):
        """Optimize the model bitcode (<model>.bc -> <model>.opt.bc) at -O3."""
        # opt compiled_model.ll -o compiled_model_opt.ll -O3
        args = [self.optexe,
                os.path.join(output_dir, model_name + ".bc"),
                "-o", os.path.join(output_dir, model_name + ".opt.bc"),
                "-O3"
                ]
        print("running opt...")
        self.run(args)

    def compile(self, model_file, func_name, model_name, target, output_dir, useBlas=False, profile=False, fuseLinearOps=True):
        """Invoke the ELL model compiler, emitting bitcode and SWIG interfaces."""
        args = [self.compiler,
                "-imap",
                model_file,
                "-cfn", func_name,
                "-cmn", model_name,
                "--bitcode",
                "--swig",
                "--target", target,
                "-od", output_dir,
                "--fuseLinearOps", str(fuseLinearOps)
                ]
        args.append("--blas")
        hasBlas = bool(useBlas)
        # on the host target, BLAS can only be used if the build found one
        if target == "host" and hasBlas and not self.blas:
            hasBlas = False
        args.append(str(hasBlas).lower())
        if profile:
            args.append("--profile")
        print("compiling model...")
        self.run(args)
| Crott/ELL | tools/utilities/pythonlibs/buildtools.py | buildtools.py | py | 5,899 | python | en | code | null | github-code | 90 |
41227617629 | from easy_wave import AWG_Writer, REAL_CHANNELS
from wave_library import Empty
import pyqtgraph as pg
def plot_wave(wave, plot_zeros=True, downsample=10, downsample_mode='peak'):
    """Plot each channel of *wave* as a vertically stacked trace in pyqtgraph.

    wave: waveform object exposing .chs, .t and .generate(sample_rate, channel).
    plot_zeros: when True, channels not present in the wave are drawn too, and
        an extra vertical gap is inserted after every third channel.
    downsample / downsample_mode: forwarded to pyqtgraph's setDownsampling.
    Returns the GraphicsWindow containing the plot.
    """
    win = pg.GraphicsWindow()
    # map channel -> pen color via the first element of the enum's value tuple
    cs = lambda ch: ['y','b','#FF00FF','g'][ch.value[0]-1]
    plt = win.addPlot()
    plt.hideAxis('left')
    # Optimize for large data
    plt.setClipToView(True)
    if downsample:
        plt.setDownsampling(ds=downsample, auto=True, mode=downsample_mode)
    dy = 0
    offset=3
    # stack traces downward: each channel is shifted by -offset from the last
    for i,ch in enumerate(REAL_CHANNELS):
        if ch in wave.chs or plot_zeros:
            # samples generated at 1 GHz -- TODO confirm the rate is intended
            ts, ys = wave.generate(1e9, ch)
            ys = ys.astype(float) + dy
            plt.plot(ts, ys, pen=cs(ch), fillLevel = dy, brush=(50,50,200,100))
        dy -= offset
        if plot_zeros and (i+1)%3==0:
            dy-=1
    #Set to zooming limit
    plt.vb.setLimits(yMax=1.5, yMin=dy, xMin=0, xMax=wave.t)
    # Add crosshair
    label = pg.TextItem("t=0.000 us", anchor=(0,0))
    vLine = pg.InfiniteLine(angle=90, movable=False)
    plt.addItem(vLine)
    label.setParentItem(plt)
    def mouseMoved(pos):
        # track the cursor with a vertical line and a time readout
        if plt.sceneBoundingRect().contains(pos):
            mousePoint = plt.vb.mapSceneToView(pos)
            if 0<mousePoint.x()<wave.t:
                # x is converted to microseconds (assumes x axis is seconds)
                label.setText("t={:.3f} us".format(mousePoint.x()*1e6))
                vLine.setPos(mousePoint.x())
    # proxy = pg.SignalProxy(plt_item.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
    plt.scene().sigMouseMoved.connect(mouseMoved)
    return win
def plot_line(writer, line, plot_zeros=True, downsample=10, downsample_mode='peak'):
    """Plot the waveform of the 1-indexed AWG writer line *line*."""
    return plot_wave(writer.lines[line - 1]['waveform'], plot_zeros=plot_zeros,
                     downsample=downsample, downsample_mode=downsample_mode)
def plot_block(writer, block, plot_zeros=True, downsample=10, downsample_mode='peak'):
    """Plot the combined waveform of one block of *writer*.

    block: either an integer index into writer.blocks or the block's name.
    Raises TypeError for any other type.
    """
    if isinstance(block, int):
        block_name = list(writer.blocks.keys())[block]
    elif isinstance(block, str):
        block_name = block
    else:
        # BUG FIX: typo "Unrecongnize"; TypeError (an Exception subclass) keeps
        # any existing broad `except Exception` handlers working.
        raise TypeError("Unrecognized type for <block> to be plotted")
    b = writer.blocks[block_name]
    wave = Empty()
    # accumulate every line's waveform, repeated as configured
    for lname, line in b.items():
        wave += line['waveform'] * line['repeat']
    return plot_wave(wave, plot_zeros=plot_zeros, downsample=downsample, downsample_mode=downsample_mode)
# n = string length, k = max number of zero-runs that may be flipped to ones
n,k=map(int, input().split())
s=input()
def rle(string):
    """Run-length encode *string* into a list of [char, run_length] pairs."""
    runs = []
    current = string[0]
    count = 1
    for ch in string[1:]:
        if ch == current:
            count += 1
        else:
            runs.append([current, count])
            current, count = ch, 1
    runs.append([current, count])
    return runs
# Sliding window over the run-length encoding: find the longest substring
# containing at most k runs of '0' (those runs can be flipped to '1').
rle_s=rle(s)
ans=-1
left=0
wa=0
# seed the window with the first run; cnt counts zero-runs inside the window,
# wa is the total character length of the window
if rle_s[0][0]=="0":
    cnt=1
    wa=rle_s[0][1]
    right=1
else:
    cnt=0
    right=0
# single-run string: if we may flip at least one run the answer is the whole s
if len(rle_s)==1 and k>=1:
    print(rle_s[0][1])
    exit()
while right<len(rle_s):
    # print(right, left, wa, cnt)
    if cnt==k:
        # budget exhausted: extend only over '1'-runs, otherwise shrink left
        if rle_s[right][0]=="0":
            if rle_s[left][0]=="0":
                wa-=rle_s[left][1]
                cnt-=1
            else:
                wa-=rle_s[left][1]
            left+=1
        else:
            wa+=rle_s[right][1]
            right+=1
            ans=max(ans, wa)
    elif cnt<k:
        # budget available: absorb the next run regardless of its symbol
        if rle_s[right][0]=="0":
            cnt+=1
            wa+=rle_s[right][1]
        else:
            wa+=rle_s[right][1]
        right+=1
        ans=max(ans, wa)
    else:
        # over budget: shrink from the left until cnt <= k again
        if rle_s[left][0]=="0":
            wa-=rle_s[left][1]
            cnt-=1
        else:
            wa-=rle_s[left][1]
        left+=1
print(ans)
37137940616 | from proj1_helpers import *
from implementations import *
# Train a ridge-regression model on the Higgs dataset and write a submission.
DATA_TRAIN_PATH = '../data/train.csv'
y_train, x_train, ids_train = load_csv_data(DATA_TRAIN_PATH)
# header row, skipping the id and label columns
column_names = np.genfromtxt(DATA_TRAIN_PATH, delimiter=",", dtype=str)[0, 2:]
# FEATURE PROCESSING
#handle invalid values
x_train, column_names = handle_invalid(x_train, column_names)
#remove correlated features
to_remove = ['DER_pt_h', 'DER_sum_pt', 'PRI_met_sumet', 'PRI_jet_all_pt', 'DER_mass_vis']
to_remove = [np.where(column_names == name)[0][0] for name in to_remove]
to_remove = np.array(to_remove)
x_train, column_names = remove_columns(x_train, to_remove), np.delete(column_names, to_remove)
#remove outliers (any row with a value > 3 std devs in some column)
outlier_row_ids = set()
for i in range(x_train.shape[1]):
    outlier_row_ids.update(detect_outlier(x_train[:, i], 3))
x_outlier_free = np.delete(x_train, list(outlier_row_ids), axis=0)
y_outlier_free = np.delete(y_train, list(outlier_row_ids), axis=0)
#Feature expansion: pairwise polynomial terms (deg 1-2) + per-column powers (deg 3-12)
pairwise_poly = tuple([expand_poly(x_outlier_free, i) for i in range(1, 3)])
pairwise_poly = np.hstack(pairwise_poly)
col_polynomials = tuple([construct_poly(x_outlier_free, i) for i in range(3, 13)])
col_polynomials = np.hstack(col_polynomials)
x_expanded = np.hstack((pairwise_poly, col_polynomials))
#CREATE MODEL
x_expanded_std, x_exp_mean, x_exp_std = standardize(x_expanded)
# best lambda found by cross-validation
best_ridge_params = {
    "lambda_": 1e-09,
}
submit_weights, _ = ridge_regression(y_outlier_free, x_expanded_std, **best_ridge_params)
#Creating submission
DATA_TEST_PATH = '../data/test.csv'
_, x_test, ids_test = load_csv_data(DATA_TEST_PATH)
x_test, _ = handle_invalid(x_test)
# Remove correlated columns
x_test = remove_columns(x_test, to_remove)
# Feature expansion (must mirror the training-time expansion exactly)
pairwise_poly = tuple([expand_poly(x_test, i) for i in range(1, 3)])
pairwise_poly = np.hstack(pairwise_poly)
col_polynomials = tuple([construct_poly(x_test, i) for i in range(3, 13)])
col_polynomials = np.hstack(col_polynomials)
x_expanded = np.hstack((pairwise_poly, col_polynomials))
# Standardize with the training mean/std, not the test set's own statistics
x_test_std, _, _ = standardize(x_expanded, x_exp_mean, x_exp_std)
# Predict labels and create submission
OUTPUT_PATH = '../output/test_pred.csv'
y_pred = predict_labels(submit_weights, x_test_std)
create_csv_submission(ids_test, y_pred, OUTPUT_PATH)
| lichangling3/ML-Project-1 | src/run.py | run.py | py | 2,276 | python | en | code | 0 | github-code | 90 |
19400166767 | import re
from http import HTTPStatus
from yacut.error_handlers import InvalidAPIUsage
from yacut.models import URLMap
# Full http/https URL: scheme, optional www, host (1-256 chars), TLD, optional path.
URL_PATTERN = (
    '^https?:\\/{1,2}(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}'
    '\\.[a-zA-Z0-9()]{1,6}\\b(?:[-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)$'
)
# Custom short id: latin letters and digits only.
SHORT_URL_PATTERN = r'^[a-zA-Z\d]+$'
def required_fields_validator(data):
    """Ensure the request payload contains the mandatory "url" field."""
    if 'url' in data:
        return
    raise InvalidAPIUsage('"url" является обязательным полем!')
def original_url_validator(value):
    """Validate that *value* looks like a full http(s) URL."""
    if re.match(URL_PATTERN, value) is None:
        raise InvalidAPIUsage('Указано недопустимое имя для короткой ссылки')
def short_url_validator(value):
    """Validate a custom short id: length <= 16, allowed charset, not yet taken."""
    if len(value) > 16 or re.match(SHORT_URL_PATTERN, value) is None:
        raise InvalidAPIUsage('Указано недопустимое имя для короткой ссылки')
    if URLMap.query.filter_by(short=value).first() is not None:
        raise InvalidAPIUsage(f'Имя "{value}" уже занято.')
def get_url_obj_validator(short_id):
    """Return the URLMap row for *short_id*, or raise 404 if it is unknown."""
    url_obj = URLMap.query.filter_by(short=short_id).first()
    if url_obj is not None:
        return url_obj
    raise InvalidAPIUsage('Указанный id не найден', HTTPStatus.NOT_FOUND)
def get_data_in_request_validator(request):
    """Return the request's JSON body, raising when it is absent."""
    data = request.get_json()
    if data is not None:
        return data
    raise InvalidAPIUsage('Отсутствует тело запроса')
| iamTroyanskiy/yacut | yacut/api_validators.py | api_validators.py | py | 1,603 | python | ru | code | 0 | github-code | 90 |
44019856382 | import unittest
"""
Given a string, find the minimum number of characters to be inserted to convert it to palindrome.
Input: ab
Output: 1 (bab)
Input: aa
Output: 0
Input: abcd
Output: 3 (dcbabcd)
Input: abcda
Output: 2 (adcbcda) which is same as insertions for bcd.
"""
"""
Approach 1:
1. Let min_insertions(str, start, end) denote the minimum insertions for str[start...end]
2. This problem has the following optimal substructure:
if str[start]==str[end]:
min_insertion(str,start,end) = min_insertions(str,start+1,end-1)
else:
min_insertion(str,start,end) = 1+min(min_insertions(str,start,end-1),min_insertions(str,start+1,end))
Approach 2:
1. Find the length of LCS between str and reverse(str).
2. Min insertions = N - length of LCS where N is number of characters in str.
"""
def min_insertions_to_make_palindrome(string):
    """Return the minimum number of insertions needed to make *string* a palindrome.

    Bottom-up interval DP: table[i][j] holds the answer for string[i..j];
    intervals are filled in order of increasing length. O(n^2) time and space.
    """
    n = len(string)
    if n == 0:
        # BUG FIX: an empty string is already a palindrome; the old code
        # raised IndexError on table[0][n-1].
        return 0
    table = [[0] * n for _ in range(n)]
    for L in range(2, n + 1):          # current interval length
        for i in range(n - L + 1):
            j = i + L - 1              # interval end index
            if string[i] != string[j]:
                if L == 2:
                    table[i][j] = 1
                else:
                    # insert a character to match either end; keep the cheaper
                    table[i][j] = 1 + min(table[i + 1][j], table[i][j - 1])
            elif L > 2:
                # ends already match: answer equals the inner interval's
                table[i][j] = table[i + 1][j - 1]
    return table[0][n - 1]
class TestMinInsertions(unittest.TestCase):
    """Unit tests for min_insertions_to_make_palindrome."""

    def test_min_insertions(self):
        cases = (("geeks", 3), ("abcde", 4))
        for text, expected in cases:
            with self.subTest(text=text):
                self.assertEqual(
                    min_insertions_to_make_palindrome(text), expected)
n = int(input())
# sentinel "infinity": any real digit sum here is far below 1000
total=10*100
def sum_digits(num):
    """Return the sum of the decimal digits of the non-negative integer *num*."""
    total = 0
    while num:
        total += num % 10
        num //= 10
    return total
# try every split n = a + b and keep the smallest combined digit sum
for a in range(1, n):
    b = n - a
    total = min(total, sum_digits(a)+sum_digits(b))
print(total)
| Aasthaengg/IBMdataset | Python_codes/p03331/s115336163.py | s115336163.py | py | 278 | python | en | code | 0 | github-code | 90 |
71631055016 | from django.shortcuts import render, redirect,HttpResponse
from .forms import RagistrationForm, LoginForm
from django.contrib.auth.models import User
from django.contrib.auth import authenticate,login,logout
# from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def home(request):
    """Render the landing page; login_required redirects anonymous users."""
    return render(request,"index.html")
def login_user(request):
    """Authenticate a user via LoginForm (fields: username, password1) and log them in.

    On failure (inactive account, bad credentials or an invalid form) the bound
    form is re-rendered with any queued error messages.
    """
    if request.method == "POST":
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            cd = login_form.cleaned_data
            user = authenticate(request,username=cd["username"],password=cd["password1"])
            if user is not None:
                if user.is_active:
                    login(request,user)
                    return redirect("home")
                else:
                    # return HttpResponse("Disable account")
                    messages.error(request, "Disable account")
                    # return redirect("/")
            else:
                messages.error(request, "invalid login")
                # return redirect("/")
        # NOTE: an invalid form falls through and re-renders with its errors
    else:
        login_form = LoginForm()
    return render(request, 'login.html',{"login_form":login_form})
def signout_user(request):
    """Register a new account via RagistrationForm and redirect home on success.

    NOTE(review): the name says "signout" but this is the sign-UP view; renaming
    would break URLconf references, so it is only flagged here.
    """
    if request.method == "POST":
        form = RagistrationForm(request.POST)
        if form.is_valid():
            form.save()
            # return HttpResponse('user create successfully')
            return redirect('/')
    else:
        form =RagistrationForm()
    return render(request,'ragistration.html',{'form':form})
@login_required
def logout_user(request):
    """Log the current user out and return to the site root."""
    logout(request)
    return redirect("/")
#userprofile view
def user_profile(request):
    """Render the user profile page."""
    # NOTE(review): not decorated with @login_required — confirm anonymous
    # access to the profile template is intended.
    return render(request,'profile.html')
| Brajesh0/mypoetry | post/views.py | views.py | py | 1,872 | python | en | code | 0 | github-code | 90 |
6859123072 | while True:
n=[int(i) for i in input().split()]
r=0
if n==[0,0]:
break
n_1=n[0]
n_2=n[1]
if n_1%n_2!=0:
if n_2%n_1==0:
print("factor")
else:
print("neither")
else:
if n_1>n_2:
print("multiple")
| dltbwoddl/Algorithmuslearning | 수학3/배수와 약수.py | 배수와 약수.py | py | 305 | python | en | code | 0 | github-code | 90 |
22126557089 | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class forum_post(models.Model):
    """A forum post tied to a semester and an author.

    NOTE: the class name is lowercase (not PascalCase); renaming would require
    a migration plus updates to every reference, so it is left as-is.
    """
    # (stored value, human-readable label) pairs for the semester choice field
    semester = (
        ('I', 'I Semestr'),
        ('II', 'II Semestr'),
        ('III', 'III Semestr'),
        ('IV', 'IV Semestr'),
        ('V', 'V Semestr'),
        ('VI', 'VI Semestr'),
        ('VII', 'VII Semestr')
    )
    title = models.CharField(max_length=255, blank=False)
    semestr = models.CharField(blank=False, choices=semester, max_length=15)
    # free-form tag/category string; defaults to '#elektro'
    category = models.CharField(default='#elektro', max_length=50)
    text = models.TextField(default='Write your post here!', verbose_name='Post text:')
    creation_date = models.DateTimeField(default=timezone.now)
    # deleting the author deletes their posts
    author = models.ForeignKey(User, on_delete=models.CASCADE)
class Comments(models.Model):
    """A user comment attached to a forum_post."""
    # deleting the author deletes their comments
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    comment_date = models.DateTimeField(default=timezone.now)
    text = models.TextField(blank=False)
    # last-edit timestamp; defaults to creation time
    edit_date = models.DateTimeField(default=timezone.now)
    # deleting the post deletes its comments
    post_reference = models.ForeignKey(forum_post, default=None, on_delete=models.CASCADE)
| Przemoosz/WikipediaUsingDjango | wikipedia/wiki/models.py | models.py | py | 1,138 | python | en | code | 0 | github-code | 90 |
15180546770 | # Simple Pong in Python3
# Used "turtle module"
import turtle
import os
# Game state and turtle scene setup (1280x720 window, manual screen updates).
player1score = 0
player2score = 0
wn = turtle.Screen()
wn.title("PONG")
wn.bgcolor("black")
wn.setup(width = 1280, height = 720)
wn.tracer(0)  # disable auto-refresh; the main loop calls wn.update()
# Paddle A
paddle_a = turtle.Turtle()
paddle_a.speed(0)
paddle_a.shape("square")
paddle_a.color("white")
paddle_a.shapesize(stretch_wid = 5, stretch_len = 1)
paddle_a.penup()
paddle_a.goto(-600, 0)
# Paddle B
paddle_b = turtle.Turtle()
paddle_b.speed(0)
paddle_b.shape("square")
paddle_b.color("white")
paddle_b.shapesize(stretch_wid = 5, stretch_len = 1)
paddle_b.penup()
paddle_b.goto(600, 0)
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
# dx/dy: per-frame velocity components
ball.dx = 0.25
ball.dy = 0.25
# Score Card
card = turtle.Turtle()
card.speed(0)
card.color("white")
card.penup()
card.hideturtle()
card.goto(0, 320)
card.write("Player A: 0 Player B: 0", align = "center", font = ("New Baskerville", 24, "normal"))
# Functions
# Moving the paddle_a
def paddle_a_up():
    """Move the left paddle up by one step (25 px)."""
    paddle_a.sety(paddle_a.ycor() + 25)
def paddle_a_down():
    """Move the left paddle down by one step (25 px)."""
    paddle_a.sety(paddle_a.ycor() - 25)
# Moving the paddle_b
def paddle_b_up():
    """Move the right paddle up by one step (25 px)."""
    paddle_b.sety(paddle_b.ycor() + 25)
def paddle_b_down():
    """Move the right paddle down by one step (25 px)."""
    paddle_b.sety(paddle_b.ycor() - 25)
# Key bindings
wn.listen()
# For paddle_a
wn.onkeypress(paddle_a_up, "w")
wn.onkeypress(paddle_a_down, "s")
# For paddle_b
wn.onkeypress(paddle_b_up, "Up")
wn.onkeypress(paddle_b_down, "Down")
# Main game loop
while True:
    wn.update()
    # Move the ball
    ball.setx(ball.xcor() + ball.dx)
    ball.sety(ball.ycor() + ball.dy)
    # Ball Bounce boundary (top/bottom walls at y = +/-350)
    # NOTE(review): os.system("aplay ...") is Linux-only; '&' keeps it async
    if ball.ycor() > 350:
        ball.sety(350)
        ball.dy *= (-1)
        os.system("aplay bounce.wav&")
    if ball.ycor() < -350:
        ball.sety(-350)
        ball.dy *= (-1)
        os.system("aplay bounce.wav&")
    # Ball bounce paddle (x near the paddle, y within half the paddle height)
    if (ball.xcor() < -590 and ball.xcor() > -600) and (ball.ycor() < paddle_a.ycor() + 50 and ball.ycor() > paddle_a.ycor() - 50):
        ball.setx(-590)
        ball.dx *= (-1)
        os.system("aplay bounce.wav&")
        #ball.dy *= (-1)
    if (ball.xcor() > 590 and ball.xcor() < 600) and (ball.ycor() < paddle_b.ycor() + 50 and ball.ycor() > paddle_b.ycor() - 50):
        ball.setx(590)
        ball.dx *= (-1)
        os.system("aplay bounce.wav&")
        #ball.dy *= (-1)
    # Score: ball left the playfield; reset it and redraw the scoreboard
    if ball.xcor() > 630:
        ball.goto(0, 0)
        ball.dx *= (-1)
        player1score += 1
        card.clear()
        card.write("Player A: {} Player B: {}".format(player1score, player2score), align = "center", font = ("New Baskerville", 24, "normal"))
    if ball.xcor() < -630:
        ball.goto(0, 0)
        ball.dx *= (-1)
        player2score += 1
        card.clear()
        card.write("Player A: {} Player B: {}".format(player1score, player2score), align = "center", font = ("New Baskerville", 24, "normal"))
28441415400 | #! /usr/bin/env python3
import os
import ast
import json
from difflib import SequenceMatcher
from configparser import (ConfigParser, NoSectionError,
NoOptionError, DuplicateSectionError)
def get_defaults(filename):
    """Returns a dictionary of the configuration properties"""
    parser = ConfigParser()
    parser.read(filename)
    return {
        section: {
            option: parser.get(section, option)
            for option in parser.options(section)
        }
        for section in parser.sections()
    }
class ConfigReader:
    """A simple configuration reader class for performing
    basic config file operations including reading, setting
    and searching for values"""

    # class defaults merged with (and overridden by) whatever is on disk
    __defaults = {
        'reader': 'configreader'
    }
    # section used whenever the caller does not name one
    __default_section = 'main'

    def __init__(self, filename='settings.ini'):
        """Open (creating if needed) *filename* and write the merged defaults."""
        self.__parser = ConfigParser()
        self.__filename = self._set_filename(filename)
        self.parser = self.__parser
        self.__parser.read(filename)
        self._create_config()

    @property
    def sections(self):
        """List of section names currently in the config (read-only)."""
        return self.__parser.sections()

    @sections.setter
    def sections(self, value):
        raise AttributeError("'Can't set attribute")

    @property
    def filename(self):
        """Absolute path of the backing config file; assigning renames the file."""
        return self.__filename

    @filename.setter
    def filename(self, value):
        old_name = self.__filename
        self.__filename = self._set_filename(value)
        os.rename(old_name, self.__filename)

    @staticmethod
    def _set_filename(value):
        """Return *value* as an absolute path (relative names live next to this module)."""
        if os.path.isabs(value):
            full_path = value
        else:
            full_path = os.path.join(os.path.abspath(
                os.path.dirname(__file__)), os.path.basename(value))
        return full_path

    def _add_section(self, section):
        """Create *section* if it does not exist; duplicates are ignored."""
        try:
            self.__parser.add_section(section)
        except DuplicateSectionError:
            pass

    def _create_config(self):
        """Merge on-disk values over the class defaults and rewrite the file."""
        defaults = get_defaults(self.filename)
        self.__defaults.update(defaults)
        with open(self.filename, 'w') as config:
            for key in self.__defaults.keys():
                value = self.__defaults[key]
                if isinstance(value, dict):
                    # a dict default represents a whole section of options
                    self._add_section(key)
                    for item in value.keys():
                        self.set(key=item,
                                 value=value[item],
                                 section=key)
                else:
                    # scalar defaults go into the default section
                    section = self.__default_section
                    self._add_section(section)
                    self.set(key, value, section)
            self.__parser.write(config)

    def get(self, key, section=None, evaluate=True, default=None):
        """Return the value of the provided key

        Returns None if the key does not exist.
        The section defaults to 'main' if not provided.
        If the value of the key does not exist and default is not None,
        the variable default is returned. In this case, providing
        section may be a good idea.
        If evaluate is True, the returned values are evaluated to
        Python data types int, float and boolean."""
        section = section or self.__default_section
        value = None
        try:
            value = self.__parser.get(section, option=key)
        except (NoSectionError, NoOptionError):
            if default is not None:
                # persist the default so subsequent reads find it
                value = default
                self.set(key, default, section)
        else:
            if evaluate:
                try:
                    value = ast.literal_eval(value)
                except (ValueError, SyntaxError):
                    # ValueError when normal string
                    # SyntaxError when empty
                    pass
        return value

    def set(self, key, value, section=None):
        """Sets the value of key to the provided value

        Section defaults to 'main' is not provided.
        The section is created if it does not exist."""
        with open(self.filename, 'w') as config:
            section = section or self.__default_section
            self._add_section(section)
            self.__parser.set(section, option=key, value=str(value))
            self.__parser.write(config)

    def print(self, output=True):
        """Prints out all the sections and
        returns a dictionary of the same"""
        configs = {}
        string = '{:-^50}'.format(
            os.path.basename(self.filename))
        for section in self.sections:
            configs[section] = {}
            options = self.__parser.options(section)
            string += '\n{:^50}'.format(section)
            for option in options:
                value = self.get(option, section)
                configs[section][option] = value
                string += '\n{:>23}: {}'.format(option, value)
            string += '\n'
        string += '\n\n{:-^50}\n'.format('end')
        if output:
            print('\n\n{}'.format(string))
        return configs

    def search(self, value, case_sensitive=True,
               exact_match=False, threshold=0.36):
        """Returns a tuple containing the key, value and
        section if the value matches, else empty tuple

        If exact_match is True, checks if there exists a value
        that matches above the threshold value. In this case,
        case_sensitive is ignored.
        The threshold value should be 0, 1 or any value
        between 0 and 1. The higher the value the more the accuracy"""
        if not 0 <= threshold <= 1:
            raise AttributeError(
                'threshold must be 0, 1 or any value between 0 and 1')
        lowered_value = value.lower()
        result = ()
        for section in self.sections:
            options = self.__parser.options(section)
            for option in options:
                # BUG FIX: fetch the raw string. With evaluate=True the value
                # could come back as int/float/bool, which crashed .lower()
                # and SequenceMatcher below.
                found = self.get(option, section, evaluate=False)
                if exact_match:
                    if case_sensitive:
                        if value == found:
                            result = (option, found, section)
                            return result
                    else:
                        if lowered_value == found.lower():
                            result = (option, found, section)
                            return result
                else:
                    ratio = SequenceMatcher(None, found, value).ratio()
                    if ratio >= threshold:
                        result = (option, found, section)
                        return result
        return result

    def to_json(self, filename=None):
        """Export config to JSON

        If filename is given, it is exported to the file
        else returned as called"""
        config = self.print(output=False)
        if filename is None:
            return json.dumps(config)
        else:
            with open(filename, 'w') as f:
                json.dump(config, f)
| omushpapa/telkombalance | configreader.py | configreader.py | py | 7,028 | python | en | code | 0 | github-code | 90 |
7569850737 | import unittest
from scrapper.bayes import NaiveBayesClassifier
from scrapper.utils import load_data
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
class TestNaiveBayesClassifier(unittest.TestCase):
    def setUp(self):
        """Build a tiny labeled sentiment corpus and fit the classifier for each test."""
        # tolerance for float comparisons in the assertions below
        self.const = 1e-3
        self.X_train = [
            "I love this sandwich",
            "This is an amazing place",
            "I feel very good about these beers",
            "This is my best work",
            "What an awesome view",
            "I do not like this restaurant",
            "I am tired of this stuff",
            "I can't deal with this",
            "He is my sworn enemy",
            "My boss is horrible"
        ]
        # 1 = positive sentiment, 0 = negative
        self.y_train = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
        self.X_test = [
            "The beer was good",
            "I do not enjoy my job",
            "I ain't feeling dandy today",
            "I feel amazing",
            "Gary is a friend of mine",
            "I can't believe I'm doing this"
        ]
        self.y_test = [1, 0, 0, 1, 1, 0]
        # Laplace smoothing with alpha=1
        self.naive_bayes = NaiveBayesClassifier(alpha=1)
        self.naive_bayes.fit(self.X_train, self.y_train)
def test_prior_probability_for_labels(self):
labels = self.naive_bayes.p_c
equals = [
labels[0] == 0.5,
labels[1] == 0.5
]
for test in equals:
with self.subTest():
self.assertTrue(test)
def test_length_of_dictionary(self):
self.assertEqual(self.naive_bayes.d, 36)
def test_count_words_per_label_positive(self):
"""
https://stackoverflow.com/questions/32899/how-do-you-generate-dynamic-parameterized-unit-tests-in-python
"""
words = [
'about', 'am', 'amazing', 'an', 'awesome', 'beers',
'best', 'boss', 'cant', 'deal', 'do', 'enemy',
'feel', 'good', 'he', 'horrible', 'i', 'is',
'like', 'love', 'my', 'not', 'of', 'place',
'restaurant', 'sandwich', 'stuff', 'sworn', 'these', 'this',
'tired', 'very', 'view', 'what', 'with', 'work'
]
count_positive = [
1, 0, 1, 2, 1, 1,
1, 0, 0, 0, 0, 0,
1, 1, 0, 0, 2, 2,
0, 1, 1, 0, 0, 1,
0, 1, 0, 0, 1, 3,
0, 1, 1, 1, 0, 1
]
count_negative = [
0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1,
0, 0, 1, 1, 3, 2,
1, 0, 2, 1, 1, 0,
1, 0, 1, 1, 0, 3,
1, 0, 0, 0, 1, 0
]
count_per_label = self.naive_bayes.n_ic
for word, count_pos, count_neg in zip(words,
count_positive,
count_negative):
with self.subTest():
self.assertEqual(count_per_label[1, word], count_pos)
self.assertEqual(count_per_label[0, word], count_neg)
def test_probability_of_word_per_label(self):
probability_of_word = self.naive_bayes.prob_wi_c
words = ['beer', 'amazing']
prob_positive = [0, 0.032]
prob_negative = [0, 0.016]
for word, pos, neg in zip(words,
prob_positive,
prob_negative):
with self.subTest():
self.assertTrue(
abs(probability_of_word(1, word) - pos) <= self.const
)
self.assertTrue(
abs(probability_of_word(0, word) - neg) <= self.const
)
def test_amount_of_words_per_label(self):
sum_per_labels = self.naive_bayes.sum_per_label
equals = [
sum_per_labels[0] == 26,
sum_per_labels[1] == 25
]
for test in equals:
with self.subTest():
self.assertTrue(test)
def test_log_probability(self):
log_prob = self.naive_bayes.label_log_probability
X_test_clean = self.naive_bayes.cleaner.transform(self.X_test)
dummy = X_test_clean[0]
equals = [
abs(log_prob(0, dummy) - (-4.820)) <= self.const,
abs(log_prob(1, dummy) - (-4.110)) <= self.const,
]
for test in equals:
with self.subTest():
self.assertTrue(test)
def test_predict(self):
y_pred = self.naive_bayes.predict(self.X_test)
self.assertTrue([1, 0, 0, 1, 0, 0] == y_pred)
def test_score(self):
score = self.naive_bayes.score(self.X_test, self.y_test)
self.assertEqual(score, 5 / 6)
class TestNaiveBayesClassifierOnData(unittest.TestCase):
    """Integration test on the full scraped dataset.

    For several smoothing strengths the custom classifier must reach at
    least 0.982 accuracy on the held-out tail AND match or beat an
    equivalent sklearn TF-IDF + MultinomialNB pipeline.
    """
    def setUp(self):
        self.X, self.y = load_data()
    def test_score_bayes(self):
        for alpha in [0.05, 0.1, 0.5, 1]:
            naive_bayes = NaiveBayesClassifier(alpha=alpha)
            sklearn_model = Pipeline([
                ('vectorizer', TfidfVectorizer()),
                ('classifier', MultinomialNB(alpha=alpha)),
            ])
            # first 3900 samples train, the remainder is the test split
            X_train, y_train, X_test, y_test = (
                self.X[:3900], self.y[:3900], self.X[3900:], self.y[3900:]
            )
            naive_bayes.fit(X_train, y_train)
            score_my_model = naive_bayes.score(X_test, y_test)
            sklearn_model.fit(X_train, y_train)
            score_sklearn_model = sklearn_model.score(X_test, y_test)
            equals = [
                score_my_model >= 0.982,
                score_my_model >= score_sklearn_model
            ]
            for test in equals:
                with self.subTest():
                    self.assertTrue(test)
| toofnf/dementiy | homework06/tests/test_bayes.py | test_bayes.py | py | 5,716 | python | en | code | 1 | github-code | 90 |
17728121871 | #! /usr/bin/python
__author__ = "Alexander Rush <srush@csail.mit.edu>"
__date__ = "$Sep 12, 2012"
import json
import sys
import os
import cky
"""
replace infrequent words Count(x)<5 in parse_train.dat
into parse_train.RARE.dat
"""
class Tree:
    """Helper around a JSON-style parse tree (nested Python lists).

    Binary rules are 3-element lists ``[label, left_subtree, right_subtree]``;
    terminal rules are 2-element lists ``[label, word]``.
    """

    def __init__(self):
        # word -> number of occurrences seen so far by count_terminal()
        self.terminal_count = {}

    def count_terminal(self, tree):
        """Accumulate occurrence counts for every terminal word in *tree*."""
        if len(tree) == 3:
            self.count_terminal(tree[1])
            self.count_terminal(tree[2])
        elif len(tree) == 2:
            self.terminal_count[tree[1]] = self.terminal_count.get(tree[1], 0) + 1

    def replace_with_rare(self, tree, rare_terminals):
        """Replace, in place, every terminal listed in *rare_terminals* with '_RARE_'."""
        if len(tree) == 3:
            self.replace_with_rare(tree[1], rare_terminals)
            self.replace_with_rare(tree[2], rare_terminals)
        elif len(tree) == 2 and tree[1] in rare_terminals:
            tree[1] = "_RARE_"
def count_all_terminals(original_dat):
    """Count, for every terminal word, how many trees of *original_dat* contain it.

    Each line of the file is one JSON-encoded parse tree.  NOTE: a tree
    contributes 1 per *distinct* terminal it contains, regardless of how
    often the word repeats inside that tree.
    """
    totals = dict()
    for line in open(original_dat):
        tree_dict = json.loads(line)
        tree = Tree()
        # count the terminals of this single tree
        tree.count_terminal(tree_dict)
        # fold the tree's distinct terminals into the running totals
        for terminal in tree.terminal_count:
            totals.setdefault(terminal, 0)
            totals[terminal] += 1
    return totals
def get_rare_terminals(original_dat):
    """Return the set of terminals that occur in fewer than 5 trees."""
    counts = count_all_terminals(original_dat)
    return {terminal for terminal in counts if counts[terminal] < 5}
def replace_infrequent(original_dat, rare_dat):
    """Rewrite *original_dat* into *rare_dat*, replacing rare terminals with '_RARE_'.

    Each input line is a JSON parse tree; the rewritten tree is emitted
    one JSON document per line.

    Fix: the output file is now opened with a context manager, so it is
    flushed and closed even if a line fails to parse — the original
    leaked the handle (it was never closed).
    """
    rare_terminals = get_rare_terminals(original_dat)
    with open(rare_dat, "w+") as out_file:
        for line in open(original_dat):
            tree_dict = json.loads(line)
            tree = Tree()
            tree.replace_with_rare(tree_dict, rare_terminals)
            out_file.write(json.dumps(tree_dict) + '\n')
def usage():
    """Print command-line usage to stderr (the string layout is user-facing)."""
    sys.stderr.write("""
    Usage: python parser.py q4 parse_train.dat parse_train.RARE.dat
        Substitute all the words in count_file which has count(word)<5
        into parse_train.RARE.dat.\n""")
if __name__ == "__main__":
    # No sub-command given: show usage and exit with a non-zero status.
    if len(sys.argv) <= 1:
        usage();
        sys.exit(2)
    try:
        question = sys.argv[1];
        if question == 'q4':
            # replace infrequent word with count(x)<5
            ori_dat_file = sys.argv[2];
            rare_dat_file = sys.argv[3];
            replace_infrequent(ori_dat_file, rare_dat_file);
            # regenerate the rule-count file for the rewritten corpus
            count_file = rare_dat_file + '.count';
            os.system("python count_cfg_freq.py %s > %s\n" % (rare_dat_file, count_file));
        if question == 'q5':
            rare_dat_file = sys.argv[2];
            count_file = rare_dat_file + '.count';
            dev_dat_file = sys.argv[3];
            key_file = sys.argv[4];
            cky.cky_algorithm(rare_dat_file, count_file, dev_dat_file, key_file);
        # NOTE(review): q6 is currently byte-identical to q5 — presumably a
        # placeholder for a variant of the CKY run; confirm before relying on it.
        if question == 'q6':
            rare_dat_file = sys.argv[2];
            count_file = rare_dat_file + '.count';
            dev_dat_file = sys.argv[3];
            key_file = sys.argv[4];
            cky.cky_algorithm(rare_dat_file, count_file, dev_dat_file, key_file);
    except IOError:
        sys.stderr.write("ERROR: Cannot read input file %s or %s.\n" % (sys.argv[2], sys.argv[3]));
        sys.exit(1)
| Christine-Tan/4705NLP | hw3/parser.py | parser.py | py | 3,469 | python | en | code | 1 | github-code | 90 |
18460288919 | from collections import deque
def main():
    """Read an H x W grid of '.'/'#' cells from stdin and print the number
    of (white, black) cell pairs that are connected by a path on which the
    colors strictly alternate (adjacent cells on the path always differ).
    """
    H, W = list(map(int, input().split()))
    S = [input() for _ in range(H)]
    visited = [[0] * W for _ in range(H)]
    ans = 0
    for h in range(H):
        for w in range(W):
            # Components are only seeded from white cells; a component with
            # no white cell would contribute n_white * n_black == 0 anyway.
            if S[h][w] == '#' or visited[h][w] == 1:
                continue
            n_white, n_black = 0, 0
            # deque used as a stack (pop from the right) -> depth-first fill
            que = deque()
            que.append((h, w))
            while len(que) > 0:
                ch, cw = que.pop()
                if visited[ch][cw] == 1:
                    continue
                visited[ch][cw] = 1
                if S[ch][cw] == '.':
                    n_white += 1
                else:
                    n_black += 1
                # expand only to in-bounds neighbours of the OPPOSITE color
                for dh, dw in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
                    next_h, next_w = ch + dh, cw + dw
                    if 0 <= next_h < H and 0 <= next_w < W and \
                       S[ch][cw] != S[next_h][next_w] and \
                       visited[next_h][next_w] == 0:
                        que.append((next_h, next_w))
            # every white cell pairs with every black cell of the component
            ans += n_white * n_black
    print(ans)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p03157/s765861500.py | s765861500.py | py | 1,140 | python | en | code | 0 | github-code | 90 |
41167805408 | import subprocess
from subprocess import check_output
import re
def delete_branch(merged):
    """Interactively select branches from *merged* and delete them remotely.

    Every branch the user approves via triple_check() is collected first and
    the deletions are then executed in a single batch, so an aborted prompt
    never leaves a half-deleted set.  A None answer ("done") stops the
    questioning early.

    Fixes: iterate the list directly instead of ``range(len(...))`` with
    indexing, compare to None with ``is``, and drop the dead
    ``else: continue`` branch.
    """
    delete_storage = []
    for branch in merged:
        verdict = triple_check(branch)
        if verdict:
            delete_storage.append(branch)
        elif verdict is None:
            # user answered "done": stop asking about the remaining branches
            break
    if delete_storage:
        print('*** DELETING BRANCHES PLEASE DO NOT STOP PROGRAM ***')
        for brnch in delete_storage:
            # deletes branch using command line git
            subprocess.call(['git', 'push', 'origin', '--delete', brnch])
def triple_check(branch_judgement):
    """Ask whether *branch_judgement* should be deleted.

    Returns True for 'y', False for 'n', None for 'done' (stop asking);
    'quit' terminates the program.  Any other answer re-prompts.
    """
    prompt = 'DELETE BRANCH ' + branch_judgement + ' ? (y/n/done/quit): '
    while True:
        answer = str(input(prompt))
        if answer == 'y':
            return True
        if answer == 'n':
            return False
        if answer == 'done':
            return None
        if answer == 'quit':
            quit()
        print("You should enter either \"y\", \"n\", \"done\" or \"quit\".")
def get_merged_branches(output, branch_user):
    """Return the merged branch names belonging to *branch_user*.

    *output* is the raw shell listing, one "<email> torigin/<branch>" entry
    per line.  The literal 'master' line (if present) is never offered for
    deletion, and only lines containing *branch_user* are kept; the user
    prefix is stripped from each surviving line.

    Fix: the original collected indices via ``list.index(i)``, which always
    returns the FIRST occurrence — duplicate lines made it keep entries it
    meant to drop.  Filtering by membership in a comprehension removes the
    index bookkeeping entirely.
    """
    lines = output.replace('torigin/', '').split('\n')
    if 'master' in lines:
        lines.remove('master')
    kept = [line for line in lines if branch_user in line]
    return [line.replace(branch_user + ' ', '') for line in kept]
def users_that_merged(user_and_branch):
    """Return the unique e-mail addresses owning merged branches, in
    first-seen order.

    The input alternates "<email> " and "<branch>" tokens once split on
    newlines and the 'torigin/' marker.
    """
    tokens = re.split('\n|torigin/', user_and_branch)
    # branch names occupy every odd position — drop them
    del tokens[1::2]
    # dict.fromkeys de-duplicates while preserving insertion order
    unique_emails = dict.fromkeys(tokens)
    return [email.strip(' ') for email in unique_emails]
def main():
    """Entry point: list users with merged remote branches, let the caller
    pick their own e-mail, then interactively delete their merged branches.
    """
    # gives user the option to choose name and delete their merged branches
    print('*** LOADING BRANCH USERS EMAILS ***\n')
    # One "<email> \t<branch>" line per merged remote branch, newest first.
    git_info = check_output(
        'for branch in `git branch -r --merged | grep -v HEAD`; do echo `git show --format=" %aE" $branch | head -n 1`'
        ' \\t$branch; done | sort -r', shell=True).decode()
    users_merged = users_that_merged(git_info)
    # prints the list of users pretty
    print('\n'.join(map(str, users_merged)))
    print('*** SELECT BRANCH USER EMAIL ***\n')
    user_name = input('EMAIL: ')
    # gives you list of branches with your name
    if user_name != '' and user_name in set(users_merged):
        print('\n*** FINDING YOUR BRANCHES ***\n')
        merged_branches = get_merged_branches(git_info, user_name)
        delete_branch(merged_branches)
        print('Done with branches')
    else:
        print('\nCould not find your email')
if __name__ == '__main__':
main()
| davidmojica/Q2-Internship | git_tools/Python/remove_merged.py | remove_merged.py | py | 3,135 | python | en | code | 0 | github-code | 90 |
4311445597 | import abc
from historico import Historico
from tributavel import Tributavel
from excecoes import SaldoInsuficienteError
class Conta(abc.ABC):
    '''
    Abstract representation of a bank account.

    Attributes:
        numero: account number
        titular: Cliente object representing the account holder
        saldo: current balance
        limite: overdraft ("cheque especial") limit
        data_abertura: Data object with the account's opening date
    '''
    # __slots__ removes the per-instance __dict__, preventing creation of
    # new attributes at runtime and reducing memory usage.
    __slots__ = ['_id', '_numero', '_titular', '_saldo', '_limite', '_data_abertura', '_historico', '_tipo']

    _total_contas = 0  # class-level counter, also used to assign sequential ids

    def __init__(self, numero, titular, saldo, limite, data_abertura):
        Conta._total_contas += 1
        self._id = Conta._total_contas
        self._numero = numero
        self._titular = titular
        self._saldo = saldo
        self._limite = limite
        self._data_abertura = data_abertura
        self._historico = Historico()
        self._tipo = ""

    @property
    def id(self):
        return self._id

    @property
    def numero(self):
        return self._numero

    @numero.setter
    def numero(self, valor):
        self._numero = valor

    @property
    def titular(self):
        return self._titular

    @titular.setter
    def titular(self, valor):
        self._titular = valor

    @property
    def saldo(self):
        # read-only: the balance only changes through deposits/withdrawals
        return self._saldo

    @property
    def limite(self):
        return self._limite

    @limite.setter
    def limite(self, valor):
        self._limite = valor

    @property
    def data_abertura(self):
        return self._data_abertura

    @data_abertura.setter
    def data_abertura(self, valor):
        self._data_abertura = valor

    @property
    def historico(self):
        return self._historico

    @property
    def tipo(self):
        return self._tipo

    @classmethod
    def total_contas(cls):
        """Return how many accounts have been created so far."""
        return cls._total_contas

    def deposita(self, valor):
        '''
        Deposit *valor* into the account and log the operation.
        Raises ValueError for non-positive amounts.
        '''
        if valor <= 0:
            raise ValueError("Valor para depósito deve ser positivo")
        self._saldo += valor
        self._historico.atualiza_historico(f'Depósito realizado no valor de {valor}. Saldo parcial: {self._saldo}')

    def saca(self, valor):
        '''
        Withdraw *valor*; the overdraft limit counts as available funds.
        Raises ValueError for non-positive amounts and SaldoInsuficienteError
        when balance + limit does not cover the withdrawal.
        Returns True on success.
        '''
        if valor <= 0:
            raise ValueError("Valor para saque deve ser positivo")
        if (self._saldo + self._limite) < valor:
            raise SaldoInsuficienteError("Saldo insuficiente")
        self._saldo -= valor
        self._historico.atualiza_historico(f'Saque realizado no valor de {valor}. Saldo parcial: {self._saldo}')
        return True

    def transfere_para(self, destino, valor):
        '''
        Transfer *valor* from this account to the *destino* account.
        Returns True on success; raises on invalid amounts or
        insufficient funds.
        '''
        if valor <= 0:
            raise ValueError("Valor para transferência deve ser positivo")
        try:
            retirou = self.saca(valor)
            if retirou == False:
                return False
            destino.deposita(valor)
            self._historico.atualiza_historico(f'Transferência realizada no valor de {valor} para a conta número {destino.numero} - titular: {destino.titular.nome} {destino.titular.sobrenome}. Saldo parcial: {self._saldo}')
            return True
        except SaldoInsuficienteError:
            raise SaldoInsuficienteError("Saldo insuficente para transferência")
        except:
            # Bug fix: the original did `raise e` here, but `e` is never
            # bound on this path (the `as e` of the previous clause is
            # cleared on exit), so any unexpected error was masked by a
            # NameError.  A bare `raise` re-raises the original exception.
            raise

    def extrato(self):
        '''
        Print the account statement: holder data, balance and full history.
        '''
        print(f"Titular: {self._titular.nome} {self._titular.sobrenome}\nCPF: {self._titular.cpf}")
        print(f"Conta número: {self._numero}\nSaldo: {self._saldo}")
        print('\n')
        self._historico.imprime()

    @abc.abstractmethod
    def atualiza(self, taxa):
        '''
        Apply interest at rate *taxa*; each concrete subclass defines its
        own multiplier.
        '''

    def __str__(self):
        return f"Tipo: {self._tipo} - Titular: {self._titular.nome} {self._titular.sobrenome} - CPF: {self._titular.cpf}"
class ContaCorrente(Conta):
    """Checking account: doubled interest rate and a fixed fee per deposit."""

    def __init__(self, numero, titular, saldo, limite, data_abertura):
        super().__init__(numero, titular, saldo, limite, data_abertura)
        self._tipo = "Conta Corrente"

    def atualiza(self, taxa):
        """Apply interest at 2 * taxa and log the update."""
        self._saldo += self._saldo * taxa * 2
        self._historico.atualiza_historico(f'Atualização de rendimentos - taxa: {taxa}. Saldo parcial: {self._saldo}')

    def deposita(self, valor):
        """Deposit *valor*, then charge the fixed deposit fee; both steps are logged."""
        if valor <= 0:
            raise ValueError("Valor para depósito deve ser positivo")
        taxa_deposito = 0.1
        self._saldo += valor
        self._historico.atualiza_historico(f'Depósito realizado no valor de {valor}')
        self._saldo -= taxa_deposito
        self._historico.atualiza_historico(f'Cobrança da taxa de depósito {-taxa_deposito}')

    def get_valor_imposto(self):
        """Tax due for this account type: 1% of the balance."""
        return self._saldo * 0.01
class ContaPoupanca(Conta):
    """Savings account: interest accrues at five times the base rate."""

    def __init__(self, numero, titular, saldo, limite, data_abertura):
        super().__init__(numero, titular, saldo, limite, data_abertura)
        self._tipo = "Conta Poupança"

    def atualiza(self, taxa):
        """Apply interest at 5 * taxa and log the update."""
        self._saldo += self._saldo * taxa * 5
        self._historico.atualiza_historico(f'Atualização de rendimentos - taxa: {taxa}. Saldo parcial: {self._saldo}')
class ContaInvestimento(Conta):
    """Investment account: tripled interest rate and a 3% tax on the balance."""

    def __init__(self, numero, titular, saldo, limite, data_abertura):
        super().__init__(numero, titular, saldo, limite, data_abertura)
        self._tipo = "Conta Investimento"

    def atualiza(self, taxa):
        """Apply interest at 3 * taxa and log the update."""
        rendimento = self._saldo * taxa * 3
        self._saldo += rendimento
        self._historico.atualiza_historico(f'Atualização de rendimentos - taxa: {taxa}. Saldo parcial: {self._saldo}')

    def get_valor_imposto(self):
        """Tax due for this account type: 3% of the balance."""
        return self._saldo * 0.03
class SeguroDeVida:
    """Life-insurance product identified by its policy number."""

    def __init__(self, valor, titular, numero_apolice):
        self._valor = valor            # insured value
        self._titular = titular        # holder (duck-typed like Cliente)
        self._numero_apolice = numero_apolice
        self._tipo = "Seguro de Vida"

    def get_valor_imposto(self):
        """Tax due: a flat 50 plus 5% of the insured value."""
        return 50 + self._valor * 0.05

    def __str__(self):
        return f"Tipo: {self._tipo} - Titular: {self._titular.nome} {self._titular.sobrenome} - CPF: {self._titular.cpf}"
# Tests / demo script
if __name__ == "__main__":
    from cliente import Cliente
    from conta import Conta
    from data import Data
    data = Data(16, 12, 2019)
    # Client registration:
    joao = Cliente('João', 'da Silva', '333444555-66')
    bicca = Cliente('Bruno', 'Bicca', '111222333-44')
    marcelo = Cliente('Marcelo', 'Frasca', '222333444-55')
    # Account opening (one of each concrete type):
    #conta_joao = Conta('8901-2', joao, 1400.0, 2000.0, data)
    conta_joao = ContaInvestimento('8901-2', joao, 1400.0, 2000.0, data)
    conta_bicca = ContaCorrente('1234-5', bicca, 15000.0, 30000.0, data)
    conta_marcelo = ContaPoupanca('4567-5', marcelo, 5000.0, 10000.0, data)
    #print('Métodos da class Conta:')
    #vars(conta_marcelo)
    # Account movements
    conta_joao.saca(100.0)
    conta_bicca.deposita(50.0)
    conta_marcelo.saca(50.0)
    conta_bicca.transfere_para(conta_marcelo, 100.50)
    conta_marcelo.deposita(0.5)
    conta_marcelo.saca(101.0)
    conta_joao.transfere_para(conta_bicca, 350.50)
    # Interest updates
    conta_joao.atualiza(0.01)
    conta_bicca.atualiza(0.01)
    conta_marcelo.atualiza(0.01)
    # Statements
    print(f'\nTotal de Contas: {Conta.total_contas()}')
    print(f'\nConta do João - Id: {conta_joao.id}')
    conta_joao.extrato()
    print(f'\nConta do Bicca - Id: {conta_bicca.id}')
    conta_bicca.extrato()
    print(f'\nConta do Marcelo - Id: {conta_marcelo.id}')
    conta_marcelo.extrato()
    # Printing the account data via __str__
    print('')
    print(conta_joao)
    print(conta_bicca)
print(conta_marcelo) | mvfrasca/Caelum-Python | oo/conta.py | conta.py | py | 9,056 | python | pt | code | 0 | github-code | 90 |
70762185578 | """
Hacer un programa que pida las calificaciones
de 15 alumnos y que nos muestre cuantos han pasado
y cuantos reprobaron
"""
# Tally how many of the entered grades pass (>= 6) and how many fail.
aprobados = 0    # passing grades
reprobados = 0   # failing grades
contador = 0
numero_alumnos = int(input("Cúantos alumnos tienes: "))
# Read one grade per student and classify it.
while contador < numero_alumnos:
    calificacion = float(input(f"Escribe la calificación para el alumno {contador}: "))
    if calificacion >= 6:
        aprobados += 1
    else:
        reprobados += 1
    contador += 1
print("*****Alumnos aprobados*****")
print(aprobados)
print("_______________________________")
print("*****Alumnos reprobados*****")
print(reprobados)
| AlexSR2590/curso-python | 07-ejercicios/ejercicio10.py | ejercicio10.py | py | 580 | python | es | code | 0 | github-code | 90 |
9221177261 |
# coding: utf-8
# # Sistemas Lineales. Metodos directos
# Ax=b; A es un array nxn b es un array nx1
import numpy as np
import scipy.linalg as scla # El comando para resolver sistemas lineales es solve
A = np.array([[1,2,3],[2,4,1],[-1,-1,2]])
b = np.array([[1],[5],[3]])
scla.solve(A,b)
# ## Factorizacion LU
solucion = scla.lu(A)# 1 arry es L, 2 array es U
L = solucion[0]
L
L, U, P = scla.lu(A)
L
U
P
#Factorizacion de cholesky, el comando es cholesky
scla.cholesky(A) #la matriz no es definida positiva
def Newton_Rapshon(f, f1, x0, tol, Nmax):
    """Newton-Raphson iteration for f(x) = 0.

    f1 is the derivative of f; iteration stops when two successive
    iterates differ by less than *tol*, or after *Nmax* steps (in which
    case a warning is printed and the last iterate is returned).
    """
    import numpy as np
    for iteration in range(1, Nmax + 1):
        x_new = x0 - f(x0) / f1(x0)
        if np.abs(x0 - x_new) < tol:
            return x_new
        x0 = x_new
        if iteration >= Nmax:
            print('numero maximo alcanzado')
            return x_new
# ## Metodo de Jacobi- metodo de Gauss-Seidel
#Vamos a programar antes la iteracion de punto fijo
# Programese una funcion llamada "mipuntofijo" que tenga como parametros de entrada g, x0, tol(tolerancia max), Nmax(nº maximo de iteraciones) y devulva el punto fijo o un mensaje de error
def mipuntofijo(g, x0, tol, Nmax):
    """Fixed-point iteration x_{k+1} = g(x_k).

    Returns the iterate once two successive values differ by less than
    *tol*; after *Nmax* steps a warning is printed and the last iterate
    is returned.
    """
    import numpy as np
    for iteration in range(1, Nmax + 1):
        x1 = g(x0)
        if np.fabs(x0 - x1) < tol:
            return x1
        x0 = x1
        if iteration >= Nmax:
            print('numero maximo alcanzado')
            return x1
def g(x):
    """Sample iteration map g(x) = 2^(-x)."""
    return 2 ** (-x)
mipuntofijo(g,1,0.00001,5)
# ### Programacion metodo de Jacobi
get_ipython().magic('reset -f')
# Programaremos el metodo de jacobi utilizando la forma matricial: x^{m+1}=B*x^m+D^-1*b siendo B = D{-1^}*(-(L+U)) y C=D^-1*b
# Programese una funcion llamada "mijacobi" que tenga como parametros de entradas una matriz A, un vector b, una tolerancia de error tol, y un numero maximo de iteranciones Nmax. Debe imprimir por pantalla el mensaje de error adecuado si se excede Nmax o no se alanza la tolerancia de error debe devolver el numero de iteraciones utilizado, la aproximacion al error y ademas imprimirlo por pantalla con un mensaje adecuado.
import numpy as np
A = np.array([[1.2,2.8],[2.3,4.6]])
np.shape(A) #numero de filas y columnas
np.transpose(A)#transpuesta de la matriz
a = np.array([[1],[2]])#vector en colmuna
np.diagonal(A)#Nos proporciona la diagonal de A
b = np.diag(a)
l = np.tril(A)#triangulas inferior
u = np.triu(A)#triangular superior
producto = A.dot(a)#matriz por vector
print(b)
print(l)
print(u)
print(producto)
import numpy.linalg as npla
npla.inv(A)#calcula la inversa de A
npla.norm(A)#calcula la norma infinito
npla.norm(A,2)#norma 2
def mijacobi(A, b, tol, Nmax, x0):
    """Jacobi iteration for the linear system A x = b.

    Uses the matrix form x^{m+1} = B x^m + D^{-1} b with
    B = D^{-1} (-(L + U)).  Returns (x, iterations) and prints a message
    when the tolerance is met or the iteration budget is exhausted.
    """
    import numpy as np
    import numpy.linalg as npla
    D = np.diag(np.diagonal(A))
    strict_lower = np.tril(A) - D
    strict_upper = np.triu(A) - D
    D_inv = npla.inv(D)
    iter_matrix = np.dot(D_inv, (-(strict_lower + strict_upper)))
    print(iter_matrix)
    offset = np.dot(D_inv, b)
    step = 0
    while step <= Nmax:
        step += 1
        x = np.dot(iter_matrix, x0) + offset
        error = npla.norm(x - x0)
        if step == Nmax:
            print('se ha alcanzado el numero maximo de iteracciones')
            return (x, step)
        if error < tol:
            print('el error es menor que la tolerancia.')
            return (x, step)
        x0 = x
import numpy as np
A = np.array([[10,2,1],[1,5,1],[2,3,10]])
b = np.array([[7],[-8],[6]])
I = np.array([[1,0,0],[0,1,0],[0,0,1]])
x0 = np.array([[0.7],[-1.6],[0.6]])
mijacobi(A,b,0.1,4,x0)
get_ipython().magic('reset -f')
# ### Metodo de la secante
# x(n+1)=x(n)-[(x(n)-x(n-1))*f(x(n))]/[f(x(n))-f(x(n-1))]
def metodo_secante(F, x0, x1, tol, Nmax):
    """Secant method for F(x) = 0 starting from the pair (x0, x1).

    Returns (x2, iterations) on convergence; if the iteration budget runs
    out, only the last iterate x2 is returned (preserving the original,
    inconsistent contract).  NOTE(review): the error is measured between
    the two PREVIOUS iterates, mirroring the original formulation.
    """
    import numpy as np
    step = 0
    while step <= Nmax:
        step += 1
        x2 = x1 - ((x1 - x0) * F(x1)) / (F(x1) - F(x0))
        error = np.fabs(x0 - x1)
        if step == Nmax:
            print('se ha alcanzado el numero maximo de iteracciones')
            return (x2)
        if error < tol:
            print('el error es menor que la tolerancia.')
            return (x2, step)
        x0, x1 = x1, x2
import numpy as np
def F(x):
    """Test function F(x) = cos(5x) - 3x for the secant method."""
    return np.cos(5 * x) - 3 * x
metodo_secante(F,1/6,1/3,0.00000001,100)
# # Simpson
def simp_comp(f, a, b, m):
    """Composite Simpson rule for the integral of f over [a, b] with m panels."""
    h = (b - a) / (2 * m)
    acc = 0
    panel = 1
    # m may legitimately be a float (callers compute it as (b-a)/(2h)),
    # so a while loop is used instead of range().
    while panel <= m:
        left = a + h * (2 * panel - 2)
        mid = a + h * (2 * panel - 1)
        right = a + h * (2 * panel)
        acc = acc + (f(left) + 4 * f(mid) + f(right))
        panel = panel + 1
    return acc * (h / 3)
a=0
b=1
h=0.125
x=0
m=(b-a)/(2*h)
def f(x):
    """Integrand f(x) = x / ((x + 1)(x + 2))."""
    return x / ((x + 1) * (x + 2))
simp_comp(f,a,b,m)
def simpson(f, a, b):
    """Simple (single-interval) Simpson rule on [a, b]."""
    half = (b - a) / 2
    midpoint = (a + b) / 2
    return (half / 3) * (f(a) + 4 * f(midpoint) + f(b))
# ### Prgramacion metodo Runge-Kuta
# Metodo RK de orden 4 clasico (apuntes)
def miRK4(f, t0, y0, tf, h):
    """Classic 4th-order Runge-Kutta integrator for y' = f(t, y).

    Steps from t0 to tf with step size h, taking one shortened final step
    if (tf - t0) is not an exact multiple of h.  Prints a summary line and
    returns (tf, y_final, number_of_f_evaluations).

    Fixes: the summary used to report `t0 + h` as the final time, which
    overshoots tf by a full step when the grid lands exactly on tf, and it
    referenced `y1`, which is unbound when no step is taken at all (tf <= t0).
    """
    import numpy as np

    def _rk4_step(t, y, step):
        # one RK4 stage; costs exactly 4 evaluations of f
        k1 = f(t, y)
        k2 = f(t + 0.5 * step, y + 0.5 * step * k1)
        k3 = f(t + 0.5 * step, y + 0.5 * step * k2)
        k4 = f(t + step, y + step * k3)
        return y + step * ((1 / 6) * k1 + (1 / 3) * k2 + (1 / 3) * k3 + (1 / 6) * k4)

    evaluations = 0
    while t0 + h <= tf:
        y0 = _rk4_step(t0, y0, h)
        t0 = t0 + h
        evaluations += 4
    if t0 < tf:
        # shortened final step so the integration ends exactly at tf
        y0 = _rk4_step(t0, y0, tf - t0)
        t0 = tf
        evaluations += 4
    print('T final {0:-23.15e}, aproximacion {1:-23.15e}, numero de evaluaciones de funcion {2:23d}'.format(t0, y0, evaluations))
    return (tf, y0, evaluations)
# y'=-y; y(0)=1
def f(t, y):
    """Right-hand side of the test ODE y' = -y."""
    return -y
def f(t, y):
    """Right-hand side of the test ODE y' = -y (duplicate notebook cell)."""
    return -y
t0 = 0
y0 = 1
tf = 1
h = 0.1
sol = miRK4(f,t0,y0,tf,h)
import numpy as np
np.exp(-sol[0])
np.exp(-sol[0])-sol[1]
# # Funciones libreria para aproximar soluciones de ecuaciones diferenciales
# El paquete scipy.integrate contienes funciones para resolver EDO: odeint, Isoda, vode, dopri5, dop853
# y'=-y; y(0)=1
get_ipython().magic('reset -f')
import scipy.integrate as scin
import numpy as np
import matplotlib.pyplot as plt
def f(x,y):
return(-y)
def sol(x):
    """Exact solution of y' = -y, y(0) = 1: e^(-x)."""
    return np.exp(-x)
y00 = 0, 1
x0 = 0
y0 = 1
xf = 1
x = np.linspace(x0,xf,100)#discretizacion, dividir en intervalos
y = scin.odeint(f,y0,x)
y1 = sol(x)
plt.plot(x,y,'r')
plt.plot(x,y1,'b')
plt.show()
# Polinomio interpolador de lagrange forma de newtoncon las diferencias divididas
| diegobartolome-proyectos/Ejercicios-matematicos-en-Python | Sistemas_Lineales.py | Sistemas_Lineales.py | py | 6,709 | python | es | code | 0 | github-code | 90 |
30387097937 | class Solution:
def multiply(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
# def str2int(num):
# res = 0
# for i in range(len(num)-1, 0, -1):
# res += int(num[i]) * pow(10, len(num)-1-i)
# return res
if num1 == '0' or num2 == '0': return '0'
dct = {}
for i in range(10):
dct[str(i)] = i
num1_r, num2_r = num1[::-1], num2[::-1]
temp = [0 for i in range(len(num1+num2))]
ret = [0 for i in range(len(num1 + num2))]
for i in range(len(num1)):
for j in range(len(num2)):
temp[i+j] += dct[num1_r[i]] * dct[num2_r[j]]
print(temp)
for i in range(len(temp)):
ret[i] = temp[i] % 10
if i < len(temp)-1:
temp[i+1] += temp[i]//10
return ''.join(str(i) for i in ret[::-1]).lstrip('0')
if __name__ == '__main__':
s = Solution()
print(s.multiply('123','456')) | VRER1997/leetcode_python | middle/043 Multiply Strings.py | 043 Multiply Strings.py | py | 1,047 | python | en | code | 1 | github-code | 90 |
19295028042 | import sys
import pyperclip
from PyQt5.QtCore import QRegExp, QObject, QThread, pyqtSignal
from PyQt5.QtGui import QRegExpValidator, QIcon
from PyQt5.QtWidgets import (
QApplication, QDialog, QMainWindow, QMessageBox, QDialogButtonBox, QLineEdit, QTableWidgetItem, QRadioButton, QFileDialog
)
from PyQt5 import QtWidgets
from PyQt5.uic import loadUi
from main_ui import Ui_MainWindow
import requests
import concurrent.futures
from time import sleep
from sys import exit
from random import uniform as rand
from datetime import datetime
# Validation endpoint: the UPI handle to probe is appended to this URL.
API_URL = 'https://upibankvalidator.com/api/upiValidation?upi='

# Input validators: a 10-digit Indian mobile number and a basic e-mail shape.
phone_regex = QRegExp("[0-9]{10}")
email_regex = QRegExp("^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,4}$")

# opting to load lists from a file instead of hardcoding them
# as this would be more flexible, allow for easier updates,
# and allow others to make use of the lists provided
with open("data/general_suffixes.txt", "r") as suffix_file:
    upi_suffix_dict = suffix_file.read().splitlines() # read all suffixes into a list
with open("data/mobile_suffixes.txt", "r") as mobile_suffix_file:
    mobile_suffix_dict = mobile_suffix_file.read().splitlines()
with open("data/fastag_suffixes.txt", "r") as fastag_suffix_file:
    fastag_suffix_dict = fastag_suffix_file.read().splitlines()
with open("data/gpay_suffixes.txt", "r") as gpay_suffix_file:
    gpay_suffix_dict = gpay_suffix_file.read().splitlines()
class Scraper(QObject):
    """Background worker that probes candidate VPAs against the API.

    Runs on a QThread started by Window.query(); communicates back to the
    GUI through the Qt signals below and by touching the module-global
    `win` window directly (NOTE(review): direct cross-thread widget access
    — confirm this is safe for the widgets involved).
    """
    finished = pyqtSignal()      # emitted when a full suffix sweep is done
    progress = pyqtSignal(int)   # emitted once per probed suffix
    # class-level counters (shared state; each query creates a new Scraper)
    found = 0    # number of registered VPAs discovered
    count = 0    # number of suffixes probed so far
    def searchvpa(self, searchtext, vpa_dict, threadcount):
        """Probe '<searchtext>@<suffix>' for every suffix in *vpa_dict*.

        threadcount == 0 probes sequentially; otherwise a thread pool of at
        most 10 workers is used, with a small random delay between submits
        to avoid hammering the API.
        """
        if(threadcount == 0):
            for suffix in vpa_dict:
                try:
                    self.address_discovery(searchtext + '@' + suffix, API_URL)
                except KeyboardInterrupt:
                    exit(0)
        else:
            threadcount = 10 if threadcount > 10 else threadcount
            with concurrent.futures.ThreadPoolExecutor(max_workers=threadcount) as executor:
                try:
                    for suffix in vpa_dict:
                        executor.submit(self.address_discovery, searchtext + '@' + suffix, API_URL)
                        sleep(rand(.1, .2))
                except KeyboardInterrupt:
                    # quit ungracefully on keyboard interrupt:
                    # considering the bandwidth consumed for requests,
                    # there is no reason to wait for the threads to finish
                    # sorry for the inconvenience
                    executor._threads.clear()
                    concurrent.futures.thread._threads_queues.clear()
        if self.found == 0:
            win.set_footer("No VPA found for: "+searchtext, "red")
    def address_discovery(self, vpa, api_url):
        """POST one candidate *vpa* to the API and record a hit in the table."""
        r = requests.post(api_url+vpa)
        # NOTE(review): leftover debug print of every HTTP status code
        print(r.status_code)
        if r.status_code == 200 and r.json()['isUpiRegistered'] is True:
            # append a (name, vpa) row to the results table
            rowPosition = win.Result.rowCount()
            win.Result.insertRow(rowPosition)
            win.Result.setItem(rowPosition , 0, QTableWidgetItem(r.json()['name']))
            win.Result.setItem(rowPosition , 1, QTableWidgetItem(vpa))
            win.nameEdit.setText(r.json()['name'])
            self.found += 1
            win.set_footer("Found: "+ str(self.found), "green")
        if r.status_code == 400:
            win.set_footer("Bad Request", "red")
        if r.status_code == 429:
            win.set_footer("Too Many Requests", "red")
        self.progress.emit(self.count)
        self.count += 1
    def run(self):
        """Thread entry point: sweep the selected suffix list, then signal completion."""
        self.searchvpa(win.phoneEdit.text(), win.suffix, 2)
        self.finished.emit()
class Window(QMainWindow, Ui_MainWindow):
    """Main application window: queries UPI handles for a phone number and
    shows the matching (name, VPA) pairs in a two-column table.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.connectSignalsSlots()
        self.phoneEdit.setValidator(QRegExpValidator(phone_regex, self.phoneEdit))
        self.emailEdit.setValidator(QRegExpValidator(email_regex, self.emailEdit))
        self.nameEdit.setReadOnly(True)
        self.progressBar.setVisible(False)
        self.allUPI.setChecked(True)
        self.upibankAPI.setChecked(True)
        self.Result.setColumnCount(2)
        self.Result.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.Result.setHorizontalHeaderLabels(["Name", "VPA"])
        self.setWindowIcon(QIcon('logo.png'))
        self.setWindowTitle("gUPI recon")

    def connectSignalsSlots(self):
        """Wire menu actions and buttons to their handlers."""
        self.action_Manually.triggered.connect(self.add_api)
        self.pastePhone.clicked.connect(self.paste_phone)
        self.pasteEmail.clicked.connect(self.paste_email)
        self.Query.clicked.connect(self.query)
        self.saveButton.clicked.connect(self.saveQuery)
        self.action_Add_API_URL.triggered.connect(self.add_api)
        #self.action_Find_Replace.triggered.connect(self.findAndReplace)
        #self.action_About.triggered.connect(self.about)

    # Bug fix: a duplicate `def add_api(self): pass` stub used to precede
    # set_footer(); Python silently keeps only the later definition, so the
    # dead stub has been removed.

    def set_footer(self, text, color="black"):
        """Show *text* in the status footer, rendered in *color*."""
        self.footer.setStyleSheet("color: " + color)
        self.footer.setText(text)

    def add_api(self):
        """Open the modal dialog for entering a custom API URL."""
        dialog = add_API(self)
        dialog.exec()

    def paste_phone(self):
        """Paste the clipboard into the phone field if it is a 10-digit number."""
        if QRegExp.exactMatch(phone_regex, pyperclip.paste()):
            self.phoneEdit.setText(pyperclip.paste())
        else:
            self.set_footer("Invalid phone number", "red")

    def paste_email(self):
        """Paste the clipboard into the e-mail field if it looks like an e-mail."""
        if QRegExp.exactMatch(email_regex, pyperclip.paste()):
            self.emailEdit.setText(pyperclip.paste())
        else:
            self.set_footer("Invalid email", "red")

    def reportProgress(self, value):
        """Map the scraper's suffix counter onto the 0-100 progress bar."""
        self.progressBar.setValue(round(((value + 1) / len(self.suffix)) * 100))

    def saveQuery(self):
        """Save the result table to a user-chosen CSV file."""
        if self.Result.rowCount() > 0:
            name, _ = QFileDialog.getSaveFileName(self, 'Save Query', self.phoneEdit.text(), 'CSV(*.csv)')
            if not name:
                return
            with open(name, 'w') as file:
                for i in range(self.Result.rowCount()):
                    file.write(self.Result.item(i, 0).text())
                    file.write(", ")
                    file.write(self.Result.item(i, 1).text())
                    file.write("\n")
            footer_text = "Saved to " + name
            # Bug fix: the truncated message used to be overwritten
            # immediately by the untruncated one; the branches are now
            # exclusive so long paths actually get shortened.
            if len(footer_text) > 27:
                self.set_footer(footer_text[25:] + "...", "green")
            else:
                self.set_footer(footer_text, "green")
        else:
            self.set_footer("No results to save", "red")

    def query(self):
        """Launch a background Scraper over the selected suffix list."""
        self.set_footer("Querying...")
        self.thread = QThread()
        self.worker = Scraper()
        self.worker.moveToThread(self.thread)
        self.thread.started.connect(self.worker.run)
        self.worker.finished.connect(self.thread.quit)
        self.worker.finished.connect(self.worker.deleteLater)
        self.thread.finished.connect(self.thread.deleteLater)
        self.worker.progress.connect(self.reportProgress)
        self.progressBar.setValue(0)
        self.progressBar.setVisible(True)
        self.thread.finished.connect(
            lambda: self.progressBar.setVisible(False)
        )
        # pick the suffix list matching the checked radio button
        self.suffix = upi_suffix_dict
        if win.gpayUPI.isChecked():
            self.suffix = gpay_suffix_dict
        elif win.fastagUPI.isChecked():
            self.suffix = fastag_suffix_dict
        elif win.mobileUPI.isChecked():
            self.suffix = mobile_suffix_dict
        self.Result.clearContents()
        self.Result.setRowCount(0)
        self.thread.start()
class add_API(QDialog):
    """Modal dialog (UI/api_input.ui) asking the user for an API URL.

    NOTE(review): the entered URL is currently only printed, not stored
    or applied anywhere — confirm intended behavior.
    """
    def ok_clicked(self):
        # handler for the OK button of the dialog's button box
        print(self.api_input.text())
        self.close()
    def __init__(self, parent=None):
        super().__init__(parent)
        loadUi("UI/api_input.ui", self)
        # look up the widgets declared in the .ui file by object name
        self.api_input = self.findChild(QLineEdit, "lineEdit")
        self.button_box = self.findChild(QDialogButtonBox, "buttonBox")
        self.ok = self.button_box.buttons()[0]
        self.ok.clicked.connect(self.ok_clicked)
if __name__ == "__main__":
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec()) | kinshukdua/gUPI-recon | app.py | app.py | py | 8,188 | python | en | code | 13 | github-code | 90 |
import os

import skbuild

import memtrace

# Tracer binaries are built per platform; select the directory that matches
# the running OS and machine architecture.
uname = os.uname()
memtrace_dir = os.path.join(os.path.dirname(__file__), 'memtrace')
tracer_dir = os.path.join(
    memtrace_dir, 'tracer', f'{uname.sysname}-{uname.machine}')

# Package data: the notebook plus every tracer file, recorded relative to
# the ``memtrace`` package directory.
memtrace_data = [
    'memtrace.ipynb',
]
for walk_root, _walk_dirs, walk_files in os.walk(tracer_dir):
    rel_root = os.path.relpath(walk_root, memtrace_dir)
    for walk_file in walk_files:
        memtrace_data.append(os.path.join(rel_root, walk_file))

skbuild.setup(
    name='memtrace',
    version=memtrace.__version__,
    author='mephi42',
    author_email='mephi42@gmail.com',
    description='Valgrind tool for tracing memory accesses',
    url='https://github.com/mephi42/memtrace',
    packages=[
        'memtrace',
        'memtrace_ext',
    ],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.6',
    install_requires=[
        'click',
        'dataclasses; python_version < \'3.7\'',
        'sortedcontainers',
    ],
    package_data={
        'memtrace': memtrace_data,
    },
    entry_points={
        'console_scripts': [
            'memtrace=memtrace.cli:main',
            'memtrace-analyze=memtrace.analysis:main',
            'memtrace-index=memtrace.index:main',
            'memtrace-stats=memtrace.stats:main',
            'memtrace-ud=memtrace.ud:main',
        ],
    },
)
# (dataset artifact, not code) source: mephi42/memtrace, setup.py, 1,487 bytes, Python
71889156457 | from flask import Flask, render_template, request
from k_nearest_neighbors.k_nearest_neighbors import D2KNearestNeighbors, my_distance, poly_weights_recommend, poly_weights_evaluate
from logistic_regression.logistic_regression import D2LogisticRegression
from engine import Engine
import json
URL_PREFIX = ''

app = Flask(__name__)


@app.route('/')
def index():
    """Serve the single-page front end."""
    # Removed a stray no-op string literal (``""" """``) that followed this
    # function in the original.
    return render_template('index.html')
def get_api_string(recommendations, prob, individual_hero_p):
    """Serialize a recommendation response as a JSON string.

    Args:
        recommendations: hero identifiers; coerced to strings for the API.
        prob: overall win probability for the queried line-up.
        individual_hero_p: per-hero win probabilities, parallel to
            ``recommendations``.

    Returns:
        str: JSON object with keys ``data``, ``prob_x`` and ``indi_hero``.
    """
    payload = {
        'data': [str(hero) for hero in recommendations],
        'prob_x': prob,
        'indi_hero': individual_hero_p,
    }
    api_string = json.dumps(payload)
    print(api_string)  # kept: original logged the outgoing payload
    return api_string
'''Choose the Engine to run the stats on '''
# Alternative model, kept for easy switching:
#engine = Engine(D2LogisticRegression())
engine = Engine(D2KNearestNeighbors())  # module-level singleton used by the routes
@app.route('/api/recommend', methods = ['POST'])
def recommend():
    """Recommend heroes for the requesting team.

    Expects a JSON body with ``x`` (my team's hero ids) and ``y`` (the
    opposing team's hero ids).  Returns the JSON payload produced by
    :func:`get_api_string`: recommended heroes, the overall win
    probability, and each recommendation's individual probability.
    """
    content = request.json
    my_team = content['x']
    their_team = content['y']
    # Engine yields (probability, hero) pairs; split them for the response.
    prob_recommendation_pairs = engine.recommend(my_team, their_team)
    recommendations = [hero for _prob, hero in prob_recommendation_pairs]
    # Two decimals is enough precision for the UI (was a format/parse
    # round-trip: float("{0:.2f}".format(p))).
    individual_hero_prob = [round(p, 2) for p, _hero in prob_recommendation_pairs]
    prob = engine.predict(my_team, their_team)
    return get_api_string(recommendations, prob, individual_hero_prob)
if __name__ == '__main__':
    # Development server only; run behind a WSGI server in production.
    app.run(use_reloader=True,port=5000,threaded=True,debug=True)
# (dataset artifact, not code) source: Lrisingr/Dota2ML, app.py, 1,545 bytes, Python
from tkinter import *


def just_buttons():
    """Handle the "click here" button: copy the entry text into the label."""
    print("i got clicked")
    new_text = name_entry.get()
    my_label.config(text=new_text)


window = Tk()
window.minsize(width=500, height=300)
window.title("button creation")
window.config(pady=200, padx=100)

button = Button(text="click here", command=just_buttons)
button.grid(column=1, row=1)

yes_button = Button(text="click yes")
yes_button.grid(column=2, row=0)


def on_no_clicked():
    """Handle the "click No" button: also copy the entry text into the label."""
    print("no")
    new_text = name_entry.get()
    my_label.config(text=new_text)


# Renamed from ``No_button``: the original rebound the handler function's own
# name to the Button widget, shadowing the function.
no_button = Button(text="click No", command=on_no_clicked)
no_button.grid(column=1, row=0)

my_label = Label(text="doing what i know", font=("Arial", 8, "bold"))
my_label.config(padx=20, pady=20)
my_label.grid(column=0, row=0)

# Renamed from ``input`` to avoid shadowing the builtin; also dropped the
# startup ``print(input.get())`` debug line, which always printed "".
name_entry = Entry(width=10)
name_entry.grid(column=2, row=1)

window.mainloop()
# (dataset artifact, not code): "Subsets and Splits — No community queries yet —
# The top public SQL queries from the community will appear here once available."