| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4-11 | stringlengths 113-2.92M | stringlengths 4-125 ⌀ | stringlengths 3-214 | stringlengths 3-160 | stringclasses 18 values | int64 113-2.92M | stringclasses 1 value | stringclasses 93 values | stringclasses 1 value | int64 0-179k ⌀ | stringclasses 3 values | stringclasses 78 values |
27674411948 | """
Author: Carlos Fernando Castaneda
Class: CS 2302
Date Modified: May 12, 2019
Instructor: Olac Fuentes
Assignment: Lab 8 Algorithm Design Techniques
TA: Anindita Nath & Maliheh Zaragan
Purpose: To implement both the randomized algorithms and backtracking techniques
learned in class to check whether two mathematical identities are the same, and to
check the partitions of a new array.
"""
#Imports the modules used for timing, random sampling, and numeric evaluation in this lab
import time
import random
import mpmath
import numpy as np
#Method that goes through all pairs of strings in a given list and checks if they are similar in value
def similarities(S):
    #Starts a counter to keep track of all the similar pairs found
count=0
#For i in range of the length of S
for i in range(len(S)):#goes through all the strings
        #For j in range of the length of S, starting at i+1 to avoid comparing a string with itself
        for j in range(i+1,len(S)):
            #If S[i] is similar in value to S[j], then it will print both items, and add one to count
if(same_values(S[i],S[j])):
print(S[i],S[j])
count+=1
#Returns the value of count to the user
return count
#Method that calculates if two strings are similar in value to each other
def same_values(string_1, string_2,calls=1000,tolerance=0.0001):
#For i in the range of calls
for i in range(calls):
#Assigns a random number to variable x
x = random.uniform(-mpmath.pi,mpmath.pi)
#Sets a new number value1 which takes the information from string_1, and evaluates it
value1 = eval(string_1)
#Sets a new number value2 which takes the information from string_2, and evaluates it
value2 = eval(string_2)
#If the absolute value of value1 - value2 is greater than the tolerance value, then it returns false
if np.abs(value1-value2)>tolerance:
return False
    #Returns true if the condition above was never triggered within the tolerance
return True
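#For example (a quick sanity check of the sampler above, using the Pythagorean identity):
#same_values('mpmath.sin(x)**2', '1-mpmath.cos(x)**2') should return True, while
#same_values('mpmath.sin(x)', 'mpmath.cos(x)') should return False.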
#Method that checks if a partition can be made from the two parts of S
def arrayPartition(S1,S2):
    #If the sum of S1 mod 2 is not 0, then there is no partition
    if sum(S1)%2!=0:#if the total sum is odd then return error message
return "No partition exists"
else:
        #Searches for a subset of S1 that adds up to half of the total sum
        res, s = subset_summation(S1,len(S1)-1,sum(S1)//2)
#If the length of s equals 0, then there is no partition
if len(s)==0:
return "No partition exists"
#For every i in s
for i in s:
            #New counter is created, used to track the position of j in S1
            counter=0
            #For every j in S1
            for j in S1:
                #If the value of i equals the value of j, then S1 pops that single value
                if i == j:
                    S1.pop(counter)
                    #Stops scanning once the element is removed, since the indexes have shifted
                    break
                #Adds one to the counter
                counter+=1
#Returns the value of s and S1
return s,S1
#Method that searches for a subset of S that adds up to the goal value
def subset_summation(S,last,goal):
#If the value of goal equals 0, then it returns true with a new blank array
if goal == 0:
return True, []
    #If the value of goal is less than 0, or no elements are left, then it returns false with a new blank array
if goal<0 or last<0:
return False, []
#Takes a new subset
res, subset = subset_summation(S,last-1,goal-S[last])
    #If res is true, then it will append S[last] and return true with the subset
if res:
subset.append(S[last])
return True, subset
    #Otherwise, it will leave S[last] out of the subset and move on
else:
return subset_summation(S,last-1,goal)
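#A small worked example of the backtracking above: subset_summation([2,4,5,9,12], 4, 16)
#first includes 12 (goal 16 -> 4), then includes 4 (goal 4 -> 0), returning (True, [4, 12]).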
#Starts the timer for the running time for part 1
startTime1=time.time()
print('Importing algorithm equations to test: ')
print()
#Creates a new list called 'part1' which holds all of the expressions whose equality will be compared
part1=['mpmath.sin(x)',
'mpmath.cos(x)',
'mpmath.tan(x)',
'mpmath.sec(x)',
'-mpmath.sin(x)',
'-mpmath.cos(x)',
'-mpmath.tan(x)',
'mpmath.sin(-x)',
'mpmath.cos(-x)',
'mpmath.tan(-x)',
'mpmath.sin(x)/mpmath.cos(x)',
'2*mpmath.sin(x/2)*mpmath.cos(x/2)',
'mpmath.sin(x)**2',
'1-mpmath.cos(x)**2',
'(1-mpmath.cos(2*x))/2',
'1/mpmath.cos(x)']
#Runs the similarity check over every pair of expressions in part1
sim_count = similarities(part1)
#Prints the count number found in the method similarities
print()
print('The number of similarities in the equations are a total of: ', sim_count)
print()
#Ends the timer for the running time for part 1
endTime1=time.time()
#Creates the final time for the running time for part 1
finalTime1 = endTime1-startTime1
#Starts the timer for the running time for part 2
startTime2=time.time()
#Creates a new array of integers needed for part 2 of the lab
part2=[2,4,5,9,12]
print('Partition process using array', part2, 'commencing: ')
print()
#Sends the new array to method arrayPartition
print(arrayPartition(part2,part2))
print()
#Ends the timer for the running time for part 2
endTime2=time.time()
#Creates the final time for the running time for part 2
finalTime2 = endTime2-startTime2
#Prints the running times of both part 1 and part 2
print('Running time for Part 1 in seconds: ',finalTime1)
print('Running time for Part 2 in seconds: ',finalTime2) | cfcastaneda98/CS2302 | Lab8/lab8.py | lab8.py | py | 5,453 | python | en | code | 0 | github-code | 50 |
73961133915 | import pymysql
from modules.cardgen import CardGen
import modules.config as cfg
if __name__ == "__main__":
cardgen = CardGen()
cards = cardgen.get_cards(1)
print(cards)
# Connect to the database
conn = pymysql.connect(host=cfg.host,
user=cfg.user,
password=cfg.password,
db=cfg.db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
conn.autocommit(True)
query = 'INSERT INTO card (expansion, name, clk, pow, hp, effs, img) VALUES'
for c in cards:
query += '\n({0}, "{1}", {2}, {3}, {4}, "{5}", "{6}"),'.format(0, c['name'], c['clk'], c['pow'], c['hp'], c['eff'], c['img'])
query = query[:-1] + ';'
print("QUERY:\n\n\n" + query + "\n\n\n")
conn.cursor().execute(query)
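# Note: building SQL by string formatting is injection-prone. A sketch of the same
# insert using pymysql placeholders (executemany is part of pymysql's cursor API):
#   sql = ('INSERT INTO card (expansion, name, clk, pow, hp, effs, img) '
#          'VALUES (%s, %s, %s, %s, %s, %s, %s)')
#   conn.cursor().executemany(sql, [(0, c['name'], c['clk'], c['pow'], c['hp'], c['eff'], c['img']) for c in cards])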
| TaylorAbraham/Uncharted-Realms-ML | main.py | main.py | py | 870 | python | en | code | 1 | github-code | 50 |
37492925281 | def main():
fruits = ['grape', 'apple', 'strawberry', 'waxberry', 'pitaya']
print(max(fruits))
print(min(fruits))
    max_value = min_value = fruits[0]
    for elem in fruits:
        if elem > max_value:
            max_value = elem
        elif elem < min_value:
            min_value = elem
print("Max:",max_value)
print("Min:",min_value)
if __name__ == '__main__':
main()
| ymjrchx/python-demo | Day07/findmax.py | findmax.py | py | 392 | python | en | code | 0 | github-code | 50 |
70832032154 | '''
Main file
Run in terminal 'python3 main.py' to use project
Dependencies:
- sqlite3
- requests
'''
from api import *
from sql import *
from datetime import date, datetime
# User input start/end date in ISO8601 format
start_date = input("Start date (YYYY-MM-DD, default=[start of repo]): ") + "T"
if start_date == "T":
start_date = date(2008, 1, 1).strftime('%Y-%m-%dT')
end_date = input("End date (YYYY-MM-DD, default=today): ") + "T"
if end_date == "T":
end_date = datetime.now().strftime('%Y-%m-%dT')
# ensure dates are inclusive
start_date += "00:00:00Z"
end_date += "23:59:59Z"
#Repeat until valid owner/name combination given
while True:
# User input to receive valid repo information
repo_owner = input("Repository Owner (default = apache): ")
if repo_owner == "":
repo_owner = "apache"
repo_name = input("Repository Name (default = hadoop): ")
if repo_name == "":
repo_name = "hadoop"
# test if is valid repository
commit_obj = test_repo_info(repo_owner, repo_name)
if 'message' in commit_obj:
# error in request
print(commit_obj['message'])
else:
# valid repo information
break
if init_db(f"{repo_owner}.{repo_name}") == "Created":
# new db, have to load data
page_num = 1
commits_analyzed = 0
print("\nStarting to access GitHub...")
while len(commit_obj) > 0:
commit_obj = get_commits(repo_owner, repo_name, start_date, end_date, page_num)
if 'message' in commit_obj:
print(commit_obj['message'])
break
add_commits_to_db(commit_obj)
commits_analyzed += len(commit_obj)
print(f"{commits_analyzed} commits analyzed", end="\r")
page_num += 1
print(f"{commits_analyzed} commits analyzed")
top_authors = get_top_authors()
print("\nTop Authors")
for i in range(1, min(3, len(top_authors)) + 1):
print(f"{i}: {top_authors[i-1][0]} - {top_authors[i-1][1]}")
longest_window = get_longest_contribution_window()
print("\nLongest Window:")
print(f"{longest_window[0]} - {longest_window[1].days} days")
heatmap, maxnum = generate_heatmap()
days_of_week = ["M ", "T ", "W ", "Th", "F ", "S ", "Su"]
timings = ["12AM-3AM", "3AM-6AM ", "6AM-9AM ", "9AM-12PM", "12PM-3PM", "3PM-6PM ", "6PM-9PM ", "9PM-12AM"]
TABLE_DATA_MAX_LENGTH = max(2, len(str(maxnum)))
MAX_TIMING_LENGTH = 8
print("\nHeatmap:")
# table header row
header = " " * MAX_TIMING_LENGTH
for day in range(7):
header += "|" + days_of_week[day] + " " * (TABLE_DATA_MAX_LENGTH - 2)
print(header)
# rest of the table rows
for row in range(8):
row_divider = "-" * (MAX_TIMING_LENGTH + (1 + TABLE_DATA_MAX_LENGTH) * 7)
print(row_divider)
row_str = timings[row] # row header (timing)
for column in range(7):
table_value = str(heatmap[column][row])
row_str += "|" + table_value + " " * (TABLE_DATA_MAX_LENGTH - len(table_value))
print(row_str)
print("\n")
| ARtheboss/github-repo-analysis | main.py | main.py | py | 2,967 | python | en | code | 0 | github-code | 50 |
40126941120 | # used by cmsDriver when called like
# cmsDriver.py hlt -s HLT:@relval
autoHLT = {
'fake' : 'Fake',
'fake1' : 'Fake1',
'fake2' : 'Fake2',
'relval50ns' : 'Fake',
'relval25ns' : 'Fake1',
'relval2016' : 'Fake2',
'relval2017' : 'Fake2',
'relval2018' : 'Fake2',
'relval2022' : 'Fake2',
'relval2023' : '2023v12',
'relval2024' : 'GRun',
'relval2026' : '75e33',
'test' : 'GRun',
}
| cms-sw/cmssw | Configuration/HLT/python/autoHLT.py | autoHLT.py | py | 424 | python | en | code | 985 | github-code | 50 |
73110306076 | from pydub import AudioSegment
def split_mp3(file_name):
sound = AudioSegment.from_mp3(file_name)
halfway_point = len(sound) // 2
    # the first half of the track, appended to itself (i.e. played twice)
    first_half = sound[:halfway_point] + sound[:halfway_point]
    # create a new file "first_half_twice.mp3":
    first_half.export("first_half_twice.mp3", format="mp3")
| Algostu/chungyo | pose_diff/core/audio.py | audio.py | py | 306 | python | en | code | 6 | github-code | 50 |
24360173200 | # Permutations can also be generated with a bit mask.
arr = [1, 2, 3]
N = 3
sel = [0] * N  # selection buffer.
def perm(idx, check):
if idx == N:
print(sel)
return
    for i in range(N): # repeat for the number of elements.
        if (check & (1<<i)) != 0: # do not reuse a previously used element. -> how is this checked?
continue
sel[idx] = arr[i]
        perm(idx + 1, check | (1 << i)) # because of the bitwise OR, the bits already set to 1 in check are excluded in the next call.
# ...
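# Answering the question above: bit i of check marks whether arr[i] has been used.
# With N = 3, check = 0b101 means arr[0] and arr[2] are taken, so (check & (1 << 1)) == 0
# and arr[1] is the only element still available at this position.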
perm(0, 0) | phoenix9373/Algorithm | 2020/SWEA_문제/순열_BitMask.py | 순열_BitMask.py | py | 580 | python | ko | code | 0 | github-code | 50 |
25156426576 | import optuna
from optuna.pruners import SuccessiveHalvingPruner
from optuna.samplers import TPESampler
from optuna.trial import TrialState
from functools import partial
import matplotlib.pyplot as plt
from ai.lab.base import LabEntity
from ai.lab.trial import Trial
from ai.util import print_header
class Experiment(LabEntity):
def __init__(s, path, clean=False, direction='min', prune=False, **trial_kw):
super().__init__(path, clean)
assert direction in ['min', 'max']
if prune:
assert 'val_data' in trial_kw or 'task' in trial_kw, (
'Need val_data or task in trial kwargs if prune==True')
assert 'task' not in trial_kw, 'TODO: task-based early stopping'
s._trial_kw = trial_kw
optuna.logging.set_verbosity(optuna.logging.WARNING)
s._exp = optuna.create_study(
study_name=str(s.path),
storage=f'sqlite:///{s.path}/experiment.db',
load_if_exists=True,
direction='minimize' if direction == 'min' else 'maximize',
sampler=TPESampler(),
pruner=SuccessiveHalvingPruner() if prune else None,
)
s._prune = prune
@property
def trial_data(s):
return s._exp.trials
@property
def best_hparams(s):
return s._exp.best_params
def run(s, n, fn):
print(f'\nRUNNING EXPERIMENT {s.path} (n={n})\n')
s._exp.optimize(partial(s._run, fn), n_trials=n)
def show_plot(s, hparam, show_pruned=False, only_best=None):
trials = []
for t in s._exp.trials:
if show_pruned or t.state == TrialState.COMPLETE:
trials.append(t)
if only_best is not None:
idx = int(len(trials) * only_best)
trials = sorted(trials, key=lambda t: t.value)[:idx]
x, y = [], []
for t in trials:
x.append(t.params[hparam])
y.append(t.value)
plt.scatter(x, y)
plt.show()
def _run(s, fn, optuna_trial):
id = str(optuna_trial.number)
print_header(f'TRIAL {id}')
trial = _ExpTrial(
s.path / f'trials/{id}',
optuna_trial,
s._prune,
**s._trial_kw,
)
result = fn(trial)
print(f'RESULT: {result:.4f}\n')
return result
class _ExpTrial(Trial):
def __init__(s, path, optuna_trial, prune, **kw):
super().__init__(path, val_stopper=s.pruner, **kw)
s._optuna_trial = optuna_trial
s._prune = prune
s.hp = _HyperParams(optuna_trial)
def pruner(s, step, val_loss):
s._optuna_trial.report(val_loss, step)
if s._prune and s._optuna_trial.should_prune():
print_header('')
print('PRUNED\n')
raise optuna.TrialPruned()
return False
class _HyperParams:
def __init__(s, optuna_trial, prefix=None):
s._optuna = optuna_trial
s._prefix = prefix
def lin(s, name, min_, max_, step=1):
name = s._prefix_name(name)
has_float = False
for x in [min_, max_, step]:
if isinstance(x, float):
has_float = True
break
if has_float:
val = s._optuna.suggest_float(name, min_, max_, step=step)
else:
for x in [min_, max_, step]:
assert isinstance(x, int)
val = s._optuna.suggest_int(name, min_, max_, step=step)
print(f'{name}: {val}')
return val
def log(s, name, min_, max_):
name = s._prefix_name(name)
if isinstance(min_, float) or isinstance(max_, float):
val = s._optuna.suggest_float(name, min_, max_, log=True)
else:
assert isinstance(min_, int) and isinstance(max_, int)
val = s._optuna.suggest_int(name, min_, max_, log=True)
print(f'{name}: {val}')
return val
def lst(s, name, items):
name = s._prefix_name(name)
val = s._optuna.suggest_categorical(name, items)
print(f'{name}: {val}')
return val
def _prefix_name(s, name):
if s._prefix is None:
return name
return f'{s._prefix}.{name}'
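# A minimal usage sketch of the Experiment class above. The objective body and the
# 'out/exp1' path are hypothetical; only Experiment/run/hp/best_hparams come from this file:
#
#   exp = Experiment('out/exp1', direction='min')
#   def objective(trial):
#       lr = trial.hp.log('lr', 1e-5, 1e-1)         # log-uniform float
#       width = trial.hp.lin('width', 64, 512, 64)  # linear int with step
#       return (lr - 1e-3) ** 2 + width * 1e-6      # stand-in for a real validation loss
#   exp.run(n=20, fn=objective)
#   print(exp.best_hparams)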
| calvinpelletier/ai | lab/exp.py | exp.py | py | 4,226 | python | en | code | 0 | github-code | 50 |
9349714998 | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def inicio():
return render_template("formulario.html")
@app.route('/procesar', methods=['POST'])
def procesar():
palabra = request.form.get("palabra")
significado = request.form.get("significado")
return render_template("mostrar.html", palabra=palabra, significado=significado)
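# A quick manual test of the POST route (hypothetical form values):
#   curl -X POST -d "palabra=hola" -d "significado=hello" http://localhost:8088/procesar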
if __name__ == "__main__":
app.run( port=8088, debug=True) | Fersnake22/Sustitucion-de-CLI-por-Web | main.py | main.py | py | 452 | python | en | code | 0 | github-code | 50 |
36817192015 | from __future__ import print_function
import os
import yaml
from config import config
destination = os.path.expanduser("~/.exo")
if not os.path.exists(destination):
os.mkdir(destination, 0o755)
print("created configuration folder:", destination)
config_destination = os.path.join(destination, "template.yaml")
if not os.path.exists(config_destination):
pwd = os.getcwd()
cfg = config.load("config/config.yaml")
cfg["template"]["channel"] = os.path.join(pwd, "config/2012.input.yaml")
cfg["template"]["plot"] = os.path.join(pwd, "config/2012.plot.yaml")
with open(config_destination, "w") as output_:
yaml.dump(cfg, output_)
print("the application configuration is saved in:", config_destination)
print("the system is setup for running")
| baites/exo_plots | install.py | install.py | py | 790 | python | en | code | 0 | github-code | 50 |
26895369081 | from requests import request, exceptions as req_exceptions
from .microsoft_api_auth import *
from connectors.core.connector import get_logger, ConnectorError
from connectors.core.utils import update_connnector_config
logger = get_logger('azure-log-analytics')
MANAGE_SERVER_URL = 'https://management.azure.com'
MANAGE_API_VERSION = '2020-08-01'
LOG_SERVER_URL = 'https://api.loganalytics.io'
LOG_API_VERSION = '2022-10-27_Preview'
class AzureLogAnalytics(object):
def __init__(self, config):
self.server_url = LOG_SERVER_URL
self.manage_server_url = MANAGE_SERVER_URL
self.verify_ssl = config.get('verify_ssl')
self.ms_auth = MicrosoftAuth(config)
self.tenant_id = config.get('tenant_id')
self.connector_info = config.pop('connector_info', '')
self.manage_token = self.ms_auth.validate_token(config, self.connector_info)
self.log_token = self.ms_auth.validate_log_token(config, self.connector_info)
self.api_version = MANAGE_API_VERSION
def api_request(self, method, endpoint, config, params=None, data=None, headers={},
manage_api_endpoint=False):
try:
if manage_api_endpoint:
headers = {
'Authorization': self.manage_token,
'Content-Type': 'application/json'
}
service_url = self.manage_server_url + endpoint
params['api-version'] = MANAGE_API_VERSION
else:
headers = {
'Authorization': self.log_token,
'Content-Type': 'application/json'
}
service_url = self.server_url + endpoint
params['api-version'] = LOG_API_VERSION
try:
response = request(method, service_url, headers=headers, params=params, json=data,
verify=self.verify_ssl)
logger.debug("Response Status Code: {0}".format(response.status_code))
logger.debug("Response: {0}".format(response.text))
logger.debug("API Header: {0}".format(response.headers))
if response.status_code in [200, 201, 204]:
if response.text != "":
return response.json()
else:
return True
else:
if response.text != "":
err_resp = response.json()
failure_msg = err_resp['error']['message']
error_msg = 'Response [{0}:{1} Details: {2}]'.format(response.status_code, response.reason,
failure_msg if failure_msg else '')
else:
error_msg = 'Response [{0}:{1}]'.format(response.status_code, response.reason)
logger.error(error_msg)
raise ConnectorError(error_msg)
except req_exceptions.SSLError:
logger.error('An SSL error occurred')
raise ConnectorError('An SSL error occurred')
except req_exceptions.ConnectionError:
logger.error('A connection error occurred')
raise ConnectorError('A connection error occurred')
except req_exceptions.Timeout:
logger.error('The request timed out')
raise ConnectorError('The request timed out')
except req_exceptions.RequestException:
logger.error('There was an error while handling the request')
raise ConnectorError('There was an error while handling the request')
except Exception as err:
raise ConnectorError(str(err))
except Exception as err:
raise ConnectorError(str(err))
def check_payload(payload):
final_payload = {}
for key, value in payload.items():
if isinstance(value, dict):
nested = check_payload(value)
if len(nested.keys()) > 0:
final_payload[key] = nested
elif value is not None and value != '':
final_payload[key] = value
return final_payload
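# Example of the recursive pruning above:
# check_payload({'a': 1, 'b': '', 'c': {'d': None, 'e': 2}}) -> {'a': 1, 'c': {'e': 2}}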
def build_payload(payload):
payload = {k: v for k, v in payload.items() if v is not None and v != ''}
return payload
def execute_query(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/v1/workspaces/{0}/query'.format(config.get('workspace_id'))
workspaces = config.get("workspace_name")
if workspaces:
workspaces = workspaces.split(",")
payload = {
'query': params.get('query'),
'timespan': params.get('timespan'),
'workspaces': workspaces
}
payload = build_payload(payload)
logger.debug("Payload: {0}".format(payload))
response = al.api_request("POST", endpoint, config=config, data=payload, params={})
return response
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def list_saved_searches(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches'.format(
config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'))
response = al.api_request("GET", endpoint, config, manage_api_endpoint=True, params={})
return response
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def get_saved_searches(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format(
config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'),
params.get('savedSearchId'))
response = al.api_request("GET", endpoint, config, manage_api_endpoint=True, params={})
return response
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def create_saved_searches(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format(
config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'),
params.get('savedSearchId'))
additional_fields = params.get('additional_fields')
payload = {
"etag": params.get('etag'),
"properties": {
"category": params.get('category'),
"displayName": params.get('displayName'),
"query": params.get('query')
}
}
if additional_fields:
payload['properties'].update(additional_fields)
payload = check_payload(payload)
logger.debug("Payload: {0}".format(payload))
response = al.api_request("PUT", endpoint, config, data=payload, manage_api_endpoint=True, params={})
return response
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def update_saved_searches(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format(
config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'),
params.get('savedSearchId'))
additional_fields = params.get('additional_fields')
payload = {
"etag": "*",
"properties": {
"category": params.get('category'),
"displayName": params.get('displayName'),
"query": params.get('query')
}
}
if additional_fields:
payload['properties'].update(additional_fields)
payload = check_payload(payload)
logger.debug("Payload: {0}".format(payload))
response = al.api_request("PUT", endpoint, config, data=payload, manage_api_endpoint=True, params={})
return response
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def delete_saved_search(config, params):
try:
al = AzureLogAnalytics(config)
endpoint = '/subscriptions/{0}/resourcegroups/{1}/providers/Microsoft.OperationalInsights/workspaces/{2}/savedSearches/{3}'.format(
config.get('subscription_id'), config.get('resource_group_name'), config.get('workspace_name'),
params.get('savedSearchId'))
response = al.api_request("DELETE", endpoint, config, manage_api_endpoint=True, params={})
return {'result': 'Deleted Saved Search {0} successfully'.format(params.get('savedSearchId'))}
except Exception as err:
logger.exception("{0}".format(str(err)))
raise ConnectorError("{0}".format(str(err)))
def check(config, connector_info):
try:
ms = MicrosoftAuth(config)
config_id = config['config_id']
if 'accessToken' in config and 'logAccessToken' in config:
ms.validate_token(config, connector_info) and ms.validate_log_token(config, connector_info)
elif 'accessToken' not in config and 'logAccessToken' in config:
token_resp = ms.generate_token()
config['accessToken'] = token_resp.get('accessToken')
config['expiresOn'] = token_resp.get('expiresOn')
config['refresh_token'] = token_resp.get('refresh_token')
update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config,
config['config_id']) and ms.validate_log_token(config, connector_info)
elif 'accessToken' in config and 'logAccessToken' not in config:
token_resp = ms.generate_token(LOG_SCOPE)
config['logAccessToken'] = token_resp['accessToken']
config['logExpiresOn'] = token_resp['expiresOn']
config['logRefreshToken'] = token_resp.get('refresh_token')
update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config,
config['config_id']) and ms.validate_log_token(config, connector_info)
else:
token_resp = ms.generate_token()
config['accessToken'] = token_resp.get('accessToken')
config['expiresOn'] = token_resp.get('expiresOn')
config['refresh_token'] = token_resp.get('refresh_token')
token_resp = ms.generate_token(LOG_SCOPE)
config['logAccessToken'] = token_resp['accessToken']
config['logExpiresOn'] = token_resp['expiresOn']
config['logRefreshToken'] = token_resp.get('refresh_token')
update_connnector_config(connector_info['connector_name'], connector_info['connector_version'], config,
config['config_id']) and ms.validate_log_token(config, connector_info)
config['config_id'] = config_id
return True
except Exception as err:
raise ConnectorError(str(err))
operations = {
'execute_query': execute_query,
'list_saved_searches': list_saved_searches,
'get_saved_searches': get_saved_searches,
'create_saved_searches': create_saved_searches,
'update_saved_searches': update_saved_searches,
'delete_saved_search': delete_saved_search
}
| fortinet-fortisoar/connector-azure-log-analytics | azure-log-analytics/operations.py | operations.py | py | 11,944 | python | en | code | 0 | github-code | 50 |
11964147517 | from typing import List
from collections import Counter, defaultdict
class Solution:
def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
array = sorted(set(nums), reverse=True)
frequency = Counter(nums)
hash_map = defaultdict(int)
length = len(nums)
for key in array:
length -= frequency[key]
hash_map[key] = length
answer=[]
for item in nums:
answer.append(hash_map[item])
return answer
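# How the map is built for nums = [8, 1, 2, 2, 3]: the distinct values in descending
# order are [8, 3, 2, 1]; subtracting each value's frequency from the running length 5
# gives {8: 4, 3: 3, 2: 1, 1: 0}, so the answer is [4, 0, 1, 1, 3].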
| duressa-feyissa/A2SV_Programming | 1365-how-many-numbers-are-smaller-than-the-current-number/1365-how-many-numbers-are-smaller-than-the-current-number.py | 1365-how-many-numbers-are-smaller-than-the-current-number.py | py | 469 | python | en | code | 0 | github-code | 50 |
33657136040 | from django.urls import path, include
from rest_framework.routers import Route
from app.urls import router
from . import views
app_name = 'user'
router.routes += [
# User View Route
Route(
url=r'^user{trailing_slash}$',
mapping={
'get': 'view_user',
'post': 'create_user',
},
name='user-view',
detail=False,
initkwargs={'suffix': 'View'}
),
# User Detail Route
Route(
url=r'user{trailing_slash}{lookup}{trailing_slash}$',
mapping={
'get': 'view_user_by_id',
'patch': 'update_user_by_id',
'delete': 'destroy_user_by_id'
},
name='user-detail',
detail=True,
initkwargs={'suffix': 'Detail'}
),
]
router.register('user', views.UserViewSet)
router.register('user', views.UserDetailViewSet)
urlpatterns = [
path('', include(router.urls)),
path('token/', views.AuthTokenViewSet.as_view(), name='auth-token')
]
| Diaga/MARS-Server | app/user/urls.py | urls.py | py | 997 | python | en | code | 1 | github-code | 50 |
35134699125 | from .base import BaseCommand
from app.controllers import Commands
from app.utilities import typings, errors
class Command(BaseCommand):
name = "start"
usage = "start <tournament_id>"
description = "Start or Resume the tournament mode"
def __init__(self) -> None:
self.commands = Commands(package="app.commands.cmd_start")
self.is_running = True
def __quit(self):
class Command(BaseCommand):
name = "quit"
usage = "quit"
description = "quit the tournament mode"
reload = False
def run(_, context: typings.Context, **kwargs):
self.is_running = False
return Command()
def _check_rounds(self, tournament):
state = tournament.state
try:
tournament.round_instances[state.current_round]
except IndexError:
generated_round = tournament.generate_round()
tournament.round_instances.append(generated_round)
return tournament.save()
return tournament
def _check_commands(self, tournament):
        # Enable & disable commands based on the tournament state
curr_round = tournament.state.current_round + 1
disable_previous = False if curr_round > 1 else True
self.commands.cache["previous"].is_disabled = disable_previous
disable_next = True if curr_round == tournament.rounds else False
self.commands.cache["next"].is_disabled = disable_next
disable_end = False if len(tournament.round_instances) == tournament.rounds else True
self.commands.cache["end"].is_disabled = disable_end
        disable_commit = True if tournament.state.is_ongoing else False
        self.commands.cache["commit"].is_disabled = disable_commit
def run(self, context: typings.Context, args: list):
context = context.copy() # i don't want to modify the 'main' context
self.commands.cache["quit"] = self.__quit()
tournament_view = context["views"]["tournament"]
tournament_model = context["models"]["Tournament"]
tournament_id = self.pop_arg(args)
tournament = tournament_model.find_one(tournament_id)
if not tournament:
raise errors.GenericError(f"Tournament with the id [{tournament_id}] doesn't exist")
tournament = self._check_rounds(tournament)
commands = self.commands.cache.values()
self._check_commands(tournament)
tournament_view.render_selected_tournament(tournament, commands)
while self.is_running:
try:
input_content = input("-> : ").strip()
args = input_content.split(" ")
command_name = args.pop(0)
self.commands.execute(command_name, args=args, context=context, tournament=tournament)
tournament = tournament.save()
except Exception as e:
if not hasattr(e, "custom"):
errors.GenericError(e)
if not self.is_running:
# Its not an error, but its a way out
raise errors.GenericError("Tournament Mode has been closed", title="Note")
tournament = self._check_rounds(tournament)
self._check_commands(tournament)
tournament_view.render_selected_tournament(tournament, commands)
| Madscientiste/OpenClassrooms_P4 | app/commands/start.py | start.py | py | 3,357 | python | en | code | 0 | github-code | 50 |
7408239741 | import os
import re
import pandas as pd
import numpy as np
import logging
from time import strptime
from collections import defaultdict
from src.utils.files import save_json
from src.utils.refs import aux_paths, params
def read_csv(path, build_mode=False):
_df = pd.read_csv(path)
if build_mode:
_df = _df[0:50]
return _df
def preprocess(data_path, args, load_if_avail=True, save_fe=True, save_all=True):
"""
Feature cleaning
Feature engineering
Feature selection (minimally)
"""
t_name = data_path.split('.csv')[0]
all_fe_path = f'{args.out_folder}/{t_name}_df_fe_all.csv'
logging.info(f'-- starting {t_name} fe')
if load_if_avail and os.path.exists(all_fe_path):
logging.info(f'Opening fe data for all...')
df = read_csv(all_fe_path, args.build_mode)
else:
# Load or generate main features
fe_path = f'{args.out_folder}/{t_name}_df_fe.csv'
if load_if_avail and os.path.exists(fe_path):
logging.info(f'Opening fe data for main...')
df = read_csv(fe_path, args.build_mode)
else:
logging.info(f'Generating fe data for main...')
df = read_csv(os.path.join(args.data_folder, data_path), args.build_mode)
df = generate_main_fe(df, fe_path, save_fe=save_fe)
# Load or generate auxiliary features
aux_df = pd.DataFrame()
aux_cols = params['aux_cols']
if args.with_hdb_data:
aux_cols += params['hdb_cols']
for aux in aux_paths.keys():
# build hdb aux data only if opted in args
if aux=='hdb_data' and not args.with_hdb_data:
                continue  # 'next' was a no-op expression; continue actually skips this aux source
aux_fe_path = f'{args.out_folder}/{t_name}_df_fe_{aux}.csv'
if load_if_avail and os.path.exists(aux_fe_path):
logging.info(f'Opening fe auxiliary data for "{aux}"...')
_aux_df = read_csv(aux_fe_path, args.build_mode)
else:
logging.info(f'Generating fe auxiliary data for "{aux}"...')
# load all auxiliary data (no build mode here)
_aux_df = generate_aux_fe(
df, aux, os.path.join(args.data_folder, aux_paths[aux]),
aux_fe_path, save_fe=save_fe
)
keep_columns = [i for i in _aux_df.columns if i in aux_cols]
_aux_df = _aux_df[keep_columns]
# concat to frame
aux_df = pd.concat([aux_df, _aux_df], axis=1)
logging.info(f'Auxiliary data has {len(aux_df)} rows, {len(aux_df.columns)} cols')
# Combine features
df = pd.concat([df, aux_df], axis=1)
logging.info(f'Final data has {len(df)} rows, {len(df.columns)} cols')
# Save frame
if save_all:
df.to_csv(all_fe_path, index=False)
logging.info(f'-- complete {t_name} fe')
return df
def clean_flat_type(df):
df['flat_type'] = df['flat_type'].apply(lambda x: str(x).lower())
df.loc[df['flat_type'] == "1-room", 'flat_type'] = "1 room"
df.loc[df['flat_type'] == "2-room", 'flat_type'] = "2 room"
df.loc[df['flat_type'] == "3-room", 'flat_type'] = "3 room"
df.loc[df['flat_type'] == "4-room", 'flat_type'] = "4 room"
df.loc[df['flat_type'] == "5-room", 'flat_type'] = "5 room"
return df
def generate_main_fe(df, fe_path, save_fe=True):
"""
Note that this section is manual, created by domain knowledge.
"""
# resale timing
df[['resale_year', 'resale_month']] = df['month'].str.split('-', 1, expand=True)
df['resale_quarter'] = df['resale_month'].apply(lambda m: (int(m)-1)//3 + 1)
df['flat_age'] = df['resale_year'].astype(int)-df['lease_commence_date'].astype(int)
# create alternative dep var
if 'resale_price' in df.columns:
df['resale_price_sqm'] = df['resale_price']/df['floor_area_sqm']
# flat type
df = clean_flat_type(df)
# count of 4 occurences in block no
df['block'] = df['block'].apply(lambda x: x.count('4'))
# convert to 01 to 06, 06 to 10, 10 to 15, 16 to 21, 21 to 25, 25 to 30,
# 31 to 36, 36 to 40, 40 to 45, 46 to 51
# data is messy as it has lots of overlaps, so the partioning is to make
# it more systematic
# 01 to 06
df.loc[df['storey_range'] == "01 to 03", 'storey_range'] = "01 to 06"
df.loc[df['storey_range'] == "01 to 05", 'storey_range'] = "01 to 06"
df.loc[df['storey_range'] == "04 to 06", 'storey_range'] = "01 to 06"
# 06 to 10
df.loc[df['storey_range'] == "07 to 09", 'storey_range'] = "06 to 10"
# 10 to 15
df.loc[df['storey_range'] == "10 to 12", 'storey_range'] = "10 to 15"
df.loc[df['storey_range'] == "11 to 15", 'storey_range'] = "10 to 15"
df.loc[df['storey_range'] == "13 to 15", 'storey_range'] = "10 to 15"
# 16 to 21
df.loc[df['storey_range'] == "16 to 18", 'storey_range'] = "16 to 21"
df.loc[df['storey_range'] == "16 to 20", 'storey_range'] = "16 to 21"
df.loc[df['storey_range'] == "19 to 21", 'storey_range'] = "16 to 21"
# 21 to 25
df.loc[df['storey_range'] == "22 to 24", 'storey_range'] = "21 to 25"
# 25 to 30
df.loc[df['storey_range'] == "25 to 27", 'storey_range'] = "25 to 30"
df.loc[df['storey_range'] == "26 to 30", 'storey_range'] = "25 to 30"
df.loc[df['storey_range'] == "28 to 30", 'storey_range'] = "25 to 30"
# 31 to 36
df.loc[df['storey_range'] == "31 to 33", 'storey_range'] = "31 to 36"
df.loc[df['storey_range'] == "31 to 35", 'storey_range'] = "31 to 36"
df.loc[df['storey_range'] == "34 to 36", 'storey_range'] = "31 to 36"
# 36 to 40
df.loc[df['storey_range'] == "37 to 39", 'storey_range'] = "36 to 40"
# 40 to 45
df.loc[df['storey_range'] == "40 to 42", 'storey_range'] = "40 to 45"
df.loc[df['storey_range'] == "43 to 45", 'storey_range'] = "40 to 45"
# 46 to 51
df.loc[df['storey_range'] == "46 to 48", 'storey_range'] = "46 to 51"
df.loc[df['storey_range'] == "49 to 51", 'storey_range'] = "46 to 51"
# save frame if opted
if save_fe:
df.to_csv(fe_path, index=False)
return df
def generate_aux_hdb(df, aux_df, aux, save_fe=True):
dnew_columns = defaultdict(dict)
# median_resale_price
sheet = 'median-resale-prices-for-regist'
hdb = aux_df[sheet].copy()
aux_df.pop(sheet)
hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x)
hdb = clean_flat_type(hdb)
hdb = hdb.rename(columns={'quarter': 'resale_quarter', 'price': 'median_resale_price'})
hdb[['resale_year', 'resale_quarter']] = hdb['resale_quarter'].str.split('-', 1, expand=True)
hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x))
hdb['resale_quarter'] = hdb['resale_quarter'].apply(lambda x: int(x[1]))
hdb['town'] = hdb['town'].apply(lambda x: str(x).lower())
df['town'] = df['town'].apply(lambda x: str(x).lower())
df_x_aux = pd.merge(df, hdb, how='left')[['median_resale_price']]
# no_of_resale_applications
sheet = 'number-of-resale-applications-r'
hdb = aux_df[sheet].copy()
aux_df.pop(sheet)
hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x)
hdb = clean_flat_type(hdb)
hdb = hdb.rename(columns={'quarter': 'resale_quarter'})
hdb[['resale_year', 'resale_quarter']] = hdb['resale_quarter'].str.split('-', 1, expand=True)
hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x))
hdb['resale_quarter'] = hdb['resale_quarter'].apply(lambda x: int(x[1]))
df_x_aux = pd.concat([df_x_aux, pd.merge(df, hdb, how='left')[['no_of_resale_applications']]], axis=1)
# resale_transactions
sheet = 'resale-transactions-by-flat-typ'
hdb = aux_df[sheet].copy()
aux_df.pop(sheet)
hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x)
hdb = clean_flat_type(hdb)
hdb = hdb.rename(columns={'financial_year': 'resale_year'})
hdb['resale_year'] = hdb['resale_year'].apply(lambda x: int(x))
df_x_aux = pd.concat([df_x_aux, pd.merge(df, hdb, how='left')[['resale_transactions']]], axis=1)
# construction status
sheet = 'completion-status-of-hdb-reside'
hdb = aux_df[sheet].copy()
aux_df.pop(sheet)
hdb = hdb.applymap(lambda x: np.nan if x in ['-','', 'na', 'none'] else x)
hdb = hdb.rename(columns={'financial_year': 'resale_year', 'town_or_estate': 'town'})
hdb = hdb.groupby(['resale_year', 'town', 'hdb_or_dbss', 'status'])['no_of_units'].sum().unstack(['hdb_or_dbss', 'status']).reset_index()
hdb.columns = [i[0] if i[1]=='' else i[0]+'_'+i[1] for i in hdb.columns]
hdb['town'] = hdb['town'].apply(lambda x: str(x).lower())
df_x_aux = pd.concat([
df_x_aux,
pd.merge(df, hdb, how='left')[['HDB_Completed', 'HDB_Under Construction', 'DBSS_Completed', 'DBSS_Under Construction']]],
axis=1)
# new columns
dnew_columns[aux] = [
'median_resale_price', 'no_of_resale_applications', 'HDB_Completed',
'HDB_Under Construction', 'DBSS_Completed', 'DBSS_Under Construction',
'resale_transactions']
return df_x_aux, dnew_columns
def generate_aux_fe(df, aux, aux_fe_in_path, aux_fe_out_path, save_fe=True):
# load aux frame(s)
if aux == 'macro' or aux == 'hdb_data':
if aux == 'macro':
sheets = ['annual', 'quarterly', 'monthly']
elif aux == 'hdb_data':
sheets = ['median-resale-prices-for-regist', 'number-of-resale-applications-r', 'resale-transactions-by-flat-typ', 'completion-status-of-hdb-reside']
aux_df = {}
for sheet in sheets:
aux_df[sheet] = pd.read_excel(
aux_fe_in_path,
sheet_name=sheet,
engine='openpyxl')
else:
aux_df = read_csv(aux_fe_in_path)
# create features per aux type
if aux == 'demographics':
aux_df, dnew_columns = generate_aux_demographic(df, aux_df, aux)
elif aux == 'commercial':
aux_df, dnew_columns = generate_aux_commercial(df, aux_df, aux)
elif aux == 'hawker':
aux_df, dnew_columns = generate_aux_hawker(df, aux_df, aux)
elif aux == 'station':
aux_df, dnew_columns = generate_aux_station(df, aux_df, aux)
elif aux == 'malls':
aux_df, dnew_columns = generate_aux_malls(df, aux_df, aux)
elif aux == 'prisch':
aux_df, dnew_columns = generate_aux_prisch(df, aux_df, aux)
elif aux == 'secsch':
aux_df, dnew_columns = generate_aux_secsch(df, aux_df, aux)
elif aux == 'macro':
aux_df, dnew_columns = generate_aux_macro(df, aux_df, aux)
elif aux == 'hdb_data':
aux_df, dnew_columns = generate_aux_hdb(df, aux_df, aux)
else:
raise NotImplementedError
# save frame if opted
if save_fe:
aux_df.to_csv(aux_fe_out_path, index=False)
save_json(dnew_columns, aux_fe_out_path.split('.csv')[0]+'_cols.json')
return aux_df
def generate_aux_macro(df, aux_df, aux):
dnew_columns = defaultdict(dict)
for col in ['resale_year', 'resale_month', 'resale_quarter']:
df[col] = df[col].astype(int)
# annual
sheet='annual'
_aux_df = aux_df[sheet].copy()
aux_df.pop(sheet)
orig_col_names = _aux_df.columns
new_col_names = [sheet+'_'+abbreviate_col_name(col) for col in orig_col_names]
_aux_df.columns = new_col_names
dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names))
    _aux_df = _aux_df.applymap(lambda x: np.nan if pd.isnull(x) else float(re.sub(r"[^\d.]", "", str(x))))
df = pd.merge(df, _aux_df, how='left', left_on='resale_year', right_on=sheet+'_Variables')
df = df.drop(columns=sheet+'_Variables')
# quarterly
sheet='quarterly'
_aux_df = aux_df[sheet].copy()
aux_df.pop(sheet)
merge_on = ['resale_year', 'resale_quarter']
_aux_df[merge_on] = _aux_df['Variables'].str.split(' ', 1, expand=True)
_aux_df['resale_quarter'] = _aux_df['resale_quarter'].apply(lambda q: q[0])
_aux_df = _aux_df.drop(columns='Variables')
orig_col_names = _aux_df.columns
new_col_names = [sheet+'_'+abbreviate_col_name(col) if col not in merge_on else col for col in orig_col_names]
for col in merge_on:
_aux_df[col] = _aux_df[col].astype(int)
df[col] = df[col].astype(int)
_aux_df.columns = new_col_names
dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names))
    _aux_df = _aux_df.applymap(lambda x: float(re.sub(r"[^\d.]", "", str(x))))
df = pd.merge(df, _aux_df, how='left', on=merge_on)
# monthly
sheet='monthly'
_aux_df = aux_df[sheet].copy()
aux_df.pop(sheet)
merge_on = ['resale_year', 'resale_month']
_aux_df[merge_on] = _aux_df['Variables'].str.split(' ', 1, expand=True)
_aux_df['resale_month'] = _aux_df['resale_month'].apply(lambda m: strptime(m,'%b').tm_mon)
_aux_df = _aux_df.drop(columns='Variables')
orig_col_names = _aux_df.columns
new_col_names = [sheet+'_'+abbreviate_col_name(col) if col not in merge_on else col for col in orig_col_names]
for col in merge_on:
_aux_df[col] = _aux_df[col].astype(int)
df[col] = df[col].astype(int)
_aux_df.columns = new_col_names
dnew_columns[sheet] = dict(zip(orig_col_names, new_col_names))
    _aux_df = _aux_df.applymap(lambda x: float(re.sub(r"[^\d.]", "", str(x))))
df = pd.merge(df, _aux_df, how='left', on=merge_on)
return df, dnew_columns
def generate_aux_demographic(df, aux_df, aux):
dnew_columns = defaultdict(dict)
conv_dict = {
'kids': ['0-4', '5-9', '10-14'], # dependents
'youth': ['15-19', '20-24'], # students/ part-timers
'youngads': ['25-29', '30-34', '35-39'], # young families
'middle': ['40-44', '45-49', '50-54'], # older families
'older': ['55-59', '60-64'], # retirees
'elderly': ['65-69', '70-74','75-79', '80-84', '85+'] # older group
}
rev_dict = {}
for k,v in conv_dict.items():
for i in v:
rev_dict[i] = k
aux_df['age_grp'] = aux_df['age_group'].apply(lambda x: rev_dict[x])
aux_df = aux_df.groupby(['subzone', 'age_grp'])['count'].sum().unstack('age_grp').reset_index()
df_x_aux = pd.merge(df, aux_df, how='left', on='subzone').iloc[:,-6:]
df_x_aux.columns = [aux+'_'+i for i in df_x_aux.columns]
dnew_columns[aux] = list(df_x_aux.columns)
# 'city hall' and 'gali batu' does not have some information
# we assume this is because none of a certain age group lives in that vicinity
# thus we will fillna with 0
df_x_aux = df_x_aux.fillna(0)
return df_x_aux, dnew_columns
def generate_aux_commercial(df, aux_df, aux):
dnew_columns = defaultdict(dict)
# distance from each location
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), aux_df, aux)
# distance from nearest type (grouped/min)
grp_col_name = 'type'
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False)
return df_x_aux, dnew_columns
def generate_aux_prisch(df, aux_df, aux):
dnew_columns = defaultdict(dict)
# distance from each location
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), aux_df, aux)
# create top 50 variable
aux_df['top50'] = [
'' if i>0 else None
for i in aux_df[
['KiasuRank', '2020over', '2019over', '2018over','2017over']
].sum(axis=1)]
# distance from nearest top school (grouped/min)
grp_col_name = 'top50'
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False)
# create dummies that permit phase applications for pri schools
df_x_aux['prisch_top50_<=1km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if x<=1 else 0)
df_x_aux['prisch_top50_1to2km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if (x>1 and x<=2) else 0)
df_x_aux['prisch_top50_2to4km'] = df_x_aux['prisch_top50_'].apply(lambda x: 1 if (x>2 and x<=4) else 0)
return df_x_aux, dnew_columns
def Haversine(lat1, lon1, lat2, lon2, roundoff=4):
"""
Code Source: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
This uses the ‘haversine’ formula to calculate the great-circle distance between two points – that is,
the shortest distance over the earth’s surface – giving an ‘as-the-crow-flies’ distance between the points
(ignoring any hills they fly over, of course!).
Haversine
formula: a = sin²(Δφ/2) + cos φ1 ⋅ cos φ2 ⋅ sin²(Δλ/2)
c = 2 ⋅ atan2( √a, √(1−a) )
d = R ⋅ c
where φ is latitude, λ is longitude, R is earth’s radius (mean radius = 6,371km);
note that angles need to be in radians to pass to trig functions!
"""
R = 6371.0088
lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
dlat = lat2 - lat1
dlon = lon2 - lon1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2) ** 2
c = 2 * np.arctan2(a**0.5, (1-a)**0.5)
d = R * c
return round(d, roundoff)
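# Sanity check for the formula above: one degree of longitude along the equator gives
# Haversine(0, 0, 0, 1) ~= 111.195 km, i.e. R * pi / 180 with R = 6371.0088.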
def get_distance_between_main_aux(df_row, aux_row, verbose=False):
distance = Haversine(
df_row['latitude'], df_row['longitude'],
aux_row['lat'], aux_row['lng']
)
if verbose:
subzone = df_row['subzone']
auxname = aux_row['name']
print(f'Distance between "{subzone}" and "{auxname}": {distance}')
return distance
def abbreviate_col_name(abrv_name):
if ' ' in abrv_name:
abrv_name = ''.join([s[0] if s[0].isupper() else (
s if s.isnumeric() else '')for s in abrv_name.split(' ')])
abrv_name = abrv_name.replace('_', '')
return abrv_name
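# e.g. abbreviate_col_name('Gross Domestic Product 2010') -> 'GDP2010' (initials of
# capitalised words, numeric tokens kept whole); single-word names only lose underscores.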
def create_main_aux_dist_cols(df, _aux_df, aux='', aux_col_name='name', df_lat_name='latitude', df_lng_name='longitude',
aux_lat_name='lat', aux_lng_name='lng', verbose=False, new_frame=True):
"""
Assumes the following column naming convensions:
df: has columns ['latitude', 'longitude']
aux: has columns ['name', 'lat', 'lng']
"""
out_df = pd.DataFrame()
dcol_conversion = defaultdict(str)
for aux_ix, aux_row in _aux_df.iterrows():
# generate new column names
abrv_name = aux_row[aux_col_name]
abrv_name = abbreviate_col_name(abrv_name)
col_name = aux + '_' + abrv_name
# store column conversion
if col_name in dcol_conversion.values():
col_name += 'V' + str(aux_ix) # create a new unique column
dcol_conversion[aux_row[aux_col_name]] = col_name
# generate columns
out_df[col_name] = Haversine(
df[df_lat_name], df[df_lng_name], aux_row[aux_lat_name], aux_row[aux_lng_name])
# complete
if verbose:
print(f'Created new column "{col_name}"...')
if new_frame:
return out_df, dcol_conversion
else:
return pd.concat([df, out_df], axis=1), dcol_conversion
def mmin(df):
return df.min(axis=1)
def create_grouped_cols(
dnew_columns, df, _aux_df, aux='', grp_col_name='type', function=mmin,
verbose=False, new_frame=True):
out_df = pd.DataFrame()
dcol_conversion = defaultdict(str)
for grp_ix, grp in enumerate(_aux_df[grp_col_name].unique()):
# we do not create new cols for missings
if grp is None:
continue
# generate new column names
col_name = aux + '_' + grp_col_name + '_' + grp
# store column conversion
if col_name in dcol_conversion.values():
col_name += '_' + str(grp_ix) # create a new unique column
dcol_conversion[grp] = col_name
relevant_columns = [dnew_columns[aux][old]
for old in _aux_df[_aux_df[grp_col_name] == grp]['name']]
out_df[col_name] = function(df[relevant_columns])
# complete
if verbose:
print(f'Created new column "{col_name}"...')
if new_frame:
return out_df, dcol_conversion
else:
return pd.concat([df, out_df], axis=1), dcol_conversion
def label_rows_by_index(full_indexes, positive_indexes, positive_label, negative_label=None):
""" create group tags """
return [positive_label if i in positive_indexes else negative_label for i in full_indexes]
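# e.g. label_rows_by_index(range(4), [1, 3], 'yes') -> [None, 'yes', None, 'yes']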
def generate_aux_secsch(df, aux_df, aux):
dnew_columns = defaultdict(dict)
aux_df[''] = ''
grp_col_name = ''
# distance from each secsch
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), aux_df, aux)
# distance from nearest secsch
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False)
return df_x_aux, dnew_columns
def generate_aux_hawker(df, aux_df, aux):
dnew_columns = defaultdict(dict)
aux_df[''] = ''
grp_col_name = ''
# distance from each hawker
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), aux_df, aux)
# distance from nearest hawker
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False)
# reviews
crawled_path = f'data/auxiliary-data/google_search_{aux}.csv'
aux_df2 = pd.read_csv(crawled_path)
aux_df2 = aux_df2.rename(
columns={'Unnamed: 0': 'aux_ix', 'name': 'crawled_name'})
# construct local df version
_aux_df = pd.concat([aux_df, aux_df2], axis=1)
# distance from high ratings hawker
grp_col_name = 'highrating'
_aux_df[grp_col_name] = label_rows_by_index(
full_indexes=_aux_df.index,
positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & (
_aux_df['user_ratings_total'] > 5) & (_aux_df['rating'] > 4)].index,
positive_label=''
)
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False)
# distance from established hawker
grp_col_name = 'established'
_aux_df[grp_col_name] = label_rows_by_index(
full_indexes=_aux_df.index,
positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & (
_aux_df['user_ratings_total'] > 15)].index,
positive_label=''
)
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False)
return df_x_aux, dnew_columns
def generate_aux_malls(df, aux_df, aux):
dnew_columns = defaultdict(dict)
# manual fix loyang point 1.3670, 103.9644
aux_ix = 94
aux_df.loc[aux_ix, 'lat'] = 1.3670
aux_df.loc[aux_ix, 'lng'] = 103.9644
aux_df.loc[aux_ix]
aux_df[''] = ''
grp_col_name = ''
# distance from each mall
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), aux_df, aux)
# distance from nearest mall
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, aux_df, aux, grp_col_name, new_frame=False)
# reviews
crawled_path = f'data/auxiliary-data/google_search_{aux}.csv'
aux_df2 = pd.read_csv(crawled_path)
aux_df2 = aux_df2.rename(
columns={'Unnamed: 0': 'aux_ix', 'name': 'crawled_name'})
# construct local df version
_aux_df = pd.concat([aux_df, aux_df2], axis=1)
# create grouping by ratings
# rationale: malls differ in ranges 4.5-4.0 alot (Central to Local malls)
grp_col_name = 'ratingsbin'
_aux_df2 = _aux_df[(_aux_df['fuzzy_score'] > 70) & (
_aux_df['user_ratings_total'] > 5)].copy()
_aux_df2[grp_col_name] = _aux_df2['rating'].apply(
lambda x: None if pd.isnull(x) else (
'>=4.5' if x >= 4.5 else (
'4.4' if x >= 4.4 else (
'4.3' if x >= 4.3 else (
'4.2' if x >= 4.2 else (
'4.1' if x >= 4.1 else (
'4.0' if x >= 4.0 else ">4.0"))))))
)
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, _aux_df2, aux, grp_col_name, new_frame=False)
# distance from established mall
grp_col_name = 'established'
_aux_df[grp_col_name] = label_rows_by_index(
full_indexes=_aux_df.index,
positive_indexes=_aux_df[(_aux_df['fuzzy_score'] > 70) & (
_aux_df['user_ratings_total'] > 15)].index,
positive_label=''
)
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False)
return df_x_aux, dnew_columns
def generate_aux_station(df, aux_df, aux):
dnew_columns = defaultdict(dict)
# manual fix botanic gardens is an mrt
aux_ix = 139
aux_df.loc[aux_ix, 'type'] = 'mrt'
aux_df.loc[aux_ix, 'opening_year'] = 2011
aux_df.loc[aux_ix]
# fix for duplicate rows that exists in mrt data
_aux_df = aux_df.copy()
_aux_df = _aux_df.groupby(['name', 'type']).agg(
{'codes': '/'.join, 'lat': np.mean, 'lng': np.mean, 'opening_year': np.min}).reset_index()
# generate groupings
_aux_df['numlines'] = _aux_df['codes'].apply(lambda x: x.count('/')+1)
_aux_df['interchange'] = label_rows_by_index(
full_indexes=_aux_df.index,
positive_indexes=_aux_df[_aux_df['numlines'] > 1].index,
positive_label=''
)
# group by main lines
for line in ['EW', 'NS', 'NE', 'CC', 'DT']:
_aux_df[line] = label_rows_by_index(
full_indexes=_aux_df.index,
positive_indexes=[ix for ix, code in enumerate(
_aux_df['codes']) if line in code],
positive_label=''
)
# distance from each mrt stn
df_x_aux, dnew_columns[aux] = create_main_aux_dist_cols(
df.copy(), _aux_df, aux)
    # overwrite a station's distance with NaN if it only opened 5+ years after the resale
# aux_row = _aux_df[_aux_df['opening_year']>=2004].iloc[aux_ix]
dcol_conversion = dnew_columns[aux]
for aux_ix, aux_row in _aux_df[_aux_df['opening_year'] >= 2004].iterrows():
focus_yr, focus_name = aux_row['opening_year'], aux_row['name']
focus_col = dcol_conversion[focus_name]
# create new column
df_x_aux[focus_col+'_open'] = [
ds if yr > focus_yr-5 else np.nan for yr, ds in zip(df['resale_year'].astype(int), df_x_aux[focus_col].astype(float))]
# new column naming
dcol_conversion[focus_name] = focus_col+'_open'
dnew_columns[aux] = dcol_conversion
# distance from group type
for grp_col_name in ['type', 'interchange', 'EW', 'NS', 'NE', 'CC', 'DT']:
df_x_aux, dnew_columns[aux+'_'+grp_col_name] = create_grouped_cols(
dnew_columns, df_x_aux, _aux_df, aux, grp_col_name, new_frame=False)
return df_x_aux, dnew_columns
| tanfiona/HDBResalePrice | src/steps/process.py | process.py | py | 27,156 | python | en | code | 2 | github-code | 50 |
27211573123 | # -*-coding:utf-8-*-
# Problem description
"""
https://leetcode-cn.com/problems/er-cha-shu-de-shen-du-lcof/
"""
# Tags: tree, depth-first search, breadth-first search, binary tree
# Approach:
"""
Level-order traversal is used here.
The queue list stores the current level's nodes, temp stores the next level's nodes, and res counts the result.
"""
# Submission result: Accepted
"""
Runtime: 24 ms, faster than 80.39% of all Python submissions
Memory: 15.8 MB, better than 32.88% of all Python submissions
Test cases passed: 39 / 39
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root: return 0
queue = [root]
res = 0
        while queue: # each pass of the while loop advances one level
            temp = [] # stores the nodes of each level
for value in queue:
# print(value.val)
if value.left: temp.append(value.left)
if value.right: temp.append(value.right)
queue = temp
res += 1
return res
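# e.g. for the tree built from [3, 9, 20, None, None, 15, 7], the traversal visits
# levels [3], [9, 20], [15, 7] in turn, so maxDepth returns 3.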
| zranguai/leetcode-solution | 剑指Offer/easy/剑指Offer55-1-二叉树的深度.py | 剑指Offer55-1-二叉树的深度.py | py | 1,254 | python | zh | code | 1 | github-code | 50 |
7297029761 | class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
pos=-1
for i in nums:
if target==i:
pos=nums.index(i)
if pos==-1:
nums.append(target)
nums=sorted(nums)
pos=nums.index(target)
return int(pos)
| DanielAlexanderMarcus/Python-Work | search_Insert.py | search_Insert.py | py | 407 | python | en | code | 0 | github-code | 50 |
350833675 | from django.contrib.auth.views import LoginView, LogoutView
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from class_journal.views import TimetableView, JournalView, AddMarkView, DiaryView
class TestURLs(SimpleTestCase):
def test_url_resolves(self):
url_names_views = {
'login': LoginView,
'logout': LogoutView,
'timetable': TimetableView,
'journal': JournalView,
'add_mark': AddMarkView,
}
for name, view in url_names_views.items():
with self.subTest():
self.assertEqual(resolve(reverse(name)).func.view_class, view)
| Proximity42/Electronic-Diary | tests/test_urls.py | test_urls.py | py | 675 | python | en | code | 0 | github-code | 50 |
71895360476 | from configparser import ConfigParser  # SafeConfigParser is a deprecated alias of ConfigParser in Python 3
global _WorkHeight
global _StartxPosition
global _ShakeHeight
global _ShakeXDist
global _ShakeStepDelay
global _ShakeStepAngleRange
global _servoFillAngle
global _Zfeedrate
global _Xfeedrate
global _HWservoDelay # Delay after servo move (MUST BE SAME IN FIRMWARE (for MARLIN in config.h)!!!)
global _ActualSyringeStatus
global _SyringeHomeRest # sec to wait before synthesis start
global _servosAttachPin
global _LoadStepDelay
global _UnloadStepDelay
global _MaxLoadRange # ml
global _DefaultLoadRange
_WorkHeight = 60
_StartxPosition = 100
_Zfeedrate = 666
_Xfeedrate = 1600
_MaxLoadRange = 8.0 # ml
_SyringeHomeRest = 20 # sec to wait before synthesis start
_ShakeHeight = 60
_ShakeXDist = 150
_ShakeStepDelay = 800
_ShakeStepAngleRange = [5, 160]
_servoFillAngle = 171
_HWservoDelay = 5 # Delay after servo move (MUST BE SAME IN FIRMWARE (for MARLIN in config.h)!!!)
_servosAttachPin = 57
_LoadStepDelay = 230
_UnloadStepDelay = 140
_DefaultLoadRange=8.0
configs = {'WorkHeight':60,
'StartxPosition':100,
'Zfeedrate':666,
'Xfeedrate':1600,
'MaxLoadRange':8.0,
'SyringeHomeRest':20,
'ShakeHeight':60,
'ShakeXDist':150,
'ShakeStepDelay':800,
'ShakeTopAngle':5,
'ShakeBottomAngle':160,
'ServoFillAngle':171,
'HWservoDelay':5,
'ServosAttachPin':57,
'LoadStepDelay':230,
'UnloadStepDelay':140,
'DefaultLoadRange':8.0
}
def init():
    # This function only populates the module-level `configs` dict, so no
    # `global` declarations are needed.
    config = ConfigParser()
config.read('pypes.ini')
#config.add_section('main')
configs['WorkHeight'] = config.getint('main', 'workheight')
configs['StartxPosition'] = config.getint('main', 'startxposition')
configs['Zfeedrate'] = config.getint('main', 'zfeedrate')
configs['Xfeedrate'] = config.getint('main', 'xfeedrate')
configs['MaxLoadRange'] = config.getfloat('main', 'maxloadrange')
configs['SyringeHomeRest'] = config.getint('main', 'syringehomerest')
#config.add_section('shake')
configs['ShakeHeight'] = config.getint('shake', 'shakeheight')
configs['ShakeXDist'] = config.getint('shake', 'shakexdist')
configs['ShakeStepDelay'] = config.getint('shake', 'shakestepdelay')
configs['ShakeTopAngle'] = config.getint('shake', 'shaketopangle')
configs['ShakeBottomAngle'] = config.getint('shake', 'shakebottomangle')
#config.add_section('servo')
configs['ServoFillAngle'] = config.getint('servo', 'servofillangle')
configs['HWservoDelay'] = config.getint('servo', 'hwservodelay')
configs['ServosAttachPin'] = config.getint('servo', 'servosattachpin')
configs['LoadStepDelay'] = config.getint('servo', 'loadstepdelay')
configs['UnloadStepDelay'] = config.getint('servo', 'unloadstepdelay')
# config.add_section('gui')
configs['DefaultLoadRange'] = config.getfloat('gui', 'defaultloadrange')
def saveConfig():
    config = ConfigParser()
#config.read('pypes.ini')
config.add_section('main')
config.set('main', 'workheight', str(configs['WorkHeight']))
config.set('main', 'startxposition', str(configs['StartxPosition']))
config.set('main', 'zfeedrate', str(configs['Zfeedrate']))
config.set('main', 'xfeedrate', str(configs['Xfeedrate']))
config.set('main', 'maxloadrange', str(configs['MaxLoadRange']))
config.set('main', 'syringehomerest', str(configs['SyringeHomeRest']))
config.add_section('shake')
config.set('shake', 'shakeheight', str(configs['ShakeHeight']))
config.set('shake', 'shakexdist', str(configs['ShakeXDist']))
config.set('shake', 'shakestepdelay', str(configs['ShakeStepDelay']))
config.set('shake', 'shaketopangle' , str(configs['ShakeTopAngle']))
config.set('shake', 'shakebottomangle', str(configs['ShakeBottomAngle']))
config.add_section('servo')
config.set('servo', 'servofillangle', str(configs['ServoFillAngle']))
config.set('servo', 'hwservodelay', str(configs['HWservoDelay']))
config.set('servo', 'servosattachpin', str(configs['ServosAttachPin']))
config.set('servo', 'loadstepdelay', str(configs['LoadStepDelay']))
config.set('servo', 'unloadstepdelay', str(configs['UnloadStepDelay']))
config.add_section('gui')
config.set('gui', 'defaultloadrange', str(configs['DefaultLoadRange']))
with open('pypes.ini', 'w') as f:
        config.write(f)
| JiriPrusa/PyPeS | python_GUI/settings.py | settings.py | py | 5,036 | python | en | code | 0 | github-code | 50 |
29871407460 | from torch import nn
import torch
import numpy as np
class Tacotron2Loss_VAE(nn.Module):
def __init__(self, hparams):
super(Tacotron2Loss_VAE, self).__init__()
self.anneal_function = hparams.anneal_function
self.lag = hparams.anneal_lag
self.k = hparams.anneal_k
self.x0 = hparams.anneal_x0
self.upper = hparams.anneal_upper
def kl_anneal_function(self, anneal_function, lag, step, k, x0, upper):
if anneal_function == 'logistic':
return float(upper/(upper+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
if step > lag:
return min(upper, step/x0)
else:
return 0
elif anneal_function == 'constant':
return 0.001
def forward(self, model_output, targets, step):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _, mu, logvar, _, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
kl_weight = self.kl_anneal_function(self.anneal_function, self.lag, step, self.k, self.x0, self.upper)
recon_loss = mel_loss + gate_loss
total_loss = recon_loss + kl_weight*kl_loss
        return total_loss, recon_loss, kl_loss, kl_weight
| jinhan/tacotron2-vae | loss_function.py | loss_function.py | py | 1,671 | python | en | code | 162 | github-code | 50 |
21371042513 | from copy import deepcopy
import numpy as np
import pdb
from src.formula_parser import FormulaParser
from src.extended_definition import ExtendedDefinition
from src.logic_parser import LogicParser
kg_parser = LogicParser(ExtendedDefinition(debug=True))
fm_parser = FormulaParser(ExtendedDefinition(debug=True))
kb = {}
list_of_predicates = []
list_of_explored_rules = []
unproved_premise = []
unproved_query = []
unproved_single_chain = []
unproved_chain = []
def fetch_rules(goal):
global kb
global list_of_predicates
print("fetch_rules for goal:- ", goal)
list_of_rules = []
#predicate = goal.partition('(')[0]
predicate = key_from_predicates(kg_parser.parse(goal))
predicate = predicate[:-1]
print("\t", predicate, kb[predicate]['conc'])
list_of_rules = list_of_rules + kb[predicate]['conc']
return list_of_rules
def subst(theta, res):
#print("\tsubst: ", theta, res)
fact = ""
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii == 0):
fact = fact + res[ii] + "("
elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
if variable(res[ii]) and (res[ii] in theta):
fact = fact + theta[res[ii]] + ","
else:
fact = fact + res[ii] + ","
elif isinstance(res[ii], str) and (ii == nl - 1):
if variable(res[ii]) and (res[ii] in theta):
fact = fact + theta[res[ii]]
else:
fact = fact + res[ii]
elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
_fact = subst(theta, res[ii])
fact = fact + _fact + ","
elif not isinstance(res[ii], str) and (ii == nl - 1):
_fact = subst(theta, res[ii])
fact = fact + _fact
fact = fact + ")"
return fact
"""
def variable(x):
if not isinstance(x, str):
return False
else:
if x[0].islower():
return False
else:
return True
"""
def variable(x):
if not isinstance(x, str):
return False
else:
if x[0].isupper():
return True
else:
return False
def compound(x):
if not isinstance(x, str):
return False
else:
if '(' in x and ')' in x:
return True
else:
return False
def is_list(x):
    # A parsed (non-string) term behaves as a list of sub-terms here; the name
    # avoids shadowing the builtin `list`.
    if not isinstance(x, str):
        return True
    else:
        return False
def key_from_predicates(res):
nl = len(res)
keys = ""
for ii in range(nl):
if isinstance(res[ii], str) and (ii == 0):
keys = keys + res[ii] + "-"
elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
_key = key_from_predicates(res[ii])
keys = keys + _key
elif not isinstance(res[ii], str) and (ii == nl - 1):
_key = key_from_predicates(res[ii])
keys = keys + _key
return keys
def unify_var(var, x, theta):
#print("IN unify_var", var, x, theta)
if var in theta:
#print("var in theta", var, theta)
return unify(theta[var], x, theta)
elif x in theta:
#print("x in theta", x, theta)
return unify(var, theta[x], theta)
else:
theta[var] = x
#print("not in theta", theta[var])
return theta
def check_theta(theta):
for entry in theta:
if variable(theta[entry]):
if theta[entry] in theta:
print("in check_theta. theta changed")
theta[entry] = theta[theta[entry]]
return theta
def reverse_parse(res):
fact = ""
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii == 0):
fact = fact + res[ii] + "("
elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
fact = fact + res[ii] + ","
elif isinstance(res[ii], str) and (ii == nl - 1):
fact = fact + res[ii]
elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
_fact = reverse_parse(res[ii])
fact = fact + _fact + ","
elif not isinstance(res[ii], str) and (ii == nl - 1):
_fact = reverse_parse(res[ii])
fact = fact + _fact
fact = fact + ")"
return fact
def unify(x, y, theta):
#print("\tunify", x, y, theta)
if theta == None:
#print("\tin theta is None")
return None
elif x == y:
#print("\tin x=y")
return check_theta(theta)
elif variable(x) is True:
#print("\tin variable(x)")
return unify_var(x, y, theta)
elif variable(y) is True:
#print("\tin variable(y)")
return unify_var(y, x, theta)
elif compound(x) and compound(y):
#print("\tin compound")
x_parse = kg_parser.parse(x)
y_parse = kg_parser.parse(y)
x_op = x_parse[0]
y_op = y_parse[0]
x_args = []
for item in range(len(x_parse) - 1): #temp.split(','):
if isinstance(x_parse[item + 1], str):
x_args.append(x_parse[item + 1])
else:
x_args.append(reverse_parse(x_parse[item + 1]))
y_args = []
for item in range(len(y_parse) - 1): #temp.split(','):
if isinstance(y_parse[item + 1], str):
y_args.append(y_parse[item + 1])
else:
y_args.append(reverse_parse(y_parse[item + 1]))
return unify(x_args, y_args, unify(x_op, y_op, theta))
    elif is_list(x) and is_list(y) and x != [] and y != []:
#print("\tin list")
return unify(x[1:], y[1:], unify(x[0], y[0], theta))
elif x == [] or y == []:
return None
else:
#print("\tin else")
return None
#_body_unify={}
#_body_unify = unify('pointPosition(B,Xb,Yb)', 'pointPosition(n,1055,1060).', {})
#print(_body_unify)
def var_exist(res):
fact = False
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii > 0) and res[ii][0].isupper():
fact = True
return fact
elif not isinstance(res[ii], str):
_fact = var_exist(res[ii])
fact = fact or _fact
return fact
def parse_match(res, fact):
matched = True
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii == 0) and isinstance(
fact[ii], str):
if res[ii] == fact[ii]:
matched = True
else:
matched = False
return matched
elif isinstance(res[ii], str) and (
ii > 0) and res[ii][0].islower() and isinstance(res[ii], str):
if res[ii] == fact[ii]:
matched = True
else:
matched = False
return matched
elif isinstance(res[ii], str) and (
ii > 0) and res[ii][0].isupper() and isinstance(res[ii], str):
matched = True
elif (not isinstance(res[ii], str)) and (not isinstance(fact[ii],
str)):
_matched = parse_match(res[ii], fact[ii])
matched = matched and _matched
else:
matched = False
return matched
return matched
def inst(temp, facts):
results = []
temp_parse = kg_parser.parse(temp)
for fact in facts:
if fact[:len(temp_parse[0])] == temp_parse[0]:
fact_parse = kg_parser.parse(fact)
if parse_match(temp_parse, fact_parse):
results.append(fact)
return results
def filterInst(temp_list, list_of_premises):
filterlist = []
temp = []
n = len(temp_list)
nlist = [len(ii) for ii in temp_list]
for ii in range(n):
temp.append(
temp_list[ii] *
(int(np.prod(nlist[:ii])) * int(np.prod(nlist[(ii + 1):]))))
for ii in range(np.prod(nlist)):
each_query = []
query_str = "Predicate("
premise_str = "Predicate("
for jj in range(n):
each_query.append(temp[jj][ii])
if temp[jj][ii] == "()" and list_of_premises[jj] == "":
continue
else:
if temp[jj][ii][-1] == '.':
temp[jj][ii] = temp[jj][ii][:-1]
query_str = query_str + temp[jj][ii] + ","
premise_str = premise_str + list_of_premises[jj] + ","
#if query_str[-2]=='.':
# query_str = query_str[:-2]+")"
#else:
query_str = query_str[:-1] + ")"
#if premise_str[-2]=='.':
# premise_str = premise_str[:-2]+")"
#else:
premise_str = premise_str[:-1] + ")"
sucess = unify(premise_str, query_str, {})
if sucess != None:
filterlist.append(each_query)
return filterlist
def existed(temp_list, facts):
val = True
for temp in temp_list:
if not (temp in facts):
val = False
break
return val
def extract_var(res, variable_names, label):
nl = len(res)
fact = ""
#label=0
for ii in range(nl):
if isinstance(res[ii], str) and (ii == 0):
fact = fact + res[ii][0].lower() + res[ii][1:] + "("
elif isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
item = res[ii].upper()
if item not in variable_names:
variable_names[item] = "X" + repr(label)
item = "X" + repr(label)
label = label + 1
else:
item = variable_names[item]
fact = fact + item + ","
elif isinstance(res[ii], str) and (ii == nl - 1):
item = res[ii].upper()
if item not in variable_names:
variable_names[item] = "X" + repr(label)
item = "X" + repr(label)
label = label + 1
else:
item = variable_names[item]
fact = fact + item
elif not isinstance(res[ii], str) and (ii > 0) and (ii < nl - 1):
_fact, variable_names, label = extract_var(res[ii], variable_names,
label)
fact = fact + _fact + ","
elif not isinstance(res[ii], str) and (ii == nl - 1):
_fact, variable_names, label = extract_var(res[ii], variable_names,
label)
fact = fact + _fact
fact = fact + ")"
return fact, variable_names, label
def extract_premise_var(res, variable_names, vars):
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
item = res[ii]
vars.append(item)
if item not in variable_names:
variable_names[item] = "unknown"
else:
item = variable_names[item]
elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
variable_names, vars = extract_premise_var(res[ii], variable_names,
vars)
return variable_names, vars
def extract_var_simple(res, vars):
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
item = res[ii]
vars.append(item)
elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
vars = extract_var_simple(res[ii], vars)
return vars
#print(extract_var_simple(kg_parser.parse('a(X, b(Y,Z))'), []))
def extract_element(res, vars):
nl = len(res)
for ii in range(nl):
if isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
item = res[ii]
vars.append(item)
elif not isinstance(res[ii], str) and (ii > 0) and (ii <= nl - 1):
vars = extract_element(res[ii], vars)
return vars
def matchUnify(premise, theta):
    # Returns (vars_, theta2, hasUnified): the premise's variables, the updated
    # binding map, and a per-variable flag for whether each is already bound.
    vars_ = []
    parse_premise = kg_parser.parse(premise)
    theta2, vars_ = extract_premise_var(parse_premise, theta, vars_)
    hasUnified = []
    for key in vars_:
        if theta2[key] == 'unknown':
            hasUnified.append(False)
        else:
            hasUnified.append(True)
    return vars_, theta2, hasUnified
def parse_predicates(rule_str):
left_index = []
right_index = []
left_count = 0
right_count = 0
strset = []
start_index = 0
for ii in range(len(rule_str) - 1):
if rule_str[ii] == "(":
left_index.append(ii)
left_count = left_count + 1
elif rule_str[ii] == ")":
right_index.append(ii)
right_count = right_count + 1
if right_count == left_count and left_count != 0 and (
rule_str[ii + 1:].strip()[0] == ','
or rule_str[ii + 1:].strip()[0] == '.'):
_tmp = rule_str[start_index:ii + 1].strip()
#pdb.set_trace()
if _tmp[0] == "," or _tmp[0] == ".":
_tmp = _tmp[1:]
strset.append(_tmp.strip())
start_index = ii + 1
#left_count=0
#right_count=0
#pdb.set_trace()
return strset
def new_parse_predicates(premises_str):
splited_premises = []
start_premise = i = 0
parent_stack = []
while i < len(premises_str):
if premises_str[i] not in ["(", ")"]:
i += 1
continue
if premises_str[i] == "(":
parent_stack.append("(")
elif premises_str[i] == ")":
parent_stack.pop()
if not parent_stack:
splited_premises.append(premises_str[start_premise:i + 1].strip())
start_premise = i + 2
i += 1
return splited_premises
def testparsing():
from src.input_reader import read_rules
rules = read_rules("src/knowledge_base")
for rule in rules:
try:
# print(new_parse_predicates(rule.split(":-")[1]))
# print(parse_predicates(rule.split(":-")[1]), "\n\n\n")
assert new_parse_predicates(
rule.split(":-")[1]) == parse_predicates(rule.split(":-")[1])
except Exception as e:
print(e)
print(rule, "Failed")
print(new_parse_predicates(rule.split(":-")[1]))
print(parse_predicates(rule.split(":-")[1]), "\n\n\n")
print(
parse_predicates(
"equals(measureOf(angle(A,B,D)),measureOf(angle(B,D,E))),quadrilateral(A,B,E,D)."
))
# parse_predicates(
# "parallel(line(A,B),line(D,E)) :- equals(measureOf(angle(A,B,D)),measureOf(angle(B,D,E))) , quadrilateral(A,B,E,D)."
# )
#test_str ="equals(measureOf(angle(A,B,F)),measureOf(angle(E,D,F))) :- parallel(line(A,B),line(D,E)), line(B, D), on_same_line(B, D, F), pointPosition(A,Xa,Ya), pointPosition(B,Xb,Yb), pointPosition(D,Xd,Yd), pointPosition(E,Xe,Ye), pointPosition(F,Xf,Yf), (Xa-Xb)*(Xe-Xd)>0, (Ya-Yb)*(Ye-Yd)>0, (Xf-Xb)*(Xf-Xd)>0, (Yf-Yb)*(Yf-Yd)>0, not A==C."
#strset = parse_predicates(test_str)
#print(strset)
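# --- Hypothetical usage sketch (added for illustration; it assumes kg_parser
# parses terms into [functor, arg, ...] lists, as the helpers above expect):
# theta = unify('pointPosition(B,Xb,Yb)', 'pointPosition(n,1055,1060)', {})
# print(theta)  # e.g. {'B': 'n', 'Xb': '1055', 'Yb': '1060'}
# if theta is not None:
#     print(subst(theta, kg_parser.parse('pointPosition(B,Xb,Yb)')))
#     # -> pointPosition(n,1055,1060)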
| JiajunSong-Bigai/geometry_fc_bc | src/my_unification.py | my_unification.py | py | 15,219 | python | en | code | 0 | github-code | 50 |
74858873756 | """Implementation for agents interface"""
from typing import Any, Optional, Tuple, List, Union
import numpy as np
from numpy.linalg import norm
from highrl.obstacle.single_obstacle import SingleObstacle
from highrl.utils.action import ActionXY
from highrl.utils.abstract import Position
class Agent:
"""
Class that represents the agent interacting in the environment.
Attributes:
px (int): agent x position.
gx (int): goal x position.
gy (int): goal y position.
gt (int): goal orientation angle.
vx (int): agent x velocity.
vy (int): agent y velocity.
py (int): agent y position.
w (int): agent angular velocity.
theta (int): agent angle theta.
radius (int): agent radius.
goal_radius (int): goal radius.
"""
    def __init__(
        self,
        pos: Optional[Position] = None,
        goal_pos: Optional[Position] = None,
        gt: float = 0.0,
        vx: float = 0.0,
        vy: float = 0.0,
        w: float = 0.0,
        theta: float = 0.0,
        radius: int = 20,
        goal_radius: int = 10,
    ) -> None:
        """Constructs an agent object.
        Args:
            pos (Position, optional): Position of the agent. Defaults to (x=0, y=0).
            goal_pos (Position, optional): Position of the goal. Defaults to (x=0, y=0).
            gt (float, optional): Goal orientation angle. Defaults to 0.
            vx (float, optional): Agent x velocity. Defaults to 0.
            vy (float, optional): Agent y velocity. Defaults to 0.
            w (float, optional): Agent angular velocity. Defaults to 0.
            theta (float, optional): Agent angle theta. Defaults to 0.
            radius (int, optional): Agent radius. Defaults to 20.
            goal_radius (int, optional): Goal radius. Defaults to 10.
        """
        self.radius = radius
        self.goal_radius = goal_radius
        # Create fresh Position objects per call: a mutable default argument
        # would be shared by every Agent constructed with the defaults.
        self.pos = pos if pos is not None else Position[float](0.0, 0.0)
        self.gpos = goal_pos if goal_pos is not None else Position[float](0.0, 0.0)
        self.gt = gt
        self.vx = vx
        self.vy = vy
        self.w = w
        self.theta = theta
    def set(
        self,
        pos: Optional[Position] = None,
        goal_pos: Optional[Position] = None,
        gt: float = 0,
        vx: float = 0,
        vy: float = 0,
        w: float = 0,
        theta: float = 0,
        radius: int = 20,
        goal_radius: int = 10,
    ) -> None:
        """Sets all agent attributes.
        Args:
            pos (Position, optional): agent position. Defaults to (x=0, y=0).
            goal_pos (Position, optional): goal position. Defaults to (x=0, y=0).
            gt (float, optional): goal orientation angle. Defaults to 0.
            vx (float, optional): agent x velocity. Defaults to 0.
            vy (float, optional): agent y velocity. Defaults to 0.
            w (float, optional): agent angular velocity. Defaults to 0.
            theta (float, optional): agent angle theta. Defaults to 0.
            radius (int, optional): agent radius. Defaults to 20.
            goal_radius (int, optional): goal radius. Defaults to 10.
        """
        self.pos = pos if pos is not None else Position[float](0.0, 0.0)
        self.gpos = goal_pos if goal_pos is not None else Position[float](0.0, 0.0)
        self.gt = gt
        self.vx = vx
        self.vy = vy
        self.w = w
        self.theta = theta
        self.radius = radius
        self.goal_radius = goal_radius
@property
def x_pos(self) -> float:
"""Getter for x_coord"""
return self.pos.x
@property
def y_pos(self) -> float:
"""Getter for y_coord"""
return self.pos.y
def get_position(self) -> Position:
"""Getter for agent postion"""
return self.pos
def set_position(self, position: Position) -> None:
"""Setter for agent position"""
self.pos = position
def set_goal_position(self, position: Position) -> None:
"""Setter for goal position"""
self.gpos = position
def get_goal_position(self) -> Position:
"""Getter for goal postion"""
return self.gpos
def get_velocity(self) -> Tuple[float, float, float]:
"""Getter agent velocity vector.
Returns:
Tuple[float, float, float]: (agent x velocity, agent y velocity, agent angular velocity)
"""
return self.vx, self.vy, self.w
def set_velocity(self, velocity: Tuple[float, ...]):
"""Setter for agent linear and angular velocity.
Args:
velocity (Tuple[int, int]): (agent x velocity, agent y velocity, agent angular velocity)
"""
self.vx = velocity[0]
self.vy = velocity[1]
self.w = velocity[2]
def set_radius(self, agent_radius: int, goal_radius: int) -> None:
"""Setter for the goal and agent radius"""
self.radius = agent_radius
self.goal_radius = goal_radius
def check_validity(self, action: Any):
"""Checks if action is in right format.
The right format is the object forman: ActionXY
"""
assert isinstance(action, ActionXY)
def compute_position(self, action: Any, delta_t: float) -> Tuple:
"""Computes agent next position and orientation based on the agent action velocity.
Before computing the agent next position, Checks if the action is in the ActionXY
format.
Args:
action (Any): action decided by the agent model but in ActionXY object format
delta_t (float): time difference between actions
Returns:
Tuple[int, int, int]: (agent x position, agent y posistion, agent orientation theta)
"""
self.check_validity(action)
velocity = (action.vx**2 + action.vy**2) ** 0.5
angle = self.fix(np.arctan2(action.vy, action.vx), 2 * np.pi)
x_pos = self.pos.x + velocity * np.cos(self.theta + angle) * delta_t
y_pos = self.pos.y + velocity * np.sin(self.theta + angle) * delta_t
theta = self.fix(self.theta + action.w * delta_t, 2 * np.pi)
return x_pos, y_pos, theta
def fix(self, base: Union[int, float], mod: Union[int, float]) -> Union[int, float]:
"""Change `base` rane to be [0:mod[.
For example, if `base` is an angle and `mod` is 2*pi, then we want
to ensure that the angle is always in the range [0:mod[.
Since Python does not support modulus of floating/negative numbers,
the modulus is implemented manually.
"""
while base < 0:
base += mod
while base >= mod:
base -= mod
return base
def reached_destination(self) -> bool:
"""Determines if agent reached the goal postion.
Returns:
bool: whether the agent has reached the goal or not
"""
min_allowed_dist = self.radius + self.goal_radius
return self.dist_to_goal() < min_allowed_dist
def dist_to_goal(self) -> float:
"""Compute the distance from the agent to the goal"""
return norm(self.pos.get_coords() - self.gpos.get_coords()).item()
def step(self, action: ActionXY, delta_t: float) -> None:
"""Performs an action and update the agent state.
Args:
action (List): action decided by the agent model but in ActionXY object format
delta_t (float): time difference between actions
"""
self.check_validity(action)
pos = self.compute_position(action, delta_t)
x_pos, y_pos, self.theta = pos
self.pos.set_pos(x_pos, y_pos)
self.vx = action.vx
self.vy = action.vy
self.w = action.w
def is_overlapped(self, obstacle: SingleObstacle, check_target: str = "agent"):
"""Checks if there is an overlap between the agent/goal and a given obstacle.
Args:
            obstacle (SingleObstacle): input obstacle to check overlap with
check_target (str): target to be checked, either agent or goal
Returns:
bool: flag to check for overlap. Returns True if there is overlap.
"""
assert check_target in [
"goal",
"agent",
], "Check target should be goal or agent"
if check_target == "goal":
min_x = self.pos.x - self.goal_radius
min_y = self.pos.x - self.goal_radius
max_x = self.gpos.x + self.goal_radius
max_y = self.gpos.y + self.goal_radius
else:
min_x = self.pos.x - self.radius
min_y = self.pos.y - self.radius
max_x = self.pos.x + self.radius
max_y = self.pos.y + self.radius
dummy = [
[
min_x,
min_y,
max_x,
max_y,
],
[
int(obstacle.px),
int(obstacle.py),
int(obstacle.px + obstacle.width),
int(obstacle.py + obstacle.height),
],
]
is_overlap: bool = not self._overlap_handler(dummy)
return is_overlap
def is_robot_overlap_goal(self) -> bool:
"""Check if robot and goal overlap.
Returns:
bool: flag to check for overlap. Returns True if there is an overlap.
"""
dummy = [
[
                self.gpos.x - self.goal_radius,
                self.gpos.y - self.goal_radius,
                self.gpos.x + self.goal_radius,
                self.gpos.y + self.goal_radius,
],
[
self.pos.x - self.radius,
self.pos.y - self.radius,
self.pos.x + self.radius,
self.pos.y + self.radius,
],
]
is_overlap = not self._overlap_handler(dummy)
return is_overlap
def is_robot_close_to_goal(self, min_dist: int) -> bool:
"""Checks if the robot is closer than the min distannce to the goal.
Returns ``True`` if the robot is too close and ``False`` if the robot-goal dist
did not exceed the min allowed distance.
Args:
min_dist (int): min allowable distance for robot-goal dist
Returns:
bool: flag to determine if the robot is closer than the max allowed distance or not.
"""
distance = self.dist_to_goal()
distance -= self.radius + self.goal_radius
return distance <= min_dist
def _overlap_handler(self, dummy: List[List]) -> bool:
"""Check overlap condition between two objects.
Args:
dummy (List[List]): objects coordinates
Returns:
bool: overlap flag for input objects
"""
for _ in range(2):
if dummy[0][0] > dummy[1][2] or dummy[0][1] > dummy[1][3]:
return True
dummy[0], dummy[1] = dummy[1], dummy[0]
return False
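# --- Minimal usage sketch (added for illustration). It assumes ActionXY is a
# simple (vx, vy, w) container and that Position[float](x, y) builds a point,
# as their use above suggests:
# agent = Agent(pos=Position[float](0.0, 0.0), goal_pos=Position[float](100.0, 0.0))
# agent.step(ActionXY(10.0, 0.0, 0.0), delta_t=0.1)  # drive toward the goal for 0.1 s
# print(agent.dist_to_goal(), agent.reached_destination())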
| ahmedheakl/multi-level-rl-for-robotics | src/highrl/agents/agent.py | agent.py | py | 10,865 | python | en | code | 6 | github-code | 50 |
73810841435 | import logging
import json
import os
import sys
sys.path.append('../')
import data_processing.confidence as cf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import skimage.filters as filters
from skimage.io import imread, imsave
import itertools
from scipy.optimize import least_squares
from skimage.util import img_as_ubyte
import matplotlib.ticker as ticker
base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\thermal camera\calibration'
image_file = 'ir_thermography_spot_size_20.8252px_per_mm.png'
# base_path = r'G:\Shared drives\ARPA-E Project\Lab\Data\Laser Tests\CAMERA\BEAM_PROFILING_20221212'
center = np.array([13.97, 12.03])
pixel_size = 20.8252 # pixels/mm
diameter = 2.68
if __name__ == '__main__':
img = imread(os.path.join(base_path, image_file))
with open('../plot_style.json', 'r') as file:
json_file = json.load(file)
plot_style = json_file['defaultPlotStyle']
mpl.rcParams.update(plot_style)
img_shape = img.shape
norm1 = plt.Normalize(vmin=0, vmax=255)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(4.0, 3.0), constrained_layout=True)
ax.imshow(img, interpolation='none', norm=norm1, extent=(0, img_shape[1]/pixel_size, 0, img_shape[0]/pixel_size))
circle = plt.Circle(center, 0.5*diameter, ec='r', fill=False, clip_on=False, ls=(0, (1, 1)), lw=1.0)
ax.set_xlabel('x (mm)')
ax.set_ylabel('y (mm)')
ax.set_title('IR thermography spot size', fontweight='regular')
wz_text = f'Ø: {diameter:.2f} mm'
# ax.text(
# 0.95, 0.05, wz_text, color='w',
# transform=ax.transAxes, va='bottom', ha='right',
# fontsize=11
# )
    # q feeds np.cos/np.sin directly, so it is interpreted in radians; a
    # 45-degree leader line would need np.deg2rad(45.0) instead.
    q = 45.0
x1 = center[0] + 0.5 * diameter * np.cos(q)
y1 = center[1] - 0.5 * diameter * np.sin(q)
x2 = center[0] + 2.0 * diameter * np.cos(q) + 1.0
y2 = center[1] - 2.0 * diameter * np.sin(q)
connectionstyle = "angle,angleA=0,angleB=-90,rad=0"
ax.annotate(
wz_text,
xy=(x1, y1), xycoords='data',
xytext=(x2, y2), textcoords='data',
color='w', ha='left', va='center',
arrowprops=dict(
arrowstyle="->", color="w",
shrinkA=-30, shrinkB=2,
patchA=None, patchB=None,
connectionstyle=connectionstyle,
)
)
ax.add_patch(circle)
ax.set_xlim(0, img_shape[1]/pixel_size)
ax.set_ylim(top=0, bottom=img_shape[0]/pixel_size)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(100))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(100))
fig.savefig(os.path.join(base_path, 'result.png'), dpi=600)
    plt.show()
| erickmartinez/relozwall | data_processing/camera/ir_thermography_spot.py | ir_thermography_spot.py | py | 2,651 | python | en | code | 0 | github-code | 50 |
74164588634 | import numpy as np
def linear_fit(x, y, fit_min, fit_max):
"""Fit x, y with a linear function
y = mx + b
Args:
x: x variable
y: y variable
fit_min: minimal value of x to fit
fit_max: maximal value of x to fit
Returns:
m: slope of the fitted line
b: intercept of the fitted function
"""
idx = (x > fit_min) & (x < fit_max)
x = np.vstack([x[idx], np.ones_like(x[idx])]).T
y = y[idx]
m, b = np.linalg.lstsq(x, y, rcond=None)[0]
return m, b
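# --- Minimal usage sketch (added for illustration): recover y = 2x + 1 from
# noisy samples; the fit window [1, 9] and noise level are arbitrary choices.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_demo = np.linspace(0.0, 10.0, 100)
    y_demo = 2.0 * x_demo + 1.0 + 0.1 * rng.standard_normal(x_demo.size)
    m, b = linear_fit(x_demo, y_demo, fit_min=1.0, fit_max=9.0)
    print(m, b)  # expected to be close to (2.0, 1.0)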
| yqshao/tame | tame/fit.py | fit.py | py | 533 | python | en | code | 0 | github-code | 50 |
41729304062 | # -*- coding:utf-8 -*-
from Tkinter import *
class Red():
def __init__(self, root, btn, label):
self.label = label
self.btn = btn
self.root = root
self.n = 0
def gs(self):
self.btn['command'] = self.cc
def cc(self):
if self.n == 0:
self.label['bg'] = 'red'
if self.n == 1:
self.label['bg'] = 'yellow'
if self.n == 2:
self.label['bg'] = 'blue'
self.n += 1
if self.n >= 3:
self.n = 0
self.root.after(1000, self.cc)
if __name__ == '__main__':
root = Tk()
    btn = Button(root, text='Start game')
    label = Label(root, text='Color')
r = Red(root, btn, label)
r.gs()
label.pack()
btn.pack()
root.mainloop()
| sdabing/my-python-diary | tkinter/kapai jishu.py | kapai jishu.py | py | 785 | python | en | code | 0 | github-code | 50 |
7979426307 | from pydantic import BaseModel,ValidationError, validator
from typing import Any
from pydantic.networks import EmailStr
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
SQLALCHEMY_DATABASE_URL = "postgresql://postgres: @localhost/topskill"
engine = create_engine(
    # Note: check_same_thread is a SQLite-only option; passing it to a
    # PostgreSQL driver raises an error, so it is omitted for this URL.
    SQLALCHEMY_DATABASE_URL
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Pydantic request schema; named BusinessSchema here so the SQLAlchemy model
# of the same name below does not shadow it.
class BusinessSchema(BaseModel):
businessName: str
fullName: str
businessEmail:EmailStr
role:str
service: str
teamSize: int
class Business(Base):
__tablename__ = "business"
id = Column(String, primary_key=True, index=True)
businessName = Column(String,index=True)
fullName=Column(String,index=True)
businessEmail= Column(String,index=True)
role= Column(String,index=True)
service= Column(String,index=True)
teamSize= Column(Integer,index=True)
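# --- Hypothetical setup sketch (added for illustration; `create_all` is the
# standard SQLAlchemy call, everything else assumes the objects defined above):
# Base.metadata.create_all(bind=engine)   # create the `business` table
# db = SessionLocal()                     # open a session for queries
# db.close()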
class Skill_up_africa(BaseModel):
full_name:str
phone_number:int
email:EmailStr
country:str
career_path:str
experience:str
    referal:str
| ade2112/skillz | modules/form/model.py | model.py | py | 1,264 | python | en | code | 0 | github-code | 50 |
36793390895 | #! /usr/bin/env python
import copy
def readonly(value):
return property(lambda self: value)
class A:
def __init__(self, value):
_value = copy.deepcopy(value)
A.readonly = readonly(_value)
class B:
def __init__(self, value):
self._readonly = copy.deepcopy(value)
@property
def readonly(self):
return self._readonly
a = A(42)
print(a.readonly)
A.readonly2 = readonly(1)
print(a.readonly2)
b = B(42)
print(b.readonly)
b._readonly = 43
print(b.readonly)
# A property defined without a setter rejects assignment with AttributeError.
try:
    a.readonly = 43
except AttributeError as err:
    print(err)
print(a.readonly)
| baites/examples | classes/python/ReadOnlyByClosure.py | ReadOnlyByClosure.py | py | 543 | python | en | code | 4 | github-code | 50 |
585202611 | from socket import *
port = 3333
BUF_SIZE = 1024
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', port))
while True:
data, addr = sock.recvfrom(BUF_SIZE)
print('<- ', data.decode())
msg = input('-> ')
sock.sendto(msg.encode(), addr)
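# --- Hypothetical matching client sketch (added for illustration; not part of
# this file). The server above blocks on recvfrom first, so the client must
# send before it receives:
# from socket import *
# sock = socket(AF_INET, SOCK_DGRAM)
# while True:
#     msg = input('-> ')
#     sock.sendto(msg.encode(), ('127.0.0.1', 3333))
#     data, addr = sock.recvfrom(1024)
#     print('<- ', data.decode())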
| H43RO/Network-Programming | Example2/udp_chat_server.py | udp_chat_server.py | py | 254 | python | en | code | 0 | github-code | 50 |
42082097565 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Let's choose the K-Means clustering unsupervised ML algorithm
# In[3]:
# Step 1: Let us import the required Libraries
# In[4]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
import seaborn as sns
# In[5]:
# Step-2: Let us load the Iris Data set from sklearn
# In[6]:
iris=datasets.load_iris()
# In[7]:
iris
# In[8]:
iris_df = pd.DataFrame(iris.data, columns = iris.feature_names)
# In[9]:
iris_df
# In[10]:
# Step3: Let us do a Exploratory Data Analysis for the Iris Data Set
#Displays the first 5 rows of the dataset
iris_df.head()
# In[11]:
#Displays the dimensions of the dataset
iris_df.shape
# In[12]:
#Displays the numerical insights on the dataset
iris_df.describe()
# In[13]:
#Checking for null values in the dataset
iris_df.isnull().sum()
# In[14]:
# Step 4: Visualize and Analyze the Dataset
# In[15]:
#Finding the correlation between the data attributes
corr_df= iris_df.corr()
corr_df
# In[16]:
#Plotting a heat map for the dataset
plt.figure(figsize= [10,6])
sns.heatmap(corr_df, cmap='Spectral', annot=True)
# In[17]:
#Plotting pairwise relationships between the different attributes
sns.pairplot(iris_df)
# In[18]:
# Step 5: Design the K-Means Clustering Algorithm for optimal clusters
# In[19]:
#Extracting the values of different attributes in the dataset such as sepal lenth, sepal width, petal length and petal width
x = iris_df.iloc[:, [0, 1, 2, 3]].values
# In[20]:
x
# In[ ]:
# Step-6: We do not actually know the number of clusters in advance.
# There are several ways to select k, depending on domain knowledge and rules of thumb.
# The elbow method is one of the more robust ways to find the optimal number of clusters.
# It uses the sum of squared distances of observations from their cluster
# centroids, called the Within-Cluster Sum of Squares (WCSS):
#     WCSS = sum_i dist(X_i, Y_i)^2,  where Y_i is the centroid assigned to observation X_i
# In[97]:
# We use the KMeans class from the sklearn library together with the elbow
# method to find the optimal number of clusters. Since the optimum k is
# unknown, a for loop runs KMeans for k = 1..10 (we do not want a large
# number of clusters), storing each iteration's WCSS (Within-Cluster Sum of
# Squares) in a list that is then plotted.
# KMeans parameters: init='k-means++' selects the k-means++ initialization
# method; max_iter caps the iterations per run (default 300); n_init is the
# number of times the algorithm is run with different initial centroids.
# kmeans.fit() fits the model to the dataset, and kmeans.inertia_ returns the
# WCSS of the fitted clustering, which we append to the list.
# In[98]:
# Using the elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(x)  # the feature matrix was stored in lowercase x above
#appending the WCSS to the list (kmeans.inertia_ returns the WCSS value for an initialized cluster)
wcss.append(kmeans.inertia_)
# In[99]:
# Step 7: Plot the K-Means Clustering graph and identify the optimal number of clusters from the graph.
# In[100]:
# kmeans inertia_ attribute is: Sum of squared distances of samples #to their closest cluster center.
# Plotting the results onto a line graph, allowing us to observe 'The elbow'
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[102]:
plt.plot(range(1,11), wcss)
plt.title('The elbow method to find optimal number of clusters')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS') # Within cluster sum of squares
plt.show()
# In[103]:
#From 'The Elbow Method' graph, the optimum number of clusters is where the elbow occurs: the point after which the within-cluster sum of squares (WCSS) stops decreasing significantly with each added cluster.
#Therefore, from the above graph we choose the optimal number of clusters to be 3.
# In[110]:
# Step 8
# Applying kmeans to the dataset / Creating the kmeans classifier with optimal clusters to be 3 and than fitting the model to
# do the predictions
kmeans = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = kmeans.fit_predict(x)
# In[111]:
y_kmeans
# In[112]:
# Step 9: Visualize the Clusters using a scatter plot
# In[115]:
# Visualising the clusters on the first two columns
plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 80, c = 'red', label = 'Iris-setosa')
plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 80, c = 'yellow', label = 'Iris-versicolour')
plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s = 80, c = 'pink', label = 'Iris-virginica')
# Plotting the centroids of the clusters
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 200, c = 'black', label = 'Centroids')
plt.legend()
# In[116]:
# Step 10: Make Predictions
# In[117]:
#Defining the input and target variables
X = iris.data[:,:2] #Contains sepal length and Sepal Width
y = iris.target #Contains target species value
# In[118]:
X
# In[119]:
y
# In[120]:
# Visualizing X and Y variables in graphical form
plt.scatter(X[:,0],X[:,1], c=y, cmap='gist_rainbow')
plt.xlabel('Sepal Length', fontsize=14)
plt.ylabel('Sepal Width', fontsize=14)
plt.show()
# In[121]:
# Step 11: Evaluate the Model: Comparing Actual vs Predicted data values
# In[122]:
#This will tell us which cluster the data observation belongs to
new_labels = kmeans.labels_
new_labels
# In[123]:
#Plotting the identified clusters and comparing with the results
fig, axes = plt.subplots(1,2, figsize=(16,8))
axes[0].scatter(X[:,0],X[:,1], c=y, cmap='gist_rainbow', edgecolor = 'k',s=80)
axes[1].scatter(X[:,0],X[:,1], c=new_labels, cmap='viridis', edgecolor = 'k',s=80)
axes[0].set_xlabel('Speal Length',fontsize=18)
axes[0].set_ylabel('Speal Width',fontsize=18)
axes[1].set_xlabel('Speal Length',fontsize=18)
axes[1].set_ylabel('Speal Width',fontsize=18)
axes[0].tick_params(direction='in',length=10,width=5,colors='k',labelsize=20)
axes[1].tick_params(direction='in',length=10,width=5,colors='k',labelsize=20)
axes[0].set_title('Actual',fontsize=18)
axes[1].set_title('Predicted',fontsize=18)
plt.show()
# In[44]:
# 3-D Plotting
# K means Clustering
# In[124]:
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import datasets
#Iris Dataset
iris = datasets.load_iris()
X = iris.data
#KMeans
km = KMeans(n_clusters=3)
km.fit(X)
km.predict(X)
labels = km.labels_
#Plotting
fig = plt.figure(1, figsize=(7,7))
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
ax.scatter(X[:, 3], X[:, 0], X[:, 2],
           c=labels.astype(float), edgecolor="k", s=50)  # np.float was removed in NumPy 1.20+
ax.set_xlabel("Petal width")
ax.set_ylabel("Sepal length")
ax.set_zlabel("Petal length")
plt.title("K Means", fontsize=14)
# In[125]:
# 3-D Plotting
# Gaussian Mixture Model
# In[126]:
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import datasets
#Iris Dataset
iris = datasets.load_iris()
X = iris.data
#Gaussian Mixture Model
gmm = GaussianMixture(n_components=3)
gmm.fit(X)
proba_lists = gmm.predict_proba(X)
#Plotting
colored_arrays = np.matrix(proba_lists)
colored_tuples = [tuple(i.tolist()[0]) for i in colored_arrays]
fig = plt.figure(1, figsize=(7,7))
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
ax.scatter(X[:, 3], X[:, 0], X[:, 2],
c=colored_tuples, edgecolor="k", s=50)
ax.set_xlabel("Petal width")
ax.set_ylabel("Sepal length")
ax.set_zlabel("Petal length")
plt.title("Gaussian Mixture Model", fontsize=14)
# In[ ]:
| SAMKOXXPACO/Prediction-using-Unsupervised-Algorithm | Unsupervised ML Task Clustering .py | Unsupervised ML Task Clustering .py | py | 8,575 | python | en | code | 0 | github-code | 50 |
71756104794 | import math
from typing import *
import numpy as np
import torch
import torchaudio.transforms as at
from torch import nn
from torch.distributions import Beta
from torch.nn import functional as F
from torch.nn.parameter import Parameter
class GeMP(nn.Module):
"""from: https://github.com/knjcode/kaggle-seti-2021/blob/master/working/model.py
referred at https://www.kaggle.com/c/seti-breakthrough-listen/discussion/266403
"""
def __init__(self, p=3.0, eps=1e-6, learn_p=True):
super().__init__()
self._p = p
self._learn_p = learn_p
self.p = nn.Parameter(torch.ones(1) * p)
self.eps = eps
self.set_learn_p(flag=learn_p)
def set_learn_p(self, flag):
self._learn_p = flag
self.p.requires_grad = flag
def forward(self, x):
# x = F.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2), x.size(-1))).pow(1.0 / self.p)
x = F.avg_pool2d(x.clamp(min=self.eps).pow(self.p), (x.size(-2), 1)).pow(1.0 / self.p)
return x
class SwinGeMP(nn.Module):
"""from: https://github.com/knjcode/kaggle-seti-2021/blob/master/working/model.py
referred at https://www.kaggle.com/c/seti-breakthrough-listen/discussion/266403
"""
def __init__(self, p=3.0, eps=1e-6, learn_p=True):
super().__init__()
self._p = p
self._learn_p = learn_p
self.p = nn.Parameter(torch.ones(1) * p)
self.eps = eps
self.set_learn_p(flag=learn_p)
def set_learn_p(self, flag):
self._learn_p = flag
self.p.requires_grad = flag
def forward(self, x):
x = F.adaptive_avg_pool1d(x.clamp(min=self.eps).pow(self.p), 1).pow(1.0 / self.p)
return x
class GeM1d(nn.Module):
"""
Code modified from the 2d code in
https://amaarora.github.io/2020/08/30/gempool.html
"""
def __init__(self, kernel_size=8, stride=None, p=3, eps=1e-6):
super(GeM1d, self).__init__()
self.p = nn.Parameter(torch.ones(1) * p)
self.kernel_size = kernel_size
self.eps = eps
self.stride = stride
def forward(self, x):
return self.gem(x, p=self.p, eps=self.eps)
def gem(self, x, p=3, eps=1e-6):
return F.avg_pool1d(x.clamp(min=eps).pow(p), self.kernel_size, self.stride).pow(1.0 / p)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ "p="
+ "{:.4f}".format(self.p.data.tolist()[0])
+ ", "
+ "eps="
+ str(self.eps)
+ ")"
)
class Mixup(nn.Module):
"""from: https://www.kaggle.com/ilu000/2nd-place-birdclef2021-inference"""
def __init__(self, mix_beta, label_mix_type="mix"):
super(Mixup, self).__init__()
self.beta_distribution = Beta(mix_beta, mix_beta)
self.label_mix_type = label_mix_type
def forward(self, X, Y, weight=None):
bs = X.shape[0]
n_dims = len(X.shape)
perm = torch.randperm(bs)
coeffs = self.beta_distribution.rsample(torch.Size((bs,))).type_as(X)
if n_dims == 2:
X = coeffs.view(-1, 1) * X + (1 - coeffs.view(-1, 1)) * X[perm]
elif n_dims == 3:
X = coeffs.view(-1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1)) * X[perm]
else:
X = coeffs.view(-1, 1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1, 1)) * X[perm]
y_coeffs = coeffs
if self.label_mix_type == "mix":
Y = y_coeffs * Y + (1 - y_coeffs) * Y[perm]
elif self.label_mix_type == "max":
Y = Y + Y[perm] - Y * Y[perm]
if weight is None:
return X, Y
else:
weight = coeffs.view(-1) * weight + (1 - coeffs.view(-1)) * weight[perm]
return X, Y, weight
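# --- Hypothetical usage sketch for Mixup (added for illustration; the batch
# shapes are assumptions, not from the repo):
# mixup = Mixup(mix_beta=1.0)
# X = torch.randn(8, 1, 128, 256)          # (batch, channel, mel, time)
# Y = (torch.rand(8, 397) > 0.5).float()   # multi-hot targets
# X_mixed, Y_mixed = mixup(X, Y)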
class PositionalFeaturesBlockV1(nn.Module):
def __init__(self, pfb_params: Dict[str, Any]):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(pfb_params["input_dim"], pfb_params["dim1"]),
nn.LayerNorm(pfb_params["dim1"]),
nn.ReLU(),
nn.Dropout(pfb_params["drop_out_p"]),
nn.Linear(pfb_params["dim1"], 128),
nn.LayerNorm(128),
nn.ReLU(),
nn.Dropout(pfb_params["drop_out_p"]),
)
def forward(self, x):
x = self.mlp(x)
return x
class Conv3dBlock(nn.Module):
def __init__(self, conv_params: Dict[str, Any]):
super().__init__()
out_ch = conv_params["out_channel_num"]
self.conv1 = nn.Conv3d(
in_channels=out_ch,
out_channels=out_ch,
kernel_size=3,
padding=1,
)
self.bn1 = nn.BatchNorm3d(out_ch)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv3d(
in_channels=out_ch,
out_channels=out_ch,
kernel_size=3,
padding=1,
)
self.bn2 = nn.BatchNorm3d(out_ch)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv3d(
in_channels=out_ch,
out_channels=out_ch,
kernel_size=3,
padding=1,
)
self.bn3 = nn.BatchNorm3d(out_ch)
self.act3 = nn.ReLU()
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.conv3(x)
x = self.bn3(x)
x += residual
x = self.act3(x)
return x
class Conv3dBlockV2(nn.Module):
    def __init__(self, mid_channel_num: int):
        super().__init__()
        self.mid_ch = mid_channel_num
self.conv = torch.nn.Sequential(
torch.nn.Conv3d(self.mid_ch, self.mid_ch, (3, 9, 9), padding=(1, 4, 4), padding_mode="replicate"),
torch.nn.BatchNorm3d(self.mid_ch),
torch.nn.LeakyReLU(),
torch.nn.Conv3d(self.mid_ch, self.mid_ch, (3, 9, 9), padding=(1, 4, 4), padding_mode="replicate"),
torch.nn.BatchNorm3d(self.mid_ch),
torch.nn.LeakyReLU(),
)
self.act = torch.nn.LeakyReLU()
def forward(self, x):
shortcut = x
x = self.conv(x)
x += shortcut
x = self.act(x)
return x
| yoichi-yamakawa/kaggle-contrail-3rd-place-solution | scripts/training/model_util.py | model_util.py | py | 6,295 | python | en | code | 1 | github-code | 50 |
23741005175 | import transformers
import torch
from transformers import OpenAIGPTTokenizer, GPT2Tokenizer
from transformers import PreTrainedTokenizer, PreTrainedModel
from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForCausalLM
import faulthandler
faulthandler.enable()
from transformers import (AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME)
#model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
#tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')
print("><>before model")
#model = AutoModelWithLMHead.from_pretrained('/Users/beeoladeji/Desktop/content/gpt-2/output')
#model = AutoModelForCausalLM.from_pretrained('microsoft/DialoGPT-small')
model = AutoModelWithLMHead.from_pretrained('/Users/beeoladeji/Desktop/content/gpt-2/output')
print("**after model")
#/Users/beeoladeji/miniconda3/pkgs/pydotplus-2.0.2-py_3/site-packages/pydotplus-2.0.2.dist-info/METADATA
def generate_answer(question):
new_user_input_ids = tokenizer.encode(question + tokenizer.eos_token, return_tensors='pt')
# append the new user input tokens to the chat history
#bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
# generated a response while limiting the total chat history to 1000 tokens,
chat_history_ids = model.generate(new_user_input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id, temperature=0.6, repetition_penalty=1.3)
preds = [ tokenizer.decode(chat_history_ids[:, new_user_input_ids.shape[-1]:][0], skip_special_tokens=True)]
# pretty print last ouput tokens from bot
#print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
return "".join(preds)
if __name__ == "__main__":
print("Let's chat! (type 'quit' to exit)")
while True:
# sentence = "do you use credit cards?"
sentence = input("You: ")
if sentence == "quit":
break
resp = generate_answer(sentence)
print(resp)
#/Users/beeoladeji/Desktop/output
# import os
# import pandas as pd
# import openai
# import openai
# openai.Completion.create(
# model=FINE_TUNED_MODEL,
# prompt=YOUR_PROMPT)
# def ask(question,chat_log = None):
# if chat_log is None:
# chat_log = start_chart_log
# prompt = f'{chat_log}Human:{question}\nAI:'
# print("prompt",prompt)
# response = completion.create(
# prompt = prompt,engine = "davinci",stop = ["\nHuman"],temperature = 0.9,
# top_p =1,best_of=1,
# max_tokens=150
# )
# answer = response.choices[0].text.strip()
# return answer
#export OPENAI_API_KEY="<OPENAI_API_KEY>"
#openai tools fine_tunes.prepare_data -f <LOCAL_FILE>
| BolanleOladeji/IS-Project | chat2.py | chat2.py | py | 2,866 | python | en | code | 0 | github-code | 50 |
70782325915 | '''
Practice Problem #2
Samuel Hulme
Problem:
Given a 2D array of numbers, determine the cheapest path from the top left (0,0) node to the bottom right
example = [
[6, 8, 1]
[100, 2, 30]
[1, 4, 2]
]
The cheapest path in this case would be the path of 6, 8, 2, 4, 2 = 22
Conditions:
1.) The movements can only be right or down
2.) The numbers can be negative or positive
3.) Return only the cheapest path value, not the actual path
My Solution:
We should use a DFS to determine the shortest path. We can call the top [0, 0] and the bottom [w, h]
As we go through the list, we will create a dictionary/set of tuples that will represent the cost from that
location on the matrix. It will be calculated to the value at that location plus the cheapest path that comes
after it.
'''
def cheapest_path(matrix, memo=None, x=0, y=0):
    # `memo` defaults to None (not {}) so the cache is not shared between
    # separate top-level calls; the name also avoids shadowing builtin `hash`.
    if memo is None:
        memo = {}
    if x >= len(matrix[0]) or y >= len(matrix):
        return float('inf') #If x or y are out of bounds
    elif x == len(matrix[0])-1 and y == len(matrix)-1:
        return matrix[y][x] #If we found the base (row y, column x)
    elif (x, y) in memo:
        return memo[(x, y)] #If this spot has already been visited
    overall_cheapest = float('inf')
    for step in [(1, 0), (0, 1)]: # move right or down
        current_cheapest = cheapest_path(matrix, memo, x+step[0], y+step[1])
        if current_cheapest < overall_cheapest:
            overall_cheapest = current_cheapest
    memo[(x, y)] = matrix[y][x] + overall_cheapest
    return memo[(x, y)]
def print_matrix(matrix):
for row in matrix:
row_string = "["
for item in row:
row_string += " " + "{:<3}".format(str(item))
print(row_string + "]")
matrix = [
[1, 5, 70, 10],
[90, 4, -3, 70],
[-14, 200, 6, 11],
[-1, 15, 2, 3]
]
print("\nThis program will take in a 2D array of integers and determine the\ncheapest path from the top left to the bottom right index.\n")
print("The test matrix is shown below:\n")
print_matrix(matrix)
print("\nThe cheapest path is: " + str(cheapest_path(matrix)))
| shulme33/Programming | Python/pp_2.py | pp_2.py | py | 2,148 | python | en | code | 0 | github-code | 50 |
26636795524 | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Btn(QPushButton):
pass
class MyWindow(QWidget):
def __init__(self):
super(MyWindow, self).__init__()
        self.setWindowTitle('Learning QDialog')
self.resize(500, 500)
self.init_gui()
def init_gui(self):
box1 = QWidget(self)
# box1.setStyleSheet("QPushButton {background-color: orange;}")
        label1 = QLabel("Label 1", box1)
label1.resize(200, 60)
label1.setObjectName("pink")
label1.setProperty("notice_level", "warning")
label1.move(50, 50)
        btn1 = Btn("Button 1", box1)
btn1.move(150, 50)
btn1.setObjectName("btn1")
cb = QCheckBox("python", box1)
cb.move(150, 100)
cb.resize(100, 50)
cb.setTristate(True)
box2 = QWidget(self)
box2.setObjectName("box2")
# box2.setStyleSheet("background-color: cyan;")
        btn2 = QPushButton("Button 2", box2)
btn2.move(150, 50)
btn2.setObjectName("btn2")
        label3 = QLabel("Label 3", box2)
label3.move(200, 200)
box3 = QWidget(box2)
box3.resize(150, 150)
# box3.setStyleSheet("background-color: lightgray;")
        label2 = QLabel("Label 2", box3)
label2.resize(100, 60)
label2.move(50, 50)
v_layout = QVBoxLayout()
self.setLayout(v_layout)
v_layout.addWidget(box1)
v_layout.addWidget(box2)
btn2.setEnabled(False)
        self.other_btn = QPushButton("Button 3")
self.other_btn.show()
if __name__ == '__main__':
import sys
from imqssf_tool import QssFileDealer
app = QApplication(sys.argv)
QssFileDealer.set_app_qss(app,'qssfile1.qss')
mywindow = MyWindow()
mywindow.show()
    sys.exit(app.exec_())
| PeterZhangxing/codewars | gui_test/test_pyqt/qss_test/learning_qss.py | learning_qss.py | py | 1,856 | python | en | code | 0 | github-code | 50 |
35196294855 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 31 00:21:52 2016
@edited by K Provost
"""
#Aligning sequences
#Muscle software installed required: http://www.drive5.com/muscle/downloads.htm
def align(filename,outpath,cwd,muscle_exe):
import os
from Bio.Align.Applications import MuscleCommandline
from Bio import AlignIO
muscle_exe = "/Users/kprovost/Documents/muscle3.8.31_i86darwin64"
outname = "ALIGNED_"+filename
print("ALIGNING: "+filename)
with open(filename,"r") as infile:
read = infile.read()
count = read.count(">")
if count <= 1:
with open(outpath+outname,"w") as outfile:
outfile.write(read)
print("ONLY ONE SEQ, DONE")
else:
try:
            # write the alignment into outpath so it matches the single-sequence branch
            muscle_cline = MuscleCommandline(muscle_exe, input=filename, out=outpath+outname)
            stdout, stderr = muscle_cline()
            AlignIO.read(outpath+outname, "fasta")
print("ALIGNED")
except:
print("??? ERROR")
print(filename)
def main():
from Bio.Align.Applications import MuscleCommandline
from Bio import AlignIO
import os
import sys
import glob
import shutil
cwd = os.getcwd()
try:
muscle_exe = sys.argv[1]
print("\tMuscle path exists")
except:
print("Muscle defaulting to:")
print("/Users/kprovost/Documents/muscle3.8.31_i86darwin64")
#print("Muscle not given, quitting")
muscle_exe = "/Users/kprovost/Documents/muscle3.8.31_i86darwin64"
#quit()
try:
path = sys.argv[2]
print("\tPath is: ",path)
except:
print("Path not given")
path = os.getcwd()+"/7_readytoalign/"
print("Path is current directory + 7_readytoalign")
treepath = cwd+"/9_badalignments/"
if not os.path.exists(treepath):
print("creating folder: ",treepath)
os.makedirs(treepath)
outpath = cwd+"/8_goodalignments/"
if not os.path.exists(outpath):
print("creating folder: ",outpath)
os.makedirs(outpath)
os.chdir(path)
for filename in glob.glob("*.fa*"):
align(filename,outpath,cwd,muscle_exe)
print("\n\nDONE")
# from Bio import SeqIO
# filename = "NC_005816.gb"
# locus_to_gene = dict()
# for record in SeqIO.parse(filename, "genbank"):
# for f in record.features:
# if f.type == "CDS":
# if "gene" in f.qualifiers:
# if "locus_tag" in f.qualifiers:
# genes = f.qualifiers["gene"]
# locus_tags = f.qualifiers["locus_tag"]
# assert len(genes) == 1, genes
# assert len(locus_tags) == 1, locus_tags
# locus_to_gene[locus_tags[0]] = genes[0]
# print("Mapped %i locus tags to genes" % len(locus_to_gene))
if __name__ == "__main__":
main()
#import os
#os.chdir("/Users/kprovost/Documents/Publications/Parrots/ParrotPipelineRedo/OUTGROUPS/")
#muscle_exe = "/Users/kprovost/Documents/Publications/Parrots/ParrotPipelineRedo/SCRIPTS/muscle3.8.31_i86darwin64"
#align("Mascarinus_OLDANDNEW.fasta","ALIGNED_Mascarinus_OLDANDNEW.fasta",os.getcwd(),muscle_exe) | kaiyaprovost/misc_scripts | muscleAlign.py | muscleAlign.py | py | 3,397 | python | en | code | 0 | github-code | 50 |
27622938759 | from dataclasses import FrozenInstanceError
from scipy import signal # type: ignore
import matplotlib.pyplot as plt # type: ignore
import pytest
import numpy as np
import pysmo.tools.noise as noise
def test_NoiseModel() -> None:
# create two random arrays for testing
psd = np.random.rand(20)
psd2 = np.random.rand(20)
T = np.random.rand(20)
# length of the arrays needs to be equal
with pytest.raises(ValueError):
noise.NoiseModel(psd[1:], T)
# create a NoiseModel instance and verify it is immutable
model = noise.NoiseModel(psd, T)
assert isinstance(model, noise.NoiseModel)
with pytest.raises(FrozenInstanceError):
model.psd = psd2 # type: ignore
with pytest.raises(ValueError):
model.psd[3] *= 2
with pytest.raises(ValueError):
model.T[3] *= 2
@pytest.mark.depends(on=["test_NoiseModel"])
@pytest.mark.mpl_image_compare(
remove_text=True, baseline_dir="../baseline/", style="default"
)
def test_peterson(): # type: ignore
nlnm = noise.peterson(0)
nhnm = noise.peterson(1)
nm_03 = noise.peterson(0.3)
with pytest.raises(ValueError):
noise.peterson(1.34)
assert nlnm == noise.NLNM
assert nhnm == noise.NHNM
assert all(
nm_03.T
== np.array(
[
0.10,
0.17,
0.22,
0.32,
0.40,
0.80,
1.24,
2.40,
3.80,
4.30,
4.60,
5.00,
6.00,
6.30,
7.90,
10.00,
12.00,
15.40,
15.60,
20.00,
21.90,
31.60,
45.00,
70.00,
101.00,
154.00,
328.00,
354.80,
600.00,
10**4,
10**5,
]
)
)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(nlnm.T, nlnm.psd)
ax.plot(nhnm.T, nhnm.psd)
ax.plot(nm_03.T, nm_03.psd)
ax.set_xscale("log")
return fig
@pytest.mark.depends(on=["test_NoiseModel"])
@pytest.mark.mpl_image_compare(
remove_text=True, baseline_dir="../baseline/", style="default"
)
def test_generate_noise(): # type: ignore
npts = 10000
nperseg = npts / 4
nfft = npts / 2
srate = 0.1
sfrec = 1 / srate
nhnm = noise.NHNM
# velocity noise model from peterson paper
nhnm_velo = noise.NoiseModel(
psd=nhnm.psd + 20 * np.log10(nhnm.T / 2 / np.pi), T=nhnm.T
)
nhnm_data_acc = noise.generate_noise(
model=nhnm, npts=npts, delta=srate, seed=0
).data
nhnm_data_vel = noise.generate_noise(
model=nhnm, npts=npts, delta=srate, return_velocity=True, seed=0
).data
freqs_acc, power_acc = signal.welch(
nhnm_data_acc, sfrec, nperseg=nperseg, nfft=nfft, scaling="density"
)
freqs_vel, power_vel = signal.welch(
nhnm_data_vel, sfrec, nperseg=nperseg, nfft=nfft, scaling="density"
)
freqs_acc, power_acc = freqs_acc[1:], power_acc[1:]
freqs_vel, power_vel = freqs_vel[1:], power_vel[1:]
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(1 / freqs_acc, 10 * np.log10(power_acc))
ax1.plot(nhnm.T, nhnm.psd, "k")
ax1.set_xscale("log")
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot(1 / freqs_vel, 10 * np.log10(power_vel))
ax2.plot(nhnm_velo.T, nhnm_velo.psd, "k")
ax2.set_xscale("log")
return fig
| pysmo/pysmo | tests/tools/test_noise.py | test_noise.py | py | 3,632 | python | en | code | 18 | github-code | 50 |
73179515356 | # coding: utf-8
import argparse
import time
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import data
import model
import cPickle
import glob, os
import read_graph as rg
parser = argparse.ArgumentParser(description='Compute n-best coverage rate over rescoring lattices')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--lattice', type=str, default='',
help='lattice path to rescore')
parser.add_argument('--nbest', type=str, default='',
help='nbest path to rescore')
parser.add_argument('--job', type=int, default=30,
help='number of job')
parser.add_argument('--scale', type=int, default=10,
help='scale of lm score')
parser.add_argument('--beam', type=int, default=100,
help='beam')
parser.add_argument('--output', type=str,
help='output file')
args = parser.parse_args()
fw = open(args.output,'w')
cover_recall = []
for job in range(1,args.job+1):
for file in glob.glob(args.lattice+'/'+str(job)+"/*.lat"):
filename = file.replace(args.lattice+'/'+str(job)+'/','').replace('.lat','')
# print file
nodes, arcs = rg.read_graph(file)
lattice_sents = rg.nbest_BFS(1000,nodes,args.scale,args.beam)
nbest_sents=[]
fr = open(args.nbest+'/'+str(job)+'/'+filename+'.onlytext')
for i in fr:
line = i.split()
nbest_sents.append(line)
# lattice_sent = sents[index][0]
# lattice_sent = sents[index][0].split()
# lattice_sent.pop(0)
# lattice_sent.pop()
cover_recall.append(0)
for i in nbest_sents:
for j in lattice_sents:
if i==j[0]:
cover_recall[-1]+=1
break
cover_recall[-1] = cover_recall[-1]/float(len(nbest_sents))
fw.write(str(cover_recall[-1])+'\n')
if len(cover_recall)%100==0:
print(str(len(cover_recall))+'\tfinished')
fw.write('average: '+str(sum(cover_recall)/len(cover_recall))+'\n')
# print cover_recall
# print len(nbest_sents)
# import pdb;pdb.set_trace()
| cliffchen123/language_model | compute_lattice_cover_rate.py | compute_lattice_cover_rate.py | py | 2,375 | python | en | code | 1 | github-code | 50 |
42131499524 | # Source: https://github.com/krrish94/nerf-pytorch
# Torch imports
import torch
from torch import nn
from torch.nn import functional as F
class VeryTinyNerfModel(torch.nn.Module):
def __init__(self, hidden_size=128, num_encoders=6):
super(VeryTinyNerfModel, self).__init__()
        # input width = raw xyz (3) plus sin/cos positional encodings (3 * 2 * num_encoders)
        self.layer1 = torch.nn.Linear(3 + 3 * 2 * num_encoders, hidden_size)
self.layer2 = torch.nn.Linear(hidden_size, hidden_size)
self.layer3 = torch.nn.Linear(hidden_size, 4)
def forward(self, x):
x = x.float()
x = F.relu(self.layer1(x))
x = F.relu(self.layer2(x))
x = self.layer3(x)
return x
class ReplicateNeRFModel(torch.nn.Module):
def __init__(self,
hidden_size=256,
num_encoding_fn_xyz=6,
num_encoding_fn_dir=4):
super(ReplicateNeRFModel, self).__init__()
self.dim_xyz = 3 + 2 * 3 * num_encoding_fn_xyz
self.dim_dir = (3 if num_encoding_fn_dir > 0 else 0) + 2 * 3 * max(num_encoding_fn_dir, 0)
self.layer1 = torch.nn.Linear(self.dim_xyz, hidden_size)
self.layer2 = torch.nn.Linear(hidden_size, hidden_size)
self.layer3 = torch.nn.Linear(hidden_size, hidden_size)
self.alpha = torch.nn.Linear(hidden_size, 1)
self.layer4 = torch.nn.Linear(hidden_size + self.dim_dir, hidden_size // 2)
self.layer5 = torch.nn.Linear(hidden_size // 2, hidden_size // 2)
self.rgb = torch.nn.Linear(hidden_size // 2, 3)
def forward(self, x):
x = x.float()
xyz, direction = x[...,:self.dim_xyz], x[...,self.dim_xyz:]
# Pass only location first
x = F.relu(self.layer1(xyz))
x = F.relu(self.layer2(x))
x = self.layer3(x)
alpha = self.alpha(x)
# Add viewing direction
x = F.relu(self.layer4(torch.cat((x, direction), dim=-1)))
x = F.relu(self.layer5(x))
rgb = self.rgb(x)
return torch.cat((rgb, alpha), dim=-1)
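# Minimal usage sketch (not part of the original file): with the defaults
# num_encoding_fn_xyz=6 and num_encoding_fn_dir=4, the expected input width is
# dim_xyz + dim_dir = 39 + 27 = 66 encoded values per ray sample.
if __name__ == "__main__":
    model = ReplicateNeRFModel()
    x = torch.rand(1024, 39 + 27)  # [n_samples, encoded xyz + encoded view direction]
    out = model(x)                 # -> (1024, 4): RGB + alpha
    print(out.shape)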
| anshuman64/nerf | src/main_model.py | main_model.py | py | 2,028 | python | en | code | 0 | github-code | 50 |
14130660030 | n = int(input())
coords = []
for _ in range(n):
x1,x2 = map(int, input().split())
coords.append((x1,x2))
total_cnt = 0
# brute-force: try every unordered triple (i, j, k) of segments to delete
for i in range(n):
for j in range(i+1, n):
for k in range(j+1, n):
            arr = [0] * 101   # coverage count for each coordinate 0..100
            is_bool = True    # stays True while no two remaining segments overlap
for l in range(n):
if l != i and l != j and l != k:
x1,x2 = coords[l][0], coords[l][1]
for m in range(x1, x2+1):
arr[m] += 1
for ele in arr:
if ele > 1:
is_bool = False
if is_bool:
total_cnt += 1
print(total_cnt) | hoonkiyeo/codetree-TILs | 231205/선분 3개 지우기/remove-three-segments.py | remove-three-segments.py | py | 666 | python | en | code | 0 | github-code | 50 |
34094024275 | import math, time, random
import numpy as np
from vcopt import vcopt
node = [
[23,39],[ 8,44],[34,36],[12,30],[42,37],[ 6,35],[ 1,15],[12,25],
[ 4,39],[13,42],[23,13],[ 7,39],[11, 5],[ 6,44],[28,45],[20, 7],
[ 3,16],[ 4,19],[ 3,39],[ 0, 2],[19,21],[ 3,43],[ 8,34],[20,39],
[ 2,50],[20,26],[16,36],[24,30],[ 9,40],[ 5,22],[30,35],[ 2, 0],
[21,36],[22,28],[ 3,33],[11,36],[14,34]
]
def distance(cal_node):
size = len(cal_node)
return_table = [[0] * size for x in range(size)]
for i in range(size):
for j in range(size):
if i != j:
dx = cal_node[i][0] - cal_node[j][0]
dy = cal_node[i][1] - cal_node[j][1]
return_table[i][j] = math.sqrt(dx * dx + dy * dy)
return return_table
def path_length(path):
global distance_table
n = 0
for i in range(1, len(path)):
n += distance_table[path[i - 1]][path[i]]
n += distance_table[path[0]][path[-1]]
return n
def opt_2_solve(size, path):
global distance_table
total = 0
while True:
count = 0
for i in range(size - 2):
i1 = i + 1
for j in range(i + 2, size):
if j == size - 1:
j1 = 0
else:
j1 = j + 1
if i != 0 or j1 != 0:
l1 = distance_table[path[i]][path[i1]]
l2 = distance_table[path[j]][path[j1]]
l3 = distance_table[path[i]][path[j]]
l4 = distance_table[path[i1]][path[j1]]
if l1 + l2 > l3 + l4:
new_path = path[i1:j+1]
path[i1:j+1] = new_path[::-1]
count += 1
total += count
if count == 0: break
return path, total
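# Worked example of the 2-opt test above (sketch): in a tour ...-A-B-...-C-D-...,
# reversing the segment B..C replaces edges (A,B) and (C,D) with (A,C) and (B,D).
# The swap is applied only when d(A,B) + d(C,D) > d(A,C) + d(B,D); e.g. with
# d(A,B)=5, d(C,D)=5, d(A,C)=3, d(B,D)=4 the tour shortens by 3.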
def NN(size):
global distance_table
cal_path = np.delete(np.arange(size),0,0)
return_path = np.array([0])
for i in range(int(size)-1):
for j in range(len(cal_path)):
if j==0 or (min_len > distance_table[return_path[i]][cal_path[j]]):
del_idx = j
min_idx = cal_path[j]
min_len = distance_table[return_path[i]][cal_path[j]]
return_path = np.insert(return_path,len(return_path),min_idx)
cal_path = np.delete(cal_path,del_idx,0)
return return_path
def tsp_1_score(path):
path_full = np.hstack((0, path))
return path_length(path_full)
#Build the table of pairwise distances between all nodes
distance_table = distance(node)
node_len = len(node)
print("\n--Nearest Neighber solve--")
start_time = time.time()
#Optimize with the Nearest Neighbor heuristic
min_path = NN(node_len)
min_length = path_length(min_path)
min_path = np.hstack((min_path,0))
end_time = time.time()
print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time))
print("Route : " + str(min_path))
print("\n--2 Opt solve--")
start_time = time.time()
#Create the initial route
path = np.arange(node_len)
min_length = 0
#Optimize with 2-opt and keep the best-scoring result
for i in range(10):
    #Shuffle the route randomly
np.random.shuffle(path)
path, x = opt_2_solve(node_len, path)
length = path_length(path)
if i == 0 or min_length > length:
min_length = length
min_path = np.array(path)
min_path = np.hstack((np.roll(min_path,-np.where(min_path == 0)[0][0]),0))
end_time = time.time()
print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time))
print("Route : " + str(min_path))
print("\n--2 Opt solve (vcopt)--")
start_time = time.time()
#Create the initial route
path = np.arange(1,node_len)
min_length = 0
#Optimize with 2-opt (vcopt) and keep the best-scoring result
for i in range(10):
    #Shuffle the route randomly
np.random.shuffle(path)
path, length = vcopt().opt2(path, tsp_1_score, 0.0, seed=None)
if i == 0 or length < min_length:
min_length = length
min_path = np.array(path)
min_path = np.hstack((0,min_path,0))
end_time = time.time()
print("Min Length : "+str(min_length)+"\ntime : "+str(end_time-start_time))
print("Route : " + str(min_path)) | UnknownSP/ProgrammingExercise | 巡回セールスマン問題/比較/1TSP_Compare.py | 1TSP_Compare.py | py | 4,181 | python | en | code | 0 | github-code | 50 |
13928767500 | APP_INTERFACE = 'tcp://127.0.0.1:5555'
DEFAULT_LIBRARY = 'mongodb://127.0.0.1/apps'
DEFAULT_COLLECTION = 'apps'
ERROR_SUCCESS = 0
ERROR_EXCEPTION = 1
METHOD_REGISTER = 'Register'
METHOD_UNREGISTER = 'UnRegister'
METHOD_UPDATE = 'Update'
METHOD_QUERY = 'Query'
| Kimice/rpc-demo | origin/dataservice/common/constants.py | constants.py | py | 262 | python | en | code | 0 | github-code | 50 |
29860050280 | #domain_stats.py by Mark Baggett
#Twitter @MarkBaggett
from __future__ import print_function
import BaseHTTPServer
import threading
import SocketServer
import urlparse
import re
import argparse
import sys
import time
import os
import datetime
try:
import whois
except Exception as e:
print(str(e))
print("You need to install the Python whois module. Install PIP (https://bootstrap.pypa.io/get-pip.py). Then 'pip install python-whois' ")
sys.exit(0)
class domain_api(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
print(self.path)
(ignore, ignore, urlpath, urlparams, ignore) = urlparse.urlsplit(self.path)
cmdstr = tgtstr = None
print(urlparams)
if re.search("[\/](?:created|alexa|domain)[\/].*?", urlpath):
cmdstr = re.search(r"[\/](created|alexa|domain)[\/].*$", urlpath)
tgtstr = re.search(r"[\/](created|alexa|domain)[\/](.*)$", urlpath)
if not cmdstr or not tgtstr:
                self.wfile.write('<html><body>API Documentation<br> http://%s:%s/cmd/tgt <br> cmd = domain, alexa or created <br> tgt = domain name </body></html>' % (self.server.server_address[0], self.server.server_address[1]))
return
params = {}
params["cmd"] = cmdstr.group(1)
params["tgt"] = tgtstr.group(2)
else:
cmdstr=re.search("cmd=(?:domain|alexa|created)",urlparams)
tgtstr = re.search("tgt=",urlparams)
if not cmdstr or not tgtstr:
                self.wfile.write('<html><body>API Documentation<br> http://%s:%s/?cmd=domain&tgt=<domain> <br> http://%s:%s/?cmd=alexa&tgt=<domain> <br> http://%s:%s/?cmd=created&tgt=<domain> </body></html>' % (self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1],self.server.server_address[0], self.server.server_address[1]))
return
params={}
try:
for prm in urlparams.split("&"):
key,value = prm.split("=")
params[key]=value
except:
self.wfile.write('<html><body>Unable to parse the url. </body></html>')
return
if params["cmd"] == "alexa":
if self.server.verbose: self.server.safe_print ("Alexa Query:", params["tgt"])
if not self.server.alexa:
if self.server.verbose: self.server.safe_print ("No Alexa data loaded. Restart program.")
self.wfile.write("Alexa not loaded on server. Restart server with -a or --alexa and file path.")
else:
if self.server.verbose: self.server.safe_print ("Alexa queried for:%s" % (params['tgt']))
self.wfile.write(str(self.server.alexa.get(params["tgt"],"0")))
elif params["cmd"] == "domain" or params["cmd"] == "created":
if params['tgt'] in self.server.cache:
print("Found in cache!!")
domain_info = self.server.cache.get(params['tgt'])
else:
try:
print ("Querying the web", params['tgt'])
domain_info = whois.whois(params['tgt'])
if not domain_info.get('creation_date'):
self.wfile.write(str("No whois record for %s" % (params['tgt'])))
return
except Exception as e:
if self.server.verbose: self.server.safe_print ("Error querying whois server: %s" % (str(e)))
return
self.server.safe_print("Caching whois record %s" % (str(domain_info)))
domain_info["time"] = time.time()
if self.server.alexa:
domain_info['alexa'] = self.server.alexa.get(params["tgt"],"0")
try:
self.server.cache_lock.acquire()
self.server.cache[params['tgt']] = domain_info
finally:
self.server.cache_lock.release()
if params["cmd"] == "created":
self.wfile.write(domain_info.get('creation_date','not found').__str__())
elif params["cmd"] =="domain":
self.wfile.write(str(domain_info))
return
def log_message(self, format, *args):
return
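# Example queries (illustrative; assumes the server was started on 127.0.0.1:8000):
#   http://127.0.0.1:8000/created/<domain>          -> whois creation date
#   http://127.0.0.1:8000/alexa/<domain>            -> Alexa rank, if loaded with -a
#   http://127.0.0.1:8000/?cmd=domain&tgt=<domain>  -> full whois record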
class ThreadedDomainStats(SocketServer.ThreadingMixIn, SocketServer.TCPServer, BaseHTTPServer.HTTPServer):
def __init__(self, *args,**kwargs):
self.cache = {}
self.cache_lock = threading.Lock()
self.cache_time = 1
self.screen_lock = threading.Lock()
self.alexa = ""
self.verbose = False
self.exitthread = threading.Event()
self.exitthread.clear()
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
def safe_print(self,*args,**kwargs):
try:
self.screen_lock.acquire()
print(*args,**kwargs)
finally:
self.screen_lock.release()
def clear_old_cache(self):
if self.verbose: self.safe_print ( "Clearing old cache")
try:
self.cache_lock.acquire()
            for item in list(self.cache):
                # compare elapsed time since the record was cached, not the reverse
                if (time.time() - self.cache[item].get('time', time.time())) > self.cache_time*60*60:
                    del self.cache[item]
finally:
self.cache_lock.release()
#Reschedule yourself to run again in 1 hour
if not self.exitthread.isSet():
self.timer = threading.Timer(60*60, self.clear_old_cache, args = ())
self.timer.start()
def main():
parser=argparse.ArgumentParser()
parser.add_argument('-ip','--address',required=False,help='IP Address for the server to listen on. Default is 127.0.0.1',default='127.0.0.1')
parser.add_argument('-c','--cache_time',type=float,required=False,help='Number of hours to hold a whois record in the cache. Default is 1 hour. Set to 0 to save forever.',default=1)
parser.add_argument('port',type=int,help='You must provide a TCP Port to bind to')
parser.add_argument('-v','--verbose',action='count',required=False,help='Print verbose output to the server screen. -vv is more verbose.')
parser.add_argument('-a','--alexa',required=False,help='Provide a local file path to an Alexa top-1m.csv')
#args = parser.parse_args("-s 1 -vv 8081 english_lowercase.freq".split())
args = parser.parse_args()
#Setup the server.
server = ThreadedDomainStats((args.address, args.port), domain_api)
if args.alexa:
if not os.path.exists(args.alexa):
print("Alexa file not found %s" % (args.alexa))
else:
try:
server.alexa = dict([(a,b) for b,a in re.findall(r"^(\d+),(.*)", open(args.alexa).read(), re.MULTILINE)])
except Exception as e:
print("Unable to parse alexa file:%s" % (str(e)))
server.verbose = args.verbose
server.cache_time = args.cache_time
    #Schedule the first cache-clearing run unless cache_time was set to 0.
if args.cache_time:
server.timer = threading.Timer(60 *args.cache_time, server.clear_old_cache, args = ())
server.timer.start()
#start the server
    print('Server is Ready. http://%s:%s/?cmd=domain&tgt=<domain>' % (args.address, args.port))
print('[?] - Remember: If you are going to call the api with wget, curl or something else from the bash prompt you need to escape the & with \& \n\n')
while True:
try:
server.handle_request()
except KeyboardInterrupt:
break
server.timer.cancel()
server.safe_print("Web API Disabled...")
server.safe_print("Control-C hit: Exiting server. Please wait..")
if __name__=="__main__":
main() | HASecuritySolutions/Logstash | configfiles-setup_required/freq/domain_stats.py | domain_stats.py | py | 8,009 | python | en | code | 248 | github-code | 50 |
3641922355 | ##
##pedir = True
##while pedir:
## numero = int(input("Dame un numero del 1 al 100: "))
## if numero < 100 and numero > 0:
## pedir = False
from random import *
print("Piensa un número del 1 al 100,¡voy a intentar advinarlo!")
print("Pulsa intro cuando estés listo...")
input()
aleatorio = randint(1,100)
acierto = True
while acierto:
respuesta = input("¿Es el {0} el número secreto? (s/n) " .format(aleatorio))
if respuesta == "n":
mayor_menor = input("¿Es el número secreto mayor o menor que {0}?. ".format(aleatorio))
if mayor_menor == "mayor":
aleatorio = randint(aleatorio+1, 100)
elif mayor_menor == "menor":
aleatorio = randint(0,aleatorio-1)
elif respuesta == "s":
print("Estoy de suerte, ¡He acertado!")
acierto = False
else:
print("Lo siento no te he entendido")
| emiliobort/python | Practica2_Past/Programas/Ejercicio10.py | Ejercicio10.py | py | 896 | python | es | code | 0 | github-code | 50 |
22007133395 | import sqlite3
# create a connection instance to the database
connection = sqlite3.connect('records.db')
# initialize the cursor
cursor = connection.cursor()
# create the table only if it does not already exist
create_table = "CREATE TABLE IF NOT EXISTS records (id INTEGER PRIMARY KEY, pontos int)"
cursor.execute(create_table)
connection.commit()
connection.close()
| Murimaral/projeto_batalha_naval | criar_tabela.py | criar_tabela.py | py | 361 | python | en | code | 0 | github-code | 50 |
27539293247 | import json
import PySimpleGUI as sg
from src.handlers import login
def config(dificultad,ayuda,tarjeta,tiempo,color,alerta):
""" Guarda la configuracion del usuario en un archivo json"""
datos_config = [dificultad,ayuda,tarjeta,tiempo,color,alerta]
tiempo = str(tiempo)
if (tiempo.isdigit()):
        configuraciones = leer_config() # Load all saved configurations
jugador_logueado = login.leer_sesion()
        configuraciones[jugador_logueado] = datos_config # Update this user's configuration
        datos_json = json.dumps(configuraciones)
        # Save the configuration to a JSON file
with open("configuracion.json", "w", encoding="utf8") as archivoJSON:
archivoJSON.write(datos_json)
        sg.SystemTray.notify('Success!', 'Changes saved')
def leer_config():
"""Devuelve todas las configuraciones guardadas"""
configuraciones = {}
with open("configuracion.json", "r", encoding="utf8") as archivoJSON:
configuraciones = json.load(archivoJSON)
#print(configuraciones)
return configuraciones
def crear_configuracion_default(usuario):
"""Crea la configuracion default cuando el jugador se registra"""
try:
configuraciones = leer_config()
except Exception:
        # If the file does not exist yet
configuraciones = {}
configuraciones[usuario] = ["Facil", "Con", "Texto", "60", "Topanga", "Ganaste, Perdiste"]
datos_json = json.dumps(configuraciones)
    # Save the configuration to a JSON file
with open("configuracion.json", "w", encoding="utf8") as archivoJSON:
archivoJSON.write(datos_json)
| LauraCuenca/MempybyGrupo29 | src/handlers/configuracion_h.py | configuracion_h.py | py | 1,640 | python | es | code | 0 | github-code | 50 |
23363173038 | import random
import math
depth = 5
functions = ["xd", "*", "+", "-"]
noFunctions = 3
terminals = ["mizerie", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9"]
noTerminals = 9
class Individ(object):
"""docstring for Individ"""
def __init__(self):
self.values = [0 for i in range(2 ** depth)]
for i in range(len(self.values)):
if i < 3 or (i < 2 ** (depth - 1) - 1 and random.random() < 0.5):
self.values[i] = -random.randint(1, len(functions) - 1)
else:
self.values[i] = random.randint(1, len(terminals) - 1)
def fitness(self, problem):
s = 0
for i in range(len(problem.inData)):
rez = self.dfs(0, problem.inData[i])
s += (rez - problem.outData[i]) * (rez - problem.outData[i])
#print("Expected:", problem.outData[i], "Got: ", rez)
return 100000.0 / s
def eval(self, problem):
s = 0
for i in range(len(problem.inData)):
rez = self.dfs(0, problem.inData[i])
s += (rez - problem.outData[i]) * (rez - problem.outData[i])
print("Expected:", problem.outData[i], "Got: ", rez)
return 100000.0 / s
def dfs(self, pos, inputs):
if (self.values[pos] > 0):
return inputs[self.values[pos] - 1]
if (functions[-self.values[pos]] == "*"):
return self.dfs(2 * pos + 1, inputs) * self.dfs(2 * pos + 2, inputs)
if (functions[-self.values[pos]] == "-"):
return self.dfs(2 * pos + 1, inputs) - self.dfs(2 * pos + 2, inputs)
if (functions[-self.values[pos]] == "+"):
return self.dfs(2 * pos + 1, inputs) + self.dfs(2 * pos + 2, inputs)
def mutate(self, probability):
index1 = random.randint(0, (2 ** (depth - 1)) - 2)
index2 = random.randint(0, (2 ** (depth - 1)) - 2)
i = min(index1, index2)
j = max(index1, index2)
while i < j:
self.values[i], self.values[j] = self.values[j], self.values[i]
i += 1
j -= 1
def crossover(individ1, individ2, probability):
offspring1 = Individ()
offspring2 = Individ()
index1 = random.randint(0, (2 ** (depth)))
index2 = random.randint(0, (2 ** (depth)))
i = min(index1, index2)
j = max(index1, index2)
for k in range(0, (2 ** (depth))):
if k in range(i, j):
offspring1.values[k] = individ1.values[k]
offspring2.values[k] = individ2.values[k]
else:
offspring1.values[k] = individ2.values[k]
offspring2.values[k] = individ1.values[k]
return offspring1, offspring2
def __str__(self):
return str(self.values)
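# Encoding sketch (inferred from Individ above): the expression tree is a
# heap-style array where the children of index i sit at 2*i+1 and 2*i+2; a
# negative value -k denotes functions[k] ("*", "+", "-") and a positive value k
# denotes input column k-1. E.g. [-1, 1, 2, ...] evaluates to inputs[0] * inputs[1].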
class Algorithm(object):
"""docstring for Algorithm"""
def __init__(self, problem, populationSize = 40):
self.__problem = problem
self.readParameters()
self.__populationSize = populationSize
self.__population = Population(self.__populationSize)
def getPopulation(self):
return self.__population
def iteration(self):
self.__population.selection(self.__problem)
self.__population.evaluate(self.__problem)
def run(self):
for i in range(100):
self.iteration()
print(
self.__population.getBest(),
self.__population.getBest().fitness(self.__problem),
self.__population.getPopSize()
)
self.__population.getBest().eval(self.__problem)
def readParameters(self):
self.__problem.loadData("slump_test.data")
class Population(object):
"""docstring for Population"""
def __init__(self, noIndivids):
self.__noIndivids = noIndivids
self.__individs = []
for i in range(noIndivids):
self.__individs.append(Individ())
def evaluate(self, problem):
for i in range(self.__noIndivids // 2 - 1):
offspring1, offspring2 = Individ.crossover(self.__individs[i], self.__individs[i + 1], 0.5)
self.__individs.append(offspring1)
self.__individs.append(offspring2)
offspring1, offspring2 = Individ.crossover(self.__individs[self.__noIndivids // 2 - 1], self.__individs[0], 0.5)
self.__individs.append(offspring1)
self.__individs.append(offspring2)
for i in range(self.__noIndivids):
self.__individs[i].mutate(0.08)
self.__individs.sort(key = lambda x : x.fitness(problem), reverse = True)
def getBest(self):
return self.__individs[0]
def getPopSize(self):
return len(self.__individs)
def selection(self, problem):
s = 0
newPopulation = []
for i in range(self.__noIndivids):
s += self.__individs[i].fitness(problem)
for i in range(self.__noIndivids // 2):
r = random.random()
j = 0
percents = 0
while j < self.__noIndivids and percents < r:
percents += self.__individs[j].fitness(problem) / s
j += 1
j -= 1
newPopulation.append(self.__individs[j])
s -= self.__individs[j].fitness(problem)
self.__individs.pop(j)
self.__individs = newPopulation
def __str__(self):
s = ""
for i in self.__individs:
s += str(i) + "\n"
return s
class Problem(object):
"""docstring for Problem"""
def __init__(self):
self.inData = []
self.outData = []
def loadData(self, fileName):
with open(fileName, "r") as f:
while True:
line = f.readline()
if (line == ""):
break
line = line.split(",")
crtIn = []
for i in range(1, 10):
crtIn.append(float(line[i].strip()))
self.outData.append(float(line[10].strip()))
self.inData.append(crtIn)
if __name__ == "__main__":
p = Problem()
a = Algorithm(p, 40)
a.run()
| ggaaggaabbii/University-work | ai/lab6_2.py | lab6_2.py | py | 5,158 | python | en | code | 0 | github-code | 50 |
38534453006 | import torch
from torch_geometric.nn import knn_graph, knn, CGConv
class GNNAttention(torch.nn.Module):
'''Uses 2 graph layers. One for self attention and one for cross attention. Self-attention based on k-NN of coordinates. Cross-attention based on k-NN in feature space'''
def __init__(self, dim, k):
'''dim is the feature dimensions, k is the number of neighbours to consider'''
super().__init__()
self.k = k
self.conv1 = CGConv(dim, aggr='max', batch_norm=True).cuda()
self.conv2 = CGConv(dim, aggr='max', batch_norm=True).cuda()
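    # Shape sketch: with b graphs of npts points and d feature dims each,
    # xyz0/xyz1 are (b, npts, 3) and f0/f1 are (b, npts, d); forward() returns
    # both feature tensors with the same (b, npts, d) shape.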
def forward(self, xyz0, xyz1, f0, f1):
b, npts, d = f0.shape
batch_idx = torch.arange(b).repeat_interleave(npts).to(xyz0.device)
f0 = f0.reshape(-1, d)
f1 = f1.reshape(-1, d)
#creates edge graph for coordinates
edge_idx_c0 = knn_graph(xyz0.reshape(-1,3), k=self.k, batch=batch_idx)
edge_idx_c1 = knn_graph(xyz1.reshape(-1,3), k=self.k, batch=batch_idx)
#self-attention (layer 1)
f0 = self.conv1(f0, edge_idx_c0)
f1 = self.conv1(f1, edge_idx_c1)
#cross-attention (layer 2)
edge_idx_f = knn(f1, f0, k=self.k, batch_x=batch_idx, batch_y=batch_idx, cosine=True)
edge_idx_f[1] += b * npts
f = self.conv2(torch.cat([f0,f1], dim=0), edge_idx_f)
f0, f1 = f[:(b*npts)], f[(b*npts):]
#convert f0, f1 to dense representation again
f0 = f0.reshape(b, npts, d)
f1 = f1.reshape(b, npts, d)
return f0, f1 | eduardohenriquearnold/fastreg | lib/models/attention.py | attention.py | py | 1,537 | python | en | code | 52 | github-code | 50 |
14370519259 | import pygame
import math
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
f = open("dialog.txt", "r")
rawText = f.read().split("\n")
f.close()
msg = rawText[0]
options = rawText[1:]
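# dialog.txt format (inferred from the reads above and finish() below): line 1
# is the message to display; every following line is one clickable option; the
# chosen option's text is written back to dialog.txt on exit. Example file:
#   Save changes?
#   Yes
#   No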
def finish(s):
pygame.quit()
f = open("dialog.txt", "w")
f.write(s)
f.close()
exit()
pygame.font.init()
FONT = pygame.font.Font(pygame.font.get_default_font(), 30)
msgRendered = FONT.render(msg, True, BLACK)
msgWidth = msgRendered.get_width()
msgHeight = msgRendered.get_height()
SCREENSIZE = [msgWidth + 100, msgHeight + 50 + msgHeight]
screen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE)
# Loop
running = True
c = pygame.time.Clock()
option_width = SCREENSIZE[0] / len(options)  # keep click mapping consistent with the render loop below
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.VIDEORESIZE:
SCREENSIZE = [*event.dict["size"]]
screen = pygame.display.set_mode(SCREENSIZE, pygame.RESIZABLE)
elif event.type == pygame.MOUSEBUTTONUP:
if pygame.mouse.get_pos()[1] < (SCREENSIZE[1] - msgHeight): continue
pos = pygame.mouse.get_pos()[0]
pos /= option_width
pos = math.floor(pos)
finish(options[pos])
# Message
screen.fill(WHITE)
screen.blit(msgRendered, ((SCREENSIZE[0] - msgWidth) / 2, ((SCREENSIZE[1] - msgHeight) - msgHeight) / 2))
# Options
option_width = SCREENSIZE[0] / len(options)
cum_x = 0
pygame.draw.rect(screen, BLACK, pygame.Rect(0, SCREENSIZE[1] - msgHeight, SCREENSIZE[0], msgHeight))
for o in options:
oRendered = FONT.render(o, True, WHITE)
screen.blit(oRendered, (cum_x + ((option_width - oRendered.get_width()) / 2), SCREENSIZE[1] - msgHeight))
cum_x += option_width
pygame.draw.line(screen, WHITE, (cum_x, SCREENSIZE[1] - msgHeight), (cum_x, SCREENSIZE[1]), 5)
# Flip
pygame.display.flip()
c.tick(60)
# End
finish("") | sillypantscoder/pygame_zip | dialog/dialog.py | dialog.py | py | 1,907 | python | en | code | 0 | github-code | 50 |
16558290798 | import io
import re
import time
from collections import defaultdict
import requests
import requests_cache
from imicrobe.util import grouper
requests_cache.install_cache('kegg_api_cache')
def get_kegg_annotations(kegg_ids):
all_kegg_annotations = {}
all_bad_kegg_ids = set()
# the missing_accessions_groups_of_10 generator returns groups of 10 KEGG ids
# that are not already in the database and that are not 'bad' KEGG ids
# the last group will be padded with 'None' if there are fewer than 10 KEGG ids
for group_of_10 in grouper(sorted(kegg_ids), n=10):
t0 = time.time()
kegg_id_list = [k for k in group_of_10 if k is not None]
#print(kegg_id_list)
print('requesting {} KEGG annotation(s)'.format(len(kegg_id_list)))
kegg_annotations, bad_kegg_ids = get_10_kegg_annotations(kegg_id_list)
print(' received {} in {:5.2f}s'.format(len(kegg_annotations), time.time()-t0))
all_kegg_annotations.update(kegg_annotations)
all_bad_kegg_ids.update(bad_kegg_ids)
return all_kegg_annotations, all_bad_kegg_ids
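# Example usage (sketch; requires network access to the KEGG REST API, and
# assumes 'K01467' is a valid KEGG Orthology id as in the docstring below):
#   annotations, bad_ids = get_kegg_annotations({'K01467'})
#   annotations['K01467']['NAME']  # -> list of NAME field values, e.g. ['ampC']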
kegg_orthology_field_re = re.compile(r'^(?P<field_name>[A-Z]+)?(\s+)(?P<field_value>.+)$')
def get_10_kegg_annotations(kegg_ids):
""" Request annotations for up to 10 KEGG ids. If a bad id is given there will be no response for it.
The response from the KEGG API looks like this:
ENTRY K01467 KO
NAME ampC
DEFINITION beta-lactamase class C [EC:3.5.2.6]
PATHWAY ko01501 beta-Lactam resistance
ko02020 Two-component system
MODULE M00628 beta-Lactam resistance, AmpC system
...
ENTRY K00154 KO
NAME E1.2.1.68
DEFINITION coniferyl-aldehyde dehydrogenase [EC:1.2.1.68]
BRITE Enzymes [BR:ko01000]
1. Oxidoreductases
1.2 Acting on the aldehyde or oxo group of donors
1.2.1 With NAD+ or NADP+ as acceptor
1.2.1.68 coniferyl-aldehyde dehydrogenase
K00154 E1.2.1.68; coniferyl-aldehyde dehydrogenase
DBLINKS COG: COG1012
GO: 0050269
GENES GQU: AWC35_21175
CED: LH89_09310 LH89_19560
SMW: SMWW4_v1c32370
SMAR: SM39_2711
SMAC: SMDB11_2482
...
return: a dictionary of dictionaries that looks like this
{
'K01467': {
'ENTRY': 'K01467 KO',
'NAME': 'ampC',
'DEFINITION': '',
'PATHWAY': '',
'MODULE': '',
...
},
'K00154': {
'ENTRY': 'K00154 KO',
'NAME': 'E1.2.1.68',
'DEFINITION': '',
'PATHWAY': '',
'MODULE': '',
...
}
}
and a (possibly empty) set of KEGG ids for which no annotation was returned
"""
debug = False
ko_id_list = '+'.join(['ko:{}'.format(k) for k in kegg_ids])
response = requests.get('http://rest.kegg.jp/get/{}'.format(ko_id_list))
if response.status_code == 404:
print('no annotations returned')
all_entries = {}
bad_kegg_ids = set(kegg_ids)
return all_entries, bad_kegg_ids
if response.status_code != 200:
error_msg = 'ERROR: response to "{}" is {}'.format(response.url, response.status_code)
print(error_msg)
raise Exception(error_msg)
else:
all_entries = defaultdict(lambda: defaultdict(list))
kegg_id = None
field_name = None
for line in io.StringIO(response.text).readlines():
field_match = kegg_orthology_field_re.search(line.rstrip())
if field_match is None:
# this line separates entries
kegg_id = None
field_name = None
else:
field_value = field_match.group('field_value')
                # named groups always appear in groupdict(), so test whether the
                # group actually matched: only a new field line sets field_name
                if field_match.group('field_name') is not None:
                    field_name = field_match.group('field_name')
                    if field_name == 'ENTRY':
                        kegg_id, *_ = field_value.split(' ')
                        # print('KEGG id: "{}"'.format(kegg_id))
                else:
                    # continuation line: keep the previous field_name
                    pass
all_entries[kegg_id][field_name].append(field_value)
# were any of the KEGG ids bad?
bad_kegg_ids = {k for k in kegg_ids} - {k for k in all_entries.keys()}
return all_entries, bad_kegg_ids | hurwitzlab/imicrobe-data-loaders | imicrobe/util/kegg.py | kegg.py | py | 4,889 | python | en | code | 0 | github-code | 50 |
70409103517 | import os, random
import requests
from bs4 import BeautifulSoup
import NBA as nba
class ziz() :
def hello(self):
print("---- Hello my name Ziz ----")
def NBA(self, args):
if args[0] == 'games':
return self.stringfy(nba.getGames())
def getGames(self):
url = 'https://reddit.nbabite.com/'
page = requests.get(url)
soup = BeautifulSoup(page.content,'html.parser')
# table_MN = pd.read_html(page)
competitions = soup.find(id='competitions')
heure_matchs = soup.find_all("div", {"class": "status"})
team_names = soup.find_all("div", {"class": "team-name"})
date = soup.find_all('div', {"class":"date d-sm-block d-none"})[0].text
# print(matches)
match = {}
text = date + "\n"
for i, heure in enumerate(heure_matchs):
s = heure.text + " : " +team_names[i+1].text + " @ " + team_names[i].text
text += '\n' + s
return self.stringfy(text)
def stringfy(self, text):
s = "```text\n"
s += text +'\n'
s += "```"
return s
| ahandan/discord_bot | bot/zizBot.py | zizBot.py | py | 1,163 | python | en | code | 0 | github-code | 50 |
7764637836 | import socket
from OpenSSL import SSL
import certifi
import datetime
hostname = 'services.bq.com'
port = 443
now = datetime.datetime.now()
context = SSL.Context(method=SSL.TLSv1_METHOD)
context.load_verify_locations(cafile=certifi.where())
conn = SSL.Connection(context, socket=socket.socket(socket.AF_INET, socket.SOCK_STREAM))
conn.settimeout(5)
conn.connect((hostname, port))
conn.setblocking(1)
# SNI must be set before the handshake so the server presents the right certificate
conn.set_tlsext_host_name(hostname.encode())
conn.do_handshake()
certs = conn.get_peer_cert_chain()
for (idx, cert) in enumerate(certs):
formated_date_after = datetime.datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
formated_date_before = datetime.datetime.strptime(cert.get_notBefore().decode('ascii'), '%Y%m%d%H%M%SZ')
print(f'{idx} subject: {cert.get_subject()}')
print(f' issuer: {cert.get_issuer()})')
    print('    Valid-From  :', formated_date_before)
    print('    Valid-Until :', formated_date_after)
    print('     --> Expires in : ', formated_date_after - now)
#if (now - formated_date_after) > now:
# print("ERRROR Expired")
#print(f' fingerprint: {cert.digest("sha1")}')
#print('----',formated_date_after)
print()
conn.close()
| dgardella/pys | check_cert.py | check_cert.py | py | 1,215 | python | en | code | 0 | github-code | 50 |
4635654089 | __author__ = "Younes Bouhadjar, Vincent Marois, Tomasz Kornuta"
import torch
import numpy as np
from miprometheus.problems.seq_to_seq.algorithmic.algorithmic_seq_to_seq_problem import AlgorithmicSeqToSeqProblem
class ScratchPadCommandLines(AlgorithmicSeqToSeqProblem):
"""
Class generating sequences of random bit-patterns and targets forcing the
system to learn the scratch pad problem (overwriting the memory).
    Minor modification I: the target may contain random command lines.
"""
def __init__(self, params):
"""
Constructor - stores parameters. Calls parent class ``AlgorithmicSeqToSeqProblem``\
initialization.
:param params: Dictionary of parameters (read from configuration ``.yaml`` file).
"""
# Set default number of bits for a given problem.
# This has to be done before calling base class constructor!
        params.add_default_params({
            'control_bits': 2,
            'data_bits': 8})
        # Call parent constructor - sets e.g. the loss function, dtype.
# Additionally it extracts "standard" list of parameters for
# algorithmic tasks, like batch_size, numbers of bits, sequences etc.
super(ScratchPadCommandLines, self).__init__(params)
self.name = 'ScratchPadCommandLines'
assert self.control_bits >= 2, "Problem requires at least 2 control bits (currently %r)" % self.control_bits
assert self.data_bits >= 1, "Problem requires at least 1 data bit (currently %r)" % self.data_bits
# Number of subsequences.
self.num_subseq_min = params["num_subseq_min"]
self.num_subseq_max = params["num_subseq_max"]
def generate_batch(self, batch_size):
"""
Generates a batch of samples of size ''batch_size'' on-the-fly.
.. note::
The sequence length is drawn randomly between ``self.min_sequence_length`` and \
``self.max_sequence_length``.
.. warning::
            All the samples within the batch will have the same sequence length.
:param batch_size: Size of the batch to be returned.
:return: DataDict({'sequences', 'sequences_length', 'targets', 'masks', 'num_subsequences'}), with:
- sequences: [BATCH_SIZE, SEQ_LENGTH, CONTROL_BITS+DATA_BITS],
- sequences_length: [BATCH_SIZE] (random value between self.min_sequence_length and self.max_sequence_length)
- targets: [BATCH_SIZE, SEQ_LENGTH, DATA_BITS],
- masks: [BATCH_SIZE, SEQ_LENGTH, 1]
- num_subsequences: [BATCH_SIZE, 1] (number of subsequences)
"""
# Store marker.
ctrl_store = np.zeros(self.control_bits)
ctrl_store[self.store_bit] = 1 # [1, 0, 0]
# Recall marker.
ctrl_recall = np.zeros(self.control_bits)
ctrl_recall[self.recall_bit] = 1 # [0, 1, 0]
# Empty data marker.
ctrl_data = np.zeros(self.control_bits) # [0, 0]
# Define control lines.
ctrl_aux = np.zeros(self.control_bits)
if self.use_control_lines:
if self.control_bits >= 3:
if self.randomize_control_lines:
# Randomly pick one of the bits to be set.
ctrl_bit = np.random.randint(2, self.control_bits)
ctrl_aux[ctrl_bit] = 1
else:
# Set last.
ctrl_aux[self.control_bits - 1] = 1
# Else: no control lines!
# assign markers
markers = ctrl_data, ctrl_store, ctrl_data
# number sub sequences
num_sub_seq = np.random.randint(self.num_subseq_min, self.num_subseq_max + 1)
# set the sequence length of each marker
seq_lengths = np.random.randint(low=self.min_sequence_length, high=self.max_sequence_length + 1,
size=num_sub_seq)
# generate subsequences for x and y
x = [np.random.binomial(1, self.bias, (batch_size, n, self.data_bits)) for n in seq_lengths]
# create the target
seq_length_tdummies = sum(seq_lengths) + seq_lengths.shape[0] + 1
dummies_target = np.zeros([batch_size, seq_length_tdummies, self.data_bits], dtype=np.float32)
targets = np.concatenate((dummies_target, x[-1]), axis=1)
# data of x and dummies
xx = [self.augment(seq, markers, ctrl_start=ctrl_store,
add_marker_data=True,
add_marker_dummy=False) for seq in x]
# data of x
data_1 = [arr for a in xx for arr in a[:-1]]
# this is a marker between sub sequence x and dummies
inter_seq = self.add_ctrl(np.zeros((batch_size, 1, self.data_bits)), ctrl_recall, ctrl_data)
# dummies of x
data_2 = [xx[-1][-1]]
# concatenate all parts of the inputs
inputs = np.concatenate(data_1 + [inter_seq] + data_2, axis=1)
# Set control lines for recall items.
inputs[:, inputs.shape[1]-seq_lengths[-1]:,0:self.control_bits] = np.tile(
ctrl_aux,(batch_size,seq_lengths[-1],1))
# Generate 3D ByteTensor for mask.
ptmasks = torch.zeros([batch_size, inputs.shape[1], 1]).type(torch.ByteTensor)
ptmasks[:, inputs.shape[1]-seq_lengths[-1]:, 0] = 1
# Return data_dict.
data_dict = self.create_data_dict()
data_dict['sequences'] = torch.from_numpy(inputs).type(self.app_state.dtype)
data_dict['targets'] = torch.from_numpy(targets).type(self.app_state.dtype)
data_dict['masks'] = ptmasks
data_dict['sequences_length'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * max(seq_lengths).item()
data_dict['num_subsequences'] = torch.ones([batch_size, 1]).type(torch.CharTensor) * num_sub_seq
return data_dict
if __name__ == "__main__":
""" Tests sequence generator - generates and displays a random sample"""
# "Loaded parameters".
from miprometheus.utils.param_interface import ParamInterface
params = ParamInterface()
params.add_config_params({#'control_bits': 4,
#'data_bits': 8,
'min_sequence_length': 1,
'max_sequence_length': 10,
'num_subseq_min': 2,
'num_subseq_max': 4})
batch_size = 10
# Create problem object.
    scratchpad = ScratchPadCommandLines(params)
# get a sample
sample = scratchpad[0]
print(repr(sample))
print('__getitem__ works.')
# wrap DataLoader on top
from torch.utils.data import DataLoader
problem = DataLoader(dataset=scratchpad, batch_size=batch_size, collate_fn=scratchpad.collate_fn,
shuffle=False, num_workers=0)
# generate a batch
import time
s = time.time()
for i, batch in enumerate(problem):
#print('Batch # {} - {}'.format(i, type(batch)))
pass
print('Number of workers: {}'.format(problem.num_workers))
print('time taken to exhaust a dataset of size {}, with a batch size of {}: {}s'
.format(scratchpad.__len__(), batch_size, time.time() - s))
# Display single sample (0) from batch.
batch = next(iter(problem))
scratchpad.show_sample(batch, 0)
print('Unit test completed.')
| vincentalbouy/mi-prometheus | miprometheus/problems/seq_to_seq/algorithmic/recall/scratch_pad_cl.py | scratch_pad_cl.py | py | 7,377 | python | en | code | 0 | github-code | 50 |
224890675 | '''
The purpose of the python code is as follows:
1) To load the trained classifier model to classify different hand signs
2) To capture the frames taken from users camera
3) Take the landmarks from the users hand
4) Load the landmark data into the model
5) Get the prediction from the model and print it in the frame
'''
import pickle
import cv2
import mediapipe as mp
import numpy as np
# Load the savel classifier model
model_dict = pickle.load(open('./model2.p', 'rb'))
model = model_dict['model']
cap = cv2.VideoCapture(0)
# Get the hands part from the picture using mediapipe
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
hands = mp_hands.Hands(static_image_mode=True, min_detection_confidence=0.3)
# Make the alphabet labels A-Z
labels_dict = {}
for i in range(26):
labels_dict[i] = chr(65+i)
while True:
data_aux = []
x_ = []
y_ = []
ret, frame = cap.read()
H, W, _ = frame.shape
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#Capture the landmarks from the hand region
results = hands.process(frame_rgb)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
frame, # image to draw
hand_landmarks, # model output
mp_hands.HAND_CONNECTIONS, # hand connections
mp_drawing_styles.get_default_hand_landmarks_style(),
mp_drawing_styles.get_default_hand_connections_style())
for hand_landmarks in results.multi_hand_landmarks:
for i in range(len(hand_landmarks.landmark)):
x = hand_landmarks.landmark[i].x
y = hand_landmarks.landmark[i].y
x_.append(x)
y_.append(y)
for i in range(len(hand_landmarks.landmark)):
x = hand_landmarks.landmark[i].x
y = hand_landmarks.landmark[i].y
data_aux.append(x - min(x_))
data_aux.append(y - min(y_))
x1 = int(min(x_) * W) - 10
y1 = int(min(y_) * H) - 10
x2 = int(max(x_) * W) - 10
y2 = int(max(y_) * H) - 10
# Get the prediction of model after loading the hand landmark data
prediction = model.predict([np.asarray(data_aux)])
# Get the character predicted
predicted_character = labels_dict[int(prediction[0])]
# Plot a rectangle on the hand region and inscribe text above this hand region
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 0), 4)
cv2.putText(frame, predicted_character, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.3, (0, 0, 0), 3,
cv2.LINE_AA)
# Show the frame
    cv2.imshow('frame', frame)
    # press 'q' to quit so the capture is released cleanly below
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| RexSan0x/Sign-Language-and-Emotion-Detection | Sign_Language_Training/inferece_sign_lang.py | inferece_sign_lang.py | py | 2,902 | python | en | code | 0 | github-code | 50 |
191147204 | import sqlite3
# connecting to db
con = sqlite3.connect('technical_test.db')
cur = con.cursor()
# printing each row in the db
for row in cur.execute('select * from famous_people;'):
print(row)
print('')
# closing connection to db
con.close() | MickyCompanie/technical_test_sneldev | query_db.py | query_db.py | py | 252 | python | en | code | 0 | github-code | 50 |
22593983762 | #-*- coding: utf-8 -*-
from cadproj.models import OrientadorOuMediador, Projeto, Curso, TipoDeProjeto, ModoDeApresentacao, Cidade, Recurso, Calouro, Turma
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class EstudanteOptions(admin.ModelAdmin):
list_display = ('nome','matricula')
#date_hierarchy = 'data_e_hora'
class OrientadorOptions(admin.ModelAdmin):
list_display = ('nome',)
#class CursoOptions(admin.ModelAdmin):
#list_display = ('nome',)
class ProjetoOptions(admin.ModelAdmin):
#inlines = [Pendencia_Inline, Contatamento_Inline]
list_display = ('estudante', 'titulo', 'orientador_ou_mediador')
fieldsets = (
(None, {
'fields': ('estudante','matricula','titulo','descricao')
}),
('Outros Componentes da Equipe', {
'fields': ('estudante2','estudante3','outros_componentes')
}),
('Curso', {
'fields': ('curso','turma')
}),
('Contato', {
'fields': ('cidade_onde_mora','fone','email')
}),
('Projeto', {
'fields': ('orientador_ou_mediador','colaborador','tipo_de_projeto','outro_tipo_de_projeto','palavra_chave1','palavra_chave2','palavra_chave3','cidade_de_abrangencia','local_e_ou_instituicao_de_abrangencia')
}),
('Apresentação', {'fields':('modo_de_apresentacao','outro_modo','recursos_para_a_apresentacao')
}),
)
list_per_page = 25
search_fields = ['estudante', 'titulo', 'descricao', 'matricula', 'fone']
list_filter = ('orientador_ou_mediador','curso','tipo_de_projeto','cidade_de_abrangencia')
admin.site.register(Curso)
admin.site.register(Projeto,ProjetoOptions)
admin.site.register(TipoDeProjeto)
admin.site.register(ModoDeApresentacao)
admin.site.register(OrientadorOuMediador)
admin.site.register(Cidade)
admin.site.register(Recurso)
admin.site.register(Calouro)
admin.site.register(Turma)
| jamur/Mostra-de-Projetos | cadproj/admin.py | admin.py | py | 1,960 | python | pt | code | 1 | github-code | 50 |
25257312328 | from __future__ import annotations
import typing
from flupy import flu
from nebulo.config import Config
from nebulo.gql.alias import FunctionPayloadType, MutationPayloadType, ObjectType, ResolveInfo, ScalarType
from nebulo.gql.parse_info import parse_resolve_info
from nebulo.gql.relay.node_interface import NodeIdStructure, to_node_id_sql
from nebulo.gql.resolve.resolvers.claims import build_claims
from nebulo.gql.resolve.transpile.mutation_builder import build_mutation
from nebulo.gql.resolve.transpile.query_builder import sql_builder, sql_finalize
from nebulo.sql.table_base import TableProtocol
from sqlalchemy import literal_column, select
async def async_resolver(_, info: ResolveInfo, **kwargs) -> typing.Any:
"""Awaitable GraphQL Entrypoint resolver
Expects:
info.context['engine'] to contain an sqlalchemy.ext.asyncio.AsyncEngine
"""
context = info.context
engine = context["engine"]
default_role = context["default_role"]
jwt_claims = context["jwt_claims"]
tree = parse_resolve_info(info)
async with engine.begin() as trans:
# Set claims for transaction
if jwt_claims or default_role:
claims_stmt = build_claims(jwt_claims, default_role)
await trans.execute(claims_stmt)
result: typing.Dict[str, typing.Any]
if isinstance(tree.return_type, FunctionPayloadType):
sql_function = tree.return_type.sql_function
function_args = [val for key, val in tree.args["input"].items() if key != "clientMutationId"]
func_call = sql_function.to_executable(function_args)
# Function returning table row
if isinstance(sql_function.return_sqla_type, TableProtocol):
# Unpack the table row to columns
return_sqla_model = sql_function.return_sqla_type
core_table = return_sqla_model.__table__
func_alias = func_call.alias("named_alias")
stmt = select([literal_column(c.name).label(c.name) for c in core_table.c]).select_from(func_alias) # type: ignore
stmt_alias = stmt.alias()
node_id_stmt = select([to_node_id_sql(return_sqla_model, stmt_alias).label("nodeId")]).select_from(stmt_alias) # type: ignore
((row,),) = await trans.execute(node_id_stmt)
node_id = NodeIdStructure.from_dict(row)
# Add nodeId to AST and query
query_tree = next(iter([x for x in tree.fields if x.name == "result"]), None)
if query_tree is not None:
query_tree.args["nodeId"] = node_id
base_query = sql_builder(query_tree)
query = sql_finalize(query_tree.alias, base_query)
((stmt_result,),) = await trans.execute(query)
else:
stmt_result = {}
else:
stmt = select([func_call.label("result")])
(stmt_result,) = await trans.execute(stmt)
maybe_mutation_id = tree.args["input"].get("clientMutationId")
mutation_id_alias = next(
iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
"clientMutationId",
)
result = {tree.alias: {**stmt_result, **{mutation_id_alias: maybe_mutation_id}}}
elif isinstance(tree.return_type, MutationPayloadType):
stmt = build_mutation(tree)
((row,),) = await trans.execute(stmt)
node_id = NodeIdStructure.from_dict(row)
maybe_mutation_id = tree.args["input"].get("clientMutationId")
mutation_id_alias = next(
iter([x.alias for x in tree.fields if x.name == "clientMutationId"]),
"clientMutationId",
)
node_id_alias = next(iter([x.alias for x in tree.fields if x.name == "nodeId"]), "nodeId")
output_row_name: str = Config.table_name_mapper(tree.return_type.sqla_model)
query_tree = next(iter([x for x in tree.fields if x.name == output_row_name]), None)
sql_result = {}
if query_tree:
# Set the nodeid of the newly created record as an arg
query_tree.args["nodeId"] = node_id
base_query = sql_builder(query_tree)
query = sql_finalize(query_tree.alias, base_query)
((sql_result,),) = await trans.execute(query)
result = {
tree.alias: {**sql_result, mutation_id_alias: maybe_mutation_id},
mutation_id_alias: maybe_mutation_id,
node_id_alias: node_id,
}
elif isinstance(tree.return_type, (ObjectType, ScalarType)):
base_query = sql_builder(tree)
query = sql_finalize(tree.name, base_query)
((query_json_result,),) = await trans.execute(query)
if isinstance(tree.return_type, ScalarType):
# If its a scalar, unwrap the top level name
result = flu(query_json_result.values()).first(None)
else:
result = query_json_result
else:
raise Exception("sql builder could not handle return type")
# Stash result on context to enable dumb resolvers to not fail
context["result"] = result
return result
| olirice/nebulo | src/nebulo/gql/resolve/resolvers/asynchronous.py | asynchronous.py | py | 5,380 | python | en | code | 90 | github-code | 50 |
26212162918 | from langchain.agents import Tool
from htmlTemplates import css, bot_template, user_template, disclaimer_text, box_template, user_img, bot_img
from typing import List
from streamlit.components.v1 import html
from agentFunctions import simple_report_search, report_summarizer, one_person_search, tearm_search
def create_tools():
# define usable Tools for the Agent
tools = [
Tool(
name = "TermSearch",
func=tearm_search,
description="use this tool if you are not sure about a term. Input the term"
),
Tool(
name = "SimpleReportSearch",
func=simple_report_search,
description="useful if you think that you need just a little information from the report to answer the User Question. Input a question what information you need and keywords, Suitable for a keywords-based search in a vector space"
),
Tool(
name = "ReportSummarizer",
func = report_summarizer,
description="useful if you think that you need a lot information from the report to answer the User Question. Input a question what information you need and keywords, Suitable for a keywords-based search in a vector space"
),
Tool(
name = "OnePersonSearch",
func= one_person_search,
description="useful if you think that you need personal information about a persons in the MPI to answer the User Question. Input a question with the name of the person you search for, Suitable for a keyword-based search in a vector space"
)
]
return tools
| kpister/prompt-linter | data/scraping/repos/HannesDiemerling~MinervasArchive/agentTools.py | agentTools.py | py | 1,654 | python | en | code | 0 | github-code | 50 |
43709154819 | #recommended way
admin_dict = {'1':'scie/065p','2':'scii/890p'}
#getting value for a key using [] brackets
print(admin_dict['1'])
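#using get() with a default avoids a KeyError when the key is missing
print(admin_dict.get('3', 'not found'))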
#dict() with keyword arguments: keys must be valid identifiers, so this cannot create integer keys
dict_func = dict(one='1',two='2')
#change value
admin_dict['1'] = 'steve/07'
print(admin_dict['1'])
#adding key value dictionary from one dict to another
admin_name = {'name':'steve','phone':'0756949393'}
admin_name.update(admin_dict)
print(admin_name) | steve-ryan/python-tutorial-for-beginners | dictionary.py | dictionary.py | py | 424 | python | en | code | 0 | github-code | 50 |
17060406313 | import json
import openpyxl
from case_study.models import Question
from core.decorators import staff_required
from django.db import IntegrityError
from django.http import JsonResponse
from django.shortcuts import render
from .common import populate_data, delete_model, patch_model
from ..forms import QuestionImportForm
schema_question = {
"endpoint": "/caseadmin/questions/",
"fields": [
{
"title": "Question",
"key": "body",
"widget": {
"template": "w-text.html",
},
"write": True,
},
]
}
def render_question_view(request, message=None, message_type=None):
data = populate_data(schema_question, Question.objects.all())
c = {
"title": "Question Admin",
"model_name": "Question",
"toolbar_new": True,
"toolbar_import": True,
"data": data,
"import_form": QuestionImportForm(),
"import_endpoint": "/caseadmin/questions/import",
"schema": schema_question,
"admin_message": message,
"admin_message_type": message_type,
"hard_delete_only": True,
}
return render(request, "case-admin.html", c)
@staff_required
def api_admin_question(request, question_id):
if request.method == "PATCH":
return patch_model(request, Question, schema_question, question_id)
elif request.method == "DELETE":
return delete_model(request, Question, question_id)
else:
return JsonResponse({
"success": False,
"message": "Unsupported HTTP method: " + request.method,
})
def question_import_txt(request, file, file_format):
if file.content_type != "text/plain":
return render_question_view(request,
"Failed to import questions as text/plain. "
"Please ensure your text file contains one question per line. ",
"alert-danger")
questions = []
    for question in file.file.readlines():
        q = question.decode("utf-8").strip()
        if not q:
            continue  # skip blank lines
        questions.append(Question(body=q))
try:
Question.objects.bulk_create(questions, ignore_conflicts=True)
return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success")
except IntegrityError as e:
return render_question_view(request,
"Failed to import questions as text/plain. "
"Please ensure your text file contains one question per line. "
"Error: " + str(e.args[0]),
"alert-danger")
def question_import_csv(request, file, file_format):
if file.content_type != "text/csv":
return render_question_view(request,
"Failed to import questions as text/csv. "
"Please ensure your csv file contains one question per line. ",
"alert-danger")
questions = []
lines = file.read().decode("utf-8").split("\n")
    for line in lines:
        q = line.strip()
        if not q:
            continue  # skip blank lines
        questions.append(Question(body=q))
try:
Question.objects.bulk_create(questions, ignore_conflicts=True)
return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success")
except IntegrityError as e:
return render_question_view(request,
"Failed to import questions as text/csv. "
"Please ensure your csv file contains one question per line. "
"Error: " + str(e.args[0]),
"alert-danger")
def question_import_json(request, file, file_format):
if file.content_type != "application/json":
return render_question_view(request,
"Failed to import questions as application/json. "
"Please ensure your json file contains a list of strings. ",
"alert-danger")
questions = []
file_text = file.read().decode("utf-8")
file_json = json.loads(file_text)
for question in file_json:
q = question.strip()
questions.append(Question(body=q))
try:
Question.objects.bulk_create(questions, ignore_conflicts=True)
return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success")
except IntegrityError as e:
return render_question_view(request,
"Failed to import questions as application/json. "
"Please ensure your json file contains a list of strings. "
"Error: " + str(e.args[0]),
"alert-danger")
def question_import_xlsx(request, file, file_format):
if not (str(file.content_type) == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xlsx')):
return render_question_view(request,
"Failed to import questions as xlsx. "
"Please ensure column A has a single question per cell. ",
"alert-danger")
questions = []
wb = openpyxl.load_workbook(file)
sheet = wb.worksheets[0]
    for col in sheet.iter_cols():
        for cel in col:
            if cel.value is None:
                continue  # skip empty cells; str(None) would import the string "None"
            q = str(cel.value).strip()
            questions.append(Question(body=q))
try:
Question.objects.bulk_create(questions, ignore_conflicts=True)
return render_question_view(request, "Successfully imported {} questions.".format(len(questions)), "alert-success")
except IntegrityError as e:
return render_question_view(request,
"Failed to import questions as xlsx. "
"Please ensure column A has a single question per cell. "
"Error: " + str(e.args[0]),
"alert-danger")
@staff_required
def api_admin_question_import(request):
if request.method == "POST":
form = QuestionImportForm(request.POST)
file = request.FILES["file"]
file_format = str(form["file_format"].value())
if file_format == "auto":
if file.content_type == "text/csv":
file_format = "csv"
elif file.content_type == "application/json":
file_format = "json"
elif file.content_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xlsx'):
file_format = "xlsx"
elif file.content_type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" or file.name.endswith('.xls'):
file_format = "xls"
elif file.content_type == "text/plain":
file_format = "txt"
if file_format == "csv":
return question_import_csv(request, file, file_format)
elif file_format == "json":
return question_import_json(request, file, file_format)
elif file_format == "xlsx":
return question_import_xlsx(request, file, file_format)
elif file_format == "xls":
return question_import_xlsx(request, file, file_format)
elif file_format == "txt":
return question_import_txt(request, file, file_format)
else:
return render_question_view(request,
"Unknown file format: {}".format(str(file_format)),
"alert-danger")
else:
return JsonResponse({
"success": False,
"message": "Unsupported method: " + request.method,
})
@staff_required
def view_admin_question(request):
if request.method == "GET":
return render_question_view(request)
elif request.method == "POST":
try:
body = json.loads(request.body)
Question.objects.create(body=body["body"])
return JsonResponse({
"success": True,
"message": "Question created",
})
except Exception as e:
return JsonResponse({
"success": False,
"message": "Failed to create a question:\n" + str(e.args[0]),
})
| 320011/case | core/case_admin/views/question.py | question.py | py | 8,610 | python | en | code | 1 | github-code | 50 |
41390824167 | import os
import json
import sqlite3
import requests
db_stored = os.path.join(os.path.dirname(__file__), 'qaset.db') # r'D:\Archive\Voibot\qabot\data\qabot\data\qaset.db'
url = 'http://10.1.163.22:5000/encode'
headers = {'Content-Type': 'application/json'}
def generate_all_features(db_stored, begin_id, end_id):
conn = sqlite3.connect(db_stored)
cursor = conn.cursor()
ques_cursor = cursor.execute('select question from qaset where id between ? and ?', (begin_id, end_id))
questions = []
for ques in ques_cursor:
questions.append(ques[0])
data = {
'id': 123,
'texts': questions
}
r = requests.post(url=url, headers=headers, data=json.dumps(data))
result = json.loads(r.text)
qvectors = result['result']
current_id = begin_id
while current_id <= end_id:
cursor.execute('update qaset set feature = ? where id = ?', (json.dumps(qvectors[current_id - begin_id]), current_id))
current_id += 1
conn.commit()
conn.close()
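# Assumed wire format, inferred from the calls above rather than verified against
# the encoder service: POST /encode takes {"id": <int>, "texts": [<str>, ...]} and
# replies with JSON whose "result" field holds one embedding vector per question.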
if __name__ == "__main__":
# begin_id = 1
# while(begin_id <= 36800):
# end_id = begin_id + 99
# generate_all_features(db_stored, begin_id, end_id)
# print('%d to %d is done.' % (begin_id, end_id))
# begin_id = end_id + 1
begin_id = 36811
end_id = 36843
generate_all_features(db_stored, begin_id, end_id)
| yaohsinyu/voibot | qabot/data/generate_all_feature.py | generate_all_feature.py | py | 1,382 | python | en | code | 0 | github-code | 50 |
4149350510 | """empty message
Revision ID: 96089780dc64
Revises: 45811f048651
Create Date: 2022-07-14 08:54:14.167701
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '96089780dc64'
down_revision = '45811f048651'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('rounds', sa.Column('funny_count', sa.SmallInteger(), nullable=True))
op.add_column('rounds', sa.Column('deeep_count', sa.SmallInteger(), nullable=True))
op.execute("UPDATE rounds SET funny_count = 0 WHERE true")
op.execute("UPDATE rounds SET deeep_count = 0 WHERE true")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('rounds', 'deeep_count')
op.drop_column('rounds', 'funny_count')
# ### end Alembic commands ###
| pamelafox/translation-telephone | migrations/versions/96089780dc64_.py | 96089780dc64_.py | py | 924 | python | en | code | 16 | github-code | 50 |
23275279979 | from rest_framework import mixins, status
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from api.models import UploadImage, UploadRequest
from api.serializers import UploadSerializer
from api.serializers.image_serializer import ImageSerializer
class UploadViewSet(mixins.CreateModelMixin, GenericViewSet):
queryset = UploadRequest.objects.all()
serializer_class = UploadSerializer
def create(self, request, *args, **kwargs):
serializer_data = {}
processed_images = []
request_data = request.data
        if 'images[]' in request_data:
            images_to_process = request.FILES.getlist('images[]')
        else:
            images_to_process = request_data.get('images', [])
for image_to_process in images_to_process:
transformed_image = UploadViewSet.process_image(image_to_process)
processed_images.append(transformed_image['id'])
        serializer_data['images_id'] = processed_images or None
serializer = self.get_serializer(data=serializer_data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@classmethod
def process_image(cls, image):
serializer_data = {}
image_to_process = image
serializer_data['source'] = isinstance(image_to_process, str) and \
UploadImage.ImageSource.Remote or UploadImage.ImageSource.Upload
serializer_data['source_path'] = ''
if serializer_data['source'] == UploadImage.ImageSource.Remote:
serializer_data['source_path'] = image_to_process
serializer_data['original_file'] = image_to_process
serializer_data['transformed_file'] = None
image_serializer = ImageSerializer(data=serializer_data)
image_serializer.is_valid(raise_exception=True)
image_serializer.save()
return image_serializer.data
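# Sketch of the accepted payloads, inferred from create() above: multipart uploads
# under "images[]", or a JSON body such as {"images": ["https://example.com/a.png"]}
# (URL is illustrative); remote URLs become ImageSource.Remote, uploads ImageSource.Upload.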
| ongtzewei/django-image-manipulation-webapp | api/views/upload.py | upload.py | py | 2,050 | python | en | code | 0 | github-code | 50 |
17604501818 |
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice_prev and W. Do not add the bias yet.
s = a_slice_prev * W
# Sum over all entries of the volume s.
Z = np.sum(s)
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
    Z = float(np.add(b, Z))
### END CODE HERE ###
return Z
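# Quick sanity check with hypothetical values: a 2x2x1 slice of ones, a 2x2x1
# filter of ones and b = 0.5 give Z = 4 * (1 * 1) + 0.5 = 4.5, e.g.
#   conv_single_step(np.ones((2, 2, 1)), np.ones((2, 2, 1)), np.array([[[0.5]]]))  # -> 4.5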
def convolve2d(image, kernel, padding=False, striding=1):
    # This function takes an image and a kernel and returns their convolution.
# Args:
# image: a numpy array of size [image_height, image_width].
# kernel: a numpy array of size [kernel_height, kernel_width].
# Returns:
# a numpy array of size [image_height, image_width] (convolution output).
    kernel = np.asarray(kernel)  # accept plain nested lists as kernels
    assert kernel.shape[0] == kernel.shape[1], "kernel must be square"
    assert striding != 0, "striding cannot be zero"
# The kernel is flipped so that we are not performing a "correlation" operation
kernel = np.flipud(np.fliplr(kernel))
kernel_h = kernel.shape[0]
kernel_w = kernel.shape[1]
h = kernel_h // 2
w = kernel_w // 2
image_h = image.shape[0]
image_w = image.shape[1]
# if padding turned on (to fix border effect) then set for "same" padding
if padding:
pad = (kernel_h - 1) // 2
else:
pad = 0
new_height = int(((image_h + 2*pad - kernel_h) / striding) + 1)
new_width = int(((image_w + 2*pad - kernel_w) / striding) + 1)
    image_out = np.zeros((new_height, new_width))
# Add padding to the input image
    image_padded = np.pad(image, ((pad, pad), (pad, pad)), 'constant', constant_values=(0, 0))
for x in range(h, image_h - h): # Loop over every pixel of the image
for y in range(w, image_w - w):
sum = 0
for m in range(kernel_h):
for n in range(kernel_w):
sum += kernel[m][n] * image_padded[x-h+m][y-w+n]
image_out[x,y] = sum
return image_out
img = misc.ascent()
plt.grid(False)
plt.gray()
plt.axis('off')
plt.imshow(img)
plt.show()
# This filter detects edges nicely
# It creates a convolution that only passes through sharp edges and straight
# lines.
#Experiment with different values for fun effects.
filter_edge = [[0, 1, 0], [1, -4, 1], [0, 1, 0]]
image_sharpen = convolve2d(img, filter_edge)
plt.imshow(image_sharpen, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
# A couple more filters to try for fun!
filter = [ [-1, -2, -1], [0, 0, 0], [1, 2, 1]]
filter = [ [0, 1, 1, 0], [1, 3, 3, 1], [-1, -3, -3, -1], [0, -1, -1, 0]]
weight = 1
#filter = [ [-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
# If all the digits in the filter don't add up to 0 or 1, you
# should probably do a weight to get it to do so
# so, for example, if your weights are 1,1,1 1,2,1 1,1,1
# They add up to 10, so you would set a weight of .1 if you want to normalize them
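# A hedged sketch of that normalization (hypothetical helper, not wired into the
# loop below, which resets weight to 1):
#   kernel_sum = sum(sum(row) for row in filter)
#   weight = 1.0 / kernel_sum if kernel_sum not in (0, 1) else 1.0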
i = img  # the transformation loop below reads the source image as `i`
i_transformed = np.copy(i)
size_x = i_transformed.shape[0]
size_y = i_transformed.shape[1]
print(size_x, size_y)
weight = 1
for x in range(2,size_x-2):
for y in range(2,size_y-2):
convolution = 0.0
        # Consistent 4x4 accumulation (the original unrolled version skipped the
        # y-1 image row and used the y+1 row twice): kernel row m pairs with
        # image row y-2+m, kernel column n with image column x-2+n.
        for m in range(4):
            for n in range(4):
                convolution = convolution + (i[x - 2 + n, y - 2 + m] * filter[m][n])
convolution = convolution * weight
if(convolution<0):
convolution=0
if(convolution>255):
convolution=255
i_transformed[x, y] = convolution
# Plot the image. Note the size of the axes -- they are 512 by 512
plt.gray()
plt.grid(False)
plt.imshow(i_transformed)
#plt.axis('off')
plt.show()
plt.imshow(image_sharpen, cmap=plt.cm.gray)
plt.axis('off')
plt.show() | sheldon-wall/DLSpecCourse4 | Week1.py | Week1.py | py | 5,168 | python | en | code | 0 | github-code | 50 |
33948199381 | import http.server
import socketserver
from .tools import HTTPTools
class Handler(http.server.SimpleHTTPRequestHandler):
""" Subclass of pex.proto.http module.
This subclass of pex.proto.http module represents
HTTP handler for web server.
"""
def log_request(self, fmt, *args) -> None:
pass
def send_status(self, code: int = 200) -> None:
self.send_response(int(code))
self.send_header("Content-type", "text/html")
self.end_headers()
class HTTPListener(object):
""" Subclass of pex.proto.http module.
This subclass of pex.proto.http module represents Python
implementation of HTTP listener.
"""
def __init__(self, host: str, port: int, methods: dict = {}) -> None:
""" Start HTTP listener on socket pair.
:param str host: host to listen
:param int port: port to listen
:param dict methods: methods, method names as keys and
method handlers as items
:return None: None
"""
super().__init__()
self.http_tools = HTTPTools()
self.handler = Handler
self.host = host
self.port = int(port)
self.sock = None
self.methods = methods
def listen(self) -> None:
""" Start HTTP listener.
:return None: None
:raises RuntimeError: with trailing error message
"""
try:
for method in self.methods:
setattr(self.handler, f"do_{method.upper()}", self.methods[method])
self.sock = socketserver.TCPServer((self.host, self.port), self.handler)
except Exception:
raise RuntimeError(f"Failed to start HTTP listener on port {str(self.port)}!")
def stop(self) -> None:
""" Stop HTTP listener.
:return None: None
:raises RuntimeError: with trailing error message
"""
try:
self.sock.server_close()
except Exception:
raise RuntimeError(f"HTTP listener is not started!")
def accept(self) -> None:
""" Accept connection.
:return None: None
:raises RuntimeError: with trailing error message
"""
try:
self.sock.handle_request()
except Exception:
raise RuntimeError(f"HTTP listener is not started!")
| EntySec/Pex | pex/proto/http/listener.py | listener.py | py | 2,334 | python | en | code | 25 | github-code | 50 |
73961830874 | from urllib.parse import urlencode
import requests
from dj_rest_auth.app_settings import api_settings
from dj_rest_auth.jwt_auth import set_jwt_cookies
from dj_rest_auth.models import get_token_model
from dj_rest_auth.utils import jwt_encode
from dj_rest_auth.views import LoginView
from django.conf import settings
from django.core.exceptions import ValidationError
from django.shortcuts import redirect
from rest_framework import serializers
from db.repository.user import UserRepository
def google_get_access_token(*, code: str, redirect_uri: str) -> str:
# Reference: https://developers.google.com/identity/protocols/oauth2/web-server#obtainingaccesstokens
data = {
"code": code,
"client_id": settings.GOOGLE_OAUTH2_CLIENT_ID,
"client_secret": settings.GOOGLE_OAUTH2_CLIENT_SECRET,
"redirect_uri": redirect_uri,
"grant_type": "authorization_code",
}
response = requests.post(settings.GOOGLE_ACCESS_TOKEN_OBTAIN_URL, data=data)
if not response.ok:
raise ValidationError("Failed to obtain access token from Google.")
return response.json()["access_token"]
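# The token endpoint's JSON reply normally also carries expires_in, scope and,
# on first consent, refresh_token; only access_token is consumed here.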
def google_get_user_info(*, access_token: str):
# Reference: https://developers.google.com/identity/protocols/oauth2/web-server#callinganapi
response = requests.get(
settings.GOOGLE_USER_INFO_URL, params={"access_token": access_token}
)
if not response.ok:
raise ValidationError("Failed to obtain user info from Google.")
return response.json()
class GoogleLoginApi(LoginView):
permission_classes = ()
authentication_classes = ()
class InputSerializer(serializers.Serializer):
code = serializers.CharField(required=False)
error = serializers.CharField(required=False)
def get(self, request, *args, **kwargs):
user_repository = UserRepository()
input_serializer = self.InputSerializer(data=request.GET)
input_serializer.is_valid(raise_exception=True)
validated_data = input_serializer.validated_data
code = validated_data.get("code")
error = validated_data.get("error")
if error or not code:
params = urlencode({"error": error})
return redirect(f"{settings.PLATFORM_URL}?{params}")
# api_uri = reverse('api:v1:auth:login-with-google')
api_uri = f"{settings.PLATFORM_URL}/api/v1/auth/login/google"
access_token = google_get_access_token(code=code, redirect_uri=api_uri)
user_data = google_get_user_info(access_token=access_token)
profile_data = {
"username": user_data["email"],
"first_name": user_data.get("givenName", ""),
"last_name": user_data.get("familyName", ""),
}
# We use get-or-create logic here for the sake of the example.
# We don't have a sign-up flow.
self.user = user_repository.get_or_create(
email=user_data["email"], **profile_data
)
token_model = get_token_model()
if api_settings.USE_JWT:
self.access_token, self.refresh_token = jwt_encode(self.user)
elif token_model:
self.token = api_settings.TOKEN_CREATOR(
token_model, self.user, self.serializer
)
if api_settings.SESSION_LOGIN:
self.process_login()
        response = redirect(f"{settings.PLATFORM_URL}/courses")
        if api_settings.USE_JWT:
            # JWT cookies can only be set when tokens were issued above.
            set_jwt_cookies(response, self.access_token, self.refresh_token)
        return response
| edu4ml/WSB-ML-PLATFORM-FORKED | api/apis/v1/auth/auth.py | auth.py | py | 3,513 | python | en | code | 0 | github-code | 50 |
29905578649 | # coding:utf-8
from unityagents import UnityEnvironment
import numpy as np
from network.DQN import DQNAgent
import matplotlib.pyplot as plt
import tensorflow as tf
import time
env = UnityEnvironment(file_name="../environment/Banana_Windows_x86_64/Banana.exe")
path = "../result/banana/"
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state = env_info.vector_observations[0]
state_size = len(state)
total_scores = []
scores = []
batch_size = 64
mean = 0
count = 0
eps = 1.0
eps_end = 0.01
decay = 0.999
max_t = 1000
gamma = 0.99
alpha = 1e-4
tau = 1e-3
max_memory_size = 50000
train = False
with tf.Session() as session:
    brain_agent = DQNAgent(session, state_size, action_size, max_memory_size, gamma, alpha, tau)
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
while mean < 13 and train:
        env_info = env.reset(train_mode=True)[brain_name]
        state = env_info.vector_observations[0]
        score = 0
time_b = time.time()
loss = 0
for i in range(max_t):
if np.random.random() > eps:
action = np.argmax(brain_agent.choose_action(state), axis=1)
else:
action = np.random.choice(action_size)
env_info = env.step(action)[brain_name]
next_state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
score += reward
brain_agent.store(state, action, reward, next_state, [done])
state = next_state
if brain_agent.step % 4 == 0:
loss += brain_agent.learn(batch_size)
if done:
break
scores.append(score)
total_scores.append(score)
eps = max(eps * decay, eps_end)
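        # Geometric decay: after n episodes eps = max(0.999**n, 0.01), so the
        # floor of 0.01 is reached after roughly ln(0.01)/ln(0.999) ~ 4600 episodes.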
print("\rEpisode: {},\tCurr Score: {},\tAverage Score: {:.2f},\tLoss:{:.4},\tEPS:{:.4},\tTime: {:.4}".format(count, score, np.mean(scores), loss/250.0, eps, time.time()-time_b), end="")
if count % 100 == 0 and count > 0:
mean = np.mean(scores)
print("\rEpisode: {}, \tAverage Score: {:.2f}".format(count, mean))
scores.clear()
count += 1
if train:
saver.save(session, path)
fig = plt.figure()
plt.plot(range(len(total_scores)), total_scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
else:
        saver.restore(session, path)
for _ in range(10):
done = False
env_info = env.reset(train_mode=False)[brain_name]
score = 0
state = env_info.vector_observations[0]
while not done:
            action = np.argmax(brain_agent.choose_action(state), axis=1)
env_info = env.step(action)[brain_name]
next_state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
score += reward
state = next_state
print("Score is ", score)
| lebesgue125/reinforce_learning | banana/dqn_agent.py | dqn_agent.py | py | 3,144 | python | en | code | 0 | github-code | 50 |
74910611675 | import numpy as np
import cv2
import pyrealsense2 as rs
import math
"""INTIALIZING REALSENSE DATA"""
# Initialize RealSense pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
pipeline.start(config)
# Initialize ORB detector
orb = cv2.ORB_create()
# Brute-force Matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Previous frame data
prev_gray = None
prev_kps = None
prev_descs = None
prev_matched_coords = None
current_matched_coords = None
MAX_MATCH_DISTANCE = 40 # You can change this threshold based on your needs
TOP_PERCENTAGE = 0.1 # Top 10% best matches
# LIST OF DISTANCE VECTORS
real_points = None
distance_vectors = None
euler_prediction = None
def rotation_matrix(theta_x, theta_y, theta_z):
Rx = np.array([[1, 0, 0],
[0, np.cos(theta_x), -np.sin(theta_x)],
[0, np.sin(theta_x), np.cos(theta_x)]])
Ry = np.array([[np.cos(theta_y), 0, -np.sin(theta_y)], # Note the negative sign for sin(theta_y)
[0, 1, 0],
[np.sin(theta_y), 0, np.cos(theta_y)]])
Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0],
[np.sin(theta_z), np.cos(theta_z), 0],
[0, 0, 1]])
R = np.dot(Rz, np.dot(Ry, Rx))
return R
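# Composition order above is R = Rz @ Ry @ Rx, so a column vector is rotated
# about x first, then y, then z.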
def euler_displacement(theta_x, theta_y, theta_z, point):
return np.dot(rotation_matrix(theta_x, theta_y, theta_z), point)
def distance_point(point):
distance = math.sqrt(point[0] ** 2 + point[1] ** 2 + point[2] ** 2)
return distance
def average_vectors(vectors):
if not vectors:
return None # return None if the list is empty
total_x = sum(vec[0] for vec in vectors)
total_y = sum(vec[1] for vec in vectors)
total_z = sum(vec[2] for vec in vectors)
num_vectors = len(vectors)
return [total_x / num_vectors, total_y / num_vectors, total_z / num_vectors]
def average_list(list):
return sum(list) / len(list)
def vector_between_points(p1, p2):
return [p2[i] - p1[i] for i in range(3)]
"""INTIATING BNO055 ROTATIONAL DATA"""
import os
import hid
os.environ["BLINKA_MCP2221"] = "1"
device = hid.device()
device.open(0x04D8, 0x00DD)
import board
import adafruit_bno055
i2c = board.I2C() # uses board.SCL and board.SDA
sensor = adafruit_bno055.BNO055_I2C(i2c)
last_val = 0xFFFF
"""MAIN LOOP"""
try:
while True:
"""RGB AND DEPTH DATA PROCESSING"""
# Create alignment
align_to = rs.stream.color
align = rs.align(align_to)
# Get frameset of depth and color
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
depth_image = np.asanyarray(aligned_depth_frame.get_data())
depth_intrinsics = frames.profile.as_video_stream_profile().intrinsics
color_frame = frames.get_color_frame()
# Convert color frame to numpy array
color_image = np.asanyarray(color_frame.get_data())
# Convert to grayscale for ORB
gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
# Detect ORB keypoints and descriptors
kps, descs = orb.detectAndCompute(gray, None)
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
depth_with_kps = cv2.drawKeypoints(depth_colormap, kps, None, color=(0, 255, 0), flags=0)
cv2.imshow('Depth with Keypoints', depth_with_kps)
# Match with previous frame's keypoints and descriptors, if available
if prev_gray is not None:
matches = bf.match(prev_descs, descs)
if len(matches) > 0:
# Sort the matches based on distance (lowest distance is better)
matches = sorted(matches, key=lambda x: x.distance)
# Filter matches based on a distance threshold
good_matches = [m for m in matches if m.distance < MAX_MATCH_DISTANCE]
"""PERCENTAGE BASED FILTERING"""
# 1. Percentage-based Filtering
num_good_matches = int(len(matches) * TOP_PERCENTAGE)
good_matches_percentage = matches[:num_good_matches]
# Extract (x, y) coordinates of matched keypoints
prev_matched_coords = [prev_kps[match.queryIdx].pt for match in good_matches_percentage]
current_matched_coords = [kps[match.trainIdx].pt for match in good_matches_percentage]
# Print matched coordinates (You can store or process them further based on your needs)
print("Previous Frame Matched Coordinates:", prev_matched_coords)
print("Current Frame Matched Coordinates:", current_matched_coords)
print("Depth of current:",
depth_image[int(current_matched_coords[0][1])][int(current_matched_coords[0][0])])
if len(good_matches) > 0:
matched_image = cv2.drawMatches(prev_gray, prev_kps, gray, kps, good_matches_percentage,
None) # or replace 'good_matches_percentage' with 'good_matches_ratio'
cv2.imshow('Filtered Matched keypoints', matched_image)
# Update the previous frame data
prev_gray = gray
prev_kps = kps
prev_descs = descs
# Exit on 'q'
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
pass
finally:
pipeline.stop()
cv2.destroyAllWindows() | vpark915/The-GingerLens | LocalPythonIdeas/FundamentalScripts/ORBDepthPrimitive.py | ORBDepthPrimitive.py | py | 5,665 | python | en | code | 1 | github-code | 50 |
2609538019 | import matplotlib.pyplot as plt
import scipy.optimize as optimize
import scipy.sparse as sparse
import scipy.sparse.linalg
from math import ceil
import numpy as np
import sys
def solve_one_time_step(u_0, mu_vec, temp_a=0, temp_b=0):
def create_main_matrix(n_x_points, mu_vec):
"""
Matrix for theta method
"""
tri_diag = np.ones((3, n_x_points))
tri_diag[1] = -2 * tri_diag[1]
for row in range(n_x_points):
tri_diag[:, row] *= float(mu_vec[row])
a_matrix = sparse.spdiags(tri_diag, [-1, 0, 1], n_x_points, n_x_points)
i_matrix = sparse.identity(n_x_points)
return a_matrix, i_matrix
u = u_0
bv = np.zeros_like(u_0)
bv[0] = mu_vec[0] * temp_a
    bv[-1] = mu_vec[-1] * temp_b
D2, I = create_main_matrix(n_x_points=u_0.shape[0], mu_vec=mu_vec)
lhs = (I - D2 / 2)
rhs = (I + D2 / 2) * u + bv
    u = sparse.linalg.spsolve(lhs, rhs).reshape(-1, 1)
return u
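# The step above is the Crank-Nicolson (theta = 1/2) update: with D2 the scaled
# second-difference operator mu * tridiag(1, -2, 1) and bv the boundary terms,
#     (I - D2/2) u^{n+1} = (I + D2/2) u^n + bv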
def solve_heat_equation(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c, plot=False):
"""
This function approximates a solution to the generic heat equation
u_0_func: function of x that returns the initial value.
t_final: Latest time to simulate to [s]
x_a: The lowest x-value of the domain [m]
x_b: The highest x-value of the domain [m]
temp_a: The temperature at x=a (Dirichlet BV) [deg C]
temp_b: The temperature at x=b (Dirichlet BV) [deg C]
n_x_points: The number of points required in the x-direction.
c: The constant in the heat equation.
"""
mu = 1 # Arbitrarily chosen, pick a higher number to increase the time step.
# This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme.
dx = (x_b - x_a) / n_x_points
dt = dx ** 2 * mu / c
n_t_points = ceil(t_final / dt)
x = np.linspace(x_a, x_b, n_x_points)
t = np.arange(0, t_final, dt)
    u_0 = np.reshape(u_0_func(x), (n_x_points, 1))
data = [u_0]
u = u_0
for t_i in range(n_t_points):
        u = solve_one_time_step(u_0=u, mu_vec=np.full(n_x_points, mu), temp_a=temp_a - 1 + np.cos(t_i * dt),
                                temp_b=temp_b - 1 + np.cos(t_i * dt))
data.append(u)
if (t_i % 1000) == 0:
print(".", end="")
result = np.hstack(data)
if plot:
X, Y = np.meshgrid(x, t)
fig = plt.figure()
ax = plt.axes(projection='3d')
# Creating plot
ax.plot_surface(X, Y, result[:, :-1].T)
ax.set_xlabel("X [m]")
ax.set_ylabel("T [s]")
plt.show()
return result
def initial_value(x):
return -6 * np.sin(np.pi * (x - 0.5)) + 2 * (x - 0.5)
def find_zeros(y_arr, a, b):
"""
Returns the x-values (assuming y_arr is on a linear interpolation mesh between a and b) where the y_arr mesh
function changes sign.
"""
zeros_i = []
for i in range(len(y_arr) - 1):
if y_arr[i] * y_arr[i + 1] < 0: # This means that there is a sign change.
zeros_i.append(i) # We want to store the index
# Let's now translate these indices into x values.
dx = (b - a) / len(y_arr)
zeros = []
for index in zeros_i:
zeros.append((index + 0.5) * dx) # Adding half a step because the zero is between i and i+1.
return zeros
# def find_zeros_func(f: callable, a, b):
# k = 1
# xs = np.linspace(a, b, 1000*k)
# t_zero = f(xs)
# sgn = np.sign(t_zero)
# zbd = []
#
# for i in range(0,len(sgn)-1):
# if sgn[i] != sgn[i+1]:
# zbd.append((xs[i]+xs[i+1])/2)
#
# while len(zbd) != 2 and k < 11:
# k += 1
# xs = np.linspace(a, b, 1000 * k)
# t_zero = f(xs)
# sgn = np.sign(t_zero)
# zbd = []
# for i in range(0, len(sgn) - 1):
# if sgn[i] != sgn[i + 1]:
# zbd.append((xs[i] + xs[i + 1]) / 2)
#
# if len(zbd) != 2:
# sys.exit("The function u_0 might not be a suitable choice. The function u_0 must be continuous and have exactly two zeros in [x_a,x_b]")
# h1 = zbd[0]
# h2 = zbd[1]
# h = [h1, h2]
#
# return h
# def find_zeros_array(u, a, b, tol):
# k = len(u)
# xs = np.linspace(a, b, k)
# sgn = np.sign(u)
# zbd = []
# zbd_id = []
# h = []
#
# for i in range(0,len(sgn)-1):
# if sgn[i] != sgn[i+1]:
# zbd.append(xs[i])
# zbd_id.append(i)
#
# if len(zbd) == 1:
# if abs(u[zbd_id[0]]) < tol:
# h.append(xs[zbd_id[0]])
# h.append(xs[zbd_id[0]])
# else:
# h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2)
# h.append((xs[zbd_id[0]] + xs[zbd_id[0] + 1]) / 2)
# elif len(zbd) == 2:
# if abs(u[zbd_id[0]]) < tol:
# h.append(xs[zbd_id[0]])
# else:
# h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2)
# if abs(u[zbd_id[1]]) < tol:
# h.append(xs[zbd_id[1]])
# else:
# h.append((xs[zbd_id[0]]+xs[zbd_id[0]+1])/2)
# else:
# h = []
#
# return h
def solve_model(u_0_func, t_final, x_a, x_b, temp_a, temp_b, n_x_points, c1, c2, c3, tol, n_t_points, plot=False):
"""
u_0_func: function of x that returns the initial value.
t_final: Latest time to simulate to [s]
x_a: The lowest x-value of the domain [m], x_a = 0
x_b: The highest x-value of the domain [m]
temp_a: The temperature at x=a (Dirichlet BV) [deg C]
temp_b: The temperature at x=b (Dirichlet BV) [deg C]
n_x_points: The number of points required in the x-direction.
    c1, c2, c3: The heat-equation constants in the left, middle and right regions.
    tol: Tolerance for zero finding.
    """
# This mu was initially set to 1/4 as it needed to be less than 1/2 for an explicit scheme.
dx = (x_b - x_a) / n_x_points
dt = t_final / n_t_points
mu1 = c1 * dt / dx ** 2
mu2 = c2 * dt / dx ** 2
mu3 = c3 * dt / dx ** 2
x = np.linspace(x_a, x_b, n_x_points)
t = np.arange(0, t_final, dt)
    u_0 = np.reshape(u_0_func(x), (n_x_points, 1))
data = [u_0]
# bd1 = []
# bd2 = []
# u_0 = u_0_func()
# h = find_zeros(u_0_func, x_a, x_b)
# bd1.append(h[0])
# bd2.append(h[1])
h_1_arr = []
h_2_arr = []
h_data = find_zeros(u_0, a=x_a, b=x_b)
print("Starting boundary points: ", h_data)
h_1_arr.append(h_data[0])
h_2_arr.append(h_data[1])
u = u_0
for t_i in range(n_t_points):
        mu_vector = np.ones(u.shape[0])
        mu_vector[x < h_1_arr[-1]] *= mu1
        mu_vector[np.logical_and(h_1_arr[-1] <= x, x < h_2_arr[-1])] *= mu2
        mu_vector[h_2_arr[-1] <= x] *= mu3
u = solve_one_time_step(u_0=u, mu_vec=mu_vector, temp_a=temp_a, temp_b=temp_b)
h_data = find_zeros(u, a=x_a, b=x_b)
        if len(h_data) == 2:  # only track boundaries when both zeros were found
h_1_arr.append(h_data[0])
h_2_arr.append(h_data[1])
data.append(u)
if (t_i % 1000) == 0:
print(".", end="")
result = np.hstack(data)
if plot:
X, Y = np.meshgrid(x, t)
fig = plt.figure()
ax = plt.axes(projection='3d')
# Creating plot
ax.plot_surface(X, Y, result[:, :-1].T)
ax.set_xlabel("X [m]")
ax.set_ylabel("T [s]")
plt.show()
solve_model(u_0_func=initial_value,
t_final=50,
x_a=0,
x_b=2,
temp_a=5,
temp_b=9,
n_x_points=100,
c1=0.01,
c2=0.04,
c3=0.01,
tol=10 ** (-10),
n_t_points=500,
plot=True)
# solve_heat_equation(u_0_func=initial_value,
# t_final=50,
# x_a=-1,
# x_b=2,
# temp_a=-2,
# temp_b=4,
# n_x_points=100,
# c=0.01,
# plot=True)
| liorarueff/MathematicalIce | main.py | main.py | py | 7,928 | python | en | code | 0 | github-code | 50 |
42680391223 | import unittest
from unittest.mock import patch
from lotto.cities import Cities
class TestCities(unittest.TestCase):
def test_get_city_wrong_input(self):
self.assertNotIn('vxvx', Cities.total_cities)
self.assertNotIn(1, Cities.total_cities)
with patch('builtins.input', return_value='Tom'):
with self.assertRaises(ValueError):
Cities.get_city_input()
def test_get_city_correct_input(self):
self.assertIn('bari', Cities.total_cities)
with patch('builtins.input', return_value='BAri'):
self.assertEqual(Cities.get_city_input(), 'bari')
if __name__ == '__main__':
unittest.main() | erydegio/lotto-game | test/test_cities.py | test_cities.py | py | 693 | python | en | code | 0 | github-code | 50 |
35062260193 | # This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
import bpy
import string
import pdb
import time
import json
# import urllib.request
import math
import operator
import ast
try:
import _pickle as pickle
except:
import pickle
import os
import base64
import zlib
# from materials import *
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty
dbg = False
bl_info = {
"name": "Minecraft motions import (*.mcmo)",
"description": "This addon allows you to import minecraft worlds and mob motions",
"author": "Aat Karelse",
"version": (0, 4, 0),
"blender": (2, 6, 3),
#"api": ???,
"location": "File > Import > minecraft stuff",
"warning": "Alpha",
"wiki_url": "https://github.com/aaps/MCmotions",
# "tracker_url": "http://projects.blender.org/tracker/index.php?func=detail&aid=29552",
"category": "Import-Export"}
# This class initiates and starts the state machine and uses the gathered data
# to construct the model in Blender.
class DataImporter:
def createMeshFromData(self, material, origin, verts, faces):
# Create mesh and object
mat = bpy.data.materials.new('TexMat')
if material in self.materials:
themat = self.materials
else:
themat = {material:{'name': 'Unknown - ' + str(material), 'color': (0, 0, 0), 'alpha':0, 'emittance':0 ,'textures':[]}}
# print(themat[material])
if 'textures' in themat[material] and len(themat[material]['textures']) > 0:
for texpath in themat[material]['textures']:
mtex = mat.texture_slots.add()
mtex.texture = self.textures[texpath]
# print('ok' + )
me = bpy.data.meshes.new(themat[material]['name']+' Mesh')
ob = bpy.data.objects.new(themat[material]['name'], me)
ob.location = origin
if len(themat[material]) >= 2:
mat.diffuse_color = themat[material]['color']
if len(themat[material]) >= 3 and themat[material]['alpha'] != 0:
mat.alpha = themat[material]['alpha']
mat.use_transparency = True
mat.transparency_method = 'RAYTRACE'
if len(themat[material]) >= 4 and themat[material]['emittance'] != 0:
mat.emit = themat[material]['emittance']
ob.show_name = True
ob.active_material = mat
# Link object to scene and make active
scn = bpy.context.scene
scn.objects.link(ob)
# Create mesh from given verts, faces.
me.from_pydata(verts, [], faces)
# Update mesh with new data
me.update()
return ob
def run(self, filepath, context):
start_time = time.time()
handle = open(filepath, 'rb')
total = pickle.loads(zlib.decompress(handle.read()))
indexi = 0
vertices = total['vertices']
faces = total['faces']
entitys = total['allhistory']
origins = total['origins']
self.materials = total['materials']
self.textures = total['textures']
total = None
extralist = {}
self.tempdir = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'textures'
try:
os.makedirs(self.tempdir)
except Exception:
print('some dir error should be ok !')
filelist = [ f for f in os.listdir(self.tempdir) ]
for f in filelist:
try:
                os.remove(os.path.join(self.tempdir, f))
except Exception:
print('file removal trouble no biggy')
if self.textures:
for texture in self.textures:
fileh = open(self.tempdir + os.sep + texture + ".png", "wb")
fileh.write(base64.b64decode(self.textures[texture]))
temp = {}
for material in self.materials:
if 'textures' in self.materials[material] and len(self.materials[material]['textures']) > 0:
for texpath in self.materials[material]['textures']:
img = bpy.data.images.load(self.tempdir + os.sep + texpath + '.png')
cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')
cTex.image = img
temp[texpath] = cTex
self.textures = temp
print(self.textures)
for mat in vertices:
if mat in vertices and mat in faces and mat in origins:
self.createMeshFromData(mat, origins[mat], vertices[mat], faces[mat] )
faces[mat] = None
vertices[mat] = None
else:
print(str(mat) + 'not in faces, vertices or origins !')
for value in entitys:
aentity = entitys[value]
if len( aentity['positions']) > 0:
firstloc = aentity['positions'][0]['pos']
firstloc = firstloc[0], firstloc[1]+2,firstloc[2]
headloc = firstloc[0],firstloc[1]+1, firstloc[2]
bpy.ops.mesh.primitive_cube_add(location=headloc)
head = bpy.context.object
head.rotation_mode = 'XYZ'
head.scale = (0.25, 0.25, 0.25)
bpy.ops.mesh.primitive_cube_add(location=firstloc)
ob = bpy.context.object
ob.rotation_mode = 'XYZ'
ob.scale = (0.25, 0.75, 0.25)
mat = bpy.data.materials.new("PKHG")
mobtype = aentity['type']
if mobtype == '50':
ob.name = "creeper"
mat.diffuse_color = (0.0, 1.0, 0.0)
elif mobtype == '51':
ob.name = "skeleton"
mat.diffuse_color = (1.0, 1.0, 1.0)
elif mobtype == '52':
ob.name = "spider"
mat.diffuse_color = (0.2, 0.1, 0.1)
elif mobtype == '54':
ob.name = "zombol"
mat.diffuse_color = (0.0, 0.3, 0.0)
elif mobtype == '55':
ob.name = "slime"
mat.diffuse_color = (0.5, 1, 0.5)
elif mobtype == '58':
ob.name = "enderman"
mat.diffuse_color = (0.5, 0.0, 0.5)
elif mobtype == '90':
ob.name = "pig"
mat.diffuse_color = (0.5, 0.4, 0.4)
elif mobtype == '65':
ob.name = "bat"
mat.diffuse_color = (1, 0.5, 0.2)
elif mobtype == '91':
ob.name = "sheep"
mat.diffuse_color = (1, 1, 1)
elif mobtype == '92':
ob.name = "cow"
mat.diffuse_color = (1, 0.2, 0.1)
elif mobtype == '94':
ob.name = "squid"
mat.diffuse_color = (0.2, 0.2, 1)
elif mobtype == '101':
ob.name = "rabbit"
mat.diffuse_color = (0.5, 0.1, 0.05)
elif len(mobtype) > 10 or mobtype == 'player':
if mobtype == 'player':
ob.name = "player: RECORDER"
mat.diffuse_color = (1, 0, 0)
else:
if 'type' in aentity:
ob.name = "player: " + aentity['type']
else:
ob.name = "player: unknown"
mat.diffuse_color = (1, 0.6, 0.4)
else:
mat.diffuse_color = (0.0, 0.0, 0.0)
ob.name = str(mobtype)
ob.active_material = mat
bpy.ops.object.select_all(action='DESELECT')
ob.select = True
head.select = True
put_on_layers = lambda x: tuple((i in x) for i in range(20))
bpy.context.scene.objects.active = ob
bpy.ops.object.parent_set()
maincam = bpy.data.cameras.new("Camera")
maincam.clip_start = 1
maincam.clip_end = 5000
cam_ob = bpy.data.objects.new("Camera", maincam)
cam_ob.rotation_euler = (0, math.radians(180), 0)
selfycam = bpy.data.cameras.new("Camera")
selfycam.clip_start = 1
selfycam.clip_end = 5000
selfy_cam_ob = bpy.data.objects.new("Camera", selfycam)
selfy_cam_ob.rotation_euler = (0, 0, 0)
selfy_cam_ob.location = (0, 0, 25)
selfy_cam_ob.layers[:] = put_on_layers({2})
cam_ob.layers[:] = put_on_layers({2})
ob.layers[:] = put_on_layers({2})
head.layers[:] = put_on_layers({2})
selfy_cam_ob.parent = head
cam_ob.parent = head
bpy.context.scene.objects.link(cam_ob)
bpy.context.scene.objects.link(selfy_cam_ob)
for posses in aentity['positions'][1:]:
frame_num = int((posses['time'] / 20) * 25)
bpy.context.scene.frame_set(frame_num)
ob.location = (posses['pos'][0], posses['pos'][2], posses['pos'][1]+0.75)
yaw = posses['yawpichhead'][1]
head.rotation_euler = (math.radians(posses['yawpichhead'][1]), 0, 0)
ob.rotation_euler = (math.radians(90), 0, math.radians(posses['yawpichhead'][0]) )
ob.hide = not bool(posses['alive'])
ob.hide_render = not bool(posses['alive'])
ob.keyframe_insert("hide")
ob.keyframe_insert("hide_render")
ob.keyframe_insert(data_path="location")
ob.keyframe_insert(data_path="rotation_euler")
if ob.animation_data:
for fc in ob.animation_data.action.fcurves:
fc.extrapolation = 'LINEAR'
for kp in fc.keyframe_points:
kp.interpolation = 'LINEAR'
print("Script finished after {} seconds".format(time.time() - start_time))
return {'FINISHED'}
# This is the import operator.
class MineCraftImport(bpy.types.Operator, ImportHelper):
'''Import form minecraft netrecorder some format (.mcmo)'''
bl_idname = "minecraft.importminecraftdump"
bl_label = "MineCraft EntityPaths"
# mc ep
filename_ext = ".mcmo"
filter_glob = StringProperty(
default="*.mcmo",
options={'HIDDEN'}
)
@classmethod
def poll(cls, context):
return True
def execute(self, context):
di = DataImporter()
return di.run(self.filepath, context)
def menu_func_import(self, context):
self.layout.operator(MineCraftImport.bl_idname, text="Mcmo import (.mcmo)")
def register():
bpy.utils.register_class(MineCraftImport)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(MineCraftImport)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
    bpy.ops.minecraft.importminecraftdump('INVOKE_DEFAULT')
1391672757 | # N = input('enter N: ')
# M = input('enter M: ')
import timeit
def draw_board():
global board
for line in transpose(board):
print(*line)
def transpose(matr):
res=[]
n=len(matr)
m=len(matr[0])
for j in range(m):
tmp=[]
for i in range(n):
tmp=tmp+[matr[i][j]]
res=res+[tmp]
return res
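# Example: transpose([[1, 2], [3, 4]]) returns [[1, 3], [2, 4]]; applying it twice
# restores the board, which is how check_rock below reuses check_column for rows.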
N, M = 10, 10
board = [['.' for _ in range(M)] for _ in range(N)]
turn = True
def fill(y1, y2, x):
for y in range(y1 + 1, y2):
global board, turn
if turn:
rock_color = 'W'
else:
rock_color = 'B'
if board[x][y] != rock_color:
board[x][y] = rock_color
def check_column(rock_color, x_placed, y_placed):
y1, y2 = None, None
for y, rock in enumerate(board[x_placed]):
if rock == rock_color:
if y1 == None:
y1 = y
elif y2 == None:
y2 = y
fill(y1, y2, x_placed)
break
x1, x2 = None, None
for y, rock in enumerate(board[y_placed]):
if rock == rock_color:
if x1 == None:
x1 = y
elif x2 == None:
x2 = y
fill(x1, x2, y_placed)
break
def check_rock(x, y):
global board, turn
if turn:
rock_color = 'W'
else:
rock_color = 'B'
if board[x][y] == '.':
board[x][y] = rock_color
board = transpose(board)
check_column(rock_color, x, y)
board = transpose(board)
check_column(rock_color, x, y)
draw_board()
turn = not turn
else:
print('error: field is taken')
def input_xy():
return [int(number) - 1 for number in input('Enter: ').split(' ')]
def count():
global board
white_count = 0
black_count = 0
for column in board:
for n in column:
if n == 'W':
white_count += 1
elif n == 'B':
black_count += 1
print(f'white has {white_count - black_count} more rocks')
def main():
x, y = input_xy()
while x != -1 and y != -1:
        check_rock(x, y)
count()
x, y = input_xy()
main() | matbitilya/rocks | 2.py | 2.py | py | 2,328 | python | en | code | 0 | github-code | 50 |
27653685459 | import collections
import os
import sys
import openpyxl
import database
from truckmate_email import TruckmateEmail
REPORT_EMAILS = [
'jwaltzjr@krclogistics.com'
]
class Rate(object):
def __init__(self, tariff, customers, origin, destination, break_value, is_min, rate):
self.tariff = tariff
self.customers = customers
self.origin = origin
self.destination = destination
self.break_value = break_value
self.is_min = (is_min.strip() == 'True')
self.rate = rate
def __repr__(self):
return 'Rate(tariff=%s, origin=%s, dest=%s, break=%s, rate=%s)' % (
self.tariff,
self.origin,
self.destination,
self.rate_break,
self.rate
)
@property
def three_digit_zip(self):
if self.destination.isdigit():
if 600 <= int(self.destination[:3]) <= 606:
return 'CHICOMM'
else:
return self.destination[:3]
elif self.destination == 'CHICOMM':
return 'CHICOMM'
elif self.destination in ['497LP', '497UP']:
return '497'
else:
return 'OTHER'
@property
def rate_break(self):
if self.is_min:
return 'MIN'
else:
rounded_break = round(self.break_value / 100.0) * 100.0
return rounded_break
class RateReport(object):
def __init__(self, file_name, datab):
sql_file_path = os.path.join(sys.path[0], file_name)
self.sql_query = self.load_query_from_file(sql_file_path)
self.dataset = self.fetch_data_from_db(self.sql_query, datab)
self.split_data = self.split_dataset(self.dataset)
def load_query_from_file(self, file_path):
with open(file_path, 'r') as sql_file:
return sql_file.read()
def fetch_data_from_db(self, query, db):
with db as datab:
with datab.connection.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall()
def split_dataset(self, dataset):
split_data = collections.defaultdict(
lambda: {
'breaks': set(),
'rates': collections.defaultdict(list)
}
)
for rate in dataset:
for origin in self.get_origins(rate):
rate_obj = Rate(rate.TARIFF, rate.CUSTOMERS, origin, rate.DESTINATION, rate.BREAK, rate.IS_MIN, rate.RATE)
if rate_obj.rate_break not in split_data[rate_obj.three_digit_zip]['breaks']:
if not rate_obj.is_min:
split_data[rate_obj.three_digit_zip]['breaks'].add(rate_obj.rate_break)
rate_tup = (rate_obj.tariff, rate_obj.customers, rate_obj.origin, rate_obj.destination)
split_data[rate_obj.three_digit_zip]['rates'][rate_tup].append(rate_obj)
return split_data
def get_origins(self, rate):
origins = []
if rate.ORIGIN_MS:
for origin in rate.ORIGIN_MS.split(', '):
origins.append(origin)
if rate.ORIGIN:
origins.append(rate.ORIGIN)
return origins
def export_as_xlsx(self):
wb = openpyxl.Workbook()
wb.remove_sheet(wb.active)
for zone in sorted(self.split_data.keys()):
ws = wb.create_sheet(zone)
self._excel_insert_titles(ws, zone)
self._excel_insert_data(ws, zone)
virtual_wb = openpyxl.writer.excel.save_virtual_workbook(wb)
return virtual_wb
def _excel_insert_titles(self, worksheet, zone):
titles = {
'A1': 'TARIFF',
'B1': 'CUSTOMER',
'C1': 'ORIGIN',
'D1': 'DESTINATION',
'E1': 'MIN'
}
row = 'F'
for b in sorted(self.split_data[zone]['breaks']):
cellname = row + str(1)
titles[cellname] = b
row = chr(ord(row) + 1)
for cell, title in titles.items():
worksheet[cell] = title
def _excel_insert_data(self, worksheet, zone):
current_row = 2
        for tariff_tup, rates in sorted(self.split_data[zone]['rates'].items()):
tariff, customers, origin, destination = tariff_tup
worksheet.cell(row=current_row, column=1).value = tariff
worksheet.cell(row=current_row, column=2).value = customers
worksheet.cell(row=current_row, column=3).value = origin
worksheet.cell(row=current_row, column=4).value = destination
for rate in rates:
current_column = self.find_column(worksheet, rate.rate_break)
current_cell = worksheet.cell(row=current_row, column=current_column)
current_cell.value = rate.rate
current_cell.number_format = '#,##0.00'
current_row += 1
def find_column(self, worksheet, header):
for cell in worksheet[1]:
if cell.value == header:
return cell.col_idx
else:
raise ValueError('No header found for %s' % header)
def main():
rate_report = RateReport('ratereport.sql', database.truckmate)
email_message = TruckmateEmail(
REPORT_EMAILS,
subject='Rate Report',
attachments=[
('rate_report.xlsx', rate_report.export_as_xlsx())
]
)
email_message.send()
if __name__ == '__main__':
main()
| jwaltzjr/truckmate | truckmate/ratereport.py | ratereport.py | py | 5,480 | python | en | code | 2 | github-code | 50 |
86734481945 | import pygame
class Ui:
def __init__(self, screen, player) -> None:
self.screen = screen
self.player = player
self.font = pygame.font.SysFont('Arial', 18)
self.big_font = pygame.font.SysFont('Arial', 32)
def render(self, score):
score_text = self.big_font.render(str(score), 1, (255, 255, 255))
hp_text = self.font.render(str(self.player.hp), 1, (255, 255, 255))
self.screen.blit(score_text, (self.screen.get_width() / 2 - score_text.get_width() / 2, score_text.get_height()))
self.screen.blit(hp_text, (self.player.rect.x + self.player.width / 2 - hp_text.get_width() / 2, self.player.rect.y + self.player.height)) | JustThomi/SpaceShooter | ui.py | ui.py | py | 692 | python | en | code | 0 | github-code | 50 |
71994975835 | c50=0
c20 = 0
c10 = 0
c1 = 0
print('Banco dos Crias')
saque = int(input('Amount to withdraw: '))
while saque !=0:
if saque - 50 >= 0:
c50 += 1
saque = saque -50
else:
break
while saque !=0:
if saque - 20 >= 0:
c20 += 1
saque = saque -20
else:
break
while saque !=0:
if saque - 10 >= 0:
c10 += 1
saque = saque -10
else:
break
while saque !=0:
if saque - 1 >= 0:
c1 += 1
saque = saque -1
else:
break
print(f'{c50} bill(s) of R$50')
print(f'{c20} bill(s) of R$20')
print(f'{c10} bill(s) of R$10')
print(f'{c1} bill(s) of R$1')
###if saque % 20 >= 0:
##print(f'{} Cedulas de 20R$')
| ArthPx/learning-code | d 71.py | d 71.py | py | 769 | python | en | code | 0 | github-code | 50 |
70896170076 | import os
import subprocess
import tempfile
from typing import Dict
import requests
from . import errors
from snapcraft.file_utils import calculate_hash, get_tool_path
from snapcraft.internal.cache import FileCache
from snapcraft.internal.indicators import download_requests_stream
class _Image:
def __init__(
self, *, base: str, snap_arch: str, url: str, checksum: str, algorithm: str
) -> None:
self.base = base
self.snap_arch = snap_arch
self.url = url
self.checksum = checksum
self.algorithm = algorithm
self._image_cache = FileCache(namespace="build-images-{}".format(self.base))
def _download_and_cache(self) -> str:
request = requests.get(self.url, stream=True, allow_redirects=True)
if not request.ok:
raise errors.BuildImageRequestError(
base=self.base, status_code=request.status_code
)
# First create the prefix as tempfile.TemporaryDirectory does not do that for you
os.makedirs(self._image_cache.file_cache, exist_ok=True)
with tempfile.TemporaryDirectory(
prefix=self._image_cache.file_cache
) as tmp_dir:
download_file = os.path.join(tmp_dir, "{}-vm".format(self.base))
download_requests_stream(request, download_file)
calculated_digest = calculate_hash(download_file, algorithm=self.algorithm)
if self.checksum != calculated_digest:
raise errors.BuildImageChecksumError(
expected=self.checksum,
calculated=calculated_digest,
algorithm=self.algorithm,
)
return self._image_cache.cache(
filename=download_file, algorithm=self.algorithm, hash=self.checksum
)
def get(self):
cached_file = self._image_cache.get(
hash=self.checksum, algorithm=self.algorithm
)
if not cached_file:
cached_file = self._download_and_cache()
# TODO verify nothing is using this as a backing store before implementing.
# image_cache.prune(keep_hash=image.checksum)
return cached_file
def _get_build_images(base: str) -> Dict[str, _Image]:
if base == "core16":
return dict(
amd64=_Image(
base="core16",
snap_arch="amd64",
url="https://cloud-images.ubuntu.com/releases/16.04/release-20180703/ubuntu-16.04-server-cloudimg-amd64-disk1.img", # noqa: E501
checksum="79549e87ddfc61b1cc8626a67ccc025cd7111d1af93ec28ea46ba6de70819f8c", # noqa: E501
algorithm="sha256",
)
)
elif base == "core18":
return dict(
amd64=_Image(
base="core18",
snap_arch="amd64",
url="https://cloud-images.ubuntu.com/releases/18.04/release-20180724/ubuntu-18.04-server-cloudimg-amd64.img", # noqa: E501
checksum="6d663a8fd5eddd916f4aef4fd06d0f7f4cf0bb191f170b8c84cd2adf297bc5c3", # noqa: E501
algorithm="sha256",
)
)
else:
raise KeyError(base)
def setup(*, base: str, snap_arch: str, size: str, image_path: str) -> None:
"""Setup a build image for base and snap_arch of size at image_path.
Example usage:
>>> from snapcraft.internal.build_providers import _images
>>> _images.setup(base="core18", snap_arch="amd64", size="10G",
image_path="images/core18.qcow2")
:param str base: the base of the build image to setup.
:param str snap_arch: the architecture of the base for the build image.
:param str size: the size of the disk for the build image.
:param str image_path: the path to create the build image.
:raises errors.BuildImageForBaseMissing:
if there is no build image defined for the requested base or snap
architecture.
:raises errors.BuildImageRequestError:
upon a network related issue that prevents download of the build image.
:raises errors.BuildImageChecksumError:
if the resulting downloaded build image does not match the expected
checksum.
:raises errors.BuildImageSetupError:
if a build image cannot be created due to tooling or other system
issues (e.g.; space issues).
"""
try:
image = _get_build_images(base)[snap_arch]
except KeyError as key_error:
raise errors.BuildImageForBaseMissing(
base=base, snap_arch=snap_arch
) from key_error
cached_file = image.get()
if os.path.dirname(image_path):
os.makedirs(os.path.dirname(image_path), exist_ok=True)
qemu_img_cmd = get_tool_path("qemu-img")
# qemu-img parameters:
# -q: quiet.
# -f: first image format.
# -b: backing file.
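    # Illustrative invocation (paths are examples only):
    #   qemu-img create -q -f qcow2 -b /path/to/cached/base images/core18.qcow2 10G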
try:
subprocess.check_call(
[
qemu_img_cmd,
"create",
"-q",
"-f",
"qcow2",
"-b",
cached_file,
image_path,
size,
]
)
except subprocess.CalledProcessError as process_error:
raise errors.BuildImageSetupError(
exit_code=process_error.returncode
) from process_error
| Tymbur/Archive_Encrypted.zip | snapcraft/data/usr/lib/python3/dist-packages/snapcraft/internal/build_providers/_images.py | _images.py | py | 5,347 | python | en | code | 0 | github-code | 50 |
31526327859 | #"D:\UCLA+USC\OPT\fetch\fetch_run.py"
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import torch
import torch.nn as nn
import torch.nn.functional as F
import streamlit as st
import os
from collections import defaultdict
import torchvision.models as models
start_date_2021 = pd.to_datetime("2021-01-01") # Start date for 2022
end_date_2022 = pd.to_datetime("2022-12-31") # End date for 2022
date_range_2021_2022 = pd.date_range(start_date_2021, end_date_2022, freq='D')
x_new = pd.DataFrame({'# Date': date_range_2021_2022})
script_directory = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(script_directory, 'fetch_LSTM_model.pth')
seq_length=90
input_size = seq_length
hidden_size = 64
num_layers = 2
output_size = seq_length
fetch_data_path= os.path.join(script_directory, 'data_daily.csv')
monthly_sums = defaultdict(float)
days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
monthly_sum_2022 = {month: 0 for month in range(1, 13)}
class LSTM(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.lstm = nn.LSTM(input_size, hidden_size)
self.linear = nn.Linear(hidden_size, output_size)
def forward(self, input):
x, _ = self.lstm(input)
x = self.linear(x)
return x
def normalize_column(column):
normalized = (column - column.min()) / (column.max() - column.min())
return torch.tensor(normalized.values, dtype=torch.float32)
def revert(x,Y_min,Y_max):
return Y_min+x*(Y_max-Y_min)
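# normalize_column maps Y to (Y - Y_min) / (Y_max - Y_min) in [0, 1]; revert is its
# inverse, Y_min + x * (Y_max - Y_min), returning predictions to raw receipt counts.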
def main():
#Load the trained LSTM model
model = LSTM(input_size, hidden_size, output_size)
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['model_state_dict'])
#Load the original data
raw = pd.read_csv(fetch_data_path)
raw['# Date'] = pd.to_datetime(raw['# Date'])
Y_min = raw['Receipt_Count'].values.astype(float).min()
Y_max = raw['Receipt_Count'].values.astype(float).max()
    Y = normalize_column(raw['Receipt_Count'])
Y = Y.reshape(-1, 1)
Y_new = Y.detach().reshape(-1)
#Use the loaded model to make predictions for 2022
for j in range(365):
with torch.no_grad():
prediction = model(Y_new[-seq_length:].view(-1,seq_length))
prediction = torch.tensor(prediction[0,-1].item()).view(1)
Y_new = torch.cat((Y_new, prediction))
output = revert(Y_new,Y_min,Y_max)
output2 = output.detach().tolist()
daily_number_of_receipts_2022 = output2[365:]
start_date= 0
for i in monthly_sum_2022.keys():
monthly_sum = sum(daily_number_of_receipts_2022[start_date:(start_date +(days_in_month[i-1]))])
monthly_sum_2022[i] +=monthly_sum
start_date += days_in_month[i-1]
x_to_be_plotted = monthly_sum_2022.keys()
Y_to_be_plotted = [monthly_sum_2022[key] for key in monthly_sum_2022.keys() ]
#Visualization
plt.figure(figsize=(10, 6))
#plt.plot(x_new['# Date'].tolist(), tensor_list, label='Predicted Number of Receipts per month', color='green', marker='o', linestyle='-')
plt.plot(x_to_be_plotted, Y_to_be_plotted, label='Predicted Number of Receipts per month in 2022', color='green', marker='o', linestyle='-')
plt.xlabel('Month for 2022')
plt.ylabel('Number of Receipts')
plt.title('Line Plot of Monthly Number of Receipts Over Time in 2022')
plt.legend()
plt.grid(True)
y_formatter = ScalarFormatter(useMathText=True)
y_formatter.set_scientific(False) # Disable scientific notation
y_formatter.set_powerlimits((0, 0)) # Set the exponent range to (0, 0)
plt.gca().yaxis.set_major_formatter(y_formatter)
#plt.show()
#Show the result using streamlit:
st.title("LSTM model App for fetch analysis By Xiaoshu Luo")
selected_month = st.number_input("Please select a month (1-12) in 2022", min_value=1, max_value=12, step=1, value=1)
plt.scatter(selected_month, Y_to_be_plotted[selected_month - 1], color='red', marker='o', s=100, label='Selected Month')
st.text(f"The month you selected is: {selected_month}")
st.text(f"The predicted monthly number of receipts in 2022 is: {int(monthly_sum_2022[selected_month])}")
st.pyplot(plt)
if __name__ == "__main__":
main()
| tree2601/Fetch_LSTM_model | fetch_run.py | fetch_run.py | py | 4,500 | python | en | code | 0 | github-code | 50 |
26211678718 | import os
import json
import subprocess
from transformers import AutoTokenizer, AutoModelForCausalLM
from openai import OpenAI
import requests
import torch
import tiktoken
import argparse
commit_schema = {
"name": "git_commit",
"description": 'Performs a git commit by calling `git commit -m "commit_message"`',
"parameters": {
"type": "object",
"properties": {
"commit_message": {
"description": "A short but descriptive commit message",
"type": "string"
}
},
"required": ["commit_message"]
}
}
def generate_commit_message_mistral(diff):
"""Generate commit message using Mistral AI."""
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokens = tokenizer.encode(diff)
tokens = tokens[:7999]
diff = tokenizer.decode(tokens)
prompt = "You are given the output of a git diff. Your task is to create a descriptive commit message based on this diff, max 15 words\n\n" + diff
data = {
"system": "You generate commit messages from a git diff that is provided to you. It is your job to create a descriptive commit message based on this diff. Do not include the diff in your commit message. Only include the commit message. The most important thing is to ensure you are only describing the changes that are marked with + or - in the diff. Do not include any other changes in your commit message.",
"model": "mistral",
"prompt": "{prompt}".format(prompt=prompt),
"stream": False,
}
response = requests.post("http://localhost:11434/api/generate", json=data)
json_strings = response.text.strip().split('\n')
responses = [json.loads(js)["response"] for js in json_strings]
result = "".join(responses)
return result
def generate_commit_message_globe_server(diff):
data = {"diff": diff}
response = requests.post("http://globe.engineer/api/scommit-server", json=data)
commit_message = response.text.strip()
return commit_message
def format_diff(diff):
added = []
removed = []
lines = diff.split('\n')
for line in lines:
if line.startswith('+'):
added.append(line)
elif line.startswith('-'):
removed.append(line)
formatted_diff = 'ADDED:\n' + '\n'.join(added) + '\nREMOVED:\n' + '\n'.join(removed)
return formatted_diff
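# Illustrative behaviour: format_diff("+a\n-b\n c") returns "ADDED:\n+a\nREMOVED:\n-b";
# context lines are dropped and only +/- lines survive.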
def generate_commit_message_gpt(diff):
"""Generate commit message using OpenAI's ChatGPT."""
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo')
if len(diff) == 0:
return 'default commit message'
tokens = tokenizer.encode(diff)
tokens = tokens[:15900]
diff = tokenizer.decode(tokens)
prompt = "Can you commit this diff for me:\n\n" + diff
response = client.chat.completions.create(messages=[
{'role': 'system', 'content': "You call the git commit function with short and informative commit messages"},
{'role': 'user', 'content': prompt},
],
functions=[commit_schema],
function_call={'name': 'git_commit'},
model='gpt-3.5-turbo-16k',
temperature=0.5)
args = json.loads(response.choices[0].message.function_call.arguments)
commit_message = args['commit_message']
return commit_message
def scommit():
"""Perform a git commit with a generated or provided message."""
parser = argparse.ArgumentParser()
parser.add_argument('-m', type=str, help='Commit message')
parser.add_argument('-mi', action='store_true', help='Using mistral')
parser.add_argument('-globe-server', action='store_true', help='Using globe server')
args, unknown = parser.parse_known_args()
try:
# Check if there are any commits
subprocess.check_output(['git', 'rev-parse', '--verify', 'HEAD'], text=True).strip()
commits_exist = True
except subprocess.CalledProcessError:
commits_exist = False
if commits_exist and args.mi:
diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()
formatted_diff = format_diff(diff)
message = generate_commit_message_mistral(formatted_diff)
message = message.replace('"', '\\"')
elif commits_exist and args.globe_server:
diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()
formatted_diff = format_diff(diff)
message = generate_commit_message_globe_server(formatted_diff)
message = message.replace('"', '\\"')
elif args.m is None and commits_exist:
diff = subprocess.check_output(['git', 'diff', 'HEAD'] + unknown, text=True).strip()
formatted_diff = format_diff(diff)
message = generate_commit_message_gpt(formatted_diff)
else:
message = args.m if args.m is not None else 'Initial commit'
cmd = f'git commit {" ".join(unknown)} -m "{message}"'
os.system(cmd)
if __name__ == '__main__':
scommit() | kpister/prompt-linter | data/scraping/repos/Globe-Engineer~semantic-commit/scommit~scommit.py | scommit~scommit.py | py | 5,025 | python | en | code | 0 | github-code | 50 |
25249831556 | import cv2
import pandas as pd
import time
# Can take a video file as input or video stream from the webcam
cap = cv2.VideoCapture("C:/Users/harsh/Downloads/video (1080p).mp4")
#cap = cv2.VideoCapture(0)
index = ["color", "color_name", "hex", "R", "G", "B"]
csv = pd.read_csv("C:/Users/harsh/Downloads/colors.csv", names=index, header=None)
r = g = b = x_pos = y_pos = 0
# Function to get the Color name from the dataset for which the RGB value is the closest.
def get_color_name(R, G, B):
minimum = 10000
for i in range(len(csv)):
d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"]))
if d <= minimum:
minimum = d
cname = csv.loc[i, "color_name"]
return cname
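# Illustrative call (added; the result depends on the colors.csv dataset): the
# function linearly scans every row and keeps the name with the smallest
# Manhattan distance in RGB space, e.g. get_color_name(255, 0, 0) would return
# whichever dataset entry lies closest to pure red.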
# Function to get x,y coordinates of mouse double click which will also give the RGB values
def draw_function(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
global b, g, r, x_pos, y_pos, clicked
clicked = True
x_pos = x
y_pos = y
b, g, r = frame[y, x]
b = int(b)
g = int(g)
r = int(r)
# Outer Loop To keep the Video Stream on
while True:
ret, frame = cap.read()
clicked = False
cv2.namedWindow('Video')
# draw_function will be called when the mouse event occured
cv2.setMouseCallback('Video', draw_function)
cv2.imshow('Video', frame)
    key = cv2.waitKey(1)
    # Inner loop runs when key (p) is pressed, pausing the video stream on a single frame
    # This loop is used for the main task, which is color detection
    if key == ord("p"):
while True:
cv2.imshow('Video', frame)
# Display the color name once draw function is called and clicked is true
if clicked:
# cv2.rectangle(image, start point, endpoint, color, thickness)-1 fills entire rectangle
cv2.rectangle(frame, (20, 20), (750, 60), (b, g, r), -1)
# Creating text string to display( Color name and RGB values )
text = get_color_name(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
# cv2.putText(img,text,start,font(0-7),fontScale,color,thickness,lineType )
cv2.putText(frame, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA)
# For very light colours we will display text in black colour
if r + g + b >= 600:
cv2.putText(frame, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)
clicked = False
# Key to get out of the loops
# Key(p) to resume the video stream and Key(esc) to get out of both the loops and end the execution
key = cv2.waitKey(1)
if key == ord("p"):
break
if key == 27:
break
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
| Harshil-Agrawal/RealTime_Color_Detection | Color_detection.py | Color_detection.py | py | 2,995 | python | en | code | 0 | github-code | 50 |
40559915413 | import random
# def rotto():
# num = [0, 0, 0, 0, 0, 0]
# for i in range(0, 6):
# num[i] = random.randint(1, 46)
# for j in num:
# if j == num[i]:
# i -= 1
# return num
# print(rotto())
lotto_number = []
def getRandomNumber():
number = random.randint(1, 45)
return number
while True:
if len(lotto_number) == 6:
break
random_number = getRandomNumber()
if random_number not in lotto_number:
lotto_number.append(random_number)
while True:
bonus_number = getRandomNumber()
if bonus_number not in lotto_number:
break
print(lotto_number, '+', bonus_number)
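# Alternative sketch (added; standard library only): random.sample draws
# without replacement, so seven distinct numbers give six picks plus a bonus.
#   nums = random.sample(range(1, 46), 7)
#   print(sorted(nums[:6]), '+', nums[6])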
| Getver/StartCoding | 00_BasicLecture/09_로또번호.py | 09_로또번호.py | py | 700 | python | en | code | 0 | github-code | 50 |
21652314287 | import sklearn
from sklearn.linear_model import Perceptron
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
iris=load_iris()
df=pd.DataFrame(iris.data,columns=iris.feature_names)
df['label']=iris.target
df.columns = [
'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
]
data = np.array(df.iloc[:100, [0, 1, -1]])
# iloc[row_start:row_end, col_start:col_end] excludes row_end, so +1 is needed to include the last row
X, y = data[:,:-1], data[:,-1]
y = np.array([1 if i == 1 else -1 for i in y])
# Perceptron
clf = Perceptron(fit_intercept=True,
max_iter=1000,
shuffle=True)
clf.fit(X, y)
print(clf.coef_)
print(clf.intercept_)
plt.figure(figsize=(10,10))
# Set fonts so the Chinese plot title renders correctly
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('鸢尾花线性数据示例')
plt.scatter(data[:50, 0], data[:50, 1], c='b', label='Iris-setosa',)
plt.scatter(data[50:100, 0], data[50:100, 1], c='orange', label='Iris-versicolor')
# Plot the perceptron's decision line
x_ponits = np.arange(4, 8)
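# Added note: the boundary below comes from w0*x + w1*y + b = 0,
# i.e. y = -(w0*x + b) / w1, using the fitted coef_ and intercept_.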
y_ = -(clf.coef_[0][0]*x_ponits + clf.intercept_)/clf.coef_[0][1]
plt.plot(x_ponits, y_)
# Other plot elements
plt.legend()  # show the legend
plt.grid(False)  # hide the grid
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.show() | yishishizi/machinelearning | sk.py | sk.py | py | 1,453 | python | en | code | 0 | github-code | 50 |
3132557366 | import torch
from torch import nn, reshape
from torch import device as torch_device
class Simple(nn.Module):
"""
Simple model
use mlp to do denoise
"""
def __init__(self, samples, chunk_size, channels, device):
super().__init__()
self.chunk_size = chunk_size
self.channels = channels
self.linear = nn.Linear(self.chunk_size*self.channels, self.chunk_size*self.channels, bias=False, device = device)
def forward(self, state, _input):
"""
Parameters
----------
x : Tensor
Input tensor of shape (batch_size, samples, channels)
state : Tensor
Input tensor of shape (batch_size, hidden_dim, channels)
Returns
-------
Tensor
State tensor of shape (batch_size, hidden_dim, channels)
Tensor
Output tensor of shape (batch_size, samples, channels)
"""
if len(_input.shape)==3:
batch_size = _input.shape[0]
else:
batch_size = 1
shape_saved = _input.shape
std = torch.std(_input)
_input = _input/std
_res = reshape(_input, (batch_size,-1))
_res = self.linear(_res)
_res = reshape(_res, shape_saved)
return None, (_input+_res)*std
@property
def is_recursive(self):
return True
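# Usage sketch (added; shapes and device are illustrative assumptions):
#   model = Simple(samples=None, chunk_size=1024, channels=2, device="cpu")
#   _, denoised = model(None, torch.randn(4, 1024, 2))  # (batch, chunk, channels)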
| zhouxinyu0723/audio-denoise-addon-v2 | ZENNet/model/simple.py | simple.py | py | 1,156 | python | en | code | 1 | github-code | 50 |
7408358101 | # import tools
import sys
inputfile_1=sys.argv[1]
inputfile_2=sys.argv[2]
#create dictionary
def list2dict(s):
d={}
for i in s:
if i in d.keys():
d[i]=d[i]+1
else:
d[i]=1
return d
#define a function to match key and value between 2 files
def cmplist (s1,s2):
d1=list2dict(s1)
d2=list2dict(s2)
d1_keys=set(d1.keys())
d2_keys=set(d2.keys())
intersect_keys=d1_keys.intersection(d2_keys)
added={}
for i in (d2_keys-d1_keys):
added[i]=d2[i]
removed={}
for i in (d1_keys-d2_keys):
removed[i]=d1[i]
modified={}
for i in intersect_keys:
if d1[i]!=d2[i]:
modified[i]=d1[i],d2[i]
same={}
for i in intersect_keys:
if d1[i]==d2[i]:
same[i]=d1[i]
return added, removed, modified,same
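# Illustrative example (added): with s1 = ['a', 'a', 'b'] and s2 = ['a', 'c'],
# cmplist(s1, s2) returns added {'c': 1}, removed {'b': 1},
# modified {'a': (2, 1)} and same {}.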
with open(inputfile_1,"r") as f1:
result=f1.read().split('\n')
print(result[0])
f1list=[]
for line in result:
f1list.append(line)
with open(inputfile_2,"r") as f2:
result2=f2.read().split('\n')
f2list=[]
for line in result2:
f2list.append(line)
added_out, removed_out, modified_out, same_out=cmplist(f2list,f1list)
#print out the results on screen
print("Number of observations in spec but not in output ----------")
print(len(added_out))
print("Number of observations in both docs--------")
print(len(same_out))
print("Number of observations in the output but not in spec---------")
print(len(removed_out))
print("Number of observations were modified in the output---------")
print(len(modified_out))
print("Below is the records in spec but not in the output-----")
print(added_out)
print("Below is the records in the output but not in spec------")
print(removed_out)
| Becky2012/Large-file-discrepancy-checks | check.py | check.py | py | 1,755 | python | en | code | 0 | github-code | 50 |
16325613872 | #!/usr/bin/env python
# coding: utf-8
# ECON 280A
#
# PS 1
#
# By Yi-Fan, Lin
# In[1]:
import pandas as pd
import numpy as np
from scipy.optimize import fsolve
from sympy import symbols, Eq, solve, nsolve
import matplotlib.pyplot as plt
# In[2]:
df = pd.read_excel("/Users/ricky/Documents/椰林大學/Berkeley/International Econ/Data for PS 1.xls"
, sheet_name="trade flows mfg", header=0, nrows=20, usecols="B:T")
df = df[1:]
#exports from i to j
# In[3]:
df.head(5)
# In[4]:
#name of countries
print(df.columns)
# In[149]:
theta = 5 #the median theta as in slides
nc = 19 #number of countries
t_cost = np.ones([nc, nc]) #change in trade costs
prod = np.ones(nc) #change in productivities
labor = np.ones(nc) #change in labors
total_prod = df.sum(axis=1) #total production, Y_n
total_cons = df.sum(axis=0) #total consumption
deficit = [total_cons.iloc[i] - total_prod.iloc[i] for i in range(nc)]
df_share = df.divide(total_cons.iloc[0], axis=0) #pi_n
def_share = deficit/np.array(total_prod)
# In[190]:
def gen_denom(ncoun, w, theta, t_cost):
#ncoun for the index of country
denom = 0
for k in range(nc):
temp = df_share.iloc[ncoun, k]*(w[k]*t_cost[ncoun, k])**(-theta)
denom = denom + temp
return denom
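# Added note: gen_denom computes sum_k pi_nk * (w_k * t_nk)^(-theta),
# the normalizing denominator of the trade-share update under changed wages/costs.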
def obj(w_vec, theta, t_cost):
eq_vec = [0 for i in range(nc)]
w_vec = w_vec
for i in range(nc):
rhs = 0
for n in range(nc):
denom = gen_denom(n, w_vec, theta, t_cost)
rhs = rhs + df_share.iloc[n, i]*(w_vec[i]*t_cost[n, i])**(-theta)*(w_vec[n]*total_cons.iloc[n])/denom
eq_vec[i] = w_vec[i]*total_cons.iloc[i] - rhs
return eq_vec
# In[151]:
def welfare(ncoun, w, theta, t_cost):
c_share = (w[ncoun]*t_cost[ncoun, ncoun])**(-theta)/gen_denom(ncoun, w, theta, t_cost)
return c_share**(-1/theta)
def output(theta, t_cost):
wage = fsolve(obj, np.ones(nc), (theta, t_cost))
real_wage = np.array([welfare(n, wage, theta, t_cost) for n in range(nc)])
price = wage/real_wage
return [wage, real_wage, price]
# In[191]:
base = output(theta, t_cost)
# In[192]:
for i in range(nc):
print(def_share[i]*100, (base[0])[i], (base[1])[i], (base[2])[i])
# In[154]:
t_cost_dec = t_cost*(1/1.3)
for i in range(nc):
t_cost_dec[i, i] = 1 #except for own
# In[155]:
#tariff cut
tarcut = output(theta, t_cost_dec)
# In[173]:
for i in range(nc):
print((df.columns)[i], def_share[i]*100, tarcut[0][i], tarcut[1][i], tarcut[2][i])
# In[159]:
#us-canada FTA
#canada: 3
#us: 18
t_cost_FTA = t_cost
t_cost_FTA[3, 18] = t_cost_FTA[3, 18]*(1/1.3)
t_cost_FTA[18, 3] = t_cost_FTA[18, 3]*(1/1.3)
# In[160]:
tarFTA = output(theta, t_cost_FTA)
# In[174]:
for i in range(nc):
print((df.columns)[i], def_share[i]*100, tarFTA[0][i], tarFTA[1][i], tarFTA[2][i])
# In[162]:
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams['figure.dpi'] = 100
# In[179]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[0], c='blue', label='Relative')
ax.scatter(def_share, base[1], c='green', label='Real')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('Baseline (graph 1.)')
plt.show()
# In[180]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, tarcut[0], c='blue', label='Relative')
ax.scatter(def_share, tarcut[1], c='green', label='Real')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('Overall Tariff Cut (graph 2.)')
plt.show()
# In[181]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, tarFTA[0], c='blue', label='Relative')
ax.scatter(def_share, tarFTA[1], c='green', label='Real')
plt.text(def_share[3], tarFTA[0][3]+0.01, 'CAN')
plt.text(def_share[18], tarFTA[0][18]+0.01, 'USA')
ax.legend()
plt.ylabel('Change in Wage')
plt.xlabel('Trade deficit')
plt.title('US-Canada FTA (graph 3.)')
plt.show()
# In[182]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[1], c='blue', label='Base')
ax.scatter(def_share, tarcut[1], c='green', label='Tariff cut')
ax.scatter(def_share, tarFTA[1], c='brown', label='FTA')
plt.text(def_share[3], tarFTA[1][3]+0.01, 'CAN')
plt.text(def_share[18], tarFTA[1][18]+0.01, 'USA')
ax.legend()
plt.ylabel('Change in Real Wage')
plt.xlabel('Trade deficit')
plt.title('Comparison (graph 4.)')
plt.show()
# In[183]:
table1 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[1], 'Tariff Cut': tarcut[1], 'FTA': tarFTA[1]})
table1.index = df.columns
print("Change in Real wage (Table 1)")
print(table1)
# In[184]:
table2 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[0], 'Tariff Cut': tarcut[0], 'FTA': tarFTA[0]})
table2.index = df.columns
print("Change in Relative wage (Table 2)")
print(table2)
# In[185]:
table3 = pd.DataFrame({'Deficit': def_share*100, 'Baseline': base[2], 'Tariff Cut': tarcut[2], 'FTA': tarFTA[2]})
table3.index = df.columns
print("Change in Price index (Table 3)")
print(table3)
# In[193]:
fig = plt.figure()
fig, ax = plt.subplots()
ax.scatter(def_share, base[2], c='blue', label='Base')
ax.scatter(def_share, tarcut[2], c='green', label='Tariff cut')
ax.scatter(def_share, tarFTA[2], c='brown', label='FTA')
plt.text(def_share[3], tarFTA[2][3]+0.01, 'CAN')
plt.text(def_share[18], tarFTA[2][18]+0.01, 'USA')
ax.legend()
plt.ylabel('Change in Price index')
plt.xlabel('Trade deficit')
plt.title('Comparison (graph 5.)')
plt.show()
# In[ ]:
| Yifan3018/Armington-model-in-international-trade | PS1.py | PS1.py | py | 5,541 | python | en | code | 1 | github-code | 50 |
15892916608 | from collections import defaultdict
def solution(dirs):
d = defaultdict(list)
cur_x = 0
cur_y = 0
x = [0, 0, 1, -1]
y = [1, -1, 0, 0]
cnt = 0
for e in dirs:
to_x = cur_x
to_y = cur_y
if e == 'U':
to_x += x[0]
to_y += y[0]
elif e == 'D':
to_x += x[1]
to_y += y[1]
elif e == 'R':
to_x += x[2]
to_y += y[2]
elif e == 'L':
to_x += x[3]
to_y += y[3]
if -5 <= to_x <= 5 and -5 <= to_y <= 5:
flag = False
if d[(cur_x, cur_y)]:
for pos in d[(cur_x, cur_y)]:
dx, dy = pos
if to_x == dx and to_y == dy:
flag = True
break
if d[(to_x, to_y)]:
for to_pos in d[(to_x, to_y)]:
to_dx, to_dy = to_pos
if cur_x == to_dx and cur_y == to_dy:
flag = True
break
d[(cur_x, cur_y)].append((to_x, to_y))
cur_x = to_x
cur_y = to_y
if flag is False:
cnt += 1
return cnt
print(solution("ULURRDLLU"))
print(solution("LULLLLLLU"))
print(solution("LLLLRRRRRRRRRRLLLLUUUUUUUUULLLLLLL"))
print(solution("ULURRDLLUL"))
print(solution("LLLLLLL"))
print(solution("LLLLLLLDRU"))
print(solution("LLLLLLLDRUD"))
print(solution("URULDD"))
# Explanation
# Solved with a dictionary, checking two cases:
# whether the next coordinate was already visited from the current position,
# and whether the current position was already visited from the destination.
# In both cases the segment has already been walked, so it does not count as a first-time path. | hyunsoolee991/cs | algorithm/programmers/방문 길이.py | 방문 길이.py | py | 1,866 | python | ko | code | 0 | github-code | 50 |
71346745115 | from os import path
from mediakit.utils.files import increment_filename_if_exists
from mediakit.utils.commands import run_command_in_background
from mediakit.constants import FFMPEG_BINARY
VIDEO_FORMATS = {"mp4"}
class ConversionOptions:
NO_AUDIO = "-an"
def merge_video_and_audio(
video_path, audio_path, output_file_path, output_format="mp4"
):
final_output_file_path = increment_filename_if_exists(output_file_path)
command = (
f'{FFMPEG_BINARY} -i "{video_path}" -i "{audio_path}" '
f"-vcodec copy -f {output_format} "
f'"{final_output_file_path}"'
)
run_command_in_background(command)
def convert_media(file_path, output_file_path, output_format, options=[]):
final_output_file_path = increment_filename_if_exists(output_file_path)
command = (
f'{FFMPEG_BINARY} -i "{file_path}" '
+ ("-vcodec copy " if output_format in VIDEO_FORMATS else "")
+ f"-f {output_format} "
+ f'"{final_output_file_path}" '
+ " ".join(options)
)
run_command_in_background(command)
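# Usage sketch (added; file names are hypothetical):
#   merge_video_and_audio("clip_video.mp4", "clip_audio.m4a", "clip.mp4")
#   convert_media("clip.mp4", "no_audio.mp4", "mp4", [ConversionOptions.NO_AUDIO])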
| diego-aquino/mediakit | mediakit/media/convert.py | convert.py | py | 1,080 | python | en | code | 11 | github-code | 50 |
36424255650 | #http://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html#sphx-glr-auto-examples-model-selection-plot-grid-search-digits-py
from __future__ import print_function
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
import data_service
print(__doc__)
scale_data = True
transform_data = True
random_slice = 10000
random_seed = 777
dataset="kdd"
test_size=0.5
x_train, x_test, y_train, y_test = data_service.load_and_split_data(scale_data=scale_data,
transform_data=transform_data,
random_slice=random_slice, random_seed=random_seed,
dataset=dataset, test_size=test_size)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.001, 0.0001],
'C': [1, 10, 100, 1000], 'class_weight': ['balanced', None]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000], 'class_weight': ['balanced', None]}]
# neural_network.MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, solver=solver,
# activation=activation, alpha=alpha, random_state=1, max_iter=max_iter, learning_rate=learning_rate,
# learning_rate_init=learning_rate_init)
tuned_parameters = [{
'solver': ['lbfgs'],
'learning_rate_init': [0.0001, 0.01, 1],
'hidden_layer_sizes': [(100,)],
'activation': ['relu'],
'alpha': [0.0001, 0.01, 1]
},
{
'solver': ['sgd'],
'learning_rate': ['constant', 'invscaling', 'adaptive'],
'learning_rate_init': [0.0001, 0.01, 1],
'hidden_layer_sizes': [(100,)],
'activation': ['relu'],
'alpha': [0.0001, 0.01, 1]
}
]
# The MLP parameter grid above replaces the SVC grid; likewise the estimator
# below replaces the SVC. Swap the assignments to tune the SVC instead.
estimator = SVC()
estimator = MLPClassifier()
scores = ['precision_macro', 'recall_macro', 'accuracy']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(estimator, tuned_parameters, cv=5,
scoring=score, n_jobs=-1)
clf.fit(x_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_pred = clf.predict(x_test)
print(classification_report(y_test, y_pred))
print() | boyko11/ML1-SupervisedLearning | grid_search.py | grid_search.py | py | 3,243 | python | en | code | 0 | github-code | 50 |
36046350963 | import os
class CustomValidator:
@staticmethod
def path_validate(path: str) -> str:
"""
try:
path = validate_path(" my /path /with spaces ")
print(f"The path {path} is valid.")
except FileNotFoundError as e:
print(e)
:param path:
:return:
"""
# Remove spaces from the path
path = path.replace(" ", "")
# Check if the path exists
if not os.path.exists(path):
raise FileNotFoundError(f"The path {path} does not exist.")
return path
| jerome-neo/Command-line-Data-Processor | validator/custom_validator.py | custom_validator.py | py | 579 | python | en | code | 0 | github-code | 50 |
3912202681 | from rk_diagram.models import RKPipeline, LocalizationAlgorithm, TransformNode
from rk_diagram.visualize import RKModelVisualizer
from rk_diagram.models.graph import EdgeType, Edge
import numpy as np
class HierarchicalFeatureExtractor1():
'''
    Generates a hierarchical feature extractor
TODO: Think about 2+ levels
'''
def __init__(self):
self.children = {}
def predict(self, X):
        self.children['range_measure'] = self.range_measure(X)
        self.children['max_measure'] = self.max_measure(X)
def range_measure(self, X): # computes the range as a feature
return np.max(X) - np.min(X)
def max_measure(self, X): # computes the max as a measure
return np.max(X)
class SimpleLinkage():
'''
Simple linkage:
A simple linkage function.
    Compares the values of two nodes and draws a link if the euclidean distance is
    less than the threshold.
Sends back a list of edges
'''
def __init__(self, threshold):
        self.threshold = threshold
def link(self, nodes):
        edges = []
        for i in range(len(nodes)):
            l = i + 1
            while l < len(nodes):
                # link the pair when their values are close enough
                if np.linalg.norm(nodes[i].value - nodes[l].value) < self.threshold:
                    l_larger = nodes[i].value > nodes[l].value
                    fid = nodes[l].from_id if l_larger else nodes[i].from_id
                    tid = nodes[i].from_id if l_larger else nodes[l].from_id
                    etype = EdgeType.DIRECTED
                    if nodes[i].value == nodes[l].value:
                        etype = EdgeType.UNDIRECTED
                    edges.append(Edge(from_id=fid, to_id=tid, type=etype))
                l += 1
        return edges
class MaxLocalizer(LocalizationAlgorithm):
'''
localizes the max position
'''
def localize(self, X):
return np.argmax(X) # returns the max position of X
class MinMaxNormalizerNode(TransformNode):
'''
min max normalizer
takes the max and min as a transform node
and will normalize the data
'''
def __init__(self):
self._fitted = False
def fit(self, X):
self._fitted = True
self.min = np.min(X)
self.max = np.max(X)
def transform(self, X):
return (X - self.min) / (self.max - self.min)
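# Added note: MinMaxNormalizerNode.transform maps min -> 0 and max -> 1,
# e.g. fit([0, 10]) followed by transform(5) gives 0.5.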
class StaticFilter():
'''
This static filter takes simple boundary conditions,
a min and max, and provides a filter function over it
'''
def __init__(self, min=None, max=None):
self._min = min
self._max = max
def filter(self, val):
if self._min is not None and val < self._min:
return False
if self._max is not None and val > self._max:
return False
return True
def main(X):
rk_models = []
example_pipeline = RKPipeline(preprocess_nodes=[MinMaxNormalizerNode()],
localization_algorithm=MaxLocalizer(),
hierarchical_embedding_nodes= [
{
"HFeatureExtractor1": HierarchicalFeatureExtractor1()
}
],
filter_functions=[
{
"HFeatureExtractor1" :
{
'range_measure': StaticFilter(min=.2, max=.8),
'max_measure': StaticFilter(min=0, max=1)
}
}
], # question: how to define which limits for which measure. Each filter and linkage has to be BY CLUSTER
linkage_function=SimpleLinkage(threshold=.8))
example_pipeline.build()
example_pipeline.fit(X)
rk_model = example_pipeline.transform(X)
rk_models.append(rk_model)
visualizer = RKModelVisualizer(method="circular")
visualizer.build(rk_models) # build requires a list of rk_models
visualizer.show()
def parse_arguments():
    X = [1, 2, 3, 4]
    main(X)
| andorsk/rk_toolkit | example/example.py | example.py | py | 4,225 | python | en | code | 2 | github-code | 50 |
637859362 | from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.wait import WebDriverWait
from crud import crud
import sys
sys.path.insert(1, './model')
from algoritmo import algoritmoModel
class scraping:
__obj_crud = None
__obj_model = None
def __init__(self):
self.__obj_crud = crud()
self.__obj_model = algoritmoModel()
def switch(self, case, campeonato):
if case == 'times':
if campeonato == 'premier':
return self.timesPremier()
else:
print("campeonato invalido")
elif case == 'rodadas':
if campeonato == 'premier':
return self.rodadasPremier()
else:
print("campeonato invalido")
else:
print("metódo invalido")
def timesPremier(self):
servico = Service(ChromeDriverManager().install())
url = "https://www.sofascore.com/tournament/football/england/premier-league/17#52186"
navegador = webdriver.Chrome(service=servico)
WebDriverWait(navegador, timeout=10)
navegador.get(url)
click = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[3]')
click.click()
        # WAIT TIME BEFORE TRIGGERING THE ACTION
navegador.implicitly_wait(20)
dados = navegador.find_elements(By.CLASS_NAME, 'uTWnT')
if(dados):
time = []
temporada = '2023/2024'
for dado in dados:
if dado.text and dado.text != '0':
time.append(dado.text)
else:
print("Sem dados")
for t1 in time:
print(t1)
# self.__obj_crud.inserir(t1,temporada)
def rodadasPremier(self):
servico = Service(ChromeDriverManager().install())
url = "https://www.sofascore.com/tournament/football/england/premier-league/17#52186"
navegador = webdriver.Chrome(service=servico)
WebDriverWait(navegador, timeout=10)
navegador.get(url)
click = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[1]/div/div[1]/div/h2[2]')
click.click()
        # WAIT TIME BEFORE TRIGGERING THE ACTION
navegador.implicitly_wait(20)
click2 = navegador.find_element(By.XPATH, '//*[@id="__next"]/main/div/div[2]/div[2]/div[2]/div/div/div[2]/div[2]')
click2.click()
        # WAIT TIME BEFORE TRIGGERING THE ACTION
navegador.implicitly_wait(20)
click3 = navegador.find_element(By.XPATH, '//*[@id="downshift-11-toggle-button"]')
click3.click()
        # WAIT TIME BEFORE TRIGGERING THE ACTION
navegador.implicitly_wait(20)
click4 = navegador.find_element(By.XPATH, '//*[@id="downshift-11-item-0"]')
click4.click()
lista = {}
dados = navegador.find_elements(By.CLASS_NAME, 'bwUmPO')
contador = 0
for dado in dados:
contador += 1
div = contador % 2
if div == 1:
lista[contador] = {'rodada':'1'}
lista[contador]['campeonato'] = '1'
time = self.__obj_model.getTimes(dado.text)
if time:
lista[contador]['mandante'] = time[0]
else:
lista[contador]['mandante'] = '?'
else:
cont = contador - 1
time = self.__obj_model.getTimes(dado.text)
if time:
lista[cont]['visitante'] = time[0]
else:
lista[cont]['visitante'] = '?'
self.__obj_crud.setTable('rodadas')
self.__obj_model.setTable('rodadas')
if len(lista) > 0:
for list in lista:
self.__obj_crud.newInsert(lista[list])
scrap = scraping()
scrap.switch('rodadas','premier')
| CaioFreitas96/scraping | scraping.py | scraping.py | py | 4,411 | python | pt | code | 0 | github-code | 50 |
29053233199 | x=10
y=2
print(x//y)
x=10
y=3
print(x//y)
x=10
y=8.5
print(x//y)
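# Additional illustration (added): floor division rounds toward negative infinity.
print(-10 // 3)   # -4, not -3
print(10 // -3)   # -4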
# Algorithm
# Sequence: 10, 1, 8, 3, 6, 5, 4, 7, x, y
# Find the general solution for x and y
# x -> 2, y -> 9
# Step 1: Start
# Step 2: Initialise a variable named n
# Step 3: x = n + 1
# Step 4: a = x + 2
# Step 5: b = x - 2
# Step 6: if x % 2 == 0, then x + a
# Step 7: if x % 2 != 0, then x + b
# Step 8: Stop
| RiyaBaid/Python | floordivision.py | floordivision.py | py | 346 | python | en | code | 0 | github-code | 50 |
35880060544 | # The column names and positions differ between the historical data and the station data!
# This dictionary helps us fetch a given piece of data from both tables.
# lista[0] -> Columns as they appear in the historical data.
# lista[1] -> Columns as they appear in the station data (website).
d_dic = {
"Data": ['DATA (YYYY-MM-DD)', 'DT_MEDICAO'],
"Hora": ['HORA (UTC)', 'HR_MEDICAO'],
"Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
"Radiacao": ['RADIACAO GLOBAL (KJ/m²)', 'RAD_GLO'],
"Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
"Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}
d_dic_2019 = {
"Data": ['Data', 'DT_MEDICAO'],
"Hora": ['Hora UTC', 'HR_MEDICAO'],
"Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
"Radiacao": ['RADIACAO GLOBAL (KJ/m²)', 'RAD_GLO'],
"Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
"Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}
# Someone over there had the brilliant idea of changing the column names and
# the data formatting starting in 2019.
d_dic_2020_greater = {
"Data": ['Data', 'DT_MEDICAO'],
"Hora": ['Hora UTC', 'HR_MEDICAO'],
"Pressao": ['PRESSAO ATMOSFERICA AO NIVEL DA ESTACAO, HORARIA (mB)', 'PRE_INS'],
"Radiacao": ['RADIACAO GLOBAL (Kj/m²)', 'RAD_GLO'],
"Temperatura": ['TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)', 'TEM_INS'],
"Umidade": ['UMIDADE RELATIVA DO AR, HORARIA (%)', 'UMD_INS']
}
# For downloads performed via web scraping from the INMET website.
d_dic_inmet = {
"Data": ['Data', 'DT_MEDICAO'],
"Hora": ['Hora (UTC)', 'HR_MEDICAO'],
"Pressao": ['Pressao Ins. (hPa)', 'PRE_INS'],
"Radiacao": ['Radiacao (KJ/m²)', 'RAD_GLO'],
"Temperatura": ['Temp. Ins. (C)', 'TEM_INS'],
"Umidade": ['Umi. Ins. (%)', 'UMD_INS']
}
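# Illustrative lookup (added): index 0 gives the historical-data column name,
# index 1 gives the station (website) column name, e.g.
#   d_dic["Temperatura"][0]  # 'TEMPERATURA DO AR - BULBO SECO, HORARIA (°C)'
#   d_dic["Temperatura"][1]  # 'TEM_INS'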
class ID:
MENU_SCROLL = 1
LISTBOX = 2
POPUP_CONCAT = 2002
POPUP_UPDATE = 2003
POPUP_CLEAN = 2004
POPUP_DELETE = 2005
POPUP_SAVE = 2006 | NeoFahrenheit/inmet-scraper | id.py | id.py | py | 2,115 | python | pt | code | 0 | github-code | 50 |
39518772432 | import random
import requests
from flask import Flask, request
from conf import (
get_healthy_server,
healthcheck,
load_configuration,
process_firewall_rules_flag,
process_rules,
process_rewrite_rules,
transform_backends_from_config,
)
loadbalancer = Flask(__name__)
MAIL_BACKENDS = ['localhost:8081', 'localhost:8082']
YANDEX_BACKENDS = ['localhost:9081', 'localhost:9082']
config = load_configuration('balancer.yaml')
register = transform_backends_from_config(config)
@loadbalancer.route('/')
def router():
host_header = request.headers['Host']
if host_header == 'www.mail.ru':
response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')
return response.content, response.status_code
elif host_header == 'www.yandex.ru':
response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')
return response.content, response.status_code
else:
return 'Not Found', 404
@loadbalancer.route('/mail')
def mail_path():
response = requests.get(f'http://{random.choice(MAIL_BACKENDS)}')
return response.content, response.status_code
@loadbalancer.route('/yandex')
def yandex_path():
response = requests.get(f'http://{random.choice(YANDEX_BACKENDS)}')
return response.content, response.status_code | leader8901/testServer | balancer.py | balancer.py | py | 1,317 | python | en | code | 0 | github-code | 50 |
1282240873 | import os
import numpy as np
import random
from gym.envs.mujoco.pusher import PusherEnv
from evaluation.eval import Eval
from data import utils
XML_FOLDER = "/media/stephen/c6c2821e-ed17-493a-b35b-4b66f0b21ee7/MIL/gym/gym/envs/mujoco/assets"
class EvalMilPush(Eval):
def _load_env(self, xml):
xml = xml[xml.rfind('pusher'):]
xml_file = 'sim_push_xmls/test2_ensure_woodtable_distractor_%s' % xml
xml_file = os.path.join(XML_FOLDER, xml_file)
env = PusherEnv(**{'xml_file': xml_file, 'distractors': True})
env.set_visibility(self.render)
env.render()
viewer = env.viewer
viewer.autoscale()
viewer.cam.trackbodyid = -1
viewer.cam.lookat[0] = 0.4
viewer.cam.lookat[1] = -0.1
viewer.cam.lookat[2] = 0.0
viewer.cam.distance = 0.75
viewer.cam.elevation = -50
viewer.cam.azimuth = -90
return env
def _eval_success(self, obs):
obs = np.array(obs)
target = obs[:, -3:-1]
obj = obs[:, -6:-4]
dists = np.sum((target - obj) ** 2, 1) # distances at each time step
return np.sum(dists < 0.017) >= 10
def evaluate(self, iter):
print("Evaluating at iteration: %i" % iter)
iter_dir = os.path.join(self.record_gifs_dir, 'iter_%i' % iter)
utils.create_dir(iter_dir)
successes = []
for i in range(self.num_tasks):
# demo_selection will be an xml file
env = self._load_env(self.demos[i][0]['demo_selection'])
selected_demo_indexs = random.sample(
range(len(self.demos[i])), self.supports)
embedding = self.get_embedding(i, selected_demo_indexs)
gifs_dir = self.create_gif_dir(iter_dir, i)
for j in range(self.num_trials):
env.reset()
observations = []
world_state = []
for t in range(self.time_horizon):
env.render()
# Observation is shape (100,100,3)
obs, state = env.get_current_image_obs()
observations.append(obs)
obs = ((obs / 255.0) * 2.) - 1.
action = self.get_action(obs, state, embedding)
ob, reward, done, reward_dict = env.step(np.squeeze(action))
world_state.append(np.squeeze(ob))
if done:
break
if self._eval_success(world_state):
successes.append(1.)
else:
successes.append(0.)
self.save_gifs(observations, gifs_dir, j)
env.render(close=True)
final_suc = np.mean(successes)
print("Final success rate is %.5f" % (final_suc))
return final_suc
| stepjam/TecNets | evaluation/eval_mil_push.py | eval_mil_push.py | py | 2,852 | python | en | code | 40 | github-code | 50 |
17566333407 | n=int(input())
k=n
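# Added explanatory note: 1 + 2 + ... + n must be even for an equal split to
# exist. The loop below greedily takes n, n-1, ... until the remaining
# deficit is a single value smaller than n, which completes the first set.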
l=(n*(n+1))//2
num=0
if(l)%2==0:
l=l//2
ls=[i for i in range(1,n+1)]
ls1=[]
while(num!=l):
if(l-num)<n:
ls1.append(l-num)
break
else:
num+=n
n-=1
ls1.append(n+1)
print("YES")
print(len(ls1))
print(*ls1)
print(k-len(ls1))
s2=set(ls)-set(ls1)
ls2=list(s2)
ls2.sort(reverse=True)
print(*ls2)
else:
print("NO") | SaranSaiChava/Problem_Solving | CSES/twosets.py | twosets.py | py | 382 | python | en | code | 0 | github-code | 50 |
13917300027 | import pickle
import os
import sys
import ast
from header import Driver
import struct
import subprocess
import re
from pprint import pprint
import pandas as pd
from collections import defaultdict
# set working directory
WD = os.path.dirname(os.path.abspath(__file__))
os.chdir(WD)
d_p = "../../AutoRNP/experiments/testing_results/DEMC/"
def convert(filename):
xsl_file = pd.read_excel(d_p+filename, usecols="C")
l_val = xsl_file['input'].tolist()
uni_val = set()
for val in l_val:
uni_val.add(round(val, 1))
return list(uni_val)
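# Illustrative behavior (added; values are hypothetical): rounding to one
# decimal place before inserting into the set buckets near-duplicate inputs,
# e.g. [1.04, 1.01, 2.55] -> {1.0, 2.5} (Python's round uses banker's rounding).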
def autoRNP():
directory = os.fsencode(d_p)
all_functions = defaultdict()
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".xls"):
all_functions[filename[:-4]+'_DRIVER'] = convert(filename)
else:
continue
return all_functions
def gslGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS):
all_functions = autoRNP()
with open("spFunDrivers/" + libraryName + "_drivers.c", 'w') as f:
f.write("#include <gsl/gsl_sf.h>\n")
f.write("#include <gsl/gsl_errno.h>\n")
f.write("#include <stdio.h>\n\n")
f.write("void my_handler(const char * reason, const char * file, int line, int gsl_errno)\n")
f.write("{\n")
f.write("\tfprintf(stderr, \"%s\\n\", reason);\n")
f.write("}\n\n")
for funcName, args in signatures.items():
driverName = "{}_DRIVER".format(funcName)
thisDriver = Driver(driverName, funcName, libraryName, "c", len(args[0]), funcName)
thisDriver.add_line("double {}(double * doubleInput, int * intInput)\n".format(driverName))
thisDriver.add_line("{\n")
thisDriver.add_line("\tdouble out;\n")
# tally up number of ints and doubles
numberOfInts = 0
numberOfDoubles = 0
for i in range(len(args[0])):
if "int" in args[0][i]:
numberOfInts += 1
elif "double" in args[0][i]:
numberOfDoubles += 1
thisDriver.set_numberOfDoubles(numberOfDoubles)
thisDriver.set_numberOfInts(numberOfInts)
# for each extracted function, save all of its test arguments for test migration
k = 1
for j in range(1, len(args)):
ints = []
doubles = []
for i in range(len(args[0])):
if "int" in args[0][i]:
ints.append(int(args[j][i]))
elif "double" in args[0][i]:
doubles.append(float(args[j][i]))
TEST_INPUTS["{}~input_num{:0>3}".format(driverName, j-1)] = (doubles, ints)
k += 1
if driverName in all_functions.keys():
for m in range(k, len(all_functions[driverName])):
TEST_INPUTS["{}~input_num{:0>3}".format(driverName, m-1)] = ([all_functions[driverName][m-k]], [])
thisDriver.add_line("\tgsl_error_handler_t * old_handler = gsl_set_error_handler (&my_handler);\n\n")
thisDriver.add_line("\tout = {}(".format(funcName))
for i in range(numberOfInts):
thisDriver.add_line('intInput[{}]'.format(i))
if i + 1 != numberOfInts or numberOfDoubles != 0:
thisDriver.add_line(", ")
for i in range(numberOfDoubles):
thisDriver.add_line('doubleInput[{}]'.format(i))
if i + 1 != numberOfDoubles:
thisDriver.add_line(", ")
if numberOfDoubles + numberOfInts < len(args[0]):
thisDriver.add_line(", GSL_PREC_DOUBLE")
thisDriver.add_line(');\n\n')
#thisDriver.add_line("\tgsl_set_error_handler (old_handler);\n\n")
thisDriver.add_line("\treturn out;\n")
thisDriver.add_line("}} //END_DRIVER {}\n\n".format(funcName))
f.write(thisDriver.get_driverText())
DRIVER_LIST[thisDriver.get_driverName()] = thisDriver
def pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS):
with open("spFunDrivers/" + libraryName + "_drivers.py", 'w') as f:
# write all imports
for x in imports:
if len(x) == 1:
f.write("import {}\n".format(x[0]))
if len(x) == 2:
f.write("import {} as {}\n".format(x[0], x[1]))
for x in fromImports:
f.write("from {} import {}\n".format(x[0], x[1]))
# for each collected function signature
for funcName, args in signatures.items():
# for a varying number of integers...
for numberOfInts in range(len(args[0])):
# get the number of doubles
numberOfDoubles = len(args[0]) - numberOfInts
# form driverName
driverName = "{}_{}_DRIVER{}".format(libraryName, funcName.replace('.', '_'), numberOfInts )
# form unique funcName without "_alt" and namespace info
temp = funcName
callName = funcName
if '_alt' in temp:
temp = temp[:temp.index("_alt")]
callName = temp
if '.' in temp:
temp = temp[temp.index(".") + 1:]
# construct driver object
thisDriver = Driver(driverName, temp, libraryName, "python", len(args[0]), callName)
thisDriver.add_line("def {}(doubleInput, intInput):\n".format(driverName))
# for each extracted function, save all of its test arguments for test migration
for j in range(len(args)):
ints = []
doubles = []
for k in range(numberOfInts):
ints.append(int(args[j][k]))
for k in range(len(args[0]) - numberOfInts):
doubles.append(float(args[j][k]))
TEST_INPUTS["{}~inputs_num{:0>3}".format(driverName,j)] = (doubles, ints)
thisDriver.set_numberOfDoubles(numberOfDoubles)
thisDriver.set_numberOfInts(numberOfInts)
if "_alt" in funcName:
thisDriver.add_line("\tout = {}(".format(funcName[:funcName.find("_alt")]))
else:
thisDriver.add_line("\tout = {}(".format(funcName))
for i in range(numberOfInts):
thisDriver.add_line("intInput[{}]".format(i))
if i + 1 != numberOfInts or numberOfDoubles != 0:
thisDriver.add_line(", ")
for i in range(numberOfDoubles):
thisDriver.add_line("doubleInput[{}]".format(i))
if i + 1 != numberOfDoubles:
thisDriver.add_line(", ")
thisDriver.add_line(")\n\n")
thisDriver.add_line("\treturn float(out) #END_DRIVER {}\n\n".format(funcName))
f.write(thisDriver.get_driverText())
DRIVER_LIST[thisDriver.get_driverName()] = thisDriver
if __name__ == "__main__":
# python3 driverGenerator mpmath python
libraryName = sys.argv[1]
language = sys.argv[2]
try:
with open("__temp/__driverCollection", 'rb') as fp:
DRIVER_LIST = pickle.load(fp)
except:
DRIVER_LIST = {}
try:
with open("__temp/__testInputs", 'rb') as fp:
TEST_INPUTS = pickle.load(fp)
except:
TEST_INPUTS = {}
# load information from signature extractor
with open("__temp/__" + libraryName + "_signatures", 'rb') as fp:
signatures = pickle.load(fp)
with open("__temp/__" + libraryName + "_imports", 'rb') as fp:
imports = pickle.load(fp)
with open("__temp/__" + libraryName + "_fromImports", 'rb') as fp:
fromImports = pickle.load(fp)
if language == 'c':
gslGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports, TEST_INPUTS)
subprocess.call(['make'], cwd="spFunDrivers/")
if language == 'python':
pythonGenerator(libraryName, DRIVER_LIST, signatures, imports, fromImports,TEST_INPUTS)
with open("__temp/__testInputs", 'wb') as fp:
pickle.dump(TEST_INPUTS, fp)
with open("__temp/__driverCollection", 'wb') as fp:
pickle.dump(DRIVER_LIST, fp) | Sherryhh/fpdiff_extend | fp-diff-testing/workspace/driverGenerator.py | driverGenerator.py | py | 8,600 | python | en | code | 1 | github-code | 50 |
10070441282 | import datetime
import os
import sys
from importlib import reload
from antlr4 import *
from CnfUtility import CnfUtility
from CnfVcGenerator import CnfVcGenerator
from MyCFG import MyCFG
from MyHelper import MyHelper
from MyUtility import MyUtility
from MyVisitor import MyVisitor
from PreProcessor import PreProcessor
from WpcStringConverter import WpcStringConverter
from gen.MySsaStringGenerator import MySsaStringGenerator
from gen.PlSqlLexer import PlSqlLexer
from gen.PlSqlParser import PlSqlParser
from MyRawCfgToGraph import MyRawCfgToGraph
def executeSinglePlSqlFile(data, spec):
f = open(data, 'r')
linesOfCode = len(f.readlines())
f.close()
processor = PreProcessor(spec, data)
tableInfo, assumeConstraint, assertConstraint, resultString = processor.start()
file = open('cnf/upper_input.sql', "w")
file.write(resultString)
file.close()
# recording startTime
startTime = datetime.datetime.now()
input = FileStream('cnf/upper_input.sql')
lexer = PlSqlLexer(input)
stream = CommonTokenStream(lexer)
parser = PlSqlParser(stream)
tree = parser.sql_script()
# ast = tree.toStringTree(recog=parser)
# print(str(MyPlSqlVisitor(parser).getRuleName(tree)))
# print("\n\n", signature(tree.toStringTree), "\n")
cfg = MyCFG()
helper = MyHelper(parser)
helper.updateTableDict(tableInfo)
utility = MyUtility(helper)
v = MyVisitor(parser, cfg, utility)
v.visit(tree)
print("\nRaw CFG : ", v.rawCFG)
# for key in v.cfg.nodes:
# if v.cfg.nodes[key].ctx != None:
# print(key, " --> ", v.cfg.nodes[key].ctx.getText())
res = MyRawCfgToGraph(v.rawCFG, cfg)
res.execute()
# cfg.printPretty()
# cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph") # TODO: make dot file in cnf form
utility.generateDomSet(cfg)
# print("Dominator set ended----------->\n\n")
utility.generateSDomSet(cfg)
# print("Strictly Dominator set ended ----------->\n\n")
utility.generatIDom(cfg)
# print("Immediate Dominator ended ----------->\n\n")
utility.generateDFSet(cfg)
utility.insertPhiNode(cfg)
utility.initialiseVersinosedPhiNode(cfg)
utility.versioniseVariable(cfg)
utility.phiDestruction(cfg)
ssaString = MySsaStringGenerator(cfg, parser)
ssaString.execute()
# utility.generateFinalDotGraph(cfg)
# for nodeId in cfg.nodes:
# cfg.nodes[nodeId].printPretty()
# cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph")
#
# hello1 = utility.generateBeforeVersioningDotFile(cfg)
# cfg.dotToPng(hello1, "cnf/before_versioning_graph")
#
# hello4 = utility.generateDestructedPhiNodeWalaDotFile(cfg)
# cfg.dotToPng(hello4, "cnf/destructed_phi_node_wala_graph")
cnfUtility = CnfUtility(helper)
iCnfCfg = cnfUtility.copyCfg(cfg)
reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg)
cnfUtility.unvisit(iCnfCfg)
cnfUtility.setParentBranching(iCnfCfg)
cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg)
cnfUtility.copyParentBranching(cnfCfg, iCnfCfg)
# print("\n\n\n\n\n\t\t\tThe intermediate CNF form is ------------------------------>\n\n\n\n")
# for nodeId in iCnfCfg.nodes:
# iCnfCfg.nodes[nodeId].printPretty()
# print("\n\n\n\n\n\t\t\tThe CNF form is ------------------------------>\n\n\n\n")
cnfVcGenerator = CnfVcGenerator(cnfCfg, parser)
cnfPath = []
for nodeId in cnfCfg.nodes:
cnfPath.append(nodeId)
cnfVcGenerator.generateCnfVc(cnfPath)
# print("\n\n\n\n\t\t\tThe CNF VCs are : ------------------------------->\n\n\n")
# print(cnfVcs)
# for nodeId in cnfCfg.nodes:
# cnfCfg.nodes[nodeId].printPretty()
# cnfVc = cnfUtility.cnfVc(cnfCfg)
#
# print("\n\n\t\tThe CNF VCs are ----------------->\n\n\n")
#
# for str in cnfVc:
# print(str)
varSet, z3Str = cnfUtility.iZ3format(cnfCfg)
# print("\n\n*******************\n\n", z3Str, "\n\n--------------\n\n")
# print(varSet)
#
# print("\n\n")
z3Str = z3Str.replace(" ", " ")
z3Str = z3Str.replace(" == ", " = ")
z3Str = z3Str.replace(" = ", " == ")
print("\n**** Final CNF VC in Well_Bracketted_Format:\n\n", z3Str, "\n")
z3StringConvertor = WpcStringConverter(z3Str)
z3StringConvertor.execute()
# print("\n**** Final CNF VC in Z3 Format:\n", z3StringConvertor.convertedWpc, "\n")
z3FileString = "# This file was generated at runtime on " + str(datetime.datetime.now()) + "\n"
z3FileString = z3FileString + "from z3 import *\n\n"
z3FileString = z3FileString + "class Z3RuntimeCnfFile():\n"
z3FileString = z3FileString + "\t" + "def __init__(self):\n"
z3FileString = z3FileString + "\t\t" + "self.finalFormula = \"\"\n"
z3FileString = z3FileString + "\t\t" + "self.satisfiability = \"\"\n"
z3FileString = z3FileString + "\t\t" + "self.modelForViolation = \"\"\n\n"
z3FileString = z3FileString + "\t" + "def execute(self):\n"
for i in varSet:
z3FileString = z3FileString + "\t\t" + i + " = Real(\'" + i + "\')\n"
z3FileString = z3FileString + "\n\t\ts = Solver()\n"
if len(z3StringConvertor.implies_p) > 0:
for i in range(len(z3StringConvertor.implies_p)):
z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.implies_p[i] + ")\n"
if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]:
z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.implies_p_q[i] + ")\n"
# if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q:
# z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.convertedWpc + ")\n"
# else:
# z3FileString = z3FileString + "\t\t" + "s.add(" + z3StringConvertor.convertedWpc + ")\n"
z3FileString = z3FileString + "\t\t" + "s.add( Not( " + z3StringConvertor.convertedWpc + " ) )\n"
# z3FileString = z3FileString + "\n\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t" + "print(\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\n\", s)"
z3FileString = z3FileString + "\n\t\t" + "self.finalFormula = str(s)"
# z3FileString = z3FileString + "\n\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t" + "print(\"%%%%%%%%%% Satisfiability %%%%%%%%%%\")\n"
z3FileString = z3FileString + "\n\t\t" + "self.satisfiability = str(s.check())"
z3FileString = z3FileString + "\n\t\t" + "if self.satisfiability == \"sat\":"
# z3FileString = z3FileString + "\n\t\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t\t" + "print(\"-------->> Violation Occurred...\")"
z3FileString = z3FileString + "\n\t\t\t" + "self.satisfiability = \"violation\""
# z3FileString = z3FileString + "\n\t\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t\t" + "print(\"%%%%%%%%%% An Instance for which Violation Occurred %%%%%%%%%%\\n\", s.model())"
z3FileString = z3FileString + "\n\t\t\t" + "self.modelForViolation = str(s.model())"
z3FileString = z3FileString + "\n\t\t" + "elif self.satisfiability == \"unsat\":"
# z3FileString = z3FileString + "\n\t\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t\t" + "print(\"-------->> NO Violation Detected so far...\")"
z3FileString = z3FileString + "\n\t\t\t" + "self.satisfiability = \"sat\""
# z3FileString = z3FileString + "\n\t\t\t" + "print()"
# z3FileString = z3FileString + "\n\t\t" + "print()\n"
file = open('cnf/Z3RuntimeCnfFile.py', "w")
file.write(z3FileString)
file.close()
import cnf.Z3RuntimeCnfFile
from cnf.Z3RuntimeCnfFile import Z3RuntimeCnfFile
# Reload after module's creation to avoid old module remain imported from disk...VVI...
cnf.Z3RuntimeCnfFile = reload(cnf.Z3RuntimeCnfFile)
z3Runtime = Z3RuntimeCnfFile()
z3Runtime.execute()
finishTime = datetime.datetime.now()
timeDifference = (finishTime - startTime).total_seconds()
return linesOfCode, timeDifference, z3StringConvertor.convertedWpc, z3Runtime.satisfiability, z3Runtime.modelForViolation
def main(argv):
if len(argv) < 3:
print("Not Enough Arguments. Exiting...")
elif len(argv) == 3:
data = "cnf/data/" + argv[1]
spec = "cnf/spec/" + argv[2]
linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(data, spec)
print("\n\n*** Equivalent VC :")
print(vcGenerated)
print("\n*** Satisfibality :\t", satisfiability, "\n\n*** Model for Violation :\t", modelForViolation, "\n")
print("\n////// Execution completed for file :", argv[1])
print("No. of VCs = 1")
print("Time Taken =", executionTime)
print("LinesOfCode =", linesOfCode)
elif len(argv) == 4:
if argv[1] == "-dataset":
dataList = os.listdir(argv[2])
specList = os.listdir(argv[3])
# print(dataList)
# print(specList)
mat = []
counter = 0
for dataFile in dataList:
specFile = dataFile.split(".")[0].strip() + ".spec"
print("~~~~~~~~~~~~~~~~ For PlSQL FileName => " + dataFile + " ~~~~~~~~~~~~~~~~")
if specFile in specList:
linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(
argv[2] + "/" + dataFile, argv[3] + "/" + specFile)
temp = []
temp.append(dataFile)
temp.append(linesOfCode)
temp.append(executionTime)
# temp.append(vcGenerated)
temp.append(satisfiability)
temp.append(modelForViolation)
mat.append(temp)
file = open('cnf/Z3RuntimeCnfFile.py', "w")
file.write("# Cleared content of this File...\n\nclass Z3RuntimeCnfFile():\n\tdef __init__(self):\n\t\tself.finalFormula = \"\"\n\t\tself.satisfiability = \"\"\n\t\tself.modelForViolation = \"\"\n\n\tdef execute(self):\n\t\tprint('+++++++++++++++++++++++++++++%%%%%^^^^^^^^####')\n")
file.close()
else:
print(specFile + " do not exist!!!")
counter = counter + 1
print("Counter =", counter)
print(
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Filename\t\tLinesOfCode\t\tExecutionTime\t\tSatisfiability\t\tViolatingInstance\n")
for i in range(len(mat)):
for j in range(len(mat[i])):
print(mat[i][j], end="\t\t")
print()
elif len(argv) == 6:
if argv[1] == "-datafilename" and argv[3] == "-data_spec_filepaths":
linesOfCode, executionTime, vcGenerated, satisfiability, modelForViolation = executeSinglePlSqlFile(argv[4], argv[5])
print(" "+argv[2], end="\t\t\t")
print(linesOfCode, end="\t\t")
print(executionTime, end="\t")
print("1", end="\t")
print(satisfiability, end="\t\t")
print(modelForViolation.replace("\n", " "), end="")
print()
# data = "cnf/data/" + argv[1]
# spec = "cnf/spec/" + argv[2]
# processor = PreProcessor(spec, data)
# tableInfo, assumeConstraint, assertConstraint, resultString = processor.start()
#
# file = open('cnf/upper_input.sql', "w")
# file.write(resultString)
# file.close()
#
# input = FileStream('cnf/upper_input.sql')
# lexer = PlSqlLexer(input)
# stream = CommonTokenStream(lexer)
# parser = PlSqlParser(stream)
# tree = parser.sql_script()
# # ast = tree.toStringTree(recog=parser)
# # print(str(MyPlSqlVisitor(parser).getRuleName(tree)))
# # print("\n\n", signature(tree.toStringTree), "\n")
#
# cfg = MyCFG()
# helper = MyHelper(parser)
# helper.updateTableDict(tableInfo)
# utility = MyUtility(helper)
# v = MyVisitor(parser, cfg, utility)
# v.visit(tree)
#
#
#
# print(v.rawCFG)
#
# for key in v.cfg.nodes:
# if v.cfg.nodes[key].ctx != None:
# print(key, " --> ", v.cfg.nodes[key].ctx.getText())
#
#
# res = MyRawCfgToGraph(v.rawCFG, cfg)
# res.execute()
# cfg.printPretty()
# cfg.dotToPng(cfg.dotGraph, "cnf/raw_graph") #TODO: make dot file in cnf form
# utility.generateDomSet(cfg)
# print("Dominator set ended----------->\n\n")
# utility.generateSDomSet(cfg)
# print("Strictly Dominator set ended ----------->\n\n")
# utility.generatIDom(cfg)
# print("Immediate Dominator ended ----------->\n\n")
# utility.generateDFSet(cfg)
# utility.insertPhiNode(cfg)
#
#
# utility.initialiseVersinosedPhiNode(cfg)
# utility.versioniseVariable(cfg)
# utility.phiDestruction(cfg)
#
#
# ssaString = MySsaStringGenerator(cfg, parser)
# ssaString.execute()
#
# #utility.generateFinalDotGraph(cfg)
# # for nodeId in cfg.nodes:
# # cfg.nodes[nodeId].printPretty()
#
# cnfUtility = CnfUtility(helper)
# iCnfCfg = cnfUtility.copyCfg(cfg)
# reverseCnfCfg = cnfUtility.topologicalSort(iCnfCfg)
# cnfUtility.unvisit(iCnfCfg)
# cnfUtility.setParentBranching(iCnfCfg)
#
# cnfCfg = cnfUtility.reverseDictOrder(reverseCnfCfg)
# cnfUtility.copyParentBranching(cnfCfg, iCnfCfg)
# print("\n\n\n\n\n\t\t\tThe intermediate CNF form is ------------------------------>\n\n\n\n")
#
# for nodeId in iCnfCfg.nodes:
# iCnfCfg.nodes[nodeId].printPretty()
#
# print("\n\n\n\n\n\t\t\tThe CNF form is ------------------------------>\n\n\n\n")
#
#
#
# cnfVcGenerator = CnfVcGenerator(cnfCfg, parser)
#
# cnfPath = []
#
# for nodeId in cnfCfg.nodes:
# cnfPath.append(nodeId)
#
# cnfVcGenerator.generateCnfVc(cnfPath)
#
# # print("\n\n\n\n\t\t\tThe CNF VCs are : ------------------------------->\n\n\n")
# # print(cnfVcs)
#
# for nodeId in cnfCfg.nodes:
# cnfCfg.nodes[nodeId].printPretty()
#
# # cnfVc = cnfUtility.cnfVc(cnfCfg)
# #
# # print("\n\n\t\tThe CNF VCs are ----------------->\n\n\n")
# #
# # for str in cnfVc:
# # print(str)
#
# varSet, z3Str = cnfUtility.iZ3format(cnfCfg)
#
# print("\n\n*******************\n\n", z3Str, "\n\n--------------\n\n")
# print(varSet)
#
# print("\n\n")
# z3Str = z3Str.replace(" ", " ")
# z3Str = z3Str.replace(" == ", " = ")
# z3Str = z3Str.replace(" = ", " == ")
# z3StringConvertor = WpcStringConverter(z3Str)
# z3StringConvertor.execute()
# print("\n**** WPC String in Z3 Format:\n", z3StringConvertor.convertedWpc, "\n")
#
# z3FileString = "# This file was generated at runtime " + "\n"
# z3FileString = z3FileString + "from z3 import *\n\n"
# for i in varSet:
# z3FileString = z3FileString + i + " = Real(\'" + i + "\')\n"
# z3FileString = z3FileString + "\ns = Solver()\n"
#
# if len(z3StringConvertor.implies_p) > 0:
# for i in range(len(z3StringConvertor.implies_p)):
# z3FileString = z3FileString + "s.add(" + z3StringConvertor.implies_p[i] + ")\n"
# if not z3StringConvertor.convertedWpc == z3StringConvertor.implies_p_q[i]:
# z3FileString = z3FileString + "s.add(" + z3StringConvertor.implies_p_q[i] + ")\n"
# # if z3StringConvertor.convertedWpc not in z3StringConvertor.implies_p_q:
# # z3FileString = z3FileString + "s.add(" + z3StringConvertor.convertedWpc + ")\n"
# # else:
# # z3FileString = z3FileString + "s.add(" + z3StringConvertor.convertedWpc + ")\n"
# z3FileString = z3FileString + "s.add( Not( " + z3StringConvertor.convertedWpc + " ) )\n"
#
# z3FileString = z3FileString + "\nprint()\n"
# z3FileString = z3FileString + "\nprint(\"------------------------------------------------------------------\\nRunning script in /wpc/z3FormatWpcFile.py ....\\n\")\n"
# z3FileString = z3FileString + "\nprint(\"%%%%%%%%%% Aggregate Formula %%%%%%%%%%\\n\", s)\n"
# z3FileString = z3FileString + "\nprint()\n"
# z3FileString = z3FileString + "print(\"%%%%%%%%%% Satisfiability %%%%%%%%%%\\n\", s.check())\n"
# z3FileString = z3FileString + "\nprint()\n"
# z3FileString = z3FileString + "print(\"%%%%%%%%%% Satisfiable Model %%%%%%%%%%\\n\", s.model())\n"
# z3FileString = z3FileString + "\nprint()\n"
#
# file = open('cnf/z3FormatCnfFile.py', "w")
# file.write(z3FileString)
# file.close()
#
# # call(["python3", "cnf/z3FormatWpcFile.py"])
#
# #
# # hello = utility.generateFinalDotGraph(cfg)
# # print(hello)
# # cfg.dotToPng(hello, "versioned_graph")
#
# #hello2 = utility.generateVersionedDotFile(cfg)
# #print(hello2)
# #cfg.dotToPng(hello2, "se/versioned_graph")
#
# #hello3 = utility.generateVersionedPhiNodeWalaDotFile(cfg)
# #print(hello3)
# #cfg.dotToPng(hello3, "se/versioned_phi_node_wala_graph")
#
# #hello4 = utility.generateDestructedPhiNodeWalaDotFile(cfg)
# #print(hello4)
# #cfg.dotToPng(hello4, "se/destructed_phi_node_wala_graph")
if __name__ == '__main__':
main(sys.argv) | NabeelQaiser/BTP_2k18-19 | simulator_cnf.py | simulator_cnf.py | py | 17,501 | python | en | code | 1 | github-code | 50 |
18659979731 | from configparser import ConfigParser
from datetime import timedelta, datetime
from discord_webhook import DiscordWebhook
import os, random, requests, re
from typing import TypedDict, Union
class UserNameResponseDict(TypedDict):
personaname:str
name:str
def get_username(steam_id:int) -> Union[UserNameResponseDict,None]:
"""
Returns dict of persona name and name from Steam Player Summaries API
"""
try:
response = requests.get(f'https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?key={str(steam_api_key)}&format=json&steamids={str(steam_id)}')
except requests.exceptions.RequestException as e:
return None
if 'realname' in response.json()['response']['players'][0]:
name = response.json()['response']['players'][0]['realname']
else:
name = response.json()['response']['players'][0]['personaname']
user_dict = {
'personaname': response.json()['response']['players'][0]['personaname'],
'name': name
}
return user_dict
def check_name(steam_id):
"""
Checks if user steam id is known, then returns user dict:
{
'status': <bool> (Is user recognized),
'name': <str>
'personaname': <str>
}
If username is unknown, looks up user via steam ID
"""
if str(steam_id) in known_ids:
name = known_ids[str(steam_id)][0]
personaname = known_ids[str(steam_id)][1]
status = True
else:
response = get_username(steam_id)
if response is not None:
name = response['name']
personaname = response['personaname']
status = False
else:
personaname = name = "Unknown Mystery Person"
status=False
user_dict = {
'status': status,
'name': name,
'personaname': personaname,
}
return user_dict
def generate_greeting(steam_id, incoming):
"""
Generates random greeting after looking up steam ID.
args
steam_id: <int>
incoming: <bool> (true if user joining server, else false)
returns:
greeting: <str>
"""
user = check_name(steam_id)
status = user['status']
name = user['name']
personaname = user['personaname']
#print(status)
if incoming:
greetings = [
f'Hello {name}, or is it {personaname}? I don\'t know... I don\'t get paid enough.',
f'Hello {name}, toilet paper is on isle 24.',
f'Welcome to Walmart, {name}!',
f'Enjoy shopping at Walmart, {name}!',
f'Hi, {name} how can-- HEY, NO RIDING ON THE CARTS!',
f'What do you want, {personaname}?',
f'Yo, {personaname}, want to hear about the time I ran over a cat?',
f'We don\'t sell them, but possums are super tasty, {name}',
f'Hey {name}, Have you ever seen a grown Walmart Greeter Naked?',
]
if status == True:
greetings.append(f'Welcome back {name}!')
greetings.append(f'Wonderful seeing you again, {name}!')
greetings.append(f'Lookin\' fly today, {name}')
greetings.append(f'Welcome back {name}... I\'m watching you...')
else:
greetings = [
f'Goodbye {name}',
f'Thank you, come again {name}',
f'Thank you for shopping at Walmart, see you next time, {name}',
f'You better not have anything you didn\'t pay for {name}'
]
if status == True:
greetings.append(f'I hate to watch {name} go, but I love to watch {name} leave...')
greetings.append(f'See ya {name}, wouldn\'t wanna be ya though.')
result = random.choice(greetings)
return result
def extract_date(line):
"""Return a datetime from a log line"""
fmt = '%m/%d/%Y %H:%M:%S'
return datetime.strptime(line[:19], fmt)
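# Hypothetical log lines this script expects (the surrounding wording is an
# assumption; only the 19-character leading timestamp, the event phrases checked
# below, and the trailing Steam ID digits matter to the parsing code):
#   01/02/2023 13:45:10: Got handshake from client 76561198000000000
#   01/02/2023 13:59:58: Closing socket 76561198000000000
# extract_date on the first line returns datetime(2023, 1, 2, 13, 45, 10).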
if __name__ == "__main__":
# parse config file for paths and known ids
config = ConfigParser()
config.read('greeter_config.ini')
vhlog = config['Paths'].get('RECENT_LOG','./example.log')
lastupdated = config['Paths'].get('LAST_UPDATED','./last_updated.txt')
webhook_url = config['Discord'].get('WEBHOOK_URL',False)
steam_api_key = config['Steam'].get('API_KEY',False)
if not steam_api_key:
raise ValueError("Steam API Key is required to look up users. Please add one to greeter_config.ini")
if not webhook_url:
raise ValueError("Webhook URL is required to post to discord. Please add one to greeter_config.ini")
suppress_old = config['Settings']['SUPPRESS_OLD'] == 'True'
known_ids = dict()
if config['Known Users']:
for key in config['Known Users']:
known_ids[key] = [w.strip() for w in str(config['Known Users'][key]).split(',')]
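# A minimal sketch of the greeter_config.ini parsed above (section and key names
# come from this script; every value below is a made-up placeholder):
#   [Paths]
#   RECENT_LOG = ./example.log
#   LAST_UPDATED = ./last_updated.txt
#   [Discord]
#   WEBHOOK_URL = https://discord.com/api/webhooks/<id>/<token>
#   [Steam]
#   API_KEY = <your Steam Web API key>
#   [Settings]
#   SUPPRESS_OLD = True
#   [Known Users]
#   76561198000000000 = Alice, xXAliceXx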
## get current time and last updated time
end_date = datetime.now()
# create lastupdated file if none exists
if not os.path.exists(os.path.abspath(lastupdated)):
print('Creating last_updated.txt')
os.makedirs(os.path.dirname(lastupdated),exist_ok=True)
open(lastupdated, 'a').close()  # create the empty file; no handle needs keeping
with open(lastupdated, 'r') as date_file:
if os.stat(lastupdated).st_size > 0:
data = date_file.read(19)
start_date = datetime.strptime(data, '%m/%d/%Y %H:%M:%S')
else:
start_date = datetime(2019,1,1)
changed = False
## Prevent posting status more than a minute old
## Useful if listener is started when there are a bunch of old logs
if suppress_old:
if end_date - start_date > timedelta(seconds=60):
start_date = end_date - timedelta(seconds=60)
## check for updates and post to discord if any
with open(vhlog) as f:
# from https://stackoverflow.com/questions/18562479/what-is-the-quickest-way-to-extract-entries-in-a-log-file-between-two-dates-in-p
for line in f:
if start_date < extract_date(line) < end_date:
if "Closing socket" in line:
incoming = False
elif "Got handshake from client" in line:
incoming = True
else:
continue  # not a join/leave event; without this, 'incoming' could be stale or unbound
client_id = re.search(r'\d+$', line).group(0)  # trailing digits are the Steam ID
greeting = generate_greeting(client_id, incoming)
if webhook_url:
print('Sending webhook:',greeting)
webhook = DiscordWebhook(url=webhook_url, content=greeting)
response = webhook.execute()
else:
print('No Webhook_URL specified, didn\'t send greeting:', greeting)
changed = True
## set last_updated time to end_date
if changed:
with open(lastupdated, "w") as date_file:
date_file.write(end_date.strftime('%m/%d/%Y %H:%M:%S'))
else:
print(f'No changes found. Suppress old messages: {suppress_old}')
| lekjos/vhserver-walmart-greeter | discord_post.py | discord_post.py | py | 7,031 | python | en | code | 1 | github-code | 50 |
28076502272 | # -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2023/2/8 0:25
"""
from typing import List
"""
你是一位系统管理员,手里有一份文件夹列表 folder,你的任务是要删除该列表中的所有 子文件夹,并以 任意顺序 返回剩下的文件夹。
如果文件夹 folder[i] 位于另一个文件夹 folder[j] 下,那么 folder[i] 就是 folder[j] 的 子文件夹 。
文件夹的“路径”是由一个或多个按以下格式串联形成的字符串:'/' 后跟一个或者多个小写英文字母。
例如,"/leetcode" 和 "/leetcode/problems" 都是有效的路径,而空字符串和 "/" 不是。
示例 1:
输入:folder = ["/a","/a/b","/c/d","/c/d/e","/c/f"]
输出:["/a","/c/d","/c/f"]
解释:"/a/b/" 是 "/a" 的子文件夹,而 "/c/d/e" 是 "/c/d" 的子文件夹。
示例 2:
输入:folder = ["/a","/a/b/c","/a/b/d"]
输出:["/a"]
解释:文件夹 "/a/b/c" 和 "/a/b/d/" 都会被删除,因为它们都是 "/a" 的子文件夹。
示例 3:
输入: folder = ["/a/b/c","/a/b/ca","/a/b/d"]
输出: ["/a/b/c","/a/b/ca","/a/b/d"]
"""
"""
思路:先排序再挨个扫描。如果当前的路径f不是以我们想要的t开头,说明他不是子路径,就把我们的t更新为f+'/'继续扫描。
"""
class Solution:
@staticmethod
def removeSubfolders(folder: List[str]) -> List[str]:
res, t = [], ' '  # no path starts with ' ', so the first sorted path is always kept
for f in sorted(folder):
if not f.startswith(t):
res.append(f)
t = f + '/'
return res
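# Hypothetical sanity check against the three examples in the docstring above
# (added for illustration; not part of the original file):
if __name__ == '__main__':
    assert Solution.removeSubfolders(["/a", "/a/b", "/c/d", "/c/d/e", "/c/f"]) == ["/a", "/c/d", "/c/f"]
    assert Solution.removeSubfolders(["/a", "/a/b/c", "/a/b/d"]) == ["/a"]
    assert Solution.removeSubfolders(["/a/b/c", "/a/b/ca", "/a/b/d"]) == ["/a/b/c", "/a/b/ca", "/a/b/d"]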
| TankManBeta/LeetCode-Python | problem1233_medium.py | problem1233_medium.py | py | 1,569 | python | zh | code | 0 | github-code | 50 |
705515631 | import math
import os
import time
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
def init_seeds(seed=0):
torch.manual_seed(seed)
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0: # slower, more reproducible
cudnn.deterministic = True
cudnn.benchmark = False
else: # faster, less reproducible
cudnn.deterministic = False
cudnn.benchmark = True
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
cpu_request = device.lower() == 'cpu'
if device and not cpu_request: # if device requested other than 'cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availability
cuda = False if cpu_request else torch.cuda.is_available()
if cuda:
c = 1024 ** 2 # bytes to MB
ng = torch.cuda.device_count()
if ng > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
x = [torch.cuda.get_device_properties(i) for i in range(ng)]
s = 'Using CUDA '
for i in range(0, ng):
if i == 1:
s = ' ' * len(s)
print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
(s, i, x[i].name, x[i].total_memory / c))
else:
print('Using CPU')
print('') # skip a line
return torch.device('cuda:0' if cuda else 'cpu')
def time_synchronized():
torch.cuda.synchronize() if torch.cuda.is_available() else None
return time.time()
def is_parallel(model):
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
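# Hedged usage sketch (assumed, not in the original file): keep only checkpoint
# weights whose names and shapes match the current model, then load loosely:
#   csd = intersect_dicts(ckpt['model'].float().state_dict(), model.state_dict(), exclude=['anchor'])
#   model.load_state_dict(csd, strict=False)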
def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True
def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a
def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
# https://tehnokv.com/posts/fusing-batchnorm-and-conv/
with torch.no_grad():
# init
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
bias=True).to(conv.weight.device)
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
return fusedconv
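# Hedged self-check sketch (added for illustration; not part of the original
# file): in eval mode the fused convolution should reproduce conv followed by
# bn up to floating-point error.
def _demo_fuse_conv_and_bn():
    conv = nn.Conv2d(3, 16, kernel_size=3, padding=1).eval()
    bn = nn.BatchNorm2d(16).eval()
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 32, 32)
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)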
def model_info(model, verbose=False):
# Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
try: # FLOPS
from thop import profile
flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
fs = ', %.1f GFLOPS' % (flops * 100) # scale the 64x64 profile to 640x640: (640/64)**2 = 100
except:
fs = ''
print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = models.__dict__[name](pretrained=True)
# Display model properties
input_size = [3, 224, 224]
input_space = 'RGB'
input_range = [0, 1]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
for x in ['input_size', 'input_space', 'input_range', 'mean', 'std']:
print(x + ' =', eval(x))  # iterate over the names; concatenating a list with ' =' would raise TypeError
# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model
def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio
# scales img(bs,3,y,x) by ratio
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
gs = 128 # (pixels) grid size (commented-out alternatives: 64, 32)
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)
class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
This class is sensitive to where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)
def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)
msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)
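# Hedged usage sketch (an assumption, not part of the original file):
#   ema = ModelEMA(model)
#   for imgs, targets in loader:
#       loss = compute_loss(model(imgs), targets)  # hypothetical loss helper
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       ema.update(model)  # fold the new weights into the moving average
#   evaluate(ema.ema)  # hypothetical: validate / checkpoint the smoothed copy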
| WongKinYiu/ScaledYOLOv4 | utils/torch_utils.py | torch_utils.py | py | 8,846 | python | en | code | 2,013 | github-code | 50 |
72328303514 | import io
# used for HTTP access
import requests
import zipfile
# the same idea written with a real file on disk:
# with open('/tmp/a.txt','w') as f:
# f.write('test test')
#
# with open('/tmp/a.txt','r') as f:
# print(f.read())
#
f = io.StringIO()
f.write('string io test')
# seek back to the start before reading
f.seek(0)
print(f.read())
# usage example
# BytesIO is useful e.g. for downloading a zip file and processing it entirely in memory
url = '###########'
f = io.BytesIO()
r = requests.get(url)
f.write(r.content)
with zipfile.ZipFile(f) as z:
with z.open('<specify the file to read>') as r:
print(r.read().decode()) | magisystem0408/python_cord_dir | library/io.py | io.py | py | 609 | python | ja | code | 0 | github-code | 50 |
70988579675 | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from Mesh import *
from Utils import *
import math
def F(Pi, Pj, k, r):
# Hooke's-law spring force on Pi from the spring (Pi, Pj) with stiffness k
# and rest length r; it points from Pi toward Pj when the spring is stretched.
return k * (torch.linalg.norm(Pi - Pj) - r) * (Pj - Pi) / torch.linalg.norm(Pi - Pj)
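# Worked example (added for illustration, not in the original file): with k=1,
# r=1, Pi=(0,0), Pj=(2,0), the spring has length 2, one unit past rest, so
# F = 1*(2-1)*((2,0)-(0,0))/2 = (1,0): Pi is pulled toward Pj with unit force.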
def force_magnitude_sum(mesh):
l = 0
for vIndex, this in enumerate(mesh.verts):
force = torch.tensor([0.,0.])
for key,edge in mesh.connected(vIndex).items():
other = mesh.verts[key]
force += F(this, other, k=edge.stiffness, r=edge.rest_length)
# print(vIndex, force)
l += torch.linalg.norm(force)
return l
mesh = generate_rectangle_mesh_grid((0,10), (10, 0), 5, 5)
# fig, axs = plt.subplots(1,1)
# axs.set_aspect('equal')
# visualize_mesh(axs, mesh)
# plt.show()
verts = [torch.tensor(x, requires_grad=True) for x in mesh.verts]
tInd = mesh.tInd
springMesh = SpringMesh(verts, tInd)
# Collect the three (sorted) vertex-index pairs of triangle 27; these are the
# edges whose rest lengths get doubled below, giving the optimizer work to do.
T = [27]
E = []
for t in T:
E.append(tuple(sorted([tInd[t][0], tInd[t][1]])))
E.append(tuple(sorted([tInd[t][1], tInd[t][2]])))
E.append(tuple(sorted([tInd[t][0], tInd[t][2]])))
for k,v in springMesh.edges.items():
if k in E:
v.rest_length = v.length * 2
v.stiffness = 1
else:
v.rest_length = v.length
print(force_magnitude_sum(springMesh))
optimizer = optim.Adam(springMesh.verts, lr=0.0001)
theta = 1e-5  # convergence threshold on the summed force magnitude
history = []
for i in range(2000):
optimizer.zero_grad()
loss = force_magnitude_sum(springMesh)
history.append(loss.item())
if(history[-1] < theta):
print("break early")
break
loss.backward(retain_graph=True)
optimizer.step()
print(history[-1])
fig, axs = plt.subplots(1,3)
axs[0].set_aspect('equal')
axs[0].set_xlim((0,10))
axs[0].set_ylim((0,10))
visualize_mesh(axs[0], mesh)
axs[1].set_aspect('equal')
axs[1].set_xlim((0,10))
axs[1].set_ylim((0,10))
visualize_mesh(axs[1], springMesh)
axs[2].plot(history)
plt.show() | COMP0031VRProject/Framework | spring_mesh_example.py | spring_mesh_example.py | py | 1,950 | python | en | code | 0 | github-code | 50 |