seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74611006504 | # coding: utf-8
from __future__ import print_function
import json
from math import log10
import numpy as np
def fit(x, y):
    """Least-squares fit of a straight line to (x, y); returns f(x1) = a*x1 + b."""
    mean_x, mean_y = np.mean(x), np.mean(y)
    dx = x - mean_x
    slope = np.sum(dx * (y - mean_y)) / np.sum(dx ** 2)
    intercept = mean_y - slope * mean_x

    def line(x1):
        return slope * x1 + intercept
    return line
def fit_thd(mol_data, lvl=0.):
    """Linear-interpolate the THD curve at level `lvl` using points within +/-1 of it."""
    levels = np.array(mol_data['lvl'])
    thds = np.array(mol_data['thd'])
    window = (levels > lvl - 1) & (levels < lvl + 1)
    line = fit(levels[window], thds[window])
    return line(lvl)
def fit_mol(mol_data):
    """Linear-interpolate the level at THD = -30.46 using points in (-32, -28)."""
    thds = np.array(mol_data['thd'])
    levels = np.array(mol_data['lvl'])
    window = (thds > -32) & (thds < -28)
    line = fit(thds[window], levels[window])
    return line(-30.46)
# Convert the measurement JSON into a whitespace-separated datasheet file,
# one row per (bias value b, measurement dict m) pair.
data = json.load(open("test.json"))
out = open('datasheet.dat', 'w')
for b, m in data:
    # Columns: bias, bias in dB re 0.5, reference level, interpolated THD,
    # the four sensitivity readings, interpolated MOL, the two 'sol'
    # readings, and noise.
    print(
        b,
        20 * log10(b / 0.5),
        m['reflevel'],
        fit_thd(m['mol_data']),
        m['s01'],
        m['s63'],
        m['s10'],
        m['s16'],
        fit_mol(m['mol_data']),
        m['sol10'],
        m['sol16'],
        m['noise'],
        file=out)
out.close()
| andreas-schmidt/tapetool | json2ds.py | json2ds.py | py | 1,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.mean",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 13,
... |
23682916986 | import re
import pandas as pd
from bs4 import BeautifulSoup

# Parse scraped HTML fragments out of realtor.csv into proper columns.
# `DataFrame.from_csv` was deprecated and then removed from pandas;
# `read_csv` with index_col=0 is the documented replacement.
df = pd.read_csv("realtor.csv", sep="|", encoding="ISO-8859-1", index_col=0)
print(df.head())  # head is a method; printing the bound method was a bug
print("done")
dftemp = df
for i, (idx, ser) in enumerate(dftemp.iterrows()):
    # --- listing details from the metaHTML column ---
    html = ser["metaHTML"]
    bs = BeautifulSoup(html, "html.parser")  # explicit parser avoids a bs4 warning
    for li in bs.find_all("li"):
        # data-label looks like "prefix-colname"; keep the last segment.
        temp = li.get("data-label").split('-')
        colname = temp[-1]
        if colname not in df.columns:
            df[colname] = None
        val = li.find("span").text
        # .loc avoids chained assignment (df[col][idx] may write to a copy)
        df.loc[idx, colname] = val
    # --- broker name ---
    colname = "broker"
    html = ser["broker"]
    bs = BeautifulSoup(html, "html.parser")
    if colname not in df.columns:
        df[colname] = None
    df.loc[idx, colname] = re.sub("Brokered by", '', bs.text)
    # --- geo metadata from <meta itemprop=...> tags ---
    html = ser["geo"]
    bs = BeautifulSoup(html, "html.parser")
    for e in bs.find_all("meta"):
        colname = e.get("itemprop")
        if colname not in df.columns:
            df[colname] = None
        df.loc[idx, colname] = e.get("content")
    print(i)  # progress indicator
# Raw HTML columns are no longer needed.
del df["metaHTML"]
del df["geo"]
print(df.head())
df.to_csv("realtor2.csv", sep="|", quotechar='"', index=False)
print("done")
| jhmuller/real_estate | realtor2.py | realtor2.py | py | 1,198 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame.from_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.B... |
13395835031 | """
@author: gjorando
"""
import os
import importlib
from pypandoc import convert_file
from setuptools import setup, find_packages
def read(*tree):
    """
    Read a file from the setup.py location.
    """
    path = os.path.join(os.path.dirname(__file__), *tree)
    with open(path, encoding='utf-8') as handle:
        return handle.read()
def version(main_package):
"""
Read the version number from the __version__ variable in the main
package __init__ file.
"""
package = "{}.__init__".format(main_package)
init_module = importlib.import_module(package)
try:
return init_module.__version__
except AttributeError:
raise RuntimeError("No version string found in {}.".format(package))
def requirements(*tree):
    """
    Read the requirements list from a requirements.txt file.
    """
    lines = read(*tree).split("\n")
    return [line for line in lines if line != ""]
def long_description(*tree):
    """
    setup.py only supports .rst files for the package description. As a
    result, we need to convert README.md on the fly.
    """
    source_path = os.path.join(os.path.dirname(__file__), *tree)
    readme_rst = convert_file(source_path, 'rst')
    # Persist the converted text next to the markdown original.
    target_path = "{}.rst".format(os.path.splitext(source_path)[0])
    with open(target_path, "w") as handle:
        handle.write(readme_rst)
    return readme_rst
# Package metadata and build configuration; the README is converted to
# reST on the fly by long_description().
setup(
    name="neurartist",
    version=version("neurartist"),
    author="Guillaume Jorandon",
    description="Ready-to-use artistic deep learning algorithms",
    long_description=long_description("README.md"),
    url="https://github.com/gjorando/style-transfer",
    packages=find_packages(exclude=["tests"]),
    install_requires=requirements("requirements.txt"),
    entry_points={
        'console_scripts': ['neurartist=neurartist.cli:main']
    },
    python_requires='>=3',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Artistic Software',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ]
)
| gjorando/style-transfer | setup.py | setup.py | py | 2,277 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
... |
16009855981 | #!/usr/bin/python3
import numpy as np
from matplotlib import pyplot as plt
# Plot the data stored in HailStoneNum.txt and save the figure.
lx = []
ly = []
# Each line is "x,y"; collect the two columns as ints.
with open("HailStoneNum.txt", "r") as f:
    for line in f:
        ls = line.split(",")
        lx.append(int(ls[0]))
        ly.append(int(ls[1]))
x = np.array(lx)
y = np.array(ly)
plt.plot(x,y)
plt.savefig("HailStone.jpg")
| Ukuer/rasp-pi | DSA/HailStone/HailStoneCount.py | HailStoneCount.py | py | 322 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
26361613449 | from bme590_assignment02.ECG_Class import ECG_Class
from flask import Flask, jsonify, request
import numpy as np
app = Flask(__name__)
count_requests = 0 # Global variable
@app.route('/heart_rate/summary', methods=['POST'])
def get_data_for_summary():
    """
    Summary endpoint: Accepts user data and returns instantaneous heart rate and brady tachy annotations
    :return: resp: (json) instantaneous heart rate and brady tachy annotations
    """
    # NOTE(review): module-level counter is not protected by a lock --
    # concurrent requests may under-count; confirm single-threaded serving.
    global count_requests
    count_requests += 1
    req = request.json # Retrieve external data
    data = check_and_parse_summary(req) # Validate the data and map to internal format
    out = calc_summary(data) # Process the data
    resp = jsonify(out) # Map internal data to external format
    return resp # Respond to client
def check_and_parse_summary(dictionary):
    """
    Validate the user input data and map it to the internal tuple format.

    Accepts any of the key spellings 'time'/'t'/'T'/'Time' and
    'voltage'/'v'/'V'/'Voltage'.

    :param dictionary: (dict) User data (time and voltage)
    :return: dat: (tuple) (time ndarray, voltage ndarray) on success,
        otherwise the (json, status) pair produced by send_error
    """
    # Locate the time data. NOTE: dict subscripting raises KeyError, not
    # ValueError, so the original try/except ValueError fallback chain
    # could never fall through -- iterate the accepted keys instead.
    d1 = None
    for key in ('time', 't', 'T', 'Time'):
        if key in dictionary:
            d1 = dictionary[key]
            break
    if d1 is None:
        return send_error('Dictionary does not contain valid time data', 400)
    # Locate the voltage data the same way.
    d2 = None
    for key in ('voltage', 'v', 'V', 'Voltage'):
        if key in dictionary:
            d2 = dictionary[key]
            break
    if d2 is None:
        return send_error('Dictionary does not contain valid voltage data', 400)
    # Build 1-D arrays. The original wrapped each list in an extra list,
    # giving shape (1, n) arrays whose len() is always 1, which broke
    # every length check below (and differs from check_and_parse_average).
    dat = (np.array(d1), np.array(d2))
    # The filter needs a minimum number of samples. The original tested
    # len(dat[0] < 27) -- the length of a boolean array -- which was
    # always truthy, so every request was rejected.
    if len(dat[0]) < 27:
        return send_error('The data needs to have at least 27 points to be properly filtered', 400)
    if len(dat[0]) != len(dat[1]):
        return send_error('Time and voltage arrays must have same number of elements', 400)
    # Check that data isn't entirely negative
    if np.all(dat[1] < 0):
        return send_error('Data is entirely negative', 400)
    return dat
def calc_summary(dat):
    """
    This calculates the instantaneous heart rate and brady tachy annotations
    :param: dat: (tuple) User data (time and voltage)
    :return: output: (dict) Contains time, instantaneous HR, and brady tachy cardia annotations
    """
    #try:
    ecg_object = ECG_Class(dat)
    #except: # this should be made much more specific
    # return send_error('stop giving me bad data dummy', 400)
    # Instantaneous heart rate plus per-beat tachy/brady annotations.
    hr = ecg_object.instHR
    ta = ecg_object.tachy('inst')
    ba = ecg_object.brady('inst')
    # NOTE(review): dat[0] is an ndarray; jsonify may need .tolist() here
    # like hr does -- verify against the summary endpoint's response.
    output = {'time': dat[0],
              'instantaneous_heart_rate': hr.tolist(),
              'tachycardia_annotations': ta,
              'bradycardia_annotations': ba
              }
    return output
@app.route('/heart_rate/average', methods=['POST'])
def get_data_for_average():
"""
Average endpoint: Accepts user data and returns average heart rate and brady tachy annotations
:return: resp: (json) average heart rate and brady tachy annotations
"""
global count_requests
count_requests += 1
req = request.json # Retrieve external data
dat, ap = check_and_parse_average(req) # Validate the data and map to internal format
out = calc_average_summary(dat, ap) # Process the data
resp = jsonify(out) # Map internal data to external format
return resp # Respond to client
def check_and_parse_average(dictionary):
    """
    Validate the user input data and map it to the internal format.

    Accepts any of the key spellings 'time'/'t'/'T'/'Time' and
    'voltage'/'v'/'V'/'Voltage'; 'averaging_period' is required verbatim.

    :param dictionary: (dict) User data (time, voltage, averaging_period)
    :return: ((time ndarray, voltage ndarray), averaging_period) on success,
        otherwise the (json, status) pair produced by send_error
    """
    # Locate time data. NOTE: dict subscripting raises KeyError, not
    # ValueError, so the original try/except ValueError fallback chain
    # could never fall through -- iterate the accepted keys instead.
    d1 = None
    for key in ('time', 't', 'T', 'Time'):
        if key in dictionary:
            d1 = dictionary[key]
            break
    if d1 is None:
        return send_error('Dictionary does not contain valid time data', 400)
    # Locate voltage data the same way.
    d2 = None
    for key in ('voltage', 'v', 'V', 'Voltage'):
        if key in dictionary:
            d2 = dictionary[key]
            break
    if d2 is None:
        return send_error('Dictionary does not contain valid voltage data', 400)
    if 'averaging_period' in dictionary:
        ap = dictionary['averaging_period']
    else:
        return send_error('Dictionary does not contain valid averaging_period data', 400)
    dat = (np.array(d1), np.array(d2))
    # Check that time and voltage data have same number of elements
    if len(dat[0]) != len(dat[1]):
        return send_error('Time and voltage arrays must have same number of elements', 400)
    # Check that there is enough data for averaging during the specified averaging period
    if dat[0][-1] < ap:
        return send_error('Not enough data for averaging', 400)
    # Check that data isn't entirely negative
    if np.all(dat[1] < 0):
        return send_error('Data is entirely negative', 400)
    # The caller unpacks two values; the original returned only `dat`,
    # which silently bound the voltage array to the averaging period.
    return dat, ap
def calc_average_summary(dat, avg_secs):
    """
    Compute the average heart rate and brady/tachy annotations per bin.

    :param dat: (tuple) User data (time and voltage)
    :param avg_secs: (int) Number of seconds to average over (bin size)
    :return: output: (dict) Contains the time interval, averaging period,
    average heart rate, and brady and tachy diagnoses
    """
    ecg_object = ECG_Class(dat, avg_secs)
    ahr = ecg_object.avg()
    ta = ecg_object.tachy('avg')
    ba = ecg_object.brady('avg')
    # NOTE(review): dat[0] is an ndarray; jsonify may need .tolist() -- verify.
    output = {'time_interval': dat[0],
              'averaging_period': avg_secs,
              'average_heart_rate': ahr,
              'tachycardia_annotations': ta,
              'bradycardia_annotations': ba
              }
    return output
@app.route('/heart_rate/requests', methods=['GET'])
def requests():
    """
    Returns the number of requests made to the server since its last reboot
    :return: resp: (int) The number of requests
    """
    # This GET counts itself as a request before reporting the total.
    global count_requests
    count_requests += 1
    resp = jsonify(count_requests)
    return resp
def send_error(message, code):
    """Return a JSON error payload together with the given HTTP status code."""
    return jsonify({"error": message}), code
| juliaross20/cloud_ecg | api_codes.py | api_codes.py | py | 6,928 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
... |
13289609625 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 00:48:47 2021
@author: baris
"""
import pandas as pd
import math
import numpy as np
import xlsxwriter
xlxs_file = pd.read_excel("example.xlsx")
# All columns have separeted into a list on their own.
parsed_store = xlxs_file["store"].tolist()
parsed_x = xlxs_file["x"].tolist()
parsed_y = xlxs_file["y"].tolist()
parsed_demand = xlxs_file["demand"].tolist()
result = np.zeros((len(parsed_x) + 5, len(parsed_y)))
for i in range(len(parsed_store)):
for j in range(len(parsed_store)):
distance = math.sqrt((parsed_x[i] - parsed_x[j])**2 + (parsed_y[i] - parsed_y[j])**2)
result[i][j] = distance*parsed_demand[i]
for i in range(len(parsed_store)):
sum = .0
for j in range(len(parsed_store)):
sum += result[j][i]
result[-5][i] = sum
result[-4] = sorted(result[-5])
result[-3] = np.argsort(result[-5]) + 1
min1, min2 = int(result[-3][0] - 1), int(result[-3][1] - 1)
sum1 = 0
sum2 = 0
sum1_m = .0
sum2_m = .0
for i in range(len(parsed_store)):
tmp1 = result[i][min1]
tmp2 = result[i][min2]
if tmp1 < tmp2:
result[-2][i] = min1 + 1
sum1 += 1
sum1_m += tmp1
else:
result[-2][i] = min2 + 1
sum2 += 1
sum2_m += tmp2
result[-1][0] = sum1
result[-1][1] = sum2
result[-1][2] = sum1_m
result[-1][3] = sum2_m
workbook = xlsxwriter.Workbook('result.xlsx')
worksheet = workbook.add_worksheet()
row = 0
column = 0
for module in result :
worksheet.write_row(row, column, module)
row += 1
workbook.close()
| barissoyer/FunProjects | X-Yl_Location based/xy_locations.py | xy_locations.py | py | 1,579 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_num... |
20496563572 | from typing import List
from instructor import patch
from pydantic import BaseModel, Field
import openai
patch()
# One key/value attribute of an extracted entity. A docstring is avoided
# on purpose: pydantic/instructor feed model docstrings into the schema
# sent to the LLM, so only # comments are safe here.
class Property(BaseModel):
    key: str
    value: str
    # The value with any references/relative expressions resolved.
    resolved_absolute_value: str
# An entity extracted from the document; the Field descriptions below are
# part of the schema the LLM sees and must not be reworded casually.
class Entity(BaseModel):
    id: int = Field(
        ...,
        description="Unique identifier for the entity, used for deduplication, design a scheme allows multiple entities",
    )
    subquote_string: List[str] = Field(
        ...,
        description="Correctly resolved value of the entity, if the entity is a reference to another entity, this should be the id of the referenced entity, include a few more words before and after the value to allow for some context to be used in the resolution",
    )
    entity_title: str
    properties: List[Property] = Field(
        ..., description="List of properties of the entity"
    )
    dependencies: List[int] = Field(
        ...,
        description="List of entity ids that this entity depends or relies on to resolve it",
    )
# Top-level response model: the full list of resolved entities.
class DocumentExtraction(BaseModel):
    entities: List[Entity] = Field(
        ...,
        description="Body of the answer, each fact should be its seperate object with a body and a list of sources",
    )
def ask_ai(content) -> DocumentExtraction:
    """Extract and resolve entities from *content* via an instructor-patched
    OpenAI chat completion (the response is parsed into DocumentExtraction
    through the `response_model` argument that patch() adds)."""
    resp: DocumentExtraction = openai.ChatCompletion.create(
        model="gpt-4",
        response_model=DocumentExtraction,
        messages=[
            {
                "role": "system",
                "content": "You are a perfect entity resolution system that extracts facts from the document. Extract and resolve a list of entities from the following document:",
            },
            {
                "role": "user",
                "content": content,
            },
        ],
    )  # type: ignore
    return resp
content = """
Sample Legal Contract
Agreement Contract
This Agreement is made and entered into on 2020-01-01 by and between Company A ("the Client") and Company B ("the Service Provider").
Article 1: Scope of Work
The Service Provider will deliver the software product to the Client 30 days after the agreement date.
Article 2: Payment Terms
The total payment for the service is $50,000.
An initial payment of $10,000 will be made within 7 days of the the signed date.
The final payment will be due 45 days after [SignDate].
Article 3: Confidentiality
The parties agree not to disclose any confidential information received from the other party for 3 months after the final payment date.
Article 4: Termination
The contract can be terminated with a 30-day notice, unless there are outstanding obligations that must be fulfilled after the [DeliveryDate].
"""
model = ask_ai(content)
print(model.model_dump_json(indent=2))
"""
{
"entities": [
{
"id": 1,
"subquote_string": [
"This Agreement is made and entered into on 2020-01-01 by and between Company A (\"the Client\") and Company B (\"the Service Provider\")."
],
"entity_title": "Agreement between Company A and Company B",
"properties": [
{
"key": "Date",
"value": "2020-01-01",
"resolved_absolute_value": "2020-01-01"
},
{
"key": "Party 1",
"value": "Company A",
"resolved_absolute_value": "Company A"
},
{
"key": "Party 2",
"value": "Company B",
"resolved_absolute_value": "Company B"
}
],
"dependencies": []
},
{
"id": 2,
"subquote_string": [
"The Service Provider will deliver the software product to the Client 30 days after the agreement date."
],
"entity_title": "Scope of Work",
"properties": [
{
"key": "Delivery Date",
"value": "30 days after the agreement date",
"resolved_absolute_value": "2020-01-31"
}
],
"dependencies": [
1
]
},
{
"id": 3,
"subquote_string": [
"The total payment for the service is $50,000.",
"An initial payment of $10,000 will be made within 7 days of the the signed date.",
"The final payment will be due 45 days after [SignDate]."
],
"entity_title": "Payment Terms",
"properties": [
{
"key": "Total Payment",
"value": "$50,000",
"resolved_absolute_value": "50000"
},
{
"key": "Initial Payment",
"value": "$10,000",
"resolved_absolute_value": "10000"
},
{
"key": "Final Payment Due Date",
"value": "45 days after [SignDate]",
"resolved_absolute_value": "2020-02-15"
}
],
"dependencies": [
1
]
},
{
"id": 4,
"subquote_string": [
"The parties agree not to disclose any confidential information received from the other party for 3 months after the final payment date."
],
"entity_title": "Confidentiality Terms",
"properties": [
{
"key": "Confidentiality Duration",
"value": "3 months after the final payment date",
"resolved_absolute_value": "2020-05-15"
}
],
"dependencies": [
3
]
},
{
"id": 5,
"subquote_string": [
"The contract can be terminated with a 30-day notice, unless there are outstanding obligations that must be fulfilled after the [DeliveryDate]."
],
"entity_title": "Termination",
"properties": [
{
"key": "Termination Notice",
"value": "30-day",
"resolved_absolute_value": "30 days"
}
],
"dependencies": [
2
]
}
]
}
"""
| realsrisri/jxnl-instructor | examples/reference-citation/run.py | run.py | py | 5,705 | python | en | code | null | github-code | 36 | [
{
"api_name": "instructor.patch",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pydantic.Field"... |
16198615974 | # Climate App
# Now that you have completed your initial analysis, design a Flask api based on the queries that you have just developed.
# - Use FLASK to create your routes.
#################################################
# Import Flask & jsonify & the kitchen sink...
#################################################
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the tables
Station = Base.classes.station
Measurement = Base.classes.measurement
# Create our session (link) from Python to the DB
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """List all available api routes."""
    # Plain HTML landing page linking to every endpoint.
    return (
        "<h1>HW 11 Surf Is Up!<h1/>"
        "<br/>"
        "<h2>Available APIs<h2/>"
        "<li><a href ='/api/v1.0/precipitation'>Precipitation</a></li>"
        "<li><a href ='/api/v1.0/stations'>Stations</a></li>"
        "<li><a href ='/api/v1.0/tobs'>Temps observed</a></li>"
        "<li><a href = '/api/v1.0/start_end'>Calculated Temps</a></li>"
    )
#################################################
# /api/v1.0/precipitation
#################################################
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return daily total precipitation for dates after 2016-08-22 as JSON."""
    yearago_date = dt.date(2016, 8 , 22)
    # select(station, date, prcp) from measurement
    # where date >= yearago_date
    prcp_in_last_year = session.query(Measurement.date, func.sum(Measurement.prcp)).\
        filter(Measurement.date > yearago_date).group_by(Measurement.date).all()
    # NOTE(review): this wraps all rows in a single-element outer list --
    # confirm consumers expect that shape rather than the rows directly.
    prcp_list = [prcp_in_last_year]
    return jsonify(prcp_list)
#################################################
# /api/v1.0/stations
#################################################
@app.route("/api/v1.0/stations")
def stations():
    """Return every station's name, id, and elevation as a JSON list."""
    all_stations = session.query(Station.name, Station.station, Station.elevation).all()
    station_list = []
    # Re-shape each row tuple into a labelled dict.
    for a_station in all_stations:
        row = {}
        row['elevation'] = a_station[2]
        row['station'] = a_station[1]
        row['name'] = a_station[0]
        station_list.append(row)
    return jsonify(station_list)
#################################################
# /api/v1.0/tobs
# - Return a json list of Temperature Observations (tobs) for the previous year
#################################################
@app.route("/api/v1.0/tobs")
def temp_obs():
    """Return temperature observations for dates after 2016-08-22 as JSON."""
    yearago_date = dt.date(2016, 8 , 22)
    # NOTE(review): Station and Measurement are queried together with no
    # explicit join condition -- verify this doesn't produce a cross join.
    temps = session.query(Station.name, Measurement.date, Measurement.tobs).\
        filter(Measurement.date > yearago_date).all()
    tobs_list = []
    for temp in temps:
        t = {}
        t["Station"] = temp[0]
        t["Date"] = temp[1]
        t["Temperature"] = int(temp[2])
        tobs_list.append(t)
    return jsonify(tobs_list)
#################################################
#
# - /api/v1.0/<start> and /api/v1.0/<start>/<end>
# - Return a json list of the minimum temperature, the average temperature, and the max temperature
# for a given start or start-end range.
# - When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal
# to the start date.
# - When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the
# start and end date inclusive.
#
# Hints
# - You will need to join the station and measurement tables for some of the analysis queries.
# - Use Flask jsonify to convert your api data into a valid json response object.
#################################################
@app.route("/api/v1.0/start_end")
def calc_temps():
    """Return min/avg/max temperature over a hard-coded date range as HTML.

    NOTE(review): the route name suggests user-supplied start/end dates,
    but the range below is fixed in code -- confirm this is intentional.
    """
    sy = 2017 # start year
    sm = 7 # start month
    sd = 1 # start day
    ey = 2017 # end year
    em = 7 # end month
    ed = 11 # end day
    # Convert dates to "year - 1" dates
    start_date = dt.date(sy, sm, sd)
    end_date = dt.date(ey, em, ed)
    temp_info = session.query(Measurement.tobs).filter(Measurement.date >= start_date, Measurement.date <= end_date).all()
    # Each row is a 1-tuple; pull out the bare temperature values.
    temperatures = [temperature[0] for temperature in temp_info]
    # Get the minimum temp
    temp_min = min(temperatures)
    # Get the maximum temp
    temp_max = max(temperatures)
    # Get the average temp
    temp_avg = np.mean(temperatures)
    date_results = 'Start date: ' + str(start_date) + '</br>' + 'End date: ' + str(end_date) + '</br>'
    minmax_results = 'Min temp: ' + str(temp_min) + '</br>' + 'Avg temp: ' + str(temp_avg) +'</br>' + 'Max temp: ' + str(temp_max)
    temp_results = date_results + minmax_results
    return(temp_results)
#################################################
# Define Main behavior
#################################################
if __name__ == '__main__':
app.run(debug=True)
| JREwan/python-challenge | Homework11_SurfsUp/app.py | app.py | py | 5,386 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 29,
"usage_type": "call"
},
{
... |
73694485864 | import requests
from bs4 import BeautifulSoup
from bs4.element import ResultSet
import json
from telprefix.path import JSON_DATA_PATH
def getHTMLText(result: "ResultSet | None") -> "str | None":
    """Return the stripped text of a BeautifulSoup node, or None.

    Fixes from the original: the return annotation claimed ``str`` although
    None is passed through unchanged, and the annotations are now quoted so
    they are not evaluated at definition time (the eager ``ResultSet | None``
    required Python 3.10+ and the bs4 import to succeed).
    """
    if result is None:
        return None
    return result.text.strip()
# URL Artikel
# Sumber: https://www.pinhome.id
URL = "https://www.pinhome.id/blog/kode-nomor-prefix/"
class TelPrefixScrap():
    """Scraper that turns the prefix tables in the pinhome.id article into
    a dict keyed by phone prefix."""
    def __init__(self) -> None:
        # Accumulated results: {prefix: {"provider", "jenis", "keterangan"}}.
        self.data = {}
        self.URL = URL
    def request(self) -> BeautifulSoup:
        """Fetch the article and return its parsed HTML."""
        req = requests.get(URL)
        reqParse = BeautifulSoup(req.text, "html.parser")
        return reqParse
    def parse(self):
        """Walk every <table> row and populate self.data.

        Tables come in two layouts: 4 columns (prefix, jenis, keterangan,
        provider) or 3 columns (prefix, keterangan, provider), detected
        from the header row's cell count.
        """
        reqParse = self.request()
        tables = reqParse.find_all("table")
        currentTable = 0
        for index, table in enumerate(tables):
            tableRow = table.find_all("tr")
            for row in tableRow[1:]:
                # Extract table rows data
                prefix = row.find_all("td")[0]
                tableRowJudul = tableRow[0]
                if( len(tableRowJudul.find_all("td")) == 4 ):
                    jenis = row.find_all("td")[1]
                    keterangan = row.find_all("td")[2]
                    provider = row.find_all("td")[3]
                else:
                    jenis = None
                    keterangan = row.find_all("td")[1]
                    provider = row.find_all("td")[2]
                # Get & strip text
                prefix = getHTMLText(prefix)
                provider = getHTMLText(provider)
                jenis = getHTMLText(jenis)
                keterangan = getHTMLText(keterangan)
                # Tidy up: an empty provider cell inherits the provider of
                # the previous row, but only within the same table.
                if provider == "":
                    if index == currentTable:
                        provider = self.data[list(self.data.keys())[-1]]["provider"]
                    else:
                        provider = None
                # "X atau Y" / "X dan Y" cells become a list of values.
                if jenis is not None:
                    if "atau" in jenis:
                        jenis = jenis.split(" atau ")
                    elif "dan" in jenis:
                        jenis = jenis.split(" dan ")
                self.data[prefix] = {
                    "provider": provider,
                    "jenis": jenis,
                    "keterangan": keterangan
                }
            currentTable += 1
        return self.data
    def save(self):
        """Write the accumulated data as pretty-printed JSON."""
        with open(JSON_DATA_PATH, "w") as file:
            json.dump(self.data, file, indent=4)
    def scrap(self):
        """Parse the article, persist the result, and return the data dict."""
        parse = self.parse()
        save = self.save()
        return parse
| manoedinata/telprefix | telprefix/scrap.py | scrap.py | py | 2,569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.element.ResultSet",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup... |
10070249997 | from django.shortcuts import render
from.models import friends
# Create your views here.
def showindex(request):
    """Render the index page; with ?update_id=<id> render it for that entry."""
    id=request.GET.get("update_id")
    if id==None:
        res=friends.objects.all()
        return render(request,"index.html",{"res":res})
    else:
        # NOTE(review): .update() with no fields is a no-op and returns the
        # number of matched rows -- confirm an actual update was intended.
        id1=friends.objects.filter(entry=id).update()
        print(id1)
        return render(request,"index.html",{"id":id})
def displaydetails(request):
    """Save a new expense entry (amount split evenly among members) and
    re-render the index with all entries."""
    entry= request.POST.get("eno")
    date= request.POST.get("date")
    amount= request.POST.get("amt")
    members= request.POST.getlist("t1")
    # Members are stored as one comma-separated string.
    i=(", ".join(members))
    t=len(members)
    # Per-member share; raises ZeroDivisionError if no member was checked.
    t1=int(amount)/t
    fr=friends(entry,date,amount,i,t1)
    fr.save()
    res=friends.objects.all()
    # NOTE(review): d1 is never used -- dead code.
    d1={"msg":"datasaved"}
    return render(request,"index.html",{"res":res})
def deletedetails(request):
id=request.POST.get("delete_id")
friends.objects.filter(entry=id).delete()
res=friends.objects.all()
return render(request,"index.html",{"res":res}) | prasadnaidu1/django | sisco1/app1/views.py | views.py | py | 993 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.friends.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.friends.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.friends",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dj... |
42629364234 | #!/usr/bin/env python
# coding=utf-8
import torch
import torchvision.models as models
#resnet169 = models.densenet169(pretrained=True).cuda()
# Load a pretrained Inception-v3 on the GPU and export it to ONNX.
inception_v3 = models.inception_v3(pretrained=True).cuda()
# NOTE(review): Inception-v3 is usually fed 299x299 inputs -- confirm the
# 224x224 dummy shape is intended for this export.
dummy_input = torch.randn(1, 3, 224, 224, device='cuda')
input_names = ['data']
output_names = ['outputs']
torch.onnx.export(inception_v3, dummy_input, f='inception_v3.onnx', verbose=True, input_names=input_names,
                  output_names=output_names, opset_version=10) # generate onnx model of 244M
| YixinSong-e/onnx-tvm | torchmodel/torch_model.py | torch_model.py | py | 528 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchvision.models.inception_v3",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.randn",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.onn... |
21536939801 | import cv2
import numpy as np
import os
import random
import torch
from tqdm import tqdm
def draw(prediction,dependency):
    """Render an 81-cell sudoku prediction as a 256x256 BGR image.

    prediction: 81 cell codes; 0 = empty, 1-9/10-18/19-27 select digit
    (c-1)%9+1 drawn in black/red/green respectively (see `color` below).
    dependency: cell indices to highlight with a blue-ish background.
    Cells are 26px with a 10px margin.
    """
    # Light-grey canvas.
    img=np.full((256,256,3),220,dtype=np.uint8)
    for i,c in enumerate(prediction):
        # Cells that are empty or outside the first digit band get a
        # background: white normally, highlighted if in `dependency`.
        if c==0 or c>9:
            if i not in dependency:
                cv2.rectangle(img,(10+i%9*26, 10+i//9*26),(36+i%9*26, 36+i//9*26),(255,255,255),-1)
            else:
                cv2.rectangle(img, (10 + i % 9 * 26, 10 + i // 9 * 26), (36 + i % 9 * 26, 36 + i // 9 * 26),
                              (255, 180, 180), -1)
        if c>0:
            txt=str((c-1)%9+1)
            # Colour encodes which band of 9 the code falls in.
            color=[(0,0,0),(0,0,255),(0,120,0)][(c-1)//9]
            cv2.putText(img, txt, (18+i%9*26, 30+i//9*26), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2, cv2.LINE_AA)
    # Grid lines; every third line is drawn thicker (3x3 boxes).
    for i in range(10):
        cv2.line(img,(10+26*i,10),(10+26*i,245),(0,0,0),2 if i%3==0 else 1)
    for i in range(10):
        cv2.line(img,(10,10+26*i),(245,10+26*i),(0,0,0),2 if i%3==0 else 1)
    return img
if __name__=='__main__':
import datasets
path='output/bart_base_sudoku_bs64'
os.makedirs(os.path.join(path,'pic4'),exist_ok=True)
gt=datasets.load_dataset(path='csv',
data_files={
k: os.path.join('data/sudoku',f'sudoku_{k}.csv') for k in ['test']})["test"][96339]
src = np.int64(list(gt['quizzes']))
tgt = np.int64(list(gt['solutions']))
tgt[src == 0] += 18
preds=[]
atts=[]
for casstep in range(5):
pred=torch.load(os.path.join(path,f'cas_{casstep}/cas_test_generation.pk'))[96339]
pd = pred - 3
pd[(src == 0) & (pd + 9 == tgt)] += 9
preds.append(pd)
if casstep==0:
atts.append(None)
else:
atts.append(torch.load(os.path.join(path,f'cas_{casstep}/cas_test_generation.pk.96339.att')))
for i in range(1,5):
os.makedirs(os.path.join(path, 'pic4',str(i)), exist_ok=True)
mask=np.where((preds[i-1]!=preds[i])&(preds[i]==tgt))[0]
for x in mask:
row_att=atts[i][x]
row_att[src != 0]=0
row_att[x]=0
dependency=np.argsort(row_att)[-5:]
img_pd = draw(preds[i-1],dependency)
cv2.imwrite(os.path.join(path, 'pic4', str(i), '%d-%d.png'%(x//9,x%9)), img_pd)
| RalphHan/CASR | empirical/sudoku2.py | sudoku2.py | py | 2,295 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.full",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.rectangle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_num... |
7615554968 | # -*- coding: utf-8 -*-
import codecs
import sys
import re
import h5py
import numpy as np
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell, GRUCell
from tflearn.layers.recurrent import lstm
from tflearn.layers.estimator import regression
from tflearn.optimizers import *
from multiprocessing import cpu_count, freeze_support
from multiprocessing.pool import Pool
from make_data import make_data, make_data_divided, norm_many
from util import read_text_lines, refine_line
def bi_LSTM():
    """Build the 2-class bidirectional-LSTM classifier graph (tflearn).

    Expects integer sequences of length 440 over a 20000-token vocabulary.
    """
    # Network building
    net = input_data(shape=[None, 440])
    net = embedding(net, input_dim=20000, output_dim=128)
    net = dropout(net, 0.9)
    net = bidirectional_rnn(net,
                            BasicLSTMCell(128, forget_bias=1.),
                            BasicLSTMCell(128, forget_bias=1.))
    net = dropout(net, 0.7)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
    return net
def train(trainX, trainY, model_file):
    """Train the bi-LSTM on the full dataset in one call and save the model.

    trainX: iterable of index sequences (padded here to length 440).
    trainY: iterable of 0/1 labels (one-hot encoded here).
    model_file: path where the trained tflearn model is saved.
    """
    print('# Data preprocessing')
    trainX = pad_sequences(trainX, maxlen=440, value=0.)
    trainY = to_categorical(trainY, nb_classes=2)
    print('build network')
    net = bi_LSTM()
    print('# Training')
    '''
    tensorboard_verbose:
    0: Loss, Accuracy (Best Speed)
    1: Loss, Accuracy + Gradients
    2: Loss, Accuracy, Gradients, Weights
    3: Loss, Accuracy, Gradients, Weights, Activations, Sparsity (Best Visualization)
    '''
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0,
                        checkpoint_path='./chkpoint_mdm001/',
                        best_checkpoint_path='./best_chkpoint_mdm001/',
                        best_val_accuracy=0.9)
    print('tfl.DNN end.')
    # 10% of the data is held out for validation.
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128,
              n_epoch=4, run_id='bilstm_170519b')
    print('model.fit end.')
    # Save model
    model.save(model_file)
    print('model save end.')
class Trainer():
    """Stateful trainer for chunked (divided) training.

    Builds the network once; `train` can then be called repeatedly with
    successive data slices, each pass fitting one epoch on that slice.
    """
    def __init__(self):
        print('train_diviced')
        print('# Network building')
        self.net = bi_LSTM()
        self.model = tflearn.DNN(self.net, clip_gradients=0., tensorboard_verbose=0,
                                 checkpoint_path='./chkpoint_mdm001/',
                                 best_checkpoint_path='./best_chkpoint_mdm001/',
                                 best_val_accuracy=0.9)
        print('tfl.DNN end.')
        # self.i counts how many chunks have been fitted (for log messages only).
        self.i = 0
    def train(self, trainX, trainY):
        """Fit one epoch on a single data chunk (pads and one-hot encodes it)."""
        print('# Data preprocessing')
        trainX = pad_sequences(trainX, maxlen=440, value=0.)
        trainY = to_categorical(trainY, nb_classes=2)
        print('data preproc end.')
        self.model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128,
                       n_epoch=1, run_id='bilstm_170524mdm001')
        print('model.fit #{} end'.format(self.i))
        self.i += 1
    def save(self, model_file):
        """Persist the trained model to model_file."""
        self.model.save(model_file)
        print('model save end.')
def interference(testX, testY, model_file):
    """Load a trained model, predict labels for testX, dump them to test_result.txt.

    NOTE: the name is a historical misspelling of 'inference'; it is kept
    because callers elsewhere use it.

    testX: iterable of index sequences (padded here to length 440).
    testY: labels (one-hot encoded; not otherwise used by prediction).
    model_file: path of the saved tflearn model to load.
    """
    print('interference')
    print('# Data preprocessing')
    testX = pad_sequences(testX, maxlen=440, value=0.)
    testY = to_categorical(testY, nb_classes=2)
    print('# Network building')
    net = bi_LSTM()
    print('# Load model')
    model = tflearn.DNN(net)
    model.load(model_file)
    if not model:
        print('model not loaded')
        sys.exit(1)
    else:
        print('model load.')
    print('# Predict')
    pred = model.predict(testX)
    new_y = np.argmax(pred, axis=1)
    result = new_y.astype(np.uint8)
    print('predict end.')
    # BUG FIX: str(ndarray) elides middle elements with '...' for large arrays,
    # so the previous `str(result)` silently lost most predictions in the dump.
    # Serialize every label explicitly instead.
    result = ''.join(str(int(label)) for label in result)
    print('pred to str.')
    with codecs.open('test_result.txt', 'w', encoding='utf-8') as wfh:
        wfh.write(result)
    print('end.')
class Tagger():
    """Loads a trained model once and tags data chunks on demand.

    Used by the divided test path so the model is not reloaded per chunk.
    """
    def __init__(self, model_file):
        print('interference_divided')
        print('# Network building')
        self.net = bi_LSTM()
        print('# Load model')
        self.model = tflearn.DNN(self.net)
        self.model.load(model_file)
        if not self.model:
            print('model not loaded')
            sys.exit(1)
        else:
            print('model load.')
    def interference(self, testX):
        """Predict per-character labels for one chunk; returns a generator of ints (0/1)."""
        print('# Data preprocessing')
        testX = pad_sequences(testX, maxlen=440, value=0.)
        print('# Predict')
        pred = self.model.predict(testX)
        new_y = np.argmax(pred, axis=1)
        result = (int(y) for y in new_y.astype(np.uint8))
        return result
def run_train(train_file):
    """Full-batch training entry point: build features from train_file and fit."""
    print('train')
    worker_pool = Pool(processes=cpu_count())
    features, labels = make_data(worker_pool, train_file)
    print('make train data end.')
    features = norm_many(worker_pool, features)
    print('norm_data end.')
    train(features, labels, 'model_MDM001.tfl')
def run_train_divided(train_file):
    """Chunked training entry point: feed the Trainer one data slice at a time."""
    print('train')
    worker_pool = Pool(processes=cpu_count())
    trainer = Trainer()
    n_epochs = 4
    for epoch in range(n_epochs):
        for chunk_x, chunk_y in make_data_divided(worker_pool, train_file):
            print('epoch: {}'.format(epoch))
            trainer.train(chunk_x, chunk_y)
    trainer.save('model_MDM001.tfl')
def run_test():
    """Full-batch evaluation entry point against the fixed TED test file."""
    print('test')
    worker_pool = Pool(processes=cpu_count())
    features, labels = make_data(worker_pool, 'ted_7_ErasePunc_FullKorean__test.txt')
    print('make test data end.')
    features = norm_many(worker_pool, features)
    print('norm_data end.')
    interference(features, labels, 'model.tfl')
def run_test_divided(test_file):
    """Generator: tag test_file chunk by chunk, yielding one '0'/'1' label string per chunk."""
    print('test')
    pool = Pool(processes=cpu_count())
    tagger = Tagger('model.tfl')
    for X, _ in make_data_divided(pool, test_file):
        y = (str(r) for r in tagger.interference(X))
        # y arrives for the whole chunk at once, with no sentence boundaries,
        # so the caller must split it by the character count of each sentence
        # in X and then restore the original text from the labels.
        yield ''.join(y)
def main():
    """CLI dispatcher: `train` fits the model, `test` writes spaced output, `make` builds an HDF5 dataset."""
    if len(sys.argv) < 2:
        print('usage: bi_lstm.py (train|test|make)')
        sys.exit(1)
    if sys.argv[1] == 'train':
        train_file = 'MDM001_FullKorean__train.txt'
        #run_train(train_file)
        run_train_divided(train_file)
    elif sys.argv[1] == 'test':
        test_file = 'ted_7_ErasePunc_FullKorean__test.txt'
        lines = read_text_lines(test_file)
        lines = (refine_line(line) for line in lines)
        # Strip all whitespace so each line is a bare character sequence.
        lines = [re.sub(r'[\ \n\r]+', '', line).strip() for line in lines]
        i = 0
        with codecs.open('ted_test_result.txt', 'w', encoding='utf-8') as wfh:
            for Y in run_test_divided(test_file):
                # Merge while checking the lengths of Y and lines; alternatively,
                # since Y comes in ~10000-line processed chunks, one could read
                # 10000 lines at a time and compare.
                y_pos = 0
                buf = []
                while True:
                    '''
                    Y가 있는 만큼만 line을 진행시켜서 해보기
                    '''
                    # Advance through `lines` only as far as this chunk's Y covers.
                    line = lines[i]
                    result = ''
                    # Slice the label string to this sentence's character count.
                    line_y = Y[y_pos:y_pos+len(line)]
                    for ch, y in zip(line, line_y):
                        if y == '1':
                            # Label '1' means "insert a space before this character".
                            result += ' ' + ch
                        else:
                            result += ch
                    buf.append(result.strip())
                    y_pos += len(line)
                    i += 1
                    if y_pos >= len(Y):
                        break
                wfh.write('\n'.join(buf) + '\n')
    elif sys.argv[1] == 'make':
        make_file = 'MDM001_FullKorean__train.txt'
        lines = read_text_lines(make_file)
        lines = (refine_line(line) for line in lines)
        lines = [re.sub(r'[\ \n\r]+', '', line).strip() for line in lines]
        i = 0
        pool = Pool(processes=cpu_count())
        X = []
        Y = []
        # Accumulate all chunks into two big arrays before writing the HDF5 file.
        for x, y in make_data_divided(pool, make_file):
            x = norm_many(pool, x)
            x = pad_sequences(x, maxlen=440, value=0.)
            if len(X) > 0:
                X = np.concatenate((X, x), axis=0)
            else:
                X = x
            print('{}) x'.format(i), end=', ')
            y = to_categorical(y, nb_classes=2)
            if len(Y) > 0:
                Y = np.concatenate((Y, y), axis=0)
            else:
                Y = y
            print('y')
            i += 1
        # TODO: rename the output file name and dataset names
        #h5f = h5py.File('ted_train.h5', 'w')
        #h5f.create_dataset('ted7_X', data=X)
        #h5f.create_dataset('ted7_Y', data=Y)
        h5f = h5py.File('ted_MDM001.h5', 'w')
        h5f.create_dataset('MDM001_X', data=X)
        h5f.create_dataset('MDM001_Y', data=Y)
        h5f.close()
    else:
        print('usage: bi_lstm.py (train|test|make)')
if __name__ == '__main__':
    print('hello')
    print(sys.argv[1])
    #input()
    # freeze_support() is required for multiprocessing on Windows frozen executables.
    freeze_support()
    main()
| kimwansu/autospacing_tf | bi_lstm.py | bi_lstm.py | py | 9,163 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tflearn.layers.core.input_data",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tflearn.layers.embedding_ops.embedding",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tflearn.layers.core.dropout",
"line_number": 31,
"usage_type": "cal... |
71696562344 | # @keras-rl
'''
Script for custom or modified noise processes
'''
from __future__ import division
import numpy as np
#makes an instance of a noise process and returns it
#defined by configuration nc
#size is the number of parameters the noise is applied to
#so far just one-dimensional vector (only action noise)
def getNoise(nc, size):
    """Instantiate the noise process described by configuration dict *nc*.

    nc['key'] selects the process ('GWN', 'OU' or 'OUAR'); the remaining
    entries override the process defaults. `size` is the dimensionality of
    the noise vector (one entry per action parameter).

    Raises AssertionError for an unknown key, or for the OU family when
    'theta' is missing.
    """
    dictionary = {
        'GWN' : GaussianWhiteNoiseProcess,
        'OU' : OrnsteinUhlenbeckProcess,
        'OUAR' : OUAnnealReset,
        #'AOU' : AlternatingOU
    }
    assert nc['key'] in dictionary, "noise process does not exist"
    # Annealed-Gaussian defaults shared by every supported process; previously
    # this extraction was copy-pasted into each branch.
    kwargs = {
        'mu': nc.get('mu', 0.),
        'sigma': nc.get('sigma', 1.),
        'sigma_min': nc.get('sigma_min', None),
        'n_steps_annealing': nc.get('n_steps_annealing', 1000),
        'size': size,
    }
    if nc['key'] == 'GWN':
        return GaussianWhiteNoiseProcess(**kwargs)
    # The OU-family processes additionally need theta and dt.
    assert 'theta' in nc
    kwargs['theta'] = nc['theta']
    kwargs['dt'] = nc.get('dt', 1e-2)
    if nc['key'] == 'OU':
        # x0 stays None so the process starts from the zero vector.
        #x0 = np.random.normal(mu,sigma,size) if 'x0' in nc else None
        kwargs['x0'] = None
        return OrnsteinUhlenbeckProcess(**kwargs)
    return OUAnnealReset(**kwargs)
#### From keras-rl: ####
#the following 4 classes
#https://github.com/keras-rl/keras-rl/blob/1e915aa1943086e3c75c6aaf51b84c6b649c2600/rl/random.py
class RandomProcess(object):
    """Base class for exploration-noise processes.

    Subclasses override reset_states() to re-initialize any internal state
    at episode boundaries; the default has no state to reset.
    """

    def reset_states(self):
        """No-op: stateless by default."""
        return None
class AnnealedGaussianProcess(RandomProcess):
    """Gaussian noise whose sigma decays linearly toward sigma_min.

    current_sigma follows sigma(t) = m * t + c, clipped below at sigma_min,
    where t is the number of samples drawn so far (n_steps).
    """

    def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
        self.mu = mu
        self.sigma = sigma
        self.n_steps = 0
        if sigma_min is None:
            # No annealing requested: keep sigma constant forever.
            self.m = 0.
            self.c = sigma
            self.sigma_min = sigma
        else:
            # Linear schedule reaching sigma_min after n_steps_annealing steps.
            self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
            self.c = sigma
            self.sigma_min = sigma_min

    @property
    def current_sigma(self):
        """Annealed standard deviation for the current step count."""
        return max(self.sigma_min, self.m * float(self.n_steps) + self.c)
class GaussianWhiteNoiseProcess(AnnealedGaussianProcess):
    """Temporally uncorrelated Gaussian noise with an annealed sigma."""

    def __init__(self, mu=0., sigma=1., sigma_min=None, n_steps_annealing=1000, size=1):
        super(GaussianWhiteNoiseProcess, self).__init__(
            mu=mu, sigma=sigma, sigma_min=sigma_min,
            n_steps_annealing=n_steps_annealing)
        self.size = size

    def sample(self):
        """Draw one noise vector and advance the annealing step counter."""
        draw = np.random.normal(self.mu, self.current_sigma, self.size)
        self.n_steps += 1
        return draw
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
    """Temporally correlated noise via the Euler-discretized OU SDE.

    x_{t+1} = x_t + theta*(mu - x_t)*dt + sigma*sqrt(dt)*N(0, 1),
    with sigma optionally annealed through AnnealedGaussianProcess.
    """
    def __init__(self, theta, mu=0., sigma=1., dt=1e-2, x0=None, size=1, sigma_min=None, n_steps_annealing=1000):
        super(OrnsteinUhlenbeckProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing)
        self.theta = theta
        self.mu = mu
        self.dt = dt
        self.x0 = x0
        self.size = size
        self.reset_states()
    def sample(self):
        """Advance the process one step and return the new noise vector."""
        x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
        self.x_prev = x
        self.n_steps += 1
        return x
    def reset_states(self):
        # Restart from x0 when given, otherwise from the zero vector.
        self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.size)
#### Own Noise Processes ####
#improves the keras-rl OU implementation by making the reset dependent on the standard deviation
class OUAnnealReset(OrnsteinUhlenbeckProcess):
    """OU process whose episode reset is scaled by the current annealed sigma."""
    def __init__(self,**kwargs):
        super(OUAnnealReset,self).__init__(**kwargs)
    def reset_states(self):
        # Unlike the base class (fixed x0 / zeros), restart from a random
        # draw N(mu, current_sigma) so exploration shrinks as sigma anneals.
        self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size)
#### Experimentals for fun ####
#Ornstein Uhlenbeck which resets the annealing sigma to the initial value
class AlternatingOU(OUAnnealReset):
    """Experimental OU variant with a periodically restarting sigma schedule.

    The annealing clock wraps every (n_steps_annealing + n_res) steps, so sigma
    repeatedly anneals down and jumps back up. For the first n_begin samples
    only minimal noise (sigma_min) is emitted.
    """
    def __init__(self, n_res, n_steps_annealing, n_begin=0, **kwargs):
        self.n_res = n_res
        self.n_ann = n_steps_annealing
        self.n_begin = n_begin #step count when to begin with noise
        super(AlternatingOU, self).__init__(n_steps_annealing=n_steps_annealing, **kwargs)
    @property
    def current_sigma(self):
        # Same linear schedule as the base class, but on a wrapping step clock.
        sigma = max(self.sigma_min, self.m * float(self.n_steps % (self.n_ann + self.n_res)) + self.c)
        return sigma
    def sample(self):
        if self.n_begin <= 0:
            return super(AlternatingOU, self).sample()
        else:
            # Warm-up phase: emit only low-amplitude white noise, no OU dynamics.
            self.n_begin -= 1
            return np.random.normal(self.mu, self.sigma_min, self.size)
#OU modification which sets the ongoing output of the noise process to zero
#but does not interrupt the process itself
class PausingOU(OrnsteinUhlenbeckProcess):
def __init__(self, noiseLength, noisePause, alpha, **kwargs):
self.alpha = alpha
self.noiseLength = noiseLength
self.noisePause = noisePause
self.np = noisePause + noiseLength
super(PausingOU, self).__init__(**kwargs)
def sample(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
self.x_prev = x
self.n_steps += 1
if self.np <= self.noisePause:
x = x * self.alpha
self.np -= 1
if self.np <= 0:
self.np = self.noiseLength + self.noisePause
return x
#testing
#testing
if __name__=='__main__':
    # Ad-hoc smoke test: build the first configured process and sample once.
    from config import noiseConfig as nc
    noise = getNoise(nc[0],5)
    print(noise.theta)
    print(noise.sample())
    noise.reset_states()
    print(noise.x_prev)
{
"api_name": "numpy.random.normal",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal... |
174132737 | from datetime import datetime, timezone, timedelta
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.conf import settings
from django.template.loader import render_to_string
from elasticsearch.helpers import bulk
from api.indexes import ES_PAGE_NAME
from api.esconnection import ES_CLIENT
from api.models import Country, Appeal, Event, FieldReport, ActionsTaken
from api.logger import logger
from notifications.models import RecordType, SubscriptionType, Subscription, SurgeAlert
from notifications.hello import get_hello
from notifications.notification import send_notification
from deployments.models import PersonnelDeployment, ERU
from main.frontend import frontend_url
import html
# Polling window: how far back the cron run looks for new/changed records.
time_interval = timedelta(minutes = 5)
time_interva2 = timedelta( days = 1) # to check: the change was not between time_interval and time_interva2, so that the user don't receive email more frequent than a day.
time_interva7 = timedelta( days = 7) # for digest mode
basetime = int(20314) # weekday - hour - min for digest timing (5 minutes once a week)
daily_retro = int(654) # hour - min for daily retropective email timing (5 minutes a day) | Should not contain a leading 0!
max_length = 280 # after this length (at the first space) we cut the sent content
events_sent_to = {} # to document sent events before re-sending them via specific following
# Maps a RecordType (or the ad-hoc keys 99=generic, 98=operation update) to
# its email template path.
template_types = {
    99: 'design/generic_notification.html',
    RecordType.FIELD_REPORT: 'design/field_report.html',
    RecordType.APPEAL: 'design/new_operation.html',
    98: 'design/operation_update.html', # TODO: Either Operation Update needs a number or it should be constructed from other types (ask someone)
    RecordType.WEEKLY_DIGEST: 'design/weekly_digest.html',
}
class Command(BaseCommand):
help = 'Index and send notifications about new/changed records'
# Digest mode duration is 5 minutes once a week
def is_digest_mode(self):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
weekdayhourmin = int(today.strftime('%w%H%M'))
return basetime <= weekdayhourmin and weekdayhourmin < basetime + 5
def is_retro_mode(self):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
hourmin = int(today.strftime('%H%M'))
return daily_retro <= hourmin and hourmin < daily_retro + 5
def get_time_threshold(self):
return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interval
def get_time_threshold2(self):
return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interva2
def get_time_threshold_digest(self):
return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interva7
def gather_country_and_region(self, records):
# Appeals only, since these have a single country/region
countries = []
regions = []
for record in records:
if record.country is not None:
countries.append('c%s' % record.country.id)
if record.country.region is not None:
regions.append('r%s' % record.country.region.id)
countries = list(set(countries))
regions = list(set(regions))
return countries, regions
    def gather_countries_and_regions(self, records):
        """Collect distinct lookup ids ('c<id>' / 'r<id>') from multi-country records.

        Applies to emergencies and field reports, which have a many-to-many
        relationship to countries; regions are resolved via one Country query.
        Returns (countries, regions) as deduplicated lists of lookup strings.
        """
        countries = []
        for record in records:
            if record.countries is not None:
                countries += [country.id for country in record.countries.all()]
        countries = list(set(countries))
        # Resolve regions from the deduplicated country ids in a single query.
        qs = Country.objects.filter(pk__in=countries)
        regions = ['r%s' % country.region.id for country in qs if country.region is not None]
        countries = ['c%s' % id for id in countries]
        return countries, regions
    def gather_subscribers(self, records, rtype, stype):
        """Return the list of email addresses to notify for these records.

        In digest mode, every active WEEKLY_DIGEST subscriber is returned.
        Otherwise, direct record-type subscribers are combined with users
        subscribed to any matching disaster-type / country / region lookup.
        """
        # Correction for the new notification types:
        # EVENT/FIELD_REPORT map onto NEW_EMERGENCIES, APPEAL onto NEW_OPERATIONS.
        if rtype == RecordType.EVENT or rtype == RecordType.FIELD_REPORT:
            rtype_of_subscr = RecordType.NEW_EMERGENCIES
            stype = SubscriptionType.NEW
        elif rtype == RecordType.APPEAL:
            rtype_of_subscr = RecordType.NEW_OPERATIONS
            stype = SubscriptionType.NEW
        else:
            rtype_of_subscr = rtype
        # Gather the email addresses of users who should be notified
        if self.is_digest_mode():
            subscribers = User.objects.filter(subscription__rtype=RecordType.WEEKLY_DIGEST, \
                is_active=True).values('email')
            # In digest mode we do not care about other circumstances, just get every subscriber's email.
            emails = [subscriber['email'] for subscriber in subscribers]
            return emails
        else:
            # Start with any users subscribed directly to this record type.
            subscribers = User.objects.filter(subscription__rtype=rtype_of_subscr, \
                subscription__stype=stype, is_active=True).values('email')
            # For FOLLOWED_EVENTs and DEPLOYMENTs we do not collect other generic (d*, country, region) subscriptions, just one. This part is not called.
            if rtype_of_subscr != RecordType.FOLLOWED_EVENT and \
                rtype_of_subscr != RecordType.SURGE_ALERT and \
                rtype_of_subscr != RecordType.SURGE_DEPLOYMENT_MESSAGES:
                # Disaster-type lookups ('d<id>') for all records that have one.
                dtypes = list(set(['d%s' % record.dtype.id for record in records if record.dtype is not None]))
                if (rtype_of_subscr == RecordType.NEW_OPERATIONS):
                    countries, regions = self.gather_country_and_region(records)
                else:
                    countries, regions = self.gather_countries_and_regions(records)
                lookups = dtypes + countries + regions
                if len(lookups):
                    # Union the lookup-based subscribers with the direct ones.
                    subscribers = (subscribers | User.objects.filter(subscription__lookup_id__in=lookups, is_active=True).values('email')).distinct()
            emails = [subscriber['email'] for subscriber in subscribers]
            return emails
def get_template(self, rtype=99):
#older: return 'email/generic_notification.html'
#old: return 'design/generic_notification.html'
return template_types[rtype]
# Get the front-end url of the resource
    # Get the front-end url of the resource
    def get_resource_uri (self, record, rtype):
        """Build the public front-end URL that the email should link to."""
        # Determine the front-end URL
        resource_uri = frontend_url
        if rtype == RecordType.SURGE_ALERT or rtype == RecordType.FIELD_REPORT: # Pointing to event instead of field report %s/%s/%s - Munu asked - ¤
            # Fallback id 999 is used in the very rare case of no linked event.
            belonging_event = record.event.id if record.event is not None else 999 # Very rare
            resource_uri = '%s/emergencies/%s#overview' % (frontend_url, belonging_event)
        elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
            resource_uri = '%s/%s' % (frontend_url, 'deployments') # can be further sophisticated
        elif rtype == RecordType.APPEAL and (
                record.event is not None and not record.needs_confirmation):
            # Appeals with confirmed emergencies link to that emergency
            resource_uri = '%s/emergencies/%s#overview' % (frontend_url, record.event.id)
        elif rtype != RecordType.APPEAL:
            # One-by-one followed or globally subscribed emergencies
            resource_uri = '%s/%s/%s' % (
                frontend_url,
                'emergencies' if rtype == RecordType.EVENT or rtype == RecordType.FOLLOWED_EVENT else 'reports', # this else never occurs, see ¤
                record.id
            )
        # Unconfirmed appeals fall through and keep the bare frontend_url.
        return resource_uri
def get_admin_uri (self, record, rtype):
admin_page = {
RecordType.FIELD_REPORT: 'api/fieldreport',
RecordType.APPEAL: 'api/appeal',
RecordType.EVENT: 'api/event',
RecordType.FOLLOWED_EVENT: 'api/event',
RecordType.SURGE_DEPLOYMENT_MESSAGES: 'deployments/personneldeployment',
RecordType.SURGE_ALERT: 'notifications/surgealert',
}[rtype]
return 'https://%s/admin/%s/%s/change' % (
settings.BASE_URL,
admin_page,
record.id,
)
    def get_record_title(self, record, rtype):
        """Build the human-readable title used in the email subject/body."""
        if rtype == RecordType.FIELD_REPORT:
            sendMe = record.summary
            # Append the first country name when it is not already in the summary.
            if record.countries.all():
                country = record.countries.all()[0].name
                if country not in sendMe:
                    sendMe = sendMe + ' (' + country + ')'
            return sendMe
        elif rtype == RecordType.SURGE_ALERT:
            return record.operation + ' (' + record.atype.name + ', ' + record.category.name.lower() +')'
        elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
            return '%s, %s' % (record.country_deployed_to, record.region_deployed_to)
        else:
            # Appeals and events expose a plain name field.
            return record.name
def get_record_content(self, record, rtype):
if rtype == RecordType.FIELD_REPORT:
sendMe = record.description
elif rtype == RecordType.APPEAL:
sendMe = record.sector
if record.code:
sendMe += ', ' + record.code
elif rtype == RecordType.EVENT or rtype == RecordType.FOLLOWED_EVENT:
sendMe = record.summary
elif rtype == RecordType.SURGE_ALERT:
sendMe = record.message
elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
sendMe = record.comments
else:
sendMe = '?'
return html.unescape(sendMe) # For contents we allow HTML markup. = autoescape off in generic_notification.html template.
def get_record_display(self, rtype, count):
display = {
RecordType.FIELD_REPORT: 'field report',
RecordType.APPEAL: 'operation',
RecordType.EVENT: 'event',
RecordType.FOLLOWED_EVENT: 'event',
RecordType.SURGE_DEPLOYMENT_MESSAGES: 'surge deployment',
RecordType.SURGE_ALERT: 'surge alert',
}[rtype]
if (count > 1):
display += 's'
return display
def get_weekly_digest_data(self, field):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
if field == 'dref':
return Appeal.objects.filter(end_date__gt=today, atype=0).count()
elif field == 'ea':
return Appeal.objects.filter(end_date__gt=today, atype=1).count()
elif field == 'fund':
amount_req = (
Appeal.objects
.filter(Q(end_date__gt=today, atype=1) | Q(end_date__gt=today, atype=2))
.aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
)
amount_fund = (
Appeal.objects
.filter(Q(end_date__gt=today, atype=1) | Q(end_date__gt=today, atype=2))
.aggregate(Sum('amount_funded'))['amount_funded__sum'] or 0
)
percent = round(amount_fund / amount_req, 3) * 100
return percent
elif field == 'budget':
amount = Appeal.objects.filter(end_date__gt=today).aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
rounded_amount = round(amount / 1000000, 2)
return rounded_amount
elif field == 'pop':
people = Appeal.objects.filter(end_date__gt=today).aggregate(Sum('num_beneficiaries'))['num_beneficiaries__sum'] or 0
rounded_people = round(people / 1000000, 2)
return rounded_people
    def get_weekly_digest_latest_ops(self):
        """List appeals created during the digest window, newest first, as plain dicts."""
        dig_time = self.get_time_threshold_digest()
        ops = Appeal.objects.filter(created_at__gte=dig_time).order_by('-created_at')
        ret_ops = []
        for op in ops:
            op_to_add = {
                'op_event_id': op.event_id,
                # Resolve the country name explicitly; empty string when unset.
                'op_country': Country.objects.values_list('name', flat=True).get(id=op.country_id) if op.country_id else '',
                'op_name': op.name,
                'op_created_at': op.created_at,
                'op_funding': op.amount_requested,
            }
            ret_ops.append(op_to_add)
        return ret_ops
    def get_weekly_digest_highlights(self):
        """Summarize featured events updated during the digest window.

        Returns a list of dicts with per-event beneficiary, funding, ERU and
        surge-personnel aggregates for the digest template.
        """
        dig_time = self.get_time_threshold_digest()
        events = Event.objects.filter(is_featured=True, updated_at__gte=dig_time).order_by('-updated_at')
        ret_highlights = []
        for ev in events:
            amount_requested = Appeal.objects.filter(event_id=ev.id).aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
            amount_funded = Appeal.objects.filter(event_id=ev.id).aggregate(Sum('amount_funded'))['amount_funded__sum'] or 0
            data_to_add = {
                'hl_id': ev.id,
                'hl_name': ev.name,
                'hl_last_update': ev.updated_at,
                'hl_people': Appeal.objects.filter(event_id=ev.id).aggregate(Sum('num_beneficiaries'))['num_beneficiaries__sum'] or 0,
                'hl_funding': amount_requested,
                'hl_deployed_eru': ERU.objects.filter(event_id=ev.id).aggregate(Sum('units'))['units__sum'] or 0,
                'hl_deployed_sp': PersonnelDeployment.objects.filter(event_deployed_to_id=ev.id).count(),
                # Coverage as a funded/requested ratio (0 when nothing requested).
                'hl_coverage': round(amount_funded / amount_requested, 1) if amount_requested != 0 else 0,
            }
            ret_highlights.append(data_to_add)
        return ret_highlights
def get_actions_taken(self, frid):
ret_actions_taken = {
'NTLS': [],
'PNS': [],
'FDRN': [],
}
actions_taken = ActionsTaken.objects.filter(field_report_id=frid)
for at in actions_taken:
action_to_add = {
'action_summary': at.summary,
'actions': [],
}
if at.actions.all():
for act in at.actions.all():
action_to_add['actions'].append(act)
if at.organization == 'NTLS':
ret_actions_taken['NTLS'].append(action_to_add)
elif at.organization == 'PNS':
ret_actions_taken['PNS'].append(action_to_add)
elif at.organization == 'FDRN':
ret_actions_taken['FDRN'].append(action_to_add)
return ret_actions_taken
def get_weekly_latest_frs(self):
dig_time = self.get_time_threshold_digest()
ret_fr_list = []
fr_list = list(FieldReport.objects.filter(created_at__gte=dig_time).order_by('-created_at'))
for fr in fr_list:
fr_data = {
'id': fr.id,
'country': fr.countries.all()[0].name if fr.countries else None,
'summary': fr.summary,
'created_at': fr.created_at,
}
ret_fr_list.append(fr_data)
return ret_fr_list
    # Based on the notification type this constructs the different type of objects needed for the different templates
    def construct_template_record(self, rtype, record):
        """Build the per-record context dict consumed by the email templates.

        FIELD_REPORT, APPEAL and WEEKLY_DIGEST each get a rich context; every
        other type falls through to the simple generic-template shape.
        For WEEKLY_DIGEST `record` is ignored (may be None).
        """
        if rtype != RecordType.WEEKLY_DIGEST:
            # Trim the body at max_length, extending to the next space so a
            # word is never cut in half.
            shortened = self.get_record_content(record, rtype)
            if len(shortened) > max_length:
                shortened = shortened[:max_length] + \
                    shortened[max_length:].split(' ', 1)[0] + '...' # look for the first space
        # TODO: Operation Update and Announcement types are missing
        if rtype == RecordType.FIELD_REPORT:
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'description': shortened,
                # Each figure sums the report's own, government and "other" counts.
                'key_figures': {
                    'affected': (record.num_affected or 0) + (record.gov_num_affected or 0) + (record.other_num_affected or 0),
                    'injured': (record.num_injured or 0) + (record.gov_num_injured or 0) + (record.other_num_injured or 0),
                    'dead': (record.num_dead or 0) + (record.gov_num_dead or 0) + (record.other_num_dead or 0),
                    'missing': (record.num_missing or 0) + (record.gov_num_missing or 0) + (record.other_num_missing or 0),
                    'displaced': (record.num_displaced or 0) + (record.gov_num_displaced or 0) + (record.other_num_displaced or 0),
                    'assisted': (record.num_assisted or 0) + (record.gov_num_assisted or 0) + (record.other_num_assisted or 0),
                    'local_staff': record.num_localstaff or 0,
                    'volunteers': record.num_volunteers or 0,
                    'expat_delegates': record.num_expats_delegates or 0,
                },
                'actions_taken': self.get_actions_taken(record.id),
                'actions_others': record.actions_others,
                'gov_assistance': 'Yes' if record.request_assistance else 'No',
                'ns_assistance': 'Yes' if record.ns_request_assistance else 'No',
            }
        elif rtype == RecordType.APPEAL:
            # Maybe we need these in the future
            # localstaff = FieldReport.objects.filter(event_id=record.event_id).values_list('num_localstaff', flat=True)
            # volunteers = FieldReport.objects.filter(event_id=record.event_id).values_list('num_volunteers', flat=True)
            # expats = FieldReport.objects.filter(event_id=record.event_id).values_list('num_expats_delegates', flat=True)
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'situation_overview': Event.objects.values_list('summary', flat=True).get(id=record.event_id) if record.event_id != None else '',
                'key_figures': {
                    'people_targeted': record.num_beneficiaries or 0,
                    'funding_req': record.amount_requested or 0,
                    'appeal_code': record.code,
                    'start_date': record.start_date,
                    'end_date': record.end_date,
                    # 'local_staff': localstaff[0] if localstaff else 0,
                    # 'volunteers': volunteers[0] if volunteers else 0,
                    # 'expat_delegates': expats[0] if expats else 0,
                },
                'field_reports': list(FieldReport.objects.filter(event_id=record.event_id)) if record.event_id != None else None,
            }
        elif rtype == RecordType.WEEKLY_DIGEST:
            dig_time = self.get_time_threshold_digest()
            rec_obj = {
                'active_dref': self.get_weekly_digest_data('dref'),
                'active_ea': self.get_weekly_digest_data('ea'),
                'funding_coverage': self.get_weekly_digest_data('fund'),
                'budget': self.get_weekly_digest_data('budget'),
                'population': self.get_weekly_digest_data('pop'),
                'highlighted_ops': self.get_weekly_digest_highlights(),
                'latest_ops': self.get_weekly_digest_latest_ops(),
                'latest_deployments': list(SurgeAlert.objects.filter(created_at__gte=dig_time).order_by('-created_at')),
                'latest_field_reports': self.get_weekly_latest_frs(),
            }
        else: # The default (old) template
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'content': shortened,
            }
        return rec_obj
    def notify(self, records, rtype, stype, uid=None):
        """Render and send one notification email about `records`.

        records: queryset of changed/new records (may be None for digest).
        rtype/stype: RecordType / SubscriptionType driving template choice.
        uid: when given, this is a one-by-one "followed" notification for a
             single user; otherwise it is a batch to all matching subscribers.
        """
        record_count = 0
        if records:
            record_count = records.count()
        # Weekly digest is sent even with zero records; everything else bails out.
        if not record_count and rtype != RecordType.WEEKLY_DIGEST:
            return
        # Decide if it is a personal notification or batch
        if uid is None:
            emails = self.gather_subscribers(records, rtype, stype)
            if not len(emails):
                return
        else:
            usr = User.objects.filter(pk=uid, is_active=True)
            if not len(usr):
                return
            else:
                emails = list(usr.values_list('email', flat=True)) # Only one email in this case
        # TODO: maybe this needs to be adjusted based on the new functionality (at first only handling Weekly Digest)
        # Only serialize the first 10 records
        record_entries = []
        if rtype == RecordType.WEEKLY_DIGEST:
            record_entries.append(self.construct_template_record(rtype, None))
        else:
            entries = list(records) if record_count <= 10 else list(records[:10])
            for record in entries:
                record_entries.append(self.construct_template_record(rtype, record))
        if uid is not None:
            is_staff = usr.values_list('is_staff', flat=True)[0]
        if rtype == RecordType.WEEKLY_DIGEST:
            record_type = 'weekly digest'
        else:
            record_type = self.get_record_display(rtype, record_count)
        # Compose the subject line; digest omits the count, followed records
        # use the "followed ... modified" wording.
        if uid is None:
            adj = 'new' if stype == SubscriptionType.NEW else 'modified'
            #subject = '%s %s %s in IFRC GO' % (
            if rtype == RecordType.WEEKLY_DIGEST:
                subject = '%s %s' % (
                    adj,
                    record_type,
                )
            else:
                subject = '%s %s %s' % (
                    record_count,
                    adj,
                    record_type,
                )
        else:
            #subject = '%s followed %s modified in IFRC GO' % (
            subject = '%s followed %s modified' % (
                record_count,
                record_type,
            )
        if self.is_retro_mode():
            subject += ' [daily followup]'
        # Field reports, appeals and the digest have dedicated templates;
        # everything else uses the generic one.
        template_path = self.get_template()
        if rtype == RecordType.FIELD_REPORT or rtype == RecordType.APPEAL or rtype == RecordType.WEEKLY_DIGEST:
            template_path = self.get_template(rtype)
        html = render_to_string(template_path, {
            'hello': get_hello(),
            'count': record_count,
            'records': record_entries,
            'is_staff': True if uid is None else is_staff, # TODO: fork the sending to "is_staff / not ~" groups
            'subject': subject,
        })
        recipients = emails
        if uid is None:
            if record_count == 1:
                subject += ': ' + record_entries[0]['title'] # On purpose after rendering – the subject changes only, not email body
            # For new (email-documented :10) events we store data to events_sent_to{ event_id: recipients }
            if stype == SubscriptionType.EDIT: # Recently we do not allow EDIT substription
                for e in list(records.values('id'))[:10]:
                    i = e['id']
                    if i not in events_sent_to:
                        events_sent_to[i] = []
                    email_list_to_add = list(set(events_sent_to[i] + recipients))
                    if email_list_to_add:
                        events_sent_to[i] = list(filter(None, email_list_to_add)) # filter to skip empty elements
            plural = '' if len(emails) == 1 else 's' # record_type has its possible plural thanks to get_record_display()
            logger.info('Notifying %s subscriber%s about %s %s %s' % (len(emails), plural, record_count, adj, record_type))
            send_notification(subject, recipients, html)
        else:
            if len(recipients):
                # check if email is not in events_sent_to{event_id: recipients}
                if not emails:
                    logger.info('Silent about the one-by-one subscribed %s – user %s has not set email address' % (record_type, uid))
                # Recently we do not allow EDIT (modif.) subscription, so it is irrelevant recently (do not check the 1+ events in loop) :
                elif (records[0].id not in events_sent_to) or (emails[0] not in events_sent_to[records[0].id]):
                    logger.info('Notifying %s subscriber about %s one-by-one subscribed %s' % (len(emails), record_count, record_type))
                    send_notification(subject, recipients, html)
                else:
                    logger.info('Silent about a one-by-one subscribed %s – user already notified via generic subscription' % (record_type))
def index_new_records(self, records):
self.bulk([self.convert_for_bulk(record, create=True) for record in list(records)])
def index_updated_records(self, records):
self.bulk([self.convert_for_bulk(record, create=False) for record in list(records)])
def convert_for_bulk(self, record, create):
    """Build a single elasticsearch bulk action dict for `record`.

    `create=True` produces a 'create' action with the indexed fields merged
    into the action itself; `create=False` produces an 'update' action with
    the fields nested under 'doc', as the ES bulk API expects.
    """
    data = record.indexing()
    action = {
        '_op_type': 'create' if create else 'update',
        '_index': ES_PAGE_NAME,
        '_type': 'page',
        '_id': record.es_id()
    }
    if create:
        action.update(**data)
    else:
        action['doc'] = data
    return action
def bulk(self, actions):
    """Submit prepared bulk actions to Elasticsearch, logging any failures.

    NOTE: inside this method, `bulk(...)` resolves to the module-level
    elasticsearch helper import, not to this method.
    """
    try:
        _, errors = bulk(client=ES_CLIENT, actions=actions)
        if len(errors):
            joined = ', '.join(map(str, errors))
            logger.error('Produced the following errors:')
            logger.error('[%s]' % joined)
    except Exception as exc:
        # truncate huge payload dumps in the log
        logger.error('Could not index records')
        logger.error('%s...' % str(exc)[:512])
# Keep only the records whose "last change" timestamp (truncated to whole
# seconds) still equals their creation timestamp.
# NOTE(review): the original header comment claimed the opposite ("remove
# items where updated_at == created_at") -- confirm the intended semantics.
def filter_just_created(self, queryset):
    first = queryset.first()
    if first is None:
        return []
    # Prefer `modified_at` when the model defines it and it is populated;
    # fall back to `updated_at` otherwise (mirrors the two record families).
    if getattr(first, 'modified_at', None) is not None:
        stamp_attr = 'modified_at'
    else:
        stamp_attr = 'updated_at'
    return [
        record for record in queryset
        if getattr(record, stamp_attr).replace(microsecond=0) == record.created_at.replace(microsecond=0)
    ]
def handle(self, *args, **options):
    """Entry point: collect recently created/updated records, send the
    matching subscription notifications, and sync the search index.

    Three mutually exclusive modes: digest (larger time window, new
    entities only), retro (re-check followed events changed twice within
    the last day) and the normal run.
    """
    if self.is_digest_mode():
        t = self.get_time_threshold_digest()  # in digest mode (1ce a week, for new_entities only) we use a bigger interval
    else:
        t = self.get_time_threshold()
    t2 = self.get_time_threshold2()

    # Reusable Django Q filters over the two time thresholds.
    cond1 = Q(created_at__gte=t)
    condU = Q(updated_at__gte=t)
    condR = Q(real_data_update__gte=t)  # instead of modified at
    cond2 = ~Q(previous_update__gte=t2)  # we negate (~) this, so we want: no previous_update in the last day. So: send once a day!
    condF = Q(auto_generated_source='New field report')  # We exclude those events that were generated from field reports, to avoid 2x notif.

    # In this section we check if there was 2 FOLLOWED_EVENT modifications in the last 24 hours
    # (for which there was no duplicated email sent, but now will be one).
    if self.is_retro_mode():
        condU = Q(updated_at__gte=t2)
        cond2 = Q(previous_update__gte=t2)  # not negated. We collect those, who had 2 changes in the last 1 day.
        followed_eventparams = Subscription.objects.filter(event_id__isnull=False)
        users_of_followed_events = followed_eventparams.values_list('user_id', flat=True).distinct()
        for usr in users_of_followed_events:  # looping in user_ids of specific FOLLOWED_EVENT subscriptions (8)
            eventlist = followed_eventparams.filter(user_id=usr).values_list('event_id', flat=True).distinct()
            cond3 = Q(pk__in=eventlist)  # getting their events as a condition
            followed_events = Event.objects.filter(condU & cond2 & cond3)
            if len(followed_events):  # usr - unique (we loop one-by-one), followed_events - more
                self.notify(followed_events, RecordType.FOLLOWED_EVENT, SubscriptionType.NEW, usr)
    else:
        # Gather the record sets for every notification/indexing category.
        new_reports = FieldReport.objects.filter(cond1)
        updated_reports = FieldReport.objects.filter(condU & cond2)
        new_appeals = Appeal.objects.filter(cond1)
        updated_appeals = Appeal.objects.filter(condR & cond2)
        new_events = Event.objects.filter(cond1).exclude(condF)
        updated_events = Event.objects.filter(condU & cond2)
        new_surgealerts = SurgeAlert.objects.filter(cond1)
        new_pers_deployments = PersonnelDeployment.objects.filter(cond1)  # CHECK: Best instantiation of Deployment Messages? Frontend appearance?!?
        # No need for indexing for personnel deployments
        # Approaching End of Mission ? new_approanching_end = PersonnelDeployment.objects.filter(end-date is close?)
        # No need for indexing for Approaching End of Mission
        # PER Due Dates ? new_per_due_date_warnings = User.objects.filter(PER admins of countries/regions, for whom the setting/per_due_date is in 1 week)
        # No need for indexing for PER Due Dates
        followed_eventparams = Subscription.objects.filter(event_id__isnull=False)
        ## followed_events = Event.objects.filter(updated_at__gte=t, pk__in=[x.event_id for x in followed_eventparams])

        # Merge Weekly Digest into one mail instead of separate ones
        if self.is_digest_mode():
            self.notify(None, RecordType.WEEKLY_DIGEST, SubscriptionType.NEW)
        else:
            self.notify(new_reports, RecordType.FIELD_REPORT, SubscriptionType.NEW)
            #self.notify(updated_reports, RecordType.FIELD_REPORT, SubscriptionType.EDIT)
            self.notify(new_appeals, RecordType.APPEAL, SubscriptionType.NEW)
            #self.notify(updated_appeals, RecordType.APPEAL, SubscriptionType.EDIT)
            self.notify(new_events, RecordType.EVENT, SubscriptionType.NEW)
            #self.notify(updated_events, RecordType.EVENT, SubscriptionType.EDIT)
            self.notify(new_surgealerts, RecordType.SURGE_ALERT, SubscriptionType.NEW)
            self.notify(new_pers_deployments, RecordType.SURGE_DEPLOYMENT_MESSAGES, SubscriptionType.NEW)

            users_of_followed_events = followed_eventparams.values_list('user_id', flat=True).distinct()
            for usr in users_of_followed_events:  # looping in user_ids of specific FOLLOWED_EVENT subscriptions (8)
                eventlist = followed_eventparams.filter(user_id=usr).values_list('event_id', flat=True).distinct()
                cond3 = Q(pk__in=eventlist)  # getting their events as a condition
                followed_events = Event.objects.filter(condU & cond2 & cond3)
                if len(followed_events):  # usr - unique (we loop one-by-one), followed_events - more
                    self.notify(followed_events, RecordType.FOLLOWED_EVENT, SubscriptionType.NEW, usr)

        # Keep the search index in sync with what was just collected.
        logger.info('Indexing %s updated field reports' % updated_reports.count())
        self.index_updated_records(self.filter_just_created(updated_reports))
        logger.info('Indexing %s updated appeals' % updated_appeals.count())
        self.index_updated_records(self.filter_just_created(updated_appeals))
        logger.info('Indexing %s updated events' % updated_events.count())
        self.index_updated_records(self.filter_just_created(updated_events))
        logger.info('Indexing %s new field reports' % new_reports.count())
        self.index_new_records(new_reports)
        logger.info('Indexing %s new appeals' % new_appeals.count())
        self.index_new_records(new_appeals)
        logger.info('Indexing %s new events' % new_events.count())
        self.index_new_records(new_events)
| batpad/go-api | api/management/commands/index_and_notify.py | index_and_notify.py | py | 31,984 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "notification... |
24537790919 | from pathlib import Path
N, S = int(Path("day17.txt").read_text()), 50000000
l, pos, after2017, afterzero = [0], 0, 0, 0
for v in range(1, S+1):
pos = (pos + N) % v + 1
if v == 2017: after2017 = l[pos]
elif v > 2017:
if pos == 1:
afterzero = v
continue
l.insert(pos, v)
print(after2017, afterzero)
| AlexBlandin/Advent-of-Code | 2017/day17.py | day17.py | py | 322 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
}
] |
35376158594 | # fit to time dependent function of chance of having activity of any length during a single labeling window
# infer k_on parameter based on single window for 4SU (though here it is the 2nd window)
# based on different window lengths
# window_lengths = [15, 30, 45, 60, 120, 180]
# fit based on (hidden) presence of active state, on real simulated counts and on sampled simulated counts
# TO DO
# three categories of k_syn:
# only change k_on with fixed (k_off, k_syn, k_d)
import os
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.optimize import curve_fit
from simulator.Experiment import *
from simulator.Transcription import *
import numpy as np
from utils.utils import round_sig
# Platform-dependent output locations (Windows dev machine vs. everywhere else).
if os.name == 'nt':
    dir_sep = "\\"
    out_dir = r"D:\26 Battich Oudenaarden transcriptional bursts\runs"
else:
    dir_sep = "/"
    out_dir = "sc_runs"
plot_dir = out_dir + dir_sep + "infer_parameters_example.plots"
os.makedirs(plot_dir, exist_ok=True)
df_filename = "counts_infer_parameters_example.csv"

# Two-state transcription model rates (per minute) used throughout this script:
# switching on/off, synthesis, decay, and detection efficiency.
k_on = 0.01
k_off = 0.04
k_d = 0.02
k_syn = 0.2
k_eff = 0.1

# window_lengths = [r*15 for r in range(1, 24)]
window_lengths = [15, 30, 45, 60, 120, 180]  # labeling window lengths (minutes)
k_offs = [k * 0.005 for k in range(1, 6)]  # for some examples in theoretical plots
def p_1(t, k_on, k_off):
    """Probability of observing at least one active period within time t.

    Starts at the steady-state on-fraction at t=0 and approaches 1 as the
    off-fraction switches on with rate k_on.
    """
    total_rate = k_on + k_off
    steady_on = k_on / total_rate
    steady_off = k_off / total_rate
    return steady_on + steady_off * (1 - np.exp(-k_on * t))
# Simplified fit model: same shape as p_1, but with the steady-state
# fractions p_on/p_off as free parameters instead of deriving them from k_off.
def p_1_model(t, k_on, p_on, p_off):
    switched_on = 1 - np.exp(-k_on * t)
    return p_on + p_off * switched_on
def nr_molecules_in_window_no_decay(t, k_on, k_off, k_syn, k_eff):
    """Expected number of detected molecules produced in a window of length t,
    ignoring decay: steady-state on-fraction times synthesis rate times
    detection efficiency times time."""
    fraction_on = k_on / (k_on + k_off)
    return fraction_on * k_syn * k_eff * t
def plot_theoretical_chance_of_active_state():
    """Plot p_1(t) for each example k_off, with the labeling windows marked,
    and save the figure as an SVG in plot_dir."""
    t = np.linspace(0, 400, 100)
    # NOTE(review): the loop variable shadows the module-level `k_off`; the
    # savefig filename below therefore uses the LAST looped value -- confirm
    # that this is intended.
    for k_off in k_offs:
        sns.lineplot(x=t, y=p_1(t, k_on, k_off))
    plt.legend(k_offs)
    plt.ylim(0, 1)
    plt.title("k_on={k_on}".format(k_on=k_on))
    plt.ylabel("chance of some active state (any length)")
    plt.xlabel("minutes")
    # mark the simulated labeling-window lengths
    plt.vlines(x=window_lengths, ymin=0, ymax=1, linestyles='dashed', colors='black')
    plt.savefig(plot_dir + dir_sep + "theoretical_chance_active_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def plot_production_of_mrna():
    """Plot the expected (decay-free) molecule production over time for each
    example k_off and save the figure as an SVG in plot_dir."""
    t = np.linspace(0, 400, 100)
    # NOTE(review): as in the sibling plot function, the loop variable shadows
    # the module-level `k_off`, so the filename uses the last looped value.
    for k_off in k_offs:
        y = nr_molecules_in_window_no_decay(t, k_on, k_off, k_syn, k_eff)
        sns.lineplot(x=t, y=y, label="k_off={k_off}".format(k_off=k_off))
    plt.legend()
    plt.title("k_on={k_on}".format(k_on=k_on))
    plt.ylabel("average nr of molecules produced")
    plt.xlabel("minutes")
    # `y` still holds the curve of the last k_off; its max sets the marker height
    plt.vlines(x=window_lengths, ymin=0, ymax=max(y), linestyles='dashed', colors='black')
    plt.savefig(plot_dir + dir_sep + "theoretical_production_mrna_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def run_active_state_is_present_simulations(label, nr_runs):
    """For each window length, run `nr_runs` burst simulations and count how
    many runs (a) had any active state in the window, (b) produced at least
    one labeled transcript, (c) still showed labeled signal after sampling.

    Writes the counts to a CSV (one row per window length) and returns them
    as a DataFrame with columns ["window", "active", "real", "signal"].
    """
    l_counts = []
    for w in window_lengths:
        nr_runs_active = 0
        nr_real_label = 0
        nr_signal_label = 0
        windows, fix_time = get_windows_and_fix_time(length_window=w, gap=0)
        params = TranscriptParams(k_on=k_on, k_off=k_off, nr_refractions=1,
                                  tm_id=np.nan,
                                  k_syn=k_syn, k_d=k_d,
                                  coord_group=0,
                                  name="test",
                                  tran_type="S")
        trans = Transcription(params)
        # set complete_trace=True to retrieve the complete trace of transcripts counts (for plotting)
        for run in range(0, nr_runs):
            df_dtmc, dtmc_list = trans.run_bursts(fix_time, windows, new_dtmc_trace=True, complete_trace=False)
            df_transcripts = trans.df_transcripts
            df_labeled_transcripts = df_transcripts[df_transcripts.label == label]
            if len(df_labeled_transcripts) > 0:
                nr_real_label = nr_real_label + 1
            # TODO: sampling should be done differently
            # here we are taking a fixed percentage
            # NOTE(review): the sample size is derived from the LABELED subset
            # but drawn from ALL transcripts -- confirm this is intentional.
            len_sample = int(k_eff * len(df_labeled_transcripts))
            df_sampled = df_transcripts.sample(len_sample, replace=False)
            if len(df_sampled) > 0:
                nr_signal_label = nr_signal_label + 1
            # example of calculating percentage active
            perc = Experiment.perc_active_state(windows, df_dtmc, label)
            # print("Percentage active state: {perc}".format(perc=perc))
            if perc > 0:
                nr_runs_active = nr_runs_active + 1
        print("{label} window contains {nr_runs_active} runs with active state(s) for k_off {k_off} and window {window}".
              format(label=label, k_off=k_off, window=w, nr_runs_active=nr_runs_active))
        l_counts.append([w, nr_runs_active, nr_real_label, nr_signal_label])
    df_counts = pd.DataFrame(l_counts, columns=["window", "active", "real", "signal"])
    df_counts.to_csv(out_dir + dir_sep + df_filename, sep=';', index=False)
    return df_counts
def plot_chance_of_switching_to_active_state(df_counts, nr_runs):
    """Plot the simulated fractions (active state / real counts / detected
    counts, each divided by nr_runs) against the theoretical p_1 curve and
    save the figure as an SVG in plot_dir."""
    plt.plot(df_counts.window, df_counts.active/nr_runs, label='with active state')
    plt.plot(df_counts.window, df_counts.real/nr_runs, label='with real counts')
    plt.plot(df_counts.window, df_counts.signal/nr_runs, label='with detected counts')
    plt.plot(df_counts.window, df_counts.theoretical, color="red", label="theoretical")
    plt.xlim(0, max(window_lengths) + 15)
    # plt.ylim(0, 1)
    plt.xlabel("window size (minutes)")
    # NOTE(review): the plotted values are fractions of runs, not counts;
    # the y-axis label "nr of runs" looks stale -- confirm.
    plt.ylabel("nr of runs")
    plt.legend()
    plt.savefig(plot_dir + dir_sep + "counts_{k_on}_{k_off}_{k_syn}.svg".format(
        k_on=k_on, k_off=k_off, k_syn=k_syn))
    plt.close(1)
def fit_to_model_p1(nr_runs):
    """Fit the simplified p_1_model to the three simulated curves (hidden
    active state, real counts, sampled counts) and print the inferred k_on
    with its relative error versus the true module-level k_on.

    NOTE(review): reads the module-level `df_counts` rather than taking it as
    a parameter -- must be called after `df_counts` has been created.
    """
    # initial guesses for (k_on, p_on, p_off)
    expected = (0.1, 0.5, 0.5)
    # divide by nr_runs for getting chance
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.active / nr_runs, expected)
    popt_active = popt
    error_k_on_active = abs(popt_active[0] / k_on - 1) * 100
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.real / nr_runs, expected)
    popt_real = popt
    error_k_on_real = abs(popt_real[0] / k_on - 1) * 100
    popt, pcov = curve_fit(p_1_model, df_counts.window, df_counts.signal / nr_runs, expected)
    popt_signal = popt
    error_k_on_signal = abs(popt_signal[0] / k_on - 1) * 100
    print("fitting to hidden state: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_active[0], 4), error=round_sig(error_k_on_active, 3)))
    print("fitting to real counts: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_real[0], 4), error=round_sig(error_k_on_real, 3)))
    # NOTE(review): this round_sig call omits the explicit 4-digit argument
    # used above -- presumably relies on round_sig's default; confirm.
    print("fitting to sampled counts: k_on={k_on}; error={error}%".format(
        k_on=round_sig(popt_signal[0]), error=round_sig(error_k_on_signal, 3)))
# Script driver: either run the (slow) simulations or reuse the CSV written
# by a previous run, then generate the plots and fit k_on.
run_sim = False
nr_runs = 500
if run_sim:
    label = "4SU"
    df_counts = run_active_state_is_present_simulations(label, nr_runs)
else:
    # reuse counts produced by an earlier run (run_sim=True writes this file)
    df_counts = pd.read_csv(out_dir + dir_sep + df_filename, sep=';')
plot_theoretical_chance_of_active_state()
plot_production_of_mrna()
# add the closed-form expectation so it can be overlaid on the simulated curves
df_counts["theoretical"] = p_1(df_counts["window"], k_on, k_off)
plot_chance_of_switching_to_active_state(df_counts, nr_runs)
fit_to_model_p1(nr_runs)
| resharp/scBurstSim | analysis/infer_parameters_example.py | infer_parameters_example.py | py | 7,414 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.name",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 58,... |
9503023051 | import os
import os.path as osp
import time
import yaml
import warnings
import torch
import torch.optim as optim
from utils import get_world_size, get_rank
from builder import build_train_dataloader, build_val_dataloader,build_model
from utils import Logger,CosineDecayLR
from torch import distributed as dist
from torch.nn.utils import clip_grad_norm_
# cuDNN autotuner on for speed with fixed input sizes.
# NOTE(review): `benchmark=True` together with `deterministic=True` is
# contradictory (benchmarking selects algorithms non-deterministically);
# confirm which of the two is actually intended.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
class IterRunner():
    """Distributed face-recognition training runner: builds dataloaders and
    model, creates the timestamped project directory (config copy, logs,
    weights), and drives the freeze/normal two-phase training schedule."""

    def __init__(self, config):
        self.config = config
        self.rank = get_rank()
        self.world_size = get_world_size()
        self.iter = 0  # global iteration counter, updated inside train()
        # init dataloader
        self.train_dataloader, self.sampler = build_train_dataloader(self.config['train']['data'])
        self.val_dataloader = build_val_dataloader(self.config['val'])
        # init model: the head needs to know the backbone's output dimension
        feat_dim = config['model']['backbone']['net']['out_channel']
        self.config['model']['head']['net']['feat_dim'] = feat_dim
        self.model = build_model(config['model'])
        # init project: one timestamped directory per run
        timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        self.project_dir = osp.join(config['common']['save_log_dir'], timestamp)
        os.makedirs(self.project_dir, exist_ok=True)
        if self.rank == 0:
            print('')
            print('The training log and models are saved to ' + self.project_dir)
            print('')
        # save cfg: dump the effective config for reproducibility
        save_cfg_path = osp.join(self.project_dir, config['common']['save_cfg_name'])
        with open(save_cfg_path, 'w') as f:
            yaml.dump(config, f, sort_keys=False, default_flow_style=None)
        # save log: separate train and val log files
        save_log_dir = osp.join(self.project_dir, 'log')
        os.makedirs(save_log_dir, exist_ok=True)
        self.train_log = Logger(name='train', path="{}/{}_train.log".format(save_log_dir, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())))
        self.val_log = Logger(name='val', path="{}/{}_val.log".format(save_log_dir, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())))
        # save weight
        self.save_weights_dir = osp.join(self.project_dir, 'weights')
        os.makedirs(self.save_weights_dir, exist_ok=True)
        # init common and train arguments (cached from the config)
        self.freeze_epoch = self.config['train']['freeze']['epoch']
        self.norm_epoch = self.config['train']['norm']['epoch']
        self.test_first = self.config['common']['test_first']
        self.screen_intvl = self.config['common']['screen_intvl']
        self.val_intvl = self.config['common']['val_intvl']
        self.save_iters = self.config['common']['save_iters']
        self.freeze_iter_step = self.config['train']['freeze']['optim']['iter_step']
        self.norm_iter_step = self.config['train']['norm']['optim']['iter_step']
        self.scheduler_type = None
        # best-so-far validation metrics used by val() to decide checkpoints
        self.tpr_1e_3 = 0
        self.tpr_5e_3 = 0
        self.acc = 0
        # make sure the max_save_iter less than all_iter
        all_iter = (self.freeze_epoch + self.norm_epoch) * len(self.train_dataloader)
        if self.rank == 0:
            if len(self.save_iters) == 0:
                warnings.warn('`save_iters` is not set. if you want to save model in specified location,or not only end of each epoch.please check it!')
            else:
                if all_iter < max(self.save_iters):
                    raise KeyError(f'all_iter is {all_iter},but got max_save_iter {max(self.save_iters)},max_save_iter must be less than it')
        # NOTE(review): this early return at the very end of __init__ has no
        # effect -- presumably left over from removed rank-0-only setup.
        if self.rank != 0:
            return
    def set_optimizer_scheduler(self, config, freeze=False):
        """Create an SGD optimizer and an LR scheduler for every sub-module.

        With freeze=True the backbone's parameters are frozen (requires_grad
        False); with freeze=False they are unfrozen.  The scheduler type is
        chosen from config['scheduler']['type'] (CosineDecayLR or MultiStepLR)
        and remembered in self.scheduler_type for update_model().
        """
        for module in self.model:
            # (un)freeze the backbone regardless of which module we iterate
            if freeze:
                for param in self.model['backbone']['net'].parameters():
                    param.requires_grad = False
            else:
                for param in self.model['backbone']['net'].parameters():
                    param.requires_grad = True
            self.model[module]['optimizer'] = optim.SGD(self.model[module]['net'].parameters(),
                                                        lr=config['optim']['lr_init'],
                                                        momentum=config['optim']['momentum'],
                                                        weight_decay=config['optim']['weight_decay'])
            if config['scheduler']['type'] == 'CosineDecayLR':
                self.scheduler_type = 'CosineDecayLR'
                self.model[module]['scheduler'] = CosineDecayLR(
                    self.model[module]['optimizer'],
                    T_max=config['epoch'] * len(self.train_dataloader),
                    lr_init=config['optim']['lr_init'],
                    lr_min=config['scheduler']['lr_end'],
                    warmup=config['scheduler']['warm_up_epoch'] * len(self.train_dataloader)
                )
            if config['scheduler']['type'] == 'MultiStepLR':
                self.scheduler_type = 'MultiStepLR'
                self.model[module]['scheduler'] = optim.lr_scheduler.MultiStepLR(
                    self.model[module]['optimizer'],
                    config['scheduler']['milestones'],
                    config['scheduler']['gamma'],
                    -1
                )
def set_model(self, test_mode):
for module in self.model:
if test_mode:
self.model[module]['net'].eval()
else:
self.model[module]['net'].train()
    def update_model(self, i, freeze=False):
        """Step every sub-module's optimizer/scheduler for batch index `i`.

        The optimizer only steps every `*_iter_step` batches (gradient
        accumulation).  CosineDecayLR is stepped with the absolute iteration
        (offset by the freeze phase in normal mode); other schedulers are
        stepped without an argument.
        """
        for module in self.model:
            if freeze:
                if i % self.freeze_iter_step == 0:
                    self.model[module]['optimizer'].step()
                    self.model[module]['optimizer'].zero_grad()
                    # step the scheduler together with the optimizer
                    if self.scheduler_type == 'CosineDecayLR':
                        self.model[module]['scheduler'].step(self.iter)
                    else:
                        self.model[module]['scheduler'].step()
            else:
                if i % self.norm_iter_step == 0:
                    self.model[module]['optimizer'].step()
                    self.model[module]['optimizer'].zero_grad()
                    if self.scheduler_type == 'CosineDecayLR':
                        # cosine schedule restarts from 0 at the normal phase
                        self.model[module]['scheduler'].step(self.iter - self.freeze_epoch * len(self.train_dataloader))
                    else:
                        self.model[module]['scheduler'].step()
def save_model(self):
for module in self.model:
model_name = '{}_{}.pth'.format(str(module), str(self.iter+1))
model_path = osp.join(self.save_weights_dir, model_name)
torch.save(self.model[module]['net'].state_dict(), model_path)
    @torch.no_grad()
    def val(self):
        """Extract (flip-augmented) features for every validation set,
        all-reduce them across ranks, evaluate, and checkpoint on rank 0
        whenever TPR@FPR=1e-3 or ACC improves on the best seen so far."""
        # switch to test mode
        self.set_model(test_mode=True)
        for val_loader in self.val_dataloader:
            # meta info
            dataset = val_loader.dataset
            # create a placeholder `feats`,
            # compute _feats in different GPUs and collect
            dim = self.config['model']['backbone']['net']['out_channel']
            with torch.no_grad():
                feats = torch.zeros(
                    [len(dataset), dim], dtype=torch.float32).to(self.rank)
                for data, indices in val_loader:
                    data = data.to(self.rank)
                    _feats = self.model['backbone']['net'](data)
                    # add features of the horizontally flipped images
                    data = torch.flip(data, [3])
                    _feats += self.model['backbone']['net'](data)
                    feats[indices, :] = _feats
            # combine the per-rank partial feature matrices
            dist.all_reduce(feats, op=dist.ReduceOp.SUM)
            results = dataset.evaluate(feats.cpu())
            if self.rank == 0:
                results = dict(results)
                self.val_log.logger.info("Processing Val Iter:{} [{} : {}]".format(self.iter + 1, dataset.name, results))
                # if model have acc better in the test data,save the model
                if results['TPR@FPR=1e-3'] >= self.tpr_1e_3 or results['ACC'] >= self.acc:
                    self.save_model()
                    self.tpr_1e_3 = results['TPR@FPR=1e-3']
                    self.acc = results['ACC']
    def train(self):
        """Run the full schedule: optional initial validation, then the
        frozen-backbone epochs, then the normal (end-to-end) epochs.

        Both phases share the same inner loop: forward, backward, gradient
        clipping, (accumulated) optimizer step via update_model, running
        averages for the meters, and periodic logging / validation /
        checkpointing.  Validation is disabled during the frozen phase.
        """
        if self.test_first:
            self.val()
        # Phase 1: backbone frozen, only the head is optimized.
        self.set_optimizer_scheduler(self.config['train']['freeze'], freeze=True)
        for epoch in range(self.freeze_epoch):
            # running averages over the epoch
            Loss, Mag_mean, Mag_std, bkb_grad, head_grad = 0, 0, 0, 0, 0
            if self.sampler != None:
                self.sampler.set_epoch(epoch)
            self.set_model(test_mode=False)
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.rank), labels.to(self.rank)
                # forward
                self.set_model(test_mode=False)
                feats = self.model['backbone']['net'](images)
                loss = self.model['head']['net'](feats, labels)
                # backward
                loss.backward()
                b_norm = self.model['backbone']['clip_grad_norm']
                h_norm = self.model['head']['clip_grad_norm']
                if b_norm < 0. or h_norm < 0.:
                    raise ValueError(
                        'the clip_grad_norm should be positive. ({:3.4f}, {:3.4f})'.format(b_norm, h_norm))
                b_grad = clip_grad_norm_(
                    self.model['backbone']['net'].parameters(),
                    max_norm=b_norm, norm_type=2)
                h_grad = clip_grad_norm_(
                    self.model['head']['net'].parameters(),
                    max_norm=h_norm, norm_type=2)
                # update model
                self.iter = epoch * len(self.train_dataloader) + i
                self.update_model(i, freeze=True)
                # feature-magnitude statistics plus running means of the meters
                magnitude = torch.norm(feats, 2, 1)
                Loss = (Loss * i + loss.item()) / (i + 1)
                Mag_mean = (Mag_mean * i + magnitude.mean().item()) / (i + 1)
                Mag_std = (Mag_std * i + magnitude.std().item()) / (i + 1)
                bkb_grad = (bkb_grad * i + b_grad) / (i + 1)
                head_grad = (head_grad * i + h_grad) / (i + 1)
                if (i + 1) % self.screen_intvl == 0 or (i + 1) == len(self.train_dataloader):
                    if self.rank == 0:
                        # logging and update meters
                        self.train_log.logger.info("Processing Freeze Training Epoch:[{} | {}] Batch:[{} | {}] Lr:{:.6f} Loss:{:.4f} Mag_mean:{:.4f} Mag_std:{:.4f} bkb_grad:{:.4f} head_grad:{:.4f}"
                                .format(epoch + 1, self.freeze_epoch + self.norm_epoch, i + 1, len(self.train_dataloader), self.model['backbone']['optimizer'].param_groups[0]['lr'], Loss, Mag_mean, Mag_std, bkb_grad, head_grad))
                # validation is intentionally disabled during the frozen phase:
                # if (i + 1) % self.val_intvl == 0 or (i + 1) == len(self.train_dataloader) or (self.iter + 1) in self.save_iters:
                #     self.val()
                if ((self.iter + 1) in self.save_iters or (i + 1) == len(self.train_dataloader)) and self.rank == 0:
                    self.save_model()
        # Phase 2: the whole network is trainable.
        self.set_optimizer_scheduler(self.config['train']['norm'], freeze=False)
        for epoch in range(self.norm_epoch):
            Loss, Mag_mean, Mag_std, bkb_grad, head_grad = 0, 0, 0, 0, 0
            if self.sampler != None:
                self.sampler.set_epoch(epoch)
            self.set_model(test_mode=False)
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.rank), labels.to(self.rank)
                # forward
                self.set_model(test_mode=False)
                feats = self.model['backbone']['net'](images)
                loss = self.model['head']['net'](feats, labels)
                # backward
                loss.backward()
                b_norm = self.model['backbone']['clip_grad_norm']
                h_norm = self.model['head']['clip_grad_norm']
                if b_norm < 0. or h_norm < 0.:
                    raise ValueError(
                        'the clip_grad_norm should be positive. ({:3.4f}, {:3.4f})'.format(b_norm, h_norm))
                b_grad = clip_grad_norm_(
                    self.model['backbone']['net'].parameters(),
                    max_norm=b_norm, norm_type=2)
                h_grad = clip_grad_norm_(
                    self.model['head']['net'].parameters(),
                    max_norm=h_norm, norm_type=2)
                # update model: the global iteration continues after the freeze phase
                self.iter = (self.freeze_epoch + epoch) * len(self.train_dataloader) + i
                self.update_model(i, freeze=False)
                magnitude = torch.norm(feats, 2, 1)
                Loss = (Loss * i + loss.item()) / (i + 1)
                Mag_mean = (Mag_mean * i + magnitude.mean().item()) / (i + 1)
                Mag_std = (Mag_std * i + magnitude.std().item()) / (i + 1)
                bkb_grad = (bkb_grad * i + b_grad) / (i + 1)
                head_grad = (head_grad * i + h_grad) / (i + 1)
                if (i + 1) % self.screen_intvl == 0 or (i + 1) == len(self.train_dataloader):
                    if self.rank == 0:
                        # logging and update meters
                        self.train_log.logger.info("Processing Norm Training Epoch:[{} | {}] Batch:[{} | {}] Lr:{:.6f} Loss:{:.4f} Mag_mean:{:.4f} Mag_std:{:.4f} bkb_grad:{:.4f} head_grad:{:.4f}"
                                .format(epoch + self.freeze_epoch + 1, self.freeze_epoch + self.norm_epoch, i + 1, len(self.train_dataloader), self.model['backbone']['optimizer'].param_groups[0]['lr'], Loss, Mag_mean, Mag_std, bkb_grad, head_grad))
                # do test
                if (i + 1) % self.val_intvl == 0 or (i + 1) == len(self.train_dataloader) or (self.iter + 1) in self.save_iters:
                    self.val()
                # do save
                if ((self.iter + 1) in self.save_iters or (i + 1) == len(self.train_dataloader)) and self.rank == 0:
                    self.save_model()
| CxyZyr/face-recognition | runner.py | runner.py | py | 13,847 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.backends",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "utils.get_rank",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "utils.get_worl... |
32115859766 | from __future__ import annotations
from typing import Iterable, Iterator, List, Literal, Optional, Type
import frictionless as fl
import marshmallow as mm
from dimcat import DimcatConfig, get_class
from dimcat.data.base import Data
from dimcat.data.packages.base import Package, PackageSpecs
from dimcat.data.resources.base import Resource
from dimcat.data.resources.dc import FeatureSpecs
from dimcat.dc_exceptions import (
DuplicatePackageNameError,
EmptyCatalogError,
EmptyPackageError,
NoMatchingResourceFoundError,
PackageNotFoundError,
ResourceNotFoundError,
)
from dimcat.utils import treat_basepath_argument
from frictionless import FrictionlessException
from typing_extensions import Self
class DimcatCatalog(Data):
    """Has the purpose of collecting and managing a set of :obj:`Package` objects.

    Analogous to a :obj:`frictionless.Catalog`, but without intermediate :obj:`frictionless.Dataset` objects.
    Nevertheless, a DimcatCatalog can be stored as and created from a Catalog descriptor (ToDo).
    """

    # Marshmallow schema used when (de)serializing a pickled catalog:
    # the contained packages are serialized via their own Package.Schema.
    class PickleSchema(Data.PickleSchema):
        packages = mm.fields.List(
            mm.fields.Nested(Package.Schema),
            required=False,
            allow_none=True,
            metadata=dict(description="The packages in the catalog."),
        )

    # Regular schema: same fields as the pickle schema.
    class Schema(PickleSchema, Data.Schema):
        pass
    def __init__(
        self,
        basepath: Optional[str] = None,
        packages: Optional[PackageSpecs | List[PackageSpecs]] = None,
    ) -> None:
        """Creates a DimcatCatalog which is essentially a list of :obj:`Package` objects.

        Args:
            basepath: The basepath for all packages in the catalog.
            packages: A single package spec or a list of them to populate the catalog with.
        """
        # Backing store for the packages; populated via the ``packages`` setter.
        self._packages: List[Package] = []
        super().__init__(basepath=basepath)
        if packages is not None:
            self.packages = packages
def __getitem__(self, item: str) -> Package:
try:
return self.get_package(item)
except Exception as e:
raise KeyError(str(e)) from e
    def __iter__(self) -> Iterator[Package]:
        # iterating a catalog yields its packages
        yield from self._packages

    def __len__(self) -> int:
        # number of packages currently loaded
        return len(self._packages)

    @property
    def basepath(self) -> Optional[str]:
        """If specified, the basepath for all packages added to the catalog."""
        return self._basepath

    @basepath.setter
    def basepath(self, basepath: str) -> None:
        # only propagate the new basepath to the packages while the catalog is
        # still fresh (i.e. had no basepath before)
        new_catalog = self._basepath is None
        self._set_basepath(basepath, set_packages=new_catalog)

    @property
    def package_names(self) -> List[str]:
        # names of all loaded packages, in load order
        return [package.package_name for package in self._packages]

    @property
    def packages(self) -> List[Package]:
        return self._packages

    @packages.setter
    def packages(self, packages: PackageSpecs | List[PackageSpecs]) -> None:
        # bulk initialization; only allowed while the catalog is empty
        if len(self._packages) > 0:
            raise ValueError("Cannot set packages if packages are already present.")
        if isinstance(packages, (Package, fl.Package, str)):
            packages = [packages]
        for package in packages:
            try:
                self.add_package(package)
            except FrictionlessException as e:
                # a malformed descriptor is logged and skipped, not fatal
                self.logger.error(f"Adding the package {package!r} failed with\n{e!r}")
    def add_package(
        self,
        package: PackageSpecs,
        basepath: Optional[str] = None,
        copy: bool = False,
    ):
        """Adds a :obj:`Package` to the catalog.

        Args:
            package: A Package, a frictionless Package, or a descriptor path.
            basepath: If given, set as the package's basepath after loading.
            copy: If True and ``package`` is already a Package, store a copy.

        Raises:
            TypeError: If ``package`` is none of the accepted types.
            DuplicatePackageNameError: If a package of that name is already loaded.
        """
        if isinstance(package, fl.Package):
            dc_package = Package.from_descriptor(package)
        elif isinstance(package, str):
            # strings are interpreted as paths to a package descriptor
            dc_package = Package.from_descriptor_path(package)
        elif isinstance(package, Package):
            if copy:
                dc_package = package.copy()
            else:
                dc_package = package
        else:
            msg = f"{self.name}.add_package() takes a package, not {type(package)!r}."
            raise TypeError(msg)
        if dc_package.package_name in self.package_names:
            raise DuplicatePackageNameError(dc_package.package_name)
        if basepath is not None:
            dc_package.basepath = basepath
        self._packages.append(dc_package)
    def add_resource(
        self,
        resource: Resource,
        package_name: Optional[str] = None,
    ):
        """Adds a resource to the catalog. If package_name is given, adds the resource to the package with that name.

        The target package is created on the fly if it does not exist yet.
        NOTE(review): with ``package_name=None`` this still goes through
        ``get_package_by_name(None, create=True)``, which would create a
        package literally named None -- confirm intended behavior.
        """
        package = self.get_package_by_name(package_name, create=True)
        package.add_resource(resource=resource)
def check_feature_availability(self, feature: FeatureSpecs) -> bool:
"""Checks whether the given feature is potentially available."""
return True
def copy(self) -> Self:
new_object = self.__class__(basepath=self.basepath)
new_object.packages = self.packages
return new_object
    def extend(self, catalog: Iterable[Package]) -> None:
        """Adds all packages from another catalog to this one.

        Packages with unseen names are added as copies; packages whose name
        is already present are merged resource-wise via Package.extend.
        """
        for package in catalog:
            if package.package_name not in self.package_names:
                self.add_package(package.copy())
                continue
            self_package = self.get_package_by_name(package.package_name)
            self_package.extend(package)
    def extend_package(self, package: Package) -> None:
        """Adds all resources from the given package to the existing one with the same name.

        An empty package of that name is created first if none exists yet.
        """
        catalog_package = self.get_package_by_name(package.package_name, create=True)
        catalog_package.extend(package)
    def get_package(self, name: Optional[str] = None) -> Package:
        """If a name is given, calls :meth:`get_package_by_name`, otherwise returns the last loaded package.

        Raises:
            EmptyCatalogError: If no package has been loaded.
        """
        if name is not None:
            return self.get_package_by_name(name=name)
        if len(self._packages) == 0:
            raise EmptyCatalogError
        # most recently added package
        return self._packages[-1]
    def get_package_by_name(self, name: str, create: bool = False) -> Package:
        """Return the loaded package called ``name``.

        Args:
            name: Package name to look up.
            create: If True, add and return a new empty package instead of raising.

        Raises:
            PackageNotFoundError: If no loaded package has the given name and
                ``create`` is False.
        """
        for package in self._packages:
            if package.package_name == name:
                return package
        if create:
            self.make_new_package(
                package_name=name,
                basepath=self.basepath,
            )
            self.logger.info(f"Automatically added new empty package {name!r}")
            # make_new_package appends at the end, so the fresh package is the
            # "last loaded" one that get_package() returns
            return self.get_package()
        raise PackageNotFoundError(name)
    def get_resource_by_config(self, config: DimcatConfig) -> Resource:
        """Returns the first resource (searching all packages in order) that matches the given config.

        Raises:
            EmptyCatalogError: If the catalog contains no packages.
            NoMatchingResourceFoundError: If no resource matching the config is found in any package.
        """
        if len(self._packages) == 0:
            raise EmptyCatalogError
        for package in self._packages:
            try:
                return package.get_resource_by_config(config)
            except (EmptyPackageError, ResourceNotFoundError):
                # no match in this package; keep searching the next one
                pass
        raise NoMatchingResourceFoundError(config)
    def get_resource_by_name(self, name: str) -> Resource:
        """Returns the Resource with the given name, searching all packages in order.

        Raises:
            EmptyCatalogError: If the catalog contains no packages.
            ResourceNotFoundError: If the resource with the given name is not found.
        """
        if len(self._packages) == 0:
            raise EmptyCatalogError
        for package in self._packages:
            try:
                return package.get_resource_by_name(name=name)
            except (EmptyPackageError, ResourceNotFoundError):
                pass
        # NOTE(review): ``self.catalog_name`` is not defined anywhere in this
        # class; unless a superclass provides it, this raise would itself fail
        # with an AttributeError -- confirm against the Data base class.
        raise ResourceNotFoundError(name, self.catalog_name)
def get_resources_by_regex(self, regex: str) -> List[Resource]:
"""Returns the Resource objects whose names contain the given regex."""
result = []
for package in self._packages:
result.extend(package.get_resources_by_regex(regex=regex))
return result
    def get_resources_by_type(
        self,
        resource_type: Type[Resource] | str,
    ) -> List[Resource]:
        """Returns the Resource objects of the given type from all packages.

        A string is resolved to the corresponding class via get_class().
        """
        if isinstance(resource_type, str):
            resource_type = get_class(resource_type)
        results = []
        for package in self._packages:
            results.extend(package.get_resources_by_type(resource_type=resource_type))
        return results
def has_package(self, name: str) -> bool:
"""Returns True if a package with the given name is loaded, False otherwise."""
for package in self._packages:
if package.package_name == name:
return True
return False
def iter_resources(self):
"""Iterates over all resources in all packages."""
for package in self:
for resource in package:
yield resource
def make_new_package(
    self,
    package: Optional[PackageSpecs] = None,
    package_name: Optional[str] = None,
    basepath: Optional[str] = None,
    auto_validate: bool = False,
):
    """Adds a package to the catalog. Parameters are the same as for :class:`Package`."""
    # None, an fl.Package (presumably a frictionless descriptor -- confirm), or a
    # string (path/name) are all turned into a fresh Package; a ready-made Package
    # instance is used as-is.
    if package is None or isinstance(package, (fl.Package, str)):
        package = Package(
            package_name=package_name,
            basepath=basepath,
            auto_validate=auto_validate,
        )
    elif not isinstance(package, Package):
        # Any other type is rejected outright.
        msg = f"{self.name} takes a Package, not {type(package)!r}."
        raise ValueError(msg)
    self.add_package(package, basepath=basepath)
def replace_package(self, package: Package) -> None:
    """Replaces the package with the same name as the given package with the given package."""
    if not isinstance(package, Package):
        msg = (
            f"{self.name}.replace_package() takes a Package, not {type(package)!r}."
        )
        raise TypeError(msg)
    # Swap in place the first (and, by construction, only) package of the same name.
    for i, p in enumerate(self._packages):
        if p.package_name == package.package_name:
            self.logger.info(
                f"Replacing package {p.package_name!r} ({p.n_resources} resources) with "
                f"package {package.package_name!r} ({package.n_resources} resources)"
            )
            self._packages[i] = package
            return
    # No package of that name was present: fall back to a plain add.
    self.add_package(package)
def _set_basepath(
    self,
    basepath: str | Literal[None],
    set_packages: bool = True,
) -> None:
    """Sets the basepath for all packages in the catalog (if set_packages=True)."""
    # Normalize the argument first (treat_basepath_argument presumably
    # validates/expands the path -- defined elsewhere in the module).
    self._basepath = treat_basepath_argument(basepath, self.logger)
    if not set_packages:
        return
    # Propagate the normalized basepath to every loaded package.
    for package in self._packages:
        package.basepath = self.basepath
def summary_dict(self, include_type: bool = True) -> dict:
    """Summarize the catalog as ``{basepath, packages: {package_name: resources}}``.

    With ``include_type`` each resource is rendered as ``'name' (dtype)``;
    otherwise the plain resource names are listed.
    """
    packages = {}
    for pkg in self._packages:
        if include_type:
            packages[pkg.package_name] = [
                f"{res.resource_name!r} ({res.dtype})" for res in pkg
            ]
        else:
            packages[pkg.package_name] = pkg.resource_names
    return {"basepath": self.basepath, "packages": packages}
| DCMLab/dimcat | src/dimcat/data/catalogs/base.py | base.py | py | 11,590 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "dimcat.data.base.Data",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "dimcat.data.base.Data.PickleSchema",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "dimcat.data.base.Data",
"line_number": 32,
"usage_type": "name"
},
{
... |
36445673009 | """remove subscriber
Revision ID: f71f10afe911
Revises: 514826a76b2b
Create Date: 2020-03-15 02:09:24.586462
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f71f10afe911'
down_revision = '514826a76b2b'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: drop the ``subscribers`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('subscribers')
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: recreate the ``subscribers`` table exactly as it was."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('subscribers',
                    sa.Column('id', sa.INTEGER(),
                              autoincrement=True, nullable=False),
                    sa.Column('subscriber_id', sa.VARCHAR(),
                              autoincrement=False, nullable=False),
                    sa.PrimaryKeyConstraint('id', name='subscribers_pkey'),
                    # subscriber_id was unique per row in the original schema.
                    sa.UniqueConstraint(
                        'subscriber_id', name='subscribers_subscriber_id_key')
                    )
    # ### end Alembic commands ###
| mhelmetag/mammoth | alembic/versions/f71f10afe911_remove_subscriber.py | f71f10afe911_remove_subscriber.py | py | 1,071 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.drop_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "alembic.op.create_table",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "alembic.op",... |
4179808886 | '''
constants used throughout project
'''
import numpy as np
from astropy.cosmology import FlatLambdaCDM
# Flip to True to force recomputation of cached results -- TODO confirm against callers.
RERUN_ANALYSIS = False
## set cosmology to Planck 2018 Paper I Table 6
cosmo = FlatLambdaCDM(H0=67.32, Om0=0.3158, Ob0=0.03324)
boss_h = 0.676  ## h that BOSS uses.
h = 0.6732  ## planck 2018 h
# Comoving distance [Mpc] to z_drag.
eta_star = cosmo.comoving_distance(1059.94).value  ## z_drag from Planck 2018 cosmology paper Table 2, all Planck alone
rs = 147.09  ## try rs=r_drag from Planck 2018 same table as z_drag above
# Multipole of the sound horizon at the drag epoch.
lstar = np.pi * eta_star / rs
dklss = np.pi / 19.  ## width of last scattering -- see Bo & David's paper.
| kpardo/mg_bao | mg_bao/constants.py | constants.py | py | 593 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "astropy.cosmology.FlatLambdaCDM",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 18,
"usage_type": "attribute"
}
] |
72294810025 | import streamlit as st
import pandas as pd
import numpy as np
# Wedding budget planner for the region of Southern France
st.title("Wedding Budget Planner for the Region of Southern France")
# Filter to allow the user to narrow down their options
st.subheader("Filter")
number_of_guests = st.slider("Number of guests", 0, 500)
type_of_accommodation = st.selectbox("Type of accommodation", ["Hotel", "Villa", "Castle"])
type_of_catering = st.selectbox("Type of catering", ["Sit-down dinner", "Buffet", "Family-style"])
type_of_entertainment = st.selectbox("Type of entertainment", ["Live band", "DJ", "Karaoke"])
type_of_decor = st.selectbox("Type of decor", ["Simple", "Elegant", "Extravagant"])
# Selector of the number of days for the wedding
st.subheader("Number of days")
number_of_days = st.slider("Number of days for the wedding", 0, 30)
# Maximum and minimum budget
st.subheader("Budget")
maximum_budget = st.number_input("Maximum budget", 0, 100000)
minimum_budget = st.number_input("Minimum budget", 0, maximum_budget)
# OpenAI API connection
st.subheader("OpenAI API")
openAI_connect = st.checkbox("Connect to OpenAI API for budget advice")
if openAI_connect:
st.text("Connecting to OpenAI API...")
# Connect to OpenAI API
# Retrieve budget advice
st.text("Retrieving budget advice...")
st.text("Budget advice: Spend wisely and get the most value for your money.") # dummy advice
st.success("Done!") # when the user is done creating the budget plan
st.button("Save budget plan") # save the budget plan
| karlotimmerman/budget_heroku | hello.py | hello.py | py | 1,545 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.title",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.slider",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.selectb... |
6751661466 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QWidget, QTreeWidgetItem, QMenu
from PyQt5.QtCore import pyqtSlot, QPoint
from selfcheck.controllers.selfcheckcontroller import SelfCheckController
from selfcheck.modules.editselfcheckitemmodule import EditSelfCheckItemModule
from selfcheck.views.selfcheckitemlist import Ui_Form
import user
class SelfCheckItemListModule(QWidget, Ui_Form):
def __init__(self, parent=None):
super(SelfCheckItemListModule, self).__init__(parent)
self.setupUi(self)
self.SC = SelfCheckController()
self.current_kind = ''
self.treeWidget_items.hideColumn(0)
self.get_item_kind()
def get_item_kind(self):
temp_kind = self.current_kind
self.comboBox_kind.clear()
res = self.SC.get_data(0, True, *VALUES_TUPLE_KIND).distinct()
if len(res):
self.comboBox_kind.addItems(res)
if temp_kind != '':
self.comboBox_kind.setCurrentText(temp_kind)
def get_detail(self):
self.treeWidget_items.clear()
condition = {'kind': self.current_kind}
res = self.SC.get_data(0, False, *VALUES_TUPLE_ITEM, **condition)
if not len(res):
return
for item in res.order_by('seqid'):
qtreeitem = QTreeWidgetItem(self.treeWidget_items)
qtreeitem.setText(0, str(item['autoid']))
qtreeitem.setText(1, str(item['seqid']))
qtreeitem.setText(2, item['itemname'])
qtreeitem.setText(3, item['basic'])
for i in range(1, 4):
self.treeWidget_items.resizeColumnToContents(i)
@pyqtSlot(str)
def on_comboBox_kind_currentTextChanged(self, p_str):
self.current_kind = p_str
self.get_detail()
@pyqtSlot(QPoint)
def on_treeWidget_items_customContextMenuRequested(self, pos):
global_pos = self.treeWidget_items.mapToGlobal(pos)
current_item = self.treeWidget_items.currentItem()
menu = QMenu()
action_1 = menu.addAction("增加")
action_2 = menu.addAction("修改")
action_3 = menu.addAction("删除")
action = menu.exec(global_pos)
if action == action_1:
detail = EditSelfCheckItemModule(parent=self)
detail.accepted.connect(self.get_item_kind)
detail.accepted.connect(self.get_detail)
detail.show()
elif action == action_2:
if current_item is None:
return
id = int(current_item.text(0))
detail = EditSelfCheckItemModule(id, self)
detail.accepted.connect(self.get_item_kind)
detail.accepted.connect(self.get_detail)
detail.show()
elif action == action_3:
if current_item is None:
return
id = int(current_item.text(0))
condition = {'autoid': id}
self.SC.delete_data(0, **condition)
self.get_item_kind()
self.get_detail()
@pyqtSlot(QTreeWidgetItem, int)
def on_treeWidget_items_itemDoubleClicked(self, qtreeitem, p_int):
id = int(qtreeitem.text(0))
detail = EditSelfCheckItemModule(id, self)
detail.accepted.connect(self.get_item_kind)
detail.accepted.connect(self.get_detail)
detail.show()
VALUES_TUPLE_KIND = ('kind', )
VALUES_TUPLE_ITEM = ('autoid', 'seqid', 'itemname', 'basic')
| zxcvbnmz0x/gmpsystem | selfcheck/modules/selfcheckitemlistmodule.py | selfcheckitemlistmodule.py | py | 3,415 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selfcheck.views.selfcheckitemlist.Ui_Form",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selfcheck.controllers.selfcheckcontroller.SelfCheckController",
"line_numbe... |
2723494159 | #!/usr/bin/python3
"""tracking the iss using
api.open-notify.org/astros.json | Alta3 Research"""
# notice we no longer need to import urllib.request or json
import requests
## Define URL
MAJORTOM = 'http://api.open-notify.org/astros.json'
def main():
    """Fetch the astros.json feed and print who is currently in space."""
    ## Call the webservice
    groundctrl = requests.get(MAJORTOM)
    # send a post with requests.post()
    # send a put with requests.put()
    # send a delete with requests.delete()
    # send a head with requests.head()
    ## strip the json off the 200 that was returned by our API
    ## translate the json into python lists and dictionaries
    helmetson = groundctrl.json()
    ## display our Pythonic data
    print("\n\nConverted Python data")
    print(helmetson)
    print('\n\nPeople in Space: ', helmetson['number'])
    people = helmetson['people']
    print(people)
    # Each entry has a 'name' and the 'craft' the astronaut is aboard.
    for astronaut in helmetson["people"]:
        # notice that the text is pink between the two " marks
        # python thinks you're starting and stopping a string on one line
        # the fix is to mix up your ' and " quotation marks a bit
        #print(f"{astronaut["name"]} is on the {astronaut["craft"]}")
        print(f"{astronaut['name']} is on the {astronaut['craft']}")
if __name__ == "__main__":
main()
| chadkellum/mycode | iss/requests-ride_iss.py | requests-ride_iss.py | py | 1,295 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
}
] |
32179513329 | import sys
from PyQt5 import QtWidgets
def Pencere():
    """Build and show a PyQt5 window with OK/Cancel buttons in the bottom-right corner."""
    app = QtWidgets.QApplication(sys.argv)
    okay = QtWidgets.QPushButton("Tamam")
    cancel = QtWidgets.QPushButton("İptal")
    # Horizontal row: the stretch pushes both buttons to the right edge.
    h_box = QtWidgets.QHBoxLayout()
    h_box.addStretch()
    h_box.addWidget(okay)
    h_box.addWidget(cancel)
    # Vertical layout: the stretch pushes the button row to the bottom.
    v_box = QtWidgets.QVBoxLayout()
    v_box.addStretch()
    v_box.addLayout(h_box)
    pencere = QtWidgets.QWidget()
    pencere.setWindowTitle("PyQt5 Ders 4")
    pencere.setLayout(v_box)
    pencere.setGeometry(100, 100, 500, 500)
    pencere.show()
    # Enter the Qt event loop; exit the process with its return code.
    sys.exit(app.exec_())
Pencere()
| mustafamuratcoskun/Sifirdan-Ileri-Seviyeye-Python-Programlama | PyQt5 - Arayüz Geliştirme/Videolarda Kullanılan Kodlar/horizontal ve vertical layout.py | horizontal ve vertical layout.py | py | 643 | python | en | code | 1,816 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidge... |
27868546226 | """Module for I/O related data parsing"""
__author__ = "Copyright (c) 2016, Mac Xu <shinyxxn@hotmail.com>"
__copyright__ = "Licensed under GPLv2 or later."
import datetime
import pprint
import re
from app.modules.lepd.LepDClient import LepDClient
class IOProfiler:
def __init__(self, server, config='release'):
    # server: hostname/IP of the LepD endpoint.
    # config: 'debug' makes some responses include the raw command output.
    self.server = server
    self.client = LepDClient(self.server)
    self.config = config
def get_status(self):
    """Fetch one iostat sample via LepD and summarise per-disk throughput and utilisation.

    Returns {} when the service returned nothing; otherwise a dict with 'data'
    (per-disk stats, overall max 'ratio', timing info) and 'rawResult'.
    """
    start_time = datetime.datetime.now()
    result = self.client.getIostatResult()
    if not result:
        return {}
    end_time = datetime.datetime.now()
    # Keep an untouched copy before the header line is consumed below.
    raw_results = result[:]
    headerline = result.pop(0)  # column header; not parsed further
    duration = "%.1f" % ((end_time - start_time).total_seconds())
    io_status = {
        'lepdDuration': duration,
        'disks': {},
        'diskCount': 0,
        'ratio': 0
    }
    for line in result:
        if (line.strip() == ""):
            continue
        line_values = line.split()
        device_name = line_values[0]
        io_status['diskCount'] += 1
        io_status['disks'][device_name] = {}
        # NOTE(review): columns 5/6 assume iostat's extended layout
        # (rkB/s, wkB/s) -- confirm the flags LepD runs iostat with.
        io_status['disks'][device_name]['rkbs'] = line_values[5]
        io_status['disks'][device_name]['wkbs'] = line_values[6]
        io_status['disks'][device_name]['ratio'] = line_values[-1]
        # Track the busiest disk's utilisation as the overall ratio.
        this_disk_ratio = self.client.toDecimal(line_values[-1])
        if this_disk_ratio > io_status['ratio']:
            io_status['ratio'] = this_disk_ratio
    end_time_2 = datetime.datetime.now()
    duration = "%.1f" % ((end_time_2 - end_time).total_seconds())
    io_status['lepvParsingDuration'] = duration
    response_data = {
        'data': io_status,
        'rawResult': raw_results
    }
    return response_data
def get_capacity(self):
    """Fetch 'df' output via LepD and report total/used disk figures.

    Returns {} when the service returned nothing. In 'debug' config the raw
    lines are included under 'rawResult'.
    """
    responseLines = self.client.getResponse("GetCmdDf")
    if (len(responseLines) == 0):
        return {}
    responseData = {}
    if (self.config == 'debug'):
        responseData['rawResult'] = responseLines[:]
    diskData = {}
    for resultLine in responseLines:
        # Only physical devices ('/dev/...') are considered; tmpfs etc. skipped.
        if (not resultLine.startswith('/dev/')):
            continue
        lineValues = resultLine.split()
        diskName = lineValues[0][5:]  # device name without the '/dev/' prefix
        diskData[diskName] = {}
        diskData[diskName]['size'] = lineValues[1]
        diskData[diskName]['used'] = lineValues[2]
        diskData[diskName]['free'] = lineValues[3]
        # NOTE(review): these top-level keys are overwritten on every iteration,
        # so the summary below reflects only the LAST '/dev/' line (and raises
        # KeyError when no '/dev/' line was present) -- confirm this is intended.
        diskData['size'] = lineValues[1]
        diskData['used'] = lineValues[2]
        diskData['free'] = lineValues[3]
    capacity = {}
    capacity['diskTotal'] = diskData['size']
    capacity['diskUsed'] = diskData['used']
    responseData['data'] = capacity
    return responseData
def get_io_top(self, ioTopLines=None):
    """Parse iotop output into an order-preserving mapping of per-process I/O stats.

    Args:
        ioTopLines: Pre-fetched iotop output lines (the list is consumed in
            place, as before). When None, lines are requested from the LepD
            service via 'GetCmdIotop'.

    Returns:
        dict with 'rawResult' (a copy of the input lines) and 'data', a dict
        keyed by insertion order (0, 1, ...) whose values carry the fields
        TID, PRIO, USER, READ, WRITE, SWAPIN, IO and COMMAND.
    """
    if ioTopLines is None:
        ioTopLines = self.client.getResponse('GetCmdIotop')

    ioTopResults = {'data': {}, 'rawResult': ioTopLines[:]}
    if len(ioTopLines) < 2:
        return ioTopResults

    # Locate the column-header line; everything before it (banners, totals)
    # is discarded along with the header itself.
    dataLineStartingIndex = 0
    for line in ioTopLines:
        if re.match(r'\W*TID\W+PRIO\W+USER\W+DISK READ\W+DISK WRITE\W+SWAPIN\W+IO\W+COMMAND\W*',
                    line.strip(), re.M | re.I):
            break
        dataLineStartingIndex += 1
    else:
        # Bug fix: previously a missing header caused one pop(0) more than the
        # list holds (IndexError). Bail out gracefully with an empty result.
        return ioTopResults
    del ioTopLines[:dataLineStartingIndex + 1]

    orderIndex = 0
    for line in ioTopLines:
        if line.strip() == '':
            continue
        try:
            # Disk read/write values like "0.00 B/s". Fixed the character class
            # '[G|M|K|B]' -> '[GMKB]': inside a class '|' was a literal char.
            matches = re.findall(r'\s*\d+\.\d{2}\s*[GMKB]/s\s+', line)
            diskRead = matches[0].strip()
            diskWrite = matches[1].strip()
            # The two "N.NN %" occurrences are SWAPIN and IO, in that order.
            matches = re.findall(r'\s*\d+\.\d{2}\s*%\s+', line)
            swapin = matches[0].strip()
            io = matches[1].strip()
            lineValues = line.split()
            pid = lineValues[0].strip()
            prio = lineValues[1].strip()
            user = lineValues[2].strip()
            # Everything after the last '%' is the command line (left unstripped,
            # matching the original behaviour).
            lastPercentIndex = line.rfind('%')
            command = line[lastPercentIndex + 1:]
            ioTopItem = {
                'TID': pid,
                'PRIO': prio,
                'USER': user,
                'READ': diskRead,
                'WRITE': diskWrite,
                'SWAPIN': swapin,
                'IO': io,
                'COMMAND': command,
            }
        except Exception as err:
            # Best-effort parsing: skip rows that don't match the layout.
            print(err, "------- GetCmdIotop")
            continue
        # Incremental int keys preserve the display order of the rows.
        ioTopResults['data'][orderIndex] = ioTopItem
        orderIndex += 1
    return ioTopResults
# Ad-hoc manual test against a live LepD host (kept as in the original).
if (__name__ == '__main__'):
    profiler = IOProfiler('www.rmlink.cn')
    profiler.config = 'debug'
    pp = pprint.PrettyPrinter(indent=2)
    # monitor = IOMonitor('www.rmlink.cn')
    # pp.pprint(profiler.get_io_top())
    profiler.get_io_top()
    # pp.pprint(profiler.getIoPPData())
| linuxep/lepv | app/modules/profilers/io/IOProfiler.py | IOProfiler.py | py | 5,872 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "app.modules.lepd.LepDClient.LepDClient",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
... |
7537206122 | from django.test import TestCase, tag
from djangoplicity.newsletters.models import NewsletterType, Newsletter
from webb.tests import utils
@tag('newsletters')
class TestNewsletters(TestCase):
    """End-to-end checks for newsletter generation, listing and detail views."""

    fixtures = [
        'test/common',
        'test/media',
        'test/announcements',
        'test/releases',
        'test/highlights',
        'test/newsletters'
    ]

    def setUp(self):
        # Authenticated staff client plus one published, sent newsletter
        # used by the list/detail tests below.
        self.client = utils.get_staff_client()
        self.newsletter_types = NewsletterType.objects.all()
        self.newsletter = Newsletter.objects.filter(published=True, send__isnull=False).first()

    def test_newsletter_generation(self):
        """The admin 'Generate' action creates a newsletter for every type and redirects to it."""
        for newsletter_type in self.newsletter_types:
            response = self.client.post(
                '/admin/newsletters/newsletter/new/',
                {
                    'type': newsletter_type.pk,
                    # Wide date window so every fixture entry is included.
                    'start_date_0': '01/01/2000',
                    'start_date_1': '00:00:00',
                    'end_date_0': '31/12/2220',
                    'end_date_1': '23:59:59',
                    '_generate': 'Generate'
                },
                follow=True
            )
            utils.check_redirection_to(self, response, r'/admin/newsletters/newsletter/[0-9]+/change/')

    def test_newsletter_list(self):
        """The list view supports searching and displays the newsletter type name."""
        url = '/newsletters/{}/'.format(self.newsletter.type.slug)
        response = self.client.get('{}{}'.format(url, '?search=this+does+not+exists'))
        self.assertContains(response, 'No entries were found')
        response = self.client.get(url)
        self.assertContains(response, self.newsletter.type.name)

    def test_newsletter_detail(self):
        """The HTML detail view renders the newsletter subject."""
        response = self.client.get('/newsletters/{}/html/{}/'.format(self.newsletter.type.slug, self.newsletter.pk))
        self.assertContains(response, self.newsletter.subject)
| esawebb/esawebb | webb/tests/newsletters.py | newsletters.py | py | 1,838 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "webb.tests.utils.get_staff_client",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "webb.tests.utils",
"line_number": 19,
"usage_type": "name"
},
{
"api_name":... |
17885393929 | from django.shortcuts import render
from django.views import View
from django.http.response import JsonResponse
from django.template.loader import render_to_string
from .models import Topic
from .forms import TopicForm
class BbsView(View):
    """Simple bulletin board: GET renders the topic list, POST adds a topic via AJAX."""

    def get(self, request, *args, **kwargs):
        topics = Topic.objects.all()
        context = {"topics": topics}
        return render(request, "posting/index.html", context)

    def post(self, request, *args, **kwargs):
        # JSON response contract: {"error": bool, "content": rendered list on success}.
        # NOTE(review): the local name `json` shadows the imported json module here.
        json = {"error": True}
        form = TopicForm(request.POST)
        if not form.is_valid():
            print("Validation Error")  # NOTE(review): consider logging instead of print
            return JsonResponse(json)
        form.save()
        json["error"] = False
        topics = Topic.objects.all()
        context = {"topics": topics}
        # Re-render the list fragment so the client can swap it in without a reload.
        content = render_to_string("posting/content.html", context, request)
        json["content"] = content
        return JsonResponse(json)
index = BbsView.as_view() | inatai/super_tsp | posting/views.py | views.py | py | 1,026 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.View",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.Topic.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Topic.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "... |
32473831452 | from config import bot, chat_id
from plugins.error import Error
import requests
from bs4 import BeautifulSoup
import time
from telebot import types
from plugins.error import in_chat
#________________________________________________________________________________________________________________
#Скриншот сайтов
#________________________________________________________________________________________________________________
@bot.message_handler(commands=['url'])
@in_chat()
def screen(m):
bot.delete_message(m.chat.id, m.message_id)
HEADERS = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
keyboard = types.InlineKeyboardMarkup()
keyboard_delete = types.InlineKeyboardButton(text = "❌", callback_data = "delete")
keyboard.add(keyboard_delete)
try:
res = requests.get(m.text[5:], headers = HEADERS) # Защита от спермотоксикозников
bool_ = ("Порн" in res.text or "Porn" in res.text or "porn" in res.text or "порн" in res.text)
if bool_ == 1:
bot.send_sticker(m.chat.id, "CAACAgQAAxkBAAIaSF93cwIsw1oPRGtOdZHTF8_UsBTDAAJYAAO6erwZr3-jVb-xFsgbBA")
time.sleep (15.5)
bot.delete_message(m.chat.id, m.message_id + 1)
else:
bot.send_photo(m.chat.id, photo="https://mini.s-shot.ru/1366x768/JPEG/1366/Z100/?" + m.text[5:], reply_markup = keyboard)
except Exception as e:
print ("❌ ОШИБКА ❌")
print ("screenshot.py " + e)
Error(m, bot).error() | evilcatsystem/telegram-bot | plugins/screenshot.py | screenshot.py | py | 1,624 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.bot.delete_message",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.bot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "telebot.types.InlineKeyboardMarkup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
17417433393 | from rest_framework.serializers import ModelSerializer
from tintoreria.empleados.models import Empleado
class EmpleadoSerializer(ModelSerializer):
def to_internal_value(self, data):
    """Deserialize *data*, preserving an explicit ``id`` so updates can target an existing row."""
    obj = super(EmpleadoSerializer, self).to_internal_value(data)
    # DRF strips the read-only pk during validation; re-attach it when the client sent one.
    instance_id = data.get('id', None)
    if instance_id:
        obj['id'] = instance_id
    return obj
class Meta:
model = Empleado
fields = ('id',
'nombre',
'paterno',
'materno',
'puesto',
'status') | marco2v0/Tintoreria | site/tintoreria/empleados/serializers.py | serializers.py | py | 587 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "tintoreria.empleados.models.Empleado",
"line_number": 14,
"usage_type": "name"
}
] |
2892146403 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Initial migration: create UserProfile as a one-to-one extension of the auth user."""

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                # The auth user doubles as the primary key (true 1-to-1 extension table).
                ('user', models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, serialize=False)),
                ('date_of_birth', models.DateField(verbose_name='date of birth', blank=True, null=True)),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(verbose_name='phone number', blank=True, max_length=128)),
                # 'U' (unknown) is the default gender.
                ('gender', models.CharField(choices=[('U', 'unknown'), ('M', 'male'), ('F', 'female')], default='U', verbose_name='gender', max_length=1)),
                ('image', models.ImageField(upload_to='', verbose_name='image', blank=True, null=True)),
            ],
        ),
    ]
| abarto/learn_drf_with_images | learn_drf_with_images/user_profiles/migrations/0001_initial.py | 0001_initial.py | py | 1,052 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 16,
"usage_type": "call"
},
... |
28797419371 | import yfinance as yf
from matplotlib import pyplot as plt
def load_ticker(symbol, start="2020-03-01", end="2020-12-02"):
    """Download daily price history for *symbol* as a DataFrame.

    Generalized: the previously hard-coded date window is now parameterized;
    the defaults preserve the original behaviour for existing callers.

    Args:
        symbol: Ticker symbol understood by Yahoo Finance (e.g. "NVDA").
        start, end: "YYYY-MM-DD" bounds passed to ``Ticker.history``.

    Returns:
        DataFrame with a reset integer index and float64 OHLC columns.
    """
    ticker = yf.Ticker(symbol)
    hist = ticker.history(start=start, end=end)
    hist = hist.reset_index()
    # Ensure the price columns are numeric floats for downstream plotting.
    for col in ('Open', 'High', 'Close', 'Low'):
        hist[col] = hist[col].astype('float64')
    return hist
def main():
    """Interactive menu loop: plot opening prices for a chosen pair of tickers.

    Fixes the triplicated plotting code: the three branches differed only in
    the symbol pair, so they are now table-driven. Behaviour is unchanged,
    including showing an (empty) figure on unrecognized input.
    """
    # Menu choice -> the pair of symbols to compare.
    pairs = {
        "1": ("NVDA", "INTC"),
        "2": ("INTC", "AMD"),
        "3": ("AMD", "NVDA"),
    }
    while True:
        print("Please choose one of the following choices: ")
        print("1. Display graph for NVDA and INTC")
        print("2. Display graph for INTC and AMD")
        print("3. Display graph for AMD and NVDA")
        print("4. Exit.")
        resp = input(">>> ")
        if resp == "4":
            break
        if resp in pairs:
            sym1, sym2 = pairs[resp]
            h1 = load_ticker(sym1)
            h2 = load_ticker(sym2)
            # Draw both 'Open' series on one axes.
            ax = h1[['Open']].plot(title=f"{sym1} vs {sym2}")
            h2[['Open']].plot(ax=ax)
            plt.legend([f"Open {sym1}", f"Open {sym2}"])
        plt.show()
main()
| Eric-Wonbin-Sang/CS110Manager | 2020F_final_project_submissions/mcdonaldjillian/CSfinalproject.py | CSfinalproject.py | py | 1,422 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yfinance.Ticker",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.... |
34211305302 | from flask import Flask, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from .models import *
db = SQLAlchemy()
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'data.db')
def model_exists(model_class):
    """Return True if *model_class*'s table already exists in its bound database."""
    engine = db.get_engine(bind=model_class.__bind_key__)
    # NOTE(review): Table.exists(engine) was removed in SQLAlchemy 1.4 --
    # confirm the pinned SQLAlchemy version supports this call.
    return model_class.metadata.tables[model_class.__tablename__].exists(engine)
def create_app(config=None):
    """Application factory: build the Flask app, bind the DB and register blueprints.

    NOTE(review): `os` is used below but not imported in this module's visible
    imports -- presumably provided via `from .models import *`; confirm.
    """
    app = Flask(__name__, static_url_path="", static_folder="build")
    CORS(app)
    # app.config.from_object('config.ProductionConfig')
    # app.config.from_object('config.DevelopmentConfig')
    app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
    db.init_app(app)

    # Serve React App
    @app.route("/", defaults={"path": ""})
    @app.route("/<path:path>")
    def serve(path):
        # Serve a real build artifact when it exists; otherwise fall back to
        # the SPA entry point so client-side routing keeps working.
        if path != "" and os.path.exists(app.static_folder + "/" + path):
            return send_from_directory(app.static_folder, path)
        else:
            return send_from_directory(app.static_folder, "index.html")

    # if not model_exists(User):
    #     User.__table__.create(db.session.bind)
    # Blueprints are imported lazily here to avoid circular imports.
    from .auth import auth
    app.register_blueprint(auth)
    from .api import api
    app.register_blueprint(api)
    from .pages import page
    app.register_blueprint(page)
    # admin = User(name='admin', password='123456', admin=True)
    # db.session.add(admin)
    # db.session.commit()
    # app.run(use_reloader=True, port=5000, threaded=True)
    return app
if __name__ == "__main__":
app = create_app()
app.run(use_reloader=True, port=5000, threaded=True)
| wickes1/fullstack-react-flask-overview-backend | app/__init__.py | __init__.py | py | 1,680 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.send_fro... |
41037129028 | import matplotlib.pyplot as plt
import cv2
import os
import random
BASE_PATH = "testImages"
CATEGORIES = ["flybuss", "neptuntaxi", "trondertaxi"]
IMG_SIZE = 60  # images are resized to IMG_SIZE x IMG_SIZE before display

# Preview every test image per category: load as grayscale, resize, show one by one.
for category in CATEGORIES:
    path = os.path.join(BASE_PATH, category)
    for img in os.listdir(path):
        # NOTE(review): cv2.imread returns None for unreadable files; resize would then raise.
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        plt.imshow(new_array, cmap="gray")
        plt.show()
| JoakimAa/Bachelor2021 | ML/Cnn/viewtest.py | viewtest.py | py | 474 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": ... |
25317597548 | #!/user/bin/python
import configparser
import requests
import json
import time
#read config file for API key
config = configparser.ConfigParser()
config.sections()
config.read('../TwitterScrape/credentials.ini')
api = config.get("keys", 'urlapi')
#Set headers and data for api usage
headers = {
'Content-Type': 'application/json',
'API-Key': api,
}
data = '{"url":"http://bestravelways.com/P1C0uUXVxpq.jsv?byuIqrLNbdSJ=PszfbaUhtspk18d9brJ032bju01farr0116612056ozcw2fio", "public": "on"}'
#sumbits scan and decodes the details#
scan = requests.post('https://urlscan.io/api/v1/scan/', headers=headers, data=data)
scandetails = scan.content.decode('utf-8')
#parse the returned json details
scanjson = json.loads(scandetails)
#test details
#print(scanjson["uuid"])
uuid = scanjson["uuid"]
#print(uuid)
base_url = "https://urlscan.io/api/v1/result/" + str(uuid)
time.sleep(60)
response = requests.get(base_url)
print(response)
responsedetails = response.content.decode('utf-8')
print(responsedetails)
| monkeytail2002/TwitterURLChecker | Test Scripts/testrequest.py | testrequest.py | py | 1,010 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"l... |
19631354561 | from __future__ import unicode_literals
from zope.component.interfaces import ObjectEvent, IObjectEvent
from zope.interface import Attribute, implements
class IGSJoinSiteEvent(IObjectEvent):
    """ An event issued after someone has joined a site."""
    # Attributes documented for event subscribers.
    siteInfo = Attribute('The site that is being joined')
    memberInfo = Attribute('The new site member')
class IGSLeaveSiteEvent(IObjectEvent):
    """ An event issued after someone has left a site."""
    # Attributes documented for event subscribers.
    siteInfo = Attribute('The site that is being left')
    memberInfo = Attribute('The old site member')
class GSJoinSiteEvent(ObjectEvent):
    """Concrete event fired after a member has joined a site."""
    implements(IGSJoinSiteEvent)

    def __init__(self, context, siteInfo, memberInfo):
        ObjectEvent.__init__(self, context)
        self.siteInfo = siteInfo      # the site that was joined
        self.memberInfo = memberInfo  # the new site member
class GSLeaveSiteEvent(ObjectEvent):
    """Concrete event fired after a member has left a site."""
    implements(IGSLeaveSiteEvent)

    def __init__(self, context, siteInfo, memberInfo):
        ObjectEvent.__init__(self, context)
        self.siteInfo = siteInfo      # the site that was left
        self.memberInfo = memberInfo  # the departing site member
| groupserver/gs.site.member.base | gs/site/member/base/event.py | event.py | py | 1,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "zope.component.interfaces.IObjectEvent",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "zope.interface.Attribute",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "zope.interface.Attribute",
"line_number": 9,
"usage_type": "call"
},
{
... |
5179170859 | #coding:utf-8
from django.shortcuts import render_to_response, get_object_or_404
from activity.dao import activityDao
from django.template.context import RequestContext
from collection.dao import collectionDao, select_collection_byReq,\
update_rightTime_byReq, update_wrongTime_byReq
from django.http.response import HttpResponse
import json
from subject.models import Collection, Exercise
from django.views.decorators.csrf import csrf_exempt
from django.utils import simplejson
from exercise.dao import get_tips_byId
def into_collection(req):
    """Render the wrong-answer collection page; redirect to login when no user cookie.

    Python 2 code (``has_key`` / ``str.decode``).
    """
    if req.COOKIES.has_key('userid'):
        userid = req.COOKIES['userid']
        # Log the visit ("entered the wrong-answer collection") as a user activity.
        content = ('进入错题集').decode('utf-8')
        ADao = activityDao({"userid": userid})
        ADao.add_a_activity(content)
        return render_to_response('collection.html', RequestContext(req))
    return render_to_response('login.html', RequestContext(req))
def get_collection(req):
    """Return one page of the user's wrong-answer collection as JSON.

    GET param ``p``: page number; ``p == 0`` means "first load" -- page 1 is
    returned together with the total count under ``numT``.
    Returns ``{}`` when no user cookie is present.
    """
    if req.COOKIES.has_key('userid'):
        p = int(req.GET.get('p'))
        cur = p
        rs = {}
        dao = collectionDao({'userid': req.COOKIES['userid']})
        if p == 0:
            cur = 1
            cn = dao.select_Ccollection_byUs()
            rs['numT'] = cn
        ts = dao.select_collection_byUs(cur)
        rs['col'] = ts
        return HttpResponse(json.dumps(rs), content_type="application/json")
    return HttpResponse(json.dumps({}), content_type="application/json")
@csrf_exempt
def delete_collection(req,p1):
    # Delete collection entry ``p1``, but only once it has been answered
    # correctly at least once; otherwise return a JSON refusal message
    # (the literal says "can only delete when right-count > 0").
    if select_collection_byReq({'id':p1}).righttime > 0:
        col = get_object_or_404(Collection,id=p1)
        col.delete()
        return HttpResponse()
    return HttpResponse(json.dumps({'tips':'唯有正确次数>0才能删除'}),content_type="application/json")
def into_a_collection(req):
    # Render the single-question review page for logged-in users,
    # otherwise fall back to the login page.
    if req.COOKIES.has_key('userid'):
        return render_to_response('a_collection.html',RequestContext(req))
    return render_to_response('login.html',RequestContext(req))
# Fetch a single wrong question from the collection
def get_a_collection(req,param):
    # Return one collected question by its 1-based position ``param`` as JSON;
    # anonymous users receive an empty object.
    if req.COOKIES.has_key('userid'):
        rsp = collectionDao({'userid':req.COOKIES['userid']}).select_a_collection_byUs(int(param)-1)
        return HttpResponse(json.dumps(rsp), content_type="application/json")
    return HttpResponse(json.dumps({}), content_type="application/json")
'''
Verify an answer for a collected wrong question:
  1. Read the login information.
  2. Parse the posted JSON payload.
  3. Check the answer by looking up the question id/answer pair:
     - match:    increase the right-count for the collection entry and
                 return the details of the next wrong question;
     - no match: increase the wrong-count for the collection entry and
                 return the tips for the question.
'''
@csrf_exempt
def check_answer(req):
    # Verify a posted answer for a collected question.
    # Correct -> bump the entry's right-count and return the next question.
    # Wrong   -> bump the wrong-count and return a hint plus the new count.
    # Anything else (not POST / not logged in) -> error tip in JSON.
    if req.method=='POST' and req.COOKIES.has_key('userid'):
        jsonReq = simplejson.loads(req.body)
        title = jsonReq['title']
        id = jsonReq['id']  # NOTE(review): shadows the builtin ``id``
        # Non-empty queryset iff the submitted answer matches the exercise.
        isTitle = Exercise.objects.filter(id = title['id'],answer = title['answer'])
        CDao = collectionDao({'userid':req.COOKIES['userid']})
        if isTitle:
            update_rightTime_byReq({'id':id})
            # ``num`` is the 1-based position of the next question to show.
            rsp = CDao.select_a_collection_byUs(jsonReq['num']-1)
            return HttpResponse(json.dumps(rsp), content_type="application/json")
        else:
            update_wrongTime_byReq({'id':id})
            return HttpResponse(json.dumps({'tips':get_tips_byId(title['id']),'wrongTime':select_collection_byReq({'id':id}).wrongtime}), content_type="application/json")
    # The literal says "access error, please log in again".
    return HttpResponse(json.dumps({'tips':'访问错误,请重新登录'}), content_type="application/json")
| WarmerHu/subject | collection/views.py | views.py | py | 3,516 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "activity.dao.activityDao",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.template.context.RequestContext",
"line_number": 20,
"usage_type": "c... |
5163641580 | import yaml
import sys
import xarray as xr
import time
import glob
def subset_vars(argv):
    """Write a new netCDF file containing only the variables listed in a YAML file.

    argv layout (sys.argv style, 7 entries including the program name):
      argv[1:3] -> input netCDF path and file name
      argv[3:5] -> output netCDF path and file name
      argv[5:7] -> path and name of the YAML file listing variables to keep

    NOTE(review): path and file name are concatenated without a separator,
    so the path arguments must include a trailing '/'.
    """
    if(len(argv)!=7):
        print("USAGE: wrf-subset-vars.py <in nc path> <in nc file> <out nc path> <out nc file> <var list path> <var list file>\n")
        sys.exit(1)
    innc_path = argv[1]
    innc_file = argv[2]
    innc_name = innc_path+innc_file
    outnc_path = argv[3]
    outnc_file = argv[4]
    outnc_name = outnc_path+outnc_file
    yaml_varkeep_path = argv[5]
    yaml_varkeep_file = argv[6]
    yaml_varkeep_name = yaml_varkeep_path+yaml_varkeep_file
    # Get the names of the variables to keep; the YAML file is expected to be
    # a list of mappings each containing a 'var_name' key.
    with open(yaml_varkeep_name,'r') as file_keep:
        var_keep_dict = yaml.full_load(file_keep)
    var_keep_list = [ sub['var_name'] for sub in var_keep_dict ]
    # Open the wrfout file using Xarray (lazy open; no data is read yet).
    ds_wrf = xr.open_dataset(innc_name)
    # Get the subset by passing the list of variable names to keep to
    # the *lazily opened* raw wrfout dataset
    ds_wrf_subset = ds_wrf[var_keep_list]
    # Copy the attributes of the raw WRF dataset to the new subset dataset
    ds_wrf_subset.attrs = ds_wrf.attrs
    # Save the output dataset to the specified netcdf file name
    ds_wrf_subset.to_netcdf(path=outnc_name)
    return
| LEAF-BoiseState/py-wrf-postproc | wrf-subset-vars.py | wrf-subset-vars.py | py | 1,265 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sys.exit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "yaml.full_load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "xarray.open_dataset",
"line_number": 36,
"usage_type": "call"
}
] |
36426081739 | from PIL import Image
import math
def invert(img):
    """Return a new image with every RGB channel replaced by 255 - value."""
    source = img.convert('RGB')
    w, h = source.size
    inverted = Image.new('RGB', (w, h))
    for row in range(h):
        for col in range(w):
            red, green, blue = source.getpixel((col, row))
            inverted.putpixel((col, row), (255 - red, 255 - green, 255 - blue))
    return inverted
# via https://qiita.com/zaburo/items/0b9db87d0a52191b164b
def blur(img):
rgb_img = img.convert('RGB')
width, height = rgb_img.size
img2 = Image.new('RGB', (width, height))
for y in range(height):
for x in range(width):
r0, g0, b0 = rgb_img.getpixel((x, y))
r1 = r2 = r3 = r4 = r5 = r6 = r7 = r8 = r0
g1 = g2 = g3 = g4 = g5 = g6 = g7 = g8 = g0
b1 = b2 = b3 = b4 = b5 = b6 = b7 = b8 = b0
if x - 1 > 0 and y + 1 < height:
r1, g1, b1 = rgb_img.getpixel((x - 1, y + 1))
if y + 1 < height:
r2, g2, b2 = rgb_img.getpixel((x, y + 1))
if x + 1 < width and y + 1 < height:
r3, g3, b3 = rgb_img.getpixel((x + 1, y + 1))
if x - 1 > 0:
r4, g4, b4 = rgb_img.getpixel((x - 1, y))
if x + 1 < width:
r5, g5, b5 = rgb_img.getpixel((x + 1, y))
if x - 1 > 0 and y - 1 > 0:
r6, g6, b6 = rgb_img.getpixel((x - 1, y - 1))
if y - 1 > 0:
r7, g7, b7 = rgb_img.getpixel((x, y - 1))
if x + 1 < width and y - 1 > 0:
r8, g8, b8 = rgb_img.getpixel((x + 1, y - 1))
r = int((r0 + r1 + r2 + r3 + r4 + r5 + r6 + r7 + r8) / 9)
g = int((g0 + g1 + g2 + g3 + g4 + g5 + g6 + g7 + g8) / 9)
b = int((b0 + b1 + b2 + b3 + b4 + b5 + b6 + b7 + b8) / 9)
img2.putpixel((x, y), (r, g, b))
return img2
def brightness(r, g, b, brightnessValue=None):
    """Return the mean of the RGB channels, optionally offset, clamped to 0..255.

    Args:
        r, g, b: channel values (0..255).
        brightnessValue: optional additive offset applied before clamping.

    Returns:
        int in [0, 255].
    """
    mono = int((r + g + b) / 3.0)  # truncating mean, same as int(float(...))
    if brightnessValue is not None:
        mono += brightnessValue
    return max(0, min(255, mono))
def atkinson(src_img, brightnessValue=None):
    """Atkinson-dither *src_img* to pure black/white.

    Error diffusion pushes 1/8 of each pixel's residual darkness to six
    forward neighbours (right, right+1, below-left, below, below-right,
    two rows below), tracked in a flat ``gray_array``.

    NOTE(review): neighbour offsets are computed on the flat index, so at
    row boundaries some error wraps to the opposite edge of the adjacent
    row — confirm whether this is acceptable for the intended use.
    """
    src_rgb_img = src_img.convert('RGB')
    width, height = src_img.size
    result_img = Image.new('RGB', (width, height))
    gray_array_length = width * height
    # Accumulated diffusion error ("darkness") per pixel, row-major.
    gray_array = [0] * gray_array_length
    for y in range(height):
        for x in range(width):
            r, g, b = src_rgb_img.getpixel((x, y))
            bright_temp = brightness(r, g, b, brightnessValue)
            # brightness correction curve: gamma-like sqrt boost of midtones
            bright_temp = int(math.sqrt(255.0) * math.sqrt(bright_temp))
            if bright_temp > 255:
                bright_temp = 255
            elif bright_temp < 0:
                bright_temp = 0
            darkness = int(255 - bright_temp)
            index = y * width + x
            # Add the error diffused into this pixel by earlier pixels.
            darkness += gray_array[index]
            if darkness >= 128:
                result_img.putpixel((x, y), (0, 0, 0))
                # TODO: specify dark_color with atkinson's argument
                darkness -= 128
            else:
                result_img.putpixel((x, y), (255, 255, 255))
            # Atkinson dithering: spread 1/8 of the residual to 6 neighbours
            # (the remaining 2/8 is intentionally dropped).
            darkn8 = int(round(float(darkness) / 8.0))
            if index + 1 < gray_array_length:
                gray_array[index + 1] += darkn8
            if index + 2 < gray_array_length:
                gray_array[index + 2] += darkn8
            if index + width - 1 < gray_array_length:
                gray_array[index + width - 1] += darkn8
            if index + width < gray_array_length:
                gray_array[index + width] += darkn8
            if index + width + 1 < gray_array_length:
                gray_array[index + width + 1] += darkn8
            if index + width * 2 < gray_array_length:
                gray_array[index + width * 2] += darkn8
    return result_img
def main():
    """Load the demo image and show its Atkinson-dithered version."""
    # 'test:Lenna' — presumably a Pythonista built-in test image; confirm
    # availability outside that environment.
    img = Image.open('test:Lenna')
    # img.show()
    # inverted_img = invert(img)
    # inverted_img.show()
    # blured_img = blur(img)
    # blured_img.show()
    atkinson_img = atkinson(img)
    atkinson_img.show()
if __name__ == '__main__':
    main()
| koyachi/sketches | 2021-02-11-pythonista-image/image_processor.py | image_processor.py | py | 3,591 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "PIL.Image.new",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 25... |
15807183248 | # --------------------------------------------------------
# PYTHON PROGRAM
# Here is where we are going to define our set of...
# - Imports
# - Global Variables
# - Functions
# ...to achieve the functionality required.
# When executing > python 'this_file'.py in a terminal,
# the Python interpreter will load our program,
# but it will execute nothing yet.
# --------------------------------------------------------
import pyspark
import pyspark.streaming
import os
import shutil
import time
# ------------------------------------------
# FUNCTION process_line
# ------------------------------------------
def process_line(line, bad_chars):
    """Tokenise one text line into words.

    Deletes every character in ``bad_chars``, splits on whitespace and drops
    words that start with an ASCII digit.

    Args:
        line: the raw input line.
        bad_chars: iterable of substrings to delete before tokenising.

    Returns:
        List of cleaned words, in order of appearance.
    """
    # 1. Delete the unwanted characters.
    for c in bad_chars:
        line = line.replace(c, '')
    # 2. str.split() with no argument collapses any run of whitespace
    #    (tabs included) and never yields empty tokens — this replaces the
    #    original chain of replace('  ', ' ') calls, which only normalised
    #    runs of up to ~8 spaces.
    # 3. Keep words that do not start with an ASCII digit (same test as the
    #    original ord()-range check).
    return [word for word in line.split() if not '0' <= word[0] <= '9']
# ------------------------------------------
# FUNCTION my_model
# ------------------------------------------
def my_model(ssc, monitoring_dir, result_dir, bad_chars):
    """Wire the word-count pipeline onto the Spark Streaming context.

    Per micro-batch: read new files from ``monitoring_dir``, tokenise each
    line with ``process_line``, count word occurrences, pretty-print the
    batch and persist it under ``result_dir``.
    """
    # The code mirrors the Spark Core word_count example: each DStream is a
    # sequence of per-micro-batch RDDs, so every transformation below is
    # applied once per micro-batch.
    # 1. Operation C1: 'textFileStream' stores the novel content of
    #    monitoring_dir for this time step into a new RDD within the DStream.
    inputDStream = ssc.textFileStream(monitoring_dir)
    # 2. Operation T1: 'flatMap' yields all the words of each micro-batch.
    allWordsDStream = inputDStream.flatMap(lambda x: process_line(x, bad_chars))
    # 3. Operation T2: 'map' turns each word into a (word, 1) pair.
    pairWordsDStream = allWordsDStream.map(lambda x: (x, 1))
    # 4. Operation T3: 'reduceByKey' aggregates the counts per word.
    solutionDStream = pairWordsDStream.reduceByKey(lambda x, y: x + y)
    # 5. Output operations: cache (the DStream is consumed twice below),
    #    print a sample to stdout and save each micro-batch to its own
    #    directory under result_dir.
    solutionDStream.cache()
    solutionDStream.pprint()
    solutionDStream.saveAsTextFiles(result_dir)
# ------------------------------------------
# FUNCTION create_ssc
# ------------------------------------------
def create_ssc(sc, monitoring_dir, result_dir, max_micro_batches, time_step_interval, bad_chars):
    """Create and fully model a StreamingContext; returned ready to start."""
    # 1. We create the new Spark Streaming context.
    #    It requires two parameters:
    #    (*) The underlying SparkContext that it will use to process the data.
    #    (**) A batch interval, specifying how often it will check for the
    #         arrival of new data, so as to process it.
    ssc = pyspark.streaming.StreamingContext(sc, time_step_interval)
    # 2. We configure the maximum amount of time the data is retained.
    #    A 24/7 streaming job would otherwise accumulate data without bound;
    #    past this timeout, data is released for garbage collection whether
    #    it still awaits first processing or re-aggregation.
    #    We allow max_micro_batches batches, each arriving every
    #    time_step_interval seconds.
    ssc.remember(max_micro_batches * time_step_interval)
    # 3. We model the ssc: the full set of transformations and output
    #    operations to perform once data is received.
    my_model(ssc, monitoring_dir, result_dir, bad_chars)
    # 4. We return the ssc configured and modelled.
    return ssc
# ------------------------------------------
# FUNCTION get_source_dir_file_names
# ------------------------------------------
def get_source_dir_file_names(local_False_databricks_True, source_dir, verbose):
    """Return the sorted file names found in ``source_dir``.

    Works both locally (os.listdir) and on Databricks (dbutils.fs.ls);
    ``dbutils`` is assumed to be a Databricks-provided global — TODO confirm.
    """
    # 1. We create the output variable
    res = []
    # 2. We get the FileInfo representation of the files of source_dir
    fileInfo_objects = []
    if local_False_databricks_True == False:
        fileInfo_objects = os.listdir(source_dir)
    else:
        fileInfo_objects = dbutils.fs.ls(source_dir)
    # 3. We traverse the fileInfo objects, to get the name of each file
    for item in fileInfo_objects:
        # 3.1. We get a string representation of the fileInfo
        file_name = str(item)
        # 3.2. On DBFS the string is a FileInfo repr; cut out the value of
        #      its name='...' field.
        if local_False_databricks_True == True:
            # 3.2.1. We look for the pattern name=' to drop the prefix
            lb_index = file_name.index("name='")
            file_name = file_name[(lb_index + 6):]
            # 3.2.2. We look for the pattern ', to drop the suffix
            ub_index = file_name.index("',")
            file_name = file_name[:ub_index]
        # 3.3. We append the name to the list
        res.append(file_name)
        if verbose == True:
            print(file_name)
    # 4. We sort the list in alphabetic order
    res.sort()
    # 5. We return res
    return res
# ------------------------------------------
# FUNCTION streaming_simulation
# ------------------------------------------
def streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, verbose):
    """Simulate a stream by copying files from source_dir to monitoring_dir.

    One file is transferred per ``time_step_interval`` seconds so Spark
    Streaming sees them arrive as successive micro-batches.
    """
    # 1. We get the names of the files on source_dir
    files = get_source_dir_file_names(local_False_databricks_True, source_dir, verbose)
    # 2. We get the starting time of the process (after a short offset so the
    #    first copy lands inside the first batch window, not on its edge).
    time.sleep(time_step_interval * 0.1)
    start = time.time()
    # 2.1. If verbose mode, we inform of the starting time
    if (verbose == True):
        print("Start time = " + str(start))
    # 3. We set a counter in the amount of files being transferred
    count = 0
    # 4. We simulate the dynamic arrival of the files from source_dir to
    #    monitoring_dir (moved one by one, one per time period).
    for file in files:
        # 4.1. We copy the file from source_dir to monitoring_dir
        if local_False_databricks_True == False:
            shutil.copyfile(source_dir + file, monitoring_dir + file)
        else:
            dbutils.fs.cp(source_dir + file, monitoring_dir + file)
        # 4.2. We increase the counter, as we have transferred a new file
        count = count + 1
        # 4.3. If verbose mode, we inform of the transfer and current time.
        if (verbose == True):
            print("File " + str(count) + " transferred. Time since start = " + str(time.time() - start))
        # 4.4. We wait the desired transfer_interval until next time slot
        #      (anchored to ``start`` so per-copy overhead does not drift).
        time.sleep((start + (count * time_step_interval)) - time.time())
# ------------------------------------------
# FUNCTION my_main
# ------------------------------------------
def my_main(sc,
            local_False_databricks_True,
            source_dir,
            monitoring_dir,
            checkpoint_dir,
            result_dir,
            max_micro_batches,
            time_step_interval,
            verbose,
            bad_chars):
    """Run one streaming experiment end to end.

    Builds (or restores from checkpoint) the StreamingContext, starts it,
    streams the dataset files into ``monitoring_dir`` and finally stops the
    context, leaving the SparkContext alive.
    """
    # 1. We setup the Spark Streaming context: restore from checkpoint_dir if
    #    one exists, otherwise build a fresh, fully-modelled ssc.
    ssc = pyspark.streaming.StreamingContext.getActiveOrCreate(checkpoint_dir,
                                                               lambda: create_ssc(sc,
                                                                                  monitoring_dir,
                                                                                  result_dir,
                                                                                  max_micro_batches,
                                                                                  time_step_interval,
                                                                                  bad_chars
                                                                                  )
                                                               )
    # 2. We start the Spark Streaming Context in the background.
    #    A Streaming context can be started only once, and only after the
    #    full set of transformations and output operations is specified.
    ssc.start()
    # 3. Jobs run in a separate thread, so we wait (up to one batch interval)
    #    for the streaming computation to get going.
    ssc.awaitTerminationOrTimeout(time_step_interval)
    # 4. We simulate the streaming arrival of files (one by one) from
    #    source_dir to monitoring_dir.
    streaming_simulation(local_False_databricks_True, source_dir, monitoring_dir, time_step_interval, verbose)
    # 5. Once all files are transferred and processed, stop the Spark
    #    Streaming Context (keeping the SparkContext).
    ssc.stop(stopSparkContext=False)
    # 6. Extra security stop command: acts directly over the JVM in case the
    #    Spark Streaming context was not fully stopped — crucial to avoid a
    #    Spark application silently working in the background.
    if (not sc._jvm.StreamingContext.getActive().isEmpty()):
        sc._jvm.StreamingContext.getActive().get().stop(False)
# ---------------------------------------------------------------
# PYTHON EXECUTION
# This is the main entry point to the execution of our program.
# It provides a call to the 'main function' defined in our
# Python program, making the Python interpreter to trigger
# its execution.
# ---------------------------------------------------------------
if __name__ == '__main__':
    # 1. Extra input arguments: characters stripped from every line.
    bad_chars = ['?', '!', '.', ',', ';', '_', '-', '\'', '|', '--',
                 '(', ')', '[', ']', '{', '}', ':', '&', '\n']
    # 2. Local or Databricks
    local_False_databricks_True = False
    # 3. We set the path to my_dataset and my_result
    my_local_path = "/home/nacho/CIT/Tools/MyCode/Spark/"
    my_databricks_path = "/"
    source_dir = "FileStore/tables/2_Spark_Streaming/my_dataset/"
    monitoring_dir = "FileStore/tables/2_Spark_Streaming/my_monitoring/"
    checkpoint_dir = "FileStore/tables/2_Spark_Streaming/my_checkpoint/"
    result_dir = "FileStore/tables/2_Spark_Streaming/my_result/"
    if local_False_databricks_True == False:
        source_dir = my_local_path + source_dir
        monitoring_dir = my_local_path + monitoring_dir
        checkpoint_dir = my_local_path + checkpoint_dir
        result_dir = my_local_path + result_dir
    else:
        source_dir = my_databricks_path + source_dir
        monitoring_dir = my_databricks_path + monitoring_dir
        checkpoint_dir = my_databricks_path + checkpoint_dir
        result_dir = my_databricks_path + result_dir
    # 4. We set the Spark Streaming parameters
    # 4.1. Number of micro-batches (i.e., files) of our dataset.
    #      NOTE(review): must match the number of files in source_dir.
    dataset_micro_batches = 6
    # 4.2. Time interval (seconds) at which each micro-batch (file) appears.
    time_step_interval = 3
    # 4.3. Maximum amount of micro-batches allowed before considering data
    #      old and dumping it.
    max_micro_batches = dataset_micro_batches + 1
    # 4.4. We configure verbosity during the program run
    verbose = False
    # 5. We remove the directories (dbutils exists only on Databricks).
    if local_False_databricks_True == False:
        # 5.1. We remove the monitoring_dir
        if os.path.exists(monitoring_dir):
            shutil.rmtree(monitoring_dir)
        # 5.2. We remove the result_dir
        if os.path.exists(result_dir):
            shutil.rmtree(result_dir)
        # 5.3. We remove the checkpoint_dir
        if os.path.exists(checkpoint_dir):
            shutil.rmtree(checkpoint_dir)
    else:
        # 5.1. We remove the monitoring_dir
        dbutils.fs.rm(monitoring_dir, True)
        # 5.2. We remove the result_dir
        dbutils.fs.rm(result_dir, True)
        # 5.3. We remove the checkpoint_dir
        dbutils.fs.rm(checkpoint_dir, True)
    # 6. We re-create the directories again
    if local_False_databricks_True == False:
        # 6.1. We re-create the monitoring_dir
        os.mkdir(monitoring_dir)
        # 6.2. We re-create the result_dir
        os.mkdir(result_dir)
        # 6.3. We re-create the checkpoint_dir
        os.mkdir(checkpoint_dir)
    else:
        # 6.1. We re-create the monitoring_dir
        dbutils.fs.mkdirs(monitoring_dir)
        # 6.2. We re-create the result_dir
        dbutils.fs.mkdirs(result_dir)
        # 6.3. We re-create the checkpoint_dir
        dbutils.fs.mkdirs(checkpoint_dir)
    # 7. We configure the Spark Context
    sc = pyspark.SparkContext.getOrCreate()
    sc.setLogLevel('WARN')
    print("\n\n\n")
    # 8. We call to our main function
    my_main(sc,
            local_False_databricks_True,
            source_dir,
            monitoring_dir,
            checkpoint_dir,
            result_dir,
            max_micro_batches,
            time_step_interval,
            verbose,
            bad_chars
            )
| segunar/BIG_data_sample_code | Spark/Workspace/2_Spark_Streaming/2_Stateless_Transformations/02_word_count.py | 02_word_count.py | py | 15,508 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.streaming.StreamingContext",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pyspark.streaming",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 145,
"usage_type": "call"
},
{
"api_name"... |
10663976037 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 15:43:55 2015
Plot coodinate time series for radio sources.
@author: Neo
"""
import numpy as np
import matplotlib.pyplot as plt
from fun import ADepoA, ADepoS
cos = np.cos
dat_dir = '../data/opa/'
res_dir = '../plot/timeseries/'
t0 = 2000.0
def tsplot(soun, pmra, pmdec, ra0, dec0):
    """Plot the R.A./Dec. time series of one radio source.

    Offsets are plotted in mas w.r.t. (ra0, dec0); when pmra/pmdec are
    non-zero the fitted linear drift (mas/yr, referred to epoch t0) is
    overlaid. The figure is saved as <soun>.eps in res_dir.
    """
    epo, ra, dec, era, edec = np.loadtxt(dat_dir+soun +'.dat', usecols=list(range(5)), unpack=True)
    # Convert the observation epochs; single-epoch files need the scalar form.
    if epo.size>1:
        epo = ADepoA(epo)
    else:
        epo = ADepoS(epo)
    if ra0 == 0.0:
        # No reference position given: use the last observed position.
        ra0 = ra[-1]
        dec0= dec[-1]
    # deg -> mas (factor 3.6e6); R.A. offsets scaled by cos(Dec).
    x, y1, err1, y2, err2 = epo, (ra-ra0)*3.6e6*cos(np.deg2rad(dec)), era, (dec-dec0)*3.6e6, edec
    x0 = t0
    x1 = np.arange(1979.0, 2017.0, 0.1)
    ## time series plot
    fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
    ax0.errorbar(x, y1, yerr=err1, fmt='bo', markersize=3)
    ax1.errorbar(x, y2, yerr=err2, fmt='bo', markersize=3)
    ### overlay the linear drift when a fit is available (data points >= 9):
    if pmra != 0.0:
        y3 = pmra*(x1-x0)/1.0e3
        y4 = pmdec*(x1-x0)/1.0e3
        ax0.plot(x1, y3, 'r')
        ax1.plot(x1, y4, 'r')
    ## some details.
    ax0.set_ylabel('R.A.(mas)')
    ax0.set_ylim([-50, 50])
    ax0.set_xlim([1979,2017])
    ax0.set_title(soun)
    ax1.set_ylabel('Dec(mas)')
    ax1.set_ylim([-50, 50])
    # plt.show()
    plt.savefig(res_dir+soun+'.eps', dpi=100)
    plt.close()
#tsplot('0434-188')
## read catalog file to get name of sources.
# Read the catalogue to get the list of source names.
cat = '../list/opa.list'
soun = np.loadtxt(cat, dtype=str)
## linear drift data (proper motions and reference positions per source).
apm = '../results/opa_all.apm'
pmRA, pmDE, RA0, DE0 = np.loadtxt(apm, usecols=(2,3,7,8), unpack=True)
# Plot the time series of every catalogued source; the .apm rows are
# assumed to be in the same order as the catalogue — TODO confirm.
for i in range(len(soun)):
    sou_name = soun[i]
    pmra, pmdec, ra0, dec0 = pmRA[i], pmDE[i], RA0[i], DE0[i]
    ## plot
    tsplot(sou_name, pmra, pmdec, ra0, dec0)
print('Done!') | Niu-Liu/thesis-materials | sou-selection/progs/TimeseriesPlot.py | TimeseriesPlot.py | py | 1,827 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.cos",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fun.ADepoA",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "fun.ADepoS",
"line_number... |
25047209667 | from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import Profile, Subject, Lesson, Screenshot
from .permissions import EditingForLecturerOnly
from .serializers import ProfileSerializer, SubjectSerializer, LessonSerializer, ScreenshotSerializer
class UserAPI(APIView):
    """Read-only endpoint listing user profiles, optionally filtered by group."""

    def get(self, request):
        """Return all profiles; ``?group=<name>`` narrows to one user group."""
        queryset = Profile.objects.all()
        group_name = request.query_params.get('group', None)
        if group_name:
            queryset = queryset.filter(user__groups__name=group_name)
        payload = {'users': ProfileSerializer(queryset, many=True).data}
        return Response(payload, status.HTTP_200_OK)
class SubjectAPI(APIView):
    """CRUD endpoint for Subject objects; responses carry Russian status text."""
    def get(self, _):
        # List every subject.
        serializer = SubjectSerializer(Subject.objects.all(), many=True)
        return Response({
            'subjects': serializer.data
        }, status.HTTP_200_OK)
    def post(self, request):
        # Create a subject; invalid data raises a DRF ValidationError (400).
        serializer = SubjectSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            new_subject = serializer.save()
            return Response({
                'success': "Предмет '%s' успешно добавлен." % new_subject.name
            }, status.HTTP_201_CREATED)
    def put(self, request, subject_id):
        # Partial update of an existing subject (404 if it does not exist).
        updated_subject = get_object_or_404(Subject.objects.all(), pk=subject_id)
        serializer = SubjectSerializer(instance=updated_subject, data=request.data, partial=True)
        if serializer.is_valid(raise_exception=True):
            updated_subject = serializer.save()
            return Response({
                'success': "Предмет '%s' был успешно отредактирован." % updated_subject.name
            }, status.HTTP_200_OK)
    def delete(self, _, subject_id):
        # Delete a subject; the message is built before deletion because the
        # instance's name is still needed afterwards.
        subject = get_object_or_404(Subject.objects.all(), pk=subject_id)
        message = "Учебный предмет '%s', а также все учебные предметы, " \
                  "относящиеся к нему, были успешно удалены." % subject.name
        subject.delete()
        return Response({
            'success': message
        }, status.HTTP_200_OK)
class ScreenshotAPI(APIView):
    """CRUD endpoint for Screenshot objects; mirrors SubjectAPI's pattern."""
    def get(self, _):
        # List every screenshot.
        serializer = ScreenshotSerializer(Screenshot.objects.all(), many=True)
        return Response({
            'screenshots': serializer.data
        }, status.HTTP_200_OK)
    def post(self, request):
        # Create a screenshot; invalid data raises a DRF ValidationError (400).
        serializer = ScreenshotSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            new_screenshot = serializer.save()
            return Response({
                'success': "Скриншот '%s' успешно добавлен." % new_screenshot.name
            }, status.HTTP_201_CREATED)
    def put(self, request, screenshot_id):
        # Partial update of an existing screenshot (404 if it does not exist).
        updated_screenshot = get_object_or_404(Screenshot.objects.all(), pk=screenshot_id)
        serializer = ScreenshotSerializer(instance=updated_screenshot, data=request.data, partial=True)
        if serializer.is_valid(raise_exception=True):
            updated_screenshot = serializer.save()
            return Response({
                'success': "Скриншот '%s' был успешно отредактирован." % updated_screenshot.name
            }, status.HTTP_200_OK)
    def delete(self, _, screenshot_id):
        # Delete a screenshot; message built before deletion (name needed).
        screenshot = get_object_or_404(Screenshot.objects.all(), pk=screenshot_id)
        message = "Скриншот '%s' был успешно удален." % screenshot.name
        screenshot.delete()
        return Response({
            'success': message
        }, status.HTTP_200_OK)
class LessonAPI(APIView):
    """CRUD endpoint for Lesson objects; mirrors SubjectAPI's pattern."""
    def get(self, _):
        # List every lesson.
        serializer = LessonSerializer(Lesson.objects.all(), many=True)
        return Response({
            'lessons': serializer.data
        }, status.HTTP_200_OK)
    def post(self, request):
        # Create a lesson; invalid data raises a DRF ValidationError (400).
        serializer = LessonSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            new_lesson = serializer.save()
            return Response({
                'success': "Учебное занятие '%s' успешно добавлен." % new_lesson.name
            }, status.HTTP_201_CREATED)
    def put(self, request, lesson_id):
        # Partial update of an existing lesson (404 if it does not exist).
        updated_lesson = get_object_or_404(Lesson.objects.all(), pk=lesson_id)
        serializer = LessonSerializer(instance=updated_lesson, data=request.data, partial=True)
        if serializer.is_valid(raise_exception=True):
            updated_lesson = serializer.save()
            return Response({
                'success': "Учебное занятие '%s' было успешно отредактировано." % updated_lesson.name
            }, status.HTTP_200_OK)
    def delete(self, _, lesson_id):
        # Delete a lesson; message built before deletion (name needed).
        lesson = get_object_or_404(Lesson.objects.all(), pk=lesson_id)
        message = "Учебное занятие '%s' успешно удалено." % lesson.name
        lesson.delete()
        return Response({
            'success': message
        }, status.HTTP_200_OK)
| vnkrtv/screenshots-loader | backend/app/api/views.py | views.py | py | 5,190 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "models.Profile.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Profile.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
... |
3685311565 | # coding: utf-8
import collections
import os
try:
import StringIO
except:
from io import StringIO
import sys
import tarfile
import tempfile
import urllib
import numpy as np
from PIL import Image, ImageDraw
import collections
import tensorflow as tf
import random
if tf.__version__ < '1.5.0':
raise ImportError('Please upgrade your tensorflow installation to v1.5.0 or newer!')
# Needed to show segmentation colormap labels
from lib import get_dataset_colormap
# In[11]:
# LABEL_NAMES = np.asarray([
# 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
# 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
# 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
# 'train', 'tv'
# ])
class BackgroundSubtractor(object):
    """Replaces an image's background with white, keeping only person pixels.

    Runs a DeepLab semantic-segmentation model and keeps pixels labelled 15
    (the 'person' class in the PASCAL VOC label list used by this model).
    """

    # DeepLab/PASCAL-VOC label id for the 'person' class.
    PERSON_LABEL = 15

    def __init__(self, graph_name):
        """graph_name: path to the frozen DeepLab inference graph."""
        super(BackgroundSubtractor, self).__init__()
        self.model = DeepLabModel(graph_name)
        self.has_person = False

    def extract_image(self, image, mask_array, dst):
        """Composite *image* over white using the segmentation *mask_array*.

        Saves the result to *dst* and returns True when at least one person
        pixel was found; otherwise returns False and writes nothing.
        """
        background = Image.new('RGB', (mask_array.shape[1], mask_array.shape[0]), (255, 255, 255))
        foreground = image
        mask_tmp = []
        for i in range(0, len(mask_array)):
            mask_tmp.append([])
            for j in range(0, len(mask_array[i])):
                if mask_array[i][j] == self.PERSON_LABEL:
                    # Transparent mask pixel -> keep the foreground pixel.
                    mask_tmp[i].append([255, 255, 255, 0])
                    self.has_person = True
                else:
                    # Opaque mask pixel -> replaced by the white background.
                    mask_tmp[i].append([0, 0, 0, 255])
        if self.has_person:
            mask_tmp = np.array(mask_tmp)
            mask = Image.fromarray(mask_tmp.astype('uint8'))
            result = Image.composite(background, foreground, mask)
            result.save(dst)
            return True
        return False

    def execute(self, image_name, dst):
        """Segment *image_name* and write the background-free result to *dst*.

        Returns None when the image cannot be read, otherwise the boolean
        result of extract_image.
        """
        try:
            orignal_im = Image.open(image_name)
        except IOError:
            # BUG FIX: the original referenced the undefined name
            # ``image_path`` here, raising a NameError instead of reporting
            # the read failure.
            print('Failed to read image from %s.' % image_name)
            return None
        resized_im, seg_map = self.model.run(orignal_im)
        # Propagate whether a person was found so run() returns a useful value.
        return self.extract_image(resized_im, seg_map, dst)

    def run(self, src, dest):
        """Reset per-image state and process one image from *src* into *dest*."""
        self.has_person = False
        return self.execute(src, dest)
class DeepLabModel(object):
    """Class to load a frozen DeepLab model and run inference (TF1 graph API)."""
    # Names of the graph's input and output tensors.
    INPUT_TENSOR_NAME = 'ImageTensor:0'
    OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
    # Longest image side fed to the network; inputs are downscaled to fit.
    INPUT_SIZE = 513
    def __init__(self, graph_path):
        """Creates and loads the pretrained deeplab model.

        graph_path: path to a serialized frozen GraphDef (.pb) file.
        """
        self.graph = tf.Graph()
        with open(graph_path, "rb") as f:
            graph_def = tf.GraphDef.FromString(f.read())
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')
        # One long-lived session, reused across run() calls.
        self.sess = tf.Session(graph=self.graph)
    def run(self, image):
        """Runs inference on a single image.

        Args:
            image: A PIL.Image object, raw input image.

        Returns:
            resized_image: RGB image resized from original input image.
            seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        # Scale so the longest side equals INPUT_SIZE, keeping aspect ratio.
        resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
        target_size = (int(resize_ratio * width), int(resize_ratio * height))
        resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
        batch_seg_map = self.sess.run(
            self.OUTPUT_TENSOR_NAME,
            feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        # Batch of one image -> take the first (and only) segmentation map.
        seg_map = batch_seg_map[0]
        return resized_image, seg_map
{
"api_name": "tensorflow.__version__",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.new",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "numpy.array",
... |
35132573715 | import itertools
from abc import ABCMeta
import numpy as np
import tensorflow as tf
import gin.tf
from datasets.raw_dataset import RawDataset
from datasets import dataset_utils
from layers.embeddings_layers import ObjectType
class SamplingDataset(RawDataset, metaclass=ABCMeta):
    # Abstract marker base for datasets that produce sampled training examples.
    pass
@gin.configurable(blacklist=['sample_weights_model', 'sample_weights_loss_object'])
class SamplingEdgeDataset(RawDataset):
    """Edge dataset that pairs each positive graph edge with sampled negatives.

    Negatives are produced by corrupting either the head or the tail entity
    of a positive (head, relation, tail) edge.  Optionally, a
    ``sample_weights_model`` scores a pool of ``sample_weights_count``
    candidate negatives and one is drawn per positive, with probability
    proportional to its pairwise loss (self-adversarial style sampling).
    """
    # Safety bound on corruption attempts per positive edge, so that a
    # densely-connected entity cannot make the generator loop forever.
    MAX_ITERATIONS = 1000
    def __init__(self, negatives_per_positive=1, sample_weights_model=None, sample_weights_loss_object=None,
                 sample_weights_count=100, **kwargs):
        # negatives_per_positive: negatives generated per positive edge.
        # sample_weights_model/loss_object: optional scorer used to pick one
        #   negative out of sample_weights_count candidates; both must be set
        #   together (validated in the `samples` property).
        super(SamplingEdgeDataset, self).__init__(**kwargs)
        self.negatives_per_positive = negatives_per_positive
        self.sample_weights_model = sample_weights_model
        self.sample_weights_loss_object = sample_weights_loss_object
        self.sample_weights_count = sample_weights_count
    def _get_positive_samples_dataset(self):
        """Return a processed tf.data.Dataset over the positive graph edges."""
        raw_dataset = tf.data.Dataset.from_tensor_slices(self.graph_edges)
        raw_dataset = raw_dataset.map(
            lambda x: {"object_ids": x, "object_types": list(dataset_utils.EDGE_OBJECT_TYPES)}
        )
        return self._get_processed_dataset(raw_dataset)
    def _generate_negative_samples(self, negatives_per_positive):
        """Yield corrupted edges; Python generator consumed by tf.data.

        For each positive edge a coin flip decides whether heads or tails are
        corrupted; candidates already present in the graph (or already
        produced for this positive) are rejected.
        """
        random_binary_variable_iterator = dataset_utils.get_int_random_variables_iterator(low=0, high=2)
        random_entity_index_iterator = dataset_utils.get_int_random_variables_iterator(low=0, high=self.entities_count)
        for entity_head, relation, entity_tail in self.graph_edges:
            is_head_to_be_swapped = next(random_binary_variable_iterator)
            produced_edges = []
            iterations_count = 0
            while len(produced_edges) < negatives_per_positive and iterations_count < self.MAX_ITERATIONS:
                if is_head_to_be_swapped:
                    entity_head = self.ids_of_entities[next(random_entity_index_iterator)]
                else:
                    entity_tail = self.ids_of_entities[next(random_entity_index_iterator)]
                produced_edge = (entity_head, relation, entity_tail)
                if produced_edge not in self.set_of_graph_edges and produced_edge not in produced_edges:
                    produced_edges.append(produced_edge)
                iterations_count += 1
            # If the attempt budget was exhausted, the positive edge is
            # skipped entirely (no partial negative set is emitted).
            if iterations_count < self.MAX_ITERATIONS:
                for produced_edge in produced_edges:
                    yield {
                        "object_ids": produced_edge,
                        "object_types": list(dataset_utils.EDGE_OBJECT_TYPES),
                        "head_swapped": is_head_to_be_swapped,
                    }
    def _reorder_negative_samples(self, batched_samples):
        """Turn a dict of [negatives, ...] tensors into a list of per-negative dicts."""
        reordered_samples = []
        for key, values in batched_samples.items():
            for index, negative_inputs in enumerate(tf.unstack(values, axis=1)):
                if len(reordered_samples) <= index:
                    reordered_samples.append({})
                reordered_samples[index][key] = negative_inputs
        return reordered_samples
    def _get_negative_samples_dataset(self):
        """Build the negatives pipeline; batches candidates per positive edge."""
        if self.negatives_per_positive > 1 and self.sample_weights_model is not None:
            raise ValueError("`negatives_per_positive > 1` while `sample_weights_model` is not supported")
        # With a weighting model, generate a larger candidate pool instead.
        negatives_per_positive = (
            self.negatives_per_positive if self.sample_weights_model is None else self.sample_weights_count
        )
        raw_dataset = tf.data.Dataset.from_generator(
            lambda: self._generate_negative_samples(negatives_per_positive),
            output_signature={"object_ids": tf.TensorSpec(shape=(3, ), dtype=tf.int32),
                              "object_types": tf.TensorSpec(shape=(3,), dtype=tf.int32),
                              "head_swapped": tf.TensorSpec(shape=(), dtype=tf.bool)},
        )
        raw_dataset = raw_dataset.batch(negatives_per_positive, drop_remainder=True)
        return self._get_processed_dataset(raw_dataset).map(self._reorder_negative_samples)
    def _pick_samples_using_model(self, positive_inputs, array_of_negative_inputs):
        """Pick one negative per positive, sampled proportionally to its loss."""
        positive_outputs = self.sample_weights_model(positive_inputs, training=False)
        array_of_raw_losses = []
        for negative_inputs in array_of_negative_inputs:
            negative_outputs = self.sample_weights_model(negative_inputs, training=False)
            array_of_raw_losses.append(self.sample_weights_loss_object.get_losses_of_pairs(
                positive_outputs, negative_outputs
            ))
        # losses: [batch, candidates]; normalize rows into a categorical dist.
        losses = tf.transpose(tf.stack(array_of_raw_losses, axis=0))
        probs = losses / tf.expand_dims(tf.reduce_sum(losses, axis=1), axis=1)
        indexes_of_chosen_samples = tf.reshape(tf.random.categorical(tf.math.log(probs), num_samples=1), (-1, ))
        negative_samples_keys = list(array_of_negative_inputs[0].keys())
        chosen_negative_inputs = {}
        for key in negative_samples_keys:
            stacked_inputs = tf.stack([inputs[key] for inputs in array_of_negative_inputs], axis=1)
            chosen_negative_inputs[key] = tf.gather(stacked_inputs, indexes_of_chosen_samples, axis=1, batch_dims=1)
        return positive_inputs, (chosen_negative_inputs, )
    @property
    def samples(self):
        """tf.data.Dataset of (positive_inputs, tuple_of_negative_inputs)."""
        positive_samples = self._get_positive_samples_dataset()
        negative_samples = self._get_negative_samples_dataset()
        samples = tf.data.Dataset.zip((positive_samples, negative_samples))
        # The weighting model and its loss object must be set together.
        if (self.sample_weights_model is None) != (self.sample_weights_loss_object is None):
            raise ValueError("Expected sample_weights_model and sample_weights_loss_object to be set.")
        if self.sample_weights_model is not None:
            samples = samples.map(self._pick_samples_using_model)
        return samples
@gin.configurable
class SamplingNeighboursDataset(SamplingEdgeDataset):
    """Edge dataset that augments each (sampled) edge with graph neighbours.

    For every edge, up to ``neighbours_per_sample`` outgoing edges of the head
    and incoming edges of the tail are appended to the token sequence; slots
    that cannot be filled are padded with SPECIAL_TOKEN object types.
    """
    # Each sampled neighbour contributes an (entity, relation) token pair.
    NEIGHBOUR_OBJECT_TYPES = (ObjectType.ENTITY.value, ObjectType.RELATION.value)
    def __init__(self, neighbours_per_sample, **kwargs):
        # neighbours_per_sample: neighbour edges sampled on each side.
        super(SamplingNeighboursDataset, self).__init__(**kwargs)
        self.neighbours_per_sample = neighbours_per_sample
    def _produce_object_ids_with_types(self, edges):
        """Eager helper (called via tf.py_function) building id/type arrays.

        Excludes the edge itself from its own neighbourhood via banned_edges.
        """
        object_ids, object_types = [], []
        for head_id, relation_id, tail_id in edges.numpy():
            sampled_output_edges, missing_output_edges_count = dataset_utils.sample_edges(
                self.known_entity_output_edges[head_id],
                banned_edges=[(tail_id, relation_id)],
                neighbours_per_sample=self.neighbours_per_sample,
            )
            sampled_input_edges, missing_input_edges_count = dataset_utils.sample_edges(
                self.known_entity_input_edges[tail_id],
                banned_edges=[(head_id, relation_id)],
                neighbours_per_sample=self.neighbours_per_sample,
            )
            object_ids.append([head_id, relation_id, tail_id] + sampled_output_edges + sampled_input_edges)
            # Pad missing neighbour slots (2 tokens each) with SPECIAL_TOKEN.
            outputs_types = list(np.concatenate((
                np.tile(self.NEIGHBOUR_OBJECT_TYPES, reps=self.neighbours_per_sample - missing_output_edges_count),
                np.tile(ObjectType.SPECIAL_TOKEN.value, reps=2 * missing_output_edges_count),
            )))
            inputs_types = list(np.concatenate((
                np.tile(self.NEIGHBOUR_OBJECT_TYPES, reps=self.neighbours_per_sample - missing_input_edges_count),
                np.tile(ObjectType.SPECIAL_TOKEN.value, reps=2 * missing_input_edges_count),
            )))
            object_types.append(list(dataset_utils.EDGE_OBJECT_TYPES) + outputs_types + inputs_types)
        return np.array(object_ids), np.array(object_types)
    def _produce_positions(self, samples_count):
        """Positional ids: 0-2 for the edge, 3/4 per output, 5/6 per input pair."""
        outputs_positions = list(itertools.chain(*[(3, 4) for _ in range(self.neighbours_per_sample)]))
        inputs_positions = list(itertools.chain(*[(5, 6) for _ in range(self.neighbours_per_sample)]))
        positions = [0, 1, 2] + outputs_positions + inputs_positions
        return tf.tile(tf.expand_dims(positions, axis=0), multiples=[samples_count, 1])
    def _include_neighbours_in_edges(self, edges):
        """Replace ids/types with neighbour-augmented versions; keep other keys."""
        object_ids, object_types = tf.py_function(
            self._produce_object_ids_with_types, inp=[edges["object_ids"]], Tout=(tf.int32, tf.int32)
        )
        updated_edges = {
            "object_ids": object_ids,
            "object_types": object_types,
            "positions": self._produce_positions(samples_count=tf.shape(edges["object_ids"])[0]),
        }
        for key, values in edges.items():
            if key in updated_edges:
                continue
            updated_edges[key] = values
        return updated_edges
    def _map_batched_samples(self, positive_edges, array_of_negative_edges):
        """Augment the positive and every negative sample alike."""
        positive_edges = self._include_neighbours_in_edges(positive_edges)
        array_of_negative_edges = tuple([
            self._include_neighbours_in_edges(edges) for edges in array_of_negative_edges
        ])
        return positive_edges, array_of_negative_edges
    @property
    def samples(self):
        """Parent edge samples, mapped through neighbour augmentation."""
        edge_samples = super(SamplingNeighboursDataset, self).samples
        return edge_samples.map(self._map_batched_samples)
| Dawidsoni/relation-embeddings | src/datasets/sampling_datasets.py | sampling_datasets.py | py | 9,287 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datasets.raw_dataset.RawDataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "abc.ABCMeta",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datasets.raw_dataset.RawDataset",
"line_number": 17,
"usage_type": "name"
},
{
"api_n... |
72644950504 | import os
import subprocess
from itertools import chain
from pathlib import Path
import pytest
from netCDF4 import Dataset
from pkg_resources import resource_filename
from compliance_checker.cf import util
from compliance_checker.suite import CheckSuite
def glob_down(pth, suffix, lvls):
    """Glob *pth* for ``*<suffix>`` files at the top level and in up to
    ``lvls - 1`` levels of subfolders.

    suffix in the form ".ipynb"; pth: Path.  Returns a list of Paths,
    shallowest matches first.
    """
    matches = []
    for depth in range(lvls):
        pattern = "*" + "/*" * depth + suffix
        matches.extend(pth.glob(pattern))
    return matches
def generate_dataset(cdl_path, nc_path):
    """Render a CDL text file into a netCDF-4 file via the ``ncgen`` CLI.

    Requires ``ncgen`` on PATH; the return code is not checked here -- the
    caller (static_files) asserts that the output file exists afterwards.
    """
    subprocess.call(["ncgen", "-4", "-o", str(nc_path), str(cdl_path)])
def static_files(cdl_stem):
    """
    Returns the Path (as str) to a valid nc dataset for *cdl_stem*.

    Replaces the old STATIC_FILES dict: locates ``<cdl_stem>.cdl`` under the
    package test-data directory (up to 2 subfolder levels), generates the
    matching ``.nc`` next to it with ncgen if not already present, and
    caches it on disk for subsequent runs.
    """
    datadir = Path(resource_filename("compliance_checker", "tests/data")).resolve()
    assert datadir.exists(), f"{datadir} not found"
    cdl_paths = glob_down(datadir, f"{cdl_stem}.cdl", 3)
    # Exactly one candidate must exist; ambiguity means a naming conflict.
    assert (
        len(cdl_paths) > 0
    ), f"No file named {cdl_stem}.cdl found in {datadir} or its subfolders"
    assert (
        len(cdl_paths) == 1
    ), f"Multiple candidates found with the name {cdl_stem}.cdl:\n{cdl_paths}\nPlease reconcile naming conflict"
    cdl_path = cdl_paths[0]  # PurePath object
    nc_path = cdl_path.parent / f"{cdl_path.stem}.nc"
    # Lazily generate the .nc once; later calls reuse it.
    if not nc_path.exists():
        generate_dataset(cdl_path, nc_path)
    assert (
        nc_path.exists()
    ), f"ncgen CLI utility failed to produce {nc_path} from {cdl_path}"
    return str(nc_path)
# ---------Fixtures-----------
# class scope:
@pytest.fixture(scope="class")
def cs():
    """Class-scoped fixture: a CheckSuite with all available checkers loaded.

    Fix vs. original: the scope was declared as a *function parameter*
    (``def cs(scope="class")``), which pytest does not interpret as the
    fixture scope -- it must be passed to the ``@pytest.fixture`` decorator.
    """
    suite = CheckSuite()
    suite.load_all_available_checkers()
    return suite
@pytest.fixture(scope="class")
def std_names():
    """Class-scoped fixture: the current CF standard-names table.

    Looked up at fixture time because the table version changes.
    Fix vs. original: scope moved from a (ignored) function parameter into
    the ``@pytest.fixture`` decorator, where pytest actually reads it.
    """
    return util.StandardNameTable()
# func scope:
@pytest.fixture
def loaded_dataset(request):
    """
    Return a loaded NC Dataset for the given path.

    ``nc_dataset_path`` is parameterized for each test via indirect
    parametrization (``request.param`` is the cdl stem passed to
    static_files).  The dataset is closed again after the test.
    """
    nc_dataset_path = static_files(request.param)
    nc = Dataset(nc_dataset_path, "r")
    yield nc
    # Teardown: runs after the test body finishes.
    nc.close()
@pytest.fixture
def new_nc_file(tmpdir):
    """
    Make a new temporary netCDF file (write mode) for the scope of the test.
    """
    nc_file_path = os.path.join(tmpdir, "example.nc")
    # Guard against a stale file from a previous (aborted) run.
    if os.path.exists(nc_file_path):
        raise OSError("File Exists: %s" % nc_file_path)
    nc = Dataset(nc_file_path, "w")
    # no need for cleanup, built-in tmpdir fixture will handle it
    return nc
@pytest.fixture
def tmp_txt_file(tmpdir):
    """Return a path for a fresh text output file inside pytest's tmpdir."""
    file_path = os.path.join(tmpdir, "output.txt")
    # Guard against collisions; tmpdir should always be empty per test.
    if os.path.exists(file_path):
        raise OSError("File Exists: %s" % file_path)
    return file_path
@pytest.fixture
def checksuite_setup():
    """For test_cli: reset the global checker registry and reload all checkers."""
    CheckSuite.checkers.clear()
    CheckSuite.load_all_available_checkers()
| ioos/compliance-checker | compliance_checker/tests/conftest.py | conftest.py | py | 2,919 | python | en | code | 92 | github-code | 36 | [
{
"api_name": "itertools.chain",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_f... |
72432170665 | import numpy as np
import cv2
STAGE_FIRST_FRAME = 0
STAGE_SECOND_FRAME = 1
STAGE_DEFAULT_FRAME = 2
kMinNumFeature = 1500
orb = cv2.ORB_create()
lk_params = dict(winSize = (21, 21),
#maxLevel = 3,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
############## Edit this portion ###############
#Add SIFT
def featureTracking(image_ref, image_cur, px_ref):
    """Detect SIFT keypoints on the reference and current frames.

    NOTE(review): despite its name this no longer *tracks* features -- the
    Lucas-Kanade optical flow (commented out below) was replaced by
    independent SIFT detection per frame, so the two returned keypoint sets
    are not in correspondence.  The return values are lists of cv2.KeyPoint
    objects, while the caller (VisualOdometry.processFrame) indexes them like
    Nx2 numpy arrays -- confirm this is intended.  ``px_ref`` is unused.
    """
    # kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref, image_cur, px_ref, None, **lk_params) #shape: [k,2] [k,1] [k,1]
    # st = st.reshape(st.shape[0])
    #initialize SIFT object (requires opencv-contrib / xfeatures2d)
    sift = cv2.xfeatures2d.SIFT_create()
    #detect keypoints on each frame independently
    kp1, _= sift.detectAndCompute(image_ref, None)
    kp2, _= sift.detectAndCompute(image_cur, None)
    '''
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]
    '''
    return kp1, kp2
''' SIFT
import cv2 as cv
#load image
image = cv.imread("lena.jpg")
#convert to grayscale image
gray_scale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
#initialize SIFT object
sift = cv.xfeatures2d.SIFT_create()
#detect keypoints
keypoints, _= sift.detectAndCompute(image, None)
'''
#################
class PinholeCamera:
    """Pinhole camera intrinsics with optional radial/tangential distortion.

    fx/fy are the focal lengths in pixels, (cx, cy) the principal point and
    k1, k2, p1, p2, k3 the distortion coefficients (OpenCV ordering).
    """

    def __init__(self, width, height, fx, fy, cx, cy,
                 k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0):
        self.width, self.height = width, height
        self.fx, self.fy = fx, fy
        self.cx, self.cy = cx, cy
        # Any non-negligible k1 term means the model carries distortion.
        self.distortion = abs(k1) > 1e-7
        self.d = [k1, k2, p1, p2, k3]
class VisualOdometry:
    """Monocular visual odometry over a KITTI-style image sequence.

    The pose (``cur_R``, ``cur_t``) is accumulated frame-by-frame from the
    essential matrix between consecutive frames; absolute scale is recovered
    from the KITTI ground-truth annotation file.

    Fixes vs. original: ``update`` was defined twice with identical bodies
    (the second silently shadowed the first) -- the duplicate is removed;
    ``dtype=np.int`` (an alias of builtin ``int``, removed in NumPy >= 1.24)
    is replaced by ``int``.
    """

    def __init__(self, cam, annotations):
        """cam: PinholeCamera model; annotations: path to a KITTI pose file."""
        self.frame_stage = 0
        self.cam = cam
        self.new_frame = None
        self.last_frame = None
        self.cur_R = None   # accumulated rotation
        self.cur_t = None   # accumulated translation
        self.px_ref = None
        self.px_cur = None
        self.keyp1 = None   # keypoint coords of the previous frame
        self.disptr1 = None # ORB descriptors of the previous frame
        self.keyp2 = None
        self.disptr2 = None
        self.focal = cam.fx
        self.pp = (cam.cx, cam.cy)
        self.trueX, self.trueY, self.trueZ = 0, 0, 0
        self.detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
        with open(annotations) as f:
            self.annotations = f.readlines()

    def getAbsoluteScale(self, frame_id):  # specialized for KITTI odometry dataset
        """Return the ground-truth displacement between frame_id-1 and frame_id.

        KITTI pose rows are flattened 3x4 matrices; columns 3/7/11 hold the
        x/y/z translation components.
        """
        ss = self.annotations[frame_id-1].strip().split()
        x_prev = float(ss[3])
        y_prev = float(ss[7])
        z_prev = float(ss[11])
        ss = self.annotations[frame_id].strip().split()
        x = float(ss[3])
        y = float(ss[7])
        z = float(ss[11])
        self.trueX, self.trueY, self.trueZ = x, y, z
        return np.sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev))

    def processFirstFrame(self):
        """Detect ORB features on the first frame and advance the state machine."""
        keyp1, disptr1 = orb.detectAndCompute(self.new_frame, None)
        self.keyp1 = np.array([x.pt for x in keyp1], dtype=np.float32)
        self.disptr1 = disptr1
        self.frame_stage = STAGE_SECOND_FRAME

    def processSecondFrame(self):
        """Match ORB features against the first frame and bootstrap the pose."""
        keyp2, disptr2 = orb.detectAndCompute(self.new_frame, None)
        self.keyp2 = np.array([x.pt for x in keyp2], dtype=np.float32)
        # Brute-force Hamming matching; crossCheck keeps mutual best matches.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(self.disptr1, disptr2)
        # Keep only the 20 closest matches (sorted low-to-high distance).
        matches = sorted(matches, key=lambda x: x.distance)
        matches = matches[0:20]
        queryIdx = np.array([x.queryIdx for x in matches], dtype=int)
        trainIdx = np.array([x.trainIdx for x in matches], dtype=int)
        self.keyp1 = self.keyp1[queryIdx]
        self.keyp2 = self.keyp2[trainIdx]
        E, mask = cv2.findEssentialMat(self.keyp2, self.keyp1, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.keyp2, self.keyp1, focal=self.focal, pp=self.pp)
        self.frame_stage = STAGE_DEFAULT_FRAME
        self.keyp1 = self.keyp2

    def processFrame(self, frame_id):
        """Estimate relative motion to the previous frame and accumulate pose."""
        self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
        E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
        absolute_scale = self.getAbsoluteScale(frame_id)
        # Only integrate motion when the vehicle actually moved.
        if(absolute_scale > 0.1):
            self.cur_t = self.cur_t + absolute_scale*self.cur_R.dot(t)
            self.cur_R = R.dot(self.cur_R)
        # Re-detect when too few features survive tracking.
        if(self.px_ref.shape[0] < kMinNumFeature):
            self.px_cur = self.detector.detect(self.new_frame)
            self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
        self.px_ref = self.px_cur

    def update(self, img, frame_id):
        """Feed the next grayscale frame through the staged pipeline."""
        assert(img.ndim==2 and img.shape[0]==self.cam.height and img.shape[1]==self.cam.width), "Frame: provided image has not the same size as the camera model or image is not grayscale"
        self.new_frame = img
        if(self.frame_stage == STAGE_DEFAULT_FRAME):
            self.processFrame(frame_id)
        elif(self.frame_stage == STAGE_SECOND_FRAME):
            self.processSecondFrame()
        elif(self.frame_stage == STAGE_FIRST_FRAME):
            self.processFirstFrame()
        self.last_frame = self.new_frame
| aswinsbabu/visual-odometry | test_folder/odometry/sift_odometry.py | sift_odometry.py | py | 5,990 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.ORB_create",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_COUNT",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": ... |
21671571550 | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
class Basset(nn.Module):
    """
    This model is also known to do well in transcription factor binding.
    This model is "shallower" than factorized basset, but has larger convolutions
    that may be able to pick up longer motifs.

    Input: (batch, 1000, 4) one-hot sequences; output: (sigmoid scores of
    shape (batch, num_classes), 1000-d penultimate activations).
    Construction is seeded so weights are reproducible.
    """
    def __init__(self, dropout, num_classes):
        super(Basset, self).__init__()
        # Fixed seed: layer creation order determines the initial weights.
        torch.manual_seed(3278)
        self.dropout = dropout
        # Three conv blocks over the (length, 1) spatial layout; widths 19/11/7.
        self.conv1 = nn.Conv2d(4, 300, (19, 1), stride = (1, 1), padding=(9,0))
        self.conv2 = nn.Conv2d(300, 200, (11, 1), stride = (1, 1), padding = (5,0))
        self.conv3 = nn.Conv2d(200, 200, (7, 1), stride = (1, 1), padding = (4,0))
        self.bn1 = nn.BatchNorm2d(300)
        self.bn2 = nn.BatchNorm2d(200)
        self.bn3 = nn.BatchNorm2d(200)
        self.maxpool1 = nn.MaxPool2d((3, 1))
        self.maxpool2 = nn.MaxPool2d((4, 1))
        self.maxpool3 = nn.MaxPool2d((4, 1))
        # 200 channels x 21 positions = 4200 flattened features.
        self.fc1 = nn.Linear(4200, 1000)
        self.bn4 = nn.BatchNorm1d(1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.bn5 = nn.BatchNorm1d(1000)
        self.fc3 = nn.Linear(1000, num_classes)
    def forward(self, s):
        """s: (batch, 1000, 4) -> (sigmoid logits, 1000-d intermediate)."""
        s = s.permute(0, 2, 1).contiguous() # batch_size x 4 x 1000
        s = s.view(-1, 4, 1000, 1) # batch_size x 4 x 1000 x 1 [4 channels]
        s = self.maxpool1(F.relu(self.bn1(self.conv1(s)))) # batch_size x 300 x 333 x 1
        s = self.maxpool2(F.relu(self.bn2(self.conv2(s)))) # batch_size x 200 x 83 x 1
        s = self.maxpool3(F.relu(self.bn3(self.conv3(s)))) # batch_size x 200 x 21 x 1
        s = s.view(-1, 4200)
        s = F.dropout(F.relu(self.bn4(self.fc1(s))), p=self.dropout, training=self.training) # batch_size x 1000
        s = F.dropout(F.relu(self.bn5(self.fc2(s))), p=self.dropout, training=self.training) # batch_size x 1000
        # Expose the penultimate representation alongside the predictions.
        intermediate_out = s
        s = self.fc3(s)
        s = torch.sigmoid(s)
        return s, intermediate_out
class FactorizedBasset(nn.Module):
    """
    This model is known to do well in predicting transcription factor binding. This means it may be good
    at predicting sequence localization as well, if its architecture lends itself well to predicting sequence
    motifs in general.

    Same input/output contract as Basset -- (batch, 1000, 4) in; (sigmoid
    scores, 4200-d conv features) out -- but each wide convolution is
    factorized into a stack of smaller ones.  Construction is seeded.
    """
    def __init__(self, dropout, num_classes=1):
        super(FactorizedBasset, self).__init__()
        # Fixed seed: layer creation order determines the initial weights.
        torch.manual_seed(3278)
        self.dropout = dropout
        self.num_cell_types = num_classes
        # The layer_* builders also register their sublayers on self.
        self.layer1 = self.layer_one()
        self.layer2 = self.layer_two()
        self.layer3 = self.layer_three()
        self.maxpool1 = nn.MaxPool2d((3, 1))
        self.maxpool2 = nn.MaxPool2d((4, 1))
        self.maxpool3 = nn.MaxPool2d((4, 1))
        self.fc1 = nn.Linear(4200, 1000)
        self.bn4 = nn.BatchNorm1d(1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.bn5 = nn.BatchNorm1d(1000)
        # self.fc3 = nn.Linear(1000, self.num_cell_types)
        self.fc3 = nn.Linear(1000, num_classes)
    def layer_one(self):
        """Factorized replacement for Basset's single 19-wide, 300-channel conv."""
        self.conv1a = nn.Conv2d(4, 48, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1b = nn.Conv2d(48, 64, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1c = nn.Conv2d(64, 100, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1d = nn.Conv2d(100, 150, (7, 1), stride=(1, 1), padding=(3, 0))
        self.conv1e = nn.Conv2d(150, 300, (7, 1), stride=(1, 1), padding=(3, 0))
        self.bn1a = nn.BatchNorm2d(48)
        self.bn1b = nn.BatchNorm2d(64)
        self.bn1c = nn.BatchNorm2d(100)
        self.bn1d = nn.BatchNorm2d(150)
        self.bn1e = nn.BatchNorm2d(300)
        tmp = nn.Sequential(self.conv1a, self.bn1a, nn.ReLU(inplace=True),
                            self.conv1b, self.bn1b, nn.ReLU(inplace=True),
                            self.conv1c, self.bn1c, nn.ReLU(inplace=True),
                            self.conv1d, self.bn1d, nn.ReLU(inplace=True),
                            self.conv1e, self.bn1e, nn.ReLU(inplace=True))
        return tmp
    def layer_two(self):
        """Factorized replacement for the 11-wide, 200-channel conv."""
        self.conv2a = nn.Conv2d(300, 200, (7,1), stride = (1,1), padding = (3,0))
        self.conv2b = nn.Conv2d(200, 200, (3,1), stride = (1,1), padding = (1, 0))
        self.conv2c = nn.Conv2d(200, 200, (3, 1), stride =(1,1), padding = (1,0))
        self.bn2a = nn.BatchNorm2d(200)
        self.bn2b = nn.BatchNorm2d(200)
        self.bn2c = nn.BatchNorm2d(200)
        tmp = nn.Sequential(self.conv2a,self.bn2a, nn.ReLU(inplace= True),
                            self.conv2b,self.bn2b, nn.ReLU(inplace=True),
                            self.conv2c, self.bn2c, nn.ReLU(inplace=True))
        return tmp
    def layer_three(self):
        """Final 7-wide conv block (kept unfactorized)."""
        self.conv3 = nn.Conv2d(200, 200, (7,1), stride =(1,1), padding = (4,0))
        self.bn3 = nn.BatchNorm2d(200)
        return nn.Sequential(self.conv3, self.bn3, nn.ReLU(inplace=True))
    def forward(self, s):
        """Expect input batch_size x 1000 x 4; returns (scores, conv features)."""
        s = s.permute(0, 2, 1).contiguous() # batch_size x 4 x 1000
        s = s.view(-1, 4, 1000, 1) # batch_size x 4 x 1000 x 1 [4 channels]
        s = self.maxpool1(self.layer1(s)) # batch_size x 300 x 333 x 1
        s = self.maxpool2(self.layer2(s)) # batch_size x 200 x 83 x 1
        s = self.maxpool3(self.layer3(s)) # batch_size x 200 x 21 x 1
        s = s.view(-1, 4200)
        # Flattened conv features, exposed alongside the predictions.
        conv_out = s
        s = F.dropout(F.relu(self.bn4(self.fc1(s))), p=self.dropout, training=self.training) # batch_size x 1000
        s = F.dropout(F.relu(self.bn5(self.fc2(s))), p=self.dropout, training=self.training) # batch_size x 1000
        s = self.fc3(s)
        s = torch.sigmoid(s)
        return s, conv_out
if __name__ == "__main__":
    # Easy sanity check that nothing is blatantly wrong:
    # constructing the model builds and registers every layer.
    x = FactorizedBasset(dropout=0.2, num_classes=8)
| wukevin/rnagps | rnagps/models/basset_family.py | basset_family.py | py | 6,034 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "torch.backends",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",... |
36059095725 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch as torch
# In[2]:
import torch.nn as nn
import pandas as pd
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
# In[3]:
df = pd.read_csv("yoochoose-clicks.dat",
names=["session", "timestamp", "item", "category"],
parse_dates=["timestamp"])
# In[9]:
df_percent = df.head(50000)
# In[10]:
df_percent = df_percent[['session','item']]
# In[30]:
df_percent = df_percent.sort_values(by = 'session')
# In[35]:
test_data_size = 10004 #20 percent
train_data = df_percent[:-test_data_size]
test_data = df_percent[-test_data_size:]
# In[237]:
#getting target dataset from training dataset
target_dataset=train_data.loc[(train_data["session"]!=train_data["session"].shift(-1))]
# In[254]:
train_data['session'].isin(target_dataset['session']).value_counts()
# In[217]:
target_numpy = target_dataset.to_numpy(dtype = 'int64')
# In[109]:
train_clicks_numpy = train_data.to_numpy(dtype = 'int64') #Creating training df as numpy int64 type
test_clicks_numpy = test_data.to_numpy(dtype = 'int64') #Creating testing df as numpy int64 type
# In[ ]:
# In[218]:
featuresTrain = torch.from_numpy(train_clicks_numpy)
featuresTest = torch.from_numpy(test_clicks_numpy)
featuresTarget = torch.from_numpy(target_numpy)
# In[114]:
# batch_size, epoch and iteration
batch_size = 100
n_iters = 10000
num_epochs = n_iters / (len(featuresTrain) / batch_size)
num_epochs = int(num_epochs)
# In[111]:
# Pytorch train set
train = TensorDataset(featuresTrain)
# In[112]:
# Pytorch test set
test = TensorDataset(featuresTest)
# In[115]:
# data loader
train_loader = DataLoader(train, batch_size = batch_size, shuffle = False)
test_loader = DataLoader(test, batch_size = batch_size, shuffle = False)
# In[221]:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_arr = scaler.fit_transform(featuresTrain)
val_arr = scaler.transform(featuresTarget)
test_arr = scaler.transform(featuresTest)
# In[207]:
# NOTE(review): notebook-export ordering issue -- `model` is only defined in a
# later cell, and the training loop below uses the name `optimiser`, so this
# Adam instance is never used as written.  Confirm against the notebook.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# In[209]:
#####################
input_dim = 2
hidden_dim = 100
num_layers = 2
output_dim = 1
class LSTM(nn.Module):
    """Many-to-one LSTM regressor: (batch, seq, input_dim) -> (batch, output_dim).

    Bug fixed vs. original: the initial hidden/cell states were created with a
    hard-coded batch dimension of 0 (``torch.zeros(num_layers, 0, hidden)``),
    which raises a size-mismatch error for any non-empty batch.  They are now
    sized from ``x.size(0)``.
    """
    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        # Hidden dimensions
        self.hidden_dim = hidden_dim
        # Number of stacked LSTM layers
        self.num_layers = num_layers
        # batch_first=True -> input/output tensors are (batch, seq, feature)
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        # Readout layer mapping the last hidden state to the output
        self.fc = nn.Linear(hidden_dim, output_dim)
    def forward(self, x):
        """x: (batch, seq_len, input_dim) -> (batch, output_dim)."""
        batch_size = x.size(0)
        # Fresh zero states per call (stateless between batches); they carry
        # no gradient, which truncates backpropagation through time here.
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_dim, device=x.device)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_dim, device=x.device)
        out, (hn, cn) = self.lstm(x, (h0, c0))
        # Use only the last time step's hidden state for the prediction.
        out = self.fc(out[:, -1, :])
        return out
model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
loss_fn = torch.nn.MSELoss(size_average=True)
print(model)
print(len(list(model.parameters())))
for i in range(len(list(model.parameters()))):
print(list(model.parameters())[i].size())
# In[212]:
# Train model
#####################
import numpy as np
look_back = 20
hist = np.zeros(num_epochs)
# Number of steps to unroll
seq_dim =look_back-1
# NOTE(review): this exported-notebook loop references names never defined in
# this file: `train_inout_seq`, `optimiser` (an `optimizer` is created in an
# earlier cell), and compares predictions against `train`, which is a
# TensorDataset rather than a tensor.  It cannot run as-is; confirm against
# the original notebook before relying on it.
for t in range(num_epochs):
    # Initialise hidden state
    # Don't do this if you want your LSTM to be stateful
    #model.hidden = model.init_hidden()
    # Forward pass
    y_train_pred = model(train_inout_seq)
    loss = loss_fn(y_train_pred, train)
    if t % 10 == 0 and t !=0:
        print("Epoch ", t, "MSE: ", loss.item())
    hist[t] = loss.item()
    # Zero out gradient, else they will accumulate between epochs
    optimiser.zero_grad()
    # Backward pass
    loss.backward()
    # Update parameters
    optimiser.step()
# In[ ]:
| fahadkh2019/Capstone_Project | LSTM Modeling-updated.py | LSTM Modeling-updated.py | py | 4,722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
... |
71873731623 | import pygame
from Helper.global_variables import *
from Helper.text_helper import drawTextcenter, drawText
pygame.init()
def update_display(win, height, color_height, numswaps, algorithm, number_of_elements, speed, time, running):
    """Redraw the whole visualizer frame: bars, stats header, buttons.

    Relies on module-level globals (colors, WIDTH/HEIGHT, button_* objects)
    imported from Helper.global_variables.  When `running`, inserts a delay
    per frame according to the selected `speed` label.
    """
    win.fill(BLACK)
    # call show method to display the list items
    show(win, height, color_height, number_of_elements)
    # Draw the 15px-thick separator lines of the header area.
    for i in range(15):
        pygame.draw.line(win, TURQUOISE, (0, 165+i), (WIDTH, 165+i))
        pygame.draw.line(win, TURQUOISE, (1060+i,0), (1060+i,165))
        pygame.draw.line(win, TURQUOISE, (730+i,0), (730+i,165))
        pygame.draw.line(win, TURQUOISE, (230+i,0), (230+i,165))
    # Header stats (swaps, elapsed time, algorithm, element count, speed).
    drawTextcenter("Number of swaps: " + str(numswaps), pygame.font.SysFont('Calibri', 20), win, 100, 25, WHITE)
    drawTextcenter("Time elapsed: " + str(format(time, ".1f")) + "s", pygame.font.SysFont('Calibri', 20), win, 100, 75, WHITE)
    drawTextcenter("Algorithm used: " + algorithm, pygame.font.SysFont('Calibri', 20), win, 475, 25, WHITE)
    drawTextcenter("Number of elements: " + str(number_of_elements), pygame.font.SysFont('Calibri', 20), win, 900, 25, WHITE)
    drawTextcenter("Algorithm speed: " + speed, pygame.font.SysFont('Calibri', 20), win, 1225, 25, WHITE)
    # Control buttons (defined in Helper.global_variables).
    button_start.draw(win)
    button_reset.draw(win)
    button_bubble_sort.draw(win)
    button_insertion_sort.draw(win)
    button_selection_sort.draw(win)
    button_merge_sort.draw(win)
    button_heap_sort.draw(win)
    button_quick_sort.draw(win)
    button_radix_sort.draw(win)
    button_todo4.draw(win)
    button_20.draw(win)
    button_50.draw(win)
    button_75.draw(win)
    button_100.draw(win)
    button_slow.draw(win)
    button_medium.draw(win)
    button_fast.draw(win)
    button_instant.draw(win)
    # create a time delay (milliseconds) while an algorithm is running
    if(running == True):
        delay = 0
        # NOTE(review): 5000 ms per frame for "Slow" looks very long -- confirm.
        if(speed == "Slow"):
            delay = 5000
            pygame.time.delay(delay)
        if(speed == "Medium"):
            delay = 50
            pygame.time.delay(delay)
        if(speed == "Fast"):
            delay = 25
            pygame.time.delay(delay)
        # "No delay" intentionally skips pygame.time.delay entirely.
        if(speed == "No delay"):
            delay = 0
    # update the display
    pygame.display.update()
# method to show the list of height
def show(win, height, color_height, number_of_elements):
    """Draw the list values as vertical bars scaled to the tallest element.

    Each bar is rendered as a Button with negative width/height so it grows
    left/up from its anchor; the value is printed inside the bar.  Skipped
    while no list is loaded (number_of_elements == -1 or empty height).
    """
    if(number_of_elements != -1 and len(height) != 0):
        maximum_value = max(height)
        # Horizontal slot width per element across the full window.
        step = (WIDTH/len(height))
        for i in range(len(height)):
            x = Button(step * (i+1), HEIGHT, -(step), -(height[i]/maximum_value)*3*HEIGHT/4, BLACK, color_height[i], str(height[i]), int(round(step - 20)))
            x.draw(win)
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.draw.line",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.line",
"... |
36375251491 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from moderation.moderator import GenericModerator
from moderation.tests.apps.test_app1.models import UserProfile,\
ModelWithModeratedFields
from moderation.tests.utils.testsettingsmanager import SettingsTestCase
from moderation.tests.utils import setup_moderation, teardown_moderation
class ExcludeAcceptanceTestCase(SettingsTestCase):
    '''
    As developer I want to have way to ignore/exclude model fields from
    moderation.

    Registers UserProfile with a moderator that excludes the `url` field and
    verifies, via the moderation admin change view, which field diffs are
    tracked.
    '''
    fixtures = ['test_users.json', 'test_moderation.json']
    test_settings = 'moderation.tests.settings.generic'
    urls = 'moderation.tests.urls.default'
    def setUp(self):
        # Log in as admin so the moderation admin views are accessible.
        self.client.login(username='admin', password='aaaa')
        class UserProfileModerator(GenericModerator):
            fields_exclude = ['url']
        setup_moderation([(UserProfile, UserProfileModerator)])
    def tearDown(self):
        teardown_moderation()
    def test_excluded_field_should_not_be_moderated_when_obj_is_edited(self):
        '''
        Change field that is excluded from moderation,
        go to moderation admin: the url diff must not be listed.
        '''
        profile = UserProfile.objects.get(user__username='moderator')
        profile.url = 'http://dominno.pl'
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        changes = [change.change for change in response.context['changes']]
        self.assertFalse((u'http://www.google.com',
                          u'http://dominno.pl') in changes)
    def test_non_excluded_field_should_be_moderated_when_obj_is_edited(self):
        '''
        Change field that is not excluded from moderation,
        go to moderation admin: the description diff must be listed.
        '''
        profile = UserProfile.objects.get(user__username='moderator')
        profile.description = 'New description'
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        changes = [change.change for change in response.context['changes']]
        self.assertTrue(("Old description", 'New description') in changes)
    def test_excluded_field_should_not_be_moderated_when_obj_is_created(self):
        '''
        Create new object, only non excluded fields are used
        by moderation system.
        '''
        profile = UserProfile(description='Profile for new user',
                              url='http://www.dominno.com',
                              user=User.objects.get(username='user1'))
        profile.save()
        url = reverse('admin:moderation_moderatedobject_change',
                      args=(profile.moderated_object.pk,))
        response = self.client.get(url, {})
        changes = [change.change for change in response.context['changes']]
        self.assertFalse((u'http://www.dominno.com',
                          u'http://www.dominno.com') in changes)
class ModeratedFieldsAcceptanceTestCase(SettingsTestCase):
    '''
    Test that the `moderated_fields` model argument excludes every field
    that is not listed in it (and keeps the listed fields moderated).
    '''
    test_settings = 'moderation.tests.settings.generic'
    urls = 'moderation.tests.urls.default'
    def setUp(self):
        """Register the test model; its moderated_fields drive the exclusions."""
        setup_moderation([ModelWithModeratedFields])
    def tearDown(self):
        teardown_moderation()
    def test_moderated_fields_not_added_to_excluded_fields_list(self):
        """Fields listed in moderated_fields must not be excluded."""
        from moderation import moderation
        moderator = moderation._registered_models[ModelWithModeratedFields]
        self.assertTrue('moderated' not in moderator.fields_exclude)
        self.assertTrue('also_moderated' not in moderator.fields_exclude)
    def test_unmoderated_fields_added_to_excluded_fields_list(self):
        """Fields absent from moderated_fields must be excluded."""
        from moderation import moderation
        moderator = moderation._registered_models[ModelWithModeratedFields]
        self.assertTrue('unmoderated' in moderator.fields_exclude)
| arowla/django-moderation | src/moderation/tests/acceptance/exclude.py | exclude.py | py | 4,091 | python | en | code | null | github-code | 36 | [
{
"api_name": "moderation.tests.utils.testsettingsmanager.SettingsTestCase",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "moderation.moderator.GenericModerator",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "moderation.tests.utils.setup_moderation",
"... |
28356972055 |
import logging
import sys
from kodi_interface import KodiObj
LOGGING = logging.getLogger(__name__)
def get_input(prompt: str = "> ", choices: list = None, required: bool = False) -> str:
    """Prompt on stdin and return the user's response.

    prompt:   text shown before reading input.
    choices:  when non-empty, re-prompt until the response is one of them.
    required: when True (and no choices were given), re-prompt until the
              response is non-empty.
    """
    # Fix: the original used a mutable default argument (choices=[]).
    choices = choices or []
    ret_val = input(prompt)
    if choices:
        while ret_val not in choices:
            print(f'Invalid selection. Valid entries: {"/".join(choices)}')
            ret_val = input(prompt)
    elif required:
        while not ret_val:
            print('You MUST enter a value.')
            ret_val = input(prompt)
    return ret_val
def setup_logging(log_level=logging.ERROR):
    """Configure root logging with a compact '[LEVEL] message' format."""
    logging.basicConfig(format='[%(levelname)-5s] %(message)s', level=log_level)
def set_loglevel(log_level: str):
    """Set the root logger level from a one-letter code.

    'E' -> ERROR, 'I' -> INFO, anything else -> DEBUG.
    """
    level_map = {"E": logging.ERROR, "I": logging.INFO}
    logging.getLogger().setLevel(level_map.get(log_level, logging.DEBUG))
def dump_methods(kodi: KodiObj):
    """Interactively walk every Kodi namespace and print help for its methods.

    For each namespace the user chooses y/n/q; within a namespace each method
    prompts again: E/I/D changes the log verbosity, 'n' skips the rest of the
    namespace, 'q' exits the program, anything else prints the method's help.
    """
    namespaces = kodi.get_namespace_list()
    for ns in namespaces:
        resp = get_input(f"Display: {ns} (y|n|q)> ",['y','n','Y','N','Q','q']).lower()
        if resp == "q":
            break
        elif resp == 'y':
            ns_methods = kodi.get_namespace_method_list(ns)
            for method in ns_methods:
                resp = get_input(f'{ns}.{method} (E,I,D,n,q)> ',['E','I','D','y','n','q',''])
                if resp in ['E','I','D']:
                    # Single-letter verbosity change, then fall through to help.
                    set_loglevel(resp)
                elif resp == 'q':
                    sys.exit()
                elif resp == 'n':
                    break
                cmd = f'{ns}.{method}'
                print(cmd)
                kodi.help(cmd)
                print()
            print('\n=========================================================================')
def main():
    """Configure logging at ERROR verbosity, connect to Kodi and start the
    interactive method-help browser."""
    setup_logging()
    set_loglevel("E")  # start quiet; the user can raise verbosity per method
    kodi = KodiObj()
    dump_methods(kodi)

if __name__ == "__main__":
    main()
| JavaWiz1/kodi-cli | kodi_help_tester.py | kodi_help_tester.py | py | 2,077 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.ERROR... |
36830593270 | #!/usr/bin/python3
# 涉及对象的定义过程,不能交互式执行,需要放入.py代码文件中执行。
# 导入LCD数字,滑块,部件,Box布局,Q程序,网格布局
from PySide2.QtWidgets import QLCDNumber, QSlider, QWidget, QVBoxLayout, QApplication, QGridLayout
# 导入Qt库
from PySide2.QtCore import Qt
class MyLCDNumber(QWidget):  # composite widget: LCD readout + slider
    """A two-digit LCD display stacked above a horizontal slider (0-99).

    Moving the slider updates the LCD readout live.
    """
    def __init__(self, parent=None):  # optional parent widget
        super().__init__(parent)
        self.lcd_number = QLCDNumber()  # the LCD readout
        self.slider = QSlider(Qt.Horizontal)# slider shown horizontally
        self.layout = QVBoxLayout()  # stack the two children vertically
        self.layout.addWidget(self.lcd_number)  # LCD on top
        self.layout.addWidget(self.slider)  # slider below
        self.setLayout(self.layout)
        self.setFixedSize(120, 100)  # fixed overall widget size
        self.lcd_number.setDigitCount(2)  # show at most two digits
        self.slider.setRange(0, 99)  # adjustable range matches 2 digits
        self.slider.valueChanged.connect(self.lcd_number.display)  # live update
app = QApplication()     # create the Qt application instance
window = QWidget()       # top-level window
layout = QGridLayout()   # arrange the LCD widgets in a grid
# Place one LCD/slider widget in each cell of a 2x2 grid
# (replaces four copy-pasted instantiations with a loop).
for row in (1, 2):
    for col in (1, 2):
        layout.addWidget(MyLCDNumber(), row, col)
window.setLayout(layout)  # apply the grid layout to the window
window.show()             # display the window
app.exec_()               # enter the Qt event loop
| oca-john/Python3-xi | Pyside2/1.pyside2.4.widget.def.py | 1.pyside2.4.widget.def.py | py | 2,141 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "PySide2.QtWidgets.QWidget",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PySide2.QtWidgets.QLCDNumber",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QSlider",
"line_number": 14,
"usage_type": "call"
},
{
"a... |
32442603242 | import sqlite3
conn = sqlite3.connect('bancodedados.db')
cursor = conn.cursor()
#variaveis gerais
usuario_logado = ""
#cria tabelas
def modularTable():#Victor
    """First-run setup wizard: repeatedly ask which module tables to create.

    Recurses after each table creation; option 9 finishes setup and routes
    to firstAccess(). Creating the login table also seeds an admin account.
    """
    clear()
    tabela = int(input('\nBem vindo ao sistema Meditech\nPrimeiramente adicione os modulos com que deseja trabalhar\n\n1 - funcionarios\n2 - Veiculos\n3 - Agendamentos\n4 - Equipamentos\n5 - Paciente\n6 - login\n7 - anamnese\n8 - leito\n9 - finalizar.\n\nQuais sao as tabelas de dados que deseja utilizar?'))
    if tabela == 1:
        tabela_funcionarios()
        modularTable()
    elif tabela == 2:
        tabela_veiculos()
        modularTable()
    elif tabela == 3:
        tabela_agendamentos()
        modularTable()
    elif tabela == 4:
        tabela_equipamento()
        modularTable()
    elif tabela == 5:
        tabela_paciente()
        modularTable()
    elif tabela == 6:
        tabela_login()
        # Seed a default administrator so the system can be logged into.
        cadastro_login('admin', '123', 'gerente')
        modularTable()
    elif tabela == 7:
        tabela_anamnese()
        modularTable()
    elif tabela == 8:
        tabela_leito()
        modularTable()
    elif tabela == 9:
        firstAccess()
    else:
        print('Opcao invalida')
        modularTable()
def firstAccess():
    """Route to login when any tables already exist, else run first-run setup."""
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
    existing_tables = cursor.fetchall()
    if existing_tables:
        fazerlogin()
    else:
        modularTable()
def fazerlogin():
    """Prompt for credentials, look them up in the login table and route the
    user to the menu matching their registered role.

    Retries recursively on a failed login. Fix: the original ran the same
    SELECT twice (fetchall consumed the first result set); now the rows are
    fetched once and reused.
    """
    clear()
    print('\nBem vindo ao sistema de gerencimento Meditech:\n')
    login = input('Digite o seu login:\n')
    senha = input('Digite sua senha:\n')
    cursor.execute('SELECT * FROM login WHERE nome_usuario = ? and senha = ?', (login, senha))
    linhas = cursor.fetchall()
    if linhas:
        for linha in linhas:
            global usuario_logado
            usuario_logado = linha[1]
            area = linha[3]
            if area == 'medico':
                menu_medico()
            if area == 'engenheiro biomedico':
                menu_engbio()
            if area == 'atendente':
                menu_atendente()
            if area == 'gerente':
                menu_manager()
    else:
        input('\nLogin ou senha incorretos, pressione qualquer tecla')
        fazerlogin()
def tabela_funcionarios():#marianne
    """Create the funcionarios table (nome, profissao, matricula, auto id)."""
    cursor.execute('CREATE TABLE funcionarios(nome TEXT NOT NULL, profissao TEXT NOT NULL, matricula VARCHAR(25) NOT NULL, id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT);')
def tabela_veiculos():#marianne
    """Create the veiculos table (placa, status, motorista, paramedico, paciente)."""
    cursor.execute('CREATE TABLE veiculos(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, placa VARCHAR(8) NOT NULL, status TEXT NOT NULL, motorista TEXT NOT NULL, paramedico TEXT NOT NULL, paciente TEXT NOT NULL);')
def tabela_agendamentos():#marianne
    """Create the agendamentos table linking a paciente id and a medico id."""
    cursor.execute('CREATE TABLE agendamentos(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, id_paciente INTEGER NOT NULL, id_medico INTEGER NOT NULL, data VARCHAR(10) NOT NULL, horario VARCHAR(5));')
def tabela_equipamento():# Luiz Eduardo
    """Create the equipamento table (nome, funcao, preco, status, data)."""
    cursor.execute('CREATE TABLE equipamento(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, funcao TEXT NOT NULL, preco INTEGER, status TEXT NOT NULL, data DATE NOT NULL);')
def tabela_paciente():# Luiz Eduardo
    """Create the paciente table (nome, idade, sexo, peso)."""
    cursor.execute('CREATE TABLE paciente(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, idade INTEGER, sexo TEXT NOT NULL, peso INTEGER);')
def tabela_leito(): #luiz henrique
    """Create the dadosleito table mapping a patient name to a bed number."""
    cursor.execute('CREATE TABLE dadosleito( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, num_leito INTEGER NOT NULL)')
def tabela_login(): #luiz henrique
    """Create the login table (nome_usuario, senha, area/role)."""
    cursor.execute('CREATE TABLE login( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome_usuario TEXT NOT NULL, senha VARCHAR(10) NOT NULL, area TEXT NOT NULL)')
def tabela_anamnese(): #luiz henrique
    """Create the anamnese table (symptoms questionnaire) keyed by paciente id."""
    cursor.execute('CREATE TABLE anamnese (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, id_paciente INTEGER NOT NULL, o_que_sente TEXT NOT NULL, onde_doi TEXT NOT NULL, quando_comecou INTEGER NOT NULL)')
def clear():
    """Simulate clearing the terminal by printing 100 blank lines."""
    print(100 * "\n")
# funçoes
def lista_equipamento():
    """Print every row of the equipamento table, one tuple per line."""
    for registro in cursor.execute('SELECT * FROM equipamento').fetchall():
        print(registro)
def insere_equipamento(nome, funcao, preco, status, data):  # Luiz Eduardo
    """Insert a new equipment record and commit it."""
    novo_registro = (nome, funcao, preco, status, data)
    cursor.execute(
        'INSERT INTO equipamento(nome,funcao,preco,status,data)VALUES(?,?,?,?,?)',
        novo_registro)
    conn.commit()
def remove_equipamento(id_equipamento): # Luiz Eduardo
    """Delete the equipamento row with the given id.

    Fix: the id must be wrapped in a one-element tuple. The original passed
    the bare string, which sqlite3 treats as a sequence of characters and
    raises ProgrammingError for any multi-digit id.
    """
    cursor.execute('DELETE FROM equipamento WHERE id =?', (id_equipamento,))
    conn.commit()
def alterar_equipamento(novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento): # Luiz Eduardo
    """Overwrite every column of the equipamento row with the given id.

    Parameter order matters: id_equipamento is the LAST argument and is
    bound to the WHERE clause.
    """
    cursor.execute('UPDATE equipamento SET nome = ?, funcao= ?, preco = ?,status = ?, data = ? WHERE id = ?',
                   (novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento))
    conn.commit()
def insere_funcionarios(nome, profissao, matricula):  # marianne
    """Persist a new employee record (nome, profissao, matricula)."""
    novo_registro = (nome, profissao, matricula)
    cursor.execute(
        'INSERT INTO funcionarios(nome, profissao, matricula) VALUES (?,?,?)',
        novo_registro)
    conn.commit()
def remove_funcionarios(id_funcionario):#marianne
    """Delete the funcionarios row with the given id.

    Fix: the id must be wrapped in a one-element tuple; passing the bare
    string made sqlite3 treat each character as a separate binding, which
    raised ProgrammingError for any multi-digit id.
    """
    cursor.execute("DELETE FROM funcionarios WHERE id = ?", (id_funcionario,))
    conn.commit()
def altera_funcionarios(alteracao_campo, alteracao, id_funcionario):#marianne
    """Update one column of the funcionarios row with the given id.

    alteracao_campo must be one of 'nome', 'profissao' or 'matricula'; any
    other value performs no update (matching the original behaviour) and
    only commits. The whitelist keeps the interpolated column name safe
    from SQL injection.
    """
    colunas_permitidas = ('nome', 'profissao', 'matricula')
    if alteracao_campo in colunas_permitidas:
        cursor.execute('UPDATE funcionarios SET {} = ? WHERE id = ?'.format(alteracao_campo),
                       (alteracao, id_funcionario))
    conn.commit()
def insere_veiculos(placa, status, motorista, paramedico, paciente):  # marianne
    """Persist a new vehicle record (plate, status and assigned people)."""
    novo_registro = (placa, status, motorista, paramedico, paciente)
    cursor.execute(
        "INSERT INTO veiculos(placa, status, motorista, paramedico, paciente) VALUES (?,?,?,?,?)",
        novo_registro)
    conn.commit()
def remove_veiculos(id_veiculo):#marianne
    """Delete the veiculos row with the given id.

    Fix: the id must be wrapped in a one-element tuple; the bare string was
    treated as a per-character parameter sequence by sqlite3.
    """
    cursor.execute("DELETE FROM veiculos WHERE id = ?", (id_veiculo,))
    conn.commit()
def altera_veiculos(alteracao_campo, alteracao, id_veiculo):#marianne
    """Update one column of the veiculos row with the given id.

    alteracao_campo must be one of 'placa', 'status', 'motorista',
    'paramedico' or 'paciente'; any other value performs no update
    (matching the original behaviour) and only commits. The whitelist
    keeps the interpolated column name safe from SQL injection.
    """
    colunas_permitidas = ('placa', 'status', 'motorista', 'paramedico', 'paciente')
    if alteracao_campo in colunas_permitidas:
        cursor.execute('UPDATE veiculos SET {} = ? WHERE id = ?'.format(alteracao_campo),
                       (alteracao, id_veiculo))
    conn.commit()
def insere_agendamentos(id_paciente, id_medico, data, horario):  # marianne
    """Persist a new appointment linking a patient id and a doctor id."""
    novo_registro = (id_paciente, id_medico, data, horario)
    cursor.execute(
        "INSERT INTO agendamentos(id_paciente, id_medico, data, horario) VALUES (?,?,?,?)",
        novo_registro)
    conn.commit()
def remove_agendamentos(id_paciente):#marianne
    """Delete the agendamentos row whose primary key equals the argument.

    NOTE(review): despite the parameter name, the WHERE clause matches the
    appointment id, not the patient id — verify callers pass the right value.
    Fix: the id must be wrapped in a one-element tuple; the bare string was
    treated as a per-character parameter sequence by sqlite3.
    """
    cursor.execute("DELETE FROM agendamentos WHERE id = ?", (id_paciente,))
    conn.commit()
def altera_agendamentos(alteracao_campo, alteracao, id_agendamentos):#marianne
    """Update one column of the agendamentos row with the given id.

    alteracao_campo must be one of 'id_paciente', 'id_medico', 'data' or
    'horario'; any other value performs no update (matching the original
    behaviour) and only commits. The whitelist keeps the interpolated
    column name safe from SQL injection.
    """
    colunas_permitidas = ('id_paciente', 'id_medico', 'data', 'horario')
    if alteracao_campo in colunas_permitidas:
        cursor.execute('UPDATE agendamentos SET {} = ? WHERE id = ?'.format(alteracao_campo),
                       (alteracao, id_agendamentos))
    conn.commit()
def cadastro_login(nome_usuario, senha, area):
    """Persist a new login record (username, password, role/area)."""
    novo_registro = (nome_usuario, senha, area)
    cursor.execute(
        'INSERT INTO login(nome_usuario, senha, area) VALUES (?, ?, ?)',
        novo_registro)
    conn.commit()
def remove_login(id_usuario):
    """List all login rows, then delete the one with the given id.

    Fix: the id must be wrapped in a one-element tuple; the bare string was
    treated as a per-character parameter sequence by sqlite3.
    """
    mostrar = cursor.execute('SELECT * FROM login')
    for linha in mostrar.fetchall():
        print(linha)
    cursor.execute('DELETE FROM login WHERE id = ?', (id_usuario,))
    conn.commit()
def altera_login():
    """Interactively change a login name and a password.

    NOTE(review): rows are matched by the OLD value rather than by user id,
    so if two accounts share a login name (or a password) they are ALL
    updated — confirm whether matching on the user's id was intended.
    """
    login = input('Digite seu login:\n')
    novo_login = input('Digite o novo login:\n')
    cursor.execute('UPDATE login SET nome_usuario = ? WHERE nome_usuario = ?', (novo_login, login))
    senha = input('Digite a senha:')
    nova_senha = input('Digite a nova senha:\n')
    cursor.execute('UPDATE login SET senha = ? WHERE senha = ?', (nova_senha, senha))
    conn.commit()
def insere_paciente(nome, idade, sexo, peso):  # Luiz Eduardo
    """Persist a new patient record (nome, idade, sexo, peso)."""
    novo_registro = (nome, idade, sexo, peso)
    cursor.execute(
        'INSERT INTO paciente(nome,idade,sexo,peso)VALUES(?,?,?,?)',
        novo_registro)
    conn.commit()
def remove_paciente(id_paciente):# Luiz Eduardo
    """Delete the paciente row with the given id.

    Fix: the id must be wrapped in a one-element tuple; the bare string was
    treated as a per-character parameter sequence by sqlite3.
    """
    cursor.execute('DELETE FROM paciente WHERE id=?', (id_paciente,))
    conn.commit()
def cadastra_leito(nome, numero):  # luiz h
    """Persist a bed assignment: patient name and bed number."""
    novo_registro = (nome, numero)
    cursor.execute(
        'INSERT INTO dadosleito(nome, num_leito) VALUES (?, ?)',
        novo_registro)
    conn.commit()
def remove_leito(id_paciente): #luiz h
    """Delete the dadosleito row with the given id.

    Fix: the id must be wrapped in a one-element tuple; the bare string was
    treated as a per-character parameter sequence by sqlite3.
    """
    cursor.execute("DELETE FROM dadosleito WHERE id = ?", (id_paciente,))
    conn.commit()
def insere_anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou):  # luiz h
    """Persist an anamnesis questionnaire entry for a patient id."""
    novo_registro = (id_paciente, onde_doi, o_que_sente, quando_comecou)
    cursor.execute(
        'INSERT INTO anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou) VALUES (?,?,?,?)',
        novo_registro)
    conn.commit()
#menus
def menu_atendente():#marianne
    """Receptionist menu: schedule, cancel, edit and list appointments, and
    register new patients. Re-enters itself after each action; option 6
    returns to the login screen.
    """
    clear()
    print('\nBem vindo '+usuario_logado+'!\n1- Agendar consulta.\n2- Cancelar agendamento.\n3- Alterar agendamento.\n4- Ver agendamentos.\n5- Cadastrar paciente. \n6-Sair.')
    opcao = int(input('Digite a opcao desejada: '))
    if opcao == 1:
        clear()
        id_paciente = input("Digite o ID do paciente: ")
        id_medico = input("Digite o ID do medico: ")
        data = input("Digite a data da consulta: ")
        horario = input("Digite o horario da consulta: ")
        insere_agendamentos(id_paciente, id_medico, data, horario)
        menu_atendente()
    if opcao == 2:
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            print(linha)
        id_paciente_r = input("Digite o ID do paciente que deseja remover: ")
        remove_agendamentos(id_paciente_r)
        menu_atendente()
    if opcao == 3:
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            print(linha)
        id_agendamentos = input('\nID agendamento: ')
        alteracao_campo = input('Digite o campo de alteracao (id_paciente, id_medico, data, horario): ')
        alteracao = input('Digite a alteracao: ')
        altera_agendamentos(alteracao_campo, alteracao, id_agendamentos)
        menu_atendente()
    if opcao == 4:
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            paciente_id = linha[1]
            medico_id = linha[2]
            paciente_nome = ''
            medico_nome = ''
            medico_profissao = ''
            # Fix: the original bound the same id twice in a redundant
            # 'WHERE id = ? OR id = ?' predicate; one placeholder suffices.
            novo_mostrar = cursor.execute('SELECT * FROM paciente WHERE id = ?', (paciente_id,))
            for nova_linha in novo_mostrar.fetchall():
                paciente_nome = nova_linha[1]
            novo_mostrar = cursor.execute('SELECT * FROM funcionarios WHERE id = ?', (medico_id,))
            for nova_linha in novo_mostrar.fetchall():
                medico_nome = nova_linha[0]
                medico_profissao = nova_linha[1]
            print(linha[3], 'as', linha[4], paciente_nome, 'tem um consulta agendada com', medico_nome, '(', medico_profissao, ')')
        input('Pressione qualquer tecla para continuar')
        menu_atendente()
    if opcao == 5:
        clear()
        nome = input('Digite o nome do paciente: ')
        idade = input('Digite a idade do paciente: ')
        sexo = input('Digite o sexo do paciente: ')
        peso = input('Digite o peso do paciente: ')
        insere_paciente(nome, idade, sexo, peso)
        menu_atendente()
    if opcao == 6:
        fazerlogin()
    else:
        clear(), print("Invalido, entre com outro valor\n"), menu_atendente()
def menu_manager():
    """Manager menu: CRUD over employees (1-4), vehicles (5-8) and logins
    (9-12); option 13 returns to the login screen. Each action ends by
    asking whether to return to this menu (voltar_manager)."""
    clear()
    print('\nBem vindo '+usuario_logado+'!\n1- Cadastrar funcionario.\n2- Remover funcionario.\n3- Alterar funcionario.\n4- Ver funcionarios cadastrados.\n5- Cadastrar veiculo.\n6- Remover veiculo.\n7- Alterar veiculo.\n8- Ver veiculos cadastrados.\n9- Cadastrar login.\n10- Remover login.\n11- Alterar login\n12- Listar logins\n13- Sair!')
    opc = int(input('Digite a opcao desejada: '))
    # --- employees ---
    if opc == 1:
        nome = input('Digite o nome do funcionario: ')
        profissao = input('Digite a profissao do funcionario: ')
        matricula = input('Digite a matricula do funcionario: ')
        insere_funcionarios(nome, profissao, matricula)
        voltar_manager()
        return 0
    if opc == 2:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        id_funcionario_r = input("\nDigite o ID do funcionario que deseja remover: ")
        remove_funcionarios(id_funcionario_r)
        voltar_manager()
        return 0
    if opc == 3:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        id_funcionario = int(input('\nID do funcionario que deseja alterar: '))
        alteracao_campo = input('Digite o campo de alteracao (nome, profissao, matricula): ')
        alteracao = input('Digite a alteracao: ')
        altera_funcionarios(alteracao_campo, alteracao, id_funcionario)
        voltar_manager()
        return 0
    if opc == 4:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    # --- vehicles ---
    if opc == 5:
        placa = input('Digite a placa do veiculo: ')
        status = input('Digite o status do veiculo: ')
        motorista = input('Digite o motorista do veiculo: ')
        paramedico = input('Digite o paramedico que esta no veiculo: ')
        paciente = input('Digite o paciente que será atendido: ')
        insere_veiculos(placa, status, motorista, paramedico, paciente)
        voltar_manager()
        return 0
    if opc == 6:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        id_veiculo_r = input("\nDigite o ID do veiculo que deseja remover: ")
        remove_veiculos(id_veiculo_r)
        voltar_manager()
        return 0
    if opc == 7:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        id_veiculo = input('\nID do veiculo que deseja alterar: ')
        alteracao_campo = input('Digite o campo de alteracao (placa, status, motorista, paramedico, paciente): ')
        alteracao = input('Digite a alteracao: ')
        altera_veiculos(alteracao_campo, alteracao, id_veiculo)
        voltar_manager()
        return 0
    if opc == 8:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    # --- logins ---
    if opc == 9:
        nome_usuario = input('\nDigite o login a ser cadastrado:')
        senha = input('\nDigite sua senha:')
        area = input('\nDigite sua profissao:')
        cadastro_login(nome_usuario, senha, area)
        voltar_manager()
        return 0
    if opc == 10:
        mostrar = cursor.execute('SELECT * FROM login')
        for linha in mostrar.fetchall():
            print(linha)
        id_usuario = input('digite o id do usuario a ser removido: ou "cancelar" para voltar\n')
        if(id_usuario != "cancelar"):
            remove_login(id_usuario)
        voltar_manager()
        return 0
    if opc == 11:
        altera_login()
        voltar_manager()
        return 0
    if opc == 12:
        mostrar = cursor.execute('SELECT * FROM login')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    if opc == 13:
        fazerlogin()
    else:
        clear(), print("Invalido, entre com outro valor\n"), menu_manager()
def voltar_manager():  # Luiz Eduardo
    """Ask whether to return to the manager menu; reopen it on 'sim'."""
    if input('\nDeseja voltar(sim ou nao)?:') == 'sim':
        clear()
        menu_manager()
    else:
        return 0
def menu_medico():
    """Doctor menu: record an anamnesis (1), assign/remove a bed (2),
    change credentials (3) or return to the login screen (4)."""
    clear()
    opcao = int(input('\nBem vindo '+usuario_logado+'\nDigite\n1-Para fazer anamnese\n2-Para cadastrar ou remover um leito \n3-Para mudar senha ou login\n4-Para sair\n'))
    if opcao == 1:
        # Show every patient so the doctor can pick an id.
        mostrar = cursor.execute('SELECT * FROM paciente')
        for linha in mostrar.fetchall():
            print(linha)
        id_paciente = input('\nDigite o ID do paciente:\n')
        onde_doi = input('\nDigite o local da dor:\n')
        o_que_sente = input('\nDigite o que o paciente sente:\n')
        quando_comecou = input('\nDigite a data de quando começou:\n')
        insere_anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou)
        voltar_medico()
        return 0
    if opcao == 2:
        op = int(input('\n1-Cadastrar\n2-Remover\n:'))
        if op == 1:
            nome = input('Digite o nome do paciente:')
            numero = input('Digite o numero do leito:')
            cadastra_leito(nome, numero)
            voltar_medico()
            return 0
        if op == 2:
            mostrar = cursor.execute('SELECT * FROM dadosleito')
            for linha in mostrar.fetchall():
                print(linha)
            id_paciente = input('id do leito a ser removido:')
            remove_leito(id_paciente)
            voltar_medico()
            return 0
    if opcao == 3:
        altera_login()
        voltar_medico()
        return 0
    if opcao == 4:
        fazerlogin()
    else:
        print('Numero invalido, digite novamente!\n')
def voltar_medico():  # Luiz Eduardo
    """Ask whether to return to the doctor menu; reopen it on 'sim'."""
    if input('\nDeseja voltar(sim ou nao)?:') == 'sim':
        clear()
        menu_medico()
    else:
        return 0
def menu_engbio():# Luiz Eduardo
    """Biomedical-engineer menu: equipment calibration (1), register/remove
    equipment (2), list/edit equipment (3), back to login (4)."""
    clear()
    print("Bem vindo "+usuario_logado+"!\n\n1-Calibragem de equipamentos.\n2-Cadastrar/Remover equipamento.\n3-Listar/Alterar equipamentos.\n4-Sair!")
    opcao = int(input('Digite o numero da opcao desejada=>'))
    if opcao == 1:
        print("\nQual equipamento deseja calibrar ?")
        voltar_engbio()
        return 0
    if opcao == 2:
        print("1-Cadastrar\n2-Remover")
        cr = int(input('Digite o numero da opcao desejada=>'))
        if cr == 1:
            nome = input('Nome:')
            funcao = input('Funcao:')
            preco = input('Preço:')
            status = input('Status:')
            data = input('Data de insersao:')
            insere_equipamento(nome, funcao, preco, status, data)
            print('Cadastrado com sucesso !')
            voltar_engbio()
            return 0
        if cr == 2:
            lista_equipamento()
            id_equipamento = input('id=')
            remove_equipamento(id_equipamento)
            print("\nEquipamento removido com sucesso!")
            voltar_engbio()
            return 0
        else:
            voltar_engbio()
            return 0
    if opcao == 3:
        lista_equipamento()
        resp = input('\nDeseja alterar(sim ou nao)?')
        if resp == 'sim':
            id_equipamento = input('Id do equipamento:')
            novo_nome = input('Digite o nome:')
            nova_funcao = input('Digite a funcao:')
            novo_preco = input('Digite o preco:')
            novo_status = input('Digite o novo status do equipamento:')
            nova_data = input('Digite a data atual:')
            # Bug fix: alterar_equipamento expects the row id as its LAST
            # argument; the original passed it first, which wrote the id into
            # the nome column and filtered WHERE id by the date value.
            alterar_equipamento(novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento)
            print('Alterado com sucesso !')
            voltar_engbio()
            return 0
        else:
            voltar_engbio()
            return 0
    if opcao == 4:
        fazerlogin()
    else:
        clear()
        menu_engbio()
def voltar_engbio():  # Luiz Eduardo
    """Ask whether to return to the engineer menu; reopen it on 'sim'."""
    if input('\nDeseja voltar(sim ou nao)?:') == 'sim':
        clear()
        menu_engbio()
    else:
        return 0
# Program entry point: route to login or, on first run, to table setup.
firstAccess()
| victorhnogueira/esof_sistema_gerencimento_hospitalar | setup.py | setup.py | py | 21,575 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 2,
"usage_type": "call"
}
] |
32559830813 | import jwt
from functools import wraps
from app import request, jsonify, app
from app.use_db.tools import quarry
def token_required(f):
    """Decorator enforcing a JWT bearer token on a Flask view.

    Expects an ``Authorization: <scheme> <token>`` header, decodes the token
    with the app secret (HS256), verifies that the e-mail in the ``sub``
    claim exists in the ``person`` table, and calls the wrapped view with
    the matching ``id_per`` prepended to its arguments. Any failure yields
    a 401 JSON response.
    """
    @wraps(f)
    def _verify(*args, **kwargs):
        auth_headers = request.headers.get('Authorization', '').split()
        invalid_msg = {
            'message': 'Invalid token. Registeration and / or authentication required',
            'authenticated': False
        }
        expired_msg = {
            'message': 'Expired token. Reauthentication required.',
            'authenticated': False
        }
        # The header must be exactly two tokens: "<scheme> <token>".
        if len(auth_headers) != 2:
            return jsonify(invalid_msg), 401
        try:
            token = auth_headers[1]
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            email = data['sub']
            email_exist = quarry.call('select exists '
                                      '(select * from person where email_per = %s)', [email], commit=False, fetchall=False)
            if email_exist[0] == 0:
                raise RuntimeError('User not found')
            id_per = quarry.call('select id_per from person where email_per = %s', [email], commit=False, fetchall=False)
            return f(id_per[0], *args, **kwargs)
        except jwt.ExpiredSignatureError:
            return jsonify(expired_msg), 401  # 401 is Unauthorized HTTP status code
        except Exception as e:
            # Fix: the original caught (jwt.InvalidTokenError, Exception) —
            # a redundant tuple, since Exception already covers it. Both
            # invalid tokens and the RuntimeError above land here.
            print(e)
            return jsonify(invalid_msg), 401
    return _verify
| Baral-Chief-of-Compliance/ice_tracing_software | prototype/v1/backend/authorization/decorator_for_authorization.py | decorator_for_authorization.py | py | 1,506 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.request.headers.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.request.headers",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "app.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "app.json... |
11490438190 | import base64
import io
from PIL import Image
from pyzbar.pyzbar import decode
from requests_ntlm import HttpNtlmAuth
import requests
def get_js(sc, shop):
    """Fetch price info for barcode *sc* at *shop* from the retail web service.

    Returns the parsed JSON body of the service response.
    """
    # NOTE(review): credentials are hard-coded in source — move them to
    # configuration or environment variables.
    username = r'WebService'
    password = 'web2018'
    auth = HttpNtlmAuth(username, password)
    strParam = shop + '/' + sc
    list_url = r"https://ts.offprice.eu/service_retail/hs/wms_api/getpriceQR/" + strParam
    headers = {'Accept': 'application/json;odata=verbose'}
    # NOTE(review): verify=False disables TLS certificate checking — confirm
    # this is intentional for this endpoint.
    responce = requests.get(list_url, verify=False, auth=auth, headers=headers)
    response_json = responce.json()
    return response_json
def decode_barcode(my_image):
    """Decode the first barcode found in an image.

    my_image: a path or file-like object accepted by PIL.Image.open.
    Returns the barcode payload decoded as UTF-8, or the int 0 when no
    barcode was found (sentinel kept for backward compatibility).
    """
    # Cleanup: removed dead commented-out drawing/type-filter code.
    decoded_objects = decode(Image.open(my_image))
    for obj in decoded_objects:
        # The original also looped but returned on the first object;
        # that behaviour is preserved.
        return obj.data.decode("utf-8")
    return 0
def use_barcode(my_image):
    """Thin pass-through to decode_barcode (kept for API compatibility)."""
    return decode_barcode(my_image)
def use_barcode_ajax(my_image):
    """Thin pass-through to decode_barcode used by the AJAX endpoint."""
    return decode_barcode(my_image)
def get_my_code(image_base64, shop):
    """Decode a base64-encoded photo, read its barcode and fetch price data.

    Returns:
        0 when no barcode could be decoded from the image,
        1 when the web service returned no data for the barcode,
        otherwise the service response rendered with double quotes.
    """
    imgdata = base64.b64decode(str(image_base64))
    tempimg = io.BytesIO(imgdata)
    datasacan = use_barcode(tempimg)
    if datasacan == 0:
        return 0
    textbar = datasacan
    textjson = get_js(textbar, shop)
    # TODO (translated): should return the barcode itself when no data could
    # be fetched for it.
    # NOTE(review): get_js returns a parsed JSON object (dict/list), so this
    # comparison with the string '[] []' looks like it can never be true —
    # verify the intended empty-response check.
    if textjson == '[] []':
        return 1
    # get string with all double quotes
    # NOTE(review): str() + replace("'", '"') breaks on values containing
    # apostrophes; json.dumps would be the robust way to serialize this.
    single_quoted_dict_in_string = textjson
    desired_double_quoted_dict = str(single_quoted_dict_in_string)
    desired_double_quoted_dict = desired_double_quoted_dict.replace("'", "\"")
    return desired_double_quoted_dict
| otitarenko/djangoqr | qrapp/decoder.py | decoder.py | py | 2,047 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests_ntlm.HttpNtlmAuth",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyzbar.pyzbar.decode",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.Imag... |
20422548222 | import argparse
import collections
import getpass
import hashlib
import json
import os
import pickle
import requests
import time
import uuid
import urllib.parse
from datetime import datetime, timedelta
from email_validator import validate_email, EmailNotValidError
from pandas import DataFrame, to_datetime
from pytz import timezone
from . import endpoints
class webull :
    def __init__(self, region_code=None) :
        """Initialise HTTP session, default browser-like headers, endpoint
        table and empty auth/session state.

        region_code: Webull region id; defaults to 6 when not given.
        """
        self._session = requests.session()
        # Headers mimic a desktop Firefox client; 'did' is a persistent
        # per-machine device id (see _get_did).
        self._headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:99.0) Gecko/20100101 Firefox/99.0',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-US,en;q=0.5',
            'Content-Type': 'application/json',
            'platform': 'web',
            'hl': 'en',
            'os': 'web',
            'osv': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:99.0) Gecko/20100101 Firefox/99.0',
            'app': 'global',
            'appid': 'webull-webapp',
            'ver': '3.39.18',
            'lzone': 'dc_core_r001',
            'ph': 'MacOS Firefox',
            'locale': 'eng',
            # 'reqid': req_id,
            'device-type': 'Web',
            'did': self._get_did()
        }
        #endpoints
        self._urls = endpoints.urls()
        #sessions — populated by login()
        self._account_id = ''
        self._trade_token = ''
        self._access_token = ''
        self._refresh_token = ''
        self._token_expire = ''
        self._uuid = ''
        #miscellaenous
        self._did = self._get_did()
        self._region_code = region_code or 6
        self.zone_var = 'dc_core_r001'
        self.timeout = 15  # seconds, applied to every HTTP request
def _get_did(self, path=''):
'''
Makes a unique device id from a random uuid (uuid.uuid4).
if the pickle file doesn't exist, this func will generate a random 32 character hex string
uuid and save it in a pickle file for future use. if the file already exists it will
load the pickle file to reuse the did. Having a unique did appears to be very important
for the MQTT web socket protocol
path: path to did.bin. For example _get_did('cache') will search for cache/did.bin instead.
:return: hex string of a 32 digit uuid
'''
filename = 'did.bin'
if path:
filename = os.path.join(path, filename)
if os.path.exists(filename):
did = pickle.load(open(filename,'rb'))
else:
did = uuid.uuid4().hex
pickle.dump(did, open(filename, 'wb'))
return did
def _set_did(self, did, path=''):
'''
If your starting to use this package after webull's new image verification for login, you'll
need to login from a browser to get your did file in order to login through this api. You can
find your did file by using this link:
https://github.com/tedchou12/webull/wiki/Workaround-for-Login
and then headers tab instead of response head, and finally look for the did value from the
request headers.
Then, you can run this program to save your did into did.bin so that it can be accessed in the
future without the did explicitly being in your code.
path: path to did.bin. For example _get_did('cache') will search for cache/did.bin instead.
'''
filename = 'did.bin'
if path:
filename = os.path.join(path, filename)
pickle.dump(did, open(filename, 'wb'))
return True
    def build_req_headers(self, include_trade_token=False, include_time=False, include_zone_var=True):
        '''
        Build default set of header params.

        include_trade_token: add the trade token ('t_token').
        include_time: add the current time in ms ('t_time').
        include_zone_var: add the zone header ('lzone').

        NOTE: this mutates and returns self._headers itself (no copy), so
        keys added here persist on the shared header dict across calls.
        '''
        headers = self._headers
        req_id = str(uuid.uuid4().hex)
        headers['reqid'] = req_id
        headers['did'] = self._did
        headers['access_token'] = self._access_token
        if include_trade_token :
            headers['t_token'] = self._trade_token
        if include_time :
            headers['t_time'] = str(round(time.time() * 1000))
        if include_zone_var :
            headers['lzone'] = self.zone_var
        return headers
    def login(self, username='', password='', device_name='', mfa='', question_id='', question_answer='', save_token=False, token_path=None):
        '''
        Login with email or phone number.

        phone numbers must be a str in the following form
        US '+1-XXXXXXX'
        CH '+86-XXXXXXXXXXX'

        mfa / question_id / question_answer: optional second-factor values.
        save_token: persist the returned tokens to token_path on success.
        On success the access/refresh tokens, expiry, uuid and account id
        are stored on the instance; the raw response dict is returned.
        '''
        if not username or not password:
            raise ValueError('username or password is empty')
        # with webull md5 hash salted
        password = ('wl_app-a&b@!423^' + password).encode('utf-8')
        md5_hash = hashlib.md5(password)
        account_type = self.get_account_type(username)
        if device_name == '' :
            device_name = 'default_string'
        data = {
            'account': username,
            'accountType': str(account_type),
            'deviceId': self._did,
            'deviceName': device_name,
            'grade': 1,
            'pwd': md5_hash.hexdigest(),
            'regionId': self._region_code
        }
        if mfa != '' :
            data['extInfo'] = {
                'codeAccountType': account_type,
                'verificationCode': mfa
            }
            headers = self.build_req_headers()
        else :
            headers = self._headers
        if question_id != '' and question_answer != '' :
            data['accessQuestions'] = '[{"questionId":"' + str(question_id) + '", "answer":"' + str(question_answer) + '"}]'
        response = requests.post(self._urls.login(), json=data, headers=headers, timeout=self.timeout)
        result = response.json()
        if 'accessToken' in result :
            # Successful login: cache the session tokens on the instance.
            self._access_token = result['accessToken']
            self._refresh_token = result['refreshToken']
            self._token_expire = result['tokenExpireTime']
            self._uuid = result['uuid']
            self._account_id = self.get_account_id()
            if save_token:
                self._save_token(result, token_path)
        return result
def get_mfa(self, username='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'codeType': int(5)}
response = requests.post(self._urls.get_mfa(), json=data, headers=self._headers, timeout=self.timeout)
# data = response.json()
if response.status_code == 200 :
return True
else :
return False
def check_mfa(self, username='', mfa='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'code': str(mfa),
'codeType': int(5)}
response = requests.post(self._urls.check_mfa(), json=data, headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def get_security(self, username='') :
account_type = self.get_account_type(username)
username = urllib.parse.quote(username)
# seems like webull has a bug/stability issue here:
time = datetime.now().timestamp() * 1000
response = requests.get(self._urls.get_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 0), headers=self._headers, timeout=self.timeout)
data = response.json()
if len(data) == 0 :
response = requests.get(self._urls.get_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 1), headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def next_security(self, username='') :
account_type = self.get_account_type(username)
username = urllib.parse.quote(username)
# seems like webull has a bug/stability issue here:
time = datetime.now().timestamp() * 1000
response = requests.get(self._urls.next_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 0), headers=self._headers, timeout=self.timeout)
data = response.json()
if len(data) == 0 :
response = requests.get(self._urls.next_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 1), headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def check_security(self, username='', question_id='', question_answer='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'answerList': [{'questionId': str(question_id), 'answer': str(question_answer)}],
'event': 'PRODUCT_LOGIN'}
response = requests.post(self._urls.check_security(), json=data, headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def login_prompt(self):
'''
End login session
'''
uname = input('Enter Webull Username:')
pwd = getpass.getpass('Enter Webull Password:')
self.trade_pin = getpass.getpass('Enter 6 digit Webull Trade PIN:')
self.login(uname, pwd)
return self.get_trade_token(self.trade_pin)
def logout(self):
'''
End login session
'''
headers = self.build_req_headers()
response = requests.get(self._urls.logout(), headers=headers, timeout=self.timeout)
return response.status_code
    def api_login(self, access_token='', refresh_token='', token_expire='', uuid='', mfa=''):
        # Restore a previously saved session without re-authenticating:
        # seed the cached tokens then resolve the account id.
        # NOTE(review): the `mfa` parameter is accepted but unused here.
        self._access_token = access_token
        self._refresh_token = refresh_token
        self._token_expire = token_expire
        self._uuid = uuid
        self._account_id = self.get_account_id()
    def refresh_login(self, save_token=False, token_path=None):
        '''
        Refresh login token

        Exchanges the cached refresh token for a new access/refresh pair and,
        on success, updates the cached tokens and account id.

        :param save_token: when True, persist the refreshed token dict
        :param token_path: optional directory for the saved token file
        :return: raw JSON response dict
        '''
        headers = self.build_req_headers()
        data = {'refreshToken': self._refresh_token}
        response = requests.post(self._urls.refresh_login(self._refresh_token), json=data, headers=headers, timeout=self.timeout)
        result = response.json()
        # only adopt the new tokens when all three fields are present and non-empty
        if 'accessToken' in result and result['accessToken'] != '' and result['refreshToken'] != '' and result['tokenExpireTime'] != '':
            self._access_token = result['accessToken']
            self._refresh_token = result['refreshToken']
            self._token_expire = result['tokenExpireTime']
            self._account_id = self.get_account_id()
            if save_token:
                # carry the uuid over so the saved file can restore a full session
                result['uuid'] = self._uuid
                self._save_token(result, token_path)
        return result
def _save_token(self, token=None, path=None):
'''
save login token to webull_credentials.json
'''
filename = 'webull_credentials.json'
if path:
filename = os.path.join(path, filename)
with open(filename, 'wb') as f:
pickle.dump(token, f)
return True
return False
def get_detail(self):
'''
get some contact details of your account name, email/phone, region, avatar...etc
'''
headers = self.build_req_headers()
response = requests.get(self._urls.user(), headers=headers, timeout=self.timeout)
result = response.json()
return result
    def get_account_id(self, id=0):
        '''
        get account id
        call account id before trade actions

        :param id: index into the account list when the login has several
        :return: account id as str, or None when the lookup fails
        Side effect: caches self.zone_var and self._account_id on success.
        '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.account_id(), headers=headers, timeout=self.timeout)
        result = response.json()
        if result['success'] and len(result['data']) > 0 :
            self.zone_var = str(result['data'][int(id)]['rzone'])
            self._account_id = str(result['data'][int(id)]['secAccountId'])
            return self._account_id
        else:
            return None
def get_account(self):
'''
get important details of account, positions, portfolio stance...etc
'''
headers = self.build_req_headers()
response = requests.get(self._urls.account(self._account_id), headers=headers, timeout=self.timeout)
result = response.json()
return result
def get_positions(self):
'''
output standing positions of stocks
'''
data = self.get_account()
return data['positions']
def get_portfolio(self):
'''
output numbers of portfolio
'''
data = self.get_account()
output = {}
for item in data['accountMembers']:
output[item['key']] = item['value']
return output
def get_activities(self, index=1, size=500) :
'''
Activities including transfers, trades and dividends
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
data = {'pageIndex': index,
'pageSize': size}
response = requests.post(self._urls.account_activities(self._account_id), json=data, headers=headers, timeout=self.timeout)
return response.json()
def get_current_orders(self) :
'''
Get open/standing orders
'''
data = self.get_account()
return data['openOrders']
def get_history_orders(self, status='All', count=20):
'''
Historical orders, can be cancelled or filled
status = Cancelled / Filled / Working / Partially Filled / Pending / Failed / All
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
response = requests.get(self._urls.orders(self._account_id, count) + str(status), headers=headers, timeout=self.timeout)
return response.json()
    def get_trade_token(self, password=''):
        '''
        Trading related
        authorize trade, must be done before trade action

        :param password: 6-digit trade PIN
        :return: True when a trade token was issued and cached, else False
        '''
        headers = self.build_req_headers()
        # with webull md5 hash salted
        password = ('wl_app-a&b@!423^' + password).encode('utf-8')
        md5_hash = hashlib.md5(password)
        data = {'pwd': md5_hash.hexdigest()}
        response = requests.post(self._urls.trade_token(), json=data, headers=headers, timeout=self.timeout)
        result = response.json()
        if 'tradeToken' in result :
            # cache for build_req_headers(include_trade_token=True)
            self._trade_token = result['tradeToken']
            return True
        else:
            return False
    '''
    Lookup ticker_id
    Ticker issue, will attempt to find an exact match, if none is found, match the first one
    '''
    def get_ticker(self, stock=''):
        # Resolve a symbol to webull's numeric ticker id.
        # Raises ValueError when the symbol is empty or cannot be found.
        headers = self.build_req_headers()
        ticker_id = 0
        if stock and isinstance(stock, str):
            response = requests.get(self._urls.stock_id(stock, self._region_code), headers=headers, timeout=self.timeout)
            result = response.json()
            if result.get('data') :
                for item in result['data'] : # prefer an exact symbol/disSymbol match; stop at the first
                    if 'symbol' in item and item['symbol'] == stock :
                        ticker_id = item['tickerId']
                        break
                    elif 'disSymbol' in item and item['disSymbol'] == stock :
                        ticker_id = item['tickerId']
                        break
                if ticker_id == 0 :
                    # no exact match: fall back to the first search result
                    ticker_id = result['data'][0]['tickerId']
            else:
                raise ValueError('TickerId could not be found for stock {}'.format(stock))
        else:
            raise ValueError('Stock symbol is required')
        return ticker_id
    '''
    Get stock public info
    get price quote
    tId: ticker ID str
    '''
    def get_ticker_info(self, stock=None, tId=None) :
        # Fetch the public detail record for a stock, resolving the symbol
        # to a ticker id first when only a symbol is given.
        headers = self.build_req_headers()
        if not stock and not tId:
            raise ValueError('Must provide a stock symbol or a stock id')
        if stock :
            try:
                tId = str(self.get_ticker(stock))
            except ValueError as _e:
                raise ValueError("Could not find ticker for stock {}".format(stock))
        response = requests.get(self._urls.stock_detail(tId), headers=headers, timeout=self.timeout)
        result = response.json()
        return result
    '''
    Get all tickers from a region
    region id: https://github.com/tedchou12/webull/wiki/What-is-the-region_id%3F
    '''
    def get_all_tickers(self, region_code=None) :
        # List every ticker for a region; defaults to the session's region.
        headers = self.build_req_headers()
        if not region_code :
            region_code = self._region_code
        response = requests.get(self._urls.get_all_tickers(region_code, region_code), headers=headers, timeout=self.timeout)
        result = response.json()
        return result
    '''
    Actions related to stock
    '''
    def get_quote(self, stock=None, tId=None):
        '''
        get price quote
        tId: ticker ID str

        :return: raw quote JSON dict
        '''
        headers = self.build_req_headers()
        if not stock and not tId:
            raise ValueError('Must provide a stock symbol or a stock id')
        if stock:
            try:
                tId = str(self.get_ticker(stock))
            except ValueError as _e:
                raise ValueError("Could not find ticker for stock {}".format(stock))
        response = requests.get(self._urls.quotes(tId), headers=headers, timeout=self.timeout)
        result = response.json()
        return result
    def place_order(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True, stpPrice=None, trial_value=0, trial_type='DOLLAR'):
        '''
        Place an order

        price: float (LMT / STP LMT Only)
        action: BUY / SELL / SHORT
        ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
        timeinforce: GTC / DAY / IOC
        outsideRegularTradingHour: True / False
        stpPrice: float (STP / STP LMT Only)
        trial_value: float (STP TRIAL Only)
        trial_type: DOLLAR / PERCENTAGE (STP TRIAL Only)

        :return: raw JSON response dict from the order endpoint
        '''
        # resolve ticker id: explicit tId wins over symbol lookup
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'action': action,
            'comboType': 'NORMAL',
            'orderType': orderType,
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': int(quant),
            'serialId': str(uuid.uuid4()),
            'tickerId': tId,
            'timeInForce': enforce
        }
        # each order type carries a different set of price fields
        # Market orders do not support extended hours trading.
        if orderType == 'MKT' :
            data['outsideRegularTradingHour'] = False
        elif orderType == 'LMT':
            data['lmtPrice'] = float(price)
        elif orderType == 'STP' :
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP LMT' :
            data['lmtPrice'] = float(price)
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP TRAIL' :
            data['trailingStopStep'] = float(trial_value)
            data['trailingType'] = str(trial_type)
        response = requests.post(self._urls.place_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
    def modify_order(self, order=None, order_id=0, stock=None, tId=None, price=0, action=None, orderType=None, enforce=None, quant=0, outsideRegularTradingHour=None):
        '''
        Modify an order
        order_id: order_id
        action: BUY / SELL
        ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
        timeinforce: GTC / DAY / IOC
        outsideRegularTradingHour: True / False

        Any field left at its default falls back to the value already on
        the existing `order` dict (as returned by get_current_orders()).
        :return: raw JSON response dict
        '''
        if not order and not order_id:
            raise ValueError('Must provide an order or order_id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        # fall back to the existing order's values for anything not overridden
        modifiedAction = action or order['action']
        modifiedLmtPrice = float(price or order['lmtPrice'])
        modifiedOrderType = orderType or order['orderType']
        # explicit bool check so a caller-supplied False is not ignored
        modifiedOutsideRegularTradingHour = outsideRegularTradingHour if type(outsideRegularTradingHour) == bool else order['outsideRegularTradingHour']
        modifiedEnforce = enforce or order['timeInForce']
        modifiedQuant = int(quant or order['quantity'])
        # resolve ticker id: explicit tId, then symbol, then the order itself
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else :
            tId = order['ticker']['tickerId']
        order_id = order_id or order['orderId']
        data = {
            'action': modifiedAction,
            'lmtPrice': modifiedLmtPrice,
            'orderType': modifiedOrderType,
            'quantity': modifiedQuant,
            'comboType': 'NORMAL',
            'outsideRegularTradingHour': modifiedOutsideRegularTradingHour,
            'serialId': str(uuid.uuid4()),
            'orderId': order_id,
            'tickerId': tId,
            'timeInForce': modifiedEnforce
        }
        #Market orders do not support extended hours trading.
        if data['orderType'] == 'MKT':
            data['outsideRegularTradingHour'] = False
        response = requests.post(self._urls.modify_order(self._account_id, order_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
def cancel_order(self, order_id=''):
'''
Cancel an order
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
data = {}
response = requests.post(self._urls.cancel_order(self._account_id) + str(order_id) + '/' + str(uuid.uuid4()), json=data, headers=headers, timeout=self.timeout)
result = response.json()
return result['success']
    def place_order_otoco(self, stock='', price='', stop_loss_price='', limit_profit_price='', time_in_force='DAY', quant=0) :
        '''
        OTOCO: One-triggers-a-one-cancels-the-others, aka Bracket Ordering
        Submit a buy order, its fill will trigger sell order placement. If one sell fills, it will cancel the other
        sell

        :return: JSON dict of the placed order group, or False when the
                 pre-check endpoint rejects the request
        '''
        headers = self.build_req_headers(include_trade_token=False, include_time=True)
        # step 1: dry-run the group against the check endpoint (no serialIds)
        data1 = {
            'newOrders': [
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(price), 'comboType': 'MASTER'},
                {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS'},
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT'}
            ]
        }
        response1 = requests.post(self._urls.check_otoco_orders(self._account_id), json=data1, headers=headers, timeout=self.timeout)
        result1 = response1.json()
        if result1['forward'] :
            # step 2: the check passed; submit the real group with serialIds
            data2 = {'newOrders': [
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(price), 'comboType': 'MASTER', 'serialId': str(uuid.uuid4())},
                {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS', 'serialId': str(uuid.uuid4())},
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT', 'serialId': str(uuid.uuid4())}],
                'serialId': str(uuid.uuid4())
            }
            response2 = requests.post(self._urls.place_otoco_orders(self._account_id), json=data2, headers=headers, timeout=self.timeout)
            # print('Resp 2: {}'.format(response2))
            return response2.json()
        else:
            # surface the rejection reason before returning False
            print(result1['checkResultList'][0]['code'])
            print(result1['checkResultList'][0]['msg'])
            return False
    def modify_order_otoco(self, order_id1='', order_id2='', order_id3='', stock='', price='', stop_loss_price='', limit_profit_price='', time_in_force='DAY', quant=0) :
        '''
        OTOCO: One-triggers-a-one-cancels-the-others, aka Bracket Ordering
        Submit a buy order, its fill will trigger sell order placement. If one sell fills, it will cancel the other
        sell

        :param order_id1: MASTER (buy) order id
        :param order_id2: STOP_LOSS (sell) order id
        :param order_id3: STOP_PROFIT (sell) order id
        :return: raw JSON response dict
        '''
        headers = self.build_req_headers(include_trade_token=False, include_time=True)
        data = {'modifyOrders': [
            {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id1),
             'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
             'lmtPrice': float(price), 'comboType': 'MASTER', 'serialId': str(uuid.uuid4())},
            {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id2),
             'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
             'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS', 'serialId': str(uuid.uuid4())},
            {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id3),
             'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
             'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT', 'serialId': str(uuid.uuid4())}],
            'serialId': str(uuid.uuid4())
        }
        response = requests.post(self._urls.modify_otoco_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        # print('Resp: {}'.format(response))
        return response.json()
def cancel_order_otoco(self, combo_id=''):
'''
Retract an otoco order. Cancelling the MASTER order_id cancels the sub orders.
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
# data = { 'serialId': str(uuid.uuid4()), 'cancelOrders': [str(order_id)]}
data = {}
response = requests.post(self._urls.cancel_otoco_orders(self._account_id, combo_id), json=data, headers=headers, timeout=self.timeout)
return response.json()
    '''
    Actions related to cryptos
    '''
    def place_order_crypto(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='DAY', entrust_type='QTY', quant=0, outsideRegularTradingHour=False) :
        '''
        Place Crypto order
        price: Limit order entry price
        quant: dollar amount to buy/sell when entrust_type is CASH else the decimal or fractional amount of shares to buy
        action: BUY / SELL
        entrust_type: CASH / QTY
        ordertype : LMT / MKT
        timeinforce: DAY
        outsideRegularTradingHour: True / False

        :return: raw JSON response dict
        '''
        # resolve ticker id: explicit tId wins over symbol lookup
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        # note: the crypto endpoint takes price and quantity as strings
        data = {
            'action': action,
            'assetType': 'crypto',
            'comboType': 'NORMAL',
            'entrustType': entrust_type,
            'lmtPrice': str(price),
            'orderType': orderType,
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': str(quant),
            'serialId': str(uuid.uuid4()),
            'tickerId': tId,
            'timeInForce': enforce
        }
        response = requests.post(self._urls.place_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
    '''
    Actions related to options
    '''
    def get_option_quote(self, stock=None, tId=None, optionId=None):
        '''
        get option quote

        :param optionId: derivative (contract) id, passed through to the API
        :return: raw JSON quote
        '''
        if not stock and not tId:
            raise ValueError('Must provide a stock symbol or a stock id')
        if stock:
            try:
                tId = str(self.get_ticker(stock))
            except ValueError as _e:
                raise ValueError("Could not find ticker for stock {}".format(stock))
        headers = self.build_req_headers()
        params = {'tickerId': tId, 'derivativeIds': optionId}
        return requests.get(self._urls.option_quotes(), params=params, headers=headers, timeout=self.timeout).json()
def get_options_expiration_dates(self, stock=None, count=-1):
'''
returns a list of options expiration dates
'''
headers = self.build_req_headers()
data = {'count': count,
'direction': 'all',
'tickerId': self.get_ticker(stock)}
res = requests.post(self._urls.options_exp_dat_new(), json=data, headers=headers, timeout=self.timeout).json()
r_data = []
for entry in res['expireDateList'] :
r_data.append(entry['from'])
# return requests.get(self._urls.options_exp_date(self.get_ticker(stock)), params=data, headers=headers, timeout=self.timeout).json()['expireDateList']
return r_data
    def get_options(self, stock=None, count=-1, includeWeekly=1, direction='all', expireDate=None, queryAll=0):
        '''
        get options and returns a dict of options contracts
        params:
            stock: symbol
            count: -1
            includeWeekly: 0 or 1 (deprecated)
            direction: all, call, put
            expireDate: contract expire date
            queryAll: 0 (deprecated)

        :return: list of {'strikePrice', 'call', 'put'} dicts sorted by strike
        '''
        headers = self.build_req_headers()
        # get next closest expiredate if none is provided
        if not expireDate:
            dates = self.get_options_expiration_dates(stock)
            # ensure we don't provide an option that has < 1 day to expire
            for d in dates:
                if d['days'] > 0:
                    expireDate = d['date']
                    break
        data = {'count': count,
                'direction': direction,
                'tickerId': self.get_ticker(stock)}
        res = requests.post(self._urls.options_exp_dat_new(), json=data, headers=headers, timeout=self.timeout).json()
        # pick out the contract list for the requested expiration date
        t_data = []
        for entry in res['expireDateList'] :
            if str(entry['from']['date']) == expireDate :
                t_data = entry['data']
        # group contracts by strike price, then by direction (call/put)
        r_data = {}
        for entry in t_data :
            if entry['strikePrice'] not in r_data :
                r_data[entry['strikePrice']] = {}
            r_data[entry['strikePrice']][entry['direction']] = entry
        r_data = dict(sorted(r_data.items()))
        # flatten into a list with one row per strike
        rr_data = []
        for s_price in r_data :
            rr_entry = {'strikePrice': s_price}
            if 'call' in r_data[s_price] :
                rr_entry['call'] = r_data[s_price]['call']
            if 'put' in r_data[s_price] :
                rr_entry['put'] = r_data[s_price]['put']
            rr_data.append(rr_entry)
        return rr_data
def get_options_by_strike_and_expire_date(self, stock=None, expireDate=None, strike=None, direction='all'):
'''
get a list of options contracts by expire date and strike price
strike: string
'''
opts = self.get_options(stock=stock, expireDate=expireDate, direction=direction)
return [c for c in opts if c['strikePrice'] == strike]
    def place_order_option(self, optionId=None, lmtPrice=None, stpPrice=None, action=None, orderType='LMT', enforce='DAY', quant=0):
        '''
        create buy / sell order
        stock: string
        lmtPrice: float
        stpPrice: float
        action: string BUY / SELL
        optionId: string
        orderType: MKT / LMT / STP / STP LMT
        enforce: GTC / DAY
        quant: int

        :return: raw JSON response dict
        :raises Exception: on any non-200 HTTP response
        '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'orderType': orderType,
            'serialId': str(uuid.uuid4()),
            'timeInForce': enforce,
            'orders': [{'quantity': int(quant), 'action': action, 'tickerId': int(optionId), 'tickerType': 'OPTION'}],
        }
        # each order type carries a different set of price fields
        if orderType == 'LMT' and lmtPrice :
            data['lmtPrice'] = float(lmtPrice)
        elif orderType == 'STP' and stpPrice :
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP LMT' and lmtPrice and stpPrice :
            data['lmtPrice'] = float(lmtPrice)
            data['auxPrice'] = float(stpPrice)
        response = requests.post(self._urls.place_option_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('place_option_order failed', response.status_code, response.reason)
        return response.json()
    def modify_order_option(self, order=None, lmtPrice=None, stpPrice=None, enforce=None, quant=0):
        '''
        order: dict from get_current_orders
        stpPrice: float
        lmtPrice: float
        enforce: GTC / DAY
        quant: int

        Fields left at their defaults fall back to the existing order's values.
        :return: True on HTTP 200; raises otherwise
        NOTE(review): unlike place_order_option this returns True, not the
        response JSON — kept as-is for caller compatibility.
        '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'comboId': order['comboId'],
            'orderType': order['orderType'],
            'timeInForce': enforce or order['timeInForce'],
            'serialId': str(uuid.uuid4()),
            'orders': [{'quantity': quant or order['totalQuantity'],
                        'action': order['action'],
                        'tickerId': order['ticker']['tickerId'],
                        'tickerType': 'OPTION',
                        'orderId': order['orderId']}]
        }
        # price fields depend on the original order's type
        if order['orderType'] == 'LMT' and (lmtPrice or order.get('lmtPrice')):
            data['lmtPrice'] = lmtPrice or order['lmtPrice']
        elif order['orderType'] == 'STP' and (stpPrice or order.get('auxPrice')):
            data['auxPrice'] = stpPrice or order['auxPrice']
        elif order['orderType'] == 'STP LMT' and (stpPrice or order.get('auxPrice')) and (lmtPrice or order.get('lmtPrice')):
            data['auxPrice'] = stpPrice or order['auxPrice']
            data['lmtPrice'] = lmtPrice or order['lmtPrice']
        response = requests.post(self._urls.replace_option_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('replace_option_order failed', response.status_code, response.reason)
        return True
def cancel_all_orders(self):
'''
Cancels all open (aka 'working') orders
'''
open_orders = self.get_current_orders()
for order in open_orders:
self.cancel_order(order['orderId'])
def get_tradable(self, stock='') :
'''
get if stock is tradable
'''
headers = self.build_req_headers()
response = requests.get(self._urls.is_tradable(self.get_ticker(stock)), headers=headers, timeout=self.timeout)
return response.json()
def alerts_list(self) :
'''
Get alerts
'''
headers = self.build_req_headers()
response = requests.get(self._urls.list_alerts(), headers=headers, timeout=self.timeout)
result = response.json()
if 'data' in result:
return result.get('data', [])
else:
return None
    def alerts_remove(self, alert=None, priceAlert=True, smartAlert=True):
        '''
        remove alert
        alert is retrieved from alert_list

        :param priceAlert: when True, remove the price-warning part of the alert
        :param smartAlert: when True, remove the smart (event) warning rules
        :return: True on HTTP 200; raises otherwise
        '''
        headers = self.build_req_headers()
        if alert.get('tickerWarning') and priceAlert:
            # mark the price warning for removal and echo it back as the payload input
            alert['tickerWarning']['remove'] = True
            alert['warningInput'] = alert['tickerWarning']
        if alert.get('eventWarning') and smartAlert:
            # deactivate every smart rule, then mark the warning for removal
            alert['eventWarning']['remove'] = True
            for rule in alert['eventWarning']['rules']:
                rule['active'] = 'off'
            alert['eventWarningInput'] = alert['eventWarning']
        response = requests.post(self._urls.remove_alert(), json=alert, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('alerts_remove failed', response.status_code, response.reason)
        return True
def alerts_add(self, stock=None, frequency=1, interval=1, priceRules=[], smartRules=[]):
'''
add price/percent/volume alert
frequency: 1 is once a day, 2 is once a minute
interval: 1 is once, 0 is repeating
priceRules: list of dicts with below attributes per alert
field: price , percent , volume
type: price (above/below), percent (above/below), volume (vol in thousands)
value: price, percent, volume amount
remark: comment
rules example:
priceRules = [{'field': 'price', 'type': 'above', 'value': '900.00', 'remark': 'above'}, {'field': 'price', 'type': 'below',
'value': '900.00', 'remark': 'below'}]
smartRules = [{'type':'earnPre','active':'on'},{'type':'fastUp','active':'on'},{'type':'fastDown','active':'on'},
{'type':'week52Up','active':'on'},{'type':'week52Down','active':'on'},{'type':'day5Down','active':'on'}]
'''
headers = self.build_req_headers()
rule_keys = ['value', 'field', 'remark', 'type', 'active']
for line, rule in enumerate(priceRules, start=1):
for key in rule:
if key not in rule_keys:
raise Exception('malformed price alert priceRules found.')
rule['alertRuleKey'] = line
rule['active'] = 'on'
alert_keys = ['earnPre', 'fastUp', 'fastDown', 'week52Up', 'week52Down', 'day5Up', 'day10Up', 'day20Up', 'day5Down', 'day10Down', 'day20Down']
for rule in smartRules:
if rule['type'] not in alert_keys:
raise Exception('malformed smart alert smartRules found.')
try:
stock_data = self.get_tradable(stock)['data'][0]
data = {
'regionId': stock_data['regionId'],
'tickerType': stock_data['type'],
'tickerId': stock_data['tickerId'],
'tickerSymbol': stock,
'disSymbol': stock,
'tinyName': stock_data['name'],
'tickerName': stock_data['name'],
'exchangeCode': stock_data['exchangeCode'],
'showCode': stock_data['disExchangeCode'],
'disExchangeCode': stock_data['disExchangeCode'],
'eventWarningInput': {
'tickerId': stock_data['tickerId'],
'rules': smartRules,
'remove': False,
'del': False
},
'warningInput': {
'warningFrequency': frequency,
'warningInterval': interval,
'rules': priceRules,
'tickerId': stock_data['tickerId']
}
}
except Exception as e:
print(f'failed to build alerts_add payload data. error: {e}')
response = requests.post(self._urls.add_alert(), json=data, headers=headers, timeout=self.timeout)
if response.status_code != 200:
raise Exception('alerts_add failed', response.status_code, response.reason)
return True
def active_gainer_loser(self, direction='gainer', rank_type='afterMarket', count=50) :
'''
gets gainer / loser / active stocks sorted by change
direction: gainer / loser / active
rank_type: preMarket / afterMarket / 5min / 1d / 5d / 1m / 3m / 52w (gainer/loser)
volume / turnoverRatio / range (active)
'''
headers = self.build_req_headers()
response = requests.get(self._urls.active_gainers_losers(direction, self._region_code, rank_type, count), headers=headers, timeout=self.timeout)
result = response.json()
return result
    def run_screener(self, region=None, price_lte=None, price_gte=None, pct_chg_gte=None, pct_chg_lte=None, sort=None,
                     sort_dir=None, vol_lte=None, vol_gte=None):
        '''
        Notice the fact that endpoints are reversed on lte and gte, but this function makes it work correctly
        Also screeners are not sent by name, just the parameters are sent
        example: run_screener( price_lte=.10, price_gte=5, pct_chg_lte=.035, pct_chg_gte=.51)
        just a start, add more as you need it

        NOTE(review): `region` and `sort_dir` only gate the defaults below;
        a rule is emitted only when BOTH its lte and gte values are given.
        '''
        jdict = collections.defaultdict(dict)
        jdict['fetch'] = 200
        jdict['rules'] = collections.defaultdict(str)
        jdict['sort'] = collections.defaultdict(str)
        jdict['attach'] = {'hkexPrivilege': 'true'} #unknown meaning, was in network trace
        # region is hard-coded to 'securities.region.name.6'
        jdict['rules']['wlas.screener.rule.region'] = 'securities.region.name.6'
        if not price_lte is None and not price_gte is None:
            # lte and gte are backwards
            # NOTE(review): the '<e=' separator looks odd but is used for every
            # rule here — confirm against a live network trace before changing
            jdict['rules']['wlas.screener.rule.price'] = 'gte=' + str(price_lte) + '<e=' + str(price_gte)
        if not vol_lte is None and not vol_gte is None:
            # lte and gte are backwards
            jdict['rules']['wlas.screener.rule.volume'] = 'gte=' + str(vol_lte) + '<e=' + str(vol_gte)
        if not pct_chg_lte is None and not pct_chg_gte is None:
            # lte and gte are backwards
            jdict['rules']['wlas.screener.rule.changeRatio'] = 'gte=' + str(pct_chg_lte) + '<e=' + str(pct_chg_gte)
        if sort is None:
            jdict['sort']['rule'] = 'wlas.screener.rule.price'
        if sort_dir is None:
            jdict['sort']['desc'] = 'true'
        # jdict = self._ddict2dict(jdict)
        response = requests.post(self._urls.screener(), json=jdict, timeout=self.timeout)
        result = response.json()
        return result
def get_analysis(self, stock=None):
'''
get analysis info and returns a dict of analysis ratings
'''
headers = self.build_req_headers()
return requests.get(self._urls.analysis(self.get_ticker(stock)), headers=headers, timeout=self.timeout).json()
def get_capital_flow(self, stock=None, tId=None, show_hist=True):
'''
get capital flow
:param stock:
:param tId:
:param show_hist:
:return: list of capital flow
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_capital_flow(tId, show_hist), headers=headers, timeout=self.timeout).json()
def get_etf_holding(self, stock=None, tId=None, has_num=0, count=50):
'''
get ETF holdings by stock
:param stock:
:param tId:
:param has_num:
:param count:
:return: list of ETF holdings
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_etf_holding(tId, has_num, count), headers=headers, timeout=self.timeout).json()
def get_institutional_holding(self, stock=None, tId=None):
'''
get institutional holdings
:param stock:
:param tId:
:return: list of institutional holdings
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_institutional_holding(tId), headers=headers, timeout=self.timeout).json()
def get_short_interest(self, stock=None, tId=None):
'''
get short interest
:param stock:
:param tId:
:return: list of short interest
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_shortinterest(tId), headers=headers, timeout=self.timeout).json()
def get_financials(self, stock=None):
'''
get financials info and returns a dict of financial info
'''
headers = self.build_req_headers()
return requests.get(self._urls.fundamentals(self.get_ticker(stock)), headers=headers, timeout=self.timeout).json()
def get_news(self, stock=None, tId=None, Id=0, items=20):
    '''
    get news and returns a list of articles
    params:
        Id: 0 is latest news article
        items: number of articles to return
    :raises ValueError: if neither stock nor tId is provided
    '''
    headers = self.build_req_headers()
    # PEP 8 spelling of the original `not x is None` chain.
    if tId is None:
        if stock is None:
            raise ValueError('Must provide a stock symbol or a stock id')
        tId = self.get_ticker(stock)
    return requests.get(self._urls.news(tId, Id, items), headers=headers, timeout=self.timeout).json()
def get_bars(self, stock=None, tId=None, interval='m1', count=1, extendTrading=0, timeStamp=None):
    '''
    get bars returns a pandas dataframe indexed by bar timestamp
    params:
        interval: m1, m5, m15, m30, h1, h2, h4, d1, w1
        count: number of bars to return
        extendTrading: change to 1 for pre-market and afterhours bars
        timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
    :raises ValueError: if neither stock nor tId is provided
    '''
    headers = self.build_req_headers()
    # PEP 8 spelling of the original `not x is None` chain.
    if tId is None:
        if stock is None:
            raise ValueError('Must provide a stock symbol or a stock id')
        tId = self.get_ticker(stock)
    if timeStamp is None:
        # if not set, default to current time
        timeStamp = int(time.time())
    params = {'extendTrading': extendTrading}
    df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
    df.index.name = 'timestamp'
    response = requests.get(
        self._urls.bars(tId, interval, count, timeStamp),
        params=params,
        headers=headers,
        timeout=self.timeout,
    )
    result = response.json()
    time_zone = timezone(result[0]['timeZone'])
    for row in result[0]['data']:
        # Each bar is a comma-separated string; the API uses the literal
        # string 'null' for missing fields, coerced to '0' before float().
        row = row.split(',')
        row = ['0' if value == 'null' else value for value in row]
        data = {
            'open': float(row[1]),
            'high': float(row[3]),
            'low': float(row[4]),
            'close': float(row[2]),
            'volume': float(row[6]),
            'vwap': float(row[7])
        }
        # convert to a panda datetime64 which has extra features like floor and resample
        df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
    # Reverse the row order before returning.
    return df.iloc[::-1]
def get_bars_crypto(self, stock=None, tId=None, interval='m1', count=1, extendTrading=0, timeStamp=None):
    '''
    get bars returns a pandas dataframe (crypto endpoint variant)
    params:
        interval: m1, m5, m15, m30, h1, h2, h4, d1, w1
        count: number of bars to return
        extendTrading: change to 1 for pre-market and afterhours bars
        timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
    :raises ValueError: if neither stock nor tId is provided
    '''
    headers = self.build_req_headers()
    # PEP 8 spelling of the original `not x is None` chain.
    if tId is None:
        if stock is None:
            raise ValueError('Must provide a stock symbol or a stock id')
        tId = self.get_ticker(stock)
    params = {'type': interval, 'count': count, 'extendTrading': extendTrading, 'timestamp': timeStamp}
    df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
    df.index.name = 'timestamp'
    response = requests.get(self._urls.bars_crypto(tId), params=params, headers=headers, timeout=self.timeout)
    result = response.json()
    time_zone = timezone(result[0]['timeZone'])
    for row in result[0]['data']:
        # Bars arrive as comma-separated strings; 'null' fields become '0'.
        row = row.split(',')
        row = ['0' if value == 'null' else value for value in row]
        data = {
            'open': float(row[1]),
            'high': float(row[3]),
            'low': float(row[4]),
            'close': float(row[2]),
            'volume': float(row[6]),
            'vwap': float(row[7])
        }
        # convert to a panda datetime64 which has extra features like floor and resample
        df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
    # Reverse the row order before returning.
    return df.iloc[::-1]
def get_options_bars(self, derivativeId=None, interval='1m', count=1, direction=1, timeStamp=None):
    '''
    get bars returns a pandas dataframe
    params:
        derivativeId: to be obtained from option chain, eg option_chain[0]['call']['tickerId']
        interval: 1m, 5m, 30m, 60m, 1d
        count: number of bars to return
        direction: 1 ignores {count} parameter & returns all bars on and after timestamp
            setting any other value will ignore timestamp & return latest {count} bars
        timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
    '''
    headers = self.build_req_headers()
    if derivativeId is None:
        raise ValueError('Must provide a derivative ID')
    params = {'type': interval, 'count': count, 'direction': direction, 'timestamp': timeStamp}
    df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
    df.index.name = 'timestamp'
    response = requests.get(self._urls.options_bars(derivativeId), params=params, headers=headers, timeout=self.timeout)
    result = response.json()
    time_zone = timezone(result[0]['timeZone'])
    for row in result[0]['data']:
        # Each bar is a comma-separated string; 'null' fields are coerced
        # to '0' before float conversion.
        row = row.split(',')
        row = ['0' if value == 'null' else value for value in row]
        data = {
            'open': float(row[1]),
            'high': float(row[3]),
            'low': float(row[4]),
            'close': float(row[2]),
            'volume': float(row[6]),
            'vwap': float(row[7])
        }
        #convert to a panda datetime64 which has extra features like floor and resample
        df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
    return df.iloc[::-1]
def get_chart_data(self, stock=None, tId=None, ma=5, timestamp=None):
    '''
    Return the *ma*-bar moving average of daily closes, with NaN rows dropped.

    Bug fix: get_bars() takes the keyword ``timeStamp`` (capital S); the
    previous code passed ``timestamp=`` which raised TypeError on every call.
    '''
    bars = self.get_bars(stock=stock, tId=tId, interval='d1', count=1200, timeStamp=timestamp)
    ma_data = bars['close'].rolling(ma).mean()
    return ma_data.dropna()
def get_calendar(self, stock=None, tId=None):
    '''
    There doesn't seem to be a way to get the times the market is open outside of the charts.
    So, best way to tell if the market is open is to pass in a popular stock like AAPL then
    and see the open and close hours as would be marked on the chart
    and see if the last trade date is the same day as today's date
    :param stock:
    :param tId:
    :return: dict of 'market open', 'market close', 'last trade date'
    '''
    headers = self.build_req_headers()
    if not tId is None:
        pass
    elif not stock is None:
        tId = self.get_ticker(stock)
    else:
        raise ValueError('Must provide a stock symbol or a stock id')
    params = {'type': 'm1', 'count': 1, 'extendTrading': 0}
    # NOTE(review): self._urls.bars() is called with four arguments in
    # get_bars() but only one here — confirm the URL builder accepts both.
    response = requests.get(self._urls.bars(tId), params=params, headers=headers, timeout=self.timeout)
    result = response.json()
    time_zone = timezone(result[0]['timeZone'])
    # First CSV field of the returned bar is its epoch timestamp.
    last_trade_date = datetime.fromtimestamp(int(result[0]['data'][0].split(',')[0])).astimezone(time_zone)
    today = datetime.today().astimezone()  # use no time zone to have it pull in local time zone
    if last_trade_date.date() < today.date():
        # don't know what today's open and close times are, since no trade for today yet
        return {'market open': None, 'market close': None, 'trading day': False}
    for d in result[0]['dates']:
        if d['type'] == 'T':
            market_open = today.replace(
                hour=int(d['start'].split(':')[0]),
                minute=int(d['start'].split(':')[1]),
                second=0)
            # Drop sub-second precision before converting to market timezone.
            market_open -= timedelta(microseconds=market_open.microsecond)
            market_open = market_open.astimezone(time_zone)  # set to market timezone
            market_close = today.replace(
                hour=int(d['end'].split(':')[0]),
                minute=int(d['end'].split(':')[1]),
                second=0)
            market_close -= timedelta(microseconds=market_close.microsecond)
            market_close = market_close.astimezone(time_zone)  # set to market timezone
            # this implies that we have waited a few minutes from the open before trading
            return {'market open': market_open, 'market close': market_close, 'trading day': True}
    # otherwise
    return None
def get_dividends(self):
    ''' Return account's incoming dividend info '''
    headers = self.build_req_headers()
    response = requests.post(
        self._urls.dividends(self._account_id),
        json={},
        headers=headers,
        timeout=self.timeout,
    )
    return response.json()
def get_five_min_ranking(self, extendTrading=0):
    '''
    get 5 minute trend ranking
    '''
    headers = self.build_req_headers()
    params = {'regionId': self._region_code, 'userRegionId': self._region_code, 'platform': 'pc', 'limitCards': 'latestActivityPc'}
    response = requests.get(self._urls.rankings(), params=params, headers=headers, timeout=self.timeout)
    result = response.json()[0].get('data')
    # Extended-trading requests read a different card id from the payload.
    target_id = 'latestActivityPc.faList' if extendTrading else 'latestActivityPc.5minutes'
    matches = [entry['data'] for entry in result if entry['id'] == target_id]
    # Last matching card wins, mirroring the original loop; [] when absent.
    return matches[-1] if matches else []
def get_watchlists(self, as_list_symbols=False):
    """
    get user watchlists
    """
    headers = self.build_req_headers()
    response = requests.get(self._urls.portfolio_lists(), params={'version': 0}, headers=headers, timeout=self.timeout)
    portfolio_list = response.json()['portfolioList']
    if as_list_symbols:
        # Flatten the first watchlist into a plain list of ticker symbols.
        tickers = portfolio_list[0].get('tickerList')
        return [entry.get('symbol') for entry in tickers]
    return portfolio_list
def get_account_type(self, username=''):
    '''
    Classify the login identifier for the auth API: returns 2 when
    *username* parses as a valid e-mail address, otherwise 1 (treated
    as a phone number).
    '''
    try:
        validate_email(username)
        account_type = 2  # email
    except EmailNotValidError as _e:
        account_type = 1  # phone
    return account_type
def is_logged_in(self):
    '''
    Check if login session is active
    '''
    # get_account_id() raises KeyError when the session is not authenticated.
    try:
        self.get_account_id()
        return True
    except KeyError:
        return False
def get_press_releases(self, stock=None, tId=None, typeIds=None, num=50):
    '''
    gets press releases, useful for getting past earning reports
    typeIds: None (all) or comma-separated string of the following: '101' (financials) / '104' (insiders)
    it's possible they add more announcment types in the future, so check the 'announcementTypes'
    field on the response to verify you have the typeId you want
    :raises ValueError: if neither stock nor tId is provided
    '''
    # PEP 8 spelling of the original `not x is None` chain.
    if tId is None:
        if stock is None:
            raise ValueError('Must provide a stock symbol or a stock id')
        tId = self.get_ticker(stock)
    headers = self.build_req_headers()
    response = requests.get(self._urls.press_releases(tId, typeIds, num), headers=headers, timeout=self.timeout)
    return response.json()
def get_calendar_events(self, event, start_date=None, page=1, num=50):
    '''
    gets calendar events
    event: 'earnings' / 'dividend' / 'splits'
    start_date: in `YYYY-MM-DD` format, today if None
    '''
    if start_date is None:
        start_date = datetime.today().strftime('%Y-%m-%d')
    headers = self.build_req_headers()
    url = self._urls.calendar_events(event, self._region_code, start_date, page, num)
    return requests.get(url, headers=headers, timeout=self.timeout).json()
''' Paper support '''
class paper_webull(webull):
    """webull client variant that targets the paper-trading endpoints."""

    def __init__(self):
        super().__init__()

    def get_account(self):
        ''' Get important details of paper account '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.paper_account(self._account_id), headers=headers, timeout=self.timeout)
        return response.json()

    def get_account_id(self):
        ''' Get paper account id: call this before paper account actions'''
        headers = self.build_req_headers()
        response = requests.get(self._urls.paper_account_id(), headers=headers, timeout=self.timeout)
        result = response.json()
        if result is not None and len(result) > 0 and 'id' in result[0]:
            # Renamed from `id` so the builtin is not shadowed.
            account_id = result[0]['id']
            self._account_id = account_id
            return account_id
        else:
            return None

    def get_current_orders(self):
        ''' Open paper trading orders '''
        return self.get_account()['openOrders']

    def get_history_orders(self, status='Cancelled', count=20):
        ''' Historical paper orders filtered by *status*. '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        response = requests.get(self._urls.paper_orders(self._account_id, count) + str(status), headers=headers, timeout=self.timeout)
        return response.json()

    def get_positions(self):
        ''' Current positions in paper trading account. '''
        return self.get_account()['positions']

    def place_order(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True):
        ''' Place a paper account order.
        :raises ValueError: if neither stock nor tId is provided
        '''
        # PEP 8 spelling of the original `not x is None` chain.
        if tId is None:
            if stock is None:
                raise ValueError('Must provide a stock symbol or a stock id')
            tId = self.get_ticker(stock)
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'action': action,  # BUY or SELL
            'lmtPrice': float(price),
            'orderType': orderType,  # 'LMT','MKT'
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': int(quant),
            'serialId': str(uuid.uuid4()),
            'tickerId': tId,
            'timeInForce': enforce  # GTC or DAY
        }
        # Market orders do not support extended hours trading.
        if orderType == 'MKT':
            data['outsideRegularTradingHour'] = False
        response = requests.post(self._urls.paper_place_order(self._account_id, tId), json=data, headers=headers, timeout=self.timeout)
        return response.json()

    def modify_order(self, order, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True):
        ''' Modify a paper account order. Returns True on success. '''
        headers = self.build_req_headers()
        data = {
            'action': action,  # BUY or SELL
            'lmtPrice': float(price),
            'orderType': orderType,
            'comboType': 'NORMAL',  # 'LMT','MKT'
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'serialId': str(uuid.uuid4()),
            'tickerId': order['ticker']['tickerId'],
            'timeInForce': enforce  # GTC or DAY
        }
        # Quantity 0 (or the current quantity) keeps the order size unchanged.
        if quant == 0 or quant == order['totalQuantity']:
            data['quantity'] = order['totalQuantity']
        else:
            data['quantity'] = int(quant)
        response = requests.post(self._urls.paper_modify_order(self._account_id, order['orderId']), json=data, headers=headers, timeout=self.timeout)
        if response:
            return True
        else:
            print("Modify didn't succeed. {} {}".format(response, response.json()))
            return False

    def cancel_order(self, order_id):
        ''' Cancel a paper account order. Returns True if the HTTP call succeeded. '''
        headers = self.build_req_headers()
        response = requests.post(self._urls.paper_cancel_order(self._account_id, order_id), headers=headers, timeout=self.timeout)
        return bool(response)

    def get_social_posts(self, topic, num=100):
        ''' Posts for a social feed topic (parsed JSON). '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.social_posts(topic, num), headers=headers, timeout=self.timeout)
        return response.json()

    def get_social_home(self, topic, num=100):
        ''' Home feed for a social topic (parsed JSON). '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.social_home(topic, num), headers=headers, timeout=self.timeout)
        return response.json()
if __name__ == '__main__':
    # Simple CLI entry point: choose between the live and paper-trading clients.
    parser = argparse.ArgumentParser(description='Interface with Webull. Paper trading is not the default.')
    parser.add_argument('-p', '--use-paper', help='Use paper account instead.', action='store_true')
    args = parser.parse_args()
    if args.use_paper:
        wb = paper_webull()
    else:
        wb = webull()
| tedchou12/webull | webull/webull.py | webull.py | py | 63,799 | python | en | code | 576 | github-code | 36 | [
{
"api_name": "requests.session",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line... |
37055431378 | import asyncio
import ciberedev
# Module-level client instance shared by main() below.
client = ciberedev.Client()
async def main():
    """Take a screenshot of google.com, print its URL, and save it locally."""
    # The context manager opens and closes the client's HTTP session.
    async with client:
        # Local renamed from the misspelled `screnshot`.
        screenshot = await client.take_screenshot("www.google.com")
        # printing the screenshots url
        print(screenshot.url)
        # saving the screenshot to a file
        await screenshot.save("test.png")
# Run the example only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())
| cibere/ciberedev.py | examples/take_screenshot.py | take_screenshot.py | py | 572 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ciberedev.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "asyncio.run",
"line_number": 24,
"usage_type": "call"
}
] |
74062234985 | import pytest
from fauxcaml.semantics.check import Checker
from fauxcaml.semantics.typ import *
from fauxcaml.semantics.unifier_set import UnificationError
def test_concrete_atom_unification():
checker = Checker()
checker.unify(Int, Int)
def test_concrete_poly_unification():
checker = Checker()
checker.unify(Tuple(Int, Bool), Tuple(Int, Bool))
def test_var_unification():
checker = Checker()
T = checker.fresh_var()
U = checker.fresh_var()
assert not checker.unifiers.same_set(T, U)
checker.unify(T, U)
assert checker.unifiers.same_set(T, U)
checker.unify(T, Bool)
assert checker.unifiers.same_set(T, Bool)
assert checker.unifiers.same_set(U, Bool)
def test_var_more_unification():
checker = Checker()
T = checker.fresh_var()
U = checker.fresh_var()
checker.unify(Tuple(T, Bool), Tuple(Int, U))
assert checker.unifiers.same_set(T, Int)
assert checker.unifiers.same_set(U, Bool)
def test_unification_error():
checker = Checker()
T = checker.fresh_var()
with pytest.raises(UnificationError):
checker.unify(Tuple(Bool, Int), Tuple(T, T))
with pytest.raises(UnificationError):
checker.unify(Tuple(Bool, Int), Tuple(Bool))
with pytest.raises(UnificationError):
checker.unify(Tuple(Bool, Int), Fn(Bool, Int))
def test_basic_generic_non_generic_unification():
checker = Checker()
generic = checker.fresh_var()
non_generic = checker.fresh_var(non_generic=True)
checker.unify(generic, non_generic)
assert checker.is_non_generic(generic)
def test_basic_generic_non_generic_unification_reversed():
checker = Checker()
generic = checker.fresh_var()
non_generic = checker.fresh_var(non_generic=True)
checker.unify(non_generic, generic)
assert checker.is_non_generic(generic)
def test_complex_generic_non_generic_unification():
checker = Checker()
generic = checker.fresh_var()
non_generic = checker.fresh_var(non_generic=True)
t = Tuple(generic)
checker.unify(non_generic, t)
assert checker.is_non_generic(generic)
def test_concretize():
checker = Checker()
T = checker.fresh_var()
U = checker.fresh_var()
tup = Tuple(T, Fn(U, Int))
checker.unify(T, List(Bool))
checker.unify(U, T)
concrete = checker.concretize(tup)
assert concrete == Tuple(List(Bool), Fn(List(Bool), Int))
| eignnx/fauxcaml | fauxcaml/tests/test_unification.py | test_unification.py | py | 2,419 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "fauxcaml.semantics.check.Checker",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fauxcaml.semantics.check.Checker",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fauxcaml.semantics.check.Checker",
"line_number": 19,
"usage_type": "cal... |
483706012 | import hashlib
import json
import os
import struct
import sys
import textwrap
from fnmatch import fnmatch
from pathlib import Path
from typing import Dict, List, Union
import cryptography
from cryptography.fernet import Fernet
if sys.version_info < (3, 8):
TypedDict = dict
else:
from typing import TypedDict
__version__ = "0.1.0"
#
# Helpers
#
def md5_hash_for_file(filepath):
    """Return the hex MD5 digest of the file at *filepath*.

    The file is opened in binary mode and closed deterministically via a
    context manager (the original left the handle to the garbage collector).
    """
    with open(filepath, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()
def encrypt(key: str, fin: Union[str, Path], fout: Union[str, Path], *, block=1 << 16):
    """
    Encrypts a file in chunks to support large file sizes.

    Each chunk is written as a 4-byte little-endian length prefix followed
    by the Fernet ciphertext, so decrypt() can re-frame the stream.

    :param key: The key to use for encryption
    :param fin: The file to encrypt
    :param fout: The encrypted file to write to
    :param block: plaintext chunk size in bytes (default 64 KiB)
    """
    fernet = cryptography.fernet.Fernet(key)
    with open(fin, "rb") as fi, open(fout, "wb") as fo:
        while True:
            chunk = fi.read(block)
            if len(chunk) == 0:
                break
            enc = fernet.encrypt(chunk)
            # Length prefix: ciphertext size varies, so frame each chunk.
            fo.write(struct.pack("<I", len(enc)))
            fo.write(enc)
            if len(chunk) < block:
                # Short read means end of file; avoid one extra read() call.
                break
def decrypt(key: str, fin: Union[str, Path], fout: Union[str, Path]):
    """
    Decrypts a file in chunks to support large file sizes.

    Reads the 4-byte little-endian length prefix written by encrypt(),
    then that many ciphertext bytes, until EOF.

    :param key: The key to use for decryption
    :param fin: The encrypted file to decrypt
    :param fout: The decrypted file to write to
    """
    fernet = cryptography.fernet.Fernet(key)
    with open(fin, "rb") as fi, open(fout, "wb") as fo:
        while True:
            size_data = fi.read(4)
            if len(size_data) == 0:
                # Clean EOF: no more framed chunks.
                break
            chunk = fi.read(struct.unpack("<I", size_data)[0])
            dec = fernet.decrypt(chunk)
            fo.write(dec)
class VaultManifest(TypedDict):
    """
    A VaultManifest is a dictionary of files and their hashes.
    """

    # Used as a notice to indicate the file is machine generated
    _: str
    # The version of the manifest, used for backwards compatibility
    version: str
    # The list of file hashes in the vault (relative path -> MD5 hex digest)
    files: Dict[str, str]
class VaultChangeSet(TypedDict):
    """Summary of differences between the decrypted files and the manifest."""

    # additions + deletions + updates, for a quick has-changes check.
    total: int
    # Files present on disk but not yet in the manifest.
    additions: List[str]
    # Files in the manifest that no longer exist on disk.
    deletions: List[str]
    # Files whose content hash differs from the manifest entry.
    updates: List[str]
    # Files whose content hash matches the manifest entry.
    unchanged: List[str]
#
# DataVault
#
class DataVault:
    """A directory of plaintext files mirrored into an encrypted `.encrypted` folder.

    A JSON manifest of per-file MD5 hashes lets encrypt() re-encrypt only
    the files that changed since the previous run.
    """

    VERSION = 1
    MANIFEST_FILENAME = "vault_manifest.json"
    ENCRYPTED_NAMESPACE = ".encrypted"

    @staticmethod
    def find_all(path: Union[str, Path]) -> List["DataVault"]:
        """
        Returns a list of all vaults in the given path.
        """
        # Search path for vault manifests
        manifest_paths = [
            path
            for path in Path(path).rglob(
                f"{DataVault.ENCRYPTED_NAMESPACE}/{DataVault.MANIFEST_FILENAME}"
            )
            if DataVault._verify_manifest(path)
        ]
        # Manifest lives at <vault>/.encrypted/vault_manifest.json, so the
        # vault root is two levels up.
        vault_dirs = [Path(path).parent.parent for path in manifest_paths]
        vaults = [DataVault(path) for path in sorted(vault_dirs)]
        return vaults

    @staticmethod
    def _verify_manifest(vault_manifest_path: Union[str, Path]) -> bool:
        """
        Verifies that the vault manifest is valid.
        """
        try:
            with open(vault_manifest_path, "r") as f:
                manifest = json.load(f)
        except Exception:
            # Unreadable or non-JSON file: not a valid manifest.
            return False
        if not isinstance(manifest.get("_"), str):
            return False
        if not isinstance(manifest.get("files"), dict):
            return False
        return manifest.get("version") == DataVault.VERSION

    @staticmethod
    def generate_secret() -> str:
        """
        Generates a fresh vault key. Keep this some place safe! If you lose it
        you'll no longer be able to decrypt vaults; if anyone else gains
        access to it, they'll be able to decrypt all of your messages, and
        they'll also be able forge arbitrary messages that will be
        authenticated and decrypted.

        Uses Fernet to generate a key. See:
        https://cryptography.io/en/latest/fernet/
        """
        return Fernet.generate_key().decode("utf-8")

    def __init__(self, path: Union[str, Path]):
        self.root_path = Path(path)
        self.encrypted_path = self.root_path / DataVault.ENCRYPTED_NAMESPACE
        self.vault_manifest_path = self.encrypted_path / DataVault.MANIFEST_FILENAME

    def create(self) -> None:
        """
        Creates the file paths for a new vault with an empty manifest.

        This method will not work if there are already files in the
        vaults standard paths.

        (Return annotation corrected: this method returns nothing.)
        """
        # Create vault storage paths
        self.root_path.mkdir(exist_ok=False)
        self.encrypted_path.mkdir(exist_ok=False)
        self._create_gitignore()
        self._reset_manifest()
        self._verify_or_explode()

    def encrypt(self, secret_key: str) -> None:
        """
        Encrypts all decrypted files in the data vault that have changed
        since the last encryption.
        """
        self._create_gitignore()  # Just in case
        self._verify_or_explode()
        changes = self.changes()
        for f in changes["additions"]:
            encrypt(secret_key, self.root_path / f, self.encrypted_path / f)
        for f in changes["updates"]:
            os.remove(os.path.join(self.encrypted_path, f))
            encrypt(secret_key, self.root_path / f, self.encrypted_path / f)
        for f in changes["deletions"]:
            os.remove(os.path.join(self.encrypted_path, f))
        # Write the new manifest
        with open(self.vault_manifest_path, "w") as f:
            json.dump(self._next_manifest(), f, indent=2)

    def decrypt(self, secret_key: str) -> None:
        """
        Decrypts all the encrypted files in the data vault.
        """
        self._create_gitignore()  # Just in case
        self._verify_or_explode()
        # Delete all decrypted files first so stale files do not survive.
        for f in self.files():
            os.remove(os.path.join(self.root_path, f))
        for f in self.encrypted_files():
            decrypt(secret_key, self.encrypted_path / f, self.root_path / f)

    def verify(self) -> bool:
        """
        Returns True if a valid vault exists for the given path.
        """
        try:
            self._verify_or_explode()
            return True
        except Exception:
            return False

    def files(self) -> List[str]:
        """
        Returns a list of all files in the vault recursively, as paths
        relative to the vault root.
        """
        files = []
        # Enumerate files, skipping the encrypted dir and the vault's gitignore.
        for entry in os.listdir(self.root_path):
            if entry in (DataVault.ENCRYPTED_NAMESPACE, ".gitignore"):
                continue
            entry_path = os.path.join(self.root_path, entry)
            if os.path.isdir(entry_path):
                # Bug fix: walk the subdirectory itself — the original walked
                # os.walk(".") (the process CWD) and computed paths relative
                # to the *encrypted* directory, producing wrong results.
                for dp, _dn, filenames in os.walk(entry_path):
                    for name in filenames:
                        # Only files with an extension are tracked (mirrors
                        # the filter in encrypted_files()).
                        if os.path.splitext(name)[1]:
                            files.append(
                                f"{Path(os.path.join(dp, name)).relative_to(self.root_path)}"
                            )
            else:
                files.append(entry)
        # Collect gitignore patterns, one per non-empty non-comment line.
        # Bug fix: the original appended each file's entire contents as a
        # single fnmatch pattern, which never matched anything.
        ignore_patterns = []
        for gitignore in (Path.home() / ".gitignore", Path.cwd() / ".gitignore"):
            if gitignore.exists():
                with open(gitignore, "r") as f:
                    ignore_patterns.extend(
                        line.strip()
                        for line in f
                        if line.strip() and not line.strip().startswith("#")
                    )
        # Filter out ignored files
        return [
            n for n in files if not any(fnmatch(n, pat) for pat in ignore_patterns)
        ]

    def encrypted_files(self):
        """
        Returns a list of all encrypted files in the vault, as paths
        relative to the encrypted directory.
        """
        files = []
        for dp, _dn, filenames in os.walk(self.encrypted_path):
            for name in filenames:
                # Skip the manifest itself and extension-less files.
                if name != DataVault.MANIFEST_FILENAME and os.path.splitext(name)[1]:
                    files.append(
                        f"{Path(os.path.join(dp, name)).relative_to(self.encrypted_path)}"
                    )
        return files

    def is_empty(self) -> bool:
        """
        Returns True if the vault is empty.
        """
        return len(self.files()) == 0

    def changes(self) -> VaultChangeSet:
        """
        Returns a list of the changes to the vault since the last encryption.
        """
        updates, additions, deletions = (
            self.updates(),
            self.additions(),
            self.deletions(),
        )
        return {
            "total": len(updates) + len(additions) + len(deletions),
            "additions": additions,
            "deletions": deletions,
            "updates": updates,
            "unchanged": [
                f for f in self.files() if f not in set(updates + additions + deletions)
            ],
        }

    def has_changes(self):
        """
        Returns True if there are changes to the data in the vault.
        """
        return self.changes()["total"] > 0

    def additions(self) -> List[str]:
        """
        Returns a list of files that are in the decrypted directory but not
        in the vault manifest.
        """
        manifest_files = set(self.manifest()["files"])
        return [f for f in self.files() if f not in manifest_files]

    def deletions(self) -> List[str]:
        """
        Returns a list of files that are in the vault manifest but not in
        the decrypted directory.
        """
        return [f for f in self.manifest()["files"] if f not in self.files()]

    def updates(self) -> List[str]:
        """
        Returns a list of files that have changed since the last encryption.

        We accomplish this by investigating the hashes of the files in the
        decrypted directory. If the hash of the file in the decrypted directory
        is different than the hash of the file in the vault manifest, we
        consider the file to have changed.
        """
        current_manifest = self.manifest()["files"]
        next_manifest = self._next_manifest()["files"]
        updates = []
        for file, hash in current_manifest.items():
            if not next_manifest.get(file):
                # File no longer exists on disk; counted as a deletion instead.
                continue
            if hash == next_manifest[file]:
                continue
            updates.append(file)
        return updates

    def manifest(self) -> VaultManifest:
        """
        Reads the currently persisted vault manifest file.
        """
        with open(self.vault_manifest_path, "r") as f:
            return json.load(f)

    def no_encypted_files(self) -> bool:
        """
        Returns True if the encrypted directory is empty.

        (Name typo "encypted" retained for backward compatibility.)
        """
        return len(self.encrypted_files()) == 0

    def clear(self) -> None:
        """
        Clears the data vault.
        """
        for f in self.files():
            os.remove(os.path.join(self.root_path, f))

    def clear_encrypted(self) -> None:
        """
        Clears the encrypted directory.
        """
        for f in self.encrypted_files():
            os.remove(os.path.join(self.encrypted_path, f))
        # You must clear the manifest otherwise the vault will
        # be invalid
        self._reset_manifest()

    def _verify_or_explode(self) -> None:
        """
        Verifies the vault has the correct structure and vault manifest.
        It also checks that all of the files in the manifest are encrypted.
        """
        if not self.root_path.exists():
            raise FileNotFoundError(
                f"Vault does not exist at given path: {self.root_path}"
            )
        if not self.encrypted_path.exists():
            raise FileNotFoundError(
                f"Vault encrypted directory does not exist at given path: {self.encrypted_path}"
            )
        if not DataVault._verify_manifest(self.vault_manifest_path):
            raise FileNotFoundError(
                f"Vault manifest is invalid at given path: {self.vault_manifest_path}"
            )
        if not (self.root_path / ".gitignore").exists():
            raise FileNotFoundError(
                f"Vault .gitignore file does not exist at given path: {self.root_path / '.gitignore'}"
            )
        # All files in the manifest must be encrypted
        missing_files = []
        for f in self.manifest()["files"]:
            if not os.path.exists(os.path.join(self.encrypted_path, f)):
                missing_files.append(f)
        if len(missing_files) > 0:
            raise FileNotFoundError(
                # Bug fix: textwrap has no `deindent`; the correct function
                # is textwrap.dedent (the original raised AttributeError
                # whenever this error path was hit).
                textwrap.dedent(
                    f"""
                    Vault manifest contains files that are not encrypted: {missing_files}

                    >>> THIS SHOULD NOT HAPPEN AND IS CONSIDERED A SERIOUS ISSUE. <<<

                    Check your vault directory {self.root_path} for the decrypted
                    version of these files. If you can't find them there, you may need
                    to search for an older version of the vault in version control. Otherwise,
                    these files have likely been entirely lost.

                    Once the files have been found, there are several ways to recover the vault:

                    1. Recreate the vault from scratch.
                    2. Remove the files from the autogenerated vault manifest ({self.vault_manifest_path})
                       and rerun the vault encryption.

                    If you do not need these files, you can simply delete them from the manifest.
                    """
                )
            )

    #
    # Private helpers
    #

    def _create_gitignore(self):
        """
        Creates a .gitignore file in the vault root directory.
        """
        with open(os.path.join(self.root_path, ".gitignore"), "w") as f:
            # Ignore everything in the vault except the encrypted mirror.
            f.write("/*\n")
            f.write(f"!/{DataVault.ENCRYPTED_NAMESPACE}\n")

    def _reset_manifest(self):
        """
        Generate an empty vault manifest
        """
        with open(self.vault_manifest_path, "w") as f:
            json.dump(self._empty_vault_manifest(), f, indent=2)

    def _empty_vault_manifest(self) -> VaultManifest:
        """
        Returns an empty vault config as a dict.
        """
        return {
            "_": "DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.",
            "version": self.VERSION,
            "files": {},
        }

    def _next_manifest(self) -> VaultManifest:
        """
        Returns the next version of the vault manifest that should be persisted
        after the next encryption.
        """
        return {
            "_": "DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.",
            "version": self.VERSION,
            "files": {f: md5_hash_for_file(self.root_path / f) for f in self.files()},
        }
| dihi/datavault | dihi_datavault/__init__.py | __init__.py | py | 14,958 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "hashlib.md5",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"li... |
39924477846 | from rest_framework import serializers
from core.models import Match
class MatchSerializer(serializers.ModelSerializer):
    """
    The `season` field is read only for the external API, because we force it to
    use the currently active season inside the MatchViewSet.perform_create()
    method.

    This means that you can ONLY record matches for the currently active
    season, as this is the poolbot centric use case to record match results
    after they have just finished via a client (slack, NFC etc.)
    """

    class Meta:
        model = Match
        # Fields exposed to API clients.
        fields = (
            'date',
            'season',
            'winner',
            'loser',
            'channel',
            'granny',
        )
        # `date` and `season` are server-controlled and cannot be set by clients.
        read_only_fields = (
            'date',
            'season',
        )
| dannymilsom/poolbot-server | src/api/serializers/match.py | match.py | py | 805 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "core.models.Match",
"line_number": 18,
"usage_type": "name"
}
... |
36740712303 | # coding=UTF-8
# Importamos las librerías
import sys
import os
import math
import csv
import numpy as np
from itertools import groupby
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
# Restart the running program by re-exec'ing the interpreter.
def reiniciar():
    """Replace the current process with a fresh run of this script."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
# Evaluates the Gaussian (normal-distribution) formula at a given coordinate.
def funcionGauss(a, s, x, y, mux, muy):
    """Return the Gaussian value at (x, y) centred on (mux, muy).

    a is the amplitude factor and s the standard deviation; the squared
    distance to the centre drives the exponential decay.
    """
    coeficiente = a / (math.sqrt(2.0 * math.pi) * s)
    distancia_sq = (x - mux) ** 2.0 + (y - muy) ** 2.0
    return coeficiente * math.exp(-(0.5 / (s ** 2)) * distancia_sq)
# Función que lee el fichero de datos y pinta un mapa de contornos en 3D
def generarGrafico():
    """Read the generated 'datos.csv' file and draw a 3D surface with contour projections."""
    data = []
    # 'with' guarantees the file is closed even on a parse error; the original
    # 'finally: ficheroDatos.close()' raised NameError when open() itself failed.
    with open('datos.csv') as fichero_datos:
        csv_reader = csv.reader(fichero_datos)
        next(csv_reader, None)  # skip the header row with the variable names
        for line in csv_reader:
            # list(...) is required: on Python 3 a bare map object is not
            # subscriptable, which breaks the line[0]/line[1] indexing below.
            data.append(list(map(float, line)))
    # Regroup the flat (x, y, f) rows into the X vector, Y vector and Z matrix
    # expected by plot_surface. Rows are grouped first by x, then by y; the
    # last value wins when a coordinate pair repeats.
    X, Z = [], []
    for x, g in groupby(data, key=lambda line: line[0]):
        X.append(x)
        Y = []
        new_Z = []
        for y, gg in groupby(g, key=lambda line: line[1]):
            Y.append(y)
            new_Z.append(list(gg)[-1][2])
        Z.append(new_Z)
    # Convert X, Y and Z into the array form required by the 3D plot.
    X, Y = np.meshgrid(X, Y)
    Z = np.array(Z)
    # Build a 3D axes and draw the surface plus one contour projection per axis.
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.3)
    cset = ax.contour(X, Y, Z, zdir='z', offset=-50, cmap=cm.coolwarm)
    cset = ax.contour(X, Y, Z, zdir='x', offset=-100, cmap=cm.coolwarm)
    cset = ax.contour(X, Y, Z, zdir='y', offset=-100, cmap=cm.coolwarm)
    # Label each axis and fix its visible range.
    ax.set_xlabel('X')
    ax.set_xlim(-100, 1200)
    ax.set_ylabel('Y')
    ax.set_ylim(-100, 1200)
    ax.set_zlabel('Z')
    ax.set_zlim(-50, 130)
    # Show the figure (blocks until the window is closed).
    plt.show()
def generarDatos():
    """Generate sample data from a mixture of three 2D Gaussians and write it to disk.

    Writes the same (x, y, f) grid in two formats: 'datos.csv' (comma
    separated with a header, for WEKA) and 'datos.dat' (space separated,
    for GNUPlot).
    """
    # Small offset added to every value so none of them is too close to zero.
    factorCorreccion = 0.00001
    # Standard deviations controlling the spread of each Gaussian bump.
    s1 = 100.0
    s2 = 130.0
    s3 = 60.0
    # Coordinates of the three Gaussian centres.
    mu1x = 250.0
    mu1y = 250.0
    mu2x = 550.0
    mu2y = 850.0
    mu3x = 830.0
    mu3y = 300.0
    # Amplitude of each bump.
    a1 = 11500.0
    a2 = 12000.0
    a3 = 15500.0
    # 'with' closes both files even if a write fails; the original relied on
    # unconditional close() calls at the end of the function.
    with open('datos.csv', 'w') as ficheroDatosCSV, open('datos.dat', 'w') as ficheroDatosDat:
        # Header row with the variable names (CSV only).
        ficheroDatosCSV.write("x"+","+"y"+","+"f"+"\n")
        for i in range(0, 100, 4):
            # Discretise the x axis in portions of 10 units.
            x = 100.0 + i * 10.0
            for j in range(0, 100, 4):
                # Discretise the y axis in portions of 10 units.
                y = 100.0 + j * 10.0
                # Evaluate the three Gaussians at (x, y).
                f1 = funcionGauss(a1, s1, x, y, mu1x, mu1y)
                f2 = funcionGauss(a2, s2, x, y, mu2x, mu2y)
                f3 = funcionGauss(a3, s3, x, y, mu3x, mu3y)
                # Same summation order as before: ((f1 + f2) + f3) + offset.
                ficheroDatosCSV.write(str(x) + "," + str(y) + "," + str(f1 + f2 + f3 + factorCorreccion)+"\n")
                ficheroDatosDat.write(str(x) + " " + str(y) + " " + str(f1 + f2 + f3 + factorCorreccion)+"\n")
def main():
    """Entry point: generate the sample data files, then plot them."""
    # Generate the example data (datos.csv / datos.dat).
    generarDatos()
    # Build the 3D surface plot from the generated data.
    generarGrafico()
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| DNC87/EM-Dataset-Generator | generador_datos/main.py | main.py | py | 4,438 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "sys.executable",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.execl",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_numb... |
9355826258 | import requests
import re
def check_link(url_parent, url_child):
    """Print "Yes" if any page linked from url_parent links to url_child, else "No".

    Fetches url_parent, extracts every href value, then fetches each linked
    page in turn and looks for url_child among its href values.
    """
    # [^\"]* instead of the original greedy .* : on a line with several href
    # attributes the greedy match captured everything between the first and
    # the last quote, producing bogus URLs.
    pattern = r"href=\"([^\"]*)\""
    res = requests.get(url_parent)
    if res.status_code == 200:
        all_inclusions = re.findall(pattern, res.text)
    else:
        print("No")
        return
    for link in all_inclusions:
        # A malformed or relative href would raise inside requests.get and
        # abort the whole scan; skip such links instead.
        try:
            res = requests.get(link)
        except requests.RequestException:
            continue
        if res.status_code == 200:
            all_inclusions_this_page = re.findall(pattern, res.text)
            if url_child in all_inclusions_this_page:
                print("Yes")
                return
    print("No")
    return
if __name__ == "__main__":
check_link(input(), input()) | ArtemevIvanAlekseevich/Python_course | module 3/3.3-step_6-check_link.py | 3.3-step_6-check_link.py | py | 625 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 15... |
35599138078 | from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.ar_model import AR
from sklearn.metrics import mean_squared_error
# Load the daily minimum temperatures series.
# NOTE(review): Series.from_csv was removed in pandas 0.25+ and
# statsmodels.tsa.ar_model.AR is deprecated — confirm the pinned versions.
series = Series.from_csv('daily-minimum-temperatures.csv', header=0)
# split dataset: all but the last 7 observations for training, the final
# week for testing (the first observation is dropped from the train set).
X = series.values
train, test = X[1:len(X)-7], X[len(X)-7:]
# train autoregression (lag order selected automatically by fit()).
model = AR(train)
model_fit = model.fit()
# Selected lag length.
print('Lag: %s' % model_fit.k_ar)
# Fitted coefficients.
print('Coefficients: %s' % model_fit.params)
# make predictions for the held-out week (dynamic=False: use true priors).
predictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1, dynamic=False)
for i in range(len(predictions)):
    print('predicted=%f, expected=%f' % (predictions[i], test[i]))
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
# plot the observed test values against the predictions (red).
pyplot.plot(test)
pyplot.plot(predictions, color='red')
pyplot.show() | yangwohenmai/TimeSeriesForecasting | AR自回归模型/自回归模型.py | 自回归模型.py | py | 828 | python | en | code | 183 | github-code | 36 | [
{
"api_name": "pandas.Series.from_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "statsmodels.tsa.ar_model.AR",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklear... |
70295398824 | import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from models.convnet import ConvNet
from utils.data_loader import load_cifar10, create_dataloaders
from utils.train import train
# Use the GPU when available, otherwise fall back to the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# TensorBoard log directory for this exercise.
writer = SummaryWriter('runs/exercise-2_1')
# CIFAR-10 splits and their dataloaders.
train_data, val_data, test_data = load_cifar10()
train_dataloader, val_dataloader, test_dataloader = create_dataloaders(train_data, val_data, test_data, batch_size=32)
# Repeat the comparison over several independent runs (fresh models each
# time); gradients are logged per run via save_gradients=True.
n_runs = 10
for i in range(n_runs):
    n_epochs = 20
    # Plain convolutional network (no residual connections).
    convnet = ConvNet()
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(convnet.parameters(), lr=0.001, momentum=0.9)
    train(epochs=n_epochs, train_dataloader=train_dataloader, val_dataloader=val_dataloader, model=convnet, loss_fn=loss_fn, optimizer=optimizer, device=device, model_name='ConvNet34', writer=writer, save_gradients=True, run_id=i)
    # Same architecture with residual connections enabled, trained with
    # identical hyper-parameters for a fair comparison.
    resnet34 = ConvNet(is_res_net=True)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(resnet34.parameters(), lr=0.001, momentum=0.9)
    train(epochs=n_epochs, train_dataloader=train_dataloader, val_dataloader=val_dataloader, model=resnet34, loss_fn=loss_fn, optimizer=optimizer, device=device, model_name='ResNet34', writer=writer, save_gradients=True, run_id=i)
| simogiovannini/DLA-lab1 | 2_1.py | 2_1.py | py | 1,300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 9,
"usage_type": "call"
},
{
"api_na... |
19033902872 | """Module contains functionality for parsing HTML page of a particular vulnerability."""
import re
import urllib.request
from lxml import etree
from cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers\
.html_parser import HtmlParser
from cve_connector.vendor_cve.implementation.parsers.vendor_parsers.cisco_parsers\
.cisco_cvrf import CiscoXmlParser
from cve_connector.vendor_cve.implementation.vendors_storage_structures.cisco import Cisco
from cve_connector.vendor_cve.implementation.vulnerability_metrics.cvss_v3_metrics import CvssV3
from cve_connector.vendor_cve.implementation.utilities.check_correctness \
import is_correct_cve_id, is_correct_cwe, is_correct_score, \
is_correct_vector_v3
from cve_connector.vendor_cve.implementation.utilities.utility_functions \
import normalize_string, concat_strings, get_current_date, \
string_to_date, get_number_from_string
class CiscoVulnerabilityParser(HtmlParser):
    """
    Contains functionality for parsing the HTML advisory page of a specific
    Cisco vulnerability.

    The parser first tries the machine-readable CVRF XML linked from the
    advisory; when the link is missing or the XML cannot be parsed, it
    falls back to scraping the HTML page itself.
    """

    def __init__(self, url, logger, from_date=None, to_date=None):
        super().__init__(url, from_date, to_date)
        self.date_format = '%Y %B %d'  # e.g. "2018 January 4"
        self.load_content()
        self.cve_details_dict = {}   # CVE id -> per-CVE details text
        self.parsed_cve_ids = []     # CVE ids found in the advisory header
        self.parsed_summary = ''
        self.parsed_advisory_id = ''
        self.parsed_cwes = []
        self.parsed_cvss_base = ''
        self.parsed_cvss_temporal = ''
        self.parsed_attack_vector = ''
        self.parsed_severity = ''
        self.parsed_analysis = ''
        self.parsed_date = get_current_date()
        self.patched = False         # True once a released fix is detected
        self.logger = logger

    def get_content_from_ulr(self):
        """
        Gets and returns content from URL.

        NOTE(review): the method name keeps the original "ulr" typo because
        external callers may depend on it.

        :return: content
        :raises ConnectionError: when the URL does not answer with HTTP 200
        """
        response = urllib.request.urlopen(self.url)
        if response.getcode() != 200:
            self.logger.info("Cisco - get_content_from_url()")
            raise ConnectionError('Unable to load ', self.url)
        content = response.read()
        response.close()
        return content

    def parse(self):
        """
        Provides parsing functionality.

        :return: False when the expected page structure is missing,
                 None otherwise (kept for backward compatibility)
        """
        content_list = self.data.xpath(
            './/div[@id="advisorycontentcontainer"]//div[@class="mainContent"]')
        if not content_list:
            return False
        content = content_list[0]
        advisory_header_list = content.xpath('.//div[@id="advisorycontentheader"]')
        if not advisory_header_list:
            return False
        advisory_header = advisory_header_list[0]
        self.parse_header_items(advisory_header)
        link_to_xml_content = self.get_xml_link(advisory_header)
        correct_parsed_xml = False
        if link_to_xml_content != '':
            correct_parsed_xml = self.parse_xml(link_to_xml_content)
        if link_to_xml_content == '' or not correct_parsed_xml:
            # Fall back to scraping the HTML body. The original called
            # parse_header_items() a second time here, which duplicated every
            # CVE id and CWE in the parsed lists via list.extend().
            advisory_content_body = content.xpath('.//div[@id="advisorycontentbody"]')[0]
            self.parsed_summary = self.parse_summary(advisory_content_body)
            # The original discarded this return value, leaving
            # parsed_analysis permanently empty.
            self.parsed_analysis = self.parse_analysis(advisory_content_body)
            self.check_patched(advisory_content_body)
            if len(self.parsed_cve_ids) == 1:
                i = self.parsed_cve_ids[0]
                self.cve_details_dict[i] = self.parse_details_one_cve(content)
            else:
                details_dict = self.parse_details_more_cves(content)
                self.complete_cve_dictionary(details_dict)
        if correct_parsed_xml:
            self.complete_xml_parsing()
        self.complete_entities()

    def complete_xml_parsing(self):
        """
        Assigns header-derived values to each entity created from the CVRF
        XML (severity, CWEs, advisory id, vector, CVSS scores, date).

        :return: None
        """
        for item in self.entities:
            item.severity = self.parsed_severity
            item.cwes.extend(self.parsed_cwes)
            item.advisory_id = self.parsed_advisory_id
            item.attack_vector = self.parsed_attack_vector
            if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):
                cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)
                if self.parsed_cvss_temporal != '' \
                        and is_correct_score(self.parsed_cvss_temporal):
                    cvss_v3.temporal_sc = self.parsed_cvss_temporal
                item.cvss_v3 = cvss_v3
            item.cvss_base_sc_v3 = self.parsed_cvss_base
            item.cvss_temporal_score_v3 = self.parsed_cvss_temporal
            item.published = self.parsed_date

    def complete_entities(self):
        """
        Creates one Cisco vulnerability entity per parsed CVE and appends
        the valid ones to ``self.entities``.

        :return: None
        """
        for item in self.cve_details_dict:
            cisco = Cisco(cve=item)
            cisco.details = self.cve_details_dict[item]
            cisco.summary = self.parsed_summary
            cisco.advisory_id = self.parsed_advisory_id
            cisco.attack_vector = self.parsed_attack_vector
            cisco.cvss_temporal_score_v3 = self.parsed_cvss_temporal
            cisco.cvss_base_sc_v3 = self.parsed_cvss_base
            if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):
                cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)
                if self.parsed_cvss_temporal != '' and is_correct_score(self.parsed_cvss_temporal):
                    cvss_v3.temporal_sc = self.parsed_cvss_temporal
                cisco.cvss_v3 = cvss_v3
            cisco.severity = self.parsed_severity
            cisco.analysis = self.parsed_analysis
            cisco.description = self.parsed_summary + ' ' \
                + self.parsed_analysis + ' ' + self.cve_details_dict[item]
            cisco.published = self.parsed_date
            cisco.patch_available = self.patched
            for cwe in self.parsed_cwes:
                if is_correct_cwe(cwe):
                    cisco.cwes.append(cwe)
            if cisco.is_valid_entity():
                self.entities.append(cisco)

    def complete_cve_dictionary(self, dct):
        """
        Stores details for every parsed CVE id, defaulting to an empty
        string when the details section did not mention the CVE.

        :param dct: properties of CVEs to be set (dictionary)
        :return: None
        """
        for cve in self.parsed_cve_ids:
            dict_value = ''
            if cve in dct:
                dict_value = dct[cve]
            self.cve_details_dict[cve] = dict_value

    def get_xml_link(self, content):
        """
        Extracts from the content the link to the CVRF XML file.

        :param content: downloaded content
        :return: XML link or empty string
        """
        xml_link_list = content.xpath('.//a[contains(text(), "Download CVRF")]/@href')
        return xml_link_list[0] if xml_link_list else ''

    def parse_xml(self, link):
        """
        Parses the CVRF XML downloaded from link and extends ``entities``.

        :param link: download link
        :return: True if successful
        """
        parser = CiscoXmlParser(link)
        try:
            parser.load_content()
        except ConnectionError as conn_err:
            # %s lazy formatting: the original passed the message and the
            # exception as two positional args with no placeholder, which the
            # logging module rejects and the exception text was lost.
            self.logger.error('Cisco Parser - Error: %s', conn_err)
            return False
        except etree.ParseError as parse_err:
            self.logger.error('Cisco Parser - Error: %s', parse_err)
            return False
        parser.parse()
        entities = parser.entities
        self.entities.extend(entities)
        # Mirrors the original behaviour: a successfully parsed CVRF marks
        # the advisory as patched.
        self.patched = True
        return True

    def parse_details_one_cve(self, content):
        """
        Parses the details section when the advisory covers a single CVE.

        :param content: downloaded content
        :return: string containing details of the CVE
        """
        details_list = content.xpath('.//div[@id="detailfield"]/span//text()')
        return concat_strings(details_list, ' ')

    def parse_details_more_cves(self, content):
        """
        Extracts per-CVE detail paragraphs from the details section.

        :param content: downloaded content
        :return: dictionary mapping CVE id -> details string
        """
        result = {}
        detail = ''
        header_appeared = False
        vuln_headers = content.xpath('.//*[self::strong or self::h3]/text()')
        details_list = content.xpath('.//div[@id="detailfield"]/span//text()')
        for item in details_list:
            item = normalize_string(item)
            if item == '':
                continue
            if item in vuln_headers:
                # A new vulnerability sub-section starts: reset the buffer.
                header_appeared = True
                detail = ''
            elif header_appeared:
                cve_match = self.cve_match(item)
                if cve_match == '':
                    detail += item
                else:
                    result[cve_match] = detail
                    detail = ''
        return result

    def cve_match(self, string):
        """
        Extracts a CVE ID from the string.

        :param string: raw string that might contain CVE ID
        :return: cve or empty string
        """
        pattern_list = [r'assigned the following CVE ID: (CVE-\d+-\d+)',
                        r'ID for this vulnerability is: (CVE-\d+-\d+)']
        for pattern in pattern_list:
            match = re.search('{0}'.format(pattern), string)
            if match:
                cve = match.group(1)
                if is_correct_cve_id(cve):
                    return cve
        return ''

    def parse_analysis(self, content):
        """
        Extracts and returns the analysis text from the content.

        :param content: downloaded content
        :return: analysis
        """
        analysis_list = content.xpath('.//div[@id="analysisfield"]//text()')
        analysis = ''
        for text in analysis_list:
            analysis += normalize_string(text)
        return str(analysis)

    def parse_summary(self, content):
        """
        Extracts and returns the summary text from the content.

        :param content: downloaded content
        :return: summary
        """
        summary_list = content.xpath('.//div[@id="summaryfield"]//text()')
        summary = ''
        for text in summary_list:
            summary += normalize_string(text)
        return summary

    def parse_severity(self, content):
        """
        Extracts and returns the severity from the content.

        :param content: downloaded content
        :return: severity
        :raises ValueError: when the severity badge is missing or ambiguous
        """
        severity_list = content.xpath('.//div[@id="severitycirclecontent"]/text()')
        if len(severity_list) != 1:
            raise ValueError("Wrong parsed severity")
        return str(severity_list[0])

    def parse_header_items(self, header):
        """
        Parses severity, published date, advisory id, CVE ids, CWEs and
        CVSS data from the advisory header table.

        :param header: header of table
        :return: None
        """
        self.parsed_severity = self.parse_severity(header)
        self.parsed_date = self.get_published_date(header)
        advisory_id_list = header.xpath('.//div[@id="ud-advisory-identifier"]'
                                        '/div[@class="divLabelContent"]/text()')
        if len(advisory_id_list) != 1:
            raise ValueError("Wrong parsed advisory id")
        self.parsed_advisory_id = str(advisory_id_list[0])
        cve_list = header.xpath(
            './/div[@class="cve-cwe-containerlarge"]//div[@class="CVEList"]/div/text()')
        self.parsed_cve_ids.extend(i for i in cve_list if is_correct_cve_id(i))
        cwe_list = header.xpath(
            './/div[@class="cve-cwe-containerlarge"]//div[@class="CWEList"]//text()')
        self.parsed_cwes.extend(c for c in cwe_list if is_correct_cwe(c))
        score_list = header.xpath('.//div[contains(@class, "ud-CVSSScore")]//input/@value')
        if score_list:
            base = re.search(r'Base (\d{1,2}\.\d)', score_list[0])
            if base:
                base_sc = get_number_from_string(base.group(1))
                self.parsed_cvss_base = base_sc
            temporal = re.search(r'Temporal (\d.\d)', score_list[0])
            if temporal:
                temp_sc = get_number_from_string(temporal.group(1))
                self.parsed_cvss_temporal = temp_sc
            cvss_vector = re.search(
                r'CVSS:3\.0/AV:\S+/AC:\S+/PR:\S+/UI:\S+/S:\S+/C:\S+/I:\S+/A:\S+/E:\S+/RL:\S+'
                r'/RC:\S+', score_list[0])
            if cvss_vector and is_correct_vector_v3(cvss_vector.group(0)):
                self.parsed_attack_vector = str(cvss_vector.group(0))

    def get_published_date(self, content):
        """
        Extracts and returns the published date from the content, falling
        back to the current date when it cannot be found.

        :param content: downloaded content
        :return: date
        """
        date_list = content.xpath(
            './/div[@id="ud-published"]//div[@class="divLabelContent"]/text()')
        if not date_list:
            return get_current_date()
        # \xa0 is the non-breaking space Cisco uses between date components.
        date_string_list = re.findall(r'\d{4}\xa0\w+\xa0\d+', str(date_list[0]))
        if not date_string_list:
            return get_current_date()
        date_string = date_string_list[0].replace('\xa0', ' ')
        date = string_to_date(date_string, self.date_format)
        return date

    def check_patched(self, content):
        """
        Sets property ``patched`` according to the vendor announcement and
        fixed-software sections of the advisory.

        :param content: downloaded content
        :return: None
        """
        vendor_ann_text = concat_strings(content.xpath(
            './/div[@id="vendorannouncefield"]//text()'))
        fixed_sw_text = concat_strings(content.xpath('.//div[@id="fixedsoftfield"]//text()'))
        if 'has released' in vendor_ann_text:
            self.patched = True
            return
        if 'has released' not in fixed_sw_text or 'not released' in fixed_sw_text:
            self.patched = False
        else:
            self.patched = True
| CSIRT-MU/CRUSOE | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/parsers/vendor_parsers/cisco_parsers/cisco_vulnerability_parser.py | cisco_vulnerability_parser.py | py | 13,807 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers.html_parser.HtmlParser",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cve_connector.vendor_cve.implementation.utilities.utility_functions.get_current_date",
"line_number": 40,
"usage_ty... |
44310786559 | import serial, time, syslog, string
def scoredisp(score):
# initializes the serial port
port = '/dev/ttyACM0'
ard = serial.Serial(port,9600)
# writes the inputted score to the serial port
ard.write(str(score).encode('ascii'))
| RamboTheGreat/Minigame-Race | test.py | test.py | py | 237 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "serial.Serial",
"line_number": 7,
"usage_type": "call"
}
] |
73708087464 | """Covariance-free Partial Least Squares"""
# Author: Artur Jordao <arturjlcorreia[at]gmail.com>
# Artur Jordao
import numpy as np
from scipy import linalg
from sklearn.utils import check_array
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.base import BaseEstimator
from sklearn.preprocessing import normalize
import copy
class CIPLS(BaseEstimator):
"""Covariance-free Partial Least Squares (CIPLS).
Parameters
----------
n_components : int or None, (default=None)
Number of components to keep. If ``n_components `` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
References
Covariance-free Partial Least Squares: An Incremental Dimensionality Reduction Method
"""
def __init__(self, n_components=10, copy=True):
self.__name__ = 'Covariance-free Partial Least Squares'
self.n_components = n_components
self.n = 0
self.copy = copy
self.sum_x = None
self.sum_y = None
self.n_features = None
self.x_rotations = None
self.x_loadings = None
self.y_loadings = None
self.eign_values = None
self.x_mean = None
self.p = []
def normalize(self, x):
return normalize(x[:, np.newaxis], axis=0).ravel()
def fit(self, X, Y):
X = check_array(X, dtype=FLOAT_DTYPES, copy=self.copy)
Y = check_array(Y, dtype=FLOAT_DTYPES, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if np.unique(Y).shape[0] == 2:
Y[np.where(Y == 0)[0]] = -1
n_samples, n_features = X.shape
if self.n == 0:
self.x_rotations = np.zeros((self.n_components, n_features))
self.x_loadings = np.zeros((n_features, self.n_components))
self.y_loadings = np.zeros((Y.shape[1], self.n_components))
self.n_features = n_features
self.eign_values = np.zeros((self.n_components))
self.p = [0] * self.n_components
for j in range(0, n_samples):
self.n = self.n + 1
u = X[j]
l = Y[j]
if self.n == 1:
self.sum_x = u
self.sum_y = l
else:
old_mean = 1 / (self.n - 1) * self.sum_x
self.sum_x = self.sum_x + u
mean_x = 1 / self.n * self.sum_x
u = u - mean_x
delta_x = mean_x - old_mean
self.x_rotations[0] = self.x_rotations[0] - delta_x * self.sum_y
self.x_rotations[0] = self.x_rotations[0] + (u * l)
self.sum_y = self.sum_y + l
t = np.dot(u, self.normalize(self.x_rotations[0].T))
self.x_loadings[:, 0] = self.x_loadings[:, 0] + (u * t)
self.y_loadings[:, 0] = self.y_loadings[:, 0] + (l * t)
for c in range(1, self.n_components):
u -= np.dot(t, self.x_loadings[:, c - 1])
l -= np.dot(t, self.y_loadings[:, c - 1])
self.x_rotations[c] = self.x_rotations[c] + (u * l)
self.x_loadings[:, c] = self.x_loadings[:, c] + (u * t)
self.y_loadings[:, c] = self.y_loadings[:, c] + (l * t)
t = np.dot(u, self.normalize(self.x_rotations[c].T))
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data."""
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
mean = 1 / self.n * self.sum_x
X -= mean
w_rotation = np.zeros(self.x_rotations.shape)
for c in range(0, self.n_components):
w_rotation[c] = self.normalize(self.x_rotations[c])
return np.dot(X, w_rotation.T)
| arturjordao/IncrementalDimensionalityReduction | Code/CIPLS.py | CIPLS.py | py | 4,113 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.normalize",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 48,
"usage_type": "attribute"
},
{
"api... |
36322979415 | #! /usr/bin/env python
import sys
import pygame
import os
import argparse
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from subprocess import Popen
from pygame.locals import *
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
last_image = None
new_image = False
startimg = None
flashimg = None
gphoto_command = ['gphoto2', '--capture-image-and-download', '--filename', '%Y%m%d%H%M%S.jpg']
photo_event = pygame.USEREVENT + 1
class Button:
""" a simple button class to hold all the attributes together and draw itself """
def __init__(self, rect=pygame.Rect(0, 0, 0, 0), color=pygame.Color('WHITE'), caption='Button'):
self.rect = rect
self.color = color
self.caption = caption
self.fsize = 36
def draw(self, surface):
surface.fill(self.color, rect=self.rect)
if (pygame.font):
font = pygame.font.Font('fkfont.ttf', self.fsize)
text = font.render(self.caption, 0, pygame.Color('BLACK'))
textpos = text.get_rect(center=self.rect.center)
surface.blit(text, textpos)
class MyHandler(PatternMatchingEventHandler):
patterns = ["*.jpg", "*.JPG"]
def process(self, event):
"""
event.event_type
'modified' | 'created' | 'moved' | 'deleted'
event.is_directory
True | False
event.src_path
path/to/observed/file
"""
logging.debug ("got something")
logging.debug ((event.src_path, event.event_type))
global last_image
global new_image
logging.debug ("loading image")
last_image = aspect_scale(get_image(event.src_path), (x, y)).convert()
new_image = True
logging.debug ("done loading")
def on_created(self, event):
self.process(event)
def on_modified(self, event):
self.process(event)
def load_resources():
logging.debug ("loading ressources")
global startimg
global flashimg
global bgimg
global cntfont
base_path = './gfx/'
startimg = aspect_scale(pygame.image.load(base_path + 'start.png'), (x, y))
bgimg = aspect_scale(pygame.image.load(base_path + 'BG.png'), (x, y))
flashimg = aspect_scale(pygame.image.load(base_path + 'flash.png'), (x, y))
cntfont = pygame.font.Font('fkfont.ttf', y / 2)
logging.debug ("done loading")
def draw_buttons(surface, sw, sh):
color = pygame.Color('#ee4000')
btnwidth = 250
btnheight = 50
margin = (sw - (2 * btnwidth)) / 3
btnleft = Button(pygame.Rect(margin, sh - btnheight, btnwidth, btnheight), color, 'Start')
btnright = Button(btnleft.rect.move(btnwidth + margin, 0), color, 'Print')
btnleft.draw(surface)
btnright.draw(surface)
def get_image(path):
canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
image = pygame.image.load(canonicalized_path)
return image
def aspect_scale(img, size):
""" Scales 'img' to fit into box bx/by.
This method will retain the original image's aspect ratio """
bx, by = size
ix, iy = img.get_size()
if ix > iy:
# fit to width
scale_factor = bx / float(ix)
sy = scale_factor * iy
if sy > by:
scale_factor = by / float(iy)
sx = scale_factor * ix
sy = by
else:
sx = bx
else:
# fit to height
scale_factor = by / float(iy)
sx = scale_factor * ix
if sx > bx:
scale_factor = bx / float(ix)
sx = bx
sy = scale_factor * iy
else:
sy = by
sx = int(sx)
sy = int(sy)
return pygame.transform.scale(img, (sx, sy))
def end_script():
logging.debug ("exit")
global done
done = True
observer.stop()
observer.join()
def display_count():
global cnt
global screen
screen.blit(bgimg, (0, 0))
text = cntfont.render(str(cnt), 0, pygame.Color('WHITE'))
textpos = text.get_rect(center=screen.get_rect().center)
screen.blit(text, textpos)
cnt = cnt - 1
if __name__ == '__main__':
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("--width", type=int, help="screen width", default=1024)
parser.add_argument("--height", type=int, help="screen height", default=600)
parser.add_argument("--path", help="path to observe", default=".")
parser.add_argument("--fullscreen", "-f", action='store_true', help="run in fullscreen")
parser.add_argument("--delay", "-d", type=int, help="delay before picture is taken", default=5)
args = parser.parse_args()
x = args.width
y = args.height
path = args.path
fullscreen = args.fullscreen
delay = args.delay
observer = Observer()
observer.schedule(MyHandler(), path)
observer.start()
pygame.init()
load_resources()
if(fullscreen):
screen = pygame.display.set_mode((x, y), FULLSCREEN)
else:
screen = pygame.display.set_mode((x, y))
pygame.mouse.set_visible(False)
done = False
clock = pygame.time.Clock()
first_run = True
cnt = 5
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
end_script()
if event.type == KEYDOWN and event.key == K_ESCAPE:
end_script()
if event.type == KEYDOWN and event.key == K_SPACE:
display_count()
pygame.time.set_timer(photo_event, 1000)
pygame.display.flip()
#sub = Popen(['gphoto2','--capture-image-and-download'])
if event.type == photo_event:
if (cnt <= 0):
screen.blit(bgimg, (0, 0))
text = cntfont.render('CHEESE!!', 0, pygame.Color('WHITE'))
textpos = text.get_rect(center=screen.get_rect().center)
screen.blit(text, textpos)
cnt = 5
pygame.time.set_timer(photo_event, 0)
sub = Popen(gphoto_command)
else:
display_count()
pygame.display.flip()
if(last_image and new_image):
logging.debug ("blitting image")
left = (screen.get_width() - last_image.get_width()) / 2
top = (screen.get_height() - last_image.get_height()) / 2
screen.blit(last_image, (left, top))
new_image = False
logging.debug ("done blitting")
draw_buttons(screen, x, y)
pygame.display.flip()
if(not last_image and first_run):
screen.blit(startimg, (0, 0))
first_run = False
draw_buttons(screen, x, y)
pygame.display.flip()
clock.tick(60)
| hreck/PyBooth | pyBooth.py | pyBooth.py | py | 7,007 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pygame.R... |
718080167 | from re import S
import re
from django.db.models.signals import pre_init
from django.shortcuts import render
from .models import *
from .serializers import *
from django.shortcuts import render
from rest_framework import viewsets, mixins, generics
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
import datetime
import time
from rest_framework.parsers import JSONParser
from django.utils import timezone
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, get_list_or_404, reverse
from django.http import (HttpResponse, HttpResponseNotFound, Http404,
HttpResponseRedirect, HttpResponsePermanentRedirect)
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.contrib import auth
import requests
from django.core.mail import send_mail
from rest_framework import status
from django.contrib.auth import authenticate, login
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib import messages
from datetime import datetime, date
from django.core.mail import send_mail
import json
from django.core.serializers.json import DjangoJSONEncoder
import os
from django.views.decorators.cache import cache_control
from django.db.models import Sum
import collections
import json
from datetime import date
from django.contrib.auth.models import User
from django.db.models import Count, Sum
import datetime
from datetime import datetime, timedelta
from django.db.models.functions import TruncMonth, TruncYear
import requests
import json
import random
from django.db.models import Q
import requests
import json
import uuid
def getFoodImageURL(foodName):
    """Return a Pexels photo URL for the given food name.

    Falls back to a generic food photo when the API payload does not
    contain a usable photo entry.
    """
    # NOTE(review): the API key is hard-coded; move it to a setting or
    # environment variable.
    headers = {
        "Authorization": "563492ad6f917000010000013784e527f0764d279ff0e8157222e0d2",
        "Content-Type": "application/json"
    }
    r = requests.get(
        'https://api.pexels.com/v1/search?query={}&per_page=1'.format(foodName), headers=headers)
    data = r.json()
    try:
        return (random.choice(data["photos"])['src']['original']+"?auto=compress")
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare except: only swallow "payload has no photos /
        # unexpected shape" failures, not arbitrary errors.
        return "https://images.pexels.com/photos/1640777/pexels-photo-1640777.jpeg?auto=compress"
class CustomerProfileView(APIView):
    """Return the authenticated user's customer profile."""
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None, **kwargs):
        """Serialize and return the CustomerProfile linked to request.user.

        Responds with 404 when no profile exists; the original
        ``except: pass`` left ``user`` unbound and crashed with a
        NameError (HTTP 500) in that case.
        """
        user = get_object_or_404(CustomerProfile, user=request.user)
        serializer = CustomerProfileSerializer(user)
        return Response(serializer.data)
class DeliveryProfileView(APIView):
    """Return the authenticated user's delivery-boy profile."""
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None, **kwargs):
        """Serialize and return the DeliveryProfile linked to request.user.

        Responds with 404 when no profile exists; the original
        ``except: pass`` left ``user`` unbound and crashed with a
        NameError (HTTP 500) in that case.
        """
        user = get_object_or_404(DeliveryProfile, user=request.user)
        serializer = DeliveryProfileSerializer(user)
        return Response(serializer.data)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def WhoAmI(request):
    """Tell the authenticated caller which role they have.

    Returns ``{"iam": "vendor" | "customer" | "deliveryboy" | "admin"}``.
    Responds 403 when the user matches no role; the original fell through
    and implicitly returned None, which crashes the view.
    """
    data = {}
    # .exists() instead of len(queryset) > 0: one cheap query per check,
    # evaluated lazily instead of fetching all three querysets upfront.
    if Shop.objects.filter(vendor=request.user).exists():
        data['iam'] = "vendor"
    elif CustomerProfile.objects.filter(user=request.user).exists():
        data['iam'] = "customer"
    elif DeliveryProfile.objects.filter(user=request.user).exists():
        data['iam'] = "deliveryboy"
    elif request.user.is_staff:
        data['iam'] = "admin"
    else:
        return HttpResponse(json.dumps({'Error': 'Unknown role'}),
                            status=status.HTTP_403_FORBIDDEN)
    return HttpResponse(json.dumps(data), status=status.HTTP_200_OK)
@api_view(('POST',))
def RegisterNewUserCustomer(request):
    """Create a User plus CustomerProfile from the posted registration form."""
    temp = request.data.copy()
    if User.objects.filter(email=temp['email']).exists():
        return Response({'Error': 'Already Registered with this email'},
                        status=status.HTTP_400_BAD_REQUEST)
    if User.objects.filter(username=temp['username']).exists():
        return Response({'Error': 'This username already exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    try:
        tempUser = User(
            username=temp['username'],
            first_name=temp['full_name'],
            email=temp['email'],
        )
        tempUser.set_password(temp['password'])
        tempUser.save()
        tempCustomerProfile = CustomerProfile(
            user=tempUser,
            phoneNo=temp['phoneNo']
        )
        tempCustomerProfile.save()
    except KeyError as missing:
        # The original bare except echoed the raw payload back — including
        # the submitted password. Report only the missing field.
        return Response({'Error': 'Missing field: {}'.format(missing)},
                        status=status.HTTP_400_BAD_REQUEST)
    return Response(CustomerProfileSerializer(tempCustomerProfile).data,
                    status=status.HTTP_201_CREATED)
@api_view(('POST',))
def RegisterNewUserDeliveryBoy(request):
    """Create a User plus DeliveryProfile from the posted registration form."""
    temp = request.data.copy()
    if User.objects.filter(email=temp['email']).exists():
        return Response({'Error': 'Already Registered with this email'},
                        status=status.HTTP_400_BAD_REQUEST)
    if User.objects.filter(username=temp['username']).exists():
        return Response({'Error': 'This username already exist'},
                        status=status.HTTP_400_BAD_REQUEST)
    try:
        tempUser = User(
            username=temp['username'],
            first_name=temp['full_name'],
            email=temp['email'],
        )
        tempUser.set_password(temp['password'])
        tempUser.save()
        tempDeliveryProfile = DeliveryProfile(
            user=tempUser,
            phoneNo=temp['phoneNo']
        )
        tempDeliveryProfile.save()
    except KeyError as missing:
        # The original bare except echoed the raw payload back — including
        # the submitted password. Report only the missing field.
        return Response({'Error': 'Missing field: {}'.format(missing)},
                        status=status.HTTP_400_BAD_REQUEST)
    return Response(DeliveryProfileSerializer(tempDeliveryProfile).data,
                    status=status.HTTP_201_CREATED)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def LoggedInCustomerOrders(request):
    """List the caller's open (pending or in-order) orders, newest first."""
    open_orders = (CustomerOrder.objects
                   .filter(orderFor=request.user)
                   .filter(Q(status="pending") | Q(status="inorder"))
                   .order_by('-date', '-time'))
    serialized = CustomerOrderSerializer(open_orders, many=True)
    return Response(serialized.data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def CustomerPendingOrders(request):
    """List the caller's orders that are still pending."""
    pending = (CustomerOrder.objects
               .filter(orderFor=request.user)
               .filter(status="pending"))
    serialized = CustomerOrderSerializer(pending, many=True)
    return Response(serialized.data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def ListAllShops(request):
    """Return every registered shop."""
    shops = Shop.objects.all()
    serialized = ShopSerializer(shops, many=True)
    return Response(serialized.data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def ListAllProducts(request):
    """Return every product across all shops."""
    catalogue = Product.objects.all()
    serialized = ProductSerializer(catalogue, many=True)
    return Response(serialized.data, status=status.HTTP_200_OK)
@ api_view(('POST',))
@ permission_classes([IsAuthenticated])
def CustomerBuyProduct(request):
    """Create a CustomerOrder for the caller from the posted cart.

    Expects latitude/longitude/status/addressinwords/typeOfPayment/shopID/
    orderPrice/payment_status plus comma-separated `productId` (and
    optionally `productQuan`) fields. Returns the serialized order.
    """
    data = request.data.copy()
    # NOTE(review): tempProductList is never used — candidate for removal.
    tempProductList = []
    temp = CustomerOrder(
        orderFor=request.user,
        orderImg=getFoodImageURL("food"),
        latitude=data['latitude'],
        longitude=data['longitude'],
        status=data['status'],
        addressinwords=data["addressinwords"],
        typeOfPayment=PaymentCategory.objects.filter(
            name=data["typeOfPayment"]).first(),
        shop=Shop.objects.filter(id=int(data["shopID"])).first(),
        # locality is copied from the shop so the order keeps it even if
        # the shop later changes locality
        locality=Shop.objects.filter(id=int(data["shopID"])).first().locality,
        orderPrice=float(data["orderPrice"]),
        payment_status=data["payment_status"]
    )
    # save first: the M2M `product` relation below needs a primary key
    temp.save()
    productIDS = data['productId'].split(',')
    try:
        quan = data['productQuan'].split(',')
    except:
        quan = []  # quantities are optional; missing key falls back here
    for idx, i in enumerate(productIDS):
        try:
            pro = Product.objects.get(id=int(i))
            temp.product.add(pro)
            new = ProductQuanities(
                product=pro,
                quantity=int(quan[idx]),
                orderID=temp
            )
            new.save()
        except:
            # best-effort: unknown product ids or a missing quantity entry
            # are skipped rather than failing the whole order
            pass
    temp.save()
    return Response(CustomerOrderSerializer(temp).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def CustomerCancelProduct(request):
    """Cancel (delete) one of the caller's orders; respond with its data."""
    data = request.data.copy()
    # Restrict to the caller's own orders — the original let any
    # authenticated user delete any order by id.
    orders = CustomerOrder.objects.filter(id=data['productId'],
                                          orderFor=request.user)
    # Serialize BEFORE deleting: the original serialized the queryset after
    # .delete(), by which point it re-evaluates to an empty result set.
    payload = CustomerOrderSerializer(orders, many=True).data
    orders.delete()
    return Response(payload, status=status.HTTP_200_OK)
@api_view(('GET', 'POST'))
@permission_classes([IsAuthenticated])
def DeliveryPendingOrders(request):
    """GET: list pending orders. POST: claim one for the calling delivery boy."""
    if request.method == "POST":
        payload = request.data.copy()
        order = CustomerOrder.objects.get(id=payload['orderID'])
        order.deliveryboy = DeliveryProfile.objects.get(user=request.user)
        order.status = payload['status']
        order.save()
        return Response(CustomerOrderSerializer(order).data,
                        status=status.HTTP_200_OK)
    pending = CustomerOrder.objects.filter(status="pending")
    return Response(CustomerOrderSerializer(pending, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('GET', 'POST'))
@permission_classes([IsAuthenticated])
def DeliveryinorderOrders(request):
    """List the calling delivery boy's orders that are out for delivery.

    POST is still accepted for backwards compatibility, but its old handler
    was commented out — a POST used to fall through and return None (HTTP
    500). Both methods now serve the same listing.
    """
    me = DeliveryProfile.objects.get(user=request.user)
    in_order = CustomerOrder.objects.filter(deliveryboy=me).filter(status="inorder")
    return Response(CustomerOrderSerializer(in_order, many=True).data,
                    status=status.HTTP_200_OK)
# Vendor
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def AddProduct(request):
    """Create a Product for a shop from the posted form data."""
    data = request.data.copy()
    # The upload is also archived as a StoreImage record; the Product itself
    # stores the raw uploaded file below.
    food = StoreImage(
        image=request.data["image"]
    )
    food.save()
    # (removed an unused `siteLink` local that was computed and discarded)
    temp = Product(
        name=data['name'],
        price=float(data['price']),
        shop=Shop.objects.get(id=int(data["shopID"])),
        category=ProductCategory.objects.get(id=int(data["category"])),
        productImage=data['image'],
    )
    temp.save()
    return Response(ProductSerializer(temp).data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def ListAllProductCategories(request):
    """Return every product category."""
    categories = ProductCategory.objects.all()
    serialized = ProductCategorySerializer(categories, many=True)
    return Response(serialized.data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def UpdateOrderStatus(request):
    """Set a new status on an existing order; 404 if the id is unknown."""
    order = CustomerOrder.objects.filter(
        id=int(request.data["orderID"])).first()
    if order is None:
        # .first() yields None for an unknown id; the original then crashed
        # with an AttributeError (HTTP 500).
        return Response({'Error': 'Order not found'},
                        status=status.HTTP_404_NOT_FOUND)
    order.status = request.data["status"]
    order.save()
    return Response(CustomerOrderSerializer(order).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def AddShop(request):
    """Register a new shop owned by the calling vendor."""
    data = request.data
    locality = ShopLocality.objects.filter(id=int(data["locality"])).first()
    shop = Shop(
        vendor=request.user,
        name=data["name"],
        currentOffer=float(data["currentOffer"]),
        ShopImg=getFoodImageURL('restaurent'),
        locality=locality,
        latitude=float(data["latitude"]),
        longitude=float(data["longitude"]),
        addressinwords=data["addressinwords"],
        phoneNo=data["phoneNo"],
        email=data["email"],
    )
    shop.save()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def AllProductsOfShop(request):
    """List every product sold by the shop named in the request body."""
    shop = Shop.objects.filter(id=request.data["shopID"]).first()
    matching = Product.objects.filter(shop=shop)
    return Response(ProductSerializer(matching, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('POST', 'GET'))
@permission_classes([IsAuthenticated])
def FirebaseTokenView(request):
    """GET: list every stored Firebase token. POST: upsert the caller's token."""
    if request.method != "GET":
        record = FireabaseToken.objects.filter(user=request.user).first()
        if record is None:
            record = FireabaseToken(
                user=request.user,
                token=request.data["token"]
            )
        else:
            record.token = request.data["token"]
        record.save()
        return Response(FireabaseTokenSerializer(record).data,
                        status=status.HTTP_200_OK)
    every_token = FireabaseToken.objects.all()
    return Response(FireabaseTokenSerializer(every_token, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def ShopAnalysis(request):
    """Aggregate a shop's order count and revenue per weekday, month and year.

    Returns {"last_week": ..., "months": ..., "year": ...}, each bucket being
    (label, order_count, revenue). Orders the shop has not accepted
    ("shoppending"/"shoprejected") are excluded from every aggregate.
    """
    shopID = int(request.data['shopID'])
    shop = Shop.objects.filter(id=shopID).first()
    accepted = (CustomerOrder.objects.filter(shop=shop)
                .exclude(status="shoppending")
                .exclude(status="shoprejected"))
    # --- weekly: one bucket per day from last Sunday through today ---
    today = datetime.today().weekday()
    day = datetime.today() - timedelta(days=today + 1)
    last_week = [["Sun", 0, 0], ["Mon", 0, 0], ["Tue", 0, 0], [
        "Wed", 0, 0], ["Thu", 0, 0], ["Fri", 0, 0], ["Sat", 0, 0]]
    for i in range(today + 2):
        temp = (accepted.filter(date=day).values("date")
                .annotate(price=Sum('orderPrice')).annotate(c=Count('id')))
        try:
            last_week[i] = [last_week[i][0], temp[0]["c"], temp[0]["price"]]
        except IndexError:
            pass  # no orders on that day — keep the zero bucket
        day += timedelta(days=1)
    # --- monthly: buckets for the current calendar year ---
    name_months = [("Jan", 0, 0), ("Feb", 0, 0), ("March", 0, 0), ("April", 0, 0), ("May", 0, 0), ("June", 0, 0),
                   ("July", 0, 0), ("August", 0, 0), ("Sept", 0, 0), ("Oct", 0, 0), ("Nov", 0, 0), ("Dec", 0, 0)]
    month = (accepted.annotate(month=TruncMonth('date')).values('month')
             .annotate(price=Sum('orderPrice')).annotate(c=Count('id')))
    for i in month:
        if date.today().year == i['month'].year:
            # BUGFIX: .month is 1-based; the original indexed name_months
            # without -1, shifting every bucket by one month and raising
            # IndexError for December.
            name_months[i['month'].month - 1] = (
                name_months[i['month'].month - 1][0], i["c"], i["price"])
    # --- yearly: the three most recent years with orders ---
    name_year = [[i, 0, 0]
                 for i in range(date.today().year, date.today().year - 3, -1)]
    years = (accepted.annotate(year=TruncYear('date')).values('year')
             .annotate(price=Sum('orderPrice')).annotate(c=Count('id')))[:3]
    for j, i in enumerate(years):
        name_year[j] = [name_year[j][0], i["c"], i["price"]]
    return Response({"last_week": last_week, "months": name_months, "year": name_year}, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def UpdateShopDetails(request):
    """Change a shop's current offer percentage."""
    payload = request.data
    shop = Shop.objects.filter(id=int(payload["shopID"])).first()
    shop.currentOffer = float(payload["currentOffer"])
    shop.save()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def DeleteProduct(request):
    """Delete a product by id; 404 if it does not exist."""
    product = Product.objects.filter(id=int(request.data["prodID"])).first()
    if product is None:
        # .first() returns None for unknown ids; the original then raised
        # AttributeError (HTTP 500) on .delete().
        return Response({'Error': 'Product not found'},
                        status=status.HTTP_404_NOT_FOUND)
    product.delete()
    return Response({}, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def UpdateProduct(request):
    """Update a product's name and price; 404 if it does not exist."""
    data = request.data
    product = Product.objects.filter(id=int(data["prodID"])).first()
    if product is None:
        # .first() returns None for unknown ids; the original then raised
        # AttributeError (HTTP 500).
        return Response({'Error': 'Product not found'},
                        status=status.HTTP_404_NOT_FOUND)
    product.name = data["name"]
    product.price = data["price"]
    product.save()
    return Response(ProductSerializer(product).data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def LoggedInVendorShop(request):
    """Return the shop owned by the authenticated vendor."""
    # (removed an unused `data = request.data` local)
    shop = Shop.objects.filter(vendor=request.user).first()
    return Response(ShopSerializer(shop).data, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def VendorsShopOrders(request):
    """List every order placed at the calling vendor's shop, newest first."""
    # (removed an unused `data = request.data` local)
    shop = Shop.objects.filter(vendor=request.user).first()
    orders = CustomerOrder.objects.filter(shop=shop).order_by('-date', '-time')
    return Response(CustomerOrderSerializer(orders, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def SingleShopDetails(request):
    """Return the calling vendor's shop details."""
    vendor_shop = Shop.objects.filter(vendor=request.user).first()
    return Response(ShopSerializer(vendor_shop).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def SingleShopAllProducts(request):
    """List the products of the shop whose id is in the request body."""
    target = Shop.objects.filter(id=int(request.data["shopID"])).first()
    catalogue = Product.objects.filter(shop=target)
    return Response(ProductSerializer(catalogue, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def UpdateUserDetails(request):
    """Update the caller's customer profile (phone number and display name)."""
    data = request.data
    customer = CustomerProfile.objects.filter(user=request.user).first()
    customer.phoneNo = data["phoneNo"]
    customer.user.first_name = data["first_name"]
    # BUGFIX: the original never called save(), so the update silently did
    # nothing. Persist both modified rows.
    customer.user.save()
    customer.save()
    return Response(CustomerProfileSerializer(customer).data, status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def StoreImageView(request, *args, **kwargs):
    """Persist an uploaded image and return its public URL."""
    # (removed a debug print and an unused `siteLink` local)
    stored = StoreImage(
        image=request.FILES['image']
    )
    stored.save()
    return Response({"url": "{}".format(stored.image.url)}, status=status.HTTP_200_OK)
# NOTE(review): these sandbox credentials are committed in source; move them
# to settings / environment variables before going live.
INSTAMOJO_CLIENT_ID = "test_UnAu7a0tHRsdeequ20AEKVCNR2NHOUpBydi"
INSTAMOJO_CLIENT_SECRET = "test_dzbvZFl6Cl5anSSEwV8wDcgNtAwygXGzi7aPUMgDk2g14lz9U4uiebOB4ZNsqcJhAET3KaN6nhB9Rbj9NDP3ORc6FQRSEF4wYB1jcMidH4miO1HhYsOIx3rI7dN"


def GeneratetOrderIDPayment(name, email, phoneNo, amount):
    """Create an Instamojo sandbox payment request and return its order_id.

    Flow: 1) fetch an OAuth token, 2) create a gateway order for *amount*,
    3) open a payment request against that order. All calls carry a timeout
    so a slow gateway cannot hang the request cycle.
    """
    token = requests.post(
        "https://test.instamojo.com/oauth2/token/",
        data={
            "client_id": INSTAMOJO_CLIENT_ID,
            "client_secret": INSTAMOJO_CLIENT_SECRET,
            "grant_type": "client_credentials"
        },
        timeout=15).json()
    headers = {
        "Authorization": "Bearer {}".format(token["access_token"]),
        "Content-Type": "application/x-www-form-urlencoded",
        "client_id": INSTAMOJO_CLIENT_ID,
        "client_secret": INSTAMOJO_CLIENT_SECRET,
        "grant_type": "client_credentials"
    }
    order = requests.post(
        "https://test.instamojo.com/v2/gateway/orders/",
        data={
            "name": str(name),
            "email": str(email),
            "phone": str(phoneNo),
            "amount": str(amount),
            "transaction_id": uuid.uuid4(),  # unique per attempt
            "currency": "INR",
            "redirect_url": "https://test.instamojo.com/integrations/android/redirect/"
        },
        headers=headers,
        timeout=15).json()
    payment = requests.post(
        "https://test.instamojo.com/v2/gateway/orders/payment-request/",
        data={"id": str(order["order"]["id"])},
        headers=headers,
        timeout=15).json()
    return payment["order_id"]
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def GetOrderID(request):
    """Create an Instamojo payment order for the caller and return its id."""
    caller = request.user
    profile = CustomerProfile.objects.filter(user=caller).first()
    order_id = GeneratetOrderIDPayment(caller.first_name, caller.email,
                                       str(profile.phoneNo),
                                       str(request.data["amount"]))
    return Response({"order_id": order_id}, status=status.HTTP_200_OK)
@api_view(('GET',))
@permission_classes([IsAuthenticated])
def GetDeliveredOrders(request):
    """List the caller's orders that have already been delivered."""
    # CONSISTENCY FIX: orderFor is set/filtered with request.user everywhere
    # else (see CustomerBuyProduct, LoggedInCustomerOrders); the original
    # filtered with the CustomerProfile object, which does not match the FK.
    orders = CustomerOrder.objects.filter(
        orderFor=request.user).filter(status="delivered")
    return Response(CustomerOrderSerializer(orders, many=True).data,
                    status=status.HTTP_200_OK)
@api_view(('POST',))
@permission_classes([IsAuthenticated])
def UpdateDeliveryBoyDetails(request):
    """Update the calling delivery boy's phone number and display name."""
    data = request.data
    profile = DeliveryProfile.objects.filter(user=request.user).first()
    profile.phoneNo = data["phoneNo"]
    profile.user.first_name = data["first_name"]
    # BUGFIX: the original never called save(), so nothing was persisted.
    profile.user.save()
    profile.save()
    # Use the delivery serializer: the original serialized a DeliveryProfile
    # with CustomerProfileSerializer (cf. DeliveryProfileView).
    return Response(DeliveryProfileSerializer(profile).data,
                    status=status.HTTP_200_OK)
| haydencordeiro/FoodDeliveryDjango | food/views.py | views.py | py | 20,986 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "rest_framework.views.APIView",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "rest_framewor... |
35305572933 | from flask import Flask, jsonify, request
from flask_cors import CORS
import database
# Flask application setup for the news-article search service.
app = Flask(__name__)
app.config["ERROR_404_HELP"] = False
# allow all origins for simplicity (tighten in production)
CORS(app)
@app.route("/")
def landing():
return """
Hello, this is the News Article Searcher of Koen Douterloigne! <br>
Please enter any keyword to search for articles containing that keyword<br>
<form action="search" method="post">
<input type="text" name="search" />
</form>
"""
@app.route("/search", methods=['GET', 'POST'])
def search():
data = request.values
query = data['search']
db = database.Database()
results = db.search(query)
if not results:
return f"No results found for search query '{query}' :("
else:
return jsonify(results)
if __name__ == "__main__":
app.run()
| tobneok/isentia_test | server/app.py | app.py | py | 851 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
34124762528 | """
this program is a simulation of the inner planets of our solar system (namely the sun, Mercury,
Venus, Earth and Mars). The planets are objects of the class Planet which enables this class (solarSystemAnimation)
to animate them. The information of the planets can be found in the file PropertiesPlanets.
"""
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
from PlanetClass import Planet
class SolarSystemAnimation(object):
""" this class creates an animation using any planets found in the file PropertiesPlanets.
It then creates a simulation using these planets. There are several plots to display different things
such as the total energy of the system, or their orbit. """
    def __init__(self):
        """Load the planets from file and set the simulation parameters."""
        # these two lists will contain the planets and their patches respectively
        self.listPlanets = []
        self.listPatches = []
        # this gets the information from the file
        self.getInfoFile()
        # simulation parameters; keep timeStep (seconds) small for accuracy
        self.interval = 0
        self.nframes = 9999999999
        self.timeStep = 70000
        # running history of the system's total energy, one entry per frame
        self.listTotalEnergy = []
def getInfoFile(self):
""" this method gets all the information in the file about the planets """
self.f = open("PropertiesPlanets.txt", "r")
# first we ignore the first paragraph where we explain what the file is for.
line = self.f.readline()
while line is not "\n":
line = self.f.readline()
# this gets the information about the planets
self.getInformationPlanets()
self.f.close()
def getInformationPlanets(self):
""" This method gets all the information about the planets from the file. It also
adds a satellite with custrom parameters (but not from the file). """
line = self.f.readline()
while line is not "\n":
try:
planetName = line[:line.index("\n")]
planetMass = self.getNumber()
planetInitPosx = self.getNumber()
planetInitPosy = self.getNumber()
planetInitPos = np.array([planetInitPosx, planetInitPosy])
planetInitVelx = self.getNumber()
planetInitVely = self.getNumber()
planetInitVel = np.array([planetInitVelx, planetInitVely])
planetRadius = self.getNumber()
planetColour = self.getString()
except:
print("There was a problem while getting information from the file.")
quit()
planet = Planet(planetName, planetMass, planetInitPos, planetInitVel, planetRadius, planetColour)
self.listPlanets.append(planet)
self.listPatches.append(planet.patch)
line = self.f.readline()
# we also include a satellite that we implement here
satName = "Satellite"
satMass = 500000
satInitPos = np.array([1.5e+11+100, 0])
satInitVelx = 11500
satInitVely = -800
satInitVel = np.array([satInitVelx, satInitVely+29748.485675745])
satRadius = 928e+6
satColour = "#000000"
Satellite = Planet(satName, satMass, satInitPos, satInitVel, satRadius, satColour)
self.listPlanets.append(Satellite)
self.listPatches.append(Satellite.patch)
def getNumber(self):
""" This is a helper method that reads a line from the file and removes
everythin before the : as that is simply the name of the variable in the file.
it returns the value as a float. """
line = self.f.readline()
# here we convert the line into a float where we remove the characters begore the colon
number = float(line[line.index(':')+1:])
return number
def getString(self):
""" This is a helper method that reads a line from the file and removes
everythin before the : as that is simply the name of the variable in the file.
it returns the value as a string."""
line = self.f.readline()
string = line[line.index(':')+1:line.index('\n')]
return string
    def calculateTotalEnergy(self):
        """Recompute self.totalEnergy = kinetic + potential for all bodies.

        Both terms are delegated to the Planet class helpers and cached on
        the instance (potEnergy / sumKineticEnergy / totalEnergy).
        """
        self.potEnergy = Planet.calculatePotentialEnergy(self.listPlanets)
        self.sumKineticEnergy = Planet.calculateTotalKineticEnergy(self.listPlanets)
        self.totalEnergy = self.sumKineticEnergy + self.potEnergy
    def updateTotalEnergyPlot(self):
        """Append the current total energy and redraw its plot."""
        # recompute and record the y value for this frame
        self.calculateTotalEnergy()
        self.listTotalEnergy.append(self.totalEnergy)
        # clear-and-replot: simple, though the cost grows with frame count
        self.totalEnergyPlot.clear()
        self.totalEnergyPlot.title.set_text("Total Energy of system over time")
        self.totalEnergyPlot.plot(self.listTotalEnergy)
def printTotalEnergyToFile(self, i):
""" this method prints the total energy of the system every nth iteration. """
# reduce the frequency at which the info is written to the file
n = 50
if (i % n == 0):
self.f = open("TotalEnergy.txt", "a+")
self.f.write("Total energy of system: " + str(self.totalEnergy) + "\n")
self.f.close()
def checkDistanceSatMars(self, i):
""" This method checks whether the satelite is close to Mars. if so, it prints the time of the
journey in the legend of the plot "traces orbit". """
for planet in self.listPlanets:
if planet.name == "Satellite":
if (planet.checkDistMars(i, self.timeStep, self.listPlanets)):
for i in range(len(self.textLegendOrbit)):
if self.textLegendOrbit[i] == planet.name:
self.textLegendOrbit[i] = planet.name + " time to Mars: " + str(round(planet.distanceToMars, 7))
self.tracesOrbitPlot.legend(self.tracesOrbitPlot.lines[1:], self.textLegendOrbit, loc='lower left', bbox_to_anchor=(0.0, -0.6))
def checkOrbitPlanets(self, i):
""" This method checks if the planet has gone around then sun. If so it displays
the time it took for that planet to go around the sun in the legend of the plot "traces orbit". """
for planet in self.listPlanets:
if (planet.name != "Sun"):
if (planet.checkOrbitalPeriod(i, self.timeStep, self.listPlanets)):
for i in range(len(self.textLegendOrbit)):
if self.textLegendOrbit[i] == planet.name:
self.textLegendOrbit[i] = planet.name + " orbit: " + str(round(planet.orbitalPeriod, 7))
self.tracesOrbitPlot.legend(self.tracesOrbitPlot.lines[1:], self.textLegendOrbit, loc='lower left', bbox_to_anchor=(0.0, -0.6))
    def updateDisplays(self, i):
        """Refresh every plot for frame *i* (energy, orbit traces, legends)."""
        self.updateTotalEnergyPlot()
        self.printTotalEnergyToFile(i)
        # redraw the orbit traces from each planet's accumulated positions
        self.tracesOrbitPlot.lines = []
        for planet in self.listPlanets:
            planet.trail = self.tracesOrbitPlot.plot(planet.positionsx, planet.positionsy, color=planet.colour, linewidth=0.5)
        # update legend annotations (satellite arrival / orbital periods)
        self.checkDistanceSatMars(i)
        self.checkOrbitPlanets(i)
        self.fig.canvas.draw()
    def stepForwardSimulation(self):
        """Advance every planet one timeStep using the Beeman scheme.

        The phase order matters: all positions first, then all
        accelerations, then all velocities, so each body sees a consistent
        system state within a step.
        """
        # move the patches to the planets' current positions
        for i in range(len(self.listPatches)):
            self.listPatches[i].center = self.listPlanets[i].pos
        # 1) positions for the next iteration
        for planet in self.listPlanets:
            planet.calculatePOS(self.timeStep)
        # 2) accelerations from the new positions
        for planet in self.listPlanets:
            planet.calculateACC(self.listPlanets)
        # 3) velocities for the next iteration
        for planet in self.listPlanets:
            planet.calculateVEL(self.timeStep)
        # 4) commit the new accelerations for the next step
        for planet in self.listPlanets:
            planet.updateACC()
    def animate(self, i):
        """FuncAnimation callback: redraw displays, then step the physics."""
        self.updateDisplays(i)
        self.stepForwardSimulation()
        return self.listPatches
    def initAnim(self):
        """FuncAnimation init: add each planet's patch to the simulation axes."""
        for patch in self.listPatches:
            self.simulationPlot.add_patch(patch)
        return self.listPatches
def run(self):
""" This method launches the animation, it is called outside the class to start the
animation. """
# we first create the plot and axes
self.fig = plt.figure(figsize=(7, 7))
self.simulationPlot = plt.subplot(2,2,1)
self.tracesOrbitPlot = plt.subplot(2,2,2)
self.totalEnergyPlot = plt.subplot(2,2,3)
# set up the axes for simulationPlot
self.simulationPlot.axis('scaled')
self.simulationPlot.title.set_text("Simulation of planets")
maxOrbitalR = 0
for planet in self.listPlanets:
if planet.pos[0] > maxOrbitalR:
maxOrbitalR = planet.pos[0]
scaleUp = 1.1 * maxOrbitalR
self.simulationPlot.set_xlim(-scaleUp, scaleUp)
self.simulationPlot.set_ylim(-scaleUp, scaleUp)
# set up the axes for tracesOrbitPlot
self.tracesOrbitPlot.axis('scaled')
self.tracesOrbitPlot.title.set_text("Traces of planets in orbit")
self.tracesOrbitPlot.set_xlim(-scaleUp, scaleUp)
self.tracesOrbitPlot.set_ylim(-scaleUp, scaleUp)
for planet in self.listPlanets:
planet.trail = self.tracesOrbitPlot.plot(planet.positionsx, planet.positionsy, color=planet.colour, linewidth=0.5, label='orbit of ' + planet.name)
self.textLegendOrbit = []
for planet in self.listPlanets:
if planet.name != "Sun":
self.textLegendOrbit.append(planet.name)
self.tracesOrbitPlot.legend(self.tracesOrbitPlot.lines[1:], self.textLegendOrbit, loc='lower left', bbox_to_anchor=(0.0, -0.6))
# create the animator
FuncAnimation(self.fig, self.animate, init_func = self.initAnim, frames = self.nframes, repeat = False, interval = self.interval, blit = True)
# show the plot
plt.show()
def main():
    """Entry point: build the solar-system animation and run it."""
    anim = SolarSystemAnimation()
    anim.run()


if __name__ == "__main__":
    # guard so importing this module does not launch the GUI
    main()
{
"api_name": "numpy.array",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "PlanetClass.Planet",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_nu... |
19743419969 | from os import sep
from subprocess import call
import click
# Path to the alembic config file, normalized to the OS-native separator.
path_ini_alembic_file = 'app_config/config_files/alembic.ini'.replace('/', sep)
@click.group('db')
def db():
    """Database migration commands (thin wrappers around alembic)."""
    ...
@db.command()
@click.option('-m', 'message', default='migração via CLI',
              help='Mensagem para identificar a migrations do alembic')
def makemigration(message):
    """Generate an alembic revision via autogenerate."""
    cmd = ['alembic', '-c', path_ini_alembic_file,
           'revision', '--autogenerate', '-m', message]
    call(cmd)
@db.command()
def migrate():
    """Apply all pending alembic migrations (upgrade to head)."""
    upgrade_cmd = ['alembic', '-c', path_ini_alembic_file, 'upgrade', 'head']
    call(upgrade_cmd)
@db.command()
@click.option('-m', 'message', default='migração via CLI',
              help='Mensagem para identificar a migrations do alembic')
def initialize(message):
    """Placeholder command: database initialization is not implemented yet."""
    ...
| isaquefel/ensaio_app | app_rotinas/cli/migrations_management.py | migrations_management.py | py | 760 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.sep",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "click.group",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_number"... |
17078297023 | from django.shortcuts import render
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from .forms import Contact_us_form, SupportForm
import urllib
import json
def contact_us(request):
    """Contact-us form view.

    POST: validate the form, verify Google reCAPTCHA, e-mail the message to
    the site owner and persist it, then render the success page. A failed
    captcha or invalid form falls through to re-render the form (bound, so
    validation errors are shown). GET: render an empty form.
    """
    if request.method == 'POST':
        form = Contact_us_form(request.POST)
        if form.is_valid():
            # NOTE(review): this local deliberately shadows the view
            # function name inside this scope; it is the unsaved model
            # instance from the form.
            contact_us = form.save(commit=False)
            ''' Begin reCAPTCHA validation '''
            recaptcha_response = request.POST.get('g-recaptcha-response')
            url = 'https://www.google.com/recaptcha/api/siteverify'
            values = {
                'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
                'response': recaptcha_response
            }
            data = urllib.parse.urlencode(values).encode()
            req = urllib.request.Request(url, data=data)
            response = urllib.request.urlopen(req)
            result = json.loads(response.read().decode())
            ''' End reCAPTCHA validation '''
            if result['success']:
                # logged-in users are identified by their account; anonymous
                # visitors supply name/email in the form
                if request.user.is_authenticated:
                    contact_us.email = request.user.email
                    contact_us.user = request.user.username
                    logined = True
                else:
                    contact_us.user=request.POST['text']
                    contact_us.email=request.POST['email']
                    logined = False
                send_mail(
                    'Contact Us from "{}" (Logined: {})'.format(contact_us.email, logined),
                    contact_us.body,
                    contact_us.email,
                    [settings.GMAIL_MAIL],
                    fail_silently=False,
                )
                contact_us.save()
                return render(request, 'get_in_touch/contact_us_success.html')
    else:
        form = Contact_us_form()
    # reached on GET, invalid form, or failed captcha
    context ={'form': form}
    return render(request, 'get_in_touch/contact_us.html', context)
def support(request):
    """Support-request form view.

    POST: validate the form, verify Google reCAPTCHA, e-mail the request
    (with its problem category) to the site owner and persist it, then
    render the success page. A failed captcha or invalid form falls through
    to re-render the form. GET: render an empty form.
    """
    if request.method == 'POST':
        form = SupportForm(request.POST)
        if form.is_valid():
            # unsaved model instance built from the form
            support = form.save(commit=False)
            ''' Begin reCAPTCHA validation '''
            recaptcha_response = request.POST.get('g-recaptcha-response')
            url = 'https://www.google.com/recaptcha/api/siteverify'
            values = {
                'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,
                'response': recaptcha_response
            }
            data = urllib.parse.urlencode(values).encode()
            req = urllib.request.Request(url, data=data)
            response = urllib.request.urlopen(req)
            result = json.loads(response.read().decode())
            ''' End reCAPTCHA validation '''
            if result['success']:
                # logged-in users are identified by their account; anonymous
                # visitors supply name/email in the form
                if request.user.is_authenticated:
                    support.email = request.user.email
                    support.user = request.user.username
                    logined = True
                else:
                    support.user=request.POST['text']
                    support.email=request.POST['email']
                    logined = False
                send_mail(
                    'Support ({}) from "{}" (Logined: {})'.format(support.get_problem_display(), support.email, logined),
                    support.body,
                    support.email,
                    [settings.GMAIL_MAIL],
                    fail_silently=False,
                )
                support.save()
                return render(request, 'get_in_touch/support_success.html')
    else:
        form = SupportForm()
    # reached on GET, invalid form, or failed captcha
    context ={'form': form}
    return render(request, 'get_in_touch/support.html', context)
{
"api_name": "forms.Contact_us_form",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.GOOGLE_RECAPTCHA_SECRET_KEY",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 23,
"usage_type": "name... |
39763274152 | from django.conf.urls import url
from django.urls import path
from . import views
# Route table for the assignments app. Mixes legacy regex routes (url) with
# newer path() routes; 'da/' is the delete endpoint that replaced the
# commented-out regex route below.
urlpatterns = [
    url(r'^$', views.assignments, name='assignments'),
    url(r'^addnewassignments/$', views.addnewassignments, name='addnewassignments'),
    # url(r'^deleteassignments/$', views.deleteassignments, name='deleteassignments'),
    url(r'^editassignments/$', views.editassignments, name='editassignments'),
    path('da/', views.deleteassignments, name='da'),
    path('allsubmissions/<assid>/', views.allsubmissions, name='allsubmissions'),
    path('evaluate/<submissionid>/', views.evaluate, name='evaluate'),
    path('submitgrade/<submissionid>/', views.submitgrade, name='submitgrade'),
    path('signout/', views.signout, name='signout'),
]
{
"api_name": "django.conf.urls.url",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.ur... |
11538172081 | #!/usr/bin/python3
""" a module that queries API """
from requests import get
def top_ten(subreddit):
""" A function that queries the Reddit API
Args:
subreddit (str): the name of the subreddit
Returns:
str: print valid titles
"""
load = {'limit': 10}
headers = {'User-Agent': 'MyRedditScraper/1.0'}
url = f'https://www.reddit.com/r/{subreddit}/hot.json'
# Set a custom User-Agent to avoid API rate limiting
response = get(url, headers=headers, params=load, allow_redirects=False)
if response.status_code == 200:
data = response.json().get('data')
for val in data['children']:
print(val['data']['title'])
else:
print(None)
| Rashnotech/alx-system_engineering-devops | 0x16-api_advanced/1-top_ten.py | 1-top_ten.py | py | 737 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
}
] |
22140484347 | from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.http.response import HttpResponseForbidden, JsonResponse
from django.shortcuts import redirect, get_object_or_404
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
from .models import ShortenURL
from .src.constants import URL_ENC
from .src.base62 import encode_base62, decode_base62
def index(request): # encode_url
""" [GET /] """
if request.method != "GET":
return HttpResponseForbidden()
return HttpResponse(
loader.get_template('url_shortener/index.html')
.render( {}, request )
)
@csrf_exempt
def post_encode_url(request):
""" [POST /enc-url] """
if request.method != "POST":
return HttpResponseForbidden()
response_data = {
"shorten_url": None,
"message": None
}
status_code = 200
try:
# request body should include URL like { "url": "https://www.github.com" }
print(request.POST)
url_fetched = request.POST.get("url")
if not url_fetched.endswith("/"):
url_fetched += "/"
# add 'https://' or 'http://' if url does not start with them
url_record = None
if url_fetched.startswith("https://") or url_fetched.startswith("http://"):
url_record = ShortenURL.objects.filter(url=url_fetched)
else:
url_record = ShortenURL.objects.filter(url="https://" + url_fetched)
if not url_record:
url_record = ShortenURL.objects.filter(url="http://" + url_fetched)
# if the url dose not exist in the table, insert new record
if not url_record:
url_record = ShortenURL(url=url_fetched)
url_record.save()
else:
url_record = url_record[0]
# response 200 ok
response_data["shorten_url"] = request.build_absolute_uri()[:-len(URL_ENC)] \
+ encode_base62(url_record.id)
response_data["message"] = "Success! You may copy the shorten URL above."
except ValidationError as e:
# URL is not in vaild form
if "url" in e.message_dict:
response_data["message"] = "The URL may be invalid. Try something else."
status_code = 400
else:
response_data["message"] = "Sorry. There is a problem with the service."
status_code = 500
response = JsonResponse(response_data)
response.status_code = status_code
return response
def get_decode_url(request, shorten_url):
""" [GET /[url_shorten]] """
if request.method != "GET":
return HttpResponseForbidden()
return redirect(
get_object_or_404(ShortenURL, pk=decode_base62(shorten_url)).url
)
| njsh4261/url_shortener | backend/url_shortener/views.py | views.py | py | 2,785 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.response.HttpResponseForbidden",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.template.loader.get_template",
"line_number": 17,
"usage_type"... |
74838938664 |
# environment
import sys, os
import argparse
import json
from board import Tiles, Board
from player import Player
import shape
def pprint(thing):
sys.stdout.write(thing + '\n')
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
player = []
parser.add_argument("--players_allocate", default = "AI,AI", help = "indicate player type and order")
parser.add_argument("--extra", help = "extra info")
args = parser.parse_args()
if args.players_allocate:
pa = args.players_allocate.split(',')
if len(pa) != 2:
raise ValueError("--player_allocate must have two arguments!")
player.append(Player(0, 0, -1))
player.append(Player(0, 1, -1))
if not args.extra is None:
pass
board = Board()
history = {}
history['step'] = []
output = {}
output["status"] = "Success"
output["action_player_id"] = 0
output["state"] = board.board.tolist()
pprint(json.dumps(output))
isOver = False
while True:
jsInfo = sys.stdin.readline().rstrip()
info = json.loads(jsInfo)
act = info['action']
isPass = info['is_pass']
playerOrder = info['action_player_id']
output = {}
if isPass:
if isOver:
output['status'] = "Over"
output['result'] = {
"record" : json.dumps(history),
"score" : [p.score for p in player],
"winner_id" : 0
}
if player[0].score < player[1].score:
output['result']['winner_id'] = 1
elif player[0].score == player[1].score:
output['result']['winner_id'] = -1
pprint(json.dumps(output))
break
output["status"] = "Success"
output["action_player_id"] = playerOrder ^ 1
output["state"] = board.board.tolist()
pprint(json.dumps(output))
isOver = True
continue
isOver = False
tile = []
tileSize = len(act)
minx = 14
miny = 14
for i in range(tileSize):
x = act[i]['row']
y = act[i]['col']
tile.append([x, y])
minx = min(minx, x)
miny = min(miny, y)
try:
result = board.dropTile(playerOrder, tile)
except Exception as e:
output['status'] = "Error"
output['reason'] = str(e)
pprint(json.dumps(output))
break
else:
if result:
output = {}
step = {}
step["player"] = playerOrder
step["action"] = act
step["state"] = {}
history["step"].append(step)
for i in range(tileSize):
tile[i][0] -= minx
tile[i][1] -= miny
tile.sort()
rotf = 0
for t in range(21):
if shape.tileSizes[t] != tileSize:
continue
if tile in shape.shapeSet[t]:
player[playerOrder].used[t] = True
rotf = shape.shapeSet[t].index[tile]
break
player[playerOrder].score += tileSize
output['status'] = "Success"
output['action_player_id'] = playerOrder ^ 1
output['state'] = board.board.tolist()
pprint(json.dumps(output))
| FineArtz/Game3_Blokus | environment.py | environment.py | py | 3,602 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stdout.write",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"l... |
73488163624 |
class Animal:
is_alive: bool = True
def breeze(self):
print("I'm breezing")
class Mammal(Animal):
leg_amount: int
kid_food_type: str = 'Milk'
def voice(self):
raise NotImplementedError
def do_bad_things(self):
raise NotImplementedError
class Cat(Mammal):
def voice(self):
print('Meow')
class Dog(Mammal):
def voice(self):
print('Guf!')
#pass
class CatDog(Cat, Dog):
pass
dog = Dog()
cat = Cat()
#cat.voice()
#dog.voice()
catdog = CatDog()
catdog.voice()
#animals = [cat, dog]
#for animal in animals:
# animal.voice()
from datetime import datetime
class Human:
first_name: str
last_name: str
def __digest_food(self):
print("I'm digesting")
def eat(self):
self.__digest_food()
def __init__(self):
self.first_name = 'Ivan'
@staticmethod
def print_current_time():
print(datetime.now())
@classmethod
def get_list_of_attributes(cls):
return['first name', 'last_name']
h = Human()
h.eat()
# h._Human__digest_food()
print(CatDog.mro())
h.print_current_time()
print(Human.get_list_of_attributes())
print(type(type))
NewHuman = type('NewHuman', (Human,), {'power': 100500, 'can_die': False})
newhuman = NewHuman
print(newhuman.power, newhuman.can_die)
class Configuration:
_instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls._instance, cls):
cls._instance = object.__new__(cls, *args, **kwargs)
return cls._instance
config = Configuration()
config2 = Configuration()
print(config2 is config)
from dataclasses import dataclass
from typing import List
@dataclass
class Player:
full_name: str
@dataclass
class Coach:
full_name: str
@dataclass
class Team:
players: List[Player]
coach: Coach
players = [Player(full_name='Roberto Carlos'), Player(full_name='Roberto Pirlo')]
coach = Coach ('Jurgen Klopp')
dream_team = Team(players=players, coach=oach)
| VladPetrov19/Lessons | venv/lesson_14.py | lesson_14.py | py | 2,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 114,
"usage_type": "name"
},
{
"api_name": "datacl... |
16583267084 | from datetime import datetime, date
from email.mime.text import MIMEText
from flask import Flask
import os
import schedule
import smtplib
import time
# import threading
from mailjet_rest import Client
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from config import *
startupTs = datetime.now()
global env_
env_ = env('_ENV')
# env_ = 'prod'
app = Flask(__name__)
# This works on google app engine but appends a different email suffix that makes it look like spam
def mailjet():
today = date.today()
# Textual month, day and year
today_ = today.strftime("%B %d, %Y")
api_key = config[env_].MAILJET_KEY
api_secret = config[env_].MAILJET_SECRET
mailjet = Client(auth=(api_key, api_secret), version='v3.1')
data = {
'Messages': [
{
"From": {
"Email": f"{config[env_].user1}",
"Name": f"{config[env_].name1.split(' ')[0]}"
},
"To": [
{
"Email": f"{config[env_].user1}",
"Name": f"{config[env_].name1.split(' ')[0]}"
}
],
"Subject": f'MNPD COVID-19 Vaccine Standby List: {config[env_].name1}, {today_}',
"TextPart": "My first Mailjet email",
"HTMLPart": f'''
Hello,
Reaching out to be entered into the Metro Nashville Public Health Department COVID-19 Vaccine Standby List!
Contact Info:
Name: {config[env_].name1}
Phone: {config[env_].ph1}
Thank you,
-{config[env_].name1.split(' ')[0]}
''',
"CustomID": ""
}
]
}
result = mailjet.send.create(data=data)
print(result.status_code)
print(result.json())
return
# this does not seem to work on google app engine, but does work locally and does not look like spam. will get this going in cron
def send_emails():
today = date.today()
# Textual month, day and year
today_ = today.strftime("%B %d, %Y")
############################################# USER1 ############################################
# connect with Google's servers
smtp_ssl_host = 'smtp.gmail.com'
smtp_ssl_port = 465
# use username or email to log in
username = config[env_].user1
password = config[env_].pw1
name = config[env_].name1
ph = config[env_].ph1
from_addr = config[env_].user1
to_addrs = config[env_].to_addr
# the email lib has a lot of templates
# for different message formats,
# on our case we will use MIMEText
# to send only text
message = MIMEText(f'''
Hi,
Reaching out to be added to the Metro Nashville Public Health Department COVID-19 Vaccine standby list.
Contact Info:
Name: {name}
Ph: {ph}
Thank you!
-{name.split(' ')[0]}
''')
message['subject'] = f'MNPD COVID-19 Vaccine Standby List: {name}, {today_}'
message['from'] = from_addr
message['to'] = ', '.join([to_addrs])
# we'll connect using SSL
server = smtplib.SMTP_SSL(smtp_ssl_host, smtp_ssl_port)
# to interact with the server, first we log in
# and then we send the message
server.login(username, password)
try:
server.sendmail(from_addr, to_addrs, message.as_string())
print(f'''Successfully sent email from {name.split(' ')[0]} at {datetime.now()}''')
except Exception as e:
print(e)
############################################# USER2 ############################################
# time.sleep(5) # seconds
# use username or email to log in
username = config[env_].user2
password = config[env_].pw2
name = config[env_].name2
ph = config[env_].ph2
from_addr = config[env_].user2
to_addrs = config[env_].to_addr
# the email lib has a lot of templates
# for different message formats,
# on our case we will use MIMEText
# to send only text
message = MIMEText(f'''
Hello,
Reaching out to be entered into the Metro Nashville Public Health Department COVID-19 Vaccine Standby List!
Contact Info:
Name: {name}
Phone: {ph}
Thank you,
-{name.split(' ')[0]}
''')
message['subject'] = f'MNPD COVID-19 Vaccine Standby List: {name}, {today_}'
message['from'] = from_addr
message['to'] = ', '.join([to_addrs])
# we'll connect using SSL
server = smtplib.SMTP_SSL(smtp_ssl_host, smtp_ssl_port)
# to interact with the server, first we log in
# and then we send the message
server.login(username, password)
try:
server.sendmail(from_addr, to_addrs, message.as_string())
print(f'''Successfully sent email from {name.split(' ')[0]} at {datetime.now()}''')
except Exception as e:
print(e)
server.quit()
return
# sendgrid's setup was a pain so i abandoned this
# def sendgrid():
# message = Mail(
# from_email=config[env_].user1,
# to_emails=config[env_].to_addr,
# subject='Sending with Twilio SendGrid is Fun',
# html_content='<strong>and easy to do anywhere, even with Python</strong>')
# try:
# sg = SendGridAPIClient(config[env_].SENDGRID_API_KEY)
# response = sg.send(message)
# print(response.status_code)
# print(response.body)
# print(response.headers)
# except Exception as e:
# print(e.message)
# return
# Scheduling Part of Script
# def background_thread():
# schedule_thread = threading.Thread(
# target=schedules)
# schedule_thread.start()
# return '{}'
def schedules():
print(f'Starting service at {startupTs} in Env: {env_}')
send_emails()
schedule.every(config[env_].refresh["frequency"]).minutes.do(send_emails)
while True:
schedule.run_pending()
time.sleep(3600) # checks if any pending jobs every 3600 seconds -> 1 hour
return
# End of scheduling part
def test():
schedules()
# mailjet()
return
if __name__ == '__main__':
try:
app.run(test())
except Exception as e:
print('app kickoff error: ', e)
| wjewell3/email | main.py | main.py | py | 6,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.date.tod... |
74833825384 | import sqlite3
import argparse
import logging
# Optional argument to use a listed database file. otherwise use vics.sqlite
# argparse with usage
# If no vics.sqlite3 then create it, and make the 'all' table.
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="db_file", help="Optional. Path to vics database, if you do not want to use vics.sqlite")
args = parser.parse_args()
if args.db_file:
db_file = args.db_file
else:
db_file = "vics.sqlite"
logging.info(f"database file: {db_file}")
table_creation_string = '''CREATE TABLE el_todo (date text, b64image text, sha text, tags text)'''
def create_new_database(sqlitedb_filename):
con = sqlite3.connect(sqlitedb_filename)
cur = con.cursor()
cur.close()
def sqlite_table_schema(conn, name):
"""Return a string representing the table's CREATE. via https://techoverflow.net/2019/10/14/how-to-get-schema-of-sqlite3-table-in-python/"""
con = sqlite3.connect(sqlitedb)
cur = con.cursor()
cursor = conn.execute("SELECT sql FROM sqlite_master WHERE name=?;", [name])
sql = cursor.fetchone()[0]
cursor.close()
return sql
def old_stuff_from_first_session():
try:
el_todo_schema = sqlite_table_schema(con, 'el_todo')
if table_creation_string != el_todo_schema:
schema_mismatch_error = f"schema mismatch. \n\nExpected: {table_creation_string}\nFound: {el_todo_schema}\n"
logging.critical(schema_mismatch_error)
exit(schema_mismatch_error)
except TypeError:
logging.info("Table 'el_todo' not found, creating.")
cur.execute(table_creation_string)
con.commit()
date = "2021-05-05"
b64image = "abcdefg1234"
sha = "123"
tags = "test baddata notanimage"
cur.execute("insert into el_todo values (?, ?, ?, ?)", (date, b64image, sha, tags))
con.commit()
con.close()
| fine-fiddle/vics | vics.py | vics.py | py | 1,888 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",... |
74863821545 | import json
import unittest
from api.tests.base import BaseTestCase
class TestSimulationsService(BaseTestCase):
""" Tests for the Simulation Service """
def test_simulations(self):
""" Ensure the /ping route behaves correctly. """
response = self.client.get("/simulations/ping")
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn("pong!", data["message"])
self.assertIn("success", data["status"])
if __name__ == "__main__":
unittest.main()
| door2door-io/mi-code-challenge | backend/api/tests/test_simulations.py | test_simulations.py | py | 555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.tests.base.BaseTestCase",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "unittest.main",
"line_number": 20,
"usage_type": "call"
}
] |
24797529159 | #coding=utf-8
"""
PGCNet batch data generator
two different type input :point cloud and multi-view image
__author__ = Cush shen
"""
import numpy as np
from tqdm import tqdm
import h5py
import time
import tensorflow as tf
image_color_gray = 158
image_color_white = 255
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
f = h5py.File(h5_filename)
data = f['data'][:]
label = f['label'][:]
return data, label
def loadDataFile(filename):
return load_h5(filename)
def get_model_learning_rate(
learning_policy, base_learning_rate, learning_rate_decay_step,
learning_rate_decay_factor, training_number_of_steps, learning_power,
slow_start_step, slow_start_learning_rate):
"""Gets model's learning rate.
Computes the model's learning rate for different learning policy.
Right now, only "step" and "poly" are supported.
(1) The learning policy for "step" is computed as follows:
current_learning_rate = base_learning_rate *
learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)
See tf.train.exponential_decay for details.
(2) The learning policy for "poly" is computed as follows:
current_learning_rate = base_learning_rate *
(1 - global_step / training_number_of_steps) ^ learning_power
Args:
learning_policy: Learning rate policy for training.
base_learning_rate: The base learning rate for model training.
learning_rate_decay_step: Decay the base learning rate at a fixed step.
learning_rate_decay_factor: The rate to decay the base learning rate.
training_number_of_steps: Number of steps for training.
learning_power: Power used for 'poly' learning policy.
slow_start_step: Training model with small learning rate for the first
few steps.
slow_start_learning_rate: The learning rate employed during slow start.
Returns:
Learning rate for the specified learning policy.
Raises:
ValueError: If learning policy is not recognized.
"""
global_step = tf.train.get_or_create_global_step()
if learning_policy == 'step':
learning_rate = tf.train.exponential_decay(
base_learning_rate,
global_step,
learning_rate_decay_step,
learning_rate_decay_factor,
staircase=True)
elif learning_policy == 'poly':
learning_rate = tf.train.polynomial_decay(
base_learning_rate,
global_step,
training_number_of_steps,
end_learning_rate=0,
power=learning_power)
else:
raise ValueError('Unknown learning policy.')
return tf.where(global_step < slow_start_step, slow_start_learning_rate,
learning_rate)
def _gather_loss(regularization_losses, scope):
"""
Gather the loss.
Args:
regularization_losses: Possibly empty list of regularization_losses
to add to the losses.
Returns:
A tensor for the total loss. Can be None.
"""
sum_loss = None
# Individual components of the loss that will need summaries.
loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
all_losses = []
losses = tf.get_collection(tf.GraphKeys.LOSSES, scope)
if losses:
loss = tf.add_n(losses, name='losses')
all_losses.append(loss)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
all_losses.append(regularization_loss)
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if loss is not None:
tf.summary.scalar('/'.join(filter(None, ['Losses', 'loss'])), loss)
if regularization_loss is not None:
tf.summary.scalar('Losses/regularization_loss', regularization_loss)
return sum_loss
def _optimize(optimizer, regularization_losses, scope, **kwargs):
"""
Compute losses and gradients.
Args:
optimizer: A tf.Optimizer object.
regularization_losses: Possibly empty list of regularization_losses
to add to the losses.
**kwargs: Dict of kwarg to pass to compute_gradients().
Returns:
A tuple (loss, grads_and_vars).
- loss: A tensor for the total loss. Can be None.
- grads_and_vars: List of (gradient, variable). Can be empty.
"""
sum_loss = _gather_loss(regularization_losses, scope)
grad = None
if sum_loss is not None:
grad = optimizer.compute_gradients(sum_loss, **kwargs)
return sum_loss, grad
def _gradients(grad):
"""
Calculate the sum gradient for each shared variable across all clones.
This function assumes that the grad has been scaled appropriately by
1 / num_clones.
Args:
grad: A List of List of tuples (gradient, variable)
Returns:
tuples of (gradient, variable)
"""
sum_grads = []
for grad_and_vars in zip(*grad):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def optimize(optimizer, scope=None, regularization_losses=None, **kwargs):
"""
Compute losses and gradients
# Note: The regularization_losses are added to losses.
Args:
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
losses = []
if regularization_losses is None:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES, scope)
# with tf.name_scope(scope):
loss, grad = _optimize(optimizer,
regularization_losses,
scope,
**kwargs)
if loss is not None:
losses.append(loss)
grads_and_vars.append(grad)
# Compute the total_loss summing all the losses.
total_loss = tf.add_n(losses, name='total_loss')
# Sum the gradients across clones.
grads_and_vars = _gradients(grads_and_vars)
return total_loss, grads_and_vars
def rotate_around_point(angle,data,point):
"""
:param angle: rotation angele
:param data: point
:param point: rotation center point
:return:
"""
rotate_x = (data[:, 0] - point[0])*np.cos(angle) - (data[:, 1] - point[1])*np.sin(angle) + point[0]
rotate_y = (data[:, 0] - point[0])*np.sin(angle) + (data[:, 1] - point[1])*np.cos(angle) + point[1]
rotate_z = data[:, 2]
return np.c_[rotate_x, rotate_y, rotate_z]
def rotate_around_point_x(angle, data, point):
"""
:param angle: rotation angle
:param data: point
:param point: rotation center point
:return:
"""
rotate_x = data[:, 0]
rotate_y = (data[:, 1] - point[1])*np.cos(angle) - (data[:, 2] - point[2])*np.sin(angle) + point[1]
rotate_z = (data[:, 1] - point[1])*np.sin(angle) + (data[:, 2] - point[2])*np.cos(angle) + point[2]
return np.c_[rotate_x, rotate_y, rotate_z]
def rotate_around_point_y(angle, data, point):
"""
:param angle: rotation angle
:param data: point
:param point: rotation center point
:return:
"""
rotate_x = (data[:, 2] - point[2])*np.sin(angle) + (data[:, 0] - point[0])*np.cos(angle) + point[0]
rotate_y = data[:, 1]
rotate_z = (data[:, 2] - point[2])*np.cos(angle) - (data[:, 0] - point[0])*np.sin(angle) + point[2]
return np.c_[rotate_x, rotate_y, rotate_z]
def get_profile_data(input_data, grid_x, grid_z, number, char):
"""
:param input_data:
:param grid_x:
:param grid_z:
:param number:
:param char:
:return:
"""
# rotate_nums = int(360 / angle)
# angle_nD = 360 / number
profile_vector = np.zeros((1, number*grid_x*grid_z))
points_pixel_num_zx = []
pts1 = 0
# for i in range(rotate_nums):
num_profile_vector = 0
for i_1 in range(number):
if i_1 == 0:
# input_data1 = input_data
pts1 += input_data.shape[0]
max_x = np.max(input_data[:, 0])
min_x = np.min(input_data[:, 0])
max_z = np.max(input_data[:, 2])
min_z = np.min(input_data[:, 2])
deta_x = max_x - min_x
deta_z = max_z - min_z
deta_deta_xz = np.abs(deta_x - deta_z)/2
for j in range(pts1):
point = input_data[j,:]
if (deta_x > deta_z):
if (j == 0):
pedeta_x = deta_x/grid_x
pedeta_z = deta_x/grid_z
attachment_z = np.ceil(deta_deta_xz/pedeta_z)
x_num = np.ceil((point[0]-min_x)/pedeta_x)
z_num = (np.ceil((point[2] - min_z) / pedeta_z) + attachment_z)
if (x_num == 0):
x_num = 1
if (z_num == 0):
z_num = 1
z_num = (grid_z + 1) - z_num
else:
if(j == 0):
pedeta_x = deta_z / grid_x
pedeta_z = deta_z / grid_z
attachment_x = np.ceil(deta_deta_xz / pedeta_x)
x_num = (np.ceil((point[0] - min_x) / pedeta_x) + attachment_x)
z_num = np.ceil((point[2] - min_z) / pedeta_z)
if (x_num == 0):
x_num = 1
if (z_num == 0):
z_num = 1
z_num = (grid_z + 1) - z_num
points_pixel_num_zx.append([z_num, x_num])
points_pixel_num_zx = np.array(points_pixel_num_zx)
matrix_value_y = np.zeros((grid_z,grid_x))
bar = tqdm(range(grid_z))
for k in bar:
bar.set_description("Processing %s" % char)
for h in range(grid_x):
n_z = [in_z for in_z,z_ in enumerate(points_pixel_num_zx[:, 0]) if z_ == (k+1)]
n_x = [in_x for in_x,x_ in enumerate(points_pixel_num_zx[:, 1]) if x_ == (h+1)]
grid_ij_points_num_zx = list(set(n_z).intersection(set(n_x)))
if grid_ij_points_num_zx != []:
matrix_value_y[k,h] = 1
profile_vector[0,num_profile_vector] = matrix_value_y[k,h]
num_profile_vector +=1
return np.array(profile_vector)
def get_xoy_profile_data(index_1, index_2, input_data, grid_x, grid_y):
"""
:param input_data:
:param grid_x:
:param grid_y:
:param number:
:param char:
:return:
"""
# rotate_nums = int(360 / angle)
# angle_nD = 360 / number
number = 1
profile_vector = np.zeros((1, number*grid_x*grid_y))
points_pixel_num_yx = []
pts1 = 0
# for i in range(rotate_nums):
num_profile_vector = 0
for i_1 in range(number):
if i_1 == 0:
# input_data1 = input_data
pts1 += input_data.shape[0]
max_x = np.max(input_data[:, 0])
min_x = np.min(input_data[:, 0])
max_y = np.max(input_data[:, 1])
min_y = np.min(input_data[:, 1])
deta_x = max_x - min_x
deta_y = max_y - min_y
deta_deta_xy = np.abs(deta_x - deta_y)/2
for j in range(pts1):
point = input_data[j, :]
if deta_x > deta_y:
if j == 0:
pedeta_x = deta_x/grid_x
pedeta_y = deta_x/grid_y
attachment_y = np.ceil(deta_deta_xy/pedeta_y)
x_num = np.ceil((point[0]-min_x)/pedeta_x)
y_num = (np.ceil((point[1] - min_y) / pedeta_y) + attachment_y)
if x_num == 0:
x_num = 1
if y_num == 0:
y_num = 1
y_num = (grid_y + 1) - y_num
else:
if j == 0:
pedeta_x = deta_y / grid_x
pedeta_y = deta_y / grid_y
attachment_x = np.ceil(deta_deta_xy / pedeta_x)
x_num = (np.ceil((point[0] - min_x) / pedeta_x) + attachment_x)
y_num = np.ceil((point[1] - min_y) / pedeta_y)
if (x_num == 0):
x_num = 1
if (y_num == 0):
y_num = 1
y_num = (grid_y + 1) - y_num
points_pixel_num_yx.append([y_num, x_num])
points_pixel_num_yx = np.array(points_pixel_num_yx)
matrix_value_y = np.zeros((grid_y,grid_x))
bar = tqdm(range(grid_y))
for k in bar:
bar.set_description("Processing %d of current batch, index %d" % (index_1, index_2))
for h in range(grid_x):
n_y = [in_y for in_y,y_ in enumerate(points_pixel_num_yx[:, 0]) if y_ == (k+1)]
n_x = [in_x for in_x,x_ in enumerate(points_pixel_num_yx[:, 1]) if x_ == (h+1)]
grid_ij_points_num_yx = list(set(n_y).intersection(set(n_x)))
if grid_ij_points_num_yx:
matrix_value_y[k, h] = 1
profile_vector[0, num_profile_vector] = matrix_value_y[k, h]
num_profile_vector += 1
return np.array(profile_vector)
def pointcloud_multiview_generate(index_1, data_curr, grid_x, grid_z, angle):
angle_ = angle * (np.pi / 180)
local_ori = (np.max(data_curr, axis=0) - np.min(data_curr, axis=0)) / 2 + np.min(data_curr, axis=0)
center_point = local_ori
multi_view_array = []
for i in range(int(360 / angle)):
rotate_angle_ = i * angle_
rotated_data = rotate_around_point_y(rotate_angle_, data_curr, center_point)
profile_xoz1 = np.array(get_xoy_profile_data(index_1, i, rotated_data, grid_x, grid_z)).reshape((1, -1))
Image_r = profile_xoz1.reshape(-1, grid_z)
nor_image_color_gray = image_color_gray*(1. / 255) - 0.5
nor_image_color_white = image_color_white*(1. / 255) - 0.5
rgbArray = np.zeros((grid_x, grid_z, 3))
rgbArray[..., 0] = Image_r * nor_image_color_gray
index_0 = (rgbArray[..., 0] == 0)
rgbArray[index_0, 0] = nor_image_color_white
rgbArray[..., 1] = Image_r * nor_image_color_gray
rgbArray[index_0, 1] = nor_image_color_white
rgbArray[..., 2] = Image_r * nor_image_color_gray
rgbArray[index_0, 2] = nor_image_color_white
multi_view_array.append(rgbArray)
return multi_view_array
def mini_batch_pointcloud_multiview_generate(batch_data, im_width, im_height, rotate_angle):
batch_size = batch_data.shape[0]
batch_data_multi_view = []
for i in range(batch_size):
current_pointcloud = batch_data[i]
current_multi_view = pointcloud_multiview_generate(i, current_pointcloud, im_width, im_height, rotate_angle)
batch_data_multi_view.append(current_multi_view)
return batch_data_multi_view
def fast_confusion(true, pred, label_values=None):
"""
Fast confusion matrix (100x faster than Scikit learn). But only works if labels are la
:param true:
:param false:
:param num_classes:
:return:
"""
true = np.squeeze(true)
pred = np.squeeze(pred)
if len(true.shape) != 1:
raise ValueError('Truth values are stored in a {:d}D array instead of 1D array'. format(len(true.shape)))
if len(pred.shape) != 1:
raise ValueError('Prediction values are stored in a {:d}D array instead of 1D array'. format(len(pred.shape)))
if true.dtype not in [np.int32, np.int64]:
raise ValueError('Truth values are {:s} instead of int32 or int64'.format(true.dtype))
if pred.dtype not in [np.int32, np.int64]:
raise ValueError('Prediction values are {:s} instead of int32 or int64'.format(pred.dtype))
true = true.astype(np.int32)
pred = pred.astype(np.int32)
if label_values is None:
label_values = np.unique(np.hstack((true, pred)))
else:
if label_values.dtype not in [np.int32, np.int64]:
raise ValueError('label values are {:s} instead of int32 or int64'.format(label_values.dtype))
if len(np.unique(label_values)) < len(label_values):
raise ValueError('Given labels are not unique')
label_values = np.sort(label_values)
num_classes = len(label_values)
if label_values[0] == 0 and label_values[-1] == num_classes - 1:
vec_conf = np.bincount(true * num_classes + pred)
if vec_conf.shape[0] < num_classes ** 2:
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
return vec_conf.reshape((num_classes, num_classes))
else:
if label_values[0] < 0:
raise ValueError('Unsupported negative classes')
label_map = np.zeros((label_values[-1] + 1,), dtype=np.int32)
for k, v in enumerate(label_values):
label_map[v] = k
pred = label_map[pred]
true = label_map[true]
vec_conf = np.bincount(true * num_classes + pred)
# Add possible missing values due to classes not being in pred or true
if vec_conf.shape[0] < num_classes ** 2:
vec_conf = np.pad(vec_conf, (0, num_classes ** 2 - vec_conf.shape[0]), 'constant')
# Reshape confusion in a matrix
return vec_conf.reshape((num_classes, num_classes))
if __name__ == '__main__':
start = time.time()
data_path = './data/train_files.txt'
TRAIN_FILES = getDataFiles(data_path)
train_file_idxs = np.arange(0, len(TRAIN_FILES))
for fn in range(len(TRAIN_FILES)):
current_data, current_label = loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
file_size = current_data.shape[0]
num_batches = file_size // 2
for batch_idx in range(num_batches):
start_idx = batch_idx * 2
end_idx = (batch_idx+1) * 2
current_batch_train_data = current_data[start_idx:end_idx, :, :]
current_batch_data_label = current_label[start_idx:end_idx]
current_train_multi_views = mini_batch_pointcloud_multiview_generate(current_batch_train_data, 299, 299, 360)
current_train_multi_views = np.array(current_train_multi_views)
print(current_train_multi_views.shape)
print("running time:{:.2f} s\n".format(time.time() - start)) | conzyou/PGVNet | train_utils.py | train_utils.py | py | 19,697 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "h5py.File",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.train.get_or_create_global_step",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_na... |
37986564283 | import sys
import scipy
from scipy import io
from scipy.io import wavfile
def getVolume(sound):
    """Compute, print, and return the total absolute amplitude of *sound*.

    Args:
        sound: iterable of numeric samples (e.g. the array returned by
            ``scipy.io.wavfile.read`` for a mono file).

    Returns:
        Sum of the absolute sample values.  The original version only
        printed the value; returning it as well keeps the console output
        unchanged while making the function usable programmatically.
    """
    # Built-in sum() with a generator replaces the manual accumulator loop.
    total = sum(abs(sample) for sample in sound)
    print(total)
    return total
def main():
    """Entry point: read the WAV file named on the command line and report
    its total absolute amplitude via getVolume()."""
    wav_path = sys.argv[1]
    print(wav_path)
    rate, samples = scipy.io.wavfile.read(wav_path)
    getVolume(samples)
if __name__ == '__main__':
    # Script entry point: python TestTemplate.py <wavfile>
    main()
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.wavfile.read",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 15,
"usage_type": "attribute"
}
] |
822226274 | #!/usr/bin/env python
# coding: utf-8
# import all packages
from nilearn.connectome import ConnectivityMeasure
from nilearn.input_data import NiftiLabelsMasker
from load_confounds import Scrubbing
from nilearn import datasets
from os.path import join
import nibabel as nib
import numpy as np
import shutil
import os
# intialize the layout to retrieve the data
path = '/path/to/fmriprep/'
file_name = 'task-rest_space-MNI152NLin2009cAsym_desc-preproc_bold'
subjects = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15','16','17', '18']
condition = ['control', 'deaf']
task = 'func'
ext = 'nii.gz'
# variables attribution
conn_measure = ConnectivityMeasure(kind='correlation', vectorize=True, discard_diagonal=True)
all_features = {'condition':[], 'subject':[], 'connectomes':[]} # where all the features are stored
schaefer_atlas = datasets.fetch_atlas_schaefer_2018(n_rois=100) # load the atlas
# Loop over every subject in both groups, extract ROI time series with the
# Schaefer-100 atlas, and collect one vectorized connectome per subject.
files_nii = []
for sub in subjects:
    for cond in condition:
        filename = f'sub-{cond}{sub}/{task}/sub-{cond}{sub}_{file_name}.{ext}'
        sub_func = os.path.join(path, filename)
        # print (sub_func) to keep track of the loop
        if os.path.isfile(sub_func): # verify if path exist
            img_load = nib.load(sub_func)
            files_nii=np.append(files_nii, img_load)
            # Scrubbing-based confound regressors from the fMRIPrep outputs.
            confounds = Scrubbing().load(sub_func)
            # initialize the masker
            masker = NiftiLabelsMasker(labels_img=schaefer_atlas.maps, t_r=2.2, standardize=True,
                         verbose= 0)
            masked_data = masker.fit(img_load)
            timeseries = masker.transform(img_load, confounds=confounds)
            # Vectorized correlation matrix (diagonal discarded, see conn_measure).
            correlation_matrix = conn_measure.fit_transform([timeseries])[0]
            # add each subject caracteristics to a container
            all_features['condition'].append(cond)
            all_features['subject'].append(sub)
            all_features['connectomes'].append(correlation_matrix)
# Persist all features in one compressed archive.
np.savez_compressed('schaefern100_features', cond = all_features['condition'], sub = all_features['subject'],
                    conn = all_features['connectomes'])
original = r'/path/to/save/schaefern100_features.npz'
target = r'/new/path/to/save/'
shutil.move(original,target) # change the path of the saved data
| PSY6983-2021/clandry_project | codes/data_prep.py | data_prep.py | py | 2,355 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nilearn.connectome.ConnectivityMeasure",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nilearn.datasets.fetch_atlas_schaefer_2018",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nilearn.datasets",
"line_number": 27,
"usage_type": "na... |
30793302432 | import cv2
import numpy as np
# Capture from the default camera and show a 2x2 mosaic of the live frame.
cap = cv2.VideoCapture(0)

while True:
    # ret is False when no frame could be grabbed (camera unplugged, stream
    # ended).  The original code ignored it and would crash on frame.shape;
    # bail out of the loop instead.
    ret, frame = cap.read()
    if not ret:
        break

    width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
    height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
    image = np.zeros(frame.shape, np.uint8)

    # dsize=(0, 0) means the output size is computed from the scale factors
    # fx/fy; 0.5 each shrinks the frame to a quarter of its area so four
    # copies tile the original resolution.
    smaller_image = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

    # Left column: rotated copy on top, plain copy below.
    image[:height//2, :width//2] = cv2.rotate(smaller_image, cv2.ROTATE_180)
    image[height//2:, :width//2] = smaller_image
    # Right column: plain copy on top, rotated copy below.
    image[:height//2, width//2:] = smaller_image
    image[height//2:, width//2:] = cv2.rotate(smaller_image, cv2.ROTATE_180)

    cv2.imshow('frame', image)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()  # release the capture device
cv2.destroyAllWindows()
| SeVaSe/Open_CV_test_vision | cameras&videocapture.py | cameras&videocapture.py | py | 1,671 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_n... |
35842604532 | from django.urls import path
from CafeStar import views
# URL namespace: lets templates and views reverse routes as 'CafeStar:<name>'.
app_name = 'CafeStar'
# Route table for the CafeStar app.  Note that both '' and 'homePage' resolve
# to the same view, and 'orderPricePoint' is the only class-based view.
urlpatterns = [
    path('', views.homePage, name='home_page'),
    path('homePage', views.homePage, name='home_page'),
    path('drinkDetail', views.drinkDetail, name='drink_detail'),
    path('drinks', views.drinks, name='drinks'),
    path('order', views.order, name='order'),
    path('orderPricePoint', views.OrderInformationView.as_view(), name='order_price_point'),
    path('login', views.newLogin, name='login'),
    path('register', views.register, name='register'),
    path('logout', views.logout, name='logout'),
    path('edit', views.userProfile, name='edit'),
    path('orderList', views.orderList, name='order_list'),
    path('shopStatus', views.status, name='shop_status'),
    path('drinksModify', views.drinksModify, name='drinks_modify'),
]
| zhengx-2000/CafeStar | CafeStar/urls.py | urls.py | py | 843 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "CafeStar.views.homePage",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "CafeStar.views",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls... |
13784383950 | import pynmea2, serial, os, time, sys, glob, datetime
def logfilename():
    """Return the output file name for the NMEA log.

    Currently a fixed name; a timestamped pattern (see the commented-out
    template below this function) was used previously.
    """
    # The datetime.now() call the old code made here was dead code: its
    # result was never used once the name became a constant.
    return 'datalog.nmea'
'''
return 'NMEA_%0.4d-%0.2d-%0.2d_%0.2d-%0.2d-%0.2d.nmea' % \
(now.year, now.month, now.day,
now.hour, now.minute, now.second)'''
try:
    while True:
        ports = ['/dev/serial0']
        if not ports:
            sys.stderr.write('No ports found, waiting 10 seconds...press Ctrl-C to quit...\n')
            time.sleep(10)
            continue
        for port in ports:
            # try to open serial port
            sys.stderr.write('Trying port %s\n' % port)
            try:
                # try to read a line of data from the serial port and parse
                with serial.Serial(port, 9600, timeout=1) as ser:
                    # 'warm up': discard some possibly partial input
                    for i in range(10):
                        ser.readline()
                    # try to parse (will throw an exception if input is not valid NMEA)
                    pynmea2.parse(ser.readline().decode('ascii', errors='replace'))
                    # log data
                    outfname = logfilename()
                    sys.stderr.write('Logging data on %s to %s\n' % (port, outfname))
                    with open(outfname, 'wb') as f:
                        # loop will exit with Ctrl-C, which raises a
                        # KeyboardInterrupt
                        while True:
                            line = ser.readline()
                            # BUG FIX: ser.readline() returns *bytes* in
                            # Python 3, so the old comparison against the str
                            # "$GNGGA" was always False and fix-loss was never
                            # detected.  Compare against a bytes literal.
                            # (The unused local `n = len(line)` was removed.)
                            if line[0:6] == b"$GNGGA":
                                if len(line) < 45:
                                    ## ADD ANYTHING YOU WANT TO DO WHEN FIX IS LOST ##
                                    print('FIX LOST, STOP PHOTOS')
                                    print(line)
                            f.write(line)
            except Exception as e:
                sys.stderr.write('Error reading serial port %s: %s\n' % (type(e).__name__, e))
                sys.exit()
            except KeyboardInterrupt as e:
                # Reachable: KeyboardInterrupt derives from BaseException,
                # not Exception, so the clause above does not swallow it.
                sys.exit()
        sys.stderr.write('Scanned all ports, waiting 10 seconds...press Ctrl-C to quit...\n')
        time.sleep(10)
except KeyboardInterrupt:
    sys.stderr.write('Ctrl-C pressed, exiting port scanner\n')
| Keshavkant/RpiGeotaggedImages | GeoLogger.py | GeoLogger.py | py | 2,636 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.write",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.stder... |
37393448771 | # Dependencies
import json
# Get influencer criteria from config.json file
config_file = open('config.json')
config = json.load(config_file)
influencer = config['influencer']
def is_influencer(tweet):
    """Return True when the tweet's author meets the influencer criteria.

    The minimum followers / retweets / likes thresholds come from the
    module-level ``influencer`` dict loaded from config.json.

    Args:
        tweet: a tweet as a dict (Twitter API JSON).
    """
    rts = tweet['retweet_count']
    fav = tweet['favorite_count']
    followers = tweet['user']['followers_count']
    # Return the boolean expression directly instead of if/else True/False.
    return (followers >= influencer['followers']
            and rts >= influencer['retweets']
            and fav >= influencer['likes'])
def not_retweet(tweet):
    """Return True when *tweet* is an original tweet, not a retweet.

    Args:
        tweet: a tweet as a dict (Twitter API JSON), matching how
            is_influencer() in this module accesses tweets.
    """
    # BUG FIX: tweets are dicts here (see the subscripting in
    # is_influencer), so hasattr(tweet, 'retweeted_status') was always
    # False and every retweet was misclassified as original.  A retweet
    # is marked by the presence of the 'retweeted_status' key.
    return 'retweeted_status' not in tweet
| janielMartell/twitter-influencer-scraper | utils.py | utils.py | py | 860 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 6,
"usage_type": "call"
}
] |
25422749814 | from util import get_history_identifier, get_user_identifier, calculate_num_tokens, calculate_num_tokens_by_prompt, say_ts, check_availability
from typing import List, Dict
class GPT_4_CommandExecutor():
    """Command executor that holds a GPT-4 conversation per Slack team/channel/user.

    Keeps a per-identifier message history and trims it so the prompt stays
    within the model's input token budget.
    """
    MAX_TOKEN_SIZE = 8192 # maximum total token size of the model
    COMPLETION_MAX_TOKEN_SIZE = 2048 # maximum token size of the ChatCompletion output
    INPUT_MAX_TOKEN_SIZE = MAX_TOKEN_SIZE - COMPLETION_MAX_TOKEN_SIZE # token budget available for the ChatCompletion input

    def __init__(self, openai):
        # History keyed by (team, channel, user) identifier -> list of chat messages.
        self.history_dict : Dict[str, List[Dict[str, str]]] = {}
        self.openai = openai

    def execute(self, client, message, say, context, logger):
        """Run one GPT-4 conversation turn and post the reply to Slack."""
        using_team = message["team"]
        using_channel = message["channel"]
        history_idetifier = get_history_identifier(
            using_team, using_channel, message["user"])
        user_identifier = get_user_identifier(using_team, message["user"])

        prompt = context["matches"][0]

        # Fetch the stored history for this identifier (empty on first use).
        history_array: List[Dict[str, str]] = []
        if history_idetifier in self.history_dict.keys():
            history_array = self.history_dict[history_idetifier]
        history_array.append({"role": "user", "content": prompt})

        # Drop the oldest messages while the history exceeds INPUT_MAX_TOKEN_SIZE.
        while calculate_num_tokens(history_array) > self.INPUT_MAX_TOKEN_SIZE:
            history_array = history_array[1:]

        # A single message larger than the budget cannot be handled at all.
        if(len(history_array) == 0):
            messege_out_of_token_size = f"発言内容のトークン数が{self.INPUT_MAX_TOKEN_SIZE}を超えて、{calculate_num_tokens_by_prompt(prompt)}であったため、対応できませんでした。"
            say_ts(client, message, messege_out_of_token_size)
            logger.info(messege_out_of_token_size)
            return

        say_ts(client, message, f"GPT-4で <@{message['user']}> さんの以下の発言に対応中(履歴数: {len(history_array)} 、トークン数: {calculate_num_tokens(history_array)})\n```\n{prompt}\n```")

        # Call the ChatCompletion API.
        logger.info(f"user: {message['user']}, prompt: {prompt}")
        response = self.openai.ChatCompletion.create(
            model="gpt-4",
            messages=history_array,
            top_p=1,
            n=1,
            max_tokens=self.COMPLETION_MAX_TOKEN_SIZE,
            temperature=1, # diversity of the generated response
            presence_penalty=0,
            frequency_penalty=0,
            logit_bias={},
            user=user_identifier
        )
        logger.debug(response)

        # Append the assistant's new message to the history.
        new_response_message = response["choices"][0]["message"]
        history_array.append(new_response_message)

        # Trim again so the *stored* history also stays within INPUT_MAX_TOKEN_SIZE.
        while calculate_num_tokens(history_array) > self.INPUT_MAX_TOKEN_SIZE:
            history_array = history_array[1:]

        self.history_dict[history_idetifier] = history_array # persist the updated history
        say_ts(client, message, new_response_message["content"])
        logger.info(f"user: {message['user']}, content: {new_response_message['content']}")

    def execute_reset(self, client, message, say, context, logger):
        """Reset the stored GPT-4 conversation history for this user/channel."""
        using_team = message["team"]
        using_channel = message["channel"]
        historyIdetifier = get_history_identifier(
            using_team, using_channel, message["user"])

        # Clear the history for this identifier.
        self.history_dict[historyIdetifier] = []
        logger.info(f"GPT-4の <@{message['user']}> さんの <#{using_channel}> での会話の履歴をリセットしました。")
        say_ts(client, message, f"GPT-4の <@{message['user']}> さんの <#{using_channel}> での会話の履歴をリセットしました。")
| sifue/chatgpt-slackbot | opt/gpt_4.py | gpt_4.py | py | 4,222 | python | ja | code | 54 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "util.get_history_identifier",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "util.get_user_ide... |
39060336799 | from obspy import read
from numpy import genfromtxt,sin,cos,deg2rad,array,c_
from matplotlib import pyplot as plt
n=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYN.00.D/BRIC.BK.BYN.00.D.2016.232')
e=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYE.00.D/BRIC.BK.BYE.00.D.2016.232')
z=read(u'/Users/dmelgar/kestrel/BRIC/BRIC.BK/BYZ.00.D/BRIC.BK.BYZ.00.D.2016.232')
n[0].data=n[0].data*100e-6
e[0].data=e[0].data*100e-6
z[0].data=z[0].data*100e-6
yl=[-0.08,0.08]
#sopac
g=genfromtxt('/Users/dmelgar/Downloads/pos_brib_57620_00')
x1=g[:,2]-g[0,2]
y1=g[:,3]-g[0,3]
z1=g[:,4]-g[0,4]
x2=g[:,8]-g[0,8]
y2=g[:,9]-g[0,9]
z2=g[:,10]-g[0,10]
#Rotate to local NEU
lat=deg2rad(37.91940521)
lon=deg2rad(-122.15255493)
R=array([[-sin(lat)*cos(lon),-sin(lat)*sin(lon),cos(lat)],[-sin(lon),cos(lon),0],[cos(lon)*cos(lat),cos(lat)*sin(lon),sin(lat)]])
scripps1=R.dot(c_[x1,y1,z1].T).T
scripps2=R.dot(c_[x2,y2,z2].T).T
plt.subplot(311)
plt.plot(n[0].times(),n[0].data,'k')
plt.plot(scripps1[:,0],c='#1E90FF')
plt.plot(scripps2[:,0],c='#DC143C')
plt.xlim([0,len(y1)])
plt.ylabel('North (m)')
plt.legend(['Kestrel RTX','Scripps 1','Scripps 2'])
plt.ylim(yl)
plt.subplot(312)
plt.plot(e[0].times(),e[0].data,'k')
plt.plot(scripps1[:,1],c='#1E90FF')
plt.plot(scripps2[:,1],c='#DC143C')
plt.xlim([0,len(x1)])
plt.ylabel('East (m)')
plt.ylim(yl)
plt.subplot(313)
plt.plot(z[0].times(),z[0].data,'k')
plt.plot(scripps1[:,2],c='#1E90FF')
plt.plot(scripps2[:,2],c='#DC143C')
plt.xlim([0,len(y1)])
plt.ylabel('Up (m)')
plt.xlabel('Seconds')
plt.ylim(yl)
plt.show()
| Ogweno/mylife | kestrel/plot_data.py | plot_data.py | py | 1,542 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "obspy.read",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "obspy.read",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "obspy.read",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 1... |
5501591657 | ### IMPORT THE REQUIRED LIBRARIES
# To read the dataset in .mat format
import scipy.io as sio
# For matrix operations
import numpy as np
# Keras functions to create and compile the model
from keras.layers import Input, Conv2D, Lambda, Reshape, Multiply, Add, Subtract
from keras.activations import relu
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
### READING THE DATA
phi_read = sio.loadmat('phi_0_25_1089.mat')
train = sio.loadmat('Training_Data_Img91.mat')
### PREPROCESSING
# Reading training input and labels
train_inp = train['inputs']
train_labels = train['labels']
# Preparing the constant matrices
phi = np.transpose(phi_read['phi'])
ptp = np.dot(phi, np.transpose(phi)) # phi^T x phi
temp1 = np.transpose(train_labels)
temp2 = np.dot(np.transpose(phi), temp1)
temp3 = np.dot(np.dot(temp1, np.transpose(temp2)), np.linalg.inv(np.dot(temp2, np.transpose(temp2))))
phi_inv = np.transpose(temp3) # phi^-1
# Instead of multiplying each batch by phi and then supplying it to the model as input,
# we multiply the entire training set by phi in the preprocessing stage itself
x_inp = np.dot(train_labels, phi)
### INITIALIZING CONSTANTS
n_input = 272
tau = 0.1
lambda_step = 0.1
soft_thr = 0.1
conv_size = 32
filter_size = 3
### PREPARING THE MODEL (An image of the model map has been attached)
# Defining the input and output
inp = Input((n_input,))
inp_labels = Input((1089, ))
# Defining the input for the first ISTA block
x0 = Lambda(lambda x: K.dot(x, K.constant(phi_inv)))(inp)
phi_tb = Lambda(lambda x: K.dot(x, K.constant(np.transpose(phi))))(inp)
# ISTA block #1
conv1_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv1_x1')(x0)
conv1_x2 = Reshape((33, 33, 1), name='conv1_x2')(conv1_x1)
conv1_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_x3')(conv1_x2)
conv1_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv1_sl1')
conv1_x4 = conv1_sl1(conv1_x3)
conv1_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_sl2')
conv1_x44 = conv1_sl2(conv1_x4)
conv1_x5 = Multiply(name='conv1_x5')([Lambda(lambda x: K.sign(x))(conv1_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv1_x44))])
conv1_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv1_sl3')
conv1_x6 = conv1_sl3(conv1_x5)
conv1_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_sl4')
conv1_x66 = conv1_sl4(conv1_x6)
conv1_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv1_x7a')(conv1_x66)
conv1_x7 = Add(name='conv1_x7b')([conv1_x7, conv1_x2])
conv1_x8 = Reshape((1089,), name='conv1_x8')(conv1_x7)
conv1_x3_sym = conv1_sl1(conv1_x3)
conv1_x4_sym = conv1_sl2(conv1_x3_sym)
conv1_x6_sym = conv1_sl3(conv1_x4_sym)
conv1_x7_sym = conv1_sl4(conv1_x6_sym)
conv1_x11 = Subtract(name='conv1_x11')([conv1_x7_sym, conv1_x3])
conv1 = conv1_x8
conv1_sym = conv1_x11
# ISTA block #2
conv2_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv2_x1')(conv1)
conv2_x2 = Reshape((33, 33, 1), name='conv2_x2')(conv2_x1)
conv2_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_x3')(conv2_x2)
conv2_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv2_sl1')
conv2_x4 = conv2_sl1(conv2_x3)
conv2_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_sl2')
conv2_x44 = conv2_sl2(conv2_x4)
conv2_x5 = Multiply(name='conv2_x5')([Lambda(lambda x: K.sign(x))(conv2_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv2_x44))])
conv2_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv2_sl3')
conv2_x6 = conv2_sl3(conv2_x5)
conv2_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_sl4')
conv2_x66 = conv2_sl4(conv2_x6)
conv2_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv2_x7a')(conv2_x66)
conv2_x7 = Add(name='conv2_x7b')([conv2_x7, conv2_x2])
conv2_x8 = Reshape((1089,), name='conv2_x8')(conv2_x7)
conv2_x3_sym = conv2_sl1(conv2_x3)
conv2_x4_sym = conv2_sl2(conv2_x3_sym)
conv2_x6_sym = conv2_sl3(conv2_x4_sym)
conv2_x7_sym = conv2_sl4(conv2_x6_sym)
conv2_x11 = Subtract(name='conv2_x11')([conv2_x7_sym, conv2_x3])
conv2 = conv2_x8
conv2_sym = conv2_x11
# ISTA block #3
conv3_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv3_x1')(conv2)
conv3_x2 = Reshape((33, 33, 1), name='conv3_x2')(conv3_x1)
conv3_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_x3')(conv3_x2)
conv3_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv3_sl1')
conv3_x4 = conv3_sl1(conv3_x3)
conv3_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_sl2')
conv3_x44 = conv3_sl2(conv3_x4)
conv3_x5 = Multiply(name='conv3_x5')([Lambda(lambda x: K.sign(x))(conv3_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv3_x44))])
conv3_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv3_sl3')
conv3_x6 = conv3_sl3(conv3_x5)
conv3_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_sl4')
conv3_x66 = conv3_sl4(conv3_x6)
conv3_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv3_x7a')(conv3_x66)
conv3_x7 = Add(name='conv3_x7b')([conv3_x7, conv3_x2])
conv3_x8 = Reshape((1089,), name='conv3_x8')(conv3_x7)
conv3_x3_sym = conv3_sl1(conv3_x3)
conv3_x4_sym = conv3_sl2(conv3_x3_sym)
conv3_x6_sym = conv3_sl3(conv3_x4_sym)
conv3_x7_sym = conv3_sl4(conv3_x6_sym)
conv3_x11 = Subtract(name='conv3_x11')([conv3_x7_sym, conv3_x3])
conv3 = conv3_x8
conv3_sym = conv3_x11
# ISTA block #4
conv4_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv4_x1')(conv3)
conv4_x2 = Reshape((33, 33, 1), name='conv4_x2')(conv4_x1)
conv4_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_x3')(conv4_x2)
conv4_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv4_sl1')
conv4_x4 = conv4_sl1(conv4_x3)
conv4_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_sl2')
conv4_x44 = conv4_sl2(conv4_x4)
conv4_x5 = Multiply(name='conv4_x5')([Lambda(lambda x: K.sign(x))(conv4_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv4_x44))])
conv4_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv4_sl3')
conv4_x6 = conv4_sl3(conv4_x5)
conv4_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_sl4')
conv4_x66 = conv4_sl4(conv4_x6)
conv4_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv4_x7a')(conv4_x66)
conv4_x7 = Add(name='conv4_x7b')([conv4_x7, conv4_x2])
conv4_x8 = Reshape((1089,), name='conv4_x8')(conv4_x7)
conv4_x3_sym = conv4_sl1(conv4_x3)
conv4_x4_sym = conv4_sl2(conv4_x3_sym)
conv4_x6_sym = conv4_sl3(conv4_x4_sym)
conv4_x7_sym = conv4_sl4(conv4_x6_sym)
conv4_x11 = Subtract(name='conv4_x11')([conv4_x7_sym, conv4_x3])
conv4 = conv4_x8
conv4_sym = conv4_x11
# ISTA block #5
conv5_x1 = Lambda(lambda x: x - lambda_step * K.dot(x, K.constant(ptp)) + lambda_step * phi_tb, name='conv5_x1')(conv4)
conv5_x2 = Reshape((33, 33, 1), name='conv5_x2')(conv5_x1)
conv5_x3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_x3')(conv5_x2)
conv5_sl1 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv5_sl1')
conv5_x4 = conv5_sl1(conv5_x3)
conv5_sl2 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_sl2')
conv5_x44 = conv5_sl2(conv5_x4)
conv5_x5 = Multiply(name='conv5_x5')([Lambda(lambda x: K.sign(x))(conv5_x44), Lambda(lambda x: relu(x - soft_thr))(Lambda(lambda x: K.abs(x))(conv5_x44))])
conv5_sl3 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, activation='relu', name='conv5_sl3')
conv5_x6 = conv5_sl3(conv5_x5)
conv5_sl4 = Conv2D(conv_size, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_sl4')
conv5_x66 = conv5_sl4(conv5_x6)
conv5_x7 = Conv2D(1, [filter_size, filter_size], padding='SAME', use_bias=False, name='conv5_x7a')(conv5_x66)
conv5_x7 = Add(name='conv5_x7b')([conv5_x7, conv5_x2])
conv5_x8 = Reshape((1089,), name='conv5_x8')(conv5_x7)
conv5_x3_sym = conv5_sl1(conv5_x3)
conv5_x4_sym = conv5_sl2(conv5_x3_sym)
conv5_x6_sym = conv5_sl3(conv5_x4_sym)
conv5_x7_sym = conv5_sl4(conv5_x6_sym)
conv5_x11 = Subtract(name='conv5_x11')([conv5_x7_sym, conv5_x3])
conv5 = conv5_x8
conv5_sym = conv5_x11
# Defining the custom loss metric
def custom_loss(y_true, y_pred):
    """ISTA-Net training loss: reconstruction error plus symmetry penalty.

    NOTE(review): this function indexes y_pred[0..6] as if it received the
    model's full output list, which is not how Keras passes y_pred to a
    per-output loss function; the model below is actually trained through
    model.add_loss, so this helper appears to be unused — confirm before
    relying on it.
    """
    # Referred to in the paper as cost (data-fidelity / reconstruction term)
    cost1 = K.mean(K.square(y_pred[1] - y_pred[0]))
    # Referred to in the paper as cost_sym (symmetry constraint over the 5 ISTA blocks)
    cost2 = K.mean(K.square(y_pred[2])) + K.mean(K.square(y_pred[3])) + K.mean(K.square(y_pred[4])) + K.mean(K.square(y_pred[5])) + K.mean(K.square(y_pred[6]))
    # Referred to in the paper as cost_all
    cost = cost1 + 0.01*cost2
    return cost
### COMPILING THE MODEL
# Defining the inputs and outputs
model = Model(inputs=[inp, inp_labels], outputs=[conv5, conv1_sym, conv2_sym, conv3_sym, conv4_sym, conv5_sym])
# Display a model summary
model.summary()
# Define costs
cost1 = K.mean(K.square(conv5 - inp_labels))
cost2 = K.mean(K.square(conv1_sym)) + K.mean(K.square(conv2_sym)) + K.mean(K.square(conv3_sym)) + K.mean(K.square(conv4_sym)) + K.mean(K.square(conv5_sym))
cost = cost1 + 0.01*cost2
# Add custom loss
model.add_loss(K.mean(K.square(conv5 - inp_labels)) + 0.01 * K.mean(K.square(conv1_sym)) + K.mean(K.square(conv2_sym)) + K.mean(K.square(conv3_sym)) + K.mean(K.square(conv4_sym)) + K.mean(K.square(conv5_sym)))
# Compile the model
model.compile(optimizer=Adam(lr=0.0001), metrics=[cost, cost1, cost2])
# Define custom metrics to display
model.metrics_tensors.append(K.mean(K.square(conv5 - inp_labels)) + 0.01*K.mean(K.square(conv1_sym)) + K.mean(K.square(conv2_sym)) + K.mean(K.square(conv3_sym)) + K.mean(K.square(conv4_sym)) + K.mean(K.square(conv5_sym)))
model.metrics_names.append("cost")
model.metrics_tensors.append(K.mean(K.square(conv5 - inp_labels)))
model.metrics_names.append("cost1")
model.metrics_tensors.append(K.mean(K.square(conv1_sym)) + K.mean(K.square(conv2_sym)) + K.mean(K.square(conv3_sym)) + K.mean(K.square(conv4_sym)) + K.mean(K.square(conv5_sym)))
model.metrics_names.append("cost2")
### TRAINING THE MODEL
model.fit([x_inp, train_labels],
epochs = 300,
batch_size = 64)
| hansinahuja/ISTA-Net | ista_net.py | ista_net.py | py | 11,288 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "scipy.io.loadmat",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_numbe... |
9786188527 | import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def threshold_normalize(data, transform):
    """Binarize *data* at threshold 254 and rescale it to [0, 1].

    When *transform* is truthy, additionally return an elastically
    deformed copy of the binarized image (also rescaled to [0, 1]).
    """
    THRESHOLD, MAX_VAL = 254, 255
    _, binary = cv2.threshold(np.uint8(data), THRESHOLD, MAX_VAL, cv2.THRESH_BINARY)
    normalized = binary / 255.0
    if not transform:
        return normalized
    warped = elastic_transform(binary.copy())
    return normalized, warped / 255.0
def elastic_transform(data, alpha=15, sigma=15, seed=1234):
    """Apply a random elastic deformation to each image in *data*, in place.

    Referenced from https://gist.github.com/fmder/e28813c1e8721830ff9c

    Args:
        data: sequence/array of 2-D images; each image is warped in place.
        alpha: amplitude of the displacement field (generalized from the
            old hard-coded constant; default preserves previous behavior).
        sigma: Gaussian smoothing applied to the displacement field.
        seed: seed for the deformation's random state.

    Returns:
        The same *data* container with each image elastically warped.
    """
    print("Elastic Transform")
    # BUG FIX: the old code called np.random.seed(1234) but then created
    # np.random.RandomState() with *no* seed, so the global seed had no
    # effect and the transform was nondeterministic.  Seed the local
    # RandomState directly instead.
    rand_state = np.random.RandomState(seed)
    for i in range(len(data)):
        img_shape = data[i].shape
        # Random displacement fields in [-1, 1), smoothed and scaled.
        dx = gaussian_filter(rand_state.rand(*img_shape) * 2 - 1, sigma, mode="constant") * alpha
        dy = gaussian_filter(rand_state.rand(*img_shape) * 2 - 1, sigma, mode="constant") * alpha
        x, y = np.meshgrid(np.arange(img_shape[0]), np.arange(img_shape[1]))
        indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
        # Bilinear resampling at the displaced coordinates.
        data[i] = map_coordinates(data[i], indices, order=1).reshape(img_shape)
    return data
| sheldon-benard/DigitClassification | 551-project/preprocessing.py | preprocessing.py | py | 1,107 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.threshold",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
... |
74752046504 | from lxml import etree
import unittest
from unittest.mock import MagicMock, patch
from lib.parsers.parseOCLC import readFromClassify, loadEditions, extractAndAppendEditions
from lib.dataModel import WorkRecord
from lib.outputManager import OutputManager
class TestOCLCParse(unittest.TestCase):
    """Unit tests for the lib.parsers.parseOCLC Classify-response helpers."""

    @patch.object(OutputManager, 'checkRecentQueries', return_value=False)
    def test_classify_read(self, mockCheck):
        # Build a fake Classify XML response: a <work> element plus a <start>
        # offset.  mockXML.find returns them in call order (side_effect), so
        # readFromClassify is expected to query the work before the start.
        mockXML = MagicMock()
        work = etree.Element(
            'work',
            title='Test Work',
            editions='1',
            holdings='1',
            eholdings='1',
            owi='1111111',
        )
        start = etree.Element('start')
        start.text = '0'
        work.text = '0000000000'
        mockXML.find.side_effect = [work, start]
        mockXML.findall.return_value = []
        resWork, resCount, oclcID = readFromClassify(mockXML, 'testUUID')
        self.assertIsInstance(resWork, WorkRecord)
        self.assertEqual(resCount, 1)
        self.assertEqual(oclcID, '0000000000')
        # The OWI/start pair must be used to de-duplicate recent lookups.
        mockCheck.assert_called_once_with('lookup/owi/1111111/0')

    @patch('lib.parsers.parseOCLC.parseEdition', return_value=True)
    def test_loadEditions(self, mockParse):
        # loadEditions should parse every edition it is given (16 here).
        testEditions = [i for i in range(16)]
        outEds = loadEditions(testEditions)
        self.assertEqual(len(outEds), 16)

    @patch('lib.parsers.parseOCLC.loadEditions')
    def test_extractEditions(self, mockLoad):
        # Editions found in the XML are loaded and appended to work.instances.
        mockXML = MagicMock()
        mockXML.findall.return_value = ['ed1', 'ed2', 'ed3']
        mockWork = MagicMock()
        mockWork.instances = []
        mockLoad.return_value = [1, 2, 3]
        extractAndAppendEditions(mockWork, mockXML)
        self.assertEqual(mockWork.instances, [1, 2, 3])
        mockLoad.assert_called_once_with(['ed1', 'ed2', 'ed3'])
| NYPL/sfr-ingest-pipeline | lambda/sfr-oclc-classify/tests/test_parseOCLC.py | test_parseOCLC.py | py | 1,821 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree.Element",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "lxm... |
74506400423 | from rest_framework import serializers
from onbici.bike.serializers import BikeSerializer
from onbici.bike.models import Bike
from onbici.station.models import Station
from .models import Slot
class SlotSerializer(serializers.ModelSerializer):
    """Serializer for Slot objects.

    The related bike/station ids are supplied through ``self.context``
    (``context['bike']`` / ``context['station']``) rather than through the
    validated payload.
    """
    bike = BikeSerializer(required=False)

    class Meta:
        model = Slot
        fields = ['id', 'station', 'bike', 'status', 'created_at', 'modified_at']

    def _get_bike(self):
        """Resolve context['bike'] to a Bike instance, or None when absent."""
        if self.context['bike'] is None:
            return None
        try:
            return Bike.objects.get(id=self.context['bike'])
        except Bike.DoesNotExist:
            # FIX: the old message said "user" although a *bike* id was invalid.
            raise serializers.ValidationError({'error': 'Please enter a valid bike.'})

    def _get_station(self):
        """Resolve context['station'] to a Station instance."""
        try:
            return Station.objects.get(id=self.context['station'])
        except Station.DoesNotExist:
            # FIX: the old message said "slot" although a *station* id was invalid.
            raise serializers.ValidationError({'error': 'Please enter a valid station.'})

    def create(self, validated_data):
        """Create a Slot linked to the contextual bike (optional) and station."""
        bike = self._get_bike()
        station = self._get_station()
        slot = Slot.objects.create(bike=bike, station=station, **validated_data)
        return slot

    def update(self, instance, validated_data):
        """Update bike/station/status; context['bike'] set to None detaches the bike."""
        if self.context['bike']:
            # TODO(review): the old code noted the bike may already sit in
            # another slot — that check is still missing.
            instance.bike = self._get_bike()
        elif self.context['bike'] is None:
            instance.bike = None
        if self.context['station']:
            instance.station = self._get_station()
        if validated_data.get('status', instance.status) is not None:
            instance.status = validated_data.get('status', instance.status)
        instance.save()
        return instance
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "onbici.bike.serializers.BikeSerializer",
"line_number": 9,
"usag... |
16528708499 | from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import io
def data_gen(num=100):
    """Draw *num* samples from a standard normal distribution."""
    return np.random.normal(size=num)
def plot_raw(a):
    """Render *a* as a line plot and return the Matplotlib figure."""
    plt.close()
    fig = plt.figure(figsize=(12, 5))
    plt.title(f"Line plot of {len(a)} samples", fontsize=16)
    plt.plot(a)
    # The figure created above is the current one, so returning it directly
    # is equivalent to the former plt.gcf().
    return fig
def plot_hist(a):
    """Render the samples in *a* as a histogram and return the figure."""
    plt.close()
    fig = plt.figure(figsize=(12, 5))
    plt.title(f"Histogram of {len(a)} samples", fontsize=16)
    plt.hist(a, color='orange', edgecolor='k')
    return fig
def fig2img(fig):
    """Convert a Matplotlib figure to a PIL Image via an in-memory PNG buffer."""
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
def Generate(num=100):
    """Generate *num* random samples and render both plots into the 'raw' scope.

    Called by `app` with the user-supplied sample count; the scope is cleared
    first so repeated runs replace the previous plots.
    """
    remove(scope='raw')
    # the context manager's return value was previously bound to an unused
    # `img` variable; the binding has been dropped
    with use_scope(name='raw', clear=True):
        samples = data_gen(num)
        put_image(fig2img(plot_raw(samples)))
        put_image(fig2img(plot_hist(samples)))
def app():
    """Entry point: show the intro text, ask for a sample count, then plot."""
    put_markdown("""
    # Matplotlib plot demo
    ## [Dr. Tirthajyoti Sarkar](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/)
    We show two plots from [random gaussian samples](https://en.wikipedia.org/wiki/Normal_distribution). You choose the number of data points to generate.
    - A line plot
    - A histogram
    """, strip_indent=4)
    count = input("Number of samples", type=NUMBER)
    Generate(count)
    put_markdown("""## Code for this app is here: [Code repo](https://github.com/tirthajyoti/PyWebIO/tree/main/apps)""")
# Serve the app on a local PyWebIO development server when run directly.
if __name__ == '__main__':
    start_server(app, port=9999, debug=True)
{
"api_name": "numpy.random.normal",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplot... |
74588531623 | # coding=utf-8
__author__ = "Arnaud KOPP"
__copyright__ = "© 2015-2016 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GNU GPL V3.0"
__maintainer__ = "Arnaud KOPP"
__email__ = "kopp.arnaud@gmail.com"
__status__ = "Production"
from collections import OrderedDict
import logging
import pandas as pd
log = logging.getLogger(__name__)
class MultiFASTA(object):
    """Container holding several :class:`FASTA` records keyed by accession."""

    def __init__(self):
        # fetch the sequence using this attribute
        self._fasta_fetcher = FASTA()
        # an ordered dictionary to store the fasta contents
        self._fasta = OrderedDict()

    def __len__(self):
        return len(self._fasta)

    def _get_fasta(self):
        return self._fasta
    fasta = property(_get_fasta, doc="Returns all FASTA instances ")

    def _get_ids(self):
        return [f for f in self._fasta.keys()]
    ids = property(_get_ids, doc="returns list of keys/accession identifiers")

    def load_fasta(self, ids):
        """Download one or several FASTA records by identifier.

        :param ids: a single accession string or a list of accessions
        """
        if isinstance(ids, str):
            ids = [ids]
        for id_ in ids:
            self._fasta_fetcher.load(id_)
            # create a new instance of FASTA and save fasta data
            f = FASTA()
            f._fasta = self._fasta_fetcher._fasta[:]
            # append in the ordered dictionary
            self._fasta[id_] = f
            log.info("%s loaded" % id_)

    def save_fasta(self, filename):
        """Save all FASTA into a file

        :param filename: output path; records are concatenated in order
        """
        # context manager guarantees the handle is closed even on write errors
        with open(filename, "w") as fh:
            for f in self._fasta.values():
                fh.write(f.fasta)

    def read_fasta(self, filename):
        """Load several FASTA from a filename

        :param filename: path to a multi-record FASTA file
        """
        with open(filename, "r") as fh:
            data = fh.read()
        # records are separated by the ">" character; first chunk is empty
        for thisfasta in data.split(">")[1:]:
            f = FASTA()
            f._fasta = f._interpret(thisfasta)
            if f.accession is not None and f.accession not in self.ids:
                self._fasta[f.accession] = f
            else:
                log.warning("Accession %s is already in the ids list or could not be interpreted. skipped" %
                            str(f.accession))

    def _get_df(self):
        df = pd.concat([self.fasta[id_].df for id_ in self.fasta.keys()])
        df.reset_index(inplace=True)
        return df
    df = property(_get_df)

    def hist_size(self, **kargs):
        """Plot a histogram of sequence lengths (best effort, needs pylab).

        :param kargs: forwarded to pandas' ``hist``
        """
        try:
            import pylab
            self.df.Size.hist(**kargs)
            pylab.title("Histogram length of the sequences")
            pylab.xlabel("Length")
        except Exception:
            # narrowed from a bare except: plotting stays best-effort
            # (missing pylab / empty data), but Ctrl-C now propagates
            pass
class FASTA(object):
    """A single FASTA record (header line + sequence).

    The raw record text is stored in ``_fasta``; accession, organism, gene
    name, etc. are properties parsed on demand from the header line.  Only
    SwissProt ("sp") and GenInfo ("gi") header layouts are recognised.
    """

    known_dbtypes = ["sp", "gi"]

    def __init__(self):
        self._fasta = None  # raw FASTA text once loaded/read

    def _get_fasta(self):
        return self._fasta
    fasta = property(_get_fasta, doc="returns FASTA content")

    # for all types
    def _get_sequence(self):
        if self.fasta:
            # everything after the first line, newlines removed
            return "".join(self.fasta.split("\n")[1:])
        else:
            raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
    sequence = property(_get_sequence, doc="returns the sequence only")

    # for all types
    def _get_header(self):
        if self.fasta:
            return self.fasta.split("\n")[0]
        else:
            raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
    header = property(_get_header, doc="returns header only")

    def _get_dbtype(self):
        dbtype = self.header.split("|")[0].replace(">", "")
        return dbtype
    dbtype = property(_get_dbtype)

    # for all types
    def _get_identifier(self):
        return self.header.split(" ")[0]
    identifier = property(_get_identifier)

    def _get_entry(self):
        return self.header.split("|")[2].split(" ")[0]
    entry = property(_get_entry, doc="returns entry only")

    # sp and gi layouts share the same accession position;
    # any other dbtype implicitly yields None
    def _get_accession(self):
        if self.dbtype == "sp":
            return self.identifier.split("|")[1]
        elif self.dbtype == "gi":
            return self.identifier.split("|")[1]
    accession = property(_get_accession)

    # swiss prot only (None otherwise)
    def _get_name_sp(self):
        if self.dbtype == "sp":
            header = self.header
            return header.split(" ")[0].split("|")[2]
    name = property(_get_name_sp)

    def _get_df(self):
        """One-row DataFrame summarising this record."""
        df = pd.DataFrame({
            "Identifiers": [self.identifier],
            "Accession": [self.accession],
            "Entry": [self.entry],
            "Database": [self.dbtype],
            "Organism": [self.organism],
            "PE": [self.PE],
            "SV": [self.SV],
            "Sequence": [self.sequence],
            "Header": [self.header],
            "Size": [len(self.sequence)]})
        return df
    df = property(_get_df)

    def _get_info_from_header(self, prefix):
        """Extract the value of a ``prefix=...`` field (OS/GN/PE/SV) from the header."""
        if prefix not in self.header:
            return None
        # finds the prefix
        index = self.header.index(prefix + "=")
        # remove it
        name = self.header[index:][3:]
        # figure out if there is another = sign to split the string
        # otherwise, the prefix we looked for is the last one anyway
        if "=" in name:
            name = name.split("=")[0]
            # here each = sign in FASTA is preceded by 2 characters that we must remove
            name = name[0:-2]
            name = name.strip()
        else:
            name = name.strip()
        return name

    def _get_gene_name(self):
        return self._get_info_from_header("GN")
    gene_name = property(_get_gene_name,
                         doc="returns gene name from GN keyword found in the header if any")

    def _get_organism(self):
        return self._get_info_from_header("OS")
    organism = property(_get_organism,
                        doc="returns organism from OS keyword found in the header if any")

    def _get_PE(self):
        pe = self._get_info_from_header("PE")
        if pe is not None:
            return int(pe)
    PE = property(_get_PE,
                  doc="returns PE keyword found in the header if any")

    def _get_SV(self):
        sv = self._get_info_from_header("SV")
        if sv is not None:
            return int(sv)
    SV = property(_get_SV,
                  doc="returns SV keyword found in the header if any")

    def __str__(self):
        str_ = self.fasta
        return str_

    def load(self, id_):
        """Alias for :meth:`load_fasta`."""
        self.load_fasta(id_)

    def load_fasta(self, id_):
        """Download the FASTA record *id_* from UniProt (best effort).

        On any failure (network error, empty/obsolete entry) the previous
        content of ``_fasta`` is left untouched.
        :param id_: UniProt accession to fetch
        """
        from BioREST.Uniprot import Uniprot
        u = Uniprot()
        try:
            res = u.retrieve(id_, frmt="fasta")
            # some entries in uniprot are valid but obsolete and return empty string
            if res == "":
                raise Exception
            self._fasta = res[:]
        except Exception:
            # narrowed from a bare except: fetch stays best-effort, but
            # KeyboardInterrupt/SystemExit are no longer swallowed
            pass

    def save_fasta(self, filename):
        """Save FASTA file into a filename

        :param str filename: where to save it
        :raises ValueError: if nothing has been loaded yet
        """
        if self._fasta is None:
            raise ValueError("No fasta was read or downloaded. Nothing to save.")
        with open(filename, "w") as fh:
            fh.write(self._fasta)

    def read_fasta(self, filename):
        """Read exactly one FASTA record from *filename*.

        :param filename: path to a FASTA file containing a single record
        :raises ValueError: if the file holds zero or several records
        """
        with open(filename, "r") as fh:
            data = fh.read()
        # Is there more than one sequence ?
        data = data.split(">")[1:]
        if len(data) > 1 or len(data) == 0:
            raise ValueError(
                """Only one sequence expected to be found. Found %s. Please use MultiFASTA class instead""" % len(data))
        self._data = data
        # exactly one record at this point (enforced just above); the old
        # data.count(">sp|") re-check compared list elements against a prefix
        # string and could never trigger, so it was removed as dead code
        self._fasta = data[0]
        if self.dbtype not in self.known_dbtypes:
            log.warning("Only sp and gi header are recognised so far but sequence and header are loaded")

    @staticmethod
    def _interpret(data):
        # cleanup hook kept for API compatibility; currently a no-op
        return data
| ArnaudKOPP/BioREST | BioREST/Fasta.py | Fasta.py | py | 8,602 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pylab.title",... |
40983404414 | """new fileds are added user
Revision ID: 7758fd2f291e
Revises: 5444eea98e3f
Create Date: 2019-05-03 01:12:53.120773
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7758fd2f291e'
down_revision = '5444eea98e3f'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the new optional profile columns to the user table.
    for column_name in ('country', 'gender', 'relationship_status'):
        op.add_column('user', sa.Column(column_name, sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the profile columns in reverse order of their addition.
    for column_name in ('relationship_status', 'gender', 'country'):
        op.drop_column('user', column_name)
    # ### end Alembic commands ###
| ShashwatMishra/Mini-Facebook | Mini Facebook/migrations/versions/7758fd2f291e_new_fileds_are_added_user.py | 7758fd2f291e_new_fileds_are_added_user.py | py | 911 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
34564583000 | import logging
import os
from datetime import date
from pathlib import Path
from ._version import get_versions
from .watchdog import Watchdog
# Environment variables and if they are required
# (name -> True when the variable is mandatory; all are optional today,
# so _check_vars_exist currently never raises)
ENVIRONMENT_VARS = {
    "TZ": False,
    "INFLUXDB_HOST": False,
    "INFLUXDB_PORT": False,
    "INFLUXDB_DATABASE": False,
    "INFLUXDB_USER": False,
    "INFLUXDB_PASSWORD": False,
    "OPERATION_MODE": False,
    "SSH_LOG_PATH": False,
    "LOG_LEVEL": False,
}

# Package version resolved by versioneer at import time.
__version__ = get_versions()["version"]
def _real_main():
    """Validate the environment, then start intrusion-monitor in the selected mode."""
    # Setup logging
    _logging_setup(copyright=True)
    # Drop variables that are set but empty, then verify the mandatory ones.
    _unset_empty_env(ENVIRONMENT_VARS)
    _check_vars_exist(ENVIRONMENT_VARS)
    # Select if working as a TCP socket (for rsyslog) or as a log watchdog (default)
    mode = os.getenv("OPERATION_MODE")
    if not mode:
        logging.warning('OPERATION_MODE variable is not set. Defaulting to "watchdog"')
        mode = "watchdog"
    elif mode.casefold() not in ("socket", "watchdog"):
        err = f'OPERATION_MODE={mode} is not recognised and this cannot continue"'
        logging.error(err)
        raise EnvironmentError(err)
    else:
        logging.info(f"Using OPERATION_MODE={mode}")
    # Bootstrap intrusion-monitor from the selected mode
    _bootstrap(mode)
def _bootstrap(operation_mode):
"""Initialises intrusion-monitor in either `watchdog` or `socket` operation modes."""
if operation_mode == "watchdog":
log_path = Path(os.getenv("SSH_LOG_PATH", "/watchdog/log/auth.log"))
# Check if file exists and can be read
if not log_path.exists():
err = f"No file was not found and this can't continue. Log path provided is: {log_path.absolute()}"
logging.critical(err)
return FileNotFoundError(err)
elif not os.access(log_path, os.R_OK):
err = f'The file cant be opened. Running: "sudo chmod o+r <Log file>" might solve this issue.'
logging.critical(err)
raise PermissionError(err)
else:
logging.info(f"Log file found at: {log_path.absolute()}")
with open(log_path, "r") as f:
lines = f.readlines()
logging.debug(
"Here are the last 5 lines of the log file:\n\t{}".format(
"\t".join(lines[-5:])
)
)
# Everything seems okay, starting watchdog
watchdog = Watchdog(log_path)
logging.debug(f"So far so good, starting log Watchdog...")
watchdog.start()
elif operation_mode == "socket":
logging.critical(
f"This feature is not yet implemented and this can't continue. OPERATION_MODE is {operation_mode}"
)
raise NotImplementedError(
f"The OPERATION_MODE={operation_mode} is not yet implemented."
)
# server.start()
else:
logging.critical(
f"A critical problem occurred while trying to bootstrap from OPERATION_MODE and this can't continue. "
f"OPERATION_MODE is {operation_mode}"
)
raise EnvironmentError(
"A critical problem occurred while trying to bootstrap from OPERATION_MODE and this can't continue. "
)
def _unset_empty_env(vars):
"""Unset empty environment variables."""
for v in vars:
var = os.getenv(v, None)
if not var and var is not None and len(var) == 0:
del os.environ[v]
logging.warning(
f"Environment variable {v} is set but is empty. Unsetted..."
)
def _logging_setup(copyright=True, version=True):
    """Configure root logging from LOG_LEVEL and emit the startup banner.

    :param copyright: also log the copyright/license lines
    :param version: also log the package version
    """
    requested = os.getenv("LOG_LEVEL", "info").casefold()
    # only "debug" is recognised; anything else (including "info") maps to INFO
    log_level = logging.DEBUG if requested == "debug" else logging.INFO
    logging.basicConfig(
        format="%(asctime)s [%(levelname)s]: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=log_level,
    )
    # Print copyright
    if copyright:
        logging.info(f"Copyright {date.today().year} Afonso Costa")
        logging.info('Licensed under the Apache License, Version 2.0 (the "License");')
    # Print version
    if version:
        logging.info("Version: {}".format(get_versions()["version"]))
    logging.info(
        "Intrusion Monitor: An SSH log watchdog, which exports failed login attempts to an InfluxDB timeseries database."
    )
def _check_vars_exist(vars):
"""Checks if the required variables exist."""
vars_missing = []
for v in [v for v in vars if vars[v]]:
var = os.getenv(v, None)
if not var:
logging.error(f"Environment variable {v} is not set and its mandatory!")
vars_missing.append(v)
if vars_missing:
logging.critical(
"Some mandatory environment variables are not set and this can't continue. Env variables missing: {}".format(
", ".join(vars_missing)
)
)
raise EnvironmentError(
"Some mandatory environment variables are not set. Env variables missing: {}".format(
", ".join(vars_missing)
)
)
def main():
    """Console entry point: run :func:`_real_main`, logging fatal failures.

    Every exception is re-raised after logging so the process still exits
    with the original traceback.
    """
    try:
        _real_main()
    except KeyboardInterrupt:
        logging.error("ERROR: Interrupted by user")
        raise
    except BaseException:
        # explicit BaseException keeps the original bare-except semantics
        # (log, then re-raise) without hiding what is being caught
        logging.critical(
            "Fatal error occurred and intrusion-monitor cannot continue..."
        )
        raise
{
"api_name": "_version.get_versions",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "logging.error",
"... |
26090415788 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from basic.bupt_2017_11_28.type_deco import prt
import joblib
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from basic.bupt_2017_11_28.type_deco import prt
import seaborn as sns
from basic.bupt_2018_1_19.unionfind import UF
'''
User:waiting
Date:2018-01-19
Time:9:45
'''
class Point:
    """A simple 2-D point holding its coordinates as attributes."""

    def __init__(self, x, y):
        self.x, self.y = x, y
def mxpotontheline(points: list):
    """Largest number of points sharing one x value (vertical line) or one
    y value (horizontal line), computed via union-find grouping.
    """
    by_x = UF(points, lambda a, b: a.x == b.x)
    by_y = UF(points, lambda a, b: a.y == b.y)
    best = 0
    for uf in (by_x, by_y):
        uf.grouping()
        for members in uf.groups.values():
            if len(members) > best:
                best = len(members)
    return best
def cal_slope(p1, p2):
    """Slope of the line through *p1* and *p2* as an exact Decimal.

    Returns float('inf') for vertical lines.  Decimal avoids the float
    precision loss that would merge nearly-identical slopes.
    """
    # BUGFIX: Decimal was only imported inside the (mistyped) __main__ guard,
    # so calling this function raised NameError; import it locally instead.
    from decimal import Decimal
    return Decimal(p1.y - p2.y) / Decimal(p1.x - p2.x) if p1.x != p2.x else float('inf')


def mxpotontheline2(points: list):
    """Maximum number of points lying on one straight line (O(n^2)).

    For every anchor point, bucket the points after it by slope; duplicates
    of the anchor count toward every one of its buckets.
    """
    if len(points) < 1:
        return 0
    if len(points) == 2:
        return 2
    ans = 1
    from collections import defaultdict
    for i in range(len(points)):
        d = defaultdict(int)
        same = 0  # duplicates of the anchor point
        for j in range(i + 1, len(points)):
            if points[i].x == points[j].x and points[i].y == points[j].y:
                same += 1
            else:
                d[cal_slope(points[i], points[j])] += 1
        if not d:
            # every remaining point duplicates the anchor
            d[float('inf')] = 0
        for key in d:
            d[key] += same
        # (leftover debug print of the slope buckets removed)
        ans = max(ans, max(d.values()) + 1) if d else ans
    return ans
# BUGFIX: the guard was mistyped as '__main' (missing trailing underscore),
# so this demo block never executed when the script was run directly.
if __name__ == '__main__':
    from decimal import Decimal
    d = defaultdict(int)
    print(mxpotontheline2([Point(0, 0), Point(94911151, 94911150), Point(94911152, 94911151)]))
    x = Decimal(94911150) / Decimal(94911151)
    y = Decimal(949111500) / Decimal(949111510)
{
"api_name": "basic.bupt_2018_1_19.unionfind.UF",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "basic.bupt_2018_1_19.unionfind.UF",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 48,
"usage_type": "call"
}... |
29291642217 | import numpy as np
import statsmodels.api as sm
import pandas as pd
# Significance level for the one-way ANOVA F-test.
alpha = 0.05
# Load the measurement table (no header row) from the Excel workbook;
# assumes each row holds the 7 labs' measurements — TODO confirm sheet layout.
df = pd.read_excel("4_6.xlsx", header=None)
y = df.values  # extract the data matrix
y = y.flatten()  # row-major flatten: each row's values become consecutive
a = np.array(range(1, 8))  # factor labels 1..7 (one per lab)
# Repeat the 7 labels 10 times so x lines up with the flattened y
# (valid only if the sheet is 10 rows x 7 columns — TODO confirm).
x = np.tile(a, (1, 10)).flatten()
d = {'x': x, 'y': y}  # build the model frame (dict of columns)
model = sm.formula.ols("y~C(x)", d).fit()  # fit OLS with lab as a categorical factor
anovat = sm.stats.anova_lm(model)  # one-way analysis of variance table
print(anovat)
# Compare the factor's p-value against alpha; the Chinese messages read
# "the lab has no significant effect" / "the lab has a significant effect".
if anovat.loc['C(x)', 'PR(>F)'] > alpha:
    print("实验室对测量值无显著性影响")
else:
    print("实验室对测量值有显著性影响")
| BattleforAzeroth/MMHomework | 4.6.py | 4.6.py | py | 565 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "statsmodels.api.formula.ols",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.