id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
9708754 | #!/usr/local/bin/python3
# Python Challenge - 12
# http://www.pythonchallenge.com/pc/return/evil.html
# Username: huge; Password: <PASSWORD>
# Keyword: disproportional
def main():
    '''
    Hint: Dealing evils.
    evil1.jpg -> evil2.jpg not .jpg - _.gfx -> evil3.jpg No more evils
    evil4.jpg -> can't be displayed because of errors (not a 404, though)
    Run: `curl -u huge:file http://www.pythonchallenge.com/pc/return/evil4.jpg`
    Returns: Bert is evil! Go back!
    evil2.gfx has 5 jpgs in it
    '''
    # Read the interleaved container once.
    with open('./evils_chall_12/evil2.gfx', 'rb') as gfx:
        data = gfx.read()
    # The container interleaves 5 JPEGs byte-by-byte: byte k belongs to image
    # k % 5, so a stride-5 slice recovers each one.
    # NOTE(review): input comes from './evils_chall_12/' but output goes to
    # './evils/' -- confirm the two directories are intended to differ.
    for i in range(5):
        # BUG FIX: the original used open(...).write(...) without closing,
        # leaking five file handles; a context manager flushes and closes.
        with open('./evils/evil_image' + str(i) + '.jpg', 'wb') as out:
            out.write(data[i::5])
    return 0
if __name__ == '__main__':
    main()
| StarcoderdataPython |
# Read five integers from the user and print their sum.
total = 0
# BUG FIX (idiom): the original used a manual `while` counter and shadowed
# the builtin `sum`; a for-loop over range(5) with a non-shadowing name
# behaves identically (five prompts, one printed total).
for _ in range(5):
    total = total + int(input("Please Enter A Number: "))
print(total)
5132073 | """This module contains the boards that we support."""
from .board import Board, BoardGroup
__all__ = ["Board", "BoardGroup"]
| StarcoderdataPython |
6508588 | <gh_stars>1-10
# Sanity-check script: evaluate a simple expression and show the result.
result = 2 + 2
print(result)
| StarcoderdataPython |
130697 | from django.test import TestCase
from restaurants.models import Place
| StarcoderdataPython |
8183613 | <reponame>vishalbelsare/inventoryanalytics<filename>inventoryanalytics/abc/__init__.py
__all__ = ["abc_analysis","data","test"] | StarcoderdataPython |
8057114 | <reponame>SODALITE-EU/iac-quality-framework<filename>ansiblemetrics/metrics_cal.py
# Python modules
import argparse
import inspect
import json
import os.path
import re
import sys
from io import StringIO
import yaml
# Own modules
from ansiblemetrics.import_metrics import general_metrics, playbook_metrics, tasks_metrics
def load(path):
    """Return a StringIO with the content of the file at *path*, or None when
    the path is not an existing regular file.

    Each run of exactly two whitespace characters is rewritten to a tab
    (via the regex replacement escape) and tabs are then expanded to a
    2-column stop, normalising the indentation of the script.
    """
    if not os.path.isfile(path):
        return None
    buffer = StringIO()
    with open(path, 'r') as handle:
        raw_lines = handle.readlines()
    for raw in raw_lines:
        normalised = re.sub(r'\s{2,2}', '\\t', raw).expandtabs(2)
        buffer.write(normalised)
    return buffer
class MetricsCal():
    """Run the metric classes imported from ``ansiblemetrics`` over an
    Ansible IaC script (held in a StringIO) and collect their results."""

    def _execute(self, metric, script):
        """
        Returns a triple (count, relative=None, occurrences=None) as result of the metric.
        Relative and occurrences are None by default. If the metric provides a relative or occurrences value, they will be set to their actual value
        """
        try:
            m = metric(script)
            count = m.count()
            relative = None
            occurrences = None
            # Check if the metric uses the argument 'relative' or 'occurrences.'
            spec = inspect.getfullargspec(m.count)
            if 'relative' in spec.args:
                relative = round(m.count(relative=True), 2)
            if 'occurrences' in spec.args:
                occurrences = round(m.count(occurrences=True), 2)
            return (count, relative, occurrences)
        except Exception:
            # A failing metric yields an all-None triple instead of aborting
            # the whole run.
            return (None, None, None)

    def _executeOnPlaybookTasks(self, script):
        """Run the tasks-level metrics only on the tasks/pre_tasks sections of
        a playbook. Returns {} when the script is empty, is not valid YAML, or
        contains no usable tasks."""
        try:
            yml = yaml.safe_load(script.getvalue())
            if yml is None:
                return {}
            tasks = []
            for d in yml:
                if d.get('pre_tasks') is not None:
                    tasks.extend(d.get('pre_tasks'))
                if d.get('tasks') is not None:
                    tasks.extend(d.get('tasks'))
            # using list comprehension to remove None values in list
            tasks = [i for i in tasks if i]
            if len(tasks) == 0:
                return {}
            # Re-serialise the extracted tasks so the metrics receive a
            # StringIO, the same interface as a whole script.
            tasks = StringIO(yaml.dump(tasks))
        except yaml.YAMLError:
            return {}
        results = {}
        for name in tasks_metrics:
            metric_tuple = self._execute(tasks_metrics[name], tasks)
            results[name] = {}
            results[name]['count'] = metric_tuple[0]
            if metric_tuple[1] is not None:
                results[name]['count_relative'] = metric_tuple[1]
            elif metric_tuple[2] is not None:
                results[name]['count_occurrences'] = metric_tuple[2]
        return results

    def execute(self, script, metrics_type):
        """
        Executes metrics on a given script and returns a dictionary of results
        script: str -- a StringIO object representing a IaC script in Ansible
        metrics_type: str -- possible options: 'general', 'playbook', 'tasks', 'playbook_and_general', 'tasks_and_general'
        """
        # Default: general metrics only (any unrecognised metrics_type falls
        # through to this).
        metrics = general_metrics
        results = {}
        if metrics_type == 'playbook':
            metrics = playbook_metrics
            results = self._executeOnPlaybookTasks(script)
        elif metrics_type == 'tasks':
            metrics = tasks_metrics
        elif metrics_type == 'playbook_and_general':
            metrics = dict(list(general_metrics.items()) + list(playbook_metrics.items()))
            results = self._executeOnPlaybookTasks(script)
        elif metrics_type == 'tasks_and_general':
            metrics = dict(list(general_metrics.items()) + list(tasks_metrics.items()))
        # Execute metrics
        for name in metrics:
            metric_tuple = self._execute(metrics[name], script)
            results[name] = {}
            results[name]['count'] = metric_tuple[0]
            if metric_tuple[1] is not None:
                results[name]['count_relative'] = metric_tuple[1]
            elif metric_tuple[2] is not None:
                results[name]['count_occurrences'] = metric_tuple[2]
        return results

    def calculate(self, file, metrics_type):
        """Load *file*, verify it parses as YAML, run execute() and return the
        results as pretty-printed JSON. Exits the process (1 or 2) on an
        unreadable or non-YAML input file."""
        script = load(file)
        if script is None:
            print('\033[91m' + 'Error: failed to load the file {}. Please insert a valid file!'.format(
                file) + '\033[0m')
            sys.exit(1)
        yml = None
        try:
            yml = yaml.safe_load(script.getvalue())
            for value in yml:
                print (value)
        except yaml.YAMLError:
            print('The input file is not a yaml file')
            exit(2)
        if yml is None or len(yml) == 0:
            print('An error occurred')
            exit(2)
        # if dict(yml):
        #     yml = [yml]
        i = 0
        results = self.execute(script, metrics_type)
        script.close()
        return json.dumps(results, indent=4, sort_keys=True)
| StarcoderdataPython |
1956407 | <filename>pyN/Network.py<gh_stars>1-10
'''
Network class for running populations.
Should be agnostic for population type
'''
import numpy as np
from synapse import *
from datetime import datetime
import os, sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import networkx as nx
import pickle
#import ipdb as pdb
class Network():
    """Wire Population instances together and run the simulation.

    Deliberately agnostic about the population type: only the Population
    interface is used (name, N, receiver, initialize, update_currents,
    update_state, update_synapses, write_files, close).
    """

    def __init__(self, populations=None):
        """
        The constructor
        @params populations array Array of Population Instances to be added to the Network
        """
        # BUG FIX: the original default `populations=[]` is a shared mutable
        # default argument; None + fresh list is backward compatible.
        if populations is None:
            populations = []
        self.populations = {}
        self.graph = nx.DiGraph()
        for p in populations:
            self.populations[p.name] = p
            self.graph.add_node(p.name, size=p.N)

    def connect(self, pre, post, synapses, mode="excitatory", delay_matrix=None, delay=0.25, std=0.05, scale=1.0):
        """
        Connect two populations together in a network
        @param pre str|Population Population (or its name) sending spikes.
        @param post str|Population Population (or its name) receiving spikes.
        @param synapses str|np.ndarray Named connectivity scheme or explicit weight matrix.
        @param delay_matrix np.ndarray Optional explicit per-synapse delays.
        @return Boolean True on success.
        """
        if type(pre) == str:
            pre = self.populations[pre]
        if type(post) == str:
            post = self.populations[post]
        if type(synapses) == str:
            (gen_synapses, gen_delay_matrix) = generate_synapses(pre_population=pre, post_population=post, connectivity=synapses, delay=delay, std=std, scale=scale)
            if mode == "excitatory":
                synapses = gen_synapses
            elif mode == "inhibitory":
                # since gen_synapses[:,:,0] is initially all zero we are still ok
                synapses = -1.0 * gen_synapses
            # BUG FIX: `delay_matrix == None` is an element-wise comparison
            # when delay_matrix is a numpy array (its truth value is
            # ambiguous and raises); identity (`is None`) is the correct test.
            if delay_matrix is None:
                delay_matrix = gen_delay_matrix  # use the one previously generated
        post.receiver.append({'from': pre.name, 'syn': synapses, 'mode': mode, 'delay': delay_matrix, 'delay_indices': None, 'connected': True, 'disabled_syn': None})
        self.graph.add_edge(pre.name, post.name, mode=mode)
        return True

    def disconnect(self, pre, post):
        """
        Toggles the connection off between presynaptic and postsynaptic populations. Useful in disinhibition mechanisms
        @param pre str String pointing to Population in network.
        @param post str String pointing to Population in network.
        """
        if type(pre) == str:
            pre = self.populations[pre]
        if type(post) == str:
            post = self.populations[post]
        for recv in post.receiver:
            if (recv['from'] == pre.name and recv['connected'] == True):
                # Stash the live weights so reconnect() can restore them.
                recv['disabled_syn'] = np.copy(recv['syn'])
                recv['syn'] = np.zeros(recv['syn'].shape)  # zero it out!
                recv['connected'] = False
                self.graph.remove_edge(pre.name, post.name)
                break  # we only need to find the first one

    def reconnect(self, pre, post):
        """
        Toggles the connection back on between presynaptic and postsynaptic populations. Useful in disinhibition mechanisms
        @param pre str String pointing to Population in network.
        @param post str String pointing to Population in network.
        """
        if type(pre) == str:
            pre = self.populations[pre]
        if type(post) == str:
            post = self.populations[post]
        for recv in post.receiver:
            if (recv['from'] == pre.name and recv['connected'] == False):
                recv['syn'] = np.copy(recv['disabled_syn'])  # restore the synapses
                recv['connected'] = True
                self.graph.add_edge(pre.name, post.name, mode=recv['mode'])
                break

    def get(self, pname):
        """
        Returns a Population if it is in class, None otherwise
        @param pname str
        """
        # BUG FIX: the original indexed self.populations[pname] directly and
        # raised KeyError for unknown names instead of returning None as
        # documented; a membership test matches the docstring contract.
        if pname in self.populations:
            return self.populations[pname]
        else:
            print("%s not found!" % pname)
            return None

    def setup(self, experiment_name='My Experiment', T=50, dt=0.125, integration_time=30, I_ext={}, spike_delta=50, save_data='./', properties_to_save=[], stdp=True):
        """
        Setup simulation state variables prior to running it
        @param experiment_name str Human-readable run name stored in the metadata.
        @param T int Total time to simulate (milliseconds).
        @param dt float Time step in difference equation (milliseconds).
        @param integration_time float Number of milliseconds to integrate over when convolving spike rasters with postsynaptic current decay function
        @param I_ext dict Dictionary containing key-value pairs of Population names to stimulate and associated array of stimulus dictionaries.
        @param spike_delta Height of voltage spike. Not relevant for postsynaptic potential but may affect change in adaptation variable.
        @param save_data str Path to save data to
        @param properties_to_save array Array of which Population properties to save
        @param stdp Boolean Whether to use spike-time-dependent plasticity
        """
        self.now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        self.T = T
        self.dt = dt
        self.time_trace = np.arange(0, T + dt, dt)  # time array
        self.stdp = stdp
        self.I_ext = I_ext
        self.params = {
            'experiment_name': experiment_name,
            'time_stamp': self.now,
            'T': self.T,
            'dt': self.dt,
            'populations': {},
            'properties_to_save': properties_to_save,
            'I_ext': I_ext,
            'save_data': save_data,
            'experiment_log': save_data + self.now + '-' + 'experiment.pkl'
        }
        for name, p in self.populations.items():
            self.params['populations'][name] = p.N
        if (save_data):
            # BUG FIX (robustness): use a context manager so the parameter
            # log is closed even if pickling fails.
            with open(self.params['experiment_log'], 'wb') as params_file:
                pickle.dump(self.params, params_file)
        # initialize populations for simulation.
        for name, p in self.populations.items():
            p.initialize(T, len(self.time_trace), integration_time, save_data, self.now, properties_to_save, dt)
        return self.params

    def simulate(self, experiment_name='<NAME>', T=50, dt=0.125, integration_time=30, I_ext={}, spike_delta=50, save_data='./', properties_to_save=[], stdp=True):
        """
        Simulate the Network
        """
        params = self.setup(experiment_name, T, dt, integration_time, I_ext, spike_delta, save_data, properties_to_save, stdp)
        # run the simulation one time step at a time (index 0 holds the
        # initial state, so iteration starts at 1)
        for i, t in enumerate(self.time_trace[1:], 1):
            # update state variables
            print(str(i) + " - " + str(t) + " - " + str(self.time_trace.shape[0]))
            print('i=%d' % i)
            for name, p in self.populations.items():
                p.update_currents(all_populations=self.populations, I_ext=I_ext, i=i, t=t, dt=self.dt)
                p.update_state(i=i, T=self.T, t=t, dt=self.dt)
                if self.stdp:
                    p.update_synapses(all_populations=self.populations, i=i)
            # TODO: move this to be called by populations
            if (save_data):
                # we don't want to update synapses of one population after
                # saving the data of another so that's why we do save_data
                # after the simulation step is finished for all populations
                for name, p in self.populations.items():
                    p.write_files(i)
            print("%0.3f%% done. t=%d msec" % ((float(i) / len(self.time_trace) * 100.0), t))
        # simulation done - close the files
        if (save_data):
            for name, p in self.populations.items():
                p.close()
            return params  # can be consumed by load_data right away
        else:
            return True
| StarcoderdataPython |
274511 | <gh_stars>1-10
def GenerateCombination(N, ObjectNumber):
    """Return [[ObjectNumber, i, j], ...] for i in 0..N-1, where j is the
    following index with wrap-around (the last index pairs with 0)."""
    triples = []
    for index in range(N):
        # (index + 1) % N expresses the wrap-around without a branch.
        successor = (index + 1) % N
        triples.append([ObjectNumber, index, successor])
    return triples
| StarcoderdataPython |
# Read two strings and print the lexicographically larger one
# (on a tie the first input is printed, matching the original branch order).
text0 = str(input('Digite algo: '))
text1 = str(input('Digite algo: '))
larger = text1 if text0 < text1 else text0
print(larger)
| StarcoderdataPython |
1979595 | """
Description: Developed to prototype an emoji led display installed on the back of the car.
Author: <NAME>
Credits: F.Stern 2014 for multilineMAX7219.py python library
"""
# Import library
import multilineMAX7219_2x2 as LEDMatrix
import time
# Initialise the library and the MAX7219/8x8LED arrays
LEDMatrix.init()
try:
    # Display a stationary greeting for two seconds, then blank the panel.
    LEDMatrix.static_message("Hi!")
    time.sleep(2)
    LEDMatrix.clear_all()
except KeyboardInterrupt:
    # On Ctrl-C, show a farewell message before clearing the display.
    LEDMatrix.static_message("Bye!")
    time.sleep(2)
    LEDMatrix.clear_all()
| StarcoderdataPython |
11360989 | from . import settings
from pythonjsonlogger import jsonlogger
class GunicornJsonFormatter(jsonlogger.JsonFormatter):
    """JSON log formatter that stamps every record with the Logstash routing
    fields configured in settings."""

    def add_fields(self, log_record, record, message_dict):
        # Let the base JSON formatter populate the standard fields first.
        super().add_fields(log_record, record, message_dict)
        # Then attach the Logstash message type/subtype and any extra fields
        # (extras applied last, so they can override the two keys above).
        log_record['type'] = settings.LOGSTASH_MESSAGE_TYPE
        log_record['subtype'] = settings.LOGSTASH_MESSAGE_SUBTYPE
        log_record.update(settings.LOGSTASH_EXTRA)
8199893 | import csv
import argparse
import sys
import os
parser = argparse.ArgumentParser(description = 'Mismatch writer for Bert')
parser.add_argument('--data_dir', type=str, required=True, help='Data directory')
parser.add_argument('--output_dir', type=str, required=True, help='Output directory')


def main():
    """Copy the HANS evaluation lines whose indices are listed (one per line)
    in <output_dir>/hans_mismatched.txt into
    <data_dir>/heuristics_evaluation_mismatched.txt, preserving the header.

    Indices in the mismatch file are 0-based relative to the data rows, so 1
    is added to skip the header line.
    """
    parsed = parser.parse_known_args(sys.argv)
    input_file = os.path.join(parsed[0].data_dir, 'heuristics_evaluation_set.txt')
    print("Reading input file: " + input_file)
    with open(input_file, 'r') as tsvfile:
        data = tsvfile.readlines()
    mismatch_file = os.path.join(parsed[0].output_dir, 'hans_mismatched.txt')
    output_file = os.path.join(parsed[0].data_dir, 'heuristics_evaluation_mismatched.txt')
    print("Reading mismatch file: " + mismatch_file)
    print("Writing output file: " + output_file)
    written_lines = 0
    with open(output_file, "w") as writer:
        # Write header
        writer.write(data[0])
        with open(mismatch_file, "r") as reader:
            # BUG FIX: the original looped with `while line is not None and
            # line is not ''` -- `is not` compares identity, not equality,
            # which is unreliable for strings (and a SyntaxWarning on modern
            # Python). Iterating the file and skipping blank lines is the
            # equivalent, correct loop.
            for line in reader:
                stripped = line.strip()
                if not stripped:
                    continue
                idx = int(stripped) + 1
                if (idx >= len(data)):
                    raise RuntimeError('Index out of range of input file')
                writer.write(data[idx])
                written_lines = written_lines + 1
    print("Wrote " + str(written_lines) + " lines.")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
8185757 | <reponame>nhutnamhcmus/coding-bat-solutions
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: <NAME> (<NAME>)
# © 2020
"""
We have two monkeys, a and b, and the parameters a_smile and b_smile indicate if each is smiling. We are in trouble if they are both smiling or if neither of them is smiling. Return True if we are in trouble.
monkey_trouble(True, True) → True
monkey_trouble(False, False) → True
monkey_trouble(True, False) → False
"""
import unittest
def monkey_trouble(a_smile, b_smile):
    """Return True when we are in trouble: both monkeys smiling, or neither."""
    # De Morgan: (not a and not b) is exactly not (a or b).
    return (a_smile and b_smile) or not (a_smile or b_smile)
class MyTest(unittest.TestCase):
    """Exhaustive truth-table tests for monkey_trouble (all four boolean pairs)."""

    def test_case_00(self):
        # both smiling -> trouble
        self.assertEqual(monkey_trouble(True, True), True)

    def test_case_01(self):
        # neither smiling -> trouble
        self.assertEqual(monkey_trouble(False, False), True)

    def test_case_02(self):
        # exactly one smiling -> no trouble
        self.assertEqual(monkey_trouble(True, False), False)

    def test_case_03(self):
        # exactly one smiling -> no trouble
        self.assertEqual(monkey_trouble(False, True), False)


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
8867 | <reponame>adiHusky/uber_backend
from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status
from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io
from uuid import UUID
from bson.objectid import ObjectId
# straight mongo access
from pymongo import MongoClient
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
# Error reporting: forward unhandled Flask exceptions to Sentry.
# NOTE(review): the DSN is hard-coded in source; it should come from
# configuration or an environment variable.
sentry_sdk.init(
    dsn="https://acea88276810494e96828c4fd0e1471f@o555579.ingest.sentry.io/5685529",
    integrations=[FlaskIntegration()],
    # Set traces_sample_rate to 1.0 to capture 100%
    # of transactions for performance monitoring.
    # We recommend adjusting this value in production.
    traces_sample_rate=1.0,
    # By default the SDK will try to use the SENTRY_RELEASE
    # environment variable, or infer a git commit
    # SHA as release, however you may want to set
    # something more human-readable.
    # release="myapp@1.0.0",
)
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional payload dict,
    serialisable for a JSON error response."""

    # Default HTTP status used when the caller does not supply one.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        if status_code is not None:
            # Shadow the class-level default on this instance only.
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return a dict of the payload plus the message, ready for jsonify()."""
        body = dict(self.payload or ())
        body['message'] = self.message
        return body
# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
# NOTE(review): Atlas credentials are embedded in the URI (partially
# anonymised here); move them to environment variables.
mongo_client = MongoClient(
    "mongodb+srv://Mahitha-Maddi:<EMAIL>/test")
app = Flask(__name__)
# CORS(app)
# Allow cross-origin requests from any origin on every route.
CORS(app, resources={r"/*": {"origins": "*"}})
basedir = os.path.abspath(os.path.dirname(__file__))
# Here are my datasets
bookings = dict()  # in-memory cache of 'available' documents, keyed by _id
################
# Apply to mongo
################
def atlas_connect():
    """Open a connection to the MongoDB Atlas cluster and return the ``test``
    database handle.

    NOTE(review): credentials are embedded in the URI; they should come from
    configuration/environment instead.
    """
    # BUG FIX: the original called ``pymongo.MongoClient`` but the file only
    # does ``from pymongo import MongoClient`` and never imports ``pymongo``
    # itself, so this function raised NameError when called.
    client = MongoClient(
        "mongodb+srv://Mahitha-Maddi:<EMAIL>%<EMAIL>/test")
    db = client.test
    # Return the handle so callers can actually use the connection
    # (the original computed it and discarded it, returning None).
    return db
# database access layer
def insert_one(r):
    """Insert a single booking document *r* into Uber.bookings, timing and
    printing how long the whole operation took."""
    start_time = datetime.now()
    with mongo_client:
        # start_time_db = datetime.now()
        db = mongo_client['Uber']
        # microseconds_caching_db = (datetime.now() - start_time_db).microseconds
        # print("*** It took " + str(microseconds_caching_db) + " microseconds to cache mongo handle.")
        print("...insert_one() to mongo: ", r)
        try:
            mongo_collection = db['bookings']
            result = mongo_collection.insert_one(r)
            print("inserted _ids: ", result.inserted_id)
        except Exception as e:
            # Log-and-continue: a failed insert must not crash the request.
            print(e)
    microseconds_doing_mongo_work = (datetime.now() - start_time).microseconds
    print("*** It took " + str(microseconds_doing_mongo_work) +
          " microseconds to insert_one.")
def tryexcept(requesto, key, default):
    """Return ``requesto.json[key]``, or *default* when the key is missing or
    the request has no JSON body."""
    try:
        return requesto.json[key]
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit. Exception still covers KeyError
        # (missing key) and TypeError (json is None).
        return default
def ssm():
    """Return the number of whole seconds elapsed since local midnight, as a
    string ("seconds since midnight")."""
    now = datetime.now()
    start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
    elapsed = now - start_of_day
    return str(elapsed.seconds)
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    """Convert a raised InvalidUsage into a JSON response carrying its
    payload/message and HTTP status code."""
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
# endpoint to check Availability
@app.route("/checkAvailability", methods=["POST"])
def check_availability():
    """Return available rides matching the JSON body's source, destination
    and date (regex substring match against the 'available' collection),
    sorted by source, with _id excluded."""
    source = request.json['source']
    destination = request.json['destination']
    date = request.json['date']
    with mongo_client:
        #raise InvalidUsage('This view is gone', status_code=410)
        db = mongo_client['Uber']
        mongo_collection = db['available']
        print(source)
        # NOTE(review): the regex is built straight from user input, so
        # regex metacharacters act as patterns -- confirm whether
        # re.escape() should be applied first.
        myquery = {"source": {"$regex": str(source)}, "destination": {
            "$regex": str(destination)}, "date": {"$regex": str(date)}}
        cursor = dict()
        cursor = mongo_collection.find(myquery, {"_id": 0})
        records = list(cursor)
        howmany = len(records)
        print('found ' + str(howmany) + ' bookings!')
        sorted_records = sorted(records, key=lambda t: t['source'])
        print(type(sorted_records))
        return jsonify(sorted_records)
# endpoint to create new Booking
@app.route("/book", methods=["POST"])
def book_bus():
    """Create a booking from the JSON body and persist it to Mongo.

    Expects source, destination, date, startTime, endTime, user and
    busnumber; a fresh ObjectId string and the current timestamp are added
    before the document is stored and echoed back as JSON.
    """
    source = request.json['source']
    destination = request.json['destination']
    date = request.json['date']
    startTime = request.json['startTime']
    endTime = request.json['endTime']
    user = request.json['user']
    busnumber = request.json['busnumber']
    booking = dict(user=user, source=source, destination=destination, busnumber=busnumber,
                   date=date, startTime=startTime, endTime=endTime, bookeddate=datetime.now(
                   ).strftime("%Y-%m-%d %H:%M:%S"),
                   _id=str(ObjectId()))
    insert_one(booking)
    return jsonify(booking)
@app.route("/bookings-results", methods=["GET"])
def get_tweets_results():
    """Return every stored booking, sorted by source.

    NOTE(review): the function name says "tweets" but it serves bookings --
    likely a leftover from another project; confirm before renaming.
    """
    global bookings
    with mongo_client:
        db = mongo_client['Uber']
        mongo_collection = db['bookings']
        cursor = mongo_collection.find({})
        records = list(cursor)
        howmany = len(records)
        print('found ' + str(howmany) + ' bookings!')
        sorted_records = sorted(records, key=lambda t: t['source'])
        return jsonify(sorted_records)
##################
# Apply from mongo
##################
def applyRecordLevelUpdates():
    """Per-request hook body; intentionally a no-op placeholder for now."""
    return None
def applyCollectionLevelUpdates():
    """Load every document of the 'available' collection into the
    module-level ``bookings`` dict, keyed by _id. Called once before the
    first request to warm the in-memory cache."""
    global bookings
    with mongo_client:
        db = mongo_client['Uber']
        mongo_collection = db['available']
        cursor = mongo_collection.find({})
        records = list(cursor)
        # bookings[0] = records[0]
        howmany = len(records)
        print('found ' + str(howmany) + ' bookings!')
        sorted_records = sorted(records, key=lambda t: t['source'])
        # return json.dumps({"results": sorted_records })
        for booking in sorted_records:
            bookings[booking['_id']] = booking
@app.route("/")
def home():
    """Landing page: a minimal HTML greeting confirming the API is up."""
    return """Welcome to Uber backend!<br/>"""
##################
# ADMINISTRATION #
##################
# This runs once before the first single request
# Used to bootstrap our collections
@app.before_first_request
def before_first_request_func():
    """Warm the in-memory bookings cache from Mongo exactly once."""
    applyCollectionLevelUpdates()
# This runs once before any request
@app.before_request
def before_request_func():
    """Apply per-request record-level updates (currently a no-op)."""
    applyRecordLevelUpdates()
############################
# INFO on containerization #
############################
# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose
if __name__ == '__main__':
    # Bind to all interfaces so the API is reachable from other containers.
    app.run(debug=True, host='0.0.0.0')
| StarcoderdataPython |
3233608 | print(f'Loading {__file__}')
# area_dets = [pilatus100k]
# all_area_dets = [pilatus100k, tetramm]
area_dets = [lambda_det]
all_area_dets = [lambda_det, quadem]
@bpp.stage_decorator(all_area_dets)
def reflection_scan(alpha_start, alpha_stop, num, detector='lambda_det', precount_time=1, exp_time=1, default_att=1e-7, tilt_stage=False, md=None):
    """Step the incidence angle alpha over `num` points and, at each point,
    auto-range the attenuators from a short pre-count before taking the real
    exposure.

    @param detector str Area-detector name ('lambda_det' or 'pilatus100k').
    @param precount_time float Short exposure used to estimate the count rate.
    @param exp_time float Exposure time of the actual measurement.
    @param tilt_stage bool If True also tilt the sample chi with alpha.
    NOTE(review): reads/writes the module-level signals created by
    expert_reflection_scan (default_attenuation, attenuation_factor_signal,
    attenuator_name_signal, exposure_time) -- confirm it is only ever run
    inside that plan.
    """
    for alpha in np.linspace(alpha_start, alpha_stop, num):
        # Move to the good geometry position
        if tilt_stage:
            yield from nabt(alpha, alpha, alpha*0.025)
        else:
            yield from mabt(alpha, alpha, 0)
        # yield from mabt(geo.alpha=0,geo.samchi=x,geo.beta=2*x)
        yield from bps.sleep(5)
        # Move the default attenuator in for the pre-count
        def_att = yield from put_default_absorbers(energy.energy.position,
                                                   default_attenuation=default_attenuation.get())
        # Set the exposure time for the pre-count
        yield from det_exposure_time(precount_time, precount_time)
        # Take the pre-count data
        yield from bps.mv(shutter, 1)
        ret = yield from bps.trigger_and_read(area_dets, name='precount')
        yield from bps.mv(shutter, 0)
        if ret is None:
            # in simulation mode
            continue
        else:
            # Read the maximum count on a pixel from the detector
            i_max = ret['%s_stats4_max_value'%detector]['value']
            i_trigger = 100000
            # if (geo.alpha.position) < 1:
            #     i_trigger = i_trigger*10
            # previously 100000 and 200000
            # Look at the maximum count of the pre-count and adjust the
            # default attenuation by decades until it lands in [100, i_trigger]
            # (or the attenuation hits its upper bound of 1).
            while (i_max < 100 or i_max > i_trigger) and default_attenuation.get() < 1:
                if i_max > 2*i_trigger:
                    # If i_max is too high, attenuate more
                    yield from bps.mv(default_attenuation, default_attenuation.get() / 10)
                elif i_max < 100:
                    # If i_max is too low, attenuate less
                    yield from bps.mv(default_attenuation, default_attenuation.get() * 10)
                else:
                    print('You should not be there!')
                    break
                def_att = yield from put_default_absorbers(energy.energy.position,
                                                           default_attenuation=default_attenuation.get())
                # Re-take the pre-count data
                yield from bps.mv(shutter, 1)
                ret = yield from bps.trigger_and_read(area_dets,
                                                      name='precount')
                yield from bps.mv(shutter, 0)
                # Re-read the maximum count on a pixel from the detector
                i_max = ret['%s_stats4_max_value'%detector]['value']
        # Adjust the absorbers to avoid saturation of detector
        best_at, attenuation_factor, best_att_name = yield from calculate_and_set_absorbers(energy=energy.energy.position,
                                                                                            i_max=i_max,
                                                                                            att=def_att,
                                                                                            precount_time=precount_time)
        # Upload the attenuation factor for the metadata
        yield from bps.mv(attenuation_factor_signal, attenuation_factor)
        yield from bps.mv(attenuator_name_signal, best_att_name)
        # THIS CODE WAS SO THAT A MINIMUM NUMBER OF ABSORBERS ARE IN THE BEAM
        # if best_att_name < 'att2':
        #     yield from bps.mv(abs2, 2)
        #     best_at, attenuation_factor, best_att_name, att_pos = best_att(1E-2)
        #     yield from bps.mv(attenuation_factor_signal, attenuation_factor)
        #     yield from bps.mv(attenuator_name_signal, best_att_name)
        # Set the exposure time to the defined exp_time for the measurement
        yield from det_exposure_time(exp_time, exp_time)
        yield from bps.mv(exposure_time, exp_time)
        # ToDo: is that really useful now
        yield from bps.mv(shutter, 1)
        # Add this because the QuadEM I0
        yield from bps.sleep(1)
        yield from bps.trigger_and_read(all_area_dets +
                                        [geo] +
                                        [attenuation_factor_signal] +
                                        [attenuator_name_signal] +
                                        [exposure_time],
                                        name='primary')
        yield from bps.mv(shutter, 0)
def night_scan():
    """Run three back-to-back overnight reflectivity scans on the water test
    samples."""
    for sample in ('test_water10', 'test_water11', 'test_water12'):
        yield from expert_reflection_scan(md={'sample_name': sample})
def fast_scan(name = 'test', tilt_stage=False):
    """Run a single reflectivity scan with a caller-chosen sample name."""
    metadata = {'sample_name': name}
    yield from expert_reflection_scan(md=metadata, tilt_stage=tilt_stage)
def expert_reflection_scan(md=None, detector='lambda_det',tilt_stage=False):
    """
    Macros to set all the parameters in order to record all the required information for further analysis,
    such as the attenuation factors, detector='lambda_det'
    :param detector: A string which is the detector name
    :type detector: string, can be either 'lambda_det' or 'pilatus100k'
    """
    #XF:12ID1-ES{Det:Lambda}ROI1:MinX
    # Bluesky command to record metadata
    base_md = {'plan_name': 'reflection_scan',
               'cycle': RE.md['cycle'],
               'proposal_number': RE.md['cycle'] + '_' + RE.md['main_proposer'],
               'detector': detector,
               'energy': energy.energy.position,
               'rois': [202, 190, 202, 214,30,12],
               'geo_param': [geo.L1.get(), geo.L2.get(), geo.L3.get(), geo.L4.get()],
               'slit_s1': [S1.top.position - S1.bottom.position, S1.outb.position - S1.inb.position],
               'slit_s2': [S2.vg.position, S2.hg.position],
               'x2': [geo.stblx2.position],
               }
    base_md.update(md or {})
    # Module-level signals shared with reflection_scan(), which reads and
    # updates them at every angle.
    global attenuation_factor_signal, exposure_time, attenuator_name_signal, default_attenuation
    attenuator_name_signal = Signal(name='attenuator_name', value='abs1')
    attenuation_factor_signal = Signal(name='attenuation', value=1e-7)
    exposure_time = Signal(name='exposure_time', value=1)
    default_attenuation = Signal(name='default-attenuation', value=1e-7)
    # Disable the plot during the reflectivity scan
    bec.disable_plots()
    # Bluesky command to start the document
    yield from bps.open_run(md=base_md)
    # Five angular sub-ranges with increasing exposure demands; each call to
    # reflection_scan() re-uses the (auto-ranged) default attenuation.
    print('1st set starting')
    # Move stable X2
    #yield from bps.mvr(geo.stblx2, -0.5)
    yield from bps.sleep(3)
    alpha_start, alpha_stop, num, exp_time, precount_time = 0.05, 0.2, 20, 4, 0.1
    yield from reflection_scan(alpha_start=alpha_start,
                               alpha_stop=alpha_stop,
                               num=num,
                               detector=detector,
                               precount_time=precount_time,
                               exp_time=exp_time,
                               default_att = default_attenuation.get(),
                               md=md,
                               tilt_stage=tilt_stage)
    print('1st set done')
    print('2nd set starting')
    # Move stable X2
    # yield from bps.mvr(geo.stblx2, -0.5)
    yield from bps.sleep(3)
    alpha_start, alpha_stop, num, exp_time, precount_time = 0.2, 0.6, 17, 4, 0.1
    yield from reflection_scan(alpha_start=alpha_start,
                               alpha_stop=alpha_stop,
                               num=num,
                               detector=detector,
                               precount_time=precount_time,
                               exp_time=exp_time,
                               default_att = default_attenuation.get(),
                               md=md,
                               tilt_stage=tilt_stage)
    print('2nd set done', 'default attenuation is', default_attenuation.get())
    print('3rd set starting')
    # Move stable X2
    #yield from bps.mvr(geo.stblx2, -0.5)
    yield from bps.sleep(3)
    alpha_start, alpha_stop, num, exp_time, precount_time = 0.6, 1, 9, 4, 0.1
    yield from reflection_scan(alpha_start=alpha_start,
                               alpha_stop=alpha_stop,
                               num=num,
                               detector=detector,
                               precount_time=precount_time,
                               exp_time=exp_time,
                               default_att = default_attenuation.get(),
                               md=md,
                               tilt_stage=tilt_stage)
    print('3rd set done')
    print('4th set starting')
    # Move stable X2
    #yield from bps.mvr(geo.stblx2, -0.5)
    yield from bps.sleep(3)
    alpha_start, alpha_stop, num, exp_time, precount_time = 1, 2., 21, 4, 0.1
    yield from reflection_scan(alpha_start=alpha_start,
                               alpha_stop=alpha_stop,
                               num=num,
                               detector=detector,
                               precount_time=precount_time,
                               exp_time=exp_time,
                               default_att = default_attenuation.get(),
                               md=md,
                               tilt_stage=tilt_stage)
    print('4th set done')
    print('5th set starting')
    # Move stable X2 (only this last set actually shifts it)
    yield from bps.mvr(geo.stblx2, -0.5)
    yield from bps.sleep(3)
    alpha_start, alpha_stop, num, exp_time, precount_time = 2, 3, 11, 5, 0.1
    yield from reflection_scan(alpha_start=alpha_start,
                               alpha_stop=alpha_stop,
                               num=num,
                               detector=detector,
                               precount_time=precount_time,
                               exp_time=exp_time,
                               default_att = default_attenuation.get(),
                               md=md,
                               tilt_stage=tilt_stage)
    print('5th set done')
    # Bluesky command to stop recording metadata
    yield from bps.close_run()
    # Re-enable plotting after the reflectivity scan
    bec.enable_plots()
    yield from bps.mv(abs2, 5)
    print('The reflectivity scan is over')
3539279 | <reponame>DmPo/Schemaorg_CivicOS
'''
This module provides some datetime.tzinfo implementations.
All those classes are taken from the Python documentation.
'''
from datetime import timedelta, tzinfo
import time
# Zero-length offset shared by the tzinfo implementations below.
ZERO = timedelta(0)
class Utc(tzinfo):
    '''UTC
    Coordinated Universal Time: a fixed zero offset with no daylight saving.
    '''

    def utcoffset(self, dt):
        '''
        UTC is never offset from itself, so the offset is always zero.
        '''
        return timedelta(0)

    def tzname(self, dt):
        '''
        Return the time zone name corresponding to the datetime object dt, as a string.
        '''
        return "UTC"

    def dst(self, dt):
        '''
        UTC has no daylight saving time, so the adjustment is always zero.
        '''
        return timedelta(0)
UTC = Utc()  # the default, shared instance for UTC.
class FixedOffset(tzinfo):
    '''
    A tzinfo with a constant UTC offset and a fixed display name.
    Offsets are positive east of UTC and negative west of UTC; note that
    FixedOffset(0, 0, "UTC") is another way to build a UTC tzinfo object.
    '''

    def __init__(self, offset_hours, offset_minutes, name):
        '''
        Store the offset (hours plus minutes east of UTC) and the zone name.
        '''
        self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
        self.__name = name

    def utcoffset(self, dt):
        '''
        Return the constant offset from UTC.
        '''
        return self.__offset

    def tzname(self, dt):
        '''
        Return the time zone name corresponding to the datetime object dt, as
        a string.
        '''
        return self.__name

    def dst(self, dt):
        '''
        Fixed-offset zones never observe daylight saving time.
        '''
        return timedelta(0)

    def __repr__(self):
        '''
        Return a nicely formatted repr string.
        '''
        return f"<FixedOffset {self.__name!r}>"
# Local standard-time (non-DST) offset from UTC.
STDOFFSET = timedelta(seconds = -time.timezone)
# Local DST offset from UTC, when the platform defines a DST zone;
# otherwise the same as the standard offset.
if time.daylight:
    DSTOFFSET = timedelta(seconds = -time.altzone)
else:
    DSTOFFSET = STDOFFSET
# Difference between the local DST and standard offsets.
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
    """
    A tzinfo reflecting the platform's idea of local time, DST included.
    """

    def utcoffset(self, dt):
        '''
        Return the local offset from UTC, picking the DST variant when DST
        is in effect for dt.
        '''
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        '''
        Return the daylight saving adjustment in effect for dt.
        '''
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        '''
        Return the platform-provided zone name (standard or DST variant).
        '''
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        '''
        Return True if DST is active for the naive local datetime dt.
        '''
        # Round-trip through mktime/localtime so the C library decides
        # (tm_isdst=-1 asks it to figure DST out itself).
        fields = (dt.year, dt.month, dt.day,
                  dt.hour, dt.minute, dt.second,
                  dt.weekday(), 0, -1)
        stamp = time.mktime(fields)
        local = time.localtime(stamp)
        return local.tm_isdst > 0
LOCAL = LocalTimezone()
# Module-level singleton for the local time zone.
| StarcoderdataPython |
8060955 | <reponame>OscarMugendi/Project-Tracker
# Generated by Django 3.2.8 on 2021-10-19 15:46
from django.db import migrations
class Migration(migrations.Migration):
    # Empty auto-generated migration with no schema operations.
    # NOTE(review): it depends on two migrations of the same app (0008 and
    # 0010), which looks like a merge of two migration-graph heads — confirm
    # against the app's migration history.
    dependencies = [
        ('Trackerapp', '0008_auto_20211017_2325'),
        ('Trackerapp', '0010_auto_20211019_1133'),
    ]
    operations = [
    ]
| StarcoderdataPython |
183444 | from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django import forms
from authentication.models import User
class SignupForm(UserCreationForm):
    """Registration form: collects first/last name and email on top of the
    two password fields (password1/password2) that UserCreationForm declares.
    """
    email = forms.EmailField(required=True)
    firstname = forms.CharField(max_length=100, required=True)
    lastname = forms.CharField(max_length=100, required=True)

    class Meta:
        model = User
        # BUG FIX: the confirmation field declared by UserCreationForm is
        # named "password2"; the previous value here was a corrupted
        # placeholder and would make the form definition invalid.
        fields = ("firstname", "lastname", "email", "password1", "password2")

    def save(self, commit=True):
        """Create the user, copying the cleaned email onto the instance.

        When commit is False the caller is responsible for saving.
        """
        user = super(SignupForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        if commit:
            user.save()
        return user
class LoginForm(AuthenticationForm):
    """Email-based login form: relabels the stock ``username`` field so it
    is rendered and validated as an email address input."""
    username = forms.EmailField(
        label="Email address",
        max_length=254,
        widget=forms.EmailInput(attrs={'autofocus': True}),
    )
    # NOTE(review): the commented-out clean() below duplicates what
    # AuthenticationForm.clean() already does with email as the username
    # field; consider deleting this dead code.
    #
    # def clean(self):
    #     email = self.cleaned_data.get('email')
    #     password = self.cleaned_data.get('password')
    #
    #     if email is not None and password:
    #         self.user_cache = authenticate(self.request, email=email, password=password)
    #         if self.user_cache is None:
    #             raise forms.ValidationError(
    #                 self.error_messages['invalid_login'],
    #                 code='invalid_login',
    #                 params={'username': self.username_field.verbose_name},
    #             )
    #         else:
    #             self.confirm_login_allowed(self.user_cache)
    #
    #     return self.cleaned_data
| StarcoderdataPython |
3321821 | <filename>mailerlite/__init__.py<gh_stars>10-100
from mailerlite.api import MailerLiteApi
from ._version import get_versions

__version__ = get_versions()['version']
del get_versions

# BUG FIX: __all__ must contain attribute *names* (strings naming module
# attributes).  The old code listed the version value itself (e.g. "1.0.3"),
# which made ``from mailerlite import *`` raise an AttributeError because no
# attribute of that name exists.
__all__ = ['MailerLiteApi', '__version__']
| StarcoderdataPython |
9785662 | from django.conf.urls import patterns, include, url
from django.contrib import admin
# NOTE(review): patterns(), string-dotted view paths and the bare tuple
# entry below were deprecated in Django 1.8 and removed in 1.10; when
# upgrading Django, switch to a plain ``urlpatterns = [url(...), ...]`` list
# with imported view callables.
urlpatterns = patterns(
    '',
    # Examples:
    # url(r'^$', 'readinglist.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^', include('books.urls')),
    url(r'^', include('readers.urls')),
    url(r'^', include('reviews.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Logout redirects to the site root after clearing the session.
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout',
        {'next_page': '/'}),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^', include('core.urls', namespace='core')),
)
| StarcoderdataPython |
255517 | import os
import subprocess
import sys
def _ParseDeps(base_dir):
"""Returns a tuple of (deps, hooks)."""
f = open(os.path.join(base_dir, "win32", "DEPS"))
global_context = {}
local_context = {}
exec(f.read(), global_context, local_context)
return local_context.get("deps", {}), local_context.get("hooks", [])
def _DoUpdate(base_dir, deps):
    """Updates checkout dependencies.

    For each (path, repo) in *deps*: ``git pull`` if the path already exists
    under *base_dir*, otherwise ``git clone`` the repo into it.
    """
    # FIXME: Ideally should automatically clean up removed dependencies.
    # BUG FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    for path, repo in deps.items():
        if os.path.isdir(os.path.join(base_dir, path)):
            os.chdir(os.path.join(base_dir, path))
            subprocess.call(["git", "pull"])
        else:
            os.chdir(base_dir)
            subprocess.call(["git", "clone", repo, path])
def _DoRunHooks(base_dir, hooks):
    """Runs post update hooks. Typically used for generating build files, etc."""
    # TODO: not implemented yet — hooks are parsed by _ParseDeps but ignored.
    pass
def _Main(argv):
    """Dispatches the subcommand in argv[1]: init, update or runhooks.

    Returns None on success (sys.exit(None) exits 0) or falls through to the
    help branch for unknown/missing commands.
    """
    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    deps, hooks = _ParseDeps(base_dir)
    # Guard against a missing subcommand instead of raising IndexError.
    command = argv[1] if len(argv) > 1 else "help"
    if command == "init":
        _DoUpdate(base_dir, deps)
        _DoRunHooks(base_dir, hooks)
        # FIXME: _DoFirstTimeInfo is not defined anywhere in this file and
        # will raise NameError when "init" is run — define it or drop the call.
        _DoFirstTimeInfo()
    elif command == "update":
        _DoUpdate(base_dir, deps)
        _DoRunHooks(base_dir, hooks)
    elif command == "runhooks":
        _DoRunHooks(base_dir, hooks)
    else:
        # BUG FIX: the original "else" was missing its colon (SyntaxError).
        # FIXME: Print out some help.
        pass
if "__main__" == __name__:
try:
result = _Main(sys.argv)
except Exception, e:
print "Error: %s" % str(e)
result = 1
sys.exit(result)
| StarcoderdataPython |
def foo():
    """Return a fresh one-entry mapping {"foo": "bar"}."""
    result = dict(foo="bar")
    return result
print("pos: {} {} {}".format(1, *[2, 3])) | StarcoderdataPython |
6500401 | <reponame>sphoebs/rockshell
#!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-name
def _fix_path():
"""Finds the google_appengine directory and fixes Python imports to use it."""
import os
import sys
all_paths = os.environ.get('PYTHONPATH').split(os.pathsep)
for path_dir in all_paths:
dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
if os.path.exists(dev_appserver_path):
logging.debug('Found appengine SDK on path!')
google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
sys.path.append(google_appengine)
# Use the next import will fix up sys.path even further to bring in
# any dependent lib directories that the SDK needs.
dev_appserver = __import__('dev_appserver')
sys.path.extend(dev_appserver.EXTRA_PATHS)
return
# Import the Pipeline API; if it is not importable, warn, fix sys.path so the
# bundled copy inside the App Engine SDK can be found, and retry.
try:
    from pipeline import *
# BUG FIX: "except ImportError, e" is Python-2-only syntax; "as e" works on
# Python 2.6+ and 3.
except ImportError as e:
    import logging
    logging.warning(
        'Could not load Pipeline API. Will fix path for testing. %s: %s',
        e.__class__.__name__, str(e))
    _fix_path()
    del logging
    from pipeline import *
| StarcoderdataPython |
205847 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import Iterator, Optional, Set, Union, List
import oneflow._C
import oneflow._oneflow_internal
import oneflow.framework.graph_build_util as graph_build_util
from oneflow.env import get_rank
from oneflow.framework.tensor import Tensor, TensorTuple
from oneflow.nn.module import Module
from oneflow.nn.modules.container import *
from oneflow.nn.utils.container import *
from oneflow.nn.parameter import Parameter
from oneflow.nn.graph.block_config import BlockConfig
from oneflow.nn.graph.util import add_indent, seq_to_func_return
def get_block_cls(item):
    """Return the Block subclass that wraps *item* in an nn.Graph.

    Checks run from most to least specific type, so container subclasses
    match their dedicated wrappers before the generic Module/Tensor cases.
    """
    dispatch = (
        (Sequential, SequentialBlock),
        (ModuleList, ModuleListBlock),
        (ModuleDict, ModuleDictBlock),
        (ParameterList, ParameterListBlock),
        (ParameterDict, ParameterDictBlock),
        (Module, ModuleBlock),
        (Tensor, TensorBlock),
    )
    for origin_cls, block_cls in dispatch:
        if isinstance(item, origin_cls):
            return block_cls
    raise NotImplementedError()
class BlockType:
    """String tags describing what kind of object a Block wraps."""
    NONE = "NONE"
    MODULE = "MODULE"
    PARAMETER = "PARAMETER"
    BUFFER = "BUFFER"
class Block(object):
    """Base wrapper pairing a graph item with its qualified name and scope.

    Subclasses wrap nn.Modules, Parameters/Buffers and containers so that
    nn.Graph can attach per-block config and lazily create build scopes.
    """
    def __init__(
        self, prefix: str = "", name: str = "",
    ):
        # Fully qualified name is "<prefix><name>"; prefix ends with ".".
        self._name = name
        self._name_prefix = prefix
        self._type = BlockType.NONE
        self._origin = None
        self._scope = None
        self._prev_scope = None
        self.config = BlockConfig()
    @property
    def name(self):
        return self._name
    @property
    def name_prefix(self):
        return self._name_prefix
    @property
    def type(self):
        return self._type
    @property
    def prev_scope(self):
        # Captured lazily on first access: the scope that was current just
        # before this block creates its own scope.
        if self._prev_scope is None:
            self._prev_scope = oneflow._oneflow_internal.GetCurrentScope()
        return self._prev_scope
    @property
    def scope(self):
        # Created lazily and cached; derived from prev_scope plus this
        # block's config.
        if self._scope is None:
            self._scope = graph_build_util.make_new_block_scope(self.prev_scope, self)
        return self._scope
    def scope_context(self):
        """Return a context manager that makes this block's scope current."""
        return graph_build_util.BlockScopeContext(self.prev_scope, self.scope)
class ModuleBlock(Block):
    """Graph-side proxy for an ``nn.Module``.

    Recursively wraps the module's children, parameters and buffers in
    Blocks, and replays the module's ``forward`` under this block's scope so
    nn.Graph can record ops with per-block config (pipeline stage,
    activation checkpointing, debug printing).
    """
    def __init__(
        self, prefix: str = "", name: str = "", origin: Module = None,
    ):
        assert not isinstance(origin, Block)
        super().__init__(prefix, name)
        # Debug printing is off by default; see debug() for the knobs.
        self._debug = False
        self._debug_min_s_level = 2
        self._debug_max_v_level = 0
        self._type = BlockType.MODULE
        self._is_executing_forward = False
        self._modules = OrderedDict()
        self._parameters = OrderedDict()
        self._buffers = OrderedDict()
        self._args_repr = []
        self._outs_repr = []
        self.set_origin(origin)
    @property
    def origin(self):
        """The wrapped nn.Module."""
        return self._origin
    def set_origin(self, origin):
        """Attach *origin* and wrap its direct children, parameters and
        buffers in Blocks (registered via __setattr__)."""
        self._origin = origin
        if origin is None:
            return
        assert isinstance(origin, Module)
        for (n, m) in list(origin.named_children()):
            self.__setattr__(
                n, get_block_cls(m)(self._name_prefix + self._name + ".", n, m)
            )
        for (n, p) in list(origin.named_parameters("", False)):
            self.__setattr__(
                n, get_block_cls(p)(self._name_prefix + self._name + ".", n, p)
            )
        for (n, b) in list(origin.named_buffers("", False)):
            self.__setattr__(
                n, get_block_cls(b)(self._name_prefix + self._name + ".", n, b)
            )
    def debug(
        self,
        v_level: int = 0,
        ranks: Optional[Union[int, List[int]]] = None,
        mode: bool = True,
    ) -> None:
        """Enable/disable debug printing on the given ranks, recursively
        propagating the setting to child module blocks.  ranks=-1 (in the
        list) means all ranks; ranks=None defaults to rank 0."""
        assert isinstance(mode, bool)
        assert isinstance(v_level, int)
        if ranks is None:
            rank_list = [0]
        elif isinstance(ranks, int):
            rank_list = [ranks]
        elif isinstance(ranks, list):
            rank_list = ranks
        else:
            raise ValueError("ranks must be int or List[int].")
        my_rank = get_rank()
        if -1 in rank_list or my_rank in rank_list:
            self._debug = mode
            if self._debug:
                self._debug_min_s_level = 0
                self._debug_max_v_level = v_level
            if self._type == BlockType.MODULE:
                def _set_child(d):
                    for (_, n) in d.items():
                        n.debug(v_level, ranks, mode)
                _set_child(self._modules)
    def __call__(self, *args):
        """Run the wrapped module's forward under this block's scope,
        recording input/output reprs for debugging and __repr__."""
        assert self._type == BlockType.MODULE
        self._print(0, 1, self._shallow_repr())
        for idx, arg in enumerate(args):
            meta_repr_str = (
                arg._meta_repr() if isinstance(arg, Tensor) else str(type(arg))
            )
            in_str = (
                "(INPUT:_"
                + self.name_prefix
                + self.name
                + "-input_"
                + str(idx)
                + ":"
                + meta_repr_str
                + ")"
            )
            if not isinstance(arg, Tensor):
                in_str = "[WARNING]" + in_str
            self._args_repr.append(in_str)
            self._print(0, 1, in_str)
            def _print_state(d):
                for (_, n) in d.items():
                    self._print(0, 1, n._shallow_repr())
            _print_state(self._parameters)
            _print_state(self._buffers)
        # NOTE: The original nn.Moudle's __call__ method is ignored, which means
        # that hooks of nn.Modules are ignored. It is not recommended
        # to use hooks of nn.Module in nn.Graph for the moment.
        # result = self._origin.__class__.__call__(self, *args)
        result = self.__block_forward(*args)
        outputs = ()
        if not (type(result) is tuple or type(result) is list):
            outputs = (result,)
        else:
            outputs = result
        for idx, out in enumerate(outputs):
            out_repr = out._meta_repr() if isinstance(out, Tensor) else str(type(out))
            out_str = (
                "(OUTPUT:_"
                + self.name_prefix
                + self.name
                + "-output_"
                + str(idx)
                + ":"
                + out_repr
                + ")"
            )
            if not isinstance(out, Tensor):
                out_str = "[WARNING]" + out_str
            self._outs_repr.append(out_str)
            self._print(0, 1, out_str)
        return result
    def __block_forward(self, *args):
        """Invoke origin's forward with this block bound as self, inside this
        block's scope, with identity ops inserted around the call when the
        block config requires them."""
        self._is_executing_forward = True
        args = self.__pre_forward_mapping_out_scope(*args)
        with self.scope_context():
            result = self._origin.__class__.forward(self, *args)
        result = self.__post_forward_mapping_out_scope(result)
        result = seq_to_func_return(result)
        self._is_executing_forward = False
        return result
    def __pre_forward_mapping_out_scope(self, *args):
        # Insert identity op when doing activation checkpointing or pipeline execution.
        # Identity op outside activation checkpointing scope will be the endpoint of an activation checkpointing segment.
        # Identity op as the first op of a pipeline stage will make backward op depends on the identity op within the stage,
        # otherwise the backward op may depends the op in former stage which will make graph creates unnessary buffers.
        if self.config.activation_checkpointing or (
            self.config.stage_id is not None and self.config.stage_id >= 0
        ):
            def insert_identity(t):
                assert isinstance(t, Tensor)
                return oneflow._C.identity(t)
            args = self.__mapping_io(
                "input", insert_identity, "insert_identity", *args,
            )
        return args
    def __post_forward_mapping_out_scope(self, *args):
        # Insert identity op when doing activation checkpointing or pipeline execution.
        if self.config.activation_checkpointing or (
            self.config.stage_id is not None and self.config.stage_id >= 0
        ):
            def insert_identity(t):
                assert isinstance(t, Tensor)
                return oneflow._C.identity(t)
            args = self.__mapping_io(
                "output", insert_identity, "insert_identity", *args,
            )
        return args
    def add_module(self, name: str, module: Optional[Module]) -> None:
        """Register *module* as a child block under *name*."""
        self.__setattr__(
            name,
            get_block_cls(module)(self._name_prefix + self._name + ".", name, module),
        )
    def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
        """Register *param* as a parameter block under *name*."""
        self.__setattr__(
            name,
            get_block_cls(param)(self._name_prefix + self._name + ".", name, param),
        )
    def modules(self, memo: Optional[Set["Block"]] = None) -> Iterator["Block"]:
        """Yield this block and all descendant module blocks, deduplicated
        via *memo*."""
        assert self._type == BlockType.MODULE
        if memo is None:
            memo = set()
        if self not in memo:
            memo.add(self)
            yield self
            for (name, module) in self._modules.items():
                if module is None:
                    continue
                for m in module.modules(memo):
                    yield m
    def __mapping_io(self, io_type, func, func_desc, *args):
        """Apply *func* to every Tensor in *args* (one level of list nesting
        supported), leaving non-Tensors untouched and logging each decision."""
        assert isinstance(func_desc, str)
        assert io_type in ("input", "output")
        mapped_args = []
        def mapping_tensor(item):
            assert isinstance(item, Tensor)
            return func(item)
        for idx, arg in enumerate(args):
            if isinstance(arg, list):
                seq_args = list()
                for i in range(len(arg)):
                    is_tensor, name, repr_str = self.__io_tensor_check_and_gen(
                        arg[i], io_type, idx, i
                    )
                    if is_tensor:
                        seq_args.append(mapping_tensor(arg[i]))
                        self._print(
                            0,
                            1,
                            f"{repr_str} is a Tensor, {func_desc} transformation has been done.",
                        )
                    else:
                        self._print(
                            0,
                            0,
                            f"{repr_str} is not a Tensor, {func_desc} transformation will be ignored.",
                        )
                        seq_args.append(arg[i])
                mapped_args.append(seq_args)
            elif isinstance(arg, Tensor):
                is_tensor, name, repr_str = self.__io_tensor_check_and_gen(
                    arg, io_type, idx
                )
                assert is_tensor
                mapped_args.append(mapping_tensor(arg))
                self._print(
                    0,
                    1,
                    f"{repr_str} is a Tensor, {func_desc} transformation has been done.",
                )
            else:
                is_tensor, name, repr_str = self.__io_tensor_check_and_gen(
                    arg, io_type, idx
                )
                assert not is_tensor
                mapped_args.append(arg)
                self._print(
                    0,
                    0,
                    f"{repr_str} is not a Tensor or a list of Tensor, {func_desc} transformation will be ignored.",
                )
        return tuple(mapped_args)
    def __io_tensor_check_and_gen(self, item, io_type, idx, second_idx=None):
        """Return (is_tensor, qualified io name, repr string) for one io item;
        non-Tensors get a [WARNING]-prefixed repr."""
        assert io_type in ("input", "output")
        name = (
            "_"
            + self.name_prefix
            + self.name
            + "-"
            + io_type
            + "_"
            + str(idx)
            + ("" if second_idx is None else "_" + str(second_idx))
        )
        if isinstance(item, Tensor):
            repr_str = (
                "(" + io_type.upper() + ":" + name + ":" + item._meta_repr() + ")"
            )
            return True, name, repr_str
        else:
            repr_str = (
                "[WARNING]("
                + io_type.upper()
                + ":"
                + name
                + ":"
                + str(type(item))
                + ")"
            )
            return False, name, repr_str
    def __members(self, get_members_fn, recurse=True) -> Iterator["Block"]:
        """Yield deduplicated member blocks collected by *get_members_fn*
        from this block (and all descendants when *recurse*)."""
        assert self._type == BlockType.MODULE
        memo = set()
        modules = self.modules() if recurse else [self]
        for module in modules:
            members = get_members_fn(module)
            for (k, v) in members:
                if v is None or v in memo:
                    continue
                memo.add(v)
                yield v
    def parameters(self, recurse: bool = True) -> Iterator["Block"]:
        """Yield parameter blocks (of descendants too when *recurse*)."""
        assert self._type == BlockType.MODULE
        gen = self.__members(lambda module: module._parameters.items(), recurse=recurse)
        for elem in gen:
            yield elem
    def buffers(self, recurse: bool = True) -> Iterator["Block"]:
        """Yield buffer blocks (of descendants too when *recurse*)."""
        assert self._type == BlockType.MODULE
        gen = self.__members(lambda module: module._buffers.items(), recurse=recurse)
        for elem in gen:
            yield elem
    def __setattr__(self, name: str, value=None) -> None:
        # Blocks are routed into _modules/_parameters/_buffers by their type;
        # anything else goes straight into __dict__.  Duplicate names across
        # those containers are rejected.
        if value is None or not isinstance(value, Block):
            self.__dict__[name] = value
        else:
            dicts_or_sets = (
                self.__dict__,
                self._modules,
                self._parameters,
                self._buffers,
            )
            for d in dicts_or_sets:
                if name in d:
                    raise AttributeError(
                        "'{}' object has duplicated attribute named '{}'".format(
                            self._name, name
                        )
                    )
            if value.type == BlockType.MODULE:
                self._modules[name] = value
            elif value.type == BlockType.PARAMETER:
                self._parameters[name] = value
            elif value.type == BlockType.BUFFER:
                self._buffers[name] = value
            else:
                raise AttributeError(
                    "'{}' object are not allowed to set attribute named '{}'".format(
                        type(self).__name__, name
                    )
                )
    def __getattr__(self, name: str):
        # Lookup order: own __dict__, child module blocks, parameter/buffer
        # states (lazy-built when tracing), origin's None entries, origin's
        # plain attributes, and finally origin's methods bound to this block.
        if name in self.__dict__:
            return self.__dict__[name]
        # support get module
        if "_modules" in self.__dict__:
            modules = self.__dict__["_modules"]
            if name in modules:
                return modules[name]
        # support get parameter
        p_state = self._get_from_states(name, "_parameters")
        if p_state is not None:
            return p_state
        # support get buffer
        b_state = self._get_from_states(name, "_buffers")
        if b_state is not None:
            return b_state
        # support none parameter or buffer
        if name in self._origin._parameters:
            p_none = self._origin._parameters[name]
            assert p_none is None
            return None
        if name in self._origin._buffers:
            b_none = self._origin._buffers[name]
            assert b_none is None
            return None
        # support get normal attr
        if name in self._origin.__dict__:
            return self._origin.__dict__[name]
        # support get function
        if hasattr(self._origin, name):
            return partial(getattr(self._origin.__class__, name), self)
        raise AttributeError(
            "'{}' '{}' object '{}' in nn.Graph has no attribute '{}'".format(
                self._type, type(self).__name__, self._name_prefix + self.name, name
            )
        )
    def _get_from_states(self, name, states_name):
        """Resolve *name* from the given state dict, returning the lazy-built
        tensor while tracing, the origin tensor during eager forward, and the
        Block itself outside nn.Graph.build()."""
        if states_name not in self.__dict__:
            return None
        _states = self.__dict__[states_name]
        if name not in _states:
            return None
        _s_block = _states[name]
        if graph_build_util.lazy_mode.is_enabled():
            _s_block.try_build()
            return _s_block.lazy_origin
        elif (
            not graph_build_util.lazy_mode.is_enabled()
        ) and self._is_executing_forward:
            # eager and inside nn.Graph.build()
            return _s_block.origin
        else:
            # outside nn.Graph.build()
            return _s_block
    def __repr__(self):
        lines = None
        child_lines = []
        if (self.config is not None) and (not self.config._is_null):
            child_lines.append(add_indent(repr(self.config), 2))
        if len(self._args_repr) > 0:
            for in_str in self._args_repr:
                input_str = add_indent(in_str, 2)
                child_lines.append(input_str)
        def _append_child(d):
            for (_, n) in d.items():
                n_str = repr(n)
                n_str = add_indent(n_str, 2)
                child_lines.append(n_str)
        _append_child(self._parameters)
        _append_child(self._buffers)
        _append_child(self._modules)
        if len(self._outs_repr) > 0:
            for out_str in self._outs_repr:
                output_str = add_indent(out_str, 2)
                child_lines.append(output_str)
        if len(child_lines) > 0:
            lines = child_lines
        main_str = self._shallow_repr() + ": ("
        if lines is not None:
            main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str
    def _shallow_repr(self):
        """One-line repr without children: (TYPE:qualified.name:origin)."""
        shallow_repr = (
            "("
            + self._type
            + ":"
            + self._name_prefix
            + self._name
            + ":"
            + self._origin._shallow_repr()
            + ")"
        )
        return shallow_repr
    def _print(self, s_level=2, v_level=0, msg: str = ""):
        r"""Do print according to info level.
        """
        assert isinstance(s_level, int)
        assert isinstance(v_level, int)
        assert isinstance(msg, str)
        if s_level >= self._debug_min_s_level:
            if (s_level > 0) or (s_level == 0 and v_level <= self._debug_max_v_level):
                print(msg)
class LazyBuilder(object):
    """Defers running *method* until try_build() is called inside a block
    scope; the result is computed at most once and cached in ``result``."""

    def __init__(self, name: str = None, method=None):
        self.name = name
        self.method = method
        self.result = None
        self.finished = False

    def try_build(self, block=None):
        # Guard clause: once built, subsequent calls are no-ops.
        if self.finished:
            return
        assert self.name is not None
        assert self.method is not None
        assert self.result is None
        with block.scope_context():
            self.result = self.method()
        self.finished = True
class TensorBlock(Block):
    """Graph-side proxy for a Parameter or buffer Tensor.

    Holds a LazyBuilder so the lazy (graph) version of the tensor is created
    exactly once, on first access during tracing.
    """
    def __init__(
        self, prefix: str = "", name: str = "", origin: Union[Parameter, Tensor] = None,
    ):
        assert not isinstance(origin, Block)
        super().__init__(prefix, name)
        # Parameters must be checked before plain Tensors (Parameter is a
        # Tensor subclass).
        if isinstance(origin, Parameter):
            self._type = BlockType.PARAMETER
        elif isinstance(origin, Tensor):
            self._type = BlockType.BUFFER
        else:
            raise NotImplementedError()
        self._lazy_origin_builder = LazyBuilder()
        self.build_finished = False
        self.set_origin(origin)
    @property
    def origin(self):
        """The wrapped eager Parameter/Tensor."""
        return self._origin
    def set_origin(self, origin):
        self._origin = origin
    @property
    def lazy_origin(self):
        """The lazily built graph tensor (None until try_build() ran)."""
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin"
        return self._lazy_origin_builder.result
    def lazy_origin_builder(self):
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin_builder"
        return self._lazy_origin_builder
    def set_lazy_origin_builder(self, builder=None):
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin_builder"
        self._lazy_origin_builder = builder
    def try_build(self):
        # Build at most once; the builder itself is also idempotent.
        if not self.build_finished:
            self._lazy_origin_builder.try_build(self)
            self.build_finished = True
    def __repr__(self):
        lines = None
        main_str = self._shallow_repr() + ": ("
        if lines is not None:
            main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str
    def _shallow_repr(self):
        shallow_repr = (
            "("
            + self._type
            + ":"
            + self._name_prefix
            + self._name
            + ":"
            + self._origin._meta_repr()
            + ")"
        )
        return shallow_repr
class SequentialBlock(get_seq(ModuleBlock)):
    """Block wrapper for nn.Sequential containers."""
    def __init__(
        self, prefix: str = "", name: str = "", origin: Sequential = None,
    ):
        super().__init__()
        self._name_prefix = prefix
        self._name = name
        self.set_origin(origin)
class ModuleListBlock(get_list(ModuleBlock)):
    """Block wrapper for nn.ModuleList containers."""
    def __init__(
        self, prefix: str = "", name: str = "", origin: ModuleList = None,
    ):
        super().__init__()
        self._name_prefix = prefix
        self._name = name
        self.set_origin(origin)
        # ModuleList is a container without forward() method,
        # so it will not be executed or has an execution config.
        self.config = None
class ModuleDictBlock(get_dict(ModuleBlock)):
    """Block wrapper for nn.ModuleDict containers."""
    def __init__(
        self, prefix: str = "", name: str = "", origin: ModuleDict = None,
    ):
        super().__init__()
        self._name_prefix = prefix
        self._name = name
        self.set_origin(origin)
class ParameterListBlock(get_para_list(ModuleBlock)):
    """Block wrapper for nn.ParameterList containers.

    Indexing resolves through the parameter-state machinery so tracing gets
    the lazy tensor and eager execution gets the origin parameter.
    """
    def __init__(
        self, prefix: str = "", name: str = "", origin: ParameterList = None,
    ):
        super().__init__()
        self._name_prefix = prefix
        self._name = name
        self.set_origin(origin)
        # Parameter containers are only ever touched inside forward.
        self._is_executing_forward = True
    def __getitem__(self, idx):
        assert isinstance(idx, int)
        # Normalize negative indices, then look the parameter up by its
        # string key in the block's parameter states.
        idx = self._get_abs_string_index(idx)
        key = str(idx)
        p_state = self._get_from_states(key, "_parameters")
        if p_state is not None:
            return p_state
        else:
            # BUG FIX: error message typo "dosen't" -> "doesn't".
            raise AttributeError("ParameterList doesn't contain ", key)
class ParameterDictBlock(get_para_dict(ModuleBlock)):
    """Block wrapper for nn.ParameterDict containers.

    Key lookup resolves through the parameter-state machinery so tracing
    gets the lazy tensor and eager execution gets the origin parameter.
    """
    def __init__(
        self, prefix: str = "", name: str = "", origin: ParameterDict = None,
    ):
        super().__init__()
        self._name_prefix = prefix
        self._name = name
        self.set_origin(origin)
        # Parameter containers are only ever touched inside forward.
        self._is_executing_forward = True
    def __getitem__(self, key: str):
        p_state = self._get_from_states(key, "_parameters")
        if p_state is not None:
            return p_state
        else:
            # BUG FIX: error message typo "dosen't" -> "doesn't".
            raise AttributeError("ParameterDict doesn't contain key ", key)
| StarcoderdataPython |
1787244 | <reponame>nuagenetworks/nuage-tempest-plugin<gh_stars>1-10
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import time
from tempest.api.network import base
from tempest.common import utils
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest import test
from nuage_tempest_plugin.lib.topology import Topology
from nuage_tempest_plugin.tests.scenario \
import base_nuage_network_scenario_test
CONF = Topology.get_conf()
LOG = Topology.get_logger(__name__)
# Pairs a created floating IP dict with the server it is attached to.
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
                                           ['floating_ip', 'server'])
# Extra-DHCP-option values pushed onto the booted port in
# _setup_network_and_servers (MTU, DNS domain name and search list).
EXTRA_DHCP_OPT_MTU_VALUE = '1498'
EXTRA_DHCP_OPT_DOMAIN_NAME = 'nuagenetworks.com'
EXTRA_DHCP_OPT_DOMAIN_SEARCH = 'sales.domain.com;eng.domain.org'
# Nuage per-FIP rate limit applied when creating the floating IP.
FIP_RATE_LIMIT = '5'
# Seconds to wait for a floating-ip update to take effect.
FIP_UPDATE_DELAY = 4
class TestNetworkBasicOps(
base_nuage_network_scenario_test.NuageNetworkScenarioTest,
base.BaseNetworkTest):
"""TestNetworkBasicOps
This smoke test suite assumes that Nova has been configured to
boot VM's with Neutron-managed networking, and attempts to
verify network connectivity as follows:
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'tenant_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
    @classmethod
    def skip_checks(cls):
        """Skip the suite unless some network is reachable and the router
        and security-group API extensions are enabled."""
        super(TestNetworkBasicOps, cls).skip_checks()
        if not (CONF.network.project_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)
        for ext in ['router', 'security-group']:
            if not utils.is_extension_enabled(ext, 'network'):
                msg = "%s extension not enabled." % ext
                raise cls.skipException(msg)
    @classmethod
    def setup_credentials(cls):
        """Request credentials without pre-created network resources; the
        tests build their own networks."""
        # Create no network resources for these tests.
        cls.set_network_resources()
        super(TestNetworkBasicOps, cls).setup_credentials()
    def setUp(self):
        """Reset per-test bookkeeping of created keypairs and servers."""
        super(TestNetworkBasicOps, self).setUp()
        self.keypairs = {}
        self.servers = []
    def _setup_network_and_servers(self, **kwargs):
        """Create security group, network/subnet/router, boot a server and
        attach a rate-limited floating IP.

        When kwargs['boot_with_port'] is true, a port carrying extra DHCP
        options (MTU, domain-name, domain-search) is created first and the
        server boots on it.  Remaining kwargs go to create_networks().
        """
        boot_with_port = kwargs.pop('boot_with_port', False)
        self.security_group = self._create_security_group()
        self.network, self.subnet, self.router = self.create_networks(**kwargs)
        self.check_networks()
        self.port_id = None
        if boot_with_port:
            # create a port on the network and boot with that
            # Don't forget to add the security group to allow ssh
            extra_dhcp_opts = [
                {'opt_value': EXTRA_DHCP_OPT_MTU_VALUE,
                 'opt_name': 'mtu'},
                {'opt_value': EXTRA_DHCP_OPT_DOMAIN_NAME,
                 'opt_name': 'domain-name'},
                {'opt_value': EXTRA_DHCP_OPT_DOMAIN_SEARCH,
                 'opt_name': 'domain-search'}
            ]
            port_kwargs = {
                'extra_dhcp_opts': extra_dhcp_opts,
                'security_groups': [self.security_group['id']]
            }
            self.port_id = self._create_port(
                self.network['id'], **port_kwargs)['id']
        name = data_utils.rand_name('server-smoke')
        server = self._create_server(name, self.network, self.port_id)
        self._check_nuage_tenant_network_connectivity()
        # Create floating IP with FIP rate limiting
        result = self.os_primary.floating_ips_client.create_floatingip(
            floating_network_id=CONF.network.public_network_id,
            port_id=self.port_id,
            nuage_fip_rate=FIP_RATE_LIMIT)
        self.floating_ips.append(result['floatingip'])
        # QA REPO -- does NOT work on dev repo - no net_resources attr
        # convert to format used throughout this file
        # floating_ip = self.net_resources.DeletableFloatingIp(
        #     client=self.os_primary.floating_ips_client,
        #     **result['floatingip'])
        # QA repo
        # DEV REPO start
        floating_ip = result['floatingip']
        # DEV REPO end
        self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
    def check_networks(self):
        """check_networks
        Checks that we see the newly created network/subnet/router via
        checking the result of list_[networks,routers,subnets]
        """
        # Listed as admin so the resources are visible regardless of tenant.
        seen_nets = self.os_admin.networks_client.\
            list_networks()['networks']
        seen_names = [n['name'] for n in seen_nets]
        seen_ids = [n['id'] for n in seen_nets]
        self.assertIn(self.network['name'], seen_names)
        self.assertIn(self.network['id'], seen_ids)
        if self.subnet:
            seen_subnets = self.os_admin.subnets_client.\
                list_subnets()['subnets']
            seen_net_ids = [n['network_id'] for n in seen_subnets]
            seen_subnet_ids = [n['id'] for n in seen_subnets]
            self.assertIn(self.network['id'], seen_net_ids)
            self.assertIn(self.subnet['id'], seen_subnet_ids)
        if self.router:
            seen_routers = self.os_admin.routers_client.\
                list_routers()['routers']
            seen_router_ids = [n['id'] for n in seen_routers]
            seen_router_names = [n['name'] for n in seen_routers]
            self.assertIn(self.router['name'],
                          seen_router_names)
            self.assertIn(self.router['id'],
                          seen_router_ids)
    def _create_server(self, name, network, port_id=None):
        """Boot a server on *network* with a fresh keypair and the test's
        security group; the keypair is remembered for later SSH access.

        When *port_id* is given the server boots on that pre-created port.
        """
        keypair = self.create_keypair()
        self.keypairs[keypair['name']] = keypair
        security_groups = [{'name': self.security_group['name']}]
        network = {'uuid': network['id']}
        if port_id is not None:
            network['port'] = port_id
        server = self.create_server(
            name=name,
            networks=[network],
            key_name=keypair['name'],
            security_groups=security_groups,
            wait_until='ACTIVE')
        return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
    def _check_nuage_tenant_network_connectivity(self):
        """Verify tenant-network reachability for every booted server using
        the image's default SSH user and each server's stored private key."""
        ssh_login = CONF.validation.image_ssh_user
        for server in self.servers:
            # call the common method in the parent class
            super(TestNetworkBasicOps, self).\
                _check_tenant_network_connectivity(
                    server, ssh_login, self._get_server_key(server),
                    servers_for_debug=self.servers)
    def _check_public_connectivity(
            self, should_connect=True, msg=None,
            should_check_floating_ip_status=True):
        """_check_public_connectivity
        Verifies connectivity to a VM via public network and floating IP,
        and verifies floating IP has resource status is correct.
        :param should_connect: bool. determines if connectivity check is
        negative or positive.
        :param msg: Failure message to add to Error message. Should describe
        the place in the test scenario where the method was called,
        to indicate the context of the failure
        :param should_check_floating_ip_status: bool. should status of
        floating_ip be checked or not
        """
        ssh_login = CONF.validation.image_ssh_user
        floating_ip, server = self.floating_ip_tuple
        ip_address = floating_ip['floating_ip_address']
        private_key = None
        floatingip_status = 'DOWN'
        if should_connect:
            private_key = self._get_server_key(server)
            floatingip_status = 'ACTIVE'
        # Check FloatingIP Status before initiating a connection
        if should_check_floating_ip_status:
            self.check_floating_ip_status(floating_ip, floatingip_status)
        # call the common method in the parent class
        self.check_public_network_connectivity(
            ip_address, ssh_login, private_key, should_connect, msg,
            self.servers)
    def _disassociate_floating_ips(self):
        # Detach the FIP from its server; keep the FIP in the tracked tuple
        # with server=None so a later re-associate can reuse it.
        floating_ip, server = self.floating_ip_tuple
        self._disassociate_floating_ip(floating_ip)
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, None)
    def _reassociate_floating_ips(self):
        """Attach the tracked floating IP to a brand-new server."""
        floating_ip, server = self.floating_ip_tuple
        name = data_utils.rand_name('new_server-smoke')
        # create a new server for the floating ip
        server = self._create_server(name, self.network)
        self._associate_floating_ip(floating_ip, server)
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, server)
    def _create_new_network(self, create_gateway=False):
        """Create a second tenant network plus subnet.

        :param create_gateway: when False the subnet is created with
            gateway_ip=None, i.e. without a gateway.
        """
        self.new_net = self._create_network(tenant_id=self.tenant_id)
        if create_gateway:
            self.new_subnet = self._create_subnet(
                network=self.new_net)
        else:
            self.new_subnet = self._create_subnet(
                network=self.new_net,
                gateway_ip=None)
    def _hotplug_server(self):
        """Hot-attach an interface on self.new_net to the FIP'ed server and
        bring the new NIC up inside the guest with a static IP.

        Waits (up to CONF.network.build_timeout) for the new port to appear
        on the Neutron side and for the new NIC to appear in the guest.
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self._list_ports(device_id=server['id'])
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        interface = self.interface_client.create_interface(
            server=server['id'],
            network_id=self.new_net.id)
        self.addCleanup(self.network_client.wait_for_resource_deletion,
                        'port',
                        interface['port_id'])
        self.addCleanup(self.delete_wrapper,
                        self.interface_client.delete_interface,
                        server['id'], interface['port_id'])

        def check_ports():
            # A second port (besides the boot port) must appear on the server.
            self.new_port_list = [port for port in
                                  self._list_ports(device_id=server['id'])
                                  if port['id'] != old_port['id']]
            return len(self.new_port_list) == 1

        if not test.call_until_true(
                check_ports,
                CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException(
                "No new port attached to the server in time (%s sec)! "
                "Old port: %s. Number of new ports: %d" % (
                    CONF.network.build_timeout, old_port,
                    len(self.new_port_list)))
        new_port = self.net_resources.DeletablePort(
            client=self.network_client, **self.new_port_list[0])

        def check_new_nic():
            # The guest must report exactly one NIC that was not there before.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test.call_until_true(
                check_new_nic,
                CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec"
                                              % CONF.network.build_timeout)
        num, new_nic = self.diff_list[0]
        ssh_client.assign_static_ip(nic=new_nic,
                                    addr=new_port.fixed_ips[0]['ip_address'])
        ssh_client.turn_nic_on(nic=new_nic)
@staticmethod
def _get_server_nics(ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
ipatxt = ssh_client.get_ip_list()
return reg.findall(ipatxt)
    def _check_network_internal_connectivity(self, network,
                                             should_connect=True):
        """Check in-tenant connectivity from the FIP'ed server via ssh:
        - ping internal gateway and DHCP port, implying in-tenant connectivity
        pinging both, because L3 and DHCP agents might be on different nodes
        """
        floating_ip, server = self.floating_ip_tuple
        # get internal ports' ips:
        # get all network ports in the new network
        internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
                        self._list_ports(tenant_id=server['tenant_id'],
                                         network_id=network.id)
                        if p['device_owner'].startswith('network'))
        self._check_server_connectivity(floating_ip,
                                        internal_ips,
                                        should_connect)
    def _check_network_external_connectivity(self):
        """Ping the public network's default gateway to imply external
        connectivity; skipped when no public network is configured.
        """
        if not CONF.network.public_network_id:
            msg = 'public network not defined.'
            LOG.info(msg)
            return
        # We ping the external IP from the instance using its floating IP
        # which is always IPv4, so we must only test connectivity to
        # external IPv4 IPs if the external network is dualstack.
        v4_subnets = [s for s in self._list_subnets(
            network_id=CONF.network.public_network_id)
            if s['ip_version'] == 4]
        self.assertEqual(1, len(v4_subnets),
                         "Found %d IPv4 subnets" % len(v4_subnets))
        external_ips = [v4_subnets[0]['gateway_ip']]
        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                        external_ips)
    def _check_server_connectivity(self, floating_ip, address_list,
                                   should_connect=True):
        """SSH into the FIP'ed server and check reachability of every
        address in *address_list*, positive or negative per *should_connect*.
        """
        ip_address = floating_ip.floating_ip_address
        private_key = self._get_server_key(self.floating_ip_tuple.server)
        ssh_source = self._ssh_to_server(ip_address, private_key)
        for remote_ip in address_list:
            if should_connect:
                msg = "Timed out waiting for %s to become reachable" \
                      % remote_ip
            else:
                msg = "ip address %s is reachable" % remote_ip
            try:
                self.assertTrue(self._check_remote_connectivity
                                (ssh_source, remote_ip, should_connect),
                                msg)
            except Exception:
                # Log which SSH path failed before re-raising, to ease
                # debugging of connectivity failures.
                LOG.exception("Unable to access {dest} via ssh to "
                              "floating-ip {src}".format(dest=remote_ip,
                                                         src=floating_ip))
                raise
@staticmethod
def _get_server_mtu(ssh_client, interface='eth0'):
command = 'ip a | grep -v inet | grep ' + interface + \
' | cut -d" " -f 5'
mtu = ssh_client.exec_command(command)
return int(mtu)
@staticmethod
def _get_server_domain_name(ssh_client):
command = 'grep search /etc/resolv.conf | cut -d" " -f2'
domain_name = str(ssh_client.exec_command(command)).rstrip('\n')
return domain_name
def _check_extra_dhcp_opts_on_server(self, server, floating_ip_address):
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(floating_ip_address,
private_key=private_key)
# Fetch MTU FROM ETH0
# command = 'ip a | grep -v inet | grep eth0 | cut -d" " -f 5'
mtu = self._get_server_mtu(ssh_client, 'eth0')
domain_name = self._get_server_domain_name(ssh_client)
# Compare with values used when creating the port
self.assertEqual(int(mtu), int(EXTRA_DHCP_OPT_MTU_VALUE),
'Extra DHCP option <mut> not set correclty on the VM')
self.assertEqual(domain_name, EXTRA_DHCP_OPT_DOMAIN_NAME,
'Extra DHCP option <domain-name> not set correcty '
'on the VM')
LOG.info("EXTRA DHCP OPTIONS validated OK")
    # TODO(KRIS): investigate further -- an upstream change broke this test,
    # which is why it is disabled (renamed with the FIXME_KRIS_ prefix below).
    # Connectivity is in any case covered much more thoroughly these days by
    # nuage.connectivity, e.g.
    # test_icmp_connectivity_os_managed_l3_domain_using_fip (testbed too).
    def FIXME_KRIS_test_nuage_fip_network_basic_ops(self):
        """test_nuage_fip_network_basic_ops

        Spin a VM with a security group on an internal network, with
        a floating IP in the public network.
        Relies on the fact that there is connectivity form the test runner
        to this network.
        We use the FIP 2 underlay feature (underlay=true) on the public network

        NOTE(review): renamed with the FIXME_KRIS_ prefix, presumably so test
        runners skip it -- see the TODO(KRIS) notes above.
        """
        # Use a port, on which we add :
        # extra dhcp options (done)
        kwargs = {'boot_with_port': True}
        self._setup_network_and_servers(**kwargs)
        time.sleep(5)  # giving time for servers to come up - TODO(check this)
        self._check_public_connectivity(
            should_connect=True, should_check_floating_ip_status=False)
        # Verify whether our extra dhcp options made it to the VM
        floating_ip, this_server = self.floating_ip_tuple
        self._check_extra_dhcp_opts_on_server(
            this_server, floating_ip['floating_ip_address'])
        # Check disassociate / associate of the FIP on the same port
        # a number of times
        loop_range = 4
        LOG.info("Starting FIP-2-underlay dis/associate loop on " +
                 str(floating_ip['floating_ip_address']))
        for count in range(1, loop_range, 1):
            self._disassociate_floating_ips()
            time.sleep(FIP_UPDATE_DELAY)
            LOG.info("Loop " + str(count) + "/" + str(loop_range) +
                     " Connectivity is GONE")
            self._check_public_connectivity(
                should_connect=False, should_check_floating_ip_status=False)
            # disassociate de-populates the server in the tuple,
            # populate it again:
            self.floating_ip_tuple = Floating_IP_tuple(
                floating_ip, this_server)
            self._associate_floating_ip(floating_ip, this_server)
            time.sleep(FIP_UPDATE_DELAY)
            LOG.info("Loop " + str(count) + "/" + str(loop_range) +
                     " Connectivity is BACK")
            self._check_public_connectivity(
                should_connect=True, should_check_floating_ip_status=False)
| StarcoderdataPython |
6401512 | """
Created on 18 Feb 2017
@author: <NAME> (<EMAIL>)
"""
from scs_core.data.datetime import LocalizedDatetime
from scs_core.sample.climate_sample import ClimateSample
from scs_core.sampler.sampler import Sampler
# --------------------------------------------------------------------------------------------------------------------
class ClimateSampler(Sampler):
    """
    Reads the SHT temperature/humidity sensor and, when one is fitted, the
    barometer, and packages the readings as a timestamped ClimateSample.
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, runner, tag, sht, barometer=None, altitude=None):
        """
        Constructor
        """
        super().__init__(runner)

        self.__tag = tag
        self.__sht = sht
        self.__barometer = barometer            # may be None
        self.__altitude = altitude              # passed through to the barometer sample

    # ----------------------------------------------------------------------------------------------------------------

    def reset(self):
        Sampler.reset(self)
        self.__sht.reset()

    def sample(self):
        sht_sample = self.__sht.sample()

        # TODO: get the altitude from GPS if necessary
        barometer_sample = (self.__barometer.sample(altitude=self.__altitude, include_temp=False)
                            if self.__barometer else None)

        recorded = LocalizedDatetime.now().utc()        # after sampling, so that we can monitor resource contention

        return ClimateSample(self.__tag, recorded, sht_sample, barometer_sample)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "ClimateSampler:{runner:%s, tag:%s, sht:%s, barometer:%s, altitude:%s}" % \
               (self.runner, self.__tag, self.__sht, self.__barometer, self.__altitude)
| StarcoderdataPython |
8166908 | '''
Результаты олимпиады
'''
n = int(input())
arr = []
for i in range(n):
a = list(input().split())
arr.append((a[0], int(a[1])))
arr.sort(key=lambda x: x[1])
arr.reverse()
for i in arr:
print(i[0], end='\n')
| StarcoderdataPython |
8099851 | import pymysql
class squirrel_module():
    """Base helper for squirrel modules.

    Copies connection/identity settings from the owning *squirrel* object
    and provides a MySQL query runner plus app/secret lifecycle hooks that
    subclasses are expected to override.
    """

    def __init__(self, squirrel):
        self.squirrel = squirrel
        self.squirrel_service = squirrel.squirrel_service
        self.squirrel_namespace = squirrel.squirrel_namespace
        self.squirrel_user = squirrel.squirrel_user
        # NOTE(review): the next two assignments were mangled to "<PASSWORD>"
        # placeholders in the original; restored to mirror the other
        # attribute copies from *squirrel* -- confirm against upstream.
        self.squirrel_pass = squirrel.squirrel_pass
        self.secret_annotations = squirrel.secret_annotations
        self.random_pass = squirrel.random_pass
        self.host = squirrel.host
        self.debug_mode = squirrel.debug_mode

    def mysql_execution(self,
                        name_username,
                        user_password,
                        mysql_host,
                        mysql_port,
                        name_database,
                        list_querys):
        """Run each query in *list_querys* on one connection and commit.

        :return: dict mapping each query string to its fetched rows, or
            False when the connection / execution fails.
        """
        print("[INFO] Mysql - Host: %s" % (mysql_host))
        dic_query = {}
        try:
            # The password argument was mangled to "<PASSWORD>" in the
            # original; it must be the user_password parameter.
            connection = pymysql.connect(user=name_username,
                                         password=user_password,
                                         host=mysql_host,
                                         database=name_database,
                                         port=mysql_port)
            cursor = connection.cursor()
            for query in list_querys:
                cursor.execute(query)
                try:
                    dic_query[query] = cursor.fetchall()
                except Exception as e:
                    print("(mysql_execution)[WARNING] fetchall: %s" % e)
            connection.commit()
            print("(mysql_execution)[INFO] %s successfully" % cursor.rowcount)
        except (Exception, pymysql.Error) as error:
            print("(mysql_execution)[ERROR] while connecting to MySQL: ", error)
            return False
        finally:
            # Only close when the connection was actually established.
            if 'connection' in locals():
                if connection:
                    cursor.close()
                    connection.close()
                    print("(mysql_execution)[INFO] MySQL connection is closed")
        return dic_query

    def check_app(self):
        """Placeholder health check; always reports success."""
        print("check app")
        # if is OK
        return True

    def update_app(self):
        """Placeholder app update; always reports success."""
        print("update app")
        # if is OK
        return True

    def update_secret(self):
        """Placeholder secret update; always reports success."""
        print("update secret")
        return True
3446105 | #!/usr/bin/python
import lldb
import struct
class OperatingSystemPlugIn(object):
    """OS plugin that, together with the matching main.cpp, emulates this
    scenario:

    a) We stop in an OS Plugin created thread - which should be thread index 1
    b) We step-out from that thread
    c) We hit a breakpoint in another thread, and DON'T produce the OS Plugin
       thread.
    d) We continue, and when we hit the step out breakpoint, we again produce
       the same OS Plugin thread.

    main.cpp drives the behaviour through the global g_value, which tells the
    plugin whether to produce the OS plugin thread or not.  The OS plugin
    thread always has a backing thread, so get_register_info and
    get_register_data need no real implementation.
    """

    def __init__(self, process):
        '''Initialization needs a valid.SBProcess object.

        This plug-in will get created after a live process is valid and has
        stopped for the first time.'''
        print("Plugin initialized.")
        self.process = None
        self.start_stop_id = 0
        self.g_value = lldb.SBValue()
        if not (isinstance(process, lldb.SBProcess) and process.IsValid()):
            return
        self.process = process
        self.g_value = process.GetTarget().FindFirstGlobalVariable("g_value")
        if not self.g_value.IsValid():
            print("Could not find g_value")

    def create_thread(self, tid, context):
        print("Called create thread with tid: ", tid)
        return None

    def get_thread_info(self):
        g_value = self.g_value.GetValueAsUnsigned()
        print("Called get_thread_info: g_value: %d"%(g_value))
        if g_value not in (0, 2):
            # main.cpp set g_value to suppress the OS plugin thread.
            return []
        return [{'tid': 0x111111111,
                 'name': 'one',
                 'queue': 'queue1',
                 'state': 'stopped',
                 'stop_reason': 'breakpoint',
                 'core': 1}]

    def get_register_info(self):
        print ("called get_register_info")
        return None

    def get_register_data(self, tid):
        print("Get register data called for tid: %d"%(tid))
        return None
| StarcoderdataPython |
1646675 | import os
import requests
import pygtrie as trie
from joblib import dump
from config import PUBLIC_SUFFIX_LIST_URL, SUFFIX_TRIE, DATA_DIR
def fetch_public_suffix_data():
    """Download the public-suffix list and return it as a list of lines.

    Returns an empty list when the download fails; the failure is printed,
    matching the module's existing best-effort error handling.
    """
    data = []
    try:
        # stream=True was pointless here: .text reads the whole body anyway.
        r = requests.get(PUBLIC_SUFFIX_LIST_URL)
        # Surface HTTP errors instead of silently parsing an error page.
        r.raise_for_status()
        # splitlines() avoids the trailing '' element that split("\n") left.
        data = r.text.splitlines()
    except Exception as e:
        print("EXCEPTION IN FETCHING PUBLIC SUFFIX LIST : " + str(e))
    return data
def create_public_suffix_trie():
    """Build a StringTrie mapping each public suffix to True.

    Blank lines and comment lines (starting with "//") are skipped.
    """
    pub_suf_trie = trie.StringTrie()
    for suffix in fetch_public_suffix_data():
        if suffix and not suffix.startswith("//"):
            pub_suf_trie[suffix] = True
    return pub_suf_trie
def dump_suffix_trie():
    """Build the suffix trie and serialise it to DATA_DIR/SUFFIX_TRIE."""
    suffix_trie = create_public_suffix_trie()
    target_path = os.path.join(DATA_DIR, SUFFIX_TRIE)
    try:
        dump(suffix_trie, target_path)
    except Exception as e:
        print(e)
# Script entry point: rebuild and persist the public-suffix trie.
if __name__ == "__main__":
    dump_suffix_trie()
| StarcoderdataPython |
9618403 | import pytest
from node.blockchain.facade import BlockchainFacade
from node.blockchain.inner_models import PVScheduleUpdateBlockMessage
@pytest.mark.usefixtures('base_blockchain')
def test_make_block_message_update(pv_schedule_update_signed_change_request, primary_validator_node):
    # Building the update from a PV-schedule change request must leave the
    # accounts untouched and map slot '1' to the primary validator.
    facade = BlockchainFacade.get_instance()
    update = PVScheduleUpdateBlockMessage.make_block_message_update(
        pv_schedule_update_signed_change_request, facade
    )
    assert update.accounts is None
    assert update.schedule == {'1': primary_validator_node.identifier}
| StarcoderdataPython |
8126235 | # -*- coding: UTF-8 -*-
from sympy import pi, sin
from scipy.sparse.linalg import spsolve
from sympde.calculus import dot, div
from sympde.topology import VectorFunctionSpace, ScalarFunctionSpace
from sympde.topology import ProductSpace
from sympde.topology import element_of
from sympde.topology import Line
from sympde.expr import BilinearForm, LinearForm
from sympde.expr import integral
from sympde.expr import Norm
from sympde.expr import find
from psydac.fem.basic import FemField
from psydac.api.discretization import discretize
#==============================================================================
def run_system_1_1d_dir(f0, sol, ncells, degree):
    """Solve a 1D mixed (H1 x L2) system with source *f0* and return the
    L2 error of the scalar unknown against the exact solution *sol*.

    :param f0: sympy expression for the right-hand side source term.
    :param sol: sympy expression for the exact scalar solution.
    :param ncells: list with the number of cells per direction.
    :param degree: list with the spline degree per direction.
    :return: L2 norm of the error on the scalar field.
    """
    # ... abstract model
    domain = Line()

    V1 = VectorFunctionSpace('V1', domain, kind='H1')
    V2 = ScalarFunctionSpace('V2', domain, kind='L2')
    X = ProductSpace(V1, V2)

    x = domain.coordinates

    F = element_of(V2, name='F')

    p,q = [element_of(V1, name=i) for i in ['p', 'q']]
    u,v = [element_of(V2, name=i) for i in ['u', 'v']]

    int_0 = lambda expr: integral(domain , expr)

    # Mixed weak form: mass term on the vector unknown plus div coupling.
    a  = BilinearForm(((p,u),(q,v)), int_0(dot(p,q) + div(q)*u + div(p)*v))
    l  = LinearForm((q,v), int_0(f0*v))

    error = F-sol
    l2norm_F = Norm(error, domain, kind='l2')

    equation = find([p,u], forall=[q,v], lhs=a((p,u),(q,v)), rhs=l(q,v))

    # ... create the computational domain from a topological domain
    domain_h = discretize(domain, ncells=ncells)
    # ...

    # ... discrete spaces
    V1h = discretize(V1, domain_h, degree=degree)
    V2h = discretize(V2, domain_h, degree=degree)
    Xh  = discretize(X , domain_h, degree=degree)

    # ... dsicretize the equation using Dirichlet bc
    ah = discretize(equation, domain_h, [Xh, Xh], symbolic_space=[X, X])

    # ... discretize norms
    l2norm_F_h = discretize(l2norm_F, domain_h, V2h)

    ah.assemble()
    M   = ah.linear_system.lhs.tosparse().tocsc()
    rhs = ah.linear_system.rhs.toarray()
    sol = spsolve(M, rhs)

    # The scalar unknown's coefficients sit after the V1h block in the
    # solution vector of the coupled system.
    phi2 = FemField(V2h)
    phi2.coeffs[0:V2h.nbasis] = sol[V1h.nbasis:]

    l2_error = l2norm_F_h.assemble(F=phi2)

    return l2_error
###############################################################################
# SERIAL TESTS
###############################################################################
#==============================================================================
def test_api_system_1_1d_dir_1():
    from sympy import symbols
    x1 = symbols('x1')

    exact = sin(2*pi*x1)
    source = -(2*pi)**2*sin(2*pi*x1)

    # NOTE(review): the returned error is not asserted against a tolerance;
    # consider checking it once reference values are known.
    l2_error = run_system_1_1d_dir(source, exact, ncells=[10], degree=[2])
| StarcoderdataPython |
97762 | <filename>notes/my_bad_script.py
def split_full_name(string: str) -> list:
    """Split *string* (a full name) on spaces.

    Parameters
    ----------
    string : str
        The full name to be parsed.

    Returns
    -------
    list
        The space-separated name parts (first and last for a two-word name).
    """
    parts = string.split(" ")
    return parts
# Test it out.
# Fixed: the original passed an int (100000000), which crashes at runtime
# because int has no .split() -- the function requires a str.
print(split_full_name(string="Ada Lovelace"))
| StarcoderdataPython |
5088559 | <reponame>hwf1324/nvda<gh_stars>0
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2006-2021 NV Access Limited, <NAME>, <NAME>
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""A module used to record Windows versions.
It is also used to define feature checks such as
making sure NVDA can run on a minimum supported version of Windows.
"""
from typing import Optional
import sys
import os
import functools
import winreg
from buildVersion import version_year
# Records a mapping between Windows builds and release names.
# These include build 10240 for Windows 10 1507 and releases with multiple release builds.
# These are applicable to Windows 10 as they report the same system version (10.0).
# Keys are OS build numbers; values are public release names.
_BUILDS_TO_RELEASE_NAMES = {
	10240: "Windows 10 1507",
	10586: "Windows 10 1511",
	14393: "Windows 10 1607",
	15063: "Windows 10 1703",
	16299: "Windows 10 1709",
	17134: "Windows 10 1803",
	17763: "Windows 10 1809",
	18362: "Windows 10 1903",
	18363: "Windows 10 1909",
	19041: "Windows 10 2004",
	19042: "Windows 10 20H2",
	19043: "Windows 10 21H1",
	22000: "Windows 11 21H2",
}
@functools.lru_cache(maxsize=1)
def _getRunningVersionNameFromWinReg() -> str:
	"""Returns the Windows release name defined in Windows Registry.
	This is applicable on Windows 10 Version 1511 (build 10586) and later.

	:raises RuntimeError: on releases older than Windows 10 1511, where the
		release name is not recorded in the registry.
	"""
	# Release name is recorded in Windows Registry from Windows 10 Version 1511 (build 10586) onwards.
	if getWinVer() < WIN10_1511:
		raise RuntimeError("Release name is not recorded in Windows Registry on this version of Windows")
	# Cache the version in use on the system.
	with winreg.OpenKey(
		winreg.HKEY_LOCAL_MACHINE, r"Software\Microsoft\Windows NT\CurrentVersion"
	) as currentVersion:
		# Version 20H2 and later where a separate display version string is used.
		try:
			releaseId = winreg.QueryValueEx(currentVersion, "DisplayVersion")[0]
		except OSError:
			# Don't set anything if this is Windows 10 1507 or earlier.
			try:
				releaseId = winreg.QueryValueEx(currentVersion, "ReleaseID")[0]
			except OSError:
				releaseId = ""
	return releaseId
@functools.total_ordering
class WinVersion(object):
	"""
	Represents a Windows release: version major, minor, build, plus service
	pack, product type and a human-readable release name.

	Instances compare and order on the (major, minor, build) triple only.
	"""

	def __init__(
			self,
			major: int = 0,
			minor: int = 0,
			build: int = 0,
			releaseName: Optional[str] = None,
			servicePack: str = "",
			productType: str = ""
	):
		self.major = major
		self.minor = minor
		self.build = build
		# Derive a release name from the version numbers unless one is given.
		self.releaseName = releaseName if releaseName else self._getWindowsReleaseName()
		self.servicePack = servicePack
		self.productType = productType

	def _getWindowsReleaseName(self) -> str:
		"""Returns the public release name for a given Windows release based
		on major, minor, and build.
		For example, 6.1 will return 'Windows 7'.
		For Windows 10, the feature update release name will be included.
		On server systems, unless noted otherwise, client release names are
		returned (e.g. 'Windows 10 1809' on Server 2019).
		"""
		legacyNames = {
			(6, 1): "Windows 7",
			(6, 2): "Windows 8",
			(6, 3): "Windows 8.1",
		}
		if (self.major, self.minor) in legacyNames:
			return legacyNames[(self.major, self.minor)]
		if self.major == 10:
			# Builds with no registry release name (1507/10240) or with
			# multiple release builds are looked up here first.
			return _BUILDS_TO_RELEASE_NAMES.get(self.build, "Windows 10 unknown")
		return "Windows release unknown"

	def __repr__(self):
		parts = [self.releaseName, f"({self.major}.{self.minor}.{self.build})"]
		if self.servicePack:
			parts.append(f"service pack {self.servicePack}")
		if self.productType:
			parts.append(self.productType)
		return " ".join(parts)

	def _versionTriple(self):
		# Comparison key shared by the ordering dunders.
		return (self.major, self.minor, self.build)

	def __eq__(self, other):
		return self._versionTriple() == other._versionTriple()

	def __ge__(self, other):
		return self._versionTriple() >= other._versionTriple()
# Windows releases to WinVersion instances for easing comparisons.
# Note the aliases: WIN10 is the first Windows 10 release (1507) and
# WIN11 is the first Windows 11 release (21H2).
WIN7 = WinVersion(major=6, minor=1, build=7600)
WIN7_SP1 = WinVersion(major=6, minor=1, build=7601, servicePack="1")
WIN8 = WinVersion(major=6, minor=2, build=9200)
WIN81 = WinVersion(major=6, minor=3, build=9600)
WIN10 = WIN10_1507 = WinVersion(major=10, minor=0, build=10240)
WIN10_1511 = WinVersion(major=10, minor=0, build=10586)
WIN10_1607 = WinVersion(major=10, minor=0, build=14393)
WIN10_1703 = WinVersion(major=10, minor=0, build=15063)
WIN10_1709 = WinVersion(major=10, minor=0, build=16299)
WIN10_1803 = WinVersion(major=10, minor=0, build=17134)
WIN10_1809 = WinVersion(major=10, minor=0, build=17763)
WIN10_1903 = WinVersion(major=10, minor=0, build=18362)
WIN10_1909 = WinVersion(major=10, minor=0, build=18363)
WIN10_2004 = WinVersion(major=10, minor=0, build=19041)
WIN10_20H2 = WinVersion(major=10, minor=0, build=19042)
WIN10_21H1 = WinVersion(major=10, minor=0, build=19043)
WIN11 = WIN11_21H2 = WinVersion(major=10, minor=0, build=22000)
def getWinVer():
	"""Returns a record of current Windows version NVDA is running on.

	For Windows 10/11 the release name is augmented with the display
	version read from the registry, when available.
	"""
	winVer = sys.getwindowsversion()
	# #12509: on Windows 10, fetch whatever Windows Registry says for the current build.
	# #12626: note that not all Windows 10 releases are labeled "Windows 10"
	# (build 22000 is Windows 11 despite major.minor being 10.0).
	try:
		if WinVersion(
			major=winVer.major,
			minor=winVer.minor,
			build=winVer.build
		) >= WIN11:
			releaseName = f"Windows 11 {_getRunningVersionNameFromWinReg()}"
		else:
			releaseName = f"Windows 10 {_getRunningVersionNameFromWinReg()}"
	except RuntimeError:
		# No release name recorded in the registry (pre-1511 systems);
		# let WinVersion derive a name from major/minor/build instead.
		releaseName = None
	return WinVersion(
		major=winVer.major,
		minor=winVer.minor,
		build=winVer.build,
		releaseName=releaseName,
		servicePack=winVer.service_pack,
		productType=("workstation", "domain controller", "server")[winVer.product_type - 1]
	)
def isSupportedOS():
	"""Return True when the running OS meets NVDA's minimum requirement."""
	# NVDA can only run on Windows 7 Service pack 1 and above
	minimumWinVer = WIN7_SP1
	return getWinVer() >= minimumWinVer
# Directory where UWP OCR language data is stored.
UWP_OCR_DATA_PATH = os.path.expandvars(r"$windir\OCR")


def isUwpOcrAvailable():
	# UWP OCR is considered available when its data directory exists.
	return os.path.isdir(UWP_OCR_DATA_PATH)
# Deprecated: Windows 10 releases will be obtained from Windows Registry, no entries will be added.
# The below map will be removed in 2022.1.
# Presumably kept until then for backward compatibility with code that
# still imports it -- confirm before removal.
if version_year < 2022:
	WIN10_RELEASE_NAME_TO_BUILDS = {
		"1507": 10240,
		"1511": 10586,
		"1607": 14393,
		"1703": 15063,
		"1709": 16299,
		"1803": 17134,
		"1809": 17763,
		"1903": 18362,
		"1909": 18363,
		"2004": 19041,
		"20H2": 19042,
		"21H1": 19043,
	}
def isFullScreenMagnificationAvailable():
	"""Full screen magnification requires Windows 8 or later."""
	return getWinVer() >= WIN8
| StarcoderdataPython |
5047810 | <filename>message_recall/frontend_app.py
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Frontend handler for Message Recall.
This app services requests for the urls below.
"""
# setup_path must be first to find library imports.
import setup_path # pylint: disable=unused-import,g-bad-import-order
import frontend_views
import webapp2
# Set debug=True to see more logging.
app = webapp2.WSGIApplication([
    (r'/about', frontend_views.AboutPageHandler),
    (r'/create_task', frontend_views.CreateTaskPageHandler),
    (r'/history', frontend_views.HistoryPageHandler),
    # Task sub-views; the capture group is a task id of word chars/dashes.
    (r'/task/debug/([\w\-]+)', frontend_views.DebugTaskPageHandler),
    (r'/task/problems/([\w\-]+)', frontend_views.TaskProblemsPageHandler),
    (r'/task/report/([\w\-]+)', frontend_views.TaskReportPageHandler),
    (r'/task/users/([\w\-]+)', frontend_views.TaskUsersPageHandler),
    # Generic task-details route, listed after the more specific /task/...
    # routes above.
    (r'/task/([\w\-]+)', frontend_views.TaskDetailsPageHandler),
    (r'/', frontend_views.LandingPageHandler),
], debug=False)
| StarcoderdataPython |
11211068 | <reponame>tecnickcom/binsearch<filename>python/test/test_binsearch_col.py
"""Tests for binsearch module - column mode."""
import binsearch as bs
import os
from unittest import TestCase
nrows = 251  # number of data rows in the binary test fixture

# Fixture rows for the 8-bit column searches. Each tuple is:
#   (first, last, search,
#    fF, fFF, fFL,   # expected (pos, first, last) from col_find_first_uint8
#    fL, fLF, fLL)   # expected (pos, first, last) from col_find_last_uint8
testDataCol8 = [
    (0, 251, 0x00, 0, 0, 0, 1, 2, 2),
    (1, 251, 0x00, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x01, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x10, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x20, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x30, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x40, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x50, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x60, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x70, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x80, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x90, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9F, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAF, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBF, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCF, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDF, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEF, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x70, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x70, 251, 149, 150, 251, 149, 150),
]
# The 8-bit sub-column search covers the whole byte (bits 0-7 in the tests),
# so the expected results are byte-for-byte identical to testDataCol8.
# Build a copy of that table instead of duplicating 38 literal rows; a copy
# (rather than an alias) keeps accidental mutation of one table from
# leaking into the other.
testDataColSub8 = list(testDataCol8)
# Fixture rows for the 16-bit column searches; same field layout as
# testDataCol8: (first, last, search, fF, fFF, fFL, fL, fLF, fLL).
testDataCol16 = [
    (0, 251, 0x0000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x0001, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x0102, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F10, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x1011, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F20, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x2021, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F30, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x3031, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F40, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x4041, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F50, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x5051, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F60, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x6061, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F70, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x7071, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F80, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x8081, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F90, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x9091, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9FA0, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0A1, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAFB0, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0B1, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBFC0, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0C1, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCFD0, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0D1, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDFE0, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0E1, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEFF0, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0F1, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8F9, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9F9, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x7071, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x7071, 251, 149, 150, 251, 149, 150),
]
# The 16-bit sub-column search covers the full 16 bits (bits 0-15 in the
# tests), so the expected results are identical to testDataCol16.
# Copy that table instead of duplicating 38 literal rows.
testDataColSub16 = list(testDataCol16)
# Fixture rows for the 32-bit column searches; same field layout as
# testDataCol8: (first, last, search, fF, fFF, fFL, fL, fLF, fLL).
testDataCol32 = [
    (0, 251, 0x00000000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x00010203, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x01020304, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F101112, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x10111213, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F202122, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x20212223, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F303132, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x30313233, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F404142, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x40414243, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F505152, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x50515253, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F606162, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x60616263, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F707172, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x70717273, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F808182, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x80818283, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F909192, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x90919293, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9FA0A1A2, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0A1A2A3, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAFB0B1B2, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0B1B2B3, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBFC0C1C2, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0C1C2C3, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCFD0D1D2, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0D1D2D3, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDFE0E1E2, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0E1E2E3, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEFF0F1F2, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0F1F2F3, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8F9FAFB, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFFFFFFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9F9FAFB, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x70717273, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x70717273, 251, 149, 150, 251, 149, 150),
]
# Fixture rows for the 32-bit sub-column searches (the tests query
# bit range 8-23, so only the low half of each search value is set).
# Same field layout: (first, last, search, fF, fFF, fFL, fL, fLF, fLL).
testDataColSub32 = [
    (0, 251, 0x00000000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x00000102, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x00000203, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x00001011, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x00001112, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x00002021, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x00002122, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x00003031, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x00003132, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x00004041, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x00004142, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x00005051, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x00005152, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x00006061, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x00006162, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x00007071, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x00007172, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x00008081, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x00008182, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x00009091, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x00009192, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x0000A0A1, 160, 160, 160, 160, 161, 161),
    (0, 251, 0x0000A1A2, 161, 161, 161, 161, 162, 162),
    (0, 251, 0x0000B0B1, 176, 176, 176, 176, 177, 177),
    (0, 251, 0x0000B1B2, 177, 177, 177, 177, 178, 178),
    (0, 251, 0x0000C0C1, 192, 192, 192, 192, 193, 193),
    (0, 251, 0x0000C1C2, 193, 193, 193, 193, 194, 194),
    (0, 251, 0x0000D0D1, 208, 208, 208, 208, 209, 209),
    (0, 251, 0x0000D1D2, 209, 209, 209, 209, 210, 210),
    (0, 251, 0x0000E0E1, 224, 224, 224, 224, 225, 225),
    (0, 251, 0x0000E1E2, 225, 225, 225, 225, 226, 226),
    (0, 251, 0x0000F0F1, 240, 240, 240, 240, 241, 241),
    (0, 251, 0x0000F1F2, 241, 241, 241, 241, 242, 242),
    (0, 251, 0x0000F9FA, 249, 249, 249, 249, 250, 250),
    (0, 251, 0x0000FFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0x0000F9FA, 249, 249, 249, 249, 250, 250),
    (0, 51, 0x00007172, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x00007172, 251, 149, 150, 251, 149, 150),
]
# Fixture rows for the 64-bit column searches; same field layout as
# testDataCol8: (first, last, search, fF, fFF, fFL, fL, fLF, fLL).
testDataCol64 = [
    (0, 251, 0x0000000000000000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x0001020304050607, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x0102030405060708, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0F10111213141516, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x1011121314151617, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x1F20212223242526, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x2021222324252627, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x2F30313233343536, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x3031323334353637, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x3F40414243444546, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x4041424344454647, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x4F50515253545556, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x5051525354555657, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x5F60616263646566, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x6061626364656667, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x6F70717273747576, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x7071727374757677, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x7F80818283848586, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x8081828384858687, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x8F90919293949596, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x9091929394959697, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x9FA0A1A2A3A4A5A6, 160, 160, 160, 160, 161, 161),
    (0, 251, 0xA0A1A2A3A4A5A6A7, 161, 161, 161, 161, 162, 162),
    (0, 251, 0xAFB0B1B2B3B4B5B6, 176, 176, 176, 176, 177, 177),
    (0, 251, 0xB0B1B2B3B4B5B6B7, 177, 177, 177, 177, 178, 178),
    (0, 251, 0xBFC0C1C2C3C4C5C6, 192, 192, 192, 192, 193, 193),
    (0, 251, 0xC0C1C2C3C4C5C6C7, 193, 193, 193, 193, 194, 194),
    (0, 251, 0xCFD0D1D2D3D4D5D6, 208, 208, 208, 208, 209, 209),
    (0, 251, 0xD0D1D2D3D4D5D6D7, 209, 209, 209, 209, 210, 210),
    (0, 251, 0xDFE0E1E2E3E4E5E6, 224, 224, 224, 224, 225, 225),
    (0, 251, 0xE0E1E2E3E4E5E6E7, 225, 225, 225, 225, 226, 226),
    (0, 251, 0xEFF0F1F2F3F4F5F6, 240, 240, 240, 240, 241, 241),
    (0, 251, 0xF0F1F2F3F4F5F6F7, 241, 241, 241, 241, 242, 242),
    (0, 251, 0xF8F9FAFBFCFDFEFF, 249, 249, 249, 249, 250, 250),
    (0, 251, 0xFFFFFFFFFFFFFFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0xF9F9FAFBFCFDFEFF, 251, 249, 250, 251, 249, 250),
    (0, 51, 0x7071727374757677, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x7071727374757677, 251, 149, 150, 251, 149, 150),
]
# Fixture rows for the 64-bit sub-column searches (the tests query
# bit range 16-47, so only the low half of each search value is set).
# Same field layout: (first, last, search, fF, fFF, fFL, fL, fLF, fLL).
testDataColSub64 = [
    (0, 251, 0x0000000000000000, 0, 0, 0, 0, 1, 1),
    (1, 251, 0x0000000002030405, 1, 1, 1, 1, 2, 2),
    (0, 251, 0x0000000003040506, 2, 2, 2, 2, 3, 3),
    (0, 251, 0x0000000011121314, 16, 16, 16, 16, 17, 17),
    (0, 251, 0x0000000012131415, 17, 17, 17, 17, 18, 18),
    (0, 251, 0x0000000021222324, 32, 32, 32, 32, 33, 33),
    (0, 251, 0x0000000022232425, 33, 33, 33, 33, 34, 34),
    (0, 251, 0x0000000031323334, 48, 48, 48, 48, 49, 49),
    (0, 251, 0x0000000032333435, 49, 49, 49, 49, 50, 50),
    (0, 251, 0x0000000041424344, 64, 64, 64, 64, 65, 65),
    (0, 251, 0x0000000042434445, 65, 65, 65, 65, 66, 66),
    (0, 251, 0x0000000051525354, 80, 80, 80, 80, 81, 81),
    (0, 251, 0x0000000052535455, 81, 81, 81, 81, 82, 82),
    (0, 251, 0x0000000061626364, 96, 96, 96, 96, 97, 97),
    (0, 251, 0x0000000062636465, 97, 97, 97, 97, 98, 98),
    (0, 251, 0x0000000071727374, 112, 112, 112, 112, 113, 113),
    (0, 251, 0x0000000072737475, 113, 113, 113, 113, 114, 114),
    (0, 251, 0x0000000081828384, 128, 128, 128, 128, 129, 129),
    (0, 251, 0x0000000082838485, 129, 129, 129, 129, 130, 130),
    (0, 251, 0x0000000091929394, 144, 144, 144, 144, 145, 145),
    (0, 251, 0x0000000092939495, 145, 145, 145, 145, 146, 146),
    (0, 251, 0x00000000A1A2A3A4, 160, 160, 160, 160, 161, 161),
    (0, 251, 0x00000000A2A3A4A5, 161, 161, 161, 161, 162, 162),
    (0, 251, 0x00000000B1B2B3B4, 176, 176, 176, 176, 177, 177),
    (0, 251, 0x00000000B2B3B4B5, 177, 177, 177, 177, 178, 178),
    (0, 251, 0x00000000C1C2C3C4, 192, 192, 192, 192, 193, 193),
    (0, 251, 0x00000000C2C3C4C5, 193, 193, 193, 193, 194, 194),
    (0, 251, 0x00000000D1D2D3D4, 208, 208, 208, 208, 209, 209),
    (0, 251, 0x00000000D2D3D4D5, 209, 209, 209, 209, 210, 210),
    (0, 251, 0x00000000E1E2E3E4, 224, 224, 224, 224, 225, 225),
    (0, 251, 0x00000000E2E3E4E5, 225, 225, 225, 225, 226, 226),
    (0, 251, 0x00000000F1F2F3F4, 240, 240, 240, 240, 241, 241),
    (0, 251, 0x00000000F2F3F4F5, 241, 241, 241, 241, 242, 242),
    (0, 251, 0x00000000FAFBFCFD, 249, 249, 249, 249, 250, 250),
    (0, 251, 0x00000000FFFFFFFF, 250, 250, 250, 250, 251, 251),
    (0, 251, 0x00000000FAFBFCFD, 249, 249, 249, 249, 250, 250),
    (0, 51, 0x0000000072737475, 51, 50, 51, 51, 50, 51),
    (150, 251, 0x0000000072737475, 251, 149, 150, 251, 149, 150),
]
class TestFunctions(TestCase):
    """Functional tests for the column-mode binsearch primitives.

    Each fixture row supplies a search range and value plus the expected
    (position, first, last) triples for both the find-first and find-last
    variants; after a successful find, the has-next/has-prev helper is
    walked to count every matching row.

    The 16 original near-identical test methods are kept (same names, so
    the test IDs are unchanged) but now delegate to two shared checkers.
    """

    @classmethod
    def setUpClass(cls):
        # Memory-map the shared binary fixture once for the whole class.
        global src, fd, size, doffset, dlength, nrows, ncols, index
        inputfile = os.path.realpath(
            os.path.dirname(os.path.realpath(__file__))
            + "/../../c/test/data/test_data_col.bin"
        )
        src, fd, size, doffset, dlength, nrows, ncols, index, idx = bs.mmap_binfile(
            inputfile, [1, 2, 4, 8]
        )
        if fd < 0 or size != 3776:
            assert False, "Unable to open the file"

    @classmethod
    def tearDownClass(cls):
        # Release the memory mapping created in setUpClass.
        global src, fd, size
        h = bs.munmap_binfile(src, fd, size)
        if h != 0:
            assert False, "Error while closing the memory-mapped file"

    def _check_first(self, find, has_next, data):
        """Check a find-first variant against *data*, then walk forward.

        *find* is called as find(first, last, search) and *has_next* as
        has_next(pos, last, search); both are thin wrappers over the
        binsearch functions with the column/bit arguments pre-bound.
        """
        for first, last, search, fF, fFF, fFL, fL, fLF, fLL in data:
            rp, rf, rl = find(first, last, search)
            self.assertEqual(rp, fF)
            self.assertEqual(rf, fFF)
            self.assertEqual(rl, fFL)
            numitems = fL - fF + 1
            if (rp < last) and (numitems > 0):
                # Walk all matches with has_next and count them.
                pos = rp
                ret = True
                counter = 0
                while ret:
                    ret, pos = has_next(pos, last, search)
                    counter += 1
                self.assertEqual(counter, numitems)

    def _check_last(self, find, has_prev, data):
        """Check a find-last variant against *data*, then walk backward.

        *find* is called as find(first, last, search) and *has_prev* as
        has_prev(first, pos, search).
        """
        for first, last, search, fF, fFF, fFL, fL, fLF, fLL in data:
            rp, rf, rl = find(first, last, search)
            self.assertEqual(rp, fL)
            self.assertEqual(rf, fLF)
            self.assertEqual(rl, fLL)
            numitems = fL - fF + 1
            if (rp < last) and (numitems > 0):
                # Walk all matches with has_prev and count them.
                pos = rp
                ret = True
                counter = 0
                while ret:
                    ret, pos = has_prev(first, pos, search)
                    counter += 1
                self.assertEqual(counter, numitems)

    def test_col_find_first_uint8(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_uint8(src, index[0], f, t, s),
            lambda p, t, s: bs.col_has_next_uint8(src, index[0], p, t, s),
            testDataCol8,
        )

    def test_col_find_first_uint16(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_uint16(src, index[1], f, t, s),
            lambda p, t, s: bs.col_has_next_uint16(src, index[1], p, t, s),
            testDataCol16,
        )

    def test_col_find_first_uint32(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_uint32(src, index[2], f, t, s),
            lambda p, t, s: bs.col_has_next_uint32(src, index[2], p, t, s),
            testDataCol32,
        )

    def test_col_find_first_uint64(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_uint64(src, index[3], f, t, s),
            lambda p, t, s: bs.col_has_next_uint64(src, index[3], p, t, s),
            testDataCol64,
        )

    def test_col_find_last_uint8(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_uint8(src, index[0], f, t, s),
            lambda f, p, s: bs.col_has_prev_uint8(src, index[0], f, p, s),
            testDataCol8,
        )

    def test_col_find_last_uint16(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_uint16(src, index[1], f, t, s),
            lambda f, p, s: bs.col_has_prev_uint16(src, index[1], f, p, s),
            testDataCol16,
        )

    def test_col_find_last_uint32(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_uint32(src, index[2], f, t, s),
            lambda f, p, s: bs.col_has_prev_uint32(src, index[2], f, p, s),
            testDataCol32,
        )

    def test_col_find_last_uint64(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_uint64(src, index[3], f, t, s),
            lambda f, p, s: bs.col_has_prev_uint64(src, index[3], f, p, s),
            testDataCol64,
        )

    def test_col_find_first_sub_uint8(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_sub_uint8(src, index[0], 0, 7, f, t, s),
            lambda p, t, s: bs.col_has_next_sub_uint8(src, index[0], 0, 7, p, t, s),
            testDataColSub8,
        )

    def test_col_find_first_sub_uint16(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_sub_uint16(src, index[1], 0, 15, f, t, s),
            lambda p, t, s: bs.col_has_next_sub_uint16(src, index[1], 0, 15, p, t, s),
            testDataColSub16,
        )

    def test_col_find_first_sub_uint32(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_sub_uint32(src, index[2], 8, 23, f, t, s),
            lambda p, t, s: bs.col_has_next_sub_uint32(src, index[2], 8, 23, p, t, s),
            testDataColSub32,
        )

    def test_col_find_first_sub_uint64(self):
        self._check_first(
            lambda f, t, s: bs.col_find_first_sub_uint64(src, index[3], 16, 47, f, t, s),
            lambda p, t, s: bs.col_has_next_sub_uint64(src, index[3], 16, 47, p, t, s),
            testDataColSub64,
        )

    def test_col_find_last_sub_uint8(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_sub_uint8(src, index[0], 0, 7, f, t, s),
            lambda f, p, s: bs.col_has_prev_sub_uint8(src, index[0], 0, 7, f, p, s),
            testDataColSub8,
        )

    def test_col_find_last_sub_uint16(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_sub_uint16(src, index[1], 0, 15, f, t, s),
            lambda f, p, s: bs.col_has_prev_sub_uint16(src, index[1], 0, 15, f, p, s),
            testDataColSub16,
        )

    def test_col_find_last_sub_uint32(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_sub_uint32(src, index[2], 8, 23, f, t, s),
            lambda f, p, s: bs.col_has_prev_sub_uint32(src, index[2], 8, 23, f, p, s),
            testDataColSub32,
        )

    def test_col_find_last_sub_uint64(self):
        self._check_last(
            lambda f, t, s: bs.col_find_last_sub_uint64(src, index[3], 16, 47, f, t, s),
            lambda f, p, s: bs.col_has_prev_sub_uint64(src, index[3], 16, 47, f, p, s),
            testDataColSub64,
        )
class TestBenchmark(object):
    """pytest-benchmark timings for the column-mode search primitives."""

    global setup

    def setup():
        """Per-round benchmark setup: (re)map the binary fixture.

        Bug fix: the original read ``if fd >= 0: pass`` and then called
        munmap_binfile unconditionally, so the guard was dead and the unmap
        ran even when no valid mapping existed. The unmap now only happens
        for a valid descriptor.
        """
        global src, fd, size, doffset, dlength, nrows, ncols, index
        if fd >= 0:
            # Release the previous mapping before creating a new one.
            bs.munmap_binfile(src, fd, size)
        inputfile = os.path.realpath(
            os.path.dirname(os.path.realpath(__file__))
            + "/../../c/test/data/test_data_col.bin"
        )
        src, fd, size, doffset, dlength, nrows, ncols, index, idx = bs.mmap_binfile(
            inputfile, [1, 2, 4, 8]
        )
        if fd < 0 or size != 3776:
            assert False, "Unable to open the file"

    def _bench(self, benchmark, func, args):
        """Run *func* under benchmark.pedantic with the settings shared by all tests."""
        benchmark.pedantic(func, args=args, setup=setup, iterations=1, rounds=10000)

    def test_col_find_first_uint8_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_first_uint8, [src, index[0], 0, 251, 0x2F])

    def test_col_find_first_uint16_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_first_uint16, [src, index[1], 0, 251, 0x2F30])

    def test_col_find_first_uint32_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_first_uint32, [src, index[2], 0, 251, 0x2F303132])

    def test_col_find_first_uint64_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_first_uint64, [src, index[3], 0, 251, 0x2F30313233343536]
        )

    def test_col_find_last_uint8_benchmark(self, benchmark):
        # NOTE(review): last=250 here while every sibling uses 251 — confirm intended.
        self._bench(benchmark, bs.col_find_last_uint8, [src, index[0], 0, 250, 0x2F])

    def test_col_find_last_uint16_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_last_uint16, [src, index[1], 0, 251, 0x2F30])

    def test_col_find_last_uint32_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_last_uint32, [src, index[2], 0, 251, 0x2F303132])

    def test_col_find_last_uint64_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_last_uint64, [src, index[3], 0, 251, 0x2F30313233343536]
        )

    def test_col_find_first_sub_uint8_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_first_sub_uint8, [src, index[0], 0, 7, 0, 251, 0x2F])

    def test_col_find_first_sub_uint16_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_first_sub_uint16, [src, index[1], 0, 15, 0, 251, 0x2F30]
        )

    def test_col_find_first_sub_uint32_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_first_sub_uint32, [src, index[2], 8, 23, 0, 251, 0x00003031]
        )

    def test_col_find_first_sub_uint64_benchmark(self, benchmark):
        self._bench(
            benchmark,
            bs.col_find_first_sub_uint64,
            [src, index[3], 16, 47, 0, 251, 0x0000000031323334],
        )

    def test_col_find_last_sub_uint8_benchmark(self, benchmark):
        self._bench(benchmark, bs.col_find_last_sub_uint8, [src, index[0], 0, 7, 0, 251, 0x2F])

    def test_col_find_last_sub_uint16_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_last_sub_uint16, [src, index[1], 0, 15, 0, 251, 0x2F30]
        )

    def test_col_find_last_sub_uint32_benchmark(self, benchmark):
        self._bench(
            benchmark, bs.col_find_last_sub_uint32, [src, index[2], 8, 23, 0, 251, 0x00003031]
        )

    def test_col_find_last_sub_uint64_benchmark(self, benchmark):
        self._bench(
            benchmark,
            bs.col_find_last_sub_uint64,
            [src, index[3], 16, 47, 0, 251, 0x0000000031323334],
        )

    def test_col_tearDown(self):
        """Final cleanup: unmap the fixture and reset the module globals."""
        global src, fd, size
        h = bs.munmap_binfile(src, fd, size)
        fd = -1
        size = 0
        if h != 0:
            assert False, "Error while closing the memory-mapped file"
| StarcoderdataPython |
11377736 | import time
import sys
import string
import re
from textblob import TextBlob
from polyglot.text import Text
import pymongo
from pymongo import MongoClient
from suffix_trees import STree
import file_scanner
import file_to_db
import file_reader
""" Boyer Moore string search algorithm """
class last_occurrence(object):
    """Last-occurrence table for the Boyer-Moore bad-character rule."""

    def __init__(self, pattern, alphabet):
        """Map every letter of *alphabet* to its rightmost index in
        *pattern*, or -1 when the letter does not occur in the pattern.

        Note: str.rfind is itself a pattern-matching primitive; this is
        simply the most direct way to build the table.
        """
        self.occurrences = {letter: pattern.rfind(letter) for letter in alphabet}

    def __call__(self, letter):
        """Return the rightmost pattern index of *letter* (-1 if absent)."""
        return self.occurrences[letter]
def boyer_moore_match(text, pattern):
    """Boyer-Moore search (bad-character rule only).

    Returns the character ``text[i]`` at the start of the first occurrence
    of *pattern* in *text* (i.e. the pattern's first character), or -1 when
    the pattern does not occur. The return convention is kept for
    compatibility with callers that test ``result != -1``.

    Improvement: the bad-character table is built inline with a dict
    comprehension instead of the ``last_occurrence`` helper class, making
    the function self-contained.
    """
    # Rightmost index of each character of *text* inside the pattern
    # (-1 when the character does not occur in the pattern).
    last = {letter: pattern.rfind(letter) for letter in set(text)}
    m = len(pattern)
    n = len(text)
    i = m - 1  # text index
    j = m - 1  # pattern index
    while i < n:
        if text[i] == pattern[j]:
            if j == 0:
                return text[i]  # full match starts at text[i]
            else:
                i -= 1
                j -= 1
        else:
            # Bad-character shift: realign the pattern past the mismatch.
            l = last[text[i]]
            i = i + m - min(j, 1 + l)
            j = m - 1
    return -1
""" Horspool string search algorithm """
# Build the Horspool shift table for *pattern*.
def preprocess(pattern):
    """Return a dict mapping characters to their last index within
    pattern[:-1], seeded with -1 for every lowercase ASCII letter."""
    occ = {letter: -1 for letter in string.ascii_lowercase}
    # The final pattern character is deliberately excluded, per Horspool.
    for position, letter in enumerate(pattern[:-1]):
        occ[letter] = position
    return occ
# Search: count pattern occurrences with the Horspool algorithm.
def horspool_search(text, pattern, occ):
    """Return the number of occurrences of *pattern* in *text*, using the
    shift table *occ* produced by preprocess(). Prints "found!" per hit."""
    hits = 0
    m, n = len(pattern), len(text)
    pos = 0
    while pos <= n - m:
        # Compare the current window right-to-left.
        offset = m - 1
        while offset >= 0 and pattern[offset] == text[pos + offset]:
            offset -= 1
        if offset < 0:
            hits += 1
            print("found!")
        # Horspool shift, driven by the last character of the window.
        pos += m - 1
        pos -= occ[text[pos]]
    return hits
""" Knutt-Morris-Pratt """
def kmp_search(text, pattern):
    """Knuth-Morris-Pratt search over the combined string pattern + '#' + text.

    Returns the index (within the combined string) of the final character of
    the first match, or None when the pattern does not occur.
    """
    combined = pattern + '#' + text
    target = len(pattern)
    prefix = {0: 0}  # prefix-function values, filled in incrementally
    for pos in range(1, len(combined)):
        length = prefix[pos - 1]
        # Fall back through shorter borders until one can be extended.
        while length > 0 and combined[length] != combined[pos]:
            length = prefix[length - 1]
        if combined[length] == combined[pos]:
            length += 1
        prefix[pos] = length
        if length == target:
            return pos
    return None
"""def string_search(text, pattern):
i=j=0
lengthS = len(text)
lengthX = len(pattern)
while i<=lengthS - lengthX and j>lengthX:
if li[i+j]==x[j]:
j+=1
else:
i+=1
j=0
return i if j==lengthX else None """
def string_search(text, search):
    """Whole-word search: return the number of words in *search* when every
    one of them occurs as a word in *text*; otherwise return None.

    Bug fix: the original counted the *total* number of word matches and
    compared that count to len(search words). Duplicate words therefore
    produced both false positives (text "a a" "matched" search "a b") and
    false negatives (text "a a" failed to match search "a").
    """
    text_words = set(text.split())
    search_words = search.split()
    if all(word in text_words for word in search_words):
        return len(search_words)
    return None
if __name__ == '__main__':
""" Get personal data with Polyglot """
# get files using file_scanner
files = file_scanner.get_files('/home/vlad/Documents/Repo/python_string-search/path.list')
for found_file in files:
# add found files to DB and compare theyr hashes
if file_to_db.add_file_to_db(found_file[0], found_file[1]):
# file is not scanned for personala data
# read file
foundNamesCount = 0
fContent = file_reader.read_file_content(found_file[0], found_file[2])
# get Named Entities data (names, surnames)
text = Text(fContent)
personalData = ""
# Start Polyglot-NER timer
start_time = time.time()
for entity in text.entities:
if entity.tag == 'I-PER':
foundNamesCount = foundNamesCount + 1
strNames = ' '.join(entity)
personalData = personalData + "; " + strNames
# Stop Polyglot-NER timer
finish_time = time.time() - start_time
# get personal ID numbers
reText = '' . join(text)
#print("Looking for personal ID's")
ids = re.findall(r'[2-6][0-9]{10}',reText)
ids = ' '.join(ids)
# get e-mails
#print("Looking for emails")
eMails = re.findall(r'\S+@\S+', reText)
eMails = ' '.join(eMails)
# get document number
#print("Looking for document numbers")
docNr = re.findall(r'\s[0-9]{8}\s', reText)
docNr = ' '.join(docNr)
docNr = docNr.replace("\n", "")
#personalData = personalData + " " + ids + " " + eMails + " " + docNr
personalData = personalData + " "
# Save data to database
file_to_db.add_names(found_file[0],personalData)
file_to_db.set_date(found_file[0])
print(' ### Found ' + str(foundNamesCount) + ' names in file ' + found_file[0] + '. Took ' + str(finish_time))
""" BruteForce algorithm test """
"""files = file_scanner.get_files('/home/vlad/Documents/Repo/python_string-search/text_sources/')
for found_file in files:
fh = open('/home/vlad/Documents/Repo/python_string-search/tmp_text/vardai-pavardes.txt', 'r')
start_time = time.time()
foundNamesCount = 0
fContent = file_reader.read_file_content(found_file[0], found_file[2])
for line in fh:
pattern = ''.join(line)
#print("Looking for word - " + pattern)
f = open('/home/vlad/Documents/Repo/python_string-search/text_sources/wiki-kvant-teorija_2.txt', 'r')
occ = preprocess(pattern)
text=f.read()
results = string_search(fContent, pattern)
if (results != None):
print(pattern)
foundNamesCount += 1
print("File " + found_file[0] + " Brute Force search took --- %s seconds ---" % (time.time() - start_time) + " Found " + str(foundNamesCount))
fh.close """
""" Boyer Moore algorithm test """
"""files = file_scanner.get_files('/home/vlad/Documents/Repo/python_string-search/text_sources/')
#fh = open('/home/vlad/Documents/Repo/python_string-search/text_sources/vardai-pavardes.txt', 'r')
#start_time = time.time()
for found_file in files:
fh = open('/home/vlad/Documents/Repo/python_string-search/tmp_text/vardai-pavardes.txt', 'r')
start_time = time.time()
foundNamesCount = 0
fContent = file_reader.read_file_content(found_file[0], found_file[2])
for line in fh:
pattern = ''.join(line)
#print("Looking for word - " + pattern)
f = open('text_sources/wiki-straipsnis.txt', 'r')
text=f.read()
results = boyer_moore_match(fContent, pattern)
if (results != -1):
#print(results)
foundNamesCount += 1
print("File " + found_file[0] + " Boyer More took --- %s seconds ---" % (time.time() - start_time) + " Found " + str(foundNamesCount))
fh.close """
""" Horspool algorithm test """
"""
fh = open('text_sources/test-list.txt', 'r')
start_time = time.time()
for line in fh:
pattern = ''.join(line)
print("Looking for word - " + pattern)
f = open('text_sources/wiki-straipsnis-test.txt', 'r')
occ = preprocess(pattern)
text=f.read()
results = horspool_search(text, pattern, occ)
print(results)
print("Horspool search took --- %s seconds ---" % (time.time() - start_time))
fh.close"""
""" KMP algorithm test """
""" fh = open('/home/vlad/Documents/Repo/python_string-search/text_sources/vardai-pavardes.txt', 'r')
start_time = time.time()
for line in fh:
pattern = ''.join(line)
#print("Looking for word - " + pattern)
f = open('text_sources/wiki-straipsnis.txt', 'r')
occ = preprocess(pattern)
text=f.read()
results = kmp_search(text, pattern)
if (results != None):
print(results)
print("KMP search took --- %s seconds ---" % (time.time() - start_time))
fh.close """
""" Suffix tree algorithm test """
"""files = file_scanner.get_files('/home/vlad/Documents/Repo/python_string-search/text_sources/')
for found_file in files:
fh = open('/home/vlad/Documents/Repo/python_string-search/text_sources/vardai-pavardes.txt', 'r')
start_time = time.time()
myList =''
for line in fh:
myList += ''.join(line)
myListT = tuple(myList)
fContent = file_reader.read_file_content(found_file[0], found_file[2])
result = STree.STree(fContent)
print(result.find_all(myListT))
print("File " + found_file[0] + " Suffix Tree search took --- %s seconds ---" % (time.time() - start_time) + " Found ")
"""
| StarcoderdataPython |
4956260 | <filename>examples/application_commands/cog.py
import discord
from discord.ext import commands
class MyBot(commands.Bot):
    """Bot that responds to a '$' prefix as well as to direct mentions."""

    def __init__(self):
        prefix = commands.when_mentioned_or('$')
        super().__init__(command_prefix=prefix)

    async def on_ready(self):
        # Announce readiness on stdout once the gateway session is up.
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
# setting `guild_ids` in development is better when possible because
# registering global commands has a 1 hour delay
class Hello(discord.SlashCommand, guild_ids=[123]):
    """Say hello!"""
    def __init__(self, cog):
        # Keep a handle on the owning cog so callback can read shared state.
        self.cog = cog
    async def callback(self, response: discord.SlashCommandResponse):
        # Mention whoever greeted before us, if anyone has.
        previous = self.cog.last_user
        greeting = f'Hello {response.user.name}!'
        if previous:
            greeting = greeting + f' {previous.name} said hello last!'
        await response.send_message(greeting, ephemeral=True)
class Fun(discord.Cog):
    # Cog bundling the greeting commands.  It tracks the most recent
    # greeter and registers the Hello slash command alongside '$hello'.
    # (No docstrings on the cog/command: discord uses them as the
    # user-visible description/help text, which we don't want to change.)
    def __init__(self):
        # Shared state read by Hello.callback.
        self.last_user = None
        self.add_application_command(Hello(self))
    @commands.command()
    async def hello(self, ctx: commands.Context):
        await ctx.send(f'Hello {ctx.author.name}!')
        # Remember who greeted last for the slash command to report.
        self.last_user = ctx.author
bot = MyBot()
bot.add_cog(Fun())
bot.run('token')  # replace 'token' with a real bot token before running
| StarcoderdataPython |
12802059 | <gh_stars>1-10
from main.models import LocalUser, KUser
import urllib.request
import urllib.response
import urllib.parse
import json
# OAuth client credentials for the supported login providers.
# NOTE(review): secrets are hard-coded in source; they should be loaded
# from the environment or a settings file kept out of version control.
# GitHub OAuth app
github_param = {
    'client_id': '2b18fc8f7305f2e73416',
    'client_secret': '<KEY>'
}
# QQ Connect app
qq_param = {
    'client_id': '101456289',
    'client_secret': 'b26af05256f42dce9c81e5fcc4db0195',
    'callback': 'http://www.kindemh.cn/login/qq/callback'
}
class LoginLocalRegisterRequest:
    """
    Login system - registration request for a local (on-site) user.

    On success a LocalUser row and a matching KUser row are created and
    ``get_user_info()`` returns the new profile; on failure
    ``get_response()`` explains why and ``get_user_info()`` returns None.
    """
    def __init__(self, username, password, salt):
        # Bug fix: this attribute previously only existed on the success
        # path, so get_user_info() raised AttributeError after a failed
        # registration.  Initialise it up front.
        self.__user_info = None
        # Check whether this username is already registered.
        if LocalUser.objects.filter(username=username):
            self.__response = {
                'state': False,
                'reason': '该用户名已经被注册过了,换一个吧!'
            }
        else:
            # Persist the credentials (password/salt arrive pre-computed).
            local_user = LocalUser(
                username=username,
                password=password,
                salt=salt,
                is_admin=False
            )
            local_user.save()
            k_user = KUser(
                user_type='local',
                nickname=local_user.username,
                uid=local_user.pk,
                is_admin=local_user.is_admin
            )
            k_user.save()
            self.__user_info = {
                'user_type': k_user.user_type,
                'nickname': k_user.nickname,
                'uid': k_user.uid,
                'avatar': k_user.avatar,
                'is_admin': k_user.is_admin,
                'pk': k_user.pk
            }
            # Report success.
            self.__response = {
                'state': True
            }
    def get_response(self):
        """Return the outcome dict ({'state': bool, ...})."""
        return self.__response
    def get_user_info(self):
        """Return the created user's profile dict, or None on failure."""
        return self.__user_info
class LoginLocalGetSaltRequest:
    """
    Local login - request for the user's password salt.
    """
    def __init__(self, username):
        """
        Constructor.
        :param username: username to look up
        """
        # If the user exists...
        if LocalUser.objects.filter(username=username).exists():
            # ...fetch the salt stored for that user.
            local_user = LocalUser.objects.get(username=username)
            salt = local_user.salt
            # Success payload.
            self.__response = {
                'state': True,
                'salt': salt
            }
        # Unknown user.
        else:
            self.__response = {
                'state': False,
                'reason': '用户不存在!'
            }
    def get_response(self):
        """Return the outcome dict ({'state': bool, ...})."""
        return self.__response
class LoginLocalLoginRequest:
    """
    Local login - credential verification request.

    ``get_user_info()`` returns the profile dict on success, or None when
    the credentials did not match.
    """
    def __init__(self, username, password):
        """
        Constructor.
        :param username: username
        :param password: password (its SHA-256 hash)
        """
        # Bug fix: previously only set on the success path, so
        # get_user_info() raised AttributeError after a failed login.
        self.__user_info = None
        # Check whether username and password match a stored user.
        if LocalUser.objects.filter(username=username, password=password).exists():
            local_user = LocalUser.objects.get(username=username, password=password)
            # Success payload.
            self.__response = {
                'state': True
            }
            self.__user_info = {
                'user_type': 'local',
                'nickname': local_user.username,
                'uid': local_user.pk,
                'avatar': local_user.avatar,
                'is_admin': local_user.is_admin,
                'pk': local_user.pk
            }
        else:
            self.__response = {
                'state': False,
                'reason': '用户名或密码错误!'
            }
    def get_response(self):
        """Return the outcome dict ({'state': bool, ...})."""
        return self.__response
    def get_user_info(self):
        """Return the user's profile dict, or None on failure."""
        return self.__user_info
class LoginGitHubCallbackRequest:
    """
    GitHub login OAuth callback handler.
    """
    def __init__(self, code):
        """
        Constructor.
        :param code: temporary OAuth code returned by GitHub
        """
        self.__response = {}
        # Prepare the POST body for exchanging the code for an access_token.
        data = bytes(urllib.parse.urlencode({
            'client_id': github_param['client_id'],
            'client_secret': github_param['client_secret'],
            'code': code
        }), encoding='utf8')
        try:
            # Exchange the code for an access_token.
            response = urllib.request.urlopen('https://github.com/login/oauth/access_token', data=data)
            # Response is form-encoded; the first field is access_token=...
            access_token = str(response.read(), encoding='utf-8').split('&')[0].split('=')[1]
            # Fetch the user's profile with the access_token.
            # NOTE(review): sending the token as a query parameter is
            # deprecated by GitHub -- presumably this should move to the
            # Authorization header; confirm against the GitHub API docs.
            response = urllib.request.urlopen('https://api.github.com/user?access_token=' + access_token)
            # Decode the JSON payload into a Python object.
            user_info = json.loads(response.read().decode('utf-8'))
            # A present 'id' field means the login succeeded.
            if user_info.get('id'):
                user_info['id'] = str(user_info['id'])
                self.__response['success'] = True
                # Create the user on first login, otherwise load it.
                if not KUser.objects.filter(user_type='github', uid=user_info['id']).exists():
                    # First login: persist the GitHub profile.
                    k_user = KUser(
                        user_type='github',
                        uid=user_info['id'],
                        nickname=user_info['login'],
                        avatar=user_info['avatar_url'],
                        is_admin=False
                    )
                    k_user.save()
                else:
                    k_user = KUser.objects.get(user_type='github', uid=user_info['id'])
                self.__response['user_info'] = {
                    'user_type': k_user.user_type,
                    'uid': k_user.uid,
                    'nickname': k_user.nickname,
                    'avatar': k_user.avatar,
                    'is_admin': k_user.is_admin,
                    'pk': k_user.pk
                }
            else:
                self.__response['success'] = False
        # NOTE(review): the broad except silently maps any network/parse
        # error to a generic failure; consider logging the exception.
        except Exception:
            self.__response['success'] = False
            return
    def get_response(self):
        """
        Get the response payload.
        :return: dict with 'success' and, on success, 'user_info'
        """
        return self.__response
class LoginQQCallbackRequest:
    """
    QQ login OAuth callback handler.
    """
    def __init__(self, code):
        """
        Constructor.
        :param code: temporary OAuth code returned by QQ
        """
        self.__response = {}
        try:
            # Exchange the code for an access_token.
            response = urllib.request.urlopen(
                'https://graph.qq.com/oauth2.0/token?' +
                'grant_type=authorization_code' +
                '&client_id=' + qq_param['client_id'] +
                '&client_secret=' + qq_param['client_secret'] +
                '&code=' + code +
                '&redirect_uri=' + qq_param['callback']
            )
            # Response is form-encoded; the first field is access_token=...
            access_token = response.read().decode('utf-8').split('&')[0].split('=')[1]
            # Use the access_token to fetch the user's openid.
            response = urllib.request.urlopen(
                'https://graph.qq.com/oauth2.0/me?' +
                'access_token=' + access_token
            )
            # The body is JSONP ('callback( ... );'); strip the wrapper
            # before decoding the JSON payload.
            json_obj = json.loads(response.read().decode('utf-8').replace('callback( ', '').replace(' );', ''))
            open_id = json_obj['openid']
            # Fetch the user's profile with the openid.
            response = urllib.request.urlopen(
                'https://graph.qq.com/user/get_user_info?' +
                'access_token=' + access_token +
                '&oauth_consumer_key=' + qq_param['client_id'] +
                '&openid=' + open_id
            )
            # Decode the JSON payload into a Python object.
            user_info = json.loads(response.read().decode('utf-8'))
            # A non-empty nickname means the login succeeded.
            if user_info['nickname']:
                self.__response['success'] = True
                # Create the user on first login, otherwise load it.
                if KUser.objects.filter(user_type='qq', uid=open_id).exists():
                    # Returning user: load the stored profile.
                    k_user = KUser.objects.get(user_type='qq', uid=open_id)
                else:
                    # First login: persist the QQ profile.
                    k_user = KUser(
                        user_type='qq',
                        uid=open_id,
                        nickname=user_info['nickname'],
                        avatar=user_info['figureurl_qq_1'],
                        is_admin=False
                    )
                    k_user.save()
                self.__response['user_info'] = {
                    'user_type': k_user.user_type,
                    'uid': k_user.uid,
                    'nickname': k_user.nickname,
                    'avatar': k_user.avatar,
                    'is_admin': k_user.is_admin,
                    'pk': k_user.pk
                }
            else:
                self.__response['success'] = False
        # NOTE(review): the broad except silently maps any network/parse
        # error to a generic failure; consider logging the exception.
        except Exception:
            self.__response['success'] = False
        return
    def get_response(self):
        """Return dict with 'success' and, on success, 'user_info'."""
        return self.__response
class LoginLocalAdminLoginRequest:
    """
    Local login - administrator credential verification.

    ``get_admin_info()`` returns the admin's profile dict on success, or
    None when the credentials were wrong or the user is not an admin.
    """
    def __init__(self, username, password):
        """
        Constructor.
        :param username: username
        :param password: password
        """
        # Bug fix: previously only set on the success path, so
        # get_admin_info() raised AttributeError after a failed login.
        self.__admin_info = None
        # Check whether username and password match a stored user.
        if LocalUser.objects.filter(username=username, password=password).exists():
            local_user = LocalUser.objects.get(username=username, password=password)
            # Verify the admin flag.
            if local_user.is_admin:
                # Success payload.
                self.__response = {
                    'state': True
                }
                self.__admin_info = {
                    'user_type': 'local',
                    'nickname': local_user.username,
                    'uid': local_user.pk,
                    'avatar': local_user.avatar,
                    'is_admin': local_user.is_admin,
                    'pk': local_user.pk
                }
            else:
                self.__response = {
                    'state': False,
                    'reason': '您不是管理员,不要误闯禁地哦!'
                }
        else:
            self.__response = {
                'state': False,
                'reason': '用户名或密码错误!'
            }
    def get_response(self):
        """Return the outcome dict ({'state': bool, ...})."""
        return self.__response
    def get_admin_info(self):
        """Return the admin's profile dict, or None on failure."""
        return self.__admin_info
| StarcoderdataPython |
5100378 | import simulate_trajectories as sim
import numpy as np
# NOTE: Python 2 code (xrange and the print statement).
# Sweep the bias parameter 0.01..0.10 in steps of 0.01; for each value
# simulate 550 trajectories and save the traces as one CSV row each.
for i in xrange(10):
    data = []
    for j in xrange(550):
        # sim.test(...)[1] is presumably the FRET trace -- TODO confirm
        # against simulate_trajectories.test.
        data.append(sim.test(0.01*(1 + i))[1])
    data = np.array(data)
    #print data.shape
    oname = 'l1l9_sim_25c_biasd_{}_100ms_fret.dat'.format(i+1)
    print oname
    np.savetxt(oname, data, delimiter = ',')
| StarcoderdataPython |
1818296 | from questions_three.module_cfg import config_for_module
from questions_three.vanilla import call_with_exception_tolerance
from selenium import webdriver
from twin_sister import dependency
from ..exceptions import AllBrowsersBusy, UnsupportedBrowser
from .browserstack import launch_browserstack_browser
from .selenium_grid import launch_selenium_grid_browser
def launch_chrome():
    """Start a full-screen Chrome, honouring any configured user agent."""
    module_config = config_for_module(__name__)
    options_factory = dependency(webdriver.chrome.options.Options)
    options = options_factory()
    options.add_argument("start-fullscreen")
    user_agent = module_config.chrome_user_agent
    if user_agent:
        options.add_argument("user-agent=%s" % user_agent)
    chrome = dependency(webdriver)
    return chrome.Chrome(chrome_options=options)
def launch_firefox():
    """Start a Firefox browser via the injectable webdriver dependency."""
    selenium = dependency(webdriver)
    return selenium.Firefox()
def launch_local_browser():
    """Launch the locally configured browser (Chrome when unset)."""
    requested = config_for_module(__name__).use_browser
    normalized = requested.lower() if requested else requested
    if normalized is None or normalized == "chrome":
        return launch_chrome()
    if normalized == "firefox":
        return launch_firefox()
    raise UnsupportedBrowser('"%s" is not supported' % normalized)
def get_launch_function():
    """Map the configured browser location onto a launcher callable."""
    remote_launchers = {
        "browserstack": launch_browserstack_browser,
        "selenium_grid": launch_selenium_grid_browser,
    }
    location = config_for_module(__name__).browser_location
    key = location.lower() if location else location
    # Anything unrecognised (or unset) falls back to a local browser.
    return remote_launchers.get(key, launch_local_browser)
def launch_browser():
    """Launch a browser, retrying while every remote browser is busy."""
    cfg = config_for_module(__name__)
    launcher = get_launch_function()
    return call_with_exception_tolerance(
        func=launcher,
        tolerate=AllBrowsersBusy,
        timeout=cfg.browser_availability_timeout,
        throttle=cfg.browser_availability_throttle,
    )
| StarcoderdataPython |
377039 | """
Abandon hope all ye who enter the depths of this module
""" | StarcoderdataPython |
1817157 | <reponame>devBOX03/DSApractice
def sortStack(stack):
    """Recursively sort *stack* in place; smallest element ends on top."""
    if stack:
        top = stack.pop()
        sortStack(stack)
        insertAtCorrrectPos(stack, top)
def insertAtCorrrectPos(stack, item):
    """Insert *item* into a descending-sorted stack, keeping the order."""
    # Iterative form of the original recursion: temporarily move every
    # smaller element aside, drop the item in, then restore the rest.
    holding = []
    while stack and stack[-1] < item:
        holding.append(stack.pop())
    stack.append(item)
    while holding:
        stack.append(holding.pop())
if __name__ == '__main__':
    # Build a demo stack 1..4 (4 on top) and sort it in place.
    stack = list()
    stack.append(1)
    stack.append(2)
    stack.append(3)
    stack.append(4)
    print("Stack: %s" % stack)
    sortStack(stack)
    # After sorting, the smallest element sits on top (end of the list).
    print("Sorted Stack: %s" % stack)
| StarcoderdataPython |
5091753 | from Node import Node
class LinkedList:
    """Singly linked list with O(1) head insertion."""

    def __init__(self):
        # Empty list: head points at nothing.
        self.head = None

    def add(self, new_data):
        """Insert *new_data* at the head of the list (O(1))."""
        # 1. Create a new node
        new_node = Node(new_data)
        # 2. Set new node's next pointer to where the head is pointing
        new_node.next = self.head
        # 3. Set head pointer to point at the new node
        self.head = new_node

    def is_empty(self):
        """Return True when the list holds no nodes."""
        # PEP 8: identity comparison is the idiomatic None test and is
        # immune to custom __eq__ implementations on node payloads.
        return self.head is None

    def size(self):
        """Return the number of nodes by walking the chain (O(n))."""
        counter = 0
        current_node = self.head
        while current_node is not None:
            counter += 1
            current_node = current_node.next
        return counter
170004 | import json
from rest_framework.test import APIClient, APITestCase
from rest_framework.authtoken.models import Token
from ats.companies.models import CompanyAdmin, CompanyStaff, Company
from ats.users.models import User
from .factories import CompanyFactory
class TestCompanyAPIViewSet(APITestCase):
    """API tests for /api/companies/.

    Three clients are exercised: an authenticated staff user (may write),
    an authenticated regular user (read-only) and an anonymous client.
    Many methods are still unimplemented placeholders.
    """
    def setUp(self):
        """Create a staff user, a regular user and three API clients."""
        # Staff client: token-authenticated CompanyStaff user.
        self.staff = CompanyStaff.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
        self.staff_client = APIClient()
        token, _ = Token.objects.get_or_create(user=self.staff)
        self.staff_client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        # Regular client: token-authenticated plain User.
        self.user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
        self.client = APIClient()
        token, _ = Token.objects.get_or_create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        # Anonymous client: no credentials at all.
        self.unauthorized_client = APIClient()
        self.main_api = '/api/companies/'
        self.obj_api = '/api/companies/{}/'
        # Valid company payload reused across the tests.
        self.data = {
            "name": "test",
            "description": "Description",
            "email": "<EMAIL>",
            "website": "https://test.com"
        }
    def test_create_company_with_staff(self):
        """Staff may create a company; created_by is set to the staff user."""
        self.assertEqual(Company.objects.count(), 0)
        response = self.staff_client.post(
            self.main_api,
            data=json.dumps(self.data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(Company.objects.count(), 1)
        company = Company.objects.last()
        # #TODO: continue with checking response
        self.assertEqual(company.created_by, self.staff)
    def test_create_company_with_unauthorized_client(self):
        """Anonymous clients get 403 and no company is created."""
        self.assertEqual(Company.objects.count(), 0)
        response = self.unauthorized_client.post(
            self.main_api,
            data=json.dumps(self.data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Company.objects.count(), 0)
        company = Company.objects.last()
    def test_create_company_with_client(self):
        """Regular users get 403 and no company is created."""
        self.assertEqual(Company.objects.count(), 0)
        response = self.client.post(
            self.main_api,
            data=json.dumps(self.data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(Company.objects.count(), 0)
        company = Company.objects.last()
    def test_create_company_with_empty_data(self):
        """Posting an empty payload is rejected with 400."""
        response = self.staff_client.post(
            self.main_api,
            data=json.dumps({}),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 400)
        # import ipdb ; ipdb.set_trace()
    def test_create_company_with_same_data_twice(self):
        # TODO: not yet implemented.
        pass
    def test_update_company_with_staff(self):
        """Staff may update a company they created."""
        company = CompanyFactory(created_by=self.staff)
        response = self.staff_client.put(
            self.obj_api.format(company.id),
            data=json.dumps(self.data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 200)
        response = response.json()
        self.assertEqual(response.get('name'), self.data.get('name'))
    def test_update_company_with_unauthorized_client(self):
        # TODO: not yet implemented.
        pass
    def test_update_company_with_client(self):
        # TODO: not yet implemented.
        pass
    def test_update_another_company_information(self):
        """Staff may not update a company created by someone else."""
        company = CompanyFactory()
        response = self.staff_client.put(
            self.obj_api.format(company.id),
            data=json.dumps(self.data),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, 403)
    def test_partial_update_company_with_staff(self):
        # TODO: not yet implemented.
        pass
    def test_partial_update_company_with_unauthorized_client(self):
        # TODO: not yet implemented.
        pass
    def test_partial_update_company_with_client(self):
        # TODO: not yet implemented.
        pass
    def test_partial_update_another_company_information(self):
        # TODO: not yet implemented.
        pass
    def test_list_companies_with_client(self):
        """Regular users can list every company."""
        CompanyFactory.create_batch(5)
        response = self.client.get(
            self.main_api
        )
        self.assertEqual(response.status_code, 200)
        response = response.json()
        self.assertEqual(response.get('count'), 5)
    def test_list_companies_with_unauthorized_client(self):
        # TODO: not yet implemented.
        pass
    def test_list_company_with_staff(self):
        """Staff listing includes companies created by others too."""
        CompanyFactory(created_by=self.staff)
        CompanyFactory.create_batch(3)
        response = self.staff_client.get(
            self.main_api
        )
        self.assertEqual(response.status_code, 200)
        response = response.json()
        self.assertEqual(response.get('count'), 4)
    def test_retrieve_company_with_staff(self):
        # TODO: not yet implemented.
        pass
    def test_retrieve_company_with_unauthorized_client(self):
        # TODO: not yet implemented.
        pass
    def test_retrieve_company_with_client(self):
        # TODO: not yet implemented.
        pass
    def test_retrieve_another_company_information(self):
        # TODO: not yet implemented.
        pass
| StarcoderdataPython |
261506 | import unittest
from ctypes import c_uint16
from io import BytesIO
from emu101.emu import EMU
class LoadRegTest(unittest.TestCase):
    """Tests for the EMU CPU's load/jump/stack instructions.

    Each test assembles a small program of 16-bit big-endian words into
    ROM (and sometimes RAM), runs the emulator until HLT, then inspects
    CPU registers or the memory bus.
    """
    def setUp(self):
        # Fresh emulator per test.
        self.emu = EMU()
    def load_rom(self, words):
        """Pack *words* as 16-bit big-endian values and load them into ROM."""
        self.emu.rom.load(BytesIO(b''.join([
            int.to_bytes(w, 2, "big")
            for w in words
        ])))
    def load_ram(self, words, addr = 0):
        """Pack *words* as 16-bit big-endian values into RAM at *addr*."""
        self.emu.ram.load(BytesIO(b''.join([
            int.to_bytes(w, 2, "big")
            for w in words
        ])), c_uint16(addr))
    def test_ldp(self):
        """LDP loads an immediate into the data pointer (dp)."""
        self.load_rom([
            0b0000000011110111, 0xabcd, # ldp 0xabcd
            0b1111111111111111, # hlt
        ])
        self.emu.run()
        self.assertEqual(self.emu.cpu.dp.value, 0xabcd)
    def test_write_memory_at_dp(self):
        """WD0 writes register d0 to the memory address held in dp."""
        self.load_rom([
            0b0000000011110111, 0xabcd, # LDP 0xabcd
            0b0000000011000111, 0xbeef, # LD0 0xbeef
            0b1000001100111111, # WD0
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        self.assertEqual(self.emu.bus.read(c_uint16(0xabcd)).value, 0xbeef)
    def test_read_memory_at_dp(self):
        """RD0 reads the memory address held in dp into register d0."""
        self.emu.bus.write(c_uint16(0xabcd), c_uint16(0xbeef))
        self.load_rom([
            0b0000000011110111, 0xabcd, # LDP 0xabcd
            0b0000000010000111, # RD0
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        self.assertEqual(self.emu.cpu.d0.value, 0xbeef)
    def test_jmp_to_immediate(self):
        """JMP to an immediate address clears the pipeline."""
        # Put a hlt instruction at memory 0
        self.emu.bus.write(c_uint16(0x0000), c_uint16(0xFFFF))
        self.load_rom([
            0b0000000011100111, 0x0000, # jmp 0x0000
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        # It should be 2 because the pipeline was cleared on jump
        self.assertEqual(self.emu.cpu.ip.value, 2)
    def test_jmp_to_d0(self):
        """JMP to the address held in d0 clears the pipeline."""
        # Put a hlt instruction at memory 0
        self.emu.bus.write(c_uint16(0x0000), c_uint16(0xFFFF))
        self.load_rom([
            0b0000000011000111, 0x0000, # LD0 0x0000
            0b0000001101100111, # JMP
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        # It should be 2 because the pipeline was cleared on jump
        self.assertEqual(self.emu.cpu.ip.value, 2)
    def test_instruction_after_jmp(self):
        """The instruction at the register-jump target executes."""
        self.load_ram([
            0b0000000011000111, 0xbeef, # LD0 0xbeef
            0b1111111111111111, # HLT
        ])
        self.load_rom([
            0b0000000011000111, 0x0000, # LD0 0x0000
            0b0000001101100111, # JMP
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        self.assertEqual(self.emu.cpu.d0.value, 0xbeef)
    def test_instruction_after_jmp_to_immediate(self):
        """The instruction at the immediate-jump target executes."""
        self.load_ram([
            0b0000000011000111, 0xbeef, # LD0 0xbeef
            0b1111111111111111, # HLT
        ])
        self.load_rom([
            0b0000000011100111, 0x0000, # jmp 0x0000
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        self.assertEqual(self.emu.cpu.d0.value, 0xbeef)
    def test_jsr_and_ret(self):
        """JSR calls a subroutine in RAM; RET resumes after the call site."""
        self.load_ram([
            0b0000000011000111, 0xbeef, # LD0 0xbeef
            0b0010000010100111, # RET
            0b1111111111111111, # HLT
        ])
        self.load_rom([
            # WAACCCCCSSDDDJJJ
            0b1011001111100111, 0x0000, # JSR 0x0000
            0b0000000011000111, 0xbeaf, # LD0 0xbeaf
            0b1111111111111111, # HLT
        ])
        self.emu.run()
        self.assertEqual(self.emu.cpu.d0.value, 0xbeaf)
        self.assertEqual(self.emu.cpu.ip.value, 0xf006)
    def test_push(self):
        """PUSH d0 stores the register just below the top of the stack."""
        self.load_rom([
            # WAACCCCCSSDDDJJJ
            0b0000000011000111, 0xbeef, # LD0 0xbeef
            0b1010001101111111, # push d0
            0b1111111111111111, # hlt
        ])
        self.emu.run()
        self.assertEqual(self.emu.bus.read(c_uint16(0x01ff - 1)).value, 0xbeef)
    def test_hlt(self):
        """A lone HLT terminates run() without raising."""
        self.load_rom([
            0b1111111111111111, #hlt
        ])
        self.emu.run()
    def test_loads_pipeline(self):
        """Two CPU ticks fill the two-stage instruction pipeline."""
        self.emu.cpu.tick()
        self.emu.cpu.tick()
        self.assertEqual(len(self.emu.cpu.pipeline), 2)
| StarcoderdataPython |
3293486 | # Generated by Django 3.2.6 on 2021-08-16 11:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.2.6; avoid hand-editing applied migrations."""
    dependencies = [
        ('home', '0030_alter_solutionpage_body'),
    ]
    operations = [
        # Adds the UserProblem model (user-submitted problem reports).
        migrations.CreateModel(
            name='UserProblem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(verbose_name='Besedilo zgodbe')),
                ('email', models.EmailField(max_length=254, verbose_name='E-mail')),
                ('contact_permission', models.BooleanField(default=False, verbose_name='Lahko me kontaktirate')),
            ],
            options={
                'verbose_name': 'Oddan problem',
                'verbose_name_plural': 'Oddani problemi',
            },
        ),
        # Renames the RentalStory admin labels only (no schema change).
        migrations.AlterModelOptions(
            name='rentalstory',
            options={'verbose_name': 'Uporabniška zgodba', 'verbose_name_plural': 'Uporabniške zgodbe'},
        ),
    ]
| StarcoderdataPython |
3579442 | <filename>tests/base_tests/expression_tests/test_pos.py
from hypothesis import given
from symba.base import Expression
from . import strategies
@given(strategies.expressions)
def test_basic(expression: Expression) -> None:
    """Unary plus on any expression yields an Expression."""
    assert isinstance(+expression, Expression)
@given(strategies.definite_expressions)
def test_identity(expression: Expression) -> None:
    """Unary plus is the identity on definite expressions."""
    assert +expression == expression
| StarcoderdataPython |
11371820 | <reponame>Actis92/lightning-flash
import pytest
import torch
from flash.core.serve.types import Label
def test_path(session_global_datadir):
    """Round-trip a label through a labels file loaded from disk."""
    labels_file = session_global_datadir / "imagenet_labels.txt"
    label = Label(path=str(labels_file))
    assert label.deserialize("chickadee") == torch.tensor(19)
    assert label.serialize(torch.tensor(19)) == "chickadee"
def test_list():
    """Class names given as a list map onto their positional indices."""
    class_names = ["classA", "classB"]
    assert Label(classes=class_names).deserialize("classA") == torch.tensor(0)
def test_dict():
    """Int keys become the class indices; non-int keys raise TypeError."""
    mapping = {56: "classA", 48: "classB"}
    label = Label(classes=mapping)
    assert label.deserialize("classA") == torch.tensor(56)
    with pytest.raises(TypeError):
        Label(classes={"wrongtype": "classA"})
def test_wrong_type():
    """Unsupported class containers raise TypeError; None raises ValueError."""
    with pytest.raises(TypeError):
        Label(classes=set())
    with pytest.raises(ValueError):
        Label(classes=None)
| StarcoderdataPython |
3516251 | __version__ = "1.2.0"
__author__ = "<NAME>"
version_info = (1, 2, 0)
| StarcoderdataPython |
9719227 | <filename>models/official/unet3d/unet_config.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config to train UNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Default hyper-parameters and I/O settings for 3D UNet training.
UNET_CONFIG = {
    # Place holder for tpu configs.
    'tpu_config': {},
    'model_dir': '',
    'training_file_pattern': '',
    'eval_file_pattern': '',
    # The input files are GZip compressed and need decompression.
    'compressed_input': True,
    'use_bfloat16': True,
    'label_dtype': 'float32',
    'train_batch_size': 8,
    'eval_batch_size': 8,
    'predict_batch_size': 8,
    'train_epochs': 10,
    'train_item_count': 1000,
    'eval_item_count': 100,
    'train_steps': 100000,
    'eval_steps': 10,
    'num_steps_per_eval': 100,
    # Minimum seconds between evaluations / timeout waiting for checkpoints.
    'min_eval_interval': 180,
    'eval_timeout': None,
    'optimizer': 'adam',
    'momentum': 0.9,
    # Spatial dimension of the input volume (depth, height, width).
    'input_image_size': [128, 128, 128],
    # Number of channels of the input image.
    'num_channels': 1,
    # Spatial partition dimensions.
    'input_partition_dims': None,
    # Use deconvolution to upsample, otherwise upsampling.
    'deconvolution': True,
    # Number of segmentation classes (regions to segment).
    'num_classes': 3,
    # Number of filters used by the architecture.
    'num_base_filters': 32,
    # Depth of the network.
    'depth': 4,
    # Dropout rate applied across the network.
    'dropout_rate': 0.5,
    # Number of levels that contribute to the output.
    'num_segmentation_levels': 2,
    # Use batch norm.
    'use_batch_norm': True,
    'init_learning_rate': 0.00005,
    # Learning rate decay steps.
    'lr_decay_steps': 100000,
    # Learning rate decay rate.
    'lr_decay_rate': 0.5,
    # Data format, 'channels_last' and 'channels_first'.
    'data_format': 'channels_last',
    # Use class index for training. Otherwise, use one-hot encoding.
    'use_index_label_in_train': False,
    # Loss function, e.g. softmax cross entropy, adaptive_dice32.
    'loss': 'adaptive_dice32',
}
# Placeholder for configuration restrictions (currently none).
UNET_RESTRICTIONS = []
| StarcoderdataPython |
3323863 | import sys
sys.path.append("../../")
from appJar import gui
def test():
    """Callback shared by the toolbar and button widgets below."""
    print("test")
with gui() as app:
    # Minimal appJar demo: a label, a toolbar and a button, all wired to
    # the same callback defined above.
    app.label('hello world')
    # Third argument presumably toggles icon lookup -- TODO confirm
    # against the appJar addToolbar documentation.
    app.addToolbar(['help', 'save', 'open'], test, True)
    app.button('PRESS', test, icon='help')
| StarcoderdataPython |
250216 | #!/usr/bin/env python
class XXX(object):
    """Simple holder for four optional fields.

    Bug fix: ``p1`` now defaults to None.  The script below constructs
    ``XXX(p2=..., p3=..., p4=...)`` without ``p1``, which raised
    TypeError when ``p1`` was a required argument.  Callers that pass
    ``p1`` positionally are unaffected.
    """
    def __init__(self, p1=None, p2=None, p3=None, p4=None):
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        self.p4 = p4
# NOTE: Python 2 script (print statement).  p1 is not supplied here, so
# this line raises TypeError unless XXX gives p1 a default value.
xxx = XXX(p2='222', p3='111', p4='444')
print xxx.p1, xxx.p2, xxx.p3, xxx.p4
| StarcoderdataPython |
class Solution(object):
    def removeKdigits(self, num, k):
        """
        Remove k digits from the decimal string *num* so that the
        remaining number is as small as possible (LeetCode 402).

        :type num: str
        :type k: int
        :rtype: str
        """
        if k == len(num):
            return "0"
        kept = []
        for digit in num:
            # Greedily drop any larger digit that precedes a smaller one.
            while k and kept and kept[-1] > digit:
                kept.pop()
                k -= 1
            kept.append(digit)
        # Remaining digits are non-decreasing: trim leftovers from the right.
        if k:
            kept = kept[:-k]
        # Strip leading zeros but keep at least one digit.
        return "".join(kept).lstrip('0') or "0"
| StarcoderdataPython |
4968253 | from typing import Optional, List
from bluesky.protocols import Readable, Movable
# from pydantic import BaseModel
from dataclasses import dataclass
from ._units import ureg, get_units
@dataclass
class ConstantTerm:
    """One additive term of a Constant."""
    # Scalar coefficient.  For an offset term (var is None) it is taken
    # directly in the Constant's units; otherwise it scales the device value.
    coeff: float = 0
    # Optional device whose (set)position feeds the term; None = pure offset.
    var: Optional[Readable] = None
@dataclass
class Constant:
    """A derived quantity: a unit-aware linear combination of terms."""
    # Units in which the terms are accumulated.
    units: str
    # Additive terms; see ConstantTerm.
    terms: List[ConstantTerm]
    def evaluate(self, setpoints, units: Optional[str] = None) -> float:
        """Evaluate the constant and return its magnitude in *units*.

        :param setpoints: mapping from device to its pending setpoint;
            devices absent from the mapping fall back to their current
            ``position``.
        :param units: target units for the returned magnitude.
        """
        quantity = ureg.Quantity(0, self.units)
        for term in self.terms:
            if term.var is None:
                # Pure offset term, already expressed in self.units.
                quantity += ureg.Quantity(term.coeff, self.units)
                continue
            mot_units = get_units(term.var, self.units)
            # TODO: constants based on motors not scanned, some edge cases may not be possible
            # Such as the case where you want to set based on something that is moved as a result
            # of other set commands, which have not yet been told where to go
            # In that case some kind of pseudopositioner is likely the better call
            mot_quantity = ureg.Quantity(
                setpoints.get(term.var, term.var.position), mot_units
            ).to(self.units)
            mot_quantity *= term.coeff
            quantity += mot_quantity
        return quantity.to(units).magnitude
| StarcoderdataPython |
4808681 | <gh_stars>1-10
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /usr/bin/env python
# pylint: disable=E1101
"""
Bot-in-a-box: Creates and runs setup for Dialogflow Integration bot
"""
import subprocess
import json
import os
import re
from time import sleep
import firebase_admin
import requests
from googleapiclient import discovery
from google.oauth2.service_account import Credentials
from firebase_admin import credentials
from firebase_admin import db
FIREBASE_CREDENTIALS_FILE_NAME = 'generated_firebase_credentials.json'
SERVICE_ACCOUNT_NAME = 'firebase-bm-df-bot'
def enable_apis():
    """Enable every Google Cloud API this bot depends on via gcloud."""
    required_services = (
        'iam.googleapis.com', 'dialogflow.googleapis.com',
        'firebase.googleapis.com', 'cloudbuild.googleapis.com',
        "businessmessages.googleapis.com",
        "businesscommunications.googleapis.com",
    )
    print('Enabling APIs...')
    for service in required_services:
        print('\t' + service)
        subprocess.run(['gcloud', 'services', 'enable', service], check=True)
def set_region():
    """Create an App Engine app, prompting for a region when none exists.

    'gcloud app describe' writes to stderr when the project has no App
    Engine app yet; only in that case do we list regions and create one.
    """
    proc = subprocess.Popen('gcloud app describe',
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True)
    out, err = proc.communicate()
    if err:
        proc = subprocess.Popen('gcloud app regions list',
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)
        out, err = proc.communicate()
        # Drop the table header row and the trailing empty line.
        regions = out.decode('utf-8').split('\n')[1:-1]
        # First column of each row is the region id.
        regions_list = list(map(lambda x: x.split()[0], regions))
        print('\nRegions:')
        print('\n'.join(regions_list))
        deploy_region = ''
        # Re-prompt until the user picks one of the listed regions.
        while deploy_region not in regions_list:
            deploy_region = input(
                'Choose a region for the App Engine project: ')
        subprocess.run(
            ['gcloud', 'app', 'create', f'--region={deploy_region}'],
            check=True)
def get_service_key(project_id):
    """Create the Firebase service account and save its JSON key.

    Args:
        project_id: GCP project that will own the service account.

    The key is written to ../resources/ for later use by firebase_admin
    and the account is granted the project-level Editor role.  Failures
    are tolerated (the account may already exist on a re-run).
    """
    print('Creating and saving service account...')
    # Fix: the address literal was mangled; service accounts are addressed
    # as NAME@PROJECT_ID.iam.gserviceaccount.com.
    service_account_email = (
        f'{SERVICE_ACCOUNT_NAME}@{project_id}.iam.gserviceaccount.com')
    try:
        subprocess.run([
            'gcloud', 'iam', 'service-accounts', 'create',
            SERVICE_ACCOUNT_NAME, '--description="Created by bot-in-a-box"',
            '--display-name="Firebase Service Account"'
        ],
                       check=True)
        # Create and download a key for the new account.
        subprocess.run([
            'gcloud', 'iam', 'service-accounts', 'keys', 'create',
            f'../resources/{FIREBASE_CREDENTIALS_FILE_NAME}', '--iam-account',
            service_account_email
        ],
                       check=True)
    except subprocess.CalledProcessError:
        print(
            'Service account already exists or unable to create service account. Continuing...'
        )
    try:
        # Grant the service account editor rights on the project.
        subprocess.run([
            'gcloud', 'projects', 'add-iam-policy-binding', project_id,
            f'--member=serviceAccount:{service_account_email}',
            '--role=roles/editor'
        ],
                       check=True)
    except subprocess.CalledProcessError:
        print('Failed to add editor roles to service account. Continuing...')
def get_access_token():
    """Mint an OAuth access token from the saved Firebase service key."""
    key_path = f'../resources/{FIREBASE_CREDENTIALS_FILE_NAME}'
    certificate = firebase_admin.credentials.Certificate(key_path)
    return certificate.get_access_token().access_token
def wait_for_operation(operation_name):
    """Block until the named Firebase operation reports completion.

    :param operation_name: operation resource name as returned by the
        addFirebase call.
    Raises Exception if the operation finishes with an error.
    """
    # Load credentials and build a client to poll the operation.
    creds = Credentials.from_service_account_file(
        f'../resources/{FIREBASE_CREDENTIALS_FILE_NAME}')
    service = discovery.build('firebase', 'v1beta1', credentials=creds)
    # The name of the operation resource.
    name = operation_name
    request = service.operations().get(name=name)
    # Poll once per second until the operation is done.
    print('Waiting for Firebase add operation to complete...')
    while True:
        result = request.execute()
        if 'error' in result:
            # NOTE(review): raising bare Exception loses the error type;
            # a dedicated exception class would be clearer.
            raise Exception(result['error'])
        if 'done' in result and result['done']:
            print('Add Firebase operation completed.')
            return
        sleep(1)
def initialize_firebase(project_id):
    """Attach Firebase to *project_id* and seed the Realtime Database.

    Calls the addFirebase REST endpoint (tolerating 409 "already exists"),
    waits for the operation to finish, then writes setup/completed=True
    into the project's Realtime Database.
    """
    print('Initializing Firebase...')
    access_token = get_access_token()
    # call addFirebase endpoint
    uri = f'https://firebase.googleapis.com/v1beta1/projects/{project_id}:addFirebase'
    headers = {'Authorization': 'Bearer ' + access_token}
    req = requests.post(uri, headers=headers)
    request = req.json()
    # print(request)
    # 409 means Firebase is already enabled on this project — not an error.
    if 'error' in request and request['error']['code'] == 409:
        print('Firebase entity already exists')
    elif 'name' in request:
        print('Operation name:', request['name'])
        # wait for addFirebase completion
        wait_for_operation(request['name'])
    # initialize Realtime Database
    creds = credentials.Certificate(
        f'../resources/{FIREBASE_CREDENTIALS_FILE_NAME}')
    firebase_admin.initialize_app(
        creds, {'databaseURL': f'https://{project_id}.firebaseio.com'})
    ref = db.reference('setup/')
    ref.set({'completed': True})
def create_config(project_id, partner_key):
    """Write ../resources/config.json for the deployed application.

    Records the Firebase credential file name, the Realtime Database URL
    for *project_id*, and *partner_key* as the verification token.
    """
    print('Creating configuration file...')
    config = {}
    config['use_firebase_for_credentials'] = True
    config['firebase_service_account'] = FIREBASE_CREDENTIALS_FILE_NAME
    # Reconstructed from the signature: the redacted line stored the
    # caller-supplied partner key as the verification token.
    config['verification_token'] = partner_key
    config['firebase_database_url'] = f'https://{project_id}.firebaseio.com'
    with open('../resources/config.json', 'w') as file:
        json.dump(config, file)
def deploy():
    """Deploy the app to GCloud App Engine and return the deployed URL."""
    print('Deploying. This may take several minutes...')
    # create pipe to deploy GCloud App Engine; gcloud writes its progress
    # log to stderr, which is captured below
    process = subprocess.Popen('gcloud app deploy --quiet',
                               cwd=os.getcwd() + '/../',
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True)
    _, err = process.communicate()
    deploy_log = err.decode('utf-8')
    print(deploy_log)
    # match URL with regex
    url_regex = r'Deployed service \[default\] to \[(.+)\]'
    deploy_url = re.search(url_regex, deploy_log)
    # NOTE(review): if the deploy fails the pattern is absent, re.search
    # returns None and this subscription raises TypeError — confirm
    # whether a clearer error message is wanted here.
    return deploy_url[1]
def main():
    """Initialize project, enable APIs and setup Firebase.

    Prompts for the GCloud project id and the partner key, then runs the
    full setup pipeline: set active project, enable APIs, create the
    service account, initialize Firebase, write config.json and deploy.
    """
    # prompt for project id + partner key until filled
    project_id = ''
    while not project_id:
        project_id = input(
            'Google Cloud Project ID (not necessarily project name): ')
    partner_key = ''
    while not partner_key:
        partner_key = input('Partner key from Google: ')
    # set active project to specified project id
    print('Setting Active Project ID to:', project_id)
    subprocess.run(['gcloud', 'config', 'set', 'project', project_id],
                   check=True)
    set_region()
    enable_apis()
    get_service_key(project_id)
    initialize_firebase(project_id)
    create_config(project_id, partner_key)
    deployed_url = deploy()
    print('\n\n\n---')
    print(f"""
Link to Administration console: {deployed_url}/admin.
Link to Firebase Rules: https://console.firebase.google.com/project/{project_id}/database/{project_id}/rules
Please follow the instructions in the README of the parent directory.
""")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3279477 | <gh_stars>0
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonpatch
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from sqlalchemy import desc
from webob import exc
from murano.api.v1 import request_statistics
from murano.api.v1 import sessions
from murano.common.i18n import _
from murano.common import policy
from murano.common import utils
from murano.common import wsgi
from murano.db import models
from murano.db.services import core_services
from murano.db.services import environments as envs
from murano.db.services import sessions as session_services
from murano.db import session as db_session
from murano.services import states
from murano.utils import check_env
from murano.utils import check_session
from murano.utils import verify_env
LOG = logging.getLogger(__name__)
API_NAME = 'Environments'
class Controller(object):
    """WSGI controller exposing CRUD operations for murano environments."""

    @request_statistics.stats_count(API_NAME, 'Index')
    def index(self, request):
        """List environments visible to the caller.

        ``all_tenants=true`` or an explicit ``tenant`` query parameter
        requires the cross-tenant policy; otherwise results are limited
        to the caller's own tenant.
        """
        all_tenants = request.GET.get('all_tenants', 'false').lower() == 'true'
        tenant = request.GET.get('tenant', None)
        LOG.debug('Environments:List <all_tenants: {tenants}, '
                  'tenant: {tenant}>'.format(tenants=all_tenants,
                                             tenant=tenant))
        if all_tenants:
            policy.check('list_environments_all_tenants', request.context)
            filters = {}
        elif tenant:
            policy.check('list_environments_all_tenants', request.context)
            filters = {'tenant_id': tenant}
        else:
            policy.check('list_environments', request.context)
            # Only environments from same tenant as user should be returned
            filters = {'tenant_id': request.context.tenant}
        environments = envs.EnvironmentServices.get_environments_by(filters)
        environments = [env.to_dict() for env in environments]
        return {"environments": environments}

    @request_statistics.stats_count(API_NAME, 'Create')
    def create(self, request, body):
        """Create a new environment from the request *body*.

        Requires a non-blank ``name`` of at most 255 characters; raises
        409 when an environment with the same name already exists.
        """
        LOG.debug('Environments:Create <Body {body}>'.format(body=body))
        policy.check('create_environment', request.context)
        if not('name' in body and body['name'].strip()):
            msg = _('Please, specify a name of the environment to create')
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        name = six.text_type(body['name'])
        if len(name) > 255:
            msg = _('Environment name should be 255 characters maximum')
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            environment = envs.EnvironmentServices.create(
                body.copy(),
                request.context)
        except db_exc.DBDuplicateEntry:
            msg = _('Environment with specified name already exists')
            LOG.error(msg)
            raise exc.HTTPConflict(explanation=msg)
        return environment.to_dict()

    @request_statistics.stats_count(API_NAME, 'Show')
    @verify_env
    def show(self, request, environment_id):
        """Return the environment including status, session and services."""
        LOG.debug('Environments:Show <Id: {id}>'.format(id=environment_id))
        target = {"environment_id": environment_id}
        policy.check('show_environment', request.context, target)
        session = db_session.get_session()
        environment = session.query(models.Environment).get(environment_id)
        env = environment.to_dict()
        env['status'] = envs.EnvironmentServices.get_status(env['id'])
        # if env is currently being deployed we can provide information about
        # the session right away
        env['acquired_by'] = None
        if env['status'] == states.EnvironmentStatus.DEPLOYING:
            session_list = session_services.SessionServices.get_sessions(
                environment_id, state=states.SessionState.DEPLOYING)
            if session_list:
                env['acquired_by'] = session_list[0].id
        session_id = None
        if hasattr(request, 'context') and request.context.session:
            session_id = request.context.session
        if session_id:
            env_session = session.query(models.Session).get(session_id)
            check_session(request, environment_id, env_session, session_id)
        # add services to env
        get_data = core_services.CoreServices.get_data
        env['services'] = get_data(environment_id, '/services', session_id)
        return env

    @request_statistics.stats_count(API_NAME, 'Update')
    @verify_env
    def update(self, request, environment_id, body):
        """Rename an environment.

        The new name must be non-blank and at most 255 characters;
        duplicate names raise 409.
        """
        LOG.debug('Environments:Update <Id: {id}, '
                  'Body: {body}>'.format(id=environment_id, body=body))
        target = {"environment_id": environment_id}
        policy.check('update_environment', request.context, target)
        session = db_session.get_session()
        environment = session.query(models.Environment).get(environment_id)
        new_name = six.text_type(body['name'])
        if new_name.strip():
            if len(new_name) > 255:
                msg = _('Environment name should be 255 characters maximum')
                LOG.error(msg)
                raise exc.HTTPBadRequest(explanation=msg)
            try:
                environment.update({'name': new_name})
                environment.save(session)
            except db_exc.DBDuplicateEntry:
                msg = _('Environment with specified name already exists')
                LOG.error(msg)
                raise exc.HTTPConflict(explanation=msg)
        else:
            msg = _('Environment name must contain at least one '
                    'non-white space symbol')
            LOG.error(msg)
            raise exc.HTTPClientError(explanation=msg)
        return environment.to_dict()

    @request_statistics.stats_count(API_NAME, 'Delete')
    def delete(self, request, environment_id):
        """Delete (or, with ``abandon=true``, force-remove) an environment.

        The normal path opens a configuration session, marks the
        environment deleted and deploys that change; abandon removes the
        DB records without touching deployed resources.
        """
        target = {"environment_id": environment_id}
        policy.check('delete_environment', request.context, target)
        if request.GET.get('abandon', '').lower() == 'true':
            check_env(request, environment_id)
            LOG.debug('Environments:Abandon <Id: {id}>'
                      .format(id=environment_id))
            envs.EnvironmentServices.remove(environment_id)
        else:
            LOG.debug('Environments:Delete <Id: {id}>'
                      .format(id=environment_id))
            sessions_controller = sessions.Controller()
            session = sessions_controller.configure(request, environment_id)
            session_id = session['id']
            envs.EnvironmentServices.delete(environment_id, session_id)
            sessions_controller.deploy(request, environment_id, session_id)

    @request_statistics.stats_count(API_NAME, 'LastStatus')
    @verify_env
    def last(self, request, environment_id):
        """Return the most recent status record for each service."""
        session_id = None
        if hasattr(request, 'context') and request.context.session:
            session_id = request.context.session
        services = core_services.CoreServices.get_data(environment_id,
                                                       '/services',
                                                       session_id)
        session = db_session.get_session()
        result = {}
        for service in services or []:
            service_id = service['?']['id']
            entity_ids = utils.build_entity_map(service).keys()
            # newest status first, so first() is the latest record
            last_status = session.query(models.Status). \
                filter(models.Status.entity_id.in_(entity_ids)). \
                order_by(desc(models.Status.created)). \
                first()
            if last_status:
                result[service_id] = last_status.to_dict()
            else:
                result[service_id] = None
        return {'lastStatuses': result}

    @request_statistics.stats_count(API_NAME, 'GetModel')
    @verify_env
    def get_model(self, request, environment_id, path):
        """Return the sub-tree of the environment object model at *path*."""
        LOG.debug('Environments:GetModel <Id: %(env_id)s>, Path: %(path)s',
                  {'env_id': environment_id, 'path': path})
        target = {"environment_id": environment_id}
        policy.check('show_environment', request.context, target)
        session_id = None
        if hasattr(request, 'context') and request.context.session:
            session_id = request.context.session
        get_description = envs.EnvironmentServices.get_environment_description
        env_model = get_description(environment_id, session_id)
        try:
            result = utils.TraverseHelper.get(path, env_model)
        except (KeyError, ValueError):
            raise exc.HTTPNotFound
        return result

    @request_statistics.stats_count(API_NAME, 'UpdateModel')
    @verify_env
    def update_model(self, request, environment_id, body=None):
        """Apply a JSON-patch *body* to the environment object model.

        Patch paths arrive as lists of path components and are joined
        into JSON-pointer strings before being applied in place.
        """
        if not body:
            msg = _('Request body is empty: please, provide '
                    'environment object model patch')
            LOG.error(msg)
            raise exc.HTTPBadRequest(msg)
        LOG.debug('Environments:UpdateModel <Id: %(env_id)s, Body: %(body)s>',
                  {'env_id': environment_id, 'body': body})
        target = {"environment_id": environment_id}
        policy.check('update_environment', request.context, target)
        session_id = None
        if hasattr(request, 'context') and request.context.session:
            session_id = request.context.session
        get_description = envs.EnvironmentServices.get_environment_description
        env_model = get_description(environment_id, session_id)
        for change in body:
            change['path'] = '/' + '/'.join(change['path'])
        patch = jsonpatch.JsonPatch(body)
        try:
            patch.apply(env_model, in_place=True)
        except jsonpatch.JsonPatchException as e:
            raise exc.HTTPNotFound(str(e))
        save_description = envs.EnvironmentServices. \
            save_environment_description
        save_description(session_id, env_model)
        return env_model
def create_resource():
    """Build the WSGI resource wrapping the environments controller."""
    controller = Controller()
    return wsgi.Resource(controller)
| StarcoderdataPython |
5034098 | from flask import g
from plynx.db.demo_user_manager import DemoUserManager
from plynx.web.common import app, requires_auth, make_fail_response
from plynx.utils.common import JSONEncoder
demo_user_manager = DemoUserManager()
@app.route('/plynx/api/v0/token', strict_slashes=False)
@requires_auth
def get_auth_token():
    """Issue a fresh access/refresh token pair for the current user.

    ``g.user`` is presumably populated by the ``requires_auth``
    decorator — confirm against plynx.web.common.
    """
    access_token = g.user.generate_access_token()
    refresh_token = g.user.generate_refresh_token()
    return JSONEncoder().encode({
        'access_token': access_token.decode('ascii'),
        'refresh_token': refresh_token.decode('ascii')
    })
@app.route('/plynx/api/v0/demo', methods=['POST'])
def post_demo_user():
    """Create a throwaway demo user with sample graphs.

    Returns an access token with expiration=1800 (presumably seconds —
    confirm in the user model); demo accounts get no refresh token.
    """
    user = demo_user_manager.create_demo_user()
    if not user:
        return make_fail_response('Failed to create demo user')
    demo_user_manager.create_demo_graphs(user)
    access_token = user.generate_access_token(expiration=1800)
    return JSONEncoder().encode({
        'access_token': access_token.decode('ascii'),
        'refresh_token': 'Not assigned',
        'username': user.username
    })
| StarcoderdataPython |
9623689 | #!/usr/bin/env python
# MIT License, (c) <NAME> <EMAIL>
# https://github.com/joswr1ght/md5deep
import os, sys, hashlib
# Reproduce this output with slashes consistent for Windows systems
#ba2812a436909554688154be461d976c A\SEC575-Clown-Chat\nvram
# Optimized for low-memory systems, read whole file with blocksize=0
def md5sum(filename, blocksize=65536):
    """Return the hex MD5 digest of *filename*.

    Reads in *blocksize*-byte chunks to keep memory use low.  A
    blocksize <= 0 means "read the whole file at once" (the -f fast
    mode); previously the chunked loop treated blocksize=0 as an
    immediate EOF (f.read(0) returns the empty sentinel) and hashed
    nothing.
    """
    hash = hashlib.md5()
    with open(filename, "rb") as f:
        if blocksize <= 0:
            # Fast mode: slurp the entire file in one read.
            hash.update(f.read())
        else:
            # b"" sentinel: binary-mode reads yield bytes (works on
            # Python 2 as well, where b"" == "").
            for block in iter(lambda: f.read(blocksize), b""):
                hash.update(block)
    return hash.hexdigest()
def usage():
    """Print command-line usage to stdout."""
    # Parenthesized single-argument prints behave identically under
    # Python 2 (print statement) and Python 3 (print function), unlike
    # the original bare print statements which are a Python 3 syntax
    # error.
    print("Usage: md5deep.py [OPTIONS] [FILES]")
    print("-r - recursive mode, all subdirectories are traversed.")
    print("-X <file> - enables negative matching mode.")
    print("-f - speed up hash calculations, using more memory.")
    print("-0 - Uses a NULL character (/0) to terminate each line instead of a newline. Useful for processing filenames with strange characters.")
def validate_hashes(hashfile, hashlist):
    """Compare *hashlist* against a previously saved negative-match file.

    For each entry that differs from the saved list, prints just the
    filename when the file no longer exists on disk, otherwise prints
    the filename and a hash.
    """
    # Open file and build a new hashlist
    hashlistrec = []
    with open(hashfile, "r") as f:
        for line in f:
            filehash,filename = line.rstrip().split(" ")
            # Convert to platform convention directory separators
            filename = normfname(filename)
            # Add entry to hashlistrec
            hashlistrec.append((filename, filehash))
    for diff in list(set(hashlistrec) - set(hashlist)):
        # Replicate "-n" md5deep functionality; print only the filename
        # if the file is missing in the filename list; print the hash
        # of the current file if it is different from the negative match
        # file.
        if (not os.path.isfile(diff[0])):
            # File from negative match list is missing, just print filename
            print winfname(diff[0])
        else:
            # NOTE(review): diff[1] is the hash recorded in the file read
            # above, not a recomputed hash of the current file, and
            # winfname() on a hex digest is a no-op — confirm the intended
            # output ("hash filename" order is used elsewhere).
            print diff[0] + " " + winfname(diff[1])
def winfname(filename):
    """Return *filename* as a Windows-style path (forward slashes become
    backslashes)."""
    converted = filename.replace("/", "\\")
    return converted
def normfname(filename):
    """Return *filename* using this platform's directory separator."""
    if os.name == 'nt':
        # Windows: forward slashes become backslashes.
        return filename.replace("/", "\\")
    # POSIX: backslashes become forward slashes.
    return filename.replace("\\", "/")
if __name__ == '__main__':

    # Option flags parsed by hand from sys.argv (no argparse in this
    # Python 2 script).
    opt_recursive = None
    opt_negmatch = None
    opt_fast = None
    opt_null = None
    opt_files = []

    if len(sys.argv) == 1:
        usage()
        sys.exit(0)

    args = sys.argv[1:]
    it = iter(args)
    for i in it:
        if i == '-r':
            opt_recursive = True
            continue
        elif i == '-0':
            opt_null = True
            continue
        elif i == '-f':
            opt_fast = True
        elif i == '-X':
            # -X consumes the following argument as the negative-match file
            opt_negmatch = next(it)
            if not os.path.isfile(opt_negmatch):
                sys.stdout.write("Cannot open negative match file %s\n"%opt_negmatch)
                sys.exit(-1)
            continue
        else:
            opt_files.append(i)

    # -f passes blocksize=0 to md5sum ("fast" whole-file reads)
    if opt_fast:
        md5blocklen=0
    else:
        # Default to optimize for low-memory systems
        md5blocklen=65536

    # Build a list of (filename, hash) tuples for each file, regardless of
    # specified options
    hashlist = []

    # Hash files in the current directory
    for f in opt_files:
        if os.path.isfile(f):
            hashlist.append((f, md5sum(f, md5blocklen)))

    # Walk all subdirectories
    if opt_recursive:
        for start in sys.argv[1:]:
            for (directory, _, files) in os.walk(start):
                for f in files:
                    path = os.path.join(directory, f)
                    hashlist.append((path, md5sum(path, md5blocklen)))

    # With the hashlist built, compare to the negative match list, or print
    # the results.
    if opt_negmatch:
        validate_hashes(opt_negmatch, hashlist)
    else:
        # Just print out the list with Windows-style filenames
        for hash in hashlist:
            if opt_null:
                print "%s %s\0"%(hash[1],winfname(hash[0]))
            else:
                print "%s %s"%(hash[1],winfname(hash[0]))
| StarcoderdataPython |
8001972 | <reponame>sundarsrst/pytablewriter<filename>pytablewriter/style/_font.py
# encoding: utf-8
from __future__ import absolute_import
from enum import Enum, unique
@unique
class FontSize(Enum):
    """Relative font sizes understood by the table style layer."""
    NONE = "none"
    TINY = "tiny"
    SMALL = "small"
    MEDIUM = "medium"
    LARGE = "large"
@unique
class FontStyle(Enum):
    """Font slant styles understood by the table style layer."""
    NORMAL = "normal"
    ITALIC = "italic"
@unique
class FontWeight(Enum):
    """Font weights understood by the table style layer."""
    NORMAL = "normal"
    BOLD = "bold"
| StarcoderdataPython |
11218739 | #!usr/bin/env python3
#
# PROGRAMMER: <NAME>
# DATE CREATED: 8/8/2019
# REVISED DATE: 17/8/2019
# PURPOSE: This module contains all the initialisation code for the model
# Loading and saving the the trained model is also implemented in this module.
#
# module imports
#
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from workspace_utils import active_session
import numpy as np
from utils import get_input_args, load_datasets
from collections import OrderedDict
from os import path
class Classifier:
    """Wraps a torchvision model with a custom classifier head.

    Holds the model, optimizer, loss and training hyper-parameters taken
    from the parsed command-line args, and knows how to save and restore
    checkpoints.
    """

    # define the model parameters
    args = None
    model = None
    optimizer = None
    criterion = None
    device = None
    epochs = None
    learning_rate = None
    arch = None
    hidden_units = None

    def __init__(self, args, device):
        """Store hyper-parameters from *args* and build model + optimizer."""
        self.args = args
        self.hidden_units = args.hidden_units
        self.arch = args.arch
        self.learning_rate = args.learning_rate
        self.device = device
        self.epochs = args.epochs
        self.criterion = nn.NLLLoss()
        self.set_model(models) # set the model
        self.optimizer = optim.Adam(self.model.classifier.parameters(), lr=self.learning_rate) # set the optimizer and the learning rate

    def set_model(self, models):
        """
        Sets the model.

        Instantiates the torchvision architecture named by --arch,
        freezes its pretrained weights, and attaches a fresh classifier
        head sized by self.hidden_units with 102 output classes.

        parameters:
            models - the torchvision.models module to build from
        return:
            None - function does not return anything
        """
        # define and set the model
        # NOTE(review): all four pretrained networks are instantiated (and
        # their weights downloaded) even though only one is selected —
        # consider constructing only the requested architecture.
        resnet18 = models.resnet18(pretrained=True)
        alexnet = models.alexnet(pretrained=True)
        vgg16 = models.vgg16(pretrained=True)
        densenet201 = models.densenet201(pretrained=True)

        models = {'resnet': resnet18, 'alexnet': alexnet, 'vgg': vgg16, 'densenet201': densenet201}

        # apply model
        self.model = models[self.arch]

        # Freeze parameters so we don't backprop through them
        for param in self.model.parameters():
            param.requires_grad = False

        # set the classifier to match our datasets
        # NOTE(review): the 1920-wide fc1 input presumably matches
        # densenet201's feature size; other --arch choices (and resnet18,
        # which uses .fc rather than .classifier as its head) may not line
        # up — confirm before training with non-densenet architectures.
        classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(1920, 1000)),
            ('ReLU', nn.ReLU()),
            ('fc2', nn.Linear(1000, self.hidden_units)),
            ('ReLU', nn.ReLU()),
            ('Dropout', nn.Dropout(0.7)),
            ('fc3', nn.Linear(self.hidden_units, 102)),
            ('output', nn.LogSoftmax(dim=1))
        ]))

        self.model.classifier = classifier

    def load_checkpoint(self, save_dir):
        """
        Loads the saved model, if the checkpoint file exists.

        parameters:
            save_dir - the path to the file where the model is saved
        return:
            None - function does not return anything
        """
        checkpoint = None
        if path.exists(save_dir):
            # load the checkpoint file; map storages to CPU when no GPU
            # is in use so GPU-saved checkpoints still load
            if self.device == 'cpu':
                checkpoint = torch.load(save_dir,map_location=lambda storage, location: storage)
            else:
                checkpoint = torch.load(save_dir)
            # load the hyperparameter states form the checkpoint
            self.model.load_state_dict(checkpoint['state_dict'])
            self.model.class_to_idx = checkpoint['class_to_idx']
            self.optimizer.load_state_dict(checkpoint['optimizer_state'])
            self.epochs = checkpoint['epochs']
            self.learning_rate = checkpoint['learning_rate']
            self.arch = checkpoint['arch']
        else:
            # do nothing if there is nothing to load
            pass

    def save_checkpoint(self, save_dir, train_datasets):
        """
        Saves the trained model and other parameters to disk.

        parameters:
            save_dir - the file path where the model should be saved
            train_datasets - the datasets that the model was trained on.
                this param is being used for getting the class to idx mappings
        """
        self.model.class_to_idx = train_datasets.class_to_idx
        # crete custom dictionary to save additional params
        checkpoint = {'epochs': self.epochs,
                      'classifier': self.model.classifier,
                      'learning_rate': self.learning_rate,
                      'arch': self.arch,
                      'class_to_idx': self.model.class_to_idx,
                      'optimizer_state': self.optimizer.state_dict(),
                      'state_dict': self.model.state_dict()}
        torch.save(checkpoint, save_dir)
| StarcoderdataPython |
9652858 | <reponame>artefactual-labs/clockify-tool
import argparse
import dateutil
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from clockifytool import helpers
def preprocess_argv():
    """Normalize sys.argv into a canonical command argument list.

    Expands one-letter command abbreviations, rewrites "+<project>" as
    "new <project>", prepends "list" when the first argument is a time
    period, and otherwise replaces the arguments with the bare "list"
    command.
    """
    # Remove script from argv
    argv = sys.argv[1:]

    if len(argv):
        command_abbreviations = {
            'l': 'list',
            'n': 'new',
            'u': 'update',
            'd': 'delete',
            'w': 'workspaces',
            'p': 'projects',
            'pd': 'project',
            'td': 'task',
            '-v': 'version',
            '--version': 'version'
        }

        if argv[0] in command_abbreviations:
            # Expand command abbreviation
            argv[0] = command_abbreviations[argv[0]]
        elif argv[0][0:1] == '+':
            # "+<project>" is shorthand for "new <project>"
            argv = ['new', argv[0][1:]] + argv[1:]
        elif helpers.resolve_period_abbreviation(argv[0]):
            # If time period given, not command, use as basis for list command
            argv = ['list'] + argv[0:]
        else:
            # Default to "list" command (any unrecognized args are dropped)
            argv = ['list']

    return argv
def arg_parser():
    """Return the ArgumentParser for this application.

    Builds one sub-command per operation; each sub-command stores the
    name of its handler function in ``func`` via set_defaults.
    """
    parser = argparse.ArgumentParser(description='Clockify client.')
    parser.add_argument('-v', '--version', help='show version and exit', action='store_true')
    subparsers = parser.add_subparsers(dest='command')

    # Parent parser for entry-specific commands
    entry_parser = argparse.ArgumentParser(add_help=False)
    entry_parser.add_argument('-c', '--comments', metavar='comments: required for new time entries', action='store')
    entry_parser.add_argument('-t', '--hours', metavar='hours spent: required for new time entries', action='store')
    entry_parser.add_argument('-d', '--date', metavar='date', action='store', help='defaults to today')
    entry_parser.add_argument('-b', '--billable', action='store_true')

    # New entry command
    parser_new = subparsers.add_parser('new', help='Create new time entry', parents=[entry_parser])
    parser_new.add_argument('id', metavar='project ID', help='ID of project or task: required')
    parser_new.add_argument('-s', '--start', metavar='start time', action='store')
    parser_new.set_defaults(func='new_entry')

    # Update entry command
    parser_update = subparsers.add_parser('update', help='Update time entry', parents=[entry_parser])
    parser_update.add_argument('id', metavar='entry ID', help='ID of time entry: required')
    parser_update.add_argument('-a', '--append', metavar='append: append text to comments', action='store')
    parser_update.add_argument('-u', '--unbillable', action='store_true')
    parser_update.set_defaults(func='update_entry')

    # List command
    parser_list = subparsers.add_parser('list', help='List time entries', epilog=helpers.describe_periods())
    parser_list.add_argument('period', nargs='?', metavar='period', help='time period: optional, overrides -s and -e')
    parser_list.add_argument('-s', '--start', metavar='start date', action='store')
    parser_list.add_argument('-e', '--end', metavar='end date', action='store')
    parser_list.add_argument('--strict', action='store_true')
    parser_list.add_argument('-v', '--verbose', action='store_true')
    parser_list.set_defaults(func='list_entries')

    # Delete command
    parser_delete = subparsers.add_parser('delete', help='Delete time entry')
    parser_delete.add_argument('id', metavar='time entry ID', help='ID of time entry: required')
    parser_delete.set_defaults(func='delete_entry')

    # Workspaces command
    parser_workspaces = subparsers.add_parser('workspaces', help='List workspaces')
    parser_workspaces.set_defaults(func='list_workspaces')

    # Projects command
    parser_projects = subparsers.add_parser('projects', help='List projects')
    parser_projects.set_defaults(func='list_projects')

    # Project details command
    parser_project = subparsers.add_parser('project', help='Project details')
    parser_project.add_argument('id', metavar='project ID', help='ID of project: required')
    parser_project.set_defaults(func='project_details')

    # Task details command
    parser_task = subparsers.add_parser('task', help='Task details')
    parser_task.add_argument('id', metavar='task ID', help='ID of task: required')
    parser_task.set_defaults(func='task_details')

    # Cache command
    parser_cache = subparsers.add_parser('cache', help='Cache status/management')
    parser_cache.add_argument('-f', '--flush', action='store_true')
    parser_cache.set_defaults(func='cache_statistics')

    # Version command: no handler func — dispatched on the command name
    # (the variable the result used to be bound to was never used).
    subparsers.add_parser('version', help='Display version')

    return parser
def validate_args(parser, args, config):
    """Normalize parsed *args* in place and abort on invalid values.

    Resolves period abbreviations, project aliases and preset
    comments/hours from *config*, expands date-calculation expressions,
    and reports errors via parser.error().
    """
    # Normalize and validate period
    if 'period' in args and args.period:
        args.period = helpers.resolve_period_abbreviation(args.period)
        if not args.period:
            parser.error('Invalid period.')

    # Normalize and validate project/entry ID
    if 'id' in args and args.id:
        if args.command == 'new':
            # Allow use of preset comments and/or hours
            default_comments = helpers.template_field(args.id, 'comments', config['projects'])
            default_hours = helpers.template_field(args.id, 'hours', config['projects'])

            if default_comments and not args.comments:
                args.comments = default_comments

            if default_hours and not args.hours:
                args.hours = default_hours

            # Resolve preset name to ID
            args.id = helpers.resolve_project_alias(args.id, config['projects'])

    # Resolve dates, if set
    if 'date' in args and args.date:
        args.date = resolve_and_validate_date_value(args.date, parser)

    if 'start' in args and args.start:
        args.start = resolve_and_validate_date_value(args.start, parser)

    if 'end' in args and args.end:
        args.end = resolve_and_validate_date_value(args.end, parser)

    # Don't allow both billable and unbillable options to be used at the same time
    if ('billable' in args and args.billable) and ('unbillable' in args and args.unbillable):
        parser.error("Both --billable and --unbillable can't be used at the same time.")

    # Sanity-check hours, if set
    if 'hours' in args and args.hours:
        try:
            float(args.hours)
        except ValueError:
            parser.error('Invalid hours value.')

    return args
def resolve_and_validate_date_value(value, parser):
    """Expand a date-calculation expression and validate the result.

    Returns the resolved date string, or aborts via parser.error() when
    the result does not parse as a date.
    """
    # Resolve date calculation
    value = helpers.handle_date_calculation_value(value)

    # Make sure value is actually a date
    # NOTE(review): this module only does `import dateutil`; accessing
    # dateutil.parser relies on the submodule having been imported
    # elsewhere — confirm (otherwise `import dateutil.parser` is needed).
    try:
        dateutil.parser.parse(value)
    except ValueError:
        parser.error('{} is not a valid date.'.format(value))

    return value
| StarcoderdataPython |
280458 | """@desc
Parser for google scholar search results
"""
import re
from ..base import BaseSearch, ReturnType, SearchItem
class Search(BaseSearch):
    """
    Searches Google Scholar for string
    """
    name = "GoogleScholar"
    search_url = "https://scholar.google.gr/scholar?"
    summary = "\tGoogle Scholar is a freely accessible web search engine that indexes the full "\
        "text or metadata of scholarly literature across an array of publishing formats and "\
        "disciplines."

    def get_params(self, query=None, offset=None, page=None, **kwargs):
        """Build the query-string parameters for a Scholar request."""
        params = {}
        params["hl"] = "en"
        params["start"] = page
        params["q"] = query
        return params

    def parse_soup(self, soup):
        """
        Parses Google Scholar Search Soup for results
        """
        # find all class_='gs_r gs_or gs_scl' => each result
        return soup.find_all('div', class_='gs_r gs_or gs_scl')

    def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
        """
        Parses the source code to return

        :param single_result: single result found in <div class="gs_r gs_or gs_scl">
        :type single_result: `bs4.element.ResultSet`
        :return: parsed title, link, description, file link, result type of single result
        :rtype: dict
        """
        rdict = SearchItem()
        r_elem = single_result.find('h3', class_='gs_rt')

        if return_type in (ReturnType.FULL, ReturnType.LINK):
            link_tag = r_elem.find('a')
            if link_tag:
                raw_link = link_tag.get('href')
            else:
                raw_link = ''
            rdict["links"] = raw_link

        if return_type in (ReturnType.FULL, return_type.DESCRIPTION):
            desc = single_result.find('div', class_='gs_rs')
            if desc:
                desc = desc.text
            else:
                desc = ''
            rdict["descriptions"] = desc

        if return_type in (ReturnType.FULL, return_type.TITLE):
            title = r_elem.text
            # strip leading result-type markers such as "[PDF]"
            title = re.sub(r'^[\[\w+\]]+ ', '', title)
            rdict["titles"] = title

        if return_type == ReturnType.FULL:
            t_elem = single_result.find('span', class_='gs_ct1')
            if t_elem:
                result_type = t_elem.text
            else:
                result_type = ''

            f_elem = single_result.find('div', class_='gs_or_ggsm')
            if f_elem:
                # BUG FIX: the file-link anchor was previously looked up in
                # r_elem (the title heading), so file links were never the
                # sidebar file URL; the anchor lives inside f_elem.
                flink_tag = f_elem.find('a')
                if flink_tag:
                    file_link = flink_tag.get('href')
                else:
                    file_link = ''
            else:
                file_link = ''

            rdict.update({
                "result_types": result_type,
                "files_links": file_link
            })
        return rdict
| StarcoderdataPython |
173723 | <reponame>Lumexralph/python-algorithm-datastructures
def linear_sum(S, n):
    """Return the sum of the first n elements of sequence S."""
    total = 0
    for index in range(n):
        total += S[index]
    return total
# Demo: sum every element of the sample list.
S = [1, 5, 8, 9, 4, 9, 3]
print(linear_sum(S, len(S)))
| StarcoderdataPython |
12863645 | <filename>lang/Python/abstract-type-2.py<gh_stars>0
from abc import ABCMeta, abstractmethod
class BaseQueue(metaclass=ABCMeta):
    """Abstract base class for queues.

    Subclasses must implement Enqueue() and Dequeue(); items are stored
    in the ``contents`` list.
    """
    def __init__(self):
        # Backing store for queued items.
        self.contents = list()

    @abstractmethod
    def Enqueue(self, item):
        """Add *item* to the queue (subclass responsibility)."""
        pass

    @abstractmethod
    def Dequeue(self):
        """Remove and return the next item (subclass responsibility)."""
        pass

    def Print_Contents(self):
        """Print all queued items on one line, separated by spaces."""
        for i in self.contents:
            print(i, end=' ')
| StarcoderdataPython |
4879689 | <filename>example/plugins/quest.py<gh_stars>0
from kutana import Plugin
pl = Plugin("Quest")

# The quest is a tiny per-user finite state machine driven by user_state:
#   "" --quest--> quest:1 --left--> quest:2 --left/right--> quest:end
#                          --right-> quest:3 --left/right--> quest:end
#   quest:end --ok--> ""   (".exit" aborts from any quest:* state)


@pl.on_commands(["quest"], user_state="")
async def _(msg, ctx):
    # Start the quest from the idle state.
    await ctx.set_state(user_state="quest:1")
    await ctx.reply("Choose: left or right")


@pl.on_commands(["left"], user_state="quest:1")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:2")
    await ctx.reply("Choose: right or left")


@pl.on_commands(["left"], user_state="quest:2")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:end")
    await ctx.reply("You have chosen: left, left\nWrite '.OK'")


@pl.on_commands(["right"], user_state="quest:2")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:end")
    await ctx.reply("You have chosen: left, right\nWrite '.OK'")


@pl.on_commands(["right"], user_state="quest:1")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:3")
    await ctx.reply("Choose: right or left")


@pl.on_commands(["right"], user_state="quest:3")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:end")
    await ctx.reply("You have chosen: right, right\nWrite '.OK'")


@pl.on_commands(["left"], user_state="quest:3")
async def _(msg, ctx):
    await ctx.set_state(user_state="quest:end")
    await ctx.reply("You have chosen: right, left\nWrite '.OK'")


@pl.on_commands(["ok"], user_state="quest:end")
async def _(msg, ctx):
    # Finish the quest and return to the idle state.
    await ctx.set_state(user_state="")
    await ctx.reply("Bye")


@pl.on_unprocessed_messages(user_state="quest:end")
async def _(msg, ctx):
    # Nag until the user confirms with '.OK'.
    await ctx.reply("Write '.OK'")


@pl.on_commands(["exit"])
async def _(msg, ctx):
    # Abort the quest from any quest:* state.
    if ctx.user_state.startswith("quest:"):
        await ctx.set_state(user_state="")
        await ctx.reply("Quest stopped")


plugin = pl
| StarcoderdataPython |
14712 | from resources import Resources
import configparser
def getConfigEntry(group, item):
    """Return the integer config value for (group, item), or None.

    Returns None when either key is None, the config file could not be
    read, or *group* is absent.  Propagates ValueError from getint()
    when the stored value is not an integer.
    """
    entry = None
    if group is not None and item is not None:
        config = configparser.ConfigParser()
        try:
            # NOTE: ConfigParser.read() silently skips missing files, so
            # this handler only fires if Resources.getConfigFile() itself
            # raises; a missing file simply leaves the parser empty.
            config.read(Resources.getConfigFile())
        except FileNotFoundError as err:
            # BUG FIX: previously referenced FileNotFoundError.strerror on
            # the exception *class* (a member descriptor, not the message)
            # instead of the caught instance.
            print("ERROR: File '" + Resources.getConfigFile() + "' NOT found! " + str(err.strerror))
            config = None
        if config is not None and group in config:
            entry = config[group].getint(item)
    return entry
def getConfigEntryOrDefault(group, item, defaultValue=None):
    """Return the config value for (group, item), or *defaultValue* when absent.

    The redundant pre-initialization of the result (it was immediately
    overwritten) has been removed.
    """
    entry = getConfigEntry(group, item)
    if entry is None:
        entry = defaultValue
    return entry
class Token:
    """A single token extracted from a document.

    Attributes:
        id:    numeric token id (-1 when unassigned).
        value: the token text.
        attr:  metadata dict with 'offset', 'length' and 'document' keys.
    """
    def __init__(self, value, id=-1, document=-1, offset=-1):
        self.id = id
        self.attr = {}
        self.attr['offset'] = offset
        # len() is the idiomatic spelling of value.__len__()
        self.attr['length'] = len(value)
        self.attr['document'] = document
        self.value = value

    def __str__(self):
        s = "id: {0}, value: {1}, attributes: {2}".format(self.id, self.value, self.attr)
        return s
| StarcoderdataPython |
3485271 | from github import GitHub
import math
# Endpoint: GET https://api.github.com/users/<CSV_Data>
#
# Module-level state shared by the recursive row reader below.
gh = GitHub(filename='get_users.csv')
users = []
endpoint = []
required_fields = []
optional_fields = []


def read_user_row(i=0):
    """Recursively read CSV row *i*, accumulating request payloads.

    Rows with an empty 'Execution Results' cell are collected into
    ``users``; already-successful rows are skipped.  After the last row,
    gh.get_users() is called with the accumulated payload.
    """
    if gh.data['Execution Results'][i] == '':
        if gh.data['Name'][i] == '': #row 9 will fail
            print("Name should be populated in CSV")
            exit(0)
        else:
            endpoint.append(gh.data['Name'][i])

        user = {}
        for field in required_fields:
            try:
                if gh.data[field][i] != '':
                    user[field] = gh.data[field][i]
                else:
                    # NOTE(review): user[field] was never assigned in this
                    # branch, so this line raises KeyError, which the
                    # `except KeyError: pass` below swallows — the error
                    # message and exit(0) can never run.  Likely intended:
                    # print(field + " should be populated in CSV").
                    print(user[field] + "should be populated in CSV")
                    exit(0)
            except AttributeError:
                pass
            except KeyError:
                pass
        for field in optional_fields:
            try:
                if gh.data[field][i] != '':
                    user[field] = gh.data[field][i]
            except AttributeError:
                pass
            except KeyError:
                pass

        users.append({
            "endpoint": endpoint,
            "data": user,
            "rows": [i]
        })

        print('Reading CSV row: #' + str(i) )

        # if this was the last entry, proceed to create the requests from the build array
        i = i + 1
        if i == gh.total_records + 1:
            gh.get_users(payload=users)
        else:
            read_user_row(i)
    else:
        print('skipping successful row')
        i = i + 1
        # if this was the last entry, proceed to create the requests from the build array
        if i == gh.total_records + 1:
            gh.get_users(payload=users)
        else:
            # continue
            read_user_row(i)
    return


read_user_row()
| StarcoderdataPython |
6648678 | '''
Sends note on / off data to the Teensy for LED control.
author: <NAME>
date: 3/21/2021
Python: 3.7.x
'''
# used to package the serial data in midi format
import midi
import os
from midi import MidiConnector
from midi import Message, NoteOff, NoteOn
class OmniMidi:
    """Forwards note on/off events to a Teensy as MIDI messages over serial."""

    def __init__(self):
        # 0 acts as a sentinel meaning "no serial connection available".
        self.conn = 0
        if os.path.exists('/dev/ttyACM0'):
            # connection to the Teensy serial port
            self.conn = MidiConnector('/dev/ttyACM0')
        elif os.path.exists('/dev/ttyACM1'):
            self.conn = MidiConnector('/dev/ttyACM1')

    # sends note events to Teensy.
    def send_note(self, evnt):
        """Send one note event to the Teensy.

        evnt is a sequence of (event name, note number, velocity) where the
        event name is "/noteOn" or "/noteOff"; any other name is ignored.
        Returns 0 (doing nothing) when no serial connection was found.
        """
        if self.conn == 0: return 0
        note = evnt[0]
        nn = evnt[1]
        vel = evnt[2]
        msg = 0
        if note=="/noteOn":
            note_on = NoteOn(nn, vel)
            # The second Message() argument is presumably the MIDI channel
            # (2) — confirm against the midi library. TODO confirm.
            msg = Message(note_on,2)
        elif note=="/noteOff":
            note_off = NoteOff(nn, vel)
            msg = Message(note_off,2)
        if not msg == 0:
            self.conn.write(msg)
if __name__ == "__main__":
    # Module is intended to be imported; nothing runs standalone.
    pass
| StarcoderdataPython |
6526496 | <filename>bindings/python/examples/simple.py
#!/usr/bin/env python
# simple.py -- basic ppamltracer-python example
# This file is in the public domain.
import os
import sys
from ppamltracer import Tracer
def main():
    """Trace toy factorial and Fibonacci workloads with ppamltracer.

    NOTE: Python 2 only — the print statements and the unbuffered text-mode
    os.fdopen(..., 'w', 0) below are invalid under Python 3.
    """
    # Disable buffering on stdout so we can see the numbers as they are printed.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    # Start ppamltracer.
    with Tracer("/tmp/simple_report") as tracer:
        # Register the factorial phase.
        with tracer.create_phase("fact") as phase:
            # Print factorials.
            print "Factorials:",
            for i in range(21):
                print fact(phase, i),
            print
        # Register the Fibonacci phase.
        with tracer.create_phase("fib") as phase:
            # Print Fibonacci numbers.
            print "Fibonacci numbers: ",
            for i in range(24):
                print fib(phase, i),
            print
def fact(phase, n):
    """Compute n! recursively, recording every call inside *phase*."""
    # Each recursive call re-enters the phase, so the tracer sees one
    # phase activation per level of recursion (matches the original).
    with phase.running():
        return 1 if n == 0 else n * fact(phase, n - 1)
def fib(phase, n):
    """Return the n-th Fibonacci number, recording every call inside *phase*."""
    # Naive double recursion on purpose: each call is one traced phase entry.
    with phase.running():
        if n in (0, 1):
            return n
        return fib(phase, n - 1) + fib(phase, n - 2)
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
4820829 | <reponame>YuelianINC/kombu-aliyun-mqs<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last modified: <NAME> (<EMAIL>)
from setuptools import setup, find_packages
# Package metadata for kombu-aliyun-mqs.
setup(
    name='kombu-aliyun-mqs',
    version='0.1',
    packages=find_packages(),
    author='<NAME>',
    author_email='<EMAIL>',
    # Bug fix: the URL had a doubled scheme ("https://https://github.com/...").
    url='https://github.com/YuelianINC/kombu-aliyun-mqs',
    description='aliyun mqs ',
    #long_description=open('README.md').read(),
    license='Apache2',
    requires=[
        'kombu',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Topic :: System :: Installation/Setup'
    ],
    include_package_data=True,
    zip_safe=False
)
9715512 | <reponame>jacobmunson/lenskit_confidence
from lenskit.algorithms import basic
from lenskit.algorithms import bias
import pandas as pd
import numpy as np
import lenskit.util.test as lktu
from pytest import approx
# Tiny fixed ratings frame used by the tests below:
# users 10, 12 and 13 rating items 1-3.
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
                          'user': [10, 12, 10, 13],
                          'rating': [4.0, 3.0, 5.0, 2.0]})
def test_topn_recommend():
    """TopN over a memorized predictor ranks candidates by memorized score."""
    pred = basic.Memorized(simple_df)
    rec = basic.TopN(pred)
    rec.fit(simple_df)

    # User 10 rated both candidates; item 2 (5.0) outranks item 1 (4.0).
    rec10 = rec.recommend(10, candidates=[1, 2])
    assert all(rec10.item == [2, 1])
    assert all(rec10.score == [5, 4])

    # User 12 only has a score for item 1, so only one row comes back.
    rec2 = rec.recommend(12, candidates=[1, 2])
    assert len(rec2) == 1
    assert all(rec2.item == [1])
    assert all(rec2.score == [3])

    # n=1 truncates the list to the single best item.
    rec10 = rec.recommend(10, n=1, candidates=[1, 2])
    assert len(rec10) == 1
    assert all(rec10.item == [2])
    assert all(rec10.score == [5])
def test_topn_config():
    """The string form of a TopN recommender identifies the algorithm."""
    algo = basic.TopN(basic.Memorized(simple_df))
    assert str(algo).startswith('TopN/')
def test_topn_big():
    """End-to-end check on the MovieLens test set: fit returns self, and
    recommendations exclude rated items and match the predictor's top scores."""
    ratings = lktu.ml_test.ratings
    users = ratings.user.unique()
    items = ratings.item.unique()
    user_items = ratings.set_index('user').item

    algo = basic.TopN(bias.Bias())
    a2 = algo.fit(ratings)
    assert a2 is algo

    # test 100 random users
    for u in np.random.choice(users, 100, False):
        recs = algo.recommend(u, 100)
        assert len(recs) == 100
        rated = user_items.loc[u]
        # Already-rated items must never be recommended.
        assert all(~recs['item'].isin(rated))
        # The returned scores must equal the predictor's 100 best unrated scores.
        unrated = np.setdiff1d(items, rated)
        scores = algo.predictor.predict_for_user(u, unrated)
        top = scores.nlargest(100)
        assert top.values == approx(recs.score.values)
| StarcoderdataPython |
382672 | <reponame>merlinran/acorn-precision-farming-rover
"""
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 <NAME>, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
import serial
import time
import sys
import math
from odrive.utils import dump_errors
from evdev import InputDevice, list_devices, categorize, ecodes, KeyEvent
import pygame as py
from collections import namedtuple
import numpy as np
from multiprocessing import shared_memory, resource_tracker
# Motor/encoder constants (unused in this debug window but kept for parity
# with the drive code).
COUNTS_PER_REVOLUTION = 9797.0
ACCELERATION_COUNTS_SEC = 0.5

WINDOW_SCALING = 0.75

# define constants
WIDTH = int(1000 * WINDOW_SCALING)
HEIGHT = int(1000 * WINDOW_SCALING)
FPS = 30

# define colors
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

# Half-spacing (pixels) of the four wheel markers around screen center.
_RW = int(150 * WINDOW_SCALING)
_RH = int(300 * WINDOW_SCALING)

# initialize pygame and create screen
py.init()
screen = py.display.set_mode((WIDTH, HEIGHT))
# for setting FPS
clock = py.time.Clock()

rot = 0
rot_speed = 1
# # define a surface (RECTANGLE)
# image_orig = py.Surface((50 , 200))
# # for making transparent background while rotating an image
# image_orig.set_colorkey(BLACK)
# # fill the rectangle / surface with green color
# image_orig.fill(GREEN)
#
# py.draw.circle(image_orig, RED, (25,20), 15, 0)

# creating a copy of orignal image for smooth rotation
# One red wheel sprite with a green dot marking its "front" end.
image_red_orig = py.Surface((30 * WINDOW_SCALING, 100 * WINDOW_SCALING))
# for making transparent background while rotating an image
image_red_orig.set_colorkey(BLACK)
# fill the rectangle / surface with green color
image_red_orig.fill(RED)
py.draw.circle(image_red_orig, GREEN, (15 * WINDOW_SCALING,
               20 * WINDOW_SCALING), 10 * WINDOW_SCALING, 0)

# Wheel labels and their quadrant signs (x, y) relative to screen center.
keys = ('front_left', 'front_right', 'rear_left', 'rear_right')
pos = ((-1, -1), (+1, -1), (-1, +1), (+1, +1))

# rects = []
#
# for i in range(4):
#     image = image_orig.copy()
#     image.set_colorkey(BLACK)
#     # define rect for placing the rectangle at the desired position
#     rect = image.get_rect()
#     rect.center = (WIDTH // 2 + pos[i][0] * _RW, HEIGHT // 2 + pos[i][1] * _RH)
#     rects.append(rect)

# One rect per wheel, positioned in the four quadrants of the window.
red_rects = []
for i in range(4):
    image = image_red_orig.copy()
    image.set_colorkey(BLACK)
    # define rect for placing the rectangle at the desired position
    rect = image.get_rect()
    rect.center = (WIDTH // 2 + pos[i][0] * _RW, HEIGHT // 2 + pos[i][1] * _RH)
    red_rects.append(rect)
# keep rotating the rectangle until running is set to False
joy_steer = 0
joy_throttle = 0
joy_strafe = 0
vel_cmd = 0
steer_cmd = 0
last_vel_cmd = 0
tick_time = time.time()
strafe = 0

myFont = py.font.SysFont("Times New Roman", int(40 * WINDOW_SCALING))

# Attach to the steering-debug buffer published by the drive process.
existing_shm = shared_memory.SharedMemory(name='acorn_steering_debug')
# Untrack the resource so it does not get destroyed. This allows the
# steering debug window to stay open.
resource_tracker.unregister(existing_shm._name, 'shared_memory')
# Eight float64 values; read below as four (steer radians, throttle) pairs,
# one per wheel — confirm layout against the writer process.
calc = np.ndarray((8,), dtype=np.float64, buffer=existing_shm.buf)
running = True
while running:
    # set FPS
    clock.tick(FPS)
    # clear the screen every time before drawing new objects
    screen.fill(BLACK)
    # check for the exit
    for event in py.event.get():
        if event.type == py.QUIT:
            running = False
    # while True:
    #     print(calc)
    #     time.sleep(0.1)
    # #     calc = calculate_steering(steer_cmd, vel_cmd)

    # Reference line across the window plus two fixed anchor points.
    horiz = WIDTH//2
    coord1 = (horiz + 200 * WINDOW_SCALING, 50 * WINDOW_SCALING)
    coord2 = (horiz - 300 * WINDOW_SCALING, HEIGHT-50 * WINDOW_SCALING)
    py.draw.circle(screen, RED, coord1, 15 * WINDOW_SCALING, 0)
    py.draw.circle(screen, RED, coord2, 15 * WINDOW_SCALING, 0)
    py.draw.line(screen, RED, coord1, coord2, 1)
    coord3 = (horiz, 350 * WINDOW_SCALING)
    coord4 = (horiz, HEIGHT-350 * WINDOW_SCALING)
    py.draw.circle(screen, GREEN, coord3, 15 * WINDOW_SCALING, 0)
    py.draw.circle(screen, GREEN, coord4, 15 * WINDOW_SCALING, 0)
    p1 = np.asarray(coord1)
    p2 = np.asarray(coord2)
    p3 = np.asarray(coord3)
    p4 = np.asarray(coord4)
    # Signed perpendicular distance from each green point to the red line.
    # https://stackoverflow.com/questions/39840030/distance-between-point-and-a-line-from-two-points#
    d1 = np.cross(p2-p1, p1-p3) / np.linalg.norm(p2-p1) * -1
    d2 = np.cross(p2-p1, p1-p4) / np.linalg.norm(p2-p1) * -1
    #d1 = np.linalg.norm(np.cross(p2-p1, p1-p3))/np.linalg.norm(p2-p1)
    #d2 = np.linalg.norm(np.cross(p2-p1, p1-p4))/np.linalg.norm(p2-p1)
    # Draw the distances as horizontal bars from the green points.
    coord3_p2 = (coord3[0] + d1, coord3[1])
    py.draw.line(screen, RED, coord3, coord3_p2, 5)
    coord4_p2 = (coord4[0] + d2, coord4[1])
    py.draw.line(screen, RED, coord4, coord4_p2, 5)

    # Render each wheel: throttle as text, steering angle as sprite rotation.
    idx = 0
    for rect in red_rects:
        # making a copy of the old center of the rectangle
        throttle = math.degrees(calc[idx*2+1])
        throttle_text = myFont.render("{:0.0f}".format(throttle), 1, RED)
        width = WIDTH // 2 + pos[idx][0] * _RW*2.0
        height = HEIGHT // 2 + pos[idx][1] * _RH
        screen.blit(throttle_text, (width, height-30))
        old_center = rect.center
        # Negate: pygame rotates counter-clockwise for positive angles.
        rot = math.degrees(calc[idx*2]) * -1
        idx += 1
        # rotating the orignal image
        new_image = py.transform.rotate(image_red_orig, rot)
        rect = new_image.get_rect()
        # set the rotated rectangle to the old center
        rect.center = old_center
        # drawing the rotated rectangle to the screen
        screen.blit(new_image, rect)
    # flipping the display after drawing everything
    py.display.flip()

# existing_shm.close()
py.quit()
| StarcoderdataPython |
from impc_etl.jobs.extract.colony_tracking_extractor import ColonyTrackingExtractor
from impc_etl.workflow.extraction import *
from impc_etl.workflow.config import ImpcConfig
class ExperimentCleaner(SparkSubmitTask):
    """Luigi task that runs the Spark experiment-cleaning job over the raw
    experiment parquet produced by ExperimentExtractor."""
    name = "Experiment_Cleaner"
    app = "impc_etl/jobs/clean/experiment_cleaner.py"
    output_path = luigi.Parameter()
    entity_type = "experiment"
    dcc_xml_path = luigi.Parameter()
    # Serialise tasks that overwrite shared outputs.
    resources = {"overwrite_resource": 1}

    def requires(self):
        return ExperimentExtractor(
            dcc_xml_path=self.dcc_xml_path, output_path=self.output_path
        )

    def output(self):
        # Cleaned output lives next to the raw input, minus the "_raw" suffix.
        output_path = self.input().path.replace("_raw", "")
        return ImpcConfig().get_target(output_path)

    def app_options(self):
        # Arguments handed to the Spark job: input, entity type, output.
        return [self.input().path, self.entity_type, self.output().path]
class LineExperimentCleaner(SparkSubmitTask):
    """Luigi task that cleans line-level experiments extracted from DCC XML."""
    name = "Line_Experiment_Cleaner"
    app = "impc_etl/jobs/clean/experiment_cleaner.py"
    output_path = luigi.Parameter()
    entity_type = "line"
    dcc_xml_path = luigi.Parameter()
    resources = {"overwrite_resource": 1}

    def requires(self):
        return LineExperimentExtractor(
            dcc_xml_path=self.dcc_xml_path, output_path=self.output_path
        )

    def output(self):
        # Strip the raw suffix and rename the dataset to "line_experiment".
        output_path = (
            self.input()
            .path.replace("_raw", "")
            .replace("experiment", "line_experiment")
        )
        return ImpcConfig().get_target(output_path)

    def app_options(self):
        return [self.input().path, self.entity_type, self.output().path]
class MouseCleaner(SparkSubmitTask):
    """Luigi task that cleans mouse specimen data extracted from DCC XML."""
    name = "Mouse_Cleaner"
    app = "impc_etl/jobs/clean/specimen_cleaner.py"
    output_path = luigi.Parameter()
    dcc_xml_path = luigi.Parameter()
    resources = {"overwrite_resource": 1}

    def requires(self):
        return MouseExtractor(
            dcc_xml_path=self.dcc_xml_path, output_path=self.output_path
        )

    def output(self):
        output_path = self.input().path.replace("_raw", "")
        return ImpcConfig().get_target(output_path)

    def app_options(self):
        return [self.input().path, self.output().path]
class EmbryoCleaner(SparkSubmitTask):
    """Luigi task that cleans embryo specimen data (same Spark job as mice)."""
    name = "Embryo_Cleaner"
    app = "impc_etl/jobs/clean/specimen_cleaner.py"
    output_path = luigi.Parameter()
    dcc_xml_path = luigi.Parameter()
    resources = {"overwrite_resource": 1}

    def requires(self):
        return EmbryoExtractor(
            dcc_xml_path=self.dcc_xml_path, output_path=self.output_path
        )

    def output(self):
        output_path = self.input().path.replace("_raw", "")
        return ImpcConfig().get_target(output_path)

    def app_options(self):
        return [self.input().path, self.output().path]
class ColonyCleaner(SparkSubmitTask):
    """Luigi task that cleans colony tracking data."""
    name = "Colony_Cleaner"
    app = "impc_etl/jobs/clean/colony_cleaner.py"
    # NOTE(review): declared but not passed to the job; presumably consumed
    # by ColonyTrackingExtractor via luigi config — confirm.
    imits_colonies_tsv_path = luigi.Parameter()
    output_path = luigi.Parameter()
    resources = {"overwrite_resource": 1}

    def requires(self):
        return ColonyTrackingExtractor()

    def output(self):
        return ImpcConfig().get_target(f"{self.output_path}colony_parquet")

    def app_options(self):
        return [self.input().path, self.output().path]
| StarcoderdataPython |
9783971 | <reponame>FabienArcellier/spike-ariadne<filename>ariadne_spike/domain/user.py
class User:
    """Simple user record with defaults for anonymous users."""

    def __init__(self, name, age):
        # Only None triggers the defaults; falsy values like "" or 0 are kept.
        self.name = "stranger" if name is None else name
        self.age = 18 if age is None else age
| StarcoderdataPython |
12852964 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
class pipe():
    """Listens on a named FIFO and forwards each message to a queue.

    Every payload written to the FIFO (e.g. ``echo "CMD" > pipefile``) is
    stripped and put on *queue* as the tuple ``("pipe", payload)``.
    """

    def __init__(self, pipefile, queue, actions):
        """Create the FIFO (if needed) and start the listener thread.

        Args:
            pipefile: path of the named pipe to create and read from.
            queue: queue-like object receiving ("pipe", message) tuples.
            actions: shared registry dict; an empty "pipe" entry is added.
        """
        self.pipefile = pipefile
        self.queue = queue
        actions["pipe"] = {}
        self.__makefifo()
        self.listening_thread = threading.Thread(target=self.listen_from_pipe)
        # NOTE(review): the non-daemon thread keeps the process alive; the
        # original deliberately left daemon mode disabled, so it is preserved.
        #self.listening_thread.daemon = True
        self.isListening = True
        self.listening_thread.start()

    def transmit(self, received):
        """Forward one received payload to the consumer queue."""
        cmd = ("pipe", received)
        self.queue.put(cmd)

    def __makefifo(self):
        """Create the FIFO; return True on success, False on failure.

        Returns False instead of raising when the FIFO cannot be created,
        typically because the path already exists.
        """
        try:
            os.mkfifo(self.pipefile)
            logging.debug("Listening to FIFO Pipe at %s", self.pipefile)
            return True
        except OSError:
            # Bug fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. os.mkfifo raises OSError
            # (FileExistsError when the path already exists).
            logging.debug("Error creating FIFO Pipe %s. File already existing?", self.pipefile)
            return False

    def listen_from_pipe(self):
        """Block on the FIFO and forward each complete write to the queue."""
        while self.isListening:
            logging.debug("Listening from PIPE %s", self.pipefile)
            # open() blocks until a writer connects; read() returns once the
            # writer closes its end of the pipe.
            with open(self.pipefile) as fifo:
                self.transmit(fifo.read().strip())
if __name__ == '__main__':
    # Bug fix: pipe() requires three arguments (pipefile, queue, actions);
    # the original two-argument call raised TypeError immediately. Use a
    # real queue and an actions dict for the standalone demo.
    import queue
    p = pipe("pipefile", queue.Queue(), {})
| StarcoderdataPython |
from .dynamicdirectedgraph import DynamicDirectedGraph
| StarcoderdataPython |
9619689 | <filename>bloch_sphere.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Bloch Sphere.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1FOuaG4O4P5ialuB7a3Na6vZW6wyFG7P7

Use plot_bloch_vector() to plot a qubit in the states:
    |0>
    |1>
    (|0> + |1>) / sqrt(2)
    (|0> - i|1>) / sqrt(2)
    (i|0> + |1>) / sqrt(2)
"""

# Commented out IPython magic to ensure Python compatibility.
# %%capture
# !pip install qiskit

from qiskit import *
from qiskit.visualization import *
from math import pi, sqrt

# Bloch vectors are Cartesian [x, y, z] coordinates on the unit sphere.
coord_1 = [0,0,1]   # north pole: |0>
plot_bloch_vector(coord_1)

coord_2 = [0,0,-1]  # south pole: |1>
plot_bloch_vector(coord_2)

coord_3 = [1,0,0]   # +x axis: (|0> + |1>)/sqrt(2)
plot_bloch_vector(coord_3)

coord_4 = [-1,0,0]  # -x axis: (|0> - |1>)/sqrt(2)
plot_bloch_vector(coord_4)

# NOTE(review): coord_5 duplicates coord_3; the docstring suggests a -y or
# +y axis vector (e.g. [0,-1,0]) may have been intended — confirm.
coord_5 = [1,0,0]
plot_bloch_vector(coord_5)
228337 | <gh_stars>1-10
import os
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import polyga.analysis as pga
# Directory where polyga stored the run; load_planet returns the population
# dataframe and the fingerprint dataframe.
save_loc = 'Planet_Silly'
df, fp_df = pga.load_planet(save_loc)
# List the available columns for reference.
for col in df.columns:
    print(col)
def plot_average_lengths(df):
    """Plots average lengths of polymers vs generation for all settled_nations"""
    gens = max(df.generation) + 1
    x = np.linspace(0, gens, gens)
    settled_nations = np.unique(df.settled_nation.values)
    legend = []
    for settled_nation in settled_nations:
        # One line per nation: mean chromosome count per generation.
        tdf = df.loc[df['settled_nation'] == settled_nation]
        means = []
        for gen in range(gens):
            tdf_ = tdf.loc[tdf['generation'] == gen]
            mean = tdf_.num_chromosomes.mean()
            means.append(mean)
        plt.plot(x, means)
        legend.append(settled_nation)
    plt.ylabel('Number of Blocks')
    plt.xlabel('Generation')
    plt.legend(legend, title='avg_num_blocks_per_polymer')
    # Saved into the run directory; figure is cleared for the next plot.
    plt.savefig(save_loc + '/length_avg.png')
    plt.clf()
def plot_property_averages(df):
    """Plots property averages for all settled_nations and displays them separately."""
    properties = ['Polymer_Coolness', 'Polymer_Funnyness','Polymer_Intelligence']
    settled_nations = np.unique(df.settled_nation.values)
    for settled_nation in settled_nations:
        # Per-generation statistics for each property, for this nation only.
        means = []
        maxes = []
        mins = []
        stds = []
        tdf = df.loc[df.settled_nation == settled_nation]
        for prop in properties:
            mean = []
            std = []
            max_ = []
            min_ = []
            for i in range(max(tdf.generation) + 1):
                tdf_ = tdf.loc[tdf['generation'] == i]
                mean.append(tdf_[prop].mean())
                std.append(tdf_[prop].std())
                max_.append(max(tdf_[prop]))
                min_.append(min(tdf_[prop]))
            means.append(mean)
            stds.append(std)
            maxes.append(max_)
            mins.append(min_)
        # 2x2 grid; only three panels are used, the fourth is deleted below.
        fig, axes = plt.subplots(nrows=2, ncols=2, figsize=[16,9])
        # Shift x to 1-based generations so the log-scale axis starts at 1.
        x = np.linspace(0, max(tdf.generation), max(tdf.generation)+1)
        x = [val + 1 for val in x]
        row = 0
        col = 0
        curr = 0
        for prop in properties:
            max_ = maxes[curr]
            min_ = mins[curr]
            mean = means[curr]
            std = stds[curr]
            upper = [mean[i] + std[i] for i in range(len(mean))]
            lower = [mean[i] - std[i] for i in range(len(mean))]
            # Dark band: full min/max range; light band: +/- one std dev.
            axes[row][col].fill_between(x, min_, max_, alpha=0.5,
                color='darkblue'
            )
            axes[row][col].fill_between(x, lower, upper, alpha=0.5,
                color='lightblue'
            )
            axes[row][col].plot(x, means[curr], lw=2, c='darkred')
            axes[row][col].set_ylabel(prop)
            axes[row][col].set_xlim([1,max(tdf.generation)+1])
            axes[row][col].set_xscale("log")
            curr += 1
            col += 1
            if col > 1:
                col = 0
                row += 1
        fig.delaxes(axes[1][1])
        fig.suptitle(settled_nation)
        plt.savefig(save_loc + '/' + settled_nation + '_property_avgs.png')
        plt.clf()
# Generate both report figures into the save_loc directory.
plot_property_averages(df)
plot_average_lengths(df)
| StarcoderdataPython |
11273444 | <filename>Alert Notification.py
# This is a simple application for alert system
from tkinter import *
from tkinter import messagebox
# Minimal tkinter window with one button that pops a warning dialog.
root = Tk()
root.geometry("200x200")

def message():
    """Show the warning popup when the button is clicked."""
    messagebox.showwarning("Alert Box", "Stop virus found")

# Bug fix: the callback was `Message` — tkinter's Message *widget class*
# (pulled in by the star import) — instead of the handler defined above,
# so clicking the button never showed the warning.
but = Button(root, text="ok", command=message)
but.place(x=100, y=100)

root.mainloop()
3530262 | <gh_stars>0
from ensembling.ensemble_framework import Ensemble_Framework
from task_A.frameworks.base_framework import Base_Framework
from task_A.frameworks.base_framework_seq import Base_Framework_SEQ
from task_A.frameworks.bert_framework import BERT_Framework
from task_A.frameworks.bert_framework_for_veracity import BERT_Framework_for_veracity
from task_A.frameworks.bert_framework_with_f import BERT_Framework_with_f
from task_A.frameworks.bert_introspection_framework import BERT_Introspection_Framework
from task_A.frameworks.self_att_with_bert_tokenizing import SelfAtt_BertTokenizing_Framework
from task_A.frameworks.text_features_framework import Text_Feature_Framework
from task_A.frameworks.text_framework_branch import Text_Framework
from task_A.frameworks.text_framework_seq import Text_Framework_Seq
from task_A.models.baseline import Baseline
from task_A.models.baseline_lstm import Baseline_LSTM
from task_A.models.bert_with_features import BertModelForStanceClassificationWFeatures
from task_A.models.sel_att_and_baseline import SelfAttandBsline
from task_A.models.self_att_bert_tokenizer import SelfAttWithBertTokenizing
from task_A.models.self_attention_text_only import SelAttTextOnly, SelAttTextOnlyWithoutPrepInput
from task_A.models.text_BERT import BertModelForStanceClassification
from task_B.models.text_BERT_with_veracity import BertModelForVeracityClassification
__author__ = "<NAME>"
class SolutionA:
    """Builds and trains the model/framework pair selected by the config."""

    def __init__(self, config):
        # config: dict with "active_model" and a per-model "models" section.
        self.config = config

    def create_model(self):
        """
        Create and validate model
        """
        # Map the configured model name to a model class (modelf) and the
        # training framework (fworkf) that knows how to fit it.
        modelf = None
        fworkf = Base_Framework
        if self.config["active_model"] == "baseline_LSTM":
            # 204 804 params
            modelf = Baseline_LSTM
        elif self.config["active_model"] == "baseline":
            modelf = Baseline
        elif self.config["active_model"] == "selfatt_textonly":
            modelf = SelAttTextOnly
            fworkf = Text_Framework
        elif self.config["active_model"] == "selfatt_textonly_seq":
            modelf = SelAttTextOnlyWithoutPrepInput
            fworkf = Text_Framework_Seq
        elif self.config["active_model"] == "selfatt_text_and_baseline":
            modelf = SelfAttandBsline
            fworkf = Text_Feature_Framework
        elif self.config["active_model"] == "BERT_textonly":
            modelf = BertModelForStanceClassification
            # fworkf = BERT_Framework_Hyperparamopt
            fworkf = BERT_Framework
        elif self.config["active_model"] == "features_seq":
            modelf = Baseline
            fworkf = Base_Framework_SEQ
        elif self.config["active_model"] == "BERT_withf":
            modelf = BertModelForStanceClassificationWFeatures
            fworkf = BERT_Framework_with_f
        # In fact, this was an useless experiment, since we have only ~300 source post classified for veracity
        elif self.config["active_model"] == "BERT_veracity":
            modelf = BertModelForVeracityClassification
            fworkf = BERT_Framework_for_veracity
        elif self.config["active_model"] == "self_att_with_bert_tokenizer":
            modelf = SelfAttWithBertTokenizing
            fworkf = SelfAtt_BertTokenizing_Framework
        elif self.config["active_model"] == "ensemble":
            modelf = BertModelForStanceClassification
            fworkf = Ensemble_Framework
        elif self.config["active_model"] == "BERT_introspection":
            modelf = BertModelForStanceClassification
            fworkf = BERT_Introspection_Framework
        # NOTE(review): an unrecognised active_model leaves modelf as None and
        # passes it to fit(); consider raising a ValueError instead.
        modelframework = fworkf(self.config["models"][self.config["active_model"]])
        modelframework.fit(modelf)

    def submit_model(self, model):
        """
        Load model and run submission
        """
        # Not implemented yet.
        pass
| StarcoderdataPython |
9699176 | # -----------------------------------------------------------------------------
# Name: ObserverDBModels.py
# Purpose: ORM for observer, generated by pwiz via:
# python -m pwiz -e sqlite data/observer.db > ObserverORM_new.py
# then removed header info that is moved to ObserverDBBaseModel
# PEP-8 fix spacing via pycharm ctrl-shift-alt-L
# Modified to use APSW extensions
# Modified to temporarily add CatchAdditionalBaskets table. This table not yet sync'd with IFQ, but will.
#
# Author: <NAME> <<EMAIL>>
#
# Created: June 2016
# License: MIT
# ------------------------------------------------------------------------------
from peewee import *
from py.observer.ObserverDBBaseModel import BaseModel
class Species(BaseModel):
    """Peewee ORM model for the SPECIES lookup table."""
    active = IntegerField(db_column='ACTIVE', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    bs_species = TextField(db_column='BS_SPECIES', null=True)
    common_name = TextField(db_column='COMMON_NAME', null=True)
    form_required = TextField(db_column='FORM_REQUIRED', null=True)
    pacfin_code = TextField(db_column='PACFIN_CODE', null=True)
    priority_species = TextField(db_column='PRIORITY_SPECIES', null=True)
    race_code = TextField(db_column='RACE_CODE', null=True)
    scientific_name = TextField(db_column='SCIENTIFIC_NAME')
    species_category = TextField(db_column='SPECIES_CATEGORY', null=True)
    species_code = TextField(db_column='SPECIES_CODE')
    # Primary key maps to the SPECIES_ID column.
    species = PrimaryKeyField(db_column='SPECIES_ID')
    species_sub_category = TextField(db_column='SPECIES_SUB_CATEGORY', null=True)

    class Meta:
        db_table = 'SPECIES'
class CatchCategories(BaseModel):
    """Peewee ORM model for the CATCH_CATEGORIES lookup table."""
    active = IntegerField(db_column='ACTIVE', null=True)
    catch_category_code = TextField(db_column='CATCH_CATEGORY_CODE')
    catch_category = PrimaryKeyField(db_column='CATCH_CATEGORY_ID')
    catch_category_name = TextField(db_column='CATCH_CATEGORY_NAME')

    class Meta:
        db_table = 'CATCH_CATEGORIES'
class Ports(BaseModel):
    """Peewee ORM model for the PORTS lookup table."""
    ifq_port_code = IntegerField(db_column='IFQ_PORT_CODE', null=True)
    ifq_port = IntegerField(db_column='IFQ_PORT_ID', null=True)
    port_code = TextField(db_column='PORT_CODE', null=True)
    port_group = TextField(db_column='PORT_GROUP', null=True)
    port = PrimaryKeyField(db_column='PORT_ID')
    port_name = TextField(db_column='PORT_NAME', null=True)
    state = TextField(db_column='STATE', null=True)

    class Meta:
        db_table = 'PORTS'
class Vessels(BaseModel):
    """Peewee ORM model for the VESSELS table."""
    coast_guard_number = TextField(db_column='COAST_GUARD_NUMBER', null=True)
    is_mothership = TextField(db_column='IS_MOTHERSHIP', null=True)
    notes = TextField(db_column='NOTES', null=True)
    # Plain integer rather than a ForeignKeyField to Ports in this schema.
    port = IntegerField(db_column='PORT_ID')
    registered_length = FloatField(db_column='REGISTERED_LENGTH', null=True)
    registered_length_um = TextField(db_column='REGISTERED_LENGTH_UM', null=True)
    safety_decal_exp = TextField(db_column='SAFETY_DECAL_EXP', null=True)
    state_reg_number = TextField(db_column='STATE_REG_NUMBER', null=True)
    vessel = PrimaryKeyField(db_column='VESSEL_ID')
    vessel_name = TextField(db_column='VESSEL_NAME')
    vessel_status = TextField(db_column='VESSEL_STATUS', null=True)
    vessel_type = TextField(db_column='VESSEL_TYPE', null=True)

    class Meta:
        db_table = 'VESSELS'
class Programs(BaseModel):
    """Peewee ORM model for the PROGRAMS lookup table."""
    description = TextField(db_column='DESCRIPTION', null=True)
    program = PrimaryKeyField(db_column='PROGRAM_ID')
    program_name = TextField(db_column='PROGRAM_NAME')

    class Meta:
        db_table = 'PROGRAMS'
class IfqDealers(BaseModel):
    """Peewee ORM model for the IFQ_DEALERS table."""
    active = IntegerField(db_column='ACTIVE', null=True)
    agency = IntegerField(db_column='AGENCY_ID', null=True)
    dealer_name = TextField(db_column='DEALER_NAME', null=True)
    dealer_number = TextField(db_column='DEALER_NUMBER', null=True)
    ifq_dealer = PrimaryKeyField(db_column='IFQ_DEALER_ID')
    port_code = IntegerField(db_column='PORT_CODE', null=True)
    receiver_code = TextField(db_column='RECEIVER_CODE', null=True)
    receiver_number = IntegerField(db_column='RECEIVER_NUMBER', null=True)

    class Meta:
        db_table = 'IFQ_DEALERS'
class Users(BaseModel):
    """Peewee ORM model for observer application USERS."""
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    first_name = TextField(db_column='FIRST_NAME')
    last_name = TextField(db_column='LAST_NAME')
    password = TextField(db_column='PASSWORD')
    password_encrypted = FloatField(db_column='PASSWORD_ENCRYPTED', null=True)
    password_expiration_date = TextField(db_column='PASSWORD_EXPIRATION_DATE', null=True)
    species_expiration_date = TextField(db_column='SPECIES_EXPIRATION_DATE', null=True)
    status = TextField(db_column='STATUS')
    user = PrimaryKeyField(db_column='USER_ID')

    class Meta:
        db_table = 'USERS'
        # Unique index on (first_name, last_name).
        indexes = (
            (('first_name', 'last_name'), True),
        )
class Contacts(BaseModel):
    """Peewee ORM model for the CONTACTS table (people and related metadata)."""
    address_line1 = TextField(db_column='ADDRESS_LINE1', null=True)
    address_line2 = TextField(db_column='ADDRESS_LINE2', null=True)
    birthdate = TextField(db_column='BIRTHDATE', null=True)
    cell_phone = TextField(db_column='CELL_PHONE', null=True)
    city = TextField(db_column='CITY', null=True)
    contact_category = TextField(db_column='CONTACT_CATEGORY')
    contact = PrimaryKeyField(db_column='CONTACT_ID')
    contact_type = TextField(db_column='CONTACT_TYPE', null=True)
    country = TextField(db_column='COUNTRY', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    epirb_serial_number = FloatField(db_column='EPIRB_SERIAL_NUMBER', null=True)
    epirb_uin_2 = TextField(db_column='EPIRB_UIN_2', null=True)
    eprib_uin = TextField(db_column='EPRIB_UIN', null=True)
    first_name = TextField(db_column='FIRST_NAME', null=True)
    home_email = TextField(db_column='HOME_EMAIL', null=True)
    home_phone = TextField(db_column='HOME_PHONE', null=True)
    last_name = TextField(db_column='LAST_NAME', null=True)
    license_number = TextField(db_column='LICENSE_NUMBER', null=True)
    modified_by = IntegerField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    port = IntegerField(db_column='PORT_ID', null=True)
    relationship = TextField(db_column='RELATIONSHIP', null=True)
    state = TextField(db_column='STATE', null=True)
    # Optional link back to the application user record.
    user = ForeignKeyField(db_column='USER_ID', null=True, rel_model=Users, to_field='user')
    work_email = TextField(db_column='WORK_EMAIL', null=True)
    work_phone = TextField(db_column='WORK_PHONE', null=True)
    zip_code = TextField(db_column='ZIP_CODE', null=True)

    class Meta:
        db_table = 'CONTACTS'
class Trips(BaseModel):
    """Peewee ORM model for the TRIPS table (one observed fishing trip)."""
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    crew_size = IntegerField(db_column='CREW_SIZE', null=True)
    data_quality = TextField(db_column='DATA_QUALITY', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    debriefing = IntegerField(db_column='DEBRIEFING_ID', null=True)
    departure_date = TextField(db_column='DEPARTURE_DATE', null=True)
    departure_port = ForeignKeyField(db_column='DEPARTURE_PORT_ID', null=True, rel_model=Ports, to_field='port')
    do_expand = TextField(db_column='DO_EXPAND', null=True)
    evaluation = IntegerField(db_column='EVALUATION_ID', null=True)
    export = FloatField(db_column='EXPORT', null=True)
    external_trip = IntegerField(db_column='EXTERNAL_TRIP_ID', null=True)
    first_receiver = ForeignKeyField(db_column='FIRST_RECEIVER_ID', null=True, rel_model=IfqDealers,
                                     to_field='ifq_dealer')
    fishery = TextField(db_column='FISHERY', null=True)
    fishing_days_count = IntegerField(db_column='FISHING_DAYS_COUNT', null=True)
    fish_processed = TextField(db_column='FISH_PROCESSED', null=True)
    logbook_number = IntegerField(db_column='LOGBOOK_NUMBER', null=True)
    logbook_type = TextField(db_column='LOGBOOK_TYPE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    observer_logbook = IntegerField(db_column='OBSERVER_LOGBOOK', null=True)
    otc_kp = FloatField(db_column='OTC_KP', null=True)
    partial_trip = TextField(db_column='PARTIAL_TRIP')
    program = ForeignKeyField(db_column='PROGRAM_ID', rel_model=Programs, to_field='program')
    return_date = TextField(db_column='RETURN_DATE', null=True)
    # related_name disambiguates the second FK into Ports.
    return_port = ForeignKeyField(db_column='RETURN_PORT_ID', null=True, rel_model=Ports,
                                  related_name='PORTS_return_port_set', to_field='port')
    run_ter = TextField(db_column='RUN_TER', null=True)
    skipper = ForeignKeyField(db_column='SKIPPER_ID', null=True, rel_model=Contacts, to_field='contact')
    total_hooks_kp = FloatField(db_column='TOTAL_HOOKS_KP', null=True)
    trip = PrimaryKeyField(db_column='TRIP_ID')
    trip_status = TextField(db_column='TRIP_STATUS')
    user = ForeignKeyField(db_column='USER_ID', rel_model=Users, to_field='user')
    vessel = ForeignKeyField(db_column='VESSEL_ID', rel_model=Vessels, to_field='vessel')
    is_fg_trip_local = IntegerField(db_column='IS_FG_TRIP_LOCAL', null=True)

    class Meta:
        db_table = 'TRIPS'
class FishingActivities(BaseModel):
    """Peewee model for the FISHING_ACTIVITIES table.

    Child of :class:`Trips` (FK ``trip``); primary key ``fishing_activity``.
    ``target_strategy`` links to :class:`CatchCategories`.
    """
    avg_soak_time = TextField(db_column='AVG_SOAK_TIME', null=True)
    beaufort_value = TextField(db_column='BEAUFORT_VALUE', null=True)
    brd_present = TextField(db_column='BRD_PRESENT', null=True)
    cal_weight = TextField(db_column='CAL_WEIGHT', null=True)
    catch_count_kp = FloatField(db_column='CATCH_COUNT_KP', null=True)
    catch_weight_kp = FloatField(db_column='CATCH_WEIGHT_KP', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_quality = TextField(db_column='DATA_QUALITY')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    density = FloatField(db_column='DENSITY', null=True)
    density_um = TextField(db_column='DENSITY_UM', null=True)
    deterrent_used = TextField(db_column='DETERRENT_USED', null=True)
    efp = TextField(db_column='EFP', null=True)
    fishing_activity = PrimaryKeyField(db_column='FISHING_ACTIVITY_ID')
    fishing_activity_num = IntegerField(db_column='FISHING_ACTIVITY_NUM')
    fit = TextField(db_column='FIT', null=True)
    gear_performance = TextField(db_column='GEAR_PERFORMANCE', null=True)
    gear_segments_lost = IntegerField(db_column='GEAR_SEGMENTS_LOST', null=True)
    gear_type = TextField(db_column='GEAR_TYPE', null=True)
    hooks_sampled_kp = FloatField(db_column='HOOKS_SAMPLED_KP', null=True)
    notes = TextField(db_column='NOTES', null=True)
    observer_total_catch = FloatField(db_column='OBSERVER_TOTAL_CATCH', null=True)
    otc_weight_method = TextField(db_column='OTC_WEIGHT_METHOD', null=True)
    otc_weight_um = TextField(db_column='OTC_WEIGHT_UM', null=True)
    sample_count_kp = FloatField(db_column='SAMPLE_COUNT_KP', null=True)
    sample_weight_kp = FloatField(db_column='SAMPLE_WEIGHT_KP', null=True)
    target_strategy = ForeignKeyField(db_column='TARGET_STRATEGY_ID', null=True, rel_model=CatchCategories,
                                      to_field='catch_category')
    total_hooks = IntegerField(db_column='TOTAL_HOOKS', null=True)
    total_hooks_unrounded = FloatField(db_column='TOTAL_HOOKS_UNROUNDED', null=True)
    total_hooks_lost = IntegerField(db_column='TOTAL_HOOKS_LOST', null=True)
    tot_gear_segments = IntegerField(db_column='TOT_GEAR_SEGMENTS', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')
    volume = FloatField(db_column='VOLUME', null=True)
    volume_um = TextField(db_column='VOLUME_UM', null=True)
    biolist_localonly = IntegerField(db_column='BIOLIST_LOCALONLY', null=True)  # Local only for OPTECS FG
    efp_localonly = IntegerField(db_column='EFP_LOCALONLY', null=True)  # Local only for OPTECS FG

    class Meta:
        db_table = 'FISHING_ACTIVITIES'
class Catches(BaseModel):
    """Peewee model for the CATCHES table.

    Child of :class:`FishingActivities` (FK ``fishing_activity``); primary key
    ``catch``. ``catch_category`` links to :class:`CatchCategories`.
    """
    addl_basket_weight_kp = FloatField(db_column='ADDL_BASKET_WEIGHT_KP', null=True)
    basket_weight_count_kp = FloatField(db_column='BASKET_WEIGHT_COUNT_KP', null=True)
    basket_weight_kp = FloatField(db_column='BASKET_WEIGHT_KP', null=True)
    catch_category = ForeignKeyField(db_column='CATCH_CATEGORY_ID', rel_model=CatchCategories,
                                     to_field='catch_category')
    catch_count = IntegerField(db_column='CATCH_COUNT', null=True)
    catch_disposition = TextField(db_column='CATCH_DISPOSITION')
    catch = PrimaryKeyField(db_column='CATCH_ID')
    catch_num = IntegerField(db_column='CATCH_NUM')
    catch_purity = TextField(db_column='CATCH_PURITY', null=True)
    catch_weight = FloatField(db_column='CATCH_WEIGHT', null=True)
    catch_weight_method = TextField(db_column='CATCH_WEIGHT_METHOD', null=True)
    catch_weight_um = TextField(db_column='CATCH_WEIGHT_UM', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    density = FloatField(db_column='DENSITY', null=True)
    density_um = TextField(db_column='DENSITY_UM', null=True)
    discard_reason = TextField(db_column='DISCARD_REASON', null=True)
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', rel_model=FishingActivities,
                                       to_field='fishing_activity')
    gear_segments_sampled = IntegerField(db_column='GEAR_SEGMENTS_SAMPLED', null=True)
    hooks_sampled = IntegerField(db_column='HOOKS_SAMPLED', null=True)
    hooks_sampled_unrounded = FloatField(db_column='HOOKS_SAMPLED_UNROUNDED', null=True)
    notes = TextField(db_column='NOTES', null=True)
    sample_count = IntegerField(db_column='SAMPLE_COUNT', null=True)
    sample_weight = FloatField(db_column='SAMPLE_WEIGHT', null=True)
    sample_weight_um = TextField(db_column='SAMPLE_WEIGHT_UM', null=True)
    volume = FloatField(db_column='VOLUME', null=True)
    volume_um = TextField(db_column='VOLUME_UM', null=True)

    class Meta:
        db_table = 'CATCHES'
class CatchAdditionalBaskets(BaseModel):
    """Peewee model for the CATCH_ADDITIONAL_BASKETS table.

    Mirrors all OBSPROD.CATCH_ADDITIONAL_BASKETS fields except:
    RESOLVED_*, MODIFIED_BY, MODIFIED_DATE.

    Primary Key: CATCH_ADDTL_BASKETS_ID
    Foreign Key: CATCH_ID (to :class:`Catches`)
    Non-nullable fields (other than keys):
    BASKET_WEIGHT, CREATED_DATE, CREATED_BY.

    Created by hand from inspection of IFQADMIN DDL, but follows the Peewee
    convention of omitting "_id" from the python names of key fields.
    """
    catch_addtl_baskets = PrimaryKeyField(db_column='CATCH_ADDTL_BASKETS_ID')
    catch = ForeignKeyField(db_column='CATCH_ID', rel_model=Catches, to_field='catch')
    # The basket_weight field is not nullable in OBSPROD.
    basket_weight = FloatField(db_column='BASKET_WEIGHT', null=False)
    created_date = TextField(db_column='CREATED_DATE', null=False)
    created_by = IntegerField(db_column='CREATED_BY', null=False)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    # N.B. Field BASKET_TYPE is not yet a field in OBSPROD as of April 2017.
    # Request to add submitted.
    basket_type = IntegerField(db_column='BASKET_TYPE', null=True)

    class Meta:
        db_table = 'CATCH_ADDITIONAL_BASKETS'
class BioSpecimens(BaseModel):
    """Peewee model for the BIO_SPECIMENS table.

    Child of :class:`Catches` (FK ``catch``); FK ``species`` to
    :class:`Species`; primary key ``bio_specimen``.
    """
    bio_specimen = PrimaryKeyField(db_column='BIO_SPECIMEN_ID')
    catch = ForeignKeyField(db_column='CATCH_ID', rel_model=Catches, to_field='catch')
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    discard_reason = TextField(db_column='DISCARD_REASON', null=True)
    frequency_kp = FloatField(db_column='FREQUENCY_KP', null=True)
    lf_length_kp = FloatField(db_column='LF_LENGTH_KP', null=True)
    notes = TextField(db_column='NOTES', null=True)
    sample_method = TextField(db_column='SAMPLE_METHOD')
    species = ForeignKeyField(db_column='SPECIES_ID', rel_model=Species, to_field='species')
    specimen_length_kp = FloatField(db_column='SPECIMEN_LENGTH_KP', null=True)
    specimen_weight_kp = FloatField(db_column='SPECIMEN_WEIGHT_KP', null=True)

    class Meta:
        db_table = 'BIO_SPECIMENS'
class BioSpecimenItems(BaseModel):
    """Peewee model for the BIO_SPECIMEN_ITEMS table.

    Child of :class:`BioSpecimens` (FK ``bio_specimen``); primary key
    ``bio_specimen_item``.
    """
    adipose_present = TextField(db_column='ADIPOSE_PRESENT', null=True)
    band = TextField(db_column='BAND_ID', null=True)
    bio_specimen = ForeignKeyField(db_column='BIO_SPECIMEN_ID', rel_model=BioSpecimens, to_field='bio_specimen')
    bio_specimen_item = PrimaryKeyField(db_column='BIO_SPECIMEN_ITEM_ID')
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    maturity = TextField(db_column='MATURITY', null=True)
    notes = TextField(db_column='NOTES', null=True)
    specimen_length = FloatField(db_column='SPECIMEN_LENGTH', null=True)
    specimen_length_um = TextField(db_column='SPECIMEN_LENGTH_UM', null=True)
    specimen_sex = TextField(db_column='SPECIMEN_SEX', null=True)
    specimen_weight = FloatField(db_column='SPECIMEN_WEIGHT', null=True)
    specimen_weight_um = TextField(db_column='SPECIMEN_WEIGHT_UM', null=True)
    viability = TextField(db_column='VIABILITY', null=True)

    class Meta:
        db_table = 'BIO_SPECIMEN_ITEMS'
class Brd(BaseModel):
    """Peewee model for the BRD table.

    FKs to :class:`Trips` (required) and :class:`FishingActivities`
    (nullable); primary key ``brd``.
    """
    brd = PrimaryKeyField(db_column='BRD_ID')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', null=True, rel_model=FishingActivities,
                                       to_field='fishing_activity')
    notes = TextField(db_column='NOTES', null=True)
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')

    class Meta:
        db_table = 'BRD'
class BrdHaulsXref(BaseModel):
    """Peewee model for the BRD_HAULS_XREF cross-reference table.

    Links :class:`Brd` rows to hauls; ``fishing_activity`` is stored as a
    plain integer ID rather than a ForeignKeyField here.
    """
    brd_haul = PrimaryKeyField(db_column='BRD_HAUL_ID')
    brd = ForeignKeyField(db_column='BRD_ID', rel_model=Brd, to_field='brd')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fishing_activity = IntegerField(db_column='FISHING_ACTIVITY_ID')
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)

    class Meta:
        db_table = 'BRD_HAULS_XREF'
class BrdType(BaseModel):
    """Peewee model for the BRD_TYPE table.

    Child of :class:`Brd` (FK ``brd``); primary key ``brd_type_id``.
    """
    brd_codend_mesh = TextField(db_column='BRD_CODEND_MESH', null=True)
    brd = ForeignKeyField(db_column='BRD_ID', rel_model=Brd, to_field='brd')
    brd_light_colors = TextField(db_column='BRD_LIGHT_COLORS', null=True)
    brd_light_manufs = TextField(db_column='BRD_LIGHT_MANUFS', null=True)
    brd_light_patterns = TextField(db_column='BRD_LIGHT_PATTERNS', null=True)
    brd_locations = TextField(db_column='BRD_LOCATIONS', null=True)
    brd_targets = TextField(db_column='BRD_TARGETS', null=True)
    brd_type = TextField(db_column='BRD_TYPE', null=True)
    brd_type_id = PrimaryKeyField(db_column='BRD_TYPE_ID')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    num_lights_port = FloatField(db_column='NUM_LIGHTS_PORT', null=True)
    num_lights_single_rigged = FloatField(db_column='NUM_LIGHTS_SINGLE_RIGGED', null=True)
    num_lights_starboard = FloatField(db_column='NUM_LIGHTS_STARBOARD', null=True)
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)

    class Meta:
        db_table = 'BRD_TYPE'
class Comment(BaseModel):
    """Peewee model for the COMMENT table.

    Free-text comment attached to a :class:`Trips` row and optionally to a
    :class:`FishingActivities` row (both FKs nullable).
    """
    appstateinfo = TextField(db_column='APPSTATEINFO', null=True)
    comment = TextField(db_column='COMMENT', null=True)
    comment_date = TextField(db_column='COMMENT_DATE', null=True)
    comment_id = PrimaryKeyField(db_column='COMMENT_ID')
    trip = ForeignKeyField(db_column='TRIP_ID', null=True, rel_model=Trips, to_field='trip')
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', null=True,
                                       rel_model=FishingActivities, to_field='fishing_activity')
    username = TextField(db_column='USERNAME', null=True)

    class Meta:
        db_table = 'COMMENT'
class Communications(BaseModel):
    """Peewee model for the COMMUNICATIONS table.

    NOTE(review): the primary key and the contact/user/vessel references are
    declared as plain FloatFields (with ``primary_key=True`` on
    ``communication``) rather than PrimaryKeyField/ForeignKeyField —
    presumably mirroring the generated schema; confirm before changing.
    """
    communication = FloatField(db_column='COMMUNICATION_ID', primary_key=True)
    contact = FloatField(db_column='CONTACT_ID', null=True)
    created_by = FloatField(db_column='CREATED_BY')
    created_date = TextField(db_column='CREATED_DATE')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    modified_by = FloatField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    note_category = TextField(db_column='NOTE_CATEGORY', null=True)
    note_date = TextField(db_column='NOTE_DATE', null=True)
    private_comment = TextField(db_column='PRIVATE_COMMENT', null=True)
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)
    user = FloatField(db_column='USER_ID', null=True)
    vessel = FloatField(db_column='VESSEL_ID', null=True)

    class Meta:
        db_table = 'COMMUNICATIONS'
class DbSync(BaseModel):
    """Peewee model for the DB_SYNC table.

    Sync-status record per trip; ``metadata`` is a raw BLOB.
    """
    db_sync = PrimaryKeyField(db_column='DB_SYNC_ID')
    metadata = BlobField(db_column='METADATA', null=True)
    status = IntegerField(db_column='STATUS', null=True)
    sync_date = TextField(db_column='SYNC_DATE', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', null=True, rel_model=Trips, to_field='trip')

    class Meta:
        db_table = 'DB_SYNC'
class Debriefings(BaseModel):
    """Peewee model for the DEBRIEFINGS table.

    ``observer`` and ``program`` are stored as plain integer IDs, not
    ForeignKeyFields.
    """
    debriefing_end = TextField(db_column='DEBRIEFING_END', null=True)
    debriefing = PrimaryKeyField(db_column='DEBRIEFING_ID')
    debriefing_start = TextField(db_column='DEBRIEFING_START', null=True)
    evaluation_note = TextField(db_column='EVALUATION_NOTE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    observer = IntegerField(db_column='OBSERVER_ID')
    program = IntegerField(db_column='PROGRAM_ID')

    class Meta:
        db_table = 'DEBRIEFINGS'
class Dissections(BaseModel):
    """Peewee model for the DISSECTIONS table.

    Child of :class:`BioSpecimenItems` (FK ``bio_specimen_item``);
    ``dissection_barcode`` carries a UNIQUE constraint.
    """
    age = IntegerField(db_column='AGE', null=True)
    age_date = TextField(db_column='AGE_DATE', null=True)
    age_location = TextField(db_column='AGE_LOCATION', null=True)
    age_method = TextField(db_column='AGE_METHOD', null=True)
    age_reader = TextField(db_column='AGE_READER', null=True)
    band = TextField(db_column='BAND_ID', null=True)
    bio_specimen_item = ForeignKeyField(db_column='BIO_SPECIMEN_ITEM_ID', rel_model=BioSpecimenItems,
                                        to_field='bio_specimen_item')
    bs_result = TextField(db_column='BS_RESULT', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    cwt_code = TextField(db_column='CWT_CODE', null=True)
    cwt_status = TextField(db_column='CWT_STATUS', null=True)
    cwt_type = TextField(db_column='CWT_TYPE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    dissection_barcode = IntegerField(db_column='DISSECTION_BARCODE', null=True, unique=True)
    dissection = PrimaryKeyField(db_column='DISSECTION_ID')
    dissection_type = TextField(db_column='DISSECTION_TYPE')
    rack = IntegerField(db_column='RACK_ID', null=True)
    rack_position = TextField(db_column='RACK_POSITION', null=True)

    class Meta:
        db_table = 'DISSECTIONS'
class StratumGroups(BaseModel):
    """Peewee model for the STRATUM_GROUPS lookup table.

    Referenced by several *_MTX cross-reference models and by
    :class:`StratumLu` / :class:`SpeciesSamplingPlanLu` below.
    """
    group = PrimaryKeyField(db_column='GROUP_ID')
    group_type = TextField(db_column='GROUP_TYPE', null=True)
    name = TextField(db_column='NAME', null=True)

    class Meta:
        db_table = 'STRATUM_GROUPS'
class FisheryStratumGroupsMtx(BaseModel):
    """Peewee model for the FISHERY_STRATUM_GROUPS_MTX cross-reference table.

    Maps fishery lookup IDs (plain integer ``fishery_lu``) to
    :class:`StratumGroups`.
    """
    fishery_lu = IntegerField(db_column='FISHERY_LU_ID', null=True)
    group = ForeignKeyField(db_column='GROUP_ID', null=True, rel_model=StratumGroups, to_field='group')
    id = PrimaryKeyField(db_column='ID')

    class Meta:
        db_table = 'FISHERY_STRATUM_GROUPS_MTX'
class FishingLocations(BaseModel):
    """Peewee model for the FISHING_LOCATIONS table.

    Position fixes for a :class:`FishingActivities` row; the
    (``fishing_activity``, ``position``) pair is declared UNIQUE in
    ``Meta.indexes``.
    """
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    depth = FloatField(db_column='DEPTH')
    depth_um = TextField(db_column='DEPTH_UM')
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', rel_model=FishingActivities,
                                       to_field='fishing_activity')
    fishing_location = PrimaryKeyField(db_column='FISHING_LOCATION_ID')
    latitude = FloatField(db_column='LATITUDE')
    location_date = TextField(db_column='LOCATION_DATE')
    longitude = FloatField(db_column='LONGITUDE')
    notes = TextField(db_column='NOTES', null=True)
    position = IntegerField(db_column='POSITION', null=True)

    class Meta:
        db_table = 'FISHING_LOCATIONS'
        # ((fields...), unique) tuples per peewee convention.
        indexes = (
            (('fishing_activity', 'position'), True),
        )
class FishTickets(BaseModel):
    """Peewee model for the FISH_TICKETS table.

    Child of :class:`Trips`; a non-unique composite index covers the
    ticket-identifying columns.
    """
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fish_ticket_date = TextField(db_column='FISH_TICKET_DATE', null=True)
    fish_ticket = PrimaryKeyField(db_column='FISH_TICKET_ID')
    fish_ticket_number = TextField(db_column='FISH_TICKET_NUMBER')
    state_agency = TextField(db_column='STATE_AGENCY', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')

    class Meta:
        db_table = 'FISH_TICKETS'
        # ((fields...), unique) tuples per peewee convention.
        indexes = (
            (('trip', 'fish_ticket_number', 'fish_ticket_date', 'state_agency'), False),
        )
class GeartypeStratumGroupMtx(BaseModel):
    """Peewee model for the GEARTYPE_STRATUM_GROUP_MTX cross-reference table.

    Maps gear-type lookup IDs (plain integer ``geartype_lu``) to
    :class:`StratumGroups`.
    """
    geartype_lu = IntegerField(db_column='GEARTYPE_LU_ID', null=True)
    group = ForeignKeyField(db_column='GROUP_ID', null=True, rel_model=StratumGroups, to_field='group')
    id = PrimaryKeyField(db_column='ID')

    class Meta:
        db_table = 'GEARTYPE_STRATUM_GROUP_MTX'
class Hlfc(BaseModel):
    """Peewee model for the HLFC table.

    FKs to :class:`Trips` (required) and :class:`FishingActivities`
    (nullable); primary key ``hlfc``.
    """
    avg_aerial_extent = TextField(db_column='AVG_AERIAL_EXTENT', null=True)
    avoidance_gear_used = TextField(db_column='AVOIDANCE_GEAR_USED', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', null=True, rel_model=FishingActivities,
                                       to_field='fishing_activity')
    floats_per_skate = FloatField(db_column='FLOATS_PER_SKATE', null=True)
    floats_used = TextField(db_column='FLOATS_USED', null=True)
    hlfc = PrimaryKeyField(db_column='HLFC_ID')
    hooks_per_skate = FloatField(db_column='HOOKS_PER_SKATE', null=True)
    horizontal_distance = TextField(db_column='HORIZONTAL_DISTANCE', null=True)
    mass_per_weight = TextField(db_column='MASS_PER_WEIGHT', null=True)
    mitigation_type = TextField(db_column='MITIGATION_TYPE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    product_delivery = TextField(db_column='PRODUCT_DELIVERY', null=True)
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)
    speed = TextField(db_column='SPEED', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')
    weights_per_skate = TextField(db_column='WEIGHTS_PER_SKATE', null=True)
    weights_used = TextField(db_column='WEIGHTS_USED', null=True)

    class Meta:
        db_table = 'HLFC'
class HlfcHaulsXref(BaseModel):
    """Peewee model for the HLFC_HAULS_XREF cross-reference table.

    Links :class:`Hlfc` rows to hauls; ``fishing_activity`` is stored as a
    plain integer ID rather than a ForeignKeyField here.
    """
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fishing_activity = IntegerField(db_column='FISHING_ACTIVITY_ID')
    hlfc_haul = PrimaryKeyField(db_column='HLFC_HAUL_ID')
    hlfc = ForeignKeyField(db_column='HLFC_ID', rel_model=Hlfc, to_field='hlfc')
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)

    class Meta:
        db_table = 'HLFC_HAULS_XREF'
class LengthFrequencies(BaseModel):
    """Peewee model for the LENGTH_FREQUENCIES table.

    Child of :class:`BioSpecimens` (FK ``bio_specimen``); primary key
    ``length_frequency``.
    """
    bio_specimen = ForeignKeyField(db_column='BIO_SPECIMEN_ID', rel_model=BioSpecimens, to_field='bio_specimen')
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    frequency = FloatField(db_column='FREQUENCY')
    length_frequency = PrimaryKeyField(db_column='LENGTH_FREQUENCY_ID')
    lf_length = FloatField(db_column='LF_LENGTH')
    lf_length_um = TextField(db_column='LF_LENGTH_UM')
    lf_sex = TextField(db_column='LF_SEX', null=True)
    notes = TextField(db_column='NOTES', null=True)

    class Meta:
        db_table = 'LENGTH_FREQUENCIES'
class Lookups(BaseModel):
    """Peewee model for the LOOKUPS table (typed key/value lookup rows).

    ``program`` is stored as a plain integer ID, not a ForeignKeyField.
    """
    active = IntegerField(db_column='ACTIVE', null=True)
    description = TextField(db_column='DESCRIPTION', null=True)
    lookup = PrimaryKeyField(db_column='LOOKUP_ID')
    lookup_type = TextField(db_column='LOOKUP_TYPE')
    lookup_value = TextField(db_column='LOOKUP_VALUE')
    program = IntegerField(db_column='PROGRAM_ID')

    class Meta:
        db_table = 'LOOKUPS'
class PasswordHistory(BaseModel):
    """Peewee model for the PASSWORD_HISTORY table.

    ``user`` is stored as a plain integer ID, not a ForeignKeyField.
    """
    created_by = IntegerField(db_column='CREATED_BY')
    created_date = TextField(db_column='CREATED_DATE')
    modified_by = IntegerField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    password = TextField(db_column='PASSWORD')
    password_history = PrimaryKeyField(db_column='PASSWORD_HISTORY_ID')
    user = IntegerField(db_column='USER_ID')

    class Meta:
        db_table = 'PASSWORD_HISTORY'
class Photos(BaseModel):
    """Peewee model for the PHOTOS table (primary key only)."""
    photo = PrimaryKeyField(db_column='PHOTO_ID')

    class Meta:
        db_table = 'PHOTOS'
class PrincipalInvestigatorLu(BaseModel):
    """Peewee model for the PRINCIPAL_INVESTIGATOR_LU lookup table."""
    email_address = TextField(db_column='EMAIL_ADDRESS', null=True)
    full_name = TextField(db_column='FULL_NAME', null=True)
    last_name = TextField(db_column='LAST_NAME', null=True)
    organization = TextField(db_column='ORGANIZATION', null=True)
    phone_number = TextField(db_column='PHONE_NUMBER', null=True)
    principal_investigator = PrimaryKeyField(db_column='PRINCIPAL_INVESTIGATOR_ID')
    program = TextField(db_column='PROGRAM', null=True)

    class Meta:
        db_table = 'PRINCIPAL_INVESTIGATOR_LU'
class Roles(BaseModel):
    """Peewee model for the ROLES table."""
    description = TextField(db_column='DESCRIPTION', null=True)
    role = PrimaryKeyField(db_column='ROLE_ID')
    role_name = TextField(db_column='ROLE_NAME', null=True)

    class Meta:
        db_table = 'ROLES'
class ProgramRoles(BaseModel):
    """Peewee model for the PROGRAM_ROLES table (Programs x Roles join)."""
    program = ForeignKeyField(db_column='PROGRAM_ID', null=True, rel_model=Programs, to_field='program')
    program_role = PrimaryKeyField(db_column='PROGRAM_ROLE_ID')
    role = ForeignKeyField(db_column='ROLE_ID', null=True, rel_model=Roles, to_field='role')

    class Meta:
        db_table = 'PROGRAM_ROLES'
class ProgramStratumGroupMtx(BaseModel):
    """Peewee model for the PROGRAM_STRATUM_GROUP_MTX cross-reference table.

    Maps program IDs (plain integer ``program``) to :class:`StratumGroups`.
    """
    group = ForeignKeyField(db_column='GROUP_ID', null=True, rel_model=StratumGroups, to_field='group')
    id = PrimaryKeyField(db_column='ID')
    program = IntegerField(db_column='PROGRAM_ID', null=True)

    class Meta:
        db_table = 'PROGRAM_STRATUM_GROUP_MTX'
class ProtocolGroups(BaseModel):
    """Peewee model for the PROTOCOL_GROUPS table; ``name`` is UNIQUE."""
    group = PrimaryKeyField(db_column='GROUP_ID')
    name = TextField(db_column='NAME', null=True, unique=True)

    class Meta:
        db_table = 'PROTOCOL_GROUPS'
class ProtocolGroupMtx(BaseModel):
    """Peewee model for the PROTOCOL_GROUP_MTX cross-reference table.

    Maps protocol lookup IDs (plain integer ``protocol_lu``) to
    :class:`ProtocolGroups`.
    """
    group = ForeignKeyField(db_column='GROUP_ID', null=True, rel_model=ProtocolGroups, to_field='group')
    id = PrimaryKeyField(db_column='ID')
    protocol_lu = IntegerField(db_column='PROTOCOL_LU_ID', null=True)

    class Meta:
        db_table = 'PROTOCOL_GROUP_MTX'
class Settings(BaseModel):
    """Peewee model for the SETTINGS table (parameter/value pairs)."""
    is_active = TextField(db_column='IS_ACTIVE', null=True)
    parameter = TextField(db_column='PARAMETER', null=True)
    settings = PrimaryKeyField(db_column='SETTINGS_ID')
    value = TextField(db_column='VALUE', null=True)

    class Meta:
        db_table = 'SETTINGS'
class SpeciesCatchCategories(BaseModel):
    """Peewee model for the SPECIES_CATCH_CATEGORIES join table.

    Many-to-many between :class:`Species` and :class:`CatchCategories`;
    the pair is declared UNIQUE in ``Meta.indexes``.
    """
    catch_category = ForeignKeyField(db_column='CATCH_CATEGORY_ID', rel_model=CatchCategories,
                                     to_field='catch_category')
    created_by = IntegerField(db_column='CREATED_BY')
    created_date = TextField(db_column='CREATED_DATE')
    modified_by = IntegerField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    species_catch_category = PrimaryKeyField(db_column='SPECIES_CATCH_CATEGORY_ID')
    species = ForeignKeyField(db_column='SPECIES_ID', rel_model=Species, to_field='species')

    class Meta:
        db_table = 'SPECIES_CATCH_CATEGORIES'
        # ((fields...), unique) tuples per peewee convention.
        indexes = (
            (('species', 'catch_category'), True),
        )
class SpeciesCompositions(BaseModel):
    """Peewee model for the SPECIES_COMPOSITIONS table.

    One-to-one with :class:`Catches` (FK ``catch`` is UNIQUE); primary key
    ``species_composition``.
    """
    basket_number = IntegerField(db_column='BASKET_NUMBER', null=True)
    catch = ForeignKeyField(db_column='CATCH_ID', rel_model=Catches, to_field='catch', unique=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_quality = TextField(db_column='DATA_QUALITY')
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    notes = TextField(db_column='NOTES', null=True)
    sample_method = TextField(db_column='SAMPLE_METHOD')
    species_composition = PrimaryKeyField(db_column='SPECIES_COMPOSITION_ID')
    species_number_kp = FloatField(db_column='SPECIES_NUMBER_KP', null=True)
    species_weight_kp = FloatField(db_column='SPECIES_WEIGHT_KP', null=True)

    class Meta:
        db_table = 'SPECIES_COMPOSITIONS'
class SpeciesCompositionItems(BaseModel):
    """Peewee model for the SPECIES_COMPOSITION_ITEMS table.

    Child of :class:`SpeciesCompositions` (FK ``species_composition``);
    FK ``species`` to :class:`Species`; primary key ``species_comp_item``.
    """
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    discard_reason = TextField(db_column='DISCARD_REASON', null=True)
    handling = TextField(db_column='HANDLING', null=True)
    notes = TextField(db_column='NOTES', null=True)
    species_composition = ForeignKeyField(db_column='SPECIES_COMPOSITION_ID', rel_model=SpeciesCompositions,
                                          to_field='species_composition')
    species_comp_item = PrimaryKeyField(db_column='SPECIES_COMP_ITEM_ID')
    species = ForeignKeyField(db_column='SPECIES_ID', rel_model=Species, to_field='species')
    species_number = IntegerField(db_column='SPECIES_NUMBER', null=True)
    species_weight = FloatField(db_column='SPECIES_WEIGHT', null=True)
    species_weight_um = TextField(db_column='SPECIES_WEIGHT_UM', null=True)
    total_tally = IntegerField(db_column='TOTAL_TALLY', null=True)
    extrapolated_species_weight = FloatField(db_column='EXTRAPOLATED_SPECIES_WEIGHT', null=True)

    class Meta:
        db_table = 'SPECIES_COMPOSITION_ITEMS'
class SpeciesCompositionBaskets(BaseModel):
    """Peewee model for the SPECIES_COMPOSITION_BASKETS table.

    Child of :class:`SpeciesCompositionItems` (FK ``species_comp_item``);
    primary key ``species_comp_basket``.
    """
    basket_weight_itq = FloatField(db_column='BASKET_WEIGHT_ITQ', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    fish_number_itq = IntegerField(db_column='FISH_NUMBER_ITQ', null=True)
    species_comp_basket = PrimaryKeyField(db_column='SPECIES_COMP_BASKET_ID')
    species_comp_item = ForeignKeyField(db_column='SPECIES_COMP_ITEM_ID', rel_model=SpeciesCompositionItems,
                                        to_field='species_comp_item')
    is_fg_tally_local = IntegerField(db_column='IS_FG_TALLY_LOCAL', null=True)
    is_subsample = IntegerField(db_column='IS_SUBSAMPLE', null=True)

    class Meta:
        db_table = 'SPECIES_COMPOSITION_BASKETS'
class SpeciesCorrelation(BaseModel):
    """Peewee model for the SPECIES_CORRELATION table.

    Length/weight rows keyed by a plain integer ``species`` ID (not a
    ForeignKeyField).
    """
    length = FloatField(db_column='LENGTH', null=True)
    species_correlation = PrimaryKeyField(db_column='SPECIES_CORRELATION_ID')
    species = IntegerField(db_column='SPECIES_ID', null=True)
    weight = FloatField(db_column='WEIGHT', null=True)

    class Meta:
        db_table = 'SPECIES_CORRELATION'
class SpeciesIdentifications(BaseModel):
    """Peewee model for the SPECIES_IDENTIFICATIONS table.

    FK ``species`` to :class:`Species`; ``observer`` is a plain integer ID.
    """
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    identification_date = TextField(db_column='IDENTIFICATION_DATE')
    observer = IntegerField(db_column='OBSERVER_ID')
    species = ForeignKeyField(db_column='SPECIES_ID', rel_model=Species, to_field='species')
    species_ident = PrimaryKeyField(db_column='SPECIES_IDENT_ID')

    class Meta:
        db_table = 'SPECIES_IDENTIFICATIONS'
class SpeciesSightings(BaseModel):
    """Peewee model for the SPECIES_SIGHTINGS table.

    Child of :class:`Trips` (FK ``trip``); ``species`` is a plain integer ID
    here. Primary key ``species_sighting``.
    """
    approach_distance = FloatField(db_column='APPROACH_DISTANCE', null=True)
    approach_distance_um = TextField(db_column='APPROACH_DISTANCE_UM', null=True)
    beaufort_value = TextField(db_column='BEAUFORT_VALUE', null=True)
    body_length = TextField(db_column='BODY_LENGTH', null=True)
    confidence = TextField(db_column='CONFIDENCE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    interaction_behaviors = TextField(db_column='INTERACTION_BEHAVIORS', null=True)
    interaction_outcome = FloatField(db_column='INTERACTION_OUTCOME', null=True)
    notes = TextField(db_column='NOTES', null=True)
    sighting_condition = TextField(db_column='SIGHTING_CONDITION', null=True)
    sighting_date = TextField(db_column='SIGHTING_DATE')
    sighting_latitude = FloatField(db_column='SIGHTING_LATITUDE')
    sighting_longitude = FloatField(db_column='SIGHTING_LONGITUDE')
    species_best_number = FloatField(db_column='SPECIES_BEST_NUMBER', null=True)
    species = IntegerField(db_column='SPECIES_ID')
    species_max_number = FloatField(db_column='SPECIES_MAX_NUMBER', null=True)
    species_min_number = FloatField(db_column='SPECIES_MIN_NUMBER', null=True)
    species_sighting = PrimaryKeyField(db_column='SPECIES_SIGHTING_ID')
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')
    water_temperature = FloatField(db_column='WATER_TEMPERATURE', null=True)
    water_temperature_um = TextField(db_column='WATER_TEMPERATURE_UM', null=True)

    class Meta:
        db_table = 'SPECIES_SIGHTINGS'
class SpeciesInteractions(BaseModel):
    """Peewee model for the SPECIES_INTERACTIONS table.

    Child of :class:`SpeciesSightings` (FK ``species_sighting``).
    """
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    species_interaction = PrimaryKeyField(db_column='SPECIES_INTERACTION_ID')
    species_interaction_type = TextField(db_column='SPECIES_INTERACTION_TYPE')
    species_sighting = ForeignKeyField(db_column='SPECIES_SIGHTING_ID', rel_model=SpeciesSightings,
                                       to_field='species_sighting')

    class Meta:
        db_table = 'SPECIES_INTERACTIONS'
class SpeciesInteractionHaulsXref(BaseModel):
    """Peewee model for the SPECIES_INTERACTION_HAULS_XREF table.

    Cross-reference between :class:`SpeciesSightings` and
    :class:`FishingActivities`.
    """
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    fishing_activity = ForeignKeyField(db_column='FISHING_ACTIVITY_ID', rel_model=FishingActivities,
                                       to_field='fishing_activity')
    row_processed = FloatField(db_column='ROW_PROCESSED', null=True)
    row_status = TextField(db_column='ROW_STATUS', null=True)
    si_haul = PrimaryKeyField(db_column='SI_HAUL_ID')
    species_sighting = ForeignKeyField(db_column='SPECIES_SIGHTING_ID', rel_model=SpeciesSightings,
                                       to_field='species_sighting')

    class Meta:
        db_table = 'SPECIES_INTERACTION_HAULS_XREF'
class StratumLu(BaseModel):
    """Peewee model for the STRATUM_LU lookup table.

    Carries three FKs into :class:`StratumGroups` (fishery, gear type,
    program), each with a distinct ``related_name`` to keep peewee's
    reverse accessors from colliding.
    """
    disposition = TextField(db_column='DISPOSITION', null=True)
    fishery_group = ForeignKeyField(db_column='FISHERY_GROUP_ID', null=True, rel_model=StratumGroups, to_field='group')
    gear_type_group = ForeignKeyField(db_column='GEAR_TYPE_GROUP_ID', null=True, rel_model=StratumGroups,
                                      related_name='STRATUM_GROUPS_gear_type_group_set', to_field='group')
    name = TextField(db_column='NAME', null=True)
    program_group = ForeignKeyField(db_column='PROGRAM_GROUP_ID', null=True, rel_model=StratumGroups,
                                    related_name='STRATUM_GROUPS_program_group_set', to_field='group')
    range_max = FloatField(db_column='RANGE_MAX', null=True)
    range_min = FloatField(db_column='RANGE_MIN', null=True)
    range_units = TextField(db_column='RANGE_UNITS', null=True)
    stratum = PrimaryKeyField(db_column='STRATUM_ID')
    stratum_subtype = TextField(db_column='STRATUM_SUBTYPE', null=True)
    stratum_type = IntegerField(db_column='STRATUM_TYPE_ID', null=True)
    value = TextField(db_column='VALUE', null=True)

    class Meta:
        db_table = 'STRATUM_LU'
class SpeciesSamplingPlanLu(BaseModel):
    """Peewee model for the SPECIES_SAMPLING_PLAN_LU lookup table.

    Self-referential via ``parent_species_sampling_plan``
    (``rel_model='self'``); FKs to :class:`StratumGroups`,
    :class:`PrincipalInvestigatorLu`, :class:`ProtocolGroups`,
    :class:`Species`, and :class:`StratumLu`.
    """
    biosample_assignment_lu = IntegerField(db_column='BIOSAMPLE_ASSIGNMENT_LU_ID', null=True)
    biosample_list_lu = ForeignKeyField(db_column='BIOSAMPLE_LIST_LU_ID', null=True,
                                        rel_model=StratumGroups, to_field='group')
    count = IntegerField(db_column='COUNT', null=True)
    display_name = TextField(db_column='DISPLAY_NAME', null=True)
    disposition = TextField(db_column='DISPOSITION', null=True)
    parent_species_sampling_plan = ForeignKeyField(db_column='PARENT_SPECIES_SAMPLING_PLAN_ID', null=True,
                                                   rel_model='self', to_field='species_sampling_plan')
    plan_name = TextField(db_column='PLAN_NAME', null=True)
    principal_investigator = ForeignKeyField(db_column='PRINCIPAL_INVESTIGATOR_ID', null=True,
                                             rel_model=PrincipalInvestigatorLu, to_field='principal_investigator')
    protocol_group = ForeignKeyField(db_column='PROTOCOL_GROUP_ID', null=True, rel_model=ProtocolGroups,
                                    to_field='group')
    species = ForeignKeyField(db_column='SPECIES_ID', null=True, rel_model=Species, to_field='species')
    species_sampling_plan = PrimaryKeyField(db_column='SPECIES_SAMPLING_PLAN_ID')
    stratum = ForeignKeyField(db_column='STRATUM_ID', null=True, rel_model=StratumLu, to_field='stratum')
    weight_method_lu = IntegerField(db_column='WEIGHT_METHOD_LU_ID', null=True)

    class Meta:
        db_table = 'SPECIES_SAMPLING_PLAN_LU'
class TripCertificates(BaseModel):
    """Peewee model for the TRIP_CERTIFICATES table.

    Child of :class:`Trips`; a non-unique index covers
    (``trip``, ``certificate_number``).
    """
    certificate_number = TextField(db_column='CERTIFICATE_NUMBER')
    certification = IntegerField(db_column='CERTIFICATION_ID', null=True)
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    data_source = TextField(db_column='DATA_SOURCE', null=True)
    trip_certificate = PrimaryKeyField(db_column='TRIP_CERTIFICATE_ID')
    trip = ForeignKeyField(db_column='TRIP_ID', rel_model=Trips, to_field='trip')

    class Meta:
        db_table = 'TRIP_CERTIFICATES'
        # ((fields...), unique) tuples per peewee convention.
        indexes = (
            (('trip', 'certificate_number'), False),
        )
class TripCheckGroups(BaseModel):
    """Peewee model for the TRIP_CHECK_GROUPS table.

    Groups :class:`TripChecks` rows; ``trip_check_group_name`` is UNIQUE.
    """
    column_list = TextField(db_column='COLUMN_LIST', null=True)
    created_by = IntegerField(db_column='CREATED_BY')
    created_date = TextField(db_column='CREATED_DATE')
    description = TextField(db_column='DESCRIPTION', null=True)
    modified_by = IntegerField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    status = IntegerField(db_column='STATUS')
    trip_check_group = PrimaryKeyField(db_column='TRIP_CHECK_GROUP_ID')
    trip_check_group_name = TextField(db_column='TRIP_CHECK_GROUP_NAME', null=True, unique=True)

    class Meta:
        db_table = 'TRIP_CHECK_GROUPS'
class TripChecks(BaseModel):
    """Peewee model for the TRIP_CHECKS table.

    Validation-check definitions (SQL stored in ``check_sql``); child of
    :class:`TripCheckGroups`. ``check_code`` is UNIQUE.
    """
    allow_ack = TextField(db_column='ALLOW_ACK')
    check_code = IntegerField(db_column='CHECK_CODE', unique=True)
    check_description = TextField(db_column='CHECK_DESCRIPTION', null=True)
    check_message = TextField(db_column='CHECK_MESSAGE')
    check_module = TextField(db_column='CHECK_MODULE', null=True)
    check_sql = TextField(db_column='CHECK_SQL')
    check_type = TextField(db_column='CHECK_TYPE')
    created_by = IntegerField(db_column='CREATED_BY')
    created_date = TextField(db_column='CREATED_DATE')
    fishery = IntegerField(db_column='FISHERY_ID', null=True)
    fixed_gear_type = IntegerField(db_column='FIXED_GEAR_TYPE', null=True)
    modified_by = IntegerField(db_column='MODIFIED_BY', null=True)
    modified_date = TextField(db_column='MODIFIED_DATE', null=True)
    program = IntegerField(db_column='PROGRAM_ID', null=True)
    status = IntegerField(db_column='STATUS')
    status_optecs = IntegerField(db_column='STATUS_OPTECS', default=1, null=False)  # FIELD-2100: to disable in optecs
    testing_notes = TextField(db_column='TESTING_NOTES', null=True)
    testing_status = TextField(db_column='TESTING_STATUS', null=True)
    trawl_gear_type = IntegerField(db_column='TRAWL_GEAR_TYPE', null=True)
    trip_check_group = ForeignKeyField(db_column='TRIP_CHECK_GROUP_ID', rel_model=TripCheckGroups,
                                       to_field='trip_check_group')
    trip_check = PrimaryKeyField(db_column='TRIP_CHECK_ID')
    value_column = TextField(db_column='VALUE_COLUMN', null=True)
    debriefer_only = IntegerField(db_column='DEBRIEFER_ONLY', default=0, null=False)  # FIELD-2101: for TER filtering

    class Meta:
        db_table = 'TRIP_CHECKS'
class TripErrors(BaseModel):
    """Peewee model for TRIP_ERRORS: one row per check violation found for a
    trip, with optional references to the offending catch/specimen/etc."""
    bio_specimen = IntegerField(db_column='BIO_SPECIMEN_ID', null=True)
    bio_specimen_item = FloatField(db_column='BIO_SPECIMEN_ITEM_ID', null=True)
    catch = IntegerField(db_column='CATCH_ID', null=True)
    catch_num = FloatField(db_column='CATCH_NUM', null=True)
    dissection = FloatField(db_column='DISSECTION_ID', null=True)
    error_code = FloatField(db_column='ERROR_CODE', null=True)
    error_item = TextField(db_column='ERROR_ITEM', null=True)
    error_message = TextField(db_column='ERROR_MESSAGE', null=True)
    error_type = TextField(db_column='ERROR_TYPE', null=True)
    error_value = TextField(db_column='ERROR_VALUE', null=True)
    fishing_activity = IntegerField(db_column='FISHING_ACTIVITY_ID', null=True)
    fishing_activity_num = FloatField(db_column='FISHING_ACTIVITY_NUM', null=True)
    fishing_location = TextField(db_column='FISHING_LOCATION', null=True)
    fishing_location_id = IntegerField(db_column='FISHING_LOCATION_ID', null=True)
    length_frequency = FloatField(db_column='LENGTH_FREQUENCY_ID', null=True)
    process = IntegerField(db_column='PROCESS_ID', null=True)
    species_composition = IntegerField(db_column='SPECIES_COMPOSITION_ID', null=True)
    species_comp_item = IntegerField(db_column='SPECIES_COMP_ITEM_ID', null=True)
    species_name = TextField(db_column='SPECIES_NAME', null=True)
    trip_errors = PrimaryKeyField(db_column='TRIP_ERRORS_ID')
    # NOTE(review): plain IntegerField rather than a ForeignKeyField to Trips,
    # unlike the other models here -- presumably intentional; confirm.
    trip = IntegerField(db_column='TRIP_ID', null=True)

    class Meta:
        db_table = 'TRIP_ERRORS'
class UserProgramRoles(BaseModel):
    """Peewee model for USER_PROGRAM_ROLES: join table linking a user to a
    program role, with a unique (user, program_role) pair."""
    created_by = IntegerField(db_column='CREATED_BY', null=True)
    created_date = TextField(db_column='CREATED_DATE', null=True)
    program_role = ForeignKeyField(db_column='PROGRAM_ROLE_ID', rel_model=ProgramRoles, to_field='program_role')
    user = ForeignKeyField(db_column='USER_ID', rel_model=Users, to_field='user')
    user_program_role = PrimaryKeyField(db_column='USER_PROGRAM_ROLE_ID')

    class Meta:
        db_table = 'USER_PROGRAM_ROLES'
        # Enforce one role assignment per (user, program_role) pair.
        indexes = (
            (('user', 'program_role'), True),
        )
class VesselContacts(BaseModel):
    """Peewee model for VESSEL_CONTACTS: associates a contact with a vessel,
    with a free-text status/type."""
    contact = ForeignKeyField(db_column='CONTACT_ID', rel_model=Contacts, to_field='contact')
    contact_status = TextField(db_column='CONTACT_STATUS', null=True)
    contact_type = TextField(db_column='CONTACT_TYPE', null=True)
    vessel_contact = PrimaryKeyField(db_column='VESSEL_CONTACT_ID')
    vessel = ForeignKeyField(db_column='VESSEL_ID', rel_model=Vessels, to_field='vessel')

    class Meta:
        db_table = 'VESSEL_CONTACTS'
class HookCounts(BaseModel):
    """Peewee model for HOOK_COUNTS: per-trip hook/gear-unit counts."""
    hook_count_id = PrimaryKeyField(db_column='HOOK_COUNT_ID')
    avg_hook_count = FloatField(db_column='AVG_HOOK_COUNT', null=True)
    hook_count = IntegerField(db_column='HOOK_COUNT', null=True)
    total_gear_units = IntegerField(db_column='TOTAL_GEAR_UNITS', null=True)
    trip = ForeignKeyField(db_column='TRIP_ID', to_field='trip', rel_model=Trips)

    class Meta:
        # Fix: removed trailing extraction artifact ("| StarcoderdataPython |")
        # that made this line syntactically invalid.
        db_table = 'HOOK_COUNTS'
"""Copyright 2009 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import fnmatch
import os
import urllib
from django.conf import settings
from graphite.compat import HttpResponse, HttpResponseBadRequest
from graphite.util import getProfile, json
from graphite.logger import log
from graphite.readers import RRDReader
from graphite.storage import STORE
from graphite.carbonlink import CarbonLink
try:
import cPickle as pickle
except ImportError:
import pickle
def index_json(request):
    """Return a JSON(P) list of every metric path known to local storage.

    Walks the whisper, ceres and (if supported) RRD directories and converts
    filesystem paths into dotted metric names.  With ``?cluster=1`` the index
    is instead merged from all configured CLUSTER_SERVERS; a fetch failure is
    logged and the partial result is returned with HTTP 500.
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)
    jsonp = queryParams.get('jsonp', False)
    cluster = queryParams.get('cluster', False)

    def find_matches():
        # Collect raw filesystem paths relative to each storage root.
        matches = []
        for root, dirs, files in os.walk(settings.WHISPER_DIR):
            root = root.replace(settings.WHISPER_DIR, '')
            for basename in files:
                if fnmatch.fnmatch(basename, '*.wsp'):
                    matches.append(os.path.join(root, basename))
        for root, dirs, files in os.walk(settings.CERES_DIR):
            root = root.replace(settings.CERES_DIR, '')
            for filename in files:
                if filename == '.ceres-node':
                    matches.append(root)
        # unlike 0.9.x, we're going to use os.walk with followlinks
        # since we require Python 2.7 and newer that supports it
        if RRDReader.supported:
            for root, dirs, files in os.walk(settings.RRD_DIR, followlinks=True):
                root = root.replace(settings.RRD_DIR, '')
                for basename in files:
                    if fnmatch.fnmatch(basename, '*.rrd'):
                        absolute_path = os.path.join(settings.RRD_DIR, root, basename)
                        (basename, extension) = os.path.splitext(basename)
                        metric_path = os.path.join(root, basename)
                        rrd = RRDReader(absolute_path, metric_path)
                        # Each RRD datasource is exposed as its own metric.
                        for datasource_name in rrd.get_datasources(absolute_path):
                            matches.append(os.path.join(metric_path, datasource_name))
        # Normalize filesystem paths into dotted metric names.
        matches = [
            m
            .replace('.wsp', '')
            .replace('.rrd', '')
            .replace('/', '.')
            .lstrip('.')
            for m in sorted(matches)
        ]
        return matches

    matches = []
    if cluster and len(settings.CLUSTER_SERVERS) >= 1:
        try:
            # Union of every cluster member's index, de-duplicated.
            matches = reduce(lambda x, y: list(set(x + y)),
                             [json.loads(urllib.urlopen('http://' + cluster_server + '/metrics/index.json').read())
                              for cluster_server in settings.CLUSTER_SERVERS])
        except IOError:
            # BUG FIX: the old code caught "urllib.URLError", but urllib has
            # no URLError attribute (that class lives in urllib2), so the
            # handler itself raised AttributeError.  In Python 2,
            # urllib.urlopen signals network failures with IOError.
            log.exception()
            return json_response_for(request, matches, jsonp=jsonp, status=500)
    else:
        matches = find_matches()
    return json_response_for(request, matches, jsonp=jsonp)
def find_view(request):
    """View for finding metrics matching a given pattern.

    Supported ``format`` values: ``treejson`` (UI tree), ``nodelist``
    (distinct path segment at ``position``), ``pickle`` (remote-cluster
    transport) and ``completer`` (autocomplete entries).  Any other value
    yields HTTP 400, as does a missing ``query`` parameter.
    """
    profile = getProfile(request)
    queryParams = request.GET.copy()
    queryParams.update(request.POST)
    format = queryParams.get('format', 'treejson')
    local_only = int( queryParams.get('local', 0) )
    wildcards = int( queryParams.get('wildcards', 0) )
    fromTime = int( queryParams.get('from', -1) )
    untilTime = int( queryParams.get('until', -1) )
    nodePosition = int( queryParams.get('position', -1) )
    jsonp = queryParams.get('jsonp', False)
    # -1 is the sentinel for "no time bound".
    if fromTime == -1:
        fromTime = None
    if untilTime == -1:
        untilTime = None
    automatic_variants = int( queryParams.get('automatic_variants', 0) )
    try:
        query = str( queryParams['query'] )
    except KeyError:
        # FIX: was a bare "except:"; only a missing parameter is expected
        # here (Django's MultiValueDictKeyError subclasses KeyError).
        return HttpResponseBadRequest(content="Missing required parameter 'query'",
                                      content_type='text/plain')
    if '.' in query:
        base_path = query.rsplit('.', 1)[0] + '.'
    else:
        base_path = ''
    if format == 'completer':
        # Autocomplete: treat ".." as a wildcard segment and always match a
        # trailing prefix.
        query = query.replace('..', '*.')
        if not query.endswith('*'):
            query += '*'
        if automatic_variants:
            # Turn comma-separated alternatives into {a,b} variant syntax.
            query_parts = query.split('.')
            for i, part in enumerate(query_parts):
                if ',' in part and '{' not in part:
                    query_parts[i] = '{%s}' % part
            query = '.'.join(query_parts)
    try:
        matches = list( STORE.find(query, fromTime, untilTime, local=local_only) )
    except Exception:
        # FIX: narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # propagate untouched; everything else is logged then re-raised.
        log.exception()
        raise
    log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
    matches.sort(key=lambda node: node.name)
    log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
    if format == 'treejson':
        content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
        response = json_response_for(request, content, jsonp=jsonp)
    elif format == 'nodelist':
        content = nodes_by_position(matches, nodePosition)
        response = json_response_for(request, content, jsonp=jsonp)
    elif format == 'pickle':
        content = pickle_nodes(matches)
        response = HttpResponse(content, content_type='application/pickle')
    elif format == 'completer':
        results = []
        for node in matches:
            node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
            if not node.is_leaf:
                # Branches get a trailing dot so the completer keeps expanding.
                node_info['path'] += '.'
            results.append(node_info)
        if len(results) > 1 and wildcards:
            wildcardNode = {'name' : '*'}
            results.append(wildcardNode)
        response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)
    else:
        return HttpResponseBadRequest(
            content="Invalid value for 'format' parameter",
            content_type='text/plain')
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
def expand_view(request):
    """View for expanding a pattern into matching metric paths."""
    params = request.GET.copy()
    params.update(request.POST)
    local_only = int(params.get('local', 0))
    group_by_expr = int(params.get('groupByExpr', 0))
    leaves_only = int(params.get('leavesOnly', 0))
    jsonp = params.get('jsonp', False)

    # Map each query pattern to the set of matching metric paths.
    results = {}
    for pattern in params.getlist('query'):
        matched = set()
        for node in STORE.find(pattern, local=local_only):
            if node.is_leaf or not leaves_only:
                matched.add(node.path)
        results[pattern] = matched

    # Sets aren't json-friendly, so convert to sorted lists -- either per
    # query (groupByExpr) or as one flat, merged list.
    if group_by_expr:
        for pattern in results:
            results[pattern] = sorted(results[pattern])
    else:
        merged = set()
        for paths in results.values():
            merged |= paths
        results = sorted(merged)

    response = json_response_for(request, {'results': results}, jsonp=jsonp)
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response
def get_metadata_view(request):
    """Fetch metadata value ``key`` for each requested metric via CarbonLink.

    Failures are reported per-metric in the JSON result rather than failing
    the whole request.
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)
    key = queryParams.get('key')
    metrics = queryParams.getlist('metric')
    jsonp = queryParams.get('jsonp', False)
    results = {}
    for metric in metrics:
        try:
            results[metric] = CarbonLink.get_metadata(metric, key)
        except Exception:
            # FIX: narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            log.exception()
            results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
    return json_response_for(request, results, jsonp=jsonp)
def set_metadata_view(request):
    """Set metric metadata via CarbonLink.

    GET sets a single (metric, key, value); POST accepts a JSON list of
    such operations (either as the request body or an ``operations`` form
    field).  Failures are reported per-metric in the result.
    """
    results = {}
    if request.method == 'GET':
        metric = request.GET['metric']
        key = request.GET['key']
        value = request.GET['value']
        try:
            results[metric] = CarbonLink.set_metadata(metric, key, value)
        except Exception:
            # FIX: narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            log.exception()
            results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
    elif request.method == 'POST':
        if request.META.get('CONTENT_TYPE') == 'application/json':
            operations = json.loads( request.body )
        else:
            operations = json.loads( request.POST['operations'] )
        for op in operations:
            metric = None
            try:
                metric, key, value = op['metric'], op['key'], op['value']
                results[metric] = CarbonLink.set_metadata(metric, key, value)
            except Exception:
                # FIX: narrowed from a bare "except:" (see above).  metric may
                # still be None if the operation dict was malformed.
                log.exception()
                if metric:
                    results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
    else:
        results = dict(error='Invalid request method')
    return json_response_for(request, results)
def tree_json(nodes, base_path, wildcards=False):
    """Convert find() results into the tree structure the web UI consumes.

    Branches are listed before leaves; duplicate names are emitted once.
    An optional leading '*' wildcard entry is added when requested.
    """
    branch_attrs = {
        'allowChildren': 1,
        'expandable': 1,
        'leaf': 0,
    }
    leaf_attrs = {
        'allowChildren': 0,
        'expandable': 0,
        'leaf': 1,
    }
    results = []

    # Add a wildcard node if appropriate.
    if len(nodes) > 1 and wildcards:
        wildcard = {'text' : '*', 'id' : base_path + '*'}
        if any(not n.is_leaf for n in nodes):
            wildcard.update(branch_attrs)
        else:
            wildcard.update(leaf_attrs)
        results.append(wildcard)

    # Now add the matching children, branches first, skipping duplicates.
    seen = set()
    branches = []
    leaves = []
    for node in nodes:
        if node.name in seen:
            continue
        seen.add(node.name)
        entry = {
            'text' : urllib.unquote_plus(str(node.name)),
            'id' : base_path + str(node.name),
        }
        if node.is_leaf:
            entry.update(leaf_attrs)
            leaves.append(entry)
        else:
            entry.update(branch_attrs)
            branches.append(entry)
    results.extend(branches)
    results.extend(leaves)
    return results
def nodes_by_position(matches, position):
    """Collect the distinct dotted-path segment at *position* across matches."""
    segments = {metric.path.split('.')[position] for metric in matches}
    return {'nodes': sorted(segments)}
def pickle_nodes(nodes):
    """Serialize node metadata with pickle (highest protocol).

    Each node contributes its path and leaf flag; leaves also carry their
    intervals.
    """
    payload = []
    for node in nodes:
        entry = {'path': node.path, 'is_leaf': node.is_leaf}
        if node.is_leaf:
            entry['intervals'] = node.intervals
        payload.append(entry)
    return pickle.dumps(payload, protocol=-1)
def json_response_for(request, data, content_type='application/json',
                      jsonp=False, **kwargs):
    """Build an HttpResponse carrying *data* as JSON (or JSONP).

    Non-ASCII output is emitted only when the client's Accept header is not
    plain application/json, in which case the charset is made explicit.
    NOTE(review): the jsonp callback name is interpolated unescaped --
    potential JSONP callback injection if it comes from untrusted input.
    """
    accept = request.META.get('HTTP_ACCEPT', 'application/json')
    ensure_ascii = (accept == 'application/json')
    body = json.dumps(data, ensure_ascii=ensure_ascii)
    if jsonp:
        body = "%s(%s)" % (jsonp, body)
        content_type = 'text/javascript'
    if not ensure_ascii:
        content_type += ';charset=utf-8'
    return HttpResponse(body, content_type=content_type, **kwargs)
class Node:
    """A node of a B+ tree.

    ``values`` holds this node's sorted search values; ``keys[i]`` is the
    list of record keys filed under ``values[i]``.  ``pokemon`` is opaque
    payload supplied by the caller.
    (Fix: removed dataset-extraction artifacts that garbled the class header
    and the final line.)
    """

    def __init__(self, order, pokemon):
        self.order = order          # maximum fan-out of the node
        self.values = []            # sorted search values
        self.keys = []              # keys[i]: record keys for values[i]
        self.nextKey = None         # next leaf in the linked-leaf chain
        self.parent = None
        self.check_leaf = False     # True when this node is a leaf
        self.pokemon = pokemon

    # Insert at the leaf
    def insert_at_leaf(self, leaf, value, key):
        """Insert (value, key) into this node, keeping ``values`` sorted.

        A duplicate value appends *key* to the existing key list.
        NOTE(review): the ``leaf`` argument is never used -- the method
        always operates on ``self``; kept for interface compatibility.
        """
        if (self.values):
            temp1 = self.values
            for i in range(len(temp1)):
                if (value == temp1[i]):
                    # Duplicate: file the key under the existing slot.
                    self.keys[i].append(key)
                    break
                elif (value < temp1[i]):
                    # Insertion point found: splice value and key list in.
                    self.values = self.values[:i] + [value] + self.values[i:]
                    self.keys = self.keys[:i] + [[key]] + self.keys[i:]
                    break
                elif (i + 1 == len(temp1)):
                    # Larger than everything present: append at the end.
                    self.values.append(value)
                    self.keys.append([key])
                    break
        else:
            self.values = [value]
            self.keys = [[key]]
from unittest import TestCase
import pytest
from collections import defaultdict
from .coarser import coarse_local
class CoarserTest(TestCase):
    """Unit tests for coarse_local(), which merges people who always appear
    together in the same cliques into coarse nodes that fit a table size.

    Fixes applied: deprecated ``assertEquals`` aliases replaced with
    ``assertEqual`` (the aliases are removed in Python 3.12), and the
    adjacent-string-literal typo ``'don''t change'`` (which rendered as
    "dont change") corrected.
    """

    def test_coarse_local_extra_seat(self):
        """A 2-person clique coarsens even though it leaves a spare seat."""
        community = {
            0: [0, 5, 10],
            1: [1, 2, 3, 4, 6, 7],
            2: [8, 9]
        }
        table_size = 3
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        # there appears to be an additional seat since
        # 1 of the nodes only contains 2 persons
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertTrue(8 in coarse_to_original.keys())
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_local_simple(self):
        """Full-table cliques coarsen cleanly."""
        community = {
            0: [0, 1, 2],
            1: [3, 4, 5, 6, 7, 8],
            2: [9, 10, 11]
        }
        table_size = 3
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertTrue(9 in coarse_to_original.keys())
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_local_does_not_remove_multiple_connected_person(self):
        """A person shared by several cliques must stay in every clique."""
        community = {
            0: [0, 1, 2],
            1: [3, 4, 5, 6, 7, 2],
            2: [9, 10, 2]
        }
        table_size = 3
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        # that 2 is still present in each clique
        for key, values in new_community.items():
            self.assertTrue(2 in values)
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertTrue(9 in coarse_to_original.keys())
        self.assertEqual(coarse_to_original[2], [2])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_local_handles_single_person_in_node(self):
        """A leftover single person maps to a singleton coarse node."""
        community = {
            0: [0, 1, 2, 3]
        }
        table_size = 3
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertTrue(3 in coarse_to_original.keys())
        self.assertEqual(coarse_to_original[3], [3])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_local_handles_nodes_connected_to_persons(self):
        """Persons bridging otherwise-disjoint cliques stay uncoarsened."""
        community = {
            0: [0, 1, 2, 3],
            1: [0, 1, 2, 4],
            2: [0, 1, 2, 5],
            3: [4, 7, 8, 9],
            4: [5, 7, 8, 9],
            5: [6, 7, 8, 9]
        }
        table_size = 5
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        print(new_community)
        print(coarse_to_original)
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertTrue(7 in coarse_to_original.keys())
        self.assertEqual(coarse_to_original[3], [3])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_local_handles_repeated_occurence_but_not_always(self):
        """Only persons who co-occur in *every* shared clique are merged."""
        community = {
            0: [0, 1, 2, 3, 5],
            1: [0, 1, 2, 4, 5],
            3: [4, 7, 8, 9],
            4: [5, 7, 8, 9],
            5: [5, 6, 7, 8, 10]
        }
        table_size = 5
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        print(new_community)
        print(coarse_to_original)
        print(new_table_sz)
        print(presolved_facts)
        # self.assertCountEqual(new_table_sz, [4, 5, 5])
        self.assertTrue(0 in coarse_to_original.keys())
        self.assertEqual([0, 1, 2], coarse_to_original[0])
        self.assertTrue(7 in coarse_to_original.keys())
        self.assertEqual([7, 8], coarse_to_original[7])
        self.assertEqual(coarse_to_original[3], [3])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")
        # self.assertTrue(('in_table', 3, 0) in presolved_facts)
        # TODO is this wrong?
        # self.assertTrue(('in_table', 6, 0) in presolved_facts)

    def test_coarse_break_up_clique_if_cannot_fit(self):
        """A merged group larger than the table is split into fitting pieces."""
        community = {
            0: [0, 1, 2, 3],
            1: [2, 3, 4, 5]
        }
        table_size = 3
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size)
        print(new_community)
        print(coarse_to_original)
        self.assertEqual([0, 1], coarse_to_original[0])
        self.assertEqual([2, 3], coarse_to_original[2])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_ignore_cliques_with_weight_eq_zero(self):
        """Cliques weighted zero contribute no merges."""
        community = {
            0: [0, 1, 2, 3],
            1: [2, 3, 4, 5]
        }
        table_size = 3
        clique_weights = {
            0: 1,
            1: 0
        }
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size, clique_weights)
        print(new_community)
        print(coarse_to_original)
        self.assertEqual([0, 1], coarse_to_original[0])
        self.assertEqual([2], coarse_to_original[2])
        self.assertEqual([3], coarse_to_original[3])
        self.assertEqual([4], coarse_to_original[4])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_ignore_cliques_with_weight_lt_zero(self):
        """Negatively weighted cliques contribute no merges either."""
        community = {
            0: [0, 1, 2, 3],
            1: [2, 3, 4, 5]
        }
        table_size = 3
        clique_weights = {
            0: 1,
            1: -1
        }
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size, clique_weights)
        print(new_community)
        print(coarse_to_original)
        self.assertEqual([0, 1], coarse_to_original[0])
        self.assertEqual([2], coarse_to_original[2])
        self.assertEqual([3], coarse_to_original[3])
        self.assertEqual([4], coarse_to_original[4])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_doesnt_leave_stray_member(self):
        """Every original member must appear in some coarse node."""
        community = {
            0: [0, 1, 2, 3],
            1: [2, 3, 4]
        }
        table_size = 3
        clique_weights = {
            0: 1,
            1: 1
        }
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size, clique_weights)
        print(new_community)
        print(coarse_to_original)
        self.assertEqual([0, 1], coarse_to_original[0])
        self.assertEqual([2], coarse_to_original[2])
        self.assertEqual([3], coarse_to_original[3])
        self.assertEqual([4], coarse_to_original[4])
        self.assertEqual(community.keys(), new_community.keys(), "communities don't change")

    def test_coarse_doesnt_add_one_member_to_multiple(self):
        """No person may be assigned to more than one (or zero) coarse nodes."""
        community = {"2": [1, 2, 3, 4, 5, 6, 7, 1, 8, 9, 10], "3": [11],
                     "7": [7, 1, 12],
                     "8": [7, 1, 1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],
                     "9": [32, 33, 34, 35, 33, 36, 1, 37], "10": [38, 39, 40, 1, 41], "11": [7, 38, 39, 1, 1, 40, 42],
                     "12": [1, 1, 13, 15, 18, 19, 20, 22, 23, 24, 25, 28, 30, 43, 21, 44, 45, 24, 46, 13, 29, 47, 14, 48],
                     "13": [32, 34, 35, 33, 36, 1, 37, 16, 49, 50, 1, 33], "14": [33, 1, 51],
                     "15": [33, 36, 1, 1, 51, 52],
                     "16": [1, 1, 13, 15, 16, 17, 19, 22, 24, 44, 45, 24, 46, 13, 14, 48, 53, 54, 14, 55, 56, 45, 57, 17, 46],
                     "17": [7, 1, 28, 24, 46, 13, 14, 54, 56, 45, 57, 58, 59, 60, 57, 61, 62, 63, 48, 64, 65, 23, 66, 67, 46],
                     "18": [7, 17, 22, 23, 44, 24, 46, 48, 53, 45, 64]}
        table_size = 10
        clique_weights = defaultdict(lambda: 1)
        new_table_sz, new_community, coarse_to_original, presolved_facts = \
            coarse_local(community, table_size, clique_weights)
        print(new_community)
        print(coarse_to_original)
        # Count how many coarse nodes each original person landed in.
        number_of_times_each_person_appears = defaultdict(lambda: 0)
        for node, members in coarse_to_original.items():
            for member in members:
                number_of_times_each_person_appears[member] += 1
        members_appearing_more_than_once = []
        members_appearing_less_then_once = []
        for member, count in number_of_times_each_person_appears.items():
            if count > 1:
                members_appearing_more_than_once.append(member)
            elif count < 1:
                members_appearing_less_then_once.append(member)
        self.assertEqual(len(members_appearing_more_than_once), 0)
        self.assertEqual(len(members_appearing_less_then_once), 0)
# pipeline/cf_init_df/main.py
"""
Cloud Function which triggers Dataflow job when file is uploaded to Cloud Storage bucket
"""
import os
import json
import datetime
import logging
from googleapiclient.discovery import build
import google.auth
# Deployment configuration injected through Cloud Function environment
# variables; a missing variable fails fast at import time.
STAGE=os.environ['STAGE']
GCP_PROJECT = os.environ['GCP_PROJECT']
DF_TEMPLATE = os.environ['DF_TEMPLATE']
GCS_BUCKET = os.environ['GCS_BUCKET']
BQ_TARGET_DATASET = os.environ['BQ_TARGET_DATASET']
def init_df(bucket: str, input_filename: str):
    """Launch a Dataflow job from the configured template for one input file.

    :param bucket: name of bucket (without gs://)
    :param input_filename: name of object (file) in bucket, expected format
        is for example planet-latest-lines.geojson.csv
    """
    # Derive the BigQuery table name from the filename's last dash segment,
    # e.g. "planet-latest-lines.geojson.csv" -> "lines".
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    bq_table = input_filename.split('.')[0].split('-')[-1]
    job_name = '{}-{}_processing_{}'.format(STAGE, bq_table, timestamp)
    input_path = f'gs://{bucket}/{input_filename}'
    bq_path = f"{GCP_PROJECT}:{BQ_TARGET_DATASET}.{bq_table}"

    launch_body = {
        "jobName": f"{job_name}",
        "parameters": {
            "input": f"{input_path}",
            "bq_destination": bq_path,
        },
        "environment": {
            "tempLocation": f"{GCS_BUCKET}/df_temp",
        },
    }

    credentials, _project = google.auth.default()
    dataflow = build('dataflow', 'v1b3', credentials=credentials, cache_discovery=False)
    launch = dataflow.projects().templates().launch(
        projectId=GCP_PROJECT, gcsPath=DF_TEMPLATE, body=launch_body)
    logging.info(launch.execute())
def main(data, context):
    """Cloud Function entry point: trigger the Dataflow job for the uploaded object."""
    bucket, filename = data['bucket'], data['name']
    full_path = "gs://{}/{}".format(bucket, filename)
    logging.info("triggered by file: {}".format(full_path))
    init_df(bucket, filename)
# Copyright 2015 Tigera, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import netaddr
import logging
import yaml
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost, CLUSTER_STORE_DOCKER_OPTIONS
from tests.st.utils.exceptions import CommandExecError
from tests.st.utils.network import NETWORKING_CNI, NETWORKING_LIBNETWORK
from tests.st.utils.utils import assert_profile, \
assert_number_endpoints, get_profile_name
# Commands run on each test host after Docker starts: preload the images the
# tests need so runs don't pull from a registry.
POST_DOCKER_COMMANDS = ["docker load -q -i /code/calico-node.tar",
                        "docker load -q -i /code/busybox.tar",
                        "docker load -q -i /code/workload.tar"]
_log = logging.getLogger(__name__)
class MultiHostMainline(TestBase):
    # Shared DockerHost instances, created once in setUpClass and reused by
    # every test in the class.
    host1 = None
    host2 = None
    @classmethod
    def setUpClass(cls):
        """Start the two Docker hosts (cluster-store networking) once for the class."""
        super(MultiHostMainline, cls).setUpClass()
        cls.host1 = DockerHost("host1",
                               additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                               post_docker_commands=POST_DOCKER_COMMANDS)
        cls.host2 = DockerHost("host2",
                               additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
                               post_docker_commands=POST_DOCKER_COMMANDS)
    @classmethod
    def tearDownClass(cls):
        """Tear down both Docker hosts after all tests in the class have run."""
        cls.host1.cleanup()
        cls.host2.cleanup()
        super(MultiHostMainline, cls).tearDownClass()
    def setUp(self):
        """Create per-test workloads/networks and snapshot the profiles.

        Tests mutate ``self.new_profiles`` (a deep copy); tearDown restores
        ``self.original_profiles``.
        """
        super(MultiHostMainline, self).setUp(clear_etcd=False)
        host1 = self.host1
        host2 = self.host2
        (self.n1_workloads, self.n2_workloads, self.networks) = \
            self._setup_workloads(host1, host2)
        # Get the original profiles:
        output = host1.calicoctl("get profile -o yaml")
        self.original_profiles = yaml.safe_load(output)['items']
        # Make a copy of the profiles to mess about with.
        self.new_profiles = copy.deepcopy(self.original_profiles)
    def tearDown(self):
        """Restore the original profiles, verify baseline connectivity, clean up."""
        # Remove the network sets, if present
        self.host1.calicoctl("delete globalnetworkset netset-1", raise_exception_on_failure=False)
        self.host1.calicoctl("delete globalnetworkset netset-2", raise_exception_on_failure=False)
        # Delete conntrack state for UDP connections to the workloads.
        # Otherwise a new UDP connection may be allowed where it should be
        # denied.
        self.host1.delete_conntrack_state_to_workloads("udp")
        self.host2.delete_conntrack_state_to_workloads("udp")
        # Now restore the original profile and check it all works as before
        self._apply_new_profile(self.original_profiles, self.host1)
        self.host1.calicoctl("get profile -o yaml")
        try:
            self._check_original_connectivity(self.n1_workloads, self.n2_workloads)
        finally:
            # Tidy up
            self.host1.remove_workloads()
            self.host2.remove_workloads()
            for network in self.networks:
                network.delete()
            super(MultiHostMainline, self).tearDown()
    def test_tags(self):
        """Merging the two profiles' labelsToApply opens full cross-network connectivity."""
        # Update profiles so that they each include each other's labelsToApply.
        _log.info("Profile 0 labelsToApply = %r", self.new_profiles[0]['spec']['labelsToApply'])
        _log.info("Profile 1 labelsToApply = %r", self.new_profiles[1]['spec']['labelsToApply'])
        self.new_profiles[0]['spec']['labelsToApply'].update(self.new_profiles[1]['spec']['labelsToApply'])
        self.new_profiles[1]['spec']['labelsToApply'].update(self.new_profiles[0]['spec']['labelsToApply'])
        _log.info("Merged profile 0 labelsToApply = %r", self.new_profiles[0]['spec']['labelsToApply'])
        _log.info("Merged profile 1 labelsToApply = %r", self.new_profiles[1]['spec']['labelsToApply'])
        self._apply_new_profile(self.new_profiles, self.host1)
        # Check everything can contact everything else now
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads)
    test_tags.batchnumber = 5  # shard hint for the CI test-batching runner
    def test_rules_protocol_icmp(self):
        """An allow-ICMP ingress rule in both profiles permits cross-network pings."""
        rule = {'action': 'Allow',
                'protocol': 'ICMP'}
        # The copy.deepcopy(rule) is needed to ensure that we don't
        # end up with a yaml document with a reference to the same
        # rule. While this is probably legal, it isn't main line.
        self.new_profiles[0]['spec']['ingress'].append(rule)
        self.new_profiles[1]['spec']['ingress'].append(copy.deepcopy(rule))
        self._apply_new_profile(self.new_profiles, self.host1)
        # Check everything can contact everything else now
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads,
                                 type_list=["icmp"])
    test_rules_protocol_icmp.batchnumber = 1
    def test_rules_ip_addr(self):
        """Allow cross-network traffic by listing every workload IP as a /32 source."""
        prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
        for workload in self.n1_workloads:
            ip = workload.ip
            rule = {'action': 'Allow',
                    'source':
                        {'nets': ['%s/32' % ip]}}
            prof_n2['spec']['ingress'].append(rule)
        for workload in self.n2_workloads:
            ip = workload.ip
            rule = {'action': 'Allow',
                    'source':
                        {'nets': ['%s/32' % ip]}}
            prof_n1['spec']['ingress'].append(rule)
        self._apply_new_profile(self.new_profiles, self.host1)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads)
    test_rules_ip_addr.batchnumber = 1
    def test_rules_ip_net(self):
        """Allow cross-network traffic using one spanning CIDR per network."""
        prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
        n1_ips = [workload.ip for workload in self.n1_workloads]
        n2_ips = [workload.ip for workload in self.n2_workloads]
        # Smallest CIDR covering all IPs in each network.
        n1_subnet = netaddr.spanning_cidr(n1_ips)
        n2_subnet = netaddr.spanning_cidr(n2_ips)
        rule = {'action': 'Allow',
                'source':
                    {'nets': [str(n1_subnet)]}}
        prof_n2['spec']['ingress'].append(rule)
        rule = {'action': 'Allow',
                'source':
                    {'nets': [str(n2_subnet)]}}
        prof_n1['spec']['ingress'].append(rule)
        self._apply_new_profile(self.new_profiles, self.host1)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads)
    test_rules_ip_net.batchnumber = 1
    def test_rules_source_ip_nets(self):
        """A single rule carrying multiple /32 source nets allows the other network."""
        # Add a rule to each profile that allows traffic from all the workloads in the *other*
        # network (which would normally be blocked).
        prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
        n1_ips = [str(workload.ip) + "/32" for workload in self.n1_workloads]
        n2_ips = [str(workload.ip) + "/32" for workload in self.n2_workloads]
        rule = {'action': 'Allow',
                'source': {'nets': n1_ips}}
        prof_n2['spec']['ingress'].append(rule)
        rule = {'action': 'Allow',
                'source': {'nets': n2_ips}}
        prof_n1['spec']['ingress'].append(rule)
        self._apply_new_profile(self.new_profiles, self.host1)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads)
    test_rules_source_ip_nets.batchnumber = 4
    def test_rules_source_ip_sets(self):
        """Test global network sets end-to-end."""
        # Add a rule to each profile that allows traffic from all the workloads in the *other*
        # network by means of a network set (which would normally be blocked).
        prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
        n1_ips = [str(workload.ip) + "/32" for workload in self.n1_workloads]
        n2_ips = [str(workload.ip) + "/32" for workload in self.n2_workloads]
        # Network set covering all of network 1, labelled group=n1.
        netset = {
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'GlobalNetworkSet',
            'metadata': {
                'name': "netset-1",
                'labels': {"group": "n1"},
            },
            'spec': {
                'nets': n1_ips,
            }
        }
        self.host1.writefile("netset.yaml", yaml.dump(netset, default_flow_style=False))
        self.host1.calicoctl("create -f netset.yaml")
        # And the equivalent set for network 2, labelled group=n2.
        netset = {
            'apiVersion': 'projectcalico.org/v3',
            'kind': 'GlobalNetworkSet',
            'metadata': {
                'name': "netset-2",
                'labels': {"group": "n2"},
            },
            'spec': {
                'nets': n2_ips,
            }
        }
        self.host1.writefile("netset.yaml", yaml.dump(netset, default_flow_style=False))
        self.host1.calicoctl("create -f netset.yaml")
        # Check initial connectivity before we update the rules to reference the
        # network sets.
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads,
                                 fail_list=self.n2_workloads)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n2_workloads,
                                 fail_list=self.n1_workloads)
        # Select each network set by its label from the other profile.
        rule = {'action': 'Allow',
                'source': {'selector': 'group=="n1"'}}
        prof_n2['spec']['ingress'].append(rule)
        rule = {'action': 'Allow',
                'source': {'selector': 'group=="n2"'}}
        prof_n1['spec']['ingress'].append(rule)
        self._apply_new_profile(self.new_profiles, self.host1)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads + self.n2_workloads)
    test_rules_source_ip_sets.batchnumber = 2
    def test_rules_source_ip_nets_2(self):
        """notNets excludes the first workload of each group from an otherwise-open rule."""
        # Adjust each profile to allow traffic from all IPs in the other group but then exclude
        # one of the IPs using a notNets match.  The end result is that the first workload in
        # each group should be blocked but the other should be allowed.
        prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
        n1_ips = [str(workload.ip) + "/32" for workload in self.n1_workloads]
        n1_denied_ips = n1_ips[:1]
        _log.info("Network 1 IPs: %s; Denied IPs: %s", n1_ips, n1_denied_ips)
        n2_ips = [str(workload.ip) + "/32" for workload in self.n2_workloads]
        rule = {'action': 'Allow',
                'source': {'nets': n1_ips,
                           'notNets': n1_denied_ips}}
        prof_n2['spec']['ingress'].append(rule)
        _log.info("Profile for network 2: %s", prof_n2)
        rule = {'action': 'Allow',
                'source': {'nets': n2_ips,
                           'notNets': n2_ips[:1]}}
        prof_n1['spec']['ingress'].append(rule)
        self._apply_new_profile(self.new_profiles, self.host1)
        # Check first workload in each group cannot ping the other group.
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads[:1],
                                 fail_list=self.n2_workloads)
        self.assert_connectivity(retries=2,
                                 pass_list=self.n2_workloads[:1],
                                 fail_list=self.n1_workloads)
        # Check non-excluded workloads can all ping each other.
        self.assert_connectivity(retries=2,
                                 pass_list=self.n1_workloads[1:] + self.n2_workloads[1:])
        self.assert_connectivity(retries=2,
                                 pass_list=self.n2_workloads[1:] + self.n1_workloads[1:])
    test_rules_source_ip_nets_2.batchnumber = 4
def test_rules_dest_ip_nets(self):
    """Check destination `nets`/`notNets` matches in egress rules,
    including single-nets/multiple-notNets rules."""
    # Adjust the egress policies to drop all traffic
    prof_n1, prof_n2 = self._get_profiles(self.new_profiles)
    prof_n2['spec']['egress'] = []
    prof_n1['spec']['egress'] = []
    self._apply_new_profile(self.new_profiles, self.host1)
    self.assert_connectivity(retries=2,
                             pass_list=self.n2_workloads[:1],
                             fail_list=self.n1_workloads + self.n2_workloads[1:])
    # Add a destination whitelist to n2 that allows pods within it to reach other pods in n2.
    n2_ips = [str(workload.ip) + "/32" for workload in self.n2_workloads]
    rule = {'action': 'Allow',
            'destination': {'nets': n2_ips}}
    prof_n2['spec']['egress'] = [rule]
    self._apply_new_profile(self.new_profiles, self.host1)
    self.assert_connectivity(retries=2,
                             pass_list=self.n2_workloads,
                             fail_list=self.n1_workloads)
    # Add some rules that have a single nets entry and multiple notNets entries. These are
    # rendered a bit differently in Felix.
    n1_ips = [str(workload.ip) + "/32" for workload in self.n1_workloads]
    rule1 = {'action': 'Allow',
             'destination': {'nets': n1_ips[0:1],
                             'notNets': n1_ips[1:]}}
    rule2 = {'action': 'Allow',
             'destination': {'nets': n1_ips[1:2],
                             'notNets': n1_ips[:1]}}
    prof_n1['spec']['egress'] = [rule1, rule2]
    self._apply_new_profile(self.new_profiles, self.host1)
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads[:2],
                             fail_list=self.n1_workloads[2:])
test_rules_dest_ip_nets.batchnumber = 5
def test_rules_selector(self):
    """Check label `selector` matches: each profile applies a `net`
    label and allows ingress from the other network's label."""
    self.new_profiles[0]['spec']['labelsToApply']['net'] = 'n1'
    self.new_profiles[1]['spec']['labelsToApply']['net'] = 'n2'
    rule = {'action': 'Allow',
            'source':
                {'selector': 'net=="n2"'}}
    self.new_profiles[0]['spec']['ingress'].append(rule)
    rule = {'action': 'Allow',
            'source':
                {'selector': "net=='n1'"}}
    self.new_profiles[1]['spec']['ingress'].append(rule)
    self._apply_new_profile(self.new_profiles, self.host1)
    # With both cross-selectors in place, everything can reach everything.
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads + self.n2_workloads)
test_rules_selector.batchnumber = 5
def test_rules_tcp_port(self):
    """Check TCP destination-port rules: TCP/80 is allowed across
    networks while icmp/udp remain isolated."""
    rule = {'action': 'Allow',
            'protocol': 'TCP',
            'destination':
                {'ports': [80]}}
    # The copy.deepcopy(rule) is needed to ensure that we don't
    # end up with a yaml document with a reference to the same
    # rule. While this is probably legal, it isn't main line.
    self.new_profiles[0]['spec']['ingress'].append(rule)
    self.new_profiles[1]['spec']['ingress'].append(copy.deepcopy(rule))
    self._apply_new_profile(self.new_profiles, self.host1)
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads + self.n2_workloads,
                             type_list=['tcp'])
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads,
                             fail_list=self.n2_workloads,
                             type_list=['icmp', 'udp'])
test_rules_tcp_port.batchnumber = 5
def test_rules_udp_port(self):
    """Check UDP destination-port rules: UDP/69 is allowed across
    networks while icmp/tcp remain isolated."""
    rule = {'action': 'Allow',
            'protocol': 'UDP',
            'destination':
                {'ports': [69]}}
    # The copy.deepcopy(rule) is needed to ensure that we don't
    # end up with a yaml document with a reference to the same
    # rule. While this is probably legal, it isn't main line.
    self.new_profiles[0]['spec']['ingress'].append(rule)
    self.new_profiles[1]['spec']['ingress'].append(copy.deepcopy(rule))
    self._apply_new_profile(self.new_profiles, self.host1)
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads + self.n2_workloads,
                             type_list=['udp'])
    self.assert_connectivity(retries=2,
                             pass_list=self.n1_workloads,
                             fail_list=self.n2_workloads,
                             type_list=['icmp', 'tcp'])
test_rules_udp_port.batchnumber = 5
@staticmethod
def _get_profiles(profiles):
    """
    Sorts and returns the profiles for the networks.
    :param profiles: the list of profiles
    :return: tuple: profile for network1, profile for network2
    """
    # Index the profiles by name; a later duplicate wins, matching the
    # original scan-and-overwrite behaviour.
    by_name = {p['metadata']['name']: p for p in profiles}
    assert by_name.get("testnet1") is not None, "Could not find testnet1 profile"
    assert by_name.get("testnet2") is not None, "Could not find testnet2 profile"
    return by_name["testnet1"], by_name["testnet2"]
@staticmethod
def _apply_new_profile(new_profiles, host):
    """Refresh resourceVersions from the live datastore, then
    `calicoctl apply` *new_profiles* on *host*."""
    # Get profiles now, so we have up to date resource versions.
    output = host.calicoctl("get profile -o yaml")
    profiles_now = yaml.safe_load(output)['items']
    resource_version_map = {
        p['metadata']['name']: p['metadata']['resourceVersion']
        for p in profiles_now
    }
    _log.info("resource_version_map = %r", resource_version_map)
    # Set current resource versions in the profiles we are about to apply.
    for p in new_profiles:
        p['metadata']['resourceVersion'] = resource_version_map[p['metadata']['name']]
        # creationTimestamp from a previous read would be rejected/stale.
        if 'creationTimestamp' in p['metadata']:
            del p['metadata']['creationTimestamp']
    # Apply new profiles
    host.writefile("new_profiles",
                   yaml.dump(new_profiles, default_flow_style=False))
    host.calicoctl("apply -f new_profiles")
def _setup_workloads(self, host1, host2):
    """Create two docker networks on host1 and three workloads in each
    (spread over both hosts), verify initial isolation between the
    networks, and return (n1_workloads, n2_workloads, networks)."""
    # Create the networks on host1, but they should be usable from all
    # hosts.
    network1 = host1.create_network("testnet1")
    network2 = host1.create_network("testnet2")
    networks = [network1, network2]
    n1_workloads = []
    n2_workloads = []
    # Create two workloads on host1 and one on host2 all in network 1.
    n1_workloads.append(host2.create_workload("workload_h2n1_1",
                                              image="workload",
                                              network=network1))
    n1_workloads.append(host1.create_workload("workload_h1n1_1",
                                              image="workload",
                                              network=network1))
    n1_workloads.append(host1.create_workload("workload_h1n1_2",
                                              image="workload",
                                              network=network1))
    # Create similar workloads in network 2.
    n2_workloads.append(host1.create_workload("workload_h1n2_1",
                                              image="workload",
                                              network=network2))
    n2_workloads.append(host1.create_workload("workload_h1n2_2",
                                              image="workload",
                                              network=network2))
    n2_workloads.append(host2.create_workload("workload_h2n2_1",
                                              image="workload",
                                              network=network2))
    print "*******************"
    print "Network1 is:\n%s\n%s" % (
        [x.ip for x in n1_workloads],
        [x.name for x in n1_workloads])
    print "Network2 is:\n%s\n%s" % (
        [x.ip for x in n2_workloads],
        [x.name for x in n2_workloads])
    print "*******************"
    # Assert that endpoints are in Calico
    assert_number_endpoints(host1, 4)
    assert_number_endpoints(host2, 2)
    try:
        self._check_original_connectivity(n1_workloads, n2_workloads)
    except Exception as e:
        # Collect per-host diagnostics before letting the failure propagate.
        _log.exception(e)
        host1.log_extra_diags()
        host2.log_extra_diags()
        raise
    # Test deleting the network. It will fail if there are any
    # endpoints connected still.
    if (host1.networking == NETWORKING_LIBNETWORK or
            host2.networking == NETWORKING_LIBNETWORK):
        self.assertRaises(CommandExecError, network1.delete)
        self.assertRaises(CommandExecError, network2.delete)
    return n1_workloads, n2_workloads, networks
def _check_original_connectivity(self, n1_workloads, n2_workloads,
                                 types=None):
    """Verify the baseline isolation: network 1 workloads reach each
    other but not network 2, and vice versa, for each traffic type."""
    # Assert that workloads can communicate with each other on network
    # 1, and not those on network 2. Ping using IP for all workloads,
    # and by hostname for workloads on the same network (note that
    # a workloads own hostname does not work).
    if types is None:
        types = ['icmp', 'tcp', 'udp']
    self.assert_connectivity(retries=10,
                             pass_list=n1_workloads,
                             fail_list=n2_workloads,
                             type_list=types)
    # Repeat with network 2.
    self.assert_connectivity(pass_list=n2_workloads,
                             fail_list=n1_workloads,
                             type_list=types)
| StarcoderdataPython |
4814462 | import factory
from django.contrib.auth import get_user_model, models
from factory import DjangoModelFactory, Faker, Sequence
from factory.fuzzy import FuzzyInteger
from va_explorer.va_data_management.models import Location, VerbalAutopsy, VaUsername
User = get_user_model()
# Factory for django.contrib.auth Permission objects.
class PermissionFactory(DjangoModelFactory):
    class Meta:
        model = models.Permission
# Factory for auth Groups; optionally attaches permissions passed as
# GroupFactory(permissions=[...]).
class GroupFactory(DjangoModelFactory):
    class Meta:
        model = models.Group

    name = Sequence(lambda n: "Group #%s" % n)

    @factory.post_generation
    def permissions(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return

        if extracted:
            # A list of permissions were passed in, use them
            for permission in extracted:
                self.permissions.add(permission)
# Factory for Location records; builds a root "province" node by default.
class LocationFactory(DjangoModelFactory):
    class Meta:
        model = Location

    # Create a root node by default
    name = Faker("city")
    depth = 1
    numchild = 0
    location_type = "province"
    path = "0001"
class LocationFacilityFactory(LocationFactory):
    """Facility-type Location.

    Consistency fix: this previously duplicated every attribute of
    LocationFactory verbatim (model, name, depth, numchild, path) and
    differed only in ``location_type``.  Subclassing LocationFactory —
    the same idiom FacilityFactory below already uses — keeps the
    attributes in one place while producing identical objects.
    """
    location_type = "facility"
class VerbalAutopsyFactory(DjangoModelFactory):
    """Factory for VerbalAutopsy records attached to a facility Location."""
    class Meta:
        model = VerbalAutopsy

    # Id10xxx are VA questionnaire fields; presumably name / date-known /
    # place-of-death — confirm against the VA form definition.
    Id10007 = "Example Name"
    Id10023 = "dk"
    Id10058 = "hospital"
    location = factory.SubFactory(LocationFacilityFactory)
    username = ""
class UserFactory(DjangoModelFactory):
    """Factory for the project User model.

    Groups and location restrictions can be attached at creation time:
    ``UserFactory(groups=[...], location_restrictions=[...])``.
    """
    class Meta:
        model = User

    email = Faker("email")
    name = Faker("name")
    has_valid_password = True

    # See: https://factoryboy.readthedocs.io/en/latest/recipes.html
    @factory.post_generation
    def groups(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return

        if extracted:
            # A list of groups were passed in, use them
            for group in extracted:
                self.groups.add(group)

    @factory.post_generation
    def location_restrictions(self, create, extracted, **kwargs):
        # BUG FIX: the signature used `*kwargs` (a positional var-arg named
        # "kwargs"); post_generation hooks receive extra parameters as
        # keyword arguments, so this must be `**kwargs` like `groups` above.
        if not create:
            # Simple build, do nothing.
            return

        if extracted:
            # A list of locations were passed in, use them
            for location in extracted:
                self.location_restrictions.add(location)
# User variant created without a valid password.
class NewUserFactory(UserFactory):
    has_valid_password = False
# Superuser variant of UserFactory.
class AdminFactory(UserFactory):
    is_superuser = True
# Non-superuser; the data-manager role is presumably conferred via group
# membership at the call site — confirm with callers.
class DataManagerFactory(UserFactory):
    is_superuser = False
# Non-superuser; role semantics come from groups assigned by the caller.
class DataViewerFactory(UserFactory):
    is_superuser = False
# Non-superuser; pair with FieldWorkerGroupFactory for the field-worker role.
class FieldWorkerFactory(UserFactory):
    is_superuser = False
# Group with the fixed name used for field workers.
class FieldWorkerGroupFactory(GroupFactory):
    name = "Field Workers"
# Group with the fixed name used for admins.
class AdminGroupFactory(GroupFactory):
    name = "Admins"
# NOTE(review): produces the same objects as LocationFacilityFactory above;
# consider consolidating the two.
class FacilityFactory(LocationFactory):
    location_type = "facility"
class VaUsernameFactory(DjangoModelFactory):
    """Factory for VaUsername records with a random user name."""
    class Meta:
        model = VaUsername

    va_username = Faker("user_name")
| StarcoderdataPython |
1746691 | from qira_base import *
import qira_config
import os
import sys
import time
import base64
import json
sys.path.append(qira_config.BASEDIR+"/static2")
import model
def socket_method(func):
    """Decorator for socket.io handlers (Python 2 module): drops calls
    with any None positional arg, logs calls slower than 50ms, and
    catches exceptions so one bad request cannot kill the server."""
    def func_wrapper(*args, **kwargs):
        # before things are initted in the js, we get this
        for i in args:
            if i == None:
                #print "BAD ARGS TO %-20s" % (func.func_name), "with",args
                return
        try:
            start = time.time()
            ret = func(*args, **kwargs)
            tm = (time.time() - start) * 1000
            # print slow calls, slower than 50ms
            if tm > 50 or qira_config.WEBSOCKET_DEBUG:
                print "SOCKET %6.2f ms in %-20s with" % (tm, func.func_name), args
            return ret
        except Exception, e:
            # Swallow after logging: the handler simply returns nothing.
            print "ERROR",e,"in",func.func_name,"with",args
    return func_wrapper
import qira_socat
import time
import qira_analysis
import qira_log
LIMIT = 0
from flask import Flask, Response, redirect, request
from flask_socketio import SocketIO, emit
# http://stackoverflow.com/questions/8774958/keyerror-in-module-threading-after-a-successful-py-test-run
import threading
import sys
if 'threading' in sys.modules:
del sys.modules['threading']
import gevent
import gevent.socket
import gevent.monkey
# Patch blocking stdlib calls so they cooperate with gevent's event loop.
gevent.monkey.patch_all()
# done with that

app = Flask(__name__)
#app.config['DEBUG'] = True
socketio = SocketIO(app)
# ***** middleware moved here *****
def push_trace_update(i):
    """Push trace *i*'s picture (if any) and strace to all clients and
    clear its needs_update flag."""
    t = program.traces[i]
    if t.picture != None:
        #print t.forknum, t.picture
        socketio.emit('setpicture', {"forknum":t.forknum, "data":t.picture,
                                     "minclnum":t.minclnum, "maxclnum":t.maxclnum}, namespace='/qira')
    socketio.emit('strace', {'forknum': t.forknum, 'dat': t.strace}, namespace='/qira')
    t.needs_update = False
def push_updates(full = True):
    """Broadcast pmaps, maxclnum and arch registers to all clients;
    when *full*, also push every trace's picture and strace."""
    socketio.emit('pmaps', program.get_pmaps(), namespace='/qira')
    socketio.emit('maxclnum', program.get_maxclnum(), namespace='/qira')
    socketio.emit('arch', list(program.tregs), namespace='/qira')
    if not full:
        return
    for i in program.traces:
        push_trace_update(i)
def mwpoll():
    """One polling pass: pick up newly created trace files, refresh
    straces for traces whose DB changed, and push pending updates."""
    # poll for new traces, call this every once in a while
    for i in os.listdir(qira_config.TRACE_FILE_BASE):
        # Names containing "_" are auxiliary files (e.g. "_mods"), not traces.
        if "_" in i:
            continue
        i = int(i)
        if i not in program.traces:
            program.add_trace(qira_config.TRACE_FILE_BASE+str(i), i)
    did_update = False
    # poll for updates on existing
    for tn in program.traces:
        if program.traces[tn].db.did_update():
            t = program.traces[tn]
            t.read_strace_file()
            socketio.emit('strace', {'forknum': t.forknum, 'dat': t.strace}, namespace='/qira')
            did_update = True
        # trace specific stuff
        if program.traces[tn].needs_update:
            push_trace_update(tn)
    if did_update:
        program.read_asm_file()
        push_updates(False)
def mwpoller():
    """Background thread body: run mwpoll() five times a second, forever."""
    while 1:
        time.sleep(0.2)
        mwpoll()
# ***** after this line is the new server stuff *****
@socketio.on('forkat', namespace='/qira')
@socket_method
def forkat(forknum, clnum, pending):
    """Start a new run forked from (forknum, clnum), first writing any
    *pending* register/memory modifications to a "_mods" log that the
    child picks up."""
    global program
    print "forkat",forknum,clnum,pending
    REGSIZE = program.tregs[1]
    dat = []
    for p in pending:
        daddr = fhex(p['daddr'])
        ddata = fhex(p['ddata'])
        if len(p['ddata']) > 4:
            # ugly hack
            dsize = REGSIZE
        else:
            dsize = 1
        flags = qira_log.IS_VALID | qira_log.IS_WRITE
        # Addresses below 0x1000 are treated as register file offsets.
        if daddr >= 0x1000:
            flags |= qira_log.IS_MEM
        flags |= dsize*8
        dat.append((daddr, ddata, clnum-1, flags))
    next_run_id = qira_socat.get_next_run_id()
    if len(dat) > 0:
        qira_log.write_log(qira_config.TRACE_FILE_BASE+str(next_run_id)+"_mods", dat)
    if args.server:
        qira_socat.start_bindserver(program, qira_config.FORK_PORT, forknum, clnum)
    else:
        # Child process re-executes the target with fork coordinates.
        if os.fork() == 0:
            program.execqira(["-qirachild", "%d %d %d" % (forknum, clnum, next_run_id)])
@socketio.on('deletefork', namespace='/qira')
@socket_method
def deletefork(forknum):
    """Delete a fork's trace file, drop it from the program, and push
    refreshed state to clients."""
    global program
    print "deletefork", forknum
    os.unlink(qira_config.TRACE_FILE_BASE+str(int(forknum)))
    del program.traces[forknum]
    push_updates()
@socketio.on('doslice', namespace='/qira')
@socket_method
def slice(forknum, clnum):
    """Compute the slice for (forknum, clnum) via qira_analysis and
    emit it to the requesting client."""
    # NOTE(review): this handler name shadows the builtin `slice` within
    # this module.
    trace = program.traces[forknum]
    data = qira_analysis.slice(trace, clnum)
    print "slice",forknum,clnum, data
    emit('slice', forknum, data);
@socketio.on('doanalysis', namespace='/qira')
@socket_method
def analysis(forknum):
    """Generate the vertical-timeline picture for a fork and send it,
    if the analysis produced one."""
    trace = program.traces[forknum]
    data = qira_analysis.get_vtimeline_picture(trace)
    if data != None:
        emit('setpicture', {"forknum":forknum, "data":data})
@socketio.on('connect', namespace='/qira')
@socket_method
def connect():
    """A client connected: push the full current state to it."""
    global program
    print "client connected", program.get_maxclnum()
    push_updates()
@socketio.on('getclnum', namespace='/qira')
@socket_method
def getclnum(forknum, clnum, types, limit):
    """Send up to *limit* changes at *clnum* whose type is in *types*,
    with address/data hex-encoded for the JS client."""
    trace = program.traces[forknum]
    ret = []
    for c in trace.db.fetch_changes_by_clnum(clnum, LIMIT):
        if c['type'] not in types:
            continue
        # Copy before mutating: the DB row may be cached/shared.
        c = c.copy()
        c['address'] = ghex(c['address'])
        c['data'] = ghex(c['data'])
        ret.append(c)
        if len(ret) >= limit:
            break
    emit('clnum', ret)
@socketio.on('getchanges', namespace='/qira')
@socket_method
def getchanges(forknum, address, typ, cview, cscale, clnum):
    """For each fork (all forks when forknum == -1), send the clnums at
    which *address* was accessed with type *typ* inside the view window
    [cview[0], cview[1]].  Result sets larger than 50 are thinned to the
    view scale, always keeping a few entries around *clnum*."""
    if forknum != -1 and forknum not in program.traces:
        return
    address = fhex(address)
    if forknum == -1:
        forknums = program.traces.keys()
    else:
        forknums = [forknum]
    ret = {}
    for forknum in forknums:
        db = program.traces[forknum].db.fetch_clnums_by_address_and_type(address, chr(ord(typ[0])), cview[0], cview[1], LIMIT)
        # send the clnum and the bunch closest on each side
        if len(db) > 50:
            send = set()
            bisect = 0
            last = None
            cnt = 0
            for cl in db:
                # Track the index of the last entry at or before clnum.
                if cl <= clnum:
                    bisect = cnt
                cnt += 1
                # Skip entries closer together than one view pixel.
                if last != None and (cl - last) < cscale:
                    continue
                send.add(cl)
                last = cl
            add = db[max(0,bisect-4):min(len(db), bisect+5)]
            #print bisect, add, clnum
            for tmp in add:
                send.add(tmp)
            ret[forknum] = list(send)
        else:
            ret[forknum] = db
    emit('changes', {'type': typ, 'clnums': ret})
@socketio.on('navigatefunction', namespace='/qira')
@socket_method
def navigatefunction(forknum, clnum, start):
    """Walk the trace depth map from *clnum* toward the start
    (start=True) or end of the current call depth and tell the client
    to jump there."""
    trace = program.traces[forknum]
    myd = trace.dmap[clnum]
    ret = clnum
    while 1:
        # Stop when the depth drops below the starting depth.
        if trace.dmap[clnum] == myd-1:
            break
        ret = clnum
        if start:
            clnum -= 1
        else:
            clnum += 1
        # Clamp at the ends of the trace.
        if clnum == trace.minclnum or clnum == trace.maxclnum:
            ret = clnum
            break
    emit('setclnum', {'forknum': forknum, 'clnum': ret})
@socketio.on('getinstructions', namespace='/qira')
@socket_method
def getinstructions(forknum, clnum, clstart, clend):
    """Send the instructions in the window [clstart, clend] around
    *clnum*, annotated with names/comments from static analysis and
    with membership in the slice at *clnum*."""
    trace = program.traces[forknum]
    slce = qira_analysis.slice(trace, clnum)
    ret = []
    def get_instruction(i):
        """Build the JSON record for the instruction at changelist *i*;
        returns None when there is no change recorded there."""
        rret = trace.db.fetch_changes_by_clnum(i, 1)
        if len(rret) == 0:
            return None
        else:
            rret = rret[0]
        instr = program.static[rret['address']]['instruction']
        rret['instruction'] = instr.__str__(trace, i) #i == clnum
        # check if static fails at this
        if rret['instruction'] == "":
            # TODO: wrong place to get the arch
            arch = program.static[rret['address']]['arch']
            # we have the address and raw bytes, disassemble
            raw = trace.fetch_raw_memory(i, rret['address'], rret['data'])
            rret['instruction'] = str(model.Instruction(raw, rret['address'], arch))
        #display_call_args calls make_function_at
        if qira_config.WITH_STATIC:
            if instr.is_call():
                args = qira_analysis.display_call_args(instr,trace,i)
                if args != "":
                    rret['instruction'] += " {"+args+"}"
        if 'name' in program.static[rret['address']]:
            #print "setting name"
            rret['name'] = program.static[rret['address']]['name']
        if 'comment' in program.static[rret['address']]:
            rret['comment'] = program.static[rret['address']]['comment']
        if i in slce:
            rret['slice'] = True
        else:
            rret['slice'] = False
        # for numberless javascript
        rret['address'] = ghex(rret['address'])
        try:
            rret['depth'] = trace.dmap[i - trace.minclnum]
        except:
            rret['depth'] = 0
        # hack to only display calls
        if True or instr.is_call():
            #if instr.is_call():
            return rret
        else:
            return None
    # Collect instructions before clnum (top) and from clnum onward (ret).
    top = []
    clcurr = clnum-1
    while len(top) != (clnum - clstart) and clcurr >= 0:
        rret = get_instruction(clcurr)
        if rret != None:
            top.append(rret)
        clcurr -= 1
    clcurr = clnum
    while len(ret) != (clend - clnum) and clcurr <= clend:
        rret = get_instruction(clcurr)
        if rret != None:
            ret.append(rret)
        clcurr += 1
    # top was gathered walking backwards; reverse into display order.
    ret = top[::-1] + ret
    emit('instructions', ret)
@socketio.on('getmemory', namespace='/qira')
@socket_method
def getmemory(forknum, clnum, address, ln):
    """Fetch *ln* bytes at *address* as of changelist *clnum* and send
    them with endianness/pointer-size info for rendering."""
    trace = program.traces[forknum]
    address = fhex(address)
    dat = trace.fetch_memory(clnum, address, ln)
    ret = {'address': ghex(address), 'len': ln, 'dat': dat, 'is_big_endian': program.tregs[2], 'ptrsize': program.tregs[1]}
    emit('memory', ret)
@socketio.on('setfunctionargswrap', namespace='/qira')
@socket_method
def setfunctionargswrap(func, args):
    """Set the argument count (and optionally the ABI) of the static
    function at address *func* from a user string: "<nargs>" or
    "<abi> <nargs>", where <abi> is a number or an ABITYPE name."""
    function = program.static[fhex(func)]['function']
    if len(args.split()) == 1:
        try:
            function.nargs = int(args)
        except:
            # Non-numeric input is silently ignored.
            pass
    if len(args.split()) == 2:
        abi = None
        try:
            abi = int(args.split()[0])
        except:
            # Not a number: try to match an ABITYPE constant by name.
            for m in dir(model.ABITYPE):
                if m == args.split()[0].upper():
                    abi = model.ABITYPE.__dict__[m]
        function.nargs = int(args.split()[1])
        if abi != None:
            function.abi = abi
@socketio.on('getregisters', namespace='/qira')
@socket_method
def getregisters(forknum, clnum):
    """Send register names/values at *clnum*, marking each register as
    read, written, or both by the changes at clnum+1."""
    trace = program.traces[forknum]
    # register names shouldn't be here
    # though i'm not really sure where a better place is, qemu has this information
    ret = []
    REGS = program.tregs[0]
    REGSIZE = program.tregs[1]
    # 50 is a sane limit here, we don't really need to mark lib calls correctly
    cls = trace.db.fetch_changes_by_clnum(clnum+1, 50)
    regs = trace.db.fetch_registers(clnum)
    for i in range(0, len(REGS)):
        # Skip unnamed register slots.
        if REGS[i] == None:
            continue
        rret = {"name": REGS[i], "address": i*REGSIZE, "value": ghex(regs[i]), "size": REGSIZE, "regactions": ""}
        act = set()
        for c in cls:
            # Register changes are addressed by register-file offset.
            if c['address'] == i*REGSIZE:
                act.add(c['type'])
        # this +1 is an ugly hack
        if 'R' in act:
            rret['regactions'] = "regread"
        if 'W' in act:
            if "regread" == rret['regactions']:
                rret['regactions'] = "regreadwrite"
            else:
                rret['regactions'] = "regwrite"
        rret['num'] = i
        ret.append(rret)
    emit('registers', ret)
# ***** generic webserver stuff *****
@app.route('/', defaults={'path': 'index.html'})
@app.route('/<path:path>')
def serve(path):
    """Serve static web UI files from BASEDIR/web, wrapping client JS
    (outside client/compatibility/) in an IIFE."""
    # best security?
    if ".." in path:
        return
    ext = path.split(".")[-1]
    try:
        dat = open(qira_config.BASEDIR + "/web/"+path).read()
    except:
        # Missing/unreadable file: serve an empty body.
        return ""
    if ext == 'js' and not path.startswith('client/compatibility/') and path.startswith('client/'):
        dat = "(function(){"+dat+"})();"
    if ext == 'js':
        return Response(dat, mimetype="application/javascript")
    elif ext == 'css':
        return Response(dat, mimetype="text/css")
    else:
        return Response(dat, mimetype="text/html")
# must go at the bottom
def run_server(largs, lprogram):
    """Entry point: store module globals, init the static web layer,
    start the poller thread, and run the socket.io server (blocks until
    interrupted)."""
    global args
    global program
    global static
    args = largs
    program = lprogram
    # web static moved to external file
    import qira_webstatic
    qira_webstatic.init(lprogram)
    print "****** starting WEB SERVER on %s:%d" % (qira_config.HOST, qira_config.WEB_PORT)
    threading.Thread(target=mwpoller).start()
    try:
        socketio.run(app, host=qira_config.HOST, port=qira_config.WEB_PORT, log_output=False)
    except KeyboardInterrupt:
        print "*** User raised KeyboardInterrupt"
    exit()
| StarcoderdataPython |
8043700 | <gh_stars>0
#!/usr/bin/env python
"""
String Utils for Python 3
"""
import sys
# These helpers rely on Python 3 str/bytes semantics.
if sys.version_info[0] != 3:
    raise ImportError(" Python version 3 required")

# Encoding used for every str<->bytes conversion below.
# BUG FIX: this constant was assigned twice ('ASCII', then 'latin_1'),
# leaving the first assignment dead; keep only the effective value.
# latin-1 maps each byte 0-255 to the same code point, so any byte
# value round-trips.
EPICS_STR_ENCODING = 'latin_1'

NULLCHAR_2 = '\x00'   # NUL terminator as text
NULLCHAR = b'\x00'    # NUL terminator as a single byte
def s2b(st1):
    """Coerce *st1* to bytes; str input is encoded with EPICS_STR_ENCODING."""
    return st1 if isinstance(st1, bytes) else bytes(st1, EPICS_STR_ENCODING)
def b2s(st1):
    """Coerce *st1* to str: bytes are decoded with EPICS_STR_ENCODING,
    anything else is passed through str()."""
    if isinstance(st1, str):
        return st1
    if isinstance(st1, bytes):
        return str(st1, EPICS_STR_ENCODING)
    return str(st1)
STR2BYTES, BYTES2STR = s2b, b2s
def strjoin(sep, seq):
    """Join a sequence of str (or bytes) elements with *sep*.

    A bytes separator is decoded first.  When the sequence holds bytes,
    each element is decoded and iteration stops at the first NUL byte.
    """
    if isinstance(sep, bytes):
        sep = BYTES2STR(sep)
    if len(seq) == 0:
        return sep.join('')
    if isinstance(seq[0], bytes):
        decoded = []
        for item in seq:
            if item == NULLCHAR:
                break
            decoded.append(BYTES2STR(item))
        return sep.join(decoded)
    return sep.join(seq)
def is_string(s):
    """Return True when *s* is a text (str) instance."""
    return isinstance(s, str)
def ascii_string(s):
    """Round-trip *s* through EPICS_STR_ENCODING and return the result.

    Despite the name, the configured encoding (latin_1) is used.
    """
    encoded = bytes(s, EPICS_STR_ENCODING)
    return str(encoded, EPICS_STR_ENCODING)
| StarcoderdataPython |
1699711 | """ A holder for horizon enhancement steps inherited from `.class:Detector` with the following functionality:
- training a model on a horizon with synthetic distortions.
- making inference on a selected data.
"""
from textwrap import indent
import numpy as np
from ...batchflow import Pipeline, B, V, C, P, R
from .horizon import HorizonController
from ...batchflow.models.torch import EncoderDecoder
class Enhancer(HorizonController):
    """
    Provides interface for train, inference and quality assesment for the task of horizon enhancement.
    """
    #pylint: disable=arguments-renamed
    def train(self, horizon, **kwargs):
        """ Train model for horizon enhancement. """
        dataset = self.make_dataset(horizon=horizon)
        sampler = self.make_sampler(dataset)
        # Persist sampler diagnostics next to the run's other artifacts.
        sampler.show_locations(show=self.plot, savepath=self.make_savepath('sampler_locations.png'))
        sampler.show_sampled(show=self.plot, savepath=self.make_savepath('sampler_generated.png'))
        self.log(f'Created sampler\n{indent(str(sampler), " "*4)}')

        return super().train(dataset=dataset, sampler=sampler, **kwargs)

    def inference(self, horizon, model, config=None, **kwargs):
        """ Runs enhancement procedure for a given horizon with provided model. """
        dataset = self.make_dataset(horizon=horizon)

        #TODO: return filtering matrix to grid?
        # if filtering_matrix is None:
        #     filtering_matrix = 1 - (horizon.full_matrix > 0)
        prediction = super().inference(dataset=dataset, model=model, config=config, **kwargs)[0]
        prediction.name = f'enhanced_{horizon.name}'
        return prediction

    def load_pipeline(self):
        """ Defines data loading procedure.
        Following parameters are fetched from pipeline config: `width` and `rebatch_threshold`.
        """
        return (
            Pipeline()
            .make_locations(generator=C('sampler'), batch_size=C('batch_size'))
            .create_masks(dst='masks', width=C('width', default=3))
            .mask_rebatch(src='masks', threshold=C('rebatch_threshold', default=0.7))
            .load_cubes(dst='images')
            .adaptive_reshape(src=['images', 'masks'])
            .normalize(src='images')
        )

    def distortion_pipeline(self):
        """ Defines transformations performed with `masks` component. """
        def binarize(batch):
            # Collapse any positive mask values to exactly 1.0.
            batch.prior_masks = (batch.prior_masks > 0).astype(np.float32)

        return (
            Pipeline()
            .shift_masks(src='masks', dst='prior_masks')
            .transpose(src='prior_masks', order=(1, 2, 0))
            .elastic_transform(alpha=P(R('uniform', 30, 50)), sigma=P(R('uniform', 6, 7)),
                               src='prior_masks', p=0.5)
            .bend_masks(src='prior_masks', angle=P(R('uniform', -15, 15)))
            .call(binarize, B())
            .transpose(src='prior_masks', order=(2, 0, 1))
        )

    def augmentation_pipeline(self):
        """ Defines augmentation pipeline. """
        return (
            Pipeline()
            .transpose(src=['images', 'masks', 'prior_masks'], order=(1, 2, 0))
            .rotate(angle=P(R('uniform', -30, 30)),
                    src=['images', 'masks', 'prior_masks'], p=0.3)
            .flip(src=['images', 'masks', 'prior_masks'], axis=1, p=0.3)
            .transpose(src=['images', 'masks', 'prior_masks'], order=(2, 0, 1))
        )

    def train_pipeline(self):
        """ Define model initialization and model training pipeline.
        Following parameters are fetched from pipeline config: `model_config`.
        """
        return (
            Pipeline()
            .init_variable('loss_history', default=[])
            .init_model(mode='dynamic', model_class=C('model_class', default=EncoderDecoder),
                        name='model', config=C('model_config'))
            # Prior masks are stacked with images as extra input channels.
            .concat_components(src=['images', 'prior_masks'], dst='images', axis=1)
            .train_model('model', fetches='loss', save_to=V('loss_history', mode='a'),
                         images=B('images'),
                         masks=B('masks'))
        )

    def get_train_template(self, distortion_pipeline=None, **kwargs):
        """ Define the whole training procedure pipeline including data loading, augmentation and model training. """
        _ = kwargs
        return (
            self.load_pipeline() +
            (distortion_pipeline or self.distortion_pipeline()) +
            self.augmentation_pipeline() +
            self.train_pipeline()
        )

    def get_inference_template(self):
        """ Defines inference pipeline. """
        inference_template = (
            Pipeline()
            .init_variable('predictions', [])
            # Load data
            .make_locations(generator=C('grid'))
            .load_cubes(dst='images')
            .create_masks(dst='prior_masks', width=C('width', default=3))
            .adaptive_reshape(src=['images', 'prior_masks'])
            .normalize(src='images')
            .concat_components(src=['images', 'prior_masks'], dst='images', axis=1)
            # Use model for prediction
            .predict_model('model',
                           B('images'),
                           fetches='predictions',
                           save_to=B('predictions'))
            .update_accumulator(src='predictions', accumulator=C('accumulator'))
        )
        return inference_template

    # One method to rule them all
    def run(self, cube_paths=None, horizon_paths=None, horizon=None, **kwargs):
        """ Run the entire procedure of horizon enhancement. """
        dataset = self.make_dataset(cube_paths=cube_paths, horizon_paths=horizon_paths, horizon=horizon)
        horizon = dataset.labels[0][0]

        model = self.train(horizon=horizon, **kwargs)
        prediction = self.inference(horizon, model, **kwargs)
        prediction = self.postprocess(prediction)
        info = self.evaluate(prediction, dataset=dataset)
        return prediction, info
| StarcoderdataPython |
5165135 | <filename>pytorch_wavelets/dtcwt/transform2d.py
import torch
import torch.nn as nn
from numpy import ndarray, sqrt
from pytorch_wavelets.dtcwt.coeffs import qshift as _qshift, biort as _biort, level1
from pytorch_wavelets.dtcwt.lowlevel import prep_filt
from pytorch_wavelets.dtcwt.transform_funcs import FWD_J1, FWD_J2PLUS
from pytorch_wavelets.dtcwt.transform_funcs import INV_J1, INV_J2PLUS
from pytorch_wavelets.dtcwt.transform_funcs import get_dimensions6
from pytorch_wavelets.dwt.lowlevel import mode_to_int
from pytorch_wavelets.dwt.transform2d import DWTForward, DWTInverse
def pm(a, b):
    """Return the normalized sum and difference: ((a+b)/sqrt(2), (a-b)/sqrt(2))."""
    root2 = sqrt(2)
    return (a + b) / root2, (a - b) / root2
class DTCWTForward(nn.Module):
    """ Performs a 2d DTCWT Forward decomposition of an image

    Args:
        biort (str): One of 'antonini', 'legall', 'near_sym_a', 'near_sym_b'.
            Specifies the first level biorthogonal wavelet filters. Can also
            give a two tuple for the low and highpass filters directly.
        qshift (str): One of 'qshift_06', 'qshift_a', 'qshift_b', 'qshift_c',
            'qshift_d'. Specifies the second level quarter shift filters. Can
            also give a 4-tuple for the low tree a, low tree b, high tree a and
            high tree b filters directly.
        J (int): Number of levels of decomposition
        skip_hps (bools): List of bools of length J which specify whether or
            not to calculate the bandpass outputs at the given scale.
            skip_hps[0] is for the first scale. Can be a single bool in which
            case that is applied to all scales.
        include_scale (bool): If true, return the bandpass outputs. Can also be
            a list of length J specifying which lowpasses to return. I.e. if
            [False, True, True], the forward call will return the second and
            third lowpass outputs, but discard the lowpass from the first level
            transform.
        o_dim (int): Which dimension to put the orientations in
        ri_dim (int): which dimension to put the real and imaginary parts
    """
    def __init__(self, biort='near_sym_a', qshift='qshift_a',
                 J=3, skip_hps=False, include_scale=False,
                 o_dim=2, ri_dim=-1, mode='symmetric'):
        super().__init__()
        if o_dim == ri_dim:
            raise ValueError("Orientations and real/imaginary parts must be "
                             "in different dimensions.")

        self.biort = biort
        self.qshift = qshift
        self.J = J
        self.o_dim = o_dim
        self.ri_dim = ri_dim
        self.mode = mode
        # Level-1 (biorthogonal) filters: either a named set or a
        # (lowpass, highpass) pair given directly.
        if isinstance(biort, str):
            h0o, _, h1o, _ = _biort(biort)
            self.register_buffer('h0o', prep_filt(h0o, 1))
            self.register_buffer('h1o', prep_filt(h1o, 1))
        else:
            self.register_buffer('h0o', prep_filt(biort[0], 1))
            self.register_buffer('h1o', prep_filt(biort[1], 1))
        # Level-2+ (quarter-shift) filters: named set or a 4-tuple
        # (low tree a, low tree b, high tree a, high tree b).
        if isinstance(qshift, str):
            h0a, h0b, _, _, h1a, h1b, _, _ = _qshift(qshift)
            self.register_buffer('h0a', prep_filt(h0a, 1))
            self.register_buffer('h0b', prep_filt(h0b, 1))
            self.register_buffer('h1a', prep_filt(h1a, 1))
            self.register_buffer('h1b', prep_filt(h1b, 1))
        else:
            self.register_buffer('h0a', prep_filt(qshift[0], 1))
            self.register_buffer('h0b', prep_filt(qshift[1], 1))
            self.register_buffer('h1a', prep_filt(qshift[2], 1))
            self.register_buffer('h1b', prep_filt(qshift[3], 1))

        # Normalize the per-scale options to lists of length J.
        if isinstance(skip_hps, (list, tuple, ndarray)):
            self.skip_hps = skip_hps
        else:
            self.skip_hps = [skip_hps,] * self.J
        if isinstance(include_scale, (list, tuple, ndarray)):
            self.include_scale = include_scale
        else:
            self.include_scale = [include_scale,] * self.J

    def forward(self, x):
        """ Forward Dual Tree Complex Wavelet Transform

        Args:
            x (tensor): Input to transform. Should be of shape
                :math:`(N, C_{in}, H_{in}, W_{in})`.

        Returns:
            (yl, yh)
                tuple of lowpass (yl) and bandpass (yh) coefficients.
                If include_scale was true, yl will be a list of lowpass
                coefficients, otherwise will be just the final lowpass
                coefficient of shape :math:`(N, C_{in}, H_{in}', W_{in}')`. Yh
                will be a list of the complex bandpass coefficients of shape
                :math:`list(N, C_{in}, 6, H_{in}'', W_{in}'', 2)`, or similar
                shape depending on o_dim and ri_dim

        Note:
            :math:`H_{in}', W_{in}', H_{in}'', W_{in}''` are the shapes of a
            DTCWT pyramid.
        """
        # BUG FIX: removed a stray debug `print(type(x))` that was the first
        # statement of this method; besides the debug noise, it also kept
        # the string above from acting as the method's docstring.
        scales = [x.new_zeros([]),] * self.J
        highs = [x.new_zeros([]),] * self.J
        mode = mode_to_int(self.mode)
        if self.J == 0:
            return x, None

        # If the row/col count of X is not divisible by 2 then we need to
        # extend X
        r, c = x.shape[2:]
        if r % 2 != 0:
            x = torch.cat((x, x[:,:,-1:]), dim=2)
        if c % 2 != 0:
            x = torch.cat((x, x[:,:,:,-1:]), dim=3)

        # Do the level 1 transform
        low, h = FWD_J1.apply(x, self.h0o, self.h1o, self.skip_hps[0],
                              self.o_dim, self.ri_dim, mode)
        highs[0] = h
        if self.include_scale[0]:
            scales[0] = low

        for j in range(1, self.J):
            # Ensure the lowpass is divisible by 4
            r, c = low.shape[2:]
            if r % 4 != 0:
                low = torch.cat((low[:,:,0:1], low, low[:,:,-1:]), dim=2)
            if c % 4 != 0:
                low = torch.cat((low[:,:,:,0:1], low, low[:,:,:,-1:]), dim=3)

            low, h = FWD_J2PLUS.apply(low, self.h0a, self.h1a, self.h0b,
                                      self.h1b, self.skip_hps[j], self.o_dim,
                                      self.ri_dim, mode)
            highs[j] = h
            if self.include_scale[j]:
                scales[j] = low

        if True in self.include_scale:
            return scales, highs
        else:
            return low, highs
class DTCWTInverse(nn.Module):
    """ 2d DTCWT Inverse

    Reconstructs an image from dual-tree complex wavelet coefficients
    produced by the matching forward transform.

    Args:
        biort (str): One of 'antonini', 'legall', 'near_sym_a', 'near_sym_b'.
            Specifies the first level biorthogonal wavelet filters. Can also
            give a two tuple for the low and highpass filters directly.
        qshift (str): One of 'qshift_06', 'qshift_a', 'qshift_b', 'qshift_c',
            'qshift_d'. Specifies the second level quarter shift filters. Can
            also give a 4-tuple for the low tree a, low tree b, high tree a and
            high tree b filters directly.
        o_dim (int): which dimension the orientations are in
        ri_dim (int): which dimension the real and imaginary parts are in

    Note:
        Unlike the forward transform there is no ``J`` argument: the number
        of levels is inferred from the length of the bandpass list passed to
        :meth:`forward`.
    """
    def __init__(self, biort='near_sym_a', qshift='qshift_a', o_dim=2,
                 ri_dim=-1, mode='symmetric'):
        super().__init__()
        self.biort = biort
        self.qshift = qshift
        self.o_dim = o_dim
        self.ri_dim = ri_dim
        self.mode = mode
        # Level-1 synthesis filters. Only two of _biort's four returned
        # filters are kept; the discarded ones are presumably the analysis
        # (forward) filters, which the inverse does not need.
        if isinstance(biort, str):
            _, g0o, _, g1o = _biort(biort)
            self.register_buffer('g0o', prep_filt(g0o, 1))
            self.register_buffer('g1o', prep_filt(g1o, 1))
        else:
            self.register_buffer('g0o', prep_filt(biort[0], 1))
            self.register_buffer('g1o', prep_filt(biort[1], 1))
        # Level >= 2 quarter-shift synthesis filters (tree a / tree b pairs).
        if isinstance(qshift, str):
            _, _, g0a, g0b, _, _, g1a, g1b = _qshift(qshift)
            self.register_buffer('g0a', prep_filt(g0a, 1))
            self.register_buffer('g0b', prep_filt(g0b, 1))
            self.register_buffer('g1a', prep_filt(g1a, 1))
            self.register_buffer('g1b', prep_filt(g1b, 1))
        else:
            self.register_buffer('g0a', prep_filt(qshift[0], 1))
            self.register_buffer('g0b', prep_filt(qshift[1], 1))
            self.register_buffer('g1a', prep_filt(qshift[2], 1))
            self.register_buffer('g1b', prep_filt(qshift[3], 1))

    def forward(self, coeffs):
        """
        Args:
            coeffs (yl, yh): tuple of lowpass and bandpass coefficients, where:
                yl is a tensor of shape :math:`(N, C_{in}, H_{in}', W_{in}')`
                and yh is a list of the complex bandpass coefficients of shape
                :math:`list(N, C_{in}, 6, H_{in}'', W_{in}'', 2)`, or similar
                depending on o_dim and ri_dim
        Returns:
            Reconstructed output
        Note:
            Can accept Nones or an empty tensor (torch.tensor([])) for the
            lowpass or bandpass inputs. In this cases, an array of zeros
            replaces that input.
        Note:
            :math:`H_{in}', W_{in}', H_{in}'', W_{in}''` are the shapes of a
            DTCWT pyramid.
        Note:
            If include_scale was true for the forward pass, you should provide
            only the final lowpass output here, as normal for an inverse wavelet
            transform.
        """
        low, highs = coeffs
        # Number of levels is taken from the bandpass list, not stored state.
        J = len(highs)
        mode = mode_to_int(self.mode)
        # Locate the spatial (height/width) axes given the orientation and
        # real/imaginary axis placement chosen at the forward pass.
        _, _, h_dim, w_dim = get_dimensions6(
            self.o_dim, self.ri_dim)
        # Invert levels J-1 .. 1 (coarsest first), then level 0 separately.
        for j, s in zip(range(J-1, 0, -1), highs[1:][::-1]):
            if s is not None and s.shape != torch.Size([]):
                assert s.shape[self.o_dim] == 6, "Inverse transform must " \
                    "have input with 6 orientations"
                assert len(s.shape) == 6, "Bandpass inputs must have " \
                    "6 dimensions"
                assert s.shape[self.ri_dim] == 2, "Inputs must be complex " \
                    "with real and imaginary parts in the ri dimension"
                # Ensure the low and highpass are the right size
                # (trim rows/cols presumably added as padding by the forward
                # transform when the lowpass size was not divisible by 4).
                r, c = low.shape[2:]
                r1, c1 = s.shape[h_dim], s.shape[w_dim]
                if r != r1 * 2:
                    low = low[:,:,1:-1]
                if c != c1 * 2:
                    low = low[:,:,:,1:-1]
            low = INV_J2PLUS.apply(low, s, self.g0a, self.g1a, self.g0b,
                                   self.g1b, self.o_dim, self.ri_dim, mode)
        # Ensure the low and highpass are the right size
        if highs[0] is not None and highs[0].shape != torch.Size([]):
            r, c = low.shape[2:]
            r1, c1 = highs[0].shape[h_dim], highs[0].shape[w_dim]
            if r != r1 * 2:
                low = low[:,:,1:-1]
            if c != c1 * 2:
                low = low[:,:,:,1:-1]
        # Final level-1 inverse with the biorthogonal synthesis filters.
        low = INV_J1.apply(low, highs[0], self.g0o, self.g1o, self.o_dim,
                           self.ri_dim, mode)
        return low
| StarcoderdataPython |
199583 | <gh_stars>1-10
import datetime
def time_since(dt, human_readable=False):
    """Return seconds elapsed since *dt* (a naive UTC datetime).

    Returns None when *dt* is None.  When *human_readable* is true, the
    elapsed time is formatted via human_readable_time_delta() instead of
    being returned as a raw float.
    """
    if dt is None:
        return None
    assert isinstance(dt, datetime.datetime)
    elapsed = (datetime.datetime.utcnow() - dt).total_seconds()
    return human_readable_time_delta(elapsed) if human_readable else elapsed
def human_readable_time_delta(seconds):
    """Format a duration in seconds as a coarse, human-friendly 'ago' phrase.

    None is rendered as "Never"; otherwise the largest fitting unit
    (seconds, minutes, hours, days) is used with banker's rounding.
    """
    if seconds is None:
        return "Never"
    minute, hour, day = 60, 60 * 60, 24 * 60 * 60
    if seconds < minute:
        return "{:d} seconds ago".format(int(round(seconds)))
    if seconds < hour:
        return "{:d} minutes ago".format(int(round(seconds / 60)))
    if seconds < day:
        return "{:d} hours ago".format(int(round(seconds / 60 / 60)))
    return "{:d} days ago".format(int(round(seconds / 60 / 60 / 24)))
| StarcoderdataPython |
8090929 | <gh_stars>10-100
import argparse
import os
import sys
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
import numpy as np
import models.densenet as dn
import models.wideresnet as wn
import models.gmm as gmmlib
from utils import TinyImages
import utils.svhn_loader as svhn
from sklearn import mixture
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch DenseNet Training')
parser.add_argument('--gpu', default='0', type=str, help='which gpu to use')
parser.add_argument('--in-dataset', default="CIFAR-10", type=str, help='in-distribution dataset')
parser.add_argument('--model-arch', default='densenet', type=str, help='model architecture')
parser.add_argument('--epochs', default=100, type=int,
                    help='number of total epochs to run')
parser.add_argument('--save-epoch', default=10, type=int,
                    help='save the model every save_epoch')
parser.add_argument('--start-epoch', default=0, type=int,
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    help='mini-batch size (default: 64)')
parser.add_argument('--ood-batch-size', default=50, type=int,
                    help='mini-batch size (default: 50)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,
                    help='weight decay (default: 0.0001)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    help='print frequency (default: 10)')
# DenseNet-specific options.
parser.add_argument('--layers', default=100, type=int,
                    help='total number of layers (default: 100)')
# WideResNet-specific options.
parser.add_argument('--depth', default=40, type=int,
                    help='depth of resnet')
parser.add_argument('--width', default=4, type=int,
                    help='width of resnet')
parser.add_argument('--growth', default=12, type=int,
                    help='number of new channels per layer (default: 12)')
parser.add_argument('--droprate', default=0.0, type=float,
                    help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
                    help='whether to use standard augmentation (default: True)')
parser.add_argument('--reduce', default=0.5, type=float,
                    help='compression rate in transition stage (default: 0.5)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
                    help='To not use bottleneck block')
parser.add_argument('--resume', default='', type=str,
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--name', required=True, type=str,
                    help='name of experiment')
parser.add_argument('--tensorboard',
                    help='Log progress to TensorBoard', action='store_true')
parser.set_defaults(bottleneck=True)
parser.set_defaults(augment=True)
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
print(state)
# Persist the full run configuration next to the checkpoints so the
# experiment can be identified later.
directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
if not os.path.exists(directory):
    os.makedirs(directory)
save_state_file = os.path.join(directory, 'args.txt')
fw = open(save_state_file, 'w')
print(state, file=fw)
fw.close()
# Restrict CUDA to the requested device and fix RNG seeds for reproducibility.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(1)
np.random.seed(1)
def main():
    """Train a doubly-robust (classifier + in/out GMM density) model.

    Builds loaders for the chosen in-distribution dataset plus a TinyImages
    loader as the out-of-distribution source, fits spherical GMMs on both,
    wraps the base classifier in ``gmmlib.DoublyRobustModel``, then runs the
    CEDA-style training loop with periodic checkpointing.
    """
    if args.tensorboard: configure("runs/%s"%(args.name))
    # Standard crop+flip augmentation for training unless disabled via CLI.
    if args.augment:
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        ])
    kwargs = {'num_workers': 1, 'pin_memory': True}
    # Each dataset branch sets its own normalization, LR schedule and class
    # count; SVHN also shortens the training run.
    if args.in_dataset == "CIFAR-10":
        # Data loading code
        normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                          std=[x/255.0 for x in [63.0, 62.1, 66.7]])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./datasets/cifar10', train=True, download=True,
                             transform=transform_train),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./datasets/cifar10', train=False, transform=transform_test),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        lr_schedule=[50, 75, 90]
        num_classes = 10
    elif args.in_dataset == "CIFAR-100":
        # Data loading code
        normalizer = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                          std=[x/255.0 for x in [63.0, 62.1, 66.7]])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./datasets/cifar100', train=True, download=True,
                              transform=transform_train),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./datasets/cifar100', train=False, transform=transform_test),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        lr_schedule=[50, 75, 90]
        num_classes = 100
    elif args.in_dataset == "SVHN":
        # Data loading code
        normalizer = None
        train_loader = torch.utils.data.DataLoader(
            svhn.SVHN('datasets/svhn/', split='train',
                      transform=transforms.ToTensor(), download=False),
            batch_size=args.batch_size, shuffle=True, **kwargs)
        val_loader = torch.utils.data.DataLoader(
            svhn.SVHN('datasets/svhn/', split='test',
                      transform=transforms.ToTensor(), download=False),
            batch_size=args.batch_size, shuffle=False, **kwargs)
        args.epochs = 20
        args.save_epoch = 2
        lr_schedule=[10, 15, 18]
        num_classes = 10
    # TinyImages serves as the out-of-distribution training source.
    out_loader = torch.utils.data.DataLoader(
        TinyImages(transform=transforms.Compose(
            [transforms.ToTensor(), transforms.ToPILImage(), transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(), transforms.ToTensor()])),
            batch_size=args.ood_batch_size, shuffle=False, **kwargs)
    # create model
    if args.model_arch == 'densenet':
        base_model = dn.DenseNet3(args.layers, num_classes, args.growth, reduction=args.reduce,
                             bottleneck=args.bottleneck, dropRate=args.droprate, normalizer=normalizer)
    elif args.model_arch == 'wideresnet':
        base_model = wn.WideResNet(args.depth, num_classes, widen_factor=args.width, dropRate=args.droprate, normalizer=normalizer)
    else:
        assert False, 'Not supported model arch: {}'.format(args.model_arch)
    # Fit fresh GMMs on flattened pixels for both distributions, then reload
    # them from the checkpoint directory where gen_gmm saved them.
    gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100])
    gmm = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'in_gmm.pth.tar')
    # Mixture weights (alpha) are frozen; means and variances stay trainable.
    gmm.alpha = nn.Parameter(gmm.alpha)
    gmm.mu.requires_grad = True
    gmm.logvar.requires_grad = True
    gmm.alpha.requires_grad = False
    gmm_out = torch.load("checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name) + 'out_gmm.pth.tar')
    # NOTE(review): this wraps gmm.alpha (the in-distribution weights) rather
    # than gmm_out.alpha -- looks like a copy/paste slip; confirm intent.
    gmm_out.alpha = nn.Parameter(gmm.alpha)
    gmm_out.mu.requires_grad = True
    gmm_out.logvar.requires_grad = True
    gmm_out.alpha.requires_grad = False
    # loglam = log of the in/out mixing weight lambda; fixed at 0 (lambda=1).
    loglam = 0.
    model = gmmlib.DoublyRobustModel(base_model, gmm, gmm_out,
                                     loglam, dim=3072,
                                     classes=num_classes).cuda()
    model.loglam.requires_grad = False
    # get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model = model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # define loss function (criterion) and pptimizer
    # GMM parameters get a much smaller learning rate and no weight decay.
    lr = args.lr
    lr_gmm = 1e-5
    param_groups = [{'params':model.mm.parameters(),'lr':lr_gmm, 'weight_decay':0.},
                    {'params':model.mm_out.parameters(),'lr':lr_gmm, 'weight_decay':0.},
                    {'params':model.base_model.parameters(),'lr':lr, 'weight_decay':args.weight_decay}]
    optimizer = torch.optim.SGD(param_groups, momentum=args.momentum, nesterov=True)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, lr_schedule)
        # train for one epoch
        lam = model.loglam.data.exp().item()
        train_CEDA_gmm_out(model, train_loader, out_loader, optimizer, epoch, lam=lam)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)
        # remember best prec@1 and save checkpoint
        if (epoch + 1) % args.save_epoch == 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
            }, epoch + 1)
def gen_gmm(train_loader, out_loader, data_used=50000, PCA=True, N=[100]):
    """Fit spherical GMMs on flattened pixels for the in-distribution and
    out-distribution data and save them under the experiment directory.

    Args:
        train_loader: loader yielding in-distribution (image, label) batches.
        out_loader: loader yielding out-of-distribution batches.
        data_used: cap on the number of in-distribution samples used.
        PCA: if True, fit in a PCA-whitened space and map means back after.
        N: list of component counts; one GMM is fitted (and the saved file
            overwritten) per entry.

    NOTE(review): ``mixture.GMM`` is the pre-0.20 scikit-learn API (removed
    since; replaced by ``GaussianMixture`` with ``means_``/``covariances_``/
    ``weights_``) -- this function only runs against an old sklearn.
    """
    print('Generate GMM...')
    start = time.time()
    dim = 3072
    # Collect the (flattened) in-distribution pixels.
    X = []
    for x, f in train_loader:
        X.append(x.view(-1,dim))
    X = torch.cat(X, 0)
    X = X[:data_used] #needed to keep memory of distance matrix below 800 GB
    if PCA:
        # Whiten: project onto PCA components and rescale by singular values.
        metric = gmmlib.PCAMetric(X, p=2, min_sv_factor=1e6)
        X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
    else:
        metric = gmmlib.LpMetric()
    for n in N:
        print(n)
        gmm = gmmlib.GMM(n, dim, metric=metric)  # NOTE(review): unused, overwritten below
        clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
        clf.fit(X)
        mu = torch.tensor(clf.means_ ,dtype=torch.float)
        logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
        # Tie all components to one shared spherical variance (the mean of
        # the fitted variances).
        logvar = 0.*logvar + logvar.exp().mean().log()
        alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)  # NOTE(review): never passed to GMM below
        gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
        if PCA:
            # Map the whitened-space means back to pixel space.
            gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
                           @ metric.comp_vecs.t().inverse() )
        directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        filename = directory + 'in_gmm.pth.tar'
        torch.save(gmm, filename)
    # Repeat for the out-of-distribution data (capped at ~400 batches).
    X = []
    for idx, (x, f) in enumerate(out_loader):
        if idx>400:
            break;
        X.append(x.view(-1,dim))
    X = torch.cat(X, 0)
    if PCA:
        # Re-use the in-distribution PCA metric for the OOD data.
        X = ( (X@metric.comp_vecs.t()) / metric.singular_values_sqrt[None,:] )
    for n in N:
        print(n)
        # Out GMM
        gmm = gmmlib.GMM(n, dim, metric=metric)  # NOTE(review): unused, overwritten below
        clf = mixture.GMM(n_components=n, covariance_type='spherical', params='mc')
        clf.fit(X)
        mu = torch.tensor(clf.means_ ,dtype=torch.float)
        logvar = torch.tensor(np.log(clf.covars_[:,0]) ,dtype=torch.float)
        logvar = 0.*logvar + logvar.exp().mean().log()
        alpha = torch.tensor(np.log(clf.weights_) ,dtype=torch.float)
        gmm = gmmlib.GMM(n, dim, mu=mu, logvar=logvar, metric=metric)
        if PCA:
            gmm.mu.data = ( (gmm.mu.data * metric.singular_values_sqrt[None,:] )
                           @ metric.comp_vecs.t().inverse() )
        directory = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        filename = directory + 'out_gmm.pth.tar'
        torch.save(gmm, filename)
    print('Time: ', time.time() - start)
    print('Done!')
def train_CEDA_gmm_out(model, train_loader, ood_loader, optimizer, epoch, lam=1., verbose=10):
    """Run one training epoch combining classification and density losses.

    Each step draws an in-distribution batch and an OOD (noise) batch,
    computes cross-entropy on the former, pushes the softmax towards
    uniformity on the latter, and adds log-likelihood terms for the in/out
    GMM mixture weighted by ``lam`` (prior odds of out- vs in-distribution).

    Args:
        model: DoublyRobustModel with ``mm`` (in) and ``mm_out`` (out) GMMs.
        train_loader: in-distribution batches.
        ood_loader: out-of-distribution batches.
        optimizer: optimizer over both network and GMM parameters.
        epoch: current epoch (for logging only).
        lam: mixing weight; p_in = 1/(1+lam), p_out = lam/(1+lam).
        verbose: print a progress line every *verbose* batches (<=0 disables).
    """
    criterion = nn.NLLLoss()
    model.train()
    train_loss = 0
    likelihood_loss = 0
    correct = 0
    # Lower bound (in log-space) that the OOD GMM's variance is clamped to,
    # relative to the in-distribution GMM's largest variance (see below).
    margin = np.log(4.)
    if ood_loader is not None:
        # Random offset so each epoch sees a different slice of the OOD set.
        ood_loader.dataset.offset = np.random.randint(len(ood_loader.dataset))
        ood_loader_iter = iter(ood_loader)
    p_in = torch.tensor(1. / (1. + lam), dtype=torch.float).cuda()
    p_out = torch.tensor(lam, dtype=torch.float).cuda() * p_in
    log_p_in = p_in.log()
    log_p_out = p_out.log()
    start = time.time()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        noise = next(ood_loader_iter)[0].cuda()
        optimizer.zero_grad()
        # One forward pass over the concatenated in+OOD batch, then split.
        full_data = torch.cat([data, noise], 0)
        full_out = model(full_data)
        full_out = F.log_softmax(full_out, dim=1)
        output = full_out[:data.shape[0]]
        output_adv = full_out[data.shape[0]:]
        # Log-likelihoods of each batch under the in- and out-GMMs
        # (logsumexp over mixture components).
        like_in_in = torch.logsumexp(model.mm(data.view(data.shape[0], -1)), 0 )
        like_out_in = torch.logsumexp(model.mm(noise.view(noise.shape[0], -1)), 0 )
        like_in_out = torch.logsumexp(model.mm_out(data.view(data.shape[0], -1)), 0 )
        like_out_out = torch.logsumexp(model.mm_out(noise.view(noise.shape[0], -1)), 0 )
        # loss1: classification on in-distribution data.
        loss1 = criterion(output, target)
        # loss2: maximize mean log-probability over all classes on the OOD
        # batch, i.e. push its softmax towards uniform.
        loss2 = -output_adv.mean()
        # loss3/loss4: negative log marginal likelihood of each batch under
        # the two-component (in/out) density mixture.
        loss3 = - torch.logsumexp(torch.stack([log_p_in + like_in_in,
                                               log_p_out + like_in_out], 0), 0).mean()
        loss4 = - torch.logsumexp(torch.stack([log_p_in + like_out_in,
                                               log_p_out + like_out_out], 0), 0).mean()
        loss = p_in*(loss1 + loss3) + p_out*(loss2 + loss4)
        loss.backward()
        optimizer.step()
        likelihood_loss += loss3.item()
        train_loss += loss.item()
        _, predicted = output.max(1)
        correct += predicted.eq(target).sum().item()
        # Keep every OOD-GMM component at least `margin` wider (in log-var)
        # than the widest in-distribution component.
        threshold = model.mm.logvar.max() + margin
        idx = model.mm_out.logvar<threshold
        model.mm_out.logvar.data[idx] = threshold
        if (batch_idx % verbose == 0) and verbose>0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))
    print('Time: ', time.time() - start)
def validate(val_loader, model, criterion, epoch):
    """Perform validation on the validation set.

    Args:
        val_loader: validation data loader.
        model: model under evaluation (switched to eval mode here).
        criterion: loss used only for reporting.
        epoch: current epoch, used for TensorBoard logging.

    Returns:
        Average top-1 precision over the validation set (percent).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input = input.cuda()
        target = target.cuda()
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data, input.size(0))
        top1.update(prec1, input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    # log to TensorBoard
    if args.tensorboard:
        log_value('val_loss', losses.avg, epoch)
        log_value('val_acc', top1.avg, epoch)
    return top1.avg
def save_checkpoint(state, epoch):
    """Persist *state* as checkpoint_<epoch>.pth.tar in the experiment dir."""
    ckpt_dir = "checkpoints/{in_dataset}/{name}/".format(in_dataset=args.in_dataset, name=args.name)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    torch.save(state, ckpt_dir + 'checkpoint_{}.pth.tar'.format(epoch))
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.count = 0
        self.sum = 0
        self.avg = 0
        self.val = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr_schedule=(50, 75, 90)):
    """Decay every param group's learning rate by 10x at scheduled epochs.

    Args:
        optimizer: optimizer whose ``param_groups`` are updated in place.
        epoch: current epoch number.
        lr_schedule: epochs at which the learning rate is multiplied by 0.1
            (default was a mutable list; a tuple avoids the mutable-default
            anti-pattern with identical behavior).
    """
    if epoch in lr_schedule:
        for group in optimizer.param_groups:
            group['lr'] *= .1
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors: precision@k (in percent) for each k in *topk*.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # Use reshape, not view: correct[:k] is a slice of a transposed
        # (non-contiguous) tensor, so view(-1) raises for k > 1.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8142507 | #!/usr/bin/env python3
import numpy as np
import numpy.random as npr
from block import block
def test_np():
npr.seed(0)
nx, nineq, neq = 4, 6, 7
Q = npr.randn(nx, nx)
G = npr.randn(nineq, nx)
A = npr.randn(neq, nx)
D = np.diag(npr.rand(nineq))
K_ = np.bmat((
(Q, np.zeros((nx,nineq)), G.T, A.T),
(np.zeros((nineq,nx)), D, np.eye(nineq), np.zeros((nineq,neq))),
(G, np.eye(nineq), np.zeros((nineq,nineq+neq))),
(A, np.zeros((neq, nineq+nineq+neq)))
))
K = block((
(Q, 0, G.T, A.T),
(0, D, 'I', 0),
(G, 'I', 0, 0),
(A, 0, 0, 0)
))
assert np.allclose(K_, K)
def test_torch():
try:
import torch
except:
print('Warning: PyTorch not found. Skipping tests.')
return
torch.manual_seed(0)
nx, nineq, neq = 4, 6, 7
Q = torch.randn(nx, nx)
G = torch.randn(nineq, nx)
A = torch.randn(neq, nx)
D = torch.diag(torch.rand(nineq))
K_ = torch.cat((
torch.cat((Q, torch.zeros(nx, nineq).type_as(Q), G.t(), A.t()), 1),
torch.cat((torch.zeros(nineq, nx).type_as(Q), D, torch.eye(nineq).type_as(Q),
torch.zeros(nineq,neq).type_as(Q)), 1),
torch.cat((G, torch.eye(nineq).type_as(Q), torch.zeros(nineq,nineq+neq).type_as(Q)), 1),
torch.cat((A, torch.zeros((neq, nineq+nineq+neq))), 1)
))
K = block((
(Q, 0, G.t(), A.t()),
(0, D, 'I', 0),
(G, 'I', 0, 0),
(A, 0, 0, 0)
))
assert (K - K).norm() == 0.0
if __name__=='__main__':
test_np()
test_torch()
| StarcoderdataPython |
6434209 | <filename>tensormodel/nsfw/_nsfw.py
from typing import Optional, Tuple
import cv2
import numpy as np
import linora as la
import tensordata as td
import tensorflow as tf
from tensorflow.keras import layers
__all__ = ['NSFW_Model']
def _batch_norm(name: str) -> layers.BatchNormalization:
    """Create a named BatchNormalization layer with a fixed epsilon of 1e-05
    (shared by every BN in the network; presumably required to match the
    pretrained open_nsfw weights -- confirm against the released model)."""
    return layers.BatchNormalization(name=name, epsilon=1e-05)
def _conv_block(stage: int, block: int, inputs: tf.Tensor, nums_filters: Tuple[int, int, int],
                kernel_size: int = 3, stride: int = 2) -> tf.Tensor:
    """Bottleneck residual block with a 1x1-conv projection shortcut.

    The main path is a 1x1 -> kxk -> 1x1 convolution stack (each followed by
    batch norm); the shortcut is a strided 1x1 convolution so its shape
    matches before the residual add.  `stage`/`block` only name the layers.
    """
    num_filters_1, num_filters_2, num_filters_3 = nums_filters
    conv_name_base = f"conv_stage{stage}_block{block}_branch"
    bn_name_base = f"bn_stage{stage}_block{block}_branch"
    shortcut_name_post = f"_stage{stage}_block{block}_proj_shortcut"
    # Projection shortcut: strided 1x1 conv + BN, no activation.
    shortcut = layers.Conv2D(name=f"conv{shortcut_name_post}", filters=num_filters_3, kernel_size=1, strides=stride, padding="same")(inputs)
    shortcut = _batch_norm(f"bn{shortcut_name_post}")(shortcut)
    x = layers.Conv2D(name=f"{conv_name_base}2a", filters=num_filters_1, kernel_size=1, strides=stride, padding="same")(inputs)
    x = _batch_norm(f"{bn_name_base}2a")(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(name=f"{conv_name_base}2b", filters=num_filters_2, kernel_size=kernel_size, strides=1, padding="same")(x)
    x = _batch_norm(f"{bn_name_base}2b")(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(name=f"{conv_name_base}2c", filters=num_filters_3, kernel_size=1, strides=1, padding="same")(x)
    x = _batch_norm(f"{bn_name_base}2c")(x)
    # Residual add, then the block's final activation.
    x = layers.Add()([x, shortcut])
    return layers.Activation("relu")(x)
def _identity_block(stage: int, block: int, inputs: tf.Tensor,
                    nums_filters: Tuple[int, int, int], kernel_size: int) -> tf.Tensor:
    """Bottleneck residual block with an identity shortcut.

    Same 1x1 -> kxk -> 1x1 main path as `_conv_block`, but the input is added
    back unchanged, so input and output shapes must already match.
    """
    num_filters_1, num_filters_2, num_filters_3 = nums_filters
    conv_name_base = f"conv_stage{stage}_block{block}_branch"
    bn_name_base = f"bn_stage{stage}_block{block}_branch"
    x = layers.Conv2D(name=f"{conv_name_base}2a", filters=num_filters_1, kernel_size=1, strides=1, padding="same")(inputs)
    x = _batch_norm(f"{bn_name_base}2a")(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(name=f"{conv_name_base}2b", filters=num_filters_2, kernel_size=kernel_size, strides=1, padding="same")(x)
    x = _batch_norm(f"{bn_name_base}2b")(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(name=f"{conv_name_base}2c", filters=num_filters_3, kernel_size=1, strides=1, padding="same")(x)
    x = _batch_norm(f"{bn_name_base}2c")(x)
    # Identity residual add, then the block's final activation.
    x = layers.Add()([x, inputs])
    return layers.Activation("relu")(x)
def make_open_nsfw_model(input_shape=(224, 224, 3)):
    """Build the open_nsfw classifier: a ResNet-style bottleneck trunk of
    four stages followed by average pooling and a 2-way softmax head
    (class index 1 is treated as NSFW by the callers in this module).
    """
    image_input = layers.Input(shape=input_shape, name="input")
    x = image_input
    # Manual 3-pixel padding so the 7x7 "valid" conv reproduces same-padding.
    x = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], "CONSTANT")
    x = layers.Conv2D(name="conv_1", filters=64, kernel_size=7, strides=2, padding="valid")(x)
    x = _batch_norm("bn_1")(x)
    x = layers.Activation("relu")(x)
    x = layers.MaxPooling2D(pool_size=3, strides=2, padding="same")(x)
    # Stage 0: 3 blocks, no spatial downsampling (stride 1).
    x = _conv_block(stage=0, block=0, inputs=x, nums_filters=(32, 32, 128), kernel_size=3, stride=1)
    x = _identity_block(stage=0, block=1, inputs=x, nums_filters=(32, 32, 128), kernel_size=3)
    x = _identity_block(stage=0, block=2, inputs=x, nums_filters=(32, 32, 128), kernel_size=3)
    # Stage 1: 4 blocks, downsampling in the first.
    x = _conv_block(stage=1, block=0, inputs=x, nums_filters=(64, 64, 256), kernel_size=3, stride=2)
    x = _identity_block(stage=1, block=1, inputs=x, nums_filters=(64, 64, 256), kernel_size=3)
    x = _identity_block(stage=1, block=2, inputs=x, nums_filters=(64, 64, 256), kernel_size=3)
    x = _identity_block(stage=1, block=3, inputs=x, nums_filters=(64, 64, 256), kernel_size=3)
    # Stage 2: 6 blocks.
    x = _conv_block(stage=2, block=0, inputs=x, nums_filters=(128, 128, 512), kernel_size=3, stride=2)
    x = _identity_block(stage=2, block=1, inputs=x, nums_filters=(128, 128, 512), kernel_size=3)
    x = _identity_block(stage=2, block=2, inputs=x, nums_filters=(128, 128, 512), kernel_size=3)
    x = _identity_block(stage=2, block=3, inputs=x, nums_filters=(128, 128, 512), kernel_size=3)
    x = _identity_block(stage=2, block=4, inputs=x, nums_filters=(128, 128, 512), kernel_size=3)
    x = _identity_block(stage=2, block=5, inputs=x, nums_filters=(128, 128, 512), kernel_size=3)
    # Stage 3: 3 blocks.
    x = _conv_block(stage=3, block=0, inputs=x, nums_filters=(256, 256, 1024), kernel_size=3, stride=2)
    x = _identity_block(stage=3, block=1, inputs=x, nums_filters=(256, 256, 1024), kernel_size=3)
    x = _identity_block(stage=3, block=2, inputs=x, nums_filters=(256, 256, 1024), kernel_size=3)
    # Head: global 7x7 average pool, flatten, 2-unit softmax.
    x = layers.AveragePooling2D(pool_size=7, strides=1, padding="valid", name="pool")(x)
    x = layers.Flatten()(x)
    logits = layers.Dense(name="fc_nsfw", units=2)(x)
    output = layers.Activation("softmax", name="predictions")(logits)
    model = tf.keras.Model(image_input, output)
    return model
def preprocess_image(image):
    """Convert an image (file path or PIL-like image) into the network input:
    a 224x224 float array with channels reversed to BGR and fixed per-channel
    means subtracted (Caffe-style preprocessing -- confirm against the
    pretrained weights)."""
    if isinstance(image, str):
        image = la.image.read_image(image)
    image = la.image.color_convert(image, la.image.ColorMode.RGB)
    image = la.image.resize(image, (224, 224), la.image.ResizeMethod.BILINEAR)
    image = la.image.image_to_array(image)
    # Reverse channel order (RGB -> BGR) and subtract per-channel means.
    image = image[:, :, ::-1]-np.array([104., 117., 123.], dtype=np.float32)
    return image
class NSFW_Model():
    """Wrapper around the open_nsfw Keras model for image/video scoring.

    Downloads the pretrained weights on first use and exposes batch image
    prediction plus per-frame video scoring.
    """
    def __init__(self, weights_path, gpu_strategy=None):
        # Fetch the released open_nsfw weights if they are not cached yet.
        if not td.gfile.exists(weights_path):
            url = 'https://github.com/bhky/opennsfw2/releases/download/v0.1.0/open_nsfw_weights.h5'
            td.utils.request.files(url, weights_path)
        # Optional callable to configure the GPU/distribution strategy
        # before the model is built.
        if gpu_strategy is not None:
            gpu_strategy()
        self.model = make_open_nsfw_model()
        self.model.load_weights(weights_path)
    def predict_images(self, image_file, batch_size=None):
        """Return the NSFW probability (softmax column 1, rounded to 3
        decimals) for one image path or a list of image paths."""
        if isinstance(image_file, str):
            image_file = [image_file]
        images = np.array([preprocess_image(image_path) for image_path in image_file])
        predictions = self.model.predict(images, batch_size=len(images) if batch_size is None else batch_size)
        return [round(i, 3) for i in predictions[:,1].tolist()]
    def predict_video(self, video_path, frame_interval=8, prob_threshold=0.8, output_video_path=None):
        """Make prediction for each video frame.

        Runs the model on the first frame and then roughly every
        *frame_interval* frames, carrying the last score in between.
        Optionally writes an annotated copy of the video.  Returns a dict
        with per-frame timestamps and NSFW probabilities.
        """
        cap = cv2.VideoCapture(video_path)
        fps = cap.get(cv2.CAP_PROP_FPS)
        video_writer = None
        nsfw_prob = 0.0
        nsfw_probs = []
        frame_count = 0
        while cap.isOpened():
            ret, bgr_frame = cap.read()
            if not ret:
                break
            frame_count += 1
            frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
            # Lazily open the writer once the frame size is known.
            if video_writer is None and output_video_path is not None:
                video_writer = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc("M", "J", "P", "G"),
                                               fps, (frame.shape[1], frame.shape[0]))
            if frame_count == 1 or (frame_count + 1) % frame_interval == 0:
                pil_frame = la.image.array_to_image(frame)
                input_frame = preprocess_image(pil_frame)
                nsfw_prob = self.model.predict(np.expand_dims(input_frame, axis=0), 1).round(3)[0][1]
                # NOTE(review): each inference appends frame_interval copies
                # (including for frame 1), so len(nsfw_probs) can drift from
                # the true frame count -- confirm intended.
                nsfw_probs.extend([nsfw_prob]*frame_interval)
            if video_writer is not None:
                result_text = f"NSFW probability: {nsfw_prob:.3f}"
                # Red annotation above the threshold, blue otherwise (RGB here;
                # converted back to BGR before writing).
                colour = (255, 0, 0) if nsfw_prob >= prob_threshold else (0, 0, 255)
                cv2.putText(frame, result_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, colour, 2, cv2.LINE_AA)
                video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        if video_writer is not None:
            video_writer.release()
        cap.release()
        cv2.destroyAllWindows()
        # Timestamp for each stored probability, derived from the video fps.
        elapsed_seconds = (np.arange(1, len(nsfw_probs) + 1) / fps).round(2).tolist()
        return {'frame_time':elapsed_seconds, 'frame_prob':nsfw_probs}
| StarcoderdataPython |
8069222 | <filename>PCI/decision_tree_sklearn.py
# coding:utf-8
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn import preprocessing
from sklearn import tree
# Toy web-analytics dataset: referrer, country, read-FAQ flag, pages viewed,
# and the subscription tier eventually purchased (the prediction target).
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
           ['google', 'France', 'yes', 23, 'Premium'],
           ['digg', 'USA', 'yes', 24, 'Basic'],
           ['kiwitobes', 'France', 'yes', 23, 'Basic'],
           ['google', 'UK', 'no', 21, 'Premium'],
           ['(direct)', 'New Zealand', 'no', 12, 'None'],
           ['(direct)', 'UK', 'no', 21, 'Basic'],
           ['google', 'USA', 'no', 24, 'Premium'],
           ['slashdot', 'France', 'yes', 19, 'None'],
           ['digg', 'USA', 'no', 18, 'None'],
           ['google', 'UK', 'no', 18, 'None'],
           ['kiwitobes', 'UK', 'no', 19, 'None'],
           ['digg', 'New Zealand', 'yes', 12, 'Basic'],
           ['slashdot', 'UK', 'no', 21, 'None'],
           ['google', 'UK', 'yes', 18, 'Basic'],
           ['kiwitobes', 'France', 'yes', 19, 'Basic']]
df = pd.DataFrame(my_data)
df.columns = ["refer", "location", "FAQ", "pv", "buy"]
print(df)
# print (df.ix[[0]])
# print("++++++++++++++")
# print (type(df[0]))
# print (df[0])
# print("++++++++++++++")
# print (type(df[[0]]))
# print (df[[0]])
# print("=============")
# print(type(df.ix[[0, 2, 4, 5, 7]]))
# print(df.ix[[0, 2, 4, 5, 7],[0, 1]])
# print(df.ix[:,[1,2]])
clf = DecisionTreeClassifier()
# Features vs target (NOTE: X and Y are views of df; the .loc writes below
# may trigger pandas SettingWithCopy warnings -- confirm acceptable).
X = df[['refer', 'location', 'FAQ', 'pv']]
Y = df[['buy']]
# print(Y)
# Integer-encode the target labels.
le_buy = preprocessing.LabelEncoder()
le_buy = le_buy.fit(Y['buy'])
# print(le_buy.classes_)
# Y['buy'] = le_buy.transform(Y['buy'])
Y.loc[:, 'buy'] = le_buy.transform(Y['buy'])
# Integer-encode each categorical feature with its own encoder
# ('pv' is already numeric and is left as-is).
le_refer = preprocessing.LabelEncoder()
le_location = preprocessing.LabelEncoder()
le_FAQ = preprocessing.LabelEncoder()
le_refer = le_refer.fit(X['refer'])
le_location = le_location.fit(X['location'])
le_FAQ = le_FAQ.fit(X['FAQ'])
X.loc[:, "refer"] = le_refer.transform(X['refer'])
X.loc[:, "location"] = le_location.transform(X['location'])
X.loc[:, "FAQ"] = le_FAQ.transform(X['FAQ'])
clf = clf.fit(X, Y)
predictions = clf.predict(X)
# print (clf.predict([[4,3,1,18]]))
print(predictions, le_buy.inverse_transform(predictions))
from sklearn.metrics import make_scorer, accuracy_score
# NOTE: accuracy is computed on the training data itself, so it mostly
# confirms the tree fits the 16 samples, not that it generalizes.
print(accuracy_score(Y['buy'], predictions))
# extract the decision rule
"""
https://stackoverflow.com/questions/20224526/how-to-extract-the-decision-rules-from-scikit-learn-decision-tree
"""
from sklearn.tree import _tree
def tree_to_code(tree, feature_names):
    """Print a fitted decision tree as equivalent nested Python if/else code.

    Args:
        tree: a fitted sklearn DecisionTreeClassifier (NOTE: this parameter
            name shadows the ``sklearn.tree`` module imported above).
        feature_names: feature/column names, indexed by the tree's feature ids.
    """
    tree_ = tree.tree_
    # Map each internal node's feature index to a readable name; leaf nodes
    # carry the sentinel _tree.TREE_UNDEFINED.
    feature_name = [
        feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
        for i in tree_.feature
    ]
    print("def tree({}):".format(", ".join(feature_names)))
    def recurse(node, depth):
        # Depth-first walk; printed indentation mirrors the recursion depth.
        indent = "  " * depth
        if tree_.feature[node] != _tree.TREE_UNDEFINED:
            name = feature_name[node]
            threshold = tree_.threshold[node]
            print("{}if {} <= {}:".format(indent, name, threshold))
            recurse(tree_.children_left[node], depth + 1)
            print("{}else: # if {} > {}".format(indent, name, threshold))
            recurse(tree_.children_right[node], depth + 1)
        else:
            # Leaf: print the class-count vector for samples reaching it.
            print("{}return {}".format(indent, tree_.value[node]))
    recurse(0, 1)
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
out = StringIO()
out = tree.export_graphviz(clf, out_file=out)
# print(out.getvalue())
#
print (tree.export_graphviz(clf, out_file=out)) | StarcoderdataPython |
97286 | <filename>file_read2.py
# Try out file I/O: open one file for appending and dump another to stdout.
# Context managers guarantee both handles are closed (the originals leaked).
with open("example.txt", "a+") as myfile, open("python.txt", "r") as secondfile:
    # read() consumes the whole file on the first pass, so the remaining
    # iterations print empty strings; seek(0) would be needed to re-read.
    for _ in range(4):
        print(secondfile.read())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.