index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,000 | e5700bccf6f4a316fa61c3234f1324a5abbe07a9 | import pandas as pd
import numpy as np
from scipy.stats.mstats import gmean
def softmax(X, theta=1.0, axis=None):
    """
    Compute the softmax of each element along an axis of X.

    Parameters
    ----------
    X: ND-Array, or anything numpy can convert (e.g. a list of floats).
    theta (optional): float parameter, used as a multiplier
        prior to exponentiation. Default = 1.0
    axis (optional): axis to compute values along. Default is the
        first non-singleton axis.

    Returns an array the same size as X. The result will sum to 1
    along the specified axis.
    """
    # Convert up front so 1-D detection below works for plain sequences
    # (the original `X.shape` access raised AttributeError on lists).
    X = np.asarray(X)
    # make X at least 2d so there is always an axis to normalise over
    y = np.atleast_2d(X)
    # find the first non-singleton axis; fall back to the last axis when
    # every dimension is 1 (softmax of a single value is just 1.0), instead
    # of letting next() raise StopIteration
    if axis is None:
        axis = next((j[0] for j in enumerate(y.shape) if j[1] > 1), -1)
    # scale by the temperature parameter
    y = y * float(theta)
    # subtract the max for numerical stability (exp of large logits overflows)
    y = y - np.expand_dims(np.max(y, axis=axis), axis)
    # exponentiate y
    y = np.exp(y)
    # take the sum along the specified axis
    ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
    # finally: divide elementwise
    p = y / ax_sum
    # flatten back if the caller passed 1-D input
    if X.ndim == 1:
        p = p.flatten()
    return p
if __name__ == '__main__':
    # Ensemble blend: 3 architectures x 5 CV folds x 5 SWA checkpoints
    # = 75 saved logit files, each turned into per-class probabilities.
    preds = []
    for model_name in ["densenet121", "inception_v3", "resnet50"]:
        for fold in range(5):
            for checkpoint in range(5):
                # Raw logits dumped by the training run; path is machine-specific.
                pred = np.load(f"/media/ngxbac/DATA/logs_datahack/intel-scene/{model_name}_{fold}/predict_swa_2/predictions.infer_0.logits.{checkpoint}.npy")
                pred = softmax(pred, axis=1)
                preds.append(pred)
    print(len(preds))
    preds = np.asarray(preds)
    # NOTE(review): with preds shaped (members, samples, classes), axis=1
    # averages over *samples*; blending the 75 ensemble members would be
    # axis=0 — confirm which is intended before trusting the submission.
    preds = np.mean(preds, axis=1)
    print(preds.shape)
    preds = np.argmax(preds, axis=1)
    submission = pd.read_csv("./data/test.csv")
    submission['label'] = preds
submission.to_csv(f"kfold_5swa_blend.csv", index=False) |
989,001 | d5e89ee77e3a9291b3e4937254b1525f605c3813 | from django.conf.urls import url,include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Site root is handled by the 'home' app.
    # NOTE(review): the app_name keyword to include() only exists in older
    # Django releases — confirm the project's pinned Django version.
    url(r'^',include('home.urls',namespace='home',app_name='home')),
    url(r'^admin/', admin.site.urls),
    url(r'^blog/',include('blog.urls',namespace='blog',app_name='blog')),
    url(r'^podcast/',include('podcast.urls',namespace='podcast',app_name='podcast')),
    # Two apps share the /gallery/ prefix; URLs not matched by 'gallery'
    # fall through to 'photologue'.
    url(r'^gallery/',include('gallery.urls',namespace='gallery')),
    url(r'^gallery/',include('photologue.urls', namespace='photologue'))
]
# Serve static/media files from Django while the site is not yet deployed
# behind a real web server (DEBUG only).
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL,document_root= settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
989,002 | 77df14d4737c9fb208d00da2655a35f8fb768239 | # Generated by Django 2.1.2 on 2018-11-20 16:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares Ticket.bugs with
    related_name='tickets' so Bug instances expose ``bug.tickets``
    instead of the default ``ticket_set`` reverse accessor."""

    dependencies = [
        ('bugtracker', '0005_auto_20181113_1253'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='bugs',
            field=models.ManyToManyField(related_name='tickets', to='bugtracker.Bug'),
        ),
    ]
|
989,003 | 21914319cc46825445a0936fa537d9e0283303cd | #!/usr/bin/env python
# -*- coding: utf-8; mode: Python; py-indent-offset: 4 -*-
import os
import sys
# Packages to check, and packages to skip entirely.
include = ["base_project"]
exclude = []
inc_args = " ".join(include)
exc_args = ",".join(exclude)

print("Checking PEP8 ...")
pep8_cmd = ("pycodestyle --show-source --show-pep8 --max-line-length=79 "
            "--filename=*py --exclude={0} {1}".format(exc_args, inc_args))
if os.system(pep8_cmd) != 0:
    sys.exit(1)

print("Running lint ...")
lint_cmd = "pylint --rcfile=pylintrc --ignore={0} {1}".format(exc_args, inc_args)
if os.system(lint_cmd) != 0:
    sys.exit(1)

sys.exit(0)
|
989,004 | 38e88485c32b5690d46c593cd36865f5e48afcbc | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
# Alternate (reversed) wiring order kept for reference:
# pins = [11,12,13,15,16,18,22,7]
pins = [7,22,18,16,15,13,12,11]
# Bit pattern per displayable character; the hex values match the classic
# 7-segment encoding (bit 7 presumably drives the decimal point — confirm
# against the actual wiring).
dats = {"0":0x3f,
        "1":0x06,
        "2":0x5b,
        "3":0x4f,
        "4":0x66,
        "5":0x6d,
        "6":0x7d,
        "7":0x07,
        "8":0x7f,
        "9":0x6f,
        "A":0x77,
        "b":0x7c,
        "C":0x39,
        "d":0x5e,
        "E":0x79,
        "F":0x71,
        ".":0x80}
# Dump the pattern table at import time. Fixed: this was a Python-2-only
# `print key, ...` statement while the rest of the file uses print().
for key in dats:
    print(key, "corresponds to", bin(dats[key]))
def setup():
    """Configure every board-numbered pin as an output, driven low."""
    GPIO.setmode(GPIO.BOARD)
    for output_pin in pins:
        GPIO.setup(output_pin, GPIO.OUT)
        GPIO.output(output_pin, GPIO.LOW)
def writeOneByte(val, pins):
    """Drive the 8 pins with the bits of ``val``, most significant first.

    ``val`` is rendered as an 8-character binary string; character i is
    written to pins[i], so ``pins`` must be ordered MSB-to-LSB for the
    wiring in use. Each bit is echoed to stdout for debugging.
    """
    bits = bin(val)[2:].zfill(8)
    print(bits)
    for position, bit_char in enumerate(bits):
        level = int(bit_char)
        print(position, level)
        GPIO.output(pins[position], level)
def loop():
    """Cycle through every character pattern forever, half a second each."""
    while True:
        for character, pattern in dats.items():
            writeOneByte(pattern, pins=pins)
            time.sleep(0.5)
def destroy():
    """Drive every pin low, then release the GPIO resources."""
    for output_pin in pins:
        GPIO.output(output_pin, GPIO.LOW)
    GPIO.cleanup()
if __name__ == '__main__':  # Program start from here
    setup()
    # Run the display loop until interrupted; always leave the pins low
    # and the GPIO driver released on exit.
    try:
        loop()
    except KeyboardInterrupt:  # On Ctrl+C, clean up via destroy().
        destroy()
|
989,005 | bc45fa681424e629000bb03f534375fe3ae9692a | import struct
from Utils.output_code import OutputType
from ICMP.ICMP_packet import ICMP
class ICMPHandler:
    """Inspect a raw ICMP reply and reduce it to an output code.

    The reply is dispatched on its ICMP type: echo reply/request (0/8),
    destination unreachable (3) and time exceeded (11). Any other type
    raises KeyError from the dispatch table, as before.
    """

    def __init__(self, sequence, data):
        # Skip the 20-byte IP header; the ICMP header is the next 8 bytes.
        self._pack_header = self._unpack_packet_header(data[20:28])
        self._type = self._pack_header[0]
        self._sequence = sequence
        self._data = data
        self._output_code = OutputType.ERROR.value
        self._delegator = {0: self._echo_request,
                           8: self._echo_request,
                           3: self._third_type,
                           11: self._eleven_type}
        self._handle()

    def get_output_code(self):
        """Return the code computed while handling the packet."""
        return self._output_code

    def _handle(self):
        handler = self._delegator[self._type]
        return handler()

    def _echo_request(self):
        # Success only if the packet is ours (id match) and its sequence
        # number is one we sent.
        ident, seq = self._pack_header[3], self._pack_header[4]
        if ident == ICMP.ID and seq in self._sequence:
            self._output_code = OutputType.SUCCESS.value

    def _third_type(self):
        # Destination unreachable: map the ICMP code to an output code.
        code = self._pack_header[1]
        if code == 0:
            self._output_code = OutputType.NET.value
            return
        if code == 1:
            self._output_code = OutputType.HOST.value
            return
        if code in (9, 10, 13):
            self._output_code = OutputType.PROHIB.value
            return
        self._output_code = f'!{code}'

    def _eleven_type(self):
        # Time exceeded: the expired packet's own ICMP header sits 48 bytes
        # in (20 outer IP + 8 ICMP + 20 quoted inner IP header).
        echoed = self._unpack_packet_header(self._data[48:56])
        if (echoed[0] == 8 and
                echoed[3] == ICMP.ID and
                echoed[4] in self._sequence):
            self._output_code = OutputType.SUCCESS.value

    @staticmethod
    def _unpack_packet_header(data):
        # type (B), code (B), checksum (H), identifier (H), sequence (H)
        return struct.unpack('!BBHHH', data)
|
989,006 | 124494a36cab9853f2ac0c332f8bb9ae4a2a5d02 | """
Given a path prefix PREFIX of directories containing files
config.json
dev/{summary.json,average_activations.npy,average_norms.npy}
train/summary.json,average_activations.npy,average_norms.npy}
In the directory associated with the prefix, outputs the following files
dev_loss.pdf
train_loss.pdf
dev_train_gap.pdf
Plots for all available attention types available in the prefix (e.g.,
soft, topk, topk-50) at all available k values. The plot is a line plot
of the corresponding loss for the different attention type at different
k values (soft is assumed to have k=0).
Similarly, plots the activation and norms distributions for the marginal
activation block aggregates into
train_act.pdf
train_nrm.pdf
dev_act.pdf
dev_nrm.pdf
All results should be from the same task for viz to make sense.
"""
from absl import app, flags
from ..motivation.bert_agg import main
from ..params import GLUE_TASK_NAMES
from .. import log
# Command-line interface: where to read results from, where to cache them,
# and which GLUE task the result directories belong to.
flags.DEFINE_string("prefix", None, "prefix directory")
flags.DEFINE_string(
    "cache", None, "cache directory (autogenerated based on prefix)"
)
flags.DEFINE_bool("overwrite", False, "overwrite previous directory files")
flags.DEFINE_enum("task", None, GLUE_TASK_NAMES, "BERT fine-tuning task")

def _main(_argv):
    """absl entry point: initialise logging, then delegate to bert_agg.main."""
    log.init()
    main(flags.FLAGS.prefix, flags.FLAGS.cache, flags.FLAGS.overwrite, flags.FLAGS.task)

if __name__ == "__main__":
    # --task and --prefix are mandatory; absl aborts with a usage message
    # if either is missing.
    flags.mark_flag_as_required("task")
    flags.mark_flag_as_required("prefix")
    app.run(_main)
|
989,007 | 0d489c71aa5f829725094efe1262327025e1b910 | from flask import render_template, redirect, url_for, flash, request,session
from models.department_model import DepartmentModel
class Department:
    """Flask controller methods for the department CRUD pages.

    All handlers run inside a request context; most gate on an active
    login session and redirect to 'login' otherwise.
    """

    @staticmethod
    def create_department():
        """Show the add-department form; create a department on POST."""
        if session:
            # NOTE(review): username is read but never passed to the template
            # below — confirm whether 'add-department.html' needs it.
            username = session['username']
            if request.method == 'POST':
                department_name = request.form['department_name']
                description = request.form['description']
                record = DepartmentModel(title=department_name, description=description)
                record.create()
                flash('new department successfully created', 'success')
                return redirect(url_for('add_department'))
            return render_template('add-department.html')
        else:
            return redirect(url_for('login'))

    @staticmethod
    def view_departments():
        """List every department for the logged-in user."""
        if session:
            username = session['username']
            departments = DepartmentModel.fetch_all()
            return render_template('view-departments.html', departments=departments, username=username)
        else:
            return redirect(url_for('login'))

    @staticmethod
    def update_department(did: int):
        """Update department ``did`` from the edit form on POST.

        NOTE(review): unlike the other handlers this one performs no session
        check, and a non-POST request returns None — confirm intent.
        """
        if request.method == 'POST':
            department_name = request.form['department_name_edit']
            description = request.form['description_edit']
            if DepartmentModel.update(id=did, title=department_name,desc=description):
                flash('record successfully updated', 'success')
                return redirect(url_for('view_departments'))
            else:
                flash('unable to update record', 'danger')
                return redirect(url_for('view_departments'))

    @staticmethod
    def delete_department(did: int):
        """Delete department ``did`` on POST, unless it still has employees."""
        if session:
            if request.method == 'POST':
                dept = DepartmentModel.query.filter_by(id=did).first()
                # Refuse to delete a department that still has staff assigned.
                if len(dept.employees) > 0 :
                    flash('This departments has employees! You cannot delete it', 'danger')
                    return redirect(url_for('view_departments'))
                else:
                    if DepartmentModel.delete(id=did):
                        flash('Departments has successfully been delete', 'success')
                        return redirect(url_for('view_departments'))
                    else:
                        flash('Error', 'danger')
                        return redirect(url_for('view_departments'))
        return redirect(url_for('login'))

    @staticmethod
    def get_department_employees(id: int):
        """Render the employees that belong to department ``id``."""
        if session:
            username = session['username']
            dept = DepartmentModel.query.filter_by(id=id).first()
            return render_template('view-department-empl.html', department=dept, employees=dept.employees)
        else:
            return redirect(url_for('login'))
|
989,008 | 5f586c98421d20214ee030a72e7b8efb36e871c3 | ANYONE = None
NOONE = -1  # explicitly unowned (ANYONE = None above means unrestricted)
# Channel indices of the game-state tensor (presumably a Go/board-game
# encoding — confirm against the consuming environment).
BLACK = 0
WHITE = 1
TURN_CHNL = 2  # whose turn it is
INVD_CHNL = 3  # invalid-move mask
PASS_CHNL = 4  # previous move was a pass
DONE_CHNL = 5  # game finished
NUM_CHNLS = 6  # total number of channels
class Group:
    """A connected group of stones together with its adjacent liberties."""

    def __init__(self):
        self.locations = set()
        self.liberties = set()

    def copy(self):
        """Return an independent copy (both sets are duplicated)."""
        duplicate = Group()
        duplicate.locations = set(self.locations)
        duplicate.liberties = set(self.liberties)
        return duplicate

    def __str__(self):
        return f'{self.locations}LOC {self.liberties}LIB'

    def __repr__(self):
        return str(self)
|
989,009 | 0464bc549938722385c4e7cca1df6db4bec0b412 | import logging
import numpy as np
from scipy.linalg import pinv, solve
from scipy.spatial import cKDTree
from uncoverml import mpiops
log = logging.getLogger(__name__)
def impute_with_mean(x, mean):
    """Fill the masked entries of 2-D masked array ``x`` column-wise.

    Column i's missing values are replaced by mean[i]; the result is
    returned with an all-False mask. Note ``x.data`` is modified in place.
    """
    # Nothing missing: hand back the input untouched.
    if np.ma.count_masked(x) == 0:
        return x
    for col, col_mean in enumerate(mean):
        x.data[x.mask[:, col], col] = col_mean
    return np.ma.MaskedArray(data=x.data, mask=False)
class MeanImputer:
    """
    Simple mean imputation.

    Replaces the missing values in x with the column means of x. The means
    are computed once (cluster-wide, via mpiops) on the first call and
    reused for every subsequent call.
    """

    def __init__(self):
        self.mean = None

    def __call__(self, x):
        # Lazily compute the mean on first use.
        if self.mean is None:
            self.mean = mpiops.mean(x)
        return impute_with_mean(x, self.mean)
class GaussImputer:
    """
    Gaussian Imputer.
    This imputer fits a Gaussian to the data, then conditions on this Gaussian
    to interpolate missing data. This is effectively the same as using a linear
    regressor to impute the missing data, given all of the non-missing
    dimensions.
    Have a look at:
    https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Conditional_distributions
    We use the precision (inverse covariance) form of the Gaussian for
    computational efficiency.
    """

    def __init__(self):
        # Fit statistics, computed lazily on the first call.
        self.mean = None
        self.prec = None

    def __call__(self, x):
        # Fit once, then condition each row on its observed dimensions.
        if self.mean is None or self.prec is None:
            self._make_impute_stats(x)
        for i in range(len(x)):
            x.data[i] = self._gaus_condition(x[i])
        # Every entry is now filled, so return with an all-False mask.
        return np.ma.MaskedArray(data=x.data, mask=False)

    def _make_impute_stats(self, x):
        # Mean and covariance are aggregated across MPI nodes.
        self.mean = mpiops.mean(x)
        cov = mpiops.covariance(x)
        self.prec, rank = pinv(cov, return_rank=True)  # stable pseudo inverse
        # if rank < len(self.mean):
        #     raise RuntimeError("This imputation method does not work on low "
        #                        "rank problems!")

    def _gaus_condition(self, xi):
        """Fill the masked entries of one sample with the conditional mean.

        With a = missing dims and b = observed dims, the conditional mean in
        precision form is mu_a - Laa^{-1} Lab (x_b - mu_b).
        """
        if np.ma.count_masked(xi) == 0:
            return xi
        a = xi.mask   # missing dimensions
        b = ~xi.mask  # observed dimensions
        xb = xi[b].data
        Laa = self.prec[np.ix_(a, a)]
        Lab = self.prec[np.ix_(a, b)]
        xfill = np.empty_like(xi)
        xfill[b] = xb
        xfill[a] = self.mean[a] - solve(Laa, Lab.dot(xb - self.mean[b]))
        return xfill
class NearestNeighboursImputer:
    """
    Nearest neighbour imputation.
    This builds up a KD tree using random points (without missing data), then
    fills in the missing data in query points with values from their average
    nearest neighbours.
    Parameters
    ----------
    nodes: int, optional
        maximum number of points to use as nearest neighbours.
    k: int, optional
        number of neighbours to average for missing values.
    """

    def __init__(self, nodes=500, k=3):
        self.k = k
        self.nodes = nodes
        self.kdtree = None  # built lazily on the first __call__

    def __call__(self, x):
        # impute with neighbours
        # Rows containing at least one masked value need filling.
        missing_ind = np.ma.count_masked(x, axis=1) > 0
        if self.kdtree is None:
            self._make_kdtree(x)
        if missing_ind.sum() > 0:
            missing_mask = x.mask[missing_ind]
            # k-NN average of every incomplete row ...
            nn = self._av_neigbours(x[missing_ind])
            # ... copied into the masked positions only.
            x.data[x.mask] = nn[missing_mask]
        return np.ma.MaskedArray(data=x.data, mask=False)

    def _make_kdtree(self, x):
        # Build the tree from a random sample of complete rows (MPI-wide),
        # and fail loudly if queries against it yield non-finite distances.
        self.kdtree = cKDTree(mpiops.random_full_points(x, Napprox=self.nodes))
        if not np.isfinite(self.kdtree.query(x, k=self.k)[0]).all():
            log.warning('Kdtree computation encountered problem. '
                        'Not enough neighbors available to compute '
                        'kdtree. Printing kdtree for debugging purpose')
            raise ValueError('Computed kdtree is not fully populated.'
                             'Not enough valid neighbours available.')

    def _get_neighbour(self, xq):
        """Return the single nearest tree point to query ``xq``."""
        _, neighbourind = self.kdtree.query(xq)
        return self.kdtree.data[neighbourind]

    def _av_neigbours(self, xq):
        """Return the mean of the k nearest tree points for each row of ``xq``."""
        xnn = [self.kdtree.data[self.kdtree.query(x, k=self.k)[1]].mean(axis=0)
               for x in xq]
        return np.vstack(xnn)
|
989,010 | f0d252e082c88dd6d9446bcfa0b4050748f798b5 | import osmium
import shapely.wkb as wkblib
import numpy as np
import pandas as pd
import geopandas as gpd
from rtree import index
from shapely.geometry import Point, Polygon
# TODO: class to extract ANY desired information from OSM, not only ways. How to do it?
class OsmRouteAnnotator(osmium.SimpleHandler):
    """Way-level speed-limit lookup built from an OSM .pbf extract.

    Parses every drivable highway into a (geo)DataFrame indexed by way id
    (nodes, linestring, length, name, maxspeed), buffers each way into a
    thin polygon and stores the polygon bounds in an R-tree so segments or
    points can later be matched to a way.
    """

    def __init__(self, pbf_path):
        osmium.SimpleHandler.__init__(self)
        self.wkbfab = osmium.geom.WKBFactory()
        # Filled row-by-row by the way() callbacks, then converted below.
        self.df = []
        # Highway classes considered drivable by car.
        self.road_types = ['motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'road', 'residential', 'service',
                           'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link']
        print(f'loading {pbf_path}...')
        # Triggers the way() callback for every way in the file.
        self.apply_file(pbf_path, locations=True)
        cols = ['way_id', 'nodes', 'line', 'line_length', 'name', 'maxspeed']
        self.df = pd.DataFrame(self.df, columns=cols).set_index('way_id')
        # Non-numeric maxspeed tags (e.g. 'walk', '50 mph') are coerced to 0.
        not_numeric_flag = ~self.df['maxspeed'].astype(str).str.isnumeric()
        self.df.loc[not_numeric_flag, 'maxspeed'] = '0'
        self.df['maxspeed'] = self.df['maxspeed'].astype(int)
        print('creating spatial index...')
        # Populate R-tree index with bounds of grid cells
        self.r_tree = index.Index()
        pols = []
        for way_id, row in self.df.iterrows():
            # Thin buffer (~5e-5 degrees) turns the way's linestring into a
            # polygon so containment tests tolerate small GPS offsets.
            p = Polygon(row['line'].buffer(.00005).exterior.coords)
            p.maxspeed = row['maxspeed']
            p.way_id = way_id
            p.name = row['name']
            pols.append(p)
            self.r_tree.insert(way_id, p.bounds)
        self.df = gpd.GeoDataFrame(self.df, geometry=pols)
        print(f'finished')
        # TODO: Eliminate redundant ways
        # If street name has only 1 speed, keep just 1. Maybe the longest linestring?

    def process_way(self, elem):
        """Collect one OSM way into self.df if it is a drivable highway."""
        # elem.nodes return a node list:
        # https://docs.osmcode.org/pyosmium/latest/ref_osm.html?highlight=noderef#osmium.osm.NodeRef
        # TagList can't be converted to dict automatically, see:
        # https://github.com/osmcode/pyosmium/issues/106
        keys = {tag.k: tag.v for tag in elem.tags}
        # filter all types of car driving highways: https://wiki.openstreetmap.org/wiki/Key:highway?uselang=en-GBs
        if (('highway' in keys.keys())):
            if (keys['highway'] in self.road_types):
                nodes = [n.ref for n in elem.nodes]
                wkb = self.wkbfab.create_linestring(elem)
                line = wkblib.loads(wkb, hex=True)
                names = [el.v for el in elem.tags if el.k == 'name']
                maxspeeds = [el.v for el in elem.tags if el.k == 'maxspeed']
                self.df.append([elem.id,
                                nodes,
                                line,
                                line.length,
                                names[0] if len(names) > 0 else '',
                                maxspeeds[0] if len(maxspeeds) > 0 else np.nan])

    def way(self, elem):
        # osmium callback, invoked once per way during apply_file().
        self.process_way(elem)

    def get_street_max_speed(self, segment):
        """Return the maxspeed of the way containing ``segment``.

        Tries containment of the whole segment first, then falls back to
        each endpoint; among multiple matches the shortest way wins.
        Raises when nothing matches (map-matching would be needed).
        """
        # rank 7, segment LINESTRING (13.28866358846426 52.45759948794097, 13.28908503055573 52.45704031539945)
        # fails because of lack of precision, check out here http://arthur-e.github.io/Wicket/sandbox-gmaps3.html
        # Need mapmatch
        # Filter possible candidates using R-Tree
        idxs = list(self.r_tree.intersection(segment.bounds))
        if (len(idxs) > 0):
            # Now do actual intersection
            filter1 = self.df.loc[idxs].contains(segment)
            way_id = self.df.loc[filter1[filter1 == True].index]
            if (len(way_id) > 0):
                way_id = way_id['line_length'].idxmin()
                return self.df.loc[way_id]['maxspeed']
            else:
                # Fall back to the segment's first endpoint ...
                first_point = Point(segment.xy[0][0], segment.xy[1][0])
                idxs = list(self.r_tree.intersection(first_point.bounds))
                if (len(idxs) > 0):
                    filter1 = self.df.loc[idxs].contains(first_point)
                    if (np.sum(filter1) > 0):
                        way_id = self.df.loc[filter1[filter1 == True].index]['line_length'].idxmin()
                        return self.df.loc[way_id]['maxspeed']
                # ... then to its second endpoint.
                second_point = Point(segment.xy[0][1], segment.xy[1][1])
                idxs = list(self.r_tree.intersection(second_point.bounds))
                if (len(idxs) > 0):
                    filter1 = self.df.loc[idxs].contains(second_point)
                    if (np.sum(filter1) > 0):
                        way_id = self.df.loc[filter1[filter1 == True].index]['line_length'].idxmin()
                        return self.df.loc[way_id]['maxspeed']
        raise Exception(
            f'Error mapping segment {segment} to street. Please check which segment caused it and evaluate usage of Map Matching')
|
989,011 | 4653a0fa91e2e81cfd37d7d75a9cc233a18e1bb5 | """Seamless high-level API.
Has a two-fold function:
1. Maintain a workflow graph containing nodes (cells, transformers etc.),
checksums, and connections. This workflow graph is pure data that can be
serialized any time to JSON (.seamless file).
2. Maintain a translation of the workflow graph to a low-level representation
that is constantly being evaluated. Interrogate the low-level representation
(asking for its status, checksums, etc.).
"""
import inspect
from types import LambdaType
from ast import PyCF_ONLY_AST, FunctionDef, Expr, Lambda
import textwrap
from silk.mixed import MixedBase
from silk import Silk
from silk.validation import _allowed_types
from ..core.lambdacode import lambdacode
from ..core.cached_compile import cached_compile
ConstantTypes = _allowed_types + (Silk, MixedBase, tuple)
import inspect
import os
def set_resource(f):
    """Resolve file ``f`` relative to the *caller's* source directory.

    When the caller's module is __main__, return a Resource wrapper for the
    path (so the file stays linked); otherwise return the file's contents
    as a string.
    """
    # Locate the source file of the code that called us.
    caller_frame = inspect.currentframe().f_back
    filename = os.path.realpath(inspect.getfile(caller_frame))
    currdir = os.path.realpath(os.getcwd())
    # Prefer a path relative to the working directory when possible.
    if filename.startswith(currdir):
        filename = os.path.relpath(filename, currdir)
    dirname = os.path.dirname(filename)
    ff = os.path.join(dirname, f)
    if inspect.getmodule(caller_frame).__name__ == "__main__":
        return Resource(ff)
    else:
        data = open(ff).read()
        return data
def parse_function_code(code_or_func, identifier="<None>"):
    """Normalize a callable or code string to (code, func_name, code_object).

    Accepts either a callable (its source is extracted, de-indented and
    stripped of decorators; lambdas are re-serialized via lambdacode) or a
    code string containing exactly one function definition or one lambda
    expression. Returns the source text, the function name ("<lambda>" for
    lambdas) and a compiled code object ("exec" mode for defs, "eval" mode
    for lambdas).
    """
    from ..util import strip_decorators
    if callable(code_or_func):
        func = code_or_func
        code = inspect.getsource(func)
        if code is not None:
            code = textwrap.dedent(code)
            code = strip_decorators(code)
        if isinstance(func, LambdaType) and func.__name__ == "<lambda>":
            # getsource() is unreliable for lambdas; rebuild from the object.
            code = lambdacode(func)
            if code is None:
                raise ValueError("Cannot extract source code from this lambda")
    else:
        assert isinstance(code_or_func, str)
        code = code_or_func
    # Parse only (no execution) to inspect the top-level structure.
    ast = cached_compile(code, identifier, "exec", PyCF_ONLY_AST)
    is_function = (len(ast.body) == 1 and
                   isinstance(ast.body[0], FunctionDef))
    if is_function:
        func_name = ast.body[0].name
        code_object = cached_compile(code, identifier, "exec")
    else:
        # Otherwise the code must be a single lambda expression.
        assert (len(ast.body) == 1 and isinstance(ast.body[0], Expr))
        assert isinstance(ast.body[0].value, Lambda)
        func_name = "<lambda>"
        code_object = cached_compile(code, identifier, "eval")
    return code, func_name, code_object
from .Context import Context
from .Transformer import Transformer
from .Macro import Macro
from .Cell import Cell, SimpleDeepCell, FolderCell
from .DeepCell import DeepCell, DeepFolderCell
from .Module import Module
from .Link import Link
from .Resource import Resource
from ..midlevel.StaticContext import StaticContext
from .copy import copy
def load_graph(graph, *, zip=None, cache_ctx=None, static=False, mounts=True, shares=True):
    """Load a Context from graph.

    "graph" can be a file name or a JSON dict
    Normally, it has been generated with Context.save_graph / Context.get_graph
    "zip" can be a file name, zip-compressed bytes or a Python ZipFile object.
    Normally, it has been generated with Context.save_zip / Context.get_zip
    "cache_ctx": re-use a previous context for caching (e.g. checksum-to-buffer caching)
    "static": create a StaticContext instead
    "mounts": mount cells and pins to the file system, as specified in the graph.
    "shares": share cells over HTTP, as specified in the graph
    """
    # Imported here to avoid circular imports at module load time.
    import json
    from ..core.context import Context as CoreContext
    from ..core.manager import Manager
    from ..core.unbound_context import UnboundManager
    if isinstance(graph, str):
        graph = json.load(open(graph))
    # Normalize the accepted cache_ctx flavours down to a Manager (or None).
    if isinstance(cache_ctx, Context):
        manager = cache_ctx._ctx0._get_manager()
    elif isinstance(cache_ctx, CoreContext):
        manager = cache_ctx._get_manager()
    elif isinstance(cache_ctx, (Manager, UnboundManager)):
        manager = cache_ctx
    elif cache_ctx is None:
        manager = None
    else:
        raise TypeError(cache_ctx)
    if isinstance(manager, UnboundManager):
        # An unbound manager must be resolved to its bound counterpart.
        manager = manager._ctx._bound._get_manager()
        assert isinstance(manager, Manager)
    if static:
        return StaticContext.from_graph(graph, manager=manager)
    else:
        return Context.from_graph(
            graph, manager=manager,
            mounts=mounts, shares=shares,
            zip=zip
        )
from .SubContext import SubContext
# Maps the "type" string stored on each graph node to the high-level
# wrapper class used to re-instantiate it.
nodeclasses = {
    "cell": Cell,
    "transformer": Transformer,
    "context": SubContext,
    "macro": Macro,
    "module": Module,
    "foldercell": FolderCell,
    "deepcell": DeepCell,
    "deepfoldercell": DeepFolderCell,
}

# Public API of this module (also what __dir__ below exposes).
__all__ = [
    "Context", "Transformer", "Macro",
    "Cell", "SimpleDeepCell", "FolderCell", "DeepCell", "DeepFolderCell",
    "Link", "StaticContext", "Module",
    "Resource", "load_graph", "copy"
]
def __dir__():
    """Expose only the declared public API, alphabetically, in dir()."""
    public = list(__all__)
    public.sort()
    return public
|
989,012 | b6d11baa39e79289692a9d73f11046d1f2c16dd1 | from dazer_methods import Dazer
from pandas import read_csv
from uncertainties import ufloat
from numpy import array, median, searchsorted, max, where, ones, mean
from astropy.io import fits
from DZ_observation_reduction import spectra_reduction
def Emission_Threshold(LineLoc, TotalWavelen, TotalInten, BoxSize = 70):
    """Cut a window of +/- BoxSize around LineLoc and locate the line peak.

    Returns the window's wavelengths and intensities, the peak intensity
    found within +/- 2 of LineLoc, and the wavelength where that peak sits
    (median of all matching wavelengths). The spectrum must be sorted in
    wavelength for the searchsorted calls to be valid.
    """
    window_lo, window_hi = LineLoc - BoxSize, LineLoc + BoxSize
    lo_idx, hi_idx = searchsorted(TotalWavelen, (window_lo, window_hi))
    # Clamp the upper index so the slice never runs past the spectrum.
    if hi_idx > (len(TotalWavelen) - 1):
        hi_idx = len(TotalWavelen) - 1
    PartialWavelength = TotalWavelen[lo_idx:hi_idx]
    PartialIntensity = TotalInten[lo_idx:hi_idx]
    # Look for the peak only in a narrow +/- 2 core around the line.
    core_lo, core_hi = searchsorted(PartialWavelength, (LineLoc - 2, LineLoc + 2))
    LineHeight = max(PartialIntensity[core_lo:core_hi])
    LineExpLoc = median(PartialWavelength[where(PartialIntensity == LineHeight)])
    return PartialWavelength, PartialIntensity, LineHeight, LineExpLoc
def region_indeces(wave_min, wave_max, wavenlength_range):
    """Return the integer indices of samples falling in [wave_min, wave_max).

    The wavelength array must be sorted ascending; the result is an integer
    numpy array (possibly empty).
    """
    bounds = searchsorted(wavenlength_range, [wave_min, wave_max])
    return array(range(bounds[0], bounds[1]))
dz = Dazer()
dz_reduc = spectra_reduction()
script_code = dz.get_script_code()
lickIndcs_extension = '_lick_indeces.txt'
#Load catalogue dataframe
catalogue_dict = dz.import_catalogue()
catalogue_df = dz.load_excel_DF('/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx')
SIII_theo = 2.469
H7_H8_ratio_theo = 1.98
#Set figure format
size_dict = {'figure.figsize': (16, 10), 'axes.labelsize':12, 'legend.fontsize':12}
dz.FigConf(plotStyle='seaborn-colorblind', plotSize = size_dict, Figtype = 'Grid_size', n_columns = 1, n_rows = 2)
#dz.FigConf(plotStyle='seaborn-colorblind', Figtype = 'Grid_size', n_columns = 1, n_rows = 2)
#Sulfur lines to plot
lines_interest = ['S3_9069A','S3_9531A', 'H1_9015A', 'H1_9229A', 'H1_9546A']
for i in range(len(catalogue_df.index)):
print '\n-- Treating {} @ {}'.format(catalogue_df.iloc[i].name, catalogue_df.iloc[i].Red_file)
codeName = catalogue_df.iloc[i].name
fits_file = catalogue_df.iloc[i].Red_file
ouput_folder = '{}{}/'.format(catalogue_dict['Obj_Folder'], codeName)
#Get object
objName = codeName
redshift_factor = 1 + catalogue_df.iloc[i].z_Red
#Spectrum data
wave_obs, flux_obs, header_0_obs = dz.get_spectra_data(fits_file)
lick_idcs_df = read_csv(ouput_folder + codeName + lickIndcs_extension, delim_whitespace = True, header = 0, index_col = 0, comment='L') #Dirty trick to avoid the Line_label row
wave_join, wave_max = catalogue_df.loc[codeName].join_wavelength, catalogue_df.loc[codeName].Wmax_Red
idx_obj_join, idx_obj_max_Red = searchsorted(wave_obs, [wave_join, wave_max])
len_red_region = idx_obj_max_Red - idx_obj_join
#Load reduction dataframe
reduction_folder = catalogue_df.loc[codeName].obsfolder
dz_reduc.declare_catalogue(reduction_folder, verbose=False)
#Load telluric star files
idcs_stars = (dz_reduc.reducDf.reduc_tag == 'norm_narrow')
Files_Folders = dz_reduc.reducDf.loc[idcs_stars, 'file_location'].values
Files_Names = dz_reduc.reducDf.loc[idcs_stars, 'file_name'].values
objects = dz_reduc.reducDf.loc[idcs_stars, 'frame_tag'].values
#Declare star for telluric correction
favoured_star = catalogue_df.iloc[i].telluric_star
#Case we can (and we want) to perform the telluric correction:
if (len(objects) > 0) and (favoured_star != 'None'):
star_dict = {}
for i in range(len(objects)):
wave_star, flux_star, header_0_star = dz.get_spectra_data(Files_Folders[i] + Files_Names[i])
idx_print_low, idx_print_high = searchsorted(wave_star,[9000, 9650])
idx_join_region = searchsorted(wave_star,[wave_join])
if len(flux_star) == 2:
flux_star = flux_star[0][0]
star_dict[objects[i]+'_wave'], star_dict[objects[i]+'_flux'] = wave_star, flux_star
star_dict[objects[i]+'_idx_join'] = idx_join_region
dz.data_plot(wave_star, flux_star, label=objects[i], graph_axis=dz.ax2)
obj_red_region = array(range(idx_obj_join,idx_obj_join + len_red_region))
mean_flux = mean(flux_obs)
#Loop through the diagnostic lines
obj_dict = {}
for line in lines_interest:
if line in lick_idcs_df.index:
dz.Current_Label = lick_idcs_df.loc[line].name
dz.Current_Ion = lick_idcs_df.loc[line].Ion
dz.Current_TheoLoc = redshift_factor * lick_idcs_df.loc[line].lambda_theo
selections = redshift_factor * lick_idcs_df.loc[line][3:9].values
#Measure the line intensity
line_fit_orig = dz.measure_line(wave_obs, flux_obs, selections, None, 'lmfit', store_data = False)
#Area to plot
subwave, subflux, lineHeight, LineExpLoc = Emission_Threshold(dz.Current_TheoLoc, wave_obs, flux_obs)
obj_dict[line + '_x_reduc'] = line_fit_orig['x_resample']
obj_dict[line + '_y_reduc'] = line_fit_orig['y_resample']
obj_dict[line + '_flux_reduc'] = line_fit_orig['flux_intg']
obj_dict[line + '_fluxEr_reduc'] = line_fit_orig['flux_intg_er']
obj_dict[line + '_Peak'] = line_fit_orig['A0']
obj_dict[line + '_continuum'] = line_fit_orig['zerolev_mean']
obj_dict[line + '_Emis_reduc'] = ufloat(line_fit_orig['flux_intg'], line_fit_orig['flux_intg_er'])
#Measure the lines after the telluric correction for each case
for star in objects:
star_red_region = array(range(star_dict['{}_idx_join'.format(star)], star_dict['{}_idx_join'.format(star)] + len_red_region))
wave_tell, flux_tell = wave_obs, flux_obs / star_dict[star + '_flux']
for line in lines_interest:
if line in lick_idcs_df.index:
dz.Current_Label = lick_idcs_df.loc[line].name
dz.Current_Ion = lick_idcs_df.loc[line].Ion
dz.Current_TheoLoc = redshift_factor * lick_idcs_df.loc[line].lambda_theo
selections = redshift_factor * lick_idcs_df.loc[line][3:9].values
line_fit_tell = dz.measure_line(wave_tell, flux_tell, selections, None, 'lmfit', store_data = False)
obj_dict[line + '_x_telluc_' + star] = line_fit_tell['x_resample']
obj_dict[line + '_y_telluc_' + star] = line_fit_tell['y_resample']
obj_dict[line + '_flux_telluc_' + star] = line_fit_tell['flux_intg']
obj_dict[line + '_fluxEr_telluc_' + star] = line_fit_tell['flux_intg_er']
obj_dict[line + '_Emis_telluc_' + star] = ufloat(line_fit_tell['flux_intg'], line_fit_tell['flux_intg_er'])
#Save the corrected flux from the favoured star
if star == favoured_star:
obj_dict['corrected_flux'] = flux_tell
obj_dict['corrected_wave'] = wave_tell
obj_dict['corrected_header'] = header_0_obs
#Data sulfur lines
label_reduc, label_telluc = None, None
if ('S3_9069A' in lick_idcs_df.index) and ('S3_9531A' in lick_idcs_df.index):
#Flux ratio from original object
rapport_orig = obj_dict['S3_9531A_Emis_reduc'] / obj_dict['S3_9069A_Emis_reduc']
divergence_orig = r'$\rightarrow$ ${diff}$%'.format(diff = round((1 - SIII_theo/rapport_orig.nominal_value), 3) * 100)
ratio_SIII = '{:L}'.format(rapport_orig)
SIII9069 = '{:L}'.format(ufloat(obj_dict['S3_9069A_flux_reduc'], obj_dict['S3_9069A_fluxEr_reduc']))
SIII9561 = '{:L}'.format(ufloat(obj_dict['S3_9531A_flux_reduc'], obj_dict['S3_9531A_fluxEr_reduc']))
label_reduc = r'4) Before: $\frac{{[SIII]\lambda9561\AA}}{{[SIII]\lambda9069\AA}}=\frac{{{SIII9561}}}{{{SIII9069}}}={ratio_SIII}$ {divergence}'.format(
SIII9561=SIII9561, SIII9069=SIII9069, ratio_SIII=ratio_SIII, divergence=divergence_orig)
#Flux ratio from from favoured star
rapport = obj_dict['S3_9531A_Emis_telluc_' + favoured_star] / obj_dict['S3_9069A_Emis_telluc_' + favoured_star]
divergence = r'$\rightarrow$ ${diff}$%'.format(diff = round((1 - SIII_theo/rapport.nominal_value), 3) * 100)
ratio_SIII = '{:L}'.format(rapport)
SIII9069 = '{:L}'.format(ufloat(obj_dict['S3_9069A_flux_telluc_' + favoured_star], obj_dict['S3_9069A_fluxEr_telluc_' + favoured_star]))
SIII9561 = '{:L}'.format(ufloat(obj_dict['S3_9531A_flux_telluc_' + favoured_star], obj_dict['S3_9531A_fluxEr_telluc_' + favoured_star]))
label_telluc = r'5) After: $\frac{{[SIII]\lambda9561\AA}}{{[SIII]\lambda9069\AA}}=\frac{{{SIII9561}}}{{{SIII9069}}}={ratio_SIII}$ {divergence} ({star})'.format(
SIII9561=SIII9561, SIII9069=SIII9069, ratio_SIII=ratio_SIII, divergence=divergence, star = favoured_star)
label_telluric = r'2) Sulfur corrected ratio ({}): {}% $\Rightarrow$'.format(SIII_theo, round(1 - SIII_theo/rapport_orig.nominal_value, 3) * 100)
for star in objects:
rapport = obj_dict['S3_9531A_Emis_telluc_'+star] / obj_dict['S3_9069A_Emis_telluc_'+star]
divergence = round((1 - SIII_theo/rapport.nominal_value), 3) * 100
label_telluric += r' ${}$% ({}),'.format(round(divergence, 3), star)
#Data from Hpas7 and Hpas8 lines
label_Hpas = None
if ('H1_9015A' in lick_idcs_df.index) and ('H1_9546A' in lick_idcs_df.index):
#label_Hpas = r'3) $\frac{H7_{Pas}\lambda9546\AA}{H8_{Pas}\lambda9229\AA} = $'
label_Hpas = r'3) Hydrogen corrected ratio ({}): '.format(H7_H8_ratio_theo)
#Original
rapport = obj_dict['H1_9546A_Emis_reduc'] / obj_dict['H1_9015A_Emis_reduc']
divergence = round((1 - H7_H8_ratio_theo/rapport.nominal_value), 3) * 100
label_Hpas += r'${}$%$\Rightarrow$'.format(divergence)
HIratio_extension = r' $|$ $\frac{{H7_{{Pas}}\lambda9546\AA}}{{H8_{{Pas}}\lambda9229\AA}} =$ {}%'.format(round(divergence, 3))
ratio_H_favoured = ''
for star in objects:
rapport = obj_dict['H1_9546A_Emis_telluc_'+star] / obj_dict['H1_9015A_Emis_telluc_'+star]
divergence = round((1 - H7_H8_ratio_theo/rapport.nominal_value), 3) * 100
label_Hpas += r' ${}$% ({}),'.format(round(divergence, 3), star)
if star == favoured_star:
ratio_H_favoured = rapport
HIratio_extension_tell = r' $|$ $\frac{{H7_{{Pas}}\lambda9546\AA}}{{H8_{{Pas}}\lambda9229\AA}} =$ {}% ({})'.format(round(divergence, 3), star)
#Plot before and after telluric correction
dz.data_plot(wave_obs[region_indeces(wave_join, wave_max, wave_obs)], flux_obs[region_indeces(wave_join, wave_max, wave_obs)], label='1) Observed spectrum', linestyle='step', graph_axis=dz.ax1)
dz.data_plot(wave_tell, flux_tell, label=label_telluric, linestyle='step', graph_axis=dz.ax1)
if label_Hpas is not None:
dz.ax1.autoscale(enable=False)
x, y = array([9229.0, 9546.0]) * redshift_factor, ones(2) * mean_flux
dz.data_plot(x, y, label_Hpas, markerstyle='o', graph_axis=dz.ax1, color=dz.colorVector['olive'])
if label_telluc is not None:
dz.data_plot(obj_dict['S3_9069A_x_telluc_' + favoured_star], obj_dict['S3_9069A_y_telluc_' + favoured_star], label=label_telluc + HIratio_extension_tell, color=dz.colorVector['pink'], graph_axis=dz.ax1)
dz.data_plot(obj_dict['S3_9531A_x_telluc_' + favoured_star], obj_dict['S3_9531A_y_telluc_' + favoured_star], label=label_telluc + HIratio_extension_tell, color=dz.colorVector['pink'], graph_axis=dz.ax1)
if label_reduc is not None:
dz.data_plot(obj_dict['S3_9069A_x_reduc'], obj_dict['S3_9069A_y_reduc'], label=label_reduc + HIratio_extension, color=dz.colorVector['cyan'], graph_axis=dz.ax1)
dz.data_plot(obj_dict['S3_9531A_x_reduc'], obj_dict['S3_9531A_y_reduc'], label=label_reduc + HIratio_extension, color=dz.colorVector['cyan'], graph_axis=dz.ax1)
dz.FigWording(r'Wavelength $(\AA)$', 'Flux' + r'$(erg\,cm^{-2} s^{-1} \AA^{-1})$', r'Object {} Telluric correction ({} star)'.format(codeName, favoured_star), loc='upper left', graph_axis=dz.ax1, sort_legend=True)
dz.FigWording(r'Wavelength $(\AA)$', 'Normalized flux', '', loc='lower center', graph_axis=dz.ax2, ncols_leg=4)
dz.ax2.set_ylim(0.2,1.25)
if 'S3_9531A_continuum' in obj_dict:
dz.ax1.set_ylim(-2 * obj_dict['S3_9531A_continuum'], 1.1 * obj_dict['S3_9531A_Peak'])
else:
dz.ax1.set_ylim(0.005 * mean_flux, 20 * mean_flux)
output_pickle = '{objFolder}{stepCode}_{objCode}_{ext}'.format(objFolder=ouput_folder, stepCode=script_code, objCode=objName, ext='Telluric correction')
dz.save_manager(output_pickle, save_pickle = True)
#Save the fits file
telluric_fits_address = fits_file.replace('.fits', '_tell.fits')
catalogue_df.loc[codeName, 'tellRed_file'] = telluric_fits_address
fits.writeto(telluric_fits_address, data = obj_dict['corrected_flux'], header = obj_dict['corrected_header'], overwrite = True)
#In this case the telluric correction is not performed
else:
print '-- Not applying telluric correction'.format(codeName)
catalogue_df.loc[codeName, 'tellRed_file'] = None
#Reset all the axis
dz.ax1.cla()
dz.ax2.cla()
dz.reset_fig()
#Save the catalogue dataframe
dz.save_excel_DF(catalogue_df, '/home/vital/Dropbox/Astrophysics/Data/WHT_observations/WHT_Galaxies_properties.xlsx', df_sheet_format = 'catalogue_data')
|
989,013 | 04b09513d658cd3315b43ce536f3684fc7cc0868 | # Generated by Django 3.1.13 on 2021-08-19 01:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the photos app: Photo, Like and Comment tables."""

    initial = True

    dependencies = [
        # All three models FK onto the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                # NOTE(review): 'modified' uses auto_now_add (set once on insert); a
                # "last modified" timestamp usually wants auto_now=True — confirm the model.
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('image', models.ImageField(upload_to='photos/', verbose_name='photo')),
                ('description', models.CharField(blank=True, max_length=255, verbose_name='photo description')),
                # Denormalized counters, presumably maintained by signals/views elsewhere.
                ('total_likes', models.PositiveIntegerField()),
                ('total_comments', models.PositiveIntegerField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.photo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='Created at')),
                ('modified', models.DateTimeField(auto_now_add=True, help_text='Date time of the last time the object was modified.', verbose_name='Last modified at')),
                ('comment', models.CharField(max_length=255, verbose_name='comment')),
                ('photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='photos.photo')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
    ]
|
989,014 | 58e44e2bf6909a103aeb585f46eb42c014f815a8 | from library.PowerType import PowerType
from library.Action import Action
import numpy as np
import random
class ActionFactory:
    """Creates actions and powers while keeping each name's PowerType consistent.

    A plain action is registered with power type ``None``. Re-creating a name
    with a different power type raises. Not thread-safe.
    """

    def __init__(self):
        self.actions = {}           # name -> PowerType (None for plain actions)
        self.randomActionLastId = 0 # counter used to build unique random names

    def _register(self, name, powerType):
        """Record *name*'s power type; raise if it clashes with a prior registration.

        Shared by create()/createPower() — the original duplicated this check
        (and carried a dead ``pass`` in ``__init__``).
        """
        if name in self.actions:
            existingPowerType = self.actions[name]
            if existingPowerType != powerType:
                raise Exception(f"There exists an action {name} which has a different powerType, {existingPowerType} than the new one {powerType}")
        else:
            self.actions[name] = powerType

    def createPower(self, name, powerType:PowerType, power = 10):
        """Create a powered Action; register its type for consistency checks."""
        self._register(name, powerType)
        return Action(name, powerType, power)

    def create(self, name):
        """Create a plain Action (no power type)."""
        self._register(name, None)
        return Action(name)

    def _nextName(self, prefix):
        """Return a fresh unique name '<prefix><id>' (mutates the id counter)."""
        self.randomActionLastId += 1
        return prefix + str(self.randomActionLastId)

    def createRandomAction(self):
        return self.create(self._nextName("action-"))

    def createRandomPower(self):
        # Random calls happen in the same order as before: choice, then randint.
        powerType = random.choice(list(PowerType))
        return self.createPower(self._nextName("power-"), powerType, random.randint(1, 100))

    def createRandom(self):
        """~30% plain actions, ~70% powers."""
        if np.random.random_sample() > 0.7:
            return self.createRandomAction()
        return self.createRandomPower()
|
989,015 | c55dd08f3ae69d6c24baff1113c0fee8db1dc069 | # Generated by Django 2.1.5 on 2019-10-31 19:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax nullability/blank settings on musician and textmaterial fields."""

    dependencies = [
        ('lotlinfoarchive', '0016_auto_20191030_0007'),
    ]

    operations = [
        # real_name becomes optional both in forms (blank) and in the DB (null).
        migrations.AlterField(
            model_name='musician',
            name='real_name',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Реальное имя'),
        ),
        # M2M fields only need blank=True (null has no effect on ManyToManyField).
        migrations.AlterField(
            model_name='textmaterial',
            name='site_photos',
            field=models.ManyToManyField(blank=True, related_name='site_text_photoes', to='lotlinfoarchive.Photo', verbose_name='Фото к материалу'),
        ),
        migrations.AlterField(
            model_name='textmaterial',
            name='videos_urls',
            field=models.ManyToManyField(blank=True, to='lotlinfoarchive.VideoLink', verbose_name='Видео к материалу'),
        ),
    ]
|
989,016 | fd3cac22c0c0a8c419a16ddbb0986d7fa055bb08 | import os
import glob
import pandas as pd
import numpy as np
from BasicIO.filenameString import getFilenamePair
def checkExistanceOfFiles(imageFilename, maskFilename):
    """Check if there is the image and its corresponding mask.

    Parameters
    ----------
    imageFilename : str
        Full path to the image.
    maskFilename : str
        Full path to the mask.

    Returns
    -------
    bool
        True if both files exist, False otherwise.
    """
    # One boolean expression replaces the if/return-True/return-False chain.
    return os.path.isfile(imageFilename) and os.path.isfile(maskFilename)
def getFilterList(filename, sheet_name, set_flag=1, verbose=True):
    """Load an Excel sheet and keep only rows whose 'Training' column equals *set_flag*.

    ``set_flag=1`` selects the training set; any other value selects the test set.
    """
    frame = pd.read_excel(filename, sheet_name=sheet_name)
    if verbose:
        print("Reading the file: {}".format(filename))
    return frame.loc[frame['Training'] == set_flag]
def getListFromPatientList(databasePath, ctPath, ctmaskPath, filename, sheet_name, verbose=True):
    """Collect per-nodule (image, mask) filename lists plus diagnosis info.

    Masks are discovered per patient as '<Patient ID>*' under *ctmaskPath*; the
    matching image name is derived by stripping the '_Mask' suffix. Pairs whose
    image or mask is missing on disk are skipped.

    Returns
    -------
    (image_list, mask_list, info_list)
        Three parallel lists; ``info_list`` holds (nodule diagnosis,
        patient diagnosis) tuples.
    """
    df = pd.read_excel(filename, sheet_name=sheet_name)
    if verbose:
        print("Reading the file: {}".format(filename))

    image_list = []
    mask_list = []
    info_list = []

    for row_idx in range(0, len(df)):
        basename = df.iloc[row_idx]['Patient ID'] # str
        noduleDiagnosis = df.iloc[row_idx]['Nodule Diagnosis']
        patientDiagnosis = df.iloc[row_idx]['Patient Diagnosis']

        maskFilename_list = getFilenameList(os.path.join(databasePath, ctmaskPath), basename + '*')
        maskFilename_list.sort()

        # Derive each image filename from its mask filename.
        imageFilename_list = []
        for fname in maskFilename_list:
            imageFilename_list.append(fname.split('_Mask.nii.gz')[0] + '.nii.gz')

        # FIX: the inner loop previously reused `idx`, shadowing the outer row
        # index and making the verbose messages ambiguous; distinct names now.
        for pair_idx in range(len(maskFilename_list)):
            if checkExistanceOfFiles(os.path.join(databasePath, ctPath, imageFilename_list[pair_idx]),
                                     os.path.join(databasePath, ctmaskPath, maskFilename_list[pair_idx])):
                mask_list.append(maskFilename_list[pair_idx])
                image_list.append(imageFilename_list[pair_idx])
                info_list.append((noduleDiagnosis, patientDiagnosis))
                if verbose:
                    print("\nFor index: {} including\nfile {}".format(pair_idx, maskFilename_list[pair_idx]))
            else:
                if verbose:
                    print("\nFor index: {} discarding\nfile {}".format(pair_idx, maskFilename_list[pair_idx]))

    return image_list, mask_list, info_list
def getImageMaskFilenamesAndDiagnosis(databasePath, ctPath, ctmaskPath, filename, sheet_name, roi_flag, verbose=True):
    """
    Parameters
    ----------
    databasePath : str
        Path to the database.
    ctPath : str
        Directory name of the CT images or CT ROI images. For e.g. 'CT_nii' or 'CTRoi_nii'.
    ctmaskPath : str
        Directory name of the CT Mask or CT ROI Mask. For e.g. 'CTmask_nii' or CTRoimask_nii'.
    filename : str
        Full path to the Excel file. For e.g. '/home/willytell/Desktop/tca_diagnosis.xls'.
    sheet_name : str
        Sheet of the Excel file.
    roi_flag : bool
        True if we are working with ROIs images and masks, False otherwise.

    Returns
    -------
    X : :obj:tuple:`list`
        Full path with filename for image and mask in each tuple of the list.
    y : :obj:int:`list`
        It is the diagnosis for a tuple (image and mask) of X. It is the ground truth.
    """
    df = pd.read_excel(filename, sheet_name=sheet_name)
    if verbose:
        print("Reading the file: {}".format(filename))

    X = []
    y = []

    # One spreadsheet row per nodule: build the expected file pair and keep it
    # (with its diagnosis label) only when both files actually exist on disk.
    for idx in range(0, len(df)):
        basename = df.iloc[idx]['Patient ID']    # str
        noduleID = df.iloc[idx]['NoduleID']      # numpy.int64
        diagnosis = df.iloc[idx]['Diagnosis']    # numpy.int64

        # getFilenamePair composes the concrete image/mask paths (ROI-aware).
        imageFilename, maskFilename = getFilenamePair(databasePath, ctPath, ctmaskPath, basename,
                                                      noduleID.astype(str), roi_flag=roi_flag)

        if checkExistanceOfFiles(imageFilename, maskFilename):
            X.append((imageFilename, maskFilename))
            y.append(diagnosis)
            if verbose:
                print("\nIncluded files for index: {} \n{} \n{}".format(idx, imageFilename, maskFilename))
        else:
            if verbose:
                print("\nDiscarded files for index: {} \n{} \n{}".format(idx, imageFilename, maskFilename))

    return X, y
def getFilenameList(path, pattern='*.nii.gz'):
    """Obtain a list of filenames for a given directory path.

    Parameters
    ----------
    path : str
        Directory path, for e.g. '/home/willytell/Desktop/LungCTDataBase/LIDC-IDRI/Nii_Vol/CTmask_nii
    pattern : str
        Filter filenames using the pattern extension.

    Returns
    -------
    list
        Filenames without the path, only the filename (and extension) is included."""
    matches = sorted(glob.glob(os.path.join(path, pattern)))
    return [os.path.basename(match) for match in matches]
def debug_test():
    """Ad-hoc smoke test: print every CT-mask filename under the hard-coded database path."""
    databasePath = '/home/willytell/Desktop/LungCTDataBase/LIDC-IDRI/Nii_Vol'
    ctmaskPath = 'CTmask_nii'
    for maskname in getFilenameList(os.path.join(databasePath, ctmaskPath)):
        print(maskname)
if __name__ == '__main__':
debug_test()
|
989,017 | 4e8129dd3dd4f396630c0dfffca3925dbb7e0ab1 | # -*- coding: utf-8 -*-
# @Time : 2021/02/01 17:18
# @Author : Lim Yoona
# @Site :
# @File : 04_is_or_equal.py
# @Software: PyCharm
"""
这里是关于==、is的辨析问题
"""
import copy

# Aliasing: `b` is just another name bound to the very same list object.
a = [11, 22, 33]
b = a
print(a == b)   # True  -> equal contents
print(a is b)   # True  -> identical object

# A deep copy has equal contents but a distinct identity.
c = copy.deepcopy(a)
print(a == c)   # True
print(a is c)   # False

# Summary: `==` compares the *values* of the two operands,
# while `is` asks whether they are the *same object*.
|
989,018 | 431e2b295d06dfa1e7c99ebdbe99817564d01850 | from odoo import models, fields
class ItiSkills(models.Model):
    """Minimal Odoo model holding a single skill name."""
    # Odoo technical model name: 'iti.skill' -> DB table iti_skill.
    _name = 'iti.skill'

    name = fields.Char()  # skill label; no size/required/translate options set
|
989,019 | 651b4bb6fb64b8cc5a31cb09cfd0dd12590a2497 |
def checkStatement(number):
    """Demo of `assert`: reject any value that is not strictly greater than 10.

    Raises AssertionError (with the message below) when the check fails.
    """
    assert number > 10, "The number is less than 10 can't be taken"
    print("Yep, Number is greater than 10 :-)")
# Interactive demo: read one integer and run it through the assertion.
numb = int(input("Enter Number : "))  # ValueError here if input is not an integer

try:
    checkStatement(numb)
except AssertionError as e:
    # Print the assertion message instead of crashing with a traceback.
    print(e)
|
989,020 | 3099ffdb332e37c9249a9561349cb4cfaadae173 | def multiples_no(limit):
i=1
sum=0
while i<=num:
if i%3==0 or i%5==0:
print(i)
sum=sum+i
i=i+1
print("sum:-",sum)
# Entry point: read the limit interactively and report the multiples of 3/5.
num = int(input("enter the number:"))  # ValueError on non-integer input
multiples_no(num)
|
989,021 | 2d41f1309b6d60537726426b72d881b86a428419 | from flask import Flask
from flask import render_template
from flask import g
from flask import request
import json
from lockdown import Cell
import lockdown
app = Flask(__name__, static_folder="static")
DATABASE = 'demodb.sqlite'  # SQLite file backing the lockdown demo connection
def get_db():
    """Return the per-app-context lockdown connection, creating it on first use."""
    # Cache the connection on flask.g so each request context reuses one handle.
    if getattr(g, '_database', None) is None:
        g._database = lockdown.LockdownConnection(DATABASE)
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped DB connection (if one was opened) at teardown."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
@app.route('/insert_test', methods=['POST'])
def insert_test():
    """Insert a posted tweet (encrypted lockdown Cell payload) into Tweets."""
    # NOTE(review): `j` is never used afterwards; the loads() call only serves
    # to reject non-JSON bodies early (raises ValueError) — confirm intended.
    j = json.loads(request.data)
    cur = get_db().cursor()
    # Content is stored as a lockdown Cell built from the raw JSON body.
    cur.execute("INSERT INTO Tweets (Content, Owner, SSN) VALUES (?, ?, ?)", (Cell.from_json(request.data), 0,""))
    return "GOOD JOB LOSER"
@app.route('/search_test', methods=['POST'])
def search_test():
    """Search encrypted tweet content; return matching (id, Content) rows as JSON."""
    j = json.loads(request.data)
    cur = get_db().cursor()
    # lockdown's cursor takes the caller's public/search keys so the LIKE
    # pattern can be evaluated against encrypted Content cells.
    cur.execute("SELECT id, Content FROM Tweets WHERE Content LIKE '%im_useless%'", pub_key=j["pub_key"], search_keys=j["search_keys"])
    fetch = cur.fetchall()
    return json.dumps(fetch)
@app.route('/')
def index():
    """Serve the single-page demo front end."""
    return render_template("index.html")
|
989,022 | 025bba291dd2b890a57f9ab9628ed69149946234 | from locust import HttpLocust, TaskSet, task
# Hard-coded load-test credentials (demo environment only).
username = 'admin'
password = '123456'
class MyTaskSet(TaskSet):
    """Locust task set: log in once per user, then hit index and addProperty."""

    def on_start(self):
        # Fetch the login page first to obtain Django's CSRF cookie.
        csrftoken = self.client.get('accounts/login').cookies['csrftoken']
        # NOTE(review): fields are posted as 'id_username'/'id_password' (the
        # HTML widget ids); Django's auth form usually expects
        # 'username'/'password' — verify the login actually succeeds.
        self.client.post("accounts/login/",
                         {"id_username":username,
                          'id_password':password},
                         headers={"X-CSRFToken": csrftoken})

    @task(5)  # weighted 5:1 relative to addProperty
    def index(self):
        self.client.get("")

    @task(1)
    def addProperty(self):
        self.client.get("addProperty/")
class MyLocust(HttpLocust):
    """Simulated user running MyTaskSet, waiting 4-10 s between tasks."""
    task_set = MyTaskSet
    min_wait = 4000  # milliseconds
max_wait = 10000 |
989,023 | 6cdafd264f58e37b6bab6f042913cd478504da53 | import math
import itertools
from collections import deque
from PIL import Image, ImageDraw
class Scheduler:
    """Cooperative round-robin runner.

    Repeatedly executes the first job that is neither finished nor blocked,
    until no runnable job remains.
    """

    def __init__(self):
        self.jobs = []

    def addJob(self, job):
        self.jobs.append(job)

    def loadAvailJobs(self):
        """Return jobs that can make progress right now."""
        return [job for job in self.jobs if not job.finished and not job.blocked()]

    def start(self):
        """Drive all jobs to completion (or mutual deadlock)."""
        runnable = self.loadAvailJobs()
        while runnable:
            runnable[0].run()
            runnable = self.loadAvailJobs()
class Pipe:
    """FIFO channel backed by a list; readers advance a cursor instead of popping,
    so all written data stays available for inspection."""

    def __init__(self, input = None):
        self.data = [] if input is None else input
        self.position = 0     # index of the next unread element
        self.closed = False

    def append(self, content):
        self.data.append(content)

    def read(self):
        """Return the next unread element, or raise when none is available."""
        if not self.avail():
            # Distinguish "producer finished" from "producer hasn't written yet".
            raise Exception("Pipe closed" if self.closed else "Data not avail")
        value = self.data[self.position]
        self.position += 1
        return value

    def avail(self):
        return len(self.data) > self.position

    def close(self):
        self.closed = True
class Program:
    """One intcode machine: owns its memory, instruction pointer and I/O pipes."""

    def __init__(self, instructions, input, output, closeOutput=True):
        self.mem = instructions.copy()   # private copy so programs can share one listing
        self.input = input
        self.output = output
        self.ip = 0                      # instruction pointer
        self.finished = False
        self.waitingForInput = False     # True after yielding on an empty input pipe
        self.relativeBase = 0            # base register for relative-mode addressing
        self.closeOutput = closeOutput

    def run(self):
        """Execute until halt (opcode 99) or until input runs dry (cooperative yield)."""
        self.waitingForInput = False
        while (self.mem[self.ip] != 99):
            op = OpCode(self.mem, self.ip, self.relativeBase)
            execResult = op.execute(self.input, self.output)
            if op.executed:
                self.ip = execResult
                self.relativeBase = op.relativeBase
            else:
                # Opcode 3 found no input: suspend without advancing ip so the
                # scheduler can resume us exactly here later.
                self.waitingForInput = True
                return
        if (self.closeOutput):
            self.output.close()
        self.finished = True

    def blocked(self):
        # Blocked = suspended on an input pipe that is open but still empty.
        return self.waitingForInput and not(self.input.closed) and not(self.input.avail())
class OpCode:
    """Decode and execute one intcode instruction at ``memory[ip]``.

    Parameter modes: 0 = position, 1 = immediate, 2 = relative.
    ``execute`` returns the next instruction pointer; opcode 3 with no input
    available leaves ``executed`` False and yields instead of advancing.
    """

    # opcode -> total instruction length (opcode + parameter count)
    opcodes = {
        1: 4,
        2: 4,
        3: 2,
        4: 2,
        5: 3,
        6: 3,
        7: 4,
        8: 4,
        9: 2,
        99: 1
    }

    def __init__(self, memory, ip, relativeBase):
        self.memory = memory
        self.ip = ip
        self.opCode = memory[ip] % 100     # low two digits select the operation
        self.OriginalCode = memory[ip]     # full value, mode digits included
        self.executed = False
        self.relativeBase = relativeBase
        if not(self.opCode) in self.opcodes.keys():
            # BUG FIX: previously raised a plain str, which Python 3 rejects with
            # "exceptions must derive from BaseException", masking the real error.
            raise ValueError("Wrong opCode " + str(self.OriginalCode))

    def getParamMode(self, pos):
        """Return the mode digit for 1-based parameter *pos*."""
        return int(self.OriginalCode / math.pow(10, pos+1)) % 10

    def getLength(self):
        return self.opcodes[self.opCode]

    # because advent of code thinks that writing to not allocated memory is ok :)
    def checkMemory(self, position):
        """Grow memory with zeros until *position* is addressable."""
        while position >= len(self.memory):
            self.memory.append(0)

    def calculateMemoryAdress(self, position):
        """Resolve parameter *position* to a concrete memory address."""
        self.checkMemory(self.ip + position)
        # Position mode: the parameter cell holds an absolute address.
        if (self.getParamMode(position) == 0):
            self.checkMemory(self.memory[self.ip + position])
            return self.memory[self.ip + position]
        # Relative mode: the parameter is an offset from the relative base.
        elif (self.getParamMode(position) == 2):
            self.checkMemory(self.relativeBase + self.memory[self.ip + position])
            return self.relativeBase + self.memory[self.ip + position]
        # Immediate mode: the parameter cell itself is the value.
        else:
            return self.ip + position

    def loadParameter(self, position):
        return self.memory[self.calculateMemoryAdress(position)]

    def write(self, position, value):
        self.memory[self.calculateMemoryAdress(position)] = value

    def execute(self, input, output):
        """Run the instruction; return the next instruction pointer (jumps return directly)."""
        self.executed = True
        if (self.opCode == 1):      # add
            self.write(3, self.loadParameter(1) + self.loadParameter(2))
        elif (self.opCode == 2):    # multiply
            self.write(3, self.loadParameter(1) * self.loadParameter(2))
        elif (self.opCode == 3):    # read input (yield when none available)
            if not(input.avail()):
                self.executed = False
                return 0
            self.write(1, input.read())
        elif (self.opCode == 4):    # write output
            output.append(self.loadParameter(1))
        elif (self.opCode == 5):    # jump-if-true
            if (self.loadParameter(1) != 0):
                return self.loadParameter(2)
        elif (self.opCode == 6):    # jump-if-false
            if (self.loadParameter(1) == 0):
                return self.loadParameter(2)
        elif (self.opCode == 7):    # less than
            if (self.loadParameter(1) < self.loadParameter(2)):
                self.write(3, 1)
            else:
                self.write(3, 0)
        elif (self.opCode == 8):    # equals
            if (self.loadParameter(1) == self.loadParameter(2)):
                self.write(3, 1)
            else:
                self.write(3, 0)
        elif (self.opCode == 9):    # adjust relative base
            self.relativeBase += self.loadParameter(1)
        return self.ip + self.getLength()
class ReparRobotController:
    """Drives the repair droid through the maze by DFS with backtracking.

    Reads status replies from *input* (0 wall, 1 open, 2 oxygen system) and
    writes movement commands 1..4 (N, S, W, E) to *output*. ``visited`` maps
    each tile to its reply; ``trace`` is the current path from the origin.
    """

    def __init__(self, input, output):
        self.input = input
        self.output = output
        self.finished = False
        self.waitingForInput = False
        self.visited = dict()   # (x, y) -> status reply for that tile
        self.trace = deque()    # DFS stack: path from origin to current tile
        self.way = []
        self.trace.append((0,0)) #Starting point

    def run(self):
        """Consume one status reply and emit the next movement command."""
        self.waitingForInput = False
        if not(self.input.avail()):
            self.waitingForInput = True
            return
        value = self.input.read()
        self.visited[self.trace[-1]] = value
        #print("Visiting: "+str(self.trace[-1])+" -> "+str(value))
        if value == 0: #wall
            self.trace.pop()
        elif value == 2:
            print("Found it at:"+str(self.trace[-1])+" with length: "+str(len(self.trace)-1))

        dir = self.getNextUnknownDirection()
        if dir == None and len(self.trace) > 1:
            # Dead end: backtrack one step towards the origin.
            dir = self.getBackwardsFrom(self.trace.pop())
        elif (dir == None):
            # Back at the origin with nothing left to explore: finish up.
            self.finished = True
            self.drawMap()
            self.fillWithOxigen()
            return
        else:
            self.trace.append(dir[1]) #Append to trace only when not backtracked
        self.output.append(dir[0])
        self.way.append(dir[1])
        #yelds execution

    def blocked(self):
        return self.waitingForInput and not(self.input.avail())

    def getBackwardsFrom(self, oldPosition):
        """Return (command, tile) moving from *oldPosition* back onto the path head."""
        newPosition = self.trace[-1]
        directions = [(0,-1),(0,1),(-1,0),(1,0)]   # command i+1: 1=N, 2=S, 3=W, 4=E
        for i, dir in enumerate(directions):
            if (oldPosition[0]+dir[0], oldPosition[1]+dir[1]) == newPosition:
                return (i+1, newPosition)
        return None

    def getNextUnknownDirection(self):
        """Return (command, tile) for the first never-visited neighbour, or None."""
        currentPosition = self.trace[-1]
        directions = [(0,-1),(0,1),(-1,0),(1,0)]
        for i, dir in enumerate(directions):
            nextTile = (currentPosition[0]+dir[0], currentPosition[1]+dir[1])
            if not(nextTile in self.visited):
                return (i+1, nextTile)
        return None

    def drawMap(self):
        """Render the explored maze to 'mapa.png', 8x8 pixels per tile."""
        minX = min([x[0] for x in self.visited.keys()])
        maxX = max([x[0] for x in self.visited.keys()])
        minY = min([x[1] for x in self.visited.keys()])
        maxY = max([x[1] for x in self.visited.keys()])
        img = Image.new('RGB', ((maxX+1-minX)*8, (maxY+1-minY)*8))
        for k, v in self.visited.items():
            color = 0
            if (k == (0,0)):
                color = (0,255,0)       # origin: green
            elif (v == 2):
                color = (255,0,0)       # oxygen system: red
            elif (v == 0):
                color = (255,255,255)   # wall: white
            elif (v == 1):
                color = (50,50,50)      # open floor: dark grey
            # Paint the tile's 8x8 pixel square.
            for i in range(64):
                img.putpixel((int((k[0] - minX)*8 + (i%8)), int((k[1] - minY)*8 + (i/8))), color)
        img.save('mapa.png')

    def fillWithOxigen(self):
        """BFS flood-fill from the oxygen tile; print how many waves (minutes) it takes."""
        start = next(k for k, v in self.visited.items() if v == 2)
        locations = set(k for k, v in self.visited.items() if v == 1)  # still-empty floor
        borderline = [start]
        counter = 0
        while len(locations) > 0 and len(borderline) > 0:
            newBordeline = []
            for l in borderline:
                directions = [(0,-1),(0,1),(-1,0),(1,0)]
                for dir in directions:
                    nextLocation = (l[0]+dir[0], l[1]+dir[1])
                    if nextLocation in locations:
                        locations.remove(nextLocation)
                        newBordeline.append(nextLocation)
            counter+=1
            borderline = newBordeline
        print("Oxigen fils in: "+str(counter))
# Wiring: the intcode droid program and the maze controller talk through two
# pipes — a: controller -> program (movement commands), b: program -> controller.
with open("input.txt") as f:
    code = [int(x) for x in f.read().split(",")]

a = Pipe()
b = Pipe()
b.append(1) # first is just empty tile: primes the controller with an "open" reply

scheduler = Scheduler()
scheduler.addJob(Program(code, a, b))
scheduler.addJob(ReparRobotController(b, a))
scheduler.start()
|
989,024 | c792834fe969c1813900cc808db889a877fb41f6 | # Crie um programa que tenha a função leiaInt(), que vai funcionar de forma semelhante 'a função input() do Python,
# só que fazendo a validação para aceitar apenas um valor numérico.
# Ex: n = leiaInt('Digite um n: ')
def leiaInt(msg):
    """Prompt with *msg* until the user types digits only; return the value as int.

    Works like input(), but re-asks (printing a red error message) whenever the
    input is not numeric. Note: str.isnumeric() rejects signs, so negative
    numbers are not accepted.
    """
    while True:
        n = str(input(msg))
        if n.isnumeric():
            # Valid integer typed: return it directly — this removes the
            # ok/valor bookkeeping flags and the trailing break of the original.
            return int(n)
        print('\033[0;31mERRO! Digite um número inteiro válido.\033[m')
# Main program
n = leiaInt('Digite um número: ')  # blocks until a valid integer is entered
print(f'Você acabou de digitar o número {n}.')
989,025 | 39c7afe000f084a5cb1498bffadeb4f1e94bba7a | ###########################
## _ANchangeeDirName.py
##
## 2019.10.28
###########################
import sys
import os
dir_lst = sys.argv[1:] # the first argument is the script itself
for d in dir_lst:
    if os.path.isdir(d):
        file_lst = os.listdir(d)  # NOTE(review): collected but never used below
    else:
        # BUG FIX: this printed `p`, an undefined name (NameError); the loop
        # variable is `d`.
        print(d + "is not directory")
# NOTE(review): the snippets below are Nuke/TCL expressions that were pasted
# into this module as raw lines; they are not valid Python and previously made
# the whole file a SyntaxError. Preserved here as reference notes:
# [lindex [split [lindex [split [knob [topnode].file] /] end] .] 0]
# nuke.toNode()['antialiasing']
# [value [value Dot_checkQTinput.input].file]
# nuke.toNode('Write_DPX_Template2')['file'].getValue()
989,026 | 27cf233df0585e9c3f88b64f50e8b8cb8d8be922 | N1, N2, N3, N4 = input().split(' ')
N1, N2, N3, N4 = float(N1), float(N2), float(N3), float(N4)
# Weighted average with weights 0.2 / 0.3 / 0.4 / 0.1.
MEDIA = (N1*0.2)+(N2*0.3)+(N3*0.4)+(N4*0.1)
if MEDIA >= 7:
    print('Media: {:.1f}' .format(MEDIA))
    print('Aluno aprovado.')
elif MEDIA < 5:
    # NOTE(review): this branch recomputes the average and *truncates* it
    # (int(10*x)/10) while the other branches use '{:.1f}', which rounds —
    # confirm which behaviour the grader actually expects.
    print('Media:', int(10 * (N1*.2 + N2*.3 + N3*.4 + N4*.1)) / 10)
    print('Aluno reprovado.')
else:
    # 5 <= media < 7: the student takes a make-up exam.
    EXAME = float(input(''))
    print('Media: {:.1f}'.format(MEDIA))
    print("Aluno em exame.")
    print('Nota do exame: {:.1f}'.format(EXAME))
    MEDIAF = (MEDIA + EXAME) / 2
    if MEDIAF >= 5:
        print('Aluno aprovado.')
    else:
        print('Aluno reprovado.')
    print('Media final: {:.1f}'.format(MEDIAF))
|
989,027 | 9832d6078942af396ff185280c42b064f6fb6fe8 | # Set Postgres configurations as a dictionary(key value pair)
PGSQL_CONFIG = {
    'host': 'host_name',
    'db_name': 'Database_Name',
    'user': 'Username',
    'password': 'user_password',
    # BUG FIX: 3306 is MySQL's default port; PostgreSQL's default is 5432.
    'port': 5432
}
|
989,028 | cd9472ea7cf8e337a318991aec1f92e513cb92f9 | #!/usr/bin/env python3 -B
import unittest
import os
import os.path
import hashlib
import json
import uuid
import pprint
import inspect
from itertools import groupby
from pathlib import Path
import warnings
from tests import TestSalesPipelineOutput
from cromulent import vocab
vocab.add_attribute_assignment_check()
class PIRModelingTest_PrivateContractSales(TestSalesPipelineOutput):
    def test_modeling_private_contract_sales(self):
        '''
        Test for modeling of Lottery sales (run over the 'lottery' fixture).
        '''
        output = self.run_pipeline('lottery')
        self.verify_catalogs(output)
        self.verify_sales(output)

    def verify_catalogs(self, output):
        '''
        For this non-auction sale event, there should be a 'Lottery Event', and
        all physical copies of the sales catalog should be both classified as a
        'Lottery Catalog', and carry the same text.
        '''
        objects = output['model-object']
        sale_activities = output['model-sale-activity']
        texts = output['model-lo']
        expected_catalog_text_id = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#CATALOG,D-A50'
        expected_event_id = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#LOTTERY-EVENT,D-A50'

        # there is a single non-auction sale event, and it is referred to by the catalog text
        pvt_sale = sale_activities[expected_event_id]
        self.assertEqual(pvt_sale['_label'], 'Lottery Event D-A50 (1765)')
        self.assertIn(expected_catalog_text_id, {r.get('id') for r in pvt_sale['referred_to_by']})

        # there is 1 physical Lottery Catalog
        phys_catalogs = [o for o in objects.values() if o['classified_as'][0]['_label'] == 'Lottery Catalog']
        self.assertEqual(len(phys_catalogs), 1)

        # all physical catalogs carry the same catalog text
        catalog_text_ids = set()
        for o in phys_catalogs:
            for text in o['carries']:
                catalog_text_ids.add(text['id'])
        self.assertEqual(catalog_text_ids, {expected_catalog_text_id})
        self.assertIn(expected_catalog_text_id, texts)
        catalog_text = texts[expected_catalog_text_id]  # NOTE(review): unused; kept for parity with sibling tests

        self.assertEqual(len(objects), 4) # 1 physical catalog and 3 objects sold

    def verify_sales(self, output):
        '''
        For a lottery record, there should be:

        * A private sale activity classified as a Lottery
        * An Object Set classified as a Collection
        * A HumanMadeObject classified as a Painting, and belonging to the Object Set
        * An Activity modeling the individual private sale
        '''
        objects = output['model-object']
        sale_activities = output['model-sale-activity']
        sets = output['model-set']
        texts = output['model-lo']

        hmo_key = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#OBJ,D-A50,0001,1765'
        hmo = objects[hmo_key]
        sale_curr = sale_activities['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#AUCTION,D-A50,0001,1765']
        event_key = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#LOTTERY-EVENT,D-A50'
        sale_event = sale_activities[event_key]
        object_set_key = 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:sales#AUCTION,D-A50,0001,1765-Set'
        object_set = sets[object_set_key]

        self.assertEqual({c['_label'] for c in sale_event['classified_as']}, {'Lottery'})
        self.assertEqual({c['_label'] for c in object_set['classified_as']}, {'Collection'})
        self.assertIn(object_set_key, {s['id'] for s in hmo['member_of']})

        # There are no acquisitions or payments as the transaction is 'unknown'.
        self.assertNotIn('part', sale_curr)
if __name__ == '__main__':
unittest.main()
|
989,029 | a5071bb80e1bd6536484c5cb042acae4a41e9f19 | # !/usr/bin/env python
# Copyright 2014 Vodkasoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import loads
from google.appengine.api import users
from controller.base import JsonRequestHandler
from model.datastore import Application
from util.crypto import create_access_token, get_application_key
class AuthenticationHandler(JsonRequestHandler):
    """ Manages authentication requests """

    def get(self):
        """ Authenticates the client and sends it an access token

            Method: GET
            Path: /auth

            Request Parameters:
                applicationKey  string          key that identifies the client as an application
                clientSecret    string          secret key to prove the client's identity
                pretty          [true|false]    whether to output in human readable format or not

            Returns:
                :return: an access token if the application key and client secret are valid; otherwise
                         it sends the client an error
        """
        application_key = self.request.get('applicationKey')
        client_secret = self.request.get('clientSecret')
        # BUG FIX: these checks used `is ''` (object identity with a literal),
        # which is implementation-dependent; equality is the correct test.
        if application_key == '':
            self.write_error(400, 'No application key was provided')
            return
        if client_secret == '':
            self.write_error(400, 'No client secret was provided')
            return
        application = Application.get_by_id(application_key)
        if application is None:
            self.write_error(400, 'Invalid credentials')
            return
        # NOTE(review): `!=` is not constant-time; consider hmac.compare_digest
        # here to avoid a timing side channel on the secret comparison.
        if application.client_secret != client_secret:
            self.write_error(400, 'Invalid credentials')
            return
        access_token = create_access_token(application_key)
        response_key = application.server_response_key
        self.write_signed_message(200, 'accessToken', access_token, response_key)
def require_admin_login(handler_method):
    """ Ensures that the user that calls the handler is an administrator of the application

        Parameters:
            :param handler_method: the decorated handler that will be called if the caller that makes
                                   the request is an administrator of the application

        Return:
            :return: a wrapper function
    """
    from functools import wraps

    @wraps(handler_method)  # FIX: preserve the wrapped handler's name/docstring
    def wrapper(self, *args, **kwargs):
        """ Verifies that the calling user is an administrator of the application before calling the
            decorated handler

            Parameters:
                :param args: the arguments for the decorated function
                :param kwargs: the keyword arguments for the decorated function
        """
        user = users.get_current_user()
        if not user:
            # Not signed in at all.
            self.write_error(401)
        elif not users.is_current_user_admin():
            # Signed in, but not an app administrator.
            self.write_error(403)
        else:
            handler_method(self, *args, **kwargs)
    return wrapper
def access_token_required(handler_method):
    """ Ensures that a valid access token is presented before accessing a resource

        Parameters:
            :param handler_method: the decorated handler that will be called if the access token is
                                   valid

        Returns:
            :return: a wrapper function
    """
    from functools import wraps

    @wraps(handler_method)  # FIX: preserve the wrapped handler's name/docstring
    def wrapper(self, *args, **kwargs):
        """ Verifies the existence and validity of an access token before calling the decorated
            handler

            Parameters:
                :param args: the arguments for the decorated function
                :param kwargs: the keyword arguments for the decorated function

            Returns:
                :return: the decorated function result if the access token was valid; otherwise it
                         sends an error response and returns None
        """
        # GET/DELETE carry the token as a query parameter; other verbs in the JSON body.
        if self.request.method in ['GET', 'DELETE']:
            access_token = self.request.get('accessToken')
        else:
            try:
                access_token = loads(self.request.body).get('accessToken')
            except ValueError:
                access_token = None
        # BUG FIX: emptiness was tested with `len(...) is 0` (identity against an
        # int literal, implementation-dependent); use a real equality test.
        if access_token is None or len(access_token) == 0:
            self.write_error(401, 'No access token provided')
            return None
        try:
            application = get_application_key(access_token)
        except (TypeError, ValueError):
            self.write_error(401, 'Invalid access token')
            return None
        if application is not None:
            return handler_method(self, *args, **kwargs)
        else:
            self.write_error(401, 'Invalid access token')
            return None
    return wrapper
989,030 | 95536566ffae5a577c153afa3ad67f6d6f2d7715 | /home/oseiasbeu/anaconda3/lib/python3.7/fnmatch.py |
989,031 | 84c65ee36f21346ce4fd2d70af4375e0c9427344 | # -*- coding: utf-8 -*-
r"""
Tamari Interval-posets
This module implements Tamari interval-posets: combinatorial objects which
represent intervals of the Tamari order. They have been introduced in
[PCh2013]_ and allow for many combinatorial operations on Tamari intervals.
In particular, they are linked to :class:`DyckWords` and :class:`BinaryTrees`.
An introduction into Tamari interval-posets is given in Chapter 7
of [Pons2013]_.
The Tamari lattice can be defined as a lattice structure on either of several
classes of Catalan objects, especially binary trees and Dyck paths
[TamBrack1962]_ [HuangTamari1972]_ [Sta-EC2]_. An interval can be seen as
a pair of comparable elements. The number of intervals has been given in
[ChapTamari08]_.
REFERENCES:
.. [PCh2013] Grégory Châtel and Viviane Pons.
*Counting smaller trees in the Tamari order*.
FPSAC. (2013). :arxiv:`1212.0751v1`.
.. [Pons2013] Viviane Pons,
*Combinatoire algébrique liée aux ordres sur les permutations*.
PhD Thesis. (2013). :arxiv:`1310.1805v1`.
.. [TamBrack1962] Dov Tamari.
*The algebra of bracketings and their enumeration*.
Nieuw Arch. Wisk. (1962).
.. [HuangTamari1972] Samuel Huang and Dov Tamari.
*Problems of associativity: A simple proof for the lattice property
of systems ordered by a semi-associative law*.
J. Combinatorial Theory Ser. A. (1972).
http://www.sciencedirect.com/science/article/pii/0097316572900039 .
.. [ChapTamari08] Frédéric Chapoton.
*Sur le nombre d'intervalles dans les treillis de Tamari*.
Sem. Lothar. Combin. (2008).
:arxiv:`math/0602368v1`.
.. [FPR15] Wenjie Fang and Louis-François Préville-Ratelle,
*From generalized Tamari intervals to non-separable planar maps*.
:arxiv:`1511.05937`
AUTHORS:
- Viviane Pons 2014: initial implementation
- Frederic Chapoton 2014: review
- Darij Grinberg 2014: review
- Travis Scrimshaw 2014: review
"""
# ****************************************************************************
# Copyright (C) 2013 Viviane Pons <viviane.pons@univie.ac.at>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
from six.moves import range
from sage.categories.enumerated_sets import EnumeratedSets
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.categories.posets import Posets
from sage.combinat.posets.posets import Poset, FinitePoset
from sage.categories.finite_posets import FinitePosets
from sage.combinat.binary_tree import BinaryTrees
from sage.combinat.binary_tree import LabelledBinaryTrees
from sage.combinat.dyck_word import DyckWords
from sage.combinat.permutation import Permutation
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
from sage.misc.cachefunc import cached_method
from sage.misc.latex import latex
from sage.misc.lazy_attribute import lazy_attribute
from sage.rings.integer import Integer
from sage.rings.all import NN
from sage.sets.non_negative_integers import NonNegativeIntegers
from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
from sage.sets.family import Family
from sage.structure.element import Element
from sage.structure.global_options import GlobalOptions
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
class TamariIntervalPoset(Element):
r"""
The class of Tamari interval-posets.
An interval-poset is a labelled poset of size `n`, with labels
`1, 2, \ldots, n`, satisfying the following conditions:
- if `a < c` (as integers) and `a` precedes `c` in the poset, then,
for all `b` such that `a < b < c`, `b` precedes `c`,
- if `a < c` (as integers) and `c` precedes `a` in the poset, then,
for all `b` such that `a < b < c`, `b` precedes `a`.
We use the word "precedes" here to distinguish the poset order and
the natural order on numbers. "Precedes" means "is smaller than
with respect to the poset structure"; this does not imply a
covering relation.
Interval-posets of size `n` are in bijection with intervals of
the Tamari lattice of binary trees of size `n`. Specifically, if
`P` is an interval-poset of size `n`, then the set of linear
extensions of `P` (as permutations in `S_n`) is an interval in the
right weak order (see
:meth:`~sage.combinat.permutation.Permutation.permutohedron_lequal`),
and is in fact the preimage of an interval in the Tamari lattice (of
binary trees of size `n`) under the operation which sends a
permutation to its right-to-left binary search tree
(:meth:`~sage.combinat.permutation.Permutation.binary_search_tree`
with the ``left_to_right`` variable set to ``False``)
without its labelling.
INPUT:
- ``size`` -- an integer, the size of the interval-posets (number of
vertices)
- ``relations`` -- a list (or tuple) of pairs ``(a,b)`` (themselves
lists or tuples), each representing a relation of the form
'`a` precedes `b`' in the poset.
- ``check`` -- (default: ``True``) whether to check the interval-poset
condition or not.
.. WARNING::
The ``relations`` input can be a list or tuple, but not an
iterator (nor should its entries be iterators).
NOTATION:
Here and in the following, the signs `<` and `>` always refer to
the natural ordering on integers, whereas the word "precedes" refers
to the order of the interval-poset. "Minimal" and "maximal" refer
to the natural ordering on integers.
The *increasing relations* of an interval-poset `P` mean the pairs
`(a, b)` of elements of `P` such that `a < b` as integers and `a`
precedes `b` in `P`. The *initial forest* of `P` is the poset
obtained by imposing (only) the increasing relations on the ground
set of `P`. It is a sub-interval poset of `P`, and is a forest with
its roots on top. This forest is usually given the structure of a
planar forest by ordering brother nodes by their labels; it then has
the property that if its nodes are traversed in post-order
(see :meth:~sage.combinat.abstract_tree.AbstractTree.post_order_traversal`,
and traverse the trees of the forest from left to right as well),
then the labels encountered are `1, 2, \ldots, n` in this order.
The *decreasing relations* of an interval-poset `P` mean the pairs
`(a, b)` of elements of `P` such that `b < a` as integers and `a`
precedes `b` in `P`. The *final forest* of `P` is the poset
obtained by imposing (only) the decreasing relations on the ground
set of `P`. It is a sub-interval poset of `P`, and is a forest with
its roots on top. This forest is usually given the structure of a
planar forest by ordering brother nodes by their labels; it then has
the property that if its nodes are traversed in pre-order
(see :meth:`~sage.combinat.abstract_tree.AbstractTree.pre_order_traversal`,
and traverse the trees of the forest from left to right as well),
then the labels encountered are `1, 2, \ldots, n` in this order.
EXAMPLES::
sage: TamariIntervalPoset(0,[])
The Tamari interval of size 0 induced by relations []
sage: TamariIntervalPoset(3,[])
The Tamari interval of size 3 induced by relations []
sage: TamariIntervalPoset(3,[(1,2)])
The Tamari interval of size 3 induced by relations [(1, 2)]
sage: TamariIntervalPoset(3,[(1,2),(2,3)])
The Tamari interval of size 3 induced by relations [(1, 2), (2, 3)]
sage: TamariIntervalPoset(3,[(1,2),(2,3),(1,3)])
The Tamari interval of size 3 induced by relations [(1, 2), (2, 3)]
sage: TamariIntervalPoset(3,[(1,2),(3,2)])
The Tamari interval of size 3 induced by relations [(1, 2), (3, 2)]
sage: TamariIntervalPoset(3,[[1,2],[2,3]])
The Tamari interval of size 3 induced by relations [(1, 2), (2, 3)]
sage: TamariIntervalPoset(3,[[1,2],[2,3],[1,2],[1,3]])
The Tamari interval of size 3 induced by relations [(1, 2), (2, 3)]
sage: TamariIntervalPoset(3,[(3,4)])
Traceback (most recent call last):
...
ValueError: The relations do not correspond to the size of the poset.
sage: TamariIntervalPoset(2,[(2,1),(1,2)])
Traceback (most recent call last):
...
ValueError: The graph is not directed acyclic
sage: TamariIntervalPoset(3,[(1,3)])
Traceback (most recent call last):
...
ValueError: This does not satisfy the Tamari interval-poset condition.
It is also possible to transform a poset directly into an interval-poset::
sage: TIP = TamariIntervalPosets()
sage: p = Poset( ([1,2,3], [(1,2)]))
sage: TIP(p)
The Tamari interval of size 3 induced by relations [(1, 2)]
sage: TIP(Poset({1: []}))
The Tamari interval of size 1 induced by relations []
sage: TIP(Poset({}))
The Tamari interval of size 0 induced by relations []
"""
__metaclass__ = InheritComparisonClasscallMetaclass
    @staticmethod
    def __classcall_private__(cls, *args, **opts):
        r"""
        Ensure that interval-posets created by the enumerated sets and
        directly are the same and that they are instances of
        :class:`TamariIntervalPoset`.

        TESTS::

            sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
            sage: ip.parent()
            Interval-posets
            sage: type(ip)
            <class 'sage.combinat.interval_posets.TamariIntervalPosets_all_with_category.element_class'>

            sage: ip2 = TamariIntervalPosets()(4,[(2,4),(3,4),(2,1),(3,1)])
            sage: ip2.parent() is ip.parent()
            True
            sage: type(ip) is type(ip2)
            True

            sage: ip3 = TamariIntervalPosets(4)([(2,4),(3,4),(2,1),(3,1)])
            sage: ip3.parent() is ip.parent()
            False
            sage: type(ip3) is type(ip)
            True
        """
        # Always construct elements inside the parent of *all* interval-posets,
        # so direct construction and construction through the enumerated sets
        # produce the same element class.
        P = TamariIntervalPosets_all()
        return P.element_class(P, *args, **opts)
    def __init__(self, parent, size, relations, check=True):
        r"""
        Initialize ``self`` from its ``size`` and the list of poset
        ``relations``; optionally ``check`` the interval-poset condition.

        TESTS::

            sage: TamariIntervalPoset(3,[(1,2),(3,2)]).parent()
            Interval-posets
        """
        self._size = size
        # Underlying labelled poset on the ground set {1, ..., size}.
        self._poset = Poset((range(1, size + 1), relations))
        if self._poset.cardinality() != size:
            # This can happen as the Poset constructor automatically adds
            # in elements from the relations.
            raise ValueError("The relations do not correspond to the size of the poset.")
        if check and not TamariIntervalPosets.check_poset(self._poset):
            raise ValueError("This does not satisfy the Tamari interval-poset condition.")
        Element.__init__(self, parent)
        # Cover relations cached as an immutable tuple; used by __eq__ / __hash__.
        self._cover_relations = tuple(self._poset.cover_relations())
        # Per-instance latex options; missing keys fall back to the parent's
        # global options (see ``latex_options``).
        self._latex_options = dict()
def set_latex_options(self, D):
r"""
Set the latex options for use in the ``_latex_`` function. The
default values are set in the ``__init__`` function.
- ``tikz_scale`` -- (default: 1) scale for use with the tikz package
- ``line_width`` -- (default: 1*``tikz_scale``) value representing the
line width
- ``color_decreasing`` -- (default: red) the color for decreasing
relations
- ``color_increasing`` -- (default: blue) the color for increasing
relations
- ``hspace`` -- (default: 1) the difference between horizontal
coordinates of adjacent vertices
- ``vspace`` -- (default: 1) the difference between vertical
coordinates of adjacent vertices
INPUT:
- ``D`` -- a dictionary with a list of latex parameters to change
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
sage: ip.latex_options()["color_decreasing"]
'red'
sage: ip.set_latex_options({"color_decreasing":'green'})
sage: ip.latex_options()["color_decreasing"]
'green'
sage: ip.set_latex_options({"color_increasing":'black'})
sage: ip.latex_options()["color_increasing"]
'black'
To change the default options for all interval-posets, use the
parent's latex options::
sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
sage: ip.latex_options()["color_decreasing"]
'red'
sage: ip2.latex_options()["color_decreasing"]
'red'
sage: TamariIntervalPosets.options(latex_color_decreasing='green')
sage: ip.latex_options()["color_decreasing"]
'green'
sage: ip2.latex_options()["color_decreasing"]
'green'
Next we set a local latex option and show the global option does not
override it::
sage: ip.set_latex_options({"color_decreasing": 'black'})
sage: ip.latex_options()["color_decreasing"]
'black'
sage: TamariIntervalPosets.options(latex_color_decreasing='blue')
sage: ip.latex_options()["color_decreasing"]
'black'
sage: ip2.latex_options()["color_decreasing"]
'blue'
sage: TamariIntervalPosets.options._reset()
"""
for opt in D:
self._latex_options[opt] = D[opt]
def latex_options(self):
r"""
Return the latex options for use in the ``_latex_`` function as a
dictionary. The default values are set using the options.
- ``tikz_scale`` -- (default: 1) scale for use with the tikz package
- ``line_width`` -- (default: 1) value representing the line width
(additionally scaled by ``tikz_scale``)
- ``color_decreasing`` -- (default: ``'red'``) the color for
decreasing relations
- ``color_increasing`` -- (default: ``'blue'``) the color for
increasing relations
- ``hspace`` -- (default: 1) the difference between horizontal
coordinates of adjacent vertices
- ``vspace`` -- (default: 1) the difference between vertical
coordinates of adjacent vertices
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
sage: ip.latex_options()['color_decreasing']
'red'
sage: ip.latex_options()['hspace']
1
"""
d = self._latex_options.copy()
if "tikz_scale" not in d:
d["tikz_scale"] = self.parent().options["latex_tikz_scale"]
if "line_width" not in d:
d["line_width"] = self.parent().options["latex_line_width_scalar"] * d["tikz_scale"]
if "color_decreasing" not in d:
d["color_decreasing"] = self.parent().options["latex_color_decreasing"]
if "color_increasing" not in d:
d["color_increasing"] = self.parent().options["latex_color_increasing"]
if "hspace" not in d:
d["hspace"] = self.parent().options["latex_hspace"]
if "vspace" not in d:
d["vspace"] = self.parent().options["latex_vspace"]
return d
    def _find_node_positions(self, hspace=1, vspace=1):
        """
        Compute a nice embedding.

        If `x` precedes `y`, then `y` will always be placed on top of `x`
        and/or to the right of `x`.

        Decreasing relations tend to be drawn vertically and increasing
        relations horizontally.

        The algorithm tries to avoid superposition but on big
        interval-posets, it might happen.

        OUTPUT:

        a dictionary {vertex: (x,y)}

        EXAMPLES::

            sage: ti = TamariIntervalPosets(4)[2]
            sage: ti._find_node_positions().values()
            [[0, 0], [0, -1], [0, -2], [1, -2]]
        """
        node_positions = {}
        # Stack of (vertex, y) pairs whose x coordinate is not yet fixed.
        to_draw = [(1, 0)]
        # Stack of increasing parents of the chain currently being drawn,
        # with their y coordinates in ``parenty``.
        current_parent = [self.increasing_parent(1)]
        parenty = [0]
        x = 0
        y = 0
        for i in range(2, self.size() + 1):
            decreasing_parent = self.decreasing_parent(i)
            increasing_parent = self.increasing_parent(i)
            # Pop (and finalize the x coordinate of) every pending vertex
            # that cannot be a decreasing ancestor of i.
            while to_draw and (decreasing_parent is None or
                               decreasing_parent < to_draw[-1][0]):
                n = to_draw.pop()
                node_positions[n[0]] = [x, n[1]]
            if i != current_parent[-1]:
                # i starts or continues a branch: move right and/or down.
                if (not self.le(i, i - 1) and decreasing_parent is not None):
                    x += hspace
                    if current_parent[-1] is not None:
                        y -= vspace
                else:
                    y -= vspace
                if increasing_parent != current_parent[-1]:
                    current_parent.append(increasing_parent)
                    parenty.append(y)
                nodey = y
            else:
                # i closes the current increasing chain: place it at its
                # recorded parent height, shifted right.
                current_parent.pop()
                x += hspace
                nodey = parenty.pop()
                if not current_parent or increasing_parent != current_parent[-1]:
                    current_parent.append(increasing_parent)
                    parenty.append(nodey)
            to_draw.append((i, nodey))
        # Whatever remains on the stack gets the final x coordinate.
        for n in to_draw:
            node_positions[n[0]] = [x, n[1]]
        return node_positions
def plot(self, **kwds):
"""
Return a picture.
The picture represents the Hasse diagram, where the covers are
colored in blue if they are increasing and in red if they are
decreasing.
This uses the same coordinates as the latex view.
EXAMPLES::
sage: ti = TamariIntervalPosets(4)[2]
sage: ti.plot()
Graphics object consisting of 6 graphics primitives
"""
c0 = 'blue' # self.latex_options()["color_increasing"]
c1 = 'red' # self.latex_options()["color_decreasing"]
G = self.poset().hasse_diagram()
G.set_pos(self._find_node_positions())
for a, b, c in G.edges():
if a < b:
G.set_edge_label(a, b, 0)
else:
G.set_edge_label(a, b, 1)
return G.plot(color_by_label={0: c0, 1: c1}, **kwds)
def _latex_(self):
r"""
A latex representation of ``self`` using the tikzpicture package.
This picture shows the union of the Hasse diagrams of the
initial and final forests.
If `x` precedes `y`, then `y` will always be placed on top of `x`
and/or to the right of `x`.
Decreasing relations tend to be drawn vertically and increasing
relations horizontally.
The algorithm tries to avoid superposition but on big
interval-posets, it might happen.
You can use ``self.set_latex_options()`` to change default latex
options. Or you can use the parent's options.
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
sage: print(ip._latex_())
\begin{tikzpicture}[scale=1]
\node(T1) at (1,0) {1};
\node(T2) at (0,-1) {2};
\node(T3) at (1,-2) {3};
\node(T4) at (2,-1) {4};
\draw[line width = 0.5, color=red] (T3) -- (T1);
\draw[line width = 0.5, color=red] (T2) -- (T1);
\draw[line width = 0.5, color=blue] (T2) -- (T4);
\draw[line width = 0.5, color=blue] (T3) -- (T4);
\end{tikzpicture}
"""
latex.add_package_to_preamble_if_available("tikz")
latex_options = self.latex_options()
start = "\\begin{tikzpicture}[scale=" + str(latex_options['tikz_scale']) + "]\n"
end = "\\end{tikzpicture}"
vspace = latex_options["vspace"]
hspace = latex_options["hspace"]
def draw_node(j, x, y):
r"""
Internal method to draw vertices
"""
return "\\node(T" + str(j) + ") at (" + str(x) + "," + str(y) + ") {" + str(j) + "};\n"
def draw_increasing(i, j):
r"""
Internal method to draw increasing relations
"""
return "\\draw[line width = " + str(latex_options["line_width"]) + ", color=" + latex_options["color_increasing"] + "] (T" + str(i) + ") -- (T" + str(j) + ");\n"
def draw_decreasing(i, j):
r"""
Internal method to draw decreasing relations
"""
return "\\draw[line width = " + str(latex_options["line_width"]) + ", color=" + latex_options["color_decreasing"] + "] (T" + str(i) + ") -- (T" + str(j) + ");\n"
if self.size() == 0:
nodes = "\\node(T0) at (0,0){$\emptyset$};"
relations = ""
else:
positions = self._find_node_positions(hspace, vspace)
nodes = "" # latex for node declarations
relations = "" # latex for drawing relations
for i in range(1, self.size() + 1):
nodes += draw_node(i, *positions[i])
for i, j in self.decreasing_cover_relations():
relations += draw_decreasing(i, j)
for i, j in self.increasing_cover_relations():
relations += draw_increasing(i, j)
return start + nodes + relations + end
    def poset(self):
        r"""
        Return ``self`` as a labelled poset.

        An interval-poset is indeed constructed from a labelled poset which
        is stored internally. This method allows to access the poset and
        all the associated methods.

        EXAMPLES::

            sage: ip = TamariIntervalPoset(4,[(1,2),(3,2),(2,4),(3,4)])
            sage: pos = ip.poset(); pos
            Finite poset containing 4 elements
            sage: pos.maximal_chains()
            [[3, 2, 4], [1, 2, 4]]
            sage: pos.maximal_elements()
            [4]
            sage: pos.is_lattice()
            False
        """
        # Plain accessor for the poset built in ``__init__``.
        return self._poset
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: len(set([hash(u) for u in TamariIntervalPosets(4)]))
68
"""
pair = (self.size(), tuple(tuple(e) for e in self._cover_relations))
return hash(pair)
@cached_method
def increasing_cover_relations(self):
r"""
Return the cover relations of the initial forest of ``self``
(the poset formed by keeping only the relations of the form
`a` precedes `b` with `a < b`).
The initial forest of ``self`` is a forest with its roots
being on top. It is also called the increasing poset of ``self``.
.. WARNING::
This method computes the cover relations of the initial
forest. This is not identical with the cover relations of
``self`` which happen to be increasing!
.. SEEALSO::
:meth:`initial_forest`
EXAMPLES::
sage: TamariIntervalPoset(4,[(1,2),(3,2),(2,4),(3,4)]).increasing_cover_relations()
[(1, 2), (2, 4), (3, 4)]
sage: TamariIntervalPoset(3,[(1,2),(1,3),(2,3)]).increasing_cover_relations()
[(1, 2), (2, 3)]
"""
relations = []
size = self.size()
for i in range(1, size):
for j in range(i + 1, size + 1):
if self.le(i, j):
relations.append((i, j))
break
return relations
def increasing_roots(self):
r"""
Return the root vertices of the initial forest of ``self``,
i.e., the vertices `a` of ``self`` such that there is no
`b > a` with `a` precedes `b`.
OUTPUT:
The list of all roots of the initial forest of ``self``, in
decreasing order.
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.increasing_roots()
[6, 5, 2]
sage: ip.initial_forest().increasing_roots()
[6, 5, 2]
"""
size = self.size()
if size == 0:
return []
roots = [size]
root = size
for i in range(size - 1, 0, -1):
if not self.le(i, root):
roots.append(i)
root = i
return roots
def increasing_children(self, v):
r"""
Return the children of ``v`` in the initial forest of ``self``.
INPUT:
- ``v`` -- an integer representing a vertex of ``self``
(between 1 and ``size``)
OUTPUT:
The list of all children of ``v`` in the initial forest of
``self``, in decreasing order.
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.increasing_children(2)
[1]
sage: ip.increasing_children(5)
[4, 3]
sage: ip.increasing_children(1)
[]
"""
children = []
root = None
for i in range(v - 1, 0, -1):
if not self.le(i, v):
break
if root is None or not self.le(i, root):
children.append(i)
root = i
return children
def increasing_parent(self, v):
r"""
Return the vertex parent of ``v`` in the initial forest of ``self``.
This is the lowest (as integer!) vertex `b > v` such that `v`
precedes `b`. If there is no such vertex (that is, `v` is an
increasing root), then ``None`` is returned.
INPUT:
- ``v`` -- an integer representing a vertex of ``self``
(between 1 and ``size``)
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.increasing_parent(1)
2
sage: ip.increasing_parent(3)
5
sage: ip.increasing_parent(4)
5
sage: ip.increasing_parent(5) is None
True
"""
parent = None
for i in range(self.size(), v, -1):
if self.le(v, i):
parent = i
return parent
@cached_method
def decreasing_cover_relations(self):
r"""
Return the cover relations of the final forest of ``self``
(the poset formed by keeping only the relations of the form
`a` precedes `b` with `a > b`).
The final forest of ``self`` is a forest with its roots
being on top. It is also called the decreasing poset of ``self``.
.. WARNING::
This method computes the cover relations of the final
forest. This is not identical with the cover relations of
``self`` which happen to be decreasing!
.. SEEALSO::
:meth:`final_forest`
EXAMPLES::
sage: TamariIntervalPoset(4,[(2,1),(3,2),(3,4),(4,2)]).decreasing_cover_relations()
[(4, 2), (3, 2), (2, 1)]
sage: TamariIntervalPoset(4,[(2,1),(4,3),(2,3)]).decreasing_cover_relations()
[(4, 3), (2, 1)]
sage: TamariIntervalPoset(3,[(2,1),(3,1),(3,2)]).decreasing_cover_relations()
[(3, 2), (2, 1)]
"""
relations = []
for i in range(self.size(), 1, -1):
for j in range(i - 1, 0, -1):
if self.le(i, j):
relations.append((i, j))
break
return relations
def decreasing_roots(self):
r"""
Return the root vertices of the final forest of ``self``,
i.e., the vertices `b` such that there is no `a < b` with `b`
preceding `a`.
OUTPUT:
The list of all roots of the final forest of ``self``, in
increasing order.
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.decreasing_roots()
[1, 2]
sage: ip.final_forest().decreasing_roots()
[1, 2]
"""
if self.size() == 0:
return []
roots = [1]
root = 1
for i in range(2, self.size() + 1):
if not self.le(i, root):
roots.append(i)
root = i
return roots
def decreasing_children(self, v):
r"""
Return the children of ``v`` in the final forest of ``self``.
INPUT:
- ``v`` -- an integer representing a vertex of ``self``
(between 1 and ``size``)
OUTPUT:
The list of all children of ``v`` in the final forest of ``self``,
in increasing order.
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.decreasing_children(2)
[3, 5]
sage: ip.decreasing_children(3)
[4]
sage: ip.decreasing_children(1)
[]
"""
children = []
root = None
for i in range(v + 1, self.size() + 1):
if not self.le(i, v):
break
if root is None or not self.le(i, root):
children.append(i)
root = i
return children
def decreasing_parent(self, v):
r"""
Return the vertex parent of ``v`` in the final forest of ``self``.
This is the highest (as integer!) vertex `a < v` such that ``v``
precedes ``a``. If there is no such vertex (that is, `v` is a
decreasing root), then ``None`` is returned.
INPUT:
- ``v`` -- an integer representing a vertex of ``self`` (between
1 and ``size``)
EXAMPLES::
sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: ip.decreasing_parent(4)
3
sage: ip.decreasing_parent(3)
2
sage: ip.decreasing_parent(5)
2
sage: ip.decreasing_parent(2) is None
True
"""
parent = None
for i in range(1, v):
if self.le(v, i):
parent = i
return parent
    def le(self, e1, e2):
        r"""
        Return whether ``e1`` precedes or equals ``e2`` in ``self``.

        EXAMPLES::

            sage: ip = TamariIntervalPoset(4,[(1,2),(2,3)])
            sage: ip.le(1,2)
            True
            sage: ip.le(1,3)
            True
            sage: ip.le(2,3)
            True
            sage: ip.le(3,4)
            False
            sage: ip.le(1,1)
            True
        """
        # Delegate directly to the underlying labelled poset.
        return self._poset.le(e1, e2)
    def lt(self, e1, e2):
        r"""
        Return whether ``e1`` strictly precedes ``e2`` in ``self``.

        EXAMPLES::

            sage: ip = TamariIntervalPoset(4,[(1,2),(2,3)])
            sage: ip.lt(1,2)
            True
            sage: ip.lt(1,3)
            True
            sage: ip.lt(2,3)
            True
            sage: ip.lt(3,4)
            False
            sage: ip.lt(1,1)
            False
        """
        # Delegate directly to the underlying labelled poset.
        return self._poset.lt(e1, e2)
def ge(self, e1, e2):
r"""
Return whether ``e2`` precedes or equals ``e1`` in ``self``.
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(1,2),(2,3)])
sage: ip.ge(2,1)
True
sage: ip.ge(3,1)
True
sage: ip.ge(3,2)
True
sage: ip.ge(4,3)
False
sage: ip.ge(1,1)
True
"""
return self._poset.ge(e1, e2)
def gt(self, e1, e2):
r"""
Return whether ``e2`` strictly precedes ``e1`` in ``self``.
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(1,2),(2,3)])
sage: ip.gt(2,1)
True
sage: ip.gt(3,1)
True
sage: ip.gt(3,2)
True
sage: ip.gt(4,3)
False
sage: ip.gt(1,1)
False
"""
return self._poset.gt(e1, e2)
    def size(self):
        r"""
        Return the size (number of vertices) of the interval-poset.

        EXAMPLES::

            sage: TamariIntervalPoset(3,[(2,1),(3,1)]).size()
            3
        """
        # Plain accessor for the size stored in ``__init__``.
        return self._size
def complement(self):
r"""
Return the complement of the interval-poset ``self``.
If `P` is a Tamari interval-poset of size `n`, then the
*complement* of `P` is defined as the interval-poset `Q` whose
base set is `[n] = \{1, 2, \ldots, n\}` (just as for `P`), but
whose order relation has `a` precede `b` if and only if
`n + 1 - a` precedes `n + 1 - b` in `P`.
In terms of the Tamari lattice, the *complement* is the symmetric
of ``self``. It is formed from the left-right symmeterized of
the binary trees of the interval (switching left and right
subtrees, see
:meth:`~sage.combinat.binary_tree.BinaryTree.left_right_symmetry`).
In particular, initial intervals are sent to final intervals and
vice-versa.
EXAMPLES::
sage: TamariIntervalPoset(3, [(2, 1), (3, 1)]).complement()
The Tamari interval of size 3 induced by relations [(1, 3), (2, 3)]
sage: TamariIntervalPoset(0, []).complement()
The Tamari interval of size 0 induced by relations []
sage: ip = TamariIntervalPoset(4, [(1, 2), (2, 4), (3, 4)])
sage: ip.complement() == TamariIntervalPoset(4, [(2, 1), (3, 1), (4, 3)])
True
sage: ip.lower_binary_tree() == ip.complement().upper_binary_tree().left_right_symmetry()
True
sage: ip.upper_binary_tree() == ip.complement().lower_binary_tree().left_right_symmetry()
True
sage: ip.is_initial_interval()
True
sage: ip.complement().is_final_interval()
True
"""
N = self._size + 1
new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]
return TamariIntervalPoset(N - 1, new_covers)
def insertion(self, i):
"""
Return the Tamari insertion of an integer `i` into the
interval-poset ``self``.
If `P` is a Tamari interval-poset of size `n` and `i` is an
integer with `1 \leq i \leq n+1`, then the Tamari insertion of
`i` into `P` is defined as the Tamari interval-poset of size
`n+1` which corresponds to the interval `[C_1, C_2]` on the
Tamari lattice, where the binary trees `C_1` and `C_2` are
defined as follows: We write the interval-poset `P` as
`[B_1, B_2]` for two binary trees `B_1` and `B_2`. We label
the vertices of each of these two trees with the integers
`1, 2, \ldots, i-1, i+1, i+2, \ldots, n+1` in such a way that
the trees are binary search trees (this labelling is unique).
Then, we insert `i` into each of these trees (in the way as
explained in
:meth:`~sage.combinat.binary_tree.LabelledBinaryTree.binary_search_insert`).
The shapes of the resulting two trees are denoted `C_1` and
`C_2`.
An alternative way to construct the insertion of `i` into
`P` is by relabeling each vertex `u` of `P` satisfying
`u \geq i` (as integers) as `u+1`, and then adding a vertex
`i` which should precede `i-1` and `i+1`.
.. TODO::
To study this, it would be more natural to define
interval-posets on arbitrary ordered sets rather than just
on `\{1, 2, \ldots, n\}`.
EXAMPLES::
sage: ip = TamariIntervalPoset(4, [(2, 3), (4, 3)]); ip
The Tamari interval of size 4 induced by relations [(2, 3), (4, 3)]
sage: ip.insertion(1)
The Tamari interval of size 5 induced by relations [(1, 2), (3, 4), (5, 4)]
sage: ip.insertion(2)
The Tamari interval of size 5 induced by relations [(2, 3), (3, 4), (5, 4), (2, 1)]
sage: ip.insertion(3)
The Tamari interval of size 5 induced by relations [(2, 4), (3, 4), (5, 4), (3, 2)]
sage: ip.insertion(4)
The Tamari interval of size 5 induced by relations [(2, 3), (4, 5), (5, 3), (4, 3)]
sage: ip.insertion(5)
The Tamari interval of size 5 induced by relations [(2, 3), (5, 4), (4, 3)]
sage: ip = TamariIntervalPoset(0, [])
sage: ip.insertion(1)
The Tamari interval of size 1 induced by relations []
sage: ip = TamariIntervalPoset(1, [])
sage: ip.insertion(1)
The Tamari interval of size 2 induced by relations [(1, 2)]
sage: ip.insertion(2)
The Tamari interval of size 2 induced by relations [(2, 1)]
TESTS:
Verifying that the two ways of computing insertion are
equivalent::
sage: def insert_alternative(T, i):
....: # Just another way to compute the insertion of i into T.
....: from sage.combinat.binary_tree import LabelledBinaryTree
....: B1 = T.lower_binary_tree().canonical_labelling()
....: B2 = T.upper_binary_tree().canonical_labelling()
....: # We should relabel the trees to "make space" for a label i,
....: # but we don't, because it doesn't make a difference: The
....: # binary search insertion will go precisely the same, because
....: # an integer equal to the label of the root gets sent onto
....: # the left branch.
....: C1 = B1.binary_search_insert(i)
....: C2 = B2.binary_search_insert(i)
....: return TamariIntervalPosets.from_binary_trees(C1, C2)
sage: def test_equivalence(n):
....: for T in TamariIntervalPosets(n):
....: for i in range(1, n + 2):
....: if not (insert_alternative(T, i) == T.insertion(i)):
....: print(T, i)
....: return False
....: return True
sage: test_equivalence(3)
True
"""
n = self._size
if not 0 < i <= n + 1:
raise ValueError("integer to be inserted not "
"in the appropriate interval")
def add1(u):
if u >= i:
return u + 1
return u
rels = [(add1(a), add1(b))
for (a, b) in self.decreasing_cover_relations()]
rels += [(add1(a), add1(b))
for (a, b) in self.increasing_cover_relations()]
rels += [(k, k - 1) for k in [i] if i > 1]
rels += [(k, k + 1) for k in [i] if i <= n]
return TamariIntervalPoset(n + 1, rels)
def _repr_(self):
    r"""
    Return the string representation of ``self``.

    TESTS::

        sage: TamariIntervalPoset(3,[(2,1),(3,1)])
        The Tamari interval of size 3 induced by relations [(3, 1), (2, 1)]
        sage: TamariIntervalPoset(3,[(3,1),(2,1)])
        The Tamari interval of size 3 induced by relations [(3, 1), (2, 1)]
        sage: TamariIntervalPoset(3,[(2,3),(2,1)])
        The Tamari interval of size 3 induced by relations [(2, 3), (2, 1)]
    """
    # increasing relations are listed first, matching the construction order
    relations = self.increasing_cover_relations() + self.decreasing_cover_relations()
    return "The Tamari interval of size {} induced by relations {}".format(
        self.size(), relations)
def __eq__(self, other):
    r"""
    Test equality: same size and the same set of cover relations.

    TESTS::

        sage: TamariIntervalPoset(0,[]) == TamariIntervalPoset(0,[])
        True
        sage: TamariIntervalPoset(1,[]) == TamariIntervalPoset(0,[])
        False
        sage: TamariIntervalPoset(3,[(1,2),(3,2)]) == TamariIntervalPoset(3,[(3,2),(1,2)])
        True
        sage: TamariIntervalPoset(3,[(1,2),(3,2)]) == TamariIntervalPoset(3,[(1,2)])
        False
    """
    # anything that is not an interval-poset compares unequal
    if not isinstance(other, TamariIntervalPoset):
        return False
    return (self.size() == other.size()
            and self._cover_relations == other._cover_relations)
def __ne__(self, other):
    r"""
    Test inequality, as the negation of :meth:`__eq__`.

    TESTS::

        sage: TamariIntervalPoset(0,[]) != TamariIntervalPoset(0,[])
        False
        sage: TamariIntervalPoset(1,[]) != TamariIntervalPoset(0,[])
        True
        sage: TamariIntervalPoset(3,[(1,2),(3,2)]) != TamariIntervalPoset(3,[(3,2),(1,2)])
        False
        sage: TamariIntervalPoset(3,[(1,2),(3,2)]) != TamariIntervalPoset(3,[(1,2)])
        True
    """
    return not self == other
def __le__(self, el2):
    r"""
    Compare ``self`` with ``el2``, delegating to the parent's order.

    TESTS::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip1 <= ip2
        True
        sage: ip1 <= ip1
        True
        sage: ip2 <= ip1
        False
    """
    # the parent implements the order on interval-posets
    P = self.parent()
    return P.le(self, el2)
def __lt__(self, el2):
    r"""
    Strictly compare ``self`` with ``el2``, delegating to the parent's order.

    TESTS::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip1 < ip2
        True
        sage: ip1 < ip1
        False
        sage: ip2 < ip1
        False
    """
    # the parent implements the order on interval-posets
    P = self.parent()
    return P.lt(self, el2)
def __ge__(self, el2):
    r"""
    Reverse-compare ``self`` with ``el2``, delegating to the parent's order.

    TESTS::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip1 >= ip2
        False
        sage: ip1 >= ip1
        True
        sage: ip2 >= ip1
        True
    """
    # the parent implements the order on interval-posets
    P = self.parent()
    return P.ge(self, el2)
def __gt__(self, el2):
    r"""
    Strictly reverse-compare ``self`` with ``el2``, delegating to the
    parent's order.

    TESTS::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip1 > ip2
        False
        sage: ip1 > ip1
        False
        sage: ip2 > ip1
        True
    """
    # the parent implements the order on interval-posets
    P = self.parent()
    return P.gt(self, el2)
def __iter__(self):
    r"""
    Iterate through the vertices of ``self``, i.e. the integers
    `1, 2, \ldots, n` where `n` is the size.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(1,2),(3,2)])
        sage: [i for i in ip]
        [1, 2, 3, 4]
    """
    n = self.size()
    yield from range(1, n + 1)
def contains_interval(self, other):
    r"""
    Return whether the interval represented by ``other`` is contained
    in ``self`` as an interval of the Tamari lattice.

    In terms of interval-posets, it means that all relations of ``self``
    are relations of ``other``.

    INPUT:

    - ``other`` -- an interval-poset

    EXAMPLES::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(2,3)])
        sage: ip2.contains_interval(ip1)
        True
        sage: ip3 = TamariIntervalPoset(4,[(2,1)])
        sage: ip2.contains_interval(ip3)
        False
        sage: ip4 = TamariIntervalPoset(3,[(2,3)])
        sage: ip2.contains_interval(ip4)
        False
    """
    # intervals of different sizes are never comparable
    if other.size() != self.size():
        return False
    # every cover relation of self must hold in other
    return all(other.le(a, b) for a, b in self._cover_relations)
def lower_contains_interval(self, other):
    r"""
    Return whether the interval represented by ``other`` is contained
    in ``self`` as an interval of the Tamari lattice and if they share
    the same lower bound.

    As interval-posets, it means that ``other`` contains the relations
    of ``self`` plus some extra increasing relations.

    INPUT:

    - ``other`` -- an interval-poset

    EXAMPLES::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)]);
        sage: ip2 = TamariIntervalPoset(4,[(4,3)])
        sage: ip2.lower_contains_interval(ip1)
        True
        sage: ip2.contains_interval(ip1) and ip2.lower_binary_tree() == ip1.lower_binary_tree()
        True
        sage: ip3 = TamariIntervalPoset(4,[(4,3),(2,1)])
        sage: ip2.contains_interval(ip3)
        True
        sage: ip2.lower_binary_tree() == ip3.lower_binary_tree()
        False
        sage: ip2.lower_contains_interval(ip3)
        False
    """
    # containment is a prerequisite for sharing the lower bound
    if not self.contains_interval(other):
        return False
    # the decreasing relations of other may not exceed those of self
    return all(self.le(a, b)
               for a, b in other.decreasing_cover_relations())
def upper_contains_interval(self, other):
    r"""
    Return whether the interval represented by ``other`` is contained
    in ``self`` as an interval of the Tamari lattice and if they share
    the same upper bound.

    As interval-posets, it means that ``other`` contains the relations
    of ``self`` plus some extra decreasing relations.

    INPUT:

    - ``other`` -- an interval-poset

    EXAMPLES::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip2.upper_contains_interval(ip1)
        True
        sage: ip2.contains_interval(ip1) and ip2.upper_binary_tree() == ip1.upper_binary_tree()
        True
        sage: ip3 = TamariIntervalPoset(4,[(1,2),(2,3),(3,4)])
        sage: ip2.upper_contains_interval(ip3)
        False
        sage: ip2.contains_interval(ip3)
        True
        sage: ip2.upper_binary_tree() == ip3.upper_binary_tree()
        False
    """
    # containment is a prerequisite for sharing the upper bound
    if not self.contains_interval(other):
        return False
    # the increasing relations of other may not exceed those of self
    return all(self.le(a, b)
               for a, b in other.increasing_cover_relations())
def is_linear_extension(self, perm):
    r"""
    Return whether the permutation ``perm`` is a linear extension
    of ``self``.

    INPUT:

    - ``perm`` -- a permutation of the size of ``self``

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip.is_linear_extension([1,4,2,3])
        True
        sage: ip.is_linear_extension(Permutation([1,4,2,3]))
        True
        sage: ip.is_linear_extension(Permutation([1,4,3,2]))
        False
    """
    # delegate to the underlying finite poset
    poset = self._poset
    return poset.is_linear_extension(perm)
def contains_binary_tree(self, binary_tree):
    r"""
    Return whether the interval represented by ``self`` contains
    the binary tree ``binary_tree``.

    INPUT:

    - ``binary_tree`` -- a binary tree

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: ip.contains_binary_tree(BinaryTree([[None,[None,[]]],None]))
        True
        sage: ip.contains_binary_tree(BinaryTree([None,[[[],None],None]]))
        True
        sage: ip.contains_binary_tree(BinaryTree([[],[[],None]]))
        False
        sage: ip.contains_binary_tree(ip.lower_binary_tree())
        True
        sage: ip.contains_binary_tree(ip.upper_binary_tree())
        True
        sage: all(ip.contains_binary_tree(bt) for bt in ip.binary_trees())
        True
    """
    # a tree lies in the interval iff its canonical 132-avoiding
    # permutation is a linear extension of the interval-poset
    perm = binary_tree.to_132_avoiding_permutation()
    return self.is_linear_extension(perm)
def contains_dyck_word(self, dyck_word):
    r"""
    Return whether the interval represented by ``self`` contains
    the Dyck word ``dyck_word``.

    INPUT:

    - ``dyck_word`` -- a Dyck word

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: ip.contains_dyck_word(DyckWord([1,1,1,0,0,0,1,0]))
        True
        sage: ip.contains_dyck_word(DyckWord([1,1,0,1,0,1,0,0]))
        True
        sage: ip.contains_dyck_word(DyckWord([1,0,1,1,0,1,0,0]))
        False
        sage: ip.contains_dyck_word(ip.lower_dyck_word())
        True
        sage: ip.contains_dyck_word(ip.upper_dyck_word())
        True
        sage: all(ip.contains_dyck_word(bt) for bt in ip.dyck_words())
        True
    """
    # reduce to the binary-tree version via the Tamari bijection
    tree = dyck_word.to_binary_tree_tamari()
    return self.contains_binary_tree(tree)
def intersection(self, other):
    r"""
    Return the interval-poset formed by combining the relations from
    both ``self`` and ``other``. It corresponds to the intersection
    of the two corresponding intervals of the Tamari lattice.

    INPUT:

    - ``other`` -- an interval-poset of the same size as ``self``

    EXAMPLES::

        sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip2 = TamariIntervalPoset(4,[(4,3)])
        sage: ip1.intersection(ip2)
        The Tamari interval of size 4 induced by relations [(1, 2), (2, 3), (4, 3)]
        sage: ip3 = TamariIntervalPoset(4,[(2,1)])
        sage: ip1.intersection(ip3)
        Traceback (most recent call last):
        ...
        ValueError: This intersection is empty, it does not correspond to an interval-poset.
        sage: ip4 = TamariIntervalPoset(3,[(2,3)])
        sage: ip2.intersection(ip4)
        Traceback (most recent call last):
        ...
        ValueError: Intersections are only possible on interval-posets of the same size.
    """
    if other.size() != self.size():
        raise ValueError("Intersections are only possible on interval-posets of the same size.")
    try:
        # the union of the relations is valid iff the intersection of the
        # intervals is nonempty; the constructor checks validity
        return TamariIntervalPoset(self.size(),
                                   self._cover_relations + other._cover_relations)
    except ValueError:
        # `from None` suppresses the internal constructor error so the user
        # sees a single, meaningful exception instead of a chained traceback
        raise ValueError("This intersection is empty, it does not correspond to an interval-poset.") from None
def initial_forest(self):
    r"""
    Return the initial forest of ``self``, i.e., the interval-poset
    formed from only the increasing relations of ``self``.

    EXAMPLES::

        sage: TamariIntervalPoset(4,[(1,2),(3,2),(2,4),(3,4)]).initial_forest()
        The Tamari interval of size 4 induced by relations [(1, 2), (2, 4), (3, 4)]
        sage: ip = TamariIntervalPoset(4,[(1,2),(2,3)])
        sage: ip.initial_forest() == ip
        True
    """
    # keep only the increasing relations
    relations = self.increasing_cover_relations()
    return TamariIntervalPoset(self.size(), relations)
def final_forest(self):
    r"""
    Return the final forest of ``self``, i.e., the interval-poset
    formed with only the decreasing relations of ``self``.

    EXAMPLES::

        sage: TamariIntervalPoset(4,[(2,1),(3,2),(3,4),(4,2)]).final_forest()
        The Tamari interval of size 4 induced by relations [(4, 2), (3, 2), (2, 1)]
        sage: ip = TamariIntervalPoset(3,[(2,1),(3,1)])
        sage: ip.final_forest() == ip
        True
    """
    # keep only the decreasing relations
    relations = self.decreasing_cover_relations()
    return TamariIntervalPoset(self.size(), relations)
def is_initial_interval(self):
    r"""
    Return if ``self`` corresponds to an initial interval of the Tamari
    lattice, i.e. if its lower end is the smallest element of the lattice.

    It consists of checking that ``self`` does not contain any decreasing
    relations.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4, [(1, 2), (2, 4), (3, 4)])
        sage: ip.is_initial_interval()
        True
        sage: ip.lower_dyck_word()
        [1, 0, 1, 0, 1, 0, 1, 0]
        sage: ip = TamariIntervalPoset(4, [(1, 2), (2, 4), (3, 4), (3, 2)])
        sage: ip.is_initial_interval()
        False
        sage: ip.lower_dyck_word()
        [1, 0, 1, 1, 0, 0, 1, 0]
        sage: all(DyckWord([1,0,1,0,1,0]).tamari_interval(dw).is_initial_interval() for dw in DyckWords(3))
        True
    """
    # initial <=> no decreasing relations at all
    return not self.decreasing_cover_relations()
def is_final_interval(self):
    r"""
    Return if ``self`` corresponds to a final interval of the Tamari
    lattice, i.e. if its upper end is the largest element of the lattice.

    It consists of checking that ``self`` does not contain any increasing
    relations.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4, [(4, 3), (3, 1), (2, 1)])
        sage: ip.is_final_interval()
        True
        sage: ip.upper_dyck_word()
        [1, 1, 1, 1, 0, 0, 0, 0]
        sage: ip = TamariIntervalPoset(4, [(4, 3), (3, 1), (2, 1), (2, 3)])
        sage: ip.is_final_interval()
        False
        sage: ip.upper_dyck_word()
        [1, 1, 0, 1, 1, 0, 0, 0]
        sage: all(dw.tamari_interval(DyckWord([1, 1, 1, 0, 0, 0])).is_final_interval() for dw in DyckWords(3))
        True
    """
    # final <=> no increasing relations at all
    return not self.increasing_cover_relations()
def lower_binary_tree(self):
    r"""
    Return the lowest binary tree in the interval of the Tamari
    lattice represented by ``self``.

    This is a binary tree. It is the shape of the unique binary
    search tree whose left-branch ordered forest (i.e., the result
    of applying
    :meth:`~sage.combinat.binary_tree.BinaryTree.to_ordered_tree_left_branch`
    and cutting off the root) is the final forest of ``self``.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.lower_binary_tree()
        [[., .], [[., [., .]], [., .]]]
        sage: TamariIntervalPosets.final_forest(ip.lower_binary_tree()) == ip.final_forest()
        True
        sage: ip == TamariIntervalPosets.from_binary_trees(ip.lower_binary_tree(),ip.upper_binary_tree())
        True
    """
    # the minimal linear extension, read right-to-left as a binary
    # search tree, gives the shape of the lower bound
    perm = self.min_linear_extension()
    return perm.binary_search_tree_shape(left_to_right=False)
def lower_dyck_word(self):
    r"""
    Return the lowest Dyck word in the interval of the Tamari lattice
    represented by ``self``.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.lower_dyck_word()
        [1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0]
        sage: TamariIntervalPosets.final_forest(ip.lower_dyck_word()) == ip.final_forest()
        True
        sage: ip == TamariIntervalPosets.from_dyck_words(ip.lower_dyck_word(),ip.upper_dyck_word())
        True
    """
    # translate the lower binary tree through the Tamari bijection
    tree = self.lower_binary_tree()
    return tree.to_dyck_word_tamari()
def upper_binary_tree(self):
    r"""
    Return the highest binary tree in the interval of the Tamari
    lattice represented by ``self``.

    This is a binary tree. It is the shape of the unique binary
    search tree whose right-branch ordered forest (i.e., the result
    of applying
    :meth:`~sage.combinat.binary_tree.BinaryTree.to_ordered_tree_right_branch`
    and cutting off the root) is the initial forest of ``self``.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.upper_binary_tree()
        [[., .], [., [[., .], [., .]]]]
        sage: TamariIntervalPosets.initial_forest(ip.upper_binary_tree()) == ip.initial_forest()
        True
        sage: ip == TamariIntervalPosets.from_binary_trees(ip.lower_binary_tree(),ip.upper_binary_tree())
        True
    """
    # the maximal linear extension, read right-to-left as a binary
    # search tree, gives the shape of the upper bound
    perm = self.max_linear_extension()
    return perm.binary_search_tree_shape(left_to_right=False)
def upper_dyck_word(self):
    r"""
    Return the highest Dyck word in the interval of the Tamari lattice
    represented by ``self``.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.upper_dyck_word()
        [1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0]
        sage: TamariIntervalPosets.initial_forest(ip.upper_dyck_word()) == ip.initial_forest()
        True
        sage: ip == TamariIntervalPosets.from_dyck_words(ip.lower_dyck_word(),ip.upper_dyck_word())
        True
    """
    # translate the upper binary tree through the Tamari bijection
    tree = self.upper_binary_tree()
    return tree.to_dyck_word_tamari()
def sub_poset(self, start, end):
    r"""
    Return the renormalized sub-poset of ``self`` consisting solely
    of integers from ``start`` (inclusive) to ``end`` (not inclusive).

    "Renormalized" means that these integers are relabelled
    `1,2,\ldots,k` in the obvious way (i.e., by subtracting
    ``start - 1``).

    INPUT:

    - ``start`` -- an integer, the starting vertex (inclusive)
    - ``end`` -- an integer, the ending vertex (not inclusive)

    EXAMPLES::

        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(3,5),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (3, 5), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.sub_poset(1,3)
        The Tamari interval of size 2 induced by relations [(1, 2)]
        sage: ip.sub_poset(1,4)
        The Tamari interval of size 3 induced by relations [(1, 2), (3, 2)]
        sage: ip.sub_poset(1,5)
        The Tamari interval of size 4 induced by relations [(1, 2), (4, 3), (3, 2)]
        sage: ip.sub_poset(1,7) == ip
        True
        sage: ip.sub_poset(1,1)
        The Tamari interval of size 0 induced by relations []
    """
    if start < 1 or start > end or end > self.size() + 1:
        raise ValueError("Invalid starting or ending value, accepted: 1 <= start <= end <= size+1")
    if start == end:
        return TamariIntervalPoset(0, [])
    # shift every kept vertex so the sub-poset is labelled from 1
    shift = 1 - start
    # a relation (a, b) is kept when its smaller endpoint is >= start
    # and its larger endpoint is < end
    relations = [(a + shift, b + shift)
                 for a, b in self.increasing_cover_relations()
                 if a >= start and b < end]
    relations += [(b + shift, a + shift)
                  for b, a in self.decreasing_cover_relations()
                  if a >= start and b < end]
    return TamariIntervalPoset(end - start, relations)
def min_linear_extension(self):
    r"""
    Return the minimal permutation for the right weak order which is
    a linear extension of ``self``.

    This is also the minimal permutation in the sylvester
    class of ``self.lower_binary_tree()`` and is a 312-avoiding
    permutation.

    The right weak order is also known as the right permutohedron
    order. See
    :meth:`~sage.combinat.permutation.Permutation.permutohedron_lequal`
    for its definition.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip.min_linear_extension()
        [1, 2, 4, 3]
        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)])
        sage: ip.min_linear_extension()
        [1, 4, 3, 6, 5, 2]
        sage: ip = TamariIntervalPoset(0,[])
        sage: ip.min_linear_extension()
        []
        sage: ip = TamariIntervalPoset(5, [(1, 4), (2, 4), (3, 4), (5, 4)]); ip
        The Tamari interval of size 5 induced by relations [(1, 4), (2, 4), (3, 4), (5, 4)]
        sage: ip.min_linear_extension()
        [1, 2, 3, 5, 4]
    """
    # The minimal extension is obtained by postfix-reading the final
    # forest of ``self`` (children before their parent).
    def visit(vertex, word):
        # recursive postfix traversal of one decreasing tree
        for child in self.decreasing_children(vertex):
            visit(child, word)
        word.append(vertex)

    word = []
    for root in self.decreasing_roots():
        visit(root, word)
    return Permutation(word)
def max_linear_extension(self):
    r"""
    Return the maximal permutation for the right weak order which is
    a linear extension of ``self``.

    This is also the maximal permutation in the sylvester
    class of ``self.upper_binary_tree()`` and is a 132-avoiding
    permutation.

    The right weak order is also known as the right permutohedron
    order. See
    :meth:`~sage.combinat.permutation.Permutation.permutohedron_lequal`
    for its definition.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: ip.max_linear_extension()
        [4, 1, 2, 3]
        sage: ip = TamariIntervalPoset(6,[(3,2),(4,3),(5,2),(6,5),(1,2),(4,5)]); ip
        The Tamari interval of size 6 induced by relations [(1, 2), (4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
        sage: ip.max_linear_extension()
        [6, 4, 5, 3, 1, 2]
        sage: ip = TamariIntervalPoset(0,[]); ip
        The Tamari interval of size 0 induced by relations []
        sage: ip.max_linear_extension()
        []
        sage: ip = TamariIntervalPoset(5, [(1, 4), (2, 4), (3, 4), (5, 4)]); ip
        The Tamari interval of size 5 induced by relations [(1, 4), (2, 4), (3, 4), (5, 4)]
        sage: ip.max_linear_extension()
        [5, 3, 2, 1, 4]
    """
    # The maximal extension is obtained by right-to-left postfix-reading
    # the initial forest of ``self``. The right-to-leftness is ensured by
    # the fact that :meth:`increasing_children` and
    # :meth:`increasing_roots` output their results in decreasing order.
    def visit(vertex, word):
        # recursive postfix traversal of one increasing tree
        for child in self.increasing_children(vertex):
            visit(child, word)
        word.append(vertex)

    word = []
    for root in self.increasing_roots():
        visit(root, word)
    return Permutation(word)
def linear_extensions(self):
    r"""
    Return an iterator on the permutations which are linear
    extensions of ``self``.

    They form an interval of the right weak order (also called the
    right permutohedron order -- see
    :meth:`~sage.combinat.permutation.Permutation.permutohedron_lequal`
    for a definition).

    EXAMPLES::

        sage: ip = TamariIntervalPoset(3,[(1,2),(3,2)])
        sage: list(ip.linear_extensions())
        [[3, 1, 2], [1, 3, 2]]
        sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
        sage: list(ip.linear_extensions())
        [[4, 1, 2, 3], [1, 4, 2, 3], [1, 2, 4, 3]]
    """
    # wrap each raw extension of the underlying poset as a Permutation
    yield from (Permutation(ext)
                for ext in self._poset.linear_extensions())
def lower_contained_intervals(self):
    r"""
    If ``self`` represents the interval `[t_1, t_2]` of the Tamari
    lattice, return an iterator on all intervals `[t_1,t]` with
    `t \leq t_2` for the Tamari lattice.

    In terms of interval-posets, it corresponds to adding all possible
    relations of the form `n` precedes `m` with `n<m`.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: list(ip.lower_contained_intervals())
        [The Tamari interval of size 4 induced by relations [(2, 4), (3, 4), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(1, 4), (2, 4), (3, 4), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(2, 3), (3, 4), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(1, 4), (2, 3), (3, 4), (3, 1), (2, 1)]]
        sage: ip = TamariIntervalPoset(4,[])
        sage: len(list(ip.lower_contained_intervals()))
        14
    """
    size = self._size
    yield self
    # We try to add links recursively in this order:
    #   1 -> 2
    #   2 -> 3
    #   1 -> 3
    #   3 -> 4
    #   2 -> 4
    #   1 -> 4
    #   ...
    # ("Link" means "relation of the poset".)
    #
    # One useful feature of interval-posets is that if you add a single
    # new relation -- say, `x` precedes `y` -- to an existing
    # interval-poset and take the transitive closure, and if the axioms
    # of an interval-poset are still satisfied for (a,c) = (x,y) and
    # for (a,c) = (y,x), then the transitive closure is an
    # interval-poset (i.e., roughly speaking, the other new relations
    # forced by `x` preceding `y` under transitive closure cannot
    # invalidate the axioms). This is helpful when extending
    # interval-posets, and is the reason why this and other iterators
    # don't yield invalid interval-posets.
    # (This was previously a bare r""" ... """ string expression inside
    # the function body -- a no-op statement -- now turned into comments.)
    def add_relations(poset, n, m):
        r"""
        Internal recursive method to generate all possible intervals.

        At every step during the iteration, we have n < m and every
        i satisfying n < i < m satisfies that i precedes m in the
        poset ``poset`` (except when m > size).
        """
        if n <= 0:
            # if n<=0, then we go to the next m
            n = m
            m += 1
        if m > size:
            # if m>size, it's finished
            return
        if poset.le(n, m):
            # there is already a link n->m, so we go to the next n
            for pos in add_relations(poset, n - 1, m):
                yield pos
        elif poset.le(m, n):
            # there is an inverse link m->n, we know we won't be able
            # to create a link i->m with i<=n, so we go to the next m
            for pos in add_relations(poset, m, m + 1):
                yield pos
        else:
            # there is no link n->m
            # first option : we don't create the link and go to the next m
            # (since the lack of a link n->m forbids any links i->m
            # with i<n)
            for pos in add_relations(poset, m, m + 1):
                yield pos
            # second option : we create the link
            # (this is allowed because links i->m already exist for all
            # n<i<m, or else we wouldn't be here)
            poset = TamariIntervalPoset(poset.size(), poset._cover_relations + ((n, m),))
            yield poset
            # and then, we go to the next n
            for pos in add_relations(poset, n - 1, m):
                yield pos

    for inter in add_relations(self, 1, 2):
        yield inter
def interval_cardinality(self):
    r"""
    Return the cardinality of the interval, i.e., the number of elements
    (binary trees or Dyck words) in the interval represented by ``self``.

    Not to be confused with :meth:`size` which is the number of
    vertices.

    EXAMPLES::

        sage: TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)]).interval_cardinality()
        4
        sage: TamariIntervalPoset(4,[]).interval_cardinality()
        14
        sage: TamariIntervalPoset(4,[(1,2),(2,3),(3,4)]).interval_cardinality()
        1
    """
    # count the iterator directly instead of materializing the whole
    # list of intervals (len(list(...))) just to take its length
    return sum(1 for _ in self.lower_contained_intervals())
def binary_trees(self):
    r"""
    Return an iterator on all the binary trees in the interval
    represented by ``self``.

    EXAMPLES::

        sage: list(TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)]).binary_trees())
        [[., [[., [., .]], .]],
        [[., [., [., .]]], .],
        [., [[[., .], .], .]],
        [[., [[., .], .]], .]]
        sage: set(TamariIntervalPoset(4,[]).binary_trees()) == set(BinaryTrees(4))
        True
    """
    # each interval [t_1, t] contributes its upper bound t
    yield from (interval.upper_binary_tree()
                for interval in self.lower_contained_intervals())
def dyck_words(self):
    r"""
    Return an iterator on all the Dyck words in the interval
    represented by ``self``.

    EXAMPLES::

        sage: list(TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)]).dyck_words())
        [[1, 1, 1, 0, 0, 1, 0, 0],
        [1, 1, 1, 0, 0, 0, 1, 0],
        [1, 1, 0, 1, 0, 1, 0, 0],
        [1, 1, 0, 1, 0, 0, 1, 0]]
        sage: set(TamariIntervalPoset(4,[]).dyck_words()) == set(DyckWords(4))
        True
    """
    # each interval [t_1, t] contributes its upper bound t
    yield from (interval.upper_dyck_word()
                for interval in self.lower_contained_intervals())
def maximal_chain_tamari_intervals(self):
    r"""
    Return an iterator on the upper contained intervals of one
    longest chain of the Tamari interval represented by ``self``.

    If ``self`` represents the interval `[T_1,T_2]` of the Tamari
    lattice, this returns intervals `[T',T_2]` with `T'` following
    one longest chain between `T_1` and `T_2`.

    To obtain a longest chain, we use the Tamari inversions of ``self``.
    The elements of the chain are obtained by adding one by one the
    relations `(b,a)` from each Tamari inversion `(a,b)` to ``self``,
    where the Tamari inversions are taken in lexicographic order.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: list(ip.maximal_chain_tamari_intervals())
        [The Tamari interval of size 4 induced by relations [(2, 4), (3, 4), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(2, 4), (3, 4), (4, 1), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(2, 4), (3, 4), (4, 1), (3, 2), (2, 1)]]
        sage: ip = TamariIntervalPoset(4,[])
        sage: list(ip.maximal_chain_tamari_intervals())
        [The Tamari interval of size 4 induced by relations [],
        The Tamari interval of size 4 induced by relations [(2, 1)],
        The Tamari interval of size 4 induced by relations [(3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(4, 1), (3, 1), (2, 1)],
        The Tamari interval of size 4 induced by relations [(4, 1), (3, 2), (2, 1)],
        The Tamari interval of size 4 induced by relations [(4, 2), (3, 2), (2, 1)],
        The Tamari interval of size 4 induced by relations [(4, 3), (3, 2), (2, 1)]]
    """
    yield self
    size = self.size()
    relations = list(self._cover_relations)
    # adding the reverse of each Tamari inversion, in lexicographic
    # order, walks one step down a longest chain at a time
    for a, b in self.tamari_inversions_iter():
        relations.append((b, a))
        yield TamariIntervalPoset(size, relations)
def maximal_chain_binary_trees(self):
    r"""
    Return an iterator on the binary trees forming a longest chain of
    ``self`` (regarding ``self`` as an interval of the Tamari
    lattice).

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: list(ip.maximal_chain_binary_trees())
        [[[., [[., .], .]], .], [., [[[., .], .], .]], [., [[., [., .]], .]]]
        sage: ip = TamariIntervalPoset(4,[])
        sage: list(ip.maximal_chain_binary_trees())
        [[[[[., .], .], .], .],
        [[[., [., .]], .], .],
        [[., [[., .], .]], .],
        [., [[[., .], .], .]],
        [., [[., [., .]], .]],
        [., [., [[., .], .]]],
        [., [., [., [., .]]]]]
    """
    # the chain is the sequence of lower bounds along the maximal chain
    # of intervals
    yield from (interval.lower_binary_tree()
                for interval in self.maximal_chain_tamari_intervals())
def maximal_chain_dyck_words(self):
    r"""
    Return an iterator on the Dyck words forming a longest chain of
    ``self`` (regarding ``self`` as an interval of the Tamari
    lattice).

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: list(ip.maximal_chain_dyck_words())
        [[1, 1, 0, 1, 0, 0, 1, 0], [1, 1, 0, 1, 0, 1, 0, 0], [1, 1, 1, 0, 0, 1, 0, 0]]
        sage: ip = TamariIntervalPoset(4,[])
        sage: list(ip.maximal_chain_dyck_words())
        [[1, 0, 1, 0, 1, 0, 1, 0],
        [1, 1, 0, 0, 1, 0, 1, 0],
        [1, 1, 0, 1, 0, 0, 1, 0],
        [1, 1, 0, 1, 0, 1, 0, 0],
        [1, 1, 1, 0, 0, 1, 0, 0],
        [1, 1, 1, 0, 1, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0, 0]]
    """
    # the chain is the sequence of lower bounds along the maximal chain
    # of intervals
    yield from (interval.lower_dyck_word()
                for interval in self.maximal_chain_tamari_intervals())
def tamari_inversions(self):
    r"""
    Return the Tamari inversions of ``self``, as a list in
    lexicographic order.

    A Tamari inversion is a pair of vertices `(a,b)` with `a < b`
    such that:

    - the decreasing parent of `b` is strictly smaller than `a` (or
      does not exist), and
    - the increasing parent of `a` is strictly bigger than `b` (or
      does not exist).

    "Smaller" and "bigger" refer to the numerical values of the
    elements, not to the poset order.

    The number of Tamari inversions is the length of the
    longest chain of the Tamari interval represented by ``self``.

    Indeed, when an interval consists of just one binary tree, it has
    no inversion. One can also prove that if a Tamari interval
    `I' = [T_1', T_2']` is a proper subset of a Tamari interval
    `I = [T_1, T_2]`, then the inversion number of `I'` is strictly
    lower than the inversion number of `I`. And finally, by adding
    the relation `(b,a)` to the interval-poset where `(a,b)` is the
    first inversion of `I` in lexicographic order, one reduces the
    inversion number by exactly `1`.

    .. SEEALSO::

        :meth:`tamari_inversions_iter`.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(3,[])
        sage: ip.tamari_inversions()
        [(1, 2), (1, 3), (2, 3)]
        sage: ip = TamariIntervalPoset(3,[(2,1)])
        sage: ip.tamari_inversions()
        [(1, 3), (2, 3)]
        sage: ip = TamariIntervalPoset(3,[(1,2)])
        sage: ip.tamari_inversions()
        [(2, 3)]
        sage: ip = TamariIntervalPoset(3,[(1,2),(3,2)])
        sage: ip.tamari_inversions()
        []
        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: ip.tamari_inversions()
        [(1, 4), (2, 3)]
        sage: ip = TamariIntervalPoset(4,[])
        sage: ip.tamari_inversions()
        [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
        sage: all(len(TamariIntervalPosets.from_binary_trees(bt,bt).tamari_inversions())==0 for bt in BinaryTrees(3))
        True
        sage: all(len(TamariIntervalPosets.from_binary_trees(bt,bt).tamari_inversions())==0 for bt in BinaryTrees(4))
        True
    """
    # the iterator already produces the inversions in lexicographic order
    return list(self.tamari_inversions_iter())
def tamari_inversions_iter(self):
    r"""
    Iterate over the Tamari inversions of ``self``, in
    lexicographic order.

    See :meth:`tamari_inversions` for the definition of the terms
    involved.

    EXAMPLES::

        sage: T = TamariIntervalPoset(5, [[1,2],[3,4],[3,2],[5,2],[4,2]])
        sage: list(T.tamari_inversions_iter())
        [(4, 5)]
        sage: T = TamariIntervalPoset(8, [(2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (8, 7), (6, 4), (5, 4), (4, 3), (3, 2)])
        sage: list(T.tamari_inversions_iter())
        [(1, 2), (1, 7), (5, 6)]
        sage: T = TamariIntervalPoset(1, [])
        sage: list(T.tamari_inversions_iter())
        []
        sage: T = TamariIntervalPoset(0, [])
        sage: list(T.tamari_inversions_iter())
        []
    """
    size = self.size()
    # a == size never yields an inversion, so it is excluded
    for a in range(1, size):
        upper = self.increasing_parent(a)
        # b must stay strictly below the increasing parent of a
        # (no bound when the parent does not exist)
        bound = size + 1 if upper is None else upper
        for b in range(a + 1, bound):
            lower = self.decreasing_parent(b)
            # the decreasing parent of b must be < a (or absent)
            if lower is None or lower < a:
                yield (a, b)
def number_of_tamari_inversions(self):
    r"""
    Return the number of Tamari inversions of ``self``. This is also
    the length the longest chain of the Tamari interval represented
    by ``self``.

    EXAMPLES::

        sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
        sage: ip.number_of_tamari_inversions()
        2
        sage: ip = TamariIntervalPoset(4,[])
        sage: ip.number_of_tamari_inversions()
        6
        sage: ip = TamariIntervalPoset(3,[])
        sage: ip.number_of_tamari_inversions()
        3
    """
    # count the iterator directly instead of building the full list of
    # inversions (len(self.tamari_inversions())) just to measure it
    return sum(1 for _ in self.tamari_inversions_iter())
def number_of_new_components(self):
    """
    Return the number of terms in the decomposition in new interval-posets.

    Every interval-poset has a unique decomposition as a planar tree
    of new interval-posets, as explained in [ChapTamari08]_. This function
    just computes the number of terms, not the planar tree nor
    the terms themselves.

    .. SEEALSO:: :meth:`is_new`, :meth:`new_decomposition`

    EXAMPLES::

        sage: TIP4 = TamariIntervalPosets(4)
        sage: nb = [u.number_of_new_components() for u in TIP4]
        sage: [nb.count(i) for i in range(1, 5)]
        [12, 21, 21, 14]
    """
    t_low = self.lower_binary_tree().to_tilting()
    # tilting positions are pairs of integers, hence hashable; a set
    # turns the quadratic list-membership scan into O(1) lookups
    t_up = set(self.upper_binary_tree().to_tilting())
    return sum(1 for p in t_low if p in t_up)
def new_decomposition(self):
    """
    Return the decomposition of the interval-poset into
    new interval-posets.

    Every interval-poset has a unique decomposition as a planar
    tree of new interval-posets, as explained in
    [ChapTamari08]_. This function computes the terms of this
    decomposition, but not the planar tree.

    For the number of terms, you can use instead the method
    :meth:`number_of_new_components`.

    OUTPUT:

    a list of new interval-posets.

    .. SEEALSO::

        :meth:`number_of_new_components`, :meth:`is_new`

    EXAMPLES::

        sage: ex = TamariIntervalPosets(4)[11]
        sage: ex.number_of_new_components()
        3
        sage: ex.new_decomposition()
        [The Tamari interval of size 1 induced by relations [],
        The Tamari interval of size 2 induced by relations [],
        The Tamari interval of size 1 induced by relations []]

    TESTS::

        sage: ex = TamariIntervalPosets(4).random_element()
        sage: dec = ex.new_decomposition()
        sage: len(dec) == ex.number_of_new_components()
        True
        sage: all(u.is_new() for u in dec)
        True
    """
    from sage.combinat.binary_tree import BinaryTree
    # tilting representations of both bounds of the interval
    t_low = self.lower_binary_tree().to_tilting()
    t_up = self.upper_binary_tree().to_tilting()
    # the positions shared by both tiltings are the roots of the
    # components of the decomposition
    common = [p for p in t_low if p in t_up]

    def extract_tree(x, y, tilt, common):
        """
        Extract a tree with root at position xy (recursive).
        """
        # look for the left subtree: the largest k < y with (x, k) in
        # the tilting; recurse only when it is not a component root
        left_tree = None
        for k in range(y - 1, x, -1):
            if (x, k) in tilt:
                if (x, k) not in common:
                    left_tree = extract_tree(x, k, tilt, common)
                break
        # symmetrically, look for the right subtree
        right_tree = None
        for k in range(x + 1, y):
            if (k, y) in tilt:
                if (k, y) not in common:
                    right_tree = extract_tree(k, y, tilt, common)
                break
        return BinaryTree([left_tree, right_tree])

    TIP = self.parent()
    # each common position yields one component, as the interval
    # between the trees extracted from the lower and upper tiltings
    return [TIP.from_binary_trees(extract_tree(cx, cy, t_low, common),
                                  extract_tree(cx, cy, t_up, common))
            for cx, cy in common]
def is_new(self):
"""
Return ``True`` if ``self`` is a new Tamari interval.
Here 'new' means that the interval is not contained in any
facet of the associahedron.
They have been considered in section 9 of [ChapTamari08]_.
.. SEEALSO:: :meth:`is_modern`
EXAMPLES::
sage: TIP4 = TamariIntervalPosets(4)
sage: len([u for u in TIP4 if u.is_new()])
12
sage: TIP3 = TamariIntervalPosets(3)
sage: len([u for u in TIP3 if u.is_new()])
3
"""
c_up = self.upper_binary_tree().single_edge_cut_shapes()
c_down = self.lower_binary_tree().single_edge_cut_shapes()
return not any(x in c_up for x in c_down)
def is_simple(self):
"""
Return ``True`` if ``self`` is a simple Tamari interval.
Here 'simple' means that the interval contains a unique binary tree.
These intervals define the simple modules over the
incidence algebras of the Tamari lattices.
.. SEEALSO:: :meth:`is_final_interval`, :meth:`is_initial_interval`
EXAMPLES::
sage: TIP4 = TamariIntervalPosets(4)
sage: len([u for u in TIP4 if u.is_simple()])
14
sage: TIP3 = TamariIntervalPosets(3)
sage: len([u for u in TIP3 if u.is_simple()])
5
"""
return self.upper_binary_tree() == self.lower_binary_tree()
def is_synchronized(self):
"""
Return ``True`` if ``self`` is a synchronized Tamari interval.
This means that the upper and lower binary trees have the same canopee.
This has been considered in [FPR15]_. The numbers of
synchronized intervals are given by the sequence :oeis:`A000139`.
EXAMPLES::
sage: len([T for T in TamariIntervalPosets(3)
....: if T.is_synchronized()])
6
"""
up = self.upper_binary_tree()
down = self.lower_binary_tree()
return down.canopee() == up.canopee()
def is_modern(self):
"""
Return ``True`` if ``self`` is a modern Tamari interval.
This is defined by exclusion of a simple pattern in the Hasse diagram,
namely there is no configuration ``y --> x <-- z``
with `1 \leq y < x < z \leq n`.
.. SEEALSO:: :meth:`is_new`
EXAMPLES::
sage: len([T for T in TamariIntervalPosets(3) if T.is_modern()])
12
"""
G = self.poset().hasse_diagram()
for x in G:
nx = list(G.neighbors_in(x))
nx.append(x)
if min(nx) < x and max(nx) > x:
return False
return True
def is_exceptional(self):
"""
Return ``True`` if ``self`` is an exceptional Tamari interval.
This is defined by exclusion of a simple pattern in the Hasse diagram,
namely there is no configuration ``y <-- x --> z``
with `1 \leq y < x < z \leq n`.
EXAMPLES::
sage: len([T for T in TamariIntervalPosets(3)
....: if T.is_exceptional()])
12
"""
G = self.poset().hasse_diagram()
for x in G:
nx = list(G.neighbors_out(x))
nx.append(x)
if min(nx) < x and max(nx) > x:
return False
return True
# Abstract class to serve as a Factory ; no instances are created.
class TamariIntervalPosets(UniqueRepresentation, Parent):
    r"""
    Factory for interval-posets.
    INPUT:
    - ``size`` -- (optional) an integer
    OUTPUT:
    - the set of all interval-posets (of the given ``size`` if specified)
    EXAMPLES::
        sage: TamariIntervalPosets()
        Interval-posets
        sage: TamariIntervalPosets(2)
        Interval-posets of size 2
    .. NOTE::
        This is a factory class whose constructor returns instances of
        subclasses.
    """
    @staticmethod
    def __classcall_private__(cls, n=None):
        r"""
        Dispatch construction to the concrete parent: all interval-posets
        when ``n`` is ``None``, those of size ``n`` otherwise.
        TESTS::
            sage: from sage.combinat.interval_posets import TamariIntervalPosets_all, TamariIntervalPosets_size
            sage: isinstance(TamariIntervalPosets(2), TamariIntervalPosets_size)
            True
            sage: isinstance(TamariIntervalPosets(), TamariIntervalPosets_all)
            True
            sage: TamariIntervalPosets(2) is TamariIntervalPosets_size(2)
            True
            sage: TamariIntervalPosets() is TamariIntervalPosets_all()
            True
        """
        if n is None:
            return TamariIntervalPosets_all()
        # NN is the set of non-negative integers; reject anything else
        if n not in NN:
            raise ValueError("n must be a non negative integer")
        return TamariIntervalPosets_size(Integer(n))
# add options to class
options=GlobalOptions('TamariIntervalPosets',
module='sage.combinat.interval_posets',
doc=r"""
Set and display the options for Tamari interval-posets. If no
parameters are set, then the function returns a copy of the options
dictionary.
The ``options`` to Tamari interval-posets can be accessed as the method
:meth:`TamariIntervalPosets.options` of :class:`TamariIntervalPosets`
and related parent classes.
""",
end_doc=r"""
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(2,4),(3,4),(2,1),(3,1)])
sage: ip.latex_options.color_decreasing
'red'
sage: TamariIntervalPosets.options.latex_color_decreasing='green'
sage: ip.latex_options.color_decreasing
'green'
sage: TamariIntervalPosets.options._reset()
sage: ip.latex_options.color_decreasing
'red'
""",
latex_tikz_scale=dict(default=1,
description='the default value for the tikz scale when latexed',
checker=lambda x: True), # More trouble than it's worth to check
latex_line_width_scalar=dict(default=0.5,
description='the default value for the line width as a'
'multiple of the tikz scale when latexed',
checker=lambda x: True), # More trouble than it's worth to check
latex_color_decreasing=dict(default="red",
description='the default color of decreasing relations when latexed',
checker=lambda x: True), # More trouble than it's worth to check
latex_color_increasing=dict(default="blue",
description='the default color of increasing relations when latexed',
checker=lambda x: True), # More trouble than it's worth to check
latex_hspace=dict(default=1,
description='the default difference between horizontal'
' coordinates of vertices when latexed',
checker=lambda x: True), # More trouble than it's worth to check
latex_vspace=dict(default=1,
description='the default difference between vertical'
' coordinates of vertices when latexed',
checker=lambda x: True) # More trouble than it's worth to check
)
@staticmethod
def check_poset(poset):
r"""
Check if the given poset ``poset`` is a interval-poset, that is,
if it satisfies the following properties:
- Its labels are exactly `1, \ldots, n` where `n` is its size.
- If `a < c` (as numbers) and `a` precedes `c`, then `b` precedes
`c` for all `b` such that `a < b < c`.
- If `a < c` (as numbers) and `c` precedes `a`, then `b` precedes
`a` for all `b` such that `a < b < c`.
INPUT:
- ``poset`` -- a finite labeled poset
EXAMPLES::
sage: p = Poset(([1,2,3],[(1,2),(3,2)]))
sage: TamariIntervalPosets.check_poset(p)
True
sage: p = Poset(([2,3],[(3,2)]))
sage: TamariIntervalPosets.check_poset(p)
False
sage: p = Poset(([1,2,3],[(3,1)]))
sage: TamariIntervalPosets.check_poset(p)
False
sage: p = Poset(([1,2,3],[(1,3)]))
sage: TamariIntervalPosets.check_poset(p)
False
"""
if not set(poset._elements) == set(range(1, poset.cardinality() + 1)):
return False
for i in range(1, poset.cardinality() + 1):
stop = False
for j in range(i - 1, 0, -1):
if not poset.le(j, i):
stop = True # j does not precede i so no j'<j should
elif stop:
return False
stop = False
for j in range(i + 1, poset.cardinality() + 1):
if not poset.le(j, i):
stop = True # j does not precede i so no j'>j should
elif stop:
return False
return True
@staticmethod
def final_forest(element):
r"""
Return the final forest of a binary tree, an interval-poset or a
Dyck word.
A final forest is an interval-poset corresponding to a final
interval of the Tamari lattice, i.e., containing only decreasing
relations.
It can be constructed from a binary tree by its binary
search tree labeling with the rule: `b` precedes
`a` in the final forest iff `b` is in the right subtree of `a`
in the binary search tree.
INPUT:
- ``element`` -- a binary tree, a Dyck word or an interval-poset
EXAMPLES::
sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
sage: TamariIntervalPosets.final_forest(ip)
The Tamari interval of size 4 induced by relations [(1, 2), (2, 3)]
From binary trees::
sage: bt = BinaryTree(); bt
.
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 0 induced by relations []
sage: bt = BinaryTree([]); bt
[., .]
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 1 induced by relations []
sage: bt = BinaryTree([[],None]); bt
[[., .], .]
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 2 induced by relations []
sage: bt = BinaryTree([None,[]]); bt
[., [., .]]
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 2 induced by relations [(2, 1)]
sage: bt = BinaryTree([[],[]]); bt
[[., .], [., .]]
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 3 induced by relations [(3, 2)]
sage: bt = BinaryTree([[None,[[],None]],[]]); bt
[[., [[., .], .]], [., .]]
sage: TamariIntervalPosets.final_forest(bt)
The Tamari interval of size 5 induced by relations [(5, 4), (3, 1), (2, 1)]
From Dyck words::
sage: dw = DyckWord([1,0])
sage: TamariIntervalPosets.final_forest(dw)
The Tamari interval of size 1 induced by relations []
sage: dw = DyckWord([1,1,0,1,0,0,1,1,0,0])
sage: TamariIntervalPosets.final_forest(dw)
The Tamari interval of size 5 induced by relations [(5, 4), (3, 1), (2, 1)]
"""
if isinstance(element, TamariIntervalPoset):
return element.initial_forest()
elif element in DyckWords():
binary_tree = element.to_binary_tree_tamari()
elif element in BinaryTrees() or element in LabelledBinaryTrees():
binary_tree = element
else:
raise ValueError("Do not know how to construct the initial forest of {}".format(element))
def get_relations(bt, start=1):
r"""
Recursive method to get the binary tree final forest relations
with only one recursive reading of the tree.
The vertices are being labelled with integers starting with
``start``.
OUTPUT:
- the indexes of the nodes on the left border of the tree
(these become the roots of the forest)
- the relations of the final forest (as a list of tuples)
- the next available index for a node (size of tree +
``start``)
"""
if not bt:
return [], [], start # leaf
roots, relations, index = get_relations(bt[0], start=start)
rroots, rrelations, rindex = get_relations(bt[1], start=index + 1)
roots.append(index)
relations.extend(rrelations)
relations.extend([(j, index) for j in rroots])
return roots, relations, rindex
roots, relations, index = get_relations(binary_tree)
return TamariIntervalPoset(index - 1, relations)
    @staticmethod
    def initial_forest(element):
        r"""
        Return the initial forest of a binary tree, an interval-poset or
        a Dyck word.
        An initial forest is an interval-poset corresponding to an initial
        interval of the Tamari lattice, i.e., containing only increasing
        relations.
        It can be constructed from a binary tree by its binary
        search tree labeling with the rule: `a` precedes `b` in the
        initial forest iff `a` is in the left subtree of `b` in the
        binary search tree.
        INPUT:
        - ``element`` -- a binary tree, a Dyck word or an interval-poset
        EXAMPLES::
            sage: ip = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
            sage: TamariIntervalPosets.initial_forest(ip)
            The Tamari interval of size 4 induced by relations [(1, 2), (2, 3)]
        with binary trees::
            sage: bt = BinaryTree(); bt
            .
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 0 induced by relations []
            sage: bt = BinaryTree([]); bt
            [., .]
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 1 induced by relations []
            sage: bt = BinaryTree([[],None]); bt
            [[., .], .]
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 2 induced by relations [(1, 2)]
            sage: bt = BinaryTree([None,[]]); bt
            [., [., .]]
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 2 induced by relations []
            sage: bt = BinaryTree([[],[]]); bt
            [[., .], [., .]]
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 3 induced by relations [(1, 2)]
            sage: bt = BinaryTree([[None,[[],None]],[]]); bt
            [[., [[., .], .]], [., .]]
            sage: TamariIntervalPosets.initial_forest(bt)
            The Tamari interval of size 5 induced by relations [(1, 4), (2, 3), (3, 4)]
        from Dyck words::
            sage: dw = DyckWord([1,0])
            sage: TamariIntervalPosets.initial_forest(dw)
            The Tamari interval of size 1 induced by relations []
            sage: dw = DyckWord([1,1,0,1,0,0,1,1,0,0])
            sage: TamariIntervalPosets.initial_forest(dw)
            The Tamari interval of size 5 induced by relations [(1, 4), (2, 3), (3, 4)]
        """
        if isinstance(element, TamariIntervalPoset):
            return element.initial_forest()
        elif element in DyckWords():
            # Dyck words go through their Tamari binary tree
            binary_tree = element.to_binary_tree_tamari()
        elif element in BinaryTrees() or element in LabelledBinaryTrees():
            binary_tree = element
        else:
            raise ValueError("Do not know how to construct the initial forest of {}".format(element))
        def get_relations(bt, start=1):
            r"""
            Recursive method to get the binary tree initial forest
            relations with only one recursive reading of the tree.
            The vertices are being labelled with integers starting with
            ``start``.
            OUTPUT:
            - the indexes of the nodes on the right border of the tree
              (these become the roots of the forest)
            - the relations of the initial forest (as a list of tuples)
            - the next available index for a node (size of tree +
              ``start``)
            """
            if not bt:
                return [], [], start  # leaf
            # left subtree keeps ``start``; root gets the next index;
            # the right subtree is labelled after the root
            lroots, lrelations, index = get_relations(bt[0], start=start)
            roots, relations, rindex = get_relations(bt[1], start=index + 1)
            roots.append(index)
            relations.extend(lrelations)
            # every root of the left-subtree forest precedes this node
            relations.extend([(j, index) for j in lroots])
            return roots, relations, rindex
        roots, relations, index = get_relations(binary_tree)
        return TamariIntervalPoset(index - 1, relations)
@staticmethod
def from_binary_trees(tree1, tree2):
r"""
Return the interval-poset corresponding to the interval
[``tree1``, ``tree2``] of the Tamari lattice. Raise an exception if
``tree1`` is not `\leq` ``tree2`` in the Tamari lattice.
INPUT:
- ``tree1`` -- a binary tree
- ``tree2`` -- a binary tree greater or equal than ``tree1`` for
the Tamari lattice
EXAMPLES::
sage: tree1 = BinaryTree([[],None])
sage: tree2 = BinaryTree([None,[]])
sage: TamariIntervalPosets.from_binary_trees(tree1,tree2)
The Tamari interval of size 2 induced by relations []
sage: TamariIntervalPosets.from_binary_trees(tree1,tree1)
The Tamari interval of size 2 induced by relations [(1, 2)]
sage: TamariIntervalPosets.from_binary_trees(tree2,tree2)
The Tamari interval of size 2 induced by relations [(2, 1)]
sage: tree1 = BinaryTree([[],[[None,[]],[]]])
sage: tree2 = BinaryTree([None,[None,[None,[[],[]]]]])
sage: TamariIntervalPosets.from_binary_trees(tree1,tree2)
The Tamari interval of size 6 induced by relations [(4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: tree3 = BinaryTree([None,[None,[[],[None,[]]]]])
sage: TamariIntervalPosets.from_binary_trees(tree1,tree3)
Traceback (most recent call last):
...
ValueError: The two binary trees are not comparable on the Tamari lattice.
sage: TamariIntervalPosets.from_binary_trees(tree1,BinaryTree())
Traceback (most recent call last):
...
ValueError: The two binary trees are not comparable on the Tamari lattice.
"""
initial_forest = TamariIntervalPosets.initial_forest(tree2)
final_forest = TamariIntervalPosets.final_forest(tree1)
try:
return initial_forest.intersection(final_forest)
except Exception:
raise ValueError("The two binary trees are not comparable on the Tamari lattice.")
@staticmethod
def from_dyck_words(dw1, dw2):
r"""
Return the interval-poset corresponding to the interval
[``dw1``, ``dw2``] of the Tamari lattice. Raise an exception if the
two Dyck words ``dw1`` and ``dw2`` do not satisfy
``dw1`` `\leq` ``dw2`` in the Tamari lattice.
INPUT:
- ``dw1`` -- a Dyck word
- ``dw2`` -- a Dyck word greater or equal than ``dw1`` for
the Tamari lattice
EXAMPLES::
sage: dw1 = DyckWord([1,0,1,0])
sage: dw2 = DyckWord([1,1,0,0])
sage: TamariIntervalPosets.from_dyck_words(dw1,dw2)
The Tamari interval of size 2 induced by relations []
sage: TamariIntervalPosets.from_dyck_words(dw1,dw1)
The Tamari interval of size 2 induced by relations [(1, 2)]
sage: TamariIntervalPosets.from_dyck_words(dw2,dw2)
The Tamari interval of size 2 induced by relations [(2, 1)]
sage: dw1 = DyckWord([1,0,1,1,1,0,0,1,1,0,0,0])
sage: dw2 = DyckWord([1,1,1,1,0,1,1,0,0,0,0,0])
sage: TamariIntervalPosets.from_dyck_words(dw1,dw2)
The Tamari interval of size 6 induced by relations [(4, 5), (6, 5), (5, 2), (4, 3), (3, 2)]
sage: dw3 = DyckWord([1,1,1,0,1,1,1,0,0,0,0,0])
sage: TamariIntervalPosets.from_dyck_words(dw1,dw3)
Traceback (most recent call last):
...
ValueError: The two Dyck words are not comparable on the Tamari lattice.
sage: TamariIntervalPosets.from_dyck_words(dw1,DyckWord([1,0]))
Traceback (most recent call last):
...
ValueError: The two Dyck words are not comparable on the Tamari lattice.
"""
tree1 = dw1.to_binary_tree_tamari()
tree2 = dw2.to_binary_tree_tamari()
try:
return TamariIntervalPosets.from_binary_trees(tree1, tree2)
except Exception:
raise ValueError("The two Dyck words are not comparable on the Tamari lattice.")
    @staticmethod
    def from_minimal_schnyder_wood(graph):
        """
        Return a Tamari interval build from a minimal Schnyder wood.
        This is an implementation of Bernardi and Bonichon's bijection
        [BerBon]_.
        INPUT:
        a minimal Schnyder wood, given as a graph with colored and
        oriented edges, without the three exterior unoriented edges
        The three boundary vertices must be 'a', 'b' and 'c'.
        One assumes moreover that the embedding around 'a' is the
        list of neighbors of 'a' and not just a cyclic permutation of that.
        Beware that the embedding convention used here is the opposite of
        the one used by the plot method.
        OUTPUT:
        a Tamari interval poset
        EXAMPLES:
        A small example::
            sage: TIP = TamariIntervalPosets
            sage: G = DiGraph([(0,'a',0),(0,'b',1),(0,'c',2)], format='list_of_edges')
            sage: G.set_embedding({'a':[0],'b':[0],'c':[0],0:['a','b','c']})
            sage: TIP.from_minimal_schnyder_wood(G)
            The Tamari interval of size 1 induced by relations []
        An example from page 14 of [BerBon]_::
            sage: c0 = [(0,'a'),(1,0),(2,0),(4,3),(3,'a'),(5,3)]
            sage: c1 = [(5,'b'),(3,'b'),(4,5),(1,3),(2,3),(0,3)]
            sage: c2 = [(0,'c'),(1,'c'),(3,'c'),(4,'c'),(5,'c'),(2,1)]
            sage: ed = [(u,v,0) for u,v in c0]
            sage: ed += [(u,v,1) for u,v in c1]
            sage: ed += [(u,v,2) for u,v in c2]
            sage: G = DiGraph(ed, format='list_of_edges')
            sage: embed = {'a':[3,0],'b':[5,3],'c':[0,1,3,4,5]}
            sage: data_emb = [[3,2,1,'c','a'],[2,3,'c',0],[3,1,0]]
            sage: data_emb += [['b',5,4,'c',1,2,0,'a'],[5,'c',3],['b','c',4,3]]
            sage: for k in range(6):
            ....:     embed[k] = data_emb[k]
            sage: G.set_embedding(embed)
            sage: TIP.from_minimal_schnyder_wood(G)
            The Tamari interval of size 6 induced by relations [(1, 4), (2, 4), (3, 4), (5, 6), (6, 4), (5, 4), (3, 1), (2, 1)]
        An example from page 18 of [BerBon]_::
            sage: c0 = [(0,'a'),(1,0),(2,'a'),(3,2),(4,2),(5,'a')]
            sage: c1 = [(5,'b'),(2,'b'),(4,'b'),(3,4),(1,2),(0,2)]
            sage: c2 = [(0,'c'),(1,'c'),(3,'c'),(4,'c'),(2,'c'),(5,2)]
            sage: ed = [(u,v,0) for u,v in c0]
            sage: ed += [(u,v,1) for u,v in c1]
            sage: ed += [(u,v,2) for u,v in c2]
            sage: G = DiGraph(ed, format='list_of_edges')
            sage: embed = {'a':[5,2,0],'b':[4,2,5],'c':[0,1,2,3,4]}
            sage: data_emb = [[2,1,'c','a'],[2,'c',0],[3,'c',1,0,'a',5,'b',4]]
            sage: data_emb += [[4,'c',2],['b','c',3,2],['b',2,'a']]
            sage: for k in range(6):
            ....:     embed[k] = data_emb[k]
            sage: G.set_embedding(embed)
            sage: TIP.from_minimal_schnyder_wood(G)
            The Tamari interval of size 6 induced by relations [(1, 3), (2, 3), (4, 5), (5, 3), (4, 3), (2, 1)]
        Another small example::
            sage: c0 = [(0,'a'),(2,'a'),(1,0)]
            sage: c1 = [(2,'b'),(1,'b'),(0,2)]
            sage: c2 = [(0,'c'),(1,'c'),(2,1)]
            sage: ed = [(u,v,0) for u,v in c0]
            sage: ed += [(u,v,1) for u,v in c1]
            sage: ed += [(u,v,2) for u,v in c2]
            sage: G = DiGraph(ed, format='list_of_edges')
            sage: embed = {'a':[2,0],'b':[1,2],'c':[0,1]}
            sage: data_emb = [[2,1,'c','a'],['c',0,2,'b'],['b',1,0,'a']]
            sage: for k in range(3):
            ....:     embed[k] = data_emb[k]
            sage: G.set_embedding(embed)
            sage: TIP.from_minimal_schnyder_wood(G)
            The Tamari interval of size 3 induced by relations [(2, 3), (2, 1)]
        REFERENCES:
        .. [BerBon] Olivier Bernardi and Nicolas Bonichon, *Intervals in Catalan
           lattices and realizers of triangulations*, JCTA 116 (2009)
        """
        from sage.graphs.digraph import DiGraph
        from sage.combinat.dyck_word import DyckWord
        # the color of the tree rooted at 'a' resp. 'b' is read off the
        # unique label of the edges entering each exterior vertex
        color_a = graph.incoming_edges('a')[0][2]
        color_b = graph.incoming_edges('b')[0][2]
        embedding = graph.get_embedding()
        # keep only the tree of color_a (the tree rooted at 'a')
        graph0 = DiGraph([e for e in graph.edges() if e[2] == color_a],
                         format='list_of_edges')
        restricted_embedding = {u: [v for v in embedding[u]
                                    if v in graph0.neighbors_in(u) or
                                    v in graph0.neighbors_out(u)]
                                for u in graph0}
        # voisins_in[u]: in-neighbors of u in graph0, in embedding order
        voisins_in = {}
        for u in graph0:
            if u != 'a':
                # rotate the embedding so that it starts with the unique
                # outgoing neighbor of u in the tree
                bad_emb = restricted_embedding[u]
                sortie = graph0.neighbors_out(u)[0]
                idx = bad_emb.index(sortie)
                restricted_embedding[u] = bad_emb[idx:] + bad_emb[:idx]
                voisins_in[u] = restricted_embedding[u][1:]
            else:
                voisins_in[u] = list(restricted_embedding[u])
                voisins_in[u].reverse()  # to get them in the right order
        graph0.set_embedding(restricted_embedding)
        def clockwise_labelling(gr, vertex):
            # depth-first clockwise traversal of the tree, listing vertices
            if len(gr) == 1:
                return [vertex]
            else:
                lbl = [vertex]
                for w in voisins_in[vertex]:
                    lbl += clockwise_labelling(gr, w)
                return lbl
        def profil(gr, vertex):
            # Dyck-word profile of the tree: 1 on descent, 0 on return
            if len(gr) == 1:
                return []
            else:
                lbl = []
                for w in voisins_in[vertex]:
                    lbl += [1] + profil(gr, w) + [0]
                return lbl
        dyckword_bottom = profil(graph0, 'a')
        # this is the profile of the planar graph graph0
        liste = clockwise_labelling(graph0, 'a')[1:]
        relabelling = {l: i for i, l in enumerate(liste)}
        for l in ['a', 'b', 'c']:
            relabelling[l] = l
        new_graph = graph.relabel(relabelling, inplace=False)
        # upper Dyck word: read off the in-degrees in color_b, vertex by
        # vertex in the new labelling, then the in-degree of 'b'
        dyckword_top = []
        for i in range(1, len(graph) - 3):
            indegree1 = len([u for u in new_graph.incoming_edges(i)
                             if u[2] == color_b])
            dyckword_top += [1] + [0] * indegree1
        indegree1 = len([u for u in new_graph.incoming_edges('b')
                         if u[2] == color_b])
        dyckword_top += [1] + [0] * indegree1
        dyckword_bottom = DyckWord(dyckword_bottom)
        dyckword_top = DyckWord(dyckword_top)
        TIP = TamariIntervalPosets(len(dyckword_bottom) // 2)
        return TIP.from_dyck_words(dyckword_bottom, dyckword_top)
    def __call__(self, *args, **keywords):
        r"""
        Allows for a poset to be directly transformed into an interval-poset.
        It is some kind of coercion but cannot be made through the coercion
        system because posets do not have parents.
        EXAMPLES::
            sage: TIP = TamariIntervalPosets()
            sage: p = Poset( ([1,2,3], [(1,2)]))
            sage: TIP(p)
            The Tamari interval of size 3 induced by relations [(1, 2)]
            sage: TIP(TIP(p))
            The Tamari interval of size 3 induced by relations [(1, 2)]
            sage: TIP(3,[(1,2)])
            The Tamari interval of size 3 induced by relations [(1, 2)]
            sage: p = Poset(([1,2,3],[(1,3)]))
            sage: TIP(p)
            Traceback (most recent call last):
            ...
            ValueError: This does not satisfy the Tamari interval-poset condition.
        """
        # already an interval-poset: pass it through unchanged
        if isinstance(args[0], TamariIntervalPoset):
            return args[0]
        # a single finite poset: rebuild from its size and cover relations
        # (validation happens in the element constructor)
        if len(args) == 1 and isinstance(args[0], FinitePoset):
            return self.element_class(self, args[0].cardinality(), args[0].cover_relations())
        # otherwise fall back to the generic parent call (size, relations)
        return super(TamariIntervalPosets, self).__call__(*args, **keywords)
def le(self, el1, el2):
r"""
Poset stucture on the set of interval-posets through interval
containment.
Return whether the interval represented by ``el1`` is contained in
the interval represented by ``el2``.
INPUT:
- ``el1`` -- an interval-poset
- ``el2`` -- an interval-poset
EXAMPLES::
sage: ip1 = TamariIntervalPoset(4,[(1,2),(2,3),(4,3)])
sage: ip2 = TamariIntervalPoset(4,[(1,2),(2,3)])
sage: TamariIntervalPosets().le(ip1,ip2)
True
sage: TamariIntervalPosets().le(ip2,ip1)
False
"""
return el2.contains_interval(el1)
#################################################################
# Enumerated set of all Tamari Interval-posets
#################################################################
class TamariIntervalPosets_all(DisjointUnionEnumeratedSets, TamariIntervalPosets):
    r"""
    The enumerated set of all Tamari interval-posets.
    """
    def __init__(self):
        r"""
        TESTS::
            sage: from sage.combinat.interval_posets import TamariIntervalPosets_all
            sage: S = TamariIntervalPosets_all()
            sage: S.cardinality()
            +Infinity
            sage: it = iter(S)
            sage: [next(it) for i in range(5)]
            [The Tamari interval of size 0 induced by relations [],
            The Tamari interval of size 1 induced by relations [],
            The Tamari interval of size 2 induced by relations [],
            The Tamari interval of size 2 induced by relations [(2, 1)],
            The Tamari interval of size 2 induced by relations [(1, 2)]]
            sage: next(it).parent()
            Interval-posets
            sage: S(0,[])
            The Tamari interval of size 0 induced by relations []
            sage: S is TamariIntervalPosets_all()
            True
            sage: TestSuite(S).run()
        """
        # facade disjoint union over all sizes n = 0, 1, 2, ...
        DisjointUnionEnumeratedSets.__init__(
            self, Family(NonNegativeIntegers(), TamariIntervalPosets_size),
            facade=True, keepkey=False, category=(Posets(), EnumeratedSets()))
    def _repr_(self):
        r"""
        TESTS::
            sage: TamariIntervalPosets()
            Interval-posets
        """
        return "Interval-posets"
    def _element_constructor_(self, size, relations):
        r"""
        EXAMPLES::
            sage: TIP = TamariIntervalPosets()
            sage: TIP(3,[(1,2)])
            The Tamari interval of size 3 induced by relations [(1, 2)]
        """
        return self.element_class(self, size, relations)
    def __contains__(self, x):
        r"""
        TESTS::
            sage: S = TamariIntervalPosets()
            sage: 1 in S
            False
            sage: S(0,[]) in S
            True
        """
        return isinstance(x, self.element_class)
    Element = TamariIntervalPoset
#################################################################
# Enumerated set of Tamari interval-posets of a given size
#################################################################
class TamariIntervalPosets_size(TamariIntervalPosets):
    r"""
    The enumerated set of interval-posets of a given size.
    """
    def __init__(self, size):
        r"""
        TESTS::
            sage: S = TamariIntervalPosets(3)
            sage: assert S is TamariIntervalPosets(3)
            sage: for i in range(6): TestSuite(TamariIntervalPosets(i)).run()
        """
        # there is a natural order on interval-posets through inclusions
        # that is why we use the FinitePosets category
        super(TamariIntervalPosets_size, self).__init__(category=(FinitePosets(), FiniteEnumeratedSets()))
        self._size = size
    def _repr_(self):
        r"""
        TESTS::
            sage: TamariIntervalPosets(3)
            Interval-posets of size 3
        """
        return "Interval-posets of size {}".format(self._size)
def __contains__(self, x):
r"""
TESTS::
sage: S = TamariIntervalPosets(3)
sage: 1 in S
False
sage: S([]) in S
True
"""
return isinstance(x, self.element_class) and x.size() == self._size
    def cardinality(self):
        r"""
        The cardinality of ``self``. That is, the number of
        interval-posets of size `n`.
        The formula was given in [ChapTamari08]_:
        .. MATH::
            \frac{2(4n+1)!}{(n+1)!(3n+2)!}
            = \frac{2}{n(n+1)} \binom{4n+1}{n-1}.
        EXAMPLES::
            sage: [TamariIntervalPosets(i).cardinality() for i in range(6)]
            [1, 1, 3, 13, 68, 399]
        """
        from sage.arith.all import binomial
        n = self._size
        # the binomial formula below would divide by zero for n == 0
        if n == 0:
            return Integer(1)
        # 2/(n(n+1)) * C(4n+1, n-1); integer division is exact here
        return (2 * binomial(4 * n + 1, n - 1)) // (n * (n + 1))
        # return Integer(2 * factorial(4*n+1)/(factorial(n+1)*factorial(3*n+2)))
    def __iter__(self):
        r"""
        Recursive generation: we iterate through all interval-posets of
        size ``size - 1`` and add all possible relations to the last
        vertex.
        This works thanks to the fact that the restriction of an
        interval-poset of size `n` to the subset `\{1, 2, \ldots, k\}` for
        a fixed `k \leq n` is an interval-poset.
        TESTS::
            sage: TIP1 = TamariIntervalPosets(1)
            sage: list(TIP1)
            [The Tamari interval of size 1 induced by relations []]
            sage: TIP2 = TamariIntervalPosets(2)
            sage: list(TIP2)
            [The Tamari interval of size 2 induced by relations [],
            The Tamari interval of size 2 induced by relations [(2, 1)],
            The Tamari interval of size 2 induced by relations [(1, 2)]]
            sage: TIP3 = TamariIntervalPosets(3)
            sage: list(TIP3)
            [The Tamari interval of size 3 induced by relations [],
            The Tamari interval of size 3 induced by relations [(3, 2)],
            The Tamari interval of size 3 induced by relations [(2, 3)],
            The Tamari interval of size 3 induced by relations [(1, 3), (2, 3)],
            The Tamari interval of size 3 induced by relations [(2, 1)],
            The Tamari interval of size 3 induced by relations [(3, 2), (2, 1)],
            The Tamari interval of size 3 induced by relations [(3, 1), (2, 1)],
            The Tamari interval of size 3 induced by relations [(2, 3), (2, 1)],
            The Tamari interval of size 3 induced by relations [(2, 3), (3, 1), (2, 1)],
            The Tamari interval of size 3 induced by relations [(1, 3), (2, 3), (2, 1)],
            The Tamari interval of size 3 induced by relations [(1, 2)],
            The Tamari interval of size 3 induced by relations [(1, 2), (3, 2)],
            The Tamari interval of size 3 induced by relations [(1, 2), (2, 3)]]
            sage: all(len(list(TamariIntervalPosets(i)))==TamariIntervalPosets(i).cardinality() for i in range(6))
            True
        """
        n = self._size
        # base cases: a single interval-poset with no relations
        if n <= 1:
            yield TamariIntervalPoset(n, [])
            return
        for tip in TamariIntervalPosets(n - 1):
            new_tip = TamariIntervalPoset(n, tip._cover_relations)
            yield new_tip  # we have added an extra vertex but no relations
            # adding a decreasing relation n>>m2 with m2<n and no
            # increasing relations
            for m2 in range(n - 1, 0, -1):
                if new_tip.le(n - 1, m2):
                    yield TamariIntervalPoset(n, new_tip._cover_relations + ((n, m2),))
            for m in range(n - 1, 0, -1):
                # adding an increasing relation m>>n
                if not new_tip.le(m, n):
                    new_tip = TamariIntervalPoset(n, new_tip._cover_relations + ((m, n),))
                    yield new_tip
                else:
                    continue
                # further adding a decreasing relation n>>m2 with m2<m
                for m2 in range(m - 1, 0, -1):
                    if new_tip.le(n - 1, m2):
                        yield TamariIntervalPoset(n, new_tip._cover_relations + ((n, m2),))
    def random_element(self):
        """
        Return a random Tamari interval of fixed size.
        This is obtained by first creating a random rooted
        planar triangulation, then computing its unique
        minimal Schnyder wood, then applying a bijection
        of Bernardi and Bonichon [BerBon]_.
        Because the random rooted planar triangulation is
        chosen uniformly at random, the Tamari interval is
        also chosen according to the uniform distribution.
        EXAMPLES::
            sage: T = TamariIntervalPosets(4).random_element()
            sage: T.parent()
            Interval-posets
            sage: u = T.lower_dyck_word(); u # random
            [1, 1, 0, 1, 0, 0, 1, 0]
            sage: v = T.upper_dyck_word(); v # random
            [1, 1, 0, 1, 0, 0, 1, 0]
            sage: len(u)
            8
        """
        from sage.graphs.schnyder import minimal_schnyder_wood
        from sage.graphs.generators.random import RandomTriangulation
        n = self._size
        # a triangulation on n + 3 vertices yields an interval of size n
        tri = RandomTriangulation(n + 3)
        TIP = TamariIntervalPosets
        schnyder = minimal_schnyder_wood(tri, root_edge=('a', 'b'),
                                         check=False)
        return TIP.from_minimal_schnyder_wood(schnyder)
    @lazy_attribute
    def _parent_for(self):
        r"""
        The parent of the element generated by ``self``.
        Elements of a fixed-size set actually live in the facade
        parent of all interval-posets.
        TESTS::
            sage: TIP3 = TamariIntervalPosets(3)
            sage: TIP3._parent_for
            Interval-posets
        """
        return TamariIntervalPosets_all()
    # This is needed because this is a facade parent via DisjointUnionEnumeratedSets
    @lazy_attribute
    def element_class(self):
        r"""
        The element class, borrowed from the facade parent so that
        elements of all sizes share a single class.
        TESTS::
            sage: S = TamariIntervalPosets(3)
            sage: S.element_class
            <class 'sage.combinat.interval_posets.TamariIntervalPosets_all_with_category.element_class'>
            sage: S.first().__class__ == TamariIntervalPosets().first().__class__
            True
        """
        return self._parent_for.element_class
    def _element_constructor_(self, relations):
        r"""
        Build an interval-poset of this parent's size from ``relations``.
        EXAMPLES::
            sage: TIP3 = TamariIntervalPosets(3)
            sage: TIP3([(1,2)])
            The Tamari interval of size 3 induced by relations [(1, 2)]
            sage: TIP3([(3,4)])
            Traceback (most recent call last):
            ...
            ValueError: The relations do not correspond to the size of the poset.
        """
        # the size is fixed by the parent; only the relations are supplied
        return self.element_class(self, self._size, relations)
# Deprecations from trac:18555. July 2016
# Old names kept as deprecated aliases of TamariIntervalPosets.options.
from sage.misc.superseded import deprecated_function_alias
TamariIntervalPosets.global_options=deprecated_function_alias(18555, TamariIntervalPosets.options)
TamariIntervalPosetOptions=deprecated_function_alias(18555, TamariIntervalPosets.options)
|
989,032 | a7c8d15e32fde66bf2e3ae7aee7ccc2f750d0a0c | from django.conf import settings
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from ..app_settings import (
MoveSecretLinkSerializer,
DeleteSecretLinkSerializer,
)
from ..models import (
Secret_Link
)
from ..authentication import TokenAuthentication
def create_secret_link(link_id, secret_id, parent_share_id, parent_datastore_id):
    """
    DB wrapper to create a link between a secret and a datastore or a share

    Takes care of "degenerated" tree structures (e.g a child has two parents)

    In addition checks if the link already exists, as this is a crucial part of the access rights system

    :param link_id: uuid of the link
    :param secret_id: uuid of the secret being linked
    :param parent_share_id: uuid of the parent share (or None)
    :param parent_datastore_id: uuid of the parent datastore (or None)
    :return: True on success, False when the link could not be created
    """
    try:
        Secret_Link.objects.create(
            link_id=link_id,
            secret_id=secret_id,
            parent_datastore_id=parent_datastore_id,
            parent_share_id=parent_share_id
        )
    except Exception:
        # The bare ``except:`` here also swallowed SystemExit and
        # KeyboardInterrupt; only genuine runtime errors (e.g. a duplicate
        # link / IntegrityError) should be reported as failure.
        return False
    return True
def delete_secret_link(link_id):
    """
    DB wrapper to delete a link to a secret

    Deleting is idempotent: a missing link simply matches zero rows.

    :param link_id: uuid of the link to remove
    :return: None
    """
    Secret_Link.objects.filter(link_id=link_id).delete()
class SecretLinkView(GenericAPIView):
    """
    Secret Link View:

    Accepted Methods: POST, DELETE
    """
    authentication_classes = (TokenAuthentication, )
    permission_classes = (IsAuthenticated,)
    allowed_methods = ('POST', 'DELETE', 'OPTIONS', 'HEAD')

    def post(self, request, *args, **kwargs):
        """
        Moves a Secret_Link obj to a new parent.

        Necessary Rights:
            - write on old_parent_share
            - write on old_datastore
            - write on new_parent_share
            - write on new_datastore

        :param request:
        :param args:
        :param kwargs:
        :return: 200 / 400
        """
        serializer = MoveSecretLinkSerializer(data=request.data, context=self.get_serializer_context())

        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        validated = serializer.validated_data
        target_link = validated['link_id']

        # A move is implemented as delete-at-old-location followed by a
        # create-at-new-location for every secret behind the link.
        delete_secret_link(target_link)
        for secret_id in validated['secrets']:
            create_secret_link(target_link, secret_id,
                               validated['new_parent_share_id'],
                               validated['new_parent_datastore_id'])

        return Response(status=status.HTTP_200_OK)

    def delete(self, request, *args, **kwargs):
        """
        Deletes a Secret_Link obj.

        Necessary Rights:
            - write on parent_share
            - write on parent_datastore

        :param request:
        :param args:
        :param kwargs:
        :return: 200 / 400
        """
        serializer = DeleteSecretLinkSerializer(data=request.data, context=self.get_serializer_context())

        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        delete_secret_link(serializer.validated_data['link_id'])

        return Response(status=status.HTTP_200_OK)
989,033 | 1ea64046556aa1a2acfbb85fa5188eb292c1236c | """
tldr: modifies the squad data to fit the requirements of my experiment 2 (topic contexts)
author: @rohitmusti
"""
import ujson as json
from tqdm import tqdm
from toolkit import fancyprint, save, quick_clean
import config
from data_clean_exp2 import exp2_transformer
from data_clean_exp3 import exp3_transformer
def toy_transformer(in_file, out_file):
    """
    distill the original data into a small toy set: at most 3 topics, each
    with at most 3 paragraphs, each of which keeps at most 3 question/answer
    sets (the previous docstring advertised 15/5/5, but the counters were
    always 3 — the docstring now matches the code)

    args:
        - in_file: the file name of the data to be transformed
        - out_file: the file name of where the result ought to be written

    return:
        none, the data is written to an output
    """
    new_data = {}
    new_data['experiment'] = "toy"
    with open(in_file, "r") as fh:
        fancyprint(in_str=("Importing: " + in_file))
        source = json.load(fh)
        fancyprint(in_str="Converting into toy format")
        new_data["version"] = source["version"]
        new_data["data"] = []
        # slicing [:3] replaces the hand-rolled countdown counters
        for topic in tqdm(source["data"][:3]):
            topic_dict = {"title": topic["title"], "paragraphs": []}
            for para in topic["paragraphs"][:3]:
                paragraph = {"context": para["context"], "qas": []}
                for qas in para['qas'][:3]:
                    qas_dict = {
                        "id": qas["id"],
                        "is_impossible": qas["is_impossible"],
                        "question": quick_clean(raw_str=qas["question"]),
                        "answers": [],
                    }
                    # impossible questions keep an empty answer list
                    if not qas["is_impossible"]:
                        for answer in qas["answers"]:
                            qas_dict["answers"].append({
                                "answer_start": answer["answer_start"],
                                "text": answer["text"],
                            })
                    paragraph["qas"].append(qas_dict)
                topic_dict["paragraphs"].append(paragraph)
            new_data["data"].append(topic_dict)
    save(filename=out_file, obj=new_data, message="saving toy data")
if __name__ == "__main__":
    # Build the toy subset once, then derive the experiment-2 and
    # experiment-3 variants from it.
    paths = config.data()
    toy_transformer(in_file=paths.train_data_orig, out_file=paths.toy_data_orig)
    exp2_transformer(in_file=paths.toy_data_orig, out_file=paths.toy_data_exp2)
    exp3_transformer(in_file=paths.toy_data_orig, out_file=paths.toy_data_exp3)
989,034 | 4c507cbf3be3ec87d50a0dcda6f5a2ffc90b695c | import click
from acgt import Acgt
@click.group()
def cli():
    """Example script."""
@click.command()
@click.argument('project_name')
# is_flag=True is required: without it click treats --js/--flask as options
# that demand a value, so "init myproj --js" errored out instead of toggling.
@click.option('--js', is_flag=True, default=False, help="generate js file")
@click.option('--flask', is_flag=True, default=False, help="generate flask file")
def init(project_name, js, flask):
    """Generate js or flask scaffolding for PROJECT_NAME via acgt."""
    project = project_name
    if js:
        click.echo(" init js ... ")
        Acgt(project).parse_apis("js")
    elif flask:
        click.echo("init flask app ...")
        Acgt(project).parse_apis("flask")
    else:
        # No generator selected: show usage hints instead of doing work.
        click.echo("*** usage info ***")
        click.echo("--(flask,js) generate code by acgt")
    click.echo("done!")
cli.add_command(init)  # register ``init`` as a subcommand of the top-level group
|
989,035 | 5f8ed03e94b139527ac44011e02d2ad0d467aa8a | #Multiple Linear Regression - multiple features, one label
#General Equation is that of a straight line with multiple features: y = b0 + b1x1 + b2x2 + ... + bnxn
#sourced from superdatascience.com

#-------------------------------- Preprocessing -----------------------
#import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

#import the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,4].values.reshape(-1,1)
print("features X\n",X, "\n labels y \n",y)

#handle missing data
from sklearn.preprocessing import Imputer
imputer = Imputer()
imputer = imputer.fit(X[:,0:3]) #handle only first three columns
X[:,0:3] = imputer.transform(X[:,0:3])
print("X after handling missing data",X)

#Encode categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder = LabelEncoder()
X[:,3] = labelEncoder.fit_transform(X[:,3])
onehotencoder = OneHotEncoder(categorical_features=[3]) #column to be one-hot encoded
X = onehotencoder.fit_transform(X).toarray()
X = X[:,1:] #drop one dummy column so as to avoid dummy variable trap
print("X after encoding categorical data",X)

#Split dataset into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
print("Splitting dataset into training and test sets \n X_train \n",X_train, '\n X_test \n', X_test, '\n y_train \n', y_train, '\n y_test \n', y_test)
#-------------------------------------END------------------------------

#------------------------------------ Model ---------------------------
#Create the regressor and fit it to the TRAINING set only.
#(The previous fit(X, y) trained on the full data set, leaking the
# test set into training and invalidating the evaluation below.)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor = regressor.fit(X_train,y_train)

#predict test set results
y_pred = regressor.predict(X_test)
print('y_pred for X_test\n',y_pred)

#Building the optimal model using backward elimination
#One by one, remove all columns that have a p-value above 0.05 significance level
import statsmodels.formula.api as sm
X = np.append(arr=np.ones((50,1)).astype(int), values=X, axis=1) #add a column of 1's, the bias term in the equation of line

#Iteration #1 - start with all columns in X_optimal
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary() #P-values: x1=0.948, x2=0.777, x3=0.000, x4=0.943, x5=0.056

#Iteration #2 - remove column with highest p-value i.e. x1 (second column)
X_opt = X[:,[0,2,3,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary() #P-values: x1=0.769, x2=0.000, x3=0.944, x4=0.050

#Iteration #3 - remove column with highest p-value i.e. x3 (fourth column)
X_opt = X[:,[0,2,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary() #P-values: x1=0.610, x2=0.010, x3=0.000

#Iteration #4 - remove column with highest p-value i.e. x1 (second column)
X_opt = X[:,[0,4,5]]
regressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()
regressor_OLS.summary() #P-values: x1 = 0.009, x2=0.000
#-------------------------------------END------------------------------

#----------------------------------- Graphs ---------------------------
#Since there are multiple features, we can't show a feature vs. label graph
#You can use Principal Component Analysis (PCA) or LDA to reduce the number of features
#But for now, we will just show the predicted vs. actual value graph
#NOTE: savefig must be called BEFORE show(); show() clears the active
#figure, so the previous order wrote out blank PNG files.

#Predicted vs. actual graph for training set
y_pred_train = regressor.predict(X_train)
plt.figure("train")
plt.scatter(y_pred_train,y_train)
plt.title("Predicted vs. Actual Profit: Training set")
plt.xlabel("Predicted Profit")
plt.ylabel("Actual Profit")
plt.savefig("train.png")
plt.show()

#Predicted vs. actual graph for test set
plt.figure("test")
plt.scatter(y_pred,y_test)
plt.title("Predicted vs. Actual Profit: Test set")
plt.xlabel("Predicted Profit")
plt.ylabel("Actual Profit")
plt.savefig("test.png")
plt.show()
#-------------------------------------END------------------------------
|
989,036 | c56729b0c3260903d49b4c09fcf3e138f77dc38f | from django.db import models
from util.fields import CurrencyField
from categoria.models import Categoria
class Product(models.Model):
    """Catalogue product with tiered pricing (A/B/C), stock, weight and image."""
    name = models.CharField(max_length=255, verbose_name=('Nombre'))
    slug = models.SlugField(verbose_name=('Slug'), unique=True)
    active = models.BooleanField(default=False, verbose_name=('Activo'))
    categoria = models.ForeignKey(Categoria)
    date_added = models.DateTimeField(auto_now_add=True,verbose_name=('Fecha de Creacion'))
    last_modified = models.DateTimeField(auto_now=True,verbose_name=('Ultima Modificacion'))
    # manual ordering index within a category (see Meta.ordering)
    orden = models.PositiveIntegerField()
    # NOTE(review): blank=True without null=True or a default on an
    # IntegerField can raise IntegrityError for empty form input — confirm.
    stock = models.IntegerField(blank=True)
    unit_price = CurrencyField(verbose_name=('Precio'))
    precio_a = CurrencyField(verbose_name=('Precio A'))
    precio_b = CurrencyField(verbose_name=('Precio B'))
    precio_c = CurrencyField(verbose_name=('Precio C'))
    peso = models.DecimalField(max_digits = 30,decimal_places = 2,)
    imagen = models.ImageField("Imagen Categoria", upload_to="images/categorias", blank=True, null=True,default='images/default-01.png')
    class Meta(object):
        ordering = ['categoria','orden']
        verbose_name = ('Producto')
        verbose_name_plural = ('Productos')
    def __unicode__(self):
        """Human-readable name (Python 2 string conversion)."""
        return self.name
    # The get_absolute_url* family resolves this product's slug against the
    # various named URL patterns (detail, production, returns, outputs, ...).
    @models.permalink
    def get_absolute_url(self):
        return ('product_detail', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urlpe(self):
        return ('produccion_esperada', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urlpr(self):
        return ('produccion_realizada', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urldb(self):
        return ('add_devolucion_buena', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urldm(self):
        return ('add_devolucion_mala', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urldr(self):
        return ('add_devolucion_reproceso', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urlsa(self):
        return ('add_salida', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urlsal(self):
        return ('add_saldo', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urldma(self):
        return ('add_devolucion_mala_almacen', (), { 'producto_slug': self.slug })
    @models.permalink
    def get_absolute_urldra(self):
        return ('add_devolucion_reproceso_almacen', (), { 'producto_slug': self.slug })
    def get_price(self):
        """Return the base unit price."""
        return self.unit_price
    def get_peso(self):
        """Return the unit weight."""
        return self.peso
    def get_subtotal(self):
        """Total weight of the current stock (peso * stock)."""
        return self.peso *self.stock
    def get_name(self):
        """Return the product name."""
        return self.name
    def get_product_reference(self):
        """Primary key rendered as a unicode string, used as a reference."""
        return unicode(self.pk)
    @property
    def can_be_added_to_cart(self):
        # Only active products are purchasable.
        return self.active
989,037 | f3d7b5319cdd919f8671be5747e33bf59a1e89b3 | # -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re

from setuptools import find_packages, setup

# Read the version string out of the package source without importing it.
# A with-block closes the file handle; the old open(...).read() leaked it.
with open('src/tav/cmd.py') as source:
    version = re.search(r"^__version__\s*=\s*'(.*)'",
                        source.read(), re.M).group(1)

setup(
    name='tav',
    version=version,
    description='TBD',
    long_description='TBD',
    author='Mudox',
    author_email='imudox@gmail.com',
    url='https://github.com/mudox/tav',
    package_dir={'': 'src'},
    packages=find_packages('src'),
    install_requires=[
        'libtmux',
        'ruamel.yaml',
    ],
    package_data={
        '': ['resources/*'],
    },
    scripts=[
        'asset/script/tav',
    ],
    entry_points={
        "console_scripts": ['tav-core = tav.cmd:run']
    })
|
989,038 | 65be7d13d864c9785968ab7967205f7ad498e814 | from typing import List
class Solution:
    def closedIsland(self, grid: List[List[int]]) -> int:
        """Count islands of 0-cells that do not touch the grid border.

        Strategy: flood-fill (and mark seen) every 0-region reachable from
        the border, then count the remaining unseen 0-regions in the
        interior.  The DFS is iterative (explicit stack): the original
        recursive version could hit Python's recursion limit on large grids.
        """
        rows, cols = len(grid), len(grid[0])
        # A 1-row or 1-column grid cannot contain a closed island.
        if rows == 1 or cols == 1:
            return 0

        seen = [[False] * cols for _ in range(rows)]

        def flood(start_r, start_c):
            # Mark every 0-cell 4-connected to (start_r, start_c) as seen.
            if seen[start_r][start_c] or grid[start_r][start_c] != 0:
                return
            seen[start_r][start_c] = True
            stack = [(start_r, start_c)]
            while stack:
                r, c = stack.pop()
                for nr, nc in ((r + 1, c), (r - 1, c), (r, c - 1), (r, c + 1)):
                    if 0 <= nr < rows and 0 <= nc < cols \
                            and not seen[nr][nc] and grid[nr][nc] == 0:
                        seen[nr][nc] = True
                        stack.append((nr, nc))

        # Eliminate every 0-region touching the border.
        for c in range(cols):
            flood(0, c)
            flood(rows - 1, c)
        for r in range(rows):
            flood(r, 0)
            flood(r, cols - 1)

        # Every remaining unseen 0-region is a closed island.
        count = 0
        for r in range(1, rows - 1):
            for c in range(1, cols - 1):
                if not seen[r][c] and grid[r][c] == 0:
                    count += 1
                    flood(r, c)
        return count
# 执行用时:
# 172 ms
# , 在所有 Python3 提交中击败了
# 46.54%
# 的用户
# 内存消耗:
# 14.5 MB
# , 在所有 Python3 提交中击败了
# 100.00%
# 的用户
|
989,039 | 2c6c324c068105b1538f90745ed8439adfd2060d | import sys
sys.path.append('../pytorch-CycleGAN-and-pix2pix/')
import importlib
from models import networks
class UnetNormalized(networks.UnetGenerator):
    """
    Subclass of UnetGenerator from pix2pix that min-max normalizes the output
    to roughly [0, 1] (raw generator output gave weird results downstream).
    """
    def __init__(self):
        # 3 input / 3 output channels, 8 downsamplings, 64 base filters,
        # batch normalization, no dropout.
        super(UnetNormalized, self).__init__(3, 3, 8, 64, norm_layer=networks.get_norm_layer('batch'), use_dropout=False)

    def forward(self, x):
        x = super(UnetNormalized, self).forward(x)
        lo = float(x.min())
        hi = float(x.max())
        # The previous x.clamp_(min=lo, max=hi) clamped the tensor to its
        # own min/max — a no-op — so it has been removed.
        # Shift/scale in place into [0, ~1]; epsilon guards constant output.
        x.add_(-lo).div_(hi - lo + 1e-5)
        return x
|
989,040 | 09126e5a9bed34a175600c4acd63eb325ac25e8b | import json
#import pprint #only for fun printing
from TwitterModule import *
#names = ['@IngrahamAngle','@davidhogg111','@sleepnumber','@ATT','@Allstate','@esurance','@Bayer','@RocketMortgage','@LibertyMutual','@Arbys','@TripAdvisor','@Nestle','@hulu','@Wayfair','@FoxNews','#BoycottIngramAdverts','#boycottLauraIngraham','#FireIngraham','#FireLauraIngraham']
names = ['@sleepnumber']
#names = ['@sleepnumber','@ATT','@Allstate','@esurance','@Bayer','@RocketMortgage','@LibertyMutual','@Arbys','@TripAdvisor','@Nestle','@hulu','@Wayfair','@FoxNews','#BoycottIngramAdverts','#boycottLauraIngraham','#FireIngraham','#FireLauraIngraham']

for q in names:
    name = q[1:]
    nameFile = name + '328.json'
    # context manager closes the handle even if json.load raises
    with open(nameFile, 'r') as file:
        #loads decodes json objects into dictionary
        largeFile = json.load(file)
    print("successfully opened file " + q)#successfully read in the file
    print(type(largeFile))  # top-level JSON object decodes to a dict
    '''
    Prints the contents of the file. Use with caution.
    '''
    #entire dictionary
    #print(json.dumps(largeFile, indent=1))
    #length of the dictionary
    print(len(largeFile))
    # dict.keys() returns a non-indexable view in Python 3; the old
    # keys[666] on the raw view raised TypeError — materialize to a list.
    keys = list(largeFile.keys())
    print(keys)
    print(keys[666])
    print(largeFile[keys[1]]['created_at'])
    #the keys (twitter IDs) in the dictionary
    #print(largeFile.keys())
|
989,041 | 9a77bcbfc04208a833c3b021170bd5b7120770ff | """
Implementation of the logic to solve the nonogram.
"""
import copy
from nonogram.rules import r1
from nonogram.rules import r2
from nonogram.rules import r3
from nonogram.solution import Solution
RULE_FUNCS = (*r1.RULES, *r2.RULES, *r3.RULES)
def solve(raster):
    """Rule-based elimination on ``raster``.

    Repeats row/column passes until a full sweep changes neither the grid
    nor any line metadata, then returns a Solution object if the raster is
    solved and None otherwise.
    """
    progress = True
    while progress:
        progress = False

        for meta in raster.row_meta:
            mask = raster.get_row(meta.idx)
            snapshot = copy.deepcopy(meta)
            linesolve(mask, meta)
            if raster.update_row(mask=mask, idx=meta.idx) or meta != snapshot:
                progress = True

        for meta in raster.col_meta:
            mask = raster.get_col(meta.idx)
            snapshot = copy.deepcopy(meta)
            linesolve(mask, meta)
            if raster.update_col(mask=mask, idx=meta.idx) or meta != snapshot:
                progress = True

    return Solution(raster.table) if raster.is_solved() else None
def linesolve(mask, meta):
    """Apply every known elimination rule to a single line in place."""
    for rule in RULE_FUNCS:
        rule(mask, meta)
|
989,042 | e2555f2b73bbad1eee96d61aa46482ad997f1b7d | import six
import sys
import os
# Use SimpleNamespace where available (Python 3.3+), falling back to
# argparse.Namespace on older interpreters.  The previous check compared
# sys.version_info[0] (an int, e.g. 3) against 3.3, which is always False,
# so the fallback was taken even on modern Python.
if sys.version_info >= (3, 3):
    from types import SimpleNamespace as Dataset
else:
    from argparse import Namespace as Dataset
def associate_by_ext_suffix(datasets):
    """Cross-link each non-"_ext" dataset with its "_ext" variants.

    Every dataset whose name starts with a base dataset's name and contains
    "_ext" is grouped with that base; each member of a group gets an
    ``associates`` attribute listing the other members' names.
    """
    with_ext = [d for d in datasets if "_ext" in d.name]
    without_ext = [d for d in datasets if "_ext" not in d.name]

    for base in without_ext:
        group = [d for d in with_ext if d.name.startswith(base.name)]
        group.append(base)
        names = [member.name for member in group]
        for pos, member in enumerate(group):
            member.associates = names[:pos] + names[pos + 1:]
def _load_yaml(path):
    """Parse ``path`` as YAML with the safe loader; raise if it is empty."""
    import yaml
    with open(path, 'r') as handle:
        parsed = yaml.load(handle, Loader=yaml.SafeLoader)
    if not parsed:
        raise RuntimeError("Empty config file in '%s'" % path)
    return parsed
def from_yaml(path, defaults={}, find_associates=associate_by_ext_suffix,
              selected_prefix=None, expand_prefix=True):
    """Load dataset definitions from a YAML file.

    :param path: config file to parse
    :param defaults: default key/values merged under each dataset's config
    :param find_associates: callable run over the final dataset list
    :param selected_prefix: which named prefix to expand, if prefixes are named
    :param expand_prefix: whether to expand ``{prefix}`` in file paths
    """
    datasets_dict = _load_yaml(path)
    this_dir = os.path.dirname(os.path.abspath(path))
    # Pass a copy of defaults so the shared mutable default argument (and
    # the caller's dict) can't be mutated downstream, and forward the
    # caller's find_associates — the old code hard-coded
    # associate_by_ext_suffix here, silently ignoring the parameter.
    return get_datasets(datasets_dict, dict(defaults), this_dir=this_dir,
                        selected_prefix=selected_prefix,
                        expand_prefix=expand_prefix,
                        find_associates=find_associates)
def get_datasets(datasets_dict, defaults={},
                 find_associates=associate_by_ext_suffix, already_imported=None,
                 this_dir=None, selected_prefix=None, expand_prefix=True):
    """Turn a parsed config dict into a list of Dataset namespaces.

    Handles recursive ``import`` sections (each file imported at most once),
    per-file ``defaults``, prefix expansion and associate discovery.

    :raises RuntimeError: if neither 'datasets' nor 'import' is present.
    """
    datasets = []
    # Work on a copy: ``defaults`` is a shared mutable default argument, and
    # the previous in-place update() both mutated the caller's dict and let
    # values leak between unrelated calls.
    defaults = dict(defaults)
    defaults.update(datasets_dict.get("defaults", {}))

    if "import" not in datasets_dict and "datasets" not in datasets_dict:
        raise RuntimeError("Neither 'datasets' nor 'import' were specified in config")

    if already_imported is None:
        already_imported = set()

    for import_file in datasets_dict.get("import", []):
        if this_dir:
            import_file = import_file.format(this_dir=this_dir)
        if import_file in already_imported:
            continue
        already_imported.add(import_file)
        contents = _load_yaml(import_file)
        # Forward selected_prefix / expand_prefix so imported files are
        # expanded the same way as the top-level one (previously dropped).
        datasets += get_datasets(contents, defaults=defaults.copy(),
                                 this_dir=os.path.dirname(import_file),
                                 find_associates=find_associates,
                                 already_imported=already_imported,
                                 selected_prefix=selected_prefix,
                                 expand_prefix=expand_prefix)

    for dataset in datasets_dict.get("datasets", []):
        if isinstance(dataset, six.string_types):
            cfg = _from_string(dataset, defaults)
        elif isinstance(dataset, dict):
            cfg = _from_dict(dataset, defaults, selected_prefix)
        else:
            raise TypeError("{} not a string or dict".format(dataset))
        if expand_prefix:
            prefix = cfg.get("prefix", None)
            files = apply_prefix(prefix, cfg["files"], selected_prefix, cfg["name"])
            cfg["files"] = files
        datasets.append(Dataset(**cfg))

    # Associate samples
    find_associates(datasets)
    return datasets
def _from_string(dataset, default):
cfg = default.copy()
cfg["name"] = dataset
return cfg
def _from_dict(dataset, default, selected_prefix=None):
cfg = default.copy()
cfg.update(dataset)
if "name" not in cfg:
raise RuntimeError(
"Dataset provided as dict, without key-value pair for 'name'")
return cfg
def apply_prefix(prefix, files, selected_prefix, dataset):
    """Expand ``{prefix}`` in every file path of a dataset.

    ``prefix`` may be a plain string, or a list of single-entry dicts
    mapping a prefix label to a value, with ``selected_prefix`` naming the
    label to use (first entry when no label is selected).
    """
    if not prefix:
        return files
    if isinstance(prefix, list):
        if not all((isinstance(p, dict) and len(p) == 1 for p in prefix)):
            raise ValueError("'prefix' is a list, but not all elements are single-length dicts")
        pairs = [tuple(p.items())[0] for p in prefix]
        if selected_prefix:
            candidates = [value for label, value in pairs if label == selected_prefix]
            if len(candidates) > 1:
                msg = "Prefix '%s' is defined %d times, not sure which to use"
                raise ValueError(msg % (selected_prefix, len(candidates)))
            if not candidates:
                msg = "Prefix '%s' is not defined for dataset '%s'"
                raise ValueError(msg % (selected_prefix, dataset))
            chosen = candidates[0]
        else:
            chosen = pairs[0][1]
    elif not isinstance(prefix, six.string_types):
        msg = "'prefix' for dataset '%s' is type '%s'. Need a string or a list of single-length dicts"
        raise ValueError(msg % (dataset, type(prefix)))
    else:
        chosen = prefix
    return [f.format(prefix=chosen) for f in files]
989,043 | e951c02bbd65136f89a41e8d7f53f9315864b8f0 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset
from torch.utils.tensorboard import SummaryWriter
import os
from before_ex import vnet_wr
import time
#import matplotlib.pyplot as plt
import cv2
def np_select_rand_pos(img):
    """Pick a random top-left corner (x, y) for a 128x128 crop of ``img``.

    ``img`` is an H x W x C array.  Prints a warning and returns (0, 0)
    when the image is smaller than 128 pixels in either dimension.
    """
    max_x = img.shape[1] - 128
    max_y = img.shape[0] - 128
    if max_x < 0 or max_y < 0:
        print("This image size is too small.")
        return 0, 0
    # randint's upper bound is exclusive, so +1 is needed: an exactly
    # 128-px image previously hit np.random.randint(0), which raises
    # ValueError, and the right/bottom-most crop position was unreachable.
    return np.random.randint(max_x + 1), np.random.randint(max_y + 1)
def np_crop_img(img, x, y):
    """Cut a 128x128 window out of ``img`` with top-left corner (x, y)."""
    window = img[y:y + 128, x:x + 128, :]
    return window
def np_rand_flip(img, flip_flag):
    """Flip ``img`` along axis 1 when ``flip_flag`` is truthy, else pass through."""
    return np.flip(img, 1) if flip_flag else img
def np_rand_noise(img):
    """With probability 1/2, add N(0, 25) noise clipped to [0, 255] uint8.

    The unmodified input is returned unchanged on the no-noise branch.
    """
    add_noise = np.random.randint(2)
    if not add_noise:
        return img
    noise = np.random.normal(0, 25, (128, 128, 3))
    noisy = img + noise
    noisy[noisy > 255] = 255
    noisy[noisy < 0] = 0
    return noisy.astype(np.uint8)
class CCTVDataset(Dataset):
    """Dataset of CCTV frame stacks with water/road segmentation masks.

    Each item is (frames, water_gts, road_gts) where frames is a
    (C, F, H, W) float tensor scaled to [0, 1] and the ground-truth masks
    are (1, F, H, W) binary float tensors (the single mask image is
    replicated across all F frames).
    """
    def __init__(self, groundtruthdir, videodir, frm_ch_num=16, frm_period =5):
        # one ground-truth image per video; directory listings are sorted so
        # videolist[i] matches gtlist[i] — assumes matching file names, TODO confirm
        self.gtdir = groundtruthdir
        self.videodir = videodir
        self.videolist = sorted(os.listdir(videodir))
        self.gtlist = sorted(os.listdir(groundtruthdir))
        self.f_ch_num = frm_ch_num   # number of frames sampled per video
        self.f_period = frm_period   # sampling stride between frames
    def __len__(self):
        """One dataset item per video directory."""
        return len(os.listdir(self.videodir))
    def __getitem__(self, idx):
        """Load frm_ch_num frames (every f_period-th) plus the masks."""
        videoname = self.videolist[idx]
        frmspath = self.videodir + videoname + '/'
        frmsname = sorted(os.listdir(frmspath))
        #flip_flag = np.random.randint(2)
        height = 16*3*5
        width = 16*4*5
        frms = cv2.imread(frmspath + frmsname[0])# first frame
        frms = cv2.resize(frms, (width, height))
        #show_frms = torch.tensor(frms.copy(), dtype=torch.float)
        color = frms.shape[2]
        # trailing singleton axis accumulates the frame dimension below
        frms = np.reshape(frms, (height, width, color, 1))
        for num in range(self.f_period, self.f_period*self.f_ch_num, self.f_period):#frame period in video
            frm = cv2.imread(frmspath + frmsname[num])
            frm = cv2.resize(frm, (width, height))
            #region for transforming frm
            frm = np.reshape(frm, (height, width, color, 1))
            frms = np.concatenate((frms, frm), axis=3)
        # grayscale ground truth: 255 = water, 125 = road
        gt = cv2.imread(self.gtdir + self.gtlist[idx],0)
        gt = cv2.resize(gt, (width, height), interpolation=cv2.INTER_NEAREST)
        # NOTE(review): these two masks are immediately recomputed from the
        # replicated gt below, so this first pair is dead code — confirm.
        gt_w = (gt == 255) * 1.0 # water groundtruth [0, 1]
        gt_r = (gt == 125) * 1.0
        #show_gt_w = torch.tensor(gt_w.copy(), dtype=torch.float)
        #show_gt_r = torch.tensor(gt_r.copy(), dtype=torch.float)
        # replicate the single mask across all 16 frame slots
        gt = np.reshape(gt, (height, width, 1, 1))
        gt = np.concatenate((gt, gt, gt, gt), axis=3) # [HWC4]
        gt = np.concatenate((gt, gt, gt, gt), axis=3) # [HWC16]
        gt_w = (gt==255)*1.0 #water groundtruth [0, 1]
        gt_r = (gt==125)*1.0
        frms = torch.tensor(frms, dtype=torch.float)
        gts_w = torch.tensor(gt_w, dtype=torch.float)
        gts_r = torch.tensor(gt_r, dtype=torch.float)
        frms = frms.permute(2, 3, 0, 1) # C F H W
        gts_w = gts_w.permute(2, 3, 0, 1)
        gts_r = gts_r.permute(2, 3, 0, 1)
        # scale pixel values to [0, 1]
        frms = frms / 255
        return frms, gts_w, gts_r
'''
class CCTVDataset(Dataset):
def __init__(self, groundtruthdir, videodir, frm_ch_num=16, frm_period =5):
self.gtdir = groundtruthdir
self.videodir = videodir
self.videolist = sorted(os.listdir(videodir))
self.gtlist = sorted(os.listdir(groundtruthdir))
self.f_ch_num = frm_ch_num
self.f_period = frm_period
def __len__(self):
return len(os.listdir(self.videodir))
def __getitem__(self, idx):
videoname = self.videolist[idx]
frmspath = self.videodir + videoname + '/'
frmsname = sorted(os.listdir(frmspath))
flip_flag = np.random.randint(2)
height = 9 * 32
width = 16 * 32
frms = io.imread(frmspath + frmsname[0])# first frame
frms = cv2.resize(frms, (width, height))
gt = io.imread(self.gtdir + self.gtlist[idx])
gt = cv2.resize(gt, (width, height), interpolation=cv2.INTER_NEAREST)
gt = gt[:,:,0:3]
gt_w = gt==255 #water groundtruth [0, 1]
gt_r = gt==125
r_frms = np.reshape(frms[:, :, 0], (height, width, 1, 1))
g_frms = np.reshape(frms[:, :, 1], (height, width, 1, 1))
b_frms = np.reshape(frms[:, :, 2], (height, width, 1, 1))
gts_w = gt_w.copy()
gts_r = gt_r.copy()
for num in range(self.f_period, self.f_period*self.f_ch_num, self.f_period):#frame period in video
frm = io.imread(frmspath + frmsname[num])
frm = cv2.resize(frm, (width, height))
r_frm = np.reshape(frm[:, :, 0], (height, width, 1, 1))
g_frm = np.reshape(frm[:, :, 1], (height, width, 1, 1))
b_frm = np.reshape(frm[:, :, 2], (height, width, 1, 1))
r_frms = np.concatenate((r_frms, r_frm), axis=2)
g_frms = np.concatenate((g_frms, g_frm), axis=2)
b_frms = np.concatenate((b_frms, b_frm), axis=2)
gts_w = np.concatenate((gts_w, gt_w), axis=2)
gts_r = np.concatenate((gts_r, gt_r), axis=2)
frms = np.concatenate((r_frms, g_frms, b_frms), axis=3)
gt_w2 = np.reshape(gts_w[:, :, 0:self.f_ch_num], (height, width, self.f_ch_num, 1))
gt_r2 = np.reshape(gts_r[:, :, 0:self.f_ch_num], (height, width, self.f_ch_num, 1))
frms = torch.tensor(frms, dtype=torch.float)
gt_w2 = torch.tensor(gt_w2, dtype=torch.float)
gt_r2 = torch.tensor(gt_r2, dtype=torch.float)
frms = frms.permute(3, 2, 0, 1)
gt_w2 = gt_w2.permute(3, 2, 0, 1)
gt_r2 = gt_r2.permute(3, 2, 0, 1)
frms = frms / 255
return frms, gt_w2, gt_r2
'''
def dice_loss(pred, target):
    """Soft Dice loss (1 - Dice coefficient) with additive smoothing."""
    smooth = 1.
    # contiguous() is required because the inputs may come from a view op
    flat_pred = pred.contiguous().view(-1)
    flat_target = target.contiguous().view(-1)
    overlap = (flat_pred * flat_target).sum()
    denom = torch.sum(flat_pred * flat_pred) + torch.sum(flat_target * flat_target)
    return 1 - ((2. * overlap + smooth) / (denom + smooth))
def L1_loss(pred, target):
    """Mean absolute error between the flattened prediction and target."""
    criterion = nn.L1Loss()
    return criterion(pred.contiguous().view(-1),
                     target.contiguous().view(-1))
def diceL1_loss(pred, target):
    """Average of the Dice loss and the L1 loss on the same pair."""
    dice_term = dice_loss(pred, target)
    l1_term = L1_loss(pred, target)
    return (dice_term + l1_term) / 2
def dice_focal_loss(pred, target, batch_size, gamma=2):
    """Per-sample Dice loss modulated by a focal factor (1 - pt) ** gamma.

    Assumes ``target`` holds binary 0/1 masks — TODO confirm; ``pt`` is then
    the per-sample mean predicted probability of the true class, so samples
    the model already handles well are down-weighted.
    """
    # flatten each sample in the batch to a row
    f_pred = pred.contiguous().view(batch_size, -1)
    f_target = target.contiguous().view(batch_size, -1)
    # for a binary target the target itself acts as the foreground mask
    gt1_mask = f_target.contiguous()
    gt0_mask = f_target == 0
    # elementwise prob of the correct class: pred where target==1,
    # (1 - pred) where target==0
    pt_gt1 = f_pred * gt1_mask
    pt_gt0 = 1. * gt0_mask - f_pred * gt0_mask
    pt = pt_gt1 + pt_gt0
    # mean over the pixels of each sample
    pt = torch.sum(pt, 1) / f_target.shape[1]
    smooth = 1.
    inter = torch.sum(f_pred*f_target,1)
    p_sum = torch.sum(f_pred*f_pred,1)
    g_sum = torch.sum(f_target * f_target, 1)
    dice = 1-((2.*inter + smooth)/(p_sum + g_sum + smooth))
    # focal modulation, then average over the batch
    dice_focal = ((1-pt)**gamma)*dice
    dice_focal = dice_focal.sum()/batch_size
    return dice_focal
def diceL1_focal_loss(pred, target, batch_size, gamma=2):
    """Focal-weighted sum of per-sample Dice loss and L1-style loss.

    Same ``pt`` construction as dice_focal_loss (assumes binary 0/1
    targets — TODO confirm); the L1 term is simply 1 - pt, and both terms
    share the focal factor (1 - pt) ** gamma.
    """
    # flatten each sample in the batch to a row
    f_pred = pred.contiguous().view(batch_size, -1)
    #print("p ", f_pred)
    f_target = target.contiguous().view(batch_size, -1)
    #print("t ", f_target)
    # probability assigned to the correct class, elementwise
    gt1_mask = f_target.contiguous()
    gt0_mask = f_target == 0
    pt_gt1 = f_pred * gt1_mask
    pt_gt0 = 1. * gt0_mask - f_pred * gt0_mask
    pt = pt_gt1 + pt_gt0
    #print("pt ", pt)
    # mean over the pixels of each sample
    pt = torch.sum(pt, 1) / f_target.shape[1]
    #print(f_target.shape[1])
    #print("pt ", pt)
    smooth = 1.
    inter = torch.sum(f_pred * f_target, 1)
    p_sum = torch.sum(f_pred * f_pred, 1)
    g_sum = torch.sum(f_target * f_target, 1)
    dice = 1 - ((2. * inter + smooth) / (p_sum + g_sum + smooth))
    #print("dice ", dice)
    # mean absolute error expressed through pt
    L1 = 1 - pt
    #print("L1 ", L1)
    # focal modulation applied to the combined loss, then batch average
    diceL1_focal = ((1 - pt) ** gamma) * (dice + L1)
    #print("diceL1_focal ", diceL1_focal)
    diceL1_focal = diceL1_focal.sum() / batch_size
    return diceL1_focal
#--------------------
#all of parameter setting
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
set_frm_ch_num = 16
set_frm_period = 5
set_batch_size = 7
set_base_lr = 0.0006 # scheduler setting
set_max_lr = 0.0012 # scheduler setting
set_step_size_up = 200 # scheduler setting
set_step_size_down = 200
set_wt_save_name = 'vnet_diceL1_fullsz_bch14_191110'
where = "server" #"home" #"lab"
print(where)
if where == "lab":
set_gtdir = "/datahdd/dataset/water_segmentation/Train/annot/"
set_videodir = "/datahdd/dataset/water_segmentation/Train/frames/"
set_wt_save_path = "/datahdd/code/water detection/vnet.pytorch/model_save/"
elif where == "home":
set_gtdir = "/home/hyeongeun/dataset/Train/annot/"
set_videodir = "/home/hyeongeun/dataset/Train/frames/"
set_wt_save_path = "/home/hyeongeun/PycharmProjects/vnet/model_save/"
elif where == "server":
set_gtdir = '/datahdd/WaterDetection/water_video_dataset/Train/annot/'
set_videodir = "/datahdd/WaterDetection/water_video_dataset/Train/frames/"
set_wt_save_path = "/datahdd/WaterDetection/save_model/vnet/"
else :
raise Exception("Input 'where'.")
#--------------------
writer = SummaryWriter()
print("="*30)
print("Check before training")
gtdir = set_gtdir
videodir = set_videodir
print("Ground truth image dir : ", gtdir)
print("Frames dir : ", videodir)
print("-"*20)
print("Create dataset class")
frm_ch_num = set_frm_ch_num
frm_period = set_frm_period
print("# of frames : ", frm_ch_num)
print("frame period : ", frm_period)
cctv_dataset = CCTVDataset(gtdir,videodir,frm_ch_num,frm_period)
print("Dataset length : ",cctv_dataset.__len__())
frms, gt_w, gt_r = cctv_dataset[0] #,gt_r
print("Frms shape : ",frms.shape)
print("Water ground truth shape : ", gt_w.shape)
print("Road ground truth shape : ", gt_r.shape)
data_height = frms.shape[2]
data_width = frms.shape[3]
print("-"*20)
print("Create dataloader")
batch_sz = set_batch_size
print("Batch size : ",batch_sz)
dataloaders =torch.utils.data.DataLoader(cctv_dataset, batch_size = batch_sz, shuffle= True, num_workers=8)
videos, gts_w, gts_r = next(iter(dataloaders)) #, gts_r , s_r
print("Videos shape : ",videos.shape)
print("Water gts shape : ",gts_w.shape)
print("Road gts shape : ",gts_r.shape)
#print("test v w r : ", s_v.shape, s_w.shape, s_r.shape) #, s_r.shape)
print("-"*20)
print("Create model")
model= vnet_wr.VNet(elu=True, nll=False, frm_ch=frm_ch_num, height=data_height, width=data_width)
weight_decay = 1e-4
start_epoch = 0
if os.path.isfile(set_wt_save_path+set_wt_save_name+'_last.pth'):
print('**********Resume*************')
checkpoint = torch.load(set_wt_save_path+set_wt_save_name+'_last.pth')
model.load_state_dict(checkpoint['model_state_dict'])
start_epoch = checkpoint['epoch']
print("< save point >")
print("epoch : ", checkpoint['epoch'])
print("Best epoch loss : ", checkpoint['best_epoch_loss'])
print("-- Water epoch loss : ", checkpoint['water_epoch_loss'])
print("-- Road epoch loss : ", checkpoint['road_epoch_loss'])
#print("Weight decay : ",weight_decay)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
print("Device type:", device.type)
model.train()
optimizer = optim.Adam(model.parameters(), weight_decay=weight_decay)
scheduler = lr_scheduler.CyclicLR(optimizer, base_lr = set_base_lr, max_lr=set_max_lr,
step_size_up = set_step_size_up, cycle_momentum=False)
if os.path.isfile(set_wt_save_path+set_wt_save_name+'_last.pth'):
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
epochs = 10000
best_epoch_loss = 1.0
best_epoch_loss_w = 1.0
best_epoch_loss_r = 1.0
best_water_loss = 1.0
best_road_loss = 1.0
print("="*30)
print("Start train")
for epoch in range(start_epoch,epochs):
epoch_start_time =time.time()
epoch_loss = 0.0
water_epoch_loss = 0.0
road_epoch_loss = 0.0
show10_video = None
show10_w_gt = None
show10_w_pred = None
show10_r_gt = None
show10_r_pred = None
show20_video = None
show20_w_gt = None
show20_w_pred = None
show20_r_gt = None
show20_r_pred = None
for batch_idx, (frms, gts_w, gts_r) in enumerate(dataloaders): #, gts_r, show_gts_r
frms, gts_w, gts_r = frms.to(device), gts_w.to(device),gts_r.to(device)#, gts_r.cuda()#, gts_r
optimizer.zero_grad()
output = model(frms)
pred_water = output[:, :, 0]
pred_road = output[:, :, 1]
water_loss = (dice_loss(pred_water,gts_w) + L1_loss(pred_water,gts_w))/2#,batch_sz)
road_loss = (dice_loss(pred_road,gts_r) + L1_loss(pred_road,gts_r) )/2#,batch_sz)
loss = (water_loss + road_loss)/2 #L1_weight*(water_L1_loss) #+ road_loss + road_L1_loss)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
water_epoch_loss += water_loss.item()
road_epoch_loss += road_loss.item()
'''
if batch_idx == 10:
show10_w_pred = pred_water
show10_w_gt = show_gts_w.permute(0, 3, 1, 2)
show10_w_gt = show10_w_gt * 255
show10_r_pred = pred_road
show10_r_gt = show_gts_r.permute(0, 3, 1, 2)
show10_r_gt = show10_r_gt * 255
show10_video = show_frms.permute(0, 3, 1, 2)
show10_w_pred = show10_w_pred.view(-1,frm_ch_num, 128, 128)
show10_w_pred = show10_w_pred[:, 0:3, :, :]
show10_r_pred = show10_r_pred.view(-1, frm_ch_num, 128, 128)
show10_r_pred = show10_r_pred[:, 0:3, :, :]
if batch_idx == 20:
show20_w_pred = pred_water
show20_w_gt = show_gts_w.permute(0, 3, 1, 2)
show20_w_gt = show20_w_gt * 255
show20_r_pred = pred_road
show20_r_gt = show_gts_r.permute(0, 3, 1, 2)
show20_r_gt = show20_r_gt * 255
show20_video = show_frms.permute(0, 3, 1, 2)
show20_w_pred = show20_w_pred.view(-1, frm_ch_num, 128, 128)
show20_w_pred = show20_w_pred[:, 0:3, :, :]
show20_r_pred = show20_r_pred.view(-1, frm_ch_num, 128, 128)
show20_r_pred = show20_r_pred[:, 0:3, :, :]
'''
epoch_loss /= len(dataloaders)
water_epoch_loss /= len(dataloaders)
road_epoch_loss /= len(dataloaders)
scheduler.step()
if best_epoch_loss>epoch_loss:
best_epoch_loss = epoch_loss
best_epoch_loss_w = water_epoch_loss
best_epoch_loss_r = road_epoch_loss
wt_save_path = set_wt_save_path
wt_save_name = set_wt_save_name + '_best.pth'
torch.save({
'epoch':epoch,
'model_state_dict':model.state_dict(),
'optimizer_state_dict':optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'best_epoch_loss':best_epoch_loss,
'water_epoch_loss':best_epoch_loss_w,
'road_epoch_loss':best_epoch_loss_r
}, wt_save_path+wt_save_name)
wt_save_path = set_wt_save_path
wt_save_name = set_wt_save_name +'_last.pth'
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict' : scheduler.state_dict(),
'best_epoch_loss': best_epoch_loss,
'water_epoch_loss': water_epoch_loss,
'road_epoch_loss': road_epoch_loss
}, wt_save_path + wt_save_name)
epoch_time = time.time() - epoch_start_time
time_h = int((epoch_time // 60) // 60)
time_m = int((epoch_time // 60) % 60)
time_s = epoch_time % 60
#print(time_h, "h ", time_m, "m ", time_s, "s")
print("Epoch {}/{} Total loss : {:.8f} ( Best total loss : {:.8f} = focal dice+l1_w {:.8f} + focal dice+l1_r {:.8f} , lr = {} )".format(epoch, epochs - 1, epoch_loss, best_epoch_loss,
best_epoch_loss_w, best_epoch_loss_r,
scheduler.get_lr()))
print("Water loss : {:.8f} / Road loss : {:.8f} / Time : {:.0f}min {:.1f}sec".format(water_epoch_loss, road_epoch_loss,
time_m,time_s))
writer.add_scalar('total loss/train', epoch_loss, epoch)
writer.add_scalar('water focal dice+L1 loss/train', water_epoch_loss, epoch)
writer.add_scalar('road focal dice+L1 loss/train', road_epoch_loss, epoch)
'''
grid_10video = utils.make_grid(show10_video,nrow=4,normalize=True)
grid_10_w_gt = utils.make_grid(show10_w_gt, nrow=4,normalize=True)
grid_10_w_pred = utils.make_grid(show10_w_pred, nrow=4,normalize=True)
grid_10_w_pred_thres = grid_10_w_pred > 0.5
grid_10_r_gt = utils.make_grid(show10_r_gt, nrow=4, normalize=True)
grid_10_r_pred = utils.make_grid(show10_r_pred, nrow=4, normalize=True)
grid_10_r_pred_thres = grid_10_r_pred > 0.5
grid_20video = utils.make_grid(show20_video,nrow=4,normalize=True)
grid_20_w_gt = utils.make_grid(show20_w_gt,nrow=4,normalize=True)
grid_20_w_pred = utils.make_grid(show20_w_pred,nrow=4,normalize=True)
grid_20_w_pred_thres = grid_20_w_pred > 0.5
grid_20_r_gt = utils.make_grid(show20_r_gt, nrow=4, normalize=True)
grid_20_r_pred = utils.make_grid(show20_r_pred, nrow=4, normalize=True)
grid_20_r_pred_thres = grid_20_r_pred > 0.5
writer.add_image('grid_10_video',grid_10video, epoch)
writer.add_image('grid_10_water_gt', grid_10_w_gt, epoch)
writer.add_image('grid_10_water_pred', grid_10_w_pred, epoch)
writer.add_image('grid_10_water_pred_thres', grid_10_w_pred_thres, epoch)
writer.add_image('grid_10_road_gt', grid_10_r_gt, epoch)
writer.add_image('grid_10_road_pred', grid_10_r_pred, epoch)
writer.add_image('grid_10_road_pred_thres', grid_10_r_pred_thres, epoch)
writer.add_image('grid_20_video', grid_20video, epoch)
writer.add_image('grid_20_water_gt', grid_20_w_gt, epoch)
writer.add_image('grid_20_water_pred', grid_20_w_pred, epoch)
writer.add_image('grid_20_water_pred_thres', grid_20_w_pred_thres, epoch)
writer.add_image('grid_20_road_gt', grid_20_r_gt, epoch)
writer.add_image('grid_20_road_pred', grid_20_r_pred, epoch)
writer.add_image('grid_20_road_pred_thres', grid_20_r_pred_thres, epoch)
'''
##----------------------------------------------end of each epoch------------------------------------------------------------------
|
989,044 | 909ae044570b66e8512150beb9de74a5b79e4f41 | import random
class Monster:
    """Base class for combat monsters.

    Tracks hit points, attack/defense stats, and the loot list dropped on
    death. Subclasses override decide() to implement fighting styles.
    """

    def __init__(self, name, attackType, health, attack, defense, drop):
        self.name = name
        self.attackType = attackType
        self.health = health
        self.maxhealth = health  # starting HP, used for low-health ratio checks
        self.attack = attack
        self.defense = defense
        self.dropls = drop

    def notdead(self):
        """Return True while the monster still has hit points left."""
        return self.health > 0

    def status(self):
        """Print a two-line summary: name/style, then current stats."""
        print(self.name + ": " + self.attackType)
        print('Health: ' + str(self.health) + ' / ' + str(self.maxhealth) + ' ' + 'Attack: ' + str(self.attack))

    def decide(self, opp, roundnum):
        """Choose this round's action: defend when badly hurt, else attack."""
        badly_hurt = self.health <= self.maxhealth * 0.2
        return 'd' if badly_hurt else 'a'
class AggresiveMonster(Monster):
    """Monster that attacks every round regardless of its own health."""

    def __init__(self, name, health, attack, defense, drop):
        super().__init__(name, 'aggresive', health, attack, defense, drop)

    def decide(self, opp, roundnum):
        """Always attack."""
        return 'a'
class DefensiveMonster(Monster):
    """Monster that mostly defends, attacking only every fourth round."""

    def __init__(self, name, health, attack, defense, drop):
        super().__init__(name, 'defensive', health, attack, defense, drop)

    def decide(self, opp, roundnum):
        """Attack on rounds divisible by 4; defend otherwise."""
        return 'a' if roundnum % 4 == 0 else 'd'
class CleverMonster(Monster):
    """Monster with simple situational tactics.

    Defends when near death, presses the attack when the opponent is weak,
    and otherwise draws randomly from an action pool biased toward attacking
    (half of the 'defend' draws are re-rolled into attacks).
    """

    # Default action pool: attack twice as often as defend.
    _DEFAULT_CHOICES = ('a', 'a', 'd', 'd', 'a', 'a')

    def __init__(self, name, health, attack, defense, drop, choices=None):
        Monster.__init__(self, name, 'clever', health, attack, defense, drop)
        # The original used a mutable list as the default argument, which is
        # evaluated once and shared between every instance constructed
        # without `choices`; use a None sentinel and build a fresh list.
        if choices is None:
            choices = list(self._DEFAULT_CHOICES)
        self.choice = choices

    def decide(self, opp, roundnum):
        """Return 'a' (attack) or 'd' (defend) for this round."""
        if self.health <= self.maxhealth * 0.1:
            return 'd'
        elif opp.health <= opp.maxhealth * 0.2:
            return 'a'
        else:
            c = random.choice(self.choice)
            if c == 'd':
                # Re-roll half of the defend picks into attacks.
                if random.random() > 0.5:
                    c = 'a'
            return c
#class Boss(Monster): |
989,045 | ae15d98540cf970688b89d0abee54b6e7fb19a1c | from microbit import *
# Interactive 32-bit binary entry and conversion tool for the BBC micro:bit.
# Screen 1 (INPUTTING): toggle bits on the 5x5 LED grid (25 bits, then 7 more
# on a second page). Screen 2 (CONVERTING): scroll the entered word as hex,
# unsigned int, two's-complement int, IEEE-754 float, or a byte sum.
ledOff = 0 #Interpret as bit off
ledOn = 9 #Interpret as bit on
LongPress = 500 #No magic numbers!
ShortPress = 100
INPUTTING = 3 #Needed for switching between screens
CONVERTING = 4
screen = INPUTTING
leds = ["0" for i in range(32)] #Initialize 32 bit array to ascii 0s
bit = 0 #leds array index 0..31 - bit order: MSB:31..LSB:0 i.e. index=0 === bit 31 and index=31 === bit 0
while True:
    if screen == INPUTTING: #Do all the stuff in this INPUTTING body
        # x === column, y === row where x=0,y=0 is upper left led and x=4,y=4 is bottom right led
        x = 0
        y = 0
        #BUGFIX: reset the bit cursor on every visit to this screen. It was
        #previously left at its final value (31) after a full entry pass, so a
        #second round of input walked past the end of `leds` and crashed with
        #an IndexError.
        bit = 0
        page2 = True #For breaking to the CONVERSION screen from any point while on the INPUTTING screen
        while( y < 5 ): #Working with bits in range x: 0-->4 and y: 0-->4
            while( not( button_b.was_pressed())): #While b is not being pressed, flash a pixel
                display.set_pixel(x,y,ledOn)
                sleep(ShortPress)
                display.set_pixel(x,y,ledOff)
                sleep(ShortPress)
                if button_a.was_pressed(): #A toggles the current bit on and advances
                    display.set_pixel(x,y,ledOn)
                    break
            sleep(LongPress)
            if button_b.is_pressed(): #If b was held longer than .5 second, display CONVERTING screen
                page2 = False
                screen = CONVERTING
                break
            leds[bit] = "0" if (0 == display.get_pixel(x,y)) else "1" #Capture the pixel state into the leds list
            x = x+1 #Walking through the columns
            if x > 4: #If the next column is greater than 4:
                x = 0 #return to column 0
                y = y+1 #and drop down 1 row
            bit += 1 #Walk the bits by 1 from 31-->0
        if( page2 ):
            display.clear()
            # x === column, y === row where x=0,y=0 is upper left led
            x = 0
            y = 0
            while( y < 2 ): #Working with bits in range x: 0-->4 and y: 0-->1
                while( not( button_b.was_pressed())):
                    display.set_pixel(x,y,ledOn)
                    sleep(ShortPress)
                    display.set_pixel(x,y,ledOff)
                    sleep(ShortPress)
                    if button_a.was_pressed():
                        display.set_pixel(x,y,ledOn)
                        break
                sleep(LongPress)
                if button_b.is_pressed():
                    screen = CONVERTING
                    break
                leds[bit] = "0" if (0 == display.get_pixel(x,y)) else "1"
                x = x+1 #Walking through the columns
                if x > 4: #If the next column is greater than 4:
                    x = 0 #return to column 0
                    y = y+1 #and drop down 1 row
                if( (1 == y) and (2 == x)): #Limiting the second screen to 7 bits
                    break
                bit += 1 #Walk the bits by 1 from 31-->0
    if screen == CONVERTING:
        display.clear()
        menu = [ "hex?", "uint?", "signed int?", "float?", "ascii?"]#Hex was included for a way to view the data
        menuIndex = 0
        #BUGFIX: the original rebound the module-level screen id CONVERTING to
        #True here, shadowing the constant for the rest of the run. The loop
        #only ever exits via break, so a plain infinite loop is equivalent and
        #leaves the constant alone.
        while True: #Do all the stuff in this CONVERTING body
            display.clear()
            while( not( button_b.was_pressed())):
                display.scroll(menu[menuIndex])
                if(button_a.was_pressed()): #A selects the current menu entry
                    while( not( button_a.was_pressed())): #Show the value until A is pressed again
                        if(0 == menuIndex):
                            #Hex logic
                            value = int("".join(leds),2) #Combine all the bit values and convert to an unsigned integer range:0..(2^32)-1
                            display.scroll("0x{:08x}".format(value),250)
                        elif(1 == menuIndex):
                            #Unsigned int logic
                            value = int("".join(leds),2) #Combine all the bit values and convert to an unsigned integer range:0..(2^32)-1
                            display.scroll(value)
                        elif(2 == menuIndex):
                            #two's complement integer
                            signedLeds = leds.copy()
                            if "1" == leds[31]: #Check to see if the MSB(sign bit) is on
                                for i in range (32): #Walk through the array
                                    #Flip bits
                                    if signedLeds[i] == "0": #Check if the led is a 0
                                        signedLeds[i] = "1" #Make it a 1
                                    else:
                                        signedLeds[i] = "0" #Make it a 0
                                value = -int("".join(signedLeds),2) - 1 #Assign negative range:-1..-(2^31)
                            else:
                                value = int("".join(signedLeds),2) #Assign positive value, range:0..(2^31)
                            display.scroll(value)
                        elif(3 == menuIndex):
                            #Float logic - reference: https://en.wikipedia.org/wiki/Single-precision_floating-point_format
                            sign = (-1)**int(leds[0]) #-1 raised to power of bit 31 e.g. -1**1 = -1 OR -1**0 = 1
                            exponent = int("".join(leds[1:9]),2) #Combining bit values 30 - 23 and converting to an int
                            mantissa = int("".join(leds[9:]),2) #Combining bit values 22 - 0 and converting to an int
                            if (255 == exponent) and (0 != mantissa): #Check for all bits on in exponent AND mantissa not = 0
                                value = "nan"
                            elif (255 == exponent) and (0 == mantissa): #Check for all bits on in exponent AND mantissa = 0
                                value = "+ infinity" if 0 < sign else "- infinity" #For sign > 0, value = - infinity.For sign < 0, value = infinity
                            else:
                                if 0 == exponent: #Check for all bits off in exponent === 0 - special denormalized case
                                    exponent = 2**(int("".join(leds[1:9]),2)-126) #2 raised to the power of int of (leds 30 through 23) - 126
                                    mantissa = 0.0 #Invisible leading bit in mantissa does not apply
                                else: # not denormalized - "normal" case
                                    exponent = 2**(int("".join(leds[1:9]),2)-127) #2 raised to the power of int of (leds 30 through 23) - 127
                                    mantissa = 1.0 #Invisible leading bit in mantissa does apply
                                power = -1 #No magic numbers: used in bit contribution calculation i.e. 2**power
                                for led in (leds[9:]): #Working with the bits in the mantissa
                                    if "1" == led: #Check which bits in the mantissa are on
                                        mantissa = 2**power + mantissa #If bit is set, add its contribution: bit 23: .5, bit 22: .25, ...
                                    power = power - 1 #Each index in the mantissa has it's own power, going down by 1
                                value = sign*exponent*mantissa #Combine the sign,exponent and mantissa
                            display.scroll(value)
                        elif(4 == menuIndex):
                            #Ascii logic
                            #BUGFIX: compute the word here. The original reused
                            #`value` left over from the hex/uint branches and
                            #raised NameError if neither had been visited yet.
                            value = int("".join(leds),2)
                            #NOTE(review): this sums the four byte values rather
                            #than decoding them as characters - confirm whether
                            #chr() decoding was intended.
                            asciiValue = value&0xff
                            asciiValue += ((value>>8)&0xff)
                            asciiValue += ((value>>16)&0xff)
                            asciiValue += ((value>>24)&0xff)
                            display.scroll(asciiValue)
            sleep(LongPress)
            if button_b.is_pressed(): #If true, break out of CONVERTING loop and go to INPUTTING screen
                screen = INPUTTING
                break
            menuIndex = (menuIndex + 1) % len(menu) #Walking through the menu options
|
989,046 | a75ce0ff0b7a440156896eaa13ab641445d435af | from glass.command import command
@command
def samplex_random_seed(env, s):
    """Set the random-number-generator seed used for model generation."""
    env.model_gen_options['rngseed'] = s
@command
def samplex_add_noise(env, n=1e-6):
    """Deprecated command: adding noise is no longer supported.

    Raises unconditionally. An explicit raise replaces the original
    ``assert 0``, which is silently stripped when Python runs with -O and
    would have turned this guard into a no-op.
    """
    raise RuntimeError('samplex_add_noise: DEPRECATED FUNCTION')
@command
def samplex_stride(env, s=1):
    """Deprecated command: stride configuration is no longer supported.

    Raises unconditionally. An explicit raise replaces the original
    ``assert 0``, which is silently stripped when Python runs with -O and
    would have turned this guard into a no-op.
    """
    raise RuntimeError('samplex_stride: DEPRECATED FUNCTION')
@command
def samplex_acceptance(env, rate=0.25, tol=0.05):
    """Set the target acceptance rate and tolerance for model generation.

    Raises ValueError when either value is not positive. Explicit checks
    replace the original ``assert`` statements, which are stripped under -O
    and would have let invalid values through silently.
    """
    if rate <= 0:
        raise ValueError('samplex_acceptance: rate must be > 0')
    if tol <= 0:
        raise ValueError('samplex_acceptance: tol must be > 0')
    env.model_gen_options['acceptance rate'] = rate
    env.model_gen_options['acceptance tol'] = tol
@command
def samplex_redo_factor(env, f):
    """Set the redo factor for model generation.

    Raises ValueError for non-positive values; the explicit check replaces
    an ``assert`` that would be stripped under -O.
    """
    if f <= 0:
        raise ValueError('samplex_redo_factor: f must be > 0')
    env.model_gen_options['redo factor'] = f
@command
def samplex_redo_exponent(env, e):
    """Set the redo exponent for model generation (any value accepted)."""
    env.model_gen_options['redo exp'] = e
@command
def samplex_start_twiddle(env, t):
    """Set the starting twiddle value for model generation.

    Raises ValueError for non-positive values; the explicit check replaces
    an ``assert`` that would be stripped under -O.
    """
    if t <= 0:
        raise ValueError('samplex_start_twiddle: t must be > 0')
    env.model_gen_options['twiddle'] = t
@command
def samplex_burnin_factor(env, b):
    """Set the burn-in factor for model generation.

    Raises ValueError for non-positive values; the explicit check replaces
    an ``assert`` that would be stripped under -O.
    """
    if b <= 0:
        raise ValueError('samplex_burnin_factor: b must be > 0')
    env.model_gen_options['burnin factor'] = b
989,047 | 5dea135da674dd1122c3c1db6861f55e0949e5e4 | dict = {
"name": "Arun",
"lastname" : "Suryan"
}
print(dict.get("name")) |
989,048 | 1239fd380d0affd4f804ecbd9f275c60860f4cb8 | from django.test import TestCase
from django.http import HttpRequest
from webp_converter.context_processors import webp_support
class TestContextProcessors(TestCase):
    """webp_support() should flag WebP compatibility from the Accept header."""

    @staticmethod
    def _request_with_accept(accept):
        # Build a minimal request carrying the given Accept header.
        req = HttpRequest()
        req.META["HTTP_ACCEPT"] = accept
        return req

    def test_webp_support_true(self):
        request = self._request_with_accept(
            "text/html,application/xhtml+xml,application/xml;"
            "q=0.9,image/webp,*/*;q=0.8"
        )
        assert webp_support(request) == {"webp_compatible": True}

    def test_webp_support_false(self):
        request = self._request_with_accept(
            "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        )
        assert webp_support(request) == {"webp_compatible": False}
|
989,049 | d20c26293b86b18f35765e41b6ddc7d8300c2b9e | class car(object):
    def __init__(self, name, type):
        """Store the car's name and type (param ``type`` shadows the builtin)."""
        self.name = name
        self.type = type
    def getName(self):
        """Java-style accessor for ``self.name`` (plain attribute access suffices)."""
        return self.name
    def gettype(self):
        """Java-style accessor for ``self.type``."""
        return self.type
    def __str__(self):
        """Human-readable description, e.g. "Z3 is a sportscar"."""
        return "%s is a %s" % (self.name, self.type)
    def __init__(self):
        # NOTE(review): this zero-argument __init__ is defined later in the
        # same class body, so it replaces the (name, type) __init__ above -
        # confirm which constructor is intended.
        self.__updateSoftware()
    def drive(self):
        # Python 2 print statement; this file is not Python 3 compatible.
        print 'driving'
    def __updateSoftware(self):
        # Name-mangled ("private") helper, called from __init__ above.
        print 'updating software'
# NOTE(review): Car (capital C) is only defined further down this file, so
# running the module top-to-bottom raises NameError here - this demo block
# probably belongs after a Car class definition.
redcar = Car()
redcar.drive()
class Car:
    """Demo of name-mangled "private" attributes with a setter (Python 2)."""
    __maxspeed = 0  # class-level defaults, shadowed per instance in __init__
    __name = ""
    def __init__(self):
        self.__maxspeed = 200
        self.__name = "Supercar"
    def drive(self):
        # Python 2 print statement; this file is not Python 3 compatible.
        print 'driving. maxspeed ' + str(self.__maxspeed)
    def setMaxSpeed(self,speed):
        """Setter needed because __maxspeed is name-mangled outside the class."""
        self.__maxspeed = speed
# Demo: drive, then raise the private max speed via the setter and drive again.
redcar = Car()
redcar.drive()
redcar.setMaxSpeed(320)
redcar.drive()
class Car:
    """Abstract vehicle base class: subclasses must implement drive() and stop()."""

    def __init__(self, name):
        self.name = name

    def _abstract(self):
        # Shared failure path for unimplemented abstract operations.
        raise NotImplementedError("Subclass must implement abstract method")

    def drive(self):
        return self._abstract()

    def stop(self):
        return self._abstract()
class Sportscar(Car):
    """Concrete fast car: implements the abstract drive()/stop() pair."""

    def drive(self):
        return 'Sportscar driving!'

    def stop(self):
        return 'Sportscar breaking!'
class Truck(Car):
    """Concrete heavy vehicle: implements the abstract drive()/stop() pair."""

    def drive(self):
        return 'Truck driving slowly because heavily loaded.'

    def stop(self):
        return 'Truck breaking!'
# Polymorphism demo: each vehicle reports its own drive() behaviour.
cars = [Truck('Bananatruck'),
        Truck('Orangetruck'),
        Sportscar('Z3')]
for car in cars:
    # Python 2 print statement; the loop variable also shadows the
    # module-level lowercase `car` class defined earlier in this file.
    print car.name + ': ' + car.drive()
|
989,050 | 4c1ec677a7f9b1a55585c40e1f6b48fb6b056d8f | from flask import Flask, render_template, redirect, url_for, request, jsonify
from subprocess import Popen, PIPE
import requests, json, socket, sys
from publish import Publish
from subscribe import Subscribe
app = Flask(__name__)
hostname=socket.gethostname()
@app.route('/')
def index():
    """Render the landing page, exposing this host's name to the template."""
    return render_template('frontend/index.html', hostname = hostname)
@app.route('/publisher')
def publisher():
    """Render the publisher UI page."""
    return render_template('frontend/publisher.html', hostname = hostname)
@app.route('/subscriber')
def subscriber():
    """Render the subscriber UI page."""
    return render_template('frontend/subscriber.html', hostname = hostname)
@app.route('/addsub', methods=['POST'])
def addsub():
    """Register a subscriber.

    Expects a JSON body with 'subemail' and 'events' keys and forwards them
    to the Subscribe helper. Always returns the JSON string "nothing".
    """
    data =json.loads(request.data)
    # print(data)
    Subscribe().subscribe(data['subemail'], data['events'])
    return jsonify("nothing")
@app.route('/addpublish', methods=['POST'])
def addpublish():
    """Publish an event.

    Expects a JSON body with 'events' and 'eventmessage' keys and forwards
    them to the Publish helper. Always returns the JSON string "nothing".
    """
    data =json.loads(request.data)
    # print(data)
    Publish().publish_event(data['events'], data['eventmessage'])
    return jsonify("nothing")
if __name__ == '__main__':
    # NOTE(review): debug=True on a publicly bound interface (0.0.0.0:80)
    # enables the Werkzeug debugger - do not run like this in production.
    app.run(host='0.0.0.0', debug=True, port=80)
|
989,051 | 4db2430b3b9596809acbde28eb4162b11babc9a6 | import tensorflow as tf
import tensorflow_hub as hub
import sentencepiece as spm
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import pandas as pd
import re
import seaborn as sns
# data = pd.read_csv('atec_nlp_sim_train_add.csv', header=None, delimiter="\n")
def load_dataset(filename):
    """Read a tab-separated sentence-similarity file into a DataFrame.

    Each input line is split on tabs; field 0 (an id) is skipped and fields
    1-3 become the columns sent_1, sent_2 and sim (sim parsed as float).
    """
    rows = []
    with tf.gfile.GFile(filename, "r") as handle:
        for raw_line in handle:
            fields = raw_line.strip().split("\t")
            # print(fields[1], fields[2], fields[3])
            rows.append((fields[1], fields[2], float(fields[3])))
    return pd.DataFrame(rows, columns=["sent_1", "sent_2", "sim"])
data = load_dataset('../atec_nlp_sim_train_add.csv')
# NOTE(review): both slices are heads of the same frame, so the "test" rows
# are a subset of the "train" rows - any evaluation on them is optimistic.
data_train = data.iloc[:20]
data_test = data.iloc[:10]
print(data_test)
# Download the Chinese NNLM embedding module from TF Hub (the commented path
# below points at a pre-populated local module cache instead).
print('Start downlaod...')
# module = hub.Module("/home/alex/my_module_cache/9c61abbea1e6365bdd67e17707f5dd2434ea42d7/")
module = hub.Module("https://tfhub.dev/google/nnlm-zh-dim128-with-normalization/1")
print('End download...')
|
989,052 | 4b7123db1bbab9320747279bd7d6fcf32d41f973 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 22:15:53 2020
@author: Cheerag
Even Fibonacci Numbers
Given a number N find the sum of all the even valued terms in the fibonacci sequence less than or equal to N. Try generating only even fibonacci numbers instead of iterating over all Fibonacci numbers.
Input Format
Line 1 : An integer N
Output Format
Total Sum
Input Constraints
1 <= N <= 10^6
Sample Input 1:
8
Sample Output 1 :
10
Sample Input 2:
400
Sample Output 2:
188
"""
def evenFib(n):
    """Return the sum of the even-valued Fibonacci terms that are <= n.

    Matches the problem statement above: e.g. evenFib(8) == 10 (2 + 8) and
    evenFib(400) == 188 (2 + 8 + 34 + 144).

    The previous implementation treated ``n`` as a count of terms rather
    than an upper bound on their value (and printed debug output), so it
    disagreed with the stated samples - evenFib(8) returned 44, not 10.
    """
    total = 0
    a, b = 1, 1
    while True:
        # Advance to the next Fibonacci term; stop once it exceeds n.
        a, b = b, a + b
        if b > n:
            break
        if b % 2 == 0:
            total += b
    return total

evenFib(10)
989,053 | 421e4f1d09acccef17b898ac710171e26787f618 | #
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
from datetime import datetime
import sys
import requests
import pystache
import config
def callAPI(url, method, auth, payload):
    """Perform an HTTP request and return the decoded JSON response body.

    method: one of 'get', 'post', 'put'; anything else performs no request.
    Returns {} when no request was made or when the body is not valid JSON.
    """
    r = None
    if method == 'get':
        r = requests.get(url, auth=auth)
    elif method == 'post':
        r = requests.post(url, auth=auth, json=payload)
    elif method == 'put':
        r = requests.put(url, auth=auth, json=payload)
    if r is None:
        # Unknown method: previously this fell through to r.json() and the
        # resulting AttributeError was masked by a bare except.
        return {}
    try:
        return r.json()
    except ValueError:
        # Narrowed from a bare except: requests raises ValueError (or its
        # json.JSONDecodeError subclass) when the body is not valid JSON.
        return {}
def contentful(method,resource,data):
    """Call the Contentful Content Delivery API for `resource` via callAPI."""
    endpoint = 'https://cdn.contentful.com'
    auth = None  # Contentful auth travels in the URL query string, not HTTP auth
    url = '{}/spaces/{}/{}?access_token={}'.format(endpoint,config.CONTENTFUL_SPACE_ID,resource,config.CONTENTFUL_ACCESS_TOKEN)
    return callAPI(url,method,auth,data)
def mailchimp(method,resource,data):
    """Call the MailChimp v3 API for `resource` using HTTP basic auth.

    NOTE(review): the 'us14' datacenter is hard-coded into the endpoint;
    confirm it matches the datacenter encoded in MAILCHIMP_API_KEY.
    """
    endpoint = 'https://us14.api.mailchimp.com/3.0/'
    url = '{}{}'.format(endpoint,resource)
    return callAPI(url,method,auth=(config.MAILCHIMP_USER,config.MAILCHIMP_API_KEY),payload=data)
def getLinked(type, id):
    """Fetch a linked Contentful entry or asset and return its fields.

    type: 'Asset' fetches from the assets collection; anything else fetches
    from entries. Asset file URLs are made absolute (https:). Returns None
    when the response carries no 'fields'.

    The parameter names shadow the builtins ``type``/``id``; they are kept
    for keyword-argument compatibility with existing callers.
    """
    # which content type are we getting?
    if (type == 'Asset'):
        content_type = 'assets'
    else:
        content_type = 'entries'
    resource = '{}/{}'.format(content_type, id)
    linked = contentful('get', resource, data=None)
    if content_type == 'assets':
        linked['fields']['file']['url'] = 'https:{}'.format(linked['fields']['file']['url'])
    try:
        return linked['fields']
    except (KeyError, TypeError):
        # Narrowed from a bare except: a missing 'fields' key (KeyError) or
        # a non-dict error payload (TypeError) both mean "nothing linked".
        return None
def getContent(params):
    """Resolve the article's feature image and return the template content.

    Returns None when the expected article/featureImage keys are missing so
    that main() can report 'Nothing to process'; previously this function
    could only raise KeyError, which made main()'s None check unreachable.
    """
    try:
        image_id = params['article']['featureImage']['sys']['id']
    except (KeyError, TypeError):
        return None
    linked_asset = getLinked('Asset', image_id)
    params['article']['featureImage'] = linked_asset
    return {
        'article' : params['article']
    }
def getTemplate(template_id):
    """Fetch the 'mustache' section of a MailChimp template's default content.

    Returns the section's template string, or None when it is absent.
    NOTE(review): mailchimp() returns {} (never None) on failure, so the
    None check below cannot trigger and ``default_content['sections']`` can
    raise KeyError on an error payload - confirm intended handling.
    """
    resource = 'templates/{}/default-content'.format(template_id)
    default_content = mailchimp('get',resource,data=None)
    if default_content is not None and 'mustache' in default_content['sections']:
        return default_content['sections']['mustache']
    else:
        return None
def createCampaign(content,params):
    """Create a MailChimp campaign from `content` and send a test email.

    Builds the campaign settings from the article, renders the stored
    mustache template, uploads the rendered HTML as the campaign content,
    then triggers a test send. Returns either an error-message dict or a
    dict with the test-send response under 'test'.
    """
    response = {}
    # create campaign data
    request = {
        'type' : 'regular',
        'recipients' : {
            'list_id' : config.CAMPAIGN_LIST_ID
        },
        'settings' : {
            'template_id' : config.CAMPAIGN_TEMPLATE_ID,
            'folder_id' : config.CAMPAIGN_FOLDER_ID,
            'title' : 'Latest article : {}'.format(content['article']['title']),
            'from_name' : 'Test',
            'reply_to' : config.CAMPAIGN_REPLY_TO,
            'subject_line' : content['article']['title'],
            'preview_text' : content['article']['lead']
        }
    }
    tid = request['settings']['template_id']
    # expose the settings to the mustache template as well
    content['settings'] = request['settings']
    # get the template from MailChimp
    template = getTemplate(tid)
    if template is None:
        return {'message' : 'Could not find the template'}
    # create the HTML
    HTML = pystache.render(template,content)
    # create the campaign
    # NOTE(review): mailchimp() returns {} (never None) on failure, so this
    # guard cannot trigger and campaign['id'] below can raise KeyError.
    campaign = mailchimp('post','campaigns',request)
    if campaign is None:
        return {'message' : 'Could not create a campaign'}
    # update the campaign content
    mailchimp('put','campaigns/{}/content'.format(campaign['id']),data={
        'template' : {
            'id' : tid,
            'sections' : {
                'mustache' : HTML
            }
        }
    })
    # send a test
    resource = 'campaigns/{}/actions/test'.format(campaign['id'])
    response['test'] = mailchimp('post',resource,data={
        'test_emails': config.CAMPAIGN_TEST_EMAILS,
        'send_type':'html'
    })
    return response
def main(params):
    """Cloud Functions entry point.

    Builds campaign content from the action params and creates/sends a test
    MailChimp campaign; returns a JSON-serializable dict either way.
    """
    content = getContent(params)
    if content is None:
        return {'message':'Nothing to process'}
    else:
        return createCampaign(content,params)
if __name__ == '__main__':
    # Local smoke test using the fixture params from config.
    print(main(config.TEST_PARAMS))
|
989,054 | 0fbb36136254c7899f22b13e91c5e937791a60e9 | nums = []
for _ in range(9):
nums.append(int(input()))
# sort를 안쓰는게 편할것같다는 생각을 했음.
# 최댓값이랑 index를 저장
max = nums[0]
count = 1
for i in range(9):
if max < nums[i]:
max = nums[i]
count = i+1
print(max)
print(count) |
989,055 | 5f338ce787b102d9c694c300d1a32b899c8dcfea | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 11:59:50 2018
@author: spalazzo
"""
"""
2048 GUI
"""
# import modules
import os
import pygame
# pygame specific locals/constants
from pygame.locals import *
# some resource related warnings
if not pygame.font: print('Warning, fonts disabled')
if not pygame.mixer: print('Warning, sound disabled')
# initializations
pygame.init()
# a bit similar to CodeSkulptor frame creation -- we'll call the window the canvas
canvas = pygame.display.set_mode((640, 480))
pygame.display.set_caption("My_Project")
import math
# Tile Images
IMAGENAME = "assets_2048.png"
TILE_SIZE = 100
HALF_TILE_SIZE = TILE_SIZE / 2
BORDER_SIZE = 45
# Directions
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
class GUI:
    """
    Class to run game GUI.

    NOTE(review): this class calls `simplegui` and `codeskulptor`, neither of
    which is imported in this file (only pygame is) - every method will raise
    NameError at runtime. It looks like a partially ported CodeSkulptor GUI;
    confirm which backend is intended before use.
    """
    def __init__(self, game):
        # Size the frame from the game grid plus a border on every side.
        self._rows = game.get_grid_height()
        self._cols = game.get_grid_width()
        self._frame = simplegui.create_frame('2048',
                        self._cols * TILE_SIZE + 2 * BORDER_SIZE,
                        self._rows * TILE_SIZE + 2 * BORDER_SIZE)
        self._frame.add_button('New Game', self.start)
        self._frame.set_keydown_handler(self.keydown)
        self._frame.set_draw_handler(self.draw)
        self._frame.set_canvas_background("#BCADA1")
        self._frame.start()
        self._game = game
        # Tile sprite sheet: one tile image per power of two, side by side.
        url = codeskulptor.file2url(IMAGENAME)
        self._tiles = simplegui.load_image(url)
        self._directions = {"up": UP, "down": DOWN,
                            "left": LEFT, "right": RIGHT}
    def keydown(self, key):
        """
        Keydown handler: map arrow keys to game move directions.
        """
        for dirstr, dirval in self._directions.items():
            if key == simplegui.KEY_MAP[dirstr]:
                self._game.move(dirval)
                break
    def draw(self, canvas):
        """
        Draw handler: blit one sprite per grid cell, indexed by log2(tile).
        """
        for row in range(self._rows):
            for col in range(self._cols):
                tile = self._game.get_tile(row, col)
                if tile == 0:
                    val = 0
                else:
                    val = int(math.log(tile, 2))
                canvas.draw_image(self._tiles,
                    [HALF_TILE_SIZE + val * TILE_SIZE, HALF_TILE_SIZE],
                    [TILE_SIZE, TILE_SIZE],
                    [col * TILE_SIZE + HALF_TILE_SIZE + BORDER_SIZE,
                     row * TILE_SIZE + HALF_TILE_SIZE + BORDER_SIZE],
                    [TILE_SIZE, TILE_SIZE])
    def start(self):
        """
        Start the game.
        """
        self._game.reset()
def run_gui(game):
    """
    Instantiate and run the GUI.
    """
    GUI(game).start()
|
989,056 | 004eba63115a11de6a3aab599e88a9878cf5474c | import sys; sys.path.append("/Users/Shared/cs8");
import cTurtle
t = cTurtle.Turtle()
def f(x):
    """Collatz step: return x // 2 for even x, otherwise 3*x + 1."""
    return x // 2 if x % 2 == 0 else 3 * x + 1
def draw(xs):
    """Draw nested figures with the module-level turtle ``t``, shrinking the
    size through the Collatz iteration f(x) until it reaches 1 or below."""
    x=xs
    while x>1:
        # Triangle with side length x.
        for i in range(3):
            t.forward(x)
            t.right(120)
        # Three red spokes of length x.
        for i in range(3):
            t.right(180)
            t.pencolor('red')
            t.right(40)
            t.forward(x)
            t.right(20)
            t.pencolor('black')
            t.right(20)
        # Next size comes from the Collatz step.
        y=f(x)
        x=y
989,057 | 66fc239cf35f3f44e9fb2d5fef501e754d6a4a88 | /home/rosan/anaconda3/envs/Jarvis/lib/python3.6/copyreg.py |
989,058 | 595291b4ea15a9d6de69f8a47a966bdbd9f98e58 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
    # Prompt (Russian): "Number of exams passed ="
    N = int(input("Количество сданных экзаменов = "))
    if N > 20:
        # More than 20 exams is treated as invalid input ("Error" to stderr).
        print("Ошибка", file=sys.stderr)
        exit(1)
    # Pick the Russian plural form of "exam" for N.
    # NOTE(review): N in {0, 2, 3, 4} all take " экзамена" here, but 0 should
    # arguably take " экзаменов"; negative N is also not rejected - confirm.
    if N == 1:
        a = " экзамен"
    elif N <= 4:
        a = " экзамена"
    else:
        a = " экзаменов"
    # "We successfully passed <N> exam(s)"
    print("Мы успешно сдали ", N, a)
989,059 | 7bea9d1682bd57b6f9c60eedb6ba0934b11889b9 | # Considerando a existência de notas (cédulas) nos valores R$ 100, R$ 50, R$ 20, R$ 10, R$ 5, R$ 2 e
# R$ 1, escreva um programa que capture um valor inteiro em reais (R$) e determine o menor
# número de notas para se obter o montante fornecido. O programa deve exibir o número de notas
# para cada um dos valores de nota existentes.
valorTotal = input('Insira a quantia monetaria desejada: ')
|
989,060 | d620b004070838f01d63e43f34a205105991249d | from lexer import *
from tree import *
# from tabletext import *
error_table = {"Wrong delimiter": -1, "Wrong key_word": -2, "No such identifier": -3, "Wrong integer": -4,
"Must be empty": -5, "Missing lexema \'unsigned-integer\'": -6, "Semantical error: label is already declareted": -7}
temp = lexer("test2.txt")
lex_list = temp[1]
lex_list_err = temp[0]
tree = Tree()
def scan(dictionary, value):
    """Reverse lookup: return the first key mapped to `value`, else None."""
    for key, mapped in dictionary.items():
        if mapped == value:
            return key
    return None
def err(err_number, err_pos):
    """Report a syntax/semantic error and terminate the program.

    Records the error code in the global parse tree, prints the tree, the
    error name looked up from error_table, the source position from
    lex_list_err, and the offending lexeme, then exits via quit().
    """
    tree.add(err_number)
    tree.current_element = tree.current_element.parent_element
    tree.print_tree()
    print(scan(error_table, err_number))
    print(' line :' + str(lex_list_err[err_pos][2]) + ' column: ' + str(lex_list_err[err_pos][3]))
    print('lexema: ' + str(lex_list[err_pos]))
    quit()
def declaration_list_proc(i):
    """Parse <declarations-list> at lexeme index i (must be empty here).

    Only an empty list is accepted: the next lexeme must be 41 (')'),
    otherwise err(-5) aborts. Returns the (unchanged) index i.
    """
    tree.add('declarations-list')
    lexem = lex_list[i]
    if lexem == 41:
        tree.add('< empty >')
        tree.current_element = tree.current_element.parent_element
    else:
        err(-5, i)
    lexem = lex_list[i]
    tree.current_element = tree.current_element.parent_element
    return i
def parameters_list_proc(i):
    """Parse <parameters-list> at lexeme index i.

    Either '(' (40) <declarations-list> ')' (41), or empty. Returns the
    index of the first lexeme after the list.
    """
    tree.add('parameters-list')
    lexem = lex_list[i]
    if lexem == 40:
        tree.add(scan(table.s_sep_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        i += 1
        i = declaration_list_proc(i)
        lexem = lex_list[i]
        if lexem == 41:
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
            i+=1
        else:
            err(-1, i)
    else:
        tree.add('< empty >')
        tree.current_element = tree.current_element.parent_element
        # i += 1
    # NOTE(review): debug print of the current index - left over?
    print(i)
    tree.current_element = tree.current_element.parent_element
    return i
def label_list_proc(i):
    """Parse <labels-list> at lexeme index i (recursive).

    Accepts zero or more of: ',' (44) followed by an unsigned integer,
    recursing for further labels. Returns the index after the list.
    """
    tree.add('labels-list')
    lexem = lex_list[i]
    if lexem == 44:
        tree.add(scan(table.s_sep_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        i += 1
        lexem = lex_list[i]
        if scan(table.dig_dic, lexem):
            tree.add('unsigned-integer')
            tree.add(scan(table.dig_dic, lexem))
            tree.current_element = tree.current_element.parent_element
            tree.current_element = tree.current_element.parent_element
            i += 1
            i = label_list_proc(i)
    else:
        tree.add('< empty >')
        tree.current_element = tree.current_element.parent_element
    tree.current_element = tree.current_element.parent_element
    return i
def label_declarations_proc(i):
    """Parse <label-declarations> at lexeme index i.

    Either the LABEL keyword (405) followed by an unsigned integer, an
    optional <labels-list> and ';' (59), or empty when BEGIN (402) follows.
    Any other lexeme aborts via err. Returns the index after the clause.
    """
    tree.add('label-declarations')
    lexem = lex_list[i]
    if lexem == 405:
        tree.add(scan(table.key_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        i += 1
        lexem = lex_list[i]
        tree.add('unsigned-integer')
        if scan(table.dig_dic, lexem):
            tree.add(scan(table.dig_dic, lexem))
            tree.current_element = tree.current_element.parent_element
            tree.current_element = tree.current_element.parent_element
            i += 1
            i = label_list_proc(i)
        else:
            err(-6, i)
        lexem = lex_list[i]
        if lexem == 59:
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-1, i)
        i += 1
    elif lexem == 402:
        tree.add('< empty >')
        tree.current_element = tree.current_element.parent_element
    else:
        err(-2, i)
    tree.current_element = tree.current_element.parent_element
    # lexem = lex_list[i]
    return i
def declarations_proc(i):
    """Parse <declarations> at lexeme index i (delegates to label declarations)."""
    tree.add('declarations')
    i = label_declarations_proc(i)
    tree.current_element = tree.current_element.parent_element
    return i
def statement_list_proc(i):
    """Parse <statements-list> at lexeme index i.

    As written, only an empty list is accepted: END (403) must follow,
    otherwise err(-5) aborts.

    NOTE(review): everything after the first ``return i`` below - both the
    commented-out draft and the live-looking code handling lexemes
    1002/1003/407 - is unreachable dead code; confirm whether it should be
    deleted or re-enabled.
    """
    tree.add('statements-list')
    lexem = lex_list[i]
    if lexem == 403:
        tree.add('< empty >')
        tree.current_element = tree.current_element.parent_element
    else:
        err(-5, i)
    tree.current_element = tree.current_element.parent_element
    return i
# def statement_list_proc(i):
#     lexem = lex_list[i]
#     if lexem == 1002:
#         tree.add('statements-list')
#         tree.add('st')
#         tree.add(scan(table.idn_dic, lexem))
#         tree.current_element = tree.current_element.parent_element
#         i += 1
#         if lex_list[i] == 1002 or lex_list[i] == 407:
#             i = statement_list_proc(i)
#         elif lex_list[i] == 403 or lex_list[i] == 1003:
#             tree.add('statements-list')
#             tree.add('< empty >')
#             tree.current_element = tree.current_element.parent_element
#             tree.current_element = tree.current_element.parent_element
#         lexem = lex_list[i]
#     elif lex_list[i] == 407:
#         tree.add('statements-list')
#         tree.add('st')
#         tree.add(scan(table.key_dic, 407))
#         tree.current_element = tree.current_element.parent_element
#         tree.current_element = tree.current_element.parent_element
#         i += 1
#         if lex_list[i] == 1002:
#             i = statement_list_proc(i)
#         elif lex_list[i] == 403 or lex_list[i] == 1003:
#             tree.add('statements-list')
#             tree.add('< empty >')
#             tree.current_element = tree.current_element.parent_element
#             tree.current_element = tree.current_element.parent_element
#         # tree.current_element = tree.current_element.parent_element
#         lexem = lex_list[i]
#
#     tree.current_element = tree.current_element.parent_element
    if lexem == 1003:
        tree.add(scan(table.idn_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        tree.current_element = tree.current_element.parent_element
        i += 1
        if lex_list[i] == 1002:
            i = statement_list_proc(i)
        elif lex_list[i] == 403 or lex_list[i] == 1003:
            tree.add('statements-list')
            tree.add('< empty >')
            tree.current_element = tree.current_element.parent_element
            tree.current_element = tree.current_element.parent_element
        elif lex_list[i] == 407:
            tree.add('statements-list')
            tree.add('st')
            tree.add(scan(table.key_dic, 407))
            tree.current_element = tree.current_element.parent_element
            tree.current_element = tree.current_element.parent_element
            i += 1
            if lex_list[i] == 1002:
                i = statement_list_proc(i)
            elif lex_list[i] == 403 or lex_list[i] == 1003:
                tree.add('statements-list')
                tree.add('< empty >')
                tree.current_element = tree.current_element.parent_element
                tree.current_element = tree.current_element.parent_element
            lexem = lex_list[i]
        tree.current_element = tree.current_element.parent_element
        tree.current_element = tree.current_element.parent_element
    # elif lexem == 403:
    #     tree.add('statements-list')
    #     tree.add('< empty >')
    #     tree.current_element = tree.current_element.parent_element
    #     tree.current_element = tree.current_element.parent_element
    # else:
    #     err(-5, i)
    print(lexem)
    return i
def block_proc(i):
    """Parse a <block>: declarations, keyword 402 (BEGIN -- confirm against
    the keyword table), a statement list, then keyword 403 (END).

    Returns the index of the last lexeme consumed by the block.
    """
    tree.add('block')

    def expect_keyword(code, pos):
        # Attach the keyword node when it matches; report error -2 otherwise.
        if lex_list[pos] == code:
            tree.add(scan(table.key_dic, code))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-2, pos)

    i = declarations_proc(i)
    expect_keyword(402, i)
    i += 1
    i = statement_list_proc(i)
    expect_keyword(403, i)
    # Leave the 'block' node.
    tree.current_element = tree.current_element.parent_element
    return i
def procedure_identifier_proc(i):
    """Parse <procedure-identifier> -> <identifier> at lexeme position *i*.

    Identifier codes start at 1000; anything lower reports error -3.
    """
    code = lex_list[i]
    tree.add('procedure-identifier')
    tree.add('identifier')
    if code < 1000:
        err(-3, i)
    else:
        tree.add(scan(table.idn_dic, code))
        tree.current_element = tree.current_element.parent_element
    # Pop back out of 'identifier', then 'procedure-identifier'.
    tree.current_element = tree.current_element.parent_element
    tree.current_element = tree.current_element.parent_element
def program_proc():
    """Parse the top-level <program> rule.

    Two alternatives, chosen by the first lexeme:
      401 -> identifier ';' <block> '.'   (PROGRAM form, judging by the rule)
      404 -> identifier <parameters-list> ';' <block> ';'  (PROCEDURE form)
    Any other first lexeme reports error -2.  Codes 59 and 46 are the ASCII
    codes for ';' and '.' looked up in the single-separator table.
    """
    tree.add('program')
    i = 0
    lexem = lex_list[i]
    if lexem == 401:
        # Keyword node for the program header.
        tree.add(scan(table.key_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        i += 1
        lexem = lex_list[i]
        procedure_identifier_proc(i)
        i += 1
        lexem = lex_list[i]
        if lexem == 59:
            # ';' after the program name.
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-1, i)
        i += 1
        i = block_proc(i)
        i += 1
        lexem = lex_list[i]
        if lexem == 46:
            # '.' terminates this form.
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-1, i)
        #################
    elif lexem == 404:
        # Keyword node for the procedure header.
        tree.add(scan(table.key_dic, lexem))
        tree.current_element = tree.current_element.parent_element
        i += 1
        lexem = lex_list[i]
        procedure_identifier_proc(i)
        i += 1
        i = parameters_list_proc(i)
        # i += 1
        lexem = lex_list[i]
        if lexem == 59:
            # ';' after the parameter list.
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-1, i)
        i += 1
        i = block_proc(i)
        i += 1
        lexem = lex_list[i]
        # print(lexem)
        if lexem == 59:
            # ';' terminates this form.
            tree.add(scan(table.s_sep_dic, lexem))
            tree.current_element = tree.current_element.parent_element
        else:
            err(-1, i)
    else:
        err(-2, i)
    # Leave the 'program' node.
    tree.current_element = tree.current_element.parent_element
def signal_program_proc():
    """Driver: parse the lexeme stream (if non-empty), then dump the
    lexemes, the parse tree, the error table and the listing to stdout.

    Returns the module-global parse tree.
    """
    if lex_list:
        program_proc()
    print(lex_list)
    tree.print_tree()
    print()
    print(error_table)
    print()
    tree.listing()
    # print(lex_list[])
    return tree
if __name__ == '__main__':
    # print(lex_list)
    signal_program_proc()  # run the parser when executed as a script
|
989,061 | 05332314dd23f6561a63449afa24a8209074e329 | def foo(n):
return 2 * n
def square(x):
    """Return *x* raised to the second power."""
    return pow(x, 2)
def cubic(x):
    """Return *x* raised to the third power."""
    return pow(x, 3)
# Both compositions print 64: (2**3)**2 == (2**2)**3.
print(square(cubic(2)))
print(cubic(square(2)))
|
989,062 | 21d0ee13e2e9d256f457a0b5795c878b8b86bf36 | # Generated by Django 3.2.4 on 2021-06-22 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make the ``balance``, ``expense`` and
    ``income`` float columns of the ``expense`` model nullable with a
    0.0 default."""
    dependencies = [
        ('main', '0008_desc_type1'),
    ]
    operations = [
        migrations.AlterField(
            model_name='expense',
            name='balance',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='expense',
            name='expense',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
        migrations.AlterField(
            model_name='expense',
            name='income',
            field=models.FloatField(blank=True, default=0.0, null=True),
        ),
    ]
|
989,063 | b9355f7cf9fa59c900b0008c586d38bc1022f22b | import xlrd
import pymysql
import time
# Open the workbook holding the data and select the sheet that stores it.
book = xlrd.open_workbook(r"F:\python自动化测试\day07_mysql工具类与excel读取\2020年每个月的销售情况.xlsx")
sheet = book.sheet_by_index(0)
# Establish a MySQL connection.
conn = pymysql.connect(
    host='localhost',
    user='root',
    passwd='',
    db='fuzhuang',
    port=3306,
    charset='utf8'
)
# Obtain a cursor.
cur = conn.cursor()
# Build the parameterized INSERT statement (columns: date, garment name,
# unit price, stock quantity, sales amount).
query = 'insert into 12yue1 (日期,服装名称,单价,库存数量,销售额) values (%s, %s, %s, %s, %s)'
# Iterate over every data row of the sheet; start at row 1 to skip the header.
for r in range(1, sheet.nrows):
    日期 = sheet.cell(r, 0).value
    服装名称 = sheet.cell(r, 1).value
    单价 = sheet.cell(r, 2).value
    库存数量 = sheet.cell(r, 3).value
    销售额 = sheet.cell(r, 4).value
    values = (日期,服装名称,单价,库存数量,销售额)
    # Execute the SQL statement for this row.
    cur.execute(query, values)
cur.close()
conn.commit()
conn.close()
# Report how many columns/rows were imported.
columns = str(sheet.ncols)
rows = str(sheet.nrows)
print("导入 " +columns + " 列 " + rows + " 行数据到MySQL数据库!")
989,064 | ab4facf89bca436ab87c69212771f233a7612f2e |
import nltk
import string
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
import csv
def getnegCount(sentence, words_negscore):
    """Return how many tokens of *sentence* appear in *words_negscore*.

    :param sentence: raw sentence string, tokenized with nltk.
    :param words_negscore: dict (or set) whose keys are negative words.
    """
    text = nltk.word_tokenize(sentence)
    # sum over a generator replaces the manual counter loop.
    return sum(1 for word in text if word in words_negscore)
def negativity_count(name):
    """Return the total number of negative words across all sentences of
    the text file *name*.

    The negative-word lexicon is loaded from 'negative_words.csv' (first
    column of every row).
    """
    words_negscore = {}
    # Text mode: csv.reader on Python 3 requires str rows, and the original
    # binary mode ('rb') raises a TypeError there.
    with open('negative_words.csv', 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            words_negscore[row[0]] = 1
    # Read the document in one go (the original concatenated line by line
    # and never closed the handle).
    with open(name, 'r') as f:
        text = f.read()
    sent_tokenize_list = sent_tokenize(text)
    neg_count = 0
    for sent in sent_tokenize_list:
        neg_count += getnegCount(sent, words_negscore)
    return neg_count
989,065 | 45adaa848328b4d54242eef2781cca66a3c66e13 | from threading import Timer
import time
def debounce(wait):
    """Postpone a function's execution until *wait* seconds have elapsed
    since the most recent call.

    Every new call cancels the timer armed by the previous one, so the
    wrapped function runs only once the calls stop arriving.  The pending
    timer is exposed as the ``t`` attribute of the decorated function.

    :type wait: int
    :param wait: The amount of Seconds to wait before the next call can execute.
    """
    from functools import wraps  # local import keeps module-level deps unchanged

    def decorator(fun):
        @wraps(fun)  # preserve the wrapped function's name and docstring
        def debounced(*args, **kwargs):
            def call_it():
                fun(*args, **kwargs)
            try:
                # Cancel the previously armed timer, if any.
                debounced.t.cancel()
            except AttributeError:
                # First call: no timer exists yet.
                pass
            debounced.t = Timer(wait, call_it)
            debounced.t.start()
        return debounced
    return decorator
|
989,066 | 0d0e50906566e11f0a751405ba96ea1cc5690085 | class product:
def __init__(self):
self.name='SAMSUNG GALAXY E7'
self.description='GOOD'
self.price=21000
# Demo: create one product and print each attribute on its own line.
p1=product()
print(p1.name)
print(p1.description)
print(p1.price)
989,067 | f34b8d817fd9e52f6f6d8c06331fd85b2293a315 | from gensim.models.word2vec import Word2Vec
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import numpy as np
model = Word2Vec.load('Failure_workaround.model')
#keywords = [u'clock',u'flash',u'dma',u'sdram',u'cache',u'parity',u'state',u'set',u'register',u'interrupt',u'interrupts',u'mode',u'reset',u'nxp',u'ti',u'st']
# Top 200
#keywords = ['register', 'bit', '=', 'channel', 'set', 'mode', 'clock', '1', 'are', 'can', 'will', 'by', 'from', 'reset', 'read', 'that', '0', 'must', 'it', 'status', 'does', 'chip', 'interrupt', 'after', 'silicon', 'during', 'drdy', 'adc:', 'same', 'conversion', 'value', 'has', 'software', 'may', 'only', '1.', 'bits', 'one', 'should', 'end', 'before', 'transfer', 'external', 'master', 'than', 'use', 'none.', 'govre', 'device', 'line', 'flag', 'select', 'write', 'spi:', 'start', 'controller', 'two', 'behavior', '2', 'there', 'reading', 'revision', 'then', 'no', 'time', 'output', 'do', 'first', 'memory', 'all', 'following', 'system', 'low', 'signal', 'used', 'update', 'but', 'state', 'pwm:', 'counter', 'dma', 'possible', 'frequency', 'functional', 'while', 'cycle', 'period', 'disabling', 'high', 'flash', 'between', '[datasheet]', 'register.', 'usage', 'been', 'notes','sam7s', 'access', 'cpu', 'current', 'which', 'cache', 'series', 'known', 'slave', 'leakage', 'input', 'l2', 'any', 'design', 'spi', 'using', 'error', 'wait', 'hardware', 'active', 'event', 'timer', 'mode.', 'being', 'into', 'buffer', 'exceptions', 'v', '2.', 'adc', 'specifications', 'pin', '-', 'cdr', 'configured', 'mode,', 'voltage', 'sdram', 'disable', 'serial', 'disabled', 'usart:', 'condition', 'l1d', 'boot', 'maximum', 'cleared', 'programmed', 'revisions', 'instant', 'instead', 'documentation', 'user', 'new', 'none', 'have', 'power', 'equal', 'edge', 'also', 'control', '1,', 'ddr', 'code', 'number', 'transmitter', 'other', 'feedback', 'match', 'revised', 'submit', 'up', '2014', 'june', 'twi:', 'internal', 'receive', 'processor', 'i/o', '.', 'usb', 'eoc', 'active,', 'pa0-pa16', 'field', 'figure', 'aligned', 'bus', 'interrupts', 'enable', 'occurs', 'bad', 'scbr', 'nor', 'limitations', 'sleep', 'pdc', 'set.', 'performed', 'issue', 'reset.', 'already', 'pulse', 'sdma', 'writing', 'frame', 'supply', 'ssc:', 'selected', '1.0', 'enabled', '4', '3', 'watchdog', '2011', '*/', '/*', 'www.ti.com', 'equals', 
'rev.', 'generated', 'peripheral', 'pio:', 'fixed', 'setting', 'nrst', '1.1', 'rx', 'address', 'receiver', '2.0,', 'case', 'sent', 'cts', '2.1,', 'second', 'more', 'load', 'written', 'pins', 'through', 'example,', 'rise', 'updated', '25', 'incorporated', 'character', 'handshaking', 'command', 'pll', 'host', 'ram', 'baudrate', 'burst', 'march', 'neither', 'b', 'multiple', 'table', 'mhz', 'i', 'dcd', 'last', 'clear', 'additional', 'interface', 'transmit', 'where', 'timing', 'due', 'spck', 'instruments', 'registers', 'copyright']
# Top 500
#keywords=['x', 'data', 'bit', 'register', 'mode', 'channel', 'set', 'clock', 'spi', 'adc', 'reset', 'pa', 'l', 'read', 'pwm', 'lpc', 'bits', 'conversion', 'must', 'chip', 'interrupt', 'v', 'status', 'c', 'drdy', 'transfer', 'silicon', 'write', 'software', 'value', 'time', 'may', 'device', 'one', 'd', 'memory', 'line', 'start', 'end', 'active', 'external', 'twi', 'use', 'master', 'flag', 'govre', 'none.', 'low', 'two', 'select', 'm', 'output', 'controller', 'behavior', 'state', 'sam', 'cpu', 'frequency', 'reading', 'power', 'revision', 'input', 'possible', 'following', 'first', 'usb', 'system', 'used', 'high', 'eoc', 'dma', 'sr', 'flash', 'datasheet', 'signal', 'cycle', 'counter', 'cache', 'y', 'update', 'sdma', 'timer', 'es', 'usart', 'emc', 'rtt', 'instead', 'functional', 'period', 'access', 'atarm', 'disabling', 'register.', 'notes', 'usage', 'boot', 'slave', 'pin', 'current', 'disabled', 'event', 'arm', 'series', 'using', 'voltage', 'enabled', 'design', 'wait', 'known', 'error', 'leakage', 'f', 'mhz', 'hardware', 'field', 'mode.', 'ddr', 'dsp', 'condition', 'occurs', 'buffer', 'match', 'example', 'dvdd', 'exceptions', 'sdram', 'user', 'performed', 'b', 'o', 'p', 'disable', 'oct', 'specifications', 'pdc', 'scbr', 'configured', 'pio', 'number', 'cdr', 'ssc', 'edge', 'code', 'programmed', 'revisions', 'also', 'serial', 'omap', 'cleared', 'nrst', 'bus', 'interrupts', 'maximum', 'tx', 'last', 'sleep', 'none', 'instant', 'new', 'documentation', 'frame', 'equal', 'control', 'transmitter', 'enable', 'conditions', 'r', 'internal', 'feedback', 'processor', 'rx', 'sprz', 'revised', 'receive', 'submit', 'june', 'case', 'aligned', 'figure', 'issue', 'supply', 'sent', 'bad', 'peripheral', 'burst', 'pull', 'limitations', 'selected', 'receiver', 'reset.', 'set.', 'pulse', 'already', 'load', 'writing', 'note', 'watchdog', 'digital', 'pins', 'fixed', 'baudrate', 'ram', 'equals', 'www.ti.com', 'rev.', 'rise', 'address', 'generated', 'setting', 'byte', 'workaround', 'clear', 
'command', 'idma', 'z', 'cts', 'page', 'second', 'rising', 'csr', 'written', 'instruction', 'host', 'ovre', 'dcd', 'updated', 'character', 'incorporated', 'pll', 'table', 'interface', 'multiple', 'block', 'problem', 'size', 'handshaking', 'level', 'additional', 'correctly', 'transmit', 'march', 'neither', 'single', 'overrun', 'work', 'general', 'csaat', 'registers', 'timing', 'delay', 'due', 'texas', 'spck', 'copyright', 'instruments', 'received', 'less', 'transfers', 'priority', 'duty', 'operation', 'vpull', 'cycles', 'result', 'drive', 'transmission', 'inputs', 'non', 'different', 'temperature', 'section', 'higher', 'lcdr', 'ff', 'see', 'however', 'devices', 'occur', 'nack', 'connected', 'pmc', 'generation', 'loss', 'cannot', 'nand', 'per', 'although', 'another', 'id', 'buff', 'cause', 'recommended', 'trigger', 'txcomp', 'correct', 'operating', 'str', 'g', 'left', 'slow', 'either', 'source', 'chidx', 'sequence', 'least', 'within', 'application', 'cs', 'capture', 'cpol', 'us', 'unit', 'order', 'module', 'asynchronous', 'buffers', 'impact', 'pc', 'function', 'might', 'inactive', 'signals', 'configuration', 'effect', 'device.', 'max', 'take', 'ncpha', 'consumption', 'holding', 'ma', 'method', 'rtc', 'ready', 'required', 'phy', 'hsuart', 'chipintn', 'xoff', 'sram', 'characteristics', 'tk', 'pru', 'lead', 'port', 'running', 'completion', 'activity', 'would', 'abort', 'synchro', 'synchronous', 'valid', 'rate', 'fifo', 'vddio', 'aintc', 'request', 'periods', 'bit.', 'stop', 'ns', 'subsequent', 'empty', 'details', 'perform', 'periodic', 'writes', 'lower', 'values', 'whereas', 'analog', 'converted', 'starting', 'thr', 'e', 'channels', 'programming', 'back', 'expected.', 'i.e.', 'stored', 'successively', 'n', 'leads', 'speed', 'states', 'always', 'cdtyx', 'constraints', 'affected', 'data.', 'characters', 'automatically', 'falling', 'memory.', 'nd', 'bytes', 'link', 'functionality', 'ecc', 'packet', 'clk', 'isr', 'core', 'generate', 'need', 'requests', 'word', 'operate', 
'k', 'step', 'selects', 'real', 'erase', 'patch', 'mci', 'vdd', 'transfer.', 'parameter', 'clears', 'lost.', 'logic', 'lost', 'occurring', 'vpbdiv', 'rxbrk', 'edma', 'zero.', 'regulator', 'uint', 'impedance', 'october', 'causes', 'lastxfer', 'gpi', 'flag.', 'go', 'lock', 'correspond', 'dqs', 'change', 'ccntx', 'main', 'electrical', 'around', 'tf', 'cprdx', 'corresponding', 'nvm', 'up.', 'limitation', 'check', 'still', 'without', 'delayed', 'ldr', 'lines', 'center', 'add', 'therefore', 'mck', 'shown', 'switching', 'deep', 'corruption', 'min', 'could', 'megamodule', 'description', 'idle', 'divider', 'impossible', 'probability', 'done', 'level.', 'range', 'way', 'thumb', 'accesses', 'normal', 'transmitting', 'sr.', 'incorrect', 'tc', 'prevent', 'regardless', 'syscfg', 'transition', 'driven', 'complete', 'certain', 'taken', 'soon', 'rhr', 'process', 'enabled.', 'occurs.', 'point']
# NOTE(review): every `keywords = [...]` definition above is commented out,
# so this script raises NameError as shipped -- uncomment one list first.
print(len(keywords))  # parenthesized: valid on both Python 2 and 3
vectors = [model[word] for word in keywords]
# Project the word vectors down to 2-D for plotting.
tsne = TSNE(n_components=2, random_state=0)
# np.asfarray was removed in NumPy 2.0; asarray with a float dtype is equivalent.
vectors2d = tsne.fit_transform(np.asarray(vectors, dtype='float'))
for point, word in zip(vectors2d, keywords):
    plt.scatter(point[0], point[1])
    plt.annotate(word,
                 xy=(point[0], point[1]),
                 size='x-large')
plt.show()
989,068 | 88c724a0aa42d55642231b506ad0dcead26e9b40 | # ipcalc/api/pingy/models.py
# Flask Imports
from flask_restplus import fields
# Local Imports
from . import api
# Swagger payload schema for a single-host ping request.
pingy_model = api.model('Pingy', {
    'ip_address': fields.String(required=True, description='IP Address'),
})
# Swagger payload schema for pinging every host in a subnet.
pingy_multi_model = api.model('Multi Pingy', {
    'subnet': fields.String(required=True, description='Subnet'),
})
|
989,069 | 5c73314d3b554cab0ddac4809ac1fc2302718887 | print('*****BIENVENIDO AL EJERCICIO 002*****')
# Read the side of a square, then report its perimeter and area.
lado = int(input('porfavor ingrese el lado del cuadrado: \n'))
perimetro = 4 * lado
area = lado ** 2
print(f'El perímetro es {perimetro} y el área es {area}')
|
989,070 | 2a5ef5636ec5dd8a961133be925dec882fd1a889 | # Generated by Django 2.0.1 on 2018-04-16 21:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the ``Response`` model to ``Comment``."""
    dependencies = [
        ('board', '0004_auto_20180416_1238'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Response',
            new_name='Comment',
        ),
    ]
|
989,071 | 1134987dd10a8073744643a5b83053e37ae2095e | """template.py
This is a template"""
def toStrConj(List):
    """Join literal strings into one conjunction, padded with a trailing
    tautology so the formula stays syntactically closed:
    ['a', 'b'] -> '(a and b and  (TRUE or -TRUE))'.
    """
    assert List != []
    body = ''.join(lit + ' and ' for lit in List)
    return '(' + body + ' (TRUE or -TRUE))'
def toStrDisj(List):
    """Join literal strings into one disjunction, padded with a trailing
    contradiction so the formula stays syntactically closed:
    ['a', 'b'] -> '(a or b or (TRUE and -TRUE) )'.
    """
    assert List != []
    body = ''.join(lit + ' or ' for lit in List)
    return '(' + body + '(TRUE and -TRUE) )'
def toStrCNF(ListList):
    """Convert a formula in CNF represented as a list of lists of literal
    strings into a single string: each inner list becomes a disjunction,
    and the disjunctions are joined into one conjunction."""
    assert ListList != []
    return toStrConj([toStrDisj(disj) for disj in ListList])
def main():
    """Print a propositional encoding of the n-queens problem.

    Proposition p<i>d<j> pairs a position index i with a d-index j
    (row/column -- confirm against the course statement).  Constraints:
    each i takes exactly one j, each j exactly one i, and no two queens
    share a diagonal in either direction.
    """
    # Specify your value n
    n = 8
    # Preprocessing
    n += 1  # because of Python's end-exclusive range
    # First constraint: every i has some j ...
    C1a = [['p' + str(i) + 'd' + str(j) for j in range(1, n)] for i in
           range(1, n)]
    strC1a = toStrCNF(C1a)
    # ... and at most one j (pairwise exclusion).
    C1b = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(i) + 'd' + str(k) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if k != j]
    strC1b = toStrConj(C1b)
    # Second constraint: every j has exactly one i.
    C2a = [['p' + str(i) + 'd' + str(j) for i in range(1, n)] for j in
           range(1, n)]
    strC2a = toStrCNF(C2a)
    C2b = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(k) + 'd' + str(j) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if k != i]
    strC2b = toStrConj(C2b)
    # Third constraint: -45-degree diagonal exclusion (both offsets).
    C3a = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(i + k) + 'd' + str(j + k) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if 1 <= (i + k) and (i + k) < n and 1 <= (j + k) and (j + k) < n]
    strC3a = toStrConj(C3a)
    C3b = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(i - k) + 'd' + str(j - k) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if 1 <= (i - k) and (i - k) < n and 1 <= (j - k) and (j - k) < n]
    strC3b = toStrConj(C3b)
    # Fourth constraint: 45-degree diagonal exclusion (both offsets).
    C4a = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(i + k) + 'd' + str(j - k) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if 1 <= (i + k) and (i + k) < n and 1 <= (j - k) and (j - k) < n]
    strC4a = toStrConj(C4a)
    C4b = ['(p' + str(i) + 'd' + str(j) + ' => -p' + str(i - k) + 'd' + str(j + k) + ')'
           for i in range(1, n) for j in range(1, n) for k in range(1, n)
           if 1 <= (i - k) and (i - k) < n and 1 <= (j + k) and (j + k) < n]
    strC4b = toStrConj(C4b)
    # Parenthesized print: the statement form was Python 2 only; for a
    # single argument this works identically on both major versions.
    print(toStrConj([strC1a, strC1b, strC2a, strC2b, strC3a, strC3b, strC4a, strC4b]))
if __name__ == '__main__':
    main()  # emit the encoding when run as a script
|
989,072 | 5a71b312590457e3b73228992144478d6ed917ab | # importing required modules
import threading , socket , os
from Code import *
import ast
class Client:
    """Console chat client: authenticates against the local server with a
    mailed token, exchanges X3DH/double-ratchet key bundles (helpers come
    from Code.py), then chats over two threads.

    NOTE(review): ``user`` and ``sock`` are class attributes, so they are
    shared across instances -- acceptable for this single-instance script.
    """
    # Creating Socket
    user = None
    sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    def sendmess(self): # Sending Message
        """Read lines from stdin forever, encrypt via the ratchet, and send
        them together with our current DH-ratchet public key."""
        while True:
            msg = input("You : ")
            data = {"msg":self.user.send(bytes(msg,"utf-8")),"DHratchet":serialize(self.user.DHratchet.public_key(),True)}
            self.sock.send(str(data).encode("utf-8"))
    def recvmess(self): # Receiving message
        """Receive messages, step the DH ratchet when the peer's key changed,
        decrypt and print.

        NOTE(review): recv(1024) assumes each message arrives in a single
        chunk -- there is no framing protocol; confirm messages stay small.
        """
        while True:
            data = self.sock.recv(1024)
            if not data:
                break
            data = ast.literal_eval(data.decode("utf-8"))
            flag = False
            # A new peer ratchet key means our own ratchet must advance too.
            if self.user.other["DHratchet"] is None or serialize(self.user.other["DHratchet"],True) != data["DHratchet"]:
                self.user.other["DHratchet"] = unserialize(data["DHratchet"],True)
                flag = True
            msg = self.user.recv(data["msg"],flag)
            # Backspaces erase the pending "You : " prompt before printing.
            print("\b\b\b\b\b\b\b\b\bOther : " + msg.decode("utf-8") + "\n" + "You : ",end="")
    def initialize(self):
        """Finish the handshake: load the peer's public bundle, run X3DH,
        and initialise both ratchets."""
        self.user.other = unserialize(self.sock.recv(10000))
        self.user.x3dh()
        self.user.init_ratchets()
        self.user.dh_ratchet()
    def __init__(self):
        """Connect, run the three-attempt token login, publish our public
        key bundle, and start the send/receive threads."""
        # Local Ip
        self.ip = "127.0.0.1"
        try:
            # Connect to Server
            self.sock.connect((self.ip,8000))
        except:
            print("Server not Established.") # If there is an error in the connection , then displaying error message
            exit(0)
        # First server byte decides our fate: "False" aborts, '0' -> Bob role, '1' -> Alice role.
        allow = str(self.sock.recv(100),'utf-8')
        if allow == "False":
            print("Not reachable")
            exit(0)
        # Taking Id from user , based on Id the token is generated and send token to user mail
        self.Id = input("Id: ")
        # sending Id to server
        self.sock.send(bytes(self.Id,"utf-8"))
        # Receiving message from server
        print(str(self.sock.recv(100),"utf-8"))
        # incorrect count
        i = 1
        Verified = False
        # NOTE(review): when i == 3 the loop breaks before `signal` is examined,
        # so a correct token on the third attempt is still rejected.
        while True: # infinite loop until the break statement
            token = input("Enter Key: ") # taking token from user which is sent to mail
            self.sock.send(bytes(token,"utf-8")) # sending token to server
            signal = str(self.sock.recv(100),"utf-8")
            if i == 3:
                break
            if (signal == "Incorrect"): # if the user enters incorrect password in 3 times.
                print("Wrong Key.Try again...")
                i += 1
                continue
            Verified = True
            break
        if Verified==False:
            print("S0rry 7ry 4g41n La73r !!!!!!!!!!!!!!!!!!!...")
            exit(0)
        print("\t\t\tLogediIn Successfully...!")
        # Publish this side's public key bundle; the role was picked by the server.
        if allow == '0':
            self.user = Bob()
            public = {"IKb":self.user.IKb.public_key(),"SPKb":self.user.SPKb.public_key(),"OPKb":self.user.OPKb.public_key(),"DHratchet":self.user.DHratchet.public_key() if self.user.DHratchet else None }
            self.sock.send(serialize(public))
        elif(allow == '1'):
            self.user = Alice()
            public = {"IKa":self.user.IKa.public_key(),"EKa":self.user.EKa.public_key(),"DHratchet":self.user.DHratchet.public_key() if self.user.DHratchet else None}
            self.sock.send(serialize(public))
        init = self.sock.recv(1000)
        if init:
            self.initialize()
            if isinstance(self.user,Alice):
                # Alice re-sends her bundle with the freshly derived ratchet key.
                public["DHratchet"] = serialize(self.user.DHratchet.public_key(),True)
                self.sock.send(str(public).encode("utf-8"))
        # Creating threads
        bthread = threading.Thread(target = self.sendmess)
        bthread.daemon = True
        bthread.start()
        cthread = threading.Thread(target = self.recvmess)
        cthread.start()
# Creating Client Object
client = Client()  # side effect: connects, authenticates and spawns the chat threads
|
989,073 | fad79dd788f9fe557ce2559213af34146362507f | # coding=utf-8
'''
Created on 2013-12-5
@author: lidm1
'''
import ldap
class Ldap(object):
    """
    Ldap for lenovo domain

    Thin wrapper around python-ldap: call ``connect`` to bind first, then
    look up accounts with ``search``.
    """
    def __init__(self):
        # No connection yet; ``connect`` must succeed before ``search``.
        self.ldap_obj = None
    def connect(self,user_name,password):
        """Bind to the lenovo.com directory as ``user_name@lenovo.com``.

        On success the live connection is stored in ``self.ldap_obj``; on
        failure a message is printed and ``self.ldap_obj`` stays None
        (best-effort behaviour kept from the original).
        """
        try:
            SERVER = "ldap://lenovo.com:389"
            DN = user_name + "@lenovo.com"
            l = ldap.initialize(SERVER)
            l.protocol_version = 3
            # Do not chase referrals (needed for Active Directory).
            l.set_option(ldap.OPT_REFERRALS, 0)
            l.simple_bind_s(DN, password)
            self.ldap_obj = l
        except Exception:
            # Was a bare except: narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; message typo ('connetc') fixed.
            print('ldap connect failed')
    def search(self,acc_name):
        """Return the attribute dict for account *acc_name*, or None.

        NOTE(review): *acc_name* is interpolated into the LDAP filter
        unescaped; use ldap.filter.escape_filter_chars for untrusted input.
        """
        Base = "DC=lenovo,DC=com"
        Scope = ldap.SCOPE_SUBTREE
        Filter = "(&(objectClass=user)(sAMAccountName="+acc_name+"))"
        Attrs = ["name", "userPrincipalName","departmentNumber", "telephoneNumber", "department",
                 "sAMAccountName", "mail", "manager", "title",
                 "msExchExtensionAttribute6", "employeeType", "l", "c",
                 "employeeNumber", "displayName"]
        r = self.ldap_obj.search(Base, Scope, Filter, Attrs)
        Type,user = self.ldap_obj.result(r,60)
        Name,Attrs = user[0]
        # dict.has_key() does not exist on Python 3 (the old hasattr guard
        # made this always return None there); membership test works on both.
        if isinstance(Attrs, dict) and 'displayName' in Attrs:
            return Attrs
        return None
if __name__ == '__main__':
    # Smoke test: only constructs the wrapper, no bind is attempted.
    l=Ldap()
    pass
|
989,074 | e84ad5298649533c91207a99b2cc56ac16857fd4 | from subprocess import check_output
check_output("dir C:")
|
989,075 | 9dc4e706a63f81f27bdfa15f9e9d3be4b7ab23a1 | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
create-predictor : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/forecast/create-predictor.html
describe-predictor : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/forecast/describe-predictor.html
list-predictors : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/forecast/list-predictors.html
"""
write_parameter("forecast", "delete-predictor") |
989,076 | 387c0250c3cb39bbb4f9b2ed5ffae83a54abbdf4 | #!/usr/bin/env python3.1
#
# Copyright (c) 2010, Philipp Stephani <st_philipp@yahoo.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import optparse
import os
import gzip
pattern = re.compile(br"^(Input:\d+:)([^/].*)$", re.M)
def fix_synctex_info(fname, input_dir):
    """Rewrite the Input: records of the SyncTeX file *fname* in place so
    that relative source paths become paths under *input_dir*.

    Transparently handles gzip-compressed files (``*.gz``).
    """
    def replace(match):
        # Prefix the relative path (group 2) with input_dir, normalized.
        return (match.group(1)
                + (os.path.normpath
                   (os.path.join(input_dir.encode(), match.group(2)))))
    # Pick the matching opener for plain vs. gzip-compressed files.
    open_file = gzip.open if fname.endswith(".gz") else open
    with open_file(fname, "rb") as stream:
        text = stream.read()
    text = pattern.sub(replace, text)
    with open_file(fname, "wb") as stream:
        stream.write(text)
def main():
    """CLI entry point: fix every synctex file named on the command line.

    NOTE(review): optparse has been deprecated in favour of argparse since
    Python 2.7 / 3.2.
    """
    parser = optparse.OptionParser("Usage: %prog [options] files")
    parser.add_option("-d", "--input-directory", metavar="DIR",
                      help=("use DIR as input directory "
                            "[default: current directory]"))
    parser.set_defaults(input_directory=os.getcwd())
    options, args = parser.parse_args()
    for fname in args:
        fix_synctex_info(fname, options.input_directory)
if __name__ == "__main__":
    main()  # run the fixer when executed as a script
|
989,077 | 5edd111470a6a18adaef20fc6c1ecaa8ae819d70 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 22:20:53 2021
@author: subrat
"""
##OOP python class and instances
class member:
    """A member record: first/second name, joining session, and an e-mail
    address derived from the two name parts."""

    def __init__(self, first, second, session):
        self.first = first
        self.second = second
        self.session = session
        self.email = first + second + "@gmail.com"

    def display(self):
        """Print the member's full name and session."""
        full_name = self.first + " " + self.second
        print("name:", full_name, "\nsession:", self.session)
# Demo: two members from the 2020 session.
one=member('subrat','kishore',2020)
second=member('ruchita','somani',2020)
one.display()
second.display()
|
989,078 | 0f7b136851a8d1225034f308c89a5e1ec5f37d09 | # Creates a graph.
import tensorflow as tf
#from tensorflow.compat import v1 as tf
#sess = tf.InteractiveSession()
@tf.function
def d(a,b):
    """Graph-compiled matrix product of the two constant operands."""
    return tf.matmul(a, b)
# Constant operands: a is 2x3, b is 3x2, so d(a, b) yields a 2x2 matrix.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
#c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
# Runs the op.
# tens1 = tf.constant([ [[1,2],[2,3]], [[3,4],[5,6]] ])
# print (sess.run(tens1)[1,1,0])
# self._sess.run(tf.initialize_all_variables())
# Stress loop: tf.function traces on the first call, then the cached graph
# is re-executed; results are discarded (pure smoke/benchmark test).
for i in range(100000):
    d(a,b)
print ('\n########################### No Errors ####################################')
989,079 | 6d2eb1861601cebda4916ef0a41145ce996f9c05 | import random as r
# One 64-bit random key per (square, piece) pair: 64 squares x 12 piece
# kinds (6 piece types x 2 colours).
zobrist_keys = [[r.randint(1, 2**64 - 1) for _ in range(12)] for _ in range(64)]

def hash_board(board): # chess.Board
    """Compute the Zobrist hash of *board* by XOR-ing the key of every
    occupied square."""
    key = 0
    for sq in range(64):
        colour = board.color_at(sq)
        if colour is not None:
            # piece_type 1..6 and boolean colour coerce to index 0..11.
            piece_index = board.piece_type_at(sq) + 6 * colour - 1
            key ^= zobrist_keys[sq][piece_index]
    return key
def rehash(h_0, board, move): # pre-move board, chess.Move
    """Incrementally update Zobrist hash *h_0* for *move* played on the
    pre-move *board*: toggle the mover's key on its source and destination
    squares, plus the captured piece's key when the destination is occupied.

    Piece index 5 or 11 moving off square 4 or 60 (presumably the kings
    leaving e1/e8) falls back to a full re-hash ("lazy castling").
    NOTE(review): that fallback hashes the *pre-move* board -- confirm the
    caller re-hashes after pushing the move in that case.
    """
    src, dst = move.from_square, move.to_square
    moved_ind = board.piece_type_at(src) + 6 * board.color_at(src) - 1
    if moved_ind in (5, 11) and src in (4, 60): # lazy castling
        return hash_board(board)
    updated = h_0 ^ zobrist_keys[src][moved_ind] ^ zobrist_keys[dst][moved_ind]
    captured_type = board.piece_type_at(dst)
    if captured_type is not None:
        captured_ind = captured_type + 6 * board.color_at(dst) - 1
        updated ^= zobrist_keys[dst][captured_ind]
    return updated
989,080 | f07ad77aea20f8457250ccd969d7919ef8e56d85 | from flask import Flask, request, redirect, render_template, make_response, Response
from flask_login import LoginManager, login_user, login_required, logout_user
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
from wtforms.validators import DataRequired
from flask_wtf.csrf import CSRFProtect
import secrets
import subprocess
import os
from passlib.hash import sha256_crypt
app = Flask(__name__)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
app.secret_key = secrets.token_urlsafe(24)
csrf = CSRFProtect(app)
class User:
    """In-memory account record that also satisfies the flask-login user API."""

    def __init__(self, uname, pword, twofa):
        self.uname = uname
        self.pword = pword
        self.twofa = twofa

    # --- plain accessors -------------------------------------------------
    def getUname(self):
        return self.uname

    def getPassword(self):
        return self.pword

    def get2FA(self):
        return self.twofa

    # --- flask-login protocol --------------------------------------------
    def get_id(self):
        # flask-login keys sessions by this value; usernames are unique here.
        return self.uname

    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False
# Globals
# In-memory user store: username -> User.  Lost on restart (demo app).
userDict = {}
class UserForm(FlaskForm):
    """Shared credentials form for /register and /login; the 2FA token is
    optional at form-validation level."""
    uname = StringField('User Name:', validators=[DataRequired()])
    pword = StringField('Password: ', validators=[DataRequired()])
    twofa = StringField('2FA Token:', validators=[], id='2fa')
def addUser(uname, pword, twofa):
    """Register *uname* with a sha256-crypt hash of *pword* and its 2FA token."""
    global userDict
    hashed = sha256_crypt.hash(pword)
    userDict[uname] = User(uname, hashed, twofa)
def getUser(uname):
    """Return the User registered under *uname*; raises KeyError if absent."""
    global userDict
    return userDict[uname]
def userExists(uname):
    """Return True when *uname* is already registered."""
    global userDict
    # Direct membership test replaces the if/True/else/False boilerplate.
    return uname in userDict
def passwordMatch(uname, pword):
    """Return True when *pword* verifies against the stored sha256-crypt hash.

    Raises KeyError for unknown users (callers check userExists first).
    """
    global userDict
    # sha256_crypt.verify already returns a bool; no if/else needed.
    return sha256_crypt.verify(pword, userDict[uname].getPassword())
def twofaMatch(uname, twofa):
    """Return True when the supplied 2FA token equals the stored one.

    NOTE(review): plain == comparison; hmac.compare_digest would avoid
    timing side channels on this secret.
    """
    global userDict
    return userDict[uname].get2FA() == twofa
@login_manager.user_loader
def load_user(id):
    """flask-login callback: map a session id back to its User, or None."""
    global userDict
    # dict.get does the membership check and the None fallback in one step.
    return userDict.get(id)
def secureResponse(render):
    """Wrap rendered HTML in a response carrying standard hardening headers
    (XSS filter, MIME sniffing off, same-origin framing only)."""
    response = make_response(render)
    response.headers['X-XSS-Protection'] = '1; mode=block'
    #response.headers['Content-Security-Policy'] = "default-src '127.0.0.1:5000'"
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers['X-Frame-Options'] = 'SAMEORIGIN'
    return response
@app.errorhandler(404)
def not_found(e):
    """Render the custom 404 page with the hardening headers applied."""
    return secureResponse(render_template("PageNotFound.html"))
@app.route('/register', methods=('GET', 'POST'))
def register():
    """Sign-up endpoint: create the account unless the name is taken or a
    required field is blank, then render a Success/Failure page."""
    form = UserForm()
    if form.validate_on_submit():
        # return redirect('/success')
        global userDict
        user = form.uname.data
        pword = form.pword.data
        twofa = form.twofa.data
        # Reject duplicate usernames and empty credentials.
        if (userExists(user)) or (not user) or (not pword):
            return secureResponse(render_template('registrationResult.html', success="Failure"))
        else:
            addUser(user, pword, twofa)
            return secureResponse(render_template('registrationResult.html', success="Success"))
    return secureResponse(render_template('registerForm.html', form=form))
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Login endpoint: check username, then the password hash, then the
    2FA token.

    Unknown users and wrong passwords both render "Incorrect", so the page
    does not reveal which of the two failed.
    """
    form = UserForm()
    if form.validate_on_submit():
        # return redirect('/success')
        global userDict
        user = form.uname.data
        pword = form.pword.data
        twofa = form.twofa.data
        if userExists(user):
            if passwordMatch(user, pword):
                if twofaMatch(user, twofa):
                    # All three checks passed: open the flask-login session.
                    login_user(getUser(user))
                    return secureResponse(render_template('loginResult.html', result="Success"))
                else:
                    return secureResponse(render_template('loginResult.html', result="Two-factor Failure"))
            else:
                return secureResponse(render_template('loginResult.html', result="Incorrect"))
        else:
            return secureResponse(render_template('loginResult.html', result="Incorrect"))
    return secureResponse(render_template('userLoginForm.html', form=form))
@app.route('/logout')
def logout():
    """End the flask-login session and bounce back to the login page."""
    logout_user()
    return redirect('/login')
class spellCheckForm(FlaskForm):
    """Single large textarea holding the text to be spell-checked."""
    inputtext = TextAreaField(u'Text to Check', [DataRequired()], render_kw={"rows": 40, "cols": 100})
@app.route('/spell_check', methods=('GET', 'POST'))
@login_required
def spellcheck():
    """Run the submitted text through the external checker (./a.out).

    The checker takes a file path, so the text is written to a unique
    temporary file (the original's fixed "tempUserInput" name let
    concurrent requests clobber each other) which is removed even when the
    subprocess raises.
    """
    import tempfile  # local import: keeps the module-level import block unchanged

    form = spellCheckForm()
    if form.validate_on_submit():
        text = form.inputtext.data
        fd, path = tempfile.mkstemp()
        try:
            with os.fdopen(fd, "w") as f:
                f.write(text)
            process = subprocess.run(['./a.out', path, 'wordlist.txt'], check=True,
                                     stdout=subprocess.PIPE, universal_newlines=True)
            output = process.stdout
        finally:
            # Clean up even on subprocess failure (the original leaked then).
            os.remove(path)
        # One misspelled word per output line -> comma-separated list.
        misspelledOut = output.replace("\n", ", ").strip().strip(',')
        return secureResponse(render_template('spellCheckResult.html', misspelled=misspelledOut, textout=text))
    else:
        return secureResponse(render_template('spellCheckForm.html', form=form))
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger -- development only.
    app.run(debug=True)
|
989,081 | 7b7f7b32cd0582ecef923a13a87c57158ca49042 | '''Problem
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.'''
# Base -> complement lookup; reversal is done by iterating back-to-front.
_COMPLEMENT = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}

print('What is your input file?')
file_name = input()
# 'with' closes the file even if reading fails (the original relied on a
# trailing close() call).
with open(file_name) as file_open:
    file_content = file_open.read()
# Join once instead of the original quadratic `compliment += ...` loop.
# Characters without a complement entry (e.g. a trailing newline) are
# dropped, matching the original if/elif chain's behaviour.
compliment = ''.join(
    _COMPLEMENT[b] for b in reversed(file_content) if b in _COMPLEMENT)
print(compliment)
989,082 | 870c9a030eaa424ef7afbdc5385ba287bb716c18 | from flask_wtf import FlaskForm
from wtforms import SubmitField, StringField, DecimalField, FileField
from wtforms.validators import DataRequired
class ProductForm(FlaskForm):
    """Admin form for creating a product listing.

    Field labels are user-facing (Russian) and intentionally left as-is.
    ``count`` is restricted to whole numbers via ``places=0``.
    """
    title = StringField('Название товара', validators=[DataRequired()])
    picture = FileField('Изображение')
    description = StringField('Описание товара', validators=[DataRequired()])
    category = StringField('Категория')
    producer = StringField('Производитель', validators=[DataRequired()])
    price = DecimalField('Цена', validators=[DataRequired()])
    count = DecimalField('Количество', places=0)
    advantage = StringField('Преимущества покупки товара')
    submit = SubmitField('Добавить')
|
989,083 | a3bd91948f464dbf1cbe8e5aa11a4ee9bf356201 |
import filetalk
# Evaluate a postfix ("RPN") addition expression delivered via filetalk:
# operands are pushed onto a stack, '+' pops two values and pushes their sum,
# and the final stack top is written back to the requested destination.
D = filetalk.arg()
S = []
for cmd in D["EXPR"]:
    if cmd == "+":
        # NOTE(review): assumes operands are numeric; if filetalk delivers
        # them as strings this '+' would concatenate instead — confirm the
        # contract of filetalk.arg().
        S.append(S.pop()+S.pop())
    else:
        S.append(cmd)
filetalk.write(D["WRITE_RESULT"], S.pop())
|
989,084 | ec560ac4c13231219f72946a51846e808364f5b5 | from __future__ import print_function
from __future__ import division
import tensorflow as tf
def average_pooling(emb, seq_len):
    """Mean-pool embeddings over the (possibly padded) sequence axis.

    Positions beyond ``seq_len`` are zero-masked before summing, and the sum
    is divided by the true length (the 1e-8 avoids division by zero for empty
    sequences).  Note: ``emb`` is multiplied in place by the mask.
    """
    mask = tf.sequence_mask(seq_len, tf.shape(emb)[-2], dtype=tf.float32)  # [B, T] / [B, T, max_cate_len]
    mask = tf.expand_dims(mask, -1)  # [B, T, 1] / [B, T, max_cate_len, 1]
    emb *= mask  # [B, T, H] / [B, T, max_cate_len, H]
    sum_pool = tf.reduce_sum(emb, -2)  # [B, H] / [B, T, H]
    avg_pool = tf.div(sum_pool, tf.expand_dims(tf.cast(seq_len, tf.float32), -1) + 1e-8)  # [B, H] / [B, T, H]
    return avg_pool
def gelu(input_tensor):
    """Exact (erf-based) Gaussian Error Linear Unit activation: x * Phi(x)."""
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf
def transfer_emb(name, emb_prev, emb_upd, n1=10, n2=5, l1=20):
    """CNN+MLP transfer module fusing a previous embedding table with a
    freshly updated one (one row per id).

    Stacks [prev, upd, normalised elementwise product], convolves across the
    stack axis twice (n1 then n2 filters) with GELU activations, then maps
    the flattened features through a two-layer MLP back to embedding size.

    NOTE(review): ``embeds_dot`` is the elementwise product scaled by the row
    norm of ``emb_prev`` only (not a full cosine similarity) — presumably
    intentional, mirroring ``transfer_mlp`` below; confirm against the paper.
    """
    with tf.variable_scope(name):
        embed_dim = emb_upd.get_shape().as_list()[-1]  # H
        embeds_norm = tf.sqrt(tf.reduce_sum(emb_prev * emb_prev, axis=-1))  # [num]
        embeds_dot = tf.div(emb_prev * emb_upd, tf.expand_dims(embeds_norm, -1) + tf.constant(1e-15))  # [num, H]
        stack_embeds = tf.stack([emb_prev, emb_upd, embeds_dot], axis=1)  # [num, 3, H]
        input1 = tf.expand_dims(stack_embeds, -1)  # [num, 3, H, 1]
        filter1 = tf.get_variable(name="cnn_filter1", shape=[3, 1, 1, n1])  # [3, 1, 1, n1]
        output1 = tf.nn.conv2d(input1, filter1, strides=[1, 1, 1, 1], padding='VALID')  # [num, 1, H, n1]
        output1 = gelu(output1)  # [num, 1, H, n1]
        input2 = tf.transpose(output1, perm=[0, 3, 2, 1])  # [num, n1, H, 1]
        filter2 = tf.get_variable(name="cnn_filter2", shape=[n1, 1, 1, n2])  # [n1, 1, 1, n2]
        output2 = tf.nn.conv2d(input2, filter2, strides=[1, 1, 1, 1], padding='VALID')  # [num, 1, H, n2]
        output2 = gelu(output2)  # [num, 1, H, n2]
        cnn_output = tf.transpose(output2, perm=[0, 3, 2, 1])  # [num, n2, H, 1]
        cnn_output = tf.reshape(cnn_output, shape=[-1, n2 * embed_dim])  # [num, n2 x H]
        with tf.variable_scope('fcn1'):
            fcn1_kernel = tf.get_variable(name='kernel', shape=[n2 * embed_dim, l1])  # [n2 x H, l1]
            fcn1_bias = tf.get_variable(name='bias', shape=[l1])  # [l1]
        with tf.variable_scope('fcn2'):
            fcn2_kernel = tf.get_variable(name='kernel', shape=[l1, embed_dim])  # [l1, H]
            fcn2_bias = tf.get_variable(name='bias', shape=[embed_dim])  # [H]
        fcn1 = gelu(tf.matmul(cnn_output, fcn1_kernel) + fcn1_bias)  # [num, l1]
        fcn2 = tf.matmul(fcn1, fcn2_kernel) + fcn2_bias  # [num, H]
        return fcn2
def transfer_mlp(name, param_prev, param_upd, param_shape, n1=5, n2=3, l1=40):
    """CNN+MLP transfer module fusing a previous MLP parameter with the
    freshly updated one.

    Same scheme as ``transfer_emb`` but operates on a single flattened
    parameter tensor (batch of 1): stack [prev, upd, normalised product],
    two convolutions over the stack axis, then a two-layer MLP, and finally
    reshape back to ``param_shape``.
    """
    with tf.variable_scope(name):
        param_prev = tf.reshape(param_prev, [-1])  # [dim]
        param_upd = tf.reshape(param_upd, [-1])  # [dim]
        param_dim = param_upd.get_shape().as_list()[-1]  # max_dim: 40 x 20 = 800
        param_norm = tf.sqrt(tf.reduce_sum(param_prev * param_prev))  # scalar
        param_dot = tf.div(param_prev * param_upd, param_norm + tf.constant(1e-15))  # [dim] / [] = [dim]
        stack_param = tf.stack([param_prev, param_upd, param_dot], axis=0)  # [3, dim]
        input1 = tf.expand_dims(tf.expand_dims(stack_param, -1), 0)  # [1, 3, dim, 1]
        filter1 = tf.get_variable(name="cnn_filter1", shape=[3, 1, 1, n1])  # [3, 1, 1, n1]
        output1 = tf.nn.conv2d(input1, filter1, strides=[1, 1, 1, 1], padding='VALID')  # [1, 1, dim, n1]
        output1 = gelu(output1)  # [1, 1, dim, n1]
        input2 = tf.transpose(output1, perm=[0, 3, 2, 1])  # [1, n1, dim, 1]
        filter2 = tf.get_variable(name="cnn_filter2", shape=[n1, 1, 1, n2])  # [n1, 1, 1, n2]
        output2 = tf.nn.conv2d(input2, filter2, strides=[1, 1, 1, 1], padding='VALID')  # [1, 1, dim, n2]
        output2 = gelu(output2)  # [1, 1, dim, n2]
        cnn_output = tf.transpose(output2, perm=[0, 3, 2, 1])  # [1, n2, dim, 1]
        cnn_output = tf.reshape(cnn_output, shape=[1, -1])  # [1, n2 x dim]
        with tf.variable_scope('fcn1'):
            fcn1_kernel = tf.get_variable(name='kernel', shape=[n2 * param_dim, l1])  # [n2 x dim, l1]
            fcn1_bias = tf.get_variable(name='bias', shape=[l1])  # [l1]
        with tf.variable_scope('fcn2'):
            fcn2_kernel = tf.get_variable(name='kernel', shape=[l1, param_dim])  # [l1, dim]
            fcn2_bias = tf.get_variable(name='bias', shape=[param_dim])  # [dim]
        fcn1 = gelu(tf.matmul(cnn_output, fcn1_kernel) + fcn1_bias)  # [1, l1]
        fcn2 = tf.matmul(fcn1, fcn2_kernel) + fcn2_bias  # [1, dim]
        output = tf.reshape(fcn2, shape=param_shape)  # [dim1, dim2, ...]
        return output
class SML(object):
    """CTR model with sequential transfer modules (SML — presumably
    "Sequential Meta-Learning"; confirm against the originating paper).

    Builds an embedding + 3-layer MLP model.  When ``transfer_emb`` /
    ``transfer_mlp`` are enabled in ``train_config``, CNN+MLP transfer
    modules fuse the previous period's parameters (``prev_emb_dict`` /
    ``prev_mlp_dict``) with the freshly trained variables, and the
    ``*_upd_op`` assign ops write the fused values back into the trainable
    variables (see ``update``).  Base-model and transfer-module parameters
    are optimised separately (``train_base`` / ``train_transfer``).
    """
    def __init__(self, cates, cate_lens, hyperparams, prev_emb_dict, prev_mlp_dict, train_config=None):
        """Build the TF1 graph: placeholders, (optionally transferred)
        embeddings and MLP, loss, and the two optimisers."""
        self.train_config = train_config

        # create placeholder
        self.u = tf.placeholder(tf.int32, [None])  # [B]
        self.i = tf.placeholder(tf.int32, [None])  # [B]
        self.hist_i = tf.placeholder(tf.int32, [None, None])  # [B, T]
        self.hist_len = tf.placeholder(tf.int32, [None])  # [B]
        self.y = tf.placeholder(tf.float32, [None])  # [B]
        self.base_lr = tf.placeholder(tf.float32, [], name='base_lr')  # scalar
        self.transfer_lr = tf.placeholder(tf.float32, [], name='transfer_lr')  # scalar

        cates = tf.convert_to_tensor(cates, dtype=tf.int32)  # [num_cates, max_cate_len]
        cate_lens = tf.convert_to_tensor(cate_lens, dtype=tf.int32)  # [num_cates]

        if train_config['transfer_emb']:
            # -- create emb_w_upd begin -------
            user_emb_w_upd = tf.get_variable("user_emb_w", [hyperparams['num_users'], hyperparams['user_embed_dim']])
            item_emb_w_upd = tf.get_variable("item_emb_w", [hyperparams['num_items'], hyperparams['item_embed_dim']])
            cate_emb_w_upd = tf.get_variable("cate_emb_w", [hyperparams['num_cates'], hyperparams['cate_embed_dim']])
            # -- create emb_w_upd end -------

            # -- create emb_w_prev begin ----
            user_emb_w_prev = tf.convert_to_tensor(prev_emb_dict['user_emb_w'], tf.float32)
            item_emb_w_prev = tf.convert_to_tensor(prev_emb_dict['item_emb_w'], tf.float32)
            cate_emb_w_prev = tf.convert_to_tensor(prev_emb_dict['cate_emb_w'], tf.float32)
            # -- create emb_w_prev end ----

            # -- transfer emb_w begin ----
            with tf.variable_scope('transfer_emb'):
                user_emb_w = transfer_emb(name='user_emb_w',
                                          emb_prev=user_emb_w_prev,
                                          emb_upd=user_emb_w_upd,
                                          n1=train_config['emb_n1'],
                                          n2=train_config['emb_n2'],
                                          l1=train_config['emb_l1'])
                item_emb_w = transfer_emb(name='item_emb_w',
                                          emb_prev=item_emb_w_prev,
                                          emb_upd=item_emb_w_upd,
                                          n1=train_config['emb_n1'],
                                          n2=train_config['emb_n2'],
                                          l1=train_config['emb_l1'])
                cate_emb_w = transfer_emb(name='cate_emb_w',
                                          emb_prev=cate_emb_w_prev,
                                          emb_upd=cate_emb_w_upd,
                                          n1=train_config['emb_n1'],
                                          n2=train_config['emb_n2'],
                                          l1=train_config['emb_l1'])
            # -- transfer emb end ----

            # -- update op begin -------
            self.user_emb_w_upd_op = user_emb_w_upd.assign(user_emb_w)
            self.item_emb_w_upd_op = item_emb_w_upd.assign(item_emb_w)
            self.cate_emb_w_upd_op = cate_emb_w_upd.assign(cate_emb_w)
            # -- update op end -------
        else:
            # -- create emb_w begin -------
            user_emb_w = tf.get_variable("user_emb_w", [hyperparams['num_users'], hyperparams['user_embed_dim']])
            item_emb_w = tf.get_variable("item_emb_w", [hyperparams['num_items'], hyperparams['item_embed_dim']])
            cate_emb_w = tf.get_variable("cate_emb_w", [hyperparams['num_cates'], hyperparams['cate_embed_dim']])
            # -- create emb_w end -------

        if train_config['transfer_mlp']:
            # -- create mlp_upd begin ---
            concat_dim = hyperparams['user_embed_dim'] + (hyperparams['item_embed_dim'] + hyperparams['cate_embed_dim']) * 2
            with tf.variable_scope('fcn1'):
                fcn1_kernel_upd = tf.get_variable('kernel', [concat_dim, hyperparams['layers'][1]])
                fcn1_bias_upd = tf.get_variable('bias', [hyperparams['layers'][1]])
            with tf.variable_scope('fcn2'):
                fcn2_kernel_upd = tf.get_variable('kernel', [hyperparams['layers'][1], hyperparams['layers'][2]])
                fcn2_bias_upd = tf.get_variable('bias', [hyperparams['layers'][2]])
            with tf.variable_scope('fcn3'):
                fcn3_kernel_upd = tf.get_variable('kernel', [hyperparams['layers'][2], 1])
                fcn3_bias_upd = tf.get_variable('bias', [1])
            # -- create mlp_upd end ---

            # -- create mlp_prev begin ----
            fcn1_kernel_prev = tf.convert_to_tensor(prev_mlp_dict['fcn1/kernel'], tf.float32)
            fcn1_bias_prev = tf.convert_to_tensor(prev_mlp_dict['fcn1/bias'], tf.float32)
            fcn2_kernel_prev = tf.convert_to_tensor(prev_mlp_dict['fcn2/kernel'], tf.float32)
            fcn2_bias_prev = tf.convert_to_tensor(prev_mlp_dict['fcn2/bias'], tf.float32)
            fcn3_kernel_prev = tf.convert_to_tensor(prev_mlp_dict['fcn3/kernel'], tf.float32)
            fcn3_bias_prev = tf.convert_to_tensor(prev_mlp_dict['fcn3/bias'], tf.float32)
            # -- create mlp_prev end ----

            # -- transfer mlp begin ----
            with tf.variable_scope('transfer_mlp'):
                with tf.variable_scope('fcn1'):
                    fcn1_kernel = transfer_mlp(name='kernel',
                                               param_prev=fcn1_kernel_prev,
                                               param_upd=fcn1_kernel_upd,
                                               param_shape=[concat_dim, hyperparams['layers'][1]],
                                               n1=train_config['mlp_n1'],
                                               n2=train_config['mlp_n2'],
                                               l1=train_config['mlp_l1_dict']['fcn1/kernel'])
                    fcn1_bias = transfer_mlp(name='bias',
                                             param_prev=fcn1_bias_prev,
                                             param_upd=fcn1_bias_upd,
                                             param_shape=[hyperparams['layers'][1]],
                                             n1=train_config['mlp_n1'],
                                             n2=train_config['mlp_n2'],
                                             l1=train_config['mlp_l1_dict']['fcn1/bias'])
                with tf.variable_scope('fcn2'):
                    fcn2_kernel = transfer_mlp(name='kernel',
                                               param_prev=fcn2_kernel_prev,
                                               param_upd=fcn2_kernel_upd,
                                               param_shape=[hyperparams['layers'][1], hyperparams['layers'][2]],
                                               n1=train_config['mlp_n1'],
                                               n2=train_config['mlp_n2'],
                                               l1=train_config['mlp_l1_dict']['fcn2/kernel'])
                    fcn2_bias = transfer_mlp(name='bias',
                                             param_prev=fcn2_bias_prev,
                                             param_upd=fcn2_bias_upd,
                                             param_shape=[hyperparams['layers'][2]],
                                             n1=train_config['mlp_n1'],
                                             n2=train_config['mlp_n2'],
                                             l1=train_config['mlp_l1_dict']['fcn2/bias'])
                with tf.variable_scope('fcn3'):
                    fcn3_kernel = transfer_mlp(name='kernel',
                                               param_prev=fcn3_kernel_prev,
                                               param_upd=fcn3_kernel_upd,
                                               param_shape=[hyperparams['layers'][2], 1],
                                               n1=train_config['mlp_n1'],
                                               n2=train_config['mlp_n2'],
                                               l1=train_config['mlp_l1_dict']['fcn3/kernel'])
                    fcn3_bias = transfer_mlp(name='bias',
                                             param_prev=fcn3_bias_prev,
                                             param_upd=fcn3_bias_upd,
                                             param_shape=[1],
                                             n1=train_config['mlp_n1'],
                                             n2=train_config['mlp_n2'],
                                             l1=train_config['mlp_l1_dict']['fcn3/bias'])
            # -- transfer mlp end ----

            # -- update op begin -------
            self.fcn1_kernel_upd_op = fcn1_kernel_upd.assign(fcn1_kernel)
            self.fcn1_bias_upd_op = fcn1_bias_upd.assign(fcn1_bias)
            self.fcn2_kernel_upd_op = fcn2_kernel_upd.assign(fcn2_kernel)
            self.fcn2_bias_upd_op = fcn2_bias_upd.assign(fcn2_bias)
            self.fcn3_kernel_upd_op = fcn3_kernel_upd.assign(fcn3_kernel)
            self.fcn3_bias_upd_op = fcn3_bias_upd.assign(fcn3_bias)
            # -- update op end -------
        else:
            # -- create mlp begin ---
            concat_dim = hyperparams['user_embed_dim'] + (hyperparams['item_embed_dim'] + hyperparams['cate_embed_dim']) * 2
            with tf.variable_scope('fcn1'):
                fcn1_kernel = tf.get_variable('kernel', [concat_dim, hyperparams['layers'][1]])
                fcn1_bias = tf.get_variable('bias', [hyperparams['layers'][1]])
            with tf.variable_scope('fcn2'):
                fcn2_kernel = tf.get_variable('kernel', [hyperparams['layers'][1], hyperparams['layers'][2]])
                fcn2_bias = tf.get_variable('bias', [hyperparams['layers'][2]])
            with tf.variable_scope('fcn3'):
                fcn3_kernel = tf.get_variable('kernel', [hyperparams['layers'][2], 1])
                fcn3_bias = tf.get_variable('bias', [1])
            # -- create mlp end ---

        # -- emb begin -------
        u_emb = tf.nn.embedding_lookup(user_emb_w, self.u)  # [B, H]
        ic = tf.gather(cates, self.i)  # [B, max_cate_len]
        ic_len = tf.gather(cate_lens, self.i)  # [B]
        i_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.i),
            average_pooling(tf.nn.embedding_lookup(cate_emb_w, ic), ic_len)
        ], axis=1)  # [B, H x 2]
        hist_c = tf.gather(cates, self.hist_i)  # [B, T, max_cate_len]
        hist_c_len = tf.gather(cate_lens, self.hist_i)  # [B, T]
        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.hist_i),
            average_pooling(tf.nn.embedding_lookup(cate_emb_w, hist_c), hist_c_len)
        ], axis=2)  # [B, T, H x 2]
        u_hist = average_pooling(h_emb, self.hist_len)  # [B, H x 2]
        # -- emb end -------

        # -- mlp begin -------
        fcn = tf.concat([u_emb, u_hist, i_emb], axis=-1)  # [B, H x 5]
        fcn_layer_1 = tf.nn.relu(tf.matmul(fcn, fcn1_kernel) + fcn1_bias)  # [B, l1]
        fcn_layer_2 = tf.nn.relu(tf.matmul(fcn_layer_1, fcn2_kernel) + fcn2_bias)  # [B, l2]
        fcn_layer_3 = tf.matmul(fcn_layer_2, fcn3_kernel) + fcn3_bias  # [B, 1]
        # -- mlp end -------

        logits = tf.reshape(fcn_layer_3, [-1])  # [B]
        self.scores = tf.sigmoid(logits)  # [B]

        # return same dimension as input tensors, let x = logits, z = labels, z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
        self.losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=self.y)
        self.loss = tf.reduce_mean(self.losses)

        # base_optimizer
        if train_config['base_optimizer'] == 'adam':
            base_optimizer = tf.train.AdamOptimizer(learning_rate=self.base_lr)
        elif train_config['base_optimizer'] == 'rmsprop':
            base_optimizer = tf.train.RMSPropOptimizer(learning_rate=self.base_lr)
        else:
            base_optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.base_lr)

        # transfer_optimizer
        if train_config['transfer_optimizer'] == 'adam':
            transfer_optimizer = tf.train.AdamOptimizer(learning_rate=self.transfer_lr)
        elif train_config['transfer_optimizer'] == 'rmsprop':
            transfer_optimizer = tf.train.RMSPropOptimizer(learning_rate=self.transfer_lr)
        else:
            transfer_optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.transfer_lr)

        # Split variables by scope name: transfer-module params live under a
        # 'transfer*' variable scope, everything else is the base model.
        trainable_params = tf.trainable_variables()
        base_params = [v for v in trainable_params if 'transfer' not in v.name]
        transfer_params = [v for v in trainable_params if 'transfer' in v.name]

        # update base model and transfer module
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            base_grads = tf.gradients(self.loss, base_params)  # return a list of gradients (A list of `sum(dy/dx)` for each x in `xs`)
            base_grads_tuples = zip(base_grads, base_params)
            self.train_base_op = base_optimizer.apply_gradients(base_grads_tuples)
            transfer_grads = tf.gradients(self.loss, transfer_params)
            transfer_grads_tuples = zip(transfer_grads, transfer_params)
            with tf.variable_scope('transfer_opt'):
                self.train_transfer_op = transfer_optimizer.apply_gradients(transfer_grads_tuples)

    def train_base(self, sess, batch):
        """Run one optimisation step on the base-model parameters; return the batch loss."""
        loss, _ = sess.run([self.loss, self.train_base_op], feed_dict={
            self.u: batch[0],
            self.i: batch[1],
            self.hist_i: batch[2],
            self.hist_len: batch[3],
            self.y: batch[4],
            self.base_lr: self.train_config['base_lr'],
        })
        return loss

    def train_transfer(self, sess, batch):
        """Run one optimisation step on the transfer-module parameters; return the batch loss."""
        loss, _, = sess.run([self.loss, self.train_transfer_op], feed_dict={
            self.u: batch[0],
            self.i: batch[1],
            self.hist_i: batch[2],
            self.hist_len: batch[3],
            self.y: batch[4],
            self.transfer_lr: self.train_config['transfer_lr'],
        })
        return loss

    def update(self, sess):
        """Write the transferred (fused) parameters back into the trainable variables."""
        if self.train_config['transfer_emb']:
            sess.run([self.user_emb_w_upd_op,
                      self.item_emb_w_upd_op,
                      self.cate_emb_w_upd_op])
        if self.train_config['transfer_mlp']:
            sess.run([self.fcn1_kernel_upd_op,
                      self.fcn1_bias_upd_op,
                      self.fcn2_kernel_upd_op,
                      self.fcn2_bias_upd_op,
                      self.fcn3_kernel_upd_op,
                      self.fcn3_bias_upd_op])

    def inference(self, sess, batch):
        """Return (scores, per-example losses) for a batch without updating parameters."""
        scores, losses = sess.run([self.scores, self.losses], feed_dict={
            self.u: batch[0],
            self.i: batch[1],
            self.hist_i: batch[2],
            self.hist_len: batch[3],
            self.y: batch[4],
        })
        return scores, losses
|
989,085 | 70e9ee92be7c98ae7efb747ab72078fd525c9b24 | #-*- coding: utf-8 -*-
import serial
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
class ChatApp(App):
    """Minimal Kivy quiz UI that reports button presses to an Arduino over serial."""
    def build(self):
        """Open the serial link and log file, then build the widget tree."""
        # Connect to the Arduino board:
        self.Arduino = serial.Serial('COM13', 9600)
        self.fichier=open("data.txt","a")
        # Create a layout for the display:
        Layout=BoxLayout(orientation='vertical',spacing=40,padding=(200,20))
        # Create a label:
        self.Label1=Label(text='2 + 3 = ?', font_size=20)
        Layout.add_widget(self.Label1)
        # Create two answer buttons:
        self.Bouton5=Button(text='5')
        self.Bouton5.bind(on_press=self.send)
        # Add the button to the display:
        Layout.add_widget(self.Bouton5)
        self.Bouton6=Button(text='6')
        self.Bouton6.bind(on_press=self.send)
        # Add the button to the display:
        Layout.add_widget(self.Bouton6)
        # Return the display:
        return Layout
    def send(self,instance):
        """Button callback: notify the Arduino and append a line to the log file.

        NOTE(review): pyserial on Python 3 requires bytes (b'1'), and the log
        file is never flushed or closed — confirm the target Python version
        and add cleanup in on_stop if needed.
        """
        self.Arduino.write('1')
        self.fichier.write("\nbonjour ca marche")
if __name__ == '__main__':
    # Start the Kivy event loop.
    ChatApp().run()
989,086 | 56929c580b855ac8267373073725472ededc02ba | import os
import pkg_resources
from setuptools import setup, find_packages


def _read_requirements():
    """Parse requirements.txt (next to this file) into pip requirement strings."""
    path = os.path.join(os.path.dirname(__file__), "requirements.txt")
    # Close the handle deterministically; the original leaked the open file.
    # The list() is consumed inside the with-block because parse_requirements
    # is lazy and would otherwise read from a closed file.
    with open(path) as fh:
        return [str(r) for r in pkg_resources.parse_requirements(fh)]


setup(
    name="human-eval",
    py_modules=["human-eval"],
    version="1.0",
    description="",
    author="OpenAI",
    packages=find_packages(),
    install_requires=_read_requirements(),
    entry_points={
        "console_scripts": [
            # NOTE(review): a console_scripts spec normally needs a
            # "module:callable" target; confirm this colon-less form is
            # intentional or append the entry-point function.
            "evaluate_functional_correctness = human_eval.evaluate_functional_correctness",
        ]
    }
)
989,087 | 6e4c582ce3aa3e0828407730395a8eb2f770663d | from app import db
from datetime import datetime
class Data(db.Model):
    """Single temperature/humidity sensor reading."""
    __tablename__ = "data"

    id = db.Column(db.Integer, primary_key=True)
    temperature = db.Column(db.Float)  # unit not recorded in schema — presumably Celsius, confirm
    humidity = db.Column(db.Float)     # presumably relative humidity in %, confirm
    timestamp = db.Column(db.DateTime, default=datetime.now)  # insertion time (naive local time)

    def to_json(self):
        """Return the reading as a plain dict.

        NOTE(review): ``timestamp`` stays a datetime and is not directly
        JSON-serialisable; callers must convert it (e.g. isoformat) before
        passing the dict to json.dumps.
        """
        json_data = {
            'id': self.id,
            'temperature': self.temperature,
            'humidity': self.humidity,
            'timestamp': self.timestamp,
        }
        return json_data

    def save(self):
        """Add this row to the current session and commit immediately."""
        db.session.add(self)
        db.session.commit()

    def __str__(self):
        # Bug fix: __str__ must return a str; the original returned the dict
        # from to_json(), which raised TypeError whenever the object was
        # printed or formatted.
        return str(self.to_json())
989,088 | b81b8fc9f9f6d4e2bad2f7af9de7c24e8d2c47ad | # Copyright 2011 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import stat
import pwd
import grp
import logging
from yaybu import resources
from yaybu.core import provider, error
class Link(provider.Provider):

    """Ensure a symbolic link exists at ``resource.name`` pointing at
    ``resource.to``, with the requested owner and group.

    ``apply`` is idempotent: an existing wrong link (or any file in the way)
    is replaced, and ownership is only changed when the current uid/gid
    differ from the requested ones.
    """

    policies = (resources.link.LinkAppliedPolicy,)

    @classmethod
    def isvalid(self, *args, **kwargs):
        # TODO: validation could provide warnings based on things
        # that are not the correct state at the point of invocation
        # but that will be modified by the yaybu script
        return super(Link, self).isvalid(*args, **kwargs)

    def _get_owner(self):
        """ Return the uid for the resource owner, or None if no owner is
        specified. Raises error.InvalidUser for an unknown user name. """
        if self.resource.owner is not None:
            try:
                return pwd.getpwnam(self.resource.owner).pw_uid
            except KeyError:
                raise error.InvalidUser()

    def _get_group(self):
        """ Return the gid for the resource group, or None if no group is
        specified. Raises error.InvalidGroup for an unknown group name. """
        if self.resource.group is not None:
            try:
                return grp.getgrnam(self.resource.group).gr_gid
            except KeyError:
                raise error.InvalidGroup()

    def _stat(self):
        """ Extract (uid, gid, mode) for the link itself (lstat, not stat). """
        st = os.lstat(self.resource.name)
        uid = st.st_uid
        gid = st.st_gid
        mode = stat.S_IMODE(st.st_mode)
        return uid, gid, mode

    def apply(self, context):
        """Create or correct the link; return True when anything was changed.

        Raises error.DanglingSymlink when the target is missing (in simulate
        mode this is only logged) and error.OperationFailed when the link
        could not be created.
        """
        changed = False
        name = self.resource.name
        to = self.resource.to
        exists = False
        uid = None
        gid = None
        mode = None
        isalink = False

        if not os.path.exists(to):
            if not context.simulate:
                raise error.DanglingSymlink("Destination of symlink %r does not exist" % to)
            # Bug fix: the log message previously read "sylink".
            context.changelog.info("Destination of symlink %r does not exist" % to)

        owner = self._get_owner()
        group = self._get_group()
        try:
            linkto = os.readlink(name)
            isalink = True
        except OSError:
            isalink = False

        if not isalink or linkto != to:
            if os.path.exists(name):
                # Remove whatever is in the way (file, directory or wrong link).
                context.shell.execute(["/bin/rm", "-rf", name])
            context.shell.execute(["/bin/ln", "-s", self.resource.to, name])
            changed = True

        # Re-check so ownership fixes below act on the (possibly new) link.
        try:
            linkto = os.readlink(name)
            isalink = True
        except OSError:
            isalink = False
        if not isalink and not context.simulate:
            raise error.OperationFailed("Did not create expected symbolic link")

        if isalink:
            uid, gid, mode = self._stat()
            # -h changes the link itself rather than its target.
            if owner is not None and owner != uid:
                context.shell.execute(["/bin/chown", "-h", self.resource.owner, name])
                changed = True
            if group is not None and group != gid:
                context.shell.execute(["/bin/chgrp", "-h", self.resource.group, name])
                changed = True

        return changed
class RemoveLink(provider.Provider):

    """Remove the symbolic link at ``resource.name`` if present."""

    policies = (resources.link.LinkRemovedPolicy,)

    @classmethod
    def isvalid(self, *args, **kwargs):
        return super(RemoveLink, self).isvalid(*args, **kwargs)

    def apply(self, context):
        """Delete the link; return True when something was removed.

        Refuses (error.InvalidProvider) to delete anything that exists but is
        not a symlink.
        """
        target = self.resource.name
        if not os.path.exists(target):
            context.changelog.info("File %s missing already so not removed" % target)
            return False
        if not os.path.islink(target):
            raise error.InvalidProvider("%r: %s exists and is not a link" % (self, target))
        context.shell.execute(["/bin/rm", target])
        return True
|
989,089 | dec65803691e02b448bdc4e246475e59c9780864 | N, A, B = map(int, input().split())
print(B * N if N <= 5 else B * 5 + A * (N - 5))
|
989,090 | 3d688e304339b1669733b2f5526f9fac5fc086e0 | from collections import namedtuple
from models.supersenses import vocabs, embeddings
from models.supersenses.features.feature import Feature, FeatureType, MountPoint, Features
from models.supersenses.features.features_utils import get_parent, get_grandparent, get_child_of_type, get_children, \
is_capitalized, get_gov, get_obj
[LSTM, MLP] = [MountPoint.LSTM, MountPoint.MLP]
def build_features(hyperparameters, override=None):
    """Assemble the Features list for the supersenses model.

    ``override`` is an optional dict of hyperparameter overrides applied on
    top of ``hyperparameters`` via clone(); each Feature's ``enable`` flag is
    driven by the resulting settings, so disabled features are still declared
    here but inert.  Mount points route features either into the LSTM input
    or the MLP input.
    """
    override = override or {}
    hp = hyperparameters.clone(override)
    return Features([
        Feature('token-word2vec', FeatureType.STRING, vocabs.TOKENS, embeddings.TOKENS_WORD2VEC, embedding_fallback=lambda tok: tok.token_word2vec, default_zero_vec=True, extractor=lambda tok, sent: tok.token, mount_point=LSTM, enable=hp.use_token, update=hp.update_token_embd, masked_only=False),
        Feature('token.lemma-word2vec', FeatureType.STRING, vocabs.LEMMAS, embeddings.LEMMAS_WORD2VEC, embedding_fallback=lambda tok: tok.lemma_word2vec, default_zero_vec=True, update=hp.update_lemmas_embd, extractor=lambda tok, sent: tok.lemma, mount_point=LSTM, enable=True, masked_only=False),
        Feature('token-internal', FeatureType.STRING, vocabs.TOKENS, embeddings.AUTO, extractor=lambda tok, sent: tok.token, embedding_fallback=lambda tok: [0] * hp.token_internal_embd_dim, default_zero_vec=True, mount_point=LSTM, enable=hp.use_token_internal, dim=hp.token_internal_embd_dim, update=True, masked_only=False),
        Feature('token.ud_xpos', FeatureType.ENUM, vocabs.UD_XPOS, embeddings.AUTO, dim=hp.ud_xpos_embd_dim, update=True, extractor=lambda tok, sent: tok.ud_xpos, mount_point=MLP, enable=hp.use_ud_xpos),
        Feature('token.dep', FeatureType.ENUM, vocabs.UD_DEPS, embeddings.AUTO, dim=hp.ud_deps_embd_dim, update=True, extractor=lambda tok, sent: tok.ud_dep, mount_point=MLP, enable=hp.use_ud_dep),
        Feature('token.ner', FeatureType.ENUM, vocabs.NERS, embeddings.AUTO, dim=hp.ner_embd_dim, update=True, extractor=lambda tok, sent: tok.ner, mount_point=MLP, enable=hp.use_ner),
        Feature('token.govobj-config', FeatureType.ENUM, vocabs.GOVOBJ_CONFIGS, embeddings.AUTO, dim=hp.govobj_config_embd_dim, update=True, extractor=lambda tok, sent: tok.govobj_config, mount_point=MLP, enable=hp.use_govobj),
        Feature('token.lexcat', FeatureType.ENUM, vocabs.LEXCAT, embeddings.AUTO, dim=hp.lexcat_embd_dim, update=True, extractor=lambda tok, sent: tok.lexcat, mount_point=MLP, enable=hp.use_lexcat, masked_only=False),
        # Feature('prep-onehot', FeatureType.ENUM, vocabs.PREPS, embeddings.PREPS_ONEHOT, extractor=lambda tok, sent: tok.token, mount_point=MLP, enable=hp.use_prep_onehot, fall_to_none=True),
        Feature('capitalized-word-follows', FeatureType.ENUM, vocabs.BOOLEAN, embeddings.BOOLEAN, extractor=lambda tok, sent: str(len(sent) > tok.ind + 1 and is_capitalized(sent[tok.ind + 1]) or len(sent) > tok.ind + 2 and is_capitalized(sent[tok.ind + 2])), mount_point=MLP, masked_only=True, enable=True),
        # Governor (head) features; the REF feature carries the index, the others its attributes.
        Feature('token-gov', FeatureType.REF, None, None, extractor=lambda tok, sent: get_gov(tok, sent).ind, mount_point=MLP, enable=hp.use_govobj),
        Feature('token-gov.ud_xpos', FeatureType.ENUM, vocabs.UD_XPOS, embeddings.AUTO, dim=hp.ud_xpos_embd_dim, update=True, extractor=lambda tok, sent: get_gov(tok, sent).ud_xpos, mount_point=MLP, enable=hp.use_govobj and hp.use_ud_xpos),
        Feature('token-gov.dep', FeatureType.ENUM, vocabs.UD_DEPS, embeddings.AUTO, dim=hp.ud_deps_embd_dim, update=True, extractor=lambda tok, sent: get_gov(tok, sent).ud_dep, mount_point=MLP, enable=hp.use_govobj and hp.use_ud_dep),
        Feature('token-gov.ner', FeatureType.ENUM, vocabs.NERS, embeddings.AUTO, dim=hp.ner_embd_dim, update=True, extractor=lambda tok, sent: get_gov(tok, sent).ner, mount_point=MLP, enable=hp.use_govobj and hp.use_ner),
        # Object features, mirroring the governor block above.
        Feature('token-obj', FeatureType.REF, None, None, extractor=lambda tok, sent: get_obj(tok, sent).ind, mount_point=MLP, enable=hp.use_govobj),
        Feature('token-obj.ud_xpos', FeatureType.ENUM, vocabs.UD_XPOS, embeddings.AUTO, dim=hp.ud_xpos_embd_dim, update=True, extractor=lambda tok, sent: get_obj(tok, sent).ud_xpos, mount_point=MLP, enable=hp.use_govobj and hp.use_ud_xpos),
        Feature('token-obj.dep', FeatureType.ENUM, vocabs.UD_DEPS, embeddings.AUTO, dim=hp.ud_deps_embd_dim, update=True, extractor=lambda tok, sent: get_obj(tok, sent).ud_dep, mount_point=MLP, enable=hp.use_govobj and hp.use_ud_dep),
        Feature('token-obj.ner', FeatureType.ENUM, vocabs.NERS, embeddings.AUTO, dim=hp.ner_embd_dim, update=True, extractor=lambda tok, sent: get_obj(tok, sent).ner, mount_point=MLP, enable=hp.use_govobj and hp.use_ner),
        # Feature('token-spacy-pobj-child', FeatureType.REF, None, None, extractor=lambda tok, sent: get_child_of_type(tok, sent, 'pobj').ind, mount_point=MLP, enable=hp.use_ud_dep and hp.deps_from == 'spacy'),
        # Feature('token-spacy-pobj-child.ud_xpos', FeatureType.ENUM, vocabs.UD_XPOS, embeddings.AUTO, dim=hp.ud_xpos_embd_dim, update=True, extractor=lambda tok, sent: get_child_of_type(tok, sent, 'pobj').ud_xpos, mount_point=MLP, enable=hp.use_ud_dep and hp.deps_from == 'spacy' and hp.use_ud_xpos),
        # Feature('token-spacy-pobj-child.dep', FeatureType.ENUM, vocabs.UD_DEPS, embeddings.AUTO, dim=hp.ud_deps_embd_dim, update=True, extractor=lambda tok, sent: get_child_of_type(tok, sent, 'pobj').spacy_dep, mount_point=MLP, enable=hp.use_ud_dep and hp.deps_from == 'spacy'),
        # Feature('token-spacy-pobj-child.ner', FeatureType.ENUM, vocabs.NERS, embeddings.AUTO, dim=hp.ner_embd_dim, update=True, extractor=lambda tok, sent: get_child_of_type(tok, sent, 'pobj').spacy_ner, mount_point=MLP, enable=hp.use_ud_dep and hp.deps_from == 'spacy' and hp.use_ner),
        #
        # Feature('token-has-children', FeatureType.ENUM, vocabs.BOOLEAN, embeddings.BOOLEAN, extractor=lambda tok, sent: str(len(get_children(tok, sent)) > 0), mount_point=MLP, enable=hp.use_ud_dep and hp.deps_from == 'spacy'),
    ])
989,091 | 58c2c9fc4138c8844ad73bfa4ba68061601c5508 | import numpy as np
from Box2D import b2BodyDef, b2_staticBody, b2World
from Setup.MazeFunctions import BoxIt
from scipy.spatial import cKDTree
from pandas import read_excel
# Maze sizes manufactured for each shape, per solver species.
size_per_shape = {'ant': {'H': ['XS', 'S', 'M', 'L', 'SL', 'XL'],
                          'I': ['XS', 'S', 'M', 'L', 'SL', 'XL'],
                          'T': ['XS', 'S', 'M', 'L', 'SL', 'XL'],
                          'SPT': ['S', 'M', 'L', 'XL'],
                          'RASH': ['S', 'M', 'L', 'XL'],
                          'LASH': ['S', 'M', 'L', 'XL'],
                          },
                  'human': {'SPT': ['S', 'M', 'L']},
                  'humanhand': {'SPT': ['']}
                  }

# Discrete state labels available per shape (used as Maze.statenames).
StateNames = {'H': [0, 1, 2, 3, 4, 5], 'I': [0, 1, 2, 3, 4, 5], 'T': [0, 1, 2, 3, 4, 5],
              'SPT': [0, 1, 2, 3, 4, 5, 6], 'LASH': [0, 1, 2, 3, 4, 5, 6], 'RASH': [0, 1, 2, 3, 4, 5, 6]}

# Linear scaling factor of each maze size relative to 'XL', per solver.
ResizeFactors = {'ant': {'XL': 1, 'SL': 0.75, 'L': 0.5, 'M': 0.25, 'S': 0.125, 'XS': 0.125 / 2},
                 'dstar': {'XL': 1, 'SL': 0.75, 'L': 0.5, 'M': 0.25, 'S': 0.125, 'XS': 0.125 / 2},
                 'human': {'Small Near': 1, 'Small Far': 1, 'S': 1, 'M': 1, 'Medium': 1, 'Large': 1, 'L': 1},
                 'humanhand': {'': 1}}
# there are a few I mazes, which have a different exit size,
# x, y, theta
def start(size, shape, solver):
    """Return the [x, y, theta] start configuration for the given maze.

    Builds a throwaway Maze to read its geometry.  Shapes other than those
    listed return None implicitly.
    """
    maze = Maze(size=size, shape=shape, solver=solver)
    if shape == 'SPT':
        # return [(maze.slits[0] - maze.slits[-1]) / 2 + maze.slits[-1] - 0.5, maze.arena_height / 2, 0]
        return [maze.slits[0] * 0.5, maze.arena_height / 2, 0]
    elif shape in ['H', 'I', 'T', 'RASH', 'LASH']:
        return [maze.slits[0] - 5, maze.arena_height / 2, np.pi - 0.1]
def end(size, shape, solver):
    """Return the [x, y, theta] goal configuration: 5 units past the last slit."""
    maze = Maze(size=size, shape=shape, solver=solver)
    return [maze.slits[-1] + 5, maze.arena_height / 2, 0]
class Maze(b2World):
    def __init__(self, *args, size='XL', shape='SPT', solver='ant', free=False):
        """Create the Box2D world and the static maze body.

        ``free=True`` builds an oversized empty arena (no slits) instead of
        the measured maze geometry.
        """
        super().__init__(gravity=(0, 0), doSleep=True)
        self.shape = shape  # loadshape (maybe this will become name of the maze...)
        self.size = size  # size
        self.solver = solver
        self.statenames = StateNames[shape]
        self.getMazeDim(*args)
        self.body = self.CreateMaze(free)
        self.get_zone()
    def getMazeDim(self, *args):
        """Load arena/slit dimensions for this solver+size+shape from Excel.

        NOTE(review): the spreadsheet path is a hard-coded absolute Windows
        path — this only works on one machine; consider making it
        configurable.  Units: cm for 'ant'/'dstar', metres for 'human'.
        """
        df = read_excel('C:\\Users\\tabea\\PycharmProjects\\AntsShapes\\Setup\\MazeDimensions_' + self.solver + '.xlsx',
                        engine='openpyxl')

        if self.solver in ['ant', 'dstar']:  # all measurements in cm
            d = df.loc[df['Name'] == self.size + '_' + self.shape]
            if 'L_I1' in args:
                d = df.loc[df['Name'] == 'L_I1']
            self.arena_length = d['arena_length'].values[0]
            self.arena_height = d['arena_height'].values[0]
            self.exit_size = d['exit_size'].values[0]
            self.wallthick = d['wallthick'].values[0]
            # 'slits' is either a single number or a comma-separated string.
            if type(d['slits'].values[0]) == str:
                self.slits = [float(s) for s in d['slits'].values[0].split(', ')]
            else:
                self.slits = [d['slits'].values[0]]

        elif self.solver == 'human':  # all measurements in meters
            # TODO: measure the slits again...
            # these coordinate values are given inspired from the drawing in \\phys-guru-cs\ants\Tabea\Human
            # Experiments\ExperimentalSetup
            d = df.loc[df['Name'] == self.size]
            A = [float(s) for s in d['A'].values[0].split(',')]
            # B = [float(s) for s in d['B'].values[0].split(',')]
            C = [float(s) for s in d['C'].values[0].split(',')]
            D = [float(s) for s in d['D'].values[0].split(',')]
            E = [float(s) for s in d['E'].values[0].split(',')]
            self.arena_length, self.exit_size = A[0], D[1] - C[1]
            self.wallthick = 0.1
            self.arena_height = 2 * C[1] + self.exit_size
            self.slits = [(E[0] + self.wallthick / 2),
                          (C[0] + self.wallthick / 2)]  # These are the x positions at which the slits are positions

        elif self.solver == 'humanhand':  # only SPT
            d = df.loc[df['Name'] == self.solver]
            self.arena_length = d['arena_length'].values[0]
            self.arena_height = d['arena_height'].values[0]
            self.exit_size = d['exit_size'].values[0]
            self.wallthick = d['wallthick'].values[0]
            self.slits = [float(s) for s in d['slits'].values[0].split(', ')]

        # Four corner points per slit wall (two walls per slit), filled later
        # by CreateSlitObject.
        self.slitpoints = np.empty((len(self.slits) * 2, 4, 2), float)
    def CreateMaze(self, free):
        """Create the static maze body: outer walls plus (unless ``free``) the slit walls.

        ``free`` triples the arena in both directions and skips the slits.
        Returns the Box2D body.
        """
        my_maze = self.CreateBody(b2BodyDef(position=(0, 0), angle=0, type=b2_staticBody, userData='my_maze'))
        if free:
            my_maze.CreateLoopFixture(
                vertices=[(0, 0), (0, self.arena_height * 3), (self.arena_length * 3, self.arena_height * 3),
                          (self.arena_length * 3, 0)])
        else:
            my_maze.CreateLoopFixture(
                vertices=[(0, 0),
                          (0, self.arena_height),
                          (self.arena_length, self.arena_height),
                          (self.arena_length, 0),
                          ])
            self.CreateSlitObject(my_maze)
        return my_maze
def CreateSlitObject(self, my_maze):
    # # The x and y position describe the point, where the middle (in x direction) of the top edge (y direction)
    # of the lower wall of the slit is...
    """Attach the slit-wall polygons to *my_maze* and build self.slitTree.

    Fills self.slitpoints with one 4-corner polygon per slit wall, creates a
    Box2D fixture for each, and finally builds a cKDTree over densified wall
    points for fast nearest-wall queries.

    We need a special case for L_SPT because in the manufacturing the slits
    were not vertically glued (the walls are slightly slanted).
    """
    if self.shape == 'LongT':
        pass
        # self.slitpoints[i]
    if self.shape == 'SPT':
        if self.size == 'L' and self.solver == 'ant':
            # Manufacturing-defect case: hand-measured wall length and small
            # x-offsets model the slanted, mis-glued walls.
            slitLength = 4.1
            # this is the left (inside), bottom Slit
            self.slitpoints[0] = np.array([[self.slits[0], 0],
                                           [self.slits[0], slitLength],
                                           [self.slits[0] + self.wallthick, slitLength],
                                           [self.slits[0] + self.wallthick, 0]]
                                          )
            # this is the left (inside), upper Slit
            self.slitpoints[1] = np.array([[self.slits[0] - 0.05, slitLength + self.exit_size],
                                           [self.slits[0] + 0.1, self.arena_height],
                                           [self.slits[0] + self.wallthick + 0.1, self.arena_height],
                                           [self.slits[0] + self.wallthick - 0.05, slitLength + self.exit_size]]
                                          )
            # this is the right (outside), lower Slit
            self.slitpoints[2] = np.array([[self.slits[1], 0],
                                           [self.slits[1] + 0.1, slitLength],
                                           [self.slits[1] + self.wallthick + 0.1, slitLength],
                                           [self.slits[1] + self.wallthick, 0]]
                                          )
            # this is the right (outside), upper Slit
            self.slitpoints[3] = np.array([[self.slits[1] + 0.2, slitLength + self.exit_size],
                                           [self.slits[1] + 0.2, self.arena_height],
                                           [self.slits[1] + self.wallthick + 0.2, self.arena_height],
                                           [self.slits[1] + self.wallthick + 0.2, slitLength + self.exit_size]]
                                          )
        # elif size == 'M' or size == 'XL'
        else:
            # Regular SPT: walls are vertical and symmetric around the exit.
            slitLength = (self.arena_height - self.exit_size) / 2
            # this is the left (inside), bottom Slit
            self.slitpoints[0] = np.array([[self.slits[0], 0],
                                           [self.slits[0], slitLength],
                                           [self.slits[0] + self.wallthick, slitLength],
                                           [self.slits[0] + self.wallthick, 0]]
                                          )
            # this is the left (inside), upper Slit
            self.slitpoints[1] = np.array([[self.slits[0], slitLength + self.exit_size],
                                           [self.slits[0], self.arena_height],
                                           [self.slits[0] + self.wallthick, self.arena_height],
                                           [self.slits[0] + self.wallthick, slitLength + self.exit_size]]
                                          )
            # this is the right (outside), lower Slit
            self.slitpoints[2] = np.array([[self.slits[1], 0],
                                           [self.slits[1], slitLength],
                                           [self.slits[1] + self.wallthick, slitLength],
                                           [self.slits[1] + self.wallthick, 0]]
                                          )
            # this is the right (outside), upper Slit
            self.slitpoints[3] = np.array([[self.slits[1], slitLength + self.exit_size],
                                           [self.slits[1], self.arena_height],
                                           [self.slits[1] + self.wallthick, self.arena_height],
                                           [self.slits[1] + self.wallthick, slitLength + self.exit_size]]
                                          )
        # slit_up
        my_maze.CreatePolygonFixture(vertices=self.slitpoints[0].tolist())
        my_maze.CreatePolygonFixture(vertices=self.slitpoints[2].tolist())
        # slit_down
        my_maze.CreatePolygonFixture(vertices=self.slitpoints[1].tolist())
        my_maze.CreatePolygonFixture(vertices=self.slitpoints[3].tolist())
    # this is for all the 'normal SPT Mazes', that have no manufacturing mistakes
    else:
        self.slitpoints = np.empty((len(self.slits) * 2, 4, 2), float)
        for i, slit in enumerate(self.slits):
            # this is the lower Slit
            self.slitpoints[2 * i] = np.array([[slit, 0],
                                               [slit, (self.arena_height - self.exit_size) / 2],
                                               [slit + self.wallthick, (self.arena_height - self.exit_size) / 2],
                                               [slit + self.wallthick, 0]]
                                              )
            my_maze.CreatePolygonFixture(vertices=self.slitpoints[2 * i].tolist())
            # this is the upper Slit
            self.slitpoints[2 * i + 1] = np.array([[slit, (self.arena_height + self.exit_size) / 2],
                                                   [slit, self.arena_height],
                                                   [slit + self.wallthick, self.arena_height],
                                                   [slit + self.wallthick,
                                                    (self.arena_height + self.exit_size) / 2]]
                                                  )
            my_maze.CreatePolygonFixture(vertices=self.slitpoints[2 * i + 1].tolist())
    # I dont want to have the vertical line at the first exit
    self.slitTree = BoxIt(np.array([[0, 0],
                                    [0, self.arena_height],
                                    [self.slits[-1], self.arena_height],
                                    [self.slits[-1], 0]]),
                          0.1, without='right')
    for slit_points in self.slitpoints:
        self.slitTree = np.vstack((self.slitTree, BoxIt(slit_points, 0.01)))
    self.slitTree = cKDTree(self.slitTree)
def get_zone(self):
    """Compute and store self.zone, the rectangular start zone of the maze.

    For SPT mazes the zone is the whole chamber left of the first slit; for
    all other shapes it is a rectangle centred vertically, scaled by the
    solver/size resize factor and ending at the first slit.
    """
    if self.shape == 'SPT':
        x_edge = self.slits[0]
        self.zone = np.array([[0, 0],
                              [0, self.arena_height],
                              [x_edge, self.arena_height],
                              [x_edge, 0]])
    else:
        rf = ResizeFactors[self.solver][self.size]
        half_w = self.arena_length * rf / 2
        half_h = self.arena_height * rf / 2
        mid_y = self.arena_height / 2
        x_edge = self.slits[0]
        self.zone = np.array([[x_edge - half_w, mid_y - half_h],
                              [x_edge - half_w, mid_y + half_h],
                              [x_edge, mid_y + half_h],
                              [x_edge, mid_y - half_h]])
    return
def possible_state_transitions(self, From, To):
    """Return True if moving between the two named maze states is allowed.

    The adjacency lists below encode which states border each other for the
    'H' and 'SPT' shapes; a state is always allowed to remain itself.
    """
    transitions = dict()
    s = self.statenames
    if self.shape == 'H':
        transitions[s[0]] = [s[0], s[1], s[2]]
        transitions[s[1]] = [s[1], s[0], s[2], s[3]]
        transitions[s[2]] = [s[2], s[0], s[1], s[4]]
        transitions[s[3]] = [s[3], s[1], s[4], s[5]]
        transitions[s[4]] = [s[4], s[2], s[3], s[5]]
        transitions[s[5]] = [s[5], s[3], s[4]]
        # NOTE(review): this branch ignores *From* and checks from the most
        # recently recorded state instead - confirm the asymmetry with the
        # SPT branch below is intended.
        return transitions[self.states[-1]].count(To) > 0
    if self.shape == 'SPT':
        transitions[s[0]] = [s[0], s[1]]
        transitions[s[1]] = [s[1], s[0], s[2]]
        transitions[s[2]] = [s[2], s[1], s[3]]
        transitions[s[3]] = [s[3], s[2], s[4]]
        transitions[s[4]] = [s[4], s[3], s[5]]
        transitions[s[5]] = [s[5], s[4], s[6]]
        transitions[s[6]] = [s[6], s[5]]
        return transitions[self.states[From]].count(To) > 0
def minimal_path_length(self):
    """Look up the precomputed shortest (D*) path length for this maze.

    Reads the planner-results dataframe row matching this size/shape.
    Imports are local, presumably to avoid circular imports at module load
    time - TODO confirm.
    """
    from DataFrame.create_dataframe import df
    from Classes_Experiment.mr_dstar import filename_dstar
    p = df.loc[df['filename'] == filename_dstar(self.size, self.shape, 0, 0)][['path length [length unit]']]
    return p.values[0][0]
def maze_corners(maze):
    """Return the outer arena corner points (extended 20 units past the last
    slit) followed by all slit polygon vertices flattened to (16, 2)."""
    right_edge = maze.slits[-1] + 20
    outer = [[0, 0],
             [0, maze.arena_height],
             [right_edge, maze.arena_height],
             [right_edge, 0]]
    return outer + list(np.resize(maze.slitpoints, (16, 2)))
|
989,092 | 6220d514d19e43489222500c170ab546170ab3d0 | from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework.response import Response
from ..models import *
from .. import helper
from ..serializers import *
from rest_framework import generics
from django.contrib.auth import get_user_model
# Vendors acts as the user model for this API module.
User = Vendors
import json
#from django.contrib.auth.models import User
# Shorthand for the shared safe-dict-lookup helper.
getitem = helper.getitem
# Python 2/3 compatibility shims for HTML escaping/unescaping.
try:
    from html import escape  # python 3.x
except ImportError:
    from cgi import escape  # python 2.x
try:
    from html import unescape  # python 3.4+
except ImportError:
    try:
        from html.parser import HTMLParser  # python 3.x (<3.4)
    except ImportError:
        from HTMLParser import HTMLParser  # python 2.x
    unescape = HTMLParser().unescape
##########################
### APis List
#########################
class GetJson():
    """Helper that merges a request's JSON body and POST form fields.

    POST fields take precedence over identically named body fields.
    """

    def getjson(self, request):
        """Return a dict of the request's JSON-body fields overlaid with its
        POST fields; returns whatever was merged so far on error.

        Fixes: the original parsed the body JSON twice and used a bare
        ``except``; the body is now parsed once and only JSON decoding
        errors are swallowed for the body.
        """
        merged = {}
        try:
            if request.body:
                try:
                    # json.JSONDecodeError is a subclass of ValueError.
                    merged.update(json.loads(request.body))
                except ValueError:
                    print("Could not load Json from body: ", request.body)
            # POST fields overwrite body fields of the same name.
            for key in request.POST.keys():
                merged[key] = request.POST[key]
        except Exception as e:
            # Best-effort: return the partial merge, matching old behaviour.
            print("Error while Getting json: ", e)
        return merged
def getsessionuser(request):
    """Return the Vendors account bound to the request's 'sessionid1'
    cookie, or None when the cookie is absent or the session is invalid."""
    session_key = request.COOKIES.get('sessionid1')
    if session_key is None:
        return None
    return validatesession(session_key)
def validatesession(sessionid):
    """Resolve a session key to its Vendors account.

    Returns None when the session does not exist or its username does not
    match any vendor's mobile number.
    """
    try:
        session = Sessions.objects.get(key=sessionid)
    except ObjectDoesNotExist:
        return None
    try:
        return Vendors.objects.get(mobile=session.username)
    except ObjectDoesNotExist:
        return None
def checkuser(username):
    """Return the Vendors record whose mobile number is *username*, or None."""
    try:
        return Vendors.objects.get(mobile=username)
    except ObjectDoesNotExist:
        return None
class listDrivers(APIView):
    """GET: list the drivers owned by the session user."""

    def get(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        if user is None:
            return Response({"api": "listdrivers", "status": "false", "info": "No User session was found"})
        owned = Drivers.objects.filter(owner=user.mobile)
        return Response(DriverSerializer(owned, many=True).data)
class listAllDrivers(APIView):
    """GET (staff only): list all drivers, optionally filtered by owner."""

    def get(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        if user is None:
            return Response({"api": "listalldrivers", "status": "false", "info": "No User session was found"})
        if not user.is_staff:
            return Response({"api": "listalldrivers", "status": "false", "info": "User is not staff"})
        filters = {}
        if "owner" in jsondata:
            filters["owner"] = jsondata["owner"]
        matching = Drivers.objects.filter(**filters)
        return Response(DriverSerializer(matching, many=True).data)
class listVehicles(APIView):
    """GET: list the vehicles owned by the session user."""

    def get(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        if user is None:
            return Response({"api": "listvehicles", "status": "false", "info": "No User session was found"})
        owned = Vehicles.objects.filter(owner=user.mobile)
        return Response(VehicleSerializer(owned, many=True).data)
class listAllVehicles(APIView):
    """GET (staff only): list all vehicles, optionally filtered by owner."""

    def get(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        if user is None:
            return Response({"api": "listallvehicles", "status": "false", "info": "No User session was found"})
        if not user.is_staff:
            return Response({"api": "listallvehicles", "status": "false", "info": "User is not staff"})
        filters = {}
        if "owner" in jsondata:
            filters["owner"] = jsondata["owner"]
        matching = Vehicles.objects.filter(**filters)
        return Response(VehicleSerializer(matching, many=True).data)
@api_view(['POST'])
def deleteDriver(request):
    """POST: delete a driver by license number.

    Only staff or the driver's owner may delete.  Fixes: removed a stray
    debug print, and the unauthorized path no longer claims
    ``status: true / "already deleted"`` when nothing was deleted.
    """
    user = getsessionuser(request)
    jsondata = GetJson().getjson(request)
    if not user:
        return Response({"api": "deletedriver", "status": "false", "info": "No User session was found"})
    ln = getitem(jsondata, "licenseno")
    if not ln:
        return Response({"api": "deletedriver", "status": "false", "info": "No Driver found or No licenseno privided"})
    try:
        driver = Drivers.objects.get(licenseno=ln)
    except ObjectDoesNotExist:
        # Idempotent delete: a missing driver is reported as already gone.
        return Response({"api": "deletedriver", "status": "true", "info": "Driver already deleted."})
    if not (user.is_staff or driver.owner == user.mobile):
        return Response({"api": "deletedriver", "status": "false", "info": "Not authorized to delete this driver."})
    driver.delete()
    return Response({"api": "deletedriver", "status": "true", "info": "Driver Deleted"})
@api_view(['POST'])
def deleteVehicle(request):
    """POST: delete a vehicle by RC number.

    Fixes: the original performed no authorization check at all - any
    logged-in user could delete any vehicle.  Now only staff or the
    vehicle's owner may delete, matching deleteDriver.
    """
    user = getsessionuser(request)
    jsondata = GetJson().getjson(request)
    if not user:
        return Response({"api": "deletevehicle", "status": "false", "info": "No User session was found"})
    rc = getitem(jsondata, "rcnumber")
    if not rc:
        return Response({"api": "deletevehicle", "status": "false", "info": "No Car found or No RCnumber privided"})
    try:
        vehicle = Vehicles.objects.get(rcnumber=rc)
    except ObjectDoesNotExist:
        # Idempotent delete: a missing vehicle is reported as already gone.
        return Response({"api": "deletevehicle", "status": "true", "info": "Vehicle already deleted."})
    if not (user.is_staff or vehicle.owner == user.mobile):
        return Response({"api": "deletevehicle", "status": "false", "info": "Not authorized to delete this vehicle."})
    vehicle.delete()
    return Response({"api": "deletevehicle", "status": "true", "info": "vehicle Deleted"})
class addDriver(APIView):
    """POST: create or update a Drivers record, keyed by license number."""

    def post(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        if not user:
            return Response({"api": "adddriver", "status": "false", "info": "No User session was found"})
        try:
            licenseno = getitem(jsondata, "licenseno")
            owner = getitem(jsondata, "owner")
            if not licenseno:
                return Response({"api": "adddriver", "status": "false", "info": "licenseno field is mendatory"})
            # Only staff may assign the driver to a different existing owner;
            # everyone else becomes the owner themselves.
            if not (user.is_staff and checkuser(owner)):
                owner = user.mobile
            # Upsert: update an existing driver or create a fresh one.
            try:
                driver = Drivers.objects.get(licenseno=licenseno)
            except ObjectDoesNotExist:
                driver = Drivers()
            driver.licenseno = licenseno
            # Only overwrite the fields the caller actually supplied.
            optional = {
                "owner": owner,
                "aadharno": getitem(jsondata, "aadharno"),
                "licensefile": getitem(jsondata, "licensefile"),
                "aadharfile": getitem(jsondata, "aadharfile"),
                "name": getitem(jsondata, "name"),
                "pancard": getitem(jsondata, "pancard"),
                "nickname": getitem(jsondata, "nickname"),
                "photourl": getitem(jsondata, "photourl"),
                "number": getitem(jsondata, "number"),
            }
            for field, value in optional.items():
                if value:
                    setattr(driver, field, value)
            if user.is_staff:
                isActive = getitem(jsondata, "isActive")
                # BUG FIX: getitem may return None; calling .lower() on it
                # raised AttributeError and aborted the save via the outer
                # except. Guard for a missing/None value first.
                if isActive and isActive.lower() == "true":
                    driver.isActive = True
            driver.save()
            return Response({"api": "adddriver", "status": "true", "info": "Driver updated."})
        except Exception as e:
            return Response({"api": "adddriver", "status": "false", "info": str(e)})
class addVehicle(APIView):
    """POST: create or update a Vehicles record, keyed by RC number."""

    def post(self, request):
        user = getsessionuser(request)
        jsondata = GetJson().getjson(request)
        types = ["Hatchback", "Sedan", "SUV", "MPV", "VAN"]
        if not user:
            return Response({"api": "addVehicle", "status": "false", "info": "No User session was found"})
        try:
            rcnumber = getitem(jsondata, "rcnumber")
            vtype = getitem(jsondata, "vtype")
            owner = getitem(jsondata, "owner")
            if vtype and (vtype not in types):
                return Response({"api": "addVehicle", "status": "false", "info": "Please enter valid vehicle types, ", "vehicles": ["Hatchback", "Sedan", "SUV", "MPV", "VAN"]})
            # Only staff may assign the vehicle to a different existing owner.
            if not (user.is_staff and checkuser(owner)):
                owner = user.mobile
            if not rcnumber:
                return Response({"api": "addVehicle", "status": "false", "info": "RC Number is mendatory field."})
            # Upsert: update an existing vehicle or create a fresh one.
            try:
                vehicle = Vehicles.objects.get(rcnumber=rcnumber)
            except ObjectDoesNotExist:
                vehicle = Vehicles()
            vehicle.rcnumber = rcnumber
            # Only overwrite the fields the caller actually supplied.
            optional = {
                "vtype": vtype,
                "model": getitem(jsondata, "model"),
                "insuranceno": getitem(jsondata, "insuranceno"),
                "permitno": getitem(jsondata, "permitno"),
                "permitdate": getitem(jsondata, "permitdate"),
                "insdate": getitem(jsondata, "insdate"),
                "nickname": getitem(jsondata, "nickname"),
                "photo": getitem(jsondata, "photo"),
                "nameofrc": getitem(jsondata, "nameofrc"),
                "owner": owner,
            }
            for field, value in optional.items():
                if value:
                    setattr(vehicle, field, value)
            if user.is_staff:
                isActive = getitem(jsondata, "isActive")
                # BUG FIX: getitem may return None; calling .lower() on it
                # raised AttributeError and aborted the save via the outer
                # except. Guard for a missing/None value first.
                if isActive and isActive.lower() == "true":
                    vehicle.isActive = True
            vehicle.save()
            return Response({"api": "addvehicle", "status": "true", "info": "Vehicle updated."})
        except Exception as e:
            return Response({"api": "addvehicle", "status": "false", "info": str(e)})
|
989,093 | 37645d785e400cac770dc19861956fd12337fa5b | # from the jupyter notebook provided by the class
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torchvision.datasets as ds
import pylab as plt
def load_mnist(datadir='./data_cache'):
    """Download MNIST if needed and return (X_tr, Y_tr, X_te, Y_te) as numpy
    arrays, with pixel values scaled to [0, 1]."""
    def to_xy(dataset):
        # Scale uint8 pixels into [0, 1] floats; labels stay as digit ints.
        return np.array(dataset.data) / 255.0, np.array(dataset.targets)

    train_split = ds.MNIST(root=datadir, train=True, download=True, transform=None)
    test_split = ds.MNIST(root=datadir, train=False, download=True, transform=None)
    X_tr, Y_tr = to_xy(train_split)
    X_te, Y_te = to_xy(test_split)
    return X_tr, Y_tr, X_te, Y_te
X_tr, Y_tr, X_te, Y_te = load_mnist()
# Show one random training digit as a visual sanity check.
i = np.random.choice(len(X_tr))
plt.imshow(X_tr[i], cmap='gray')
plt.title(f'digit: {Y_tr[i]}')
print('original X_tr:', X_tr.shape)
# select 500 random examples
n = 500
I = np.random.choice(len(X_tr), n, replace=False)
X = X_tr[I]
Y = (Y_tr[I] % 2) * 2.0 - 1  # odd/even --> +1/-1
X = X.reshape(-1, 28*28)  # flatten
print('reshaped X:', X.shape)
print('reshaped Y:', Y.shape)
# problem 3 part 2
def dLdbeta(X, Y, beta):
    """Gradient of the squared-error loss (1/2)||X beta - Y||^2 w.r.t. beta.

    Handles both a single sample (1-D X, scalar Y) and a batch (2-D X).
    """
    if X.ndim == 1:
        # Single sample: (x . x) * beta - x * y
        return np.multiply(np.dot(X.T, X), beta) - np.multiply(X.T, Y)
    # Batch: (X^T X) beta - X^T Y
    return X.T @ X @ beta - X.T @ Y
# this sgd uses all samples with minibatches of size 1. may want to add minibatches to this for better tradeoff between convergence and computational efficiency
def sgd(X, Y, learning_rate=0.01, n_epochs=50):
    """Least-squares stochastic gradient descent with batch size 1.

    Parameters
    ----------
    X : (n_samples, n_features) array of flattened inputs
    Y : (n_samples,) array of targets
    learning_rate : initial step size, decayed by a factor of 1.02 per epoch
    n_epochs : number of full passes over the data

    Returns the learned weight vector of length X.shape[1].
    """
    # Generalized: the weight dimension now follows the data instead of the
    # previous hard-coded 784 (28*28 MNIST pixels).
    beta = np.random.randn(X.shape[1])
    for _ in range(n_epochs):
        inds = np.arange(len(X))
        np.random.shuffle(inds)  # fresh sample order every epoch
        for i in inds:
            beta = beta - learning_rate * dLdbeta(X[i], Y[i], beta)
        learning_rate = learning_rate / 1.02  # gradually reducing the learning rate
    return beta
def gd(X, Y, learning_rate=0.0001, n_epochs=50):
    """Full-batch gradient descent for least squares.

    Parameters mirror sgd(); one gradient step per epoch on the whole batch.
    Returns the learned weight vector of length X.shape[1].
    """
    # Generalized: the weight dimension now follows the data instead of the
    # previous hard-coded 784 (28*28 MNIST pixels).
    beta = np.random.randn(X.shape[1])
    for _ in range(n_epochs):
        beta = beta - learning_rate * dLdbeta(X, Y, beta)
        learning_rate = learning_rate / 1.02  # gradually reducing the learning rate
    return beta
# running SGD
print("SGD resutls")
beta_sgd = sgd(X, Y)
# Training-set error/accuracy (Y is already +-1 encoded).
train_error = np.linalg.norm(np.matmul(X, beta_sgd) - Y)**2 / Y.shape[0]
print(f"train error: {train_error}")
print(f"train acc: {100*np.sum(np.round(np.matmul(X, beta_sgd)) == Y)/Y.shape[0]}%")
# BUG FIX: the raw test labels are digits 0-9; they must be mapped to the
# same odd/even --> +1/-1 encoding used for training before comparing, or
# the reported test error/accuracy is meaningless.
X_te_flat = X_te.reshape(-1, 28*28)
Y_te_pm = (Y_te % 2) * 2.0 - 1
test_error = np.linalg.norm(np.matmul(X_te_flat, beta_sgd) - Y_te_pm)**2 / Y_te_pm.shape[0]
print(f"test error: {test_error}")
print(f"test acc: {100*np.sum(np.round(np.matmul(X_te_flat, beta_sgd)) == Y_te_pm)/Y_te_pm.shape[0]}%")
# running GD
print("GD resutls")
beta_gd = gd(X, Y)
train_error = np.linalg.norm(np.matmul(X, beta_gd) - Y)**2 / Y.shape[0]
print(f"train error: {train_error}")
print(f"train acc: {100*np.sum(np.round(np.matmul(X, beta_gd)) == Y)/Y.shape[0]}%")
test_error = np.linalg.norm(np.matmul(X_te_flat, beta_gd) - Y_te_pm)**2 / Y_te_pm.shape[0]
print(f"test error: {test_error}")
print(f"test acc: {100*np.sum(np.round(np.matmul(X_te_flat, beta_gd)) == Y_te_pm)/Y_te_pm.shape[0]}%")
989,094 | 938469b07cb2d441712be70a42dbbffc141a9ba7 | import re
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from devilry.apps.core.testhelper import TestHelper
from devilry_qualifiesforexam.models import Status
from devilry_qualifiesforexam.views import StatusPrintView
from devilry_qualifiesforexam.views import extract_lastname
from devilry_qualifiesforexam.views import cmp_lastname
class TestStatusPrintView(TestCase):
    """Tests for StatusPrintView: access control, 404 behaviour, lastname
    extraction/comparison helpers, and the username/name/lastname sort
    orders of the qualifies-for-exam status page."""

    def setUp(self):
        self.testhelper = TestHelper()
        self.testhelper.create_superuser('superuser')

    def _get_url(self, status_id):
        """Resolve the print-view URL for a given status id."""
        return reverse('devilry_qualifiesforexam_statusprint', kwargs={'status_id': status_id})

    def _getas(self, username, status_id, data={}):
        """GET the status print page while logged in as *username*."""
        self.client.login(username=username, password='test')
        return self.client.get(self._get_url(status_id), data)

    def test_status_not_found(self):
        # No Status objects exist, so any id must 404.
        response = self._getas('superuser', 1)
        self.assertEqual(response.status_code, 404)

    def test_status_forbidden(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        # A user with no relation to the period gets 403.
        self.testhelper.create_user('nobody')
        response = self._getas('nobody', status.pk)
        self.assertEqual(response.status_code, 403)

    def test_status_not_ready(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
        # A NOTREADY status must not be printable, even for admins.
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.NOTREADY)
        response = self._getas('superuser', status.pk)
        self.assertEqual(response.status_code, 404)

    def test_status_periodadmin(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:admin(periodadmin):begins(-3):ends(6)'])
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        response = self._getas('periodadmin', status.pk)
        self.assertEqual(response.status_code, 200)

    def test_status_superuser(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-3):ends(6)'])
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        response = self._getas('superuser', status.pk)
        self.assertEqual(response.status_code, 200)

    def test_extract_lastname(self):
        # Last word of the full name; empty for missing/blank names.
        self.assertEqual(extract_lastname(self.testhelper.create_user('unused_a', None)), '')
        self.assertEqual(extract_lastname(self.testhelper.create_user('unused_b', ' ')), '')
        self.assertEqual(extract_lastname(self.testhelper.create_user('unused_c', 'Test')), 'Test')
        self.assertEqual(extract_lastname(self.testhelper.create_user('unused_d', 'Test User')), 'User')
        self.assertEqual(extract_lastname(self.testhelper.create_user('unused_e', 'My Test User')), 'User')
        self.assertEqual(extract_lastname(User.objects.create(username='unused_x')), '')  # NOTE: No user profile

    def test_cmp_lastname(self):
        user_a = self.testhelper.create_user('a', 'User A')
        user_b = self.testhelper.create_user('b', 'User B')
        self.assertEqual(cmp_lastname(user_b, user_a), 1)

    def _create_relateduser(self, username, full_name=''):
        """Create a user and register them as a related student on sub_p1."""
        user = self.testhelper.create_user(username, full_name)
        relstudent = self.testhelper.sub_p1.relatedstudent_set.create(user=user)
        return relstudent

    def test_sortby_username(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-3):ends(6)'])
        student1 = self._create_relateduser('student1')
        student2 = self._create_relateduser('student2')
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        status.students.create(relatedstudent=student1, qualifies=True)
        status.students.create(relatedstudent=student2, qualifies=True)
        self.assertEqual(
            [s.relatedstudent for s in StatusPrintView.get_studentstatuses_by_sorter(status, 'username')],
            [student1, student2])

    def test_sortby_name(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-3):ends(6)'])
        student1 = self._create_relateduser('student1', 'Student Z')
        student2 = self._create_relateduser('student2', 'Student B')
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        status.students.create(relatedstudent=student1, qualifies=True)
        status.students.create(relatedstudent=student2, qualifies=True)
        self.assertEqual(
            [s.relatedstudent for s in StatusPrintView.get_studentstatuses_by_sorter(status, 'name')],
            [student2, student1])

    def test_sortby_lastname(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-3):ends(6)'])
        homer = self._create_relateduser('student1', 'Homer Simpson')
        superman = self._create_relateduser('student2', 'Super Man')
        peterparker = self._create_relateduser('student3', 'Peter Parker')
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        status.students.create(relatedstudent=homer, qualifies=True)
        status.students.create(relatedstudent=superman, qualifies=True)
        status.students.create(relatedstudent=peterparker, qualifies=True)
        # Expect ordering by last name: Man < Parker < Simpson.
        self.assertEqual(
            [s.relatedstudent for s in StatusPrintView.get_studentstatuses_by_sorter(status, 'lastname')],
            [superman, peterparker, homer])

    def _extract_by_spanclass(self, html, cssclass):
        """Return the inner texts of all spans with the given CSS class."""
        return re.findall('<span class="{0}">(.+?)</span>'.format(cssclass), html)

    def test_sortby_view(self):
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-3):ends(6)'])
        student1 = self._create_relateduser('student1', 'Homer Simpson')
        student2 = self._create_relateduser('student2', 'Peter Parker')
        status = Status.objects.create(
            user=self.testhelper.superuser,
            period=self.testhelper.sub_p1,
            status=Status.READY)
        status.students.create(relatedstudent=student1, qualifies=True)
        status.students.create(relatedstudent=student2, qualifies=True)
        response = self._getas('superuser', status.pk, {'sortby': 'lastname'})
        self.assertEqual(response.status_code, 200)
        usernames = self._extract_by_spanclass(response.content, 'fullname')
        self.assertEqual(usernames, ['Peter Parker', 'Homer Simpson'])
|
989,095 | 2578a29cb4e521bf8461942f33d45fe4a483f6ff | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import DataLoader,Dataset
import torchvision
from torchvision import transforms, datasets
import os
import argparse
import shutil
from PIL import Image
import math
import time
import numpy as np
from model.SkrNet import *
from data.dataset import *
from utils.image import *
from utils.parse import *
from utils.utils import *
# Command-line configuration for detection training (model, data, schedule).
parser = argparse.ArgumentParser(description='SkrNet Object Detection training')
parser.add_argument('--model', type=str, default='SkrNet', metavar='model',
                    help='model to train (SkrNet,VGG16,ResNet18)')
parser.add_argument('--batch', type=int, default=32, metavar='N',
                    help='batch size for each GPU during training (default: 32)')
parser.add_argument('--lr', type=float, default=0.01, metavar='N',
                    help='learning rate (default: 0.001)')
parser.add_argument('--workers', default=32, type=int, metavar='N',
                    help='number of data loading threads (default: 32)')
parser.add_argument('--device', type=str, default='0', metavar='N',
                    help='device id')
parser.add_argument('--dataset', type=str, default='data/dji.data',
                    help='dataset (default: data/dji.data')
parser.add_argument('--end', type=int, default=160, metavar='N',
                    help='number of epochs to train (default: 160)')
parser.add_argument('--start', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrain', default='', type=str, metavar='PATH',
                    help='path to pretrain checkpoint (default: none)')
parser.add_argument('--optimizer', default='Adam', type=str, metavar='Optimizer',
                    help='optimizer: SGD, Adagrad, Adam, Adadelta, Adamax, ASGD, RMSprop')
# Default log path embeds the launch timestamp so runs never overwrite logs.
parser.add_argument('--log', default='./logs/%s.log'%time.strftime('%Y-%m-%d_%H:%M:%S',time.localtime(time.time())), type=str, metavar='PATH',
                    help='path to log (default: none)')
args = parser.parse_args()
def log(log_file, str):
    """Append *str* to *log_file* as a single line.

    Fixes: the original opened the file without a context manager, leaking
    the handle if the write raised.  NOTE: the second parameter shadows the
    builtin ``str``; the name is kept for interface compatibility.
    """
    with open(log_file, 'a+') as handle:
        handle.write(str + '\n')
def test(model, data_loader):
    """Return the average IoU of *model* over *data_loader*.

    Fixes: evaluation now runs under torch.no_grad(), so no autograd graphs
    are built (the original accumulated graph memory per batch); the
    deprecated Variable wrapper (a no-op on modern PyTorch) is dropped.
    """
    model.eval()
    total_iou = 0.0
    with torch.no_grad():
        for img, target in data_loader:
            img, target = img.cuda(), target.cuda()
            output = model(img)
            total_iou += evaluate(output, target)
    return total_iou / float(len(data_loader))
def train(model, data_loader, loss_func, optimizer):
    """Run one training epoch.

    Returns (avg_loss, avg_recall50, avg_recall75, avg_iou) averaged over
    all batches of *data_loader*.
    """
    model.train()
    avg_loss, avg_recall50, avg_recall75, avg_iou = 0.0, 0.0, 0.0, 0.0
    total_batch = len(data_loader)
    ready_batch = 0
    for img, target in data_loader:
        img, target = img.cuda(), target.cuda()
        img, target = Variable(img), Variable(target)
        optimizer.zero_grad()
        outputs = model(img)
        # loss_func returns the loss plus epoch metrics for this batch.
        loss, recall50, recall75, iou = loss_func(outputs, target)
        avg_loss += loss.item()
        avg_recall50 += recall50
        avg_recall75 += recall75
        avg_iou += iou
        loss.backward()
        optimizer.step()
        ready_batch += 1
        print("{}/{} ready/total".format(ready_batch, total_batch))
    # NOTE(review): source indentation was lost; this optimizer dump may have
    # been inside the batch loop - confirm. It looks like debug output.
    print(optimizer)
    avg_loss /= float(len(data_loader))
    avg_recall50 /= float(len(data_loader))
    avg_recall75 /= float(len(data_loader))
    avg_iou /= float(len(data_loader))
    return avg_loss, avg_recall50, avg_recall75, avg_iou
# ---- experiment setup and training loop (module-level script) ----
data_config = parse_data_config(args.dataset)
train_path = data_config["train"]
valid_path = data_config["valid"]
if(args.pretrain):
    # Warm-start from a classification checkpoint, then switch the model
    # head to detection mode.
    model = SkrNet(detection = False)
    model.load_state_dict(torch.load(args.pretrain))
    model.detection = True
    print('load pretrain model')
else:
    model = SkrNet()
num_gpu = len(args.device.split(','))
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if(len(args.device)>1):
    # NOTE(review): this tests the *string* length, so a single device id
    # like "10" would wrongly take the multi-GPU path - confirm intent.
    model.to("cuda:{}".format(args.device.split(',')[0]))
    device_ids = [int(device) for device in args.device.split(',')]
    model = nn.DataParallel(model,device_ids=device_ids).cuda()
    region_loss = model.module.loss
    # region_loss = nn.DataParallel(model.module.loss,device_ids=device_ids).cuda()
else:
    model.to("cuda:{}".format(args.device))
    model.cuda()
    region_loss = model.loss
train_dataset = ListDataset(train_path)
valid_dataset = ListDataset(valid_path)
# Validation uses a quarter of the (per-GPU-scaled) training batch size.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=num_gpu*args.batch, shuffle=True, num_workers=args.workers, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=int(num_gpu*args.batch/4), shuffle=True, num_workers=args.workers, pin_memory=True)
if(args.optimizer == 'SGD'):
    optimizer = torch.optim.SGD(model.parameters(), lr = args.lr)
elif(args.optimizer == 'Adam'):
    # NOTE(review): args.lr is ignored for Adam (library default is used).
    optimizer = torch.optim.Adam(model.parameters())
# Per-epoch metrics: columns are loss, (unused), recall75, iou.
history_score = np.zeros((args.end + 1,4))
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.1, patience=5, verbose=True)
for epoch in range(args.start, args.end):
    start = time.time()
    print('epoch%d...'%epoch)
    log(args.log,'epoch%d...'%epoch)
    log(args.log,str(optimizer))
    loss, recall50, recall75, avg_iou = train(model,train_loader,region_loss,optimizer)
    # LR plateau scheduling is driven by the *training* IoU.
    scheduler.step(avg_iou)
    print('training: avg loss: %f, avg recall50: %f, avg recall75:%f, avg iou:%f\n' % (loss,recall50,recall75,avg_iou))
    log(args.log,'training: avg loss: %f, avg recall50: %f, avg recall75:%f, avg iou:%f\n' % (loss,recall50,recall75,avg_iou))
    iou = test(model, valid_loader)
    print('testing: avg iou: %f\n' % iou)
    log(args.log,'testing: avg iou: %f\n' % iou)
    if iou > max(history_score[:,3]):
        # Checkpoint only on a new best validation IoU.
        # NOTE(review): model.module only exists on the DataParallel path;
        # a single-GPU run would raise AttributeError here - confirm.
        torch.save(model.module.state_dict(), './checkpoint/detection/%s_%.4f.pkl'%(args.model,iou))
    history_score[epoch][0] = loss
    history_score[epoch][2] = recall75
    history_score[epoch][3] = iou
    print('epoch%d time %.4fs\n' % (epoch,time.time()-start))
989,096 | 3badbc835fbad26a001092d745d6c89a520f5b22 | import turtle
t = turtle.Turtle()
sc = turtle.Screen()
sc.bgcolor("gray")
t.pencolor("red")
a = 0
b = 0
t.speed(0)
t.penup()
t.goto(0,200)
t.pendown()
while(True):
t.forward(a)
t.right(b)
a+=3
b+=1
if b == 210:
break
t.hideturtle()
turtle.done() |
989,097 | c6d611644cdd8a529f15fc4e4ba19c4debf887f7 | #
# @lc app=leetcode.cn id=832 lang=python3
#
# [832] 翻转图像
#
# @lc code=start
class Solution:
    def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
        """Horizontally flip each row of the binary image and invert its
        bits, modifying A in place and returning it."""
        for row in A:
            # Reversing and XOR-inverting each bit can be done in one pass.
            row[:] = [bit ^ 1 for bit in reversed(row)]
        return A
# @lc code=end
|
989,098 | 36b6c2c5a6da44b688a5ba92e751abbdf26a158a | import numpy as np
from scipy.spatial.transform import Rotation as R
class ObsGen:
    """Base class for observation generators; reset() is a no-op by default."""

    def reset(self):
        # Subclasses override this to clear per-episode state.
        pass
class TeacherObsGenerator(ObsGen):
    """Bundles the realistic, simulation-only and value-function observation
    generators, exposing each result keyed by name."""

    def __init__(self, state):
        self.state = state
        self.sub_gen_class = {
            "real_obs": RealisticObsGenerator,
            "sim_obs": SimObsGenerator,
            "vf_obs": VfGenerator,
        }
        self.sub_gen = {}
        for name, factory in self.sub_gen_class.items():
            self.sub_gen[name] = factory(self.state)
        self.obs_dim = {name: gen.obs_dim for name, gen in self.sub_gen.items()}

    def reset(self):
        for gen in self.sub_gen.values():
            gen.reset()

    def generate(self):
        return {name: gen.generate() for name, gen in self.sub_gen.items()}

    def get_sym_obs_matrix(self):
        return {name: gen.get_sym_obs_matrix() for name, gen in self.sub_gen.items()}
class RealisticObsGenerator (ObsGen):
    """Concatenates the observation components that are measurable on the
    real robot (the student-policy input)."""

    def __init__ (self, state):
        self.state = state
        # Active components; commented entries were tried and disabled
        # (kept for reference).
        self.sub_gen_class = [ JointTarget,
                        # JointDelta,
                        # JointPos,
                        JointFlexibleDelta,
                        JointFlexiblePos,
                        # JointSpeed, # <- to remove
                        Phase,
                        RandLocalUp,
                        # LocalUp,
                        #RotVel, # <- to remove
                        Cmd_PosVel,
                        Cmd_RotVel,
                        #LastAction,
                        #Height, # <- to remove
                        #LocPosVel, # <- to remove
                        ]
        self.sub_gen = [Gen(self.state) for Gen in self.sub_gen_class]
        self.obs_dim = sum([gen.obs_dim for gen in self.sub_gen])

    def reset (self):
        for gen in self.sub_gen:
            gen.reset()

    def generate (self):
        # One flat vector: all component observations concatenated in order.
        return np.concatenate([gen.generate() for gen in self.sub_gen])

    def get_sym_obs_matrix (self):
        # Block-diagonal symmetry matrix: one block per component, in the
        # same order as the concatenated observation.
        to_return = np.zeros((self.obs_dim, self.obs_dim))
        a = 0
        for gen in self.sub_gen:
            b = a + gen.obs_dim
            to_return[a:b,a:b] = gen.get_sym_obs_matrix()
            a = b
        return to_return.astype(np.float32)
class SimObsGenerator (ObsGen):
    """Concatenation of simulation-only (privileged) observation terms."""
    def __init__ (self, state):
        self.state = state
        # FootNormal plus the realistic-group duplicates (JointTarget,
        # JointDelta, JointPos, Phase, LocalUp, Cmd_*) were tried and disabled.
        self.sub_gen_class = [
            RotVel,
            Height,
            LocPosVel,
            FootFric,
            FootClearance,
            MotorConsts,
            GravityOffset,
            JointSpeed,
            JointHiddenDelta,
        ]
        self.sub_gen = [Gen(self.state) for Gen in self.sub_gen_class]
        self.obs_dim = sum(gen.obs_dim for gen in self.sub_gen)
    def reset (self):
        for gen in self.sub_gen:
            gen.reset()
    def generate (self):
        return np.concatenate([gen.generate() for gen in self.sub_gen])
    def get_sym_obs_matrix (self):
        """Block-diagonal symmetry matrix assembled from each term's matrix."""
        mat = np.zeros((self.obs_dim, self.obs_dim))
        start = 0
        for gen in self.sub_gen:
            stop = start + gen.obs_dim
            mat[start:stop, start:stop] = gen.get_sym_obs_matrix()
            start = stop
        return mat.astype(np.float32)
class VfGenerator (ObsGen):
    """Observation set exposed under the "vf_obs" key (presumably for the
    value function -- TODO confirm against the training code)."""
    def __init__ (self, state):
        self.state = state
        self.sub_gen_class = [
            JointSpeed,
            JointHiddenDelta,
            RotVel,
            Height,
            LocPosVel,
            FootFric,
            FootClearance,
            FootNormal,
            MotorConsts,
            GravityOffset,
        ]
        self.sub_gen = [Gen(self.state) for Gen in self.sub_gen_class]
        self.obs_dim = sum(gen.obs_dim for gen in self.sub_gen)
    def reset (self):
        for gen in self.sub_gen:
            gen.reset()
    def generate (self):
        return np.concatenate([gen.generate() for gen in self.sub_gen])
    def get_sym_obs_matrix (self):
        """Block-diagonal symmetry matrix assembled from each term's matrix."""
        mat = np.zeros((self.obs_dim, self.obs_dim))
        start = 0
        for gen in self.sub_gen:
            stop = start + gen.obs_dim
            mat[start:stop, start:stop] = gen.get_sym_obs_matrix()
            start = stop
        return mat.astype(np.float32)
class MotorGenerator (ObsGen):
    """Concatenation of the observation terms fed to the motor-level policy."""
    def __init__ (self, state):
        self.state = state
        self.sub_gen_class = [
            JointDelta,
            JointSpeed,
            Phase,
            LocalUp,
            FootClearance,
            FootNormal,
        ]
        self.sub_gen = [Gen(self.state) for Gen in self.sub_gen_class]
        self.obs_dim = sum(gen.obs_dim for gen in self.sub_gen)
    def reset (self):
        for gen in self.sub_gen:
            gen.reset()
    def generate (self):
        return np.concatenate([gen.generate() for gen in self.sub_gen])
    def get_sym_obs_matrix (self):
        """Block-diagonal symmetry matrix assembled from each term's matrix."""
        mat = np.zeros((self.obs_dim, self.obs_dim))
        start = 0
        for gen in self.sub_gen:
            stop = start + gen.obs_dim
            mat[start:stop, start:stop] = gen.get_sym_obs_matrix()
            start = stop
        return mat.astype(np.float32)
# -------------------------------------------- Joint related --------------------------------------------
# 12x12 permutation matrix that swaps the 3-joint blocks of paired legs:
# indices 0-2 <-> 3-5 and 6-8 <-> 9-11. Returned by the per-joint generators'
# get_sym_obs_matrix() to mirror joint-space observations left/right.
switch_legs = np.asarray([ [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                           [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                           [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], ])
class JointTarget (ObsGen):
    """Commanded joint angles, offset by the nominal stance pose."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
        # nominal per-leg (abduction, hip, knee) angles, tiled over 4 legs
        self.mean = np.asarray([0., 0.628, -1.257] * 4)
    def generate (self):
        target = np.asarray(self.state.joint_target)
        return target - self.mean
    def get_sym_obs_matrix (self):
        return switch_legs
class JointDelta (ObsGen):
    """Difference between the commanded and the measured joint angles."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        target = np.asarray(self.state.joint_target)
        measured = np.asarray(self.state.joint_rot)
        return target - measured
    def get_sym_obs_matrix (self):
        return switch_legs
class ZeroJointDelta (ObsGen):
    """All-zero placeholder with the same shape/dtype as JointDelta."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        # computed like JointDelta, then zeroed so shape and dtype match
        delta = np.asarray(self.state.joint_target) - np.asarray(self.state.joint_rot)
        return delta * 0
    def get_sym_obs_matrix (self):
        return switch_legs
class JointPos (ObsGen):
    """Measured joint angles, offset by the nominal stance pose."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
        # nominal per-leg (abduction, hip, knee) angles, tiled over 4 legs
        self.mean = np.asarray([0., 0.628, -1.257] * 4)
    def generate (self):
        measured = np.asarray(self.state.joint_rot)
        return measured - self.mean
    def get_sym_obs_matrix (self):
        return switch_legs
class JointFlexiblePos (ObsGen):
    """Measured joint angles offset by the nominal stance pose.

    The torque/kp "flexible" compensation is currently disabled; ``self.kp``
    is kept for the commented-out variant below.
    """
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
        self.mean = np.asarray([0., 0.628, -1.257] * 4)
        self.kp = np.asarray([200, 200, 200] * 4)
    def generate (self):
        return np.asarray(self.state.joint_rot) - self.mean
        # flexible variant: np.asarray(self.state.joint_rot) + np.asarray(self.state.joint_torque)/self.kp - self.mean
    def get_sym_obs_matrix (self):
        return switch_legs
class JointFlexibleDelta (ObsGen):
    """Difference between commanded and measured joint angles.

    The torque/kp "flexible" correction is currently disabled; ``self.kp``
    is kept for the commented-out variant.
    """
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
        self.kp = np.asarray([200, 200, 200] * 4)
    def generate (self):
        measured = np.asarray(self.state.joint_rot)
        # flexible variant would add np.asarray(self.state.joint_torque)/self.kp to `measured`
        return np.asarray(self.state.joint_target) - measured
    def get_sym_obs_matrix (self):
        return switch_legs
class JointSpeed (ObsGen):
    """Joint velocities scaled by 1/30."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        speed = np.asarray(self.state.joint_rot_speed)
        return speed / 30
    def get_sym_obs_matrix (self):
        return switch_legs
class JointHiddenDelta (ObsGen):
    """Commanded minus measured joint angles.

    NOTE(review): computation is identical to JointDelta; this variant is the
    one used in the sim/privileged groups -- confirm whether the duplication
    is intentional.
    """
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        target = np.asarray(self.state.joint_target)
        return target - np.asarray(self.state.joint_rot)
    def get_sym_obs_matrix (self):
        return switch_legs
class LastAction (ObsGen):
    """The previous action vector (12 values)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        return np.asarray(self.state.last_action)
    def get_sym_obs_matrix (self):
        return switch_legs
class Phase (ObsGen):
    """Gait phase encoded as (sin(phase), cos(phase))."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 2
    def generate (self):
        phase = self.state.phase
        return np.asarray([np.sin(phase), np.cos(phase)])
    def get_sym_obs_matrix (self):
        # negates both components under mirroring
        return np.diag([-1, -1])
class FootPos (ObsGen):
    """Foot positions in the local frame, scaled by 10.

    NOTE(review): no get_sym_obs_matrix() here -- using this term in a
    generator that builds symmetry matrices would raise AttributeError.
    """
    def __init__(self, state):
        self.state = state
        self.obs_dim = 12
    def generate(self):
        return 10 * np.asarray(self.state.loc_foot_pos)
class FootMeanPos (ObsGen):
    """Mean foot positions in the local frame, scaled by 10.

    NOTE(review): no get_sym_obs_matrix() -- see FootPos.
    """
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        return 10 * np.asarray(self.state.mean_loc_foot_pos)
# -------------------------------------------- IMU related --------------------------------------------
class LocalUp (ObsGen):
    """Up vector expressed in the base frame (3 components)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        return np.asarray(self.state.loc_up_vect)
    def get_sym_obs_matrix(self):
        # mirroring flips the lateral (y) component
        return np.diag([1, -1, 1])
class RandLocalUp (ObsGen):
    """LocalUp corrupted with a per-episode rotation bias plus per-step noise."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def reset (self):
        # per-episode bias: uniform +/-1 degree about each (z, y, x) axis
        bias_deg = np.random.uniform(-1, 1, size=(3,))
        self.random_r = R.from_euler('zyx', bias_deg, degrees=True)
    def generate (self):
        # per-step jitter: gaussian with 1 degree std about each axis
        noise_deg = np.random.normal(scale=1, size=(3,))
        self.noise_r = R.from_euler('zyx', noise_deg, degrees=True)
        up = np.asarray(self.state.loc_up_vect)
        return self.noise_r.apply(self.random_r.apply(up))
    def get_sym_obs_matrix(self):
        return np.diag([1, -1, 1])
class RotVel (ObsGen):
    """Base angular velocity, scaled by 0.1 and clipped to [-1, 1]."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        scaled = np.asarray(self.state.loc_rot_speed) * 0.1
        return np.clip(scaled, -1, 1)
    def get_sym_obs_matrix(self):
        return np.diag([-1, 1, -1])
# -------------------------------------------- CMD related --------------------------------------------
class Cmd_PosVel (ObsGen):
    """Commanded linear velocity (3 components)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        return np.asarray(self.state.target_speed)
    def get_sym_obs_matrix(self):
        return np.diag([1, -1, 1])
class Cmd_RotVel (ObsGen):
    """Commanded angular velocity (3 components)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        return np.asarray(self.state.target_rot_speed)
    def get_sym_obs_matrix(self):
        return np.diag([-1, 1, -1])
# -------------------------------------------- True Cheating --------------------------------------------
class Height (ObsGen):
    """Base z-coordinate (listed under "True Cheating": sim-only state)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 1
    def generate (self):
        z = self.state.base_pos[2]
        return np.asarray([z])
    def get_sym_obs_matrix(self):
        return np.diag([1])
class LocPosVel (ObsGen):
    """Base linear velocity in the local frame (sim-only state)."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        return np.asarray(self.state.loc_pos_speed)
    def get_sym_obs_matrix(self):
        return np.diag([1, -1, 1])
class FootScans (ObsGen):
    """Foot-area terrain scan values (36), scaled by 10.

    NOTE(review): no get_sym_obs_matrix() -- see FootPos.
    """
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 36
    def generate (self):
        return 10 * np.asarray(self.state.foot_scans)
class FootFric (ObsGen):
    """Per-foot friction values, scaled by 10."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 4
    def generate (self):
        return 10 * np.asarray(self.state.foot_f)
    def get_sym_obs_matrix(self):
        # swap feet 0<->1 and 2<->3
        return np.asarray([
            [0, 1, 0, 0],
            [1, 0, 0, 0],
            [0, 0, 0, 1],
            [0, 0, 1, 0]
        ])
class FootClearance (ObsGen):
    """Per-foot ground clearance, scaled by 10."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 4
    def generate (self):
        return 10 * np.asarray(self.state.foot_clearance)
    def get_sym_obs_matrix(self):
        # swap feet 0<->1 and 2<->3
        return np.asarray([
            [0, 1, 0, 0],
            [1, 0, 0, 0],
            [0, 0, 0, 1],
            [0, 0, 1, 0]
        ])
class FootNormal (ObsGen):
    """Contact-surface normals under the feet, flattened to 12 values."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 12
    def generate (self):
        normals = self.state.loc_foot_normal
        return np.asarray(normals.flatten())
    def get_sym_obs_matrix(self):
        return switch_legs
# -------------------------------------------- Misc --------------------------------------------
class MotorConsts (ObsGen):
    """Normalized motor gains: (kp0 - 60)/10 and (kd0_fac - 0.12)/0.2."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 2
    def generate (self):
        kp_norm = (self.state.kp0 - 60) / 10
        kd_norm = (self.state.kd0_fac - 0.12) / 0.2
        return np.asarray([kp_norm, kd_norm])
    def get_sym_obs_matrix(self):
        return np.diag([1, 1])
class GravityOffset (ObsGen):
    """Deviation of the local gravity vector from nominal [0, 0, -9.81]."""
    def __init__ (self, state):
        self.state = state
        self.obs_dim = 3
    def generate (self):
        nominal = np.asarray([0, 0, -9.81])
        return self.state.loc_gravity - nominal
    def get_sym_obs_matrix(self):
        return np.diag([1, -1, 1])
|
989,099 | 553b6ad342d1f566bc3bd1dd59ee0dd4df103eac | #用Python进行SQLite数据库操作
import sqlite3

# SQLite demo: create a table, insert rows, query them, commit and close.
# Connect (creates the database file if it does not exist).
con = sqlite3.connect('templates/flaskr.db')
# A cursor executes SQL statements; the connection owns the transaction and
# must be committed for the writes to persist.
cur = con.cursor()

# Create the table.
# FIX: the type was misspelled "interger" -- only the exact words
# INTEGER PRIMARY KEY make the column an alias of SQLite's rowid.
try:
    cur.execute('create table region(id integer primary key,name varchar(10))')
except sqlite3.OperationalError as e:
    print('表格region已存在')

# Insert a single row (fails with IntegrityError if the id already exists).
try:
    cur.execute('insert into region(id,name) values("7","杭州")')
except sqlite3.IntegrityError as e:
    print('单条id已存在')

# Insert several rows at once; executemany replaces the manual loop.
try:
    regions = [("5", "上海"), ("6", "北京")]
    cur.executemany("insert into region(id,name) values(?,?)", regions)
except sqlite3.IntegrityError as e:
    print('多条id已存在')

# Query the data. FIX: the SELECT was executed twice and the cursor object
# itself was printed (meaningless debug output); run it once and fetch.
cur.execute("select * from region")
fetchall = cur.fetchall()
print(fetchall)
for item in fetchall:
    for element in item:
        print(element)

# Persist the inserts and release the connection.
con.commit()
con.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.