content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
#importing necessary libraries import numpy as np import pandas as pd import string import streamlit as st header = st.container() dataset = st.container() fearure = st.container() model_training = st.container() with header: st.title("Emotion detection using Text") with dataset: st.header("Emotion Detection Datasets") df = get_data("1-P-3-ISEAR.csv") df.columns = ['sn','Target','Sentence'] df.drop('sn',inplace=True,axis =1) df.head() df.duplicated().sum() df.drop_duplicates(inplace = True) st.subheader("Lets check if the dataset is fairly distrributed.") col1 , col2 = st.columns(2) target_count = df['Target'].value_counts() col1.table(target_count) col2.text("Line Chart of the total output counts") col2.line_chart(target_count ) st.markdown("From the above data, we can easily say the data iss fairly distributed.") with fearure: st.header("Learning about Feature and converting them") # df['Sentence'] = df['Sentence'].apply(lowercase) df['Sentence'] = df['Sentence'].apply(lowercase).apply(remove_punc) #Removing the stop words import nltk nltk.download('omw-1.4') nltk.download('stopwords') from nltk.corpus import stopwords df['Sentence'] = df['Sentence'].apply(remove_stopwords) #Lemmatization i.e changing words into it's root form from nltk.stem import WordNetLemmatizer nltk.download('wordnet') from nltk.corpus import wordnet lemmatizer = WordNetLemmatizer() df['Sentence'] = df['Sentence'].apply(lemmatize) st.markdown('As the part of data pre-processing, we have done the following things:') st.text(" - Converting the sentence to lowercase ") st.text(" -Removing the Punction ") st.text(" -Removing the stop words ") st.text(" -Lemmatization i.e changing words into it is root form ,") st.markdown("After all these our data looks like-") st.dataframe(df.head()) st.markdown("After doing Train Test split we will apply TGIF, It is technique to transform text into a meaningful vector of numbers. TFIDF penalizes words that come up too often and dont really have much use. 
So it rescales the frequency of words that are common which makes scoring more balanced") with model_training: from sklearn.model_selection import train_test_split X = df['Sentence'] y = df['Target'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2,random_state=10) from sklearn.feature_extraction.text import TfidfVectorizer tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2)) train_tfidf = tfidf.fit_transform(X_train) test_tfidf = tfidf.transform(X_test) from sklearn.linear_model import LogisticRegression logistic = LogisticRegression(max_iter=1000) logistic.fit(train_tfidf,y_train) from sklearn.naive_bayes import MultinomialNB nb = MultinomialNB() nb.fit(train_tfidf,y_train) st.header('Checking The Accuracy using diffrent model.') import joblib joblib.dump(logistic, './mymodel/logistic_model.joblib') joblib.dump(nb, './mymodel/naive_bayes_model.joblib') joblib.dump(tfidf, './mymodel/tfidf_model.joblib') sel_col , disp_col = st.columns(2) with sel_col: sel_col.subheader("Logistic Regression") sel_col.markdown("Logistic Regression Train Error") sel_col.write(logistic.score(train_tfidf, y_train)) sel_col.markdown("Logistic Regression Test Error") sel_col.write( logistic.score(test_tfidf, y_test)) with disp_col: disp_col.subheader("Naive Bias") disp_col.markdown("Naive Bias Train Error") disp_col.write(nb.score(train_tfidf, y_train)) disp_col.markdown("Naive Bias Test Error") disp_col.write(nb.score(test_tfidf, y_test))
[ 2, 11748, 278, 3306, 12782, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 11748, 4731, 198, 198, 11748, 4269, 18250, 355, 336, 198, 198, 25677, 796, 336, 13, 34924, 3419, 198, 19608, 292, 316...
2.581162
1,497
#!/usr/bin/env python # -*- coding:utf-8 -*- SECRET_KEY = 'some secret key' TEMPLATES_AUTO_RELOAD = True PROJECT_NAME = 'SpiderManage' # Redis Config REDIS_HOST = '120.25.227.8' REDIS_PORT = 6379 REDIS_PASSWORD = 'xuxinredis' # SQLite # SQLALCHEMY_DATABASE_URI = 'sqlite:///C:/Users/sheep3/workplace/SpiderManage/data.db' # SQLALCHEMY_TRACK_MODIFICATIONS = True # MYSQL SQLALCHEMY_DATABASE_URI = 'mysql://root:xuxin.mysql@120.25.227.8:3306/spider_db?charset=utf8' SQLALCHEMY_COMMIT_ON_TEARDOWN = True SQLALCHEMY_TRACK_MODIFICATIONS = True
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 198, 23683, 26087, 62, 20373, 796, 705, 11246, 3200, 1994, 6, 198, 51, 3620, 6489, 29462, 62, 39371, 46, 62, 16448, 41048,...
2.177419
248
from PyPrometheusQueryClient import PrometheusQueryClient import json from pathlib import Path from datetime import datetime
[ 6738, 9485, 24129, 36916, 20746, 11792, 1330, 42696, 20746, 11792, 198, 11748, 33918, 220, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 4818, 8079, 1330, 4818, 8079 ]
4.62963
27
import pytest import os import json import jsonschema from ruamel.yaml import YAML
[ 11748, 12972, 9288, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 44804, 684, 2395, 2611, 198, 6738, 7422, 17983, 13, 88, 43695, 1330, 575, 2390, 43, 628 ]
3.111111
27
import vkrpg
[ 11748, 410, 38584, 6024, 198, 220, 220, 220, 220, 220, 220, 220, 220 ]
1.615385
13
import secrets from flask import Flask , render_template , url_for , send_from_directory from flaskprediction import app from flaskprediction.utils.predict import Predictor from flaskprediction.forms import CarDetailsForm , TitanicDetailsForm , BostonDetailsForm , HeightDetailsForm, CatImageForm from PIL import Image import os def save_picture(form_picture): random_hex = secrets.token_hex(8) _, f_ext = os.path.splitext(form_picture.filename) picture_fn = random_hex + f_ext picture_path = os.path.join(app.root_path, 'static/pics', picture_fn) output_size = (64, 64) i = Image.open(form_picture) i.thumbnail(output_size) i.save(picture_path) return picture_path
[ 11748, 13141, 198, 198, 6738, 42903, 1330, 46947, 837, 8543, 62, 28243, 837, 19016, 62, 1640, 837, 3758, 62, 6738, 62, 34945, 220, 220, 198, 6738, 42903, 28764, 2867, 1330, 598, 198, 6738, 42903, 28764, 2867, 13, 26791, 13, 79, 17407, ...
2.974895
239
import requests import re from time import sleep from datetime import datetime import shutil import elasticsearch2 from elasticsearch_dsl import Search, Q from collections import OrderedDict from sqlalchemy import create_engine from subprocess import Popen, PIPE, STDOUT import shlex import glob import csv # from apiclient.discovery import build from utils import Filename, FileType, Date, conf, logger, sort
[ 198, 198, 11748, 7007, 198, 11748, 302, 198, 6738, 640, 1330, 3993, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 4423, 346, 198, 11748, 27468, 12947, 17, 198, 6738, 27468, 12947, 62, 67, 6649, 1330, 11140, 11, 1195, 198, 6738, ...
3.663717
113
import enum import re import lxml.html from typing import Any, Dict, List, Union, Optional from typing import TYPE_CHECKING if TYPE_CHECKING: from .atserver import AternosServer DAT_PREFIX = 'Data:' DAT_GR_PREFIX = 'Data:GameRules:' # checking timezone format tzcheck = re.compile(r'(^[A-Z]\w+\/[A-Z]\w+$)|^UTC$') # options types converting convert = { 'config-option-number': int, 'config-option-select': int, 'config-option-toggle': bool }
[ 11748, 33829, 201, 198, 11748, 302, 201, 198, 11748, 300, 19875, 13, 6494, 201, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 4479, 11, 32233, 201, 198, 6738, 19720, 1330, 41876, 62, 50084, 2751, 201, 198, 201, 198, 361, ...
2.494681
188
from __future__ import print_function import unittest import numpy as np from openmdao.api import Problem, Group, pyOptSparseDriver, DirectSolver, SqliteRecorder from dymos import Phase from dymos.utils.indexing import get_src_indices_by_row from dymos.phases.components import ControlInterpComp from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE from CADRE.attitude_dymos.angular_velocity_comp import AngularVelocityComp from CADRE.odes_dymos.cadre_systems_ode import CadreSystemsODE GM = 398600.44 rmag = 7000.0 period = 2 * np.pi * np.sqrt(rmag ** 3 / GM) vcirc = np.sqrt(GM / rmag) duration = period duration = 6 * 3600.0 p = Problem(model=Group()) p.driver = pyOptSparseDriver() p.driver.options['optimizer'] = 'SNOPT' p.driver.options['dynamic_simul_derivs'] = True p.driver.opt_settings['Major iterations limit'] = 1000 p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-4 p.driver.opt_settings['Major optimality tolerance'] = 1.0E-4 p.driver.opt_settings['Major step limit'] = 0.1 p.driver.opt_settings['iSumm'] = 6 p.driver.recording_options['includes'] = ['*'] p.driver.recording_options['record_objectives'] = True p.driver.recording_options['record_constraints'] = True p.driver.recording_options['record_desvars'] = True recorder = SqliteRecorder("cases.sql") p.driver.add_recorder(recorder) NUM_SEG = 30 TRANSCRIPTION_ORDER = 3 orbit_phase = Phase('radau-ps', ode_class=CadreOrbitODE, num_segments=NUM_SEG, transcription_order=TRANSCRIPTION_ORDER, compressed=False) p.model.add_subsystem('orbit_phase', orbit_phase) orbit_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration) orbit_phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km') orbit_phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s') # orbit_phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None) # orbit_phase.add_design_parameter('P_bat', opt=False, units='W') 
orbit_phase.add_control('Gamma', opt=True, lower=-90, upper=90, units='deg', ref0=-90, ref=90, continuity=True, rate_continuity=True) # Add a control interp comp to interpolate the rates of O_BI from the orbit phase. faux_control_options = {'O_BI': {'units': None, 'shape': (3, 3)}} p.model.add_subsystem('obi_rate_interp_comp', ControlInterpComp(control_options=faux_control_options, time_units='s', grid_data=orbit_phase.grid_data), promotes_outputs=[('control_rates:O_BI_rate', 'Odot_BI')]) control_input_nodes_idxs = orbit_phase.grid_data.subset_node_indices['control_input'] src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3)) p.model.connect('orbit_phase.rhs_all.O_BI', 'obi_rate_interp_comp.controls:O_BI', src_indices=src_idxs, flat_src_indices=True) p.model.connect('orbit_phase.time.dt_dstau', ('obi_rate_interp_comp.dt_dstau', 'w_B_rate_interp_comp.dt_dstau')) # Use O_BI and Odot_BI to compute the angular velocity vector p.model.add_subsystem('angular_velocity_comp', AngularVelocityComp(num_nodes=orbit_phase.grid_data.num_nodes)) p.model.connect('orbit_phase.rhs_all.O_BI', 'angular_velocity_comp.O_BI') p.model.connect('Odot_BI', 'angular_velocity_comp.Odot_BI') # Add another interpolation comp to compute the rate of w_B faux_control_options = {'w_B': {'units': '1/s', 'shape': (3,)}} p.model.add_subsystem('w_B_rate_interp_comp', ControlInterpComp(control_options=faux_control_options, time_units='s', grid_data=orbit_phase.grid_data), promotes_outputs=[('control_rates:w_B_rate', 'wdot_B')]) src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,)) p.model.connect('angular_velocity_comp.w_B', 'w_B_rate_interp_comp.controls:w_B', src_indices=src_idxs, flat_src_indices=True) # Now add the systems phase systems_phase = Phase('radau-ps', ode_class=CadreSystemsODE, num_segments=NUM_SEG, transcription_order=TRANSCRIPTION_ORDER, compressed=False) p.model.add_subsystem('systems_phase', systems_phase) 
systems_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration) systems_phase.set_state_options('SOC', defect_ref=10, lower=0.2, fix_initial=True, units=None) systems_phase.set_state_options('w_RW', defect_ref=10000, fix_initial=True, units='1/s') systems_phase.set_state_options('data', defect_ref=10, fix_initial=True, units='Gibyte') systems_phase.set_state_options('temperature', ref0=273, ref=373, defect_ref=1000, fix_initial=True, units='degK') systems_phase.add_design_parameter('LD', opt=False, units='d') systems_phase.add_design_parameter('fin_angle', opt=True, lower=0., upper=np.pi / 2.) systems_phase.add_design_parameter('antAngle', opt=True, lower=-np.pi / 4, upper=np.pi / 4) systems_phase.add_design_parameter('cellInstd', opt=True, lower=0.0, upper=1.0, ref=1.0) # Add r_e2b_I and O_BI as non-optimized controls, allowing them to be connected to external sources systems_phase.add_control('r_e2b_I', opt=False, units='km') systems_phase.add_control('O_BI', opt=False) systems_phase.add_control('w_B', opt=False) systems_phase.add_control('wdot_B', opt=False) systems_phase.add_control('P_comm', opt=True, lower=0.0, upper=30.0, units='W') systems_phase.add_control('Isetpt', opt=True, lower=1.0E-4, upper=0.4, units='A') systems_phase.add_objective('data', loc='final', ref=-1.0) # Connect r_e2b_I and O_BI values from all nodes in the orbit phase to the input values # in the attitude phase. 
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,)) p.model.connect('orbit_phase.states:r_e2b_I', 'systems_phase.controls:r_e2b_I', src_indices=src_idxs, flat_src_indices=True) p.model.connect('angular_velocity_comp.w_B', 'systems_phase.controls:w_B', src_indices=src_idxs, flat_src_indices=True) p.model.connect('wdot_B', 'systems_phase.controls:wdot_B', src_indices=src_idxs, flat_src_indices=True) src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3)) p.model.connect('orbit_phase.rhs_all.O_BI', 'systems_phase.controls:O_BI', src_indices=src_idxs, flat_src_indices=True) p.model.options['assembled_jac_type'] = 'csc' p.model.linear_solver = DirectSolver(assemble_jac=True) p.setup(check=True) # from openmdao.api import view_model # view_model(p.model) # Initialize values in the orbit phase p['orbit_phase.t_initial'] = 0.0 p['orbit_phase.t_duration'] = duration # p['systems_phase.states:w_RW'][:, 0] = 0.0 # p['systems_phase.states:w_RW'][:, 1] = 0.0 # p['systems_phase.states:w_RW'][:, 2] = 0.0 # Default starting orbit # [ 2.89078958e+03 5.69493134e+03 -2.55340189e+03 2.56640460e-01 # 3.00387409e+00 6.99018448e+00] p['orbit_phase.states:r_e2b_I'][:, 0] = 2.89078958e+03 p['orbit_phase.states:r_e2b_I'][:, 1] = 5.69493134e+03 p['orbit_phase.states:r_e2b_I'][:, 2] = -2.55340189e+03 p['orbit_phase.states:v_e2b_I'][:, 0] = 2.56640460e-01 p['orbit_phase.states:v_e2b_I'][:, 1] = 3.00387409e+00 p['orbit_phase.states:v_e2b_I'][:, 2] = 6.99018448e+00 # Initialize values in the systems phase p['systems_phase.t_initial'] = 0.0 p['systems_phase.t_duration'] = duration # p['systems_phase.states:w_RW'][:, 0] = 0.0 # p['systems_phase.states:w_RW'][:, 1] = 0.0 # p['systems_phase.states:w_RW'][:, 2] = 0.0 p['systems_phase.states:SOC'] = systems_phase.interpolate(ys=[1, .5], nodes='state_input') p['systems_phase.states:w_RW'] = 100.0 p['systems_phase.states:data'] = systems_phase.interpolate(ys=[0, 10], nodes='state_input') 
p['systems_phase.states:temperature'] = 273.0 # p['systems_phase.states:v_e2b_I'][:, 0] = 0.0 # p['systems_phase.states:v_e2b_I'][:, 1] = vcirc # p['systems_phase.states:v_e2b_I'][:, 2] = 0.0 p['systems_phase.controls:P_comm'] = 0.01 p['systems_phase.controls:Isetpt'] = 0.1 p['systems_phase.design_parameters:LD'] = 5233.5 p['systems_phase.design_parameters:fin_angle'] = np.radians(70.0) p['systems_phase.design_parameters:cellInstd'] = 0.0 p.run_model() # Simulate the orbit phase to get a (exact) guess to the orbit history solution. exp_out = orbit_phase.simulate() # import matplotlib.pyplot as plt # from mpl_toolkits import mplot3d # # plt.figure() # ax = plt.axes(projection='3d') # # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-') # ax.plot3D(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], exp_out.get_values('r_e2b_I')[:, 2], 'b-') # plt.show() p['orbit_phase.states:r_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('r_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input') p['orbit_phase.states:v_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('v_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input') p.run_driver() r_e2b_I = p.model.orbit_phase.get_values('r_e2b_I') v_e2b_I = p.model.orbit_phase.get_values('v_e2b_I') rmag_e2b = p.model.orbit_phase.get_values('rmag_e2b_I') # exp_out = systems_phase.simulate(times=500) import matplotlib.pyplot as plt plt.figure() plt.plot(orbit_phase.get_values('r_e2b_I')[:, 0], orbit_phase.get_values('r_e2b_I')[:, 1], 'ro') plt.figure() # plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-') plt.plot(systems_phase.get_values('time'), systems_phase.get_values('data'), 'ro') plt.figure() # plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-') plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_comm'), 'r-') plt.plot(systems_phase.get_values('time'), 
systems_phase.get_values('P_sol'), 'b-') plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_RW'), 'g-') plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_bat'), 'k-') plt.figure() plt.plot(systems_phase.get_values('time'), systems_phase.get_values('SOC'), 'r-') plt.plot(systems_phase.get_values('time'), systems_phase.get_values('dXdt:SOC'), 'r--') plt.show() # plt.figure() # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-') # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro') # assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9) # delta_trua = 2 * np.pi * (duration / period) # assert_rel_error(self, r_e2b_I[-1, :], # rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]), # tolerance=1.0E-9) # assert_rel_error(self, v_e2b_I[-1, :], # vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]), # tolerance=1.0E-9) # def test_partials(self): # np.set_printoptions(linewidth=10000, edgeitems=1024) # cpd = self.p.check_partials(compact_print=True, out_stream=None) # assert_check_partials(cpd, atol=1.0E-4, rtol=1.0) # # def test_simulate(self): # phase = self.p.model.orbit_phase # exp_out = phase.simulate(times=500) # # import matplotlib.pyplot as plt # # plt.figure() # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-') # plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro') # # # plt.figure() # # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-') # # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro') # # plt.show()
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 555, 715, 395, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 1280, 9132, 5488, 13, 15042, 1330, 20647, 11, 4912, 11, 12972, 27871, 50, 29572, 32103, 11, 4128, ...
2.19408
5,405
from deso.utils import getUserJWT import requests
[ 6738, 748, 78, 13, 26791, 1330, 651, 12982, 41, 39386, 198, 11748, 7007, 628 ]
3.642857
14
# This file is executed on every boot (including wake-boot from deepsleep) import esp esp.osdebug(None) import wifi wifi.connect(repl=False) import gc gc.collect()
[ 2, 770, 2393, 318, 10945, 319, 790, 6297, 357, 8201, 7765, 12, 18769, 422, 2769, 42832, 8, 198, 198, 11748, 15024, 198, 9774, 13, 418, 24442, 7, 14202, 8, 198, 198, 11748, 43121, 198, 86, 22238, 13, 8443, 7, 35666, 28, 25101, 8, 1...
3.072727
55
import pytest from app.models import AVScanResult
[ 11748, 12972, 9288, 198, 6738, 598, 13, 27530, 1330, 14661, 33351, 23004, 628, 628, 628, 198 ]
3.5
16
# -*- coding: utf-8 -*- """Activate virtualenv for current interpreter: Source: https://github.com/pypa/virtualenv Use exec(open(this_file).read(), {'__file__': this_file}). """ import os import site import sys try: abs_file = os.path.abspath(__file__) except NameError: raise AssertionError( "You must use exec(open(this_file).read(), {'__file__': this_file}))") # Prepend bin to PATH (this file is inside the bin directory) bin_dir = os.path.dirname(abs_file) os.environ["PATH"] = os.pathsep.join( [bin_dir] + os.environ.get("PATH", "").split(os.pathsep)) # Virtual env is right above bin directory base = os.path.dirname(bin_dir) os.environ["VIRTUAL_ENV"] = base # Concat site-packages library path IS_WIN = sys.platform == "win32" IS_PYPY = hasattr(sys, "pypy_version_info") IS_JYTHON = sys.platform.startswith("java") if IS_JYTHON or IS_WIN: site_packages = os.path.join(base, "Lib", "site-packages") elif IS_PYPY: site_packages = os.path.join(base, "site-packages") else: python_lib = "python{}.{}".format(*sys.version_info) site_packages = os.path.join(base, "lib", python_lib, "site-packages") # Add the virtual environment libraries to the host python import mechanism prev_length = len(sys.path) site.addsitedir(site_packages) sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length] sys.real_prefix = sys.prefix sys.prefix = base # vim: set ts=4 sw=4 tw=80 et :
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 25526, 378, 7166, 24330, 329, 1459, 28846, 25, 198, 7416, 25, 3740, 1378, 12567, 13, 785, 14, 79, 4464, 64, 14, 32844, 24330, 198, 198, 11041, 2452, 7, 9654, 7,...
2.608059
546
from sqlalchemy import create_engine from sqlalchemy import ( Table, Column, Integer, String, Text, Boolean, ForeignKey, Enum, DateTime ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship, backref, sessionmaker Session = sessionmaker() Model = declarative_base() def _sync(connection): """ This will build the database for whatever connection you pass.""" Model.metadata.create_all(connection.bind) host_tags = Table("host_tags", Model.metadata, Column("host_id", Integer, ForeignKey("hosts.id"), primary_key=True), Column("tag_id", Integer, ForeignKey("tags.id"), primary_key=True) )
[ 6738, 44161, 282, 26599, 1330, 2251, 62, 18392, 198, 6738, 44161, 282, 26599, 1330, 357, 198, 220, 220, 220, 8655, 11, 29201, 11, 34142, 11, 10903, 11, 8255, 11, 41146, 11, 198, 220, 220, 220, 8708, 9218, 11, 2039, 388, 11, 7536, 75...
3.222222
207
from keychain import Keychain
[ 6738, 1994, 7983, 1330, 7383, 7983, 198 ]
4.285714
7
# PYTHON STANDARD LIBRARY IMPORTS --------------------------------------------- from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import deque from collections import OrderedDict from math import radians from math import pi from operator import itemgetter # DUNDER ---------------------------------------------------------------------- __all__ = [ "KnitNetwork" ] # THIRD PARTY MODULE IMPORTS -------------------------------------------------- import networkx as nx # LOCAL MODULE IMPORTS -------------------------------------------------------- from cockatoo._knitnetworkbase import KnitNetworkBase from cockatoo._knitmappingnetwork import KnitMappingNetwork from cockatoo._knitdinetwork import KnitDiNetwork from cockatoo.environment import RHINOINSIDE from cockatoo.exception import KnitNetworkError from cockatoo.exception import KnitNetworkGeometryError from cockatoo.exception import NoEndNodesError from cockatoo.exception import NoWeftEdgesError from cockatoo.exception import MappingNetworkError from cockatoo.utilities import pairwise # RHINO IMPORTS --------------------------------------------------------------- if RHINOINSIDE: import rhinoinside rhinoinside.load() from Rhino.Geometry import Brep as RhinoBrep from Rhino.Geometry import Curve as RhinoCurve from Rhino.Geometry import Line as RhinoLine from Rhino.Geometry import Interval as RhinoInterval from Rhino.Geometry import Mesh as RhinoMesh from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface from Rhino.Geometry import Point3d as RhinoPoint3d from Rhino.Geometry import Polyline as RhinoPolyline from Rhino.Geometry import Surface as RhinoSurface from Rhino.Geometry import Vector3d as RhinoVector3d else: from Rhino.Geometry import Brep as RhinoBrep from Rhino.Geometry import Curve as RhinoCurve from Rhino.Geometry import Line as RhinoLine from Rhino.Geometry import Interval as RhinoInterval from Rhino.Geometry import Mesh as RhinoMesh 
    from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
    from Rhino.Geometry import Point3d as RhinoPoint3d
    from Rhino.Geometry import Polyline as RhinoPolyline
    from Rhino.Geometry import Surface as RhinoSurface
    from Rhino.Geometry import Vector3d as RhinoVector3d


# CLASS DECLARATION -----------------------------------------------------------

def ToString(self):
    """
    Return a textual description of the network.

    Returns
    -------
    description : str
        A textual description of the network.

    Notes
    -----
    Used for overloading the Grasshopper display in data parameters.
    """
    return repr(self)

# INITIALIZATION OF POSITION CONTOUR EDGES --------------------------------

def initialize_position_contour_edges(self):
    """
    Creates all initial position contour edges as neither 'warp' nor
    'weft' by iterating over all nodes in the network and grouping them
    based on their 'position' attribute.

    Notes
    -----
    This method is automatically called when creating a KnitNetwork using
    the create_from_contours method!

    Closely resembles the implementation described in *Automated
    Generation of Knit Patterns for Non-developable Surfaces* [1]_.
    Also see *KnitCrete - Stay-in-place knitted formworks for complex
    concrete structures* [2]_.
    """
    # get all nodes by position
    posList = self.all_nodes_by_position(data=True)

    # walk every position contour and chain each node to its successor
    # on the same contour with a plain contour edge
    for i, pos in enumerate(posList):
        for j, node in enumerate(pos):
            k = j + 1
            # only connect while a successor exists (no wrap-around)
            if k < len(pos):
                self.create_contour_edge(node, pos[k])

# INITIALIZATION OF 'WEFT' EDGES BETWEEN 'LEAF' NODES ---------------------

def initialize_leaf_connections(self):
    """
    Create all initial connections of the 'leaf' nodes by iterating over
    all position contours and creating 'weft' edges between the 'leaf'
    nodes of the position contours.

    Notes
    -----
    Closely resembles the implementation described in *Automated
    Generation of Knit Patterns for Non-developable Surfaces* [1]_.
    Also see *KnitCrete - Stay-in-place knitted formworks for complex
    concrete structures* [2]_.
    """
    # get all leaves
    leafNodes = self.all_leaves_by_position(True)

    # loop through all the positions leaves
    for i, lpos in enumerate(leafNodes):
        j = i + 1
        # loop through pairs of leaves
        # (each contour's pair of leaves is joined to the next contour's)
        if j < len(leafNodes):
            startLeaf = lpos[0]
            endLeaf = lpos[1]
            nextStart = leafNodes[j][0]
            nextEnd = leafNodes[j][1]

            # add edges to the network
            self.create_weft_edge(startLeaf, nextStart)
            self.create_weft_edge(endLeaf, nextEnd)

# INITIALIZATION OF PRELIMINARY 'WEFT' EDGES ------------------------------

def attempt_weft_connection(self, node, candidate, source_nodes,
                            max_connections=4, verbose=False):
    """
    Method for attempting a 'weft' connection to a candidate
    node based on certain parameters.

    Parameters
    ----------
    node : :obj:`tuple`
        2-tuple representing the source node for the possible
        'weft' edge.

    candidate : :obj:`tuple`
        2-tuple representing the target node for the possible
        'weft' edge.

    source_nodes : :obj:`list`
        List of nodes on the position contour of node. Used to check if
        the candidate node already has a connection.

    max_connections : int, optional
        The new 'weft' connection will only be made if the candidate
        nodes number of connected neighbors is below this.
        Defaults to ``4``.

    verbose : bool, optional
        If ``True``, this routine and all its subroutines will print
        messages about what is happening to the console.
        Defaults to ``False``.

    Returns
    -------
    bool
        ``True`` if the connection has been made, ``False`` otherwise.

    Notes
    -----
    Closely resembles the implementation described in *Automated
    Generation of Knit Patterns for Non-developable Surfaces* [1]_.
    Also see *KnitCrete - Stay-in-place knitted formworks for complex
    concrete structures* [2]_.
""" # define verbose print function v_print = print if verbose else lambda *a, **k: None # get connected neighbors connecting_neighbors = self[candidate[0]] # only do something if the maximum is not reached if len(connecting_neighbors) < max_connections: # determine if the node is already connected to a node from # the input source nodes isConnected = False for cn in connecting_neighbors: if cn in [v[0] for v in source_nodes]: isConnected = True # print info on verbose setting v_print("Candidate node {} is ".format(candidate[0]) + "already connected! " + "Skipping to next " + "node...") break # check the flag and act accordingly if not isConnected: # print info on verbose setting v_print("Connecting node {} to best ".format(node[0]) + "candidate {}.".format(candidate[0])) # if all conditions are met, make the 'weft' connection if node[1]["position"] < candidate[1]["position"]: self.create_weft_edge(node, candidate) else: self.create_weft_edge(candidate, node) return True else: return False else: return False def _create_initial_weft_connections(self, contour_set, force_continuous_start=False, force_continuous_end=False, max_connections=4, precise=False, verbose=False): """ Private method for creating initial 'weft' connections for the supplied set of contours, starting from the first contour in the set and propagating to the last contour in the set. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # define verbose print function v_print = print if verbose else lambda *a, **k: None if len(contour_set) < 2: v_print("Not enough contours in contour set!") return # print info on verbose output v_print("Creating initial 'weft' connections for contour set...") # loop over all nodes of positions (list of lists of tuples) for i, pos in enumerate(contour_set): # pos is a list of tuples (nodes) if i < len(contour_set): j = i + 1 if j == len(contour_set): break # get initial and target nodes without 'leaf' nodes initial_nodes = contour_set[i][1:-1] target_nodes = contour_set[j][1:-1] # options for continuous start and end if force_continuous_start: initial_nodes = initial_nodes[1:] target_nodes = target_nodes[1:] if force_continuous_end: initial_nodes = initial_nodes[:-1] target_nodes = target_nodes[:-1] # skip if one of the contours has no nodes if len(initial_nodes) == 0 or len(target_nodes) == 0: continue # define forbidden node index forbidden_node = -1 # loop through all nodes on the current position for k, node in enumerate(initial_nodes): # print info on verbose setting v_print("Processing node {} on position {}:".format( node[0], node[1]["position"])) # get the geometry for the current node thisPt = node[1]["geo"] # filtering according to forbidden nodes target_nodes = [tn for tn in target_nodes if tn[0] >= forbidden_node] if len(target_nodes) == 0: continue # get four closest nodes on adjacent contour if precise: allDists = [thisPt.DistanceTo(tv[1]["geo"]) for tv in target_nodes] else: allDists = [thisPt.DistanceToSquared(tv[1]["geo"]) for tv in target_nodes] # sort the target nodes by distance to current node allDists, sorted_target_nodes = zip( *sorted(zip(allDists, target_nodes), key=itemgetter(0))) # the four closest nodes are the possible connections possible_connections = sorted_target_nodes[:4] # print info on verbose setting v_print("Possible connections: {}".format( [pc[0] for pc in possible_connections])) # handle edge case where there is no 
possible # connection or just one if len(possible_connections) == 0: # skip if there are no possible connections continue elif len(possible_connections) == 1: # attempt to connect to only possible candidate fCand = possible_connections[0] res = self.attempt_weft_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node if res: forbidden_node = fCand[0] continue # get the contours current direction if k < len(initial_nodes)-1: contourDir = RhinoLine( thisPt, initial_nodes[k+1][1]["geo"]).Direction elif k == len(initial_nodes)-1: contourDir = RhinoLine( initial_nodes[k-1][1]["geo"], thisPt).Direction contourDir.Unitize() # get the directions of the possible connections candidatePoints = [pc[1]["geo"] for pc in possible_connections] candidateDirections = [RhinoLine( thisPt, cp).Direction for cp in candidatePoints] [cd.Unitize() for cd in candidateDirections] # get the angles between contour dir and possible conn dir normals = [RhinoVector3d.CrossProduct( contourDir, cd) for cd in candidateDirections] angles = [RhinoVector3d.VectorAngle( contourDir, cd, n) for cd, n in zip( candidateDirections, normals)] # compute deltas as a mesaure of perpendicularity deltas = [abs(a - (0.5 * pi)) for a in angles] # sort possible connections by distance, then by delta allDists, deltas, angles, most_perpendicular = zip( *sorted(zip( allDists, deltas, angles, possible_connections[:]), key=itemgetter(0, 1))) # get node neighbors nNeighbors = self[node[0]] # compute angle difference aDelta = angles[0] - angles[1] # CONNECTION FOR LEAST ANGLE CHANGE ----------------------- if len(nNeighbors) > 2 and aDelta < radians(6.0): # print info on verbose setting v_print("Using procedure for least angle " + "change connection...") # get previous connected edge and its direction prevEdges = self.node_weft_edges(node[0], data=True) if len(prevEdges) > 1: raise KnitNetworkError( "More than one previous 'weft' connection! 
" + "This was unexpeced...") prevDir = prevEdges[0][2]["geo"].Direction else: prevDir = prevEdges[0][2]["geo"].Direction prevDir.Unitize() # get directions for the best two candidates mpA = most_perpendicular[0] mpB = most_perpendicular[1] dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction dirA.Unitize() dirB.Unitize() # get normals for angle measurement normalA = RhinoVector3d.CrossProduct(prevDir, dirA) normalB = RhinoVector3d.CrossProduct(prevDir, dirB) # measure the angles angleA = RhinoVector3d.VectorAngle( prevDir, dirA, normalA) angleB = RhinoVector3d.VectorAngle( prevDir, dirB, normalB) # select final candidate for connection by angle if angleA < angleB: fCand = mpA else: fCand = mpB # attempt to connect to final candidate res = self.attempt_weft_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node for next pass if res: forbidden_node = fCand[0] # CONNECTION FOR MOST PERPENDICULAR -------------------- else: # print info on verbose setting v_print("Using procedure for most " + "perpendicular connection...") # define final candidate fCand = most_perpendicular[0] # attempt to connect to final candidate node res = self.attempt_weft_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node if connection has been made if res: forbidden_node = fCand[0] def _create_second_pass_weft_connections(self, contour_set, include_leaves=False, least_connected=False, precise=False, verbose=False): """ Private method for creating second pass 'weft' connections for the given set of contours. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" v_print = print if verbose else lambda *a, **k: None # get attributes only once position_attributes = nx.get_node_attributes(self, "position") num_attributes = nx.get_node_attributes(self, "num") if len(contour_set) < 2: v_print("Not enough contours in contour set!") return # print info on verbose output v_print("Creating second pass 'weft' connections for contour set...") # loop over all nodes of positions (list of lists of tuples) for i, pos in enumerate(contour_set): # get initial nodes initial_nodes = contour_set[i] # get target position candidates if (i > 0 and i < len(contour_set)-1 and i != 0 and i != len(contour_set)-1): target_positionA = contour_set[i-1][0][1]["position"] target_positionB = contour_set[i+1][0][1]["position"] elif i == 0: target_positionA = None target_positionB = contour_set[i+1][0][1]["position"] elif i == len(contour_set)-1: target_positionA = contour_set[i-1][0][1]["position"] target_positionB = None # loop through all nodes on current position for k, node in enumerate(initial_nodes): # print info on verbose setting v_print( "Processing node {} on position {}:".format( node[0], node[1]["position"])) # get connecting edges on target position conWeftEdges = self.node_weft_edges(node[0], data=True) conPos = [] if len(conWeftEdges) == 0 and verbose: # print info on verbose setting v_print("No previously connected weft edges...") for weftEdge in conWeftEdges: weftEdgeFrom = weftEdge[0] weftEdgeTo = weftEdge[1] if weftEdgeFrom != node[0]: posEdgeTarget = position_attributes[weftEdgeFrom] elif weftEdgeTo != node[0]: posEdgeTarget = position_attributes[weftEdgeTo] if posEdgeTarget not in conPos: conPos.append(posEdgeTarget) # select target position and continue in edge case scenarios target_positions = [] if target_positionA == None: if target_positionB in conPos: v_print("Node is connected. 
Skipping...") continue target_positions.append(target_positionB) elif target_positionB == None: if target_positionA in conPos: v_print("Node is connected. Skipping...") continue target_positions.append(target_positionA) elif ((target_positionA in conPos) and (target_positionB in conPos)): v_print("Node is connected. Skipping...") continue elif ((target_positionB in conPos) and (target_positionA not in conPos)): target_positions.append(target_positionA) elif ((target_positionA in conPos) and (target_positionB not in conPos)): target_positions.append(target_positionB) elif (target_positionA != None and target_positionB != None and len(conPos) == 0): target_positions = [target_positionA, target_positionB] # print info on verbose setting if verbose and len(target_positions) > 1: v_print("Two target positions: {}, {}".format( *target_positions)) elif verbose and len(target_positions) == 1: v_print("Target position: {}".format(target_positions[0])) # skip if there are no target positions if len(target_positions) == 0: v_print("No target position! Skipping...") continue # only proceed if there is a target position for target_position in target_positions: # get target nodes target_nodes = self.nodes_on_position( target_position, True) # get the point geo of this node thisPt = node[1]["geo"] # get a window of possible connections on the target # position by looking for the previos node on this contour # connected to target position, then propagating along # the target position to the next node that is connected # to this position. these two nodes will define the window # NOTE: the current node should never have a connection # to target position (theoretically!), otherwise it should # have fallen through the checks by now # print info on verbose setting v_print("Target position is {}. 
".format(target_position) + "Computing window...") # get the previous node on this contour prevNode = initial_nodes[k-1] # assume that the previous node has a connection prevCon = self.node_weft_edges(prevNode[0], data=True) # get possible connections from previous connection possible_connections = [] for edge in prevCon: edgeFrom = edge[0] edgeTo = edge[1] if edgeFrom != prevNode[0]: prevNodeTargetPos = position_attributes[edgeFrom] prevNodeTargetIndex = num_attributes[edgeFrom] elif edgeTo != prevNode[0]: prevNodeTargetPos = position_attributes[edgeTo] prevNodeTargetIndex = num_attributes[edgeTo] if prevNodeTargetPos == target_position: possible_connections.append( target_nodes[prevNodeTargetIndex]) # the farthest connection of the previous node is the first # point for our window if len(possible_connections) > 1: possible_connections.sort(key=lambda x: x[1]["num"]) possible_connections.reverse() start_of_window = possible_connections[0] elif len(possible_connections) == 1: start_of_window = possible_connections[0] elif len(possible_connections) == 0: # print info on verbose setting v_print("No possible connection, skipping...") continue # get the next node on this pos that is # connected to target position if k < len(initial_nodes)-1: future_nodes = initial_nodes[k+1:] for futurenode in future_nodes: filteredWeftEdges = [] futureWeftEdges = self.node_weft_edges( futurenode[0], data=True) for futureweft in futureWeftEdges: fwn = (futureweft[1], self.node[futureweft[1]]) fwn_pos = fwn[1]["position"] fwn_num = fwn[1]["num"] if (fwn_pos == target_position and fwn_num == start_of_window[1]["num"]): # if the start of the window is found, # it is the only possible connection filteredWeftEdges = [futureweft] break if (fwn_pos == target_position and fwn_num > start_of_window[1]["num"]): filteredWeftEdges.append(futureweft) else: continue if (not filteredWeftEdges or len(filteredWeftEdges) == 0): end_of_window = None continue # sort the filtered weft edges based on the 
'num' # attribute of their target node filteredWeftEdges.sort( key=lambda x: self.node[x[1]]["num"]) # get the end of the window from the first edge on # the target position end_of_window = ( filteredWeftEdges[0][1], self.node[filteredWeftEdges[0][1]]) break else: end_of_window = None # define the window if end_of_window == None: window = [start_of_window] elif end_of_window == start_of_window: window = [start_of_window] else: window = [(n, d) for n, d in self.nodes_iter(data=True) if n >= start_of_window[0] and n <= end_of_window[0]] if len(window) == 0: # print info on verbose setting v_print("Length of window is 0, skipping...") elif len(window) == 1: # print info on verbose setting v_print("Window has only one node.") v_print("Connecting to node {}".format(window[0][0]) + " on position {}...".format( window[0][1]["position"])) # connect weft edge if node[1]["position"] < window[0][1]["position"]: self.create_weft_edge(node, window[0]) else: self.create_weft_edge(window[0], node) else: # print info on verbose setting v_print("Processing window nodes: {}".format( [w[0] for w in window])) # sort nodes in window by distance if precise: allDists = [thisPt.DistanceTo(pc[1]["geo"]) for pc in window] else: allDists = [thisPt.DistanceToSquared(pc[1]["geo"]) for pc in window] allDists, window = zip(*sorted(zip(allDists, window), key=itemgetter(0))) if least_connected: wn_count = [len(self[n[0]]) for n in window] wn_count, allDists, window = zip( *sorted(zip(allDists, wn_count, window), key=itemgetter(0, 1))) # set final candidate node fCand = window[0] else: # get the contours current direction if k < len(initial_nodes)-1: contourDir = RhinoLine( thisPt, initial_nodes[k+1][1]["geo"]).Direction elif k == len(initial_nodes)-1: contourDir = RhinoLine( initial_nodes[k-1][1]["geo"], thisPt).Direction contourDir.Unitize() # get the directions of the possible connections candidatePoints = [pc[1]["geo"] for pc in window] candidateDirections = [ RhinoLine(thisPt, cp).Direction 
for cp in candidatePoints] [cd.Unitize() for cd in candidateDirections] # get the angles between contour dir and window dir normals = [RhinoVector3d.CrossProduct( contourDir, cd) for cd in candidateDirections] angles = [RhinoVector3d.VectorAngle( contourDir, cd, n) for cd, n in zip( candidateDirections, normals)] # compute deltas as a mesaure of perpendicularity deltas = [abs(a - (0.5 * pi)) for a in angles] # sort window by distance, then by delta allDists, deltas, most_perpendicular = zip(*sorted( zip(allDists, deltas, window), key=itemgetter(0, 1))) # set final candidate node for connection fCand = most_perpendicular[0] # print info on verbose setting v_print("Connecting to node " + "{} on position {}...".format( fCand[0], fCand[1]["position"])) # connect weft edge to best target if node[1]["position"] < fCand[1]["position"]: self.create_weft_edge(node, fCand) else: self.create_weft_edge(fCand, node) def initialize_weft_edges(self, start_index=None, propagate_from_center=False, force_continuous_start=False, force_continuous_end=False, angle_threshold=radians(6.0), max_connections=4, least_connected=False, precise=False, verbose=False): """ Attempts to create all the preliminary 'weft' connections for the network. Parameters ---------- start_index : int, optional This value defines at which index the list of contours is split. If no index is supplied, will split the list at the longest contour. Defaults to ``None``. propagate_from_center : bool, optional If ``True``, will propagate left and right set of contours from the center contour defined by start_index or the longest contour ( < | > ). Otherwise, the propagation of the contours left to the center will start at the left boundary ( > | > ). Defaults to ``False`` force_continuous_start : bool, optional If ``True``, forces the first row of stitches to be continuous. Defaults to ``False``. force_continuous_end : bool, optional If ``True``, forces the last row of stitches to be continuous. Defaults to ``False``. 
max_connections : int, optional The maximum connections a node is allowed to have to be considered for an additional 'weft' connection. Defaults to ``4``. least_connected : bool, optional If ``True``, uses the least connected node from the found candidates. Defaults to ``False`` precise : bool, optional If ``True``, the distance between nodes will be calculated using the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much faster Rhino.Geometry.Point3d.DistanceToSquared method is used. Defaults to ``False``. verbose : bool, optional If ``True``, this routine and all its subroutines will print messages about what is happening to the console. Great for debugging and analysis. Defaults to ``False``. Raises ------ KnitNetworkError If the supplied splitting index is too high. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # get all the positions / contours AllPositions = self.all_nodes_by_position(data=True) if start_index == None: # get index of longest contour start_index = self.longest_position_contour()[0] elif start_index >= len(AllPositions): raise KnitNetworkError("Supplied splitting index is too high!") # if continuous start is True, connect the whole first row if force_continuous_start: chain = [pos[1] for pos in AllPositions] for pair in pairwise(chain): self.create_weft_edge(pair[0], pair[1]) # if continuous end is True, connect the whole last row if force_continuous_end: chain = [pos[-2] for pos in AllPositions] for pair in pairwise(chain): self.create_weft_edge(pair[0], pair[1]) # split position list into two sets based on start index leftContours = AllPositions[0:start_index+1] # optional propagation from center # NOTE: this has shown problems / weird stitch geometries if propagate_from_center: leftContours.reverse() rightContours = AllPositions[start_index:] # create the initial weft connections self._create_initial_weft_connections( leftContours, force_continuous_start=force_continuous_start, force_continuous_end=force_continuous_end, max_connections=max_connections, precise=precise, verbose=verbose) self._create_initial_weft_connections( rightContours, force_continuous_start=force_continuous_start, force_continuous_end=force_continuous_end, max_connections=max_connections, precise=precise, verbose=verbose) # create second pass weft connections self._create_second_pass_weft_connections( leftContours, least_connected, precise=precise, verbose=verbose) self._create_second_pass_weft_connections( rightContours, least_connected, precise=precise, verbose=verbose) return True # INITIALIZATION OF PRELIMINARY 'WARP' EDGES ------------------------------ def initialize_warp_edges(self, contour_set=None, verbose=False): """ Method for initializing first 'warp' connections once all preliminary 'weft' connections are made. 
Parameters ---------- contour_set : :obj:`list`, optional List of lists of nodes to initialize 'warp' edges. If none are supplied, all nodes ordered by thei 'position' attributes are used. Defaults to ``None``. verbose : bool, optional If ``True``, will print verbose output to the console. Defaults to ``False``. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. """ # if no contour set is provided, use all contours of this network if contour_set == None: contour_set = self.all_nodes_by_position(data=True) # loop through all positions in the set of contours for i, pos in enumerate(contour_set): # get all nodes on current contour initial_nodes = contour_set[i] # loop through all nodes on this contour for k, node in enumerate(initial_nodes): connected_edges = self.edges(node[0], data=True) numweft = len(self.node_weft_edges(node[0])) if (len(connected_edges) > 4 or numweft > 2 or i == 0 or i == len(contour_set)-1): # set 'end' attribute for this node self.node[node[0]]["end"] = True # loop through all candidate edges for j, edge in enumerate(connected_edges): # if it's not a 'weft' edge, assign attributes if not edge[2]["weft"]: connected_node = edge[1] # set 'end' attribute to conneted node self.node[connected_node]["end"] = True # set 'warp' attribute to current edge self[edge[0]][edge[1]]["warp"] = True # ASSIGNING OF 'SEGMENT' ATTRIBUTES FOR MAPPING NETWORK ------------------- def _traverse_weft_edge_until_end(self, start_end_node, start_node, seen_segments, way_nodes=None, way_edges=None, end_nodes=None): """ Private method for traversing a path of 'weft' edges until another 'end' node is discoverd. 
""" # initialize output lists if way_nodes == None: way_nodes = deque() way_nodes.append(start_node[0]) if way_edges == None: way_edges = deque() if end_nodes == None: end_nodes = deque() # get the connected edges and filter them, sort out the ones that # already have a 'segment' attribute assigned connected_weft_edges = self.node_weft_edges(start_node[0], data=True) filtered_weft_edges = [] for cwe in connected_weft_edges: if cwe[2]["segment"] != None: continue if cwe in way_edges: continue elif (cwe[1], cwe[0], cwe[2]) in way_edges: continue filtered_weft_edges.append(cwe) if len(filtered_weft_edges) > 1: print(filtered_weft_edges) print("More than one filtered candidate weft edge! " + "Segment complete...?") elif len(filtered_weft_edges) == 1: fwec = filtered_weft_edges[0] connected_node = (fwec[1], self.node[fwec[1]]) # if the connected node is an end node, the segment is finished if connected_node[1]["end"]: # find out which order to set segment attributes if start_end_node > connected_node[0]: segStart = connected_node[0] segEnd = start_end_node else: segStart = start_end_node segEnd = connected_node[0] if (segStart, segEnd) in seen_segments: segIndex = len([s for s in seen_segments if s == (segStart, segEnd)]) else: segIndex = 0 # append the relevant data to the lists end_nodes.append(connected_node[0]) way_edges.append(fwec) seen_segments.append((segStart, segEnd)) # set final 'segment' attributes to all the way nodes for waynode in way_nodes: self.node[waynode]["segment"] = (segStart, segEnd, segIndex) # set final 'segment' attributes to all the way edges for wayedge in way_edges: self[wayedge[0]][wayedge[1]]["segment"] = (segStart, segEnd, segIndex) # return the seen segments return seen_segments else: # set the initial segment attribute to the node self.node[connected_node[0]]["segment"] = (start_end_node, None, None) # set the initial segment attribute to the edge self[fwec[0]][fwec[1]]["segment"] = (start_end_node, None, None) # append the relevant 
data to the lists way_nodes.append(connected_node[0]) way_edges.append(fwec) # call this method recursively until a 'end' node is found return self._traverse_weft_edge_until_end( start_end_node, connected_node, seen_segments, way_nodes, way_edges, end_nodes) else: return seen_segments def traverse_weft_edges_and_set_attributes(self, start_end_node): """ Traverse a path of 'weft' edges starting from an 'end' node until another 'end' node is discovered. Set 'segment' attributes to nodes and edges along the way. start_end_node : :obj:`tuple` 2-tuple representing the node to start the traversal. """ # get connected weft edges and sort them by their connected node weft_connections = self.node_weft_edges(start_end_node[0], data=True) weft_connections.sort(key=lambda x: x[1]) # loop through all connected weft edges seen_segments = [] for cwe in weft_connections: # check if connected weft edge already has a segment attribute if cwe[2]["segment"]: continue # get connected node connected_node = (cwe[1], self.node[cwe[1]]) # check the connected node. if it is an end node, we are done if connected_node[1]["end"]: # get segment start and end if start_end_node[0] > connected_node[0]: segStart = connected_node[0] segEnd = start_end_node[0] else: segStart = start_end_node[0] segEnd = connected_node[0] # get segment index if (segStart, segEnd) in seen_segments: segIndex = len([s for s in seen_segments if s == (segStart, segEnd)]) else: segIndex = 0 # set the final segment attribute to the edge self[cwe[0]][cwe[1]]["segment"] = (segStart, segEnd, segIndex) seen_segments.append((segStart, segEnd)) # if the connected node is not an end node, we need to travel # until we find one else: seen_segments = self._traverse_weft_edge_until_end( start_end_node[0], connected_node, seen_segments, way_edges=[cwe]) def assign_segment_attributes(self): """ Get the segmentation for loop generation and assign 'segment' attributes to 'weft' edges and nodes. 
""" if len(self.weft_edges) == 0: errMsg = ("No 'weft' edges in KnitNetwork! Segmentation " + "is impossible.") raise NoWeftEdgesError(errMsg) if len(self.end_nodes) == 0: errMsg = ("No 'end' nodes in KnitNetwork! Segmentation " + "is impossible.") raise NoEndNodesError(errMsg) # remove contour and 'warp' edges and store them warp_storage = [] contour_storage = [] for edge in self.edges(data=True): if not edge[2]["weft"]: if edge[2]["warp"]: warp_storage.append(edge) else: contour_storage.append(edge) self.remove_edge(edge[0], edge[1]) # get all 'end' nodes ordered by their 'position' attribute all_ends_by_position = self.all_ends_by_position(data=True) # loop through all 'end' nodes for position in all_ends_by_position: for endnode in position: self.traverse_weft_edges_and_set_attributes(endnode) # add all previously removed edges back into the network [self.add_edge(edge[0], edge[1], attr_dict=edge[2]) for edge in warp_storage + contour_storage] # CREATION OF MAPPING NETWORK --------------------------------------------- def create_mapping_network(self): """ Creates the corresponding mapping network for the final loop generation from a KnitNetwork instance with fully assigned 'segment' attributes. The created mapping network will be part of the KnitNetwork instance. It can be accessed using the mapping_network property. Notes ----- All nodes without an 'end' attribute as well as all 'weft' edges are removed by this step. Final nodes as well as final 'weft' and 'warp' edges can only be created using the mapping network. Returns ------- success : bool ``True`` if the mapping network has been successfully created, ``False`` otherwise. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # create a new KnitMappingNetwork instance MappingNetwork = KnitMappingNetwork() # get all edges of the current network by segment weft_edges = sorted(self.weft_edges, key=lambda x: x[2]["segment"]) warp_edges = self.warp_edges # initialize deque container for segment ids segment_ids = deque() # loop through all 'weft' edges and fill container with unique ids for edge in weft_edges: segment_id = edge[2]["segment"] if segment_id not in segment_ids: segment_ids.append(segment_id) # error checking if len(segment_ids) == 0: errMsg = ( "The network contains no 'weft' edges with a 'segment' " + "attribute assigned to them. A KnitMappingNetwork can " + "only be created from a KnitNetwork with initialized " + "'weft' edges for courses and corresponding 'warp' " + "edges connecting their 'end' nodes.") raise NoWeftEdgesError(errMsg) # loop through all unique segment ids for id in segment_ids: # get the corresponding edges for this id and sort them segment_edges = [e for e in weft_edges if e[2]["segment"] == id] segment_edges.sort(key=lambda x: x[0]) # extract start and end nodes start_node = (id[0], self.node[id[0]]) endNode = (id[1], self.node[id[1]]) # get all the geometry of the individual edges segment_geo = [e[2]["geo"] for e in segment_edges] # create a segment contour edge in the mapping network res = MappingNetwork.create_segment_contour_edge( start_node, endNode, id, segment_geo) if not res: errMsg = ("SegmentContourEdge at segment id {} could not be " + "created!") raise KnitNetworkError(errMsg) # add all warp edges to the mapping network to avoid lookup hassle for warp_edge in warp_edges: if warp_edge[0] > warp_edge[1]: warp_from = warp_edge[1] warp_to = warp_edge[0] else: warp_from = warp_edge[0] warp_to = warp_edge[1] MappingNetwork.add_edge(warp_from, warp_to, attr_dict=warp_edge[2]) # set mapping network property for this instance self.mapping_network = MappingNetwork # ditch all edges that are not 'warp' and nodes without 'end' attribute 
[self.remove_node(n) for n, d in self.nodes_iter(data=True) if not d["end"]] [self.remove_edge(s, e) for s, e, d in self.edges_iter(data=True) if not d["warp"]] return True # MAPPING NETWORK PROPERTY ------------------------------------------------ def _get_mapping_network(self): """ Gets the associated mapping network for this KnitNetwork instance. """ return self._mapping_network def _set_mapping_network(self, mapping_network): """ Setter for this instance's associated mapping network. """ # set mapping network to instance if (isinstance(mapping_network, KnitMappingNetwork) or mapping_network == None): self._mapping_network = mapping_network else: raise ValueError("Input is not of type KnitMappingNetwork!") mapping_network = property(_get_mapping_network, _set_mapping_network, None, "The associated mapping network of this " + "KnitNetwork instance.") # RETRIEVAL OF NODES AND EDGES FROM MAPPING NETWORK ----------------------- def all_nodes_by_segment(self, data=False, edges=False): """ Returns all nodes of the network ordered by 'segment' attribute. Note: 'end' nodes are not included! Parameters ---------- data : bool, optional If ``True``, the nodes contained in the output will be represented as 2-tuples in the form of (node_identifier, node_data). Defaults to ``False`` edges : bool, optional If ``True``, the returned output list will contain 3-tuples in the form of (segment_value, segment_nodes, segment_edge). Defaults to ``False``. Returns ------- nodes_by_segment : :obj:`list` of :obj:`tuple` List of 2-tuples in the form of (segment_value, segment_nodes) or 3-tuples in the form of (segment_value, segment_nodes, segment_edge) depending on the ``edges`` argument. Raises ------ MappingNetworkError If the mapping network is not available for this instance. 
""" # retrieve mappingnetwork mapnet = self.mapping_network if not mapnet: errMsg = ("Mapping network has not been built for this instance!") raise MappingNetworkError(errMsg) allSegments = mapnet.segment_contour_edges allSegmentNodes = [(n, d) for n, d in self.nodes_iter(data=True) if d["segment"]] segdict = {} for n in allSegmentNodes: if n[1]["segment"] not in segdict: segdict[n[1]["segment"]] = [n] else: segdict[n[1]["segment"]].append(n) anbs = [] if data and edges: for segment in allSegments: segval = segment[2]["segment"] try: segnodes = sorted(segdict[segval]) except KeyError: segnodes = [] anbs.append((segval, segnodes, segment)) elif data and not edges: for segment in allSegments: segval = segment[2]["segment"] try: segnodes = sorted(segdict[segval]) except KeyError: segnodes = [] anbs.append((segval, segnodes)) elif not data and edges: for segment in allSegments: segval = segment[2]["segment"] try: segnodes = sorted(segdict[segval]) except KeyError: segnodes = [] anbs.append((segval, [sn[0] for sn in segnodes], segment)) elif not data and not edges: for segment in allSegments: segval = segment[2]["segment"] try: segnodes = sorted(segdict[segval]) except KeyError: segnodes = [] anbs.append((segval, [sn[0] for sn in segnodes])) return anbs # STITCH WIDTH SAMPLING --------------------------------------------------- def sample_segment_contours(self, stitch_width): """ Samples the segment contours of the mapping network with the given stitch width. The resulting points are added to the network as nodes and a 'segment' attribute is assigned to them based on their origin segment contour edge. Parameters ---------- stitch_width : float The width of a single stitch inside the knit. Raises ------ MappingNetworkError If the mapping network is not available for this instance. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. 
Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. """ # retrieve mapping network mapnet = self.mapping_network if not mapnet: errMsg = ("Mapping network has not been built for this " + "instance, sampling segment contours is impossible!") raise MappingNetworkError(errMsg) # get the highest index of all the nodes in the network maxNode = max(self.nodes()) # get all the segment geometry ordered by segment number segment_contours = mapnet.segment_contour_edges # sample all segments with the stitch width nodeindex = maxNode + 1 for i, seg in enumerate(segment_contours): # get the geometry of the contour and reparametreize its domain geo = seg[2]["geo"] geo = geo.ToPolylineCurve() geo.Domain = RhinoInterval(0.0, 1.0) # compute the division points crvlen = geo.GetLength() density = int(round(crvlen / stitch_width)) if density == 0: continue divT = geo.DivideByCount(density, False) divPts = [geo.PointAt(t) for t in divT] # set leaf attribute # TODO: better leaf strategy - this works but assigns false # leaf nodes. usually not a problem but it should be fixed anyway if self.node[seg[0]]["leaf"] and self.node[seg[1]]["leaf"]: nodeLeaf = True else: nodeLeaf = False # add all the nodes to the network for j, pt in enumerate(divPts): # add node to network self.node_from_point3d( nodeindex, pt, position=None, num=j, leaf=nodeLeaf, start=False, end=False, segment=seg[2]["segment"], increase=False, decrease=False, color=None) # increment node index nodeindex += 1 # CREATION OF FINAL 'WEFT' CONNECTIONS ------------------------------------ def create_final_weft_connections(self): """ Loop through all the segment contour edges and create all 'weft' connections for this network. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # get all nodes by segment contour SegmentValues, AllNodesBySegment = zip(*self.all_nodes_by_segment( data=True)) # loop through all the segment contours for i, segment in enumerate(AllNodesBySegment): segval = SegmentValues[i] firstNode = (segval[0], self.node[segval[0]]) lastNode = (segval[1], self.node[segval[1]]) if len(segment) == 0: self.create_weft_edge(firstNode, lastNode, segval) elif len(segment) == 1: self.create_weft_edge(firstNode, segment[0], segval) self.create_weft_edge(segment[0], lastNode, segval) else: # loop through all nodes on the current segment and create # the final 'weft' edges for j, node in enumerate(segment): if j == 0: self.create_weft_edge(firstNode, node, segval) self.create_weft_edge(node, segment[j+1], segval) elif j < len(segment)-1: self.create_weft_edge(node, segment[j+1], segval) elif j == len(segment)-1: self.create_weft_edge(node, lastNode, segval) # CREATION OF FINAL 'WARP' CONNECTIONS ------------------------------------ def attempt_warp_connection(self, node, candidate, source_nodes, max_connections=4, verbose=False): """ Method for attempting a 'warp' connection to a candidate node based on certain parameters. Parameters ---------- node : node The starting node for the possible 'weft' edge. candidate : node The target node for the possible 'weft' edge. source_nodes : :obj:`list` List of nodes on the position contour of node. Used to check if the candidate node already has a connection. max_connections : int, optional The new 'weft' connection will only be made if the candidate nodes number of connected neighbors is below this. Defaults to ``4``. verbose : bool, optional If ``True``, this routine and all its subroutines will print messages about what is happening to the console. Defaults to ``False``. Returns ------- result : bool True if the connection has been made, otherwise false. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. 
Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. """ # define verbose print function v_print = print if verbose else lambda *a, **k: None connecting_neighbors = self[candidate[0]] if len(connecting_neighbors) < max_connections: isConnected = False for cn in connecting_neighbors: if cn in [v[0] for v in source_nodes]: isConnected = True # print info on verbose setting v_print("Candidate node {} is ".format(candidate[0]) + "already connected! Skipping to next node...") break if not isConnected: # print info on verbose setting v_print("Connecting node {} to best candidate {}.".format( node[0], candidate[0])) # finally create the warp edge for good self.create_warp_edge(node, candidate) return True else: return False else: return False def _create_initial_warp_connections(self, segment_pair, max_connections=4, precise=False, verbose=False): """ Private method for creating first pass 'warp' connections for the supplied pair of segment chains. The pair is only defined as a list of nodes, the nodes have to be supplied with their attribute data! Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # define verbose print function v_print = print if verbose else lambda *a, **k: None if len(segment_pair) < 2: v_print("Not enough contour segments in supplied set!") return # print info on verbose output v_print("Creating initial 'warp' connections for contour set...") # get initial and target nodes without 'end' nodes initial_nodes = segment_pair[0] target_nodes = segment_pair[1] # define forbidden node index forbidden_node = -1 # do nothing if one of the sets is empty if len(initial_nodes) == 0 or len(target_nodes) == 0: return # loop through all nodes on the current segment for k, node in enumerate(initial_nodes): # get geometry from current node thisPt = node[1]["geo"] # print info on verbose setting v_print("Processing node {} on segment {}:".format( node[0], node[1]["segment"])) # filtering according to forbidden nodes if forbidden_node != -1: target_nodes = [tnode for tx, tnode in enumerate(target_nodes) if tx >= target_nodes.index(forbidden_node)] if len(target_nodes) == 0: continue # compute distances to target nodes if precise: allDists = [thisPt.DistanceTo(tn[1]["geo"]) for tn in target_nodes] else: allDists = [thisPt.DistanceToSquared(tn[1]["geo"]) for tn in target_nodes] # sort nodes after distances allDists, sorted_target_nodes = zip(*sorted( zip(allDists, target_nodes), key=itemgetter(0))) # the four nearest nodes are the possible connections possible_connections = sorted_target_nodes[:4] # print info on verbose setting v_print("Possible connections: {}".format([pc[0] for pc in possible_connections])) # handle edge case where there is no possible connection or just # one if len(possible_connections) == 0: continue elif len(possible_connections) == 1: # attempt to connect to only possible candidate fCand = possible_connections[0] res = self.attempt_warp_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node if res: forbidden_node = fCand continue # get the segment contours current direction 
if k < len(initial_nodes)-1: contourDir = RhinoLine(thisPt, initial_nodes[k+1][1]["geo"]).Direction elif k == len(initial_nodes)-1: contourDir = RhinoLine( initial_nodes[k-1][1]["geo"], thisPt).Direction contourDir.Unitize() # get the directions of the possible connections candidatePoints = [pc[1]["geo"] for pc in possible_connections] candidateDirections = [RhinoLine( thisPt, cp).Direction for cp in candidatePoints] [cd.Unitize() for cd in candidateDirections] # get the angles between segment contour dir and possible conn dir normals = [RhinoVector3d.CrossProduct( contourDir, cd) for cd in candidateDirections] angles = [RhinoVector3d.VectorAngle( contourDir, cd, n) for cd, n in zip( candidateDirections, normals)] # compute deltas as a measure of perpendicularity deltas = [abs(a - (0.5 * pi)) for a in angles] # sort possible connections first by distance, then by delta (allDists, deltas, angles, most_perpendicular) = zip(*sorted(zip(allDists, deltas, angles, possible_connections[:]), key=itemgetter(0, 1))) # compute angle difference aDelta = angles[0] - angles[1] # get node neighbors nNeighbors = self[node[0]] # CONNECTION FOR LEAST ANGLE CHANGE ------------------------------- if len(nNeighbors) > 2 and aDelta < radians(6.0): # print info on verbose setting v_print("Using procedure for least angle " + "change connection...") # get previous connected edge and its direction prevEdges = self.node_warp_edges(node[0], data=True) if len(prevEdges) > 1: print("More than one previous " + "'warp' connection! This was unexpected..." 
+ "Taking the first one..?") prevDir = prevEdges[0][2]["geo"].Direction else: prevDir = prevEdges[0][2]["geo"].Direction prevDir.Unitize() # get directions for the best two candidates mpA = most_perpendicular[0] mpB = most_perpendicular[1] dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction dirA.Unitize() dirB.Unitize() # get normals for angle measurement normalA = RhinoVector3d.CrossProduct(prevDir, dirA) normalB = RhinoVector3d.CrossProduct(prevDir, dirB) # measure the angles angleA = RhinoVector3d.VectorAngle(prevDir, dirA, normalA) angleB = RhinoVector3d.VectorAngle(prevDir, dirB, normalB) # select final candidate for connection if angleA < angleB: fCand = mpA else: fCand = mpB # attempt connection to final candidate res = self.attempt_warp_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node if res: forbidden_node = fCand continue # CONNECTION FOR MOST PERPENDICULAR ------------------------------- else: # print info on verbose setting v_print("Using procedure for most " + "perpendicular connection...") # define final candidate node fCand = most_perpendicular[0] # attempt connection to final candidate res = self.attempt_warp_connection( node, fCand, initial_nodes, max_connections=max_connections, verbose=verbose) # set forbidden node if res: forbidden_node = fCand def _create_second_pass_warp_connection(self, source_nodes, source_index, window, precise=False, verbose=False, reverse=False): """ Private method for creating second pass 'warp' connections for the given set of contours. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # define verbose print function v_print = print if verbose else lambda *a, **k: None if len(window) == 0: # print info on verbose setting v_print("Length of window is 0, skipping...") elif len(window) == 1: # print info on verbose setting v_print("Window has only one node.") v_print("Connecting to node {}.".format(window[0][0])) # connect 'warp' edge if reverse: self.create_warp_edge(window[0], source_nodes[source_index]) else: self.create_warp_edge(source_nodes[source_index], window[0]) else: # retrive the point of the current source node thisPt = source_nodes[source_index][1]["geo"] # print info on verbose setting v_print("Processing window nodes: {}".format( [w[0] for w in window])) # sort nodes in window by distance if precise: allDists = [thisPt.DistanceTo(pc[1]["geo"]) for pc in window] else: allDists = [thisPt.DistanceToSquared(pc[1]["geo"]) for pc in window] allDists, window = zip(*sorted(zip(allDists, window), key=itemgetter(0))) # get the contours current direction if source_index < len(source_nodes)-1: sourceDir = RhinoLine( thisPt, source_nodes[source_index+1][1]["geo"]).Direction elif source_index == len(source_nodes)-1: sourceDir = RhinoLine(source_nodes[source_index-1][1]["geo"], thisPt).Direction sourceDir.Unitize() # get the directions of the possible connections candidatePoints = [pc[1]["geo"] for pc in window] candidateDirections = [RhinoLine(thisPt, cp).Direction for cp in candidatePoints] [cd.Unitize() for cd in candidateDirections] # get the angles between contour dir and window dir normals = [RhinoVector3d.CrossProduct(sourceDir, cd) for cd in candidateDirections] angles = [RhinoVector3d.VectorAngle(sourceDir, cd, n) for cd, n in zip(candidateDirections, normals)] # compute deltas as a mesaure of perpendicularity deltas = [abs(a - (0.5 * pi)) for a in angles] # sort window by distance, then by delta allDists, deltas, most_perpendicular = zip(*sorted( zip(allDists, deltas, window), key=itemgetter(0, 1))) # set final candidate node for 
connection fCand = most_perpendicular[0] # print info on verbose setting v_print("Connecting to node " + "{} on segment {}...".format(fCand[0], fCand[1]["segment"])) # connect warp edge to best target if reverse: self.create_warp_edge(fCand, source_nodes[source_index]) else: self.create_warp_edge(source_nodes[source_index], fCand) def create_final_warp_connections(self, max_connections=4, include_end_nodes=True, precise=False, verbose=False): """ Create the final 'warp' connections by building chains of segment contour edges and connecting them. For each source chain, a target chain is found using an 'educated guessing' strategy. This means that the possible target chains are guessed by leveraging known topology facts about the network and its special 'end' nodes. Parameters ---------- max_connections : int, optional The number of maximum previous connections a candidate node for a 'warp' connection is allowed to have. Defaults to ``4``. include_end_nodes : bool, optional If ``True``, 'end' nodes between adjacent segment contours in a source chain will be included in the first pass of connecting 'warp' edges. Defaults to ``True``. precise : bool If ``True``, the distance between nodes will be calculated using the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much faster Rhino.Geometry.Point3d.DistanceToSquared method is used. Defaults to ``False``. verbose : bool, optional If ``True``, this routine and all its subroutines will print messages about what is happening to the console. Great for debugging and analysis. Defaults to ``False``. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # define verbose print function v_print = print if verbose else lambda *a, **k: None # get all segment ids, nodes per segment and edges SegmentValues, AllNodesBySegment, SegmentContourEdges = zip( *self.all_nodes_by_segment(data=True, edges=True)) # build a dictionary of the segments by their index SegmentDict = dict(zip(SegmentValues, zip(SegmentContourEdges, AllNodesBySegment))) # build source and target chains source_chains, target_chain_dict = self.mapping_network.build_chains( False, True) # initialize container dict for connected chains connected_chains = dict() # initialize segment mapping dictionaries source_to_target = OrderedDict() target_to_source = OrderedDict() source_to_key = dict() target_to_key = dict() # ITERATE OVER SOURCE SEGMENT CHAINS ---------------------------------- # loop through all source chains and find targets in target chains # using an 'educated guess strategy' for i, source_chain in enumerate(source_chains): # get the first and last node ('end' nodes) firstNode = (source_chain[0][0][0], self.node[source_chain[0][0][0]]) lastNode = (source_chain[0][-1][1], self.node[source_chain[0][-1][1]]) # get the chain value of the current chain chain_value = source_chain[1] # extract the ids of the current chain current_ids = tuple(source_chain[0]) # extract the current chains geometry current_chain_geo_list = [SegmentDict[id][0][2]["geo"] for id in current_ids] current_chain_geo = RhinoCurve.JoinCurves( [ccg.ToPolylineCurve() for ccg in current_chain_geo_list])[0] current_chain_spt = current_chain_geo.PointAtNormalizedLength(0.5) # retrieve the current segments from the segment dictionary by id current_segment_nodes = [SegmentDict[id][1] for id in current_ids] # retrieve the current nodes from the list of current segments current_nodes = [] for j, csn in enumerate(current_segment_nodes): if include_end_nodes and j > 0: current_nodes.append((current_ids[j][0], self.node[current_ids[j][0]])) [current_nodes.append(n) for n in csn] # reset the 
target key target_key = None # print info on verbose setting v_print("--------------------------------------------------------") v_print("Processing segment chain {} ...".format(source_chain)) # CASE 1 - ENCLOSED SHORT ROW <====> ALL CASES -------------------- # look for possible targets using a guess about the chain value possible_target_keys = [key for key in target_chain_dict if key[0] == chain_value[0] and key[1] == chain_value[1] and key not in connected_chains] if len(possible_target_keys) > 0: # find the correct chain by using geometric distance possible_target_chains = [target_chain_dict[tk] for tk in possible_target_keys] # for every chain in the possible target chains, get the # geometry and compute a sample distance filtered_target_keys = [] possible_target_chain_dists = [] for j, ptc in enumerate(possible_target_chains): # retrieve possible target geometry and join into one crv ptc_geo_list = [SegmentDict[id][0][2]["geo"] for id in ptc] if ptc_geo_list == current_chain_geo_list: continue ptc_geo = RhinoCurve.JoinCurves( [ptcg.ToPolylineCurve() for ptcg in ptc_geo_list])[0] # get a sample point and measure the distance to the # source chain sample point ptc_spt = ptc_geo.PointAtNormalizedLength(0.5) if precise: ptc_dist = current_chain_spt.DistanceTo(ptc_spt) else: ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt) # append the filtered key to the key list filtered_target_keys.append(possible_target_keys[j]) # append the measured distance to the distance list possible_target_chain_dists.append(ptc_dist) if len(filtered_target_keys) > 0: # sort filtered target keys using the distances possible_target_chain_dists, filtered_target_keys = zip( *sorted(zip( possible_target_chain_dists, filtered_target_keys), key=itemgetter(0))) # set target key target_key = filtered_target_keys[0] else: target_key = None else: target_key = None # attempt warp connections if we have found a correct key if target_key: # get the guessed target chain from the chain 
dictionary target_chain = target_chain_dict[target_key] # extract the ids for node retrieval target_ids = tuple([seg for seg in target_chain]) # retrieve the target nodes from the segment dictionary by id target_segment_nodes = [SegmentDict[id][1] for id in target_ids] target_nodes = [] for j, tsn in enumerate(target_segment_nodes): if include_end_nodes and j > 0: target_nodes.append(( target_ids[j][0], self.node[target_ids[j][0]])) [target_nodes.append(n) for n in tsn] # print info on verbose setting v_print("<=====> detected. Connecting to " + "segment chain {}.".format(target_key)) # we have successfully verified our target segment and # can create some warp edges! segment_pair = [current_nodes, target_nodes] # fill mapping dictionaries if current_ids not in source_to_target: source_to_target[current_ids] = target_ids if current_ids not in source_to_key: source_to_key[current_ids] = chain_value if target_ids not in target_to_source: target_to_source[target_ids] = current_ids if target_ids not in target_to_key: target_to_key[target_ids] = target_key # create initial warp connections between the chains connected_chains[target_key] = True self._create_initial_warp_connections( segment_pair, max_connections=max_connections, precise=precise, verbose=verbose) continue # CASE 2 - SHORT ROW TO THE RIGHT <=====/ ALL CASES --------------- # look for possible targets using a guess about the chain value possible_target_keys = [key for key in target_chain_dict if key[0] == chain_value[0] and key[1] == chain_value[1]+1 and key not in connected_chains] if len(possible_target_keys) == 1: target_key = possible_target_keys[0] elif len(possible_target_keys) > 1: # find the correct chain by using geometric distance possible_target_chains = [target_chain_dict[tk] for tk in possible_target_keys] # for every chain in the possible target chains, get the # geometry and compute a sample distance possible_target_chain_dists = [] for ptc in possible_target_chains: # retrieve possible 
target geometry and join into one crv ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc] ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve() for pg in ptc_geo])[0] # get a sample point and measure the distance to the # source chain sample point ptc_spt = ptc_geo.PointAtNormalizedLength(0.5) if precise: ptc_dist = current_chain_spt.DistanceTo(ptc_spt) else: ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt) # append the measured distance to the list possible_target_chain_dists.append(ptc_dist) # sort possible target keys using the distances possible_target_chain_dists, possible_target_keys = zip( *sorted(zip(possible_target_chain_dists, possible_target_keys), key=itemgetter(0))) target_key = possible_target_keys[0] else: target_key = None # attempt warp connections if we have found a correct key if target_key: # get the guessed target chain from the chain dictionary target_chain = target_chain_dict[target_key] # extract the ids for node retrieval target_ids = tuple([seg for seg in target_chain]) # retrieve the target nodes from the segment dictionary by id target_segment_nodes = [SegmentDict[id][1] for id in target_ids] target_nodes = [] for j, tsn in enumerate(target_segment_nodes): if include_end_nodes and j > 0: target_nodes.append((target_ids[j][0], self.node[target_ids[j][0]])) [target_nodes.append(n) for n in tsn] targetFirstNode = target_ids[0][0] targetLastNode = target_ids[-1][1] # check if firstNode and targetFirstNode are connected via a # 'warp' edge to verify if (targetFirstNode == firstNode[0] and targetLastNode in self[lastNode[0]]): # print info on verbose setting v_print("<=====/ detected. Connecting " + "to segment {}.".format(target_key)) # we have successfully verified our target segment and # can create some warp edges! 
segment_pair = [current_nodes, target_nodes] connected_chains[target_key] = True # fill mapping dictionaries if current_ids not in source_to_target: source_to_target[current_ids] = target_ids if current_ids not in source_to_key: source_to_key[current_ids] = chain_value if target_ids not in target_to_source: target_to_source[target_ids] = current_ids if target_ids not in target_to_key: target_to_key[target_ids] = target_key # create initial 'warp' connections between the chains self._create_initial_warp_connections( segment_pair, max_connections=max_connections, precise=precise, verbose=verbose) continue else: v_print("No real connection for <=====/. Next case...") # CASE 3 - SHORT ROW TO THE LEFT /====> ALL CASES ----------------- # look for possible targets using a guess about the chain value possible_target_keys = [key for key in target_chain_dict if key[0] == chain_value[0]+1 and key[1] == chain_value[1] and key not in connected_chains] if len(possible_target_keys) == 1: target_key = possible_target_keys[0] elif len(possible_target_keys) > 1: # find the correct chain by using geometric distance possible_target_chains = [target_chain_dict[tk] for tk in possible_target_keys] # for every chain in the possible target chains, get the # geometry and compute a sample distance possible_target_chain_dists = [] for ptc in possible_target_chains: # retrieve possible target geometry and join into one crv ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc] ptc_geo = RhinoCurve.JoinCurves( [pg.ToPolylineCurve() for pg in ptc_geo])[0] # get a sample point and measure the distance to the # source chain sample point ptc_spt = ptc_geo.PointAtNormalizedLength(0.5) if precise: ptc_dist = current_chain_spt.DistanceTo(ptc_spt) else: ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt) # append the measured distance to the list possible_target_chain_dists.append(ptc_dist) # sort possible target keys using the distances possible_target_chain_dists, possible_target_keys = zip( 
*sorted(zip(possible_target_chain_dists, possible_target_keys), key=itemgetter(0))) target_key = possible_target_keys[0] else: target_key = None # attempt warp connections if we have found a correct key if target_key: # get the guessed target chain from the chain dictionary target_chain = target_chain_dict[target_key] # extract the ids for node retrieval target_ids = tuple([seg for seg in target_chain]) # retrieve the target nodes from the segment dictionary by id target_segment_nodes = [SegmentDict[id][1] for id in target_ids] target_nodes = [] for j, tsn in enumerate(target_segment_nodes): if include_end_nodes and j > 0: target_nodes.append((target_ids[j][0], self.node[target_ids[j][0]])) [target_nodes.append(n) for n in tsn] targetFirstNode = target_ids[0][0] targetLastNode = target_ids[-1][1] # check if firstNode and targetFirstNode are connected via a # 'warp' edge to verify if (targetFirstNode in self[firstNode[0]] and targetLastNode == lastNode[0]): # print info on verbose setting v_print("/=====> detected. Connecting " + "to segment {}.".format(target_key)) # we have successfully verified our target segment and # can create some warp edges! segment_pair = [current_nodes, target_nodes] connected_chains[target_key] = True # fill mapping dictionaries if current_ids not in source_to_target: source_to_target[current_ids] = target_ids if current_ids not in source_to_key: source_to_key[current_ids] = chain_value if target_ids not in target_to_source: target_to_source[target_ids] = current_ids if target_ids not in target_to_key: target_to_key[target_ids] = target_key self._create_initial_warp_connections( segment_pair, max_connections=max_connections, precise=precise, verbose=verbose) continue else: v_print("No real connection for /=====>. 
Next case...") # CASE 4 - REGULAR ROW /=====/ ALL CASES -------------------------- # look for possible targets using a guess about the chain value possible_target_keys = [key for key in target_chain_dict if key[0] == chain_value[0]+1 and key[1] == chain_value[1]+1 and key not in connected_chains] if len(possible_target_keys) == 1: target_key = possible_target_keys[0] elif len(possible_target_keys) > 1: # find the correct chain by using geometric distance possible_target_chains = [target_chain_dict[tk] for tk in possible_target_keys] # for every chain in the possible target chains, get the # geometry and compute a sample distance possible_target_chain_dists = [] for ptc in possible_target_chains: # retrieve possible target geometry and join into one crv ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc] ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve() for pg in ptc_geo])[0] # get a sample point and measure the distance to the # source chain sample point ptc_spt = ptc_geo.PointAtNormalizedLength(0.5) if precise: ptc_dist = current_chain_spt.DistanceTo(ptc_spt) else: ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt) # append the measured distance to the list possible_target_chain_dists.append(ptc_dist) # sort possible target keys using the distances possible_target_chain_dists, possible_target_keys = zip( *sorted(zip(possible_target_chain_dists, possible_target_keys), key=itemgetter(0))) target_key = possible_target_keys[0] else: target_key = None # attempt warp connections if we have found a correct key if target_key: # get the guessed target chain from the chain dictionary target_chain = target_chain_dict[target_key] # extract the ids for node retrieval target_ids = tuple([seg for seg in target_chain]) # retrieve the target nodes from the segment dictionary by id target_segment_nodes = [SegmentDict[id][1] for id in target_ids] target_nodes = [] for j, tsn in enumerate(target_segment_nodes): if include_end_nodes and j > 0: 
target_nodes.append((target_ids[j][0], self.node[target_ids[j][0]])) [target_nodes.append(n) for n in tsn] # set target first and last node ('end' nodes) targetFirstNode = target_ids[0][0] targetLastNode = target_ids[-1][1] # check if firstNode and targetFirstNode are connected via a # 'warp' edge to verify if (targetFirstNode in self[firstNode[0]] and targetLastNode in self[lastNode[0]]): # print info on verbose setting v_print("/=====/ detected. Connecting " + "to segment {}.".format(target_key)) # we have successfully verified our target segment and # can create some warp edges! segment_pair = [current_nodes, target_nodes] connected_chains[target_key] = True # fill mapping dictionaries if current_ids not in source_to_target: source_to_target[current_ids] = target_ids if current_ids not in source_to_key: source_to_key[current_ids] = chain_value if target_ids not in target_to_source: target_to_source[target_ids] = current_ids if target_ids not in target_to_key: target_to_key[target_ids] = target_key self._create_initial_warp_connections( segment_pair, max_connections=max_connections, precise=precise, verbose=verbose) continue else: v_print("No real connection for /=====/. 
No cases match.") # INVOKE SECOND PASS FOR SOURCE ---> TARGET --------------------------- for i, current_chain in enumerate(source_to_target): v_print("--------------------------------------------------------") v_print("S>T Current Chain: {}".format(current_chain)) # build a list of nodes containing all nodes in the current chain # including all 'end' nodes current_chain_nodes = [] for j, ccid in enumerate(current_chain): current_chain_nodes.append((ccid[0], self.node[ccid[0]])) [current_chain_nodes.append(n) for n in SegmentDict[ccid][1]] current_chain_nodes.append((current_chain[-1][1], self.node[current_chain[-1][1]])) # retrieve target chain from the source to target mapping target_chain = source_to_target[current_chain] cckey = source_to_key[current_chain] tckey = target_to_key[target_chain] # build a list of nodes containing all nodes in the target chain # including all 'end' nodes target_chain_nodes = [] for j, tcid in enumerate(target_chain): target_chain_nodes.append((tcid[0], self.node[tcid[0]])) [target_chain_nodes.append(n) for n in SegmentDict[tcid][1]] target_chain_nodes.append((target_chain[-1][1], self.node[target_chain[-1][1]])) # initialize start of window marker start_of_window = -1 # loop through all nodes on the current chain for k, node in enumerate(current_chain_nodes): # find out if the current node is already principally connected node_connected = False # if the node is the first or the last node, it is defined as # connected per-se if k == 0 or k == len(current_chain_nodes)-1: node_connected = True # find out if the current node is already connected to the # target chain, get node warp edges and their target nodes node_warp_edges = self.node_warp_edges(node[0], data=False) warp_edge_targets = [we[1] for we in node_warp_edges] # loop over warp edge targets to get the start of the window for wet in warp_edge_targets: # loop over target chain nodes for n, tcn in enumerate(target_chain_nodes): # if a warp edge target is in the target chain, # 
the node is connected and star of window for next # node is defined if wet == tcn[0]: if n > start_of_window or start_of_window == -1: start_of_window = n node_connected = True # if the node is not connected to the target chain, we # need to find the end of the window if not node_connected: v_print("Node: {}".format(node[0])) v_print("Start of window: {}".format(start_of_window)) # re-check start of window for <.====/ case if len(target_chain_nodes) >= 2 and start_of_window == -1: if target_chain_nodes[0] == current_chain_nodes[0]: start_of_window = 1 else: start_of_window = 0 end_of_window = None # loop over target chain nodes for n, tcn in enumerate(target_chain_nodes): if n >= start_of_window: if tcn[0] == current_chain_nodes[-1][0]: end_of_window = n # get all warp edges of the current target node # and their targets tcn_warp_edges = self.node_warp_edges(tcn[0], data=False) tcn_warp_edge_targets = [we[1] for we in tcn_warp_edges] # loop over warp edge targets for twet in tcn_warp_edge_targets: if (twet in [cn[0] for cn in current_chain_nodes]): end_of_window = n break if end_of_window and end_of_window > start_of_window: break # re-check end of window for /====.> case if end_of_window: tcn_we = target_chain_nodes[end_of_window] ccn_end = current_chain_nodes[-1] ccn_len = len(current_chain_nodes) if tcn_we == ccn_end and k == ccn_len-2: end_of_window -= 1 if end_of_window < start_of_window: start_of_window = -1 end_of_window = None # if we have a valid window, set the target nodes if start_of_window != -1 and end_of_window != None: if end_of_window == len(target_chain_nodes)-1: window = target_chain_nodes[start_of_window:] else: window = target_chain_nodes[start_of_window: end_of_window+1] v_print("End of window: {}".format(end_of_window)) # execute connection to target if cckey <= tckey: rev = False else: rev = True v_print("Connecting chain {} to chain {}".format( cckey, tckey)) self._create_second_pass_warp_connection( current_chain_nodes, k, window, 
precise=precise, verbose=verbose, reverse=rev) else: # print info on verbose setting v_print("No valid window for current chain!") # INVOKE SECOND PASS FOR TARGET ---> SOURCE --------------------------- for i, current_chain in enumerate(target_to_source): v_print("--------------------------------------------------------") v_print("T>S Current Chain: {}".format(current_chain)) # build a list of nodes containing all nodes in the current chain # including all 'end' nodes current_chain_nodes = [] for j, ccid in enumerate(current_chain): current_chain_nodes.append((ccid[0], self.node[ccid[0]])) [current_chain_nodes.append(n) for n in SegmentDict[ccid][1]] current_chain_nodes.append((current_chain[-1][1], self.node[current_chain[-1][1]])) # retrieve target chain from the source to target mapping target_chain = target_to_source[current_chain] cckey = target_to_key[current_chain] tckey = source_to_key[target_chain] # build a list of nodes containing all nodes in the target chain # including all 'end' nodes target_chain_nodes = [] for j, tcid in enumerate(target_chain): target_chain_nodes.append((tcid[0], self.node[tcid[0]])) [target_chain_nodes.append(n) for n in SegmentDict[tcid][1]] target_chain_nodes.append((target_chain[-1][1], self.node[target_chain[-1][1]])) # initialize start of window marker start_of_window = -1 # loop through all nodes on the current chain for k, node in enumerate(current_chain_nodes): # find out if the current node is already principally connected node_connected = False if k == 0 or k == len(current_chain_nodes)-1: node_connected = True # find out if the current node is already connected to the # target chain node_warp_edges = self.node_warp_edges(node[0], data=False) warp_edge_targets = [we[1] for we in node_warp_edges] # loop over weft edge targets for wet in warp_edge_targets: # if warp edge target is in target chain nodes, node # is connected and the start of our window for the next # node for n, tcn in enumerate(target_chain_nodes): if wet 
== tcn[0]: if n > start_of_window or start_of_window == -1: start_of_window = n node_connected = True # if the node is not connected to the target chain, we # need to find the end of the window if not node_connected: # print info on verbose output v_print("Node: {}".format(node[0])) v_print("Start of window: {}".format(start_of_window)) # re-check start of window for <.====/ case if len(target_chain_nodes) >= 2 and start_of_window == -1: if target_chain_nodes[0] == current_chain_nodes[0]: start_of_window = 1 else: start_of_window = 0 end_of_window = None # loop over target chain nodes for n, tcn in enumerate(target_chain_nodes): if n >= start_of_window: if tcn[0] == current_chain_nodes[-1][0]: end_of_window = n # get all warp edges of the current target node and # their targets tcn_warp_edges = self.node_warp_edges(tcn[0], data=False) tcn_warp_edge_targets = [we[1] for we in tcn_warp_edges] # loop over warp edge targets of current target # node for twet in tcn_warp_edge_targets: # if warp edge target is in current chain, # it is the end of the window if (twet in [cn[0] for cn in current_chain_nodes]): end_of_window = n break if end_of_window and end_of_window > start_of_window: break # re-check end of window for /====.> case if end_of_window: tcn_we = target_chain_nodes[end_of_window] ccn_end = current_chain_nodes[-1] ccn_len = len(current_chain_nodes) if tcn_we == ccn_end and k == ccn_len-2: end_of_window -= 1 if end_of_window < start_of_window: start_of_window = -1 end_of_window = None # if there is a valid window, set the target chain nodes if start_of_window != -1 and end_of_window != None: if end_of_window == len(target_chain_nodes)-1: window = target_chain_nodes[start_of_window:] else: window = target_chain_nodes[start_of_window: end_of_window+1] # print info on verbose output v_print("End of window: {}".format(end_of_window)) # execute connection if cckey < tckey: rev = False else: rev = True v_print("Connecting chain {} to chain {}.".format( cckey, tckey)) 
self._create_second_pass_warp_connection( current_chain_nodes, k, window, precise=precise, verbose=verbose, reverse=rev) else: v_print("No valid window for current chain!") # FIND FACES OF NETWORK --------------------------------------------------- def to_KnitDiNetwork(self): """ Constructs and returns a directed KnitDiNetwork based on this network by duplicating all edges so that [u -> v] and [v -> u] for every edge [u - v] in this undirected network. Returns ------- directed_network : :class:`KnitDiNetwork` The directed representation of this network. """ # create a directed network with duplicate edges in opposing directions dirnet = KnitDiNetwork() dirnet.name = self.name dirnet.add_nodes_from(self) dirnet.add_edges_from((u, v, data) for u, nbrs in self.adjacency_iter() for v, data in nbrs.items()) dirnet.graph = self.graph dirnet.node = self.node dirnet.mapping_network = self.mapping_network return dirnet def find_cycles(self, mode=-1): """ Finds the cycles (faces) of this network by utilizing a wall-follower mechanism. Parameters ---------- mode : int, optional Determines how the neighbors of each node are sorted when finding cycles for the network. ``-1`` equals to using the world XY plane. ``0`` equals to using a plane normal to the origin nodes closest point on the reference geometry. ``1`` equals to using a plane normal to the average of the origin and neighbor nodes' closest points on the reference geometry. ``2`` equals to using an average plane between a plane fit to the origin and its neighbor nodes and a plane normal to the origin nodes closest point on the reference geometry. Defaults to ``-1``. Warning ------- Modes other than ``-1`` are only possible if this network has an underlying reference geometry in form of a Mesh or NurbsSurface. The reference geometry should be assigned when initializing the network by assigning the geometry to the "reference_geometry" attribute of the network. 
Notes ----- Based on an implementation inside the COMPAS framework. For more info see [16]_. """ return self.to_KnitDiNetwork().find_cycles(mode=mode) def create_mesh(self, mode=-1, max_valence=4): """ Constructs a mesh from this network by finding cycles and using them as mesh faces. Parameters ---------- mode : int, optional Determines how the neighbors of each node are sorted when finding cycles for the network. ``-1`` equals to using the world XY plane. ``0`` equals to using a plane normal to the origin nodes closest point on the reference geometry. ``1`` equals to using a plane normal to the average of the origin and neighbor nodes' closest points on the reference geometry. ``2`` equals to using an average plane between a plane fit to the origin and its neighbor nodes and a plane normal to the origin nodes closest point on the reference geometry. Defaults to ``-1``. max_valence : int, optional Sets the maximum edge valence of the faces. If this is set to > 4, n-gon faces (more than 4 edges) are allowed. Otherwise, their cycles are treated as invalid and will be ignored. Defaults to ``4``. Warning ------- Modes other than ``-1`` are only possible if this network has an underlying reference geometry in form of a Mesh or NurbsSurface. The reference geometry should be assigned when initializing the network by assigning the geometry to the "reference_geometry" attribute of the network. """ return self.to_KnitDiNetwork().create_mesh(mode=mode, max_valence=max_valence) # DUALITY ----------------------------------------------------------------- def create_dual(self, mode=-1, merge_adj_creases=False, mend_trailing_rows=False): """ Creates the dual of this KnitNetwork while translating current edge attributes to the edges of the dual network. Parameters ---------- mode : int, optional Determines how the neighbors of each node are sorted when finding cycles for the network. ``-1`` equals to using the world XY plane. 
``0`` equals to using a plane normal to the origin nodes closest point on the reference geometry. ``1`` equals to using a plane normal to the average of the origin and neighbor nodes' closest points on the reference geometry. ``2`` equals to using an average plane between a plane fit to the origin and its neighbor nodes and a plane normal to the origin nodes closest point on the reference geometry. Defaults to ``-1``. merge_adj_creases : bool, optional If ``True``, will merge adjacent 'increase' and 'decrease' nodes connected by a 'weft' edge into a single node. This effectively simplifies the pattern, as a decrease is unneccessary to perform if an increase is right beside it - both nodes can be replaced by a single regular node (stitch). Defaults to ``False``. mend_trailing_rows : bool, optional If ``True``, will attempt to mend trailing rows by reconnecting nodes. Defaults to ``False``. Returns ------- dual_network : :class:`KnitDiNetwork` The dual network of this KnitNetwork. Warning ------- Modes other than -1 (default) are only possible if this network has an underlying reference geometry in form of a Mesh or NurbsSurface. The reference geometry should be assigned when initializing the network by assigning the geometry to the 'reference_geometry' attribute of the network. Notes ----- Closely resembles the implementation described in *Automated Generation of Knit Patterns for Non-developable Surfaces* [1]_. Also see *KnitCrete - Stay-in-place knitted formworks for complex concrete structures* [2]_. 
""" # first find the cycles of this network cycles = self.find_cycles(mode=mode) # get node data for all nodes once node_data = {k: self.node[k] for k in self.nodes_iter()} # create new directed KnitDiNetwork for dual network DualNetwork = KnitDiNetwork( reference_geometry=self.graph["reference_geometry"]) # create mapping dict for edges to adjacent cycles edge_to_cycle = {(u, v): None for u, v in self.edges_iter()} edge_to_cycle.update({(v, u): None for u, v in self.edges_iter()}) # CREATE NODES OF DUAL ------------------------------------------------ # for each cycle, find the centroid node for ckey in sorted(cycles.keys()): cycle = cycles[ckey] clen = len(cycle) # skip invalid cycles (ngons and self-loops) if clen > 4 or clen < 3: continue # loop over cycle edges and fill mapping dicts closed_cycle = cycle[:] closed_cycle.append(cycle[0]) for u, v in pairwise(closed_cycle): edge_to_cycle[(u, v)] = ckey # get coords of cycle nodes cycle_coords = [[node_data[k]["x"], node_data[k]["y"], node_data[k]["z"]] for k in cycle] # compute centroid cx, cy, cz = zip(*cycle_coords) centroid = [sum(cx) / clen, sum(cy) / clen, sum(cz) / clen] centroid_pt = RhinoPoint3d(*centroid) # get node 'leaf' attributes is_leaf = True in [node_data[k]["leaf"] for k in cycle] # get node 'color' attributes. only if all colors of the cycle # match, the color attribute will be set! 
colors = [node_data[k]["color"] for k in cycle] if all(x == colors[0] for x in colors): cycle_color = colors[0] else: cycle_color = None # add node to dual network DualNetwork.node_from_point3d(ckey, centroid_pt, position=None, num=None, leaf=is_leaf, start=False, end=False, segment=None, increase=False, decrease=False, color=cycle_color) # CREATE EDGES IN DUAL ------------------------------------------------ # loop over original edges and create corresponding edges in dual for u, v, d in self.edges_iter(data=True): u, v = self.edge_geometry_direction(u, v) cycle_a = edge_to_cycle[(u, v)] cycle_b = edge_to_cycle[(v, u)] if cycle_a != None and cycle_b != None: node_a = (cycle_a, DualNetwork.node[cycle_a]) node_b = (cycle_b, DualNetwork.node[cycle_b]) if d["warp"]: DualNetwork.create_weft_edge(node_b, node_a) elif d["weft"]: DualNetwork.create_warp_edge(node_a, node_b) # SET ATTRIBUTES OF DUAL NODES ---------------------------------------- # loop over all nodes of the network and set crease and end attributes for node in DualNetwork.nodes_iter(): node_data = DualNetwork.node[node] warp_in = DualNetwork.node_warp_edges_in(node) warp_out = DualNetwork.node_warp_edges_out(node) weft_in = DualNetwork.node_weft_edges_in(node) weft_out = DualNetwork.node_weft_edges_out(node) warplen = len(warp_in) + len(warp_out) weftlen = len(weft_in) + len(weft_out) # 2 warp edges and 1 weft edge >> end if warplen == 2 and weftlen == 1: node_data["end"] = True if weft_out: node_data["start"] = True # 1 warp edge and 1 weft edge >> end and increase / decrease elif warplen == 1 and weftlen == 1: node_data["end"] = True if weft_out: node_data["start"] = True if warp_out and not node_data["leaf"]: node_data["increase"] = True elif warp_in and not node_data["leaf"]: node_data["decrease"] = True # 2 warp edges and 0 weft edges >> end elif warplen == 2 and weftlen == 0: node_data["end"] = True node_data["start"] = True # 1 warp edge and 0 weft edges >> end elif warplen == 1 and weftlen == 0: 
node_data["end"] = True node_data["start"] = True # 0 warp edges and 1 weft edge >> end elif warplen == 0 and weftlen == 1: node_data["end"] = True if weft_out: node_data["start"] = True # 1 warp edge and 2 weft edges >> increase or decrease elif warplen == 1 and weftlen == 2: if not node_data["leaf"]: if warp_out: node_data["increase"] = True elif warp_in: node_data["decrease"] = True # MERGE ADJACENT INCREASES/DECREASES ---------------------------------- if merge_adj_creases: increase_nodes = [inc for inc in DualNetwork.nodes_iter(data=True) if inc[1]["increase"]] for increase, data in increase_nodes: pred = DualNetwork.predecessors(increase) suc = DualNetwork.successors(increase) pred = [p for p in pred if DualNetwork.node[p]["decrease"]] suc = [s for s in suc if DualNetwork.node[s]["decrease"]] # merge only with pred or with suc but not both if (len(pred) == 1 and DualNetwork.edge[pred[0]][increase]["weft"]): # merge nodes, edge is pred, increase pred = pred[0] pd = DualNetwork.node[pred] # remove the connecting edge DualNetwork.remove_edge(pred, increase) # get the points of the nodes increase_pt = data["geo"] pred_pt = pd["geo"] # compute the new merged point new_vec = RhinoVector3d(increase_pt - pred_pt) new_pt = pred_pt + (new_vec * 0.5) # replace the increase with the new pt and invert the # increase attribute data["geo"] = new_pt data["x"] = new_pt.X data["y"] = new_pt.Y data["z"] = new_pt.Z data["increase"] = False # edit the edges of the increase for edge in DualNetwork.edges_iter(increase, data=True): edge[2]["geo"] = RhinoLine( data["geo"], DualNetwork.node[edge[1]]["geo"]) # edit edges of decrease for edge in DualNetwork.in_edges_iter(pred, data=True): if edge[2]["warp"]: fromNode = (edge[0], DualNetwork.node[edge[0]]) toNode = (increase, data) DualNetwork.create_warp_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) elif edge[2]["weft"]: fromNode = (edge[0], DualNetwork.node[edge[0]]) toNode = (increase, data) 
DualNetwork.create_weft_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) DualNetwork.remove_node(pred) elif (not pred and len(suc) == 1 and DualNetwork.edge[increase][suc[0]]["weft"]): # merge nodes, edge is increase, suc suc = suc[0] sd = DualNetwork.node[suc] # remove the connecting edge DualNetwork.remove_edge(increase, suc) # get the points of the nodes increase_pt = data["geo"] suc_pt = sd["geo"] # compute the new merged point new_vec = RhinoVector3d(suc_pt - increase_pt) new_pt = increase_pt + (new_vec * 0.5) # replace the increase with the new pt and invert the # increase attribute data["geo"] = new_pt data["x"] = new_pt.X data["y"] = new_pt.Y data["z"] = new_pt.Z data["increase"] = False # edit the edges of the increase for edge in DualNetwork.edges_iter(increase, data=True): edge[2]["geo"] = RhinoLine( data["geo"], DualNetwork.node[edge[1]]["geo"]) for edge in DualNetwork.in_edges_iter(increase, data=True): edge[2]["geo"] = RhinoLine( DualNetwork.node[edge[0]]["geo"], data["geo"]) # edit incoming edges of decrease for edge in DualNetwork.in_edges_iter(suc, data=True): if edge[2]["warp"]: fromNode = (edge[0], DualNetwork.node[edge[0]]) toNode = (increase, data) DualNetwork.create_warp_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) elif edge[2]["weft"]: fromNode = (edge[0], DualNetwork.node[edge[0]]) toNode = (increase, data) DualNetwork.create_weft_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) # edit outgoing edges of decrease for edge in DualNetwork.edges_iter(suc, data=True): if edge[2]["warp"]: fromNode = (increase, data) toNode = (edge[1], DualNetwork.node[edge[1]]) DualNetwork.create_warp_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) elif edge[2]["weft"]: fromNode = (increase, data) toNode = (edge[1], DualNetwork.node[edge[1]]) DualNetwork.create_weft_edge(fromNode, toNode) DualNetwork.remove_edge(edge[0], edge[1]) DualNetwork.remove_node(suc) # ATTEMPT TO MEND TRAILING ROWS 
--------------------------------------- if mend_trailing_rows: # TODO: find a safer / more robust implementation attempt! errMsg = ("This option is not satisfyingly implemented for this " + "method, yet. Therefore, it is deactivated for now.") raise NotImplementedError(errMsg) # get all nodes which are 'leaf' and 'end' (right side) # and all nodes which are 'leaf' and 'start' (left side) trailing = sorted([(n, d) for n, d in DualNetwork.nodes_iter(data=True) if d["leaf"] and d["end"]], key=lambda x: x[0]) trailing_left = deque([t for t in trailing if t[1]["start"]]) trailing_right = deque([t for t in trailing if not t[1]["start"]]) # from the trailing left nodes... # travel one outgoing 'weft' # from there travel one incoming 'warp' # if the resulting node is 'start', 'end' and has 3 edges in total # >> take its outgoing 'warp' edge (we already traveled that so # we should already have it) # >> connect it to the trailing left node # >> remove the 'leaf' attribute from the trailing node as it is no # longer trailing # >> add the 'increase' attribute to the previous target of the # 'warp' edge while len(trailing_left) > 0: # pop an item from the deque trail = trailing_left.popleft() # travel one outgoing 'weft' edge weft_out = DualNetwork.node_weft_edges_out(trail[0], data=True) if not weft_out: continue weft_out = weft_out[0] # check the target of the 'weft' edge for incoming 'warp' warp_in = DualNetwork.node_warp_edges_in( weft_out[1], data=True) warp_out = DualNetwork.node_warp_edges_out( weft_out[1], data=True) if not warp_in: continue warp_in = warp_in[0] candidate = (warp_in[0], DualNetwork.node[warp_in[0]]) nce = len(DualNetwork.in_edges(warp_in[0])) nce += len(DualNetwork.edges(warp_in[0])) # if this condition holds, we have a trailing increase if (candidate[1]["start"] and candidate[1]["end"] and nce == 3): # remove found 'warp' edge DualNetwork.remove_edge(warp_in[0], warp_in[1]) # assign 'increase' attribute to former 'warp' edge target 
DualNetwork.node[warp_in[1]]["increase"] = True # connect candidate to trail with new 'warp' edge DualNetwork.create_warp_edge(candidate, trail) # remove 'leaf' attribute of former trail trail[1]["leaf"] = False else: if warp_out: warp_out = warp_out[0] candidate = (warp_out[1], DualNetwork.node[warp_out[1]]) nce = len(DualNetwork.in_edges(warp_out[1])) nce += len(DualNetwork.edges(warp_out[1])) # if this condition holds, we have a trailing decrease if (candidate[1]["start"] and candidate[1]["end"] and nce == 3): # remove found 'warp' edge DualNetwork.remove_edge(warp_out[0], warp_out[1]) # assign 'decrease' attribute to former 'warp' # edge source DualNetwork.node[warp_out[0]]["decrease"] = True # connect former trail to candidate with new # 'warp' edge DualNetwork.create_warp_edge(trail, candidate) # remove 'leaf' attribute of former trail trail[1]["leaf"] = False while len(trailing_right) > 0: # pop an item from the deque trail = trailing_right.popleft() # travel one incoming 'weft' edge weft_in = DualNetwork.node_weft_edges_in(trail[0], data=True) if not weft_in: continue weft_in = weft_in[0] # check the target of the 'weft' edge for incoming 'warp' warp_in = DualNetwork.node_warp_edges_in(weft_in[0], data=True) warp_out = DualNetwork.node_warp_edges_out(weft_in[0], data=True) if not warp_in: continue warp_in = warp_in[0] candidate = (warp_in[0], DualNetwork.node[warp_in[0]]) nce = len(DualNetwork.in_edges(warp_in[0])) nce += len(DualNetwork.edges(warp_in[0])) # if this condition holds, we have a trailing increase if candidate[1]["end"] and nce == 3: # remove found 'warp' edge DualNetwork.remove_edge(warp_in[0], warp_in[1]) # assign 'increase' attribute to former 'warp' edge target DualNetwork.node[warp_in[1]]["increase"] = True # connect candidate to trail with new 'warp' edge DualNetwork.create_warp_edge(candidate, trail) # remove 'leaf' attribute of former trail trail[1]["leaf"] = False else: if warp_out: warp_out = warp_out[0] candidate = (warp_out[1], 
DualNetwork.node[warp_out[1]]) nce = len(DualNetwork.in_edges(warp_out[1])) nce += len(DualNetwork.edges(warp_out[1])) # if this condition holds, we have a trailing decrease if (candidate[1]["start"] and candidate[1]["end"] and nce == 3): # remove found 'warp' edge DualNetwork.remove_edge(warp_out[0], warp_out[1]) # assign 'decrease' attribute to former 'warp' # edge source DualNetwork.node[warp_out[0]]["decrease"] = True # connect former trail to candidate with new # 'warp' edge DualNetwork.create_warp_edge(trail, candidate) # remove 'leaf' attribute of former trail trail[1]["leaf"] = False return DualNetwork # MAIN ------------------------------------------------------------------------ if __name__ == '__main__': pass
[ 2, 350, 56, 4221, 1340, 49053, 9795, 45651, 49, 13153, 30023, 33002, 20368, 32501, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6...
1.853216
77,917
# from binary_tree import * # # root = Node(8) # # root.insert(3) # root.insert(10) # root.insert(1) # root.insert(6) # root.insert(4) # root.insert(7) # root.insert(14) # root.insert(13) # node, parent = root.lookup(6) # print(node, parent) # root.print_tree() # # root.delete(10) # # root.print_tree() import tkinter as tk from tkinter import * # import tkMessageBox as messagesbox import tkinter.messagebox as messagebox import ttk from tkinter import simpledialog from treeview import TreeView from random import shuffle from naive import NaiveBST, perfect_inserter from random import * import random if __name__ == '__main__': app = main_GUI(None) app.title("Binary Search Tree") app.mainloop()
[ 2, 422, 13934, 62, 21048, 1330, 1635, 198, 2, 198, 2, 6808, 796, 19081, 7, 23, 8, 198, 2, 198, 2, 6808, 13, 28463, 7, 18, 8, 198, 2, 6808, 13, 28463, 7, 940, 8, 198, 2, 6808, 13, 28463, 7, 16, 8, 198, 2, 6808, 13, 28463, ...
2.70566
265
import requests import json from concurrent.futures import ProcessPoolExecutor as Executor from concurrent.futures import ThreadPoolExecutor from time import sleep, time from couchbase.bucket import Bucket from cbagent.collectors import Latency, Collector from logger import logger from perfrunner.helpers.misc import uhex from spring.docgen import Document from cbagent.metadata_client import MetadataClient from cbagent.stores import PerfStore from perfrunner.settings import ( ClusterSpec, PhaseSettings, TargetIterator, TestConfig, )
[ 11748, 7007, 198, 11748, 33918, 198, 198, 6738, 24580, 13, 69, 315, 942, 1330, 10854, 27201, 23002, 38409, 355, 8393, 38409, 198, 6738, 24580, 13, 69, 315, 942, 1330, 14122, 27201, 23002, 38409, 198, 198, 6738, 640, 1330, 3993, 11, 640,...
3.636364
154
from jnpr.junos import Device from jnpr.junos.utils.config import Config clean_routing_table()
[ 6738, 474, 77, 1050, 13, 29741, 418, 1330, 16232, 198, 6738, 474, 77, 1050, 13, 29741, 418, 13, 26791, 13, 11250, 1330, 17056, 628, 198, 27773, 62, 81, 13660, 62, 11487, 3419, 628 ]
2.969697
33
""" Author : vakhet at gmail.com This script gets all your NPC names from the original rAthena folder and updates their lines in navi_npc_krpri.lub wherever matches the map_name and coords """ import re import os import random import sqlite3 NPC_match = r'^[\w\d_]+,\d+,\d+,\d+\tscript\t[\w\d_ -]+#*[\w\d_ -]*\t[\d,{]+$' allfiles = [] log = open('result.log', 'w', errors='ignore') conn = sqlite3.connect('db.sqlite') db = conn.cursor() intro = ''' Renew navi_npc_krpri.lub | Version 0.2 | (C) 2017 vakhet @ gmail.com Changes: v0.2 - *.new file now creates in same folder with original *.lub ''' outro = ''' Check results in result.log NEW file generated: navi_npc_krpri.new ''' db.executescript(''' DROP TABLE IF EXISTS npc; CREATE TABLE npc ( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, map TEXT, thing1 INTEGER, thing2 INTEGER, thing3 INTEGER, name TEXT, shadow TEXT, x INTEGER, y INTEGER ) ''') # The Beginning print(intro) while True: path_rathena = input('Enter path to NPC: ') if not os.path.exists(path_rathena): print('Wrong path!\n\n') continue else: break while True: path_navi = input('Enter path to navi_npc_krpri.lub: ') if not os.path.exists(path_navi+'\\navi_npc_krpri.lub'): print('Wrong path!\n\n') continue else: break stage_1() # scan for *.txt in \npc directory stage_2() # build DB from navi_npc_krpri.lub stage_3() # update NPC names in DB from *.txt stage_4() # building navi_npc_krpri.new print('Complete list of changes see in log.txt') print('NEW file generated: navi_npc_krpri.new') input('\nPress any key')
[ 37811, 201, 198, 220, 220, 220, 6434, 1058, 410, 461, 3202, 379, 308, 4529, 13, 785, 201, 198, 201, 198, 1212, 4226, 3011, 477, 534, 15888, 3891, 422, 262, 2656, 374, 2953, 831, 64, 9483, 201, 198, 392, 5992, 511, 3951, 287, 6812, ...
2.170552
815
from rpicarserver import ext
[ 6738, 374, 16564, 945, 18497, 1330, 1070, 198 ]
3.625
8
"""Test nest diagnostics.""" from typing import Any from .conftest import ComponentSetup from tests.common import MockConfigEntry from tests.components.diagnostics import get_diagnostics_for_config_entry THERMOSTAT_TYPE = "sdm.devices.types.THERMOSTAT"
[ 37811, 14402, 16343, 6689, 34558, 526, 15931, 198, 198, 6738, 19720, 1330, 4377, 198, 198, 6738, 764, 1102, 701, 395, 1330, 35100, 40786, 198, 198, 6738, 5254, 13, 11321, 1330, 44123, 16934, 30150, 198, 6738, 5254, 13, 5589, 3906, 13, 4...
3.394737
76
import pandas as pd import numpy as np df = pd.read_csv('poblacion.csv') pd.options.display.float_format = '{:,.1f}'.format df = pd.read_csv('poblacion.csv') df['year'] = pd.Categorical(df['year'].apply(str)) idx_filtro = df['Country'].isin(['Mexico','Panama']) df_filtro_country = df[idx_filtro] df_filtro_country =df_filtro_country.set_index(['Country','year']).sort_index(ascending= [False,True]) print(df_filtro_country.unstack('Country')) ids = pd.IndexSlice print(df_filtro_country.loc[ids['Albania':'Azerbaijan','2015':'2016'],:].sort_index())
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 7568, 796, 279, 67, 13, 961, 62, 40664, 10786, 79, 45292, 49443, 13, 40664, 11537, 198, 30094, 13, 25811, 13, 13812, 13, 22468, 62, 18982, 796, 705, 90, 25, 3...
2.376068
234
from django.contrib.auth.models import User from django.db import models
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 628, 628, 198 ]
3.347826
23
# # Py-Alpha-AMD Registration Framework # Author: Johan Ofverstedt # Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information # # Copyright 2019 Johan Ofverstedt # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # # # Symmetric Average Minimal Distances (AMD) Distance implemented as a class. # import numpy as np
[ 198, 2, 198, 2, 9485, 12, 38077, 12, 28075, 24610, 25161, 198, 2, 6434, 25, 16053, 272, 3226, 332, 30679, 83, 198, 2, 20984, 25, 12549, 290, 3851, 436, 1632, 3020, 19482, 7412, 24610, 13403, 319, 4307, 1817, 14336, 3191, 2558, 6377, ...
3.92
350
# Generated by Django 3.0.8 on 2020-07-28 12:46 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 23, 319, 12131, 12, 2998, 12, 2078, 1105, 25, 3510, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
from math import factorial n = int(input('Digite um nmero, para obter seu fatorial: ')) print('{}! {}'.format(n, factorial(n)))
[ 6738, 10688, 1330, 1109, 5132, 198, 77, 796, 493, 7, 15414, 10786, 19511, 578, 23781, 299, 647, 78, 11, 31215, 909, 353, 384, 84, 277, 21592, 25, 705, 4008, 198, 4798, 10786, 90, 92, 0, 220, 23884, 4458, 18982, 7, 77, 11, 1109, 51...
2.6875
48
#!/usr/bin/env python3 from setuptools import setup, Extension setup( ext_modules=[ Extension('bloom._hashc', ['bloom/_hashcmodule.c']) ])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 27995, 198, 198, 40406, 7, 198, 220, 220, 220, 1070, 62, 18170, 41888, 198, 220, 220, 220, 220, 220, 220, 220, 27995, 10786, 2436, 4...
2.453125
64
#!/usr/bin/env python3 import atexit import logging import os import random import subprocess import sys import time import msgpack logger = logging.getLogger(__name__) def unmsgpack(ob): "convert dict from msgpack.loads() with byte string keys to text string keys" if isinstance(ob, dict): od = {} for k,v in ob.items(): k = maybedecode(k) okv = False if (not okv) and (k == 'note'): try: v = unmsgpack(mloads(v)) okv = True except: pass if (not okv) and k in ('type', 'note'): try: v = v.decode() okv = True except: pass if not okv: v = unmsgpack(v) od[k] = v return od if isinstance(ob, list): return [unmsgpack(v) for v in ob] #if isinstance(ob, bytes): # return base64.b64encode(ob).decode() return ob # whoever calls this will need to import boto and get the s3 client
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 379, 37023, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 11748, 640, 198, 198, 11748, 31456, 8002, 198, 198, 64...
1.859532
598
#!/usr/bin/env python # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------------------------------------------------- # INFO: # ----------------------------------------------------------------------------------------------------------------------- """ Author: Evan Hubinger License: Apache 2.0 Description: Wrapper around PyParsing that selects the best available implementation. """ # ----------------------------------------------------------------------------------------------------------------------- # IMPORTS: # ----------------------------------------------------------------------------------------------------------------------- from __future__ import print_function, absolute_import, unicode_literals, division from coconut.root import * # NOQA import os import sys import traceback import functools import inspect from warnings import warn from collections import defaultdict from coconut.constants import ( PURE_PYTHON, PYPY, use_fast_pyparsing_reprs, use_packrat_parser, packrat_cache_size, default_whitespace_chars, varchars, min_versions, pure_python_env_var, enable_pyparsing_warnings, use_left_recursion_if_available, ) from coconut.util import get_clock_time # NOQA from coconut.util import ( ver_str_to_tuple, ver_tuple_to_str, get_next_version, ) # warning: do not name this file cPyparsing or pyparsing or it might collide with the following imports try: if PURE_PYTHON: raise ImportError("skipping cPyparsing check due to " + pure_python_env_var + " = " + os.environ.get(pure_python_env_var, "")) import cPyparsing as _pyparsing from cPyparsing import * # NOQA from cPyparsing import __version__ PYPARSING_PACKAGE = "cPyparsing" PYPARSING_INFO = "Cython cPyparsing v" + __version__ except ImportError: try: import pyparsing as _pyparsing from pyparsing import * # NOQA from pyparsing import __version__ PYPARSING_PACKAGE = "pyparsing" PYPARSING_INFO = "Python pyparsing v" + __version__ except ImportError: 
traceback.print_exc() __version__ = None PYPARSING_PACKAGE = "cPyparsing" PYPARSING_INFO = None # ----------------------------------------------------------------------------------------------------------------------- # VERSION CHECKING: # ----------------------------------------------------------------------------------------------------------------------- min_ver = min(min_versions["pyparsing"], min_versions["cPyparsing"][:3]) # inclusive max_ver = get_next_version(max(min_versions["pyparsing"], min_versions["cPyparsing"][:3])) # exclusive cur_ver = None if __version__ is None else ver_str_to_tuple(__version__) if cur_ver is None or cur_ver < min_ver: min_ver_str = ver_tuple_to_str(min_ver) raise ImportError( "Coconut requires pyparsing/cPyparsing version >= " + min_ver_str + ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "") + " (run '{python} -m pip install --upgrade {package}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE), ) elif cur_ver >= max_ver: max_ver_str = ver_tuple_to_str(max_ver) warn( "This version of Coconut was built for pyparsing/cPyparsing versions < " + max_ver_str + ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "") + " (run '{python} -m pip install {package}<{max_ver}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE, max_ver=max_ver_str), ) # ----------------------------------------------------------------------------------------------------------------------- # SETUP: # ----------------------------------------------------------------------------------------------------------------------- if cur_ver >= (3,): MODERN_PYPARSING = True _trim_arity = _pyparsing.core._trim_arity _ParseResultsWithOffset = _pyparsing.core._ParseResultsWithOffset else: MODERN_PYPARSING = False _trim_arity = _pyparsing._trim_arity _ParseResultsWithOffset = _pyparsing._ParseResultsWithOffset USE_COMPUTATION_GRAPH = ( not MODERN_PYPARSING # not yet supported and not PYPY # experimentally determined ) if 
enable_pyparsing_warnings: if MODERN_PYPARSING: _pyparsing.enable_all_warnings() else: _pyparsing._enable_all_warnings() _pyparsing.__diag__.warn_name_set_on_empty_Forward = False if MODERN_PYPARSING and use_left_recursion_if_available: ParserElement.enable_left_recursion() elif use_packrat_parser: ParserElement.enablePackrat(packrat_cache_size) ParserElement.setDefaultWhitespaceChars(default_whitespace_chars) Keyword.setDefaultKeywordChars(varchars) # ----------------------------------------------------------------------------------------------------------------------- # FAST REPRS: # ----------------------------------------------------------------------------------------------------------------------- if PY2: def fast_repr(cls): """A very simple, fast __repr__/__str__ implementation.""" return "<" + cls.__name__ + ">" else: fast_repr = object.__repr__ _old_pyparsing_reprs = [] def set_fast_pyparsing_reprs(): """Make pyparsing much faster by preventing it from computing expensive nested string representations.""" for obj in vars(_pyparsing).values(): try: if issubclass(obj, ParserElement): _old_pyparsing_reprs.append((obj, (obj.__repr__, obj.__str__))) obj.__repr__ = functools.partial(fast_repr, obj) obj.__str__ = functools.partial(fast_repr, obj) except TypeError: pass def unset_fast_pyparsing_reprs(): """Restore pyparsing's default string representations for ease of debugging.""" for obj, (repr_method, str_method) in _old_pyparsing_reprs: obj.__repr__ = repr_method obj.__str__ = str_method if use_fast_pyparsing_reprs: set_fast_pyparsing_reprs() # ----------------------------------------------------------------------------------------------------------------------- # PROFILING: # ----------------------------------------------------------------------------------------------------------------------- _timing_info = [None] # in list to allow reassignment def add_timing_to_method(cls, method_name, method): """Add timing collection to the given method. 
It's a monstrosity, but it's only used for profiling.""" from coconut.terminal import internal_assert # hide to avoid circular import args, varargs, keywords, defaults = inspect.getargspec(method) internal_assert(args[:1] == ["self"], "cannot add timing to method", method_name) if not defaults: defaults = [] num_undefaulted_args = len(args) - len(defaults) def_args = [] call_args = [] fix_arg_defaults = [] defaults_dict = {} for i, arg in enumerate(args): if i >= num_undefaulted_args: default = defaults[i - num_undefaulted_args] def_args.append(arg + "=_timing_sentinel") defaults_dict[arg] = default fix_arg_defaults.append( """ if {arg} is _timing_sentinel: {arg} = _exec_dict["defaults_dict"]["{arg}"] """.strip("\n").format( arg=arg, ), ) else: def_args.append(arg) call_args.append(arg) if varargs: def_args.append("*" + varargs) call_args.append("*" + varargs) if keywords: def_args.append("**" + keywords) call_args.append("**" + keywords) new_method_name = "new_" + method_name + "_func" _exec_dict = globals().copy() _exec_dict.update(locals()) new_method_code = """ def {new_method_name}({def_args}): {fix_arg_defaults} _all_args = (lambda *args, **kwargs: args + tuple(kwargs.values()))({call_args}) _exec_dict["internal_assert"](not any(_arg is _timing_sentinel for _arg in _all_args), "error handling arguments in timed method {new_method_name}({def_args}); got", _all_args) _start_time = _exec_dict["get_clock_time"]() try: return _exec_dict["method"]({call_args}) finally: _timing_info[0][str(self)] += _exec_dict["get_clock_time"]() - _start_time {new_method_name}._timed = True """.format( fix_arg_defaults="\n".join(fix_arg_defaults), new_method_name=new_method_name, def_args=", ".join(def_args), call_args=", ".join(call_args), ) exec(new_method_code, _exec_dict) setattr(cls, method_name, _exec_dict[new_method_name]) return True def collect_timing_info(): """Modifies pyparsing elements to time how long they're executed for. 
It's a monstrosity, but it's only used for profiling.""" from coconut.terminal import logger # hide to avoid circular imports logger.log("adding timing to pyparsing elements:") _timing_info[0] = defaultdict(float) for obj in vars(_pyparsing).values(): if isinstance(obj, type) and issubclass(obj, ParserElement): added_timing = False for attr_name in dir(obj): attr = getattr(obj, attr_name) if ( callable(attr) and not isinstance(attr, ParserElement) and not getattr(attr, "_timed", False) and attr_name not in ( "__getattribute__", "__setattribute__", "__init_subclass__", "__subclasshook__", "__class__", "__setattr__", "__getattr__", "__new__", "__init__", "__str__", "__repr__", "__hash__", "__eq__", "_trim_traceback", "_ErrorStop", "enablePackrat", "inlineLiteralsUsing", "setDefaultWhitespaceChars", "setDefaultKeywordChars", "resetCache", ) ): added_timing |= add_timing_to_method(obj, attr_name, attr) if added_timing: logger.log("\tadded timing to", obj) def print_timing_info(): """Print timing_info collected by collect_timing_info().""" print( """ ===================================== Timing info: (timed {num} total pyparsing objects) ===================================== """.rstrip().format( num=len(_timing_info[0]), ), ) sorted_timing_info = sorted(_timing_info[0].items(), key=lambda kv: kv[1]) for method_name, total_time in sorted_timing_info: print("{method_name}:\t{total_time}".format(method_name=method_name, total_time=total_time))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 16529, 3880, 19351, 6329, 198, 2, 24890, 25, 198, 2, 16529, 3880, 19351, 6329, 198, 198, 37811, 198, 13838, 2...
2.467292
4,479
#!/usr/bin/env python # -*- coding: utf-8 -*- import random from python_hll.hlltype import HLLType from python_hll.hll import HLL from python_hll.serialization import SerializationUtil """Unit tests for BitVector.""" def test_add_basic(): """ Tests basic set semantics of ``HLL.add_raw()``. """ # Adding a single positive value to an empty set should work. hll = new_hll(128) # arbitrary hll.add_raw(1) # positive assert hll.cardinality() == 1 # Adding a single negative value to an empty set should work. hll = new_hll(128) # arbitrary hll.add_raw(-1) # negative assert hll.cardinality() == 1 # Adding a duplicate value to a set should be a no-op. hll = new_hll(128) # arbitrary hll.add_raw(1) # positive hll.add_raw(1) # dupe assert hll.cardinality() == 1 def test_union(): """ Tests ``HLL.union()``. """ # Unioning two distinct sets should work hll_a = new_hll(128) # arbitrary hll_b = new_hll(128) # arbitrary hll_a.add_raw(1) hll_a.add_raw(2) hll_b.add_raw(3) hll_a.union(hll_b) assert hll_a.cardinality() == 3 # Unioning two sets whose union doesn't exceed the cardinality cap should not promote hll_a = new_hll(128) # arbitrary hll_b = new_hll(128) # arbitrary hll_a.add_raw(1) hll_a.add_raw(2) hll_b.add_raw(1) hll_a.union(hll_b) assert hll_a.cardinality() == 2 assert hll_a.get_type() == HLLType.EXPLICIT # Unioning two sets whose union exceeds the cardinality cap should promote hll_a = new_hll(128) # arbitrary hll_b = new_hll(128) # arbitrary for i in range(0, 128): hll_a.add_raw(i) hll_b.add_raw(i+128) hll_a.union(hll_b) assert hll_a.get_type() == HLLType.SPARSE def test_clear(): """ Tests ``HLL.clear()`` """ hll = new_hll(128) # arbitrary hll.add_raw(1) hll.clear() assert hll.cardinality() == 0 def test_to_from_bytes(): """ Tests ``HLL.to_bytes() and ``HLL.from_bytes(). 
""" schema_version = SerializationUtil.DEFAULT_SCHEMA_VERSION type = HLLType.EXPLICIT padding = schema_version.padding_bytes(type) bytes_per_word = 8 # Should work on an empty set hll = new_hll(128) bytes = hll.to_bytes(schema_version) assert len(bytes) == padding # no elements, just padding in_hll = HLL.from_bytes(bytes) assert_elements_equal(hll, in_hll) # Should work on a partially filled set hll = new_hll(128) for i in range(0, 3): hll.add_raw(i) bytes = hll.to_bytes(schema_version) assert len(bytes) == padding + bytes_per_word * 3 in_hll = HLL.from_bytes(bytes) assert_elements_equal(hll, in_hll) # Should work on a full set explicit_threshold = 128 hll = new_hll(explicit_threshold) for i in range(0, explicit_threshold): hll.add_raw(27 + i) bytes = hll.to_bytes(schema_version) assert len(bytes) == padding + bytes_per_word * explicit_threshold in_hll = HLL.from_bytes(bytes) assert_elements_equal(hll, in_hll) def test_random_values(): """ Tests correctness against `set()`. """ explicit_threshold = 4096 canonical = set() hll = new_hll(explicit_threshold) seed = 1 # constant so results are reproducible random.seed(seed) max_java_long = 9223372036854775807 for i in range(0, explicit_threshold): random_long = random.randint(1, max_java_long) canonical.add(random_long) hll.add_raw(random_long) canonical_cardinality = len(canonical) assert hll.cardinality() == canonical_cardinality def test_promotion(): """ Tests promotion to ``HLLType.SPARSE`` and ``HLLType.FULL``. 
""" explicit_threshold = 128 hll = HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT) for i in range(0, explicit_threshold + 1): hll.add_raw(i) assert hll.get_type() == HLLType.SPARSE hll = HLL(11, 5, 4, False, HLLType.EXPLICIT) # expthresh=4 => explicit_threshold=8 for i in range(0, 9): hll.add_raw(i) assert hll.get_type() == HLLType.FULL # ------------------------------------------------------------ # assertion helpers def assert_elements_equal(hll_a, hll_b): """ Asserts that values in both sets are exactly equal. """ assert hll_a._explicit_storage == hll_b._explicit_storage def new_hll(explicit_threshold): """ Builds a ``HLLType.EXPLICIT`` ``HLL`` instance with the specified explicit threshold. :param explicit_threshold: explicit threshold to use for the constructed ``HLL``. This must be greater than zero. :type explicit_threshold: int :returns: A default-sized ``HLLType.EXPLICIT`` empty ``HLL`` instance. This will never be ``None``. :rtype: HLL """ return HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 4738, 198, 198, 6738, 21015, 62, 71, 297, 13, 71, 297, 4906, 1330, 367, 3069, 6030, 198, 6738, 21015, 62,...
2.370867
2,087
from weixin.utils.WeiXinUtils import * # 5.main() # 5.main() if __name__ == '__main__': # names = input("") # hours = int(input("")) # minutes = int(input("")) # number = input("") # hello(names, hours, minutes, number) names = input("") hours = int(input("")) minutes = int(input("")) number = input("") print(names) print(hours) print(minutes) print(number) g = getYMD() g1 = get_iciba_everyday_chicken_soup() # number name = 'http://t.weather.sojson.com/api/weather/city/' + number # get_sentence g2 = get_sentence(name) times = g2['cityInfo'] for key, name in times.items(): city = times['city'] parent = times['parent'] # time1 = g2['data'] for key, name in time1.items(): shidu = time1['shidu'] pm25 = time1['pm25'] quality = time1['quality'] ganmao = time1['ganmao'] time1 = g2['data'] time2 = time1.get('forecast', '') time2 = time2[0] itchat.auto_login(hotReload=True) for key, name in time2.items(): high = time2['high'] low = time2['low'] fx = time2['fx'] fl = time2['fl'] type = time2['type'] notice = time2['type'] # users = itchat.search_friends(names) # userName = users[0]['UserName'] while True: t = datetime.datetime.now() t1 = t.strftime('%Y-%m-%d %H:%M:%S') hour = t.hour minute = t.minute second = t.second print('%d:%d:%d' % (hour, minute, second)) if hour == hours and minute == minutes: itchat.send_msg("%s" % g, toUserName=userName) itchat.send_msg('%s' % g1, toUserName=userName) itchat.send_msg('%s\n' '%s\n' '%s\n ' '%s\n' '%s\n ' '%s\n' '%s \n' 'PM2.5: %s\n' '%s \n' '%s\n' '%s - %s ' % (parent, city, high, low, fx, fl, shidu, pm25, quality, ganmao, type, notice), toUserName=userName) break else: time.sleep(5) # 5 continue itchat.run() time.sleep(86400)
[ 6738, 356, 844, 259, 13, 26791, 13, 1135, 72, 55, 259, 18274, 4487, 1330, 1635, 628, 198, 2, 642, 13, 12417, 3419, 628, 198, 2, 642, 13, 12417, 3419, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 22...
1.763756
1,363
from .models import Dealer from .models import Employee from .models import Customer from .models import Medicine from .models import Purchase from django.shortcuts import render from django.db import IntegrityError
[ 6738, 764, 27530, 1330, 44480, 198, 6738, 764, 27530, 1330, 36824, 198, 6738, 764, 27530, 1330, 22092, 198, 6738, 764, 27530, 1330, 11558, 198, 6738, 764, 27530, 1330, 27637, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 673...
3.920635
63
from django.shortcuts import render from .models import Profile # Create your views here.
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 764, 27530, 1330, 13118, 220, 198, 198, 2, 13610, 534, 5009, 994, 13, 628, 220 ]
3.76
25
# Generated by Django 3.0.4 on 2021-03-20 12:41 import phone_field.models from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 19, 319, 33448, 12, 3070, 12, 1238, 1105, 25, 3901, 198, 198, 11748, 3072, 62, 3245, 13, 27530, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.945946
37
#!/usr/bin/env python # coding=utf-8 ''' Author: Shuangchi He / Yulv Email: yulvchi@qq.com Date: 2022-01-28 14:21:09 Motto: Entities should not be multiplied unnecessarily. LastEditors: Shuangchi He LastEditTime: 2022-04-06 11:40:23 FilePath: /Model_Inference_Deployment/src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py Description: Init from https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html Exporting a model from PyTorch to ONNX and running it using ONNX RUNTIME. ''' import argparse import os import numpy as np from PIL import Image import torch.nn as nn import torch.nn.init as init import torch.utils.model_zoo as model_zoo import torchvision.transforms as transforms import onnx import torch.onnx import onnxruntime from utils import check_dir, torchtensor2numpy # Super Resolution model definition in PyTorch def PyTorch2ONNX(torch_model, dummy_input_to_model, onnx_save_dir, check_onnx_model=True): ''' Export the model. (PyTorch2ONNX) ''' torch.onnx.export( torch_model, # model being run. dummy_input_to_model, # model input (or a tuple for multiple inputs). onnx_save_dir, # where to save the model (can be a file or file-like object). export_params=True, # store the trained parameter weights inside the model file. opset_version=10, # the ONNX version to export the model to. do_constant_folding=True, # whether to execute constant folding for optimization. input_names=['input'], # the model's input names. output_names=['output'], # the model's output names. dynamic_axes={ # variable length axes. 'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}) if check_onnx_model: # Verify the models structure and confirm that the model has a valid schema. onnx_model = onnx.load(onnx_save_dir) onnx.checker.check_model(onnx_model) def Verify_ONNX_in_ONNX_RUNTIME(onnx_dir, dummy_input_to_model, torch_out): ''' Verify ONNX Runtime and PyTorch are computing the same value for the model. ''' # Create an inference session. 
ort_session = onnxruntime.InferenceSession(onnx_dir) # Compute ONNX Runtime output prediction. ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(dummy_input_to_model)} ort_outs = ort_session.run(None, ort_inputs) # Compare ONNX Runtime and PyTorch results np.testing.assert_allclose(torchtensor2numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05) print("Exported model has been tested with ONNXRuntime, and the result looks good!") def Run_ONNX_in_ONNX_RUNTIME(onnx_dir, img_path, img_save_path): ''' Running the model on an image using ONNX Runtime. ''' # Take the tensor representing the greyscale resized image. img = Image.open(img_path) resize = transforms.Resize([224, 224]) img = resize(img) img_ycbcr = img.convert('YCbCr') img_y, img_cb, img_cr = img_ycbcr.split() to_tensor = transforms.ToTensor() img_y = to_tensor(img_y) img_y.unsqueeze_(0) # Create an inference session. ort_session = onnxruntime.InferenceSession(onnx_dir) # Run the ONNX model in ONNX Runtime. ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(img_y)} ort_outs = ort_session.run(None, ort_inputs) img_out_y = ort_outs[0] # Get the output image. img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L') final_img = Image.merge( "YCbCr", [ img_out_y, img_cb.resize(img_out_y.size, Image.BICUBIC), img_cr.resize(img_out_y.size, Image.BICUBIC), ]).convert("RGB") # Save the image, compare this with the output image from mobile device. 
final_img.save(img_save_path) if __name__ == "__main__": parse = argparse.ArgumentParser(description='PyTorch2ONNX_Run_in_ONNX_RUNTIME') parse.add_argument('--img_path', type=str, default='{}/data/cat.jpg'.format(os.path.dirname(os.path.abspath(__file__)))) parse.add_argument('--check_onnx_model', type=bool, default=True) parse.add_argument('--output_dir', type=str, default='{}/output'.format(os.path.dirname(os.path.abspath(__file__)))) args = parse.parse_args() check_dir(args.output_dir) args.onnx_save_dir = '{}/super_resolution.onnx'.format(args.output_dir) args.img_save_path = '{}/cat_superres_with_ort.jpg'.format(args.output_dir) main(args)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 28, 40477, 12, 23, 198, 7061, 6, 198, 13838, 25, 32344, 648, 11072, 679, 1220, 575, 377, 85, 198, 15333, 25, 331, 377, 85, 11072, 31, 38227, 13, 785, 198, 10430, 25, 3316...
2.277778
2,070
import numpy as np import tensorflow as tf from sklearn.model_selection import StratifiedKFold # Set dataset seed np.random.seed(seed=842102) if __name__ == "__main__": dataset = Dataset();
[ 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 29186, 1431, 42, 37, 727, 198, 198, 2, 5345, 27039, 9403, 198, 37659, 13, 25120, 13, 28826, 7, 28826, 28, 23...
2.882353
68
# -*- coding: utf-8 -*- """ Created on Thu Oct 15 14:03:52 2015 @author: jemanjohnson """ import numpy as np import matplotlib.pyplot as plt import os import scipy.io from sklearn import preprocessing from time import time from sklearn.preprocessing import MinMaxScaler # Image Reshape Function def img_as_array(img, gt=False): """Takes a N*M*D image where: * N - number of rows * M - number of columns * D - dimension of data Returns: -------- Image as an array with dimensions - (N*M) by D """ if gt == False: img_array = img.reshape( img.shape[0]*img.shape[1], img.shape[2]) else: img_array = img.reshape( img.shape[0]*img.shape[1]) return img_array # Image Normalization function def standardize(data): """ Quick function to standardize my data between 0 and 1 """ return MinMaxScaler().fit_transform(data) # Define HSI X and y Ground Truth pairing function def img_gt_idx(img, img_gt, printinfo=False): """Takes a flattened image array and extracts the image indices that correspond to the ground truth that we have. """ # Find the non-zero entries n_samples = (img_gt>0).sum() # Find the classification labels classlabels = np.unique(img_gt[img_gt>0]) # Create X matrix containing the features X = img[img_gt>0,:] # Create y matrix containing the labels y = img_gt[img_gt>0] # Print out useful information if printinfo: print('We have {n} ground-truth samples.'.format( n=n_samples)) print('The training data includes {n} classes: {classes}'.format( n=classlabels.size, classes=classlabels.T)) print('Dimensions of matrix X: {sizeX}'.format(sizeX=X.shape)) print('Dimensions of matrix y: {sizey}'.format(sizey=y.shape)) return X, y #
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 26223, 2556, 1315, 1478, 25, 3070, 25, 4309, 1853, 198, 198, 31, 9800, 25, 474, 8463, 30686, 1559, 198, 37811, 198, 198, 11748, 299, 32152, 355, ...
2.358173
832
""" Tests for the finsignia.gae.controllers module. """ import os import sys from finsignia.gae import loader import unittest def test_cases(): return [ApplicationControllerTest, ResourceControllerTest] if '__main__' == __name__: unittest.main()
[ 37811, 198, 51, 3558, 329, 262, 957, 12683, 544, 13, 25002, 13, 3642, 36667, 8265, 13, 198, 37811, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 6738, 957, 12683, 544, 13, 25002, 1330, 40213, 198, 198, 11748, 555, 715, 395, 198...
3.109756
82
from tweepypoll.tweepypoll import visualize_poll import pandas as pd import altair as alt def test_visualize_poll(): """Test visualize_poll on a dictionary input""" sample_poll_obj = [ { "text": "Important research!!!", "duration": 1440, "date": "2022-01-22T04:01:08.000Z", "poll options": [ {"position": 1, "label": "Cookies", "votes": 29}, {"position": 2, "label": "Cupcakes", "votes": 5}, {"position": 3, "label": "Donuts", "votes": 24}, {"position": 4, "label": "Ice Cream", "votes": 25}, ], "user": "GregShahade", "total": 83, } ] test_plot = visualize_poll(sample_poll_obj) # test settings on altair plot assert isinstance( test_plot[0], alt.Chart ), "The type of the output mush be a altair chart" assert ( test_plot[0].encoding.x.shorthand == "votes" ), "The votes should be mapped to the x axis" assert ( test_plot[0].encoding.y.shorthand == "label" ), "The label should be mapped to the y axis" assert test_plot[0].mark == "bar", "mark should be a bar" assert ( test_plot[0].encoding.color.title == "Options" ), "Option should be the legend title" # check if show_user=True, correct user name is printed assert sample_poll_obj[0]["user"] == "GregShahade", "The user name is not correct." # check if show_date=True, correct date and time is printed assert ( pd.Timestamp(sample_poll_obj[0]["date"]).strftime("%Y-%m-%d %H:%M:%S") == "2022-01-22 04:01:08" ), "Date and time is not correct." # check if show_duration=True, correct duration is printed assert sample_poll_obj[0]["duration"] / 60 == 24.0, "Duration is not correct." # check if calculated total votes is equal to the input dict df = pd.DataFrame(sample_poll_obj[0]["poll options"]) assert ( df["votes"].sum() == sample_poll_obj[0]["total"] ), "Total response is not correct."
[ 6738, 4184, 538, 4464, 692, 13, 83, 732, 538, 4464, 692, 1330, 38350, 62, 30393, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 5988, 958, 355, 5988, 628, 198, 4299, 1332, 62, 41464, 1096, 62, 30393, 33529, 198, 220, 220, 220, 37...
2.362187
878
""" Avahi Network Service Scripting """ import Queue import threading import avahi, dbus, gobject from dbus import DBusException from dbus.mainloop.glib import DBusGMainLoop __all__ = ["QuiltAvahiServer", "QuiltAvahiClient"] TYPE = '_quilt._tcp' from threading import Thread
[ 37811, 220, 198, 220, 220, 220, 5184, 32810, 7311, 4809, 12327, 278, 198, 37811, 198, 11748, 4670, 518, 198, 11748, 4704, 278, 198, 11748, 1196, 32810, 11, 288, 10885, 11, 48484, 752, 198, 6738, 288, 10885, 1330, 360, 16286, 16922, 220,...
2.927835
97
from appium import webdriver from .utils import PATH desired_caps = dict( platformName='Android', platformVersion='10', automationName='uiautomator2', deviceName='Android Emulator', app=PATH('app/ApiDemos-debug.apk.zip') ) driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
[ 6738, 598, 1505, 1330, 3992, 26230, 198, 198, 6738, 764, 26791, 1330, 46490, 628, 198, 8906, 1202, 62, 27979, 796, 8633, 7, 198, 220, 220, 220, 3859, 5376, 11639, 25934, 3256, 198, 220, 220, 220, 3859, 14815, 11639, 940, 3256, 198, 22...
2.823009
113
import json import os import argparse HOME = os.environ['HOME']+'/results/' parser = argparse.ArgumentParser(description="Python script generates the SCFiles using MSPIDs") parser.add_argument("-m", "--mspids", nargs="+", required=True, help="1 or more MSPIDs") parser.add_argument("-n", "--networkId", metavar='', required=True, help="Network ID") args = parser.parse_args() if __name__ == "__main__": scFileCreator = SCFileCreator()
[ 11748, 33918, 198, 11748, 28686, 198, 11748, 1822, 29572, 198, 198, 39069, 796, 28686, 13, 268, 2268, 17816, 39069, 20520, 10, 26488, 43420, 14, 6, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 11213, 2625, 37906, 4226, ...
2.979866
149
import re #Regular expression library from django import forms from django.contrib.auth.models import User from django.core.exceptions import ObjectDoesNotExist from django.template import RequestContext from django.contrib.auth.forms import AuthenticationForm from catalogue.models import Submitted from models import GlobularCluster as GC from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions #from crispy_forms.bootstrap import InlineField #class login_page(forms.Form): # username = forms.CharField(label='Username', max_length=30) #password = forms.CharField(widget=forms.PasswordInput) #model = User #widgets = { # 'password': forms.PasswordInput(), #}
[ 11748, 302, 220, 220, 1303, 40164, 5408, 5888, 198, 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 9515, 13921, 3673, ...
3.547009
234
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F """ modified by zyl 2021/3/2 """ # class BatchMultiHeadGraphAttention(nn.Module): # """ # graph attetion layer(GAL) # """ # def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True): # super(BatchMultiHeadGraphAttention, self).__init__() # self.n_head = n_head # self.f_in = f_in # self.f_out = f_out # self.w = nn.Parameter(torch.Tensor(n_head, f_in, f_out)) # self.a_src = nn.Parameter(torch.Tensor(n_head, f_out, 1)) # self.a_dst = nn.Parameter(torch.Tensor(n_head, f_out, 1)) # self.leaky_relu = nn.LeakyReLU(negative_slope=0.2) # self.softmax = nn.Softmax(dim=-1) # self.dropout = nn.Dropout(attn_dropout) # if bias: # self.bias = nn.Parameter(torch.Tensor(f_out)) # nn.init.constant_(self.bias, 0) # else: # self.register_parameter("bias", None) # nn.init.xavier_uniform_(self.w, gain=1.414) # nn.init.xavier_uniform_(self.a_src, gain=1.414) # nn.init.xavier_uniform_(self.a_dst, gain=1.414) # def forward(self, h, adj): # bs, n = h.size()[:2] # h_prime = torch.matmul(h.unsqueeze(1), self.w) # attn_src = torch.matmul(h_prime, self.a_src) # attn_dst = torch.matmul(h_prime, self.a_dst) # attn = attn_src.expand(-1, -1, -1, n) + attn_dst.expand(-1, -1, -1, n).permute(0, 1, 3, 2) # attn = self.leaky_relu(attn) # attn = self.softmax(attn) # attn = self.dropout(attn) # attn = torch.matmul(torch.squeeze(attn, dim=0), adj) # attn = torch.unsqueeze(attn, 0) # output = torch.matmul(attn, h_prime) # if self.bias is not None: # return output + self.bias, attn # else: # return output, attn # def __repr__(self): # return ( # self.__class__.__name__ # + " (" # + str(self.n_head) # + " -> " # + str(self.f_in) # + " -> " # + str(self.f_out) # + ")" # ) # """ # modified by zyl 2021/2/6 graph attetion network # """ # class GAT(nn.Module): # def __init__(self, n_units, n_heads, dropout=0.2, alpha=0.2): # super(GAT, self).__init__() # self.n_layer = len(n_units) - 1 # self.dropout = dropout # 
self.layer_stack = nn.ModuleList() # for i in range(self.n_layer): # f_in = n_units[i] * n_heads[i - 1] if i else n_units[i] # self.layer_stack.append( # BatchMultiHeadGraphAttention( # n_heads[i], f_in=f_in, f_out=n_units[i + 1], attn_dropout=dropout # ) # ) # self.norm_list = [ # torch.nn.InstanceNorm1d(32).cuda(), # torch.nn.InstanceNorm1d(64).cuda(), # ] # def forward(self, x, adj): # bs, n = x.size()[:2] # for i, gat_layer in enumerate(self.layer_stack): # # x = self.norm_list[i](x.permute(0, 2, 1)).permute(0, 2, 1) # x, attn = gat_layer(x, adj) # if i + 1 == self.n_layer: # x = x.squeeze(dim=1) # else: # x = F.elu(x.contiguous().view(bs, n, -1)) # x = F.dropout(x, self.dropout, training=self.training) # else: # return x # """ # modified by zyl 2021/2/6 graph attetion network encoder # """ # class GATEncoder(nn.Module): # def __init__(self, n_units, n_heads, dropout, alpha): # super(GATEncoder, self).__init__() # self.gat_intra = GAT([40,72,16], n_heads, dropout, alpha) # self.gat_inter = GAT([16,72,16], n_heads, dropout, alpha) # self.out_embedding = nn.Linear(16*2, 24) # def normalize(self, adj, dim): # N = adj.size() # adj2 = torch.sum(adj, dim) # # norm = adj2.unsqueeze(1).float() # # norm = norm.pow(-1) # # norm_adj = adj.mul(norm) # # return norm_adj # def forward(self, obs_traj_embedding, seq_start_end, end_pos, end_group): # graph_embeded_data = [] # for start, end in seq_start_end.data: # curr_seq_embedding_traj = obs_traj_embedding[:, start:end, :] # h_states = torch.squeeze(obs_traj_embedding, dim=0) # num_ped = end - start # curr_end_group = end_group[start:end] # eye_mtx = torch.eye(num_ped, device=end_group.device).bool() # A_g = curr_end_group.repeat(1, num_ped) # B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1) # M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx # A_intra = self.normalize(M_intra, dim=1).cuda() # curr_seq_graph_intra = self.gat_intra(curr_seq_embedding_traj, A_intra) # # print("curr_seq_embedding_traj:", 
curr_seq_embedding_traj.size()) # # print("curr_seq_graph_intra:", curr_seq_graph_intra.size()) # R_intra_unique = torch.unique(M_intra, sorted=False, dim=0) # n_group = R_intra_unique.size()[0] # R_intra_unique.unsqueeze_(1) # R_intra = [] # for i in range(n_group-1, -1, -1): # R_intra.append(R_intra_unique[i]) # R_intra = torch.cat(R_intra, dim=0) # R_intra = self.normalize(R_intra, dim=1).cuda() # curr_seq_graph_state_in = torch.matmul(R_intra, torch.squeeze(curr_seq_graph_intra, dim=0)) # curr_seq_graph_state_in = torch.unsqueeze(curr_seq_graph_state_in, 0) # M_inter = torch.ones((n_group, n_group), device=end_group.device).bool() # A_inter = self.normalize(M_inter, dim=1).cuda() # curr_seq_graph_out = self.gat_inter(curr_seq_graph_state_in, A_inter) # curr_seq_graph_inter = torch.matmul(R_intra.T, torch.squeeze(curr_seq_graph_out, dim=0)) # curr_seq_graph_inter = torch.unsqueeze(curr_seq_graph_inter, 0) # curr_gat_state = torch.cat([curr_seq_graph_intra, curr_seq_graph_inter],dim=2) # curr_gat_state = torch.squeeze(curr_gat_state, dim=0) # curr_gat_state = self.out_embedding(curr_gat_state) # curr_gat_state = torch.unsqueeze(curr_gat_state, 0) # graph_embeded_data.append(curr_gat_state) # graph_embeded_data = torch.cat(graph_embeded_data, dim=1) # return graph_embeded_data
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 628, 628, 198, 198, 37811, 198, 220, 220, 220, 9518, 416, 1976, 2645, 33448, 14, 18, 14, ...
1.775221
3,737
#!/usr/bin/python # # Generates ipfixtypes.hh from IPFIX spec and schema # # Copyright (c) 2006 Mazu Networks, Inc. # # $Id: ipfixtypes.py,v 1.1 2006/05/12 16:43:44 eddietwo Exp $ # import xml.dom.minidom import sys import time def main(): if len(sys.argv) < 2: print "Usage: %s [OPTION]... [FILE]..." % sys.argv[0] sys.exit(0) dataTypes = {} fieldTypes = {} for file in sys.argv[1:]: spec = IPFIXSpecification(file) for field in spec.fieldDefinitions(): if dataTypes.has_key(field.dataType): dataTypes[field.dataType].append(field.name) else: dataTypes[field.dataType] = [field.name] fieldTypes[int(field.fieldId)] = field.name for dataType in spec.dataTypes(): if not dataTypes.has_key(dataType): dataTypes[dataType] = [] # IPFIX_unsigned8, data_types = ["IPFIX_%s" % dataType for dataType in dataTypes] data_types = ",\n ".join(data_types) # IPFIX_octetDeltaCount = 1, field_types = fieldTypes.items() field_types.sort() field_types = ["IPFIX_%s = %d" % (name, fieldId) for fieldId, name in field_types] field_types = ",\n ".join(field_types) # case IPFIX_octetDeltaCount: # case IPFIX_packetDeltaCount: # ... # return IPFIX_unsigned64; ipfix_datatypes = [] for dataType, names in dataTypes.iteritems(): if names: ipfix_datatypes += ["case IPFIX_%s:" % name for name in names] ipfix_datatypes.append(" return IPFIX_%s;" % dataType) ipfix_datatypes = "\n ".join(ipfix_datatypes) # case IPFIX_octetDeltaCount: return "octetDeltaCount"; ipfix_names = ["case IPFIX_%s: return \"%s\";" % \ (name, name) for name in fieldTypes.values()] ipfix_names = "\n ".join(ipfix_names) # else if (strcmp(name, "octetDeltaCount") == 0) { return IPFIX_octetDeltaCount; } ipfix_types = ["else if (strcmp(name, \"%s\") == 0) { return IPFIX_%s; }" % \ (name, name) for name in fieldTypes.values()] ipfix_types = "\n ".join(ipfix_types) date = time.asctime() print """ // DO NOT EDIT. Generated at %(date)s. 
#ifndef IPFIXTYPES_HH #define IPFIXTYPES_HH CLICK_DECLS enum IPFIX_dataType { IPFIX_unknown = 0, %(data_types)s }; enum IPFIX_fieldType { %(field_types)s }; static inline IPFIX_dataType ipfix_datatype(uint16_t type) { switch (type) { %(ipfix_datatypes)s } return IPFIX_unknown; } static inline const char * ipfix_name(uint16_t type) { switch (type) { %(ipfix_names)s } return "unknown"; } static inline uint16_t ipfix_type(const char *name) { if (0) { } %(ipfix_types)s else { return 0; } } CLICK_ENDDECLS #endif """.strip() % locals() if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 198, 2, 2980, 689, 20966, 69, 6346, 9497, 13, 12337, 422, 6101, 47084, 1020, 290, 32815, 198, 2, 198, 2, 15069, 357, 66, 8, 4793, 21625, 84, 27862, 11, 3457, 13, 198, 2, 198, 2, 720,...
2.245044
1,261
import enum
[ 11748, 33829, 628 ]
4.333333
3
# -*- coding: utf-8 -*- ''' Created on 24 Aug 2010 @author: Matthew Wilcoxson functions convert from one value to another in the form: def conversion(value): #do something return new_value ''' import time
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 41972, 319, 1987, 2447, 3050, 198, 198, 31, 9800, 25, 9308, 5187, 40359, 1559, 198, 198, 12543, 2733, 10385, 422, 530, 1988, 284, 1194, 287, 262, 1296, 2...
2.92
75
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2018 QiaoPeng. # # Invenio is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. """Version information for Wxpy-Index. This file is imported by ``wxpy_index.__init__``, and parsed by ``setup.py``. """ from __future__ import absolute_import, print_function __version__ = '0.1.0.dev20180000'
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 554, 574, 952, 13, 198, 2, 15069, 357, 34, 8, 2864, 45724, 47, 1516, 13, 198, 2, 198, 2, 554, 574, 952, 318, 1479, 3788, 26,...
3.385666
293
from absl import app from absl import flags from absl import logging import csv import importlib import numpy as np import os.path as path import random from sklearn.model_selection import train_test_split import time from transformations.reader.matrix import test_argument_and_file, load_and_log import transformations.label_noise as label_noise import methods.knn as knn import methods.knn_extrapolate as knn_extrapolate import methods.ghp as ghp import methods.kde as kde import methods.onenn as onenn import methods.lr_model as lr_model FLAGS = flags.FLAGS flags.DEFINE_string("path", ".", "Path to the matrices directory") flags.DEFINE_string("features_train", None, "Name of the train features numpy matrix exported file (npy)") flags.DEFINE_string("features_test", None, "Name of the test features numpy matrix exported file (npy)") flags.DEFINE_string("labels_train", None, "Name of the train labels numpy matrix exported file (npy)") flags.DEFINE_string("labels_test", None, "Name of the test labels numpy matrix exported file (npy)") flags.DEFINE_list("noise_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], "Run at different noise levels") flags.DEFINE_integer("noise_runs", 5, "Number of runs for different noise levels") flags.DEFINE_string("output_file", None, "File to write the output in CSV format (including headers)") flags.DEFINE_bool("output_overwrite", True, "Writes (if True) or appends (if False) to the specified output file if any") flags.DEFINE_enum("method", None, ["knn", "knn_loo", "knn_extrapolate", "ghp", "kde_knn_loo", "kde", "onenn", "lr_model"], "Method to estimate the bayes error (results in either 1 value or a lower and upper bound)") if __name__ == "__main__": app.run(main)
[ 6738, 2352, 75, 1330, 598, 198, 6738, 2352, 75, 1330, 9701, 198, 6738, 2352, 75, 1330, 18931, 198, 11748, 269, 21370, 198, 11748, 1330, 8019, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 13, 6978, 355, 3108, 198, 11748, 4738, ...
3.074074
567
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Wed Jan 10 09:45:23 2018 @author: www.github.com/GustavZ """ import os import sys import numpy as np from rod.config import Config from rod.helper import get_model_list, check_if_optimized_model from rod.model import ObjectDetectionModel, DeepLabModel ROOT_DIR = os.getcwd() #MODELS_DIR = os.path.join(ROOT_DIR,'models') MODELS_DIR = '/home/gustav/workspace/eetfm_automation/nmsspeed_test' INPUT_TYPE = 'image' # Read sequentail Models or Gather all Models from models/ config = Config('od') if config.SEQ_MODELS: model_names = config.SEQ_MODELS else: model_names = get_model_list(MODELS_DIR) # Sequential testing for model_name in model_names: print("> testing model: {}".format(model_name)) # conditionals optimized=False single_class=False # Test Model if 'hands' in model_name or 'person' in model_name: single_class=True if 'deeplab' in model_name: config = create_test_config('dl',model_name,optimized,single_class) model = DeepLabModel(config).prepare_model(INPUT_TYPE) else: config = create_test_config('od',model_name,optimized,single_class) model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE) # Check if there is an optimized graph model_dir = os.path.join(os.getcwd(),'models',model_name) optimized = check_if_optimized_model(model_dir) # Again for the optimized graph if optimized: if 'deeplab' in model_name: config = create_test_config('dl',model_name,optimized,single_class) model = DeepLabModel(config).prepare_model(INPUT_TYPE) else: config = create_test_config('od',model_name,optimized,single_class) model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE) model.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 2365, 838, 7769, 25, 2231, 25, 1954, 2864, 198, 198, 31, 9800, 25, 7324, 13, 12567,...
2.544828
725
# Copyright 2021, Robotics Lab, City College of New York # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Originating Author: Jinglun Feng, (jfeng1@ccny.cuny.edu) import argparse import logging import os import sys import numpy as np import torch import torch.nn as nn from torch import optim from tqdm import tqdm from torch.autograd import Variable from torch.utils.tensorboard import SummaryWriter from torch.utils.data import DataLoader, random_split from torchvision.utils import save_image from model import UNet3D from utils.data_loader import BasicDataset from utils.utils import PointLoss from eval import eval_net if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') args = args_setting() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info(f'Let\'s use {torch.cuda.device_count()} GPUs!') net = UNet3D(residual='conv') net = torch.nn.DataParallel(net) if args.load != '': net.load_state_dict( torch.load(args.load, map_location=device) ) logging.info(f'Model loaded from {args.load}') logging.info(f'Network Structure:\n' f'\t{net}\n') net.to(device=device) try: train_net(net=net, epochs=args.epochs, batch_size=args.batchsize, lr=args.lr, device=device) except KeyboardInterrupt: torch.save(net.state_dict(), 'INTERRUPTED.pth') logging.info('Saved interrupt') try: sys.exit(0) except SystemExit: os._exit(0)
[ 2, 15069, 33448, 11, 47061, 3498, 11, 2254, 5535, 286, 968, 1971, 198, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, ...
2.599515
824
import sys
[ 11748, 25064, 628 ]
4
3
from bs4 import BeautifulSoup from ricecooker.classes import nodes
[ 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 198, 6738, 11464, 27916, 263, 13, 37724, 1330, 13760, 628, 198 ]
3.5
20
from __future__ import absolute_import, division, print_function import six.moves.cPickle as pickle from glob import glob import os import pytest from dxtbx.model import Experiment, ExperimentList from dxtbx.model.experiment_list import ExperimentListFactory, \ ExperimentListDumper, ExperimentListDict # def test_experimentlist_index(experiment_list): # # Check the indices of exisiting experiments # assert experiment_list.index(experiment_list[0]) is 0 # assert experiment_list.index(experiment_list[1]) is 1 # assert experiment_list.index(experiment_list[2]) is 2 # assert experiment_list.index(experiment_list[3]) is 1 # assert experiment_list.index(experiment_list[4]) is 0 # # Check index of non exisiting experiment # try: # experiment_list.index(Experiment()) # assert False # except ValueError: # pass
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 2237, 13, 76, 5241, 13, 66, 31686, 293, 355, 2298, 293, 198, 6738, 15095, 1330, 15095, 198, 11748, 28686, 198, 198, 11748, 12972, 9288, 198...
3.090909
275
from sys import argv if __name__=="__main__": make_new_reference_files(argv[1], argv[2], argv[3], argv[4])
[ 6738, 25064, 1330, 1822, 85, 198, 198, 361, 11593, 3672, 834, 855, 1, 834, 12417, 834, 1298, 198, 220, 220, 220, 787, 62, 3605, 62, 35790, 62, 16624, 7, 853, 85, 58, 16, 4357, 1822, 85, 58, 17, 4357, 1822, 85, 58, 18, 4357, 1822...
2.24
50
"""Module which defines collaborating app ids. This module is used by: settings.py scripts/deploy.sh """ import os # List of (playground appid, mimic app id, playground app id alias) _APP_ID_TUPLES = [ # production environment ('try-appengine', 'shared-playground', 'cloud-playground'), # development environment ('fredsa-bliss', 'fredsa-hr', None), ('dansanderson-bliss', 'dansanderson-mimic', None), ] # Our app id _APP_ID = os.environ['APPLICATION_ID'].split('~')[-1] # support regular 'appspot.com' app ids only assert ':' not in _APP_ID, ('{} app ids are unsupported' .format(_APP_ID.split(':')[0])) app_ids = _GetTupleFor(_APP_ID) # The application where the playground IDE runs PLAYGROUND_APP_ID = app_ids[0] # The application where user code runs MIMIC_APP_ID = app_ids[1] # The application alias where the playground IDE runs PLAYGROUND_APP_ID_ALIAS = app_ids[2] # Whether we're using two collaborating app ids TWO_COLLABORATING_APP_IDS = PLAYGROUND_APP_ID != MIMIC_APP_ID def PrintAppIds(): """Prints a new line delimited list of known app ids.""" print '\n'.join(set((PLAYGROUND_APP_ID, MIMIC_APP_ID)))
[ 37811, 26796, 543, 15738, 38152, 598, 220, 2340, 13, 198, 198, 1212, 8265, 318, 973, 416, 25, 198, 220, 6460, 13, 9078, 198, 220, 14750, 14, 2934, 1420, 13, 1477, 198, 37811, 628, 198, 11748, 28686, 628, 198, 2, 7343, 286, 357, 1759...
2.666667
444
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import six from six.moves import zip_longest import time import salt from salt.exceptions import CommandExecutionError LOG = logging.getLogger(__name__) KEYSTONE_LOADED = False def __virtual__(): """Only load if the nova module is in __salt__""" if 'keystonev3.project_get_details' in __salt__: global KEYSTONE_LOADED KEYSTONE_LOADED = True return 'novav21' def _get_keystone_project_id_by_name(project_name, cloud_name): if not KEYSTONE_LOADED: LOG.error("Keystone module not found, can not look up project ID " "by name") return None project = __salt__['keystonev3.project_get_details']( project_name, cloud_name=cloud_name) if not project: return None return project['project']['id'] def cell_present(name='cell1', transport_url='none:///', db_engine='mysql', db_name='nova_upgrade', db_user='nova', db_password=None, db_address='0.0.0.0'): """Ensure nova cell is present For newly created cells this state also runs discover_hosts and map_instances.""" cell_info = __salt__['cmd.shell']( "nova-manage cell_v2 list_cells --verbose | " "awk '/%s/ {print $4,$6,$8}'" % name).split() db_connection = ( '%(db_engine)s+pymysql://%(db_user)s:%(db_password)s@' '%(db_address)s/%(db_name)s?charset=utf8' % { 'db_engine': db_engine, 'db_user': db_user, 'db_password': db_password, 'db_address': db_address, 'db_name': db_name}) args = {'transport_url': transport_url, 'db_connection': db_connection} # There should be at least 1 component printed to 
cell_info if len(cell_info) >= 1: cell_info = dict(zip_longest( ('cell_uuid', 'existing_transport_url', 'existing_db_connection'), cell_info)) cell_uuid, existing_transport_url, existing_db_connection = cell_info command_string = '' if existing_transport_url != transport_url: command_string = ( '%s --transport-url %%(transport_url)s' % command_string) if existing_db_connection != db_connection: command_string = ( '%s --database_connection %%(db_connection)s' % command_string) if not command_string: return _no_change(name, 'Nova cell') try: __salt__['cmd.shell']( ('nova-manage cell_v2 update_cell --cell_uuid %s %s' % ( cell_uuid, command_string)) % args) LOG.warning("Updating the transport_url or database_connection " "fields on a running system will NOT result in all " "nodes immediately using the new values. Use caution " "when changing these values.") ret = _updated(name, 'Nova cell', args) except Exception as e: ret = _update_failed(name, 'Nova cell') ret['comment'] += '\nException: %s' % e return ret args.update(name=name) try: cell_uuid = __salt__['cmd.shell']( 'nova-manage cell_v2 create_cell --name %(name)s ' '--transport-url %(transport_url)s ' '--database_connection %(db_connection)s --verbose' % args) __salt__['cmd.shell']('nova-manage cell_v2 discover_hosts ' '--cell_uuid %s --verbose' % cell_uuid) __salt__['cmd.shell']('nova-manage cell_v2 map_instances ' '--cell_uuid %s' % cell_uuid) ret = _created(name, 'Nova cell', args) except Exception as e: ret = _create_failed(name, 'Nova cell') ret['comment'] += '\nException: %s' % e return ret def cell_absent(name, force=False): """Ensure cell is absent""" cell_uuid = __salt__['cmd.shell']( "nova-manage cell_v2 list_cells | awk '/%s/ {print $4}'" % name) if not cell_uuid: return _non_existent(name, 'Nova cell') try: __salt__['cmd.shell']( 'nova-manage cell_v2 delete_cell --cell_uuid %s %s' % ( cell_uuid, '--force' if force else '')) ret = _deleted(name, 'Nova cell') except Exception as e: ret = 
_delete_failed(name, 'Nova cell') ret['comment'] += '\nException: %s' % e return ret def _db_version_update(db, version, human_readable_resource_name): existing_version = __salt__['cmd.shell']( 'nova-manage %s version 2>/dev/null' % db) try: existing_version = int(existing_version) version = int(version) except Exception as e: ret = _update_failed(existing_version, human_readable_resource_name) ret['comment'] += ('\nCan not convert existing or requested version ' 'to integer, exception: %s' % e) LOG.error(ret['comment']) return ret if existing_version < version: try: __salt__['cmd.shell']( 'nova-manage %s sync --version %s' % (db, version)) ret = _updated(existing_version, human_readable_resource_name, {db: '%s sync --version %s' % (db, version)}) except Exception as e: ret = _update_failed(existing_version, human_readable_resource_name) ret['comment'] += '\nException: %s' % e return ret return _no_change(existing_version, human_readable_resource_name) def api_db_version_present(name=None, version="20"): """Ensures that specific api_db version is present""" return _db_version_update('api_db', version, 'Nova API database version') def db_version_present(name=None, version="334"): """Ensures that specific db version is present""" return _db_version_update('db', version, 'Nova database version') def online_data_migrations_present(name=None, api_db_version="20", db_version="334"): """Runs online_data_migrations if databases are of specific versions""" ret = {'name': 'online_data_migrations', 'changes': {}, 'result': False, 'comment': 'Current nova api_db version != {0} or nova db version ' '!= {1}.'.format(api_db_version, db_version)} cur_api_db_version = __salt__['cmd.shell']( 'nova-manage api_db version 2>/dev/null') cur_db_version = __salt__['cmd.shell']( 'nova-manage db version 2>/dev/null') try: cur_api_db_version = int(cur_api_db_version) cur_db_version = int(cur_db_version) api_db_version = int(api_db_version) db_version = int(db_version) except Exception as e: 
LOG.error(ret['comment']) ret['comment'] = ('\nCan not convert existing or requested database ' 'versions to integer, exception: %s' % e) return ret if cur_api_db_version == api_db_version and cur_db_version == db_version: try: __salt__['cmd.shell']('nova-manage db online_data_migrations') ret['result'] = True ret['comment'] = ('nova-manage db online_data_migrations was ' 'executed successfuly') ret['changes']['online_data_migrations'] = ( 'online_data_migrations run on nova api_db version {0} and ' 'nova db version {1}'.format(api_db_version, db_version)) except Exception as e: ret['comment'] = ( 'Failed to execute online_data_migrations on nova api_db ' 'version %s and nova db version %s, exception: %s' % ( api_db_version, db_version, e)) return ret def _find_failed(name, resource): return { 'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to find {0}s with name {1}'.format(resource, name)} def _created(name, resource, changes): return { 'name': name, 'changes': changes, 'result': True, 'comment': '{0} {1} created'.format(resource, name)} def _create_failed(name, resource): return { 'name': name, 'changes': {}, 'result': False, 'comment': '{0} {1} creation failed'.format(resource, name)} def _no_change(name, resource): return { 'name': name, 'changes': {}, 'result': True, 'comment': '{0} {1} already is in the desired state'.format( resource, name)} def _updated(name, resource, changes): return { 'name': name, 'changes': changes, 'result': True, 'comment': '{0} {1} was updated'.format(resource, name)} def _update_failed(name, resource): return { 'name': name, 'changes': {}, 'result': False, 'comment': '{0} {1} update failed'.format(resource, name)} def _deleted(name, resource): return { 'name': name, 'changes': {}, 'result': True, 'comment': '{0} {1} deleted'.format(resource, name)} def _delete_failed(name, resource): return { 'name': name, 'changes': {}, 'result': False, 'comment': '{0} {1} deletion failed'.format(resource, name)} def 
_non_existent(name, resource): return { 'name': name, 'changes': {}, 'result': True, 'comment': '{0} {1} does not exist'.format(resource, name)}
[ 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 220, 220, ...
2.26258
4,372
from sys import modules from importlib import import_module modules['server'] = import_module('src') from werkzeug.serving import run_simple from server.app import App from server.mode import Mode if __name__=='__main__': app = App(mode=Mode.Development) run_simple('localhost', 8000, app, use_reloader=True)
[ 6738, 25064, 1330, 13103, 201, 198, 6738, 1330, 8019, 1330, 1330, 62, 21412, 201, 198, 18170, 17816, 15388, 20520, 796, 1330, 62, 21412, 10786, 10677, 11537, 201, 198, 201, 198, 6738, 266, 9587, 2736, 1018, 13, 31293, 1330, 1057, 62, 36...
3.037383
107
""" An abstraction layer for metadata fetchers. Supports both syncronous and asyncronous fetchers with cache. """ from .logs import get_log import os import requests from .constants import config from datetime import datetime from collections import deque import six from concurrent import futures import traceback from .parse import parse_resource from itertools import chain from .exceptions import ResourceException from .utils import url_get from copy import deepcopy, copy if six.PY2: from UserDict import DictMixin as ResourceManagerBase elif six.PY3: from collections import MutableMapping as ResourceManagerBase requests.packages.urllib3.disable_warnings() log = get_log(__name__)
[ 37811, 198, 198, 2025, 34651, 7679, 329, 20150, 11351, 3533, 13, 45267, 1111, 17510, 1313, 516, 290, 30351, 1313, 516, 11351, 3533, 351, 12940, 13, 198, 198, 37811, 198, 198, 6738, 764, 6404, 82, 1330, 651, 62, 6404, 198, 11748, 28686, ...
3.602041
196
from pychonet.EchonetInstance import EchonetInstance
[ 6738, 12972, 354, 36823, 13, 36, 354, 36823, 33384, 1330, 412, 354, 36823, 33384, 198 ]
3.533333
15
from IPython.core.magic import Magics, magics_class, line_cell_magic from sys import stdout from os import linesep from os.path import join, expanduser from adlmagics.version import adlmagics_version from adlmagics.converters.dataframe_converter import DataFrameConverter from adlmagics.utils.json_file_persister import JsonFilePersister from adlmagics.utils.ipshell_result_receiver import IPShellResultReceiver from adlmagics.presenters.presenter_base import PresenterBase from adlmagics.presenters.text_presenter import TextPresenter from adlmagics.presenters.adla_job_presenter import AdlaJobPresenter from adlmagics.presenters.adla_jobs_presenter import AdlaJobsPresenter from adlmagics.presenters.adls_files_presenter import AdlsFilesPresenter from adlmagics.presenters.adls_folders_presenter import AdlsFoldersPresenter from adlmagics.services.azure_token_service import AzureTokenService from adlmagics.services.adla_service_sdk_impl import AdlaServiceSdkImpl from adlmagics.services.adls_service_sdk_impl import AdlsServiceSdkImpl from adlmagics.services.session_service import SessionService from adlmagics.services.presenter_factory import PresenterFactory from adlmagics.magics.session.session_magic_base import SessionMagicBase from adlmagics.magics.session.session_viewing_magic import SessionViewingMagic from adlmagics.magics.session.session_item_setting_magic import SessionItemSettingMagic from adlmagics.magics.azure.azure_magic_base import AzureMagicBase from adlmagics.magics.azure.azure_login_magic import AzureLoginMagic from adlmagics.magics.azure.azure_logout_magic import AzureLogoutMagic from adlmagics.magics.adla.adla_magic_base import AdlaMagicBase from adlmagics.magics.adla.adla_accounts_listing_magic import AdlaAccountsListingMagic from adlmagics.magics.adla.adla_job_viewing_magic import AdlaJobViewingMagic from adlmagics.magics.adla.adla_job_submission_magic import AdlaJobSubmissionMagic from adlmagics.magics.adla.adla_jobs_listing_magic import 
AdlaJobsListingMagic from adlmagics.magics.adls.adls_magic_base import AdlsMagicBase from adlmagics.magics.adls.adls_accounts_listing_magic import AdlsAccountsListingMagic from adlmagics.magics.adls.adls_folders_listing_magic import AdlsFoldersListingMagic from adlmagics.magics.adls.adls_files_listing_magic import AdlsFilesListingMagic from adlmagics.magics.adls.adls_file_sampling_magic import AdlsFileSamplingMagic
[ 6738, 6101, 7535, 13, 7295, 13, 32707, 1330, 2944, 873, 11, 2153, 873, 62, 4871, 11, 1627, 62, 3846, 62, 32707, 198, 6738, 25064, 1330, 14367, 448, 198, 6738, 28686, 1330, 3951, 538, 198, 6738, 28686, 13, 6978, 1330, 4654, 11, 4292, ...
3.035264
794
import asyncio import json from datetime import datetime, timedelta from aiohttp import web from .utils import read_body, get_week_times, get_formatted_time from ..data.models import RunRow from ..data.postgres_async_db import AsyncPostgresDB import logging
[ 11748, 30351, 952, 198, 11748, 33918, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 257, 952, 4023, 1330, 3992, 198, 6738, 764, 26791, 1330, 1100, 62, 2618, 11, 651, 62, 10464, 62, 22355, 11, 651, 62, 687, 1689...
3.453333
75
# -*- coding: utf-8 -*- from setup_teardown import start_db, stop_db from nose.tools import * from pykt import KyotoTycoon, KTException
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 9058, 62, 660, 446, 593, 1330, 923, 62, 9945, 11, 2245, 62, 9945, 198, 6738, 9686, 13, 31391, 1330, 1635, 198, 6738, 12972, 21841, 1330, 36298, 25492, 20912, 11, ...
2.914894
47
import Resources import Colors import pygame screen = None # Base class for drawable objects # Created from image and coordinates, stores image and rect
[ 198, 11748, 13864, 198, 11748, 29792, 198, 198, 11748, 12972, 6057, 198, 198, 9612, 796, 6045, 198, 198, 2, 7308, 1398, 329, 3197, 540, 5563, 198, 2, 15622, 422, 2939, 290, 22715, 11, 7000, 2939, 290, 13621, 198 ]
4.131579
38
import re import datetime import bs4 from typing import Dict, Tuple, Optional, TYPE_CHECKING, ClassVar, Pattern, cast, Match, Any from .base import BaseMangaExtractor, MangaExtractorData from ..constants import STATUS_IDS, CENSOR_IDS if TYPE_CHECKING: from ..ext_info import ExternalInfo
[ 11748, 302, 198, 11748, 4818, 8079, 198, 198, 11748, 275, 82, 19, 198, 198, 6738, 19720, 1330, 360, 713, 11, 309, 29291, 11, 32233, 11, 41876, 62, 50084, 2751, 11, 5016, 19852, 11, 23939, 11, 3350, 11, 13225, 11, 4377, 198, 198, 673...
3.193548
93
#!/usr/bin/env python3 import os.path import threading from .. import cryptfile from ..util import * from ..client import create_client from ..syncdir import FsProvider, FsListener
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 11748, 28686, 13, 6978, 198, 11748, 4704, 278, 198, 198, 6738, 11485, 1330, 8194, 7753, 198, 6738, 11485, 22602, 1330, 1635, 198, 6738, 11485, 16366, 1330, 2251, 62, 16366, 198, 6738...
3.345455
55
#!/usr/bin/env python ####################################################### # Copyright (c) 2018, ArrayFire # All rights reserved. # # This file is distributed under 3-clause BSD license. # The complete license agreement can be obtained at: # http://arrayfire.com/licenses/BSD-3-Clause ######################################################## from time import time import arrayfire as af import os import sys if __name__ == "__main__": if (len(sys.argv) > 1): af.set_device(int(sys.argv[1])) console = (sys.argv[2] == '-') if len(sys.argv) > 2 else False af.info() print("** ArrayFire Harris Corner Detector Demo **\n") harris_demo(console)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 29113, 14468, 4242, 21017, 198, 2, 15069, 357, 66, 8, 2864, 11, 15690, 13543, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 770, 2393, 318, 9387, 739, 513, 12, 565, 682, 347,...
3.09589
219
import numpy as np from scipy.linalg import sqrtm from sklearn.preprocessing import StandardScaler
[ 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 75, 1292, 70, 1330, 19862, 17034, 76, 198, 6738, 1341, 35720, 13, 3866, 36948, 1330, 8997, 3351, 36213, 628 ]
3.333333
30
#!/usr/bin/env python3 # encoding: utf-8 """ fake-registration-server.py Created by nano on 2018-11-22. Copyright (c) 2018 VTRUST. All rights reserved. """ import tornado.web import tornado.locks from tornado.options import define, options, parse_command_line define("port", default=80, help="run on the given port", type=int) define("addr", default="192.168.254.1", help="run on the given ip", type=str) define("debug", default=True, help="run in debug mode") import os import signal signal.signal(signal.SIGINT, exit_cleanly) from base64 import b64encode import hashlib import hmac import binascii from time import time timestamp = lambda : int(time()) def main(): parse_command_line() app = tornado.web.Application( [ (r"/", MainHandler), ('/files/(.*)', FilesHandler, {'path': str('../files/')}), (r".*", tornado.web.RedirectHandler, {"url": "http://" + options.addr + "/", "permanent": False}), ], debug=options.debug, ) try: app.listen(options.port, options.addr) print("Listening on " + options.addr + ":" + str(options.port)) tornado.ioloop.IOLoop.current().start() except OSError as err: print("Could not start server on port " + str(options.port)) if err.errno == 98: # EADDRINUSE print("Close the process on this port and try again") else: print(err) if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 37811, 198, 30706, 12, 2301, 33397, 12, 15388, 13, 9078, 198, 41972, 416, 38706, 319, 2864, 12, 1157, 12, 1828, 13, 198, 15269, 357, 66, 8,...
2.778926
484
import numpy as np import fast_carpenter.summary.binning_config as mgr from . import dummy_binning_descriptions as binning
[ 11748, 299, 32152, 355, 45941, 198, 11748, 3049, 62, 66, 5117, 9255, 13, 49736, 13, 8800, 768, 62, 11250, 355, 285, 2164, 198, 6738, 764, 1330, 31548, 62, 8800, 768, 62, 20147, 1968, 507, 355, 9874, 768, 628, 628, 628, 198 ]
3.146341
41
from __future__ import division from constants import * import numpy as np import os precueITIs = np.random.exponential(standard_parameters['mean_iti_precue'], standard_parameters['n_targets']) + standard_parameters['min_iti_precue'] np.save('ITIs/precueITIs.npy',precueITIs) postcueITIs = np.random.exponential(standard_parameters['mean_iti_postcue'], standard_parameters['n_targets']) + standard_parameters['min_iti_postcue'] np.save('ITIs/postcueITIs.npy',postcueITIs) spITIs = np.round(np.random.exponential(standard_parameters['mean_iti_sp'], standard_parameters['n_targets']) + standard_parameters['min_iti_sp']).astype('int32') np.save('ITIs/spITIs.npy',spITIs)
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 38491, 1330, 1635, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 198, 3866, 15509, 2043, 3792, 796, 45941, 13, 25120, 13, 11201, 35470, 7, 20307, 62, 17143, 7307, 17816, 32604,...
2.738776
245
''' PUT FUNCTION HERE !! Author Davinci '''
[ 7061, 6, 198, 30076, 29397, 4177, 2849, 15698, 37867, 198, 198, 13838, 2544, 259, 979, 198, 7061, 6, 628 ]
2.421053
19
# Implementation of the Set ADT container using a Python list. # An iterator for the Set ADT.
[ 2, 46333, 286, 262, 5345, 5984, 51, 9290, 1262, 257, 11361, 1351, 13, 198, 198, 2, 1052, 41313, 329, 262, 5345, 5984, 51, 13, 198 ]
3.8
25
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
input() S=input() dot=S.count(".") ans=dot count=0 for s in S: if s=="#":count+=1 else:dot-=1 ans=(min(ans,count+dot)) print(ans)
[ 15414, 3419, 198, 50, 28, 15414, 3419, 198, 26518, 28, 50, 13, 9127, 7203, 19570, 198, 504, 28, 26518, 198, 9127, 28, 15, 198, 1640, 264, 287, 311, 25, 198, 220, 220, 220, 611, 264, 855, 1, 2, 1298, 9127, 47932, 16, 198, 220, 22...
1.985915
71
from flask import (Blueprint, abort, flash, redirect, render_template, request, url_for) from flask_login import current_user, login_required from app import db from app.billing.forms import CreateBillingForm from app.models import Billing from sqlalchemy import desc billing = Blueprint('billing', __name__)
[ 6738, 42903, 1330, 357, 14573, 4798, 11, 15614, 11, 7644, 11, 18941, 11, 8543, 62, 28243, 11, 2581, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 19016, 62, 1640, 8, 198, 6738, ...
3.160377
106
""" Author: Ryan Faulkner Date: October 19th, 2014 Container for mashup logic. """ import json import random from sqlalchemy.orm.exc import UnmappedInstanceError from flickipedia.redisio import DataIORedis from flickipedia.model.articles import ArticleModel, ArticleContentModel from flickipedia.config import log, settings from flickipedia.model.likes import LikeModel from flickipedia.model.exclude import ExcludeModel from flickipedia.model.photos import PhotoModel from flickipedia.parse import parse_strip_elements, parse_convert_links, \ handle_photo_integrate, format_title_link, add_formatting_generic def get_article_count(): """ Fetch total article count :return: int; total count of articles """ DataIORedis().connect() # Fetch article count from redis (query from DB if not present) # Refresh according to config for rate article_count = DataIORedis().read(settings.ARTICLE_COUNT_KEY) if not article_count \ or random.randint(1, settings.ARTICLE_COUNT_REFRESH_RATE) == 1 \ or article_count < settings.MYSQL_MAX_ROWS: with ArticleModel() as am: article_count = am.get_article_count() DataIORedis().write(settings.ARTICLE_COUNT_KEY, article_count) return int(article_count) def get_max_article_id(): """ Fetch the maximum article ID :return: int; maximum id from article meta """ max_aid = DataIORedis().read(settings.MAX_ARTICLE_ID_KEY) if not max_aid \ or random.randint(1, settings.ARTICLE_MAXID_REFRESH_RATE) == 1: with ArticleModel() as am: max_aid = am.get_max_id() DataIORedis().write(settings.MAX_ARTICLE_ID_KEY, max_aid) return max_aid def get_article_stored_body(article): """ Fetch corresponding article object :param article: str; article name :return: json, Article; stored page content, corresponding article model object """ with ArticleModel() as am: article_obj = am.get_article_by_name(article) try: with ArticleContentModel() as acm: body = acm.get_article_content(article_obj._id).markup except Exception as e: log.info('Article markup not found: "%s"' % e.message) 
body = '' return body def get_wiki_content(article): """ Retrieve the wiki content from the mediawiki API :param article: str; article name :return: Wikipedia; mediawiki api response object """ pass def get_flickr_photos(flickr_json): """ Retrience Flickr photo content from Flickr API :param article: str; article name :return: list; list of Flickr photo json """ photos = [] for i in xrange(settings.NUM_PHOTOS_TO_FETCH): try: photos.append( { 'owner': flickr_json['photos']['photo'][i]['owner'], 'photo_id': flickr_json['photos']['photo'][i]['id'], 'farm': flickr_json['photos']['photo'][i]['farm'], 'server': flickr_json['photos']['photo'][i]['server'], 'title': flickr_json['photos']['photo'][i]['title'], 'secret': flickr_json['photos']['photo'][i]['secret'], }, ) except (IndexError, KeyError) as e: log.error('No more photos to process for: - "%s"' % (e.message)) log.debug('Photo info: %s' % (str(photos))) return photos def manage_article_storage(max_article_id, article_count): """ Handle the storage of new articles :param max_article_id: int; article id :param article_count: int; total count of articles :return: bool; success """ if article_count >= settings.MYSQL_MAX_ROWS: if max_article_id: # TODO - CHANGE THIS be careful, could iterate many times article_removed = False attempts = 0 while not article_removed \ or attempts > settings.MAX_RETRIES_FOR_REMOVE: attempts += 1 article_id = random.randint(0, int(max_article_id)) with ArticleModel() as am: log.info('Removing article id: ' + str(article_id)) try: am.delete_article(article_id) article_removed = True except UnmappedInstanceError: continue else: log.error('Could not determine a max article id.') return True def handle_article_insert(article, wiki_page_id): """ Handle insertion of article meta data :param article_id: int; article id :return: int, bool; success """ with ArticleModel() as am: if am.insert_article(article, wiki_page_id): article_obj = am.get_article_by_name(article) article_id = article_obj._id 
success = True else: log.error('Couldn\'t insert article: "%s"' % article) article_id = -1 success = False return article_id, success def handle_article_content_insert(article_id, page_content, is_new_article): """ Handle the insertion of article content :param article_id: int; article id :param page_content: json; page content :param is_new_article: bool; a new article? :return: bool; success """ with ArticleContentModel() as acm: if is_new_article: acm.insert_article(article_id, json.dumps(page_content)) else: acm.update_article(article_id, json.dumps(page_content)) def prep_page_content(article_id, article, wiki, photos, user_obj): """ Prepare the formatted article content :param article_id: int; article id :param article: str; article name :param wiki_resp: wikipedia; mediawiki api response :param photos: list; list of photo json :param user_obj: User; user object for request :return: dict; formatted page response passed to jinja template """ html = parse_strip_elements(wiki.html()) html = parse_convert_links(html) html = add_formatting_generic(html) photo_ids = process_photos(article_id, photos, user_obj) html = handle_photo_integrate(photos, html, article) page_content = { 'title': format_title_link(wiki.title, article), 'content': html, 'section_img_class': settings.SECTION_IMG_CLASS, 'num_photos': len(photos), 'article_id': article_id, 'user_id': user_obj.get_id(), 'photo_ids': photo_ids } return page_content def update_last_access(article_id): """ Update article last access :param article_id: int; article id :return: bool; success """ pass def order_photos_by_rank(article_id, photos): """ Reorders photos by score """ # Compute scores for i in xrange(len(photos)): # Get Exclusions & Endorsements with ExcludeModel() as em: exclusions = em.get_excludes_article_photo(article_id, photos[i]['photo_id']) with LikeModel() as lm: endorsements = lm.get_likes_article_photo(article_id, photos[i]['photo_id']) photos[i]['score'] = len(endorsements) - len(exclusions) # 
lambda method for sorting by score descending f = lambda x, y: cmp(-x['score'], -y['score']) return sorted(photos, f) def process_photos(article_id, photos, user_obj): """ Handles linking photo results with the model and returns a list of Flickr photo ids to pass to templating :param article_id: int; article id :param photos: list of photos :param user_obj: User; user object for request :return: List of Flickr photo ids """ photo_ids = [] for photo in photos: # Ensure that each photo is modeled with PhotoModel() as pm: photo_obj = pm.get_photo(photo['photo_id'], article_id) if not photo_obj: log.info('Processing photo: "%s"' % str(photo)) if pm.insert_photo(photo['photo_id'], article_id): photo_obj = pm.get_photo( photo['photo_id'], article_id) if not photo_obj: log.error('DB Error: Could not retrieve or ' 'insert: "%s"' % str(photo)) continue else: log.error('Couldn\'t insert photo: "%s"' % ( photo['photo_id'])) photo['id'] = photo_obj._id photo['votes'] = photo_obj.votes # Retrieve like data with LikeModel() as lm: if lm.get_like(article_id, photo_obj._id, user_obj.get_id()): photo['like'] = True else: photo['like'] = False photo_ids.append(photo['photo_id']) return photo_ids
[ 37811, 198, 220, 220, 220, 6434, 25, 6047, 44760, 74, 1008, 198, 220, 220, 220, 7536, 25, 220, 220, 3267, 678, 400, 11, 1946, 628, 220, 220, 220, 43101, 329, 30407, 929, 9156, 13, 198, 37811, 198, 198, 11748, 33918, 198, 11748, 4738...
2.251296
4,051
from bluedot import BlueDot from gpiozero import PWMLED from signal import pause led = PWMLED(27) bd = BlueDot() bd.when_moved = set_brightness pause()
[ 6738, 698, 1739, 313, 1330, 4518, 35, 313, 198, 6738, 27809, 952, 22570, 1330, 44141, 5805, 1961, 198, 6738, 6737, 1330, 14985, 198, 198, 992, 796, 44141, 5805, 1961, 7, 1983, 8, 198, 17457, 796, 4518, 35, 313, 3419, 198, 17457, 13, ...
2.75
56
from django.urls import path from . import views app_name = 'blog' urlpatterns = [ path('', views.post_list, name='post_list'), path('<slug:post>/',views.post_detail,name="post_detail"), path('comment/reply/', views.reply_page, name="reply"), path('tag/<slug:tag_slug>/',views.post_list, name='post_tag'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 1330, 5009, 198, 198, 1324, 62, 3672, 796, 705, 14036, 6, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 7353, 62, 4868, 11, 1438, ...
2.503817
131
import os from tempfile import TemporaryDirectory import codecs import logging from grizzled.file.includer import * from grizzled.os import working_directory from grizzled.text import strip_margin import pytest
[ 11748, 28686, 198, 6738, 20218, 7753, 1330, 46042, 43055, 198, 11748, 40481, 82, 198, 11748, 18931, 198, 6738, 46307, 992, 13, 7753, 13, 259, 758, 263, 1330, 1635, 198, 6738, 46307, 992, 13, 418, 1330, 1762, 62, 34945, 198, 6738, 46307,...
3.944444
54
from output.models.nist_data.list_pkg.unsigned_short.schema_instance.nistschema_sv_iv_list_unsigned_short_min_length_2_xsd.nistschema_sv_iv_list_unsigned_short_min_length_2 import NistschemaSvIvListUnsignedShortMinLength2 __all__ = [ "NistschemaSvIvListUnsignedShortMinLength2", ]
[ 6738, 5072, 13, 27530, 13, 77, 396, 62, 7890, 13, 4868, 62, 35339, 13, 43375, 62, 19509, 13, 15952, 2611, 62, 39098, 13, 77, 1023, 2395, 2611, 62, 21370, 62, 452, 62, 4868, 62, 43375, 62, 19509, 62, 1084, 62, 13664, 62, 17, 62, ...
2.553571
112
from django.conf.urls import url from simple.localsite.views import example urlpatterns = [ url(r'example/', example), ]
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 19016, 198, 198, 6738, 2829, 13, 17946, 874, 578, 13, 33571, 1330, 1672, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 19016, 7, 81, 6, 20688, 14, 3256, 1672, 828, 198, ...
2.822222
45
from django.test import TestCase from django.contrib.auth import get_user_model from django.contrib.auth.models import Permission from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from tbconnect.models import TBCheck, TBTest from userprofile.models import HealthCheckUserProfile from userprofile.tests.test_views import BaseEventTestCase from tbconnect.serializers import TBCheckSerializer
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 2448, 3411, 198, 6738, 42625, 14208,...
3.66129
124
import nltk import random from preprocess import compile_corpus from nltk.translate import IBMModel1, AlignedSent, Alignment def get_rand_sent(): ''' Redirect the standard output of the program -- i.e. the random sentences -- and transfer it over to the appropriate file. From there we will take a look at the sentence pair and include the hand alignment (gold standard) to proceed with evaluating the IBM model. ''' i = 0 while i < 20: index = random.randint(0, len(corpus)) try: # only print out "valid" sentence pairs # valid = sentence pairs with system-created alignments print(" ".join(corpus[index].mots), "\t", " ".join(corpus[index].words), "\t", corpus[index].alignment) i += 1 except: pass if __name__ == "__main__": main()
[ 11748, 299, 2528, 74, 198, 11748, 4738, 198, 6738, 662, 14681, 1330, 17632, 62, 10215, 79, 385, 198, 6738, 299, 2528, 74, 13, 7645, 17660, 1330, 19764, 17633, 16, 11, 978, 3916, 31837, 11, 978, 16747, 628, 198, 4299, 651, 62, 25192, ...
2.586826
334
from os import environ, path from .server import Server from .config import configure
[ 6738, 28686, 1330, 551, 2268, 11, 3108, 198, 198, 6738, 764, 15388, 1330, 9652, 198, 6738, 764, 11250, 1330, 17425, 628 ]
4.190476
21
# -*- coding: utf-8 -*- from checker.backends import BaseBackend from checker import logger log = logger.getLogger(__name__)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 2198, 263, 13, 1891, 2412, 1330, 7308, 7282, 437, 198, 6738, 2198, 263, 1330, 49706, 198, 198, 6404, 796, 49706, 13, 1136, 11187, 1362, 7, 834, 3672, 834, 8...
2.782609
46
import os import re from time import time from vial import vfunc, vim from vial.fsearch import get_files from vial.utils import get_projects, redraw MAX_FILESIZE = 10 * 1024 * 1024
[ 11748, 28686, 198, 11748, 302, 198, 198, 6738, 640, 1330, 640, 198, 198, 6738, 410, 498, 1330, 410, 20786, 11, 43907, 198, 6738, 410, 498, 13, 69, 12947, 1330, 651, 62, 16624, 198, 6738, 410, 498, 13, 26791, 1330, 651, 62, 42068, 11...
3.1
60
# -*- coding: utf-8 -*- """ Unit tests for new Project/Flight data classes, including JSON serialization/de-serialization """ import time from datetime import datetime from typing import Tuple from uuid import uuid4 from pathlib import Path import pytest import pandas as pd from dgp.core import DataType from dgp.core.models.project import AirborneProject from dgp.core.hdf5_manager import HDF5Manager from dgp.core.models.datafile import DataFile from dgp.core.models.dataset import DataSet from dgp.core.models import flight from dgp.core.models.meter import Gravimeter
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 26453, 5254, 329, 649, 4935, 14, 43069, 1366, 6097, 11, 1390, 19449, 198, 46911, 1634, 14, 2934, 12, 46911, 1634, 198, 37811, 198, 11748, 640, 198, 6738...
3.255556
180
from django.conf.urls import include, url from rest_framework.documentation import include_docs_urls from examples.rest import router from .views import index urlpatterns = [ url(r'^$', index, name='index'), url(r'^', include(router.urls)), url(r'^dynamicforms/', include('dynamicforms.urls')), url(r'^api-docs/', include_docs_urls(title='Example API documentation')), ]
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 2291, 11, 19016, 198, 6738, 1334, 62, 30604, 13, 22897, 341, 1330, 2291, 62, 31628, 62, 6371, 82, 198, 198, 6738, 6096, 13, 2118, 1330, 20264, 198, 6738, 764, 33571, 1330, 6376, 198, ...
2.818841
138
"""Test cases for concepts.""" from typing import Any import pytest import requests
[ 37811, 14402, 2663, 329, 10838, 526, 15931, 198, 6738, 19720, 1330, 4377, 198, 198, 11748, 12972, 9288, 198, 11748, 7007, 628 ]
4.095238
21
''' Mostly these are internal imports related to django and rest_framework. The os and io imports are for creating files, paths and parsing bytes objects respectively ''' from django.db import models from django.contrib.auth.models import User from rest_framework.renderers import JSONRenderer from rest_framework.parsers import JSONParser from vault_backend.extra_functions import * import os import io ''' The Vault model represents the basic password vault in passman. This model will store the directory path, filename and vault_name specified. This is linked to the User model for only displaying vaults belonging to the authenticated user. The Vault model is later referenced in different places for creating and updating records stored in it. ''' # Delete Record functionality in vault.Not tested delete functionality yet. Might implement in future. ''' def delete_data(self, sitename, password): try: delete_data = {'site_name':sitename, 'password':password} data = self.get_data() if self.check_data(delete_data, data): data.remove(delete_data) if data: for dictionary_data in data: self.add_data(dictionary_data['site_name'], dictionary_data['password']) return 0 else: self.create_vault() return 0 except ValueError: return 'No Such Value' '''
[ 7061, 6, 220, 198, 6943, 306, 777, 389, 5387, 17944, 3519, 284, 42625, 14208, 290, 1334, 62, 30604, 13, 198, 464, 28686, 290, 33245, 17944, 389, 329, 4441, 3696, 11, 13532, 290, 32096, 9881, 5563, 8148, 220, 198, 7061, 6, 198, 198, ...
2.740741
540
#! /usr/bin/python # coding=utf-8 import struct import os import hashlib import Instruction Access_Flag = {'public': 1, 'private': 2, 'protected': 4, 'static': 8, 'final': 0x10, 'synchronized': 0x20, 'volatile': 0x40, 'bridge': 0x40, 'transient': 0x80, 'varargs': 0x80, 'native': 0x100, 'interface': 0x200, 'abstract': 0x400, 'strictfp': 0x800, 'synthetic': 0x1000, 'annotation': 0x2000, 'enum': 0x4000, 'constructor': 0x10000, 'declared_synchronized': 0x20000} TypeDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C', 'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D', 'boolean[]': '[Z', 'byte[]': '[B', 'short[]': '[S', 'char[]': '[C', 'int[]': 'I', 'long[]': '[J', 'float[]': '[F', 'double[]': 'D'} ShortyDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C', 'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D'} ACSII = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '0': 0, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15} # ---------------------------------------------------------------------------------------- # alignment: 4bytes # alignment: 4bytes # alignment:none # alignment: 4bytes # alignment: none # alignment: none # alignment: 4 bytes # alignment: none # default: 0 create from file 1 create from memory def jiaguAll(dexfile, outfile): method_list = [] # record all method need to protect tmp_method = dexfile.getmethodItem("Lcom/cc/test/MainActivity;", "onCreate") method_list.append({"access": tmp_method["method"].access_flags, "ref": tmp_method["method"].coderef, "classidx": tmp_method["classidx"], "methodidx": tmp_method["methodidx"]}) tmp_method["method"].access_flags = int(Access_Flag['native'] | Access_Flag['public']) tmp_method["method"].modified = 1 # change the access flag, make it native dexfile.makeoffset() # make offset if os.path.exists(outfile): # if exists, delete it print("the file is exist, just replace it") os.remove(outfile) file = open(outfile, 'wb+') 
file.seek(0, 0) size = len(method_list) filesize = dexfile.dexheader.file_size # in order to adjust the dex file dexfile.dexheader.file_size += 16 * size # each injected data need 16 bytes dexfile.dexmaplist.copy(file) file.seek(filesize, 0) print("file size :", filesize, " size : ", size) for i in range(0, size): file.write(struct.pack("I", method_list[i]["classidx"])) file.write(struct.pack("I", method_list[i]["methodidx"])) file.write(struct.pack("I", method_list[i]["access"])) file.write(struct.pack("I", method_list[i]["ref"].start)) print("inject data :", method_list[i]["classidx"], method_list[i]["methodidx"]) # assume that the code ref is not None, otherwise it make no sense(no need to protect) file_sha = get_file_sha1(file) tmp = bytes(file_sha) i = 0 file.seek(12) while i < 40: num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]] file.write(struct.pack("B", num)) i += 2 csum = checksum(file, dexfile.dexheader.file_size) print("checksum:", hex(csum), "file size:", dexfile.dexheader.file_size) file.seek(8) file.write(struct.pack("I", csum)) file.close() if __name__ == '__main__': dexfile = DexFile("classes.dex") # jiaguAll(dexfile, "classescp.dex") # dexfile.printclasscode("Lcom/cc/test/MainActivity;", "onCreate") # dexfile.printf(3) # dexfile.addstr("DexParse.java") # dexfile.addstr("Lcom/cc/test/DexParse.java") # dexfile.modifystr("A Text From CwT", "A Text From DexParse") # dexfile.printf() # note: you need to delete file classescp.dex first, otherwise # new dex file will append the old one # dexfile.copytofile("classescp.dex")
[ 2, 0, 1220, 14629, 14, 8800, 14, 29412, 201, 198, 2, 19617, 28, 40477, 12, 23, 201, 198, 11748, 2878, 201, 198, 11748, 28686, 201, 198, 11748, 12234, 8019, 201, 198, 11748, 46486, 201, 198, 201, 198, 15457, 62, 34227, 796, 1391, 6, ...
2.22981
1,845
from rest_framework import serializers from api.models import User, UserProfile, Post, News, Video from datetime import datetime
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 40391, 13, 27530, 1330, 11787, 11, 11787, 37046, 11, 2947, 11, 3000, 11, 7623, 198, 6738, 4818, 8079, 1330, 4818, 8079, 628, 628 ]
4.125
32
#!/usr/bin/python # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) 2012 Michael Hull. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ---------------------------------------------------------------------- from ..core import StdChlAlphaBetaBeta from morphforge.units import qty from morphforge import units from hocmodbuilders.mmwriter_alphabetabeta import NEURONChlWriterAlphaBetaBeta from morphforge.simulation.neuron.hocmodbuilders import HocModUtils from morphforge.simulation.neuron import NEURONChl_Base from morphforge.constants.standardtags import StandardTags from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordableOnLocation # Register the channel NEURONEnvironment.channels.register_plugin( StdChlAlphaBetaBeta, NEURONChl_AlphaBetaBeta)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 16529, 30934, 198, 2, 15069, 357, 66, 8, 2321, 3899, 28238, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2...
3.581583
619
import markovify import re import nltk import os import urllib.request from shutil import copyfile # We need a temporary(ish) place to store the data we retrieve. # If you are running this in a docker container you may want to mount a volume and use it. # Also be sure to make a symlink between it and the assets directory. See our dockerfile for an example! datadir = "./web/assets/data" if 'DATA_DIR' in os.environ: datadir = os.environ['DATA_DIR'] if not os.path.exists(datadir): os.mkdir(datadir) # Basically the example from the markovify documentation that uses parts of speech and stuff to make better sentences # Grab a list of fortunes from Github if not os.path.exists(datadir+"/cookie.txt"): urllib.request.urlretrieve("https://raw.githubusercontent.com/ianli/fortune-cookies-galore/master/fortunes.txt", datadir+"/cookie.txt") # Grab the US constitution raw text if not os.path.exists(datadir+'/const.txt'): urllib.request.urlretrieve("https://www.usconstitution.net/const.txt", datadir+"/const.txt") if not os.path.exists(datadir+'/tweeter.txt'): urllib.request.urlretrieve("https://raw.githubusercontent.com/ElDeveloper/tweets/master/tweets_text.txt", datadir+"/tweeter.txt") # Read both files into variables with open(datadir+"/cookie.txt") as f: text = f.read() with open(datadir+'/const.txt') as f: tswext = f.read() with open(datadir+"/tweeter.txt") as f: tweetext = f.read() # Break up the text to make it more workable cookie_text_split = text.split("\n") const_text_split = tswext.split("\n") tweet_text_split = tweetext.split("\n") # Some cleanup to remove things in the fortune cookie file that aren't really fortunes. # (There are some odd facts and quotes in here. This is a bit barbaric, but this is a fun project anyway! No need for perfection...) # Same thing for the constitution text - this just removes the comment at the top. 
# Apply the cleanups from above cookie_text_split[:] = [x for x in cookie_text_split if excluded(x)] const_text_split[:] = [x for x in const_text_split if exwifted(x)] # Merge the text back into one big blob like markovify expects. (There's probably a better way to do this, but again, fun project. Efficiency's not that important... cookie_text_model = POSifiedText("\n".join(cookie_text_split)) const_text_model = POSifiedText("\n".join(const_text_split)) tweet_text_model = POSifiedText("\n".join(tweet_text_split)) # Combine them into a terrifying structure const_and_cookie_model = markovify.combine([cookie_text_model, const_text_model]) tweet_and_cookie_model = markovify.combine([cookie_text_model, tweet_text_model], [4, 1]) everything_model = markovify.combine([cookie_text_model, const_text_model, tweet_text_model], [4, 1, 1]) # Print a couple lines to the terminal to show that everything's working... print("Examples:") for i in range(5): print(const_and_cookie_model.make_short_sentence(240, tries=25)) # Now, open a temporary file and write some javascript surrounding our story. with open(datadir+"/cookie.js.new", "w+") as file: # NOTE: I don't escape anything here... with bad seed text it'd be quite possible to inject weird js, etc. file.write("window.fortuneCookies=[\n") print("Running cookie") # Write 100 lines of junk into the js file. Note that leaving the closing comma is ok, as javascript doesn't care. for i in range(250): file.write("\"" + cookie_text_model.make_short_sentence(240, tries=25) + "\",\n") # Close it up! 
file.write("];") print("Running const + cookie") file.write("window.constCookies=[\n") for i in range(250): file.write("\"" + const_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n") file.write("];") print("Running const only") file.write("window.constLines=[\n") for i in range(250): file.write("\"" + const_text_model.make_short_sentence(240, tries=25) + "\",\n") file.write("];") print("Running tweet only") file.write("window.tweetLines=[\n") for i in range(250): file.write("\"" + tweet_text_model.make_short_sentence(240, tries=25) + "\",\n") file.write("];") print("Running tweet cookie") file.write("window.tweetCookie=[\n") for i in range(250): file.write("\"" + tweet_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n") file.write("];") print("Running everything") file.write("window.everythingCookie=[\n") for i in range(250): file.write("\"" + everything_model.make_short_sentence(240, tries=25) + "\",\n") file.write("];") # Finally, copy our temp file over the old one, so clients can start seeing it. copyfile(datadir+"/cookie.js.new", datadir+"/cookie.js")
[ 11748, 1317, 709, 1958, 198, 11748, 302, 198, 11748, 299, 2528, 74, 198, 11748, 28686, 198, 11748, 2956, 297, 571, 13, 25927, 198, 198, 6738, 4423, 346, 1330, 4866, 7753, 198, 198, 2, 775, 761, 257, 8584, 7, 680, 8, 1295, 284, 3650,...
2.832139
1,674
import environs env = environs.Env() env.read_env() from wsgi_microservice_middleware.cors import CORSMiddleware from wsgi_microservice_middleware.request_id import ( RequestIdFilter, RequestIdMiddleware, current_request_id, RequestIdJsonLogFormatter ) __all__ = [ 'CORSMiddleware', 'RequestIdFilter', 'RequestIdMiddleware', 'current_request_id', 'RequestIdJsonLogFormatter' ]
[ 11748, 17365, 343, 684, 198, 198, 24330, 796, 17365, 343, 684, 13, 4834, 85, 3419, 198, 24330, 13, 961, 62, 24330, 3419, 198, 198, 6738, 266, 82, 12397, 62, 24055, 15271, 62, 27171, 1574, 13, 66, 669, 1330, 23929, 12310, 2509, 1574, ...
2.482558
172
import turtle as t import random COLORS = ["red", "orange", "yellow", "green", "blue", "purple"] STARTING_MOVE_DISTANCE = 5 MOVE_INCREMENT = 1
[ 11748, 28699, 355, 256, 198, 11748, 4738, 198, 198, 25154, 20673, 796, 14631, 445, 1600, 366, 43745, 1600, 366, 36022, 1600, 366, 14809, 1600, 366, 17585, 1600, 366, 14225, 1154, 8973, 198, 2257, 7227, 2751, 62, 11770, 6089, 62, 35, 880...
2.654545
55
""" Advent of code 2021 day 05 / 2 """ import math from os import path import re from collections import Counter def solution(data): """ Solution to the problem """ lines = preprocess(data) solver = Code(lines) return solver.solve() if __name__ == "__main__": with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file: print(solution(input_file.read()))
[ 37811, 33732, 286, 2438, 33448, 1110, 8870, 1220, 362, 37227, 198, 198, 11748, 10688, 198, 6738, 28686, 1330, 3108, 198, 11748, 302, 198, 6738, 17268, 1330, 15034, 628, 628, 198, 4299, 4610, 7, 7890, 2599, 198, 220, 220, 220, 37227, 281...
2.80137
146
""" Tissue Corrections ================== """ ################################################################################################### # .. _tutorial_removing_autoflourescence: # # Removing autofluorescence # ========================= # # In addition to the bright spots (signal) that we want to detect, microscopy experiments on tissue # slices often have a non-zero amount of auto-fluorescence from the cell bodies. This can be mitigated # by "clearing" strategies whereby tissue lipids and proteins are digested, or computationally by # estimating and subtracting the background values. # # We use the same test image from the previous section to demonstrate how this can work. # # Clipping # -------- # The simplest way to remove background is to set a global, (linear) cut-off and clip out the # background values. import starfish import starfish.data from starfish.image import Filter from starfish.types import Axes experiment: starfish.Experiment = starfish.data.ISS(use_test_data=True) field_of_view: starfish.FieldOfView = experiment["fov_001"] image: starfish.ImageStack = field_of_view.get_image("primary") ################################################################################################### # Next, create the clip filter. 
Here we clip at the 50th percentile, optimally separates the spots # from the background clip_50 = Filter.Clip(p_min=97) clipped: starfish.ImageStack = clip_50.run(image) ################################################################################################### # plot both images import matplotlib.pyplot as plt import xarray as xr # get the images orig_plot: xr.DataArray = image.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze() clip_plot: xr.DataArray = clipped.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze() f, (ax1, ax2) = plt.subplots(ncols=2) ax1.imshow(orig_plot) ax1.set_title("original") ax2.imshow(clip_plot) ax2.set_title("clipped") ################################################################################################### #
[ 37811, 198, 51, 21949, 40526, 198, 4770, 855, 198, 37811, 198, 198, 29113, 29113, 29113, 21017, 198, 2, 11485, 4808, 83, 44917, 62, 2787, 5165, 62, 2306, 1659, 75, 280, 411, 43696, 25, 198, 2, 198, 2, 3982, 5165, 1960, 1659, 2290, 4...
3.749077
542