blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f187845d86a0daf4cd9e7251b45628a9ac90250f | 0a19f97e8f382306686f5a08f3f898d4ed02e4d5 | /code.py | 211738bc6777881613f4683118d4c833bd659879 | [] | no_license | 5555-harshu/direct_line | 8a8c2612a850bb7bcb66740cab926551f77a12bf | d77e133551965011cf552552b366e4460acf47e4 | refs/heads/master | 2022-12-05T17:43:00.705928 | 2020-08-11T11:40:29 | 2020-08-11T11:40:29 | 286,725,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | # direct_line test code Assumption
# I assume screen temprature is the temprature on that date. However data is not looking correct as there are many things.
# I did not calculate temprature using the attributes given in the input file, if that was required.
# There must be some formula to calculte the temprature for that date as SCREEN TEMPRATURE IS DIFFERENT for different Forecast sites for the same date as Obvercation time is also not present in data.
# -99 temprature is not possible as baltasound, does not make sense to me.
import os
import sys
import pandas as pd
## importing libraries of SPARK
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql import SparkSession
spark = SparkSession.builder.master("yarn") \
.enableHiveSupport() \
.getOrCreate()
path = "my input path"
df = spark.read.option("header","True").csv(path)
df.createOrReplaceTempView("weather")
# Writing the data to output location as Parquet file
# as the ask is to get Hottest day , temprature and Region-- I just added country as well
spark.sql("""SELECT
date(ObservationDate) ,
Region, Country,
MAX(ScreenTemperature) AS max_temp
FROM weather GROUP BY 1,2,3 """).createOrReplaceTempView("final")
spark.table("final").write.parquet("outputpath")
# parquet file can be read as
spark.read.parquet("outputpath").createOrReplaceTempView("weather_tbl")
# WHAT IS THE HOTTEST TEMPRATURE in these 2 months
spark.sql("""SELECT MAX(max_temp) FROM final""").show()
# Which date was the hottest day
# What was the temperature on that day
# In which region was the hottest day
spark.sql("""SELECT
DISTINCT ObservationDate ,
region ,
max_temp FROM final WHERE max_temp = (SELECT MAX(max_temp) FROM final)
""").show(200,truncate=False)
| [
"noreply@github.com"
] | noreply@github.com |
bb4b9285535ad7d5f312f536b86fbe646c634094 | 5814ba283e1a652475d206ff2cfd9fff94ddcc36 | /{{cookiecutter.app_name}}/tests/test_models.py | a304a6de1207e6e44c7533410111bc239f973ec5 | [] | no_license | smeggingsmegger/flask-cookiecutter | 76bc4d42f0e521b9109161c1ef0fde28462b4fc3 | cee546c3272aa55f25a0e4b5608d59b475f85ead | refs/heads/master | 2021-01-20T19:50:22.822652 | 2016-08-23T12:56:23 | 2016-08-23T12:56:23 | 62,155,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | #! ../env/bin/python
# -*- coding: utf-8 -*-
import pytest
from {{cookiecutter.app_name}}.models import db, User
create_user = False
@pytest.mark.usefixtures("testapp")
class TestModels:
    """Tests for the User model (runs inside the `testapp` app fixture)."""
    def test_user_save(self, testapp):
        """ Test Saving the user model to the database """
        admin = User(username="admin", password="supersafepassword")
        db.session.add(admin)
        db.session.commit()
        # The user must be retrievable after the commit.
        user = User.query.filter_by(username="admin").first()
        assert user is not None
    def test_user_password(self, testapp):
        """ Test password hashing and checking """
        admin = User(username="admin", password="supersafepassword")
        assert admin.username == 'admin'
        # check_password verifies the plaintext against the stored hash.
        assert admin.check_password('supersafepassword')
| [
"sblevins@gmail.com"
] | sblevins@gmail.com |
1136cdfff801610b81725a9af6321f3f4d7b58b4 | 6697195831c8904d8712959c4ace4eb6617a8c0e | /env/bin/pyreverse | 8a36f8f02a2eee578593774e4ad06b51af86a94a | [] | no_license | vishaldhakal/edugate | 9652d56c56dc059922dfb71b6a8694f8f0c41e16 | 943de19084ca5d9734708ade1743f2fb33998d4d | refs/heads/main | 2023-03-15T06:14:10.299756 | 2021-03-09T07:35:41 | 2021-03-09T07:35:41 | 345,921,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/vishaldhakal/Desktop/educate/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
# Console-script entry stub generated for pylint's `pyreverse` command.
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes (-script.pyw / .exe) from argv[0]
    # so pyreverse sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"vishaldhakal@Vishals-Air.domain.name"
] | vishaldhakal@Vishals-Air.domain.name | |
16ace28f744c8daf204afc559ff8eee3329a0453 | 43f52da1a41a46ca7255769559029cd53c275036 | /manage.py | f5153e2a3b25216d574784fbe09eef23e04d3b93 | [] | no_license | thanuj11/Web-Application-for-Online-Car-reservation-System | 992e15b7e02c79e45f0e959d12673eef29f1c7c6 | bb70557e3487395ea8faaa476f121833326260f7 | refs/heads/master | 2021-04-04T00:07:46.698946 | 2019-02-12T22:50:55 | 2019-02-12T22:50:55 | 124,616,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/env python
import os
import sys
# Django's command-line utility for administrative tasks.
if __name__ == "__main__":
    # Default settings module for this project; an existing env var wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "online_reservation.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"dthanuj28@gmail.com"
] | dthanuj28@gmail.com |
89e7931d5c59894357853c14f7e7a0e855c0f723 | b7f336685d0ca204db9b8edebc20863351a74ad4 | /views.py | df86f79d7fad36423ebd04a723be6ee5928cd97a | [] | no_license | novicasarenac/snowboy-streaming | 77de0ecff0902e5c317b98c704cd6901d3062459 | c31ab4d2948fdb4160a29edb122024f7f31b8838 | refs/heads/master | 2020-04-01T15:50:55.029603 | 2018-10-16T21:31:52 | 2018-10-16T21:31:52 | 153,354,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from app import app
from app import socketio
from flask import render_template, session
from ws_snowboy_decoder import WSHotwordDetector
from resampler import resample
from flask_socketio import emit
@app.route('/')
def index():
    """Serve the single-page client UI."""
    return render_template('index.html')
@socketio.on('connect')
def connect():
    """Create a per-session hotword detector when a client connects."""
    print('Client connected')
    session['hotword_detector'] = WSHotwordDetector('models/snowboy.umdl',
                                                    sensitivity=0.8,
                                                    audio_gain=1)
@socketio.on('sample_rate')
def client_sample_rate(sample_rate):
    """Remember the client's capture sample rate for later resampling."""
    print('Client\'s sample rate: {}'.format(sample_rate))
    session['sample_rate'] = sample_rate
@socketio.on('audio')
def audio(data):
    """Resample an incoming audio chunk to 16 kHz, buffer it, and run detection."""
    # Snowboy expects 16 kHz audio, so convert from the client's rate first.
    resampled_data = resample(session['sample_rate'], 16000, data)
    session['hotword_detector'].extend_buffer(resampled_data)
    # Only run the detector once the buffer holds enough audio.
    if session['hotword_detector'].check_buffer():
        detected = session['hotword_detector'].perform_detection()
        if detected:
            print('Hotword detected!')
            # Notify the client over the websocket.
            emit('detected')
| [
"nolesarenac@yahoo.com"
] | nolesarenac@yahoo.com |
3c3bb96ed71efa8609883259af8dd7294751d67a | fa0fe3b1b2fcd4e65db8e222ea37df866f44e088 | /diff.py | 7f410ac0f3b2258264c9a369ff43a225ee71f5a8 | [
"Apache-2.0"
] | permissive | stanfordnlp/wge | 031a5b9d70b43dc7dc51a23483afe23685cb6a6e | bed526657a62a64a163f80deb4a8627cb63871a3 | refs/heads/master | 2023-06-15T06:23:05.801754 | 2023-06-05T21:38:35 | 2023-06-05T21:38:35 | 122,270,144 | 97 | 38 | null | 2018-03-25T06:56:34 | 2018-02-20T23:32:48 | HTML | UTF-8 | Python | false | false | 2,868 | py | import argparse
import json
import os
from collections import OrderedDict
from os.path import join, splitext
from gtd.io import IntegerDirectories
from variational import data
# Command line: the integer IDs of the two training runs to diff.
parser = argparse.ArgumentParser()
parser.add_argument('run1', type=int)
parser.add_argument('run2', type=int)
args = parser.parse_args()
class Traces(OrderedDict):
    """Ordered map from train step (int) to a list of trace dicts for that step."""
    def __init__(self, d):
        # Sort by train step so iteration is chronological.
        items = sorted(d.items())
        for step_num, traces in items:
            # Validate the expected shape: int -> non-empty list of dicts.
            assert isinstance(step_num, int)
            assert isinstance(traces, list)
            assert isinstance(traces[0], dict)
        super(Traces, self).__init__(items)
# TODO(kelvin): add 'replay' as a trace type
TRACE_TYPES = ['explore_program', 'explore_neural', 'test']
def load_trace_groups(run_num):
    """Load traces for a particular TrainingRun.
    Returns:
        trace_groups (dict[str, Traces]): map from trace type to Traces
    """
    # Each run directory holds traces/<trace_type>/<train_step>.json files.
    run_dirs = IntegerDirectories(data.workspace.experiments)
    traces_dir = join(run_dirs[run_num], 'traces')
    trace_groups = {}
    for trace_type in TRACE_TYPES:
        trace_dir = join(traces_dir, trace_type)
        filenames = os.listdir(trace_dir)
        train_step_to_trace = {}
        for full_name in filenames:
            name, ext = splitext(full_name)
            # Skip anything that is not a JSON trace file.
            if ext != '.json':
                continue
            full_path = join(trace_dir, full_name)
            # The basename is the train step the trace was captured at.
            train_step = int(name)
            with open(full_path, 'r') as f:
                trace = json.load(f)
            train_step_to_trace[train_step] = trace
        trace_groups[trace_type] = Traces(train_step_to_trace)
    return trace_groups
def fmt(collection):
    """Render *collection* as a comma-separated string of its sorted items."""
    ordered = sorted(collection)
    pieces = [str(item) for item in ordered]
    return ', '.join(pieces)
# NOTE: this file uses Python 2 print statements throughout.
def trace_diff(trace1, trace2):
    # Report which keys are unique to each trace, then compare values on
    # the keys the two traces share.
    trace1_extra = set(trace1) - set(trace2)
    trace2_extra = set(trace2) - set(trace1)
    overlap = sorted(set(trace1) & set(trace2))
    print 'trace1+: {}'.format(fmt(trace1_extra))
    print 'trace2+: {}'.format(fmt(trace2_extra))
    print 'overlapping keys:'
    for key in overlap:
        same = trace1[key] == trace2[key]
        same_str = 'same' if same else 'DIFFERENT'
        print '\t{}: {}'.format(key, same_str)
def traces_diff(traces1, traces2):
    # find overlapping train_steps
    overlap = sorted(set(traces1) & set(traces2))
    print 'Traces overlap on train steps: {}'.format(fmt(overlap))
    for train_step in overlap:
        print '-- STEP {} --'.format(train_step)
        print 'NOTE: only comparing first episode of each trace.'
        trace_diff(traces1[train_step][0], traces2[train_step][0])
        print
# Script entry: load both runs and diff them per trace type.
trace_groups_1 = load_trace_groups(args.run1)
trace_groups_2 = load_trace_groups(args.run2)
for trace_type in TRACE_TYPES:
    print '===== {} ====='.format(trace_type)
    traces_diff(trace_groups_1[trace_type], trace_groups_2[trace_type])
| [
"evanliu@stanford.edu"
] | evanliu@stanford.edu |
3364a121d0d65b2d564c1caf77f12215681d78c9 | f5d94d12733c480848ee002a5b4df8d5b5f33a80 | /manage.py | 87dcbd8076a3cc64484a4465af55d5b5cc97e79e | [] | no_license | nagkumar91/dj_1_8_test | a19b574b0c1cfe1ad279a38bd5d5d7357d75b114 | 724fc87dbef6519b781b1dcb464ad288b64b6f4d | refs/heads/master | 2021-01-25T10:07:19.643798 | 2015-05-04T08:17:25 | 2015-05-04T08:17:25 | 35,017,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
# Django's command-line utility for administrative tasks.
if __name__ == "__main__":
    # Default settings module for this project; an existing env var wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_1_8_test.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"me@nagkumar.com"
] | me@nagkumar.com |
ad26f2dd3de6339480f4232b37b7660935fb3993 | a69175b22a4b27eac49617e3621c2d4b44d09af6 | /tts.py | a8fba66e9f3fe93b7fb6753705aec94a0e8becc6 | [] | no_license | prajwalccc13/Voice-Assistant | 7badd8997f6dd32a196384271161d705657bcc48 | 2eb7ecf3f04ce796ef2b181d8e2c701f104883d9 | refs/heads/main | 2023-08-19T08:00:44.469920 | 2021-09-30T18:22:44 | 2021-09-30T18:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import pyttsx3
engine = pyttsx3.init(driverName='flite')
engine.setProperty('rate',140)
"""VOICE"""
voices = engine.getProperty('voices') #getting details of current voice
# engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
engine.setProperty('voice', voices[2].id) #changing index, changes voices. 1 for female
engine.say("I will speak this text.")
engine.runAndWait() | [
"prajwalccc13@outlook.com"
] | prajwalccc13@outlook.com |
0b863056205532158e8b81c5c836aa686db31747 | 1ca3067846c8472159b07d085d33866d07dc66a1 | /graphsage/minibatch.py | 180648d115a5a67a24375d106ce96e2d0c80d069 | [
"MIT"
] | permissive | WangNuoWa/GraphSAGE | 00800b1b9719c84fb3f792a5eabd0a22f46327bf | 0d9c4a739261c9fd86736af95406d6004de4833d | refs/heads/master | 2021-06-27T00:22:58.032898 | 2017-09-16T21:34:13 | 2017-09-16T21:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,607 | py | from __future__ import division
from __future__ import print_function
import numpy as np
np.random.seed(123)
class EdgeMinibatchIterator(object):
    """ This minibatch iterator iterates over batches of sampled edges or
    random pairs of co-occuring edges.
    G -- networkx graph
    id2idx -- dict mapping node ids to index in feature tensor
    placeholders -- tensorflow placeholders object
    context_pairs -- if not none, then a list of co-occuring node pairs (from random walks)
    batch_size -- size of the minibatches
    max_degree -- maximum size of the downsampled adjacency lists
    n2v_retrain -- signals that the iterator is being used to add new embeddings to a n2v model
    fixed_n2v -- signals that the iterator is being used to retrain n2v with only existing nodes as context
    """
    def __init__(self, G, id2idx, 
            placeholders, context_pairs=None, batch_size=100, max_degree=25,
            n2v_retrain=False, fixed_n2v=False,
            **kwargs):
        self.G = G
        self.nodes = G.nodes()
        self.id2idx = id2idx
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        self.batch_num = 0
        # Shuffle node order once up front (overwrites the list above).
        self.nodes = np.random.permutation(G.nodes())
        self.adj, self.deg = self.construct_adj()
        self.test_adj = self.construct_test_adj()
        # Training pairs come from random walks when provided, else raw edges.
        if context_pairs is None:
            edges = G.edges()
        else:
            edges = context_pairs
        self.train_edges = self.edges = np.random.permutation(edges)
        if not n2v_retrain:
            self.train_edges = self._remove_isolated(self.train_edges)
            # Validation edges are those flagged as removed from training.
            self.val_edges = [e for e in G.edges_iter() if G[e[0]][e[1]]['train_removed']]
        else:
            if fixed_n2v:
                self.train_edges = self.val_edges = self._n2v_prune(self.edges)
            else:
                self.train_edges = self.val_edges = self.edges
        print(len([n for n in G.nodes_iter() if not G.node[n]['test'] and not G.node[n]['val']]), 'train nodes')
        print(len([n for n in G.nodes_iter() if G.node[n]['test'] or G.node[n]['val']]), 'test nodes')
        self.val_set_size = len(self.val_edges)
    def _n2v_prune(self, edges):
        """Drop edges whose context node (second endpoint) is val/test."""
        is_val = lambda n : self.G.node[n]["val"] or self.G.node[n]["test"]
        return [e for e in edges if not is_val(e[1])]
    def _remove_isolated(self, edge_list):
        """Filter out train edges touching a zero-degree (isolated) endpoint."""
        new_edge_list = []
        for n1, n2 in edge_list:
            if (self.deg[self.id2idx[n1]] == 0 or self.deg[self.id2idx[n2]] == 0) \
                    and (not self.G.node[n1]['test'] or self.G.node[n1]['val']) \
                    and (not self.G.node[n2]['test'] or self.G.node[n2]['val']):
                continue
            else:
                new_edge_list.append((n1,n2))
        return new_edge_list
    def construct_adj(self):
        """Build the fixed-width train adjacency table and degree vector.

        Each row holds exactly max_degree neighbor indices, downsampled
        without replacement (or upsampled with replacement). Row index
        len(id2idx) is a padding row for nodes with no neighbors.
        """
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        deg = np.zeros((len(self.id2idx),))
        for nodeid in self.G.nodes():
            # Val/test nodes keep the padding row during training.
            if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
                continue
            neighbors = np.array([self.id2idx[neighbor] 
                for neighbor in self.G.neighbors(nodeid)
                if (not self.G[nodeid][neighbor]['train_removed'])])
            deg[self.id2idx[nodeid]] = len(neighbors)
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj, deg
    def construct_test_adj(self):
        """Like construct_adj but using ALL edges and nodes (for evaluation)."""
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        for nodeid in self.G.nodes():
            neighbors = np.array([self.id2idx[neighbor] 
                for neighbor in self.G.neighbors(nodeid)])
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj
    def end(self):
        """Return True when the current epoch has no full batch left."""
        return self.batch_num * self.batch_size > len(self.train_edges) - self.batch_size + 1
    def batch_feed_dict(self, batch_edges):
        """Build a TF feed dict for a batch of (node1, node2) edges."""
        batch1 = []
        batch2 = []
        for node1, node2 in batch_edges:
            batch1.append(self.id2idx[node1])
            batch2.append(self.id2idx[node2])
        feed_dict = dict()
        feed_dict.update({self.placeholders['batch_size'] : len(batch_edges)})
        feed_dict.update({self.placeholders['batch1']: batch1})
        feed_dict.update({self.placeholders['batch2']: batch2})
        return feed_dict
    def next_minibatch_feed_dict(self):
        """Advance the cursor and return the feed dict for the next batch."""
        start = self.batch_num * self.batch_size
        self.batch_num += 1
        batch_edges = self.train_edges[start : start + self.batch_size]
        return self.batch_feed_dict(batch_edges)
    def val_feed_dict(self, size=None):
        """Feed dict for all validation edges, or a random sample of `size`."""
        edge_list = self.val_edges
        if size is None:
            return self.batch_feed_dict(edge_list)
        else:
            ind = np.random.permutation(len(edge_list))
            val_edges = [edge_list[i] for i in ind[:min(size, len(ind))]]
            return self.batch_feed_dict(val_edges)
    def incremental_val_feed_dict(self, size, iter_num):
        """Feed dict for the iter_num-th validation slice; also returns a
        'finished' flag and the edges in the slice."""
        edge_list = self.val_edges
        val_edges = edge_list[iter_num*size:min((iter_num+1)*size, 
            len(edge_list))]
        return self.batch_feed_dict(val_edges), (iter_num+1)*size >= len(self.val_edges), val_edges
    def incremental_embed_feed_dict(self, size, iter_num):
        """Feed dict of self-pairs (n, n) for embedding every node in slices."""
        node_list = self.nodes
        val_nodes = node_list[iter_num*size:min((iter_num+1)*size, 
            len(node_list))]
        val_edges = [(n,n) for n in val_nodes]
        return self.batch_feed_dict(val_edges), (iter_num+1)*size >= len(node_list), val_edges
    def label_val(self):
        """Partition all graph edges into (train_edges, val_edges) by node flags."""
        train_edges = []
        val_edges = []
        for n1, n2 in self.G.edges_iter():
            # An edge is validation if either endpoint is a val/test node.
            if (self.G.node[n1]['val'] or self.G.node[n1]['test'] 
                    or self.G.node[n2]['val'] or self.G.node[n2]['test']):
                val_edges.append((n1,n2))
            else:
                train_edges.append((n1,n2))
        return train_edges, val_edges
    def shuffle(self):
        """ Re-shuffle the training set.
        Also reset the batch number.
        """
        self.train_edges = np.random.permutation(self.train_edges)
        self.nodes = np.random.permutation(self.nodes)
        self.batch_num = 0
class NodeMinibatchIterator(object):
    """ 
    This minibatch iterator iterates over nodes for supervised learning.
    G -- networkx graph
    id2idx -- dict mapping node ids to integer values indexing feature tensor
    placeholders -- standard tensorflow placeholders object for feeding
    label_map -- map from node ids to class values (integer or list)
    num_classes -- number of output classes
    batch_size -- size of the minibatches
    max_degree -- maximum size of the downsampled adjacency lists
    """
    def __init__(self, G, id2idx, 
            placeholders, label_map, num_classes, 
            batch_size=100, max_degree=25,
            **kwargs):
        self.G = G
        self.nodes = G.nodes()
        self.id2idx = id2idx
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        self.batch_num = 0
        self.label_map = label_map
        self.num_classes = num_classes
        self.adj, self.deg = self.construct_adj()
        self.test_adj = self.construct_test_adj()
        # Split nodes into val/test/train by the flags stored on the graph.
        self.val_nodes = [n for n in self.G.nodes_iter() if self.G.node[n]['val']]
        self.test_nodes = [n for n in self.G.nodes_iter() if self.G.node[n]['test']]
        self.no_train_nodes_set = set(self.val_nodes + self.test_nodes)
        self.train_nodes = set(G.nodes()).difference(self.no_train_nodes_set)
        # don't train on nodes that only have edges to test set
        self.train_nodes = [n for n in self.train_nodes if self.deg[id2idx[n]] > 0]
    def _make_label_vec(self, node):
        """Return the label for `node` as a vector (multi-label list is used
        as-is; an integer class is converted to a one-hot vector)."""
        label = self.label_map[node]
        if isinstance(label, list):
            label_vec = np.array(label)
        else:
            label_vec = np.zeros((self.num_classes))
            class_ind = self.label_map[node]
            label_vec[class_ind] = 1
        return label_vec
    def construct_adj(self):
        """Build the fixed-width train adjacency table and degree vector
        (train edges only; row len(id2idx) is a padding row)."""
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        deg = np.zeros((len(self.id2idx),))
        for nodeid in self.G.nodes():
            # Val/test nodes keep the padding row during training.
            if self.G.node[nodeid]['test'] or self.G.node[nodeid]['val']:
                continue
            neighbors = np.array([self.id2idx[neighbor] 
                for neighbor in self.G.neighbors(nodeid)
                if (not self.G[nodeid][neighbor]['train_removed'])])
            deg[self.id2idx[nodeid]] = len(neighbors)
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj, deg
    def construct_test_adj(self):
        """Like construct_adj but using ALL edges and nodes (for evaluation)."""
        adj = len(self.id2idx)*np.ones((len(self.id2idx)+1, self.max_degree))
        for nodeid in self.G.nodes():
            neighbors = np.array([self.id2idx[neighbor] 
                for neighbor in self.G.neighbors(nodeid)])
            if len(neighbors) == 0:
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[self.id2idx[nodeid], :] = neighbors
        return adj
    def end(self):
        """Return True when the current epoch has no full batch left."""
        return self.batch_num * self.batch_size > len(self.train_nodes) - self.batch_size
    def batch_feed_dict(self, batch_nodes, val=False):
        """Build a TF feed dict (and label matrix) for a batch of nodes."""
        batch1id = batch_nodes
        batch1 = [self.id2idx[n] for n in batch1id]
        labels = np.vstack([self._make_label_vec(node) for node in batch1id])
        feed_dict = dict()
        feed_dict.update({self.placeholders['batch_size'] : len(batch1)})
        feed_dict.update({self.placeholders['batch']: batch1})
        feed_dict.update({self.placeholders['labels']: labels})
        return feed_dict, labels
    def node_val_feed_dict(self, size=None, test=False):
        """Feed dict for all val (or test) nodes, or a random sample of `size`."""
        if test:
            val_nodes = self.test_nodes
        else:
            val_nodes = self.val_nodes
        if not size is None:
            val_nodes = np.random.choice(val_nodes, size, replace=True)
        # add a dummy neighbor
        ret_val = self.batch_feed_dict(val_nodes)
        return ret_val[0], ret_val[1]
    def incremental_node_val_feed_dict(self, size, iter_num, test=False):
        """Feed dict for the iter_num-th val/test slice; also returns a
        'finished' flag and the nodes in the slice."""
        if test:
            val_nodes = self.test_nodes
        else:
            val_nodes = self.val_nodes
        val_node_subset = val_nodes[iter_num*size:min((iter_num+1)*size, 
            len(val_nodes))]
        # add a dummy neighbor
        ret_val = self.batch_feed_dict(val_node_subset)
        return ret_val[0], ret_val[1], (iter_num+1)*size >= len(val_nodes), val_node_subset
    def next_minibatch_feed_dict(self):
        """Advance the cursor and return the feed dict for the next batch."""
        start = self.batch_num * self.batch_size
        self.batch_num += 1
        batch_nodes = self.train_nodes[start : start + self.batch_size]
        return self.batch_feed_dict(batch_nodes)
    def incremental_embed_feed_dict(self, size, iter_num):
        """Feed dict for embedding every node of the graph in slices."""
        node_list = self.nodes
        val_nodes = node_list[iter_num*size:min((iter_num+1)*size, 
            len(node_list))]
        return self.batch_feed_dict(val_nodes), (iter_num+1)*size >= len(node_list), val_nodes
    def shuffle(self):
        """ Re-shuffle the training set.
        Also reset the batch number.
        """
        self.train_nodes = np.random.permutation(self.train_nodes)
        self.batch_num = 0
| [
"wleif@stanford.edu"
] | wleif@stanford.edu |
370cbbbaa94a1cb540bfb5ddd385885f7480ba1a | 7c5baa5916f5ee9f104205d8d4e23bca5d55ca36 | /zoldesktop/spiders/09_ZOL壁纸_分辨率选择版.py | 19ad8b4f3316690da3d86005afcb269460dd3320 | [] | no_license | Modestzero/ZOL-wallpapaer | d837a120120ffcdbee1401f63b363befaf1b4b45 | 14b5ab7dcd44679ff5c6ed97d0baf01f6e706147 | refs/heads/master | 2023-02-07T08:06:32.748454 | 2020-12-31T07:50:01 | 2020-12-31T07:50:01 | 279,782,108 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,933 | py | import os
import re
from queue import Queue
from random import randint
from threading import Thread
from time import sleep
import requests
from lxml import etree
def select_ratio(url, choice):
    """Scrape the resolution filter links and return (chosen URL, total pages).

    choice -- menu key as a string ('1'..'11'); '1' means "all resolutions".
    """
    response = requests.get(url, headers=headers)
    e = etree.HTML(response.text)
    ratios_list = { }
    # Key '1' is the unfiltered listing; keys '2'..'11' come from the page.
    ratios_list['1'] = 'http://desk.zol.com.cn/pc/'
    for i, ratios in zip(range(2, 12), e.xpath('//dl[@class="filter-item clearfix"]/dd/a/@href')):
        ratios_list[str(i)] = 'http://desk.zol.com.cn{}'.format(ratios)
    print(ratios_list[choice])
    # Total page count shown on the listing page.
    sum_page = ''.join(e.xpath('//span/font/text()'))
    return ratios_list[choice], sum_page
def download(url):
    """Fetch `url` and return its parsed lxml HTML tree."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    e = etree.HTML(response.text)
    return e
class Crawl_list(Thread):
    """Worker thread: walks `page` listing pages and queues every album URL."""
    def __init__(self, url, page, url_list_queue):
        super().__init__()
        self.url = url
        self.page = page
        self.url_list_queue = url_list_queue
    def run(self):
        for i in range(0, self.page):
            print('正在解析第{}页壁纸列表信息...'.format(i+1))
            e = download(self.url)
            url_list = e.xpath('//li[@class="photo-list-padding"]/a/@href')
            url_next = e.xpath('//div[@class="page"]/a[@class="next"]/@href')
            for each in url_list:
                self.url_list_queue.put('http://desk.zol.com.cn{}'.format(each))
            # Follow the "next page" link for the following iteration.
            self.url = 'http://desk.zol.com.cn{}'.format(url_next[0])
            # Throttle requests (t is a module-level random delay in seconds).
            sleep(t)
class Parse_image(Thread):
    """Worker thread: resolves each queued album page to a download-page URL
    for the chosen resolution and queues '<name>-<url>' strings."""
    def __init__(self, choice_ratio, url_list_queue, image_url_queue):
        super().__init__()
        self.choice_ratio = choice_ratio
        self.url_list_queue = url_list_queue
        self.image_url_queue = image_url_queue
    def run(self):
        print('正在解析图片地址...')
        while self.url_list_queue.empty() == False:
            url = self.url_list_queue.get()
            e = download(url)
            # Anchor ids list the resolutions available for this image.
            ratio_href = e.xpath('//dd/a/@id')
            image_name = e.xpath('//div/h3/a/text()')[0]
            next_href = e.xpath('//div/a[@id="pageNext"]/@href')[0]
            serial_number = e.xpath('//span/span/text()')
            # 'javascript:;' marks the last image of an album.
            if next_href != 'javascript:;':
                sleep(t)
                if self.choice_ratio in ratio_href:
                    href = ''.join(e.xpath('//dd/a[@id="{}"]/@href'.format(self.choice_ratio)))
                    # NOTE(review): '-' is used as the name/url separator here and
                    # split on later in Down_Image -- fragile if a name contains '-'.
                    image_url = image_name + '_' + ''.join(serial_number) + '-' + 'http://desk.zol.com.cn{}'.format(href)
                    self.image_url_queue.put(image_url)
                    next_p = 'http://desk.zol.com.cn{}'.format(next_href)
                    self.url_list_queue.put(next_p)
                else:
                    next_p = 'http://desk.zol.com.cn{}'.format(next_href)
                    self.url_list_queue.put(next_p)
            else:
                if self.choice_ratio in ratio_href:
                    href = ''.join(e.xpath('//dd/a[@id="{}"]/@href'.format(self.choice_ratio)))
                    image_url = image_name + '_' + ''.join(serial_number) + '-' + 'http://desk.zol.com.cn{}'.format(href)
                    self.image_url_queue.put(image_url)
class Down_Image(Thread):
    """Worker thread: downloads each queued image to ./download/<album>/<name>.jpg."""
    def __init__(self, image_url_queue):
        super().__init__()
        self.image_url_queue = image_url_queue
    def run(self):
        print('正在下载保存...')
        dir_h = os.getcwd() + '/download'
        # exist_ok replaces the old bare `try: os.mkdir ... except: pass`,
        # which silently swallowed *every* error, not just "already exists".
        os.makedirs(dir_h, exist_ok=True)
        while self.image_url_queue.empty() == False:
            # NOTE(review): items are '<name>-<url>'; this unpack breaks if the
            # name itself contains '-'. Kept as-is to match the producer side.
            name, url = self.image_url_queue.get().split('-')
            response = requests.get(url, headers=headers)
            src = re.findall(r'src="(https://.+)"', response.text)[0]
            # Some pages return a relative path; prefix the image CDN host.
            if src[:6] != 'https:':
                pic_src = 'https://desk-fd.zol-img.com.cn' + src
            else:
                pic_src = src
            pic_info = requests.get(pic_src, headers=headers)
            # One sub-directory per album (the part of the name before '_').
            dir_name = name.split('_')[0]
            new_dir = dir_h + '/{}'.format(dir_name)
            os.makedirs(new_dir, exist_ok=True)
            # Throttle requests (t is a module-level random delay in seconds).
            sleep(t)
            with open(new_dir +'/{}.jpg'.format(name), 'wb') as f:
                f.write(pic_info.content)
                f.flush()
if __name__ == '__main__':
    # Shared work queues between the crawler / parser / downloader threads.
    url_list_queue = Queue()
    url_next_queue = Queue()
    image_url_queue = Queue()
    start_url = 'http://desk.zol.com.cn/pc/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
    }
    # Random per-request sleep interval (seconds), used by all workers.
    t = randint(0, 2)
    print("""
    1. \t全部
    2. \t4096x2160(4k)
    3. \t2560x1440(2k)
    4. \t2880x1800(Retina屏)
    5. \t2560x1600(27-30英寸)
    6. \t1920x1200
    7. \t1920x1080(15-23英寸)
    8. \t1680x1050(22英寸)
    9. \t1600x900(20英寸)
    10.\t1440x900(15-19英寸)
    11.\t1280x1024(17-19英寸)
    """)
    # Menu key -> resolution label (also the anchor id used on image pages).
    choice_dict = {
        '1': '全部',
        '2': '4096x2160',
        '3': '2560x1440',
        '4': '2880x1800',
        '5': '2560x1600',
        '6': '1920x1200',
        '7': '1920x1080',
        '8': '1680x1050',
        '9': '1600x900',
        '10': '1440x900',
        '11': '1280x1024',
    }
    # Prompt until a valid resolution choice is entered (empty input -> 7).
    while True:
        choice = input('请输入需要的分辨率(默认: 7): ')
        if choice != '':
            if int(choice) in range(1, 12):
                choice_ratio = choice_dict[choice]
                break
            else:
                print('参数有误, 请重新输入...')
        else:
            choice = str(7)
            choice_ratio = choice_dict[choice]
            break
    url, sum_page = select_ratio(start_url, choice)
    # Prompt until a valid page count is entered (empty input -> 1 page).
    while True:
        pages = input('每页有21组图片, 请输入需要下载的页数(默认: 1页): ')
        if pages != '':
            if int(pages) in range(1, int(sum_page)):
                page = int(pages)
                break
            else:
                print('页数错误, 请重新输入.')
        else:
            page = 1
            break
    # Stage 1: single crawler thread fills the album URL queue.
    image_list = Crawl_list(url, page, url_list_queue)
    image_list.start()
    image_list.join()
    # Stage 2: 50 parser threads resolve album pages into image URLs.
    parse_image = []
    for i in range(50):
        parses = Parse_image(choice_ratio, url_list_queue, image_url_queue)
        parse_image.append(parses)
        parses.start()
    for each in parse_image:
        each.join()
    # Stage 3: 30 downloader threads save the images to disk.
    down_image = []
    for i in range(30):
        save = Down_Image(image_url_queue)
        down_image.append(save)
        save.start()
    for each in down_image:
        each.join()
| [
"1030693123@qq..com"
] | 1030693123@qq..com |
a001e486ee880546709fe00e62b39eaad931a21c | 0ae32dd39740b83fada1b6537e0a5f073d379d08 | /src/models/__init__.py | b296f9a4cb292418b47db1eaa9b2f601deadcdf6 | [] | no_license | RetormLi/my-XML | 2e8374b66f05e04a5bd6438048b2e0af7d1cacab | 0c02992103ba8dc5897f0bc3cc9513bfa25faaae | refs/heads/main | 2023-06-25T11:40:58.183494 | 2021-07-14T03:47:50 | 2021-07-14T03:47:50 | 385,805,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from .cnn import CnnClassifier
from .xml_cnn import XMLCNN
from .pure_cnn import PureCnn
| [
"1197334522@qq.com"
] | 1197334522@qq.com |
7fc48ac64107c97a8357f111ccd641bcaaf880af | aca01c2d073cc9ca2b71e12b8ed87a13a3d61438 | /design-patterns/src/iterators-ksiazka-adresowa.py | bed9ad1fa41d7eb0c99cdd60435c1395e01f065b | [
"MIT"
] | permissive | sli1989/book-python | ee2ee0f37b3173b6921db722a4cb2593d6df1f2b | 51ea279bcc26c4b9b8a1d726e2683c019a28d62b | refs/heads/master | 2020-04-15T11:39:07.209256 | 2019-01-06T23:27:55 | 2019-01-06T23:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | class Kontakt:
def __init__(self, imie, nazwisko, adresy=[]):
self.imie = imie
self.nazwisko = nazwisko
self.adresy = adresy
class Adres:
    """A flexible address record: every keyword argument becomes an attribute."""
    def __init__(self, **kwargs):
        for nazwa in kwargs:
            setattr(self, nazwa, kwargs[nazwa])
# Example data: one contact holding four NASA-site addresses.
kontakt = Kontakt(imie='Pan', nazwisko='Twardowski', adresy=[
    Adres(ulica='2101 E NASA Pkwy', miasto='Houston', stan='Texas',
          kod='77058', panstwo='USA'),
    Adres(ulica=None, miasto='Kennedy Space Center', kod='32899',
          panstwo='USA'),
    Adres(ulica='4800 Oak Grove Dr', miasto='Pasadena', kod='91109',
          panstwo='USA'),
    Adres(ulica='2825 E Ave P', miasto='Palmdale', stan='California',
          kod='93550', panstwo='USA'),
])
# NOTE(review): this loop requires Kontakt to be iterable (i.e. to define
# __iter__ over its addresses) -- confirm the class implements it.
for adres in kontakt:
    print(adres)
| [
"matt@astrotech.io"
] | matt@astrotech.io |
5dd2d3b95992ed9936e51f728d58af8d4893d6b4 | d325e106ca0408ce0ca2c547975aa7632cc34e32 | /message_count/client.py | 97eba8f57d6c092edaf70bf7b3ac8adfa02e43ca | [] | no_license | woong97/gRPC-Examples | d44ee96c9e3d3a5405969da44ff6714d6aac058f | 6cf5097a7d3ee88afeb2027e89e720bc567f5786 | refs/heads/master | 2023-07-31T07:49:21.842955 | 2021-09-19T16:09:37 | 2021-09-19T16:09:37 | 406,507,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import os
import pingpong_pb2
import pingpong_pb2_grpc
import time
import grpc
print(__name__)
def run():
    """Ping the gRPC server in a tight loop, echoing back its counter.

    Prints a latency sample every 1000 pings; exits on Ctrl-C.
    """
    counter = 0
    pid = os.getpid()
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = pingpong_pb2_grpc.PingPongServiceStub(channel)
        while True:
            try:
                start = time.time()
                response = stub.ping(pingpong_pb2.Ping(count=counter))
                # Continue from the counter value the server returned.
                counter = response.count
                if counter % 1000 == 0:
                    print("%4f : resp=%s : procid=%i" %
                          (time.time() - start, response.count, pid))
            except KeyboardInterrupt:
                print("KeyboardInterrupt")
                channel.unsubscribe(close)
                exit()
def close(channel):
    """Channel-unsubscribe callback: close the gRPC channel."""
    channel.close()
if __name__ == '__main__':
    run()
"yjwoong97@gmail.com"
] | yjwoong97@gmail.com |
8b51d49d005e035931c97199225c9bfefcd9c452 | 4f2766354d1b97fc2edca2ece1d8c029faad80f6 | /engine/make-predictions.py | 919e8400ebd1db88b48bb42eaf20077a863a6517 | [] | no_license | codeunifier/cryptoflow | ebc79aba36f545abd850454e66c0a0d366750366 | 809dbb46e3a3b427a6eacf0e31aaa154920cf6e2 | refs/heads/master | 2023-05-24T22:17:03.497874 | 2019-11-19T15:18:20 | 2019-11-19T15:18:20 | 167,708,682 | 0 | 0 | null | 2023-05-22T21:47:25 | 2019-01-26T16:09:15 | TypeScript | UTF-8 | Python | false | false | 1,671 | py | import pandas as pd
import numpy as np
import time
import datetime
import keras
import tensorflow as tf
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import requests
from model import CryptoModel
from datamanager import DataManager
import os
import sys
import demjson
def predict(historicalData, timeframeId):
    """Predict the next price from JSON-encoded historical data and print it.

    historicalData: demjson-decodable mapping of date -> price; the caller
        appends today's price as the last entry.
    timeframeId: "1" keeps every 5th point, "2" every 15th point; any other
        value uses the series as-is.  Exactly 6 points must remain, matching
        the trained 6-point lookback model.
    Raises Exception when the sampled series is not exactly 6 points long.
    """
    data = []
    historicalData = demjson.decode(historicalData)
    # flatten the decoded mapping into an ordered list of prices
    for key in historicalData:
        data.append(historicalData[key])
    if timeframeId == "1":    # 6 points at increments of 5
        data = data[::5]
    elif timeframeId == "2":  # 6 points at increments of 15
        data = data[::15]
    # BUG FIX: the original used `len(data) is not 6` — an *identity* check
    # on an int, which is implementation-defined and a SyntaxWarning on
    # modern Pythons.  Use value inequality.
    if len(data) != 6:
        raise Exception('length of data is incorrect: %d' % len(data))
    # normalize prices into [0, 1] for the model
    scaler = MinMaxScaler(feature_range=(0, 1))
    noramlized = scaler.fit_transform(np.reshape(data, (-1, 1)))
    # load the trained model (6-point lookback)
    my_model = CryptoModel()
    my_model.load("my_model_6.h5")
    # model expects shape (batch=1, timesteps=1, features=6)
    shaped = np.reshape(noramlized, (1, 1, 6))
    result = my_model.predict(shaped)
    # map the normalized prediction back to the original price scale
    result_rescaled = scaler.inverse_transform(result)
    print(result_rescaled[0][0])
if __name__ == '__main__':
    predict(sys.argv[1], sys.argv[2])
"eva15023@byui.edu"
] | eva15023@byui.edu |
b4f1d9f7d55cf307767c89a911376d87978e9513 | 477c2342a296ef4388da8347dc4f897bb3455906 | /python-for-coding-test/모험가 길드.py | 963ca0e9e3f53e4f617c21cccfe23cc9ccd1ecf8 | [] | no_license | llhbum/Problem-Solving_Python | eb56e7bac44c1fd15cbfc9766839a81d5743b797 | 97c03ffb4a2f6301c7f7443c11741ac94c9173c6 | refs/heads/master | 2023-02-11T08:03:02.831518 | 2021-01-07T11:47:04 | 2021-01-07T11:47:04 | 294,284,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | '''
INPUT
5
2 3 1 2 2
'''
# Greedy grouping ("adventurer guild"): sort fear levels ascending and grow
# a group until its size reaches the current (largest-so-far) fear level,
# then close the group and start a new one.  Prints the number of groups.
n = int(input())
nList = list(map(int,input().split()))
nList.sort()
result = 0  # number of completed groups
cnt = 0     # size of the group currently being built
for i in nList:
    cnt += 1
    # a group is valid once it has >= i members (i is the max fear so far)
    if cnt >= i:
        result += 1
        cnt = 0
print(result)
| [
"llhbum@gmail.com"
] | llhbum@gmail.com |
dccc2f93dfb78fed07cde8da78882492c7d21daf | 2dfbd44565c8e070061a6e790523a0734eff6ff9 | /hello_app/views.py | 202b16d6270fb12ea74f23115d710983ebc89eff | [] | no_license | sohailADev/hello_django | 96d1203d5f23568086fb196abd52c6e313fa2c82 | d8b8b97ca0f0519e7068bc4128b381354439605a | refs/heads/master | 2022-11-28T21:49:37.873232 | 2020-08-05T01:23:45 | 2020-08-05T01:23:45 | 285,052,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.shortcuts import render
# Create your views here.
def index_view(request):
    """Render index.html with a single sample context variable."""
    # NOTE(review): the context key 'index_view_varialbe' is misspelled;
    # the template must use the same spelling — confirm before renaming.
    return render(request,"index.html",{"index_view_varialbe":"Hello world!!!"})
"sohailaslam0707@gmail.com"
] | sohailaslam0707@gmail.com |
128c41e485c8c714605cebb8f0550d44679ebf8b | e037112b3f85eac65f0d998428b513d46d5b3b49 | /scripts/coursebuilder/__init__.py | c47f7675e416d6f2c130fb13a81380bff711d255 | [] | no_license | bmschmidt/static_course_CMS | dc893bf791f423f5d0a296f597837345b755fdb2 | e2f6ac24e0dbdb1f95a299c71c54f5b0068ac410 | refs/heads/master | 2020-12-22T14:28:57.887140 | 2020-12-21T18:35:58 | 2020-12-21T18:35:58 | 236,823,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | from .settings import course_settings
| [
"bmschmidt@gmail.com"
] | bmschmidt@gmail.com |
dcdf4facd5b67bd0722e43992355d294ac565bd8 | 2203e8e65267ec913b17fcf1107380ea1ed73bcc | /accounts/urls.py | af7fc05254ac179c0c56739ec2b0f4929050a2d4 | [] | no_license | BurumeMulindwa/django-boards-acounts | 340af6c30f61f420e7fb6b9a5aa54440aef63f2b | 342a5795f58abf5a0cad01363cf45583c1133f8e | refs/heads/master | 2022-12-09T12:44:45.757927 | 2020-03-31T11:41:56 | 2020-03-31T11:41:56 | 251,586,395 | 0 | 0 | null | 2022-12-08T03:56:24 | 2020-03-31T11:44:13 | Python | UTF-8 | Python | false | false | 462 | py | from django.urls import path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from accounts import views as accounts_views
from boards import views
# Account routes: signup is a custom view; login/logout reuse Django's
# built-in auth views (login renders the project's login.html template).
urlpatterns = [
    # path('', views.home, name='home'),
    path('signup/', accounts_views.signup, name='signup'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
    path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
]
| [
"burumemulindwa@gmail.com"
] | burumemulindwa@gmail.com |
895f4fd77041a989df7f6adafe8cbb780d71624f | f24a2574875042ad2f39bfea027098f1bee21050 | /DjangoLearning/common.py | 7cb2396c76abe356eafd798047fc0d648d6c0403 | [] | no_license | shashi634/QuizChamp | 9f0190b57add9781389aa9efef82b97ead0b9173 | 8bb9ed390e4cc0646c4b777e65a01b4825755a46 | refs/heads/master | 2020-08-28T10:27:09.570442 | 2019-11-10T10:21:01 | 2019-11-10T10:21:01 | 217,673,135 | 0 | 0 | null | 2019-11-02T17:19:34 | 2019-10-26T07:36:14 | JavaScript | UTF-8 | Python | false | false | 1,193 | py | import hashlib, binascii, os
import re
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render
def hashPassword(password):
    """Hash *password* with a fresh random salt (PBKDF2-HMAC-SHA512).

    Returns a 192-character ASCII string: a 64-char hex salt followed by
    a 128-char hex digest (100000 PBKDF2 iterations).
    """
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    digest = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
                                 salt, 100000)
    return (salt + binascii.hexlify(digest)).decode('ascii')
def checkEmail(email):
    """Return True when *email* matches a basic address pattern, else False."""
    pattern = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
    return re.search(pattern, email) is not None
def currentLoggedInUserData(request=None):
    """Return the logged-in admin's profile data from the session.

    BUG FIX: the original body referenced an undefined name ``request``
    and raised NameError on every call.  The request is now an explicit
    parameter (default None keeps the old zero-argument call signature
    importable; such calls now return an empty dict instead of crashing).

    When the session holds 'quizChampAdmin' as 'name~email~org', returns
    a dict with UserName / EmailId / OrganizationId; otherwise {}.
    """
    data = dict()
    if request is not None and "quizChampAdmin" in request.session:
        sessionData = request.session["quizChampAdmin"].split('~')
        data['UserName'] = sessionData[0]
        data['EmailId'] = sessionData[1]
        data['OrganizationId'] = sessionData[2]
    return data
class AuthRequiredMiddleware(MiddlewareMixin):
    # Django middleware that forces login: any request whose session lacks
    # the 'quizChampAdmin' key is answered with the login page.
    def process_request(self, request):
        """Render login.html for anonymous sessions; None lets the request through."""
        if not "quizChampAdmin" in request.session:
            return render(request,'login.html')
        return None
"shankar634@hotmail.com"
] | shankar634@hotmail.com |
a1c5ec54e64f05ebf48791444f838fff8f9cc066 | 0e91533b80819ff6d20d84e7887797d39709ce9e | /vip/migrations/0001_initial.py | 6ccec0b1ff32d91056bd466102862e6e96ec392c | [
"Apache-2.0",
"MIT"
] | permissive | jhihwei/eshop | f4e3f9ea0f0ad40a97e21f379db858c66595bd9d | e9861e9a2ee084201ecd36bed628cf50e9dbeb7e | refs/heads/master | 2023-07-07T10:04:23.496720 | 2021-08-06T06:25:13 | 2021-08-06T06:25:13 | 393,273,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # Generated by Django 3.1.3 on 2021-01-04 03:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the vip app: creates the `user`
    # table with name / line_id / actived columns (admin labels in Chinese).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='user',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=10, verbose_name='名称')),
                ('line_id', models.CharField(default='', max_length=60, verbose_name='line_id')),
                ('actived', models.BooleanField(default=False, verbose_name='启用')),
            ],
            options={
                'verbose_name': '会员',
                'verbose_name_plural': '会员',
                'ordering': ('name',),
            },
        ),
    ]
"krel.jhan@gmail.com"
] | krel.jhan@gmail.com |
c0c3fd786947bf2e0f51e84538ce584a249110a4 | d444e1b1190ef62cd03e267e7f3031dc94c43340 | /venv/Scripts/easy_install-script.py | 0276406ac18099e9601d9482f3de79b4b05e98c2 | [] | no_license | cnfrank/webSpider | fccbd99918898ca0e976709acfdd7bd395fe592c | a96a72db1a9dbd76eae2fc4076ce29d8bdb4709d | refs/heads/master | 2020-04-13T22:55:02.443281 | 2018-12-29T08:22:14 | 2018-12-29T08:22:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!C:\Users\95700\PycharmProjects\webSpider\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix from argv[0] so the
    # console entry point resolves correctly, then run it and exit with
    # its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"9570075@qq.com"
] | 9570075@qq.com |
9bf41e3da01de44b1c0c745ee89bd86373b4c4db | 2f2cc5bc44f0ff08c1f9888c3dd97ea2d3f3fe0a | /EXAMPLES/for_ex.py | b036718cf51e26c987b2303382ce052554a32419 | [] | no_license | solracq/Python-Training | ba33583292863393fab8156d18d48bc511c9f331 | 4c8a29da23b9e483f8e5c30f7ef4a7311a427381 | refs/heads/master | 2021-01-21T00:45:04.954824 | 2020-05-04T00:21:35 | 2020-05-04T00:21:35 | 14,281,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | #!/usr/bin/env python3
# Teaching example: `for` iterates lists, tuples, and strings alike.
mylist = [ "Idle","Cleese","Chapman","Gilliam","Palin","Jones"]
mytup = ("Roger","Old Woman","Prince Herbert","Brother Maynard")
mystr = "She turned me into a newt"
# one list element per line
for p in mylist:
    print(p)
print()
# one tuple element per line
for r in mytup:
    print(r)
print()
# characters separated by spaces on a single line
for ch in mystr:
    print(ch, end=' ')
print()
| [
"Solrac@192.168.0.18"
] | Solrac@192.168.0.18 |
eb7c72bc1dfe900c646c7d26ddc66400e27f3755 | 60f9b5dce4d11f1e89da620915918dacba738b45 | /billiard/reduction.py | 11ea7c4913c98a398df8837bd3ac3dfc3807ca5e | [
"BSD-3-Clause"
] | permissive | dexter-xiong/billiard | 5e7497a29d14b11b19cab5008110e69d3c9bae19 | 0fedae7cb7c7408a4287e3d161b4f2b63541c279 | refs/heads/master | 2020-12-31T01:36:59.953134 | 2014-12-18T20:58:25 | 2014-12-18T20:58:25 | 33,759,423 | 0 | 0 | null | 2015-04-11T02:25:20 | 2015-04-11T02:25:20 | null | UTF-8 | Python | false | false | 8,976 | py | #
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import functools
import io
import os
import pickle
import socket
import sys
__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
PY3 = sys.version_info[0] == 3
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
(hasattr(socket, 'CMSG_LEN') and
hasattr(socket, 'SCM_RIGHTS') and
hasattr(socket.socket, 'sendmsg')))
#
# Pickler subclass
#
if PY3:
    import copyreg
    # Python 3: customize reducers via the per-pickler dispatch_table so
    # global copyreg state is left untouched.
    class ForkingPickler(pickle.Pickler):
        '''Pickler subclass used by multiprocessing.'''
        _extra_reducers = {}
        _copyreg_dispatch_table = copyreg.dispatch_table
        def __init__(self, *args):
            super(ForkingPickler, self).__init__(*args)
            # start from copyreg's table, then layer our own reducers on top
            self.dispatch_table = self._copyreg_dispatch_table.copy()
            self.dispatch_table.update(self._extra_reducers)
        @classmethod
        def register(cls, type, reduce):
            '''Register a reduce function for a type.'''
            cls._extra_reducers[type] = reduce
        @classmethod
        def dumps(cls, obj, protocol=None):
            # pickle into memory; returns a memoryview of the buffer
            buf = io.BytesIO()
            cls(buf, protocol).dump(obj)
            return buf.getbuffer()
        @classmethod
        def loadbuf(cls, buf, protocol=None):
            return cls.loads(buf.getbuffer(), protocol)
        loads = pickle.loads
else:
    # Python 2: no dispatch_table support; patch the class-level dispatch
    # dict with a wrapper that calls save_reduce directly.
    class ForkingPickler(pickle.Pickler):  # noqa
        '''Pickler subclass used by multiprocessing.'''
        dispatch = pickle.Pickler.dispatch.copy()
        @classmethod
        def register(cls, type, reduce):
            '''Register a reduce function for a type.'''
            def dispatcher(self, obj):
                rv = reduce(obj)
                self.save_reduce(obj=obj, *rv)
            cls.dispatch[type] = dispatcher
        @classmethod
        def dumps(cls, obj, protocol=None):
            buf = io.BytesIO()
            cls(buf, protocol).dump(obj)
            return buf.getvalue()
        @classmethod
        def loadbuf(cls, buf, protocol=None):
            return cls.load(buf, protocol)
        loads = pickle.loads
# module-level convenience alias for reducer registration
register = ForkingPickler.register
def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    # ensures reducers registered via register() are honored
    ForkingPickler(file, protocol).dump(obj)
#
# Platform specific definitions
#
if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi
    def duplicate(handle, target_process=None, inheritable=False):
        '''Duplicate a handle. (target_process is a handle not a pid!)'''
        if target_process is None:
            target_process = _winapi.GetCurrentProcess()
        return _winapi.DuplicateHandle(
            _winapi.GetCurrentProcess(), handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)
    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        # DUPLICATE_CLOSE_SOURCE closes the handle in the source process
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)
    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # wrap in DupHandle so the receiver can steal it on unpickle
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)
    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()
    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid
        def detach(self):
            '''Get the handle. This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)
else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array
    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'
    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        # one data byte carrying len(fds) mod 256, plus SCM_RIGHTS ancdata
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')
    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(
            1, socket.CMSG_LEN(bytes_size),
        )
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError(
                    'received %d items of ancdata' % len(ancdata),
                )
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # cross-check the count byte sent by sendfds
                assert len(a) % 256 == msg[0]
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')
    def send_handle(conn, handle, destination_pid):  # noqa
        '''Send a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])
    def recv_handle(conn):  # noqa
        '''Receive a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]
    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        from ..forking import Popen
        return Popen.duplicate_for_child(fd)
#
# Try making some callable types picklable
#
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Throwaway class used only to obtain the bound-method type below.
    def f(self):
        pass
# make instance methods picklable via _reduce_method
register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
# make unbound builtin/slot method descriptors (e.g. list.append,
# int.__add__) picklable by name lookup on their defining class
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)
def _reduce_partial(p):
    """Reduce a functools.partial to (_rebuild_partial, (func, args, kwargs))."""
    keywords = p.keywords or {}
    return _rebuild_partial, (p.func, p.args, keywords)
def _rebuild_partial(func, args, keywords):
return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
#
# Make sockets picklable
#
if sys.platform == 'win32':
    # Windows: hand the socket to the resource sharer, which duplicates it
    # for the receiving process.
    def _reduce_socket(s):
        from ..resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)
else:
    # Unix: duplicate the file descriptor and rebuild the socket around it.
    def _reduce_socket(s):  # noqa
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):  # noqa
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
| [
"ask@celeryproject.org"
] | ask@celeryproject.org |
c7a62de7ce601948322d2f4cfd19486a18070ec1 | d12f83d6c4b958904ea47176c1f861b97e783e78 | /list.py | c88b6eb3d12f17dc83314a98b873188fd37df833 | [] | no_license | jifeng35/pycharm | 2cc7b2ab1e68ca15e0738b5b6aa9ba694e5fd01e | 2123c1dd3ee0be3dc498d8a793b2ce71a165b197 | refs/heads/master | 2023-07-08T06:26:24.230051 | 2021-08-07T02:50:30 | 2021-08-07T02:50:30 | 367,003,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | my_list = [1, 'a', 2.3875, "小李"]
print(my_list)
print(my_list[3], "输出末尾元素的两种方式", my_list[-1])
# -1是最后一个 0是第一个元素
print(type(my_list[0]))
print(type(my_list[1]))
print(type(my_list[2]))
print(type(my_list[3]))
# 列表中存储的元素为定义时的数据类型
for i in my_list:
print(i, end=",")
print("列表的长度为:", len(my_list))
j = 0
while j < len(my_list):
print(my_list[j], end=",")
j += 1
my_list.append("A1")
# append = push_back
for i in my_list:
print(i, end=",")
print("列表的长度为:", len(my_list))
a = [1, 2]
b = [3, 4]
a.append(b)
# append 是将括号内的数据作为一个元素插入到list中
print(a)
a.extend(b)
# extend 为扩展,将a中插入b中所有的单个元素
print(a)
a.insert(0, 'see')
# insert(int pos,template<class T> T object)
print(a)
del a[0]
print(a)
a.pop()
# pop()=pop_back()
print(a)
a.remove(b)
# 列表中内容重复,删除的时候删除对应内容下标较小的那个
# a.remove(10) 输入不存在的数据会报错,抛出异常
# 删除的是元素内容
print(a)
if 'q' in a:
print("exist")
else:
print("cannot find!")
print(a.index(1, 0, len(a)))
# 范围区间左闭右开,不包含结尾数字的对应下标的元素
print(a.count(1))
a.insert(3, 0)
a.insert(0, 4)
a.insert(0, 8)
a.sort()
print(a)
a.sort(reverse=True)
print(a)
| [
"84109179+jifeng35@users.noreply.github.com"
] | 84109179+jifeng35@users.noreply.github.com |
6c4f384947a59d6f8f492cfba3970f59e2044c54 | 6349ef281cf18c3ebc5864ebba07155829292952 | /test/多进程.py | a755a89e5fa84a1736a5ab4d8704958b7d82cdcf | [] | no_license | Ibunao/pythontest | e402aeb9b16f2835a8c0f340ecb94f8f185af780 | bffe8f03bc2b5cc5fb9563a91d2e7f6e3ffcaf5d | refs/heads/master | 2020-03-24T17:16:47.652647 | 2019-05-25T07:41:54 | 2019-05-25T07:41:54 | 142,854,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | from concurrent.futures import ProcessPoolExecutor
import requests
# task executed by each pool worker: fetch the URL and return the response
def task(url):
    response = requests.get(url)
    return response
# callback invoked when a submitted task finishes
def done(future, *arge, **kwargs):
    # fetch the value returned by the worker
    response = future.result()
    print(response.status_code, response.content)
# create the worker pool (7 processes; original comments said "thread
# pool" but this is a ProcessPoolExecutor)
pool = ProcessPoolExecutor(7)
url_list = [
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
    'http://www.baidu.com',
]
for url in url_list:
    # submit the fetch task to the pool
    v = pool.submit(task, url)
    # attach the completion callback
    v.add_done_callback(done)
# wait=True blocks until all workers finish before resuming the main process
pool.shutdown(wait=True)
print('end')
"idingran@163.com"
] | idingran@163.com |
3daba78686755bb45f71e5eac73448cbf43e45fb | a52fc147ebdb54f86e605eb69d9efd3b73d7775f | /rango/urls.py | 4fee132e486768199160654a15c5a19c700a6763 | [] | no_license | KienWelch/tango_with_django_project | 4f7ccdd4d2dfa62da4249d26002c6786d3fc27ce | 74133e6c0c34597e57d5e7eadff420090865909c | refs/heads/master | 2020-12-13T20:49:06.531115 | 2020-02-10T20:38:30 | 2020-02-10T20:38:30 | 234,528,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | """tango_with_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from rango import views
# URL namespace for the rango app (used as 'rango:<name>' in reverse()).
app_name = 'rango'
# Routes: index/about pages, slug-addressed categories, add forms, and
# auth views (register/login/logout plus a login-protected page).
urlpatterns = [
    path('', views.index, name='index'),
    path('about/', views.about, name='about'),
    path('category/<slug:category_name_slug>/',
         views.show_category, name='show_category'),
    path('add_category/', views.add_category, name='add_category'),
    path('category/<slug:category_name_slug>/add_page/', views.add_page,
         name='add_page'),
    path('register/', views.register, name='register'),
    path('login/', views.user_login, name='login'),
    path('restricted/', views.restricted, name='restricted'),
    path('logout/', views.user_logout, name='logout'),
]
| [
"59999504+KienWelch@users.noreply.github.com"
] | 59999504+KienWelch@users.noreply.github.com |
9bfe5a80ac97775d19c7714fa8ba2f44ee3460a4 | 2f90f032240d13b67bdc879c221e42e89d9a3605 | /hbruraldoctor/hbvirtual/bin/decrypto | eabb31961fb6635cdc9ca62655e0cdac7cddd853 | [
"Apache-2.0"
] | permissive | hallohubo/DjangoDocterAPI | 83ad091dae82fd06bf07368eac1485ad675d9f9e | 2d86d17c718affa968c0b2d4f9590aa08d43716e | refs/heads/master | 2020-06-02T08:47:13.519399 | 2019-06-21T10:07:40 | 2019-06-21T10:07:40 | 191,104,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/kobo/Desktop/hbruraldoctor/hbvirtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from crypto.decryptoapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"hallohubo@gmail.com"
] | hallohubo@gmail.com | |
f9e2781a80ab9850d43d46c6af52acced8635898 | 41831710427637e567cb72053f6d6c1845af0da2 | /apptest/views.py | ad664314e427819c3035d1b9c2a49e410ce23a3d | [] | no_license | lihengqi/testmanage | 1eef8516ff3ac4f9bd19f01ac3056b4ef94ee60a | 2afc31c472cccae01748ba97216c6eed424e4d5d | refs/heads/master | 2021-03-24T00:19:04.829412 | 2020-04-19T10:14:25 | 2020-04-19T10:14:25 | 247,496,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from django.contrib.auth import authenticate, login
from apptest.models import Appcase, Appcasestep
# Create your views here.
# app test-case management
@login_required
def appcase_manage(request):
    """List all Appcase rows on the appcase_manage page."""
    appcase_list = Appcase.objects.all()
    username = request.session.get("user", "") # read user from the browser session
    return render(request, "appcase_manage.html", {"user": username, "appcases": appcase_list})
# app test-case steps
@login_required
def appcasestep_manage(request):
    """List all Appcasestep rows on the appcasestep_manage page."""
    username = request.session.get("user", "") # read user from the browser session
    appcasestep_list = Appcasestep.objects.all()
    return render(request, "appcasestep_manage.html", {"user": username, "appcasesteps":appcasestep_list})
"lvq897652402@163.com"
] | lvq897652402@163.com |
d8074cdceef3099fac3b9fe5188dce7732392b2d | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/Clarification/Backtracking/生成括号.py | 79c4cbc77e31405a0b2e94b1f993c9dc312741f0 | [
"MIT"
] | permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 685 | py | # 给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。
#
# 例如,给出 n = 3,生成结果为:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution:
    def generateParenthesis(self, n: int) -> list:
        """Return all well-formed combinations of n pairs of parentheses.

        Backtracking: extend the partial string with '(' while fewer than
        n are open, and with ')' only while it keeps the prefix balanced.

        Fix: the original annotated the return as ``List[str]`` without
        importing ``typing.List``, which raises NameError when the class
        is defined; the builtin ``list`` is used instead.
        """
        ans = []
        def backtrack(s='', left=0, right=0):
            # left/right = counts of '(' / ')' already placed
            if len(s) == 2 * n:
                ans.append(s)
                return
            if left < n:
                backtrack(s + '(', left + 1, right)
            if right < left:
                backtrack(s + ')', left, right + 1)
        backtrack()
        return ans
"mzm@mail.dlut.edu.cn"
] | mzm@mail.dlut.edu.cn |
4d793f5d9c9393cbd4f436ee59a7d37d6704b26c | bd736cfd2e618ad1bfa66a1ad245d644ff98c8e5 | /routes/index.py | c14825d1701cc25a3a6cc281e8ecbdee61f08ff6 | [] | no_license | fairylin/bbs | c2c1d96f6cae74042ce19e5c959d5abc16e8f90f | 2d3d84939171f38b77676df31df4626958bebd4c | refs/heads/master | 2020-05-06T14:10:58.192178 | 2020-03-29T15:42:15 | 2020-03-29T15:42:15 | 180,176,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | from flask import (
render_template,
request,
redirect,
session,
url_for,
Blueprint,
make_response,
send_from_directory,
)
from werkzeug.utils import secure_filename
from models.user import User
from config import user_file_director
import os
from utils import log
main = Blueprint('index', __name__)
def current_user():
    """Return the User for the session's user_id, or None."""
    # read user_id from the session, defaulting to -1
    # then look the user up by id; find_by returns None on a miss
    uid = session.get('user_id', -1)
    u = User.find_by(id=uid)
    return u
"""
用户在这里可以
访问首页
注册
登录
用户登录后, 会写入 session, 并且定向到 /profile
"""
@main.route("/")
def index():
    """Render the home page with the current user (None when anonymous)."""
    u = current_user()
    return render_template("index.html", user=u)
@main.route("/register", methods=['POST'])
def register():
    """Create a user from the POSTed form and return to the home page."""
    form = request.form
    # validation is handled inside the User.register class method
    u = User.register(form)
    return redirect(url_for('.index'))
@main.route("/login", methods=['POST'])
def login():
    """Validate the POSTed credentials; on success store user_id in the session."""
    form = request.form
    u = User.validate_login(form)
    if u is None:
        # NOTE(review): failed login redirects to the same topic.index page
        # as success — confirm this is intentional (no error feedback).
        return redirect(url_for('topic.index'))
    else:
        # record the login in the session
        session['user_id'] = u.id
        # make the session cookie persistent across browser restarts
        session.permanent = True
        return redirect(url_for('topic.index'))
@main.route('/profile')
def profile():
    """Show the profile page; anonymous users are sent to the home page."""
    u = current_user()
    if u is None:
        return redirect(url_for('.index'))
    else:
        return render_template('profile.html', user=u)
def allow_file(filename):
    """Return True when filename's extension is in accept_user_file_type."""
    suffix = filename.split('.')[-1]
    from config import accept_user_file_type
    return suffix in accept_user_file_type
@main.route('/addimg', methods=['GET', 'POST'])
def add_img():
    """Upload an avatar image for the logged-in user.

    Rejects anonymous users, missing/empty uploads, and disallowed file
    types; on success saves the file and records it on the user.
    Always redirects back to the profile page.
    """
    u = current_user()
    if u is None:
        return redirect(url_for(".profile"))
    if "file" not in request.files:
        return redirect(url_for(".profile"))
    file = request.files['file']
    if file.filename == "":
        return redirect(url_for(".profile"))
    if allow_file(file.filename):
        # sanitize the client-supplied name before touching the filesystem
        filename = secure_filename(file.filename)
        file.save(os.path.join(user_file_director, filename))
        u.user_image = filename
        u.save()
    return redirect(url_for(".profile"))
# serves uploads via send_from_directory; in production a static server
# (e.g. nginx) should handle these files instead
@main.route('/uploads/<filename>')
def uploads(filename):
    """Serve an uploaded file from user_file_director."""
    return send_from_directory(user_file_director, filename)
"15574412169@163.com"
] | 15574412169@163.com |
6c43608b41249f6e16862da77c32ec53324a78c4 | 06952f5ef7eba6fafa629856617699b1b43cac20 | /图像特效/02灰度处理2.py | dae86c1231dc8b151319856ceecf2f114b5b30c8 | [] | no_license | gaoxiang97/openCV-1 | 208e5a38a3f8218beaf41851f64116c8275f0bb4 | 1f068ad0f4fcf8f93d455a5390c607047c44eb03 | refs/heads/master | 2022-01-16T02:54:49.144806 | 2019-07-05T06:35:19 | 2019-07-05T06:35:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import cv2
import numpy as np
# Mean-of-channels grayscale: average b, g, r per pixel and write the
# result to all three channels of the output image.
img = cv2.imread('image0.jpg', 1)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
#RGB R=G=B = GRAY
dst = np.zeros((height, width, 3), np.uint8)
for i in range(0, height):
    for j in range(0, width):
        # channels are unpacked as (b, g, r)
        (b, g, r) = img[i, j]
        gray = (int(b)+int(g)+int(r))/3
        dst[i, j] = np.uint8(gray)
cv2.imshow('dst', dst)
cv2.waitKey(0)
"885228764@qq.com"
] | 885228764@qq.com |
309933581c5906d2db8e8db38c4eb5949f694987 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03157/s868052818.py | ec6805ad1b92df0a841e5a07b2af49a175993650 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | from collections import defaultdict
# For each connected component of the "different-color neighbors" graph,
# multiply its black (#) and white cell counts and sum over components.
H, W = map(int, input().split())
S = [input() for _ in range(H)]
es = defaultdict(list)
# scanning only right and down is enough: each edge is added in both directions
for i in range(H):
    for j in range(W):
        if j < W-1 and S[i][j] != S[i][j+1]:
            es[(i,j)].append((i,j+1))
            es[(i,j+1)].append((i,j))
        if i < H-1 and S[i][j] != S[i+1][j]:
            es[(i,j)].append((i+1, j))
            es[(i+1,j)].append((i, j))
# NOTE(review): the comprehension reuses H as its loop variable; it works
# (comprehension scope is local) but shadows the grid height name.
checked = [[False for _ in range(W)] for H in range(H)]
ans = 0
for i in range(H):
    for j in range(W):
        if checked[i][j] == True:
            continue
        # count black/white cells in this component via BFS-by-levels
        cnt_b = 0
        cnt_w = 0
        if S[i][j] == "#":
            cnt_b += 1
        else:
            cnt_w += 1
        checked[i][j] = True
        stack = es[(i,j)]
        while stack:
            new_stack = []
            for p,q in stack:
                if checked[p][q] == False:
                    checked[p][q] = True
                    if S[p][q] == "#":
                        cnt_b += 1
                    else:
                        cnt_w += 1
                    new_stack.extend(es[(p,q)])
            if len(new_stack) == 0:
                break
            else:
                stack = new_stack
        ans += cnt_b * cnt_w
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c29a49bb27bab979d299a65922ab8b19a11e297b | ed7a8554b99cd71cab7b6e46d11c65d4a644b358 | /Exercise7.py | 6ccfef98bb6377704effa159e513b0d44afc40a5 | [] | no_license | gorsheninii/zed_a._shaw | 5aa5c6f3af99b31167220229be2db57b324e6342 | 615c78dae2b04018c3872fed8d7696d9b4bace8c | refs/heads/master | 2023-03-26T16:13:47.966767 | 2021-03-29T18:05:45 | 2021-03-29T18:05:45 | 343,179,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | print("Mary has a small sheep.")
# Exercise on print(), str.format, string repetition, and concatenation.
print("His furr was white, like a {}.".format('show'))
print("And anywhere, where Mary went,")
print("Small sheep follow her.")
# "." repeated ten times
print("."*10) #What?
end1 = "B"
end2 = "a"
end3 = "d"
end4 = "d"
end5 = "y"
end6 = "G"
end7 = "a"
end8 = "y"
# end=' ' keeps both words on the same output line
print(end1+end2+end3+end4+end5, end=' ')
print(end6+end7+end8)
"zingo@mail.ru"
] | zingo@mail.ru |
fa078ebd935861277a4069ea88a2a99c8620354a | 17b10615c1dc6c824ba77cdc3661222bf95adca6 | /work/pharma/settings.py | 53be93302bf37c2cc0dd8af58a85e252cbf2d1a2 | [] | no_license | ayush-sah/AYUNIK | c692b5c3068245af77ad8e6233fbbc7e87c56d86 | 70e5c427ac0d5a4039477d47499c2922e76395c5 | refs/heads/main | 2023-06-20T08:58:40.240372 | 2021-07-12T14:57:51 | 2021-07-12T14:57:51 | 383,075,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,279 | py | """
Django settings for pharma project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2b1)97gnt9#@wv(%filjc7-s(!43si)sbvsq9!3962yx#1bnu&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# SESSION_COOKIE_AGE = 40
# SESSION_SAVE_EVERY_REQUEST = True
# LOGOUT_REDIRECT_URL = '/login/'
# Application definition
AUTH_PROFILE_MODULE = 'ayunik.UserProfile'
AUTH_PROFILE_MODULE = 'ayunik.PUserProfile'
AUTH_PROFILE_MODULE = 'ayunik.CUserProfile'
AUTH_PROFILE_MODULE = 'user.UserProfile'
AUTH_PROFILE_MODULE = 'user.CUserProfile'
AUTH_PROFILE_MODULE = 'user.PUserProfile'
AUTH_PROFILE_MODULE = 'User.UserProfile'
AUTH_PROFILE_MODULE = 'user.userprofile'
AUTH_PROFILE_MODULE = 'user.puserprofile'
AUTH_PROFILE_MODULE = 'user.cuserprofile'
# Application definition
INSTALLED_APPS = [
'ayunik.apps.AyunikConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pharma.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR,'ayunik/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pharma.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'pharm',
'USER':'postgres',
'PASSWORD':'123',
'HOST':'localhost',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_URL= '/assets/'
STATIC_URL='/media/'
STATIC_URL='/templates/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'ayunik/static'),
]
STATIC_ROOT=os.path.join(BASE_DIR,'assets')
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'quadrubalsquad29@gmail.com'
EMAIL_HOST_PASSWORD = '5v4w3x2y1z' | [
"ayush.sah@spit.ac.in"
] | ayush.sah@spit.ac.in |
79db950c2f9450ff729d2ac03f6271965dd807cf | d5049c3b59b943a158389deaefe9c48970a43c6c | /Lab4/UI.py | e33e0458a9bc51d6e7bef9164a7954f72ed438a3 | [] | no_license | LauraDiosan-CS/lab04-gatsp-DiosDuck | 18e013df30b1a8d0e182190c693cad7da47e68d1 | 647ae011fa5edf7ea4a4187b684f351b0482c328 | refs/heads/master | 2022-04-22T20:47:47.311060 | 2020-03-27T17:59:05 | 2020-03-27T17:59:05 | 250,198,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | from Service import Service
class UI():
def __init__(self):
self.__service=None
def main(self):
while 1:
try:
x = input()
if x == "0":
return
elif x == "1":
file=input()
self.__service=Service(file,1)
self.__service.prob1()
print("Functie terminata")
elif x == "2":
file=input()
self.__service=Service(file,2)
self.__service.prob1()
print("Functie terminata")
else:
print("Error")
except FileNotFoundError:
print("Fisierul nu exista") | [
"noreply@github.com"
] | noreply@github.com |
5d41718f3f1ed181db8cd1a776a5f2453bafd1e7 | 3a31529e99a5971bdbb761732a5b078a405e13e2 | /performance/migrations/0006_auto_20201019_2300.py | 8e3c2c934c6c1317002d698ebc80eff74de4eef3 | [] | no_license | gabriel-bandeira/backend-desafio-cnj | f6486487b90182f20f69885316e8411fefd35552 | a58a95ad8d47845a3309b350db8c9f496c60b002 | refs/heads/master | 2023-01-20T07:51:21.896143 | 2020-11-27T01:01:35 | 2020-11-27T01:01:35 | 304,983,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | # Generated by Django 3.1.2 on 2020-10-20 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('performance', '0005_auto_20201019_1407'),
]
operations = [
migrations.RemoveField(
model_name='vara',
name='time_macrostep_1',
),
migrations.RemoveField(
model_name='vara',
name='time_macrostep_2',
),
migrations.RemoveField(
model_name='vara',
name='time_macrostep_3',
),
migrations.RemoveField(
model_name='vara',
name='time_macrostep_4',
),
migrations.AddField(
model_name='vara',
name='time_baixa_ou_arquivamento',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_conclusao',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_decisao',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_despacho',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_distribuicao',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_julgamento',
field=models.IntegerField(default=None, null=True),
),
migrations.AddField(
model_name='vara',
name='time_transito_em_julgado',
field=models.IntegerField(default=None, null=True),
),
migrations.AlterField(
model_name='vara',
name='latitude',
field=models.FloatField(default=None, null=True),
),
migrations.AlterField(
model_name='vara',
name='longitude',
field=models.FloatField(default=None, null=True),
),
migrations.AlterField(
model_name='vara',
name='ranking',
field=models.IntegerField(default=None, null=True),
),
]
| [
"lfv.vercosa@gmail.com"
] | lfv.vercosa@gmail.com |
beada61a7378cf493b0f7dc69afdb144dabca034 | 98684d541d98672261d05e52c9d96fe6733079f5 | /my_bbox_tool.py | f4074e16937b8fc25d6f94f0c49d79b8fc68ad3f | [] | no_license | LucasWangZH/My_FasterRcnn | 2d9a308bbbe2f3a0b8ac8ba09690ef893978df7e | 6cb5c492491d0731da548fb43bfe5b2fe4dcfb23 | refs/heads/master | 2020-08-27T21:50:40.379512 | 2020-01-20T03:29:33 | 2020-01-20T03:29:33 | 217,497,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,973 | py | import numpy as np
def bbox_iou(src_bbox,dst_bbox):
"""
calc iou between bboxes
:param src_bbox: (N,4) y1x1,y2x2
:param dst_bbox: (K,4)
:return: (N,K)
"""
#iou = np.zeros(src_bbox.shape[0],dst_bbox.shape[0]).astype(np.float32)
# a_lt = src_bbox[:,:2]
# a_br = src_bbox[:,2:]
# b_lt = dst_bbox[:,:2]
# b_br = dst_bbox[:,2:]
# area_a = np.prod((a_br - a_lt),axis = 1)
# area_a = np.repeat(area_a,axis = 0).reshape(iou.shape)
#
# area_b = np.prod((b_br - b_lt),axis = 1).reshape(iou.shape[1],1)#k,1
# area_b = np.repeat(area_b,axis = 0).reshape(iou.shape)
#
# iou = area_a + area_b#N,K
#
# for i in range(src_bbox):
# for j in range(dst_bbox):
# bboxa = src_bbox[i,:]
# bboxb = dst_bbox[j,:]
# x1 = np.maximum(bboxa[1],bboxb[1])
# y1 = np.maximum(bboxa[0],bboxb[0])
# x2 = np.minimum(bboxa[3],bboxb[3])
# y2 = np.maximum(bboxa[2],bboxb[2])
#
# area_intersect = (y2 - y1) * (x2 - x1)
# if area_intersect > 0:
# iou[i][j] = area_intersect / iou[i][j]
# else:
# iou[i][j] = 0
# return iou
if len(dst_bbox.shape) == 3:
dst_bbox = dst_bbox[0,:,:]
elif len(dst_bbox.shape) == 2:
dst_bbox =dst_bbox
elif len(dst_bbox.shape) > 3:
raise IndexError
if src_bbox.shape[1] != 4 or dst_bbox.shape[1] != 4:
raise IndexError
#srcbbox和dstbbox比较,运用广播机制,出来N,K,2
lt = np.maximum(src_bbox[:,None,:2],dst_bbox[None,:,:2])
br = np.minimum(src_bbox[:,None,2:],dst_bbox[None,:,2:])
# if lt !< br, then 0
#如果lt not < br,就是0
area_i = np.prod(br - lt,axis = 2) * (lt < br).all(axis = 2)
area_a = np.prod(src_bbox[:,2:] - src_bbox[:,:2],axis = 1)
area_b = np.prod(dst_bbox[:,2:] - dst_bbox[:,:2],axis = 1)
#广播除法,自动补充维度
iou = area_i / ((area_a[:,None] + area_b) - area_i)
return iou
def bbox2loc(src_bbox,dst_bbox):
"""
encode bbox to loc(offsets and scales)
:param src_bbox: array(R,4)
:param dst_bbox: array(R,4)
:return: array(R,4) loc, loc contains offsets and scales.
The second axis contains four values :math:`t_y, t_x, t_h, t_w`.
:Formula: dy = (dst_bbox.ctry - src_bbox.ctry)/ src_bbox.height
dx = (dst_bbox.ctrx - src_bbox.ctrx)/ src_bbox.widht
dh = log(dst.height / src.height)
dw = log(dst.width / src.width)
"""
src_bbox_height = src_bbox[:,2] - src_bbox[:,0]
src_bbox_width = src_bbox[:,3] - src_bbox[:,1]
src_bbox_ctry = src_bbox[:,0] + 0.5 * src_bbox_height
src_bbox_ctrx = src_bbox[:,1] + 0.5 * src_bbox_width
dst_bbox_height = dst_bbox[:, 2] - dst_bbox[:, 0]
dst_bbox_width = dst_bbox[:, 3] - dst_bbox[:, 1]
dst_bbox_ctry = dst_bbox[:, 0] + 0.5 * dst_bbox_height
dst_bbox_ctrx = dst_bbox[:, 1] + 0.5 * dst_bbox_width
#用eps处理掉0和负数
eps = np.finfo(src_bbox_height.dtype).eps
src_bbox_height = np.maximum(src_bbox_height,eps)
src_bbox_width = np.maximum(src_bbox_width,eps)
dy = (dst_bbox_ctry - src_bbox_ctry) / src_bbox_height
dx = (dst_bbox_ctrx - src_bbox_ctrx) / src_bbox_width
dh = np.log(dst_bbox_height / src_bbox_height)
dw = np.log(dst_bbox_width / src_bbox_width)
loc = np.concatenate((dy[:,None],dx[:,None],dh[:,None],dw[:,None]),axis=1)
return loc
def loc2bbox(src_bbox,loc):
"""
Decode bbox from location,loc is the offset. Given one box and one offset,we can get a target box(the coordiantes in 2d pic)
loc是偏移量,就是给一个框,一个偏移量,出个目标框(就是给2d图中的坐标了)
:param src_bbox: array(R,4)[lty,ltx,bry,brx],R is the number of boxes
:param loc: array(R,4)[dy,dx,dh,dw](也就是t_y,t_x,t_h,t_w)
:return:array(R,4) dst_box [lty,ltx,bry,brx]
:Formula: center_y = dy*src_bbox.height + src_ctr_y
center_x = dx*src_bbox.weidth + src_ctr_x
h = exp(dh) * src_bbox.height
w = exp(dw) * src_bbox.width
dst_bbox.lty = center_y - 0.5 * h
dst_bbox.ltx = center_x - 0.5 * w
dst_bbox.bry = center_y + 0.5 * h
dst_bbox.brx = center_x + 0.5 * w
"""
dst_bbox = np.zeros((src_bbox.shape),dtype= np.float32)
src_bbox_height = src_bbox[:,2] - src_bbox[:,0]
src_bbox_width = src_bbox[:,3] - src_bbox[:,1]
src_bbox_ctry = src_bbox[:, 0] + 0.5 * src_bbox_height
src_bbox_ctrx = src_bbox[:, 1] + 0.5 * src_bbox_width
dst_cty = loc[:,0] * src_bbox_height + src_bbox_ctry
dst_ctx = loc[:,1] * src_bbox_width + src_bbox_ctrx
h = np.exp(loc[:,2]) * src_bbox_height
w = np.exp(loc[:,3]) * src_bbox_width
dst_bbox[:,0] = dst_cty - 0.5 * h
dst_bbox[:,1] = dst_ctx - 0.5 * w
dst_bbox[:,2] = dst_cty + 0.5 * h
dst_bbox[:,3] = dst_ctx + 0.5 * w
return dst_bbox
def get_inside_index(anchor, H, W):
# retrive the indexed of all the boxes that has all 4 coordinates inside the imgsize
#获取所有 4个坐标都在imgsize内部的bbox的index
index_inside = np.where(
(anchor[:, 0] >= 0) &
(anchor[:, 1] >= 0) &
(anchor[:, 2] <= H) &
(anchor[:, 3] <= W)
)[0]
return index_inside
def unmap(data,count,index,fill = 0):
#unmap a subset of item(data) back to the original set of items(of size count)
if len(data.shape) == 1:
ret = np.empty((count,),dtype= data.dtype)
ret.fill(fill)
ret[index] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[index, :] = data
return ret
def base_anchor_generator(base_size = 16,ratios = [0.5,1,2], scales = [8,16,32]):
"""
generate 9 base anchor, at (0,0) position, then shift it to generate that for the whole pic
生成9个base anchor,在(0,0)处,后面做漂移生成全图的
:param base_size:
:param ratios:
:param scales:
:return:
"""
ctrx = base_size / 2.
ctry = base_size / 2.
anchor_base = np.zeros(((len(ratios) * len(scales)),4),dtype = np.float32)
len_ratios = len(ratios)
for i in range(len(ratios)):
for j in range(len(scales)):
H = base_size * scales[i] * np.sqrt(ratios[j])
W = base_size * scales[i] * np.sqrt(ratios[len_ratios -1 - j])
anchor_base[i * len_ratios + j][0] = ctry - H / 2.
anchor_base[i * len_ratios + j][1] = ctrx - W / 2.
anchor_base[i * len_ratios + j][2] = ctry + H / 2.
anchor_base[i * len_ratios + j][3] = ctrx + W / 2.
return anchor_base
def enumerate_shift_anchor(anchor_base,feat_stride,height,width):
shift_y = np.arange(0,height * feat_stride,feat_stride)
shift_x = np.arange(0,width * feat_stride, feat_stride)
#shift_x is (w,1) shift_y is (h,1)
#after meshgrid,shift_x and shift_y are (w,h)
shift_x,shift_y = np.meshgrid(shift_x,shift_y)
#shift(w*h,4)
shift = np.stack((shift_y.ravel(),shift_x.ravel(),shift_y.ravel(),shift_x.ravel()),axis = 1 )
A = anchor_base.shape[0]
K = shift.shape[0]
#reshape anchor_base means that we add an axis, it turn to one of (A*4)
#anchor_base的reshape就相当于加了个轴,变成 1个A*4
#shiftreshape is same as reshape of anchorbase
#shiftreshape同anchorbase的reshape
#transpose is actually change the 0th axis and 1th axis, so it's turned from one (K*4) to k (1*4), for the sake of broadcasting laterS
#shift reshape后的transpose等同于0轴和1轴互换,就从1个k*4变成了k个1*4,方便后面的广播加法计算
anchor = anchor_base.reshape((1,A,4)) + shift.reshape((1,K,4)).transpose((1,0,2))
anchor = anchor.reshape((K*A,4)).astype(np.float32)#reshape成为所有anchor的矩阵形式
return anchor | [
"wangzh@deepblueai.com"
] | wangzh@deepblueai.com |
1be2d69acbec46ee753fc6db8b742d5347894dec | f793d068a05d5abeef66135425a22ac0ee6cb7df | /registru/wsgi.py | b3766ff0f5c5bc26652f73848e71db624dcc2a41 | [] | no_license | sinc144/registru | b18f13066fa364527120c5125f5221e68981e569 | 55079f6eca13afdeb332cb0b889fb2c6ac60b35f | refs/heads/master | 2021-04-06T03:11:46.378666 | 2018-03-15T11:33:50 | 2018-03-15T11:33:50 | 125,353,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for registru project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "registru.settings")
application = get_wsgi_application()
| [
"alecsa.alecs@gmail.com"
] | alecsa.alecs@gmail.com |
0a5f75f3493784e2864fbe6e0385c11db563cf4a | 80c43a62d7652dffa46306ecbb5e8f367403a26c | /Tech_Python/Python_kisokouza/Python基礎講座②/list_neural-network2.py | c0b9e7f84bc500f0441b6984f2a6bb511332c5f2 | [] | no_license | TakanoriYagi/Proseed_dmm_online | 4ddc0ed7202d31e70a2531c6ff1b8b36b4ec6ba9 | 40d7b6f1ddd8bb3a3390707667c9cdf97b2f3e5e | refs/heads/master | 2020-06-24T12:36:50.340477 | 2019-08-06T05:04:56 | 2019-08-06T05:04:56 | 198,964,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | x1 = 1.0
x2 = 2.0
x3 = -3.0
w1u1 = 3.0
w1u2 = 1.0
w1u3 = -3.0
w2u1 = 2.5
w2u2 = 2.0
w2u3 = -1.0
w3u1 = 4.5
w3u2 = -1.5
w3u3 = 5.0
w3 = -4.0
w4 = 1.5
w5 = 4.2
# それぞれの要素をリストで表示
X = [x1, x2, x3]
W_X = [[w1u1, w1u2, w1u3], [w2u1, w2u2, w2u3], [w3u1, w3u2, w3u3]]
W_U = [w3, w4, w5]
# u1, u2 , u3 を求める
u1 = X[0]*W_X[0][0] + X[1]*W_X[1][0] + X[2]*W_X[2][0]
u2 = X[0]*W_X[0][1] + X[1]*W_X[1][1] + X[2]*W_X[2][1]
u3 = X[0]*W_X[0][2] + X[1]*W_X[1][2] + X[2]*W_X[2][2]
# yを求める
y = u1*W_U[0] + u2*W_U[1] + u3*W_U[2]
print(y) | [
"takanori.jo@me.com"
] | takanori.jo@me.com |
663d615c02204499d6889f865bb4370a6ca20332 | cfd01f570ad4e54a159cb92df904b11266667500 | /Tarea1G06/Tarea1G06/settings.py | 3e46083e80dc472f3f67dee471fa4d5aae33d474 | [] | no_license | IN3501/tarea1-grupo06 | 85c278933dd3cc07327642f6f4af9ef6412b46d0 | e81ebf9d3fa123ed7988dc3b404ac1a1fd2e74fe | refs/heads/master | 2021-06-25T08:13:53.991713 | 2019-08-26T04:22:43 | 2019-08-26T04:22:43 | 204,368,162 | 0 | 0 | null | 2021-03-19T00:58:56 | 2019-08-26T00:49:42 | Python | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for Tarea1G06 project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g$h9k&j&959(7(l0-!dslq$m-bpk9yn4zuvnxkjb*n3yg5#si='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Tarea1G06.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Tarea1G06.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"cmontero@MacBook-Pro-de-cmontero.local"
] | cmontero@MacBook-Pro-de-cmontero.local |
27181cecd9421063c903d8f3ac12f702a58afdb0 | 74d094511ee9cb5cd113782ae818b77ccd0decd9 | /venv/Scripts/pip3.6-script.py | e20b3025b5c32d479c3f3a94bdb82cc47f96e295 | [] | no_license | woqls22/-Cryptocurrency_prices | 9ce36cf01dfcf49dea664542e339f09a3ccfef07 | 3086c9f22dc8b5920e7b592eacdd1caa22131353 | refs/heads/master | 2021-04-11T16:32:31.220295 | 2020-03-21T18:37:51 | 2020-03-21T18:37:51 | 249,037,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!C:\Users\woqls\AppData\Local\Programs\Python\Python36\crawling\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"woqls226@gmail.com"
] | woqls226@gmail.com |
9ad681dd43b6c7c3ad239efb29a60c322241f16f | 44091f741b544bc02527588f495ba3d4bb2d2f39 | /rasterise_all_at_once_from_postgis.py | f89bc14ae2fe74eb1ab255cb3e59dfc799fe9300 | [] | no_license | doctorluz/py-utils | 26abc84abd05dc4e81f77b508f4236f66b5470cd | 7ed5122e38d9b25673b3dc7ddf2a900a31bda744 | refs/heads/master | 2020-04-03T09:53:46.569586 | 2017-02-09T09:57:48 | 2017-02-09T09:57:48 | 62,695,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | import sys, json, os
import psycopg2
from db_processing_connect import db_connection_string
from subprocess import Popen
from rasterise_all_at_once_settings import *
try:
# create connection to database
conn = psycopg2.connect(db_connection_string)
# create a cursor object called cur
cur = conn.cursor()
# Make a list of unique values of id: going to try and burn them one at a time to go easier on memory
if whereClause != '':
strSql = """
SELECT %s FROM %s WHERE %s;
""" % (unique_id_field, tableName, whereClause)
else:
strSql = """
SELECT %s FROM %s;
""" % (unique_id_field, tableName)
print (strSql)
# execute the query
cur.execute(strSql)
# store the result of the query into Tuple c
myList = cur.fetchall()
# Create a command for the blank raster
if (not os.path.isfile(output_filename) or overwrite is True):
# create a base binary raster going to the edges of required region, 30 arc-second resolution
gdal_command = 'gdal_rasterize -co NBITS=1 -co COMPRESS=%s -ot Byte -burn 0 -a_srs %s -tr %s %s -te %d %d %d %d PG:\"%s\" -sql \"SELECT ST_SetSRID(ST_MakePolygon(ST_GeomFromText(\'LINESTRING(%d %d,%d %d, %d %d, %d %d, %d %d)\')), %d);\" %s' % (compressionStrategy, theProj, str(pixelRes), str(pixelRes), llx, lly, urx, ury, db_connection_string, llx,lly,llx,ury,urx,ury,urx,lly,llx,lly,epsg, output_filename)
proc = Popen(gdal_command, shell=True)
proc.wait()
if (proc.returncode != 0):
print proc.returncode
## #xmin,ymin,xmax,ymax = float(*extent)
## # was trying to cleverly unpack the list here, but it doesn't work
for theVal in myList:
theID = theVal[0]
print (theID)
gdal_command = 'gdal_rasterize -burn 1 '
if whereClause != '':
gdal_command += 'PG:\"%s\" -sql \"SELECT %s FROM %s WHERE %s=%d AND %s\" %s' %(db_connection_string, geometryFieldName, tableName, unique_id_field, theID, whereClause, output_filename)
else:
gdal_command += 'PG:\"%s\" -sql \"SELECT %s FROM %s WHERE %s=%d\" %s' %(db_connection_string, geometryFieldName, tableName, unique_id_field, theID, output_filename)
print gdal_command
proc = Popen(gdal_command, shell=True)
proc.wait()
if (proc.returncode != 0):
print proc.returncode
# closes the connection
conn.close()
except () as e:
print "ERROR"
print e.strerror
if conn:
conn.close()
| [
"doctorluz@gmail.com"
] | doctorluz@gmail.com |
2f961ffd53ac5c591c95cfb96f730b5bb45915e4 | 133e8c9df1d1725d7d34ea4317ae3a15e26e6c66 | /python/数据结构与算法/02链表/单链表.py | 9acd2da0cfb6f8e8a747ab11e1d4d6a83f289443 | [
"Apache-2.0"
] | permissive | 425776024/Learn | dfa8b53233f019b77b7537cc340fce2a81ff4c3b | 3990e75b469225ba7b430539ef9a16abe89eb863 | refs/heads/master | 2022-12-01T06:46:49.674609 | 2020-06-01T08:17:08 | 2020-06-01T08:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,886 | py | # -*- coding: utf-8 -*-
class Node(object):
def __init__(self, value=None, next=None):
# 这里我们 root 节点默认都是 None,所以都给了默认值
self.value = value # 值
self.next = next # 链接域, 指针
def __str__(self):
"""方便你打出来调试,复杂的代码可能需要断点调试"""
return '<Node: value: {}, next={}>'.format(self.value, self.next.value)
__repr__ = __str__
class LinkedList(object):
'''实现一个单向链表.'''
def __init__(self):
''' 初始化链表: 初始化时,为一个空链表.链表有两个标示head和tail都赋值None.'''
self.head = None
self.tail = None
def append(self, data):
'''
向链表新增元素:
1. 如果该链表是一个空链表,则链表head和tail都指向传进来的node节点.
2. 如果链表非空,则self.tail.next = node.next 指向新插入元素.
3. tail指向新插入的元素节点.
'''
node = Node(data)
if self.head is None:
self.head = node
self.tail = node
else:
self.tail.next = node
self.tail = node
def insert(self, index, value):
'''向链表插入一个元素node.
1. 从链表头开始遍历链表,当查找的index小于要插入索引的位置时,依次
指向下一个元素节点.直到找到要插入节点的索引位置.
2. 首先将插入的值,通过Node类实例化一个元素node.然后将它的next指针
指向它的下一个元素.即当前新元素节点之前的元素索引位置.
3. 将当前元素索引指向新插入元素node.
'''
cur = self.head
node = Node(value)
if index == 0:
node.next = self.head
if self.head is None:
self.tail = node
self.head = node
return
cur_index = 0
while cur_index < index - 1:
cur = cur.next
if cur.next is None:
raise Exception('list length less than index')
cur_index += 1
node.next = cur.next
cur.next = node
if cur.next is None:
self.tail = node
def remove(self, index):
'''从链表中删除一个元素节点.
1. 首先找到要删除的元素节点索引.
2. 然后将当前节点的next指向下一个下一个元素节点.
'''
cur = self.head
cur_index = 0
while cur_index < index-1:
cur = cur.next
if cur is None:
raise Exception('list length less than index')
cur_index +=1
cur.next = cur.next.next
if cur.next is None:
self.tail = cur
def removeEle(self, value):
""" 从链表中删除一个值
"""
cur = self.head
head = None
while cur is not None:
if cur.value == value:
if cur is self.head:
_head = cur.next
self.head = _head
if _head is self.tail:
self.tail = _head
del cur
return True
if cur is self.tail:
head.next = None
self.tail = head
del cur
return True
head.next = cur.next
del cur
return True
head = cur
cur = cur.next
return False
def iter(self):
'''
返回一个链表迭代器.
1. 首先判断该链表是否为一个空链表。如果时一个空链表,直接返回.
2. 如果是一个非空链表,首先指针指向head节点,然后将head节点data
返回.然后while循环,条件是下一个指针元素为真.然后弹出下一个元
素data,直到遍历到最后一个元素.
'''
if not self.head:
return
cur = self.head
yield cur.value
while cur.next:
cur = cur.next
yield cur.value
def __iter__(self):
for i in self.iter():
yield i
if __name__ == "__main__":
linked_list = LinkedList()
# 循环插入元素
for i in range(10):
linked_list.append(i)
# 向元素插入一个元素
linked_list.insert(0, 40)
# 向元素删除一个元素
linked_list.remove(4)
linked_list.removeEle(6)
# 遍历该链表
# for node in linked_list.iter():
# print node
# 遍历该链表
for node in linked_list:
print node
| [
"1248644045@qq.com"
] | 1248644045@qq.com |
48fa0b67d4214fe81cc12d36c2c97996bdba97a3 | 621a38539934504d82d81098ade33013a147cfc7 | /src/core/__init__.py | 8d6c6749a3005d8c4ca3766271ad17955a7f70b8 | [
"MIT"
] | permissive | chkp-eyalit/Karta | 2bf943f9a9721e8c8c5695a00002ba40bfdffec1 | 90602d73a9109f8f81ccb9978de6a5fc4ba4b8a3 | refs/heads/master | 2021-06-07T16:01:37.665386 | 2021-05-18T07:26:41 | 2021-05-18T07:26:41 | 177,569,979 | 0 | 0 | MIT | 2020-05-27T11:31:04 | 2019-03-25T11:06:04 | Python | UTF-8 | Python | false | false | 99 | py | from .file_layer import *
from .function_context import *
from .matching_engine import *
| [
"eyalit@checkpoint.com"
] | eyalit@checkpoint.com |
eda7d59af2ae751d7b25d53cd82272fde7a20c7d | eb19175c18053e5d414b4f6442bdfd0f9f97e24d | /tests/contrib_django/test_converter.py | 8c04699773df54369df8be04d36665643a5f9a55 | [
"MIT"
] | permissive | jhgg/graphene | 6c4c5a64b7b0f39c8f6b32d17f62e1c31ca03825 | 67904e8329de3d69fec8c82ba8c3b4fe598afa8e | refs/heads/master | 2020-12-25T21:23:22.556227 | 2015-10-15T19:56:40 | 2015-10-15T19:56:40 | 43,073,008 | 1 | 0 | null | 2015-09-24T14:47:19 | 2015-09-24T14:47:19 | null | UTF-8 | Python | false | false | 3,521 | py | from py.test import raises
from collections import namedtuple
from pytest import raises
import graphene
from graphene import relay
from graphene.contrib.django.converter import (
convert_django_field
)
from graphene.contrib.django.fields import (
ConnectionOrListField,
DjangoModelField
)
from django.db import models
from .models import Article, Reporter
def assert_conversion(django_field, graphene_field, *args):
field = django_field(*args, help_text='Custom Help Text')
graphene_type = convert_django_field(field)
assert isinstance(graphene_type, graphene_field)
assert graphene_type.description == 'Custom Help Text'
return graphene_type
def test_should_unknown_django_field_raise_exception():
with raises(Exception) as excinfo:
convert_django_field(None)
assert 'Don\'t know how to convert the Django field' in str(excinfo.value)
def test_should_date_convert_string():
assert_conversion(models.DateField, graphene.StringField)
def test_should_char_convert_string():
assert_conversion(models.CharField, graphene.StringField)
def test_should_text_convert_string():
assert_conversion(models.TextField, graphene.StringField)
def test_should_email_convert_string():
assert_conversion(models.EmailField, graphene.StringField)
def test_should_slug_convert_string():
assert_conversion(models.SlugField, graphene.StringField)
def test_should_url_convert_string():
assert_conversion(models.URLField, graphene.StringField)
def test_should_auto_convert_id():
assert_conversion(models.AutoField, graphene.IDField)
def test_should_positive_integer_convert_int():
assert_conversion(models.PositiveIntegerField, graphene.IntField)
def test_should_positive_small_convert_int():
assert_conversion(models.PositiveSmallIntegerField, graphene.IntField)
def test_should_small_integer_convert_int():
assert_conversion(models.SmallIntegerField, graphene.IntField)
def test_should_big_integer_convert_int():
assert_conversion(models.BigIntegerField, graphene.IntField)
def test_should_integer_convert_int():
assert_conversion(models.IntegerField, graphene.IntField)
def test_should_boolean_convert_boolean():
field = assert_conversion(models.BooleanField, graphene.BooleanField)
assert field.required is True
def test_should_nullboolean_convert_boolean():
field = assert_conversion(models.NullBooleanField, graphene.BooleanField)
assert field.required is False
def test_should_float_convert_float():
assert_conversion(models.FloatField, graphene.FloatField)
def test_should_manytomany_convert_connectionorlist():
graphene_type = convert_django_field(Reporter._meta.local_many_to_many[0])
assert isinstance(graphene_type, ConnectionOrListField)
assert isinstance(graphene_type.field_type, DjangoModelField)
assert graphene_type.field_type.model == Reporter
def test_should_manytoone_convert_connectionorlist():
graphene_type = convert_django_field(Reporter.articles.related)
assert isinstance(graphene_type, ConnectionOrListField)
assert isinstance(graphene_type.field_type, DjangoModelField)
assert graphene_type.field_type.model == Article
def test_should_onetoone_convert_model():
field = assert_conversion(models.OneToOneField, DjangoModelField, Article)
assert field.model == Article
def test_should_foreignkey_convert_model():
field = assert_conversion(models.ForeignKey, DjangoModelField, Article)
assert field.model == Article
| [
"me@syrusakbary.com"
] | me@syrusakbary.com |
325a673abe7fc34434f96cd346a1cfe3a1605b70 | 27c75f5179703e46de81c6306765f8347953ef07 | /Unit4/sps.py | d14c54a6bf8cce5df597794714098b437ec8a6ec | [] | no_license | eachofwhich/Udacity_CS212 | 735426d161233a5cb387747ff426e19b42ddaded | e194ea2b18136125c925e21dcce36a2f416f1b62 | refs/heads/master | 2016-09-01T17:14:49.220091 | 2012-09-23T19:42:06 | 2012-09-23T19:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,755 | py | # -----------------
# User Instructions
#
# Write a function, shortest_path_search, that generalizes the search algorithm
# that we have been using. This function should have three inputs, a start state,
# a successors function, and an is_goal function.
#
# You can use the solution to mc_problem as a template for constructing your
# shortest_path_search. You can also see the example is_goal and successors
# functions for a simple test problem below.
def shortest_path_search(start, successors, is_goal):
"""Find the shortest path from start state to a state
such that is_goal(state) is true."""
if is_goal(start): return [start]
to_explore, explored = [[start]], set()
while to_explore:
path = to_explore.pop(0)
last_state = path[-1]
for state, action in successors(last_state).items():
if state in explored: continue
explored.add(state)
new_path = path + [action, state]
if is_goal(state): return new_path
to_explore.append(new_path)
return []
def mc_problem1(start=(3, 3, 1, 0, 0, 0), goal=None):
    """Solve the missionaries-and-cannibals puzzle by breadth-first search.

    A state is six ints (M1, C1, B1, M2, C2, B2): missionaries, cannibals
    and boats on the start side (1) and the far side (2).  When `goal` is
    omitted it defaults to "everyone and the boat moved across".
    Returns a [state, action, state, ...] path, or Fail when unsolvable.
    """
    target = (0, 0, 0) + start[:3] if goal is None else goal
    if start == target:
        return [start]
    visited = set()    # states already queued
    queue = [[start]]  # FIFO of partial paths
    while queue:
        current = queue.pop(0)
        for nxt, move in csuccessors(current[-1]).items():
            if nxt in visited:
                continue
            visited.add(nxt)
            longer = current + [move, nxt]
            if nxt == target:
                return longer
            queue.append(longer)
    return Fail
Fail = []
def csuccessors(state):
    """Map each successor state to the boat action that produces it.

    A state where cannibals outnumber (and thus eat) missionaries on
    either bank is terminal: it has no successors.
    """
    M1, C1, B1, M2, C2, B2 = state
    # Dining state: no moves are allowed out of it.
    if C1 > M1 > 0 or C2 > M2 > 0:
        return {}
    result = {}
    # Boat on side 1: each delta ferries passengers to side 2.
    if B1 > 0:
        for delta, a in deltas.items():
            result[sub(state, delta)] = a + '->'
    # Boat on side 2: the same loads travel back (later entries win on
    # key collision, matching the original dict(items) construction).
    if B2 > 0:
        for delta, a in deltas.items():
            result[add(state, delta)] = '<-' + a
    return result
def add(X, Y):
    "Componentwise sum of two equal-length vectors, as a tuple."
    return tuple(a + b for a, b in zip(X, Y))
def sub(X, Y):
    "Componentwise difference X - Y of two equal-length vectors, as a tuple."
    return tuple(a - b for a, b in zip(X, Y))
# Boat loads: each delta is the population change (M1, C1, B1, M2, C2, B2)
# of ferrying the named passengers from side 1 to side 2 ('M' missionary,
# 'C' cannibal); negating a delta is the return trip.
deltas = {(2, 0, 1, -2, 0, -1): 'MM',
          (0, 2, 1, 0, -2, -1): 'CC',
          (1, 1, 1, -1, -1, -1): 'MC',
          (1, 0, 1, -1, 0, -1): 'M',
          (0, 1, 1, 0, -1, -1): 'C'}
Fail = []  # sentinel returned when no path to the goal exists
# --------------
# Example problem
#
# Let's say the states in an optimization problem are given by integers.
# From a state, i, the only possible successors are i+1 and i-1. Given
# a starting integer, find the shortest path to the integer 8.
#
# This is an overly simple example of when we can use the
# shortest_path_search function. We just need to define the appropriate
# is_goal and successors functions.
def is_goal(state):
    """Goal test for the toy search problem: the goal is the integer 8."""
    # The comparison is already a bool; the original if/else returning
    # True/False was redundant.
    return state == 8
def successors(state):
    """Successor map for the toy problem: from integer i you may move to
    i+1 (action '->') or i-1 (action '<-')."""
    # Fix: the original bound the mapping to a local named `successors`,
    # shadowing the function's own name; use a distinct local name.
    moves = {state + 1: '->',
             state - 1: '<-'}
    return moves
# Smoke test: the shortest path from 5 to the goal (8) is three '->' steps.
assert shortest_path_search(5, successors, is_goal) == [5, '->', 6, '->', 7, '->', 8]
| [
"eliburmin@gmail.com"
] | eliburmin@gmail.com |
c8d1e0aea95375d24f5cd8721a8724c5a9da2785 | 9290fbfd26accae2b46f8693d633ceb418f356c4 | /spring-2017/src/others/hw2-ref.py | c8800b9261b3519bf76a9e20a52148dc9dd87c09 | [] | no_license | ngvinay/python-projects | 97e88e303ab2afaf6c5202e333f98fab4ad7a30c | 5ec71e9d2617bade302c0b8d3f7e6aa1645a49d0 | refs/heads/master | 2021-07-17T03:22:20.261290 | 2017-10-23T07:22:12 | 2017-10-23T07:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,686 | py | from __future__ import print_function
import sys
import numpy as np
if __name__ == "__main__":
    # Expect exactly one CLI argument: the path of the input CSV file.
    if len(sys.argv) != 2:
        print("Usage: home_work_1 <input file name>", file=sys.stderr)
        exit(-1)
    inputFileName = sys.argv[1]
    from pyspark import SparkContext, SparkConf
    conf = SparkConf().setAppName("HomeWorkTwo")
    sc = SparkContext(conf=conf)
    # Load the raw CSV lines as an RDD; features are normalized further down
    # so gradient descent converges.
    msDataRDD = sc.textFile(inputFileName)
    def rootMeanSqrdError(targetAndPred):
        # RMSE over an RDD of (target, prediction) tuples.
        return np.sqrt(targetAndPred.map(lambda targetAndPredTuple: (targetAndPredTuple[0] - targetAndPredTuple[1]) ** 2 ).mean())
from pyspark.mllib.regression import LabeledPoint
    def parseLabeledPoint(line):
        """Parse one CSV line: first column is the label, the rest features."""
        columnValues = line.split(',')
        label, features = columnValues[0], columnValues[1:]
        return LabeledPoint(label, features)
labels = msDataRDD.map(lambda x: x.split(',')[0]).collect()
minYear = float(min(labels))
rawLabeledPoints = msDataRDD.map(parseLabeledPoint)
labeledPoints = rawLabeledPoints.map(lambda lp: LabeledPoint(lp.label - minYear, lp.features))
labels = labeledPoints.map(lambda x: x.label)
features = labeledPoints.map(lambda x: x.features)
from pyspark.mllib.feature import Normalizer
normalizer = Normalizer()
data = labels.zip(normalizer.transform(features))
parsedData = data.map(lambda lp: LabeledPoint(lp[0],lp[1]))
#Part 1
    def lossFunction(weights,lp):
        """
        Gradient summand for squared loss: (w.T x - y) * x for a single
        labeled point `lp` under the weight vector `weights`.
        """
        return np.dot((weights.dot(lp.features) - lp.label) , lp.features)
from pyspark.mllib.linalg import DenseVector
#test example one
weightOne = DenseVector([4, 5, 6])
lpExampleOne = LabeledPoint(3.0, [6, 2, 1])
costOne = lossFunction(weightOne, lpExampleOne)
print('Loss of first example is {0}'.format(costOne))
#test example two
weightTwo = DenseVector([1.5, 2.2, 3.4])
lpExampleTwo = LabeledPoint(5.0, [3.4, 4.1, 2.5])
costTwo = lossFunction(weightTwo, lpExampleTwo)
print('Loss of second example is {0}'.format(costTwo))
#Part 2
    def labelAndPrediction(weights, observation):
        """
        Return a (label, prediction) tuple for one LabeledPoint
        `observation` under the weight vector `weights`.
        """
        return (observation.label, weights.dot(observation.features))
predictionExampleRdd = sc.parallelize([LabeledPoint(3.0, np.array([6,2,1])),
LabeledPoint(5.0, np.array([3.4, 4.1, 2.5]))])
labelAndPredictionOutput = predictionExampleRdd.map(lambda lp: labelAndPrediction(weightOne, lp))
print(labelAndPredictionOutput.collect())
#Part 3
    def gradientDescent(trainData, numIters):
        """
        Batch gradient descent for linear regression with squared loss.
        Returns (theta, errorTrain): the learned weight vector and the
        training RMSE recorded before each of the numIters updates.
        """
        n = trainData.count()
        noFeatures = len(trainData.take(1)[0].features)
        theta = np.zeros(noFeatures)
        learnRate = 1.0
        # We will compute and store the training error after each iteration
        errorTrain = np.zeros(numIters)
        for i in range(numIters):
            print('Iteration# {0} completed'.format(i+1))
            labelsAndPredsTrain = trainData.map(lambda lp: labelAndPrediction(theta, lp))
            errorTrain[i] = rootMeanSqrdError(labelsAndPredsTrain)
            # Full-batch gradient: sum of per-point summands (w.T x - y) x.
            gradient = trainData.map(lambda lp: lossFunction(theta, lp)).sum()
            # Decaying step size: alpha / (n * sqrt(iteration)).
            tempLR = learnRate / (n * np.sqrt(i+1))
            theta -= tempLR * gradient
        return theta, errorTrain
#split dataset
trainData, validationData, testData = parsedData.randomSplit([.7, .2, .1], 52)
trainData.cache()
#test
n = 5
noOfFeatures = 5
gradientExample = (sc
.parallelize(trainData.take(n))
.map(lambda lp: LabeledPoint(lp.label, lp.features[0:noOfFeatures])))
print(gradientExample.take(1))
exampleWeights, exampleTrainingError = gradientDescent(gradientExample, 5)
print(exampleWeights)
gradientExample.map(lambda lp: labelAndPrediction(exampleWeights, lp)).collect()
#Part 4
#Train our model on training data and evaluate the model based on validation set.
numIters = 50
trainWeights, trainingRMSE = gradientDescent(trainData, numIters)
trainLabelAndPred = trainData.map(lambda lp: labelAndPrediction(trainWeights, lp))
trainRMSE = rootMeanSqrdError(trainLabelAndPred)
valLabelsAndPreds = validationData.map(lambda lp: labelAndPrediction(trainWeights, lp))
valRMSE = rootMeanSqrdError(valLabelsAndPreds)
print('Validation RMSE:\n\tTraining = {0:.3f}\n\tValidation = {1:.3f}'.format(trainRMSE,
valRMSE))
#Validation RMSE:
# Training = 11.948
# Validation = 11.943
#Part 5
from matplotlib.cm import get_cmap
from matplotlib.colors import ListedColormap, Normalize
import matplotlib.pyplot as plt
norm = Normalize()
cmap = get_cmap('YlOrRd')
clrs = cmap(np.asarray(norm(np.log(trainingRMSE))))[:,0:3]
fig, ax = plt.subplots()
plt.scatter(range(0, 50), np.log(trainingRMSE), s=14**2, c=clrs, edgecolors='#888888', alpha=0.75)
ax.set_xlabel('Iteration'), ax.set_ylabel(r'$\log_e(trainingRMSE)$')
#Part 6
testLabelsAndPreds = testData.map(lambda lp: labelAndPrediction(trainWeights, lp))
testRMSE = rootMeanSqrdError(testLabelsAndPreds)
print('Test RMSE:\n\tTest = {0:.3f}'.format(testRMSE))
#Validation RMSE:
# Test = 11.990 | [
"dipankar.biswas@teamaol.com"
] | dipankar.biswas@teamaol.com |
7d650c2929ce6966ec2dcfb1a114dd7f07f03ec1 | 586c448195b07549ea518b506534eac140f7194c | /recount/__init__.py | 81849e5a73593d7c2cc3a4ad22b48369491f4ff5 | [] | no_license | jgrison/recount | 01bcfccd184dd356d1185971dea8dd240e48e2bb | 8b7f0858dd8943141a4a84bbe26b2bdd1eebdbf7 | refs/heads/master | 2021-01-01T17:56:55.896372 | 2014-10-04T14:22:17 | 2014-10-04T14:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | from flask import Flask, request, g, redirect, render_template, url_for, flash, request
app = Flask(__name__)  # single application instance for this module
# Homepage
@app.route('/')
def home():
    """Render the site landing page."""
    return render_template('index.html')
# Lists all the reports we've made
@app.route('/reports')
def reports():
    """Render the report listing page."""
    return render_template('reports/index.html')
# Generate New Report
@app.route('/reports/generate', methods=['POST'])
def generate_report():
    """Generate a report (placeholder) and bounce back to the listing."""
    return redirect(url_for('reports'))
# Delete a past report
@app.route('/reports/delete/<id>')
def delete_report(id):
    """Delete the report identified by `id` (placeholder: re-renders the list).

    Bug fix: the route declares an <id> URL variable, so Flask invokes the
    view with id as a keyword argument; without the parameter every request
    to this URL raised a TypeError.
    """
    return render_template('reports/index.html')
# Create a new report type
@app.route('/reports/build/add')
def add_report_type():
    """Render the form for creating a new report type."""
    return render_template('reports/add.html')
# Edit a report type
@app.route('/reports/build/edit/<id>')
def edit_report_type(id):
    """Render the edit form for the report type identified by `id`."""
    return render_template('reports/edit.html')
# Delete a report type
@app.route('/reports/build/delete/<id>')
def delete_report_type(id):
    """Delete the report type identified by `id` (placeholder) and redirect."""
    return redirect(url_for('reports'))
if __name__ == "__main__":
app.run() | [
"jgrison@vicimus.com"
] | jgrison@vicimus.com |
3ecfeaa8f31a83953a4fb15ebd5095dea283bb3b | 50353ceacf14742bcb33f747ed6b5eddff4de8fd | /DAY4/arraysorting1.py | 7130b87501d0414da73ac1c1a10e05d950beb9ce | [] | no_license | manoznp/LearnPython-Challenge | 0384605bf086f31a46f94c3029e30db9047fd99e | 6c7f603a7c94f9c2724c72f7bf1a241fb7332804 | refs/heads/master | 2020-04-08T23:10:03.703926 | 2018-12-02T10:34:56 | 2018-12-02T10:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #Descending order
def sort_Ascending(data):
    """Sort `data` in place in DESCENDING order, then print it.

    NOTE(review): despite the name, `data[i] < data[j]` moves larger
    elements to the front, matching this file's '#Descending order'
    comment; the name is kept for caller compatibility.
    """
    # Selection-sort-style double loop; the Python tuple swap replaces
    # the original three-statement temp-variable shuffle.
    for i in range(0, len(data)-1):
        for j in range(i+1, len(data)):
            if data[i] < data[j]:
                data[i], data[j] = data[j], data[i]
    print(data)
# sample array
a = [1,3,5,7,2,4]
# sorts in place (descending) and prints the result
sort_Ascending(a)
"mynameismanozacharya@gmail.com"
] | mynameismanozacharya@gmail.com |
ee26cb6d30f2a114158c6fc07b2ba1139a0992f3 | e41e3ec38419b6efe5468632d2c5f60440eafb07 | /system/views.py | 9124d0862f04a4aa4fc9d67ca2e3887d344cf476 | [] | no_license | aytekinaygun/NagiConfig | c6a8744c339d686b26b840440bda6a8edfa59035 | 6d5a1ace7d345fdbb5e825ecc8c3212d90704eb7 | refs/heads/master | 2022-05-04T19:04:02.902402 | 2020-10-05T12:57:48 | 2020-10-05T12:57:48 | 163,303,011 | 1 | 0 | null | 2022-04-22T21:02:49 | 2018-12-27T14:23:34 | Python | UTF-8 | Python | false | false | 2,925 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from host_groups.models import Host_Groups
from hosts.models import Hosts
from services.models import Services, ServiceCommand
import os
@login_required()
def system(request):
    """Render the (currently empty) system administration page."""
    context = {}
    return render(request, 'system.html', context)
@login_required()
def nagios_restart(request):
    """Regenerate the Nagios object-config files from the database models
    and restart the Nagios service.

    Writes conf.d/nc-hostgroups.cfg, nc-hosts.cfg and nc-services.cfg,
    then runs `systemctl restart nagios4.service` and redirects to /system/.

    Fix: the three open()/close() pairs are replaced by `with` blocks so
    the file handles are closed even if a database query raises mid-loop.
    """
    # Define Host Groups
    # -------------------------------------
    hostgroups = Host_Groups.objects.all()
    with open('conf.d/nc-hostgroups.cfg', 'w') as f:
        for hg in hostgroups:
            f.write('define hostgroup{\n')
            f.write('    hostgroup_name    %s\n' % ('grup-' + hg.hostgroup_name))
            f.write('    alias    %s\n' % (hg.alias))
            f.write('}\n')
    # Define Hosts
    # -------------------------------------
    hosts = Hosts.objects.filter(is_active=True)
    with open('conf.d/nc-hosts.cfg', 'w') as f:
        for h in hosts:
            f.write('define host{\n')
            f.write('    use    windows-server\n')
            f.write('    host_name    %s\n' % (h.host_name))
            f.write('    alias    %s\n' % (h.alias))
            f.write('    address    %s\n' % (h.address))
            # parents: space-joined then rewritten as a comma list
            p_list = ''
            for ho in h.parents.all():
                p_list = p_list + ho.host_name + ' '
            p_list = p_list.strip().replace(' ', ', ')
            if p_list != '':
                f.write('    parents    %s\n' % (p_list))
            # hostgroups: same comma-list construction, 'grup-' prefixed
            hg_list = ''
            for hg in h.hostgroups.all():
                hg_list = hg_list + 'grup-' + hg.hostgroup_name + ' '
            hg_list = hg_list.strip().replace(' ', ', ')
            if hg_list != '':
                f.write('    hostgroups    %s\n' % (hg_list))
            f.write('}\n')
    # Define Services
    # -------------------------------------
    services = Services.objects.exclude(hosts__isnull=True)  # skip services with no host relation
    with open('conf.d/nc-services.cfg', 'w') as f:
        for s in services:
            success = 1
            define = ('define service{\n')
            define += ('    use    generic-service\n')
            define += ('    service_description    %s\n' % (s.service_description))
            # Hosts: only active ones; a service with no active host is dropped
            h_list = ''
            for h in s.hosts.filter(is_active=True):
                h_list += h.host_name + ' '
            h_list = h_list.strip().replace(' ', ', ')
            if h_list == '':
                success = 0
            define += ('    host_name    %s\n' % (h_list))
            # NOTE(review): looking up ServiceCommand by s.service_description_id
            # assumes the two tables share ids — confirm against the models.
            cmd = ServiceCommand.objects.get(id=s.service_description_id)
            define += ('    check_command    %s\n' % (cmd.check_command))
            define += ('}\n')
            if success == 1:
                f.write(define)
    os.system('systemctl restart nagios4.service')
    messages.success(request, 'Nagios tekrar başlatıldı.', extra_tags='alert-success')
    return redirect('/system/')
| [
"aytekinaygun@gmail.com"
] | aytekinaygun@gmail.com |
ec9970dcf780bcafd195a9680fad730328cbdf8e | 7d80e6d69454596dd6efc814bd808e6c115071c1 | /util/stemmedCountVectorizer.py | d1d8f1953aa9dd0840d06b375556628960125a80 | [] | no_license | nilankamanoj/solvesoft_scanme_core | e0485d584eaf2f4ec146d2e444f5238a322c3743 | ec946e538df16fd71324a5478660472beabc144b | refs/heads/master | 2022-12-09T20:40:26.616732 | 2019-12-08T14:53:24 | 2019-12-08T14:53:24 | 193,545,694 | 0 | 1 | null | 2022-12-08T06:35:38 | 2019-06-24T17:00:29 | HTML | UTF-8 | Python | false | false | 426 | py | from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
# Shared Snowball stemmer; ignore_stopwords=True leaves stopwords unstemmed.
stemmer = SnowballStemmer("english", ignore_stopwords=True)
class StemmedCountVectorizer(CountVectorizer):
    # CountVectorizer variant whose analyzer stems every produced token.
    def build_analyzer(self):
        """Wrap the parent analyzer so each token it yields is stemmed."""
        analyzer = super(StemmedCountVectorizer, self).build_analyzer()
        # global_analyzer = analyzer
        return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
| [
"nilankaeng16@gmail.com"
] | nilankaeng16@gmail.com |
21b6deb849e7b391aabeb811cc79bf8b7ccee1eb | 21238a26742309adb860a04174ea5360f729ad39 | /SourceCode/.history/Detector_20181224025625.py | b39a3a2293f57ceff29bef9d0e2a2f2758353cac | [] | no_license | Shehabalaa/Viola-Jones-Face-Detection | 5b5d0c3835e0de11658d35941fa3d19468452e93 | b6522b96394df8d67266b41a803bc30a93fc5c49 | refs/heads/master | 2020-04-23T03:08:56.976486 | 2019-06-23T10:39:25 | 2019-06-23T10:39:25 | 170,869,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,957 | py | from IntegralImage import toIntegralImage as toII
import cv2
import numpy as np
import random
from sklearn.cluster import MeanShift
from Cascade import Cascade
import itertools
import Utils
from math import floor
from functools import partial
from multiprocessing import Pool
base_detector_width = 24.  # side length (px) of the base detection window
def preProcess(image, gamma=2):
    """Blur and gamma-correct a grayscale image before detection.

    Returns a new uint8 image; `gamma` > 1 brightens mid-tones via an
    inverse-gamma lookup table.
    """
    image = cv2.blur(image,(5,5))
    #image = cv2.equalizeHist(image)
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
        for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    image =cv2.LUT(image, table)
    return image
def meanShift(points):
    """Cluster 2-D points with scikit-learn MeanShift; return the centers."""
    clustering = MeanShift().fit(points)
    return clustering.cluster_centers_
def non_max_suppression_fast(boxes, overlapThresh):
    """Greedy non-maximum suppression (Malisiewicz variant).

    `boxes` is an (N, 4) array of [x1, y1, x2, y2] corners.  Boxes are
    considered in order of their bottom edge; any remaining candidate
    whose intersection covers more than `overlapThresh` of its own area
    is discarded.  Returns the survivors as an int array ([] when empty).
    """
    if len(boxes) == 0:
        return []
    # Work in floats so the overlap division below is exact.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    left, top = boxes[:, 0], boxes[:, 1]
    right, bottom = boxes[:, 2], boxes[:, 3]
    # Pixel-inclusive area of every box.
    areas = (right - left + 1) * (bottom - top + 1)
    # Candidate order: ascending bottom-edge coordinate.
    order = np.argsort(bottom)
    keep = []
    while order.size > 0:
        tail = order.size - 1
        current = order[tail]
        keep.append(current)
        rest = order[:tail]
        # Intersection of the current box with each remaining candidate.
        iw = np.maximum(0, np.minimum(right[current], right[rest])
                        - np.maximum(left[current], left[rest]) + 1)
        ih = np.maximum(0, np.minimum(bottom[current], bottom[rest])
                        - np.maximum(top[current], top[rest]) + 1)
        frac = (iw * ih) / areas[rest]
        # Drop the current box plus everything overlapping it too much.
        order = np.delete(order, np.concatenate(
            ([tail], np.where(frac > overlapThresh)[0])))
    return boxes[keep].astype("int")
def detect(image,Evaluator):
    """Sliding-window face detection over a grayscale image.

    Scans square windows from 24px upward (scale factor 1.25, growing
    stride), resizes each window to 24x24, variance-normalizes it, builds
    its integral image and asks the cascade `Evaluator` which windows are
    faces.  Returns a list of ((row, col), shape) tuples for the hits.
    """
    w_h_pairs=[]
    all_detected_squares = []
    w = 24 # width and height are equals as i will scan image in squares
    h = 24
    offset_w = 2
    offset_h = 2
    image_parts_ranges=[]
    image_parts_values=[]
    while(w<200 and h < image.shape[0] and w<image.shape[1]):
        r = list(range(0, image.shape[0]-h-1,int(offset_h)))
        c = list(range(0,image.shape[1]-w-1,int(offset_w)))
        new_range = list(itertools.product(r, c))
        image_parts_ranges += list(itertools.product(r, c))
        image_parts_values += list(map(lambda p: np.array(image[p[0]:p[0]+h, p[1]:p[1]+w]),new_range))
        offset_w +=.5
        offset_h +=.5
        w = int(round(w*1.25))
        h = int(round(h*1.25))
    #for img in image_parts_values:
    #    cv2.imshow('a', img)
    #    cv2.waitKey(0)
    image_parts_values = [cv2.resize(img,(24,24)) for img in image_parts_values]
    image_parts_values_normalized = list(map(Utils.varianceNormalize,image_parts_values))
    ii_parts_values = list(map(toII,image_parts_values_normalized))
    # NOTE(review): shape is taken AFTER the 24x24 resize, so every hit is
    # reported as a 24x24 box regardless of the window scale that matched —
    # looks like it should record the pre-resize window size; confirm.
    all_detected_squares = [(image_parts_ranges[i],image_parts_values[i].shape) for i in Evaluator.predict(ii_parts_values)]
    return all_detected_squares
'''
def detectScaleDetector(ii,Evaluator):
w_h_pairs=[]
all_detected_squares = []
w = 80 # width and height are equals as i will scan image in squares
h = int(1.25*(w))
offset_w = 10
offset_h = 10
ii_parts_ranges=[]
ii_parts_values=[]
while(w < ii.shape[0] and w<ii.shape[1]):
r = list(range(0, ii.shape[0]-h,offset_h))
c = list(range(0,ii.shape[1]-w,offset_w))
ii_parts_ranges = list(itertools.product(r, c))
ii_parts_values = list(map(lambda p: ii[p[0]:p[0]+h, p[1]:p[1]+w],ii_parts_ranges))
ii_parts_values = [cv2.resize(ii,(24,24)) for ii in ii_parts_values]
all_detected_squares += [ii_parts_ranges[i] for i in Evaluator.predict(ii_parts_values,(1,1)] #(w/24.,h/24.)
offset_w += 1
offset_h += 1
if(len(all_detected_squares)):
w_h_pairs.append((len(all_detected_squares), w,h))
w = int(round(w*1.5))
return all_detected_squares,w_h_pairs
'''
def main():
    """Run the cascade detector on a sample image, draw the surviving
    boxes, display the result and save it to disk."""
    Evaluator = Cascade('../Cascade/')
    #cap = cv2.VideoCapture(0)
    #while(True):
    # Capture frame-by-frame
    #ret,frame = cap.read()
    frame = cv2.imread("faces2.jpg")
    frame = cv2.resize(frame,(600,400))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #cv2.imshow('frame',gray)
    #cv2.waitKey(0);
    gray = cv2.blur(gray,(5,5))
    recs = detect(gray,Evaluator)
    #recs,w_h_pairs = detectFast(toII(Utils.varianceNormalize(gray)),Evaluator)
    # Convert ((row, col), (h, w)) detections into [x1, y1, x2, y2] boxes.
    recs = np.array([[recs[i][0][1],recs[i][0][0],recs[i][0][1]+recs[i][1][1],recs[i][0][0]+recs[i][1][0]] for i in range(len(recs))])
    recs = non_max_suppression_fast(recs,.1)
    [cv2.rectangle(frame,(rec[0],rec[1]),(rec[2],rec[3]), (255, 0, 0), 2) for rec in recs ]
    cv2.imshow('frame',frame)
    cv2.waitKey(0)
    # Bug fix: cv2.imwrite requires the image as its second argument; the
    # original call omitted it and raised TypeError before saving anything.
    cv2.imwrite("dtectedface2s.jpg", frame)
    #cap.release()
    #cv2.destroyAllWindows()
if __name__ == "__main__":
    main()  # script entry point
"""
Take raw frame before any previos processiong just in gray lvl
return hand's postion as x,y,w,h
w=30
h=30
doubts=[]
imagnge(len(res)):
if(res[i]==1):
doubts.append(pos_of_images_to_detect[i])
doubts2.append(rec_of_images_to_detect[i])
print("Num of Scanned:{0}\nNum of TP:{1}\nNum of FN:{2}\n ".format(len(res),sum(res),len(res)-sum(res)))
return doubts,nonMaxSuppression(doubts2,0.1)
#return nonMaxSuppression(doubts,0.1)
'''
true_point=(0,0)
true_point_doubts=0
for x in range(0,gray.shape[0],40):
for y in range(0,gray.shape[1],40):
tmp_point_doubts=0
for doubt in doubts:
if(doubt[2]>=x>=doubt[0] and doubt[3]>=y>=doubt[1]):
tmp_point_doubts+=1
if(tmp_point_doubts>true_point_doubts):
true_point=(y,x)
true_point_doubts=tmp_point_doubts
return true_point
'''es_to_detect=[]
pos_of_images_to_detect=[]
rec_of_images_to_detect=[]
while(True):
if(w >=gray.shape[0]):
break
w=int(w*2)
h=int(h*2)
for r in range(0,gray.shape[0]-h+1,15):
for c in range(0,gray.shape[1]-w+1,15):
#TODO scalling feature instead of resising image
new = cv2.resize(gray[r:r+h,c:c+w],(28,28))
#new = preProcess(new,1.2)
#cv2.imshow('new',new)
#cv2.waitKey(0)
images_to_detect.append(new)
rec_of_images_to_detect.append((c,r,c+w,r+w)) #append postions not as row and colums
pos_of_images_to_detect.append((int(c+w/2),int(r+w/2))) #append postions not as row and colums
images_ii_to_detect = list(map(toII, images_to_detect))
res = sc.predict(images_ii_to_detect)
doubts2=[]
"""
| [
"shehabalaa97@gmail.com"
] | shehabalaa97@gmail.com |
8c27fefec88b9cb1c72f0f422651288309a83dab | de1c72ceb7c2a302ce809d7cb98044e187cd8178 | /src/LNEC/SAR_Tiling.py | 8ee4bf5bee0204f03bffac9c858285f963517547 | [] | no_license | ouc-cook/coresyf-toolkit | 8004d08af93172ecf251418c88530bc34b23fa87 | 98f189d6a8e24430ea42c21e8dbfbb5bd76dee8c | refs/heads/master | 2020-04-13T22:49:57.056157 | 2018-02-19T15:09:37 | 2018-02-19T15:09:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,925 | py | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
=====================================================================================================
Co-ReSyF Research Application: Image Processing and Subsets definition
Authors: Florent Birrien and Alberto Azevedo and Francisco Sancho
Date: July/2017
Last update: Sept/2017
=====================================================================================================
"""
#
import os,sys,shutil
#
import numpy as np
#
import Toolbox.CSAR_Classes as CL
import Toolbox.CSAR_Utilities as UT
import Toolbox.CSAR_ImageProcessing as IP
import Toolbox.CSAR_Subsets as SUB
#
#-------------------
# input parameters
#-------------------
# Parse CLI/config input: subset parameters, image parameters, raw args,
# and the verbosity flag.
SubsetsParameters, ImageParameters, args, verbose = IP.InputSubsetParameters()
#**********************************
#
# Pre-processing step
#
#**********************************
# clean old directories and create new ones
if os.path.isdir('Output'):
	shutil.rmtree('Output')
# create new directory and subdirectories
Main_Dir, Sub_Dir = ['Output'], ['SubsetSpectra', 'Results', 'Bathymetry']
UT.CreateDirectories(Main_Dir, Sub_Dir);
#------------
# read image
#------------
if verbose:
	print '|------------------------------------------------|'
	print '| Read and Process SAR image |'
	print '|------------------------------------------------|'
coordinates, image, pixelresolution = IP.ReadSARImg(ImageParameters)
FlagFlip = IP.CheckImageOrientation(coordinates)	# check whether preprocessing image flip process affects direction estimate
data = CL.Subset(0, image, coordinates, pixelresolution, FlagFlip)	# store main data (image, coordinates) as list
if verbose:
	print 'nb of pixels (x,y)', coordinates.easting.shape[0], coordinates.northing.shape[1]
	print 'pixel resolution (m)', pixelresolution
# npz files to (1) save processed and image coordinates and (2) save parameters
UT.Create_Image_Parameters_TransferFile(SubsetsParameters, ImageParameters, data)
#UT.CreateImageParametersNPZ(parameters,data)
#---------------------
# read grid points
#---------------------
if verbose:
	print '|--------------------------------|'
	print '| Read Grid Points |'
	print '|--------------------------------|'
Points, flagbathy = UT.ReadGridPoints(args, coordinates)
if verbose:
	print 'number of grid points', Points.shape[0]
#------------------------------
# inversion crucial parameters
#------------------------------
# Abort early: inversion needs either a reference bathymetry or a peak
# wave period (Tp); with neither, nothing downstream can run.
if (not flagbathy) and (args.Tp == 0):
	sys.exit("not enough input data (bathymetry/Tp) to perform bathymetry inversion")
#-------------------------
# get subset dimensions
#-------------------------
dimension = SUB.GetBoxDim(SubsetsParameters, data)
#---------------------------------------------------------------------------
# parallelised and run the Spectrum/Inversion scripts for each point subset
#---------------------------------------------------------------------------
Spectra = []; wavelength=[]; bathymetry = []; ComputationPoints = [];
if verbose:
	print '|-------------------------------|'
	print '| Create Subsets |'
	print '|-------------------------------|'
# For every grid point, cut the image subset(s) around it and persist them
# as transfer files for the downstream spectrum/inversion steps.
for index, point in enumerate(Points):
	#***********************
	# Subset definitions
	#***********************
	# point indices (related to image pixels)
	Point = np.array([point.IndexEasting, point.IndexNorthing])
	# gather subset data
	Subsetparameters = CL.SubsetParameters(Point, SubsetsParameters.DomainDimension, SubsetsParameters.FlagPowerofTwo, SubsetsParameters.Shift, SubsetsParameters.BoxNb)
	# main subset
	subset = SUB.GetImageSubset(Subsetparameters, data, dimension)
	# computation subsets (5 or 9 boxes)
	Subsets = SUB.GetFFTBoxes(Subsetparameters, data, dimension)
	# store data
	UT.Create_Subset_TransferFile(index, SubsetsParameters, point, Subsets, args.output)
| [
"tiago.mendes@deimos.com.pt"
] | tiago.mendes@deimos.com.pt |
92b9a7b14e7c1602e3296d2d6da514b5bb2b768c | 24874a352571a52aad2fd6d6aedce6cfe78db364 | /loft_auth.py | fe6f71096b238a3f3e46dc79c23d3b70cbee4b21 | [] | no_license | RetailArchitects/xmpp_auth | a9cd33518c4e48c72dcd815c65e97056e17130fd | 117d23b876a408411675563701417fe21ce24201 | refs/heads/master | 2020-04-14T08:59:38.345495 | 2013-04-01T17:42:57 | 2013-04-01T17:42:57 | 9,152,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | #!/usr/bin/python
import requests
from requests.auth import HTTPBasicAuth
import json
import sys, os, logging
from struct import *
# Loft authentication webservice endpoint.
url = 'https://simon.retailarchitects.com/tg/authenticate'
# Redirect stderr into ejabberd's log directory so tracebacks are captured.
sys.stderr = open('/Applications/ejabberd-2.1.11/logs/extauth_err.log','a')
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='/Applications/ejabberd-2.1.11/logs/extauth.log',
                    filemode='a')
logging.info('extauth script started, waiting for ejabberd requests')
class EjabberdInputError(Exception):
    """Raised when ejabberd's length-prefixed stdin protocol cannot be parsed."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
def genanswer(bool):
    """Pack an ejabberd extauth reply: a 2-byte length (always 2) followed
    by a 2-byte result flag (1 = success, 0 = failure), both big-endian.

    Bug fix: the original only assigned `answer` when the flag was truthy,
    so a False result raised NameError instead of reporting failure.
    (The parameter name shadows the builtin `bool`; kept for compatibility.)
    """
    answer = 1 if bool else 0
    token = pack('>hh', 2, answer)
    return token
def ejabberd_out(bool):
    """Write the packed success/failure reply for ejabberd to stdout."""
    logging.debug('Ejabberd gets: %s' % bool)
    token = genanswer(bool)
    # NOTE(review): ord(token[i]) assumes Python 2 str tokens; under
    # Python 3, indexing bytes already yields ints and ord() would fail.
    logging.debug('sent bytes: %#x %#x %#x %#x' % (ord(token[0]), ord(token[1]), ord(token[2]), ord(token[3])))
    sys.stdout.write(token)
    sys.stdout.flush()
def ejabberd_in():
    """Read one length-prefixed request from ejabberd on stdin and return
    its fields split on ':' (e.g. ['auth', user, server, password]).

    Raises EjabberdInputError when the 2-byte big-endian length header
    cannot be read or is incomplete.
    """
    logging.debug('trying to read 2 byte header from ejabberd:')
    try:
        input_length = sys.stdin.read(2)
    except IOError:
        # Bug fix: the original logged and fell through, then crashed with
        # a NameError on the undefined `input_length`; raise the protocol
        # error so the main loop can shut down cleanly.
        logging.debug('ioerror')
        raise EjabberdInputError("IOError while reading 2 byte header")
    # Bug fix: was `is not 2`, an identity test against an int literal;
    # use a value comparison.
    if len(input_length) != 2:
        logging.debug('ejabberd sent improper 2 byte header!')
        raise EjabberdInputError("ejabberd sent wrong thinggy")
    logging.debug('got proper 2 byte header via stdin')
    (size,) = unpack('>h', input_length)
    return sys.stdin.read(size).split(':')
def auth(username, server, password):
# call authenticate webservice from Loft...
logging.debug('%s@%s wants authentication...' % (username, server))
try:
response = requests.get(url, auth=('rn','rn'))
except Exception, e:
logging.info('Loft authentication error: %s' % e)
response = json.loads(response.content)
response = response['response']
if response['errors']:
logging.debug('not a valid user/passwd')
return False
else:
logging.debug('user OK')
return True
def isuser(username, server):
    """User-existence check; this deployment accepts every user."""
    return True #assume all OK
def setpass(username, server, newpassword):
    """Password changes are not supported from XMPP; always refuse."""
    return False #disallow from XMPP
while True:
logging.debug('start of infinite loop')
try:
data = ejabberd_in()
except EjabberdInputError, inst:
logging.info("Exception occured: %s" % inst)
break
logging.debug("Method: %s" % data[0])
success = False
if data[0] == 'auth':
success = auth(data[1], data[2], data[3])
ejabberd_out(success)
elif data[0] == 'isuser':
success = auth(data[1], data[2])
ejabberd_out(success)
elif data[0] == 'setpass':
success = auth(data[1], data[2], data[3])
ejabberd_out(success)
logging.debug("end of infinite loop")
logging.info('extauth script terminating') | [
"robertneville73@gmail.com"
] | robertneville73@gmail.com |
da0aaee1bb5defffbefd3443a022f700842a5265 | 0235c80552b9a4a34f915c6272bdca8dad9fef6e | /tokenize_preprocess.py | c38e12e94abce83268fc08446c782ca79e044cd2 | [] | no_license | J-Seo/Pytorch_NLG | b52c11f97bccaaf84db97a7b3b7e874d5cbf0c08 | 0a9a7d1643534d813e52c8160cc1bb2ef1a759b8 | refs/heads/master | 2022-02-14T13:20:01.937820 | 2019-08-01T04:51:45 | 2019-08-01T04:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,501 | py | # -*- coding: utf-8 -*-
"""tokenize_preprocess.ipynb의 사본
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10xGlIo5KWV5RtCNfeUn7KBTCNJDeL8bU
"""
!pip install https://github.com/rsennrich/subword-nmt/archive/master.zip
from __future__ import absolute_import, division, print_function, unicode_literals
!pip install -q tensorflow-gpu==2.0.0-beta0
import tensorflow as tf
import numpy as np
import os
subword-nmt get-vocab --train_file {train_file} --vocab_file {vocab_file}
subword-nmt segment-char-ngrams --vocab {vocab_file} -n {order} --shortlist {size} < {test_file} > {out_file}
url_list = ['https://drive.google.com/open?id=1I4kynBxgLvy6ukPxt2Nm0e7r67U4xTls',
'https://drive.google.com/open?id=1DKmfllIV5_Y178ubYHVnXITVrbi8IDcZ',
'https://drive.google.com/open?id=1Ht4ZI12wNSkm5I6g3-KChka9CIyyk36c',
'https://drive.google.com/open?id=1G0ENNV49lYcNpxZE28xcdSOfJgJRoc1G',
'https://drive.google.com/open?id=1PDDfrhdJHYCPTvhP_gF4ArmgOmJLp36O',
'https://drive.google.com/open?id=1HIGcwke5FSDboO9e1YrFjYVIduvix34M']
file_names = ['text1.txt', 'text2.txt', 'text3.txt', 'text4.txt', 'text5.txt', 'text6.txt']
i = 0
for name in file_names:
text_dir = tf.keras.utils.get_file(name, origin = url_list[i])
i += 1
parent_dir = os.path.dirname(text_dir)
parent_dir
os.system("./learn_bpe.py -s 30000 < parent_dir > parent.dir.bpe")
os.system("parent.dir.bpe")
def labeler(example, index):
  # Pair a text line with its source-file index as an int64 label tensor.
  return example, tf.cast(index, tf.int64)
labeled_data_sets = []
for i, file_name in enumerate(file_names):
lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name))
labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i))
labeled_data_sets.append(labeled_dataset)
BUFFER_SIZE = 50000
BATCH_SIZE = 64
TAKE_SIZE = 5000
all_labeled_data = labeled_data_sets[0]
for labeled_dataset in labeled_data_sets[1:]:
all_labeled_data = all_labeled_data.concatenate(labeled_dataset)
all_labeled_data = all_labeled_data.shuffle(
BUFFER_SIZE, reshuffle_each_iteration=False)
for ex in all_labeled_data.take(5):
print(ex)
## 토큰화하기
tokenizer = tfds.features.text.Tokenizer()
vocabulary_set = set()
for text_tensor, _ in all_labeled_data:
some_tokens = tokenizer.tokenize(text_tensor.numpy())
vocabulary_set.update(some_tokens)
vocab_size = len(vocabulary_set)
vocab_size
encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)
example_text = next(iter(all_labeled_data))[0].numpy()
print(example_text)
encoded_example = encoder.encode(example_text)
print(encoded_example)
def encode(text_tensor, label):
  # Eager helper: turn the tensor's byte string into a list of token ids.
  encoded_text = encoder.encode(text_tensor.numpy())
  return encoded_text, label
def encode_map_fn(text, label):
    """Graph-compatible wrapper around `encode` via tf.py_function."""
    return tf.py_function(encode, inp=[text, label], Tout=(tf.int64, tf.int64))
all_encoded_data = all_labeled_data.map(encode_map_fn)
# Train/test split: the first TAKE_SIZE examples are held out for testing;
# batches are padded to the longest sequence in each batch.
train_data = all_encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE)
train_data = train_data.padded_batch(BATCH_SIZE, padded_shapes=([-1],[]))
test_data = all_encoded_data.take(TAKE_SIZE)
test_data = test_data.padded_batch(BATCH_SIZE, padded_shapes=([-1],[]))
sample_text, sample_labels = next(iter(test_data))
sample_text[0], sample_labels[0]
# NOTE(review): `train_data` is a tf.data.Dataset, not a file -- the shell
# redirections below read/write files with those literal names; confirm intent.
os.system("subword-nmt learn-bpe -s 30000 < train_data > train_data.bpe")
# NOTE(review): Dataset objects have no attribute "bpe"; this line raises
# AttributeError at runtime.
train_data.bpe
os.system("subword-nmt apply-bpe -c merge_text.en.bpe < train_data.en > train_data_final.en")
os.system("subword-nmt learn-bpe -s 30000 < test_data.en > test_data.en.bpe")
| [
"41497567+seojae777@users.noreply.github.com"
] | 41497567+seojae777@users.noreply.github.com |
25e06c922c868656cf57c45a74152cf0e56c3986 | be54d95dc0363f5b3dcf57fd99facbf67660d280 | /ttsx_app/views_bak.py | 8153feec473caf0267567d455d3f4fff02114a16 | [] | no_license | csrlsm/cmdb_test | c9443dab28bd842594e6640c538fef8b7ec038a1 | 5cd4ee1f179a68ae849925992732d532b0c18f68 | refs/heads/master | 2021-05-01T09:57:04.522773 | 2018-03-12T06:47:19 | 2018-03-12T06:47:19 | 121,101,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import *
from django.template import loader, RequestContext
# Create your views here.
def index(request):
    """Render the storefront landing page."""
    return render(request, 'index.html')
def base(request):
    """Render the shared base layout template on its own."""
    template_name = 'base.html'
    return render(request, template_name)
"csrlsm@outlook.com"
] | csrlsm@outlook.com |
116cc3115d4ac2f1294d91646a2ab68c1d360cde | eff7a4a914e912eef2bc7a480795cfaae95eac91 | /python/Exercicios/8.16/8.16v2.py | 357d73983e83043c3f6a648ce289af847d27c6f8 | [] | no_license | HenDGS/Aprendendo-Python | fb3cf05d8911a7084c7805a69b8df06f9ce3d311 | 622a83983f3f77e5e74411e016663f05449be537 | refs/heads/master | 2023-08-17T14:17:53.304676 | 2021-09-14T02:51:52 | 2021-09-14T02:51:52 | 294,150,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | import funcao
# Exercise 8.16: call the helper with positional and keyword arguments.
# NOTE(review): the keyword "porência" must match funcao's parameter name
# exactly (Unicode identifier) -- confirm against funcao.py.
a=funcao.funcao("camaro","Chevrolet",ano=2015,porência=461)
print(a)
| [
"henrique1443@hotmail.com"
] | henrique1443@hotmail.com |
ec31ae45ffa95cb68df704049a22cd392664ce0f | d76d780efe1c7934907ca01917030d4d46924629 | /test (Edmunds-MacBook-Pro's conflicted copy 2013-05-27).py | 9fbaba0a87d07e77c89762fc351fdfb6eca0a5e9 | [] | no_license | emhart/SexRatioFecundityIBM | ba63a616bd5e1f4dd81f134b8fc2e6a180f4d4e7 | 2075996e9a79f237365695ff40e28da5cdeb7e0f | refs/heads/master | 2020-05-19T14:14:17.620469 | 2013-06-19T23:41:53 | 2013-06-19T23:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | '''
Created on Jul 19, 2012
@author: emh
A working file to test out my python classes, will be junked when the full simulation is run.
'''
### Imports
from ibmsimulation import Lattice as L
from ibmsimulation import Individual as Ind
from ibmsimulation import Group as G
import matplotlib.pyplot as plt
import numpy as np
from ibmsimulation import ibm_help as ih
'''
Global parameter sets:
b: Sets the intercept of the gompertz equation, more negative values force the y intercept at 0 longer.
c: Sets how rapidly the function asymptotes. more negative values asymptote faster.
const: sets the threshold reproductive energy needed as a function of total potential fecundity.
'''
# Gompertz-equation parameters described in the module docstring above.
b = -10
c = -1
const = .5
'''
Set the parameters for all individuals
fr: The upper and lower bounds of the feeding rate parameter, drawn from a uniform distribution
energy: The starting energy of a newly born individual
rep_cost: The energetic cost of reproduction per individual offspring
lifespan: The number of time steps an organism can live.
rep_thresh: The energetic threshold that an organism needs to reach.
fecund_genes: A list of four numbers. Positions [0,1] are the upper and lower bounds of a uniform distribution
and [2] is the number of chromosomes, usually 2, and position [3] is the length of each chromosome.
'''
ind_set = {'fr':[1,1],'m_cost':0,'energy':1,'rep_cost': 0 ,'lifespan':1,'fecund_genes':[.6,1,2,5],'max_energy':10}
# 2x2 lattice with per-cell carrying-capacity parameters Kp.
tmp = L.Lattice(dims = [2,2],Kp = [.005,.01] )
groups = []
z = []
# Found 20 individuals with random foraging rate, sex, and fecundity genome
# drawn from the ranges declared in ind_set; all start in group 1.
for x in range(20):
    indiv_dict = {'forage_rate':np.random.uniform(ind_set["fr"][0],ind_set["fr"][1]),'m_cost':ind_set["m_cost"],'energy':1,'rep_cost':ind_set["rep_cost"],'lifespan':ind_set["lifespan"],'groupID' : 1,'sex' : np.random.binomial(1,.5,1),'fecund_genes':np.random.uniform(ind_set["fecund_genes"][0],ind_set["fecund_genes"][1],(ind_set["fecund_genes"][2],ind_set["fecund_genes"][3])),"max_energy": ind_set["max_energy"]}
    z.append(Ind.individual(**indiv_dict))
# One (initially empty) group per lattice cell; all founders go into group 0.
for x in range(4):
    groups.append(G.group([],x,ID=x))
groups[0] = G.group(z,0,ID=0)
tmp.groups = groups
n = 100
# Main simulation loop (Python 2 print statements): each step runs mating,
# dispersal, reproduction, senescence, mutation, regrowth, data collection.
for x in range(n):
    if x%1 == 0:
        print x
    tmp.mate(ind_set)
    tmp.disperse(.1)
    tmp.reproduce()
    tmp.senesce(.05)
    tmp.mutate(0.01)
    #tmp.forage()
    tmp.regenerate()
    tmp.data_collect()
ih.write_ibmdata(tmp)
print "done"
| [
"edmundhart@Edmunds-iMac.local"
] | edmundhart@Edmunds-iMac.local |
c8a0dd7d7b60c120616e1b5a012166d1ebc10680 | 470b3d063ec639200de58acd24c6b50466315ece | /UpdatedSyntheticDataset/SyntheticDataset2/ElementsCreator/shape.py | e03c24a1df1a8dcc4b3c33ec11d5efb1f5a918a0 | [
"MIT"
] | permissive | FlintHill/SUAS-Competition | 56da6189e1e3391acd873806ccb2181a729fac05 | 8931c8859878692134f5113d4c6c3e17561f0196 | refs/heads/master | 2020-04-03T21:28:08.110599 | 2020-01-27T13:42:53 | 2020-01-27T13:42:53 | 34,731,946 | 5 | 12 | null | 2020-01-27T13:46:10 | 2015-04-28T13:25:37 | Python | UTF-8 | Python | false | false | 1,084 | py | from PIL import ImageDraw, Image
import abc
class Shape(object):
    """Abstract base for drawable targets that can be pasted onto a PIL image.

    Concrete subclasses implement get_coordinates() and draw(); draw() must
    return a PIL image (with alpha) of the rendered shape.
    """
    # NOTE(review): "__metaclass__" is the Python 2 spelling; under Python 3
    # it is ignored, so the abstract methods are not actually enforced there.
    __metaclass__ = abc.ABCMeta

    def __init__(self, color, rotation):
        """
        :param color: color of shape - RGB
        :type color: 3-tuple
        :param rotation: degrees counterclockwise shape will be rotated
        :type rotation: int
        """
        self.color = color
        self.rotation = rotation

    @abc.abstractmethod
    def get_coordinates(self):
        """Return the drawing coordinates of the shape."""
        pass

    @abc.abstractmethod
    def draw(self):
        """Render the shape and return it as a PIL image."""
        pass

    def overlay(self, midpoint, image):
        """
        Draw this shape and paste it onto *image*, centered at *midpoint*.

        :param midpoint: midpoint where shape will be overlayed on image
        :type midpoint: 2-tuple xy pixel coordinates
        :param image: image for shape to be overlayed on
        :type image: PIL image
        """
        new_shape = self.draw()
        # the third argument re-uses the shape as its own transparency mask
        image.paste(new_shape, self.get_upperleft(new_shape, midpoint), new_shape)

    def get_upperleft(self, shape_image, midpoint):
        """Return the upper-left pixel that centers *shape_image* at *midpoint*.

        Uses floor division so the result is an integer pixel box: PIL's
        Image.paste rejects float coordinates (the previous true division
        produced floats under Python 3).
        """
        x1 = midpoint[0] - shape_image.width // 2
        y1 = midpoint[1] - shape_image.height // 2
        return (x1, y1)
| [
"jmoxrox@gmail.com"
] | jmoxrox@gmail.com |
5c187cef52ac8e1006273cd22ea80940f0c1b7d1 | 485ba262357e10460c74482cd407003ac86886bb | /pyNastran/converters/openfoam/test_openfoam_gui.py | 0d93a5f9986ab459a658b741ae5694fddee65246 | [] | no_license | shangke00GitHub/pyNastran | 13202f3f504dca044755088971176a407622425b | c4509df6ef6c3291c005caada831b443feee734f | refs/heads/master | 2020-11-30T02:45:48.774507 | 2019-12-20T00:56:25 | 2019-12-20T00:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | import os
import unittest
from cpylog import get_logger
import pyNastran
from pyNastran.gui.testing_methods import FakeGUIMethods
from pyNastran.converters.openfoam.block_mesh import read_block_mesh, mirror_block_mesh
from pyNastran.converters.openfoam.face_file import FaceFile
from pyNastran.converters.openfoam.openfoam_io import OpenFoamIO
from pyNastran.utils import check_path
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, 'converters', 'openfoam', 'models')
class OpenFoamGUI(OpenFoamIO, FakeGUIMethods):
    """Headless GUI stand-in used to drive the OpenFOAM loaders in tests."""
    def __init__(self):
        FakeGUIMethods.__init__(self)
        self.model = OpenFoamIO(self)
        # register the three OpenFOAM formats so on_load_geometry can dispatch
        self.build_fmts(['openfoam_hex', 'openfoam_shell', 'openfoam_faces'], stop_on_failure=True)
class TestOpenFoamGUI(unittest.TestCase):
    """Smoke tests for the OpenFOAM block-mesh and face-file readers."""
    def test_openfoam_geometry_01(self):
        """tests the ascii three plugs model"""
        # NOTE(review): the docstring above looks copied from another test;
        # this actually loads the SnakeRiverCanyon blockMeshDict model.
        log = get_logger(level='warning', encoding='utf-8')
        geometry_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'blockMeshDict')
        bdf_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'blockMeshDict.bdf')
        face_filename = os.path.join(MODEL_PATH, 'SnakeRiverCanyon', 'system', 'faces')
        check_path(geometry_filename, 'geometry_filename')

        # exercise both GUI code paths (shell and hex) on the same geometry
        test = OpenFoamGUI()
        test.log = log
        test.on_load_geometry(geometry_filename, geometry_format='openfoam_shell', raise_error=True)
        test.on_load_geometry(geometry_filename, geometry_format='openfoam_hex', raise_error=True)
        # side effect of the hex load; clean up so reruns start fresh
        os.remove('points.bdf')
        #test.load_openfoam_geometry_faces(geometry_filename)

        # round-trip the block mesh through the writer, with and without symmetry
        model = read_block_mesh(geometry_filename, log=log)
        block_mesh_name_out = 'blockMeshDict.out'
        model.write_block_mesh(
            block_mesh_name_out=block_mesh_name_out, make_symmetry=False)
        model.write_block_mesh(
            block_mesh_name_out=block_mesh_name_out, make_symmetry=True)
        model.write_bdf(bdf_filename, model.nodes, model.hexas)
        mirror_block_mesh(geometry_filename, block_mesh_name_out)
        os.remove(block_mesh_name_out)
        #nodes, hexas, quads, inames, bcs

    def test_openfoam_2(self):
        """reads a tiny hand-written points/faces pair"""
        point_filename = 'points'
        with open(point_filename, 'w') as point_file:
            point_file.write('0. 0. 0.\n')

        # two triangular faces referencing points 1-4
        face_filename = 'faces'
        with open(face_filename, 'w') as face_file:
            face_file.write('2\n')
            face_file.write('\n')
            face_file.write('3 1 2 3\n')
            face_file.write('3 1 3 4\n')

        log = get_logger(level='warning', encoding='utf-8')
        #test = OpenFoamGUI()
        #test.log = log
        #test.load_openfoam_faces_geometry(face_filename)

        # read all faces, then selected subsets, to cover the filter path
        faces = FaceFile(log=None, debug=False)
        faces.read_face_file(face_filename)

        faces.read_face_file(face_filename, ifaces_to_read=[1])
        faces.read_face_file(face_filename, ifaces_to_read=[0, 1])
        os.remove(point_filename)
        os.remove(face_filename)
if __name__ == '__main__':  # pragma: no cover
    # allow running this test module directly
    unittest.main()
| [
"mesheb82@gmail.com"
] | mesheb82@gmail.com |
05926003b26f53d3dda5111840987f9ff1896673 | be58f5f0012d70db570de0e3a745ceefc91f91e0 | /Week7/Code/using_os.py | 906d5166ea0ff95ca2d5dad4387733a42f8c9428 | [] | no_license | amysolman/CMEECourseWork | caa2ad6ef11c819d2eb295cfe03f1571b7ad9cae | fb2a21bc7b625fed643eaad03bf51c458645abc4 | refs/heads/master | 2021-07-25T01:10:00.653892 | 2020-08-27T13:06:55 | 2020-08-27T13:06:55 | 212,303,861 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | #!/usr/bin/env python3
# Date: 15th November 2019
"""Subprocess practical for Python II"""
__appname__ = 'using_os.py'
__author__ = 'Amy Solman (amy.solman19@imperial.ac.uk'
__version__ = '0.0.1'
# Use the subprocess.os module to get a list of files and directories
# in your ubuntu home directory
# Hint: look in subprocess.os and/or subprocess.os.path and/or
# subprocess.os.walk for helpful functions
import subprocess, pathlib, re
# List the home directory contents (fire-and-forget child process).
subprocess.Popen(["ls", "-l"], cwd=pathlib.Path.home())

#################################
#~Get a list of files and
#~directories in your home/ that start with an uppercase 'C'

# Walk the tree once and test every directory/file *name* individually.
# (The previous version ran re.findall on ''.join(<list>), which concatenates
# all names into one string -- it could only ever match the first entry --
# and on the absolute dirpath, which starts with "/" and never matches '^C'.)
home = subprocess.os.path.expanduser("~")
FilesDirsStartingWithC = []
for (dirpath, subdirs, files) in subprocess.os.walk(home):
    FilesDirsStartingWithC.extend(n for n in subdirs if re.match(r'C\w+', n))
    FilesDirsStartingWithC.extend(n for n in files if re.match(r'C\w+', n))
print(FilesDirsStartingWithC)

#################################
# Get files and directories in your home/ that start with either an
# upper or lower case 'C'

home = subprocess.os.path.expanduser("~")
FilesDirsStartingWithCc = []
for (dirpath, subdirs, files) in subprocess.os.walk(home):
    FilesDirsStartingWithCc.extend(n for n in subdirs if re.match(r'[Cc]\w+', n))
    FilesDirsStartingWithCc.extend(n for n in files if re.match(r'[Cc]\w+', n))
print(FilesDirsStartingWithCc)

#################################
# Get only directories in your home/ that start with either an upper or
#~lower case 'C'

home = subprocess.os.path.expanduser("~")
DirsStartingWithCc = []
for (dirpath, subdirs, files) in subprocess.os.walk(home):
    DirsStartingWithCc.extend(n for n in subdirs if re.match(r'[Cc]\w+', n))
print(DirsStartingWithCc)
"amysolman@Amys-MacBook-Pro.local"
] | amysolman@Amys-MacBook-Pro.local |
9e384c9a4c29da0c141ce394c3198d2303cc27f9 | 6414227ccd7022800f9486f7890b28e506d92f44 | /robots/small/strategies/2019/test/pokupio/t2.py | 7c777b3fc7b9f6b56271fbb8bb6b17d0944cf1f9 | [
"MIT"
] | permissive | memristor/mep2 | 74df408e59c9183146d99f9d48ec288f87741cec | bc5cddacba3d740f791f3454b8cb51bda83ce202 | refs/heads/master | 2023-02-04T04:03:00.054195 | 2020-04-03T16:29:29 | 2020-04-03T16:29:29 | 153,911,004 | 5 | 1 | MIT | 2020-02-26T21:58:51 | 2018-10-20T13:56:59 | C++ | UTF-8 | Python | false | false | 59 | py |
# relative priority of this strategy step
weight = 2

def run():
    """Gate the step: report failure until the puck has been collected."""
    if State.pokupio:
        return
    return False
| [
"sciliquant@gmail.com"
] | sciliquant@gmail.com |
8878ce204a1cf83898ff83b7f035d0f6a483c822 | e5f2a3e824ad181a5121e18304adcde70ec2e16e | /ads_chm/bin/pip3.5 | 8c560f102abd53370d064c50ab2fee69b6b49775 | [] | no_license | MichalRozenwald/num_methods_ads_prediction | 5d45f4551dd497e88d3ebbc5b9b4ac1a3088f522 | 168031696e37844ed8f5324c5cf27eb0c2d7ca0f | refs/heads/master | 2021-01-12T08:29:27.731475 | 2016-12-15T20:57:20 | 2016-12-15T20:57:20 | 76,594,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | 5 | #!/home/michal/Projects/ads_chm/ads_chm/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so pip reports its own name correctly, then delegate to pip.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"michal.rozenwald@gmail.com"
] | michal.rozenwald@gmail.com |
719ec5e11dce6e24bd6b5f91b3469b407c0160a1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02257/s888628284.py | 59550bf214754874e7673f5cf26d7edf5cc0ca07 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # -*- coding: utf-8 -*-
def isPrime(p):
    """Return True iff p is prime.

    Deterministic trial division by 2 and by odd numbers up to sqrt(p).
    The previous Fermat check (pow(2, p-1, p) == 1) wrongly accepted
    base-2 pseudoprimes such as 341 = 11 * 31.
    """
    if p < 2:
        return False
    if p < 4:  # 2 and 3 are prime
        return True
    if p % 2 == 0:
        return False
    i = 3
    while i * i <= p:
        if p % i == 0:
            return False
        i += 2
    return True
# Read N, then count how many of the next N integers are prime.
# (Python 2 I/O: raw_input and print-statement.)
n = int(raw_input())
count = 0
for i in range(n):
    if isPrime(int(raw_input())):
        count += 1
print count
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
561a473b6aa704f7d0651d89278fc1942b376384 | b3528a3795ce373e27d52362128de3cff6f9969d | /python/orbs/target/password-generator/slices1589360571.263371/success/success_39_0.py | f1896f56cc09413b60c95ec2ce3c24bce6dab1fd | [] | no_license | greenmonn/daily-coding | 43e0f3775678c7d6116df7ba5034ea18489d87c9 | ef6ecc88e6db61e18364eef3ea071c11e1385a99 | refs/heads/master | 2023-01-14T04:59:14.130309 | 2021-02-08T23:32:56 | 2021-02-08T23:32:56 | 157,735,438 | 1 | 1 | null | 2022-12-21T02:13:17 | 2018-11-15T15:47:37 | Python | UTF-8 | Python | false | false | 5,253 | py | #!/usr/bin/env python3
# m4ngl3m3! v0.1.1
# Common password pattern generator using strings list
# Follow (Medium / Twitter): @localh0t
import argparse
import sys
import os
from Mangler import ManglingParameters
from Mangler import Mangler
def build_parser():
    """Add parser arguments and return an instance of ArgumentParser.

    Positional arguments: MUTATION_MODE, STRINGS_FILE, OUTPUT_FILE.
    Optional arguments tune the year range, the prefix/suffix wordlists,
    the symbol-placement behaviour, and which mutation methods run.
    """
    parser = argparse.ArgumentParser(description=("Common password pattern "
                                                  "generator using strings "
                                                  "list"),
                                     formatter_class=argparse.
                                     ArgumentDefaultsHelpFormatter)
    parser.add_argument("mutation_mode",
                        metavar="MUTATION_MODE",
                        type=str,
                        help=("Mutation mode to perform: "
                              "(prefix-mode | suffix-mode | dual-mode)"),
                        choices=['prefix-mode', 'suffix-mode', 'dual-mode'])
    parser.add_argument("strings_file",
                        metavar="STRINGS_FILE",
                        type=str,
                        help="File with strings to mutate")
    parser.add_argument("output_file",
                        metavar="OUTPUT_FILE",
                        type=str,
                        help="Where to write the mutated strings")
    parser.add_argument("-fy", "--from-year",
                        metavar="FROM_YEAR",
                        type=int,
                        help="Year where our iteration starts",
                        default=2015)
    parser.add_argument("-ty", "--to-year",
                        metavar="TO_YEAR",
                        type=int,
                        help="Year where our iteration ends",
                        default=2020)
    parser.add_argument('-sy', "--short-year",
                        help=("Also add shorter year form when iterating"),
                        action='store_true',
                        default=False)
    parser.add_argument("-nf", "--numbers-file",
                        metavar="NUMBERS_FILE",
                        type=str,
                        help="Numbers prefix/suffix file",
                        default='./target/password-generator/files/numbers/numbers_set2.txt')
    parser.add_argument("-sf", "--symbols-file",
                        metavar="SYMBOLS_FILE",
                        type=str,
                        help="Symbols prefix/suffix file",
                        default='./target/password-generator/files/symbols/symbols_set2.txt')
    parser.add_argument("-cf", "--custom-file",
                        metavar="CUSTOM_FILE",
                        type=str,
                        help="Custom words/dates/initials/etc file")
    parser.add_argument('-sbs', "--symbols-before-suffix",
                        help=("Insert symbols also before years/numbers/"
                              "custom (when in suffix-mode or dual-mode)"),
                        action='store_true',
                        default=False)
    parser.add_argument('-sap', "--symbols-after-prefix",
                        help=("Insert symbols also after years/numbers/custom"
                              " (when in prefix-mode or dual-mode)"),
                        action='store_true',
                        default=False)
    # previously undocumented in --help: comma-separated mutation method list
    parser.add_argument("-mm", "--mutation-methods",
                        metavar="MUTATION_METHODS",
                        type=str,
                        help=("Comma-separated mutation methods to run: "
                              "(normal | uppercase | firstup | replacevowels)"),
                        default='normal,'
                                'uppercase,'
                                'firstup,'
                                'replacevowels')
    return parser
def build_mangler_with_args(args):
    """Build a Mangler from the parsed CLI arguments.

    Loads the numbers/symbols (and optional custom) wordlists fully into
    memory as line lists and copies the relevant flags onto a
    ManglingParameters instance.
    """
    parameters = ManglingParameters()
    parameters.num_file = open(args.numbers_file, 'r').read().splitlines()
    parameters.sym_file = open(args.symbols_file, 'r').read().splitlines()
    if (args.custom_file):
        parameters.cus_file = open(args.custom_file, 'r').read().splitlines()
    parameters.mutation_mode = args.mutation_mode
    parameters.from_year = args.from_year
    parameters.to_year = args.to_year
    parameters.suffix_pos_swap = args.symbols_before_suffix
    # NOTE(review): args.symbols_after_prefix (-sap) is parsed but never
    # copied onto parameters here -- confirm whether a prefix_pos_swap
    # assignment is missing.
    return Mangler(mangling_parameters=parameters)
if __name__ == "__main__":
    args = build_parser().parse_args()
    mangler = build_mangler_with_args(args)

    # dispatch table: CLI method name -> bound Mangler method
    mangler_functions = {
        "normal": mangler.normal_mangling,
        "uppercase": mangler.uppercase_mangling,
        "firstup": mangler.firstup_mangling,
        "replacevowels": mangler.replacevowels_mangling,
    }

    written_strings = 0
    with open(args.strings_file, 'r') as f:
        for line in f:
            mangled = []
            for method in args.mutation_methods.lower().split(","):
                try:
                    (name, output) = mangler_functions[method](line.strip())
                    mangled.extend(output)
                except KeyError:
                    print("[-] The method %s is not defined !" % method)
                # NOTE(review): if the *first* method name is invalid, `name`
                # is unbound here and this raises NameError -- confirm.
                print("[+] %s mutation method done on string: %s" %
                      (name, line.strip()))
            written_strings += len(mangled)
    # NOTE(review): args.output_file is parsed but nothing is written to it
    # in this variant; only the count of generated strings is reported.
    print('##v_trajectory captured: {}##'.format(written_strings))
| [
"greenmon@kaist.ac.kr"
] | greenmon@kaist.ac.kr |
acf2f03be9c0416122ef08ae04a4e24687d2aaf8 | 8bc6fa3520c11820f030a08fb620b0da9753bbfe | /lib/dataset/shelf_synthetic.py | 23a7bfa57c931e7effe0632777f28931862bc3fc | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | zhangyux15/voxelpose-pytorch | 010a66d8678dca5c679b742faf2a8c421bccf74b | 9ef5d407a597c9647b2c8f6c0a246b725a87a054 | refs/heads/main | 2023-06-02T18:18:50.210534 | 2021-06-18T07:13:06 | 2021-06-18T07:13:06 | 408,174,695 | 1 | 0 | MIT | 2021-09-19T16:12:56 | 2021-09-19T16:12:56 | null | UTF-8 | Python | false | false | 16,179 | py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import torch
from torch.utils.data import Dataset
import json_tricks as json
import pickle
import logging
import copy
import random
import cv2
import os
from utils.transforms import get_affine_transform
from utils.transforms import affine_transform
from utils.transforms import rotate_points, get_scale
from utils.cameras_cpu import project_pose
logger = logging.getLogger(__name__)
# COCO 17-keypoint convention: joint index -> short name.
coco_joints_def = {0: 'nose',
                   1: 'Leye', 2: 'Reye', 3: 'Lear', 4: 'Rear',
                   5: 'Lsho', 6: 'Rsho',
                   7: 'Lelb', 8: 'Relb',
                   9: 'Lwri', 10: 'Rwri',
                   11: 'Lhip', 12: 'Rhip',
                   13: 'Lkne', 14: 'Rkne',
                   15: 'Lank', 16: 'Rank'}

# Skeleton edges as pairs of joint indices (used for limb/visualisation code).
LIMBS = [[0, 1], [0, 2], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [11, 13], [13, 15],
         [6, 12], [12, 14], [14, 16], [5, 6], [11, 12]]
class ShelfSynthetic(Dataset):
    """Synthetic multi-view training set for the Shelf camera rig.

    3D poses are sampled from a Panoptic pose database and placed at random
    positions/orientations on the ground plane; the Shelf calibration file
    provides the 5 camera views. No real images are used -- the network
    input is a synthetic 2D heatmap per view (images are blank).
    Coordinates appear to be in millimetres (sigma=200.0, space sizes in
    the thousands) -- TODO confirm against the config.
    """
    def __init__(self, cfg, image_set, is_train, transform=None):
        super().__init__()
        self.pixel_std = 200.0
        self.joints_def = coco_joints_def
        self.limbs = LIMBS
        self.num_joints = len(coco_joints_def)
        self.cam_list = [0, 1, 2, 3, 4]
        self.num_views = len(self.cam_list)
        self.maximum_person = cfg.MULTI_PERSON.MAX_PEOPLE_NUM

        self.is_train = is_train

        this_dir = os.path.dirname(__file__)
        dataset_root = os.path.join(this_dir, '../..', cfg.DATASET.ROOT)
        self.dataset_root = dataset_root
        self.image_set = image_set
        self.dataset_name = cfg.DATASET.TEST_DATASET

        self.data_format = cfg.DATASET.DATA_FORMAT
        self.data_augmentation = cfg.DATASET.DATA_AUGMENTATION

        self.color_rgb = cfg.DATASET.COLOR_RGB

        self.target_type = cfg.NETWORK.TARGET_TYPE
        self.image_size = np.array(cfg.NETWORK.IMAGE_SIZE)
        self.heatmap_size = np.array(cfg.NETWORK.HEATMAP_SIZE)
        self.sigma = cfg.NETWORK.SIGMA
        self.use_different_joints_weight = cfg.LOSS.USE_DIFFERENT_JOINTS_WEIGHT
        self.joints_weight = 1

        self.transform = transform

        self.space_size = np.array(cfg.MULTI_PERSON.SPACE_SIZE)
        self.space_center = np.array(cfg.MULTI_PERSON.SPACE_CENTER)
        self.initial_cube_size = np.array(cfg.MULTI_PERSON.INITIAL_CUBE_SIZE)

        # bank of 3D poses (from Panoptic training data) sampled per item
        pose_db_file = os.path.join(self.dataset_root, "..", "panoptic_training_pose.pkl")
        self.pose_db = pickle.load(open(pose_db_file, "rb"))
        self.cameras = self._get_cam()

    def _get_cam(self):
        """Load the Shelf camera calibration; all values become numpy arrays."""
        cam_file = osp.join(self.dataset_root, "calibration_shelf.json")
        with open(cam_file) as cfile:
            cameras = json.load(cfile)

        for id, cam in cameras.items():
            for k, v in cam.items():
                cameras[id][k] = np.array(v)

        return cameras

    def __getitem__(self, idx):
        """Generate one synthetic multi-person scene and render it in all views."""
        # nposes = np.random.choice([1, 2, 3, 4, 5], p=[0.1, 0.1, 0.2, 0.4, 0.2])
        nposes = np.random.choice(range(1, 6))
        bbox_list = []
        center_list = []

        select_poses = np.random.choice(self.pose_db, nposes)
        joints_3d = np.array([p['pose'] for p in select_poses])
        joints_3d_vis = np.array([p['vis'] for p in select_poses])

        # Place each pose: rotate about its mid-hip center, then translate to
        # a proposed ground-plane center; retry until bboxes barely overlap.
        for n in range(0, nposes):
            points = joints_3d[n][:, :2].copy()
            center = (points[11, :2] + points[12, :2]) / 2
            rot_rad = np.random.uniform(-180, 180)

            new_center = self.get_new_center(center_list)
            new_xy = rotate_points(points, center, rot_rad) - center + new_center

            loop_count = 0
            while not self.isvalid(self.calc_bbox(new_xy, joints_3d_vis[n]), bbox_list):
                loop_count += 1
                if loop_count >= 100:
                    break
                new_center = self.get_new_center(center_list)
                new_xy = rotate_points(points, center, rot_rad) - center + new_center

            # after 100 failed placements, truncate the scene to n people
            # NOTE(review): the for-loop is not broken here, yet joints_3d has
            # been truncated -- later iterations would index past the end;
            # confirm whether a break is missing.
            if loop_count >= 100:
                nposes = n
                joints_3d = joints_3d[:n]
                joints_3d_vis = joints_3d_vis[:n]
            else:
                center_list.append(new_center)
                bbox_list.append(self.calc_bbox(new_xy, joints_3d_vis[n]))
                joints_3d[n][:, :2] = new_xy

        # render the same 3D scene from every calibrated camera
        input, target_heatmap, target_weight, target_3d, meta, input_heatmap = [], [], [], [], [], []
        for k, cam in self.cameras.items():
            i, th, tw, t3, m, ih = self._get_single_view_item(joints_3d, joints_3d_vis, cam)
            input.append(i)
            target_heatmap.append(th)
            target_weight.append(tw)
            input_heatmap.append(ih)
            target_3d.append(t3)
            meta.append(m)
        return input, target_heatmap, target_weight, target_3d, meta, input_heatmap

    def __len__(self):
        # fixed number of synthetic samples per epoch
        return 3000
        # return self.db_size // self.num_views

    def _get_single_view_item(self, joints_3d, joints_3d_vis, cam):
        """Project the sampled 3D poses into one camera and build its targets."""
        joints_3d = copy.deepcopy(joints_3d)
        joints_3d_vis = copy.deepcopy(joints_3d_vis)
        nposes = len(joints_3d)

        # native Shelf frame resolution -- TODO confirm
        width = 1032
        height = 776
        c = np.array([width / 2.0, height / 2.0], dtype=np.float32)
        # s = np.array(
        #     [width / self.pixel_std, height / self.pixel_std], dtype=np.float32)
        s = get_scale((width, height), self.image_size)
        r = 0

        joints = []
        joints_vis = []
        for n in range(nposes):
            pose2d = project_pose(joints_3d[n], cam)

            # joints projecting outside the frame become invisible
            x_check = np.bitwise_and(pose2d[:, 0] >= 0,
                                     pose2d[:, 0] <= width - 1)
            y_check = np.bitwise_and(pose2d[:, 1] >= 0,
                                     pose2d[:, 1] <= height - 1)
            check = np.bitwise_and(x_check, y_check)
            vis = joints_3d_vis[n][:, 0] > 0
            vis[np.logical_not(check)] = 0
            joints.append(pose2d)
            joints_vis.append(np.repeat(np.reshape(vis, (-1, 1)), 2, axis=1))

        trans = get_affine_transform(c, s, r, self.image_size)
        # blank (all-ones) image: this synthetic set trains from heatmaps only
        input = np.ones((height, width, 3), dtype=np.float32)
        input = cv2.warpAffine(
            input,
            trans, (int(self.image_size[0]), int(self.image_size[1])),
            flags=cv2.INTER_LINEAR)

        if self.transform:
            input = self.transform(input)

        # map 2D joints into the network input frame; re-check bounds
        for n in range(nposes):
            for i in range(len(joints[0])):
                if joints_vis[n][i, 0] > 0.0:
                    joints[n][i, 0:2] = affine_transform(
                        joints[n][i, 0:2], trans)
                    if (np.min(joints[n][i, :2]) < 0 or
                            joints[n][i, 0] >= self.image_size[0] or
                            joints[n][i, 1] >= self.image_size[1]):
                        joints_vis[n][i, :] = 0

        input_heatmap, _ = self.generate_input_heatmap(
            joints, joints_vis)
        input_heatmap = torch.from_numpy(input_heatmap)
        # ground-truth 2D heatmaps are unused for the synthetic set
        target_heatmap = torch.zeros_like(input_heatmap)
        target_weight = torch.zeros(len(target_heatmap), 1)

        # make joints and joints_vis having same shape
        joints_u = np.zeros((self.maximum_person, len(joints[0]), 2))
        joints_vis_u = np.zeros((self.maximum_person, len(joints[0]), 2))
        for i in range(nposes):
            joints_u[i] = joints[i]
            joints_vis_u[i] = joints_vis[i]

        joints_3d_u = np.zeros((self.maximum_person, len(joints[0]), 3))
        joints_3d_vis_u = np.zeros((self.maximum_person, len(joints[0]), 3))
        for i in range(nposes):
            joints_3d_u[i] = joints_3d[i][:, 0:3]
            joints_3d_vis_u[i] = joints_3d_vis[i][:, 0:3]

        target_3d = self.generate_3d_target(joints_3d)
        target_3d = torch.from_numpy(target_3d)

        meta = {
            'image': '',
            'num_person': nposes,
            'joints_3d': joints_3d_u,
            'roots_3d': (joints_3d_u[:, 11] + joints_3d_u[:, 12]) / 2.0,
            'joints_3d_vis': joints_3d_vis_u,
            'joints': joints_u,
            'joints_vis': joints_vis_u,
            'center': c,
            'scale': s,
            'rotation': r,
            'camera': cam
        }

        return input, target_heatmap, target_weight, target_3d, meta, input_heatmap

    @staticmethod
    def compute_human_scale(pose, joints_vis):
        """Squared bbox extent of the visible joints, clamped to [96^2/4, 4*96^2]."""
        idx = joints_vis[:, 0] == 1
        if np.sum(idx) == 0:
            return 0
        minx, maxx = np.min(pose[idx, 0]), np.max(pose[idx, 0])
        miny, maxy = np.min(pose[idx, 1]), np.max(pose[idx, 1])
        return np.clip(np.maximum(maxy - miny, maxx - minx) ** 2, 1.0 / 4 * 96 ** 2, 4 * 96 ** 2)

    def generate_input_heatmap(self, joints, joints_vis):
        '''
        Render synthetic detector-like 2D heatmaps with random peak
        attenuation and random person drop-out.
        :param joints: [[num_joints, 3]]
        :param joints_vis: [num_joints, 3]
        :return: input_heatmap
        '''
        nposes = len(joints)
        num_joints = joints[0].shape[0]
        # a joint weight is 1 if any person has that joint visible
        target_weight = np.zeros((num_joints, 1), dtype=np.float32)
        for i in range(num_joints):
            for n in range(nposes):
                if joints_vis[n][i, 0] == 1:
                    target_weight[i, 0] = 1

        assert self.target_type == 'gaussian', \
            'Only support gaussian map now!'

        if self.target_type == 'gaussian':
            target = np.zeros(
                (num_joints, self.heatmap_size[1], self.heatmap_size[0]),
                dtype=np.float32)
            feat_stride = self.image_size / self.heatmap_size

            for n in range(nposes):
                # randomly drop a whole person to mimic missed detections
                obscured = random.random() < 0.05
                if obscured:
                    continue
                # gaussian width scales with apparent person size
                human_scale = 2 * self.compute_human_scale(joints[n] / feat_stride, joints_vis[n])
                if human_scale == 0:
                    continue

                cur_sigma = self.sigma * np.sqrt((human_scale / (96.0 * 96.0)))
                tmp_size = cur_sigma * 3
                for joint_id in range(num_joints):
                    feat_stride = self.image_size / self.heatmap_size
                    mu_x = int(joints[n][joint_id][0] / feat_stride[0])
                    mu_y = int(joints[n][joint_id][1] / feat_stride[1])
                    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
                    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
                    # skip joints whose gaussian lies fully outside the map
                    if joints_vis[n][joint_id, 0] == 0 or \
                            ul[0] >= self.heatmap_size[0] or \
                            ul[1] >= self.heatmap_size[1] \
                            or br[0] < 0 or br[1] < 0:
                        continue

                    size = 2 * tmp_size + 1
                    x = np.arange(0, size, 1, np.float32)
                    y = x[:, np.newaxis]
                    x0 = y0 = size // 2

                    # scale = 1 - np.abs(np.random.randn(1) * 0.25)
                    # randomly attenuate peaks (more aggressively for elbows/
                    # knees and wrists/ankles) to imitate detector confidence
                    scale = 0.9 + np.random.randn(1) * 0.03 if random.random() < 0.6 else 1.0
                    if joint_id in [7, 8, 13, 14]:
                        scale = scale * 0.5 if random.random() < 0.1 else scale
                    elif joint_id in [9, 10, 15, 16]:
                        scale = scale * 0.2 if random.random() < 0.1 else scale
                    else:
                        scale = scale * 0.5 if random.random() < 0.05 else scale
                    g = np.exp(
                        -((x - x0) ** 2 + (y - y0) ** 2) / (2 * cur_sigma ** 2)) * scale

                    # Usable gaussian range
                    g_x = max(0,
                              -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
                    g_y = max(0,
                              -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
                    # Image range
                    img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
                    img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])

                    # merge overlapping people with an elementwise max
                    target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
                        target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]],
                        g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
                target = np.clip(target, 0, 1)

        if self.use_different_joints_weight:
            target_weight = np.multiply(target_weight, self.joints_weight)

        return target, target_weight

    def generate_3d_target(self, joints_3d):
        """Gaussian occupancy volume centered at each person's mid-hip root."""
        num_people = len(joints_3d)

        space_size = self.space_size
        space_center = self.space_center
        cube_size = self.initial_cube_size
        # voxel-center coordinates along each world axis
        grid1Dx = np.linspace(-space_size[0] / 2, space_size[0] / 2, cube_size[0]) + space_center[0]
        grid1Dy = np.linspace(-space_size[1] / 2, space_size[1] / 2, cube_size[1]) + space_center[1]
        grid1Dz = np.linspace(-space_size[2] / 2, space_size[2] / 2, cube_size[2]) + space_center[2]

        target = np.zeros((cube_size[0], cube_size[1], cube_size[2]), dtype=np.float32)
        cur_sigma = 200.0

        for n in range(num_people):
            joint_id = [11, 12]  # mid-hip
            mu_x = (joints_3d[n][joint_id[0]][0] + joints_3d[n][joint_id[1]][0]) / 2.0
            mu_y = (joints_3d[n][joint_id[0]][1] + joints_3d[n][joint_id[1]][1]) / 2.0
            mu_z = (joints_3d[n][joint_id[0]][2] + joints_3d[n][joint_id[1]][2]) / 2.0
            # only fill voxels within 3 sigma of the root for efficiency
            i_x = [np.searchsorted(grid1Dx, mu_x - 3 * cur_sigma),
                   np.searchsorted(grid1Dx, mu_x + 3 * cur_sigma, 'right')]
            i_y = [np.searchsorted(grid1Dy, mu_y - 3 * cur_sigma),
                   np.searchsorted(grid1Dy, mu_y + 3 * cur_sigma, 'right')]
            i_z = [np.searchsorted(grid1Dz, mu_z - 3 * cur_sigma),
                   np.searchsorted(grid1Dz, mu_z + 3 * cur_sigma, 'right')]
            if i_x[0] >= i_x[1] or i_y[0] >= i_y[1] or i_z[0] >= i_z[1]:
                continue

            gridx, gridy, gridz = np.meshgrid(grid1Dx[i_x[0]:i_x[1]], grid1Dy[i_y[0]:i_y[1]], grid1Dz[i_z[0]:i_z[1]],
                                              indexing='ij')
            g = np.exp(-((gridx - mu_x) ** 2 + (gridy - mu_y) ** 2 + (gridz - mu_z) ** 2) / (2 * cur_sigma ** 2))
            # merge multiple people with an elementwise max
            target[i_x[0]:i_x[1], i_y[0]:i_y[1], i_z[0]:i_z[1]] = np.maximum(
                target[i_x[0]:i_x[1], i_y[0]:i_y[1], i_z[0]:i_z[1]], g)

        target = np.clip(target, 0, 1)
        return target

    def evaluate(self):
        # evaluation is not meaningful for purely synthetic data
        pass

    @staticmethod
    def get_new_center(center_list):
        """Propose a ground-plane center: 70% uniform over the space, otherwise near an existing person."""
        if len(center_list) == 0 or random.random() < 0.7:
            new_center = np.array([np.random.uniform(-1000.0, 2000.0), np.random.uniform(-1600.0, 1600.0)])
        else:
            xy = center_list[np.random.choice(range(len(center_list)))]
            new_center = xy + np.random.normal(500, 50, 2) * np.random.choice([1, -1], 2)

        return new_center

    @staticmethod
    def isvalid(bbox, bbox_list):
        """A placement is valid when its bbox IoU with every existing bbox is < 0.01."""
        if len(bbox_list) == 0:
            return True

        bbox_list = np.array(bbox_list)
        x0 = np.maximum(bbox[0], bbox_list[:, 0])
        y0 = np.maximum(bbox[1], bbox_list[:, 1])
        x1 = np.minimum(bbox[2], bbox_list[:, 2])
        y1 = np.minimum(bbox[3], bbox_list[:, 3])

        intersection = np.maximum(0, (x1 - x0) * (y1 - y0))
        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
        area_list = (bbox_list[:, 2] - bbox_list[:, 0]) * (bbox_list[:, 3] - bbox_list[:, 1])
        iou_list = intersection / (area + area_list - intersection)

        return np.max(iou_list) < 0.01

    @staticmethod
    def calc_bbox(pose, pose_vis):
        """Axis-aligned [x0, y0, x1, y1] bounding box over the visible joints."""
        index = pose_vis[:, 0] > 0
        bbox = [np.min(pose[index, 0]), np.min(pose[index, 1]),
                np.max(pose[index, 0]), np.max(pose[index, 1])]

        return np.array(bbox)
| [
"meijieru@gmail.com"
] | meijieru@gmail.com |
ed222c561a8364dd729c7da79f866fc6f3032907 | 8419f7d24df69a2cb92f04d7369c11c8141b0fcd | /tests/selection_test.py | 50d2e7a9e5f36e1495360ba5839de57cc89d17e9 | [
"MIT"
] | permissive | heyuqi1970/vaex | c1768eac9d5126e7efd1e139522feb9d65a7ecc9 | 867c180427a23e3b71df47305d7e8866b6673a98 | refs/heads/master | 2021-07-09T08:45:21.634354 | 2020-04-23T17:23:58 | 2020-04-23T17:23:58 | 242,555,084 | 2 | 0 | MIT | 2020-04-24T03:40:17 | 2020-02-23T16:54:13 | Python | UTF-8 | Python | false | false | 6,325 | py | from common import *
def test_selection_basics(df):
total = df["x"].sum()
df.select("x > 5")
df.select("x <= 5", name="inverse")
counts = df.count("x", selection=["default", "inverse", "x > 5", "default | inverse"])
np.testing.assert_array_almost_equal(counts, [4, 6, 4, 10])
df.select("x <= 1", name="inverse", mode="subtract")
counts = df.count("x", selection=["default", "inverse"])
np.testing.assert_array_almost_equal(counts, [4, 4])
total_subset = df["x"].sum(selection=True)
assert total_subset < total
for mode in vaex.selections._select_functions.keys():
df.select("x > 5")
df.select("x > 5", mode)
df.select(None)
df.select("x > 5", mode)
df.select("x > 5")
total_subset = df["x"].sum(selection=True)
df.select_inverse()
total_subset_inverse = df["x"].sum(selection=True)
df.select("x <= 5")
total_subset_inverse_compare = df["x"].sum(selection=True)
assert total_subset_inverse == total_subset_inverse_compare
assert total_subset_inverse + total_subset == total
df.select("x > 5")
df.select("x <= 5", name="inverse")
df.select_inverse(name="inverse")
counts = df.count("x", selection=["default", "inverse"])
np.testing.assert_array_almost_equal(counts, [4, 4])
def test_selection_history(df):
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
df.select_nothing()
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
total = df["x"].sum()
assert not df.has_selection()
assert not df.selection_can_undo()
assert not df.selection_can_redo()
df.select("x > 5")
assert df.has_selection()
total_subset = df["x"].sum(selection=True)
assert total_subset < total
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.select("x < 7", mode="and")
total_subset2 = df["x"].sum(selection=True)
assert total_subset2 < total_subset
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
total_subset_same = df["x"].sum(selection=True)
total_subset == total_subset_same
assert df.selection_can_undo()
assert df.selection_can_redo()
df.selection_redo()
total_subset2_same = df["x"].sum(selection=True)
total_subset2 == total_subset2_same
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
df.selection_undo()
assert not df.has_selection()
assert not df.selection_can_undo()
assert df.selection_can_redo()
df.selection_redo()
assert df.has_selection()
assert df.selection_can_undo()
assert df.selection_can_redo()
df.select("x < 7", mode="and")
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.select_nothing()
assert not df.has_selection()
assert df.selection_can_undo()
assert not df.selection_can_redo()
df.selection_undo()
assert df.selection_can_undo()
assert df.selection_can_redo()
def test_selection_serialize(df):
selection_expression = vaex.selections.SelectionExpression("x > 5", None, "and")
df.set_selection(selection_expression)
total_subset = df["x"].sum(selection=True)
df.select("x > 5")
total_subset_same = df["x"].sum(selection=True)
assert total_subset == total_subset_same
values = selection_expression.to_dict()
df.set_selection(vaex.selections.selection_from_dict(values))
total_subset_same2 = df["x"].sum(selection=True)
assert total_subset == total_subset_same2
selection_expression = vaex.selections.SelectionExpression("x > 5", None, "and")
selection_lasso = vaex.selections.SelectionLasso("x", "y", [0, 10, 10, 0], [-1, -1, 100, 100], selection_expression, "and")
df.set_selection(selection_lasso)
total_2 = df.sum("x", selection=True)
assert total_2 == total_subset
def test_selection_and_filter():
x = np.arange(-10, 11, 1)
y = np.arange(21)
df = vaex.from_arrays(x=x, y=y)
df.select(df.x < 0)
selected_list = df.evaluate(df.x, selection=True).tolist()
df_filtered = df[df.x < 0]
filtered_list = df_filtered['x'].tolist()
assert filtered_list == selected_list
repr(df_filtered)
# make sure we can slice, and repr
df_sliced = df_filtered[:5]
repr(df_sliced)
def test_filter(df):
dff = df[df.x>4]
assert dff.x.tolist() == list(range(5,10))
# vaex can have filters 'grow'
dff_bigger = dff.filter(dff.x < 3, mode="or")
dff_bigger = dff_bigger.filter(dff_bigger.x >= 0, mode="and") # restore old filter (df_filtered)
assert dff_bigger.x.tolist() == list(range(3)) + list(range(5,10))
def test_filter_boolean_scalar_variable(df):
df = df[df.x>4]
assert df.x.tolist() == list(range(5,10))
df.add_variable("production", True)
df = df.filter("production", mode="or")
df = df[df.x>=0] # restore old filter (df_filtered)
df = df[df.x<10] # restore old filter (df_filtered)
assert df.x.tolist() == list(range(10))
def test_selection_with_filtered_df_invalid_data():
# Custom function to be applied to a filtered DataFrame
def custom_func(x):
assert 4 not in x; return x**2
df = vaex.from_arrays(x=np.arange(10))
df_filtered = df[df.x!=4]
df_filtered.add_function('custom_function', custom_func)
df_filtered['y'] = df_filtered.func.custom_function(df_filtered.x)
# assert df_filtered.y.tolist() == [0, 1, 4, 9, 25, 36, 49, 64, 81]
assert df_filtered.count(df_filtered.y, selection='y > 0') == 8
def test_lasso(df):
x = [-0.1, 5.1, 5.1, -0.1]
y = [-0.1, -0.1, 4.1, 4.1]
df.select_lasso("x", "y", x, y)
sumx, sumy = df.sum(["x", "y"], selection=True)
np.testing.assert_array_almost_equal(sumx, 0+1+2)
np.testing.assert_array_almost_equal(sumy, 0+1+4)
# now test with masked arrays, m ~= x
x = [8-0.1, 9+0.1, 9+0.1, 8-0.1]
y = [-0.1, -0.1, 1000, 1000]
if df.is_local():
df._invalidate_selection_cache()
df.select_lasso("m", "y", x, y)
sumx, sumy = df.sum(['m', 'y'], selection=True)
np.testing.assert_array_almost_equal(sumx, 8)
np.testing.assert_array_almost_equal(sumy, 8**2)
| [
"maartenbreddels@gmail.com"
] | maartenbreddels@gmail.com |
ed2f46727aa6af253e2f0bda84ca29d56ea9a2af | 41fd80f9ccc72a17c2db16b7019312a87d3181e8 | /zhang_local/pdep/network4339_1.py | 52702a2b7fa8af0d6b888aed032edd7cb7a99807 | [] | no_license | aberdeendinius/n-heptane | 1510e6704d87283043357aec36317fdb4a2a0c34 | 1806622607f74495477ef3fd772908d94cff04d9 | refs/heads/master | 2020-05-26T02:06:49.084015 | 2019-07-01T15:12:44 | 2019-07-01T15:12:44 | 188,069,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,984 | py | species(
label = '[CH]C(=[CH])C([CH2])C(18883)',
structure = SMILES('[CH]C(=[CH])C([CH2])C'),
E0 = (739.718,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3120,650,792.5,1650,350,440,435,1725,510.927,510.939,510.946],'cm^-1')),
HinderedRotor(inertia=(0.289946,'amu*angstrom^2'), symmetry=1, barrier=(53.7133,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.289928,'amu*angstrom^2'), symmetry=1, barrier=(53.7135,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.289953,'amu*angstrom^2'), symmetry=1, barrier=(53.7128,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.289951,'amu*angstrom^2'), symmetry=1, barrier=(53.7141,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.04902,0.0582,-3.35514e-05,4.61057e-09,2.14951e-12,89079.6,26.026], Tmin=(100,'K'), Tmax=(1015.18,'K')), NASAPolynomial(coeffs=[11.0759,0.030956,-1.14174e-05,1.97523e-09,-1.32045e-13,86411.8,-25.6117], Tmin=(1015.18,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(739.718,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cds_P) + radical(AllylJ2_triplet)"""),
)
species(
label = 'C=CC(42)',
structure = SMILES('C=CC'),
E0 = (6.12372,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650],'cm^-1')),
HinderedRotor(inertia=(0.597443,'amu*angstrom^2'), symmetry=1, barrier=(13.7364,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (42.0797,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2218.31,'J/mol'), sigma=(4.982,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.30977,0.00827491,3.37717e-05,-4.3931e-08,1.58773e-11,767.476,9.64349], Tmin=(100,'K'), Tmax=(988,'K')), NASAPolynomial(coeffs=[5.41204,0.0172866,-6.51359e-06,1.20323e-09,-8.55924e-14,-503.177,-4.80153], Tmin=(988,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(6.12372,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsHH)"""),
)
species(
label = '[CH]=C=[CH](18734)',
structure = SMILES('[CH]=C=[CH]'),
E0 = (491.681,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([540,610,2055,239.877,511.233,1743.98,1746.51,1747.6,1753.44],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (38.048,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1737.73,'J/mol'), sigma=(4.1,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.766,0.0170203,-1.57568e-05,7.95984e-09,-1.4265e-12,59188.9,11.2142], Tmin=(100,'K'), Tmax=(1806.04,'K')), NASAPolynomial(coeffs=[4.81405,0.00509933,2.77647e-07,-2.23082e-10,1.96202e-14,59653.5,3.45727], Tmin=(1806.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(491.681,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(C=C=CJ) + radical(C=C=CJ)"""),
)
species(
label = 'H(8)',
structure = SMILES('[H]'),
E0 = (211.805,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25474.2,-0.444973], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25474.2,-0.444973], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.805,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH]C(=[CH])C(=C)C(19687)',
structure = SMILES('[CH]C(=[CH])C(=C)C'),
E0 = (636.521,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,180,180,180],'cm^-1')),
HinderedRotor(inertia=(2.11706,'amu*angstrom^2'), symmetry=1, barrier=(48.6754,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.11657,'amu*angstrom^2'), symmetry=1, barrier=(48.6641,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.11677,'amu*angstrom^2'), symmetry=1, barrier=(48.6687,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (79.1198,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.895327,0.0606763,-4.09674e-05,9.40654e-09,1.0466e-12,76674.1,21.159], Tmin=(100,'K'), Tmax=(1031.13,'K')), NASAPolynomial(coeffs=[13.2617,0.0259733,-9.78747e-06,1.72745e-09,-1.17364e-13,73418.4,-42.3016], Tmin=(1031.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(636.521,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(AllylJ2_triplet) + radical(Cds_P)"""),
)
species(
label = '[CH](2815)',
structure = SMILES('[CH]'),
E0 = (585.033,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([4000],'cm^-1')),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (13.0186,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.1763,-0.00339736,5.29655e-06,-3.21799e-09,7.28313e-13,70356.4,-0.99239], Tmin=(100,'K'), Tmax=(1260.74,'K')), NASAPolynomial(coeffs=[3.26554,0.000229807,1.03509e-07,-7.93772e-12,-2.40435e-16,70527.4,3.38009], Tmin=(1260.74,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(585.033,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), comment="""Thermo library: primaryThermoLibrary + radical(CJ3)"""),
)
species(
label = 'C#CC([CH2])C(5193)',
structure = SMILES('C#CC([CH2])C'),
E0 = (321.758,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,750,770,3400,2100,1380,1390,370,380,2900,435,2175,525,3000,3100,440,815,1455,1000],'cm^-1')),
HinderedRotor(inertia=(0.46208,'amu*angstrom^2'), symmetry=1, barrier=(10.6241,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0666038,'amu*angstrom^2'), symmetry=1, barrier=(83.0888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.60399,'amu*angstrom^2'), symmetry=1, barrier=(82.8629,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (67.1091,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.05266,0.0371709,-7.10649e-06,-1.96893e-08,1.19932e-11,38774.1,18.6599], Tmin=(100,'K'), Tmax=(877.4,'K')), NASAPolynomial(coeffs=[9.62985,0.0193968,-5.38942e-06,7.89676e-10,-4.88604e-14,36799,-20.583], Tmin=(877.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(321.758,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CtCsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl)"""),
)
species(
label = '[CH3](11)',
structure = SMILES('[CH3]'),
E0 = (135.382,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([570.572,1408.13,1408.49,4000,4000,4000],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.91547,0.00184154,3.48742e-06,-3.32748e-09,8.49957e-13,16285.6,0.351741], Tmin=(100,'K'), Tmax=(1337.63,'K')), NASAPolynomial(coeffs=[3.54146,0.00476787,-1.82148e-06,3.28877e-10,-2.22546e-14,16224,1.66035], Tmin=(1337.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(135.382,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), comment="""Thermo library: primaryThermoLibrary + radical(CH3)"""),
)
species(
label = '[CH]C(=[CH])C=C(19261)',
structure = SMILES('[CH]C(=[CH])C=C'),
E0 = (674.111,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2950,3100,1380,975,1025,1650,350,440,435,1725,3010,987.5,1337.5,450,1655,180,180,180],'cm^-1')),
HinderedRotor(inertia=(2.10119,'amu*angstrom^2'), symmetry=1, barrier=(48.3106,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.0992,'amu*angstrom^2'), symmetry=1, barrier=(48.2648,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (65.0932,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.78075,0.0401882,-8.76075e-06,-1.97193e-08,1.12783e-11,81164.5,17.38], Tmin=(100,'K'), Tmax=(955.832,'K')), NASAPolynomial(coeffs=[12.0562,0.0178508,-6.13458e-06,1.06669e-09,-7.39864e-14,78256.3,-36.6668], Tmin=(955.832,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(674.111,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Cds_P) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH][C]=[CH](21256)',
structure = SMILES('[CH][C]=[CH]'),
E0 = (861.746,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,3120,650,792.5,1650,180,180],'cm^-1')),
HinderedRotor(inertia=(2.1891,'amu*angstrom^2'), symmetry=1, barrier=(50.3317,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (38.048,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.18317,0.0164338,-7.13252e-06,1.19383e-09,-3.27944e-14,103675,12.0918], Tmin=(100,'K'), Tmax=(1799.19,'K')), NASAPolynomial(coeffs=[6.32962,0.0112581,-4.33439e-06,7.19107e-10,-4.49321e-14,102248,-5.75439], Tmin=(1799.19,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(861.746,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(103.931,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_P) + radical(Cds_S) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH2][CH]C(44)',
structure = SMILES('[CH2][CH]C'),
E0 = (279.046,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5],'cm^-1')),
HinderedRotor(inertia=(0.00418548,'amu*angstrom^2'), symmetry=1, barrier=(6.91848,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00418537,'amu*angstrom^2'), symmetry=1, barrier=(6.91838,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (42.0797,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.25505,0.0137285,1.00536e-05,-1.43788e-08,4.3875e-12,33590.4,14.1736], Tmin=(100,'K'), Tmax=(1201.86,'K')), NASAPolynomial(coeffs=[3.74312,0.0203097,-8.40105e-06,1.5386e-09,-1.05137e-13,32880.4,9.26373], Tmin=(1201.86,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(279.046,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + radical(RCCJ) + radical(CCJC)"""),
)
species(
label = '[CH]C([CH])=C(C)C(21272)',
structure = SMILES('[CH]C([CH])=C(C)C'),
E0 = (633.357,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.34145,0.0595696,-3.30538e-05,9.11995e-09,-1.06436e-12,76268.9,22.2278], Tmin=(100,'K'), Tmax=(1774.5,'K')), NASAPolynomial(coeffs=[9.96714,0.0401259,-1.66178e-05,2.94502e-09,-1.944e-13,73207.7,-24.3331], Tmin=(1774.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(633.357,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + radical(AllylJ2_triplet) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]C([CH2])=C([CH2])C(18079)',
structure = SMILES('[CH]C([CH2])=C([CH2])C'),
E0 = (565.671,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,325,375,415,465,420,450,1700,1750,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,435.027,435.118,435.22],'cm^-1')),
HinderedRotor(inertia=(0.381444,'amu*angstrom^2'), symmetry=1, barrier=(51.1879,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.381355,'amu*angstrom^2'), symmetry=1, barrier=(51.1799,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.380777,'amu*angstrom^2'), symmetry=1, barrier=(51.1824,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.380691,'amu*angstrom^2'), symmetry=1, barrier=(51.1758,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.908188,0.0595149,-3.25415e-05,3.40661e-09,2.0794e-12,68152.9,23.256], Tmin=(100,'K'), Tmax=(1101.48,'K')), NASAPolynomial(coeffs=[12.3301,0.0313888,-1.24226e-05,2.23507e-09,-1.52544e-13,64826.7,-36.6291], Tmin=(1101.48,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(565.671,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + radical(Allyl_P) + radical(AllylJ2_triplet) + radical(Allyl_P)"""),
)
species(
label = '[CH]C(=C)C([CH2])[CH2](17727)',
structure = SMILES('[CH]C(=C)C([CH2])[CH2]'),
E0 = (697.704,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,623.021,623.022,623.022,623.023],'cm^-1')),
HinderedRotor(inertia=(0.200176,'amu*angstrom^2'), symmetry=1, barrier=(55.1377,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.200177,'amu*angstrom^2'), symmetry=1, barrier=(55.1377,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.200176,'amu*angstrom^2'), symmetry=1, barrier=(55.1376,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.200175,'amu*angstrom^2'), symmetry=1, barrier=(55.1377,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3248.85,'J/mol'), sigma=(5.90911,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=507.46 K, Pc=35.73 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.21023,0.0555238,-2.63652e-05,-4.35895e-09,6.27756e-12,84020.2,26.8273], Tmin=(100,'K'), Tmax=(923.387,'K')), NASAPolynomial(coeffs=[10.335,0.0309114,-1.06121e-05,1.76031e-09,-1.15182e-13,81699.2,-19.9103], Tmin=(923.387,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(697.704,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsHH) + radical(Isobutyl) + radical(AllylJ2_triplet) + radical(Isobutyl)"""),
)
species(
label = '[CH]C([CH])=C[CH2](21258)',
structure = SMILES('[CH]C([CH])=C[CH2]'),
E0 = (823.911,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,350,440,435,1725,328.03,328.033,328.034,328.035,328.036,328.04],'cm^-1')),
HinderedRotor(inertia=(0.664758,'amu*angstrom^2'), symmetry=1, barrier=(50.762,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.664777,'amu*angstrom^2'), symmetry=1, barrier=(50.7619,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.664772,'amu*angstrom^2'), symmetry=1, barrier=(50.7618,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 6,
opticalIsomers = 1,
molecularWeight = (65.0932,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.78722,0.0415433,-8.93762e-06,-1.22359e-08,6.20359e-12,99179.3,20.1709], Tmin=(100,'K'), Tmax=(1059.47,'K')), NASAPolynomial(coeffs=[8.69747,0.0295269,-1.18496e-05,2.13401e-09,-1.45711e-13,96925.3,-17.2938], Tmin=(1059.47,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(823.911,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(220.334,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(AllylJ2_triplet) + radical(Allyl_P) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]C([CH])=C([CH2])C(19692)',
structure = SMILES('[CH]C([CH])=C([CH2])C'),
E0 = (784.856,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,325,375,415,465,420,450,1700,1750,3000,3100,440,815,1455,1000,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 6,
opticalIsomers = 1,
molecularWeight = (79.1198,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.948164,0.0613832,-3.83155e-05,1.23014e-08,-1.65031e-12,94510.7,23.945], Tmin=(100,'K'), Tmax=(1662.91,'K')), NASAPolynomial(coeffs=[12.3948,0.0338493,-1.3479e-05,2.34436e-09,-1.53386e-13,90703.8,-37.0999], Tmin=(1662.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(784.856,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + radical(AllylJ2_triplet) + radical(AllylJ2_triplet) + radical(Allyl_P)"""),
)
species(
label = '[CH]C(=[CH])C([CH2])[CH2](19200)',
structure = SMILES('[CH]C(=[CH])C([CH2])[CH2]'),
E0 = (944.8,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3120,650,792.5,1650,1380,1390,370,380,2900,435,350,440,435,1725,492.573,492.856,493.377],'cm^-1')),
HinderedRotor(inertia=(0.00069575,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.310712,'amu*angstrom^2'), symmetry=1, barrier=(53.542,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.31084,'amu*angstrom^2'), symmetry=1, barrier=(53.541,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.310118,'amu*angstrom^2'), symmetry=1, barrier=(53.5398,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 6,
opticalIsomers = 1,
molecularWeight = (79.1198,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.16628,0.0586672,-4.15569e-05,1.0902e-08,1.3757e-12,113739,26.6969], Tmin=(100,'K'), Tmax=(889.962,'K')), NASAPolynomial(coeffs=[10.5201,0.0281773,-9.63713e-06,1.57586e-09,-1.01524e-13,111616,-19.9098], Tmin=(889.962,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(944.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsHH) + radical(AllylJ2_triplet) + radical(Isobutyl) + radical(Isobutyl) + radical(Cds_P)"""),
)
species(
label = '[CH]C(=C)C(=C)C(18075)',
structure = SMILES('[CH]C(=C)C(=C)C'),
E0 = (389.424,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,180,180,180,180],'cm^-1')),
HinderedRotor(inertia=(2.14161,'amu*angstrom^2'), symmetry=1, barrier=(49.2399,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.14261,'amu*angstrom^2'), symmetry=1, barrier=(49.2628,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.14146,'amu*angstrom^2'), symmetry=1, barrier=(49.2363,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.986292,0.0569524,-2.36192e-05,-8.84439e-09,7.31756e-12,46953.6,21.1224], Tmin=(100,'K'), Tmax=(1006.49,'K')), NASAPolynomial(coeffs=[12.9227,0.0289714,-1.09157e-05,1.94828e-09,-1.34051e-13,43565.3,-41.437], Tmin=(1006.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(389.424,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""),
)
species(
label = 'CH2(S)(14)',
structure = SMILES('[CH2]'),
E0 = (419.091,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.93,2896.01,2896.03],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.10264,-0.00144068,5.45069e-06,-3.58002e-09,7.56192e-13,50400.6,-0.411765], Tmin=(100,'K'), Tmax=(1442.36,'K')), NASAPolynomial(coeffs=[2.62648,0.00394763,-1.49924e-06,2.54539e-10,-1.62956e-14,50691.8,6.78378], Tmin=(1442.36,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(419.091,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH]C(=[CH])C[CH2](18837)',
structure = SMILES('[CH]C(=[CH])C[CH2]'),
E0 = (767.45,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,3000,3100,440,815,1455,1000,498.567,499.809,501.077],'cm^-1')),
HinderedRotor(inertia=(0.291866,'amu*angstrom^2'), symmetry=1, barrier=(52.092,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.293908,'amu*angstrom^2'), symmetry=1, barrier=(52.1161,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.29664,'amu*angstrom^2'), symmetry=1, barrier=(52.0336,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (66.1011,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3074.1,'J/mol'), sigma=(5.55822,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=480.17 K, Pc=40.62 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.65878,0.0455874,-2.94386e-05,9.8193e-09,-1.35238e-12,92392,21.8548], Tmin=(100,'K'), Tmax=(1655.31,'K')), NASAPolynomial(coeffs=[11.0697,0.0228462,-8.83111e-06,1.51975e-09,-9.89077e-14,89276.4,-28.2906], Tmin=(1655.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(767.45,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(245.277,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(AllylJ2_triplet) + radical(Cds_P) + radical(RCCJ)"""),
)
species(
label = '[CH]C([CH])=CCC(21273)',
structure = SMILES('[CH]C([CH])=CCC'),
E0 = (649.766,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.03631,0.056981,-2.30081e-05,-1.43386e-09,2.37915e-12,78262.2,25.5066], Tmin=(100,'K'), Tmax=(1214.05,'K')), NASAPolynomial(coeffs=[10.1548,0.0390469,-1.5811e-05,2.82965e-09,-1.90572e-13,75155.8,-23.9289], Tmin=(1214.05,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(649.766,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(AllylJ2_triplet) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]C(=[CH])C[CH]C(18912)',
structure = SMILES('[CH]C(=[CH])C[CH]C'),
E0 = (732.87,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,3120,650,792.5,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,514.385,514.912,516.225,516.862],'cm^-1')),
HinderedRotor(inertia=(0.000621478,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.282893,'amu*angstrom^2'), symmetry=1, barrier=(53.891,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.283783,'amu*angstrom^2'), symmetry=1, barrier=(53.7954,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.298436,'amu*angstrom^2'), symmetry=1, barrier=(53.944,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.65953,0.0543528,-3.41486e-05,1.21485e-08,-1.99454e-12,88225.5,25.0313], Tmin=(100,'K'), Tmax=(1266.28,'K')), NASAPolynomial(coeffs=[6.23748,0.0398916,-1.70181e-05,3.12966e-09,-2.13952e-13,87066.1,1.86455], Tmin=(1266.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(732.87,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + radical(Cds_P) + radical(AllylJ2_triplet) + radical(RCCJC)"""),
)
species(
label = '[CH]C1=CCC1C(21274)',
structure = SMILES('[CH]C1=CCC1C'),
E0 = (444.345,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (80.1277,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.88991,0.0288127,5.7306e-05,-9.17238e-08,3.62878e-11,53534.2,20.0457], Tmin=(100,'K'), Tmax=(965.058,'K')), NASAPolynomial(coeffs=[12.5588,0.0283833,-1.00919e-05,1.85478e-09,-1.34496e-13,49435.7,-41.6113], Tmin=(965.058,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(444.345,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(324.264,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclobutene) + radical(AllylJ2_triplet)"""),
)
species(
label = 'CH2(T)(28)',
structure = SMILES('[CH2]'),
E0 = (381.37,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1066.91,2790.99,3622.37],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.01192,-0.000154979,3.26298e-06,-2.40422e-09,5.69497e-13,45867.7,0.5332], Tmin=(100,'K'), Tmax=(1104.58,'K')), NASAPolynomial(coeffs=[3.14983,0.00296674,-9.76056e-07,1.54115e-10,-9.50338e-15,46058.1,4.77808], Tmin=(1104.58,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(381.37,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH]C([CH])=CC(21257)',
structure = SMILES('[CH]C([CH])=CC'),
E0 = (672.412,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,350,440,435,1725,302.964,302.964,302.966,302.968,302.978,302.992],'cm^-1')),
HinderedRotor(inertia=(0.783156,'amu*angstrom^2'), symmetry=1, barrier=(51.0103,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.783094,'amu*angstrom^2'), symmetry=1, barrier=(51.0102,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.783135,'amu*angstrom^2'), symmetry=1, barrier=(51.0104,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (66.1011,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.70252,0.0448033,-1.90919e-05,1.71805e-09,5.4842e-13,80959.9,20.2105], Tmin=(100,'K'), Tmax=(1432.87,'K')), NASAPolynomial(coeffs=[8.99387,0.0315806,-1.27157e-05,2.22513e-09,-1.46128e-13,78138.2,-20.1434], Tmin=(1432.87,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(672.412,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(245.277,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + radical(AllylJ2_triplet) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]C(=[CH])C([CH])C(21275)',
structure = SMILES('[CH]C(=[CH])C([CH])C'),
E0 = (982.851,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,350,440,435,1725,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 6,
opticalIsomers = 1,
molecularWeight = (79.1198,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.794676,0.0615631,-4.67001e-05,1.85473e-08,-2.99066e-12,118332,26.1726], Tmin=(100,'K'), Tmax=(1467.14,'K')), NASAPolynomial(coeffs=[14.1607,0.025122,-9.44272e-06,1.61746e-09,-1.05816e-13,114411,-43.434], Tmin=(1467.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(982.851,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsHH) + radical(CCJ2_triplet) + radical(AllylJ2_triplet) + radical(Cds_P)"""),
)
species(
label = '[C]C(=[CH])C([CH2])C(21276)',
structure = SMILES('[C]C(=[CH])C([CH2])C'),
E0 = (1038.51,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,1380,1390,370,380,2900,435,3120,650,792.5,1650,350,440,435,1725,395.001],'cm^-1')),
HinderedRotor(inertia=(0.0823483,'amu*angstrom^2'), symmetry=1, barrier=(9.04704,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0816193,'amu*angstrom^2'), symmetry=1, barrier=(9.04182,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.248242,'amu*angstrom^2'), symmetry=1, barrier=(27.3137,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 6,
opticalIsomers = 1,
molecularWeight = (79.1198,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.870208,0.061859,-5.90831e-05,2.98425e-08,-5.95954e-12,125023,24.7062], Tmin=(100,'K'), Tmax=(1223.59,'K')), NASAPolynomial(coeffs=[14.0868,0.0186511,-6.11215e-06,9.80278e-10,-6.22518e-14,121788,-41.7227], Tmin=(1223.59,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(1038.51,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + longDistanceInteraction_noncyclic(CdCs-ST) + group(Cds-CdsHH) + radical(Isobutyl) + radical(Cds_P) + radical(CJ3)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.64289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53101,-0.000123661,-5.02999e-07,2.43531e-09,-1.40881e-12,-1046.98,2.96747], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.95258,0.0013969,-4.92632e-07,7.8601e-11,-4.60755e-15,-923.949,5.87189], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-8.64289,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'He',
structure = SMILES('[He]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (4.0026,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(84.8076,'J/mol'), sigma=(2.576,'angstroms'), dipoleMoment=(0,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""NOx2018"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""He""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ar',
structure = SMILES('[Ar]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: primaryThermoLibrary"""),
)
# Transition states TS1-TS21 for network 4339: each entry records only the
# TS energy E0 (kJ/mol) — no frequencies or geometries are stored, since the
# associated rate coefficients were estimated from rate rules (see the
# comment fields of the reaction() blocks below).
transitionState(
    label = 'TS1',
    E0 = (739.718,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (859.143,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (922.947,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (834.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (887.531,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (793.412,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (836.699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (931.847,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (784.027,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (959.293,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (1140.79,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (996.661,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (1156.6,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (803.118,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (1186.54,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (934.483,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (899.653,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (748.002,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (1088.1,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (1194.66,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (1250.32,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['C=CC(42)', '[CH]=C=[CH](18734)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['H(8)', '[CH]C(=[CH])C(=C)C(19687)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(72.1434,'m^3/(mol*s)'), n=1.66666, Ea=(10.8177,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-OneDeCs_Cds;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction3',
reactants = ['[CH](2815)', 'C#CC([CH2])C(5193)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(18.899,'m^3/(mol*s)'), n=1.76329, Ea=(16.1554,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Ct-Cs_Ct-H;YJ] for rate rule [Ct-Cs_Ct-H;CH_quartet]
Euclidian distance = 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH3](11)', '[CH]C(=[CH])C=C(19261)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(0.0129216,'m^3/(mol*s)'), n=2.42105, Ea=(24.5119,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-OneDeH_Cds;CsJ-HHH]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['C=CC(42)', '[CH][C]=[CH](21256)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(0.00168615,'m^3/(mol*s)'), n=2.52599, Ea=(19.6608,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-CsH_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['[CH2][CH]C(44)', '[CH]=C=[CH](18734)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.523563,'m^3/(mol*s)'), n=2.10494, Ea=(22.6844,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ct_Ct;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH]C([CH])=C(C)C(21272)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(4.614e+09,'s^-1'), n=1.31, Ea=(203.342,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 163 used for R2H_S;C_rad_out_OneDe/Cs;Cs_H_out_2H
Exact match found for rate rule [R2H_S;C_rad_out_OneDe/Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C([CH2])=C([CH2])C(18079)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(13437.7,'s^-1'), n=2.58467, Ea=(192.129,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_DS;Cd_rad_out_singleH;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C(=C)C([CH2])[CH2](17727)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(222600,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH3](11)', '[CH]C([CH])=C[CH2](21258)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(1.66881e+08,'m^3/(mol*s)'), n=-0.401267, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;C_methyl]
Euclidian distance = 0
family: R_Recombination
Ea raised from -6.7 to 0 kJ/mol."""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2][CH]C(44)', '[CH][C]=[CH](21256)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.9789e+07,'m^3/(mol*s)'), n=-0.126319, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -15.6 to -15.6 kJ/mol.
Ea raised from -15.6 to 0 kJ/mol."""),
)
reaction(
label = 'reaction12',
reactants = ['H(8)', '[CH]C([CH])=C([CH2])C(19692)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(4.34078e+06,'m^3/(mol*s)'), n=0.278577, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction13',
reactants = ['H(8)', '[CH]C(=[CH])C([CH2])[CH2](19200)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(6.97354e-12,'cm^3/(molecule*s)'), n=0.6, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 18 used for C_rad/H2/Cs;H_rad
Exact match found for rate rule [C_rad/H2/Cs;H_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -3.3 to 0 kJ/mol."""),
)
reaction(
label = 'reaction14',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C(=C)C(=C)C(18075)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction15',
reactants = ['CH2(S)(14)', '[CH]C(=[CH])C[CH2](18837)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(143764,'m^3/(mol*s)'), n=0.444, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [carbene;R_H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: 1,2_Insertion_carbene
Ea raised from -5.1 to 0 kJ/mol."""),
)
reaction(
label = 'reaction16',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C([CH])=CCC(21273)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(5.59192e+09,'s^-1'), n=1.025, Ea=(194.765,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;CH3] for rate rule [cCs(-HC)CJ;CsJ-HH;CH3]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C(=[CH])C[CH]C(18912)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction18',
reactants = ['[CH]C(=[CH])C([CH2])C(18883)'],
products = ['[CH]C1=CCC1C(21274)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;C_rad_out_2H;Ypri_rad_out] for rate rule [R4_SSD;C_rad_out_2H;CdsinglepriH_rad_out]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction19',
reactants = ['CH2(T)(28)', '[CH]C([CH])=CC(21257)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/OneDeC;Birad]
Euclidian distance = 4.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction20',
reactants = ['H(8)', '[CH]C(=[CH])C([CH])C(21275)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction21',
reactants = ['H(8)', '[C]C(=[CH])C([CH2])C(21276)'],
products = ['[CH]C(=[CH])C([CH2])C(18883)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
# Pressure-dependent reaction network 4339: one unimolecular isomer,
# entered from the bimolecular channel formed by the species labeled
# 'C=CC(42)' and '[CH]=C=[CH](18734)'. Collisional energy transfer is
# modeled with an equimolar four-component bath gas.
network(
    label = '4339',
    isomers = [
        '[CH]C(=[CH])C([CH2])C(18883)',
    ],
    reactants = [
        ('C=CC(42)', '[CH]=C=[CH](18734)'),
    ],
    bathGas = {
        'N2': 0.25,
        'Ne': 0.25,
        'He': 0.25,
        'Ar': 0.25,
    },
)
# Pressure-dependence job for network '4339': compute k(T,P) on a 10x10
# temperature/pressure grid (1200-1500 K, 1-10 atm) with the modified
# strong collision approximation, then fit the result to a Chebyshev
# polynomial with 6 temperature and 4 pressure terms.
pressureDependence(
    label = '4339',
    Tmin = (1200,'K'),
    Tmax = (1500,'K'),
    Tcount = 10,
    Tlist = ([1201.48,1213.22,1236.21,1269.31,1310.55,1356.92,1404.16,1447.02,1479.84,1497.7],'K'),
    Pmin = (1,'atm'),
    Pmax = (10,'atm'),
    Pcount = 10,
    Plist = ([1.02771,1.14872,1.41959,1.89986,2.67608,3.83649,5.40396,7.23219,8.93758,9.98989],'bar'),
    # Energy-grain discretization: grains no wider than 0.5 kcal/mol,
    # and at least 250 grains overall.
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
| [
"dinius.ab@husky.neu.edu"
] | dinius.ab@husky.neu.edu |
483748271f5991198d5319c7f7955667c13bdc56 | e15b849ef7e2c9c2c8837a4e00da8eeee2117c60 | /iaproofread03-gen.py | 0782df93d917df7c112f5462633b45ad26f9897d | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | vitorio/iaproofread | baaf0153d60eef009c14b29ed12d5f1128635e6b | f4a5188ad83bd07f778f57cdd7e37828477a0aa1 | refs/heads/master | 2021-01-10T01:33:10.717361 | 2016-02-07T04:44:34 | 2013-11-17T04:05:44 | 51,231,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | # -*- coding: utf-8 -*-
__author__ = 'vitorio'
from xml.dom import minidom
import os.path
import pickle
import cgi
import PythonMagick
import jinja2
env = jinja2.Environment(loader=jinja2.FileSystemLoader('.'))
template = env.get_template('iaproofread03.jinja2')
#IA_NAME = 'windinwillows00grah'
#IA_NAME = 'artpracticeoftyp00gres'
#IA_NAME = 'manualoflinotype00merg'
IA_NAME = 'glimpsesofworldp00stod'
# must already exist
OUTPUT_FOLDER = 'iapr'
brittlefragments = pickle.load(open(os.path.join(OUTPUT_FOLDER, '%s_brittlefragments.pickle' % IA_NAME), 'rb'))
print '%d fully computed fragments' % len(brittlefragments)
for idx_fra, a in enumerate(brittlefragments):
idx_obj, idx_reg, idx_par, idx_lin = [int(b) for b in a['name'].split('-')]
# let's assume if the PNG exists, it's correct. this may not be true!
if not os.path.exists(os.path.join(OUTPUT_FOLDER, '%s.png' % a['name'])):
jp2file = PythonMagick.Image(str(os.path.join('%s_jp2' % IA_NAME, '%s.jp2' % a['jp2name'])))
jp2file.crop(a['geometrystring'])
jp2file.write(os.path.join(OUTPUT_FOLDER, '%s.png' % a['name']))
a['fragment']['unicodetext'] = cgi.escape(a['fragment']['text']).encode('utf8').decode('utf8')
a['fragment']['unicodeinputtext'] = cgi.escape(a['fragment']['text'], quote=True).encode('utf8').decode('utf8')
output_from_parsed_template = template.render(a=a)
# to save the results
with open(os.path.join(OUTPUT_FOLDER, '%s.html' % a['name']), 'wb') as fh:
fh.write(output_from_parsed_template.encode('utf8'))
| [
"email_about_iaproofread@vitor.io"
] | email_about_iaproofread@vitor.io |
01015222f4e22a467b3819943f4ec0d7342478b0 | bb8542963352bb45706e28dab0d42281010dc259 | /delphi/train.py | 421a6033129cca849132aa08032692c303f2dc92 | [] | no_license | kjlfsjakljsdfnsvjkdsa/Efficient-Truncated-Regression-with-Unknown-Noise-Variance---NeurIPs---2021 | 682a7d23c31a02c7f3470406f9dbec91ee3425ec | df227fbd68b8accc5c34cd07129df42f952b90df | refs/heads/master | 2023-05-13T20:14:40.790333 | 2021-06-04T19:32:40 | 2021-06-04T19:32:40 | 373,937,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,976 | py | import time
import os
import warnings
import dill
import numpy as np
import torch as ch
from torch import Tensor
from torch.optim import SGD, Adam
from torch.optim import lr_scheduler
from . import oracle
from .utils.helpers import has_attr, ckpt_at_epoch, AverageMeter, accuracy, type_of_script, LinearUnknownVariance, setup_store_with_metadata, LinearUnknownVariance, ProcedureComplete
from .utils import constants as consts
# Determine the running environment and pick a matching tqdm flavor:
# the notebook-friendly widget under Jupyter, the console bar otherwise.
script = type_of_script()
if script == consts.JUPYTER:
    from tqdm.autonotebook import tqdm as tqdm
else:
    from tqdm import tqdm
def make_optimizer_and_schedule(args, model, checkpoint, params, T=None):
    """Build the SGD optimizer and (optionally) a learning-rate schedule.

    Args:
        args: experiment arguments; reads ``lr``, ``momentum``,
            ``weight_decay``, ``custom_lr_multiplier``, ``lr_interpolation``,
            ``step_lr`` and ``step_lr_gamma``.
        model: network whose ``parameters()`` are optimized when ``params``
            is None.
        checkpoint: optional dict with saved ``optimizer``/``schedule`` state
            to resume from; ``checkpoint['epoch']`` is used as a fallback
            fast-forward count when the schedule state cannot be loaded.
        params: optional explicit iterable of parameters to optimize instead
            of ``model.parameters()``.
        T: total training horizon (epochs or steps), required by the
            cyclic and cosine schedules.

    Returns:
        Tuple ``(optimizer, schedule)``; ``schedule`` is None when no
        scheduling option was requested in ``args``.
    """
    param_list = model.parameters() if params is None else params
    optimizer = SGD(param_list, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Make schedule
    schedule = None
    if args.custom_lr_multiplier == consts.CYCLIC and T is not None:
        # Triangular ("cyclic") profile: ramp the multiplier up over the
        # first 4/15 of training, then decay linearly back to zero.
        lr_func = lambda t: np.interp([t], [0, T*4//15, T], [0, 1, 0])[0]
        schedule = lr_scheduler.LambdaLR(optimizer, lr_func)
    elif args.custom_lr_multiplier == consts.COSINE and T is not None:
        schedule = lr_scheduler.CosineAnnealingLR(optimizer, T)
    elif args.custom_lr_multiplier:
        # Custom piecewise schedule given as [(milestone, multiplier), ...],
        # either as a literal value or as its string representation.
        cs = args.custom_lr_multiplier
        # SECURITY NOTE: `eval` on a config-supplied string executes
        # arbitrary code; prefer `ast.literal_eval` if configs can be
        # untrusted. Kept as-is to preserve existing behavior.
        periods = eval(cs) if isinstance(cs, str) else cs
        if args.lr_interpolation == consts.LINEAR:
            # Linearly interpolate the multiplier between milestones.
            lr_func = lambda t: np.interp([t], *zip(*periods))[0]
        else:
            # Step interpolation: hold the multiplier of the most recent
            # milestone passed; 1.0 before the first milestone.
            def lr_func(ep):
                for (milestone, lr) in reversed(periods):
                    if ep >= milestone:
                        return lr
                return 1.0
        schedule = lr_scheduler.LambdaLR(optimizer, lr_func)
    elif args.step_lr:
        schedule = lr_scheduler.StepLR(optimizer, step_size=args.step_lr, gamma=args.step_lr_gamma)

    # Fast-forward the optimizer and the scheduler if resuming.
    if checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
        # Fix: the original unconditionally touched `schedule` here and
        # crashed (AttributeError on None) when resuming with no schedule
        # configured.
        if schedule is not None:
            try:
                schedule.load_state_dict(checkpoint['schedule'])
            except Exception:  # narrowed from a bare `except:`
                steps_to_take = checkpoint['epoch']
                print('Could not load schedule (was probably LambdaLR).'
                      f' Stepping {steps_to_take} times instead...')
                for _ in range(steps_to_take):
                    schedule.step()
    return optimizer, schedule
def eval_model(args, model, loader, store, table=None):
    """Evaluate a model for standard (and optionally adversarial) accuracy.

    Args:
        args (object) : A list of arguments---should be a python object
            implementing ``getattr()`` and ``setattr()``; reads ``device``
            and ``parallel``.
        model (AttackerModel) : model to evaluate
        loader (iterable) : a dataloader serving `(input, label)` batches from
            the validation set
        store (cox.Store) : store for saving results in (via tensorboardX);
            may be None, in which case nothing is logged
        table (str) : name of the store table to log into; defaults to the
            standard eval-logs table

    Returns:
        dict with keys ``test_prec1``, ``test_loss`` and wall-clock ``time``.
    """
    start_time = time.time()

    # Resolve the log table name once; `table` is never None after this line.
    table = consts.EVAL_LOGS_TABLE if table is None else table
    if store is not None:
        store.add_table(table, consts.EVAL_LOGS_SCHEMA)
    writer = store.tensorboard if store else None

    # put model on device
    model.to(args.device)

    assert not hasattr(model, "module"), "model is already in DataParallel."
    if args.parallel and next(model.parameters()).is_cuda:
        model = ch.nn.DataParallel(model)

    # The third return value (score) is not used by this evaluation path.
    test_prec1, test_loss, _ = model_loop(args, 'val', loader,
            model, None, 0, 0, writer, args.device)

    log_info = {
        'test_prec1': test_prec1,
        'test_loss': test_loss,
        'time': time.time() - start_time
    }

    # Log info into the logs table. Fix: the original re-tested
    # `table is None` here, which is dead code since `table` was already
    # resolved above.
    if store:
        store[table].append_row(log_info)
    return log_info
def train_model(args, model, loaders, *, phi=oracle.Identity(), criterion=ch.nn.CrossEntropyLoss(), checkpoint=None, parallel=False, cuda=False, dp_device_ids=None,
                store=None, table=None, update_params=None, disable_no_grad=False):
    """
    Train ``model``, logging to ``store`` and checkpointing periodically.
    Args:
        args (object) : argument container implementing ``getattr()``; must
            provide either ``epochs`` (epoch-based training) or ``steps``
            (gradient-step-based training), plus ``device``,
            ``save_ckpt_iters``, ``log_iters``, ``mixed_precision``, etc.
        model (ch.nn.Module) : model to train
        loaders (tuple) : ``(train_loader, val_loader)`` pair
        phi : oracle applied inside the criterion
        criterion : loss function
        checkpoint (dict, optional) : state to resume from
        parallel (bool) : wrap the model in ``DataParallel``
        cuda (bool) : move the model to GPU
        store (cox.Store, optional) : results/checkpoint store
        table (str, optional) : logs table name (defaults to ``consts.LOGS_TABLE``)
        update_params : parameter subset handed to the optimizer factory
        disable_no_grad (bool) : run validation with gradients enabled
    Returns:
        The trained model.
    """
    table = consts.LOGS_TABLE if table is None else table
    if store is not None:
        store.add_table(table, consts.LOGS_SCHEMA)
    writer = store.tensorboard if store else None
    # data loaders
    train_loader, val_loader = loaders
    optimizer, schedule = make_optimizer_and_schedule(args, model, checkpoint, update_params, T=(args.epochs if args.epochs else args.steps))
    # put the neural network onto gpu and in parallel mode
    assert not has_attr(model, "module"), "model is already in DataParallel."
    if cuda:
        model = model.cuda()
    if parallel:
        model = ch.nn.DataParallel(model)
    # number of gradient steps taken so far (None when training by epochs).
    # BUGFIX: this must be defined *before* the checkpoint branch below; the
    # original defined it afterwards (and referenced an undefined
    # `start_epoch`), raising NameError whenever a checkpoint without a
    # stored 'prec1' was resumed.
    steps = 0 if args.steps else None
    best_prec1, epoch = (0, 0)
    if checkpoint:
        epoch = checkpoint['epoch']
        # NOTE(review): the fallback call below does not match the
        # `model_loop` signature defined in this file -- confirm against the
        # intended version of `model_loop`.
        best_prec1 = checkpoint['prec1'] if 'prec1' in checkpoint \
            else model_loop(args, 'val', val_loader, model, None, epoch-1, steps, writer=None, device=args.device, schedule=schedule)[0]
    # keep track of the start time
    start_time = time.time()
    # do training loops until performing enough gradient steps or epochs
    while (args.steps is not None and steps < args.steps) or (args.epochs is not None and epoch < args.epochs):
        try:
            train_prec1, train_loss = model_loop(args, 'train', train_loader, model, phi, criterion, optimizer, epoch+1, steps, writer, device=args.device, schedule=schedule)
        except ProcedureComplete:
            # the training procedure signalled completion early
            return model
        # (a redundant ``except Exception as e: raise e`` clause was removed;
        # unexpected errors simply propagate.)
        # check for logging/checkpoint
        last_epoch = (epoch == (args.epochs - 1)) if args.epochs else (steps >= args.steps)
        should_save_ckpt = ((epoch % args.save_ckpt_iters == 0 or last_epoch) if args.epochs else (steps % args.save_ckpt_iters == 0 or last_epoch)) if args.save_ckpt_iters else False
        should_log = ((epoch % args.log_iters == 0 or last_epoch) if args.epochs else (steps % args.log_iters == 0 or last_epoch)) if args.log_iters else False
        # validation loop
        val_prec1, val_loss = 0.0, 0.0
        if should_log or should_save_ckpt:
            ctx = ch.enable_grad() if disable_no_grad else ch.no_grad()
            # evaluate model on validation set, if there is one
            if val_loader is not None:
                with ctx:
                    val_prec1, val_loss, score = model_loop(args, 'val', val_loader, model,
                                                            None, epoch + 1, steps, writer, device=args.device)
            # remember best prec_1 and save checkpoint
            is_best = val_prec1 > best_prec1
            best_prec1 = max(val_prec1, best_prec1)
            # save model checkpoint -- for neural networks
            if should_save_ckpt:
                sd_info = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'schedule': (schedule and schedule.state_dict()),
                    'epoch': epoch+1,
                    # NOTE(review): `amp` is not imported in the visible part
                    # of this file (presumably apex/amp) -- confirm.
                    'amp': amp.state_dict() if args.mixed_precision else None,
                    'prec1': val_prec1
                }
                def save_checkpoint(filename):
                    # checkpoints live next to the store when one is given
                    ckpt_save_path = os.path.join(args.out_dir if not store else \
                        store.path, filename)
                    ch.save(sd_info, ckpt_save_path, pickle_module=dill)
                # If we are at a saving epoch (or the last epoch), save a checkpoint
                save_checkpoint(ckpt_at_epoch(epoch))
                # Update the latest and best checkpoints (overrides old one)
                save_checkpoint(consts.CKPT_NAME_LATEST)
                if is_best: save_checkpoint(consts.CKPT_NAME_BEST)
        # log results
        if should_log: # TODO: add custom logging hook
            # log every checkpoint
            log_info = {
                'epoch': epoch + 1,
                'val_prec1': val_prec1,
                'val_loss': val_loss,
                'train_prec1': train_prec1,
                'train_loss': train_loss,
                'time': time.time() - start_time
            }
            # log info in log table
            if store: store[table].append_row(log_info)
        # update lr
        if args.epochs is not None and schedule: schedule.step()
        if has_attr(args, 'epoch_hook'):
            args.epoch_hook(model, epoch)
        # increment epoch counter
        epoch += 1
        # update number of gradient steps taken
        if steps is not None:
            steps += len(train_loader)
    # TODO: add end training hook
    return model
def model_loop(args, loop_type, loader, model, phi, criterion, optimizer, epoch, steps, writer, device, schedule=None):
    """Run one pass over ``loader``, either training or evaluating ``model``.

    Handles two model kinds: ``ch.nn.Module`` (forward + loss + optional
    backprop) and ``ch.distributions.Distribution`` (loss computed directly
    from optimizer parameters).

    Returns:
        ``(top1.avg, losses.avg)`` -- NOTE(review): some callers in this file
        unpack three values from this function; confirm the intended contract.
    """
    # check loop type
    if not loop_type in ['train', 'val']:
        # NOTE(review): this message text looks garbled ("must be in {0} must be...")
        err_msg = "loop type must be in {0} must be 'train' or 'val".format(loop_type)
        raise ValueError(err_msg)
    # train or val loop
    is_train = (loop_type == 'train')
    loop_msg = 'Train' if is_train else 'Val'
    # algorithm metrics
    losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
    # distributions have no train/eval mode; only switch real nn.Modules
    if not isinstance(model, ch.distributions.distribution.Distribution):
        model = model.train() if is_train else model.eval()
    # iterator: plain enumerate for step-based runs, tqdm progress bar otherwise
    iterator = enumerate(loader) if args.steps else tqdm(enumerate(loader), total=len(loader), leave=False)
    for i, batch in iterator:
        inp, target, output = None, None, None
        loss = 0.0
        if isinstance(model, ch.distributions.distribution.Distribution):
            # distribution "models": the loss reads the optimizer's own params
            loss = criterion(*optimizer.param_groups[0]['params'], *batch)
        elif isinstance(model, ch.nn.Module):
            inp, target = batch
            inp, target = inp.to(device), target.to(device)
            output = model(inp)
            # attacker model returns both output anf final input
            if isinstance(output, tuple):
                output, final_inp = output
            # lambda parameter used for regression with unknown noise variance
            # (falls back to the 3-argument criterion when `model.lambda_`
            # does not exist / the 4-argument call fails)
            try:
                loss = criterion(output, target, model.lambda_, phi)
            except Exception as e:
                loss = criterion(output, target, phi)
        # regularizer option
        reg_term = 0.0
        if has_attr(args, "regularizer") and isinstance(model, ch.nn.Module):
            reg_term = args.regularizer(model, inp, target)
        loss = loss + reg_term
        # perform backprop and take optimizer step
        if is_train:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # reduce a non-scalar loss to its mean for logging
        if len(loss.size()) > 0: loss = loss.mean()
        model_logits = None
        if not isinstance(model, ch.distributions.distribution.Distribution):
            model_logits = output[0] if isinstance(output, tuple) else output
        # measure accuracy and record loss
        top1_acc = float('nan')
        top5_acc = float('nan')
        desc = None # description for epoch
        # censored, truncated distributions - calculate score
        if args.steps:
            # step-based training: count steps and advance the LR schedule per step
            steps += 1
            if schedule: schedule.step()
        # latent variable models
        else:
            losses.update(loss.item(), inp.size(0))
            # calculate accuracy metrics
            if args.accuracy:
                # accuracy
                maxk = min(5, model_logits.shape[-1])
                if has_attr(args, "custom_accuracy"):
                    prec1, prec5 = args.custom_accuracy(model_logits, target)
                else:
                    prec1, prec5 = accuracy(model_logits, target, topk=(1, maxk))
                    prec1, prec5 = prec1[0], prec5[0]
                top1.update(prec1, inp.size(0))
                top5.update(prec5, inp.size(0))
                top1_acc = top1.avg
                top5_acc = top5.avg
            # ITERATOR
            if args.accuracy:
                desc = ('Epoch: {0} | Loss {loss.avg:.4f} | '
                        '{1}1 {top1_acc:.3f} | {1}5 {top5_acc:.3f} | '
                        'Reg term: {reg} ||'.format(epoch, loop_msg,
                        loss=losses, top1_acc=top1_acc, top5_acc=top5_acc, reg=reg_term))
            else:
                desc = ('Epoch: {0} | Loss {loss.avg:.4f} | {1}1'
                        'Reg term: {reg} ||'.format(epoch, loop_msg, loss=losses, reg=reg_term))
            iterator.set_description(desc)
        # USER-DEFINED HOOK
        if has_attr(args, 'iteration_hook'):
            args.iteration_hook(model, optimizer, i, loop_type, inp, target)
    # write epoch-level aggregates to tensorboard, when available
    if writer is not None:
        descs = ['loss', 'top1', 'top5']
        vals = [losses, top1, top5]
        for d, v in zip(descs, vals):
            writer.add_scalar('_'.join([loop_type, d]), v.avg,
                              epoch)
    # LOSS AND ACCURACY
    return top1.avg, losses.avg
| [
"stefanou@mit.edu"
] | stefanou@mit.edu |
e382659fe44a65b3a060e2c0d9cb78015fd0bea2 | 28436c3e8d5f59f9011bfac7fcdef977c345aa7b | /2021-05-15/homework1.py | 960425e000fa70c2621ff185c6e2e587beb46b6b | [] | no_license | engeeker/python-for-kid-2021 | 533d7b54ef23d99727642ba7a119e0a46577651b | 783d3582c6e9009c23213378650160f7dc937409 | refs/heads/main | 2023-08-02T15:18:17.367567 | 2021-10-01T13:15:56 | 2021-10-01T13:15:56 | 347,414,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import turtle
import random

# Draw a square-ish spiral: turning 91 degrees (just past a right angle)
# makes the square rotate slowly as it grows outward.
pen = turtle.Pen()
palette = ['red', 'yellow', 'blue', 'green']
pen.speed(0)                      # fastest drawing speed
turtle.bgcolor('black')
pen.color(random.choice(palette))  # one random colour for the whole spiral
for step in range(200):
    pen.forward(step * 2)
    pen.left(91)
turtle.Screen().exitonclick()      # keep the window open until clicked
"xiaoquwl@gmail.com"
] | xiaoquwl@gmail.com |
b011cc4260236016a704cc1c7e6ee337974e4570 | 3eadb0ea0d359422db25e780bf387983bcb8b4d4 | /MnA/lib/python3.6/codecs.py | 8cf1662cc230abbe90516e6d96f6e780438703ad | [] | no_license | Zhaarn/CRUDBuilding | 37492af97a4ffb55a6c804a5562cdd54d131218c | b96bf11412aa9f353b26dab45ac56402405e78c0 | refs/heads/master | 2021-05-02T11:13:45.889862 | 2018-02-08T18:39:40 | 2018-02-08T18:39:40 | 120,771,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /Users/am_dev/anaconda3/lib/python3.6/codecs.py | [
"amdirtylabdeveloper@gmail.com"
] | amdirtylabdeveloper@gmail.com |
6bc3b1ab7146b7abc8c84a1644387c0b84af4983 | 25c3cca9f8d34ede4b1a3307bef4ad2b9482e2dc | /decuen/actors/_actor.py | 4fed5361eb818be3562bb21c321a5c7731a7f6d6 | [
"MIT"
] | permissive | ziyadedher/decuen | efd36897f9b62b2d4baa61dcdedaa0661cc0decf | bc3bd42857308d7b189f576a3404abb3f9152531 | refs/heads/develop | 2020-08-29T20:27:07.408852 | 2019-11-26T18:52:26 | 2019-11-26T18:52:26 | 218,165,890 | 1 | 0 | MIT | 2019-11-26T18:52:27 | 2019-10-28T23:43:12 | Python | UTF-8 | Python | false | false | 4,858 | py | """Interface for arbitrary actor-learners and respective settings."""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import reduce
from typing import Generic, MutableSequence, Optional, Type, TypeVar
from gym.spaces import Box, Discrete # type: ignore
from torch import diag_embed
from torch.nn.functional import softplus
from decuen.critics import Critic
from decuen.dists import Categorical, Distribution, MultivariateNormal, Normal
from decuen.structs import State, Tensor, Trajectory
from decuen.utils.context import Contextful
@dataclass
class ActorSettings:
    """Basic common settings for all actor-learners."""
    # Distribution family used to build the behaviour policy
    # (e.g. Categorical, Normal, MultivariateNormal from decuen.dists).
    dist: Type[Distribution]
    # Reward discount factor (gamma).
    discount_factor: float
CriticType = TypeVar("CriticType", bound=Critic)
class Actor(Generic[CriticType], ABC, Contextful):
    """Generic abstract actor-learner interface.
    This abstraction provides interfaces for the two main functionalities of an actor-learner:
    1. the ability to choose an action to perform given a state, and
    2. the ability to learn based on past transitions and trajectories.
    """
    # Shared configuration for this actor (distribution family, discount).
    settings: ActorSettings
    # Critic attached via the `critic` property; None until one is set.
    _critic: Optional[CriticType]
    @abstractmethod
    def __init__(self, settings: ActorSettings) -> None:
        """Initialize a generic actor-learner."""
        super().__init__()
        self.settings = settings
        self._critic = None
    @property
    def critic(self) -> CriticType:
        """Get the critic associated with this actor.

        Raises ValueError when no critic has been attached yet.
        """
        if not self._critic:
            raise ValueError("no critic associated with this actor")
        return self._critic
    @critic.setter
    def critic(self, critic: CriticType) -> None:
        """Set the critic of this actor.
        You probably do not want to do this manually.
        """
        self._critic = critic
    def act(self, state: State) -> Distribution:
        """Construct a parameterized policy and return the generated distribution."""
        return self._gen_behaviour(self._gen_policy_params(state))
    # TODO: support learning from transitions
    # XXX: possibly return loss or some other metric?
    @abstractmethod
    def learn(self, trajectories: MutableSequence[Trajectory]) -> None:
        """Update policy based on past trajectories."""
        ...
    @abstractmethod
    def _gen_policy_params(self, state: State) -> Tensor:
        """Generate policy parameters on-the-fly based on an environment state."""
        ...
    @property
    def _num_policy_params(self) -> int:
        """Calculate the number of parameters needed for the policy."""
        # NOTE(review): `self.action_space` is not defined in this class --
        # presumably provided by Contextful or a subclass; confirm.
        if not any(isinstance(self.action_space, space_type) for space_type in (Discrete, Box)):
            raise TypeError("actors only support Discrete, Box action spaces")
        if self.settings.dist is Categorical:
            # one logit per discrete action
            if not isinstance(self.action_space, Discrete):
                raise TypeError("categorical distributions for actions can only be used for a Discrete action space")
            return self.action_space.n
        if self.settings.dist is Normal:
            # univariate normal: mean + scale
            if isinstance(self.action_space, Discrete):
                return 2
            if isinstance(self.action_space, Box):
                if self.action_space.shape != (1,):
                    raise TypeError("univariate normal distribution can only be used with unidimensional action spaces")
                return 2
        if self.settings.dist is MultivariateNormal:
            if isinstance(self.action_space, Discrete):
                raise TypeError("mutivariate normal distribution cannot be used with Discrete action spaces")
            if isinstance(self.action_space, Box):
                # one mean + one (diagonal) scale entry per action dimension
                return 2 * reduce((lambda x, y: x * y), self.action_space.shape)
        raise NotImplementedError("actors do not support this action distribution yet")
    def _gen_behaviour(self, params: Tensor) -> Distribution:
        """Generate the behavioural policy based on the given parameters and the distribution family of this actor."""
        # TODO: check for parameter size mismatches
        # TODO: support params being for multiple different distributions
        # accept unbatched parameter vectors by adding a batch dimension
        if len(params.size()) == 1:
            params = params.unsqueeze(0)
        elif len(params.size()) > 2:
            # FIXME: better error message
            raise ValueError("unknown dimensionality")
        if self.settings.dist is Categorical:
            return Categorical(logits=params)
        if self.settings.dist is Normal:
            # NOTE(review): unlike the MultivariateNormal branch below, the
            # scale (params[:, 1]) is not passed through softplus here, so a
            # non-positive network output would be an invalid scale -- confirm.
            return Normal(params[:, 0], params[:, 1])
        if self.settings.dist is MultivariateNormal:
            # first half of params = means; second half -> softplus -> diagonal covariance
            half = params.size()[1] // 2
            return MultivariateNormal(params[:, :half], diag_embed(softplus(params[:, half:])))
        raise NotImplementedError("actors do not support this action distribution yet")
| [
"ziyad.edher@gmail.com"
] | ziyad.edher@gmail.com |
94e24cb8e8e35ea44e6696d809e1def8130431b8 | d51a6dc99e36cc964598485a4971e7315725b78d | /D_010.py | ebb37bf463b2295abe0038c6798070340057f853 | [] | no_license | Carmona-Elias/Python_CEV | d8baf296ce66a659c209b203aacecf8c129772c5 | da2eb6c43ee836788a818497e14b207455461f26 | refs/heads/main | 2022-12-31T13:35:44.240344 | 2020-10-16T20:43:10 | 2020-10-16T20:43:10 | 302,625,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | print('e-Cambio')
# Exchange rate used for the conversion (MZN per 1 USD).
# Named constant instead of the previous inline magic number.
RATE_MZN_PER_USD = 71.63

# Read an amount in Mozambican metical and print its US-dollar equivalent.
valor = float(input('Digite o valor em MZN MT: '))
print(f'Com {valor} MZN MT voce pode comprar {valor/RATE_MZN_PER_USD :.2f} USD $')
print('Obrigado \n Volte Sempre!')
"noreply@github.com"
] | noreply@github.com |
e4e413389afb212298e6544059d2479cd049516e | 385befa1b4edd424c5669e3957bbefcc799a906c | /pystreaming/audio/patterns.py | 071f412f72d2885786ad0e55fcf4481698033a87 | [
"MIT"
] | permissive | joseph-x-li/pystreaming | 121c55f5471b7c154323e2f63dcf4585f2b95584 | 31cd0129fdedfe4b11d73ead90bf4792f343ff42 | refs/heads/main | 2023-05-13T05:00:17.533007 | 2021-05-31T07:22:02 | 2021-05-31T07:22:02 | 316,455,995 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | import zmq
import time
from ..stream import interface as intf
from . import STR_HWM, RCV_HWM
class AudioStreamer:
    """Publishes audio buffers over a bound zmq PUB socket.

    Frames are numbered sequentially; frames that cannot be queued
    (high-water mark reached) are dropped rather than blocking the caller.
    """
    def __init__(self, endpoint):
        """Bind the publishing socket.

        Args:
            endpoint (str): Descriptor of stream publishing endpoint.
        """
        sock = zmq.Context.instance().socket(zmq.PUB)
        sock.bind(endpoint)
        sock.setsockopt(zmq.SNDHWM, STR_HWM)
        self.socket = sock
        self.endpoint = endpoint
        self.fno = 0
    def send(self, arr):
        """Publish one buffer of audio, dropping it if the send queue is full.

        Args:
            arr (np.ndarray): A segment of audio as a numpy array.
        """
        payload = dict(
            socket=self.socket,
            fno=self.fno,
            ftime=time.time(),
            meta=None,
            arr=arr,
            flags=zmq.NOBLOCK,
        )
        try:
            intf.send(**payload)
        except zmq.error.Again:
            pass  # HWM reached: drop this frame instead of blocking
        self.fno += 1
    def __repr__(self):
        parts = [
            "-----AudioStreamer-----\n",
            f"{'OUT': <8}{self.endpoint}\n",
            f"{'HWM': <8}({STR_HWM} >",
        ]
        return "".join(parts)
class AudioReceiver:
    """Receives audio buffers from a zmq SUB socket.

    Subscribes to every message published on ``endpoint``.
    """
    def __init__(self, endpoint):
        """Connect the subscribing socket.

        Args:
            endpoint (str): Descriptor of stream publishing endpoint.
        """
        sock = zmq.Context.instance().socket(zmq.SUB)
        sock.setsockopt(zmq.RCVHWM, RCV_HWM)
        sock.connect(endpoint)
        sock.subscribe("")
        self.socket = sock
        self.endpoint = endpoint
    def recv(self, timeout):
        """Receive one package of data from the audio channel.

        Args:
            timeout (int): Timeout period in milliseconds.

        Raises:
            TimeoutError: Raised when no messages are received in the timeout period.
        """
        # Guard clause: bail out early when nothing arrives in time.
        if not self.socket.poll(timeout):
            raise TimeoutError(
                f"No messages were received within the timeout period {timeout}ms"
            )
        return intf.recv(
            socket=self.socket,
            arr=True,
            flags=zmq.NOBLOCK,
        )
    def handler(self, timeout=0):
        """Yield packages of data from the audio channel forever.

        Args:
            timeout (int, optional): Timeout period in milliseconds. Defaults to 0.

        Yields:
            dict: Expected items, with keys: {arr, meta, ftime, fno};
            ``None`` when the timeout elapses without a message.
        """
        while True:
            try:
                yield self.recv(timeout=timeout)
            except TimeoutError:
                yield None
    def __repr__(self):
        parts = [
            "-----AudioReceiver-----\n",
            f"{'IN': <8}{self.endpoint}\n",
            f"{'HWM': <8}> {RCV_HWM})",
        ]
        return "".join(parts)
| [
"13826851+joseph-x-li@users.noreply.github.com"
] | 13826851+joseph-x-li@users.noreply.github.com |
c66e67c04ff41d5801bd819a5d94c68a3aa17409 | 0cb8c5789450e1cae6f990176faec1db0e387a24 | /pylsdj/test_clock.py | fb0012f814e202ff03bce2d59ecbe0a4a03a975b | [
"MIT"
] | permissive | undermink/pylsdj | 419111db35d996cf7285502aab5c30d3f2618f29 | d051d30a7e7889cc290b9a1c2cf46cb15c4b1902 | refs/heads/master | 2021-01-16T23:11:47.854245 | 2015-01-12T07:17:05 | 2015-01-12T07:17:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | import os
from nose.tools import assert_equal
from project import load_lsdsng
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
def test_read_clocks():
    """Clock fields of the reference song match the known fixture values."""
    proj = load_lsdsng(
        os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng'))
    project_clock = proj.song.clock
    total_clock = proj.song.global_clock
    # BUGFIX: these were Python 2 print *statements* (a SyntaxError under
    # Python 3); parenthesized so the module imports on both 2 and 3.
    print(project_clock)
    print(total_clock)
    print(total_clock.checksum)
    assert_equal(5, project_clock.hours)
    assert_equal(47, project_clock.minutes)
    assert_equal(57, total_clock.days)
    assert_equal(1, total_clock.hours)
    assert_equal(11, total_clock.minutes)
def test_set_local_clock():
    """Setting the per-song clock is reflected when read back."""
    song_path = os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng')
    proj = load_lsdsng(song_path)
    clock = proj.song.clock
    clock.hours, clock.minutes = 2, 22
    assert_equal(2, proj.song.clock.hours)
    assert_equal(22, proj.song.clock.minutes)
def test_set_global_clock():
    """Setting the global clock updates its fields and the checksum."""
    song_path = os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng')
    proj = load_lsdsng(song_path)
    global_clock = proj.song.global_clock
    global_clock.days = 5
    global_clock.hours = 14
    global_clock.minutes = 20
    assert_equal(5, proj.song.global_clock.days)
    assert_equal(14, proj.song.global_clock.hours)
    assert_equal(20, proj.song.global_clock.minutes)
    # checksum is derived from the fields above
    assert_equal(39, proj.song.global_clock.checksum)
| [
"alexras@trifacta.com"
] | alexras@trifacta.com |
4d56f1d27269f09b0a3b2c28a39c14e04b9956c0 | 693e90bf7b3fcd245e254c3fb4c03f041821e5f6 | /domain/Base.py | 3210a01c99425523521a01941bd745e9326688ed | [] | no_license | lilin409546297/python_planeWar | 8c7a948380a861eab47638a808eba7430bd78ec2 | 5c16221057a528a9432e545cf7f4ab85ab8d8e82 | refs/heads/master | 2020-05-20T18:47:15.275477 | 2019-05-09T02:42:12 | 2019-05-09T02:42:12 | 185,713,246 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | # !/usr/bin/python
# -*- coding: UTF-8 -*-
import pygame
class Base:
    """Common base for drawable game objects: remembers the target surface,
    an (x, y) position, and the sprite loaded from ``image_path``."""
    def __init__(self, screen, x, y, image_path):
        self.screen = screen
        self.x, self.y = x, y
        self.image = pygame.image.load(image_path)
| [
"lilin409546297@*"
] | lilin409546297@* |
1df490347f6ba150e4c18eda8adb09b65cfd0cbd | 7ca50753ed3ff4c6115f8be3de675c91631c382f | /manage.py | 8c8509f5ee95dfc01cb27aa14ab0dd2c753db751 | [] | no_license | harrywang/flask-tdd-docker | a63ca86062dc05ab99591ef4ce609d90868f6e77 | 2677c52ae8dba84695d032fd309ee864f7fb2521 | refs/heads/master | 2023-05-11T15:44:04.689565 | 2020-03-21T20:00:57 | 2020-03-21T20:00:57 | 248,801,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # manage.py
import sys
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.users.models import User
# Flask application + CLI group for management commands.
app = create_app()
cli = FlaskGroup(create_app=create_app)


@cli.command('recreate_db')
def recreate_db():
    """Drop every table and rebuild the schema from scratch."""
    db.drop_all()
    db.create_all()
    db.session.commit()


@cli.command('seed_db')
def seed_db():
    """Insert a couple of well-known users for local development."""
    seed_users = (
        ('michael', 'hermanmu@gmail.com'),
        ('michaelherman', 'michael@mherman.org'),
    )
    for username, email in seed_users:
        db.session.add(User(username=username, email=email))
    db.session.commit()


if __name__ == '__main__':
    cli()
| [
"harryjwang@gmail.com"
] | harryjwang@gmail.com |
9805962edbe9190b73c9b86898de6cbf7dce6ec3 | 504cf192d885776af1bf83ff05385d49847f2dc2 | /P12_tweet_analysis.py | 4e722ad3a9367cbf4636b9e9408ad60d8bbde8cb | [] | no_license | LeonMac/Principles-of-Data-Science_forPython3 | 334c0ea82738c3204e550815f2615918d5aef033 | 0b09db9f03e5b4d57c2efeb9f48da0d08eccdb97 | refs/heads/master | 2020-06-04T13:12:49.779035 | 2019-06-16T10:13:56 | 2019-06-16T10:13:56 | 192,036,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scan a tweet for "cashtags" ($-prefixed stock symbols) and report them.
tweet_msg = "RT @robdv: $TWTR now top holding for Andor, unseating $AAPL"
words_in_tweet = tweet_msg.split(' ')
cashtags = [token for token in words_in_tweet if "$" in token]
for cashtag in cashtags:
    print("This Tweet is about", cashtag)
| [
"liang.ma.sh@gmail.com"
] | liang.ma.sh@gmail.com |
77ece19a047655049ffb2f82d4ce5d60516e3101 | 300e852000024dcf6ec222af75490361c37b6a3d | /tests/test_model.py | 2fbd9cc7ad4d3569e19eb8125c0409e2618fe029 | [] | no_license | BitLab16/Machine-learning | 6c54c22b9a859baf97d2a7ba9d918b1e0ea0476b | f5498b5c23061194436ac0d912bfd5f7346f6489 | refs/heads/master | 2023-04-22T17:16:19.816393 | 2021-05-18T15:04:57 | 2021-05-18T15:04:57 | 343,573,845 | 2 | 0 | null | 2021-05-17T08:29:03 | 2021-03-01T22:23:07 | Python | UTF-8 | Python | false | false | 572 | py | import pytest
import pandas as pd
import json
from unittest import TestCase
from flaskr.model.Prediction import Prediction
from flaskr.ml.DecisionTree import DecisionTree
'''
def test_to_json(data_for_testing):
data = data_for_testing.loc[(data_for_testing['detection_time'] == '2018-01-10 14:05') & (data_for_testing['tracked_point_id'] == 1)]
expected_result = {'time': ['2018-01-10 14:05'], 'flow': [8]}
prediction = Prediction(data['detection_time'], data['people_concentration'])
result = prediction.to_json()
assert result == expected_result
''' | [
"daimatty@gmail.com"
] | daimatty@gmail.com |
2f1870f3c2fec1ee22df014c5d9a0cf91f80080d | 70233044d1431fa4253dd330e862d98810307e02 | /github/github.py | 895e889b4d7310b2719962d4f2a31e62be6d6bd6 | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | assem-ch/github-issues-migrator | cfa48a9f0d5c47b0b87005fae41bec6f43e9cfed | 6ee852ab849dd61ee28a0219e904bae34220f874 | refs/heads/master | 2021-01-15T20:04:44.737627 | 2016-06-01T13:15:17 | 2016-06-01T13:15:17 | 60,211,311 | 9 | 0 | null | 2016-06-01T21:15:21 | 2016-06-01T21:15:21 | null | UTF-8 | Python | false | false | 4,012 | py | #!/usr/bin/env python
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from config import config
__author__ = 'Kord Campbell'
__website__ = 'http://www.tinyprobe.com'
try:
import simplejson as json
except ImportError:
import json
import oauth_client as oauth2
# Github OAuth Implementation
class GithubAuth(object):
    """Helper for GitHub's OAuth2 "web application" flow.

    Builds the authorization URL the user must visit, then exchanges the
    returned ``code`` for an access token.
    """
    def __init__(self, github_server, github_redirect_uri, scope, github_client_id=config.get('github_client_id'),
                 github_client_secret=config.get('github_client_secret')):
        # load github shizzle from config.py
        self.oauth_settings = {
            'client_id': github_client_id,
            'client_secret': github_client_secret,
            'access_token_url': 'https://%s/login/oauth/access_token' % github_server,
            'authorization_url': 'https://%s/login/oauth/authorize' % github_server,
            'redirect_url': '%s' % github_redirect_uri,
            'scope': '%s' % scope
        }
    # get our auth url and return to login handler
    def get_authorize_url(self):
        """Return the URL to redirect the user to for authorization."""
        oauth_client = oauth2.Client(
            self.oauth_settings['client_id'],
            self.oauth_settings['client_secret'],
            self.oauth_settings['authorization_url']
        )
        authorization_url = oauth_client.authorization_url(
            redirect_uri=self.oauth_settings['redirect_url'],
            params={'scope': self.oauth_settings['scope']}
        )
        return authorization_url
    def get_access_token(self, code):
        """Exchange the OAuth ``code`` for an access token (or None)."""
        oauth_client = oauth2.Client(
            self.oauth_settings['client_id'],
            self.oauth_settings['client_secret'],
            self.oauth_settings['access_token_url']
        )
        data = oauth_client.access_token(code, self.oauth_settings['redirect_url'])
        access_token = data.get('access_token')
        # BUGFIX: removed an unreachable ``return authorization_url`` that
        # followed this return (that name was not even defined in this scope).
        return access_token
class GithubRequest(object):
    """Thin wrapper over the GitHub REST API for an authenticated user."""
    def __init__(self, access_token, github_client_id=config.get('github_client_id'),
                 github_client_secret=config.get('github_client_secret')):
        self.access_token = access_token
        self.oauth_settings = {
            'client_id': github_client_id,
            'client_secret': github_client_secret,
            'access_token_url': 'https://%s/login/oauth/access_token' % access_token,
        }
    def get_user_info(self):
        """Return the authenticated user's profile."""
        return self.make_request('user')
    def fetch_user_repos(self):
        """Return (up to 1000 of) the user's repositories."""
        return self.make_request('user/repos', params={'per_page': 1000})
    def get_issues_list(self, repo_name, repo_own):
        """Return issues for ``repo_own/repo_name`` (up to 1000)."""
        path = 'repos/' + repo_own + '/' + repo_name + '/issues'
        return self.make_request(path, params={'per_page': 1000})
    def create_issue(self, owner, repo, issue):
        """POST a new issue (a dict) to ``owner/repo``."""
        path = 'repos/' + owner + '/' + repo + '/issues'
        return self.make_request(path, method='POST', body=json.dumps(issue))
    def get_oauth(self):
        """Build an OAuth client from the stored settings."""
        settings = self.oauth_settings
        return oauth2.Client(
            settings['client_id'],
            settings['client_secret'],
            settings['access_token_url']
        )
    def make_request(self, endpoint, body=None, method='GET', params=None):
        """Issue one authenticated request and return the decoded JSON body."""
        client = self.get_oauth()
        url = 'https://api.github.com/' + endpoint
        headers, body = client.request(
            url,
            access_token=self.access_token,
            token_param='access_token',
            method=method,
            body=body,
            params=params
        )
        return json.loads(body)
| [
"kghiboub@gmail.com"
] | kghiboub@gmail.com |
28acd031f8288cd187bda7e9ae137641eb37c596 | 6dbb207a407b4255ce9fd3916d2b625409de378c | /milestone_1_pkg/src/scripts/keyboardInput | b4196f9a5cf38c0bf7fd7dbbb207fa59f2bf58ca | [] | no_license | JqkerN/Project_Course_Robotic_and_Controll | 3c95e7aa6a50f6abd916e6c3d053316fd408e821 | be0cfc5941cefb50ecdea579a6f6de1444ea1758 | refs/heads/main | 2023-01-03T03:12:31.878519 | 2020-11-02T14:11:26 | 2020-11-02T14:11:26 | 309,388,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | #!/usr/bin/env python
import sys
import math
import readchar
import rospy
import tf2_ros
import tf2_geometry_msgs
from crazyflie_driver.msg import Hover
from aruco_msgs.msg import MarkerArray
# Target hover height (presumably metres -- confirm against the
# crazyflie_driver Hover message docs); nudged by the 'r'/'f' keys.
HOVER_LVL = 0.2
# Flipped to True when the user presses 'c'; ends the read loop in main().
SHUTDOWN = False
# The Hover command message that keyAction() refills on every keypress.
POSE = Hover()
def publish_hover():
    """Publish the current global POSE on the ml1/keyboard topic."""
    global POSE
    pub_cmd.publish(POSE)
def keyAction(key):
    """Translate one keypress into the global Hover command ``POSE``.

    Resets the velocity/yaw fields on every call, then applies the key:
    c=quit, w/a/s/d=translate, q/e=yaw, r/f=raise/lower the hover height.
    Returns the updated POSE (which is also the module-level global).
    """
    global HOVER_LVL, SHUTDOWN, POSE
    # Start from a hover-in-place command at the current height.
    POSE.header.stamp = rospy.Time.now()
    POSE.vx = 0
    POSE.vy = 0
    POSE.yawrate = 0
    POSE.zDistance = HOVER_LVL
    if key == 'c': # QUIT
        print("QUITING...")
        pub_SHUTDOWN.publish(POSE)
        SHUTDOWN = True
    elif key == 'w': # FORWARD
        POSE.vx = 0.5
    elif key == 'a': # LEFT
        POSE.vy = 0.5
    elif key == 's': # BACKWARD
        POSE.vx = -0.5
    elif key == 'd': # RIGHT
        POSE.vy = -0.5
    elif key == 'e': # TURN RIGHT
        POSE.yawrate = 60
    elif key == 'q': # TURN LEFT
        POSE.yawrate = -60
    # Height control with soft bounds: outside [0, 1] only the key that moves
    # the height back toward the valid range is honoured ('r' below 0,
    # 'f' above 1); inside the range both keys work.
    if HOVER_LVL < 0 :
        rospy.logwarn("OUT OF LOWER BOUNDS")
        if key == 'r': # UP
            HOVER_LVL += 0.02
            POSE.zDistance = HOVER_LVL
    elif HOVER_LVL > 1:
        rospy.logwarn("OUT OF UPPER BOUNDS")
        if key == 'f': # DOWN
            HOVER_LVL -= 0.02
            POSE.zDistance = HOVER_LVL
    else:
        if key == 'r': # UP
            HOVER_LVL += 0.02
            POSE.zDistance = HOVER_LVL
        elif key == 'f': # DOWN
            HOVER_LVL -= 0.02
            POSE.zDistance = HOVER_LVL
    return POSE
# Module-level ROS node + publishers so the handlers above can publish.
rospy.init_node('hoverKeyboard')
pub_cmd = rospy.Publisher("ml1/keyboard", Hover, queue_size=2)
pub_SHUTDOWN = rospy.Publisher("ml1/SHUTDOWN", Hover, queue_size=1)
def main():
    """Read single keypresses until SHUTDOWN is set, publishing a Hover
    command for each one.

    (Removed an unused ``global HOVER_LVL`` declaration -- this function
    never assigns it -- and an unused local binding of keyAction's return.)
    """
    print("Starting...")
    while not SHUTDOWN:
        # readchar blocks until one keypress is available
        key = readchar.readkey().lower()
        keyAction(key)  # updates the global POSE / HOVER_LVL / SHUTDOWN
        publish_hover()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com | |
fcf1ed07e836d7eace792161bc3e350b982fa814 | 08c18b86c49072a5cb5bde1791a128a15947b73a | /mysite/uphotos/serializers.py | 0204e5af8be79a63c0d7930fa51670200b1befec | [] | no_license | xiaol/xuemei | e20e721ecf833d9e163b27d9d4bcbe5ae12eba21 | 8c75524530ca233badf807debaea0e04575f951f | refs/heads/master | 2021-01-10T01:19:26.320158 | 2013-06-04T08:23:36 | 2013-06-04T08:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from rest_framework import serializers, pagination
from uphotos.models import Photo
class PhotoSerializer(serializers.ModelSerializer):
    # Serializes a Photo down to just its image URL for API responses.
    class Meta:
        model = Photo
        fields = ('image_url',)
class PaginatedPhotoSerializer(pagination.PaginationSerializer):
    # Wraps PhotoSerializer results in DRF's pagination envelope.
    class Meta:
        object_serializer_class = PhotoSerializer
| [
"ubuntu@ishoow.cn"
] | ubuntu@ishoow.cn |
8cf3ba6f22bfcdd3f535f3444e4ffa50b1aa8779 | 5d4eaf047143dbba60bcc6b72f744a4d92684709 | /homepage/views.py | eddd48736e83f6308e14febcbb530d4dbb90394d | [] | no_license | Rungwarapon/django-miniproject | fa5177212dc95fbb8296325389d6bf0fae7f4be7 | c8d922d5e4fab1533756579112bc7315db528c5e | refs/heads/master | 2022-06-23T07:46:49.862025 | 2020-03-10T12:40:21 | 2020-03-10T12:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from urllib import request
from homepage.models import Faculty, Restaurant
from django.shortcuts import render
# Create your views here.
def home(request):
    """Search page: list restaurants whose name contains the query string."""
    query = request.GET.get('inputSearch', '')
    matches = Restaurant.objects.filter(name__icontains=query)
    context = {
        'search': query,
        'classes': matches,
        'faculty': Faculty.objects.all(),
    }
    return render(request, template_name='home.html', context=context)
def management(request):
    """Render the static management page."""
    return render(request, 'management.html')
def detail(request):
    """Render the static detail page."""
    return render(request, 'detail.html')
| [
"rungwarapon.khu14@gmail.com"
] | rungwarapon.khu14@gmail.com |
f0946355fe96135fd0f0750b6b88dfd013d5006f | 245bcf63bce2933948adead5734f86e11cc190dd | /marketcheck_api_sdk/api/crm_api.py | ebdc39bd9bea2de007954012b2a4ac08d813572a | [] | no_license | MarketcheckCarsInc/marketcheck_api_sdk_python | 385c645c74805be3d8717304188e2b215786be56 | aca339cc61a0860f31f2070c736af32f07d8fd5a | refs/heads/master | 2020-03-23T23:53:32.292175 | 2018-08-25T03:06:52 | 2018-08-25T03:06:52 | 142,261,814 | 2 | 1 | null | 2018-08-25T03:06:53 | 2018-07-25T07:11:04 | Python | UTF-8 | Python | false | false | 6,081 | py | # coding: utf-8
"""
Marketcheck Cars API
<b>Access the New, Used and Certified cars inventories for all Car Dealers in US.</b> <br/>The data is sourced from online listings by over 44,000 Car dealers in US. At any time, there are about 6.2M searchable listings (about 1.9M unique VINs) for Used & Certified cars and about 6.6M (about 3.9M unique VINs) New Car listings from all over US. We use this API at the back for our website <a href='https://www.marketcheck.com' target='_blank'>www.marketcheck.com</a> and our Android and iOS mobile apps too.<br/><h5> Few useful links : </h5><ul><li>A quick view of the API and the use cases is depicated <a href='https://portals.marketcheck.com/mcapi/' target='_blank'>here</a></li><li>The Postman collection with various usages of the API is shared here https://www.getpostman.com/collections/2752684ff636cdd7bac2</li></ul> # noqa: E501
OpenAPI spec version: 1.0.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from marketcheck_api_sdk.api_client import ApiClient
class CRMApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Create the API facade, defaulting to a fresh ``ApiClient``."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def crm_check(self, vin, sale_date, **kwargs): # noqa: E501
"""CRM check of a particular vin # noqa: E501
Check whether particular vin has had a listing after stipulated date or not # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.crm_check(vin, sale_date, async=True)
>>> result = thread.get()
:param async bool
:param str vin: vin for which CRM check needs to be done (required)
:param str sale_date: sale date after which listing has appeared or not (required)
:param str api_key: The API Authentication Key. Mandatory with all API calls.
:return: CRMResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.crm_check_with_http_info(vin, sale_date, **kwargs) # noqa: E501
else:
(data) = self.crm_check_with_http_info(vin, sale_date, **kwargs) # noqa: E501
return data
def crm_check_with_http_info(self, vin, sale_date, **kwargs): # noqa: E501
"""CRM check of a particular vin # noqa: E501
Check whether particular vin has had a listing after stipulated date or not # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.crm_check_with_http_info(vin, sale_date, async=True)
>>> result = thread.get()
:param async bool
:param str vin: vin for which CRM check needs to be done (required)
:param str sale_date: sale date after which listing has appeared or not (required)
:param str api_key: The API Authentication Key. Mandatory with all API calls.
:return: CRMResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['vin', 'sale_date', 'api_key'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method crm_check" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'vin' is set
if ('vin' not in params or
params['vin'] is None):
raise ValueError("Missing the required parameter `vin` when calling `crm_check`") # noqa: E501
# verify the required parameter 'sale_date' is set
if ('sale_date' not in params or
params['sale_date'] is None):
raise ValueError("Missing the required parameter `sale_date` when calling `crm_check`") # noqa: E501
collection_formats = {}
path_params = {}
if 'vin' in params:
path_params['vin'] = params['vin'] # noqa: E501
query_params = []
if 'api_key' in params:
query_params.append(('api_key', params['api_key'])) # noqa: E501
if 'sale_date' in params:
query_params.append(('sale_date', params['sale_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/crm_check/{vin}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CRMResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"mahesh.hakeem@zerebral.co.in"
] | mahesh.hakeem@zerebral.co.in |
402af64e3ea87e296c5acc8805fb6f6745eeb37b | 55962e7722844cc7877d6d6417479a58111ba7c3 | /app/app.py | ded5085155f02512f5887529646da9412e8490ae | [] | no_license | masterplanx/demo-mockapp | e62d4f56a1aaf82fdedd50803690ce28f589f266 | bb864750bae402d169b2183a99fa21f504a48be9 | refs/heads/master | 2020-04-11T04:05:17.222134 | 2019-03-25T18:54:58 | 2019-03-25T18:54:58 | 161,501,073 | 0 | 0 | null | 2019-01-18T15:30:52 | 2018-12-12T14:37:52 | Python | UTF-8 | Python | false | false | 1,849 | py | """
high level support for doing this and that.
"""
import os
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from redis import Redis
from flask_migrate import Migrate
# Build the Postgres connection string from environment variables so that
# no credentials are hard-coded in the source.
DATABASE_URI = 'postgresql+psycopg2://{dbuser}:{dbpass}@{dbhost}/{dbname}'.format(
    dbuser=os.environ['PG_USER'],
    dbpass=os.environ['PG_PASS'],
    dbhost=os.environ['PG_HOST'],
    dbname=os.environ['PG_DB']
)
APP = Flask(__name__)
APP.config.update(
    SQLALCHEMY_DATABASE_URI=DATABASE_URI,
    # Disable the modification-tracking signal machinery (saves memory).
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
# initialize the database connection
DB = SQLAlchemy(APP)
# initialize database migration management
MIGRATE = Migrate(APP, DB)
@APP.route('/')
def view_registered_guests():
    """Render the guest list page with every registered guest."""
    # Imported here (not at module level) to avoid a circular import with models.
    from models import Guest
    guests = Guest.query.all()
    return render_template('guest_list.html', guests=guests)
@APP.route('/register', methods=['GET'])
def view_registration_form():
    """Render the empty guest registration form."""
    return render_template('guest_registration.html')
@APP.route('/register', methods=['POST'])
def register_guest():
    """Persist a new guest from the submitted form and show a confirmation page."""
    # Imported here (not at module level) to avoid a circular import with models.
    from models import Guest
    name = request.form.get('name')
    email = request.form.get('email')
    guest = Guest(name, email)
    DB.session.add(guest)
    DB.session.commit()
    return render_template(
        'guest_confirmation.html', name=name, email=email)
@APP.route('/cache')
def hello():
    """Increment and report a Redis-backed page-view counter."""
    redis = Redis(
        host=os.environ['REDIS_HOST'],
        # NOTE(review): env var is named REDIS_PORT2 (not REDIS_PORT) — confirm
        # this is intentional in the deployment configuration.
        port=os.environ['REDIS_PORT2'],
        db=0,
        password=os.environ['RD_PASS']
    )
    # INCR creates the key with value 1 on first use, so no initialization needed.
    redis.incr('hits')
    return 'This Flask demo has been viewed %s time(s).' % redis.get('hits')
| [
"ferreyrasergio@gmail.com"
] | ferreyrasergio@gmail.com |
a4ab1183b45a01e1584dcbc648e4f24ba302105a | 5ab03914f3685cab48816fe7bbfdd3a11ec4ca0a | /OOP/call.py | ac3e04364fa741e92b6e817704a922ce8180ab5f | [] | no_license | jan-2018-py1/Python_Anton | 09449a74336e19b158faf0feb9f5778de5461f8d | 1fec1bcb3f6b70436ae91afe45deda24308251e6 | refs/heads/master | 2021-05-09T11:08:41.831053 | 2018-03-03T03:40:29 | 2018-03-03T03:40:29 | 118,982,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | class Call(object):
    def __init__(self, c_id, caller_name, caller_phone_number, time_of_call, reason):
        """Store the details of a single incoming call."""
        # Numeric identifier for this call record.
        self.c_id = c_id
        self.caller_name = caller_name
        self.caller_phone_number = caller_phone_number
        self.time_of_call = time_of_call
        # Free-text reason the caller gave for phoning in.
        self.reason = reason
    def display(self):
        """Print all fields of this call to stdout."""
        print ("ID: ", self.c_id)
        print ("Name: ", self.caller_name)
        print ("Phone: ", self.caller_phone_number)
        print ("Time: ", self.time_of_call)
        print ("Resaon: ", self.reason)
class CallCenter(object):
    """A simple queue of Call objects, tracking its own size.

    All mutating methods return ``self`` so calls can be chained.
    """

    def __init__(self, calls=None):
        """Create a call center, optionally seeded with an existing call list."""
        # Build a fresh list per instance when none is given (avoids the
        # shared-mutable-default pitfall).
        if calls is None:
            self.calls = []
        else:
            self.calls = calls
        self.queue_size = len(self.calls)

    def addCall(self, new_call):
        """Append *new_call* to the queue."""
        self.calls.append(new_call)
        self.queue_size = len(self.calls)
        return self

    def removeCall(self, call):
        """Remove one specific call object from the queue."""
        self.calls.remove(call)
        self.queue_size = len(self.calls)
        return self

    def removeCallByNumber(self, callNumber):
        """Remove every queued call whose phone number equals *callNumber*.

        Bug fix: the original removed elements from ``self.calls`` while
        iterating over it, which skips adjacent matches; rebuilding the list
        removes all matches reliably.
        """
        self.calls = [c for c in self.calls
                      if c.caller_phone_number != callNumber]
        self.queue_size = len(self.calls)
        return self

    def info(self):
        """Print a summary of the queue to stdout."""
        print("The call query: " + str(self.queue_size))
        for call in self.calls:
            print("Caller Name: " + call.caller_name)
            print("Call number: " + call.caller_phone_number)
        return self
# Demo: build a couple of calls, queue two of them, and print the queue
# before and after attempting a removal by a number that is not queued.
call1 = Call(1,"Anton Test","443-827-0000","02/07/18 09:07am","Technical support")
call2 = Call(2,"John Deer","410-560-1111","02/07/18 09:10am","Billing")
# NOTE: call3 is created but never added to the center.
call3 = Call(3,"Mark Sams","xxx-xxx-xxxx","02/07/18 09:30am","New Order")
center = CallCenter([call1, call2])
center.info()
# This number is not in the queue, so the queue should be unchanged.
center.removeCallByNumber("235-673-6437")
center.info()
"slntn@yahoo.com"
] | slntn@yahoo.com |
44afabd1a5975428ea59c4232a94a03a50f2c8e7 | dc02e4333d1a0558534c3c0136dc2213ca5531e7 | /test.py | 875c278653b8b557c52e2f64a1695249fde76c23 | [] | no_license | yjsx/yjsx_leetcode | 3676907d29a71506314f0d9db701f76781ac1f54 | a130e59cbbcfb777e40393c0204d25085254600e | refs/heads/master | 2020-05-24T00:44:39.120684 | 2019-08-14T09:42:41 | 2019-08-14T09:42:41 | 187,021,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | def test(a):
    # Work on a shallow copy so the caller's dict is left untouched.
    a = a.copy()
    # Drop key 1 from the copy only.
    a.pop(1)
    return a
# Demonstrate that test() does not mutate its argument: `a` keeps key 1,
# while the returned `b` has it removed.
a = {1:1,2:2}
b = test(a)
print(a)
print(b)
"dyf0202@mail.ustc.edu.cn"
] | dyf0202@mail.ustc.edu.cn |
352fc2592e428da6a89e6a9b67cbe4e96f892a87 | 3ca6b34676a0adeaba85a2953a8c9abf5d6ef3e4 | /cap 2/ex2.3 mensagem_pessoal.py | d266787bc99096ebbba2c49184dbe991fa9c8afc | [] | no_license | giusepper11/Curso-intensivo-Python | 34fb8e94c7c9afb09f54d8fc67136b337d0ef106 | 613cd502af3ff877dac0d62d9eb09b290d227838 | refs/heads/master | 2021-08-30T11:41:42.824065 | 2017-12-17T19:47:15 | 2017-12-17T19:47:15 | 114,535,941 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | name = "Giuseppe"
# Greet the user by name and ask if they would like to learn more Python.
print('Alo ' + name + ', voce gostaria de aprender mais sobre python? ')
| [
"giusepper11@gmail.com"
] | giusepper11@gmail.com |
8e176be9bb96af885fabca34dfda3cb72b2e9898 | 75962f70eecc19a2616e3bf44d47da9a5f8e697d | /dividends.py | b70901a43945b8b66244164251747a249c40eceb | [] | no_license | Stephen-Strosko/dividend-retrieval | 88ec38bca8760116c5997777b22d035b5ce538a9 | de25df226d889a42e447f9240059809a232485e8 | refs/heads/master | 2022-09-27T12:33:37.880207 | 2020-05-31T15:02:08 | 2020-05-31T15:02:08 | 268,299,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | import logging
import pandas as pd
import requests
import time
from bs4 import BeautifulSoup
from pathlib import Path
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Path to a newline-separated list of ticker symbols.  The original source
# shipped with a broken f-string placeholder (f'{Insert Path Here') which is
# a SyntaxError; use a plain placeholder string and point it at your local
# ticker file before running.
CURRENT_STOCKS = Path('INSERT_PATH_HERE')
# Browser-like User-Agent so the scraper is not rejected outright.
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'}
def main():
    """Scrape the latest dividend row for every ticker and write dividends.csv."""
    driver = setup_driver()
    metadata = []
    # Context manager guarantees the ticker file is closed (the original
    # opened it and never closed the handle).
    with open(CURRENT_STOCKS) as stocks:
        tickers = stocks.read().split('\n')
    try:
        for ticker in tickers:
            website = f'https://www.nasdaq.com/market-activity/stocks/{ticker}/dividend-history'
            stock_info = get_info(ticker, website, driver)
            metadata.append(stock_info)
    finally:
        # Always release the browser, even if a scrape raises.
        driver.quit()
    df = pd.DataFrame(
        metadata,
        columns=[
            'Ex/EFF DATE', 'TYPE', 'CASH AMOUNT',
            'DECLARATION DATE', 'RECORD DATE',
            'PAYMENT DATE', 'STOCK'
        ]
    )
    df.to_csv('dividends.csv', index=False)
def setup_driver():
    """Build and return a Chrome WebDriver that only logs fatal messages."""
    chrome_options = Options()
    # log-level=3 silences everything below FATAL in chromedriver output.
    chrome_options.add_argument('log-level=3')
    return webdriver.Chrome(options=chrome_options)
def get_info(ticker, website, driver):
    """Return the newest dividend table row for *ticker* as a list of strings.

    Tries the stock dividend-history page first; if it has no table row,
    retries the funds-and-etfs page with a longer page-load wait.  When both
    attempts fail, logs the miss and returns 'NA' placeholders plus the
    ticker so the output CSV keeps one row per symbol.
    """
    try:
        return _scrape_first_row(driver, website, wait=10) + [ticker]
    except (IndexError, AttributeError):
        # No dividend table on the stock page; fall through to the ETF page.
        pass
    try:
        etf_url = f'https://www.nasdaq.com/market-activity/funds-and-etfs/{ticker}/dividend-history'
        return _scrape_first_row(driver, etf_url, wait=30) + [ticker]
    except (IndexError, AttributeError):
        logging.info(f'Stock {ticker} has no dividend history.')
        return ['NA', 'NA', 'NA', 'NA', 'NA', 'NA', ticker]


def _scrape_first_row(driver, url, wait):
    """Load *url*, wait for the JS-rendered table, and return the first row's cells.

    Raises IndexError/AttributeError when the page has no dividend table.
    """
    driver.get(url)
    # Fixed sleep gives the client-side table time to render.
    time.sleep(wait)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    dividend_row = soup.findAll('tr')
    # Row [0] is the header; row [1] is the most recent dividend entry.
    return [item.text for item in dividend_row[1]]
# Run the scraper only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
1d3fbff443cb40d8db73301313628d2eca4d2fa2 | 5ff85c3986448903ceacfbc035b1bc00f5157125 | /1-Chinese/1-textgcn/build_graph.py | 5693a1b9d95edbb9a909e77e71360747bd3888b5 | [] | no_license | wanlu0/MSPaper | f25e75535ab03670b0a16b94b3bea2b6f6020ec4 | 55401d32e0bc8021146ab53408f717a91958349a | refs/heads/master | 2021-03-01T11:38:50.168766 | 2020-03-06T04:23:33 | 2020-03-06T04:23:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,019 | py | import os
import random
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from utils import loadWord2Vec, clean_str
from math import log
from sklearn import svm
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
import sys
from scipy.spatial.distance import cosine
# The script takes exactly one argument: the dataset name.
if len(sys.argv) != 2:
    sys.exit("Use: python build_graph.py <dataset>")
# Whitelist of dataset names this pipeline knows how to process.
datasets = ['20ng', 'R8', 'R52', 'ohsumed', 'mr', 'tcm', 'law','medical','medical2','medical3','medical20','law2']
# build corpus
dataset = sys.argv[1]
if dataset not in datasets:
    sys.exit("wrong dataset name")
# Read Word Vectors
# word_vector_file = 'data/glove.6B/glove.6B.300d.txt'
# word_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'
#_, embd, word_vector_map = loadWord2Vec(word_vector_file)
# word_embeddings_dim = len(embd[0])
# No pretrained vectors are loaded: dimension is fixed at 300 and the
# word->vector map stays empty, so document features default to zeros.
word_embeddings_dim = 300
word_vector_map = {}
# shulffing
# Load the per-document metadata file: each line is "<name>\t<split>\t<label>".
doc_name_list = []
doc_train_list = []
doc_test_list = []
f = open('data/' + dataset + '.txt', 'r')
lines = f.readlines()
for line in lines:
    doc_name_list.append(line.strip())
    temp = line.split("\t")
    if temp[1].find('test') != -1:
        doc_test_list.append(line.strip())
    elif temp[1].find('train') != -1:
        doc_train_list.append(line.strip())
f.close()
# print(doc_train_list)
# print(doc_test_list)
# Load the cleaned document texts, one document per line, same order as above.
doc_content_list = []
f = open('data/corpus/' + dataset + '.clean.txt', 'r')
lines = f.readlines()
for line in lines:
    doc_content_list.append(line.strip())
f.close()
# print('doc_content_list has', doc_content_list[7036])
# print(doc_content_list)
# Map each train document name back to its index in the full list.
train_ids = []
for train_name in doc_train_list:
    train_id = doc_name_list.index(train_name)
    train_ids.append(train_id)
# print(train_ids)
random.shuffle(train_ids)
# partial labeled data
#train_ids = train_ids[:int(0.2 * len(train_ids))]
# Persist the shuffled train order so runs are reproducible downstream.
train_ids_str = '\n'.join(str(index) for index in train_ids)
f = open('data/' + dataset + '.train.index', 'w')
f.write(train_ids_str)
f.close()
test_ids = []
for test_name in doc_test_list:
    test_id = doc_name_list.index(test_name)
    test_ids.append(test_id)
# print(test_ids)
random.shuffle(test_ids)
test_ids_str = '\n'.join(str(index) for index in test_ids)
f = open('data/' + dataset + '.test.index', 'w')
f.write(test_ids_str)
f.close()
# All documents, train first then test, in their shuffled order.
ids = train_ids + test_ids
# print(ids)
# print(len(ids))
shuffle_doc_name_list = []
shuffle_doc_words_list = []
for id in ids:
    shuffle_doc_name_list.append(doc_name_list[int(id)])
    # print('now id is',id)
    shuffle_doc_words_list.append(doc_content_list[int(id)])
shuffle_doc_name_str = '\n'.join(shuffle_doc_name_list)
shuffle_doc_words_str = '\n'.join(shuffle_doc_words_list)
f = open('data/' + dataset + '_shuffle.txt', 'w')
f.write(shuffle_doc_name_str)
f.close()
f = open('data/corpus/' + dataset + '_shuffle.txt', 'w')
f.write(shuffle_doc_words_str)
f.close()
# build vocab
# Count global term frequencies and collect the unique token set.
word_freq = {}
word_set = set()
for doc_words in shuffle_doc_words_list:
    words = doc_words.split()
    for word in words:
        word_set.add(word)
        if word in word_freq:
            word_freq[word] += 1
        else:
            word_freq[word] = 1
vocab = list(word_set)
vocab_size = len(vocab)
# For every word, record the list of document indices it appears in
# (each document counted once via the `appeared` set).
word_doc_list = {}
for i in range(len(shuffle_doc_words_list)):
    doc_words = shuffle_doc_words_list[i]
    words = doc_words.split()
    appeared = set()
    for word in words:
        if word in appeared:
            continue
        if word in word_doc_list:
            doc_list = word_doc_list[word]
            doc_list.append(i)
            word_doc_list[word] = doc_list
        else:
            word_doc_list[word] = [i]
        appeared.add(word)
# Document frequency per word (used later for IDF).
word_doc_freq = {}
for word, doc_list in word_doc_list.items():
    word_doc_freq[word] = len(doc_list)
word_id_map = {}
for i in range(vocab_size):
    word_id_map[vocab[i]] = i
vocab_str = '\n'.join(vocab)
f = open('data/corpus/' + dataset + '_vocab.txt', 'w')
f.write(vocab_str)
f.close()
'''
Word definitions begin
'''
# The block below is disabled (kept inside a string literal): it built
# per-word TF-IDF vectors from WordNet glosses as word features.
'''
definitions = []
for word in vocab:
    word = word.strip()
    synsets = wn.synsets(clean_str(word))
    word_defs = []
    for synset in synsets:
        syn_def = synset.definition()
        word_defs.append(syn_def)
    word_des = ' '.join(word_defs)
    if word_des == '':
        word_des = '<PAD>'
    definitions.append(word_des)
string = '\n'.join(definitions)
f = open('data/corpus/' + dataset + '_vocab_def.txt', 'w')
f.write(string)
f.close()
tfidf_vec = TfidfVectorizer(max_features=1000)
tfidf_matrix = tfidf_vec.fit_transform(definitions)
tfidf_matrix_array = tfidf_matrix.toarray()
print(tfidf_matrix_array[0], len(tfidf_matrix_array[0]))
word_vectors = []
for i in range(len(vocab)):
    word = vocab[i]
    vector = tfidf_matrix_array[i]
    str_vector = []
    for j in range(len(vector)):
        str_vector.append(str(vector[j]))
    temp = ' '.join(str_vector)
    word_vector = word + ' ' + temp
    word_vectors.append(word_vector)
string = '\n'.join(word_vectors)
f = open('data/corpus/' + dataset + '_word_vectors.txt', 'w')
f.write(string)
f.close()
word_vector_file = 'data/corpus/' + dataset + '_word_vectors.txt'
_, embd, word_vector_map = loadWord2Vec(word_vector_file)
word_embeddings_dim = len(embd[0])
'''
'''
Word definitions end
'''
# label list
# Collect the distinct labels (third tab-separated field of each metadata line).
label_set = set()
for doc_meta in shuffle_doc_name_list:
    temp = doc_meta.split('\t')
    label_set.add(temp[2])
label_list = list(label_set)
label_list_str = '\n'.join(label_list)
f = open('data/corpus/' + dataset + '_labels.txt', 'w')
f.write(label_list_str)
f.close()
# x: feature vectors of training docs, no initial features
# slect 90% training set
# The last 10% of the training split is held out as a validation set.
train_size = len(train_ids)
val_size = int(0.1 * train_size)
real_train_size = train_size - val_size  # - int(0.5 * train_size)
# different training rates
real_train_doc_names = shuffle_doc_name_list[:real_train_size]
real_train_doc_names_str = '\n'.join(real_train_doc_names)
f = open('data/' + dataset + '.real_train.name', 'w')
f.write(real_train_doc_names_str)
f.close()
# Build x as a sparse matrix of averaged word vectors per document.
# Since word_vector_map is empty here, every feature is effectively 0.
row_x = []
col_x = []
data_x = []
for i in range(real_train_size):
    doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
    doc_words = shuffle_doc_words_list[i]
    words = doc_words.split()
    doc_len = len(words)
    for word in words:
        if word in word_vector_map:
            word_vector = word_vector_map[word]
            # print(doc_vec)
            # print(np.array(word_vector))
            doc_vec = doc_vec + np.array(word_vector)
    for j in range(word_embeddings_dim):
        row_x.append(i)
        col_x.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_x.append(doc_vec[j] / doc_len)  # doc_vec[j]/ doc_len
# x = sp.csr_matrix((real_train_size, word_embeddings_dim), dtype=np.float32)
x = sp.csr_matrix((data_x, (row_x, col_x)), shape=(
    real_train_size, word_embeddings_dim))
# One-hot label matrix for the real training documents.
y = []
for i in range(real_train_size):
    doc_meta = shuffle_doc_name_list[i]
    temp = doc_meta.split('\t')
    label = temp[2]
    one_hot = [0 for l in range(len(label_list))]
    label_index = label_list.index(label)
    one_hot[label_index] = 1
    y.append(one_hot)
y = np.array(y)
# print(y)
# tx: feature vectors of test docs, no initial features
test_size = len(test_ids)
row_tx = []
col_tx = []
data_tx = []
for i in range(test_size):
    doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
    doc_words = shuffle_doc_words_list[i + train_size]
    words = doc_words.split()
    doc_len = len(words)
    for word in words:
        if word in word_vector_map:
            word_vector = word_vector_map[word]
            doc_vec = doc_vec + np.array(word_vector)
    for j in range(word_embeddings_dim):
        row_tx.append(i)
        col_tx.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_tx.append(doc_vec[j] / doc_len)  # doc_vec[j] / doc_len
# tx = sp.csr_matrix((test_size, word_embeddings_dim), dtype=np.float32)
tx = sp.csr_matrix((data_tx, (row_tx, col_tx)),
                   shape=(test_size, word_embeddings_dim))
# One-hot labels for the test documents.
ty = []
for i in range(test_size):
    doc_meta = shuffle_doc_name_list[i + train_size]
    temp = doc_meta.split('\t')
    label = temp[2]
    one_hot = [0 for l in range(len(label_list))]
    label_index = label_list.index(label)
    one_hot[label_index] = 1
    ty.append(one_hot)
ty = np.array(ty)
# print(ty)
# allx: the the feature vectors of both labeled and unlabeled training instances
# (a superset of x)
# unlabeled training instances -> words
# Word nodes get small random vectors (overwritten by real vectors if any).
word_vectors = np.random.uniform(-0.01, 0.01,
                                 (vocab_size, word_embeddings_dim))
for i in range(len(vocab)):
    word = vocab[i]
    if word in word_vector_map:
        vector = word_vector_map[word]
        word_vectors[i] = vector
row_allx = []
col_allx = []
data_allx = []
# First the full training split (train + validation documents)...
for i in range(train_size):
    doc_vec = np.array([0.0 for k in range(word_embeddings_dim)])
    doc_words = shuffle_doc_words_list[i]
    words = doc_words.split()
    doc_len = len(words)
    for word in words:
        if word in word_vector_map:
            word_vector = word_vector_map[word]
            doc_vec = doc_vec + np.array(word_vector)
    for j in range(word_embeddings_dim):
        row_allx.append(int(i))
        col_allx.append(j)
        # np.random.uniform(-0.25, 0.25)
        data_allx.append(doc_vec[j] / doc_len)  # doc_vec[j]/doc_len
# ...then one row per vocabulary word node, appended after the documents.
for i in range(vocab_size):
    for j in range(word_embeddings_dim):
        row_allx.append(int(i + train_size))
        col_allx.append(j)
        data_allx.append(word_vectors.item((i, j)))
row_allx = np.array(row_allx)
col_allx = np.array(col_allx)
data_allx = np.array(data_allx)
allx = sp.csr_matrix(
    (data_allx, (row_allx, col_allx)), shape=(train_size + vocab_size, word_embeddings_dim))
# Labels for allx: one-hot for documents, all-zero rows for word nodes.
ally = []
for i in range(train_size):
    doc_meta = shuffle_doc_name_list[i]
    temp = doc_meta.split('\t')
    label = temp[2]
    one_hot = [0 for l in range(len(label_list))]
    label_index = label_list.index(label)
    one_hot[label_index] = 1
    ally.append(one_hot)
for i in range(vocab_size):
    one_hot = [0 for l in range(len(label_list))]
    ally.append(one_hot)
ally = np.array(ally)
# print(x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape)
'''
Doc word heterogeneous graph
'''
# word co-occurence with context windows
# Slide a fixed-size window over every document to collect co-occurrence stats.
window_size = 20
windows = []
for doc_words in shuffle_doc_words_list:
    words = doc_words.split()
    length = len(words)
    if length <= window_size:
        windows.append(words)
    else:
        # print(length, length - window_size + 1)
        for j in range(length - window_size + 1):
            window = words[j: j + window_size]
            windows.append(window)
            # print(window)
# Number of windows each word appears in (each window counted once).
word_window_freq = {}
for window in windows:
    appeared = set()
    for i in range(len(window)):
        if window[i] in appeared:
            continue
        if window[i] in word_window_freq:
            word_window_freq[window[i]] += 1
        else:
            word_window_freq[window[i]] = 1
        appeared.add(window[i])
# Count co-occurrences of word-id pairs within each window, in both orders,
# keyed as "id_i,id_j" strings.
word_pair_count = {}
for window in windows:
    for i in range(1, len(window)):
        for j in range(0, i):
            word_i = window[i]
            word_i_id = word_id_map[word_i]
            word_j = window[j]
            word_j_id = word_id_map[word_j]
            if word_i_id == word_j_id:
                continue
            word_pair_str = str(word_i_id) + ',' + str(word_j_id)
            if word_pair_str in word_pair_count:
                word_pair_count[word_pair_str] += 1
            else:
                word_pair_count[word_pair_str] = 1
            # two orders
            word_pair_str = str(word_j_id) + ',' + str(word_i_id)
            if word_pair_str in word_pair_count:
                word_pair_count[word_pair_str] += 1
            else:
                word_pair_count[word_pair_str] = 1
row = []
col = []
weight = []
# pmi as weights
# Word-word edges: pointwise mutual information from window co-occurrence.
# Only positive PMI pairs become edges; word nodes start at index train_size.
num_window = len(windows)
for key in word_pair_count:
    temp = key.split(',')
    i = int(temp[0])
    j = int(temp[1])
    count = word_pair_count[key]
    word_freq_i = word_window_freq[vocab[i]]
    word_freq_j = word_window_freq[vocab[j]]
    pmi = log((1.0 * count / num_window) /
              (1.0 * word_freq_i * word_freq_j/(num_window * num_window)))
    if pmi <= 0:
        continue
    row.append(train_size + i)
    col.append(train_size + j)
    weight.append(pmi)
# word vector cosine similarity as weights
'''
for i in range(vocab_size):
    for j in range(vocab_size):
        if vocab[i] in word_vector_map and vocab[j] in word_vector_map:
            vector_i = np.array(word_vector_map[vocab[i]])
            vector_j = np.array(word_vector_map[vocab[j]])
            similarity = 1.0 - cosine(vector_i, vector_j)
            if similarity > 0.9:
                # print(vocab[i], vocab[j], similarity)
                row.append(train_size + i)
                col.append(train_size + j)
                weight.append(similarity)
'''
# doc word frequency
# Term frequency of each word in each document, keyed "doc_id,word_id".
doc_word_freq = {}
for doc_id in range(len(shuffle_doc_words_list)):
    doc_words = shuffle_doc_words_list[doc_id]
    words = doc_words.split()
    for word in words:
        word_id = word_id_map[word]
        doc_word_str = str(doc_id) + ',' + str(word_id)
        if doc_word_str in doc_word_freq:
            doc_word_freq[doc_word_str] += 1
        else:
            doc_word_freq[doc_word_str] = 1
# Document-word edges weighted by TF-IDF.  Train docs occupy indices
# [0, train_size); test docs are shifted past the word nodes.
for i in range(len(shuffle_doc_words_list)):
    doc_words = shuffle_doc_words_list[i]
    words = doc_words.split()
    doc_word_set = set()
    for word in words:
        if word in doc_word_set:
            continue
        j = word_id_map[word]
        key = str(i) + ',' + str(j)
        freq = doc_word_freq[key]
        if i < train_size:
            row.append(i)
        else:
            row.append(i + vocab_size)
        col.append(train_size + j)
        idf = log(1.0 * len(shuffle_doc_words_list) /
                  word_doc_freq[vocab[j]])
        weight.append(freq * idf)
        doc_word_set.add(word)
# Assemble the full heterogeneous adjacency: docs + words + test docs.
node_size = train_size + vocab_size + test_size
adj = sp.csr_matrix(
    (weight, (row, col)), shape=(node_size, node_size))
# Summary statistics (labels are Chinese: samples / train / test / words /
# nodes / edges / classes).
print('样本数',train_size+test_size)
print('训练集',train_size)
print('测试集',test_size)
print('单词数',vocab_size)
print('结点个数',node_size)
print('边的个数',len(weight))
print('类别数',y.shape[1])
# dump objects
# Persist every matrix in the Planetoid-style "data/ind.<dataset>.<name>"
# pickle layout expected by the downstream training script.  A loop with a
# context manager replaces seven identical open/dump/close stanzas and
# guarantees each file handle is closed even if pickling fails.
for name, obj in (('x', x), ('y', y), ('tx', tx), ('ty', ty),
                  ('allx', allx), ('ally', ally), ('adj', adj)):
    with open("data/ind.{}.{}".format(dataset, name), 'wb') as f:
        pkl.dump(obj, f)
"wangzhaoonly@163.com"
] | wangzhaoonly@163.com |
bd517c47eea33b7775af83cdb8ffe53a16598566 | eac0e7be7cdfc6aa63dff46b3ca706c3fd840979 | /maskpostgresdata/__init__.py | 2c1b79b6ee68dd4bbf53a724da5e2f6c84470452 | [] | no_license | developersociety/django-maskpostgresdata | 3318cddb8881702a18b90e6c996560487089abf3 | 1cc60d1245a278c7b9e7c0ea3ef2003bbedc12fc | refs/heads/main | 2023-09-04T06:36:29.377464 | 2023-08-18T14:44:51 | 2023-08-18T14:44:51 | 200,231,576 | 3 | 0 | null | 2023-08-18T14:55:53 | 2019-08-02T12:33:42 | Python | UTF-8 | Python | false | false | 92 | py | from .management.commands.dump_masked_data import Command as BasePostgresDataMaskingCommand
| [
"alistairclark89@gmail.com"
] | alistairclark89@gmail.com |
a1ac5e350c5300dac315fe2d6bba5ddfeb3fa3de | f431bdb1f5a333448e7a274c7659f5b8fd4d56ac | /DNRtest.py | d86d3e3af4054cafb8115309ee3a11c5d7850357 | [] | no_license | RanZhu1989/case33Py | d465e7876c632a40912cf637d5a45025220a7862 | cc7c0b44a78fab8734c10ed92281ab14b57430c6 | refs/heads/master | 2023-02-04T10:03:04.802931 | 2020-12-23T06:48:22 | 2020-12-23T06:48:22 | 316,476,346 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from lib.MosekTask import *
from lib.GridData import *
from lib.PandapowerTask import *
# create data agent
data_case33 = GridData()
# create IEEE33BW network
network = PandapowerTask()
# Advance the grid data to time step 1 with dynamic network reconfiguration enabled.
data_case33.make_step(step=1,DNR=True)
# Build the Mosek reconfiguration problem for this snapshot.
problem = MosekDNR(data_case33)
problem.make_constraints(data_case33)
problem.make_objective(data_case33)
# Solve with logging and debug output enabled.
problem.solve(1, data_case33,log=True,debug=True)
# Inspect the optimal decision-variable values.
print(problem.beta.level())
print(problem.epsilon.level())
"gemina_cat@163.com"
] | gemina_cat@163.com |
87866b70179b143a7d8ca36a63784724e7ce88f1 | 1f4d46034598f635fd77fba860f75ecdbab2f70c | /scripts/make_gif.py | 1250de89be985cf384c0de12b3c0f38247231488 | [] | no_license | evan-greenbrg/CalculateMobility | 776b68629ab8f8a9a131168b84d53d90c58a7c52 | da54949724c47cba42ccfeb050b3e3cd18de9039 | refs/heads/master | 2023-04-09T14:51:33.740524 | 2023-04-04T16:18:29 | 2023-04-04T16:18:29 | 528,523,994 | 0 | 0 | null | 2022-11-08T20:32:16 | 2022-08-24T17:21:32 | Python | UTF-8 | Python | false | false | 2,492 | py | import argparse
import glob
import os
import io
import re
from natsort import natsorted
import rasterio
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
import numpy as np
def make_gif(root, out):
    """Build an animated GIF from the yearly GeoTIFF masks found in *root*.

    Each raster's nonzero pixels are accumulated into a "visited" map; the
    current year's pixels are drawn with value 2 on top of previously
    visited pixels (value 1).  One frame is rendered per non-empty year and
    the frames are written to *out* as a looping GIF.

    Raises:
        ValueError: if no raster in *root* contains any nonzero pixels.
    """
    # Find files in natural (year) order.
    fps = natsorted(glob.glob(os.path.join(root, '*.tif')))
    agrs = []
    years = []
    # Set when a year had no data, so the accumulator restarts afterwards.
    skip_flag = False
    for i, fp in enumerate(fps):
        year = re.findall(r"[0-9]{4,7}", fp)[-1]
        ds = rasterio.open(fp).read(1).astype(int)
        if not np.sum(ds):
            # Empty raster: skip this year entirely.
            skip_flag = True
            continue
        if (not i) or (skip_flag):
            # First usable year (or first after a gap): start a new accumulator.
            agr = ds
            skip_flag = False
        else:
            agr += ds
            # Mark this year's pixels distinctly from previously visited ones.
            agr[np.where(ds)] = 2
        ag_save = np.copy(agr)
        # Collapse the accumulator back to a binary visited mask for next year.
        agr[np.where(agr)] = 1
        agrs.append(ag_save)
        years.append(year)
    images = []
    for i, ag in enumerate(agrs):
        year = years[i]
        img_buf = io.BytesIO()
        fig = plt.figure(constrained_layout=True, figsize=(10, 7))
        gs = fig.add_gridspec(1, 1)
        ax = fig.add_subplot(gs[0, 0])
        ax.imshow(ag, cmap='Greys')
        ax.text(
            0.95,
            0.95,
            f'Year: {year}',
            horizontalalignment='left',
            verticalalignment='center',
            transform=ax.transAxes,
            color='red'
        )
        ax.axis('off')
        plt.savefig(img_buf, format='png')
        images.append(Image.open(img_buf))
        plt.close('all')
    # Bug fix: the original crashed with an opaque unpacking ValueError when
    # every raster was empty; fail with a clear message instead.
    if not images:
        raise ValueError(f'No non-empty rasters found in {root}')
    img, *imgs = images
    print(out)
    img.save(
        fp=out,
        format='GIF',
        append_images=imgs,
        save_all=True,
        duration=400,
        loop=30
    )
# Command-line entry point: parse --root / --out and build the GIF.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Make gif')
    parser.add_argument(
        '--root',
        metavar='root',
        type=str,
        help='root folder with tif files to make gif'
    )
    parser.add_argument(
        '--out',
        metavar='out',
        type=str,
        help='path to save the file'
    )
    args = parser.parse_args()
    make_gif(args.root, args.out)
| [
"greenberg@Evans-MacBook-Pro.local"
] | greenberg@Evans-MacBook-Pro.local |
7befc4596349f1be8acb39bc0bd4d35beeee12c9 | 551d39505955b67c1570d0b9788245c7cdaf4a05 | /exercise7/DecisionTreeSample.py | a29cfc18e8f25b5b813c6b38ef8896f61853c52f | [] | no_license | NetoPedro/DecisionTreeSample | b22ca8b170d63253580b0e6d879446b71ef2c88f | 52939442b680af7dc7c9b93152b299c1d499f973 | refs/heads/master | 2020-03-27T20:41:49.141505 | 2018-09-02T14:23:44 | 2018-09-02T14:23:44 | 147,087,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | import sklearn.datasets as datasets
import numpy as np
import pandas
# Generating the dataset
# Two interleaving half-moons with heavy noise; labels stored in a DataFrame
# column so the split helper can carry features and labels together.
samples = 10000
dataset_X,dataset_y = datasets.make_moons(n_samples=samples, noise=0.4)
dataset = pandas.DataFrame(dataset_X)
dataset["label"] = dataset_y
# Function to divide the dataset into train and test set.
def split_train_test(data, test_ratio):
    """Randomly partition *data* into (train, test) DataFrames.

    Note: the permutation is not seeded, so the split differs on every run.
    """
    permuted = np.random.permutation(len(data))
    n_test = int(len(data) * test_ratio)
    test_indices, train_indices = permuted[:n_test], permuted[n_test:]
    return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(dataset, 0.2)
# Decision Tree instantiation and GridSearch fit to find the best combination of hyperparameters
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
decision_tree = DecisionTreeClassifier()
param_grid = {"max_depth":[2,3,4,5], "max_leaf_nodes":range(5,15,1)}
tree_grid = GridSearchCV(decision_tree,param_grid=param_grid,n_jobs=-1,verbose=3,cv=3)
tree_grid.fit(train_set.drop("label",axis=1), train_set["label"])
print(tree_grid.best_estimator_)
print(tree_grid.best_score_)
# Evaluation through prediction on the test_set
from sklearn.metrics import accuracy_score
print(accuracy_score(test_set["label"], tree_grid.predict(test_set.drop("label",axis=1),)))
predictions = pandas.DataFrame()
for i in range(1,10000):
sub_decision_tree = DecisionTreeClassifier(max_depth=decision_tree.max_depth,max_leaf_nodes=decision_tree.max_leaf_nodes)
shuffled_indices = np.random.permutation(len(train_set))
subset = train_set.iloc[shuffled_indices[:100]]
sub_decision_tree.fit(subset.drop("label",axis=1),subset["label"])
sub_prediction = sub_decision_tree.predict(test_set.drop("label",axis=1))
predictions[i] = sub_prediction
from scipy import stats
# One row per tree, one column per test sample after the transpose;
# the per-column mode implements majority voting across the ensemble.
test_set_t = predictions.values.T
finalPrediction = stats.mode(test_set_t)[0].astype(int)
print(finalPrediction.T)
print(accuracy_score(test_set["label"], finalPrediction.T))
| [
"pedroneto_09@hotmail.com"
] | pedroneto_09@hotmail.com |
4a07ea84d52063b402726d57dbdf3727faf67046 | b09584e81194e40070d320c763856d6b0721935f | /tools/Polygraphy/tests/backend/trt/test_loader.py | 74ddfb66e8dcb83df156387239bb16de0286f81a | [
"BSD-3-Clause",
"Apache-2.0",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | MarkMoTrin/TensorRT | c7a46a5877b4a0687ffe2b694515e7fc923d0443 | 7f269a7e6a62f555100d9b72afb9977e702ad488 | refs/heads/main | 2023-09-05T13:08:58.048025 | 2021-10-19T08:23:08 | 2021-10-19T17:25:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,003 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import sys
import pytest
import tensorrt as trt
from polygraphy import constants, mod, util
from polygraphy.backend.trt import (
Calibrator,
CreateConfig,
EngineBytesFromNetwork,
EngineFromBytes,
EngineFromNetwork,
LoadPlugins,
ModifyNetworkOutputs,
NetworkFromOnnxBytes,
Profile,
SaveEngine,
bytes_from_engine,
engine_from_network,
modify_network_outputs,
network_from_onnx_bytes,
network_from_onnx_path,
onnx_like_from_network,
)
from polygraphy.comparator import DataLoader
from tests.helper import get_file_size, is_file_non_empty
from tests.models.meta import ONNX_MODELS
##
## Fixtures
##
@pytest.fixture(scope="session")
def identity_engine():
    """Session-scoped built TRT engine for the `identity` ONNX model."""
    network_loader = NetworkFromOnnxBytes(ONNX_MODELS["identity"].loader)
    engine_loader = EngineFromNetwork(network_loader, CreateConfig())
    with engine_loader() as engine:
        yield engine
@pytest.fixture(scope="session")
def identity_builder_network():
    """(builder, network) pair for `identity`; the parser is not yielded."""
    builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
    with builder, network, parser:
        yield builder, network
@pytest.fixture(scope="session")
def identity_network():
    """(builder, network, parser) triple for the `identity` model."""
    builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
    with builder, network, parser:
        yield builder, network, parser
@pytest.fixture(scope="session")
def identity_identity_network():
    """(builder, network, parser) triple for the `identity_identity` model."""
    builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity_identity"].loader)
    with builder, network, parser:
        yield builder, network, parser
@pytest.fixture(scope="session")
def reshape_network():
    """(builder, network, parser) triple for the `reshape` model."""
    builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["reshape"].loader)
    with builder, network, parser:
        yield builder, network, parser
@pytest.fixture(scope="session")
def modifiable_network():
    # Must return a loader since the network will be modified each time it's loaded.
    return NetworkFromOnnxBytes(ONNX_MODELS["identity_identity"].loader)
@pytest.fixture(scope="session")
def modifiable_reshape_network():
    # Must return a loader since the network will be modified each time it's loaded.
    return NetworkFromOnnxBytes(ONNX_MODELS["reshape"].loader)
##
## Tests
##
class TestLoadPlugins(object):
    """Checks that the stock TensorRT plugin library can be loaded."""

    def test_can_load_libnvinfer_plugins(self):
        def get_plugin_names():
            # Names of every plugin creator currently registered with TRT.
            return [pc.name for pc in trt.get_plugin_registry().plugin_creator_list]
        # Pick the platform-specific shared-library file name.
        if sys.platform.startswith("win"):
            plugin_lib = "nvinfer_plugin.dll"
        else:
            plugin_lib = "libnvinfer_plugin.so"
        LoadPlugins(plugins=[plugin_lib])()
        # Loading must have registered at least one plugin creator.
        assert get_plugin_names()
class TestSerializedEngineLoader(object):
    """EngineFromBytes should deserialize engines from callables or buffers."""
    def test_serialized_engine_loader_from_lambda(self, identity_engine):
        # Round-trip: serialize to a temp file, then lazily re-read via a lambda.
        with util.NamedTemporaryFile() as outpath:
            with open(outpath.name, "wb") as f, identity_engine.serialize() as buffer:
                f.write(buffer)
            loader = EngineFromBytes(lambda: open(outpath.name, "rb").read())
            with loader() as engine:
                assert isinstance(engine, trt.ICudaEngine)
    def test_serialized_engine_loader_from_buffer(self, identity_engine):
        # Deserialize directly from the in-memory serialized buffer.
        with identity_engine.serialize() as buffer:
            loader = EngineFromBytes(buffer)
            with loader() as engine:
                assert isinstance(engine, trt.ICudaEngine)
class TestOnnxNetworkLoader(object):
    """network_from_onnx_bytes should produce explicit-batch networks."""
    def test_loader(self):
        builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader)
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision
    def test_loader_explicit_precision(self):
        builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["identity"].loader, explicit_precision=True)
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            # Explicit precision was removed in TRT 8.0, so only check on older versions.
            if mod.version(trt.__version__) < mod.version("8.0"):
                assert network.has_explicit_precision
@pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.1.0.0"), reason="API was added in TRT 7.1")
class TestNetworkFromOnnxPath(object):
    """Same checks as TestOnnxNetworkLoader, but loading from a file path."""
    def test_loader(self):
        builder, network, parser = network_from_onnx_path(ONNX_MODELS["identity"].path)
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            assert not network.has_explicit_precision
    def test_loader_explicit_precision(self):
        builder, network, parser = network_from_onnx_path(ONNX_MODELS["identity"].path, explicit_precision=True)
        with builder, network, parser:
            assert not network.has_implicit_batch_dimension
            # Explicit precision was removed in TRT 8.0, so only check on older versions.
            if mod.version(trt.__version__) < mod.version("8.0"):
                assert network.has_explicit_precision
class TestModifyNetwork(object):
    """Marking/unmarking network outputs via ModifyNetworkOutputs."""
    def test_mark_layerwise(self, modifiable_network):
        # MARK_ALL should make every layer output a network output.
        load_network = ModifyNetworkOutputs(modifiable_network, outputs=constants.MARK_ALL)
        builder, network, parser = load_network()
        with builder, network, parser:
            for layer in network:
                for index in range(layer.num_outputs):
                    assert layer.get_output(index).is_network_output
    def test_mark_custom_outputs(self, modifiable_network):
        builder, network, parser = modify_network_outputs(modifiable_network, outputs=["identity_out_0"])
        with builder, network, parser:
            assert network.num_outputs == 1
            assert network.get_output(0).name == "identity_out_0"
    def test_exclude_outputs_with_mark_layerwise(self, modifiable_network):
        # exclude_outputs filters tensors out of the MARK_ALL set.
        builder, network, parser = modify_network_outputs(
            modifiable_network, outputs=constants.MARK_ALL, exclude_outputs=["identity_out_2"]
        )
        with builder, network, parser:
            assert network.num_outputs == 1
            assert network.get_output(0).name == "identity_out_0"
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
    def test_mark_shape_outputs(self, modifiable_reshape_network):
        builder, network, parser = modify_network_outputs(
            modifiable_reshape_network, outputs=["output", "reduce_prod_out_gs_2"]
        )
        with builder, network, parser:
            assert network.num_outputs == 2
            assert network.get_output(0).name == "reduce_prod_out_gs_2"
            assert network.get_output(0).is_shape_tensor
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
    def test_unmark_shape_outputs(self, modifiable_reshape_network):
        builder, network, parser = modify_network_outputs(
            modifiable_reshape_network, outputs=constants.MARK_ALL, exclude_outputs=["reduce_prod_out_gs_2"]
        )
        with builder, network, parser:
            assert network.num_outputs == 1
class TestConfigLoader(object):
    """CreateConfig should map its keyword arguments onto IBuilderConfig."""
    def test_defaults(self, identity_builder_network):
        builder, network = identity_builder_network
        loader = CreateConfig()
        assert loader.timing_cache_path is None
        with loader(builder, network) as config:
            # 16 MiB default workspace.
            assert config.max_workspace_size == 1 << 24
            # Flags that do not exist on older TRT versions are skipped
            # via suppress(AttributeError).
            with contextlib.suppress(AttributeError):
                assert not config.get_flag(trt.BuilderFlag.TF32)
            with contextlib.suppress(AttributeError):
                assert not config.get_flag(trt.BuilderFlag.SPARSE_WEIGHTS)
            assert not config.get_flag(trt.BuilderFlag.FP16)
            assert not config.get_flag(trt.BuilderFlag.INT8)
            assert config.num_optimization_profiles == 1
            assert config.int8_calibrator is None
            with contextlib.suppress(AttributeError):
                # Default tactic-source bitmask differs across TRT versions.
                if mod.version(trt.__version__) < mod.version("8.0"):
                    assert config.get_tactic_sources() == 3
                else:
                    assert config.get_tactic_sources() == 7
    def test_workspace_size(self, identity_builder_network):
        builder, network = identity_builder_network
        loader = CreateConfig(max_workspace_size=0)
        with loader(builder, network) as config:
            assert config.max_workspace_size == 0
    @pytest.mark.parametrize("flag", [True, False])
    def test_strict_types(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(strict_types=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.STRICT_TYPES) == flag
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0.0.0"), reason="API was added in TRT 8.0")
    @pytest.mark.parametrize("flag", [True, False])
    def test_restricted(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(restricted=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.SAFETY_SCOPE) == flag
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.1.0.0"), reason="API was added in TRT 7.1")
    @pytest.mark.parametrize("flag", [True, False])
    def test_tf32(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(tf32=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.TF32) == flag
    @pytest.mark.parametrize("flag", [True, False])
    def test_fp16(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(fp16=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.FP16) == flag
    @pytest.mark.parametrize("flag", [True, False])
    def test_int8(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(int8=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.INT8) == flag
    @pytest.mark.parametrize("flag", [True, False])
    def test_allow_gpu_fallback(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(allow_gpu_fallback=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.GPU_FALLBACK) == flag
    @pytest.mark.skipif(
        mod.version(trt.__version__) < mod.version("8.0"), reason="API was not available in 7.2 and older"
    )
    @pytest.mark.parametrize("flag", [True, False])
    def test_sparse_weights(self, identity_builder_network, flag):
        builder, network = identity_builder_network
        loader = CreateConfig(sparse_weights=flag)
        with loader(builder, network) as config:
            assert config.get_flag(trt.BuilderFlag.SPARSE_WEIGHTS) == flag
    def test_use_dla(self, identity_builder_network):
        builder, network = identity_builder_network
        loader = CreateConfig(use_dla=True)
        with loader(builder, network) as config:
            assert config.default_device_type == trt.DeviceType.DLA
            assert config.DLA_core == 0
    # Expected bitmasks for each tactic-source combination; the table is
    # version-dependent because TRT 8 added CUDNN as a source.
    with contextlib.suppress(AttributeError):
        if mod.version(trt.__version__) < mod.version("8.0"):
            TACTIC_SOURCES_CASES = [
                (None, 3),  # By default, all sources are enabled.
                ([], 0),
                ([trt.TacticSource.CUBLAS], 1),
                ([trt.TacticSource.CUBLAS_LT], 2),
                ([trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 3),
            ]
        else:
            TACTIC_SOURCES_CASES = [
                (None, 7),  # By default, all sources are enabled.
                ([], 0),
                ([trt.TacticSource.CUBLAS], 1),
                ([trt.TacticSource.CUBLAS_LT], 2),
                ([trt.TacticSource.CUDNN], 4),
                ([trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 3),
                ([trt.TacticSource.CUBLAS, trt.TacticSource.CUDNN], 5),
                ([trt.TacticSource.CUBLAS_LT, trt.TacticSource.CUDNN], 6),
                ([trt.TacticSource.CUDNN, trt.TacticSource.CUBLAS, trt.TacticSource.CUBLAS_LT], 7),
            ]
    @pytest.mark.parametrize("sources, expected", TACTIC_SOURCES_CASES)
    def test_tactic_sources(self, identity_builder_network, sources, expected):
        builder, network = identity_builder_network
        loader = CreateConfig(tactic_sources=sources)
        with loader(builder, network) as config:
            assert config.get_tactic_sources() == expected
    def test_calibrator_metadata_set(self, identity_builder_network):
        # Attaching a calibrator should populate its data loader with the
        # network's input metadata.
        builder, network = identity_builder_network
        calibrator = Calibrator(DataLoader())
        loader = CreateConfig(int8=True, calibrator=calibrator)
        with loader(builder, network) as config:
            assert config.int8_calibrator
            assert "x" in calibrator.data_loader.input_metadata
    def test_multiple_profiles(self, identity_builder_network):
        builder, network = identity_builder_network
        profiles = [
            Profile().add("x", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4)),
            Profile().add("x", (1, 2, 4, 4), (1, 2, 8, 8), (1, 2, 16, 16)),
        ]
        loader = CreateConfig(profiles=profiles)
        with loader(builder, network) as config:
            assert config.num_optimization_profiles == 2
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
    @pytest.mark.parametrize("path_mode", [True, False], ids=["path", "file-like"])
    def test_timing_cache(self, identity_builder_network, path_mode):
        # A timing cache can be loaded either from a path or a file-like object.
        builder, network = identity_builder_network
        with util.NamedTemporaryFile() as cache:
            loader = CreateConfig(load_timing_cache=cache.name if path_mode else cache)
            with loader(builder, network) as config:
                assert config.get_timing_cache()
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
    def test_empty_timing_cache_when_default(self, identity_builder_network):
        # The implicitly-created cache must be empty: resetting it should
        # not change its serialized size.
        builder, network = identity_builder_network
        loader = CreateConfig()
        with loader(builder, network) as config:
            cache = config.get_timing_cache()
            with cache.serialize() as buffer:
                cache_size = len(bytes(buffer))
            cache.reset()
            with cache.serialize() as buffer:
                new_cache_size = len(bytes(buffer))
            assert cache_size == new_cache_size
class TestEngineBytesFromNetwork(object):
    """Building from a network should yield a serialized engine blob."""

    def test_can_build(self, identity_network):
        # The loader yields an IHostMemory holding the serialized engine.
        with EngineBytesFromNetwork(identity_network)() as blob:
            assert isinstance(blob, trt.IHostMemory)
class TestEngineFromNetwork(object):
    """Engine building, calibration cleanup, and timing-cache accumulation."""
    def test_defaults(self, identity_network):
        loader = EngineFromNetwork(identity_network)
        assert loader.timing_cache_path is None
    def test_can_build_with_parser_owning(self, identity_network):
        loader = EngineFromNetwork(identity_network)
        with loader():
            pass
    def test_can_build_without_parser_non_owning(self, identity_builder_network):
        builder, network = identity_builder_network
        loader = EngineFromNetwork((builder, network))
        with loader():
            pass
    def test_can_build_with_calibrator(self, identity_builder_network):
        builder, network = identity_builder_network
        calibrator = Calibrator(DataLoader())
        create_config = CreateConfig(int8=True, calibrator=calibrator)
        loader = EngineFromNetwork((builder, network), create_config)
        with loader():
            pass
        # Calibrator buffers should be freed after the build
        assert all([buf.allocated_nbytes == 0 for buf in calibrator.device_buffers.values()])
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("8.0"), reason="Unsupported for TRT 7.2 and older")
    @pytest.mark.parametrize("path_mode", [True, False], ids=["path", "file-like"])
    def test_timing_cache_generate_and_append(self, path_mode):
        # Builds two different models into the same cache and verifies that
        # the combined cache grows, but without duplicating header data.
        with util.NamedTemporaryFile() as total_cache, util.NamedTemporaryFile() as identity_cache:
            def build_engine(model, cache):
                if not path_mode:
                    cache.seek(0)
                network_loader = NetworkFromOnnxBytes(ONNX_MODELS[model].loader)
                # In non-path_mode, use the file-like object directly.
                # Must load the cache with CreateConfig so that new data is appended
                # instead of overwriting the previous cache.
                loader = EngineFromNetwork(
                    network_loader,
                    CreateConfig(load_timing_cache=cache.name),
                    save_timing_cache=cache.name if path_mode else cache,
                )
                with loader():
                    pass
                if not path_mode:
                    cache.seek(0)
            assert not total_cache.read()
            build_engine("const_foldable", total_cache)
            const_foldable_cache_size = get_file_size(total_cache.name)
            # Build this network twice. Once with a fresh cache so we can determine its size.
            assert get_file_size(identity_cache.name) == 0
            build_engine("identity", identity_cache)
            identity_cache_size = get_file_size(identity_cache.name)
            build_engine("identity", total_cache)
            total_cache_size = get_file_size(total_cache.name)
            # The total cache should be larger than either of the individual caches.
            assert total_cache_size > const_foldable_cache_size and total_cache_size > identity_cache_size
            # The total cache should also be smaller than or equal to the sum of the individual caches since
            # header information should not be duplicated.
            assert total_cache_size <= (const_foldable_cache_size + identity_cache_size)
class TestBytesFromEngine(object):
    """bytes_from_engine should serialize an engine into a plain bytes object."""

    def test_serialize_engine(self, identity_network):
        with engine_from_network(identity_network) as engine:
            blob = bytes_from_engine(engine)
            assert isinstance(blob, bytes)
class TestSaveEngine(object):
    """SaveEngine should write a non-empty engine file to the given path."""

    def test_save_engine(self, identity_network):
        with util.NamedTemporaryFile() as outfile:
            save_loader = SaveEngine(EngineFromNetwork(identity_network), path=outfile.name)
            with save_loader():
                assert is_file_non_empty(outfile.name)
class TestOnnxLikeFromNetwork(object):
    """onnx_like_from_network should handle a variety of model structures."""
    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.2"), reason="Unsupported for TRT 7.1 and older")
    @pytest.mark.parametrize(
        "model_name", ["identity", "empty_tensor_expand", "const_foldable", "and", "scan", "dim_param", "tensor_attr"]
    )
    def test_onnx_like_from_network(self, model_name):
        # Only checks that conversion produces something truthy for each model.
        assert onnx_like_from_network(NetworkFromOnnxBytes(ONNX_MODELS[model_name].loader))
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
114a3690d3e8d7109785b55584eddd54b82468bf | e26839377e28a27d53bf946ce8b23c5304868be0 | /project/models/Food.py | 8065f09b5a2ff141c75abbe6123798a3b3586b7c | [] | no_license | liumx10/ele | 82ec24e1a64499a4ff80866a2300c513e1a7259d | 280dfe873f795bc2a37f6724be0833c7f7ed65af | refs/heads/master | 2021-01-10T08:12:26.507965 | 2016-02-29T11:53:03 | 2016-02-29T11:53:03 | 47,866,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #coding:utf-8
from project import db
class Food(db.Model):
    """ORM row for one food item sold by a restaurant.

    NOTE(review): `fid` appears to be the restaurant id used by the query
    helpers below, while `rid` is stored but never queried here — confirm
    the intended meaning of the two columns against the callers.
    """
    __tablename__ = "food"
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    rid = db.Column(db.Integer)
    fid = db.Column(db.Integer)
    # Display name of the food item.
    name = db.Column(db.String(64))
    # How many times this item has been ordered.
    count = db.Column(db.Integer)
    def __init__(self, fid, rid, name, count):
        self.rid = rid
        self.fid = fid
        self.name = name
        self.count = count
def get_food(restaurant_id):
    """Return the popular foods of a restaurant as pie-chart-style slices.

    Each returned Food object gets a fraction attribute ``y`` of the
    restaurant's total order count; foods whose share drops below 0.5%
    are folded into a single catch-all dict slice appended at the end.

    Changes vs. original: debug ``print`` statements removed, the manual
    totalling loop replaced by ``sum``, and a guard added so a zero total
    no longer raises ZeroDivisionError.
    """
    res = Food.query.filter_by(fid=restaurant_id).order_by(Food.count.desc()).all()
    count = sum(food.count for food in res)
    if count == 0:
        # No orders recorded at all: everything lands in the catch-all slice.
        return [{'name': u"其他", 'y': 1.0, 'fid': -1, 'rid': -1}]
    for food in res:
        food.y = food.count*1.0/count
    all_count = 0.0
    foods = []
    for food in res:
        if food.y < 0.005:
            # Results are sorted by count descending, so every later food
            # is below the threshold as well.
            break
        all_count = all_count + food.y
        foods.append(food)
    foods.append({'name': u"其他", 'y': 1.0-all_count, 'fid': -1, 'rid':-1})
    return foods
def get_all_food(restaurant_id):
    """Return every Food row of the restaurant (matched on the `fid` column)."""
    return Food.query.filter_by(fid=restaurant_id).all()
| [
"liumengxing2010@qq.com"
] | liumengxing2010@qq.com |
71420c46e794fbf9129e80cd832982ba3453f560 | c0836fbc0d26ec5b4fbef8b116536ee1573a63e3 | /1_basic/2_pandas/pandas_15.py | c103bccdeca3d2290f5bb6aabbc243f1cc9500b8 | [] | no_license | SungmanHan/machineLearningStudy | 5e4c2869351cceddb6cd212323c4a710a97984cc | 36854f946252158b2cdb18b6842f0c905d0811b1 | refs/heads/master | 2020-07-12T21:21:18.126845 | 2019-09-25T13:23:50 | 2019-09-25T13:23:50 | 204,908,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,567 | py | # -*- coding: utf-8 -*-
import pandas as pd
# The sklearn.datasets package provides loaders for the bundled
# teaching datasets (functions named load_...).
from sklearn.datasets import load_iris
# Load the iris dataset.
iris_data = load_iris()
# load_iris returns a Bunch, a dict-like container whose members can be
# accessed both by key and by attribute.
print(type(iris_data))
# Bunch.keys() lists the available keys.
print(iris_data.keys())
# The 'data' key holds the feature matrix (a 2-D numpy array);
# attribute access is equivalent.
print(iris_data['data'])
print(iris_data.data)
print(type(iris_data.data))
# Store the feature data in a pandas DataFrame.
X_df = pd.DataFrame(iris_data.data)
# Use the Bunch's feature_names as the DataFrame header.
X_df.columns = iris_data.feature_names
# Inspect sample counts and missing values.
print(X_df.info())
# Summary statistics of the numeric columns.
print(X_df.describe())
# Build a Series for the label data; the 'target' key holds the labels
# as a 1-D numpy array.
y_df = pd.Series(iris_data.target)
# sklearn's bundled datasets come preprocessed, so the labels are
# already numeric rather than strings.
print(y_df)
# Class distribution of the labels (absolute and relative).
print(y_df.value_counts())
print(y_df.value_counts() / len(y_df))
# Join features and labels column-wise.
all_df = pd.concat([X_df, y_df], axis=1)
# Raise pandas' display limit so all columns print.
pd.options.display.max_columns = 10
print(all_df)
# DataFrame.corr() computes pairwise correlations between columns.
corr_df = all_df.corr()
# Correlation with the label column: values near +1 mean the feature
# grows with the label, near 0 means no linear relation, near -1 means
# the feature shrinks as the label grows.
print(corr_df)
print(iris_data.target_names)
| [
"hansung926@gmail.com"
] | hansung926@gmail.com |
7bc640435ac8b1c04189e32d8de4b0291e22ffe4 | c9720c1b088237c2d45fd809e35753e875beace8 | /Attention_weighted_sim.py | 1bb730682150f2d839e652ac1db0df55e8b4215a | [] | no_license | Remorax/IBM-Internship | 6576b44e285db6d81d3a5bc4a6f61ba5f7fc9727 | c857330758b64a611d6e6eda793c80979bdc7267 | refs/heads/master | 2023-05-26T00:55:03.112221 | 2021-02-13T08:56:27 | 2021-02-13T08:56:27 | 267,085,878 | 1 | 0 | null | 2021-01-18T10:05:28 | 2020-05-26T15:49:06 | Jupyter Notebook | UTF-8 | Python | false | false | 11,674 | py | import os, itertools, time, pickle
import subprocess
from xml.dom import minidom
from collections import Counter, OrderedDict
from operator import itemgetter
from scipy import spatial
from sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score
from sklearn.feature_extraction.text import TfidfVectorizer
import re, sys
import numpy as np
import scipy.sparse as sp
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from math import ceil, exp
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
# Load the preprocessed dataset pickle named on the command line.
# Use a context manager so the handle is closed after reading
# (the original left the file open for the program's lifetime).
with open(sys.argv[3], "rb") as f:
    data, emb_indexer, emb_indexer_inv, emb_vals, gt_mappings, neighbours_dicts, ontologies_in_alignment = pickle.load(f)
# Alignment pairs arrive as lists; normalize to hashable tuples.
ontologies_in_alignment = [tuple(pair) for pair in ontologies_in_alignment]
flatten = lambda l: [item for sublist in l for item in sublist]
# Entity pairs without neighbourhood info are scored by raw cosine
# similarity instead; generate_input() accumulates them here.
direct_inputs, direct_targets = [], []
def cos_sim(a, b):
    """Cosine similarity between vectors *a* and *b* (1 = same direction)."""
    distance = spatial.distance.cosine(a, b)
    return 1 - distance
all_fn, all_fp = [], []
def greedy_matching():
    """Score every test pair, then sweep a decision threshold.

    Pairs with neighbourhood data are scored by the trained model in
    batches; pairs routed to ``direct_inputs`` fall back to raw cosine
    similarity of their embeddings. A threshold sweep then picks the
    cutoff with the best F1 against the ground-truth mappings, appending
    the best metrics to the module-level ``all_metrics`` and collecting
    the corresponding false negatives/positives into ``all_fn``/``all_fp``.

    Returns an OrderedDict mapping (entity1, entity2) -> (score, label).
    Relies heavily on module-level state (model, test_data_t/f, etc.).
    """
    global batch_size, test_data_t, test_data_f, model, optimizer, emb_indexer_inv, gt_mappings, all_metrics, direct_inputs, direct_targets
    all_results = OrderedDict()
    direct_inputs, direct_targets = [], []
    with torch.no_grad():
        all_pred = []
        np.random.shuffle(test_data_t)
        np.random.shuffle(test_data_f)
        # Build model inputs for positive (label 1) and negative (label 0) pairs.
        inputs_pos, targets_pos = generate_input(test_data_t, 1)
        inputs_neg, targets_neg = generate_input(test_data_f, 0)
        inputs_all = list(inputs_pos) + list(inputs_neg)
        targets_all = list(targets_pos) + list(targets_neg)
        indices_all = np.random.permutation(len(inputs_all))
        inputs_all = np.array(inputs_all)[indices_all]
        targets_all = np.array(targets_all)[indices_all]
        batch_size = min(batch_size, len(inputs_all))
        num_batches = int(ceil(len(inputs_all)/batch_size))
        for batch_idx in range(num_batches):
            batch_start = batch_idx * batch_size
            batch_end = (batch_idx+1) * batch_size
            inputs = inputs_all[batch_start: batch_end]
            targets = targets_all[batch_start: batch_end]
            # inp[0]/inp[1] hold the two entities of each pair; index 0 of
            # the last axis is the entity itself (neighbours follow).
            inp = inputs.transpose(1,0,2)
            inp_elems = torch.LongTensor(inputs).to(device)
            targ_elems = torch.DoubleTensor(targets)
            outputs = model(inp_elems)
            outputs = [el.item() for el in outputs]
            targets = [True if el.item() else False for el in targets]
            for idx, pred_elem in enumerate(outputs):
                ent1 = emb_indexer_inv[inp[0][idx][0]]
                ent2 = emb_indexer_inv[inp[1][idx][0]]
                if (ent1, ent2) in all_results:
                    print ("Error: ", ent1, ent2, "already present")
                all_results[(ent1, ent2)] = (pred_elem, targets[idx])
    direct_targets = [True if el else False for el in direct_targets]
    print ("Len (direct inputs): ", len(direct_inputs))
    # Pairs without neighbourhood data: score by plain cosine similarity.
    for idx, direct_input in enumerate(direct_inputs):
        ent1 = emb_indexer_inv[direct_input[0]]
        ent2 = emb_indexer_inv[direct_input[1]]
        sim = cos_sim(emb_vals[direct_input[0]], emb_vals[direct_input[1]])
        all_results[(ent1, ent2)] = (sim, direct_targets[idx])
    # Sweep thresholds over the observed score range (padded by 0.02),
    # tracking the setting with the best F1.
    optimum_metrics, opt_threshold = [-1000 for i in range(5)], -1000
    low_threshold = np.min([el[0] for el in all_results.values()]) - 0.02
    high_threshold = np.max([el[0] for el in all_results.values()]) + 0.02
    threshold = low_threshold
    step = 0.001
    opt_fn, opt_fp = [], []
    while threshold < high_threshold:
        res = []
        for i,key in enumerate(all_results):
            if all_results[key][0] > threshold:
                res.append(key)
        # False negatives: ground-truth mappings from the held-out
        # ontologies that the threshold filtered out.
        fn_list = [(key, all_results[key][0]) for key in gt_mappings if key not in set(res) and not is_valid(test_onto, key)]
        fp_list = [(elem, all_results[elem][0]) for elem in res if not all_results[elem][1]]
        tp_list = [(elem, all_results[elem][0]) for elem in res if all_results[elem][1]]
        tp, fn, fp = len(tp_list), len(fn_list), len(fp_list)
        exception = False
        try:
            precision = tp/(tp+fp)
            recall = tp/(tp+fn)
            f1score = 2 * precision * recall / (precision + recall)
            f2score = 5 * precision * recall / (4 * precision + recall)
            f0_5score = 1.25 * precision * recall / (0.25 * precision + recall)
        except Exception as e:
            # Division by zero when a threshold yields no predictions;
            # skip this threshold and continue the sweep.
            print (e)
            exception = True
            step = 0.001
            threshold += step
            continue
        print ("Threshold: ", threshold, precision, recall, f1score, f2score, f0_5score)
        if f1score > optimum_metrics[2]:
            optimum_metrics = [precision, recall, f1score, f2score, f0_5score]
            opt_threshold = threshold
            opt_fn = fn_list
            opt_fp = fp_list
        # Use a finer step near the top of the score range.
        if threshold > 0.98 and not exception:
            step = 0.0001
        else:
            step = 0.001
        print (step, threshold, exception)
        threshold += step
    print ("Precision: {} Recall: {} F1-Score: {} F2-Score: {} F0.5-Score: {}".format(*optimum_metrics))
    all_fn.extend(opt_fn)
    all_fp.extend(opt_fp)
    if optimum_metrics[2] != -1000:
        all_metrics.append((opt_threshold, optimum_metrics))
    return all_results
def masked_softmax(inp):
    """Softmax over the last dim that effectively ignores zero entries.

    Zero-valued positions (padding) get a large negative offset before the
    softmax, so they receive (numerically) zero probability mass.
    """
    scores = inp.double()
    neg_offset = (scores == 0).double() * -9999  # stand-in for -inf
    return (scores + neg_offset).softmax(dim=-1)
class SiameseNetwork(nn.Module):
    """Siamese scorer over (entity, neighbours) index tuples.

    Each side of a pair is embedded with a frozen embedding table; an
    attention-weighted context is computed from the entity's neighbours,
    and the final score is a learned mix (weight ``w``) of the cosine
    similarity of the two contexts and of the two entity embeddings.

    NOTE(review): reads module globals `emb_vals`, `dropout` and
    `sys.argv[1]` (neighbour count) at construction time.
    """
    def __init__(self):
        super().__init__()
        self.embedding_dim = np.array(emb_vals).shape[1]
        # Frozen lookup table initialized from the precomputed embeddings.
        self.name_embedding = nn.Embedding(len(emb_vals), self.embedding_dim)
        self.name_embedding.load_state_dict({'weight': torch.from_numpy(np.array(emb_vals))})
        self.name_embedding.weight.requires_grad = False
        self.dropout = dropout
        self.cosine_sim_layer = nn.CosineSimilarity(dim=1)
        # NOTE(review): `output` is defined but never used in forward().
        self.output = nn.Linear(1024, 300)
        n = int(sys.argv[1])
        # v: learned per-neighbour mixing weights, initialized uniform.
        self.v = nn.Parameter(torch.DoubleTensor([1/(n-1) for i in range(n-1)]))
        # w: learned balance between context similarity and node similarity.
        self.w = nn.Parameter(torch.randn(1))
    def forward(self, inputs):
        # inputs: index tensor; after the permute, inputs[0]/inputs[1] are
        # the two sides of each pair (entity index first, neighbours after).
        results = []
        inputs = inputs.permute(1,0,2)
        for i in range(2):
            x = self.name_embedding(inputs[i])
            node = x.permute(1,0,2)[:1].permute(1,0,2) # 3993 * 1 * 512
            neighbours = x.permute(1,0,2)[1:].permute(1,0,2) # 3993 * 9 * 512
            # Attention over neighbours w.r.t. the entity embedding;
            # masked_softmax zeroes out padding neighbours.
            att_weights = torch.bmm(neighbours, node.permute(0, 2, 1)).squeeze()
            att_weights = masked_softmax(att_weights).unsqueeze(-1)
            context = torch.matmul(self.v, att_weights * neighbours)
            context = context.reshape(-1, self.embedding_dim)
            node = node.reshape(-1, self.embedding_dim)
            results.append((context, node))
        x = self.w * self.cosine_sim_layer(results[0][0], results[1][0]) + \
            (1-self.w) * self.cosine_sim_layer(results[0][1], results[1][1])
        return x
def is_valid(test_onto, key):
    """True iff *key*'s ontology pair is NOT among the held-out test ontologies.

    *key* is a pair of "<ontology>#<entity>" strings; only the ontology
    prefixes are compared against *test_onto* (an iterable of tuples).
    """
    onto_pair = tuple(el.split("#")[0] for el in key)
    return onto_pair not in test_onto
def generate_data_neighbourless(elem_tuple):
    """Map each entity name in *elem_tuple* to its embedding index (1-D array)."""
    op = np.array([emb_indexer[elem] for elem in elem_tuple])
    return op
def generate_data(elem_tuple):
    """Embedding indices of each entity's neighbourhood (one row per entity).

    Raises KeyError when an entity has no entry in ``neighbours_dicts``.
    """
    return np.array([[emb_indexer[el] for el in neighbours_dicts[elem.split("#")[0]][elem]] for elem in elem_tuple])
def generate_input(elems, target):
    """Build neighbourhood index arrays for *elems*, all labelled *target*.

    Pairs whose entities lack a neighbourhood entry are routed to the
    module-level ``direct_inputs``/``direct_targets`` lists instead
    (they are later scored by raw cosine similarity).

    Returns (inputs, targets) as numpy arrays.
    """
    inputs, targets = [], []
    global direct_inputs, direct_targets
    for elem in list(elems):
        try:
            inputs.append(generate_data(elem))
            targets.append(target)
        except KeyError:
            # Entity missing from neighbours_dicts/emb_indexer: fall back
            # to the neighbourless representation. (Was a bare `except:`,
            # which also hid real bugs and swallowed KeyboardInterrupt.)
            direct_inputs.append(generate_data_neighbourless(elem))
            direct_targets.append(target)
    return np.array(inputs), np.array(targets)
print("Number of neighbours: " + str(sys.argv[1]))
def count_non_unk(elem):
    """Number of entries in *elem* that are not the "<UNK>" placeholder."""
    return sum(1 for token in elem if token != "<UNK>")
# Keep at most argv[1] neighbours per entity, and drop entities whose
# neighbourhood has argv[2] or fewer real (non-"<UNK>") entries.
neighbours_dicts = {ont: {el: neighbours_dicts[ont][el][:int(sys.argv[1])] for el in neighbours_dicts[ont]
                    if count_non_unk(neighbours_dicts[ont][el]) > int(sys.argv[2])} for ont in neighbours_dicts}
# BUG FIX: the original called np.random.shuffle(list(data_items)) on a
# throwaway list, so `data` was never actually shuffled. Materialize the
# items first, shuffle that list in place, then rebuild the OrderedDict.
data_items = list(data.items())
np.random.shuffle(data_items)
data = OrderedDict(data_items)
print ("Number of entities:", len(data))
all_metrics = []
# Leave-three-ontologies-out cross-validation: each fold holds out three
# ontology pairs for testing and trains on the rest.
for i in list(range(0, len(ontologies_in_alignment)-1, 3)):
    test_onto = ontologies_in_alignment[i:i+3]
    train_data = {elem: data[elem] for elem in data if tuple([el.split("#")[0] for el in elem]) not in test_onto}
    test_data = {elem: data[elem] for elem in data if tuple([el.split("#")[0] for el in elem]) in test_onto}
    print ("Training size:", len(train_data), "Testing size:", len(test_data))
    torch.set_default_dtype(torch.float64)
    train_test_split = 0.9
    # Balance classes by repeating the (scarcer) positive pairs until they
    # match the number of negative pairs.
    train_data_t = [key for key in train_data if train_data[key]]
    train_data_f = [key for key in train_data if not train_data[key]]
    train_data_t = np.repeat(train_data_t, ceil(len(train_data_f)/len(train_data_t)), axis=0)
    train_data_t = train_data_t[:len(train_data_f)].tolist()
    #train_data_f = train_data_f[:int(len(train_data_t))]
    # [:int(0.1*(len(train_data) - len(train_data_t)) )]
    np.random.shuffle(train_data_f)
    # Hyperparameters for this fold.
    lr = 0.001
    num_epochs = 50
    weight_decay = 0.001
    batch_size = 10
    dropout = 0.3
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = SiameseNetwork().to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    for epoch in range(num_epochs):
        # Regenerate and reshuffle the epoch's training batches.
        inputs_pos, targets_pos = generate_input(train_data_t, 1)
        inputs_neg, targets_neg = generate_input(train_data_f, 0)
        inputs_all = list(inputs_pos) + list(inputs_neg)
        targets_all = list(targets_pos) + list(targets_neg)
        indices_all = np.random.permutation(len(inputs_all))
        inputs_all = np.array(inputs_all)[indices_all]
        targets_all = np.array(targets_all)[indices_all]
        batch_size = min(batch_size, len(inputs_all))
        num_batches = int(ceil(len(inputs_all)/batch_size))
        for batch_idx in range(num_batches):
            batch_start = batch_idx * batch_size
            batch_end = (batch_idx+1) * batch_size
            inputs = inputs_all[batch_start: batch_end]
            targets = targets_all[batch_start: batch_end]
            inp_elems = torch.LongTensor(inputs).to(device)
            targ_elems = torch.DoubleTensor(targets).to(device)
            optimizer.zero_grad()
            outputs = model(inp_elems)
            loss = F.mse_loss(outputs, targ_elems)
            loss.backward()
            optimizer.step()
            if batch_idx%1000 == 0:
                print ("Epoch: {} Idx: {} Loss: {}".format(epoch, batch_idx, loss.item()))
    model.eval()
    test_data_t = [key for key in test_data if test_data[key]]
    test_data_f = [key for key in test_data if not test_data[key]]
    res = greedy_matching()
    # FIX: use context managers so the pickle files are flushed and closed
    # (the original opened them without ever closing).
    with open("test_results.pkl", "wb") as f1:
        pickle.dump(res, f1)
with open(sys.argv[4], "wb") as f1:
    pickle.dump([all_fn, all_fp], f1)
print ("Final Results: " + str(np.mean([el[1] for el in all_metrics], axis=0)))
print ("Best threshold: " + str(all_metrics[np.argmax([el[1][2] for el in all_metrics])][0]))
| [
"vivekbalasundaram@gmail.com"
] | vivekbalasundaram@gmail.com |
30d321a07b229416598f5b342fce036537d243be | e68f1a4a71a39a183a20fd925015f322f9e273fa | /maps_and_tiles.py | 757240ab54b06a7c3d98688fb6fba2b0be9c41c9 | [] | no_license | OskarDuda/evolution-simulator | f6307e672f4e34b5bf7f4f6b3a2c8d0322afa56f | acdc6f86bdc130ce9763681a347f21e40869d2b0 | refs/heads/master | 2021-07-24T06:42:02.416759 | 2017-11-02T13:03:58 | 2017-11-02T13:03:58 | 107,558,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | import numpy as np
import matplotlib.pyplot as plt
# Global registry: every Tile_type instance appends itself here on creation.
TILE_TYPES = []


class Tile_type:
    """A terrain category: food regeneration rates plus a temperature.

    Indentation reconstructed -- the original chunk was whitespace-mangled.
    Construction has a deliberate side effect: the new instance is appended
    to the module-level TILE_TYPES registry.
    """

    def __init__(self, id='', plants_inc=0.0, meat_inc=0.0, temperature=0.0):
        # Auto-generate an identifier when none is given.  Numbering follows
        # the registry size, so IDs stay unique as long as every instance is
        # created through this constructor.
        if not id:
            self.id = 'Custom_Tile' + str(len(TILE_TYPES) + 1)
        else:
            self.id = id
        self.plants_income = plants_inc   # plants added per food-generation tick
        self.meat_income = meat_inc       # meat added per food-generation tick
        self.temperature = temperature    # per the original comment: affects animals living here
        TILE_TYPES.append(self)           # self-register
class Tile:
    """One map cell: a terrain type plus the food currently sitting on it.

    Indentation reconstructed from a whitespace-mangled original.
    """

    def __init__(self, tt=Tile_type(), plants=0.0, meat=0.0, x=0, y=0):
        # NOTE(review): the default `tt=Tile_type()` is evaluated once, at
        # class-definition time.  It creates a single auto-named Tile_type that
        # is also appended to TILE_TYPES as a side effect, and every Tile built
        # without an explicit `tt` shares that one object.  Kept as-is to
        # preserve behavior; consider `tt=None` with a per-call default.
        self.tile_type = tt
        self.plants = plants   # edible plant mass currently on the tile
        self.meat = meat       # meat currently on the tile
        self.x = x             # grid column
        self.y = y             # grid row
        self.packs = []        # presumably animal packs on this tile -- filled elsewhere

    def __repr__(self):
        return 'A tile of {}, with {} edible plants and {} meat'.format(self.tile_type.id, self.plants, self.meat)
class Map:
    """A 2-D grid of tiles built by a pluggable generator function.

    Indentation reconstructed from a whitespace-mangled original.
    """

    def __init__(self, x_size, y_size, f):
        # f: callable (x_size, y_size) -> 2-D list (columns of tiles) used as
        # the generation strategy, e.g. geographic_tilemap_generator_by_temp.
        self.x_size = x_size
        self.y_size = y_size
        self.tilemap = self.generate_tilemap(x_size, y_size, f)

    def generate_tilemap(self, x_size, y_size, f):
        # Delegate grid construction entirely to the generator strategy.
        return f(x_size, y_size)

    def generate_food(self):
        # One growth tick: every tile gains its type's plant and meat income.
        for column in self.tilemap:
            for tile in column:
                tile.plants += tile.tile_type.plants_income
                tile.meat += tile.tile_type.meat_income
def geographic_tilemap_generator_by_temp(x_size, y_size):
    """Build an x_size-by-y_size tile grid with a latitude-like gradient.

    Each row of the map targets a temperature `m` that starts at the warmest
    registered tile type and cools by `step` per row; tile types are drawn at
    random per cell, weighted toward types whose temperature is close to the
    row target.  Returns a list of x_size columns, each y_size tiles long.

    Indentation reconstructed from a whitespace-mangled original; nesting of
    the loops follows the only reading that makes the code well-formed.
    """
    # Temperatures of every registered tile type, in registration order.
    temps = []
    for t in TILE_TYPES:
        temps.append(t.temperature)

    base = 1.1
    # NOTE(review): the gradient runs over y_size rows but the step divides by
    # x_size -- for non-square maps the last rows over/undershoot min(temps).
    # Kept as-is; TODO confirm whether y_size was intended.
    step = (max(temps) - min(temps)) / x_size

    # Pre-fill the grid with the first registered type as a placeholder.
    grid = []
    for col in range(x_size):
        grid.append([])
        for row in range(y_size):
            grid[col].append(TILE_TYPES[0])

    m = max(temps)  # current row's target temperature
    for row in range(y_size):
        # Roulette weights: cumulative temperatures damped (base^-d^2) by the
        # squared distance of each type's temperature from the row target, so
        # types near temperature m dominate the draw.
        a_roul = np.cumsum(temps) * ((1 / base) ** (np.square(np.array(temps) - m * np.ones(len(temps)))))
        for col in range(x_size):
            # Roulette pick: count weights above the uniform draw, minus one,
            # as the index of the chosen tile type.
            indic = sum(a_roul > np.random.rand()) - 1
            grid[col][row] = Tile(tt=TILE_TYPES[indic], x=col, y=row)
        m -= step  # next row is colder
    return grid
# Register the game's terrain palette.  Each constructor call appends the new
# type to the global TILE_TYPES registry as a side effect, so repeatedly
# rebinding `t1` is harmless -- the binding itself is never read again
# (a throwaway name like `_` would make that intent clearer).
t1 = Tile_type(id='desert',temperature=40.0)
t1 = Tile_type(id='jungle', plants_inc=0.4, meat_inc=0.2, temperature=35.0)
t1 = Tile_type(id='mountains', plants_inc=0.2, temperature=10.0)
t1 = Tile_type(id='taiga', plants_inc=0.3, meat_inc=0.1, temperature=10.0)
t1 = Tile_type(id='woodlands', plants_inc=0.2, meat_inc=0.1, temperature=20.0)
t1 = Tile_type(id='lakeside', plants_inc=0.2, meat_inc=0.1, temperature=20.0)
t1 = Tile_type(id='meadows', plants_inc=0.3, meat_inc=0.1, temperature=25.0)
t1 = Tile_type(id='polar', temperature=-10.0)
t1 = Tile_type(id='tundra', plants_inc=0.1, temperature=-10.0)
# Build the world at import time: a 150x100 map laid out by temperature.
M = Map(150,100,geographic_tilemap_generator_by_temp)
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.