content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright: This module has been placed in the public domain.
import sys
import os
import os.path
import stat
import imp
import getopt
import textwrap
import time
import threading
import subprocess
import tempfile
import platform
import json
import pp.preprocessor as pp
import easytorq
import utility
import threadingutil
import moduleloadutility
# globals
__mlu = moduleloadutility.ModuleLoadUtility()
CCFX_PREPDIR = ".ccfxprepdir"
# walkaround to avoid \x5c character problem in os.path.split
# walkaround to avoid limit of length of file path in Windows
if platform.architecture() == ('32bit', 'WindowsPE'):
import win32getshortname
__converter_file_funcs = easytorq.ICUConverter()
__converter_file_funcs.setencoding("char")
else:
fopen = file
remove_file = os.remove
rename_file = os.rename
stat_file = os.stat
if __name__ == '__main__':
__theMain(__mlu).main(sys.argv)
| [
171,
119,
123,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
25,
770,
8265,
468,
587,
4624,
287,
262,
1171,
7386,
13,
198,
198,
11748,
25064,
198,
... | 2.864307 | 339 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-08 06:02
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
18,
319,
1584,
12,
1157,
12,
2919,
9130,
25,
2999,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.690909 | 55 |
# Generated by Django 2.1 on 2019-01-19 12:02
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
13130,
12,
486,
12,
1129,
1105,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
'''Parse data from data.gov.in dataset'''
import json
import osgeo.ogr
def get_columns(fields, data_labels, date_label='YEAR',
subdivision=False, state_label='SUBDIVISION'):
'''Get index of date_label, data_labels
and optionally subdivision's name'''
def to_col(col_id):
'''json id to python list "column" index'''
return ord(col_id) - ord('a')
# keep keys from data_labels to distinguish
# between different measurements
data_cols = {}
date_col = None
state_col = None
for lbldict in fields:
if lbldict['label'] == date_label:
date_col = to_col(lbldict['id'])
elif lbldict['label'] in data_labels:
data_cols[lbldict['label']] = to_col(lbldict['id'])
elif subdivision and lbldict['label'] == state_label:
state_col = to_col(lbldict['id'])
if subdivision:
return data_cols, date_col, state_col
else:
return data_cols, date_col
def get_temporal_measurement(filename, years, data_labels=['ANNUAL'], nodataval=None):
'''Returns temperature or number of storms from dataset
in a dicitonary of dictionaries with key, value pairs s.t.
key: year(int), value: dictionary with key, pair values s.t.
key: data_label from `data_labels`, value: measurement(float)
Arguments:
* `filename`: filename from data.gov.in dataset, must be full path
from root of project.
* `years`: int list, years of measurements.
* `data_labels`: labels of data to be returned from dataset.
Non-existent labels are ignored.'''
# if len(set(data_labels)) != len(data_labels):
# raise ValueError
with open(filename, 'r') as json_file:
data = json.load(json_file)
# temporal -> label = 'YEAR'
data_cols, date_col = get_columns(data['fields'], data_labels)
# get measurements in a list of dictionaries
temporal_measurement = {}
for data_row in data['data']:
year = int(data_row[date_col])
if year in years:
for key, data_col in data_cols.items():
try:
temporal_measurement.setdefault(year, {})[key] = float(data_row[data_col])
except ValueError: # NA
temporal_measurement.setdefault(year, {})[key] = nodataval
return temporal_measurement
def get_state_rainfall(filename, years, data_labels=['ANNUAL'], nodataval=None):
'''Returns rainfall (default: annual) for every state in India
in the form of dictionary of dictionaries of dictionaries.
Dictionary structure:
{
state1 (str) : {
year1 (int) : {
data_label1 (str): measurement (float) | None,
data_label2 (str): ...
},
year2 (int) : {...}
},
state2 (str) : {...}
}
Arguments:
* `filename`: full path from root of project to (rainfall) dataset.
* `years`: int list, years of measurements.
* `data_labels`: labels of data to be returned from dataset.
Non-existent labels are ignored.'''
with open(filename, 'r') as json_file:
data = json.load(json_file)
data_cols, date_col, state_col = get_columns(data['fields'], data_labels, subdivision=True)
state_measurements = {}
for data_row in data['data']:
if data_row[date_col] in years or data_row[date_col] in map(str, years):
year = int(data_row[date_col])
for key, data_col in data_cols.items():
try:
state_measurements.setdefault(data_row[state_col], {})\
.setdefault(year, {})[key] = float(data_row[data_col])
except ValueError: # 'NA'
state_measurements.setdefault(data_row[state_col], {})\
.setdefault(year, {})[key] = nodataval
return state_measurements
def get_perimeters_from_geojson(filename):
'''Returns a dictionary of key: value pairs s.t.
key: NAME_1 of state, value: list of perimeters,
i.e. (longitude, latitude) lists (one or more)
Arguments:
* `filename`: Path from root of project to geojson
'''
# dictionary to preserve NAME_1 attribute (state)
geojson = {}
with open(filename, 'r') as fdc:
line = fdc.readline()
while line:
# get only
# ... "geometry": { ... } ...
# |<--->|
roi_ind = line.find('geometry')
# file holds whole geometry in one line
# the uses one "empty" line to seperate geometries
if roi_ind != -1:
roi_bgn = line.find(':', roi_ind) + 1
roi_end = line.rfind('}')
key_ind = line.find('NAME_1')
key_bgn = line.find('"', key_ind + len('NAME_1":')) + 1
key_end = line.find('"', key_bgn)
geojson[line[key_bgn:key_end]] = line[roi_bgn:roi_end].strip()
line = fdc.readline()
# get POLYGONs and MULTIPOLYGONs of each state
perimeters = {key: osgeo.ogr.CreateGeometryFromJson(geo) for key, geo in geojson.items()}
perimeters_coords = {}
for key in perimeters.keys():
# if MULTIPOLYGON -> iterable of POLYGONs
if perimeters[key].GetGeometryName() == 'MULTIPOLYGON':
for polygon in perimeters[key]:
perimeters_coords.setdefault(key, []).append(
osgeo.ogr.Geometry.GetPoints(
osgeo.ogr.Geometry.GetGeometryRef(polygon, 0)
)
)
else: # POLYGON itself
perimeters_coords[key] = [osgeo.ogr.Geometry.GetPoints(
osgeo.ogr.Geometry.GetGeometryRef(perimeters[key], 0)
)]
return perimeters_coords
| [
7061,
6,
10044,
325,
1366,
422,
1366,
13,
9567,
13,
259,
27039,
7061,
6,
198,
198,
11748,
33918,
198,
11748,
28686,
469,
78,
13,
519,
81,
198,
198,
4299,
651,
62,
28665,
82,
7,
25747,
11,
1366,
62,
23912,
1424,
11,
3128,
62,
18242... | 2.144154 | 2,754 |
# Vendor
import pygraphviz as pgv
| [
2,
39896,
198,
11748,
12972,
34960,
85,
528,
355,
23241,
85,
198
] | 2.833333 | 12 |
p = Pessoa('samuel')
print(p.nome)
p = Pessoa.outro_contrutor('saulo', 'nunes')
print(p.sobrenome)
| [
198,
198,
79,
796,
350,
408,
12162,
10786,
37687,
2731,
11537,
198,
4798,
7,
79,
13,
77,
462,
8,
198,
79,
796,
350,
408,
12162,
13,
448,
305,
62,
3642,
81,
38409,
10786,
82,
2518,
78,
3256,
705,
77,
4015,
11537,
198,
4798,
7,
79... | 1.961538 | 52 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AutoScalingPolicy(object):
"""
Autoscaling policies define the criteria that trigger autoscaling actions and the actions to take.
An autoscaling policy is part of an autoscaling configuration. For more information, see
`Autoscaling`__.
You can create the following types of autoscaling policies:
- **Schedule-based:** Autoscaling events take place at the specific times that you schedule.
- **Threshold-based:** An autoscaling action is triggered when a performance metric meets or exceeds a threshold.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/Tasks/autoscalinginstancepools.htm
"""
def __init__(self, **kwargs):
"""
Initializes a new AutoScalingPolicy object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.autoscaling.models.ScheduledPolicy`
* :class:`~oci.autoscaling.models.ThresholdPolicy`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param capacity:
The value to assign to the capacity property of this AutoScalingPolicy.
:type capacity: oci.autoscaling.models.Capacity
:param id:
The value to assign to the id property of this AutoScalingPolicy.
:type id: str
:param display_name:
The value to assign to the display_name property of this AutoScalingPolicy.
:type display_name: str
:param policy_type:
The value to assign to the policy_type property of this AutoScalingPolicy.
:type policy_type: str
:param time_created:
The value to assign to the time_created property of this AutoScalingPolicy.
:type time_created: datetime
:param is_enabled:
The value to assign to the is_enabled property of this AutoScalingPolicy.
:type is_enabled: bool
"""
self.swagger_types = {
'capacity': 'Capacity',
'id': 'str',
'display_name': 'str',
'policy_type': 'str',
'time_created': 'datetime',
'is_enabled': 'bool'
}
self.attribute_map = {
'capacity': 'capacity',
'id': 'id',
'display_name': 'displayName',
'policy_type': 'policyType',
'time_created': 'timeCreated',
'is_enabled': 'isEnabled'
}
self._capacity = None
self._id = None
self._display_name = None
self._policy_type = None
self._time_created = None
self._is_enabled = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['policyType']
if type == 'scheduled':
return 'ScheduledPolicy'
if type == 'threshold':
return 'ThresholdPolicy'
else:
return 'AutoScalingPolicy'
@property
def capacity(self):
"""
Gets the capacity of this AutoScalingPolicy.
The capacity requirements of the autoscaling policy.
:return: The capacity of this AutoScalingPolicy.
:rtype: oci.autoscaling.models.Capacity
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""
Sets the capacity of this AutoScalingPolicy.
The capacity requirements of the autoscaling policy.
:param capacity: The capacity of this AutoScalingPolicy.
:type: oci.autoscaling.models.Capacity
"""
self._capacity = capacity
@property
def id(self):
"""
Gets the id of this AutoScalingPolicy.
The ID of the autoscaling policy that is assigned after creation.
:return: The id of this AutoScalingPolicy.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this AutoScalingPolicy.
The ID of the autoscaling policy that is assigned after creation.
:param id: The id of this AutoScalingPolicy.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
Gets the display_name of this AutoScalingPolicy.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:return: The display_name of this AutoScalingPolicy.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this AutoScalingPolicy.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param display_name: The display_name of this AutoScalingPolicy.
:type: str
"""
self._display_name = display_name
@property
def policy_type(self):
"""
**[Required]** Gets the policy_type of this AutoScalingPolicy.
The type of autoscaling policy.
:return: The policy_type of this AutoScalingPolicy.
:rtype: str
"""
return self._policy_type
@policy_type.setter
def policy_type(self, policy_type):
"""
Sets the policy_type of this AutoScalingPolicy.
The type of autoscaling policy.
:param policy_type: The policy_type of this AutoScalingPolicy.
:type: str
"""
self._policy_type = policy_type
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this AutoScalingPolicy.
The date and time the autoscaling configuration was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_created of this AutoScalingPolicy.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this AutoScalingPolicy.
The date and time the autoscaling configuration was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:param time_created: The time_created of this AutoScalingPolicy.
:type: datetime
"""
self._time_created = time_created
@property
def is_enabled(self):
"""
Gets the is_enabled of this AutoScalingPolicy.
Whether the autoscaling policy is enabled.
:return: The is_enabled of this AutoScalingPolicy.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this AutoScalingPolicy.
Whether the autoscaling policy is enabled.
:param is_enabled: The is_enabled of this AutoScalingPolicy.
:type: bool
"""
self._is_enabled = is_enabled
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
15069,
357,
66,
8,
1584,
11,
33448,
11,
18650,
290,
14,
273,
663,
29116,
13,
220,
1439,
2489,
10395,
13,
198,
2,
770,
3788,
318,
10668,
12,
36612,
284,
345,
739,
262,
14499,
2448,
33532,
1... | 2.515952 | 3,103 |
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from flask import Response
from osdf import ACCEPTED_MESSAGE_TEMPLATE
from osdf.logging.osdf_logging import debug_log
def osdf_response_for_request_accept(request_id="", transaction_id="", request_status="", status_message="",
version_info = {
'placementVersioningEnabled': False,
'placementMajorVersion': '1',
'placementMinorVersion': '0',
'placementPatchVersion': '0'
},
response_code=202, as_http=True):
"""Helper method to create a response object for request acceptance, so that the object can be sent to a client
:param request_id: request ID provided by the caller
:param transaction_id: transaction ID provided by the caller
:param request_status: the status of a request
:param status_message: details on the status of a request
:param response_code: the HTTP status code to send -- default is 202 (accepted)
:param as_http: whether to send response as HTTP response object or as a string
:return: if as_http is True, return a HTTP Response object. Otherwise, return json-encoded-message
"""
response_message = ACCEPTED_MESSAGE_TEMPLATE.render(request_id=request_id, transaction_id=transaction_id,
request_status=request_status, status_message=status_message)
if not as_http:
return response_message
response = Response(response_message, content_type='application/json; charset=utf-8')
response.headers.add('content-length', len(response_message))
placement_ver_enabled = version_info['placementVersioningEnabled']
if placement_ver_enabled:
placement_minor_version = version_info['placementMinorVersion']
placement_patch_version = version_info['placementPatchVersion']
placement_major_version = version_info['placementMajorVersion']
x_latest_version = placement_major_version+'.'+placement_minor_version+'.'+placement_patch_version
response.headers.add('X-MinorVersion', placement_minor_version)
response.headers.add('X-PatchVersion', placement_patch_version)
response.headers.add('X-LatestVersion', x_latest_version)
debug_log.debug("Versions set in HTTP header for synchronous response: X-MinorVersion: {} X-PatchVersion: {} X-LatestVersion: {}"
.format(placement_minor_version, placement_patch_version, x_latest_version))
response.status_code = response_code
return response
| [
2,
16529,
45537,
201,
198,
2,
220,
220,
15069,
357,
66,
8,
1853,
12,
5539,
5161,
5,
51,
42443,
14161,
201,
198,
2,
201,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
... | 2.601317 | 1,367 |
"""Builds the common network.
Summary of available functions:
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Basic model parameters.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
# Global constants describing the data set.
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
LEARNING_RATE_DECAY_FACTOR = 0.5 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.01 # Initial learning rate.0.1
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference.
labels: Labels from inputs. 1-D tensor of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
# This method is very hard to control learning rate.
decay_steps = 2000
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
| [
37811,
15580,
82,
262,
2219,
3127,
13,
198,
198,
22093,
286,
1695,
5499,
25,
198,
1303,
3082,
1133,
262,
2472,
2994,
286,
262,
17724,
351,
2461,
284,
262,
14722,
13,
198,
2994,
796,
2994,
7,
28764,
9278,
11,
14722,
8,
628,
1303,
136... | 2.866747 | 1,666 |
#! /usr/bin/python3
# import re
import mechanicalsoup
# Connect to starken
browser = mechanicalsoup.StatefulBrowser()
browser.open("http://www.starken.cl/cotiza-tu-envio/")
browser.select_form('form[action="/cotiza-tu-envio"]')
browser["ciudadOrigen"] = 104
browser["ciudadDestino"] = 1
browser["alto"] = 10
browser["ancho"] = 10
browser["largo"] = 10
browser["kilos"] = 1
string = str(browser.get_current_page().find_all(attrs={"for": "edit-verificacion"})[0])
leng = len("/sites/all/themes/starken/img/")
string = string[string.index("/sites/all/themes/starken/img/") + leng :]
n1 = string[0]
string = string[string.index("/sites/all/themes/starken/img/") + leng :]
signo = string[:2]
string = string[string.index("/sites/all/themes/starken/img/") + leng :]
n2 = string[0]
print(n1,signo,n2)
if signo=="11":
browser["verificacion"] = str(int(n1)+int(n2))
if signo == "12":
browser["verificacion"] = str(int(n1)-int(n2))
if signo == "13":
browser["verificacion"] = str(int(n1)*int(n2))
browser.submit_selected()
result = browser.get_current_page()
print(result.find_all(attrs={"class","tabla-resultado-tabla"}))
string = str(result.find_all(attrs={"class","tabla-resultado-tabla"})[0])
print(string.split("</tr><tr>"))
print(string.index('$'))
aux = string.index('$')
print(string[aux:(aux+7)])
# print(result.find_all(attrs={"class","ts ts-n ts-n-dom"}))
# print(result.find_all(attrs={"class","ts ts-n ts-n-ag"}))
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
1330,
302,
198,
11748,
12370,
82,
10486,
198,
198,
2,
8113,
284,
19278,
268,
198,
40259,
796,
12370,
82,
10486,
13,
9012,
913,
46532,
3419,
198,
40259,
13,
9654,
7203,
4023,
... | 2.482759 | 580 |
import netflix_controls as nc
import screen_maker as sm
import sys
import time
begin_offset = 0 #seconds
name = ""
try:
name = sys.argv[1]
except:
print("no movie name given!")
if name is not "":
print("starting taking screenshots for movie: "+name)
print("leave stopped movie on fulscreen at 0:0")
for i in range(10,-1,-1):
time.sleep(1)
print("starting in "+str(i)+" !")
print("here we go!!!")
sm.mkdir(name)
nc.play_pause()
nc.play_pause()
nc.forward(int(begin_offset/10))
i = 0
while True:
nc.wait_for_hide()
sm.screenshot(name+"_"+str(i))
nc.forward()
nc.forward()
i+=1
| [
11748,
2010,
10046,
62,
13716,
82,
355,
299,
66,
198,
11748,
3159,
62,
10297,
355,
895,
198,
11748,
25064,
198,
11748,
640,
198,
198,
27471,
62,
28968,
796,
657,
1303,
43012,
198,
198,
3672,
796,
13538,
198,
28311,
25,
198,
220,
220,
... | 2.153846 | 325 |
# encoding utf-8
import torch
from torch import nn
from itertools import repeat
if __name__ == "__main__":
spatial_dropout = SpatialDropout(0.2)
batch_tensor = torch.randn(size=(3, 4, 5))
out_tensor = spatial_dropout(batch_tensor)
print(out_tensor)
print(out_tensor.shape)
| [
2,
21004,
3384,
69,
12,
23,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
340,
861,
10141,
1330,
9585,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
21739,
62,
14781,
44... | 2.487395 | 119 |
from datetime import timedelta, datetime, timezone
import locale
import time
| [
6738,
4818,
8079,
1330,
28805,
12514,
11,
4818,
8079,
11,
640,
11340,
198,
11748,
36693,
198,
11748,
640,
198
] | 4.052632 | 19 |
#Crie um programa que mostre na tela todos os numeros pares de 1 a 50
for count in range(0, 50+2, 2):
print(count, end=' ')
print('ACABOU A CONTAGEM!') | [
2,
34,
5034,
23781,
1430,
64,
8358,
749,
260,
12385,
256,
10304,
284,
37427,
28686,
5470,
418,
279,
3565,
390,
352,
257,
2026,
201,
198,
201,
198,
1640,
954,
287,
2837,
7,
15,
11,
2026,
10,
17,
11,
362,
2599,
201,
198,
220,
220,
... | 2.376812 | 69 |
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from rest_framework import views, status, generics, permissions
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.utils import uc_response
from certificates.models import StudentCertificate
from certificates.serializers import StudentCertificateSerializer
from course_homes.models import CourseHome
from course_homes.serializers import CourseHomeMinSerializer
from courses.models import Course, UserBuyCourse
from my.serializers import UserBuyCourseSerializer, UserBuyProgramSerializer
from programs.models import Program, StudentProgram, UserBuyProgram
from courses.serializers import CourseSearchSerializer, CourseMySerializer
from programs.serializers import ProgramSearchSerializer, ProgramMinSerializer, StudentProgramSerializer, \
ProgramProcessSerializer
from users.models import User
| [
6738,
42625,
14208,
13,
7295,
13,
79,
363,
20900,
1330,
31525,
20900,
11,
7873,
3673,
2025,
46541,
11,
33523,
9876,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
198,
6738,
1334,
62,
30604,
1330,
5009,
11,
3722,
11,
1... | 4.263158 | 228 |
""" Custom implementation of encryption
"""
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Hash import SHA256
from Crypto.Random import get_random_bytes
def encrypt(key: str, data: str):
"""Encrypts the data string using a key
The key will be used by PBKDF2 to derive a new key.
This new key will be used in an AES cipher configured with EAX mode.
Args:
key: a secret string
data: the data string to be encrypted
Returns:
A string containing the ciphertext, nonce, tag, and salt used in
the encryption process.
"""
if not key:
raise ValueError("Key is missing")
if not data:
raise ValueError("Data is missing")
salt = get_random_bytes(40)
derived_key = PBKDF2(key, salt, 16, count=1000, hmac_hash_module=SHA256)
cipher = AES.new(derived_key, AES.MODE_EAX)
ciphertext, tag = cipher.encrypt_and_digest(data.encode())
return ".".join([x.hex() for x in [ciphertext, cipher.nonce, tag, salt]])
def decrypt(key: bytes, data: str):
"""Decrypts the data string using a key
Args:
key: a secret string
data: the data string to be decrypted
Returns:
The decrypted string
"""
if not key:
raise ValueError("Key is missing")
if not data:
raise ValueError("Data is missing")
try:
ciphertext, nonce, tag, salt = [bytes.fromhex(v) for v in data.split(".")]
derived_key = PBKDF2(key, salt, 16, count=1000, hmac_hash_module=SHA256)
cipher = AES.new(derived_key, AES.MODE_EAX, nonce=nonce)
plaintext = cipher.decrypt_and_verify(ciphertext, tag)
return plaintext.decode("utf-8")
except Exception as ex:
raise DecryptError("Error decrypting the data: %s", str(ex))
class DecryptError(Exception):
"""Raised when an error happened when decrypting the data"""
| [
37811,
8562,
7822,
286,
15835,
198,
37811,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
198,
6738,
36579,
13,
19703,
4668,
13,
42,
8068,
1330,
30524,
42,
8068,
17,
198,
6738,
36579,
13,
26257,
1330,
25630,
11645,
198,
6738,
36579,... | 2.632089 | 723 |
from typing import Optional | [
6738,
19720,
1330,
32233
] | 6.75 | 4 |
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
File: breakout.py
Name: Ian Kuo
-------------------------
The objective of this program is to create a breakout game. In this game, a layer of bricks lines the top third of the
screen and the goal is to destroy them all. A ball moves straight around the screen, bouncing off the top and two
sides of the screen. When a brick is hit, the ball bounces back and the brick is destroyed.
The player loses a turn when the ball touches the bottom of the screen. To prevent this from happening,
the player has a horizontally movable paddle to bounce the ball upward, keeping it in play.
"""
from campy.gui.events.timer import pause
from breakoutgraphics_ext import BreakoutGraphics
FRAME_RATE = 1000 / 60 # 120 frames per second.
NUM_LIVES = 3 # Number of turns which the player can play game.
if __name__ == '__main__':
main()
| [
37811,
198,
14192,
10669,
12243,
448,
4935,
198,
48003,
276,
422,
7651,
10918,
338,
12243,
448,
416,
198,
31056,
6592,
5030,
12,
40728,
11,
39859,
494,
449,
518,
11,
8047,
38774,
11,
198,
392,
13075,
406,
13481,
198,
198,
8979,
25,
31... | 3.801587 | 252 |
import os
import numpy as np
from montepython.likelihood_class import Likelihood
# initialization routine
# compute likelihood
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
40689,
538,
7535,
13,
2339,
11935,
62,
4871,
1330,
4525,
11935,
628,
198,
220,
220,
220,
1303,
37588,
8027,
628,
220,
220,
220,
1303,
24061,
14955,
198
] | 3.72973 | 37 |
"""
AzCam tests - not yet implemented
"""
| [
37811,
198,
26903,
21701,
5254,
532,
407,
1865,
9177,
198,
37811,
198
] | 3.5 | 12 |
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import otTables
from fontbakery.checkrunner import WARN
from fontbakery.codetesting import (assert_PASS,
assert_SKIP,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.profiles import opentype as opentype_profile
def test_check_gdef_spacing_marks():
""" Are some spacing glyphs in GDEF mark glyph class? """
check = CheckTester(opentype_profile,
"com.google.fonts/check/gdef_spacing_marks")
ttFont = get_test_font()
assert_SKIP(check(ttFont),
'if a font lacks a GDEF table...')
add_gdef_table(ttFont, {})
assert_PASS(check(ttFont),
'with an empty GDEF table...')
# Add a table with 'A' defined as a mark glyph:
add_gdef_table(ttFont, {'A': 3})
assert_results_contain(check(ttFont),
WARN, 'spacing-mark-glyphs',
'if a mark glyph has non-zero width...')
def test_check_gdef_mark_chars():
""" Are some mark characters not in in GDEF mark glyph class? """
check = CheckTester(opentype_profile,
"com.google.fonts/check/gdef_mark_chars")
ttFont = get_test_font()
assert_SKIP(check(ttFont),
'if a font lacks a GDEF table...')
# Add a GDEF table not including `acutecomb` (U+0301) as a mark char:
add_gdef_table(ttFont, {})
message = assert_results_contain(check(ttFont),
WARN, 'mark-chars',
'if a mark-char is not listed...')
assert 'U+0301' in message
# Include it in the table to see the check PASS:
add_gdef_table(ttFont, {'acutecomb': 3})
assert_PASS(check(ttFont),
'when properly declared...')
def test_check_gdef_non_mark_chars():
    """ Are some non-mark characters in GDEF mark glyph class spacing? """
    check = CheckTester(opentype_profile,
                        "com.google.fonts/check/gdef_non_mark_chars")
    font = get_test_font()
    assert_SKIP(check(font),
                'if a font lacks a GDEF table...')
    # Both an empty GDEF table and one listing only genuine mark chars pass.
    add_gdef_table(font, {})
    assert_PASS(check(font),
                'with an empty GDEF table.')
    add_gdef_table(font, {'acutecomb': 3})
    assert_PASS(check(font),
                'with an GDEF with only properly declared mark chars.')
    # 'acute' (U+00B4) is a spacing char; declaring it as a mark must WARN
    # and the message must name the offending codepoint.
    add_gdef_table(font, {'acute': 3, 'acutecomb': 3})
    message = assert_results_contain(check(font),
                                     WARN, 'non-mark-chars',
                                     'with an GDEF with a non-mark char (U+00B4, "acute") misdeclared')
    assert 'U+00B4' in message
| [
6738,
10369,
33637,
13,
926,
25835,
1330,
309,
10234,
756,
11,
649,
10962,
198,
6738,
10369,
33637,
13,
926,
25835,
13,
83,
2977,
1330,
30972,
51,
2977,
198,
198,
6738,
10369,
65,
33684,
13,
9122,
16737,
1330,
42660,
198,
6738,
10369,
... | 2.040685 | 1,401 |
# Sets DP (Top Voted), O(n^4) time, O(n^3) space
# Optimised (Top Voted), O(target * n^2) time, O(target) space
# Optimised (Revisited), O(target * n^2) time, O(target) space
| [
220,
220,
220,
1303,
21394,
27704,
357,
9126,
569,
5191,
828,
440,
7,
77,
61,
19,
8,
640,
11,
440,
7,
77,
61,
18,
8,
2272,
628,
220,
220,
220,
1303,
30011,
1417,
357,
9126,
569,
5191,
828,
440,
7,
16793,
1635,
299,
61,
17,
8,
... | 2.333333 | 81 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import decimal
from django.contrib.auth.models import User
from django.db import models
class Car(BaseModel):
    """Car"""
    # Owner of the car; deleting the user cascades to their cars.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Free-form display name for the car.
    label = models.CharField(max_length=200)
    number_plate = models.CharField(max_length=20)
    # Manufacturer / model / year / notes are optional descriptive metadata.
    make = models.CharField(max_length=30, null=True, blank=True)
    model = models.CharField(max_length=30, null=True, blank=True)
    year = models.IntegerField(null=True, blank=True)
    notes = models.CharField(max_length=512, null=True, blank=True)
class FuelStation(BaseModel):
    """Fuel station"""
    # Owner of the record; deleting the user removes their stations.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    label = models.CharField(max_length=200)
    # Optional street address and free-form notes.
    address = models.CharField(max_length=512, null=True, blank=True)
    notes = models.CharField(max_length=512, null=True, blank=True)
class Refuelling(BaseModel):
"""Refuelling event"""
car = models.ForeignKey(Car, on_delete=models.CASCADE)
station = models.ForeignKey(FuelStation, on_delete=models.CASCADE)
date = models.DateTimeField()
litres = models.DecimalField(max_digits=9, decimal_places=3)
litre_price = models.DecimalField(max_digits=6, decimal_places=3)
price = models.DecimalField(max_digits=9, decimal_places=2)
daycounterkm = models.DecimalField(max_digits=9, decimal_places=1)
totalcounterkm = models.IntegerField()
notes = models.CharField(max_length=512, null=True, blank=True)
@property
@property
@property
@property
@property
@property
class Default(BaseModel):
    """Default Car and FuelStation for User"""
    # Exactly one defaults record per user; removed with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Pre-selected car/station used when the user does not pick one.
    car = models.ForeignKey(Car, on_delete=models.CASCADE)
    station = models.ForeignKey(FuelStation, on_delete=models.CASCADE)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
32465,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
... | 2.755882 | 680 |
from socket import error as SocketError
from bs4 import BeautifulSoup
from difflib import SequenceMatcher
import urllib
from joblib import Parallel, delayed
import multiprocessing
import re
import pymysql
import requests
import errno
## Debugging entry point; it was not present originally.
if __name__ == '__main__':
    # Endpoints of the DCS (Digital Corpus of Sanskrit) web application.
    post_url = 'http://sanskrit-linguistics.org/dcs/ajax-php/ajax-text-handler-wrapper.php'
    get_url = 'http://sanskrit-linguistics.org/dcs/index.php?contents=texte'
    # Local MySQL connection; DictCursor returns rows as dicts.
    db = pymysql.connect(host='localhost', port=3333,
                         user='root',
                         password='',
                         db='sanskrit',
                         charset='utf8',
                         cursorclass=pymysql.cursors.DictCursor)
    cursor = db.cursor()
    #create_text_mapper()
    #create_chapter_mapper()
    #create_sentence_mapper()
    split_sentences() #<--- crashes on this method; everything above works. The create_sentence_mapper
    ## method logs the fetched requests, where the markup can be inspected as well.
    cursor.close()
    db.close()
| [
6738,
17802,
1330,
4049,
355,
47068,
12331,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
814,
8019,
1330,
45835,
19044,
2044,
198,
11748,
2956,
297,
571,
198,
6738,
1693,
8019,
1330,
42945,
11,
11038,
198,
11748,
18540,
30... | 1.800327 | 611 |
from GKeepToCalc.KeepPruner.main import *
| [
6738,
402,
15597,
2514,
9771,
66,
13,
15597,
47,
5143,
263,
13,
12417,
1330,
1635,
628,
198
] | 2.588235 | 17 |
"""
Afterware runs after the formatter and operates on the saved file
"""
__all__ = ['Pipeline', 'Register', 'Apply']
import Logger
import tempfile
import os
from shutil import copyfile
from .afterware_registrar import AfterwareRegistrar
import Config
registrar = AfterwareRegistrar()
cfg = Config.Get()
def Register(name):
""" Decorator for creating Afterware """
return wrapper
def Pipeline(nextInPipeline):
""" Decorator for using afteware in a pipeline """
return wrapper
def Apply(savedFile):
    """ Run the file through afterware """
    # Work on a throwaway copy so the real BOM file is never deleted or
    # modified by an afterware step.
    workingCopy = createCopyOfFile(savedFile)
    for name in cfg['afterware']:
        handler = registrar.Dispatch(name)
        if handler is not None:
            # Each registered handler operates on the temp copy in turn.
            handler(workingCopy)
        else:
            Logger.Error("Unkown Afterware", name)
    # Drop the temp copy unless the copy step fell back to the original path.
    if workingCopy != savedFile:
        os.remove(workingCopy)
    return savedFile
def createCopyOfFile(file):
    """Create a copy of *file* in the temp folder and return the copy's path.

    The copy keeps the original extension (it is named ``bom.temp<ext>``).
    If the copy cannot be made, the error is logged and the original path is
    returned so the caller can still run afterware, just not on a copy.
    """
    # Only the extension is needed; the base name is discarded
    # (the original bound an unused `filename` local here).
    ext = os.path.splitext(file)[1]
    tmpFile = os.path.join(tempfile.gettempdir(), "bom.temp{0}".format(ext))
    try:
        copyfile(file, tmpFile)
        return tmpFile
    except Exception:
        Logger.Error("Could not create a copy of the file", file, " running afterware on that path")
        return file
| [
37811,
198,
3260,
1574,
4539,
706,
262,
1296,
1436,
290,
14051,
319,
262,
7448,
2393,
198,
37811,
198,
834,
439,
834,
796,
37250,
47,
541,
4470,
3256,
705,
38804,
3256,
705,
44836,
20520,
198,
198,
11748,
5972,
1362,
198,
198,
11748,
... | 3.122661 | 481 |
#!/usr/bin/env python
import json
import os
import subprocess
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
11748,
33918,
201,
198,
11748,
28686,
201,
198,
11748,
850,
14681,
201,
198,
201,
198
] | 2.592593 | 27 |
# Copyright (c) 2021, Sangchun Ha. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformer_transducer.audio_encoder import AudioEncoder
from transformer_transducer.label_encoder import LabelEncoder
from transformer_transducer.model import TransformerTransducer
import torch
| [
2,
15069,
357,
66,
8,
33448,
11,
30043,
354,
403,
9398,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.819905 | 211 |
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
| [
11748,
7110,
306,
13,
34960,
62,
48205,
355,
467,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
306,
13,
42712,
355,
279,
87,
198,
11748,
7110,
306,
13,
26875,
62,
69,
9548,
355,
31246,
628,
628,
628
] | 3.128205 | 39 |
from flask import Flask, Blueprint,render_template, redirect,request,url_for,session,flash,jsonify,json,make_response
from flasgger import swag_from
from functools import wraps
import datetime
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
from flask_jwt_extended import (create_access_token,get_jwt_identity, jwt_required)
from app.models.database import *
product = Blueprint('product', __name__)
db = MyDatabase()
@product.route('/v2/products',methods=['GET'])
#@token_required
@product.route('/v2/products',methods=['POST'])
#@token_required
@product.route('/v2/products/<prod_id>',methods=['GET'])
#@token_required
@product.route('/v2/products/<prod_id>',methods=['PUT'])
#@token_required
@product.route('/v2/products/<prod_id>',methods=['DELETE'])
#@token_required
| [
6738,
42903,
1330,
46947,
11,
39932,
11,
13287,
62,
28243,
11,
18941,
11,
25927,
11,
6371,
62,
1640,
11,
29891,
11,
34167,
11,
17752,
1958,
11,
17752,
11,
15883,
62,
26209,
198,
6738,
781,
292,
26679,
1330,
1509,
363,
62,
6738,
198,
... | 2.849498 | 299 |
# -*- coding: utf-8 -*-
"""AI_models.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Uz-CAo71_eqktLGNkEHHMdHOeCy76gft
# Imports and downloads
"""
!pip install NRCLex
!pip install torch
!pip install stanza
import stanza
stanza.download('en')
import nltk
nltk.download('punkt')
from bs4 import BeautifulSoup
from nrclex import NRCLex
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
"""# Converting Data to JSON"""
def get_character_dictionary_from_html(filename, name_position, dialogue_position, scenario_position):
    """
    Parse a pdf-to-html dump and group dialogue lines per character.

    Divs are classified by their CSS `left` pixel offset: character names,
    dialogue lines and scenario descriptions each live in their own column.
    A +/-5 px tolerance absorbs small layout jitter.

    Parameters:
        filename: string: The path of the html file to read
        name_position: float: left pixel offset of character names
        dialogue_position: float: left pixel offset of dialogues
        scenario_position: float: left pixel offset of scenarios

    Returns:
        character_dialogues: dict: "Name" -> list of that character's dialogues
        scenarios: dict: {'scenarios': [...]}; kept as a dict so scenario
                   sub-categories can be added later
    """
    with open(filename) as fp:
        soup = BeautifulSoup(fp, "html.parser")
    character_dialogues = {}
    scenarios = {'scenarios': []}
    character = None
    dialogue = ""
    dialogue_ready = False
    for div in soup.find_all('div'):
        try:
            # style looks like "top:...;left:123px;..." -- extract `left`
            # once per div (the original re-parsed it up to three times).
            left = float(div['style'].split(';')[1].split(':')[1].split('px')[0])
        except (KeyError, IndexError, ValueError):
            # Divs without the expected style layout are skipped, as before.
            continue
        if name_position - 5 <= left <= name_position + 5:
            # A new name flushes the dialogue accumulated for the previous one.
            if dialogue_ready:
                character_dialogues.setdefault(character, []).append(dialogue)
                dialogue = ""
            character = div.text
        if dialogue_position - 5 <= left <= dialogue_position + 5:
            dialogue = dialogue + " " + div.text
            dialogue_ready = True
        if scenario_position - 5 <= left <= scenario_position + 5:
            scenarios['scenarios'].append(div.text)
    # Flush the trailing dialogue.  Guard against files with no name divs at
    # all: the original code raised NameError on `character` here.
    if character is not None:
        character_dialogues.setdefault(character, []).append(dialogue)
    return character_dialogues, scenarios
def get_character_dictionary_from_text(filename, name_position, dialogue_position, scenario_position):
    """
    Convert a plain-text script into a character dictionary.

    Lines of the form "NAME: dialogue" are credited to NAME; lines with no
    colon are treated as scenario descriptions.  Only the first colon splits
    the line, so dialogues that themselves contain colons (times, URLs, ...)
    are no longer dropped (the original split on every colon and silently
    skipped such lines).

    Parameters:
        filename: string: The path of the file to read
        name_position: int: Number of spaces before the name (unused; kept
                            for signature parity with the html parser)
        dialogue_position: int: Number of spaces before the dialogues (unused)
        scenario_position: int: Number of spaces before the scenarios (unused)

    Returns:
        character_dialogues: dict: "Name" -> list of that character's dialogues
        scenarios: dict: {'scenarios': [...]}; kept as a dict so scenario
                   sub-categories can be added later
    """
    # Read eagerly and close the handle (the original leaked the file object).
    with open(filename) as fh:
        lines = [line for line in fh.read().split('\n') if line != '']
    character_dialogues = {}
    scenarios = {'scenarios': []}
    for line in lines:
        name, sep, dialogue = line.partition(":")
        if sep == '':
            # No colon at all: a scenario / stage-direction line.
            scenarios['scenarios'].append(name)
            continue
        # Overly long "names" are almost certainly prose with a stray colon.
        if len(name) >= 30:
            continue
        character_dialogues.setdefault(name, []).append(dialogue)
    return character_dialogues, scenarios
"""
Should we write like a driver function?
"""
character_dialogues, scenarios = get_character_dictionary_from_html("hp5.html", 230, 162, 90)
import json
with open('hp.json', 'w') as fp:
json.dump(character_dialogues, fp)
with open('hp_scenarios.json', 'w') as fp:
json.dump(scenarios, fp)
"""# Sentiment Analysis"""
def get_sentiments(dialogues):
    """
    Takes a list of dialogues spoken by a character and classify it into
    positive, negative, or neutral sentiment
    Parameters:
        dialogues: list: List of dialogues to be sentiment-analyzed
    Returns:
        sentiment_count: Count of each sentiment in the dialogues.
    """
    # Index -> label mapping for stanza's numeric sentiment values (0/1/2).
    emotions = ['Negative', 'Neutral', 'Positive']
    # NOTE(review): the pipeline is rebuilt on every call, which is slow;
    # consider hoisting it if this is called per character.
    nlp = stanza.Pipeline(lang='en', processors='tokenize,sentiment')
    # Join all dialogues into one document so stanza splits sentences itself.
    doc = nlp(". ".join(dialogues))
    sentiment = [i.sentiment for i in doc.sentences]
    # np.unique returns (sorted unique values, matching counts).
    sentiment_count = np.unique(sentiment, return_counts=True)
    # Getting the right name for the emotion instead of numeric number
    sentiment_count = dict(zip(map(lambda x: emotions[x], sentiment_count[0]),
                               sentiment_count[1]))
    return sentiment_count
"""
Main script that reads the databases, run stanza on them, and save the final dataframe
"""
movies = ['marvel', 'hp', 'PP', 'horror'] # A list of movies data bases we are going to work with
data_frames = {}
character_sentiments = {}
genders = {} # A dict of dict
# Reading the files for all the movies
for movie in movies:
data_frames[movie] = pd.read_csv(movie+'_db.csv').drop('Unnamed: 0', axis=1)
# Running Stanza on the characters
for movie in movies:
df = data_frames[movie] # for convinience
for character in df.character.unique():
if not character_sentiments.get(movie):
character_sentiments[movie] = {}
genders[movie] = dict(data_frames[movie].groupby('character')['types'].agg('unique').apply(lambda x: x[0]).fillna('NA'))
character_sentiments[movie][character] = get_sentiments(df[df.character==character]['text'])
def plot_emotions_movies(character_sentiments, movies):
    """
    This functions plots the emotions of male and female characters for the list of
    movies given
    Parameters:
        character_sentiments: dict: having the name of the character and sentiments
        movies: list: The list of movies whose characters we have to plot
    Returns:
        Nothing
    """
    # One row of (male, female) donut charts per movie; saved to a.jpg below.
    fig, axs = plt.subplots(len(movies), 2)
    i = 0
    for movie in movies:
        ch = character_sentiments[movie]
        # Rows = characters, columns = sentiment counts; missing counts -> 0.
        df1 = pd.DataFrame(ch).transpose().fillna(0)
        df1['gender'] = df1.index
        # Map character name -> gender via the module-level `genders` dict.
        df1['gender'] = df1.gender.apply(lambda x: genders[movie].get(x, '' ))
        # NOTE(review): df2/df3 are slices of df1; dropping with inplace=True
        # on a slice triggers pandas' SettingWithCopyWarning -- confirm intent.
        df2 = df1[df1.gender == 'male']
        df2.drop('gender', axis=1, inplace=True)
        df3 = df1[df1.gender == 'female']
        df3.drop('gender', axis=1, inplace=True)
        # Row-normalize, average over characters, draw as a donut chart.
        # NOTE(review): `y=df.index` references the module-level `df`, not a
        # local -- presumably leftover; verify it has no effect on the plot.
        df2.div(df2.sum(axis=1), axis=0).reset_index().fillna(0).mean().plot.pie(y=df.index,
                                             shadow=False,
                                             ax=axs[i, 0],
                                             wedgeprops={'width':0.5},
                                             startangle=90,
                                             title='Average Sentiment of Male characters - ' + movie,
                                             autopct='%1.1f%%',
                                             figsize=(15, 25))
        df3.div(df3.sum(axis=1), axis=0).reset_index().fillna(0).mean().plot.pie(y=df.index,
                                             shadow=False,
                                             ax=axs[i, 1],
                                             wedgeprops={'width':0.5},
                                             startangle=90,
                                             title='Average Sentiment of Female characters - ' + movie,
                                             autopct='%1.1f%%')
        i+=1
    plt.savefig('a.jpg')
plot_emotions_movies(character_sentiments, movies)
def plot_character(character_name, movie_name, character_sentiments):
    """
    Draw a ring (donut) chart of one character's sentiment distribution.

    Parameters:
        character_name: str: the character whose emotions are displayed
        movie_name: str: the movie the character belongs to
        character_sentiments: dict: movie -> {character -> sentiment counts}

    Returns:
        Nothing
    """
    # Look the character up once; bail out early if unknown.
    sentiments = character_sentiments.get(movie_name, {}).get(character_name)
    if not sentiments:
        print("The character does not exist")
        return
    plt.figure(figsize=(10,10))
    plt.pie(sentiments.values(),
            labels=sentiments.keys(),
            wedgeprops={'width':0.5},
            startangle=90,
            autopct='%1.1f%%')
    plt.title(character_name)
plot_character('THOR', 'marvel', character_sentiments)
plot_character('LOKI', 'marvel', character_sentiments)
"""
We are running stanza on Harry's dialogues.
Then we will save the file
"""
harry = pd.read_csv('harry.csv')
emotions = ['Negative', 'Neutral', 'Positive']
nlp = stanza.Pipeline(lang='en', processors='tokenize,sentiment')
doc = nlp(". ".join(harry.text))
sentiment = [i.sentiment for i in doc.sentences]
text = [i.text for i in doc.sentences]
text = [i.text for i in doc.sentences]
harry_stanza = pd.DataFrame({'text':text,'sentiment':sentiment})
harry_stanza.to_csv('harry_stanza.csv', index=False)
"""
Here we are reading the file again after manually annotating it as well
"""
harry_annotated = pd.read_csv('harry_stanza_annotated.csv')
print(accuracy_score(harry_annotated['person 1'], harry_annotated['person 2']))
print(accuracy_score(harry_annotated['sentiment'], harry_annotated['person 1']))
print(accuracy_score(harry_annotated['sentiment'], harry_annotated['person 2']))
"""# Emotions Analysis"""
def func(x):
    """
    Apply NRCLex emotion detection to a single dialogue string.

    Beyond NRC's basic emotions, derives compound emotions (anxiety, pride,
    envy, dominance and curiosity) as products of basic-emotion frequencies,
    then binarizes every emotion: 1 if present, 0 otherwise.  Intended to be
    applied over a Series of dialogues.

    Parameters:
        x: str: the dialogue to run NRCLex on

    Returns:
        res: dict: emotion -> 1 if present in the dialogue, 0 otherwise
    """
    # (the original bound an unused `xx = 'NA'` here)
    res = NRCLex(x).affect_frequencies
    # Only emotions are wanted; drop the polarity entries.
    del res['negative']
    del res['positive']
    # Normalize frequencies so they sum to 1 (skip if nothing was detected).
    total = sum(res.values())
    if total != 0:
        for key in res.keys():
            res[key] = res[key]/total
    # Compound emotions: products of the relevant basic-emotion pairs.
    if res.get('anticipation') and res.get('fear'):
        res['anxiety'] = res['fear']*res['anticipation']
    if res.get('anger'):
        if res.get('joy'):
            res['pride'] = res['anger']*res['joy']
        if res.get('sadness'):
            res['envy'] = res['anger']*res['sadness']
        if res.get('trust'):
            res['dominance'] = res['anger']*res['trust']
    if res.get('surprise') and res.get('trust'):
        res['curiosity'] = res['surprise']*res['trust']
    # Binarize: any positive frequency counts as "emotion present".
    for key in res.keys():
        if res[key] > 0:
            res[key] = 1
    return res
for movie in movies:
df = data_frames[movie]
df['emotions'] = df['text'].apply(func)
"""
Code to plot some of the emotıons for our report and the presentatıon
"""
basic_emotions = ['fear', 'anger', 'trust', 'surprise', 'sadness', 'disgust', 'joy', 'anticipation']
derived_emotions = ['anxiety', 'pride', 'envy', 'dominance', 'curiosity']
fig, axs = plt.subplots(len(movies), 4, sharey=True)
i = 0
for movie in movies:
df = data_frames[movie]
df2 = pd.concat([df.drop(['emotions'], axis = 1), df['emotions'].apply(pd.Series)], axis = 1).fillna(0)
df2[(df2.types=='male')][basic_emotions].sum(axis=0).plot(
labels=basic_emotions,
label="",
ax=axs[i, 0],
kind='pie',
autopct='%1.1f%%',
figsize=(20,20)
)
axs[i,0].set_title("%s | Basic Male \n" % movie, fontsize=12)
df2[(df2.types=='male')][derived_emotions].sum(axis=0).plot(
labels=derived_emotions,
label="",
ax=axs[i, 1],
kind='pie',
autopct='%1.1f%%',
figsize=(20,20)
)
axs[i,1].set_title("%s | Conjugate Male \n" % movie, fontsize=12)
df2[(df2.types=='female')][basic_emotions].sum(axis=0).plot(
labels=basic_emotions,
label="",
ax=axs[i, 2],
kind='pie',
autopct='%1.1f%%',
figsize=(20,20)
)
axs[i,2].set_title("%s | Basic Female \n" % movie, fontsize=12)
df2[(df2.types=='female')][derived_emotions].sum(axis=0).plot(
labels=derived_emotions,
label="",
ax=axs[i, 3],
kind='pie',
autopct='%1.1f%%',
figsize=(20,20)
)
axs[i,3].set_title("%s | Conjugate Female \n" % movie, fontsize=12)
i += 1
plt.savefig('b.jpg')
# ['marvel', 'hp', 'PP', 'horror']
"""
Code to plot emotıons of 1 character. Maınly for presentatıon and the report
"""
movie = 'hp'
character = 'DUDLEY'
df = data_frames[movie]
df2 = pd.concat([df.drop(['emotions'], axis = 1), df['emotions'].apply(pd.Series)], axis = 1).fillna(0)
df2[(df2.character==character)][derived_emotions].sum(axis=0).plot(
labels=derived_emotions,
label=character,
kind='pie',
wedgeprops={'width':0.5},
autopct='%1.1f%%',
figsize=(10,10)
) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
20185,
62,
27530,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
13... | 2.526784 | 5,283 |
#!/usr/bin/python
import os
import json
from optparse import OptionParser
# Agent install dir; falls back to the current working dir below.
homepath = os.getenv("INSIGHTAGENTDIR")
usage = "Usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-r", "--reporting_interval",
                  action="store", dest="reporting_interval", help="Reporting interval in minute")
(options, args) = parser.parse_args()
# Default reporting interval is 5 minutes; kept as a string on purpose --
# the config-writing code below is passed strings.
if options.reporting_interval is None:
    reporting_interval = "5"
else:
    reporting_interval = options.reporting_interval
if homepath is None:
    homepath = os.getcwd()
# Metrics reported as deltas between samples rather than absolute gauges.
deltaFields = ["CPU", "DiskRead", "DiskWrite", "NetworkIn", "NetworkOut", "InOctets", "OutOctets", "InErrors", "OutErrors", "InDiscards", "OutDiscards"]
#update endtime in config file
# NOTE(review): update_configs is defined elsewhere in this file.
update_configs(reporting_interval,"0","5")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
2172,
29572,
1330,
16018,
46677,
198,
198,
11195,
6978,
796,
28686,
13,
1136,
24330,
7203,
20913,
9947,
4760,
3525,
34720,
4943,
198,
26060,
796,
... | 2.988142 | 253 |
from ..base import GnuRecipe
| [
6738,
11485,
8692,
1330,
18509,
84,
37523,
628
] | 3.75 | 8 |
import os
import numpy as np
# if new types are added - add to the end in order not to break previously created files!
supportedDtypes= np.array( [np.dtype(x) for x in ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']] )
dtypeByteSizes= [1,2,4,8,4,8]
pntsHeaderSize= 4 + 1
# functions for converting between previously used format for clusters/points (tables) and the new ones (simple hand-made binary)
if __name__=='__main__':
    # Command-line driver (Python 2):
    #   python io_util.py <c2b|c2t|p2b> <input_file> <output_file>
    import sys
    if len(sys.argv) <= 3 or not(sys.argv[1] in ['c2b','c2t','p2b']):
        print "Usage: python io_util.py conversion_type input_file output_file";
        print "Conversion_type: c2b|c2t|p2b"
        print "c2b: clusters from old tables to new binary"
        print "c2t: clusters from binary to old tables"
        print "p2b: points from old tables to new binary"
        raise ValueError
    operation= sys.argv[1]
    # Expand "~" in user-supplied paths.
    inputFile= os.path.expanduser(sys.argv[2])
    outputFile= os.path.expanduser(sys.argv[3])
    # NOTE(review): a missing input file is only reported, not fatal -- the
    # conversion below still runs; confirm this best-effort behavior is wanted.
    if not(os.path.exists(inputFile)):
        print "Input file doesn't exist, %s" % inputFile
    if operation=='c2b':
        convert_clusters_tables_to_bin(inputFile, outputFile)
    elif operation=='c2t':
        # Corruption is likewise only warned about before converting anyway.
        if clusters_are_corrupt(inputFile):
            print "Cluster file (binary format) is corrupt, %s" % inputFile
        convert_clusters_bin_to_tables(inputFile, outputFile)
    elif operation=='p2b':
        convert_points_tables_to_bin(inputFile, outputFile)
    else:
        # Unreachable: `operation` was validated against the list above.
        assert(0)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
2,
611,
649,
3858,
389,
2087,
532,
751,
284,
262,
886,
287,
1502,
407,
284,
2270,
4271,
2727,
3696,
0,
198,
15999,
35,
19199,
28,
45941,
13,
18747,
7,
685,
37659,
13,
... | 2.449675 | 616 |
#! /usr/bin/env python
import math
import pandas as pd
import networkx as nx
from bokeh.plotting import figure
from bokeh.models import Circle, Text, Row, Column, ColumnDataSource, TapTool, CustomJS, MultiLine
from bokeh.models.graphs import from_networkx
from bokeh.palettes import viridis
from bokeh.transform import factor_cmap
from bokeh.io import output_file, save
from bokeh.models.widgets import Div
import click
@click.command(context_settings=dict(
ignore_unknown_options=True,
),
short_help="Generate an html plot of the clustering"
)
@click.option("--title", "-t", type=str, default="Clusters",
help="Title for the plot")
@click.argument("embedding_file", type=click.Path(exists=True))
@click.argument("info_file", type=click.Path(exists=True))
@click.argument("plot_file", type=click.Path())
if __name__ == '__main__':
cli_handler()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
10688,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
3127,
87,
355,
299,
87,
198,
198,
6738,
1489,
365,
71,
13,
29487,
889,
1330,
3785,
198,
6738,
1489,
365,
71,
13,
2... | 2.84984 | 313 |
from django.forms import Form, Textarea
from django import forms
| [
6738,
42625,
14208,
13,
23914,
1330,
5178,
11,
8255,
20337,
198,
6738,
42625,
14208,
1330,
5107,
628,
198
] | 3.722222 | 18 |
"""
File: shrink.py
Name: Che-Hsien, Chiu
-------------------------------
Create a new "out" image half the width and height of the original.
Set pixels at x=0 1 2 3 in out , from x=0 2 4 6 in original,
and likewise in the y direction.
"""
from simpleimage import SimpleImage
def shrink(filename):
    """
    Build a half-width, half-height copy of the image.

    Output pixel (x, y) is taken from original pixel (2x, 2y), i.e. out
    columns 0 1 2 3 come from original columns 0 2 4 6 (likewise in y), as
    the file header describes.  The original loop walked every *source*
    pixel, so each output pixel was overwritten four times and ended up
    holding the (2x+1, 2y+1) sample instead of the even one.

    :param filename: str, path of the image file
    :return: SimpleImage, the shrunken image
    """
    img = SimpleImage(filename)
    new_img = SimpleImage.blank(img.width //2, img.height//2)
    # Walk the output grid and sample the matching even source pixel once.
    for y in range(new_img.height):
        for x in range(new_img.width):
            src = img.get_pixel(2*x, 2*y)
            dst = new_img.get_pixel(x, y)
            dst.red = src.red
            dst.blue = src.blue
            dst.green = src.green
    return new_img
def main():
    """
    Show the original image, then show its half-size version.
    """
    source = SimpleImage("images/poppy.png")
    source.show()
    shrunken = shrink("images/poppy.png")
    shrunken.show()


if __name__ == '__main__':
    main()
| [
37811,
201,
198,
8979,
25,
22085,
13,
9078,
201,
198,
5376,
25,
2580,
12,
39,
82,
2013,
11,
609,
16115,
201,
198,
1783,
24305,
201,
198,
16447,
257,
649,
366,
448,
1,
2939,
2063,
262,
9647,
290,
6001,
286,
262,
2656,
13,
201,
198,... | 2.321041 | 461 |
import unittest
from cartpole.rl import policy
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
6738,
6383,
36869,
13,
45895,
1330,
2450,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419
] | 2.742857 | 35 |
import socket
from contextlib import closing
from typing import List, Optional
from vektonn.dtos import Vector, Attribute, AttributeValue, InputDataPoint, FoundDataPoint
vektonn_api_host = 'localhost'
vektonn_api_port = 8081
vektonn_api_base_url = f'http://{vektonn_api_host}:{vektonn_api_port}'
data_source_name = 'QuickStart.Source'
data_source_version = '1.0'
index_name = 'QuickStart.Index'
index_version = '1.0'
is_vektonn_running = _is_vektonn_running()
zero_vector = dense_vector(coordinates=[0.0, 0.0])
| [
11748,
17802,
198,
6738,
4732,
8019,
1330,
9605,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
6738,
1569,
74,
1122,
77,
13,
28664,
418,
1330,
20650,
11,
3460,
4163,
11,
3460,
4163,
11395,
11,
23412,
6601,
12727,
11,
4062,
6601,
... | 2.625 | 200 |
# -*- coding: utf-8 -*-
"""
Script for randomly separate all the auditory stim of this protocol into
4 sets of 72 stimuli, with 12 of each category per set
author: Juan Jesus Torre Tresols
e-mail: juan-jesus.torre-tresols@inria.fr
"""
import os
from random import shuffle
import shutil
# %% PARAMETERS
# Six stimulus categories; each maps to the filenames assigned to it.
categories = {'animal': [], 'music': [], 'nature': [],
              'speech': [], 'tools': [], 'voice': []}
output_sets = {'set1': [], 'set2': [], 'set3': [], 'set4': []}
files_per_category = 48
number_of_categories = len(categories.keys())
number_of_sets = len(output_sets.keys())
# Files of each category per output set: 48 / 4 = 12 (x6 categories = 72).
cat_files_per_set = int(files_per_category / number_of_sets)
# %% PATHS
my_path = os.getcwd()
# All source .wav stimuli live in ./all
input_path = os.path.join(my_path, 'all')
# %% SCRIPT
# Fill the dict of lists with the files filenames
for cat in categories.keys():
    for i in range(files_per_category):
        # Filenames follow the pattern s2_<category>_<n>.wav, 1-based.
        title = "s2_" + cat + "_" + str(i + 1) + ".wav"
        categories[cat].append(title)
# Divide files into the sets
for cat in categories.keys():
    # Shuffle so each run produces a fresh random assignment, then cut the
    # category into number_of_sets equal chunks (one chunk per output set).
    shuffle(categories[cat])
    chunks = [categories[cat][x: x + cat_files_per_set] \
              for x in range(0, len(categories[cat]), cat_files_per_set)]
    chunk_index = 0
    for set_ in output_sets.keys():
        for item in chunks[chunk_index]:
            output_sets[set_].append(item)
        chunk_index += 1
# Copy the files to their folders
for set_ in output_sets.keys():
    # Each set folder (set1..set4) must already exist next to this script.
    output_path = os.path.join(my_path, set_)
    old_files = os.listdir(output_path)
    # Remove old files from set folders
    for file_name in old_files:
        os.remove(output_path + '/' + file_name)
    # Copy from 'all' dir to each directory
    for item in range(len(output_sets[set_])):
        item_in_path = os.path.join(input_path, output_sets[set_][item])
        item_out_path = os.path.join(output_path, output_sets[set_][item])
        shutil.copyfile(item_in_path, item_out_path)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
7391,
329,
15456,
4553,
477,
262,
38949,
7132,
286,
428,
8435,
656,
198,
19,
5621,
286,
7724,
25973,
11,
351,
1105,
286,
1123,
6536,
583,
900,
198,
198,
98... | 2.226981 | 934 |
import os
import sys
import fcntl
import datetime
import json
import re
import time
import zipfile
import threading
import hashlib
import shutil
import subprocess
import pprint
import random
from invoke import task
import boto3
import botocore.exceptions
import multiprocessing
import io
import ai2thor.build
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(process)d] %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
@task
@task
@task(iterable=["scenes"])
@task
def webgl_build(
    context,
    scenes="",
    room_ranges=None,
    directory="builds",
    prefix="local",
    verbose=False,
    content_addressable=False,
    crowdsource_build=False,
):
    """
    Creates a WebGL build
    :param context:
    :param scenes: String of scenes to include in the build as a comma separated list
    :param room_ranges: Comma separated "a-b" room-number ranges; overrides
        `scenes` when given
    :param directory: output directory for the build (default "builds")
    :param prefix: Prefix name for the build
    :param verbose: print the scene list and scene metadata while building
    :param content_addressable: Whether to change the unityweb build files to be content-addressable
        have their content hashes as part of their names.
    :param crowdsource_build: build with the CROWDSOURCE_TASK define set
    :return:
    """
    # NOTE(review): `content_addressable` is accepted but never consulted
    # below -- the three unityweb files are always made content-addressable.
    from functools import reduce

    arch = "WebGL"
    build_name = local_build_name(prefix, arch)
    if room_ranges is not None:
        # Expand "a-b" ranges into FloorPlan scene names.  Each range's upper
        # bound is extended by one (x + [x[-1] + 1]) to make "a-b" inclusive.
        floor_plans = [
            "FloorPlan{}_physics".format(i)
            for i in reduce(
                lambda x, y: x + y,
                map(
                    lambda x: x + [x[-1] + 1],
                    [
                        list(range(*tuple(int(y) for y in x.split("-"))))
                        for x in room_ranges.split(",")
                    ],
                ),
            )
        ]
        scenes = ",".join(floor_plans)
    if verbose:
        print(scenes)
    # Scene list (and optional crowdsource define) are passed to Unity via env.
    env = dict(BUILD_SCENES=scenes)
    if crowdsource_build:
        env["DEFINES"] = "CROWDSOURCE_TASK"
    if _build("unity", arch, directory, build_name, env=env):
        print("Build Successful")
    else:
        print("Build Failure")
    build_path = _webgl_local_build_path(prefix, directory)
    # Patch the generated UnityLoader.js (workaround applied post-build).
    fix_webgl_unity_loader_regex(os.path.join(build_path, "Build/UnityLoader.js"))
    generate_quality_settings(context)
    # the remainder of this is only used to generate scene metadata, but it
    # is not part of building webgl player
    rooms = {
        "kitchens": {"name": "Kitchens", "roomRanges": range(1, 31)},
        "livingRooms": {"name": "Living Rooms", "roomRanges": range(201, 231)},
        "bedrooms": {"name": "Bedrooms", "roomRanges": range(301, 331)},
        "bathrooms": {"name": "Bathrooms", "roomRanges": range(401, 431)},
        "foyers": {"name": "Foyers", "roomRanges": range(501, 531)},
    }
    # Invert rooms: scene id -> {type, display name}.
    room_type_by_id = {}
    for room_type, room_data in rooms.items():
        for room_num in room_data["roomRanges"]:
            room_id = "FloorPlan{}_physics".format(room_num)
            room_type_by_id[room_id] = {"type": room_type, "name": room_data["name"]}
    # Group the scenes actually built by room type for scenes.json.
    scene_metadata = {}
    for scene_name in scenes.split(","):
        if scene_name not in room_type_by_id:
            # allows for arbitrary scenes to be included dynamically
            room_type = {"type": "Other", "name": None}
        else:
            room_type = room_type_by_id[scene_name]
        if room_type["type"] not in scene_metadata:
            scene_metadata[room_type["type"]] = {
                "scenes": [],
                "name": room_type["name"],
            }
        scene_metadata[room_type["type"]]["scenes"].append(scene_name)
    if verbose:
        print(scene_metadata)
    # Rename the heavyweight build artifacts to content-hash names and record
    # the mapping in <build_name>.json under the given keys.
    to_content_addressable = [
        ("{}.data.unityweb".format(build_name), "dataUrl"),
        ("{}.wasm.code.unityweb".format(build_name), "wasmCodeUrl"),
        ("{}.wasm.framework.unityweb".format(build_name), "wasmFrameworkUrl"),
    ]
    for file_name, key in to_content_addressable:
        file_to_content_addressable(
            os.path.join(build_path, "Build/{}".format(file_name)),
            os.path.join(build_path, "Build/{}.json".format(build_name)),
            key,
        )
    with open(os.path.join(build_path, "scenes.json"), "w") as f:
        f.write(json.dumps(scene_metadata, sort_keys=False, indent=4))
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
cache_seconds = 31536000
@task
@task
@task
@task
def webgl_s3_deploy(
    ctx, bucket, target_dir, scenes="", verbose=False, all=False, deploy_skip=False
):
    """
    Builds and deploys a WebGL unity site, one room per build.

    :param ctx: invoke context
    :param bucket: Target s3 bucket
    :param target_dir: Target directory in bucket
    :param scenes: String of scene numbers to include in the build as a comma separated list e.g. "4,6,230"
    :param verbose: verbose build
    :param all: overrides 'scenes' parameter and builds and deploys all separate rooms
    :param deploy_skip: Whether to skip deployment and do build only.
    :return:
    """
    # (start, end) room-number ranges per room category, expanded with
    # range(start, end) below (exclusive upper bound).
    # NOTE(review): the exclusive bound drops the last room of each category
    # (e.g. kitchen 30), whereas webgl_build's room table includes it —
    # confirm whether that is intentional before changing the bounds.
    rooms = {
        "kitchens": (1, 30),
        "livingRooms": (201, 230),
        "bedrooms": (301, 330),
        "bathrooms": (401, 430),
    }
    if all:
        # Expand every category's range into one flat list of room numbers
        # (replaces the previous lambda-assigned `flatten`, PEP 8 E731).
        room_numbers = [
            i
            for room_range in rooms.values()
            for i in range(room_range[0], room_range[1])
        ]
    else:
        room_numbers = [s.strip() for s in scenes.split(",")]

    if verbose:
        print("Rooms in build: '{}'".format(room_numbers))

    for i in room_numbers:
        floor_plan_name = "FloorPlan{}_physics".format(i)
        if verbose:
            print("Building room '{}'...".format(floor_plan_name))
        target_s3_dir = "{}/{}".format(target_dir, floor_plan_name)
        build_dir = "builds/{}".format(target_s3_dir)

        # Each room is built as a standalone crowdsource build, then deployed
        # to its own prefix in the bucket (unless deploy_skip is set).
        webgl_build(
            ctx, scenes=floor_plan_name, directory=build_dir, crowdsource_build=True
        )
        if verbose:
            print("Deploying room '{}'...".format(floor_plan_name))
        if not deploy_skip:
            webgl_deploy(
                ctx,
                bucket=bucket,
                source_dir=build_dir,
                target_dir=target_s3_dir,
                verbose=verbose,
                extensions_no_cache=".css",
            )
@task
@task
@task
@task
def create_robothor_dataset(
    context,
    local_build=False,
    editor_mode=False,
    width=300,
    height=300,
    output="robothor-dataset.json",
    intermediate_directory=".",
    visibility_distance=1.0,
    objects_filter=None,
    scene_filter=None,
    filter_file=None,
):
    """
    Creates a dataset for the robothor challenge in `intermediate_directory`
    named `robothor-dataset.json`.

    :param context: invoke context (unused in this body)
    :param local_build: use a locally built Unity player
    :param editor_mode: attach to a running Unity editor instead of starting one
    :param width: render width in pixels
    :param height: render height in pixels
    :param output: filename of the combined dataset written into
        `intermediate_directory`
    :param intermediate_directory: output directory; when not ".", it is wiped
        and per-scene JSON files are written alongside the combined output
    :param visibility_distance: agent visibility distance passed to the controller
    :param objects_filter: comma separated object-type names (spaces removed)
        restricting the target set
    :param scene_filter: comma separated scene names restricting the scene set
    :param filter_file: optional JSON file mapping object type -> scenes to keep
    """
    import ai2thor.controller
    import ai2thor.util.metrics as metrics

    scene = "FloorPlan_Train1_1"
    angle = 45
    gridSize = 0.25
    # Restrict points visibility_multiplier_filter * visibility_distance away from the target object
    # NOTE(review): visibility_multiplier_filter and desired_points (below) are
    # not referenced in this function body — possibly consumed by an earlier
    # version or by get_points; confirm before removing.
    visibility_multiplier_filter = 2

    # Optional per-object-type scene whitelist loaded from filter_file.
    scene_object_filter = {}
    if filter_file is not None:
        with open(filter_file, "r") as f:
            scene_object_filter = json.load(f)
        print("Filter:")
        pprint.pprint(scene_object_filter)

    print("Visibility distance: {}".format(visibility_distance))
    controller = ai2thor.controller.Controller(
        width=width,
        height=height,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        scene=scene,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=gridSize,
        fieldOfView=60,
        rotateStepDegrees=angle,
        agentMode="bot",
        visibilityDistance=visibility_distance,
    )

    # Object types the dataset targets; optionally narrowed by objects_filter.
    targets = [
        "Apple",
        "Baseball Bat",
        "BasketBall",
        "Bowl",
        "Garbage Can",
        "House Plant",
        "Laptop",
        "Mug",
        "Remote",
        "Spray Bottle",
        "Vase",
        "Alarm Clock",
        "Television",
        "Pillow",
    ]
    failed_points = []

    if objects_filter is not None:
        obj_filter = set([o for o in objects_filter.split(",")])
        targets = [o for o in targets if o.replace(" ", "") in obj_filter]

    desired_points = 30
    event = controller.step(
        dict(
            action="GetScenesInBuild",
        )
    )
    scenes_in_build = event.metadata["actionReturn"]

    objects_types_in_scene = set()
    dataset = {}
    dataset_flat = []

    # Recreate the intermediate directory from scratch (unless it is cwd).
    if intermediate_directory is not None:
        if intermediate_directory != ".":
            if os.path.exists(intermediate_directory):
                shutil.rmtree(intermediate_directory)
            os.makedirs(intermediate_directory)

    # Only non-physics scenes are processed, sorted by the project-defined key.
    scenes = sorted(
        [scene for scene in scenes_in_build if "physics" not in scene],
        key=key_sort_func,
    )
    if scene_filter is not None:
        scene_filter_set = set(scene_filter.split(","))
        scenes = [s for s in scenes if s in scene_filter_set]

    print("Sorted scenes: {}".format(scenes))
    for scene in scenes:
        dataset[scene] = {}
        # NOTE(review): this writes the same "object_types" key on every
        # iteration, mixing it in with the per-scene keys of `dataset`.
        dataset["object_types"] = targets
        objects = []
        for objectType in targets:
            # Honour the filter_file whitelist when one was supplied.
            if filter_file is None or (
                objectType in scene_object_filter
                and scene in scene_object_filter[objectType]
            ):
                dataset[scene][objectType] = []
                obj = get_points(controller, objectType, scene)
                if obj is not None:
                    objects = objects + obj

        dataset_flat = dataset_flat + objects
        # Persist one JSON file per scene when using a real output directory.
        if intermediate_directory != ".":
            with open(
                os.path.join(intermediate_directory, "{}.json".format(scene)), "w"
            ) as f:
                json.dump(objects, f, indent=4)

    # Combined flat dataset across all scenes.
    with open(os.path.join(intermediate_directory, output), "w") as f:
        json.dump(dataset_flat, f, indent=4)

    print("Object types in scene union: {}".format(objects_types_in_scene))
    print("Total unique objects: {}".format(len(objects_types_in_scene)))
    print("Total scenes: {}".format(len(scenes)))
    print("Total datapoints: {}".format(len(dataset_flat)))

    print(failed_points)
    with open(os.path.join(intermediate_directory, "failed.json"), "w") as f:
        json.dump(failed_points, f, indent=4)
@task
@task
def filter_dataset(ctx, filename, output_filename, ids=False):
    """
    Filters objects in dataset that are not reachable in at least one of the scenes (have
    zero occurrences in the dataset).

    Reads the dataset at `filename`, drops every object type that has zero
    datapoints in at least one scene, assigns a stable ``id`` to each remaining
    datapoint and writes the result to `output_filename`.  The per-object-type
    sets of scenes with zero occurrences are also dumped to ``with_zero.json``
    in the current directory.

    :param ctx: invoke context (unused)
    :param filename: path of the input dataset JSON file
    :param output_filename: path the filtered dataset is written to
    :param ids: unused; kept for interface compatibility
    """
    with open(filename, "r") as f:
        obj = json.load(f)

    # Object types the challenge cares about; every scene starts with a zero
    # count for each of these.
    targets = [
        "Apple",
        "Baseball Bat",
        "BasketBall",
        "Bowl",
        "Garbage Can",
        "House Plant",
        "Laptop",
        "Mug",
        "Spray Bottle",
        "Vase",
        "Alarm Clock",
        "Television",
        "Pillow",
    ]

    # counter[scene][object_type] -> number of datapoints for that pair.
    counter = {}
    for f in obj:
        obj_type = f["object_type"]
        if f["scene"] not in counter:
            counter[f["scene"]] = {target: 0 for target in targets}
        scene_counter = counter[f["scene"]]
        if obj_type not in scene_counter:
            scene_counter[obj_type] = 1
        else:
            scene_counter[obj_type] += 1

    # Collect object types that are missing (zero datapoints) in any scene.
    objects_with_zero = set()
    objects_with_zero_by_obj = {}
    for k, item in counter.items():
        for obj_type, count in item.items():
            if count == 0:
                if obj_type not in objects_with_zero_by_obj:
                    objects_with_zero_by_obj[obj_type] = set()
                objects_with_zero_by_obj[obj_type].add(k)
                objects_with_zero.add(obj_type)

    print("Objects with zero: {}".format(objects_with_zero))
    with open("with_zero.json", "w") as fw:
        dict_list = {k: list(v) for k, v in objects_with_zero_by_obj.items()}
        json.dump(dict_list, fw, sort_keys=True, indent=4)
    pprint.pprint(objects_with_zero_by_obj)

    filtered = [o for o in obj if o["object_type"] not in objects_with_zero]

    # Number the datapoints within each (scene, object_type) run and build
    # ids of the form "<stage>_<scene#>_<variant#>_<object_type>_<n>".
    counter = 0
    current_scene = ""
    current_object_type = ""
    for i, o in enumerate(filtered):
        if current_scene != o["scene"] or current_object_type != o["object_type"]:
            counter = 0
            current_scene = o["scene"]
            current_object_type = o["object_type"]
        # Bug fix: the pattern is now a raw string — "\-" in a non-raw
        # literal is an invalid escape sequence (DeprecationWarning, slated
        # to become a SyntaxError).
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", o["scene"])
        point_id = "{}_{}_{}_{}_{}".format(
            m.group(1), m.group(2), m.group(3), o["object_type"], counter
        )
        counter += 1
        o["id"] = point_id

    with open(output_filename, "w") as f:
        json.dump(filtered, f, indent=4)
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
@task
def test_utf():
    """
    Run the Unity Test Runner (PlayMode) for the project and return the
    paths of the XML results file and the log file, both tagged with the
    current git commit id.
    """
    commit_id = git_commit_id()
    cwd = os.getcwd()
    project_path = os.path.join(cwd, "unity")
    results_name = "utf_testResults-%s.xml" % commit_id
    log_name = "thor-testResults-%s.log" % commit_id
    test_results_path = os.path.join(project_path, results_name)
    logfile_path = os.path.join(cwd, log_name)

    # Invoke the Unity binary with the test-runner flags; output lands in the
    # two files computed above.
    command = "%s -runTests -testResults %s -logFile %s -testPlatform PlayMode -projectpath %s " % (
        _unity_path(),
        test_results_path,
        logfile_path,
        project_path,
    )
    subprocess.call(command, shell=True)
    return test_results_path, logfile_path
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
277,
66,
429,
75,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
640,
198,
11748,
19974,
7753,
198,
11748,
4704,
278,
198,
11748,
12234,
8019,
198,
11748,
4423,
346,
... | 2.210269 | 6,349 |
class Solution:
"""
@param s: an expression includes numbers, letters and brackets
@return: a string
"""
| [
4871,
28186,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2488,
17143,
264,
25,
281,
5408,
3407,
3146,
11,
7475,
290,
28103,
198,
220,
220,
220,
2488,
7783,
25,
257,
4731,
198,
220,
220,
220,
37227,
198
] | 3.102564 | 39 |
"""
Demo for the sdfrw.py module.
Add property to molecules in an sdf file according to values stored in a text file
example: python updater.py Name flavonoid_names.txt < flavonoids.sdf > flavonoids_updated.sdf
result: Molecule property "Name", with values from file flavonoid_names.txt,
is set or reset for molecules in flavonoids.sdf to produce file flavonoids_updated.sdf.
"""
import sdfrw
import sys
fhIn = sys.stdin
fhOut = sys.stdout
# molecules are read from standard input and written to standard output
argc = len(sys.argv)
# number of command line arguments (including the script name)
if argc != 3:
# exactly two arguments are required: property name and values file
    print("Usage: python " + sys.argv[0] + " PropertyName ValuesFileName < sdfFileNameInput.sdf > sdfFileNameOutput.sdf")
    print("Example: python updater.py Name flavonoid_names.txt < flavonoids.sdf > flavonoids_updated.sdf")
    sys.exit()
# help message printed above; exit before touching any input
propname = sys.argv[1]
# name of the molecule property to set or reset
txtfilename = sys.argv[2]
# name of the file that contains one value per molecule, in sdf order
with open(txtfilename) as fhTxt:
    # open the values file for reading alongside the sdf stream
    for mol in sdfrw.sdfReader(fhIn):
        # loop through molecules from the input sdf file
        propvalue = fhTxt.readline().strip()
        # value for the current molecule (line i of the file pairs with molecule i)
        changed, mol = sdfrw.sdfSetChangeProp(mol, propname, propvalue)
        # set or overwrite the property on the current molecule
        sdfrw.sdfWrite(fhOut, mol)
        # write the (possibly updated) molecule to standard output
37811,
198,
11522,
78,
329,
262,
264,
7568,
31653,
13,
9078,
8265,
13,
198,
4550,
3119,
284,
17745,
287,
281,
264,
7568,
2393,
1864,
284,
3815,
8574,
287,
257,
2420,
2393,
198,
198,
20688,
25,
21015,
2325,
729,
13,
9078,
6530,
10525,
... | 3.122271 | 458 |
from can_tools.scrapers.official.federal.CDC.cdc_coviddatatracker import (
CDCCovidDataTracker,
)
from can_tools.scrapers.official.federal.CDC.cdc_state_vaccines import CDCStateVaccine
| [
6738,
460,
62,
31391,
13,
1416,
2416,
364,
13,
16841,
13,
69,
2110,
13,
47667,
13,
10210,
66,
62,
66,
709,
1638,
265,
265,
81,
10735,
1330,
357,
198,
220,
220,
220,
6458,
4093,
709,
312,
6601,
35694,
11,
198,
8,
198,
6738,
460,
... | 2.625 | 72 |
import os
import shutil
from unittest import mock
import pytest
from click.testing import CliRunner
from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.data_context.templates import CONFIG_VARIABLES_TEMPLATE
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.test_cli import yaml
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_fixing_them(
    mock_webbrowser, caplog, tmp_path_factory,
):
    """
    This test walks through the onboarding experience.
    The user just checked an existing project out of source control and does
    not yet have an uncommitted directory.
    """
    # Set up a scratch project directory containing the Titanic fixture data.
    root_dir = tmp_path_factory.mktemp("hiya")
    root_dir = str(root_dir)
    os.makedirs(os.path.join(root_dir, "data"))
    data_folder_path = os.path.join(root_dir, "data")
    data_path = os.path.join(root_dir, "data", "Titanic.csv")
    fixture_path = file_relative_path(
        __file__, os.path.join("..", "test_sets", "Titanic.csv")
    )
    shutil.copy(fixture_path, data_path)
    # Create a new project from scratch that we will use for the test in the
    # next step.  The input string answers the interactive `init` prompts.
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", root_dir],
        input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(data_folder_path, data_path),
        catch_exceptions=False,
    )
    stdout = result.output
    assert result.exit_code == 0
    # Data Docs must have been opened in the (mocked) browser exactly once,
    # pointing at the Titanic warning-suite validation results.
    assert mock_webbrowser.call_count == 1
    assert (
        "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
            root_dir
        )
        in mock_webbrowser.call_args[0][0]
    )
    assert "Great Expectations is now set up." in stdout
    # Simulate a fresh checkout: uncommitted/ is not in source control, so
    # remove it entirely before re-running init.
    context = DataContext(os.path.join(root_dir, DataContext.GE_DIR))
    uncommitted_dir = os.path.join(context.root_directory, "uncommitted")
    shutil.rmtree(uncommitted_dir)
    assert not os.path.isdir(uncommitted_dir)
    # Test the second invocation of init: "Y" accepts recreating the missing
    # files, "n" declines building Data Docs.
    runner = CliRunner(mix_stderr=False)
    with pytest.warns(
        UserWarning, match="Warning. An existing `great_expectations.yml` was found"
    ):
        result = runner.invoke(
            cli, ["init", "-d", root_dir], input="Y\nn\n", catch_exceptions=False
        )
    stdout = result.stdout
    assert result.exit_code == 0
    assert "Great Expectations added some missing files required to run." in stdout
    assert "You may see new files in" in stdout
    assert "OK. You must run" not in stdout
    assert "great_expectations init" not in stdout
    assert "to fix the missing files!" not in stdout
    assert "Would you like to build & view this project's Data Docs!?" in stdout
    # The uncommitted scaffolding must be back, including the default
    # config_variables.yml template.
    assert os.path.isdir(uncommitted_dir)
    config_var_path = os.path.join(uncommitted_dir, "config_variables.yml")
    assert os.path.isfile(config_var_path)
    with open(config_var_path, "r") as f:
        assert f.read() == CONFIG_VARIABLES_TEMPLATE
    assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_complete_existing_project_all_uncommitted_dirs_exist(
    mock_webbrowser, caplog, tmp_path_factory,
):
    """
    This test walks through the onboarding experience on a complete existing
    project: every uncommitted directory already exists, so a second `init`
    should recognize the project as complete and only offer to build Data Docs.
    """
    # Set up a scratch project directory containing the Titanic fixture data.
    root_dir = tmp_path_factory.mktemp("hiya")
    root_dir = str(root_dir)
    os.makedirs(os.path.join(root_dir, "data"))
    data_folder_path = os.path.join(root_dir, "data")
    data_path = os.path.join(root_dir, "data", "Titanic.csv")
    fixture_path = file_relative_path(
        __file__, os.path.join("..", "test_sets", "Titanic.csv")
    )
    shutil.copy(fixture_path, data_path)
    # Create a new project from scratch that we will use for the test in the next step
    runner = CliRunner(mix_stderr=False)
    # Bug fix: catch_exceptions=False was previously passed to str.format()
    # (where it was silently ignored) instead of to runner.invoke().
    result = runner.invoke(
        cli,
        ["init", "-d", root_dir],
        input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(data_folder_path, data_path),
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    assert mock_webbrowser.call_count == 1
    assert (
        "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
            root_dir
        )
        in mock_webbrowser.call_args[0][0]
    )
    # Now the test begins - rerun the init on an existing project
    runner = CliRunner(mix_stderr=False)
    with pytest.warns(
        UserWarning, match="Warning. An existing `great_expectations.yml` was found"
    ):
        result = runner.invoke(
            cli, ["init", "-d", root_dir], input="n\n", catch_exceptions=False
        )
    stdout = result.stdout
    # No additional browser opens should have happened during the second run.
    assert mock_webbrowser.call_count == 1
    assert result.exit_code == 0
    assert "This looks like an existing project that" in stdout
    assert "appears complete" in stdout
    assert "ready to roll" in stdout
    assert "Would you like to build & view this project's Data Docs" in stdout
    assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
| [
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
1049,
62,
1069,
806,
602,
1330,
6060,
21947,
198,
6738,
1049,
... | 2.515431 | 2,171 |
import threadpool
import math
import random
import igraph
import time
def computL(i, j, cmty, c):
    '''
    Compute r_i,j,c: the logistic-squashed joint membership strength of
    nodes i and j in community c.

    Each entry of ``cmty`` is a comma separated string of community labels;
    a node belonging to k communities has membership probability 1/k in
    each of them, and 0 in communities it does not belong to.

    :param i: index of the first node in ``cmty``
    :param j: index of the second node in ``cmty``
    :param cmty: list of comma separated community-label strings, one per node
    :param c: community label (string)
    :return: 1 / ((1 + e^-(60*p_ic - 30)) * (1 + e^-(60*p_jc - 30)))
    '''
    # Bug fix: the original tested membership with substring containment
    # (`c in cmty[i]`), so e.g. label "5" spuriously matched "50" or "15".
    # Split into exact labels first, then test list membership.
    labels_i = cmty[i].split(',')
    labels_j = cmty[j].split(',')
    p_ic = 1 / len(labels_i) if c in labels_i else 0
    p_jc = 1 / len(labels_j) if c in labels_j else 0
    return 1 / ((1 + pow(math.e, -(60 * p_ic - 30))) * (1 + pow(math.e, -(60 * p_jc - 30))))
def computW(I, J, nodelist, cmty, c):
    '''
    Compute w_I,J for community c: the product of the average r value over
    column J and the average r value over row I across all nodes.

    :param I: index of the first node
    :param J: index of the second node
    :param nodelist: list of node names (only its length is used)
    :param cmty: list of comma separated community-label strings, one per node
    :param c: community label (string)
    :return: (mean_k r_k,J,c) * (mean_k r_I,k,c)
    '''
    n = len(nodelist)
    col_total = 0
    row_total = 0
    for k in range(n):
        col_total = col_total + computL(k, J, cmty, c)
    for k in range(n):
        row_total = row_total + computL(I, k, cmty, c)
    return (col_total / n) * (row_total / n)
def getdetail(G):
    '''
    Collect detail information about the graph and its community structure.

    :param G: igraph Graph whose vertices carry a 'name' attribute (numeric
        string) and a 'value' attribute (comma separated community labels)
    :return: tuple (nodelist, edgelist, cmty, mydict, revmydict,
        neigbor_size, m) where
        - nodelist: list of vertex names
        - edgelist: edges expressed as (int(name), int(name)) pairs
        - cmty: community-label string per vertex, in nodelist order
        - mydict: {vertex id: name}; revmydict: {name: vertex id}
        - neigbor_size: neighborhood size per vertex, in nodelist order
        - m: number of edges
    '''
    mydict = index_idofname(G) # {id:name}
    revmydict = index_nameofid(G) # {name:id}
    nodelist = G.vs()['name']
    edgelistid = G.get_edgelist()
    edgelist = list()
    # Translate id-based edges into name-based (integer) edges.
    for edge in edgelistid:
        edgelist.append((int(mydict[edge[0]]), int(mydict[edge[1]])))
    cmty = list()
    # 'value' holds the (possibly comma separated) community labels.
    for i in range(len(nodelist)):
        cmty.append(G.vs().find(name=nodelist[i])['value'])
    m = len(edgelist)
    neigbor_size = [G.neighborhood_size(int(revmydict[_])) for _ in nodelist]
    return nodelist,edgelist,cmty,mydict,revmydict,neigbor_size,m
def computQ(G):
    '''
    Compute the overlapping-community modularity Q of graph G over the
    5000 candidate communities, together with the per-(community, node-pair)
    contribution table used for incremental updates later.

    :param G: igraph Graph (see getdetail for the expected vertex attributes)
    :return: (final_Q, Q) where Q[c][i][j] == [q1, q2] with
        q1 = r_i,j,c * A_ij and q2 = w_i,j,c * deg_i * deg_j
    '''
    temp = getdetail(G)
    nodelist = temp[0]
    edgelist = temp[1]
    cmty = temp[2]
    revmydict = temp[4]
    neigbor_size = temp[5]
    m = temp[6]
    # Q[c][i] accumulates one [q1, q2] pair per node j, appended in j order.
    Q = [[[] for k in range(len(nodelist))] for j in range(5000)]
    for c in range(5000):
        for i in range(len(nodelist)):
            for j in range(len(nodelist)):
                # A_ij: adjacency indicator (edges are stored by name, in
                # either orientation).
                if (nodelist[i], nodelist[j]) in edgelist or (nodelist[j], nodelist[i]) in edgelist:
                    A_ij = 1
                else:
                    A_ij = 0
                q1 = computL(i, j, cmty, str(c)) * A_ij
                q2 = (computW(i, j, nodelist, cmty, str(c)) *
                      neigbor_size[int(revmydict[nodelist[i]])] * neigbor_size[int(revmydict[nodelist[j]])])
                Q[c][i].append([q1, q2])
    # Aggregate contributions into the final modularity value.
    sum1 = 0
    sum2 = 0
    for c in range(5000):
        for i in range(len(nodelist)):
            sum1 = sum1 + sum([Q[c][i][j][0] for j in range(len(nodelist))])
            sum2 = sum2 + sum([Q[c][i][j][1] for j in range(len(nodelist))])
    final_Q = sum1/(2*m) - sum2/(4*m)
    return final_Q,Q
def changenode(G,p_exchange):
    '''
    NodeEvolution: randomly insert or delete up to 2% of the nodes.

    With probability ``p_exchange`` a batch of new nodes is inserted, each
    one wired to every member of a randomly chosen ground-truth community
    from the dblp top-5000 file; otherwise a random batch of existing nodes
    is deleted.

    :param G: igraph Graph, mutated in place
    :param p_exchange: probability of taking the insert branch
    :return: None (G is modified in place)
    '''
    nodelist = G.vs()['name']
    # Insert nodes
    if random.random() < p_exchange:
        maxcount = math.ceil(len(nodelist) * 0.02)
        insertNodeNumber = random.randint(0, maxcount)
        if insertNodeNumber != 0:
            # Connect each inserted node to all members of its chosen community.
            for i in range(insertNodeNumber):
                insertedge = list()
                # NOTE(review): max(nodelist) + i + 1 assumes vertex names are
                # numeric; if 'name' values are strings this raises TypeError —
                # confirm the attribute type.
                insertname = int(max(nodelist) + i + 1)
                select_community = random.sample(range(5000), 1)[0]
                # NOTE(review): the community file is re-read on every
                # iteration; hoisting the read out of the loop would be safe.
                with open('../data/large_network/com-dblp.top5000.cmty.txt', 'r') as f:
                    readfile = f.readlines()
                cmty = readfile[select_community].strip('\r\n').split('\t')
                G.add_vertex(name=str(insertname),value=select_community)
                for _ in range(len(cmty)):
                    insertedge.append((insertname,cmty[_]))
                G.add_edges(insertedge)
    # Delete nodes
    else:
        mydict = index_nameofid(G)
        print(mydict)
        maxx = math.ceil(len(nodelist) * 0.02)
        deleteNodeNumber = random.randint(0, maxx)
        if deleteNodeNumber != 0:
            selectDeletNode = random.sample(nodelist, deleteNodeNumber)
            idofselectdeletenode = list()
            for node in selectDeletNode:
                idofselectdeletenode.append(int(mydict[node]))
            G.delete_vertices(idofselectdeletenode)
    pass
def changedge(G,Q,aim_Q,T,detail_Q):
    '''
    EdgeEvolution: simulated-annealing style edge flips that drive the
    graph's modularity Q towards aim_Q.

    :param G: igraph Graph, mutated in place
    :param Q: current modularity value
    :param aim_Q: target modularity value
    :param T: annealing temperature (controls the acceptance probability
        pow(e, -t/T) for non-improving flips)
    :param detail_Q: per-(community, node-pair) contribution table (see computQ)
    :return: (G, Q, detail_Q) after the flips
    '''
    mydict = index_idofname(G)
    revmydict = index_nameofid(G)
    nodelist = G.vs()['name']
    edgelistid = G.get_edgelist()
    edgelist = list()
    for edge in edgelistid:
        edgelist.append((mydict[edge[0]],mydict[edge[1]])) # build the list of edges expressed by vertex name
    for t in range(100000):
        # Stop once we are close enough to the target modularity.
        if abs(aim_Q - Q) < 0.001:
            break
        else:
            # Pick a random node pair; subflag records whether the edge
            # currently exists (1 = flip deletes it, 0 = flip inserts it).
            nodetrip = random.sample(nodelist,2)
            nodeA = nodetrip[0]
            nodeB = nodetrip[1]
            if (nodeA,nodeB) in edgelist or (nodeB,nodeA) in edgelist:
                subflag = 1
            else:
                subflag = 0
            temp_Q,detail_Q = computtemp_Q(G,subflag,nodeA,nodeB,revmydict,detail_Q)
            # Flipping: accept the flip when it moves Q closer to aim_Q.
            # NOTE(review): igraph add_edges/delete_edges normally expect a
            # *list* of pairs; confirm a bare tuple works with the igraph
            # version in use.
            if abs(aim_Q - temp_Q) < abs(aim_Q - Q):
                if subflag == 1:
                    G.delete_edges((revmydict[nodeA],revmydict[nodeB]))
                    Q = temp_Q
                else:
                    G.add_edges((revmydict[nodeA],revmydict[nodeB]))
                    Q = temp_Q
            # Flipping with the decrease probability (annealing acceptance).
            else:
                if random.random() < pow(math.e, (-t / T)):
                    if subflag == 1:
                        G.delete_edges((revmydict[nodeA], revmydict[nodeB]))
                        Q = temp_Q
                    else:
                        G.add_edges((revmydict[nodeA], revmydict[nodeB]))
                        Q = temp_Q
    return G,Q,detail_Q
def computtemp_Q(G,subflag,a,b,revmydict,detail_Q):
    '''
    Recompute the modularity after tentatively flipping edge (a, b),
    updating detail_Q in place for the affected entries.

    :param G: igraph Graph
    :param subflag: 1 if edge (a, b) currently exists (flip = delete),
        0 if it does not (flip = insert)
    :param a: first endpoint of the flipped edge
    :param b: second endpoint of the flipped edge
    :param revmydict: {name: vertex id}
    :param detail_Q: per-(community, node-pair) contribution table
    :return: (final_Q, detail_Q)
    '''
    temp = getdetail(G)
    nodelist = temp[0]
    cmty = temp[2]
    neigbor_size = temp[5]
    m = temp[6]
    # Adjust the edge count for the tentative flip.
    if subflag == 1:
        m = m - 1
    elif subflag == 0:
        m = m + 1
    for c in range(5000):
        for i in range(len(nodelist)):
            for j in range(len(nodelist)):
                # NOTE(review): A_ij mirrors the *old* adjacency (1 when the
                # edge existed) rather than the flipped state — confirm intent.
                if subflag == 1:
                    A_ij = 1
                else:
                    A_ij = 0
                # NOTE(review): (i, j) are list indices while (a, b) are node
                # names, so this comparison may never match; additionally the
                # unconditional break below exits the j loop after j == 0 for
                # every i. Both look like defects — verify before relying on
                # this function.
                if (i, j) == (a, b) or (i, j) == (b, a):
                    q1 = computL(i, j, cmty, str(c)) * A_ij
                    q2 = (computW(i, j, nodelist, cmty, str(c)) *
                          neigbor_size[int(revmydict[nodelist[i]])] * neigbor_size[int(revmydict[nodelist[j]])])
                    detail_Q[c][i][j] = [q1, q2]
                    detail_Q[c][j][i] = [q1, q2]
                    break
                break
    # Aggregate the (partially updated) contribution table into Q.
    sum1 = 0
    sum2 = 0
    for c in range(5000):
        for i in range(len(nodelist)):
            sum1 = sum1 + sum([detail_Q[c][i][j][0] for j in range(len(nodelist))])
            sum2 = sum2 + sum([detail_Q[c][i][j][1] for j in range(len(nodelist))])
    final_Q = sum1 / (2 * m) - sum2 / (4 * m)
    return final_Q, detail_Q
| [
11748,
4704,
7742,
201,
198,
11748,
10688,
201,
198,
11748,
4738,
201,
198,
11748,
45329,
1470,
201,
198,
11748,
640,
201,
198,
4299,
2653,
43,
7,
72,
11,
73,
11,
11215,
774,
11,
66,
2599,
201,
198,
220,
220,
220,
705,
7061,
201,
... | 1.739205 | 4,678 |
from django.conf.urls.defaults import *
# URL routes for the notifications app (legacy Django `patterns`/string-view
# syntax): /list/ renders the notifications list view.
urlpatterns = patterns('',
    url(r'^list/$', 'notifications.views.notifications_list', name="notification-list"),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
3256,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
4868,
32624,
3256,
705,
1662,
6637,
13,
33571,
13,
1662,
6637,
62,
... | 2.890909 | 55 |
# myTeam.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from captureAgents import CaptureAgent
import random, time, util
import time
import random
import numpy as np
from game import Directions
from game import Actions
import game
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
               first = 'MonteCarloAttacker', second = 'Defender'):
    """
    Create the two agents that make up this team.

    `firstIndex` and `secondIndex` are the agent index numbers assigned by
    the framework, and `isRed` is True when the red team is being created.
    The `first` and `second` keyword arguments name the agent classes to
    instantiate; they can be overridden from the command line via the
    --redOpts / --blueOpts arguments to capture.py.  For the nightly
    contest no extra arguments are passed, so the defaults above are used.
    """
    first_agent = eval(first)(firstIndex)
    second_agent = eval(second)(secondIndex)
    return [first_agent, second_agent]
##########
# Agents #
##########
class MonteCarloAttacker(CaptureAgent):
    '''
    Attacker agent intended to choose actions with Monte Carlo style
    simulation.  Only the alley-detection and reward helpers are defined
    in this part of the file.
    '''

    def takeToEmptyAlley(self, gameState, action, depth):
        """
        Verify if an action takes the agent to an alley with
        no pacdots.

        Recursively explores up to `depth` moves ahead (never stopping or
        reversing); returns True only when every continuation is a dead end
        with no food gained along the way.
        """
        # Search budget exhausted: assume it is not a trap.
        if depth == 0:
            return False
        newState = gameState.generateSuccessor(self.index, action)
        previousEatenFood = gameState.getAgentState(self.index).numCarrying
        eatenFood = newState.getAgentState(self.index).numCarrying
        # Picking up food on the way means the alley is worth entering.
        if previousEatenFood < eatenFood:
            return False
        actions = newState.getLegalActions(self.index)
        actions.remove(Directions.STOP)
        # Forbid immediately reversing, so we only look deeper into the alley.
        reversed_direction = Directions.REVERSE[newState.getAgentState(self.index).configuration.direction]
        if reversed_direction in actions:
            actions.remove(reversed_direction)
        # No way forward: this is a dead end.
        if len(actions) == 0:
            return True
        for a in actions:
            if not self.takeToEmptyAlley(newState, a, depth - 1):
                return False
        return True

    # Given the current simulation reset, this function will return the
    # index of the best successor index according to the results so far

    # Given the choosen successor and the depth limits, the function can
    # generate a path randomly

    def getReward(self, previousState, nextState):
        '''
        Reward of the transition previousState -> nextState for the
        offensive agent:
        1. get food: small positive reward(+2)
        2. get capsult: medium positive reward(+5)
        3. get back to own territory with food: big positive reward(+10 x #food)
        4. get eaten: big negative reward(-500)
        5. travel in enemy territory: small negative reward(-1)
        6. travel in own territory vertically: medium negative reward(-2)
        7. travel in own territory horizontally towards own: medium negative reward(-2)
        8. travel in own territory horizontally towards enemy: small negative reward(-1)
        9. stop: medium negative reward(-5)
        '''
        features = self.getFeatures(previousState)
        featureNextState = self.getFeatures(nextState)
        agentState = previousState.getAgentState(self.index)
        agentStateNextState = nextState.getAgentState(self.index)
        reward = 0
        # Food eaten on this move.
        if nextState.getAgentPosition(self.index) in self.getFood(previousState).asList():
            reward += 2
        # Capsule eaten on this move.
        if nextState.getAgentPosition(self.index) in self.getCapsules(previousState):
            reward += 5
        # Carried food was deposited and the score improved.
        if agentState.numCarrying > 0 and agentStateNextState.numCarrying == 0:
            if self.getScore(nextState) > self.getScore(previousState):
                reward += 10 * previousState.getAgentState(self.index).numCarrying
        # Time cost while in enemy territory.
        if agentState.isPacman and agentStateNextState.isPacman:
            reward -= 1
        action = self.getNextAction(previousState,nextState)
        if action == None: #dead
            reward -= 500
        # Movement costs while still in our own territory: discourage
        # vertical travel and travel back towards our own side.
        if not agentState.isPacman and not agentStateNextState.isPacman:
            if action in [Directions.NORTH,Directions.SOUTH]:
                reward -= 2
            elif featureNextState['distanceFromStart'] < features['distanceFromStart']:
                reward -= 1
            else:
                reward -= 2
        return reward
class Defender(CaptureAgent):
    """Defensive agent: patrols the points of our territory that guard food."""

    # Pre-processing hook (15s max in the contest).

    def distFoodToPatrol(self, gameState):
        """
        This method calculates the minimum distance from our patrol
        points to our pacdots. The inverse of this distance will
        be used as the probability to select the patrol point as
        target.
        """
        food = self.getFoodYouAreDefending(gameState).asList()
        total = 0

        # Get the minimum distance from the food to our patrol points.
        for position in self.noWallSpots:
            # Bug fix: this was the string "+inf"; comparing an int against a
            # string raises TypeError on Python 3.
            closestFoodDist = float("inf")
            for foodPos in food:
                dist = self.getMazeDistance(position, foodPos)
                if dist < closestFoodDist:
                    closestFoodDist = dist
            # We can't divide by 0!
            if closestFoodDist == 0:
                closestFoodDist = 1
            self.patrolDict[position] = 1.0 / float(closestFoodDist)
            total += self.patrolDict[position]

        # Normalize the values so they form a probability distribution.
        if total == 0:
            total = 1
        for x in self.patrolDict.keys():
            self.patrolDict[x] = float(self.patrolDict[x]) / float(total)

    def selectPatrolTarget(self):
        """
        Sample a patrol point, weighted by the probabilities stored in
        self.patrolDict (roulette-wheel selection).
        """
        rand = random.random()
        # `cumulative` replaces a local that shadowed the builtin `sum`.
        cumulative = 0.0
        for x in self.patrolDict.keys():
            cumulative += self.patrolDict[x]
            if rand < cumulative:
                return x

    # Agent-control hook (1s max per move in the contest).
class DummyAgent(CaptureAgent):
    """
    A minimal example agent showing the structure every agent needs: a
    registerInitialState hook for setup and a chooseAction hook that
    returns one of the legal actions.  Look at baselineTeam.py for more
    details about how to create a real agent.
    """

    def registerInitialState(self, gameState):
        """
        Handle the initial setup of the agent and populate useful fields
        (such as which team we are on).

        A distanceCalculator instance caches the maze distances between
        each pair of positions, so agents can use
        self.distancer.getDistance(p1, p2).

        IMPORTANT: this method may run for at most 15 seconds.
        """
        # Required: let the framework initialise its own state first —
        # do not remove this call.
        CaptureAgent.registerInitialState(self, gameState)
        # Any agent-specific initialisation would go here.

    def chooseAction(self, gameState):
        """Pick uniformly at random among the legal actions."""
        legal = gameState.getLegalActions(self.index)
        # A real agent would score the actions instead of sampling blindly.
        return random.choice(legal)
| [
2,
616,
15592,
13,
9078,
198,
2,
45337,
198,
2,
10483,
26426,
6188,
25,
220,
921,
389,
1479,
284,
779,
393,
9117,
777,
4493,
329,
198,
2,
9856,
4959,
2810,
326,
357,
16,
8,
345,
466,
407,
14983,
393,
7715,
198,
2,
8136,
11,
357,... | 3.344859 | 2,169 |
# Generated by Django 2.1.2 on 2018-10-18 02:57
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
17,
319,
2864,
12,
940,
12,
1507,
7816,
25,
3553,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
6... | 2.818182 | 44 |
#!/usr/bin/python
from __future__ import division
import sys
import dpkt
import struct
import socket
import string
import binascii
# Module-level counters and flags shared by the parse helpers.  They are
# reassigned elsewhere in the file; their exact semantics are not visible in
# this section, so they are documented only by name here.
pkt_num = 1
offset = 0
dst_received = 0
# timing-related state
sendinit = 0
dstnor = 0
flag_src_stop = 0
if __name__ == '__main__':
    # Command line: python parse.py [-tfm|-opennf] debug_pkt_type srcfile dstfile [sendfile]
    flow = list()
    if len(sys.argv) < 5:
        print("python parse.py [-tfm,-opennf] [debug_pkt_type] [srcfile] [dstfile] [sendfile]")
        print("-tfm: parse result of tfm ")
        print("-opennf: parse result of opennf ")
    else:
        if len(sys.argv) == 5 and sys.argv[1] == "-tfm":
            # tfm without a send file: timestamps are taken from the pcaps.
            print("parse type:tfm")
            print("no send file,get initime and outtime record in src and dst pcap file,record in udp payload")
            flow = parse_tfm(sys.argv[3],sys.argv[4],None)
        elif len(sys.argv) == 6 and sys.argv[1] == "-tfm":
            # tfm with a send file: initial timestamps come from the send file.
            print("parse type:tfm")
            print("use send file,get initime from send and outtime record in src and dst file")
            flow = parse_tfm(sys.argv[3],sys.argv[4],sys.argv[5])
        elif sys.argv[1] == "-opennf":
            print("parse type:opennf")
            if len(sys.argv) < 6:
                # Bug fix: the error message used to be printed *after*
                # exit(1) and therefore never reached the user.
                print("error: need send file")
                exit(1)
            else:
                flow = parse_opennf(sys.argv[3],sys.argv[4],sys.argv[5])
        else:
            print("python parse.py -h app help")
            exit(1)
        if flow:
            Print_stat(flow)
            Debug_pkt(flow,(sys.argv[2]))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
201,
198,
6738,
11593,
37443,
834,
1330,
7297,
201,
198,
11748,
25064,
201,
198,
11748,
288,
79,
21841,
201,
198,
11748,
2878,
201,
198,
11748,
17802,
201,
198,
11748,
4731,
201,
198,
11... | 1.871239 | 831 |
"""Contains various utility-related nox sessions."""
import nox
from constants_nox import COMPOSE_FILE, INTEGRATION_COMPOSE_FILE
# Argument vector for shutting down the full docker-compose stack (both the
# main and the integration compose files), removing any orphaned containers.
# Sessions below splat this tuple into session.run().
COMPOSE_DOWN = (
    "docker-compose",
    "-f",
    COMPOSE_FILE,
    "-f",
    INTEGRATION_COMPOSE_FILE,
    "down",
    "--remove-orphans",
)
@nox.session()
def clean(session: nox.Session) -> None:
    """
    Fully reset the project's docker state: take the compose stack down
    (removing orphans, volumes, and all images), then prune the docker
    system.
    """
    down_with_volumes = COMPOSE_DOWN + ("--volumes", "--rmi", "all")
    session.run(*down_with_volumes, external=True)
    session.run("docker", "system", "prune", "--force", external=True)
    print("Clean Complete!")
@nox.session()
def teardown(session: nox.Session) -> None:
    """Tear down the docker dev environment (compose down, remove orphans)."""
    down_command = list(COMPOSE_DOWN)
    session.run(*down_command, external=True)
    print("Teardown complete")
| [
37811,
4264,
1299,
2972,
10361,
12,
5363,
645,
87,
10991,
526,
15931,
198,
11748,
645,
87,
198,
6738,
38491,
62,
35420,
1330,
24301,
14058,
62,
25664,
11,
17828,
7156,
49,
6234,
62,
9858,
48933,
62,
25664,
198,
198,
9858,
48933,
62,
4... | 2.734824 | 313 |
from tqdm import tqdm
import numpy as np
import torch
from dataset import AuxTables
from .featurizer import Featurizer
from collections import OrderedDict
import string
import pandas as pd
from itertools import product
def build_norm_model():
"""
build a normalized language model
:return: a key value dictionary that is 1-1
"""
classes = "".join([string.ascii_lowercase, "0123456789 ", string.punctuation])
tups = [(char, char) for char in classes]
return dict(tups)
#GM
| [
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
27039,
1330,
47105,
51,
2977,
198,
6738,
764,
5036,
2541,
7509,
1330,
5452,
2541,
7509,
198,
6738,
17268,
1330,
14230,
1068,
3... | 3 | 169 |
"""HTML scraper for The Stem & Stein"""
import datetime
from urllib.parse import parse_qsl
from html import unescape
from decimal import Decimal
import logging
import os
from bs4 import BeautifulSoup
import requests
import configurations
import pytz
from dateutil.parser import parse
from django.db.models import Q
from django.core.exceptions import ImproperlyConfigured, AppRegistryNotReady
# boilerplate code necessary for launching outside manage.py
try:
from ..base import BaseTapListProvider
except (ImproperlyConfigured, AppRegistryNotReady):
os.environ["DJANGO_SETTINGS_MODULE"] = "hsv_dot_beer.config"
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
configurations.setup()
from ..base import BaseTapListProvider
from beers.models import Manufacturer, Beer, ServingSize, BeerPrice
from taps.models import Tap
CENTRAL_TIME = pytz.timezone("America/Chicago")
LOG = logging.getLogger(__name__)
class StemAndSteinParser(BaseTapListProvider):
"""Parser for The Stem and Stein's static HTML page"""
provider_name = "stemandstein"
ROOT_URL = "https://thestemandstein.com"
BEER_URL = "https://thestemandstein.com/Home/BeerDetails/{}"
def parse_root_html(self):
"""Get a list of beer PKs to fetch"""
beer_list = self.parser.find("ul", {"class": "beerlist"})
beers = [
{
"name": unescape(tag.text),
"url": tag.attrs["href"],
}
for tag in beer_list.find_all("a")
]
return beers
def parse_beers(self, beers):
"""Fetch each beer, then attempt to guess the beer"""
found_beers = {}
for index, beer_dict in enumerate(beers):
beer_pk = int(beer_dict["url"].split("/")[-1])
beer = None
try:
beer = Beer.objects.get(stem_and_stein_pk=beer_pk)
except Beer.DoesNotExist:
# if we're really lucky, we can get a match by name
try:
beer = Beer.objects.get(name=beer_dict["name"])
except (Beer.DoesNotExist, Beer.MultipleObjectsReturned):
# womp womp
beer = self.guess_beer(beer_dict["name"])
if beer.stem_and_stein_pk != beer_pk:
beer.stem_and_stein_pk = beer_pk
beer.save()
found_beers[index + 1] = beer
return found_beers
def fill_in_beer_details(self, beer):
"""Update color, serving size, and price for a beer"""
beer_html = requests.get(
self.__class__.BEER_URL.format(beer.stem_and_stein_pk),
).text
beer_parser = BeautifulSoup(beer_html, "html.parser")
jumbotron = beer_parser.find("div", {"class": "jumbotron"})
tap_table = beer_parser.find("table", {"id": "tapList"})
tap_body = tap_table.find("tbody")
image_div = jumbotron.find(
"div",
{"style": "display:table-cell;vertical-align:top;width:17px;"},
)
pricing_div = jumbotron.find(
"div",
{
"style": "display: table-cell; padding: 3px;"
" font-size: 22px; vertical-align: top; width:42px",
},
)
price = Decimal(pricing_div.text[1:])
image_url = image_div.find("img").attrs["src"]
image_params = dict(parse_qsl(image_url.split("?")[-1]))
abv_div = jumbotron.find(
"div",
{
"style": "color:slategray; font-size:18px;padding-left:20px",
},
)
if not beer.abv:
if "ABV" in abv_div.text:
# ABV x.y% (a bunch of spaces) city, state
try:
abv = Decimal(abv_div.text.split()[1][:-1])
except ValueError:
LOG.warning("Invalid S&S ABV %s for beer %s", abv_div.text, beer)
else:
LOG.debug("Setting ABV for beer %s to %s%%", beer, abv)
beer.abv = abv
beer.save()
if not beer.manufacturer.location:
raw_text = abv_div.text.replace(" ", "")
percent_index = raw_text.index("%")
beer.manufacturer.location = raw_text[percent_index + 1 :].strip()
LOG.debug(
"Setting beer %s location to %s", beer, beer.manufacturer.location
)
beer.manufacturer.save()
try:
color = unescape(image_params["color"])
except KeyError:
LOG.warning("Missing S&S color for beer %s", beer)
color = None
volume_oz = (
16 if image_params["glassware"].casefold() == "pint".casefold() else 10
)
if not beer.color_html and color:
beer.color_html = color
beer.save()
serving_size = self.serving_sizes[volume_oz]
BeerPrice.objects.update_or_create(
venue=self.venue,
serving_size=serving_size,
beer=beer,
defaults={"price": price},
)
time_tapped = None
for row in tap_body.find_all("tr"):
cells = list(row.find_all("td"))
if cells[-1].text.endswith("(so far)"):
time_tapped = CENTRAL_TIME.localize(parse(cells[0].text))
return time_tapped
| [
37811,
28656,
19320,
525,
329,
383,
520,
368,
1222,
15215,
37811,
198,
11748,
4818,
8079,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
21136,
62,
80,
6649,
198,
6738,
27711,
1330,
555,
41915,
198,
6738,
32465,
1330,
4280,
4402,
198,
1174... | 2.066795 | 2,605 |
# -*- coding:utf-8 -*-
from django.conf.urls import url, include
from apps.query.views import *
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'querysqllog', QuerySqlLogViewSet, basename="querysqllog")
urlpatterns = [
#
url(r'^', include(router.urls)),
url(r'querysql', QuerySqlViewSet.as_view()),
url(r'querymongodb', QueryMongodbViewSet.as_view()),
url(r'queryredis', QueryRedisViewSet.as_view()),
] | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
6725,
13,
22766,
13,
33571,
1330,
1635,
198,
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
47... | 2.470588 | 187 |
'''
titre : blockchain.py
description : Implementation d'une blockchain
auteur : Koffi Michel
utuilisation : python blockchain_client.py
python blockchain_client.py -p 8080
python blockchain_client.py --port 8080
version python : 3
References : [1] https://github.com/julienr/ipynb_playground/blob/master/bitcoin/dumbcoin/dumbcoin.ipynb
'''
from collections import OrderedDict
import binascii
import Crypto
import Crypto.Random
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import requests
from flask import Flask, jsonify, request, render_template
import hashlib
app = Flask(__name__)
@app.route('/')
@app.route('/make/transaction')
@app.route('/view/transactions')
@app.route('/wallet/new', methods=['GET'])
@app.route('/generate/transaction', methods=['POST'])
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=8080, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.run(host='127.0.0.1', port=port) | [
7061,
6,
201,
198,
83,
270,
260,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
11779,
13,
9078,
201,
198,
11213,
220,
220,
220,
220,
1058,
46333,
288,
6,
1726,
11779,
201,
198,
64,
1133,
333,
220,
220,
220,
220,
220,
2... | 2.412331 | 519 |
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
from testcases import common
from pyalgotrade import dataseries
from pyalgotrade.technical import highlow
| [
2,
9485,
2348,
2188,
35965,
198,
2,
198,
2,
15069,
2813,
12,
4626,
17371,
5780,
15780,
276,
25314,
11667,
528,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
... | 3.686364 | 220 |
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from .models import CustomUser, Chamado | [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
198,
6738,
764,
27530,
1330,
8562,
12982,
11,
37790,
4533
] | 3.85 | 40 |
## Sequences
from Bio.Seq import Seq
from Bio.Seq import MutableSeq
seqobj = Bio.Seq.Seq("ATCGTG")
print(str(seqobj))
mutable = MutableSeq(seqobj)
seqobj[0] = "T"
| [
2235,
24604,
3007,
198,
198,
6738,
16024,
13,
4653,
80,
1330,
1001,
80,
198,
6738,
16024,
13,
4653,
80,
1330,
13859,
540,
4653,
80,
198,
198,
41068,
26801,
796,
16024,
13,
4653,
80,
13,
4653,
80,
7203,
1404,
34,
19555,
38,
4943,
198... | 2.243243 | 74 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 23:16:58 2022
@author: marco
"""
import pandas as pd
import numpy as np
import os
from scipy.linalg import pinv as pinv
from scipy.linalg import inv as inv
from scipy.stats import f
from scipy.stats import chi2
os.chdir('C://Users//marco//Desktop//Projects')
cwd = os.getcwd()
print("Current working directory: {0}".format(cwd))
import warnings # `do not disturbe` mode
warnings.filterwarnings('ignore')
#D^Mahalanobis = (x-m)^T C^-1 (x-m)
'''
dtafile = 'Data.xlsx'
df = pd.read_excel(dtafile, index_col=0, skiprows=0, na_values=('NE'),sheet_name='ols')
dfd,dfe = mahalanobis(df=df)
dfc = cooks(df=df)
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
1526,
1511,
2242,
25,
1433,
25,
3365,
33160,
201,
198,
201,
198,
31,
9800,
25,
1667,
1073,
201,
198,
37811,
201,
198,
11748,
19... | 2.119534 | 343 |
from __future__ import print_function
import time
import argparse
import Queue
import RPi.GPIO as GPIO
import bisect
DESCRIPTION = """A simple tool to count events on a GPIO
PIN-Numbering is in BROADCOM!
"""
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
GPIO.cleanup()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
640,
198,
11748,
1822,
29572,
198,
11748,
4670,
518,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
47457,
478,
198,
198,
30910,
40165,
796,
37227,
32,
2829,
28... | 2.828829 | 111 |
"""
I/O, attributes, and processing for different datasets.
"""
from .semantickitti import SemanticKITTI
from .s3dis import S3DIS
from .parislille3d import ParisLille3D
from .toronto3d import Toronto3D
from .customdataset import Custom3D
from .semantic3d import Semantic3D
from . import utils
__all__ = [
'SemanticKITTI', 'S3DIS', 'Toronto3D', 'ParisLille3D', 'Semantic3D',
'Custom3D', 'utils'
]
| [
37811,
198,
40,
14,
46,
11,
12608,
11,
290,
7587,
329,
1180,
40522,
13,
198,
37811,
198,
198,
6738,
764,
43616,
415,
624,
715,
72,
1330,
12449,
5109,
42,
22470,
40,
198,
6738,
764,
82,
18,
6381,
1330,
311,
18,
26288,
198,
6738,
76... | 2.7 | 150 |
from python_ddd.domain.Entity import Entity
| [
6738,
21015,
62,
1860,
67,
13,
27830,
13,
32398,
1330,
20885,
628
] | 3.75 | 12 |
import mido
import random
from ClearLaunchpad import RemoveNotes, ClearScreen
from FirstMido import FillNotes
ClearScreen()
| [
11748,
3095,
78,
198,
11748,
4738,
198,
6738,
11459,
38296,
15636,
1330,
17220,
16130,
11,
11459,
23901,
198,
6738,
3274,
44,
17305,
1330,
27845,
16130,
628,
628,
198,
19856,
23901,
3419,
198
] | 4 | 32 |
#!/usr/bin/env python
"""Module for storing custom exceptions for cmc-py modules."""
class InvalidPageURL(Exception):
"""Raised when the webpage is not found on CoinMarketCap website."""
def __init__(self, url: str) -> None:
"""
Args:
url (str): Link of the webpage.
"""
self.url = url
class InvalidCryptoCurrencyURL(Exception):
"""Raised when the cryptocurrency webpage is not found on
CoinMarketCap website."""
def __init__(self, cryptocurrency: str) -> None:
"""
Args:
cryptocurrency (str): Link of the cryptocurrency webpage.
"""
self.cryptocurrency = cryptocurrency
class InvalidExchangeURL(Exception):
"""Raised when the exchange webpage is not found on
CoinMarketCap website."""
def __init__(self, exchange: str) -> None:
"""
Args:
exchange (str): Link of the exchange webpage.
"""
self.exchange = exchange
class ProxyTimeOut(Exception):
"""Raised when a proxy cannot be fetched from the API."""
class InvalidProxy(Exception):
"""Raised when the proxy used is not valid."""
def __init__(self, proxy: str) -> None:
"""
Args:
proxy (str): The invalid proxy.
"""
self.proxy = proxy
class ScrapeError(Exception):
"""Raised when Selenium is unable to scrape required element from the webpage."""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
26796,
329,
23069,
2183,
13269,
329,
12067,
66,
12,
9078,
13103,
526,
15931,
628,
198,
4871,
17665,
9876,
21886,
7,
16922,
2599,
198,
220,
220,
220,
37227,
21762,
1417,
618,
... | 2.684701 | 536 |
import dash_html_components as html
import dash_core_components as dcc
from datetime import datetime
markdown_text = '''
### Explanation
This dashboard extracts data from various data sources and presents the daily lifestyle of the individual.
1. __Select__ a date ranging from 5th April 2019 to 5th May 2019.
2. __Pie Chart__ presents the _univariate_ analysis of the time spent on exercise.
3. __Bar Plot__ presents _bivariate_ analysis of the calories burnt on each exercise.
4. __Dual Plot__ presents the health status with shaded region indicating sleep pattern, and lines represent the heart rate and steps during a day. The x-axis is time-scale for each day. The _slider_ lets on zoom on each section.
###### Technical Details
1. Information was extracted from four dataframes - sleep, exercise, heart rate and step count using _pandas and datetime_ modules.
2. Three subplots were created to plot 3 graphs together.
3. The range slider and date picker components were used from _plotly_ module.
'''
tab_daily_layout = html.Div([
html.H1('Daily Routine Analsis'),
html.P('This graph shows day wise composite information on various user level data.'),
html.Div([
html.Div([
html.Div([dcc.DatePickerSingle(
id='my-date-picker-single',
min_date_allowed=datetime(2019, 4, 5),
max_date_allowed=datetime(2019, 5, 5),
initial_visible_month=datetime(2019, 4, 15),
date=datetime(2019,4,15)),
dcc.Graph(id='dashboard')
])],className='nine columns'),
html.Div([
dcc.Markdown(children=markdown_text)], className='three columns')
])
], className="row") | [
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
4102,
2902,
62,
5239,
796,
705,
7061,
198,
21017,
50125,
341,
198,
198,
1212... | 2.13219 | 991 |
from .command import main
main()
| [
6738,
764,
21812,
1330,
1388,
198,
198,
12417,
3419,
198
] | 3.4 | 10 |
from sys import path, version_info
from os.path import sep
path.insert(1, path[0]+sep+'codec'+sep+'ber')
import ber.suite
path.insert(1, path[0]+sep+'codec'+sep+'cer')
import cer.suite
path.insert(1, path[0]+sep+'codec'+sep+'der')
import der.suite
from pyasn1.error import PyAsn1Error
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
suite = unittest.TestSuite()
for m in (
ber.suite,
cer.suite,
der.suite
):
suite.addTest(getattr(m, 'suite'))
if __name__ == '__main__': runTests()
| [
6738,
25064,
1330,
3108,
11,
2196,
62,
10951,
198,
6738,
28686,
13,
6978,
1330,
41767,
198,
6978,
13,
28463,
7,
16,
11,
3108,
58,
15,
48688,
325,
79,
10,
6,
19815,
721,
6,
10,
325,
79,
10,
6,
527,
11537,
198,
11748,
18157,
13,
2... | 2.158416 | 303 |
import datetime
import logging
import operator
import sys
import time
from collections import defaultdict, namedtuple
from time import sleep
from OpenSSL import SSL
import facebook
from db_functions import db_interface_context
from slack_notifier import notify_slack
import config_utils
SearchRunnerParams = namedtuple(
'SearchRunnerParams',
['country_code',
'facebook_access_token',
'sleep_time',
'request_limit',
'max_requests',
'stop_at_datetime',
])
def get_stop_at_datetime(stop_at_time_str):
"""Get datetime for today at the clock time in ISO format.
Args:
stop_at_time_str: str time to stop in ISO format. only hours, minutes, seconds used (all other
info ignored).
Returns:
datetime.datetime of today at the specified time.
"""
stop_at_time = datetime.time.fromisoformat(stop_at_time_str)
today = datetime.date.today()
return datetime.datetime(year=today.year, month=today.month, day=today.day,
hour=stop_at_time.hour, minute=stop_at_time.minute,
second=stop_at_time.second)
if __name__ == '__main__':
config = config_utils.get_config(sys.argv[1])
country_code = config['SEARCH']['COUNTRY_CODE'].lower()
config_utils.configure_logger(f"{country_code}_active_ads_fb_api_collection.log")
if len(sys.argv) < 2:
exit(f"Usage:python3 {sys.argv[0]} active_ads_fb_collector.cfg")
main(config)
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
10088,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
17268,
1330,
4277,
11600,
11,
3706,
83,
29291,
198,
6738,
640,
1330,
3993,
628,
198,
6738,
4946,
31127,
1330,
25952,
198,
11748,
23960... | 2.419458 | 627 |
#!python3
#encoding:utf-8
import os.path
import subprocess
import getpass
import Data
import command.miscellaneous.Licenses
if __name__ == "__main__":
github_user_name = 'ytyaru'
os_user_name = getpass.getuser()
device_name = 'some_device'
path_db_base = 'db/GitHub'
path_db_account = '/media/{0}/{1}/{2}/GitHub.Accounts.sqlite3'.format(os_user_name, device_name, path_db_base)
path_db_repo = '/media/{0}/{1}/{2}/GitHub.Repositories.{3}.sqlite3'.format(os_user_name, device_name, path_db_base, github_user_name)
path_db_license = '/media/{0}/{1}/{2}/GitHub.Licenses.sqlite3'.format(os_user_name, device_name, path_db_base)
main = Main(github_user_name, path_db_account, path_db_repo, path_db_license)
main.Run()
| [
2,
0,
29412,
18,
198,
2,
12685,
7656,
25,
40477,
12,
23,
198,
11748,
28686,
13,
6978,
198,
11748,
850,
14681,
198,
11748,
651,
6603,
198,
11748,
6060,
198,
11748,
3141,
13,
25413,
25673,
13,
26656,
4541,
628,
198,
361,
11593,
3672,
... | 2.366013 | 306 |
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy() | [
6738,
42903,
1330,
46947,
11,
33918,
1958,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
9945,
796,
16363,
2348,
26599,
3419
] | 3.5 | 26 |
import time
import copy
from pprint import pprint
from .cltools import HAVE_PYOPENCL
from .cluster import HAVE_ISOSPLIT5
try:
import numba
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
# nb of channel to become sparse
limit_dense_sparse = 4
_default_catalogue_params = {
'duration': 300.,
'chunksize': 1024,
'mode': 'dense', # 'sparse'
'sparse_threshold': None, # 1.5
'memory_mode': 'memmap',
'n_spike_for_centroid':350,
'n_jobs' :-1,
'preprocessor': {
'highpass_freq': 300.,
'lowpass_freq': 5000.,
'smooth_size': 0,
'lostfront_chunksize': -1, # this auto
'engine': 'numpy',
'common_ref_removal':False,
},
'peak_detector': {
'method' : 'global',
'engine': 'numpy',
'peak_sign': '-',
'relative_threshold': 5.,
'peak_span_ms': .7,
'adjacency_radius_um' : None,
'smooth_radius_um' : None,
},
'noise_snippet': {
'nb_snippet': 300,
},
'extract_waveforms': {
'wf_left_ms': -1.5,
'wf_right_ms': 2.5,
},
'clean_peaks': {
'alien_value_threshold': None,
'mode': 'extremum_amplitude',
},
'peak_sampler': {
'mode': 'rand',
'nb_max': 20000,
'nb_max_by_channel': None,
},
#~ 'clean_waveforms': {
#~ 'alien_value_threshold': None,
#~ },
'feature_method': 'pca_by_channel',
'feature_kargs': {},
'cluster_method': 'pruningshears',
'cluster_kargs': {},
'clean_cluster' :{
'apply_auto_split': True,
'apply_trash_not_aligned': True,
'apply_auto_merge_cluster': True,
'apply_trash_low_extremum': True,
'apply_trash_small_cluster': True,
},
'make_catalogue':{
'inter_sample_oversampling':False,
'subsample_ratio': 'auto',
'sparse_thresh_level2': 1.5,
'sparse_thresh_level2': 3,
}
}
def get_auto_params_for_catalogue(dataio, chan_grp=0):
"""
Automatic selection of parameters.
This totally empiric paramerters.
"""
params = copy.deepcopy(_default_catalogue_params)
nb_chan = dataio.nb_channel(chan_grp=chan_grp)
# TODO make this more complicated
# * by detecting if dense array or not.
# * better method sleection
#~ seg0_duration = dataio.get_segment_length(seg_num=0) / dataio.sample_rate
total_duration = sum(dataio.get_segment_length(seg_num=seg_num) / dataio.sample_rate for seg_num in range(dataio.nb_segment))
# auto chunsize of 100 ms
params['chunksize'] = int(dataio.sample_rate * 0.1)
params['duration'] = 601.
# segment durartion is not so big then take the whole duration
# to avoid double preprocessing (catalogue+peeler)
if params['duration'] * 2 > total_duration:
params['duration'] = total_duration
#~ if nb_chan == 1:
#~ params['mode'] = 'dense'
#~ params['adjacency_radius_um'] = 0.
#~ params['sparse_threshold'] = 1.5
#~ params['peak_detector']['method'] = 'global'
#~ params['peak_detector']['engine'] = 'numpy'
#~ params['peak_detector']['smooth_radius_um' ] = None
#~ params['peak_sampler']['mode'] = 'rand'
#~ params['peak_sampler']['nb_max'] = 20000
#~ params['feature_method'] = 'global_pca'
#~ params['feature_kargs'] = {'n_components' : 4 }
#~ params['cluster_method'] = 'dbscan_with_noise'
#~ params['cluster_kargs'] = {}
#~ params['clean_cluster_kargs'] = {'too_small' : 20 }
#~ elif nb_chan <=4:
if nb_chan <= limit_dense_sparse:
#~ if nb_chan <=8:
params['mode'] = 'dense'
#~ params['adjacency_radius_um'] = 0.
params['sparse_threshold'] = 1.5
params['peak_detector']['method'] = 'global'
params['peak_detector']['engine'] = 'numpy'
params['peak_detector']['adjacency_radius_um'] = 200. # useless
params['peak_detector']['smooth_radius_um' ] = None
params['peak_sampler']['mode'] = 'rand'
params['peak_sampler']['nb_max'] = 20000
params['feature_method'] = 'global_pca'
if nb_chan in (1,2):
n_components = 5
else:
n_components = int(nb_chan*2)
params['feature_kargs'] = {'n_components' : n_components }
params['cluster_method'] = 'pruningshears'
params['cluster_kargs']['max_loop'] = max(1000, nb_chan * 10)
params['cluster_kargs']['min_cluster_size'] = 20
params['cluster_kargs']['adjacency_radius_um'] = 0.
params['cluster_kargs']['high_adjacency_radius_um'] = 0.
# necessary for peeler classic
#~ params['make_catalogue']['inter_sample_oversampling'] = True
else:
params['mode'] = 'sparse'
#~ params['adjacency_radius_um'] = 200.
params['sparse_threshold'] = 1.5
if nb_chan > 32 and HAVE_PYOPENCL:
params['preprocessor']['engine'] = 'opencl'
params['peak_detector']['method'] = 'geometrical'
params['peak_detector']['adjacency_radius_um'] = 200.
#~ params['peak_detector']['smooth_radius_um' ] = 10
params['peak_detector']['smooth_radius_um' ] = None
if HAVE_PYOPENCL:
params['peak_detector']['engine'] = 'opencl'
elif HAVE_NUMBA:
params['peak_detector']['engine'] = 'numba'
else:
print('WARNING : peakdetector will be slow install opencl or numba')
params['peak_detector']['engine'] = 'numpy'
params['peak_sampler']['mode'] = 'rand_by_channel'
#~ params['extract_waveforms']['nb_max_by_channel'] = 700
params['peak_sampler']['nb_max_by_channel'] = 1000
#~ params['peak_sampler']['nb_max_by_channel'] = 1500
#~ params['peak_sampler']['nb_max_by_channel'] = 3000
params['feature_method'] = 'pca_by_channel'
# TODO change n_components_by_channel depending on channel density
#~ params['feature_kargs'] = {'n_components_by_channel':5}
params['feature_kargs'] = {'n_components_by_channel': 3,
'adjacency_radius_um' :50., # this should be greater than cluster 'adjacency_radius_um'
}
params['cluster_method'] = 'pruningshears'
params['cluster_kargs']['max_loop'] = max(1000, nb_chan * 20)
params['cluster_kargs']['min_cluster_size'] = 20
params['cluster_kargs']['adjacency_radius_um'] = 50.
params['cluster_kargs']['high_adjacency_radius_um'] = 30.
return params
| [
11748,
640,
198,
11748,
4866,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
6738,
764,
565,
31391,
1330,
21515,
62,
47,
56,
3185,
1677,
5097,
198,
6738,
764,
565,
5819,
1330,
21515,
62,
1797,
2640,
6489,
2043,
20,
198,
198,
28311,
... | 2.016312 | 3,433 |
from xml.dom.minidom import DOMImplementation
from xml.dom.minidom import getDOMImplementation
| [
6738,
35555,
13,
3438,
13,
1084,
312,
296,
1330,
24121,
3546,
32851,
201,
198,
6738,
35555,
13,
3438,
13,
1084,
312,
296,
1330,
651,
39170,
3546,
32851,
201,
198,
201,
198,
201,
198,
201,
198
] | 2.942857 | 35 |
from .Sprites import Monster, Bullet, Man
| [
6738,
764,
4561,
23156,
1330,
220,
12635,
11,
18003,
11,
1869,
198
] | 3.583333 | 12 |
from abc import abstractmethod, ABC
from typing import (
Any,
Dict,
List,
)
from .disk import Disk
from .pool import Pool
from .virtualmachine import VirtualMachine
| [
6738,
450,
66,
1330,
12531,
24396,
11,
9738,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
4377,
11,
198,
220,
220,
220,
360,
713,
11,
198,
220,
220,
220,
7343,
11,
198,
8,
198,
6738,
764,
39531,
1330,
31664,
198,
6738,
764,
77... | 3.236364 | 55 |
from __future__ import division
import numpy as np
import scipy
import scipy.stats
import scipy.fftpack
import scipy.optimize
import logging
import stingray.lightcurve as lightcurve
import stingray.utils as utils
from stingray.gti import bin_intervals_from_gtis, check_gtis
from stingray.utils import simon
from stingray.crossspectrum import Crossspectrum, AveragedCrossspectrum
__all__ = ["Powerspectrum", "AveragedPowerspectrum", "DynamicalPowerspectrum"]
def classical_pvalue(power, nspec):
"""
Compute the probability of detecting the current power under
the assumption that there is no periodic oscillation in the data.
This computes the single-trial p-value that the power was
observed under the null hypothesis that there is no signal in
the data.
Important: the underlying assumptions that make this calculation valid
are:
1. the powers in the power spectrum follow a chi-square distribution
2. the power spectrum is normalized according to [Leahy 1983]_, such
that the powers have a mean of 2 and a variance of 4
3. there is only white noise in the light curve. That is, there is no
aperiodic variability that would change the overall shape of the power
spectrum.
Also note that the p-value is for a *single trial*, i.e. the power
currently being tested. If more than one power or more than one power
spectrum are being tested, the resulting p-value must be corrected for the
number of trials (Bonferroni correction).
Mathematical formulation in [Groth 1975]_.
Original implementation in IDL by Anna L. Watts.
Parameters
----------
power : float
The squared Fourier amplitude of a spectrum to be evaluated
nspec : int
The number of spectra or frequency bins averaged in ``power``.
This matters because averaging spectra or frequency bins increases
the signal-to-noise ratio, i.e. makes the statistical distributions
of the noise narrower, such that a smaller power might be very
significant in averaged spectra even though it would not be in a single
power spectrum.
Returns
-------
pval : float
The classical p-value of the observed power being consistent with
the null hypothesis of white noise
References
----------
* .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract
* .. [Groth 1975] https://ui.adsabs.harvard.edu/#abs/1975ApJS...29..285G/abstract
"""
if not np.isfinite(power):
raise ValueError("power must be a finite floating point number!")
if power < 0:
raise ValueError("power must be a positive real number!")
if not np.isfinite(nspec):
raise ValueError("nspec must be a finite integer number")
if nspec < 1:
raise ValueError("nspec must be larger or equal to 1")
if not np.isclose(nspec % 1, 0):
raise ValueError("nspec must be an integer number!")
# If the power is really big, it's safe to say it's significant,
# and the p-value will be nearly zero
if (power * nspec) > 30000:
simon("Probability of no signal too miniscule to calculate.")
return 0.0
else:
pval = _pavnosigfun(power, nspec)
return pval
def _pavnosigfun(power, nspec):
"""
Helper function doing the actual calculation of the p-value.
Parameters
----------
power : float
The measured candidate power
nspec : int
The number of power spectral bins that were averaged in `power`
(note: can be either through averaging spectra or neighbouring bins)
"""
sum = 0.0
m = nspec - 1
pn = power * nspec
while m >= 0:
s = 0.0
for i in range(int(m) - 1):
s += np.log(float(m - i))
logterm = m * np.log(pn / 2) - pn / 2 - s
term = np.exp(logterm)
ratio = sum / term
if ratio > 1.0e15:
return sum
sum += term
m -= 1
return sum
class Powerspectrum(Crossspectrum):
"""
Make a :class:`Powerspectrum` (also called periodogram) from a (binned) light curve.
Periodograms can be normalized by either Leahy normalization, fractional rms
normalizaation, absolute rms normalization, or not at all.
You can also make an empty :class:`Powerspectrum` object to populate with your
own fourier-transformed data (this can sometimes be useful when making
binned power spectra).
Parameters
----------
lc: :class:`stingray.Lightcurve` object, optional, default ``None``
The light curve data to be Fourier-transformed.
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }, optional, default ``frac``
The normaliation of the power spectrum to be used. Options are
``leahy``, ``frac``, ``abs`` and ``none``, default is ``frac``.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }
the normalization of the power spectrun
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of normalized squared absolute values of Fourier
amplitudes
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging power spectrum). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged powers in each bin
n: int
The number of data points in the light curve
nphots: float
The total number of photons in the light curve
"""
def rebin(self, df=None, f=None, method="mean"):
"""
Rebin the power spectrum.
Parameters
----------
df: float
The new frequency resolution
Other Parameters
----------------
f: float
the rebin factor. If specified, it substitutes ``df`` with ``f*self.df``
Returns
-------
bin_cs = :class:`Powerspectrum` object
The newly binned power spectrum.
"""
bin_ps = Crossspectrum.rebin(self, df=df, f=f, method=method)
bin_ps.nphots = bin_ps.nphots1
return bin_ps
def compute_rms(self, min_freq, max_freq, white_noise_offset=0.):
"""
Compute the fractional rms amplitude in the power spectrum
between two frequencies.
Parameters
----------
min_freq: float
The lower frequency bound for the calculation
max_freq: float
The upper frequency bound for the calculation
Other parameters
----------------
white_noise_offset : float, default 0
This is the white noise level, in Leahy normalization. In the ideal
case, this is 2. Dead time and other instrumental effects can alter
it. The user can fit the white noise level outside this function
and it will get subtracted from powers here.
Returns
-------
rms: float
The fractional rms amplitude contained between ``min_freq`` and
``max_freq``
"""
minind = self.freq.searchsorted(min_freq)
maxind = self.freq.searchsorted(max_freq)
powers = self.power[minind:maxind]
nphots = self.nphots
if self.norm.lower() == 'leahy':
powers_leahy = powers.copy()
elif self.norm.lower() == "frac":
powers_leahy = \
self.unnorm_power[minind:maxind].real * 2 / nphots
else:
raise TypeError("Normalization not recognized!")
rms = np.sqrt(np.sum(powers_leahy - white_noise_offset) / nphots)
rms_err = self._rms_error(powers_leahy)
return rms, rms_err
def _rms_error(self, powers):
"""
Compute the error on the fractional rms amplitude using error
propagation.
Note: this uses the actual measured powers, which is not
strictly correct. We should be using the underlying power spectrum,
but in the absence of an estimate of that, this will have to do.
.. math::
r = \sqrt{P}
.. math::
\delta r = \\frac{1}{2 * \sqrt{P}} \delta P
Parameters
----------
powers: iterable
The list of powers used to compute the fractional rms amplitude.
Returns
-------
delta_rms: float
the error on the fractional rms amplitude
"""
nphots = self.nphots
p_err = scipy.stats.chi2(2.0 * self.m).var() * powers / self.m / nphots
rms = np.sum(powers) / nphots
pow = np.sqrt(rms)
drms_dp = 1 / (2 * pow)
sq_sum_err = np.sqrt(np.sum(p_err**2))
delta_rms = sq_sum_err * drms_dp
return delta_rms
def classical_significances(self, threshold=1, trial_correction=False):
"""
Compute the classical significances for the powers in the power
spectrum, assuming an underlying noise distribution that follows a
chi-square distributions with 2M degrees of freedom, where M is the
number of powers averaged in each bin.
Note that this function will *only* produce correct results when the
following underlying assumptions are fulfilled:
1. The power spectrum is Leahy-normalized
2. There is no source of variability in the data other than the
periodic signal to be determined with this method. This is important!
If there are other sources of (aperiodic) variability in the data, this
method will *not* produce correct results, but instead produce a large
number of spurious false positive detections!
3. There are no significant instrumental effects changing the
statistical distribution of the powers (e.g. pile-up or dead time)
By default, the method produces ``(index,p-values)`` for all powers in
the power spectrum, where index is the numerical index of the power in
question. If a ``threshold`` is set, then only powers with p-values
*below* that threshold with their respective indices. If
``trial_correction`` is set to ``True``, then the threshold will be corrected
for the number of trials (frequencies) in the power spectrum before
being used.
Parameters
----------
threshold : float, optional, default ``1``
The threshold to be used when reporting p-values of potentially
significant powers. Must be between 0 and 1.
Default is ``1`` (all p-values will be reported).
trial_correction : bool, optional, default ``False``
A Boolean flag that sets whether the ``threshold`` will be corrected
by the number of frequencies before being applied. This decreases
the ``threshold`` (p-values need to be lower to count as significant).
Default is ``False`` (report all powers) though for any application
where `threshold`` is set to something meaningful, this should also
be applied!
Returns
-------
pvals : iterable
A list of ``(index, p-value)`` tuples for all powers that have p-values
lower than the threshold specified in ``threshold``.
"""
if not self.norm == "leahy":
raise ValueError("This method only works on "
"Leahy-normalized power spectra!")
if np.size(self.m) == 1:
# calculate p-values for all powers
# leave out zeroth power since it just encodes the number of photons!
pv = np.array([classical_pvalue(power, self.m)
for power in self.power])
else:
pv = np.array([classical_pvalue(power, m)
for power, m in zip(self.power, self.m)])
# if trial correction is used, then correct the threshold for
# the number of powers in the power spectrum
if trial_correction:
threshold /= self.power.shape[0]
# need to add 1 to the indices to make up for the fact that
# we left out the first power above!
indices = np.where(pv < threshold)[0]
pvals = np.vstack([pv[indices], indices])
return pvals
class AveragedPowerspectrum(AveragedCrossspectrum, Powerspectrum):
"""
Make an averaged periodogram from a light curve by segmenting the light
curve, Fourier-transforming each segment and then averaging the
resulting periodograms.
Parameters
----------
lc: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects
The light curve data to be Fourier-transformed.
segment_size: float
The size of each segment to average. Note that if the total
duration of each :class:`Lightcurve` object in lc is not an integer multiple
of the ``segment_size``, then any fraction left-over at the end of the
time series will be lost.
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }, optional, default ``frac``
The normaliation of the periodogram to be used.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }
the normalization of the periodogram
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of normalized squared absolute values of Fourier
amplitudes
power_err: numpy.ndarray
The uncertainties of ``power``.
An approximation for each bin given by ``power_err= power/sqrt(m)``.
Where ``m`` is the number of power averaged in each bin (by frequency
binning, or averaging powerspectrum). Note that for a single
realization (``m=1``) the error is equal to the power.
df: float
The frequency resolution
m: int
The number of averaged periodograms
n: int
The number of data points in the light curve
nphots: float
The total number of photons in the light curve
"""
def _make_segment_spectrum(self, lc, segment_size):
"""
Split the light curves into segments of size ``segment_size``, and calculate a power spectrum for
each.
Parameters
----------
lc : :class:`stingray.Lightcurve` objects\
The input light curve
segment_size : ``numpy.float``
Size of each light curve segment to use for averaging.
Returns
-------
power_all : list of :class:`Powerspectrum` objects
A list of power spectra calculated independently from each light curve segment
nphots_all : ``numpy.ndarray``
List containing the number of photons for all segments calculated from ``lc``
"""
if not isinstance(lc, lightcurve.Lightcurve):
raise TypeError("lc must be a lightcurve.Lightcurve object")
if self.gti is None:
self.gti = lc.gti
check_gtis(self.gti)
start_inds, end_inds = \
bin_intervals_from_gtis(self.gti, segment_size, lc.time, dt=lc.dt)
power_all = []
nphots_all = []
for start_ind, end_ind in zip(start_inds, end_inds):
time = lc.time[start_ind:end_ind]
counts = lc.counts[start_ind:end_ind]
counts_err = lc.counts_err[start_ind: end_ind]
lc_seg = lightcurve.Lightcurve(time, counts, err=counts_err,
err_dist=lc.err_dist.lower())
power_seg = Powerspectrum(lc_seg, norm=self.norm)
power_all.append(power_seg)
nphots_all.append(np.sum(lc_seg.counts))
return power_all, nphots_all
class DynamicalPowerspectrum(AveragedPowerspectrum):
"""
Create a dynamical power spectrum, also often called a *spectrogram*.
This class will divide a :class:`Lightcurve` object into segments of
length ``segment_size``, create a power spectrum for each segment and store
all powers in a matrix as a function of both time (using the mid-point of each
segment) and frequency.
This is often used to trace changes in period of a (quasi-)periodic signal over
time.
Parameters
----------
lc : :class:`stingray.Lightcurve` object
The time series of which the Dynamical powerspectrum is
to be calculated.
segment_size : float, default 1
Length of the segment of light curve, default value is 1 (in whatever units
the ``time`` array in the :class:`Lightcurve`` object uses).
norm: {``leahy`` | ``frac`` | ``abs`` | ``none`` }, optional, default ``frac``
The normaliation of the periodogram to be used.
Other Parameters
----------------
gti: 2-d float array
``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
segment_size: float
The size of each segment to average. Note that if the total
duration of each Lightcurve object in lc is not an integer multiple
of the ``segment_size``, then any fraction left-over at the end of the
time series will be lost.
dyn_ps : np.ndarray
The matrix of normalized squared absolute values of Fourier
amplitudes. The axis are given by the ``freq``
and ``time`` attributes
norm: {``leahy`` | ``frac`` | ``abs`` | ``none``}
the normalization of the periodogram
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
df: float
The frequency resolution
dt: float
The time resolution
"""
def _make_matrix(self, lc):
"""
Create a matrix of powers for each time step (rows) and each frequency step (columns).
Parameters
----------
lc : :class:`Lightcurve` object
The :class:`Lightcurve` object from which to generate the dynamical power spectrum
"""
ps_all, _ = AveragedPowerspectrum._make_segment_spectrum(
self, lc, self.segment_size)
self.dyn_ps = np.array([ps.power for ps in ps_all]).T
self.freq = ps_all[0].freq
start_inds, end_inds = \
bin_intervals_from_gtis(self.gti, self.segment_size, lc.time, dt=lc.dt)
tstart = lc.time[start_inds]
tend = lc.time[end_inds]
self.time = tstart + 0.5*(tend - tstart)
# Assign length of lightcurve as time resolution if only one value
if len(self.time) > 1:
self.dt = self.time[1] - self.time[0]
else:
self.dt = lc.n
# Assign biggest freq. resolution if only one value
if len(self.freq) > 1:
self.df = self.freq[1] - self.freq[0]
else:
self.df = 1 / lc.n
def rebin_frequency(self, df_new, method="sum"):
"""
Rebin the Dynamic Power Spectrum to a new frequency resolution. Rebinning is
an in-place operation, i.e. will replace the existing ``dyn_ps`` attribute.
While the new resolution need not be an integer multiple of the
previous frequency resolution, be aware that if it is not, the last
bin will be cut off by the fraction left over by the integer division.
Parameters
----------
df_new: float
The new frequency resolution of the Dynamical Power Spectrum.
Must be larger than the frequency resolution of the old Dynamical
Power Spectrum!
method: {``sum`` | ``mean`` | ``average``}, optional, default ``sum``
This keyword argument sets whether the counts in the new bins
should be summed or averaged.
"""
dynspec_new = []
for data in self.dyn_ps.T:
freq_new, bin_counts, bin_err, _ = \
utils.rebin_data(self.freq, data, dx_new=df_new,
method=method)
dynspec_new.append(bin_counts)
self.freq = freq_new
self.dyn_ps = np.array(dynspec_new).T
self.df = df_new
def trace_maximum(self, min_freq=None, max_freq=None, sigmaclip=False):
"""
Return the indices of the maximum powers in each segment :class:`Powerspectrum`
between specified frequencies.
Parameters
----------
min_freq: float, default ``None``
The lower frequency bound.
max_freq: float, default ``None``
The upper frequency bound.
Returns
-------
max_positions : np.array
The array of indices of the maximum power in each segment having
frequency between ``min_freq`` and ``max_freq``.
"""
if min_freq is None:
min_freq = np.min(self.freq)
if max_freq is None:
max_freq = np.max(self.freq)
max_positions = []
for ps in self.dyn_ps.T:
indices = np.logical_and(self.freq <= max_freq,
min_freq <= self.freq)
max_power = np.max(ps[indices])
max_positions.append(np.where(ps == max_power)[0][0])
return np.array(max_positions)
def rebin_time(self, dt_new, method='sum'):
"""
Rebin the Dynamic Power Spectrum to a new time resolution.
While the new resolution need not be an integer multiple of the
previous time resolution, be aware that if it is not, the last bin
will be cut off by the fraction left over by the integer division.
Parameters
----------
dt_new: float
The new time resolution of the Dynamical Power Spectrum.
Must be larger than the time resolution of the old Dynamical Power
Spectrum!
method: {"sum" | "mean" | "average"}, optional, default "sum"
This keyword argument sets whether the counts in the new bins
should be summed or averaged.
Returns
-------
time_new: numpy.ndarray
Time axis with new rebinned time resolution.
dynspec_new: numpy.ndarray
New rebinned Dynamical Power Spectrum.
"""
if dt_new < self.dt:
raise ValueError("New time resolution must be larger than "
"old time resolution!")
dynspec_new = []
for data in self.dyn_ps:
time_new, bin_counts, bin_err, _ = \
utils.rebin_data(self.time, data, dt_new,
method=method)
dynspec_new.append(bin_counts)
self.time = time_new
self.dyn_ps = np.array(dynspec_new)
self.dt = dt_new
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
198,
11748,
629,
541,
88,
13,
34242,
198,
11748,
629,
541,
88,
13,
487,
83,
8002,
198,
11748,
629,
541,
88,
13,
40085,
1096,
198,
117... | 2.488541 | 9,512 |
# Generated by Django 3.1.4 on 2021-06-23 15:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
3312,
12,
1954,
1315,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import requests
import json
from .helper import Helper
| [
11748,
7007,
198,
11748,
33918,
198,
198,
6738,
764,
2978,
525,
1330,
5053,
525,
198
] | 3.733333 | 15 |
ano_nasc = int(input("Informe o ano que você nasceu com 4 dígitos: "))
if ano_nasc >= 2006:
print("Você não poderá votar esse ano!")
else:
print("Você poderá votar esse ano!")
| [
5733,
62,
77,
3372,
796,
493,
7,
15414,
7203,
818,
687,
68,
267,
281,
78,
8358,
12776,
25792,
25221,
344,
84,
401,
604,
288,
8836,
18300,
418,
25,
366,
4008,
198,
361,
281,
78,
62,
77,
3372,
18189,
4793,
25,
198,
220,
220,
220,
... | 2.139535 | 86 |
from __future__ import print_function
import unittest
import wrapt
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
555,
715,
395,
198,
198,
11748,
7917,
457,
198
] | 3.45 | 20 |
# detects letters on a sheet
import cv2
import os
import numpy as np
import scipy.io as sio
os.chdir('character_recognition')
from oct2py import octave
os.chdir('..')
os.chdir('character_detection')
from arrange import arrange
from matplotlib import pyplot as plt
mat0 = sio.loadmat('weights/tr0.mat')
mat1 = sio.loadmat('weights/tr1.mat')
mat2 = sio.loadmat('weights/tr2.mat')
Theta0=mat0['Theta0']
Theta1=mat1['Theta1']
Theta2=mat2['Theta2']
cv2.destroyAllWindows()
ind=np.arange(256).reshape(256,1)
himn=20
histd=0
m=0
k=0
std=0.0
O=[]
Q=0.0
yt=0
zt=0
#cv2.namedWindow('edges')
#cv2.namedWindow('edge')
#cv2.createTrackbar('h','edge',0,255,nothing)
#cv2.createTrackbar('s','edge',1,500,nothing)
#cv2.createTrackbar('v','edge',1,255,nothing)
#cv2.createTrackbar('h1','edges',0,255,nothing)
#cv2.createTrackbar('s1','edges',0,255,nothing)
#cv2.createTrackbar('v1','edges',0,255,nothing)
#cv2.VideoCapture(0).release()
cap = cv2.VideoCapture(0)
ret, img = cap.read()
while(cap.isOpened()):
a=np.zeros(4,np.float32)
b=np.zeros(4,np.float32)
word=[]
c=[]
m=0
ret, img = cap.read()
_,img1=cap.read()
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = cv2.GaussianBlur(img,(9,9),0)
#cv2.imshow("aef",hsv)
gray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
blur=cv2.GaussianBlur(img,(9,9),0)
blur1=cv2.GaussianBlur(gray,(7,7),0)
th2 = cv2.adaptiveThreshold(blur1,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,11,16)
#h = cv2.getTrackbarPos('h','edge')
#s = cv2.getTrackbarPos('s','edge')
#v = cv2.getTrackbarPos('v','edge')
#h1 = cv2.getTrackbarPos('h1','edges')
#s1 = cv2.getTrackbarPos('s1','edges')
#v1 = cv2.getTrackbarPos('v1','edges')
if cv2.waitKey(3) == ord('p'):
cv2.imwrite("selfie.jpg",img)
edges = cv2.Canny(blur,0,100,apertureSize = 3)
edes = cv2.Canny(blur,0 ,100,apertureSize = 3)
contours0, hierarchy0 = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
lower_blue = np.array([0,0,0],np.uint16)
upper_blue = np.array([180,104,255],np.uint16)
mask = cv2.inRange(hsv,lower_blue, upper_blue)
mas = cv2.inRange(hsv,lower_blue, upper_blue)
contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
lower_blue1 = np.array([0,0,0],np.uint16)
upper_blue1 = np.array([180,125,255],np.uint16)
mask1 = cv2.inRange(hsv,lower_blue1, upper_blue1)
mas1= cv2.inRange(hsv,lower_blue1, upper_blue1)
contours1, hierarchy1 = cv2.findContours(mask1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
lower_blue2 = np.array([0,0,0],np.uint16)
upper_blue2 = np.array([180,115,255],np.uint16)
mask2 = cv2.inRange(hsv,lower_blue2, upper_blue2)
mas2 = cv2.inRange(hsv,lower_blue2, upper_blue2)
contours2, hierarchy2 = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
lower_blue3 = np.array([0,0,0],np.uint16)
upper_blue3 = np.array([255,84,255],np.uint16)
mask3 = cv2.inRange(hsv,lower_blue3, upper_blue3)
mas3 = cv2.inRange(hsv,lower_blue3, upper_blue3)
contours3, hierarchy3 = cv2.findContours(mask3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
hist_full0 = cv2.calcHist([blur1],[0],None,[256],[0,256]);
plt.plot(hist_full0);
#print himn
_,tt = cv2.threshold(blur1,himn-2*histd,255,cv2.THRESH_BINARY_INV)
#_,ttt = cv2.threshold(blur1,h,255,cv2.THRESH_BINARY_INV)
#cv2.imshow("jf",ttt)
contours4, hierarchy4 = cv2.findContours(tt,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours1)):
cnt=contours1[i]
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print cv2.contourArea(cnt)
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
for i in range(len(contours2)):
cnt=contours2[i]
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print cv2.contourArea(cnt)
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
for i in range(len(contours3)):
cnt=contours3[i]
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print cv2.contourArea(cnt)
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
for i in range(len(contours0)):
cnt=contours0[i]
epsilon = 0.1*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print approx
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
for i in range(len(contours)):
cnt=contours[i]
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print cv2.contourArea(cnt)
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
for i in range(len(contours4)):
cnt=contours4[i]
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx)==4 and cv2.contourArea(cnt)>100000 and cv2.contourArea(cnt)<250000:
#print cv2.contourArea(cnt)
pts1=np.float32(approx.reshape(4,2))
x,y,w,h = cv2.boundingRect(cnt)
cv2.drawContours(img,[cnt],0,(0,255,0),1)
m+=1
if m>0:
#im0=img[y:(y+h),x:(x+w)]
l=pts1[:,1]**2+pts1[:,0]**2
l=l.reshape(4,1)
a[0]=pts1[np.where(np.any(l==min(l),axis=1)),0]+20
b[0]=pts1[np.where(np.any(l==min(l),axis=1)),1]+20
a[3]=pts1[np.where(np.any(l==max(l),axis=1)),0]-20
b[3]=pts1[np.where(np.any(l==max(l),axis=1)),1]-20
a[1]=pts1[np.where(np.any((l!=max(l)) & (l!=min(l)) & (pts1[:,0]<pts1[:,1]).reshape(4,1),axis=1)),0]+20
b[1]=pts1[np.where(np.any((l!=max(l)) & (l!=min(l)) & (pts1[:,0]<pts1[:,1]).reshape(4,1),axis=1)),1]-20
a[2]=pts1[np.where(np.any((l!=max(l)) & (l!=min(l)) & (pts1[:,0]>pts1[:,1]).reshape(4,1),axis=1)),0]-20
b[2]=pts1[np.where(np.any((l!=max(l)) & (l!=min(l)) & (pts1[:,0]>pts1[:,1]).reshape(4,1),axis=1)),1]+20
pts1 = np.float32([[a[0],b[0]],[a[1],b[1]],[a[2],b[2]],[a[3],b[3]]])
pts2 = np.float32([[0,0],[0,300],[450,0],[450,300]])
M = cv2.getPerspectiveTransform(pts1,pts2)
d = cv2.warpPerspective(img,M,(450,300))
ds = cv2.warpPerspective(gray,M,(450,300))
dst = cv2.warpPerspective(th2,M,(450,300))
dst1 = cv2.warpPerspective(th2,M,(450,300))
hist_full = cv2.calcHist([ds],[0],None,[256],[0,256])
himn=np.average(ind,weights=hist_full)
#print np.average(ind,weights=hist_full)
histd=(np.average((ind-np.ones((256,1))*himn)**2,weights=hist_full))**0.5
ret,t = cv2.threshold(ds,himn-2*histd,255,cv2.THRESH_BINARY_INV)
ret,td = cv2.threshold(ds,himn-2*histd,255,cv2.THRESH_BINARY_INV)
contous, hierarch = cv2.findContours(td,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#cv2.imshow("asd",t)
#cv2.imshow("ad",td)
if cv2.waitKey(3) == ord('p'):
cv2.imwrite("lettrs.png",d)
(cont,indi,indl)=arrange(contous,hierarch)
for i in range(len(cont)):
cn=cont[i]
x1,y1,w1,h1 = cv2.boundingRect(cn)
cv2.rectangle(d,(x1,y1),(x1+w1,y1+h1),(0,255,0),1)
im0=t[y1:(y1+h1),x1:(x1+w1)]
black=np.zeros((np.shape(t)))
#kernel = np.ones((3,3),np.uint8)
#im0 = cv2.erode(im0,kernel,iterations = 1)
black[y1:(y1+h1),x1:(x1+w1)]=im0
im0=black[y1-h1/5:(y1+h1+h1/5),x1-w1/3:(x1+w1+w1/3)]
if w1/float(h1)<0.3:
im0=black[y1-h1/5:(y1+h1+h1/5),x1-3*w1:(x1+w1+3*w1)]
res = cv2.resize(im0,(20, 20), interpolation = cv2.INTER_CUBIC)
#print (w1/float(h1))
#cv2.imshow('edge',res)
#cv2.imshow('edge',d)
#cv2.waitKey(0)
res=res.astype(float)
cv2.normalize(res, res, 0, 1, cv2.NORM_MINMAX)
#print res
l=np.transpose(res).reshape(1,400)
c.append(l)
l=np.array(l)
#print np.shape(l)
#p=octave.predict(Theta1,Theta2,l)
#print chr(int(p)+64)
#k=k+1
#cv2.imshow("ex",d)
c=np.array(c)
u,o,r=np.shape(c)
#sio.savemat('RMI.mat', {'vect':c})
#break;
#cv2.imshow('edge',t)
#cv2.imshow('ed',mas1)
#print np.shape(c)
c=c.reshape(u,r)
p=octave.predict(Theta0,Theta1,Theta2,c);
#print p
#for i in range(len(p)):
# word.append(chr(p[i]+64));
#print "".join(word)
if k<8 and k>=3:
Q+=np.size(p)
std+=np.size(p)**2
#print np.size(p)
if k==8:
#print std
#print Q
std=((std/5)-(Q/5)**2)**0.5
Q=np.round(Q/5)
Q=int(Q)
#print std
#print Q
O=p[0:Q+1]
if std>0.5:
print(1)
break
elif k>8 and np.size(p)==Q:
#print O
#print O
for i in range(len(p)):
if indi:
if i==indi[yt]+1:
word.append(" ")
if yt< len(indi)-1:
yt=yt+1
if indl:
if i==indl[zt]+1:
word.append(" ")
if zt< len(indl)-1:
zt=zt+1
word.append(chr(p[i]+64));
print ("".join(word))
break
#break
#cv2.destroyAllWindows()
#break
k=k+1
#cv2.imshow('e',mas2)
cv2.imshow("edge",img1)
#print k
#cv2.imshow('ex',edes)
#cv2.waitKey(0)
#plt.hist(img.ravel(),256,[0,256]); plt.show()
if cv2.waitKey(3)==ord('q'):
break
cap.release()
cv2.waitKey(3)
cv2.destroyAllWindows()
| [
198,
2,
39382,
7475,
319,
257,
9629,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
418,
13,
354,
15908,
10786,
22769,
62,
26243,
653,
11537,
19... | 1.803347 | 5,258 |
from geo.calc import Calc
from geo.calc import Distance
from geo.geosp import Wt
from geo.geosp import Gh
from geo.files.csv_file import check | [
6738,
40087,
13,
9948,
66,
1330,
2199,
66,
198,
6738,
40087,
13,
9948,
66,
1330,
34600,
198,
6738,
40087,
13,
469,
2117,
1330,
370,
83,
198,
6738,
40087,
13,
469,
2117,
1330,
11972,
198,
6738,
40087,
13,
16624,
13,
40664,
62,
7753,
... | 3.227273 | 44 |
# This test code was written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
from hypothesis import given
from hypothesis import strategies as st
import threedframe.scad.label
# TODO: replace st.nothing() with an appropriate strategy
@given(
scad_object=st.none(),
params=st.just(Ellipsis),
)
@given(
content=st.text(),
halign=st.text(),
valign=st.text(),
depth=st.floats(),
size=st.floats(),
width=st.floats(),
center=st.booleans(),
)
| [
2,
770,
1332,
2438,
373,
3194,
416,
262,
4600,
36362,
313,
8497,
13,
26086,
13,
38933,
16002,
63,
8265,
198,
2,
290,
318,
2810,
739,
262,
17404,
13815,
12169,
1171,
7386,
22445,
13,
198,
198,
6738,
14078,
1330,
1813,
198,
6738,
14078,... | 2.890052 | 191 |
from help_handler import HelpHandler
| [
6738,
1037,
62,
30281,
1330,
10478,
25060,
628
] | 4.75 | 8 |
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Dict, Tuple, Optional
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
import numpy as np
# from dse_do_utils.datamanager import DataManager
from dse_do_utils.plotlymanager import PlotlyManager
from .fruitdatamanager import FruitDataManager
class FruitPlotlyManager(PlotlyManager):
"""Holds method that create Plotly charts.
Pass-in the DM as an input in the constructor.
"""
def plotly_demand_pie(self):
"""Pie chart of demand quantities.
Input tables: ['Demand']
Output tables: []
"""
df = (self.dm.demand
)
labels = {'product': 'Product Name', 'demand': 'Demand'}
fig = px.pie(df.reset_index(), values='demand', names='product',
title='Total product demand', labels=labels)
return fig
def plotly_demand_vs_inventory_bar(self):
"""Bar chart of demand vs inventory quantities.
Input tables: ['Demand', 'Inventory']
Output tables: []
"""
df = (self.dm.demand.join(self.dm.inventory).groupby(['product']).sum()
.reset_index()
.melt(id_vars=['product'], var_name='source', value_name='quantity')
)
# display(df.head())
labels = {'product': 'Product Name', 'demand': 'Demand', 'inventory': 'Inventory', 'quantity':'Quantity'}
fig = px.bar(df.reset_index(), x="product", y="quantity", title="Demand vs Inventory",
color="source", barmode='group',
labels=labels
) # , facet_row="timePeriodSeq")
# fig.update_xaxes(type='category')
return fig
@staticmethod
def make_traffic_light_gauge(value: float, title: str, orange_threshold: float, red_threshold: float, max_val: float):
"""
TODO: move to PlotlyManager?
"""
steps = [
{'range': [0, orange_threshold], 'color': 'green'},
{'range': [orange_threshold, red_threshold], 'color': 'orange'},
{'range': [red_threshold, max_val], 'color': 'red'},
]
fig = go.Figure(go.Indicator(
mode = "gauge+number",
value = value,
domain = {'x': [0, 1], 'y': [0, .75]},
title = {'text': title, 'font': {'color': 'black', 'size': 18}},
gauge = {'axis': {'range': [None, max_val], 'tickfont': {'color': 'black'}},
'threshold' : {'line': {'color': "darkred", 'width': 4}, 'thickness': 0.75, 'value': red_threshold},
'steps': steps,
'bar': {'color': "darkblue"},},
))
fig.update_layout(font = {'color': 'green' if value < orange_threshold else 'orange' if value > orange_threshold and value < red_threshold else 'red', 'family': "Arial"},
margin={'t':10,'b':30},
)
return fig
| [
2,
15069,
19764,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
309,
29291,
11,
32233,
198,
11748,
19798,
292,
355,
279,
67,
19... | 2.201449 | 1,380 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
def use_secret(secret_name:str, secret_volume_mount_path:str, env_variable:str=None, secret_file_path_in_volume:str=None):
"""
An operator that configures the container to use a secret.
This assumes that the secret is created and availabel in the k8s cluster.
Keyword Arguments:
secret_name {String} -- [Required] The k8s secret name.
secret_volume_mount_path {String} -- [Required] The path to the secret that is mounted.
env_variable {String} -- Env variable pointing to the mounted secret file. Requires both the env_variable and secret_file_path_in_volume to be defined.
The value is the path to the secret.
secret_file_path_in_volume {String} -- The path to the secret in the volume. This will be the value of env_variable.
Both env_variable and secret_file_path_in_volume needs to be set if any env variable should be created.
Raises:
ValueError: If not the necessary variables (secret_name, volume_name", secret_volume_mount_path) are supplied.
Or only one of env_variable and secret_file_path_in_volume are supplied
Returns:
[ContainerOperator] -- Returns the container operator after it has been modified.
"""
secret_name = str(secret_name)
if '{{' in secret_name:
volume_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=10)) + "_volume"
else:
volume_name = secret_name
for param, param_name in zip([secret_name, secret_volume_mount_path],["secret_name","secret_volume_mount_path"]):
if param == "":
raise ValueError("The '{}' must not be empty".format(param_name))
if bool(env_variable) != bool(secret_file_path_in_volume):
raise ValueError("Both {} and {} needs to be supplied together or not at all".format(env_variable, secret_file_path_in_volume))
return _use_secret | [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.859358 | 903 |
from .get_timetable import get_timetable
| [
6738,
764,
1136,
62,
16514,
316,
540,
1330,
651,
62,
16514,
316,
540,
198
] | 2.928571 | 14 |
"""adding database for prefect
Revision ID: cecfd30de918
Revises: 361fde4943a9
Create Date: 2021-07-27 16:30:44.676573
"""
from alembic import op
import sqlalchemy as sa
import psycopg2
# revision identifiers, used by Alembic.
revision = 'cecfd30de918'
down_revision = '361fde4943a9'
branch_labels = None
depends_on = None | [
37811,
26872,
6831,
329,
662,
2309,
198,
198,
18009,
1166,
4522,
25,
269,
721,
16344,
1270,
2934,
24,
1507,
198,
18009,
2696,
25,
47744,
69,
2934,
2920,
3559,
64,
24,
198,
16447,
7536,
25,
33448,
12,
2998,
12,
1983,
1467,
25,
1270,
... | 2.587302 | 126 |
from pwn import *
context.log_level = 'info'
exe = './mr_snowy'
# This will automatically get context arch, bits, os etc
elf = context.binary = ELF(exe, checksec=False)
# Start program
io = process('./mr_snowy')
#io = remote('138.68.174.27', 31056)
offset = 72
# Build the payload
payload = flat(
{offset: 0x00401165}
)
# Save the payload to file
write('payload', payload)
# Send the payload
io.sendlineafter('> ', "1")
io.sendlineafter('> ', payload)
io.recvuntil('[+] Here is the secret password to deactivate the camera: ')
# Get our flag!
flag = io.recv()
success(flag)
| [
6738,
279,
675,
1330,
1635,
198,
198,
22866,
13,
6404,
62,
5715,
796,
705,
10951,
6,
198,
198,
13499,
796,
705,
19571,
43395,
62,
82,
2197,
88,
6,
198,
2,
770,
481,
6338,
651,
4732,
3934,
11,
10340,
11,
28686,
3503,
198,
7046,
796... | 2.767773 | 211 |
#!/usr/bin/python3
# flake8: noqa
from builtins import __test_sink, __test_source
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
3170,
1040,
1330,
11593,
9288,
62,
82,
676,
11,
11593,
9288,
62,
10459,
628,
198
] | 2.545455 | 33 |
# Runs one episode of CustomEnv-v0 with a constant zero action and appends
# the controller's commanded motor values to ctrl.csv each step.
import csv
import gym
import envs
ENV_ID = "CustomEnv-v0"
my_env = gym.make(ENV_ID)
done = False
# NOTE(review): current_states/save_counter are attributes set directly on the
# env — assumes CustomEnv reads them; confirm against the env implementation.
my_env.current_states = my_env.reset()
my_env.save_counter = 10000
while not done:
    # current_action = my_contr.Controller_model(my_env.current_states, my_env.dt * my_env.counter, action=sl_action)
    # Step with a fixed 4-element zero action; `b` (reward) is unused.
    my_env.current_states, b, done, _ = my_env.step((0, 0, 0, 0))
    # presumably the controller's commanded rotor speeds — verify.
    fields = my_env.ctrl.w_cmd
    # Append one row per step; file reopened each iteration.
    with open("ctrl.csv", "a") as f:
        writer = csv.writer(f)
        writer.writerow(fields)
my_env.reset()
# if my_env.best_reward > current_best_rew:
#     current_best_rew = my_env.best_reward
#     with open("reward_step.csv", "a") as f:
#         writer = csv.writer(f)
#         writer.writerow(sl_action)
11748,
269,
21370,
198,
11748,
11550,
198,
11748,
551,
14259,
198,
198,
1677,
53,
62,
2389,
796,
366,
15022,
4834,
85,
12,
85,
15,
1,
198,
1820,
62,
24330,
796,
11550,
13,
15883,
7,
1677,
53,
62,
2389,
8,
198,
28060,
796,
10352,
1... | 2.289902 | 307 |
# Driver for a Grain-128 stream cipher implementation: builds the NFSR/LFSR
# state from an all-zero key/IV test vector and prints the keystream.
from bitstring import BitArray, BitStream
# Definition of constants
KEY_SIZE = 128
REGISTER_SIZE = 128
LAST_INDEX = 127
INITIALIZATION_CLOCKS = 256
if __name__ == '__main__':
    # Define number of rounds
    NO_ROUNDS = 25
    # Per the Grain-128 spec the LFSR is loaded with the IV padded with ones.
    ONES_PADDING = BitArray('0xFFFFFFFF')
    # Test vector of GRAIN(128 bit key)
    # IV                          Key                                 Keystream
    # 000000000000000000000000    00000000000000000000000000000000    0fd9deefeb6fad437bf43fce35849cfe Big-Endian
    # 000000000000000000000000    00000000000000000000000000000000    db032aff3788498b57cb894fffb6bb96 Little-Endian
    # =====================================
    # 0123456789abcdef12345678    0123456789abcdef123456789abcdef0    f09b7bf7d7f6b5c2de2ffc73ac21397f Big-Endian
    # 0123456789abcdef12345678    0123456789abcdef123456789abcdef0    afb5babfa8de896b4b9c6acaf7c4fbfd Little-Endian
    # =====================================
    initialization_vector = BitArray('0x000000000000000000000000')
    key = BitArray('0x00000000000000000000000000000000')
    # Key and IV Initialization
    # NFSR starts as a copy of the key; LFSR as padding + IV.
    nfsr = key[:]
    lfsr = ONES_PADDING + initialization_vector[:]
    # NOTE(review): initialization() and cipher() are defined elsewhere in
    # this file (not visible in this chunk).
    nfsr, lfsr = initialization(nfsr, lfsr)
    keystream = cipher(nfsr, lfsr)
    print("IV: ", initialization_vector.hex)
    print("key: ", key.hex)
    print("keystream: ", keystream.hex)
6738,
1643,
8841,
1330,
4722,
19182,
11,
4722,
12124,
198,
198,
2,
30396,
286,
38491,
198,
20373,
62,
33489,
796,
13108,
198,
31553,
41517,
62,
33489,
796,
13108,
198,
43,
11262,
62,
12115,
6369,
796,
18112,
198,
1268,
2043,
12576,
1488... | 2.481752 | 548 |
"""Packaging script for tairClient."""
from setuptools import setup, find_packages
from os import path

# Absolute path of the directory containing this setup.py.
this_directory = path.abspath(path.dirname(__file__))

# pypandoc is an optional build-time dependency used to convert README.md to
# reStructuredText for PyPI; fall back to an empty description rather than
# failing the build when it is missing or conversion fails.
# Fix: the original used a bare `except:`, which also swallows SystemExit
# and KeyboardInterrupt.
try:
    import pypandoc
    long_description = pypandoc.convert_file('README.md', 'rst')
except Exception:
    long_description = ""

# Read runtime dependencies, one per line. Fix: use a context manager so the
# file handle is closed deterministically (the original leaked it).
with open("requirements.txt") as req_file:
    requirements = [line.strip() for line in req_file.readlines()]

setup(
    name='tairClient',
    version='0.1.3',
    description='a client extends redis.py that gives developers easy access to tair or tairModules',
    author='Cheng Jiang',
    author_email='jiangcheng17@mails.ucas.ac.cn',
    url='https://github.com/631086083/tairClient',
    packages=['tairClient'],
    install_requires=requirements,
    keywords='tair redis tairHash tairString',
    long_description=long_description,
    python_requires=">=3.6"
)
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
198,
5661,
62,
34945,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
198,
28311,
25,
198,
220,
220,... | 2.721254 | 287 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the functionality of the fuzz_target module."""
import subprocess
import unittest
from unittest.mock import call
from unittest import mock
from pyfakefs import fake_filesystem_unittest
import docker
# pylint: disable=no-self-use,too-few-public-methods,protected-access
class TestGetProjectImageName(unittest.TestCase):
  """Unit tests for docker.get_project_image_name."""

  def test_get_project_image_name(self):
    """Checks the image name is derived from the OSS-Fuzz project name."""
    self.assertEqual(docker.get_project_image_name('myproject'),
                     'gcr.io/oss-fuzz/myproject')
class TestDeleteImages(unittest.TestCase):
  """Tests for delete_images."""

  @mock.patch('utils.execute')
  def test_delete_images(self, mocked_execute):
    """Tests that delete_images deletes images."""
    images = ['myimage1', 'myimage2']
    docker.delete_images(images)
    # Expect a forced image removal followed by a builder-cache prune.
    mocked_execute.assert_has_calls([
        call(['docker', 'rmi', '-f'] + images),
        call(['docker', 'builder', 'prune', '-f'])
    ])
class TestStopDockerContainer(unittest.TestCase):
  """Unit tests for docker.stop_docker_container."""

  @mock.patch('subprocess.run', return_value=mock.MagicMock(returncode=0))
  def test_stop_docker_container(self, mocked_run):
    """Checks the correct `docker stop` command runs and success is reported."""
    cid = 'container-id'
    timeout_seconds = 100
    stopped = docker.stop_docker_container(cid, timeout_seconds)
    mocked_run.assert_called_with(
        ['docker', 'stop', cid, '-t', str(timeout_seconds)], check=False)
    self.assertTrue(stopped)
class TestHandleTimedOutContainerProcess(fake_filesystem_unittest.TestCase):
  """Tests for _handle_timed_out_container_process."""
  # The sentinel (None, None) result returned on every error path.
  ERROR_EXPECTED_RESULT = (None, None)
  CONTAINER_ID = 'container-id'
  CID_FILENAME = '/cid-file'
  # NOTE(review): these tests rely on CID_FILENAME existing in the fake
  # filesystem and containing CONTAINER_ID; the fixture that creates it is
  # not visible in this chunk — confirm setUp does so.

  @mock.patch('logging.error')
  def test_unreadable_file(self, mocked_error):
    """Tests that _handle_timed_out_container_process doesn't exception when the
    cidfile doesn't exist."""
    fake_cid_file = '/tmp/my-fake/cid-file'
    result = docker._handle_timed_out_container_process(mock.MagicMock(),
                                                       fake_cid_file)
    self.assertEqual(result, self.ERROR_EXPECTED_RESULT)
    mocked_error.assert_called_with('cid_file not found.')

  @mock.patch('logging.error')
  @mock.patch('docker.stop_docker_container')
  def test_stop_docker_container_failed(self, mocked_stop_docker_container,
                                        mocked_error):
    """Tests that _handle_timed_out_container_process behaves properly when it
    fails to stop the docker container."""
    mocked_stop_docker_container.return_value = False
    result = docker._handle_timed_out_container_process(mock.MagicMock(),
                                                       self.CID_FILENAME)
    # The container id read from the cid file is what gets stopped.
    mocked_stop_docker_container.assert_called_with(self.CONTAINER_ID)
    self.assertEqual(result, self.ERROR_EXPECTED_RESULT)
    mocked_error.assert_called_with('Failed to stop docker container: %s',
                                    self.CONTAINER_ID)

  @mock.patch('logging.error')
  @mock.patch('docker.stop_docker_container')
  def test_handle_timed_out_container_process(self,
                                              mocked_stop_docker_container,
                                              mocked_error):
    """Tests that test_handle_timed_out_container_process works as intended."""
    mocked_stop_docker_container.return_value = True
    process = mock.MagicMock()
    # Success path: the process's output is whatever communicate() returns.
    process.communicate = lambda *args, **kwargs: None
    result = docker._handle_timed_out_container_process(process,
                                                       self.CID_FILENAME)
    # communicate returns None because of the way we mocked Popen.
    self.assertIsNone(result)
    mocked_error.assert_not_called()
class TestRunContainerCommand(unittest.TestCase):
  """Tests for run_container_command."""
  # Arbitrary argument list passed through to the container command.
  ARGUMENTS = ['argument']

  @mock.patch('docker._handle_timed_out_container_process',
              return_value=(None, None))
  @mock.patch('logging.warning')
  @mock.patch('subprocess.Popen')
  def test_timeout(self, mocked_popen, mocked_warning, _):
    """Tests run_container_command behaves as expected when the command times
    out."""
    popen_magic_mock = mock.MagicMock()
    mocked_popen.return_value = popen_magic_mock
    # Simulate the subprocess exceeding its deadline.
    popen_magic_mock.communicate.side_effect = subprocess.TimeoutExpired(
        ['cmd'], '1')
    result = docker.run_container_command(self.ARGUMENTS)
    # A timeout should be logged once and reflected in the result.
    self.assertEqual(mocked_warning.call_count, 1)
    self.assertTrue(result.timed_out)

  @mock.patch('docker._handle_timed_out_container_process')
  @mock.patch('subprocess.Popen')
  def test_run_container_command(self, mocked_popen,
                                 mocked_handle_timed_out_container_process):
    """Tests run_container_command behaves as expected."""
    popen_magic_mock = mock.MagicMock()
    mocked_popen.return_value = popen_magic_mock
    popen_magic_mock.communicate.return_value = (None, None)
    mocked_handle_timed_out_container_process.return_value = (None, None)
    result = docker.run_container_command(self.ARGUMENTS)
    # No timeout occurred, so the timeout handler must not run.
    mocked_handle_timed_out_container_process.assert_not_called()
    self.assertFalse(result.timed_out)
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 2.566594 | 2,305 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import time
import numpy as np
import torch
from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
import torch.nn as nn
import torch.optim as optim
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.decomposition import PCA
from allennlp.modules.elmo import Elmo, batch_to_ids
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper, ELMoTokenCharactersIndexer
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
# from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertModel, BertConfig
# from pytorch_pretrained_bert.tokenization import BertTokenizer
# from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
# from bias_data.def_sent_pairs import full_def_sent_pairs, thisis_def_sent_pairs, expanded_thisis
# from bias_data.more_def_sent_pairs import full_def_sent_pairs
from bias_data.def_sent_pairs import pairs_dict
from allennlp.commands.elmo import ElmoEmbedder
# Module-level setup: logger, compute device, run configuration and the
# shared ELMo embedder used by the training code.
logger = logging.getLogger(__name__)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): `Config` is neither defined nor imported in this chunk —
# presumably defined elsewhere in the file; confirm.
config = Config(
    testing=True,
    seed=1,
    batch_size=64,
    lr=3e-4,
    epochs=2,
    hidden_sz=64,
    max_seq_len=100, # necessary to limit memory usage
    max_vocab_size=100000,
)
# Pretrained ELMo weights/options hosted by AllenNLP (downloaded on first use).
OPTIONS_FILE = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
WEIGHT_FILE = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
# Single output representation, no layer norm, no dropout.
elmo = Elmo(options_file=OPTIONS_FILE, weight_file=WEIGHT_FILE,
            do_layer_norm=False, dropout=0.0, num_output_representations=1).to(device)
class InputExample(object):
    """One training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Create an example.

        Args:
            guid: Unique identifier for the example.
            text_a: string. Untokenized text of the first sequence; for
                single-sequence tasks this is the only text required.
            text_b: (Optional) string. Untokenized text of the second
                sequence, used only for sequence-pair tasks.
            label: (Optional) label of the example. Set for train and dev
                examples, left as None for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
# NOTE(review): empty placeholder — no attributes or methods are defined in
# this chunk; presumably features are attached by callers. Confirm.
class InputFeatures(object):
    """A single set of features of data."""
class DataProcessor(object):
    """Abstract base for sequence-classification data-set converters."""

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the train split; subclasses override."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split; subclasses override."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of labels for this data set; subclasses override."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file and return its rows as lists of strings."""
        with open(input_file, "r", encoding="utf-8") as tsv_file:
            rows = []
            for row in csv.reader(tsv_file, delimiter="\t", quotechar=quotechar):
                # Legacy Python 2 path: decode each cell to unicode.
                if sys.version_info[0] == 2:
                    row = [unicode(cell, 'utf-8') for cell in row]  # noqa: F821
                rows.append(row)
            return rows
class ColaElmoProcessor(DataProcessor):
    """CoLA (GLUE) processor variant used by the ELMo pipeline."""

    def get_labels(self):
        """Return the binary acceptability labels used by CoLA."""
        labels = ["0", "1"]
        return labels
class Sst2ElmoProcessor(DataProcessor):
    """SST-2 (GLUE) processor variant used by the ELMo pipeline."""

    def get_labels(self):
        """Return the binary sentiment labels used by SST-2."""
        return ["0", "1"]

    def _create_examples(self, rows, set_type):
        """Build `InputExample`s from TSV rows, skipping the header row.

        Each data row holds the sentence in column 0 and the string label in
        column 1; labels are mapped to their index in get_labels().
        """
        label_to_id = {name: idx for idx, name in enumerate(self.get_labels())}
        examples = []
        for row_index, row in enumerate(rows):
            if row_index == 0:  # header row
                continue
            examples.append(
                InputExample(guid="%s-%s" % (set_type, row_index),
                             text_a=row[0],
                             text_b=None,
                             label=label_to_id[row[1]]))
        return examples
class Sst2Processor(DataProcessor):
    """SST-2 (GLUE) data set processor."""

    def get_train_examples(self, data_dir):
        """Read train.tsv from data_dir and convert it to examples."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """Read dev.tsv from data_dir and convert it to examples."""
        rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(rows, "dev")

    def get_labels(self):
        """Return the binary sentiment labels used by SST-2."""
        return ["0", "1"]

    def _create_examples(self, rows, set_type):
        """Build `InputExample`s from TSV rows, skipping the header row.

        Each data row holds the sentence in column 0 and the string label in
        column 1; labels are mapped to their index in get_labels().
        """
        label_to_id = {name: idx for idx, name in enumerate(self.get_labels())}
        examples = []
        for row_index, row in enumerate(rows):
            if row_index == 0:  # header row
                continue
            examples.append(
                InputExample(guid="%s-%s" % (set_type, row_index),
                             text_a=row[0],
                             text_b=None,
                             label=label_to_id[row[1]]))
        return examples
class ColaProcessor(DataProcessor):
    """CoLA (GLUE) data set processor."""

    def get_train_examples(self, data_dir):
        """Read train.tsv from data_dir and convert it to examples."""
        rows = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """Read dev.tsv from data_dir and convert it to examples."""
        rows = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(rows, "dev")

    def get_labels(self):
        """Return the binary acceptability labels used by CoLA."""
        return ["0", "1"]

    def _create_examples(self, rows, set_type):
        """Build `InputExample`s from TSV rows.

        CoLA files carry no header row; the sentence lives in column 3 and
        the string label in column 1, mapped to its index in get_labels().
        """
        label_to_id = {name: idx for idx, name in enumerate(self.get_labels())}
        examples = []
        for row_index, row in enumerate(rows):
            examples.append(
                InputExample(guid="%s-%s" % (set_type, row_index),
                             text_a=row[3],
                             text_b=None,
                             label=label_to_id[row[1]]))
        return examples
if __name__ == "__main__":
main()
'''
cola biased:
CUDA_VISIBLE_DEVICES=2 python run_elmo.py --output_dir elmo-results/CoLA-lstm --task_name CoLA --do_eval --do_lower_case --data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA --bert_model bert-base-uncased --max_seq_length 128 --train_batch_size 32 --learning_rate 0.001 --num_train_epochs 50.0 --normalize --do_train
CUDA_VISIBLE_DEVICES=2 \
python run_elmo.py --output_dir elmo-results/CoLA-lstm-biased \
--task_name CoLA \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
mcc: 39.1
cola debias:
CUDA_VISIBLE_DEVICES=3 \
python run_elmo.py --output_dir elmo-results/CoLA-lstm-debiased \
--debias \
--task_name CoLA \
--do_eval \
--do_train \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/CoLA \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 7.0 \
--normalize \
--debias
sst biased:
screen: elmo-sst-biased
CUDA_VISIBLE_DEVICES=0 \
python run_elmo.py --output_dir elmo-results/SST-2-lstm-biased \
--task_name SST-2 \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/SST-2 \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
sst debiased:
screen: elmo-sst-debias
CUDA_VISIBLE_DEVICES=1 \
python run_elmo.py --output_dir elmo-results/SST-2-lstm-debias \
--task_name SST-2 \
--debias \
--do_eval \
--do_lower_case \
--data_dir /media/bighdd7/irene/debias/sent_tasks/glue_data/SST-2 \
--bert_model bert-base-uncased \
--max_seq_length 128 \
--train_batch_size 32 \
--learning_rate 0.001 \
--num_train_epochs 50.0 \
--normalize \
--do_train
'''
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
3012,
9552,
15417,
4816,
46665,
290,
383,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
15069,
357,
66,
8,
2864,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,... | 2.594409 | 3,649 |
"""
Collect PSF files, run
ex for using the average file:
gp = piff.GPInterp2pcf(kernel="0.009 * RBF(300.*0.26)",
optimize=fit_hyp, white_noise=1e-5, average_fits='output/average.fits')
gp.initialize(stars_training)
gp.solve(stars_training)
"""
from __future__ import print_function, division
import os
import glob
from sklearn.neighbors import KNeighborsRegressor
import fitsio
import piff
import pandas as pd
from fit_psf import plot_2dhist_shapes
if __name__ == '__main__':
    # Command-line interface: parse flags and forward them as keyword
    # arguments to call_meanify.
    # NOTE(review): call_meanify is not defined in this chunk — presumably
    # defined elsewhere in the file; confirm.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--overwrite', action='store_true', dest='overwrite',
                        help='Overwrite existing output')
    parser.add_argument('-n', action='store', dest='n', type=int, default=0, help='Number of fits to meanify')
    # Positional argument (no option string): named via dest.
    parser.add_argument(action='store', dest='run_config_path',
                        help='Run config to load up and do')
    options = parser.parse_args()
    # Namespace -> dict so it can be splatted into the call.
    kwargs = vars(options)
    call_meanify(**kwargs)
| [
37811,
198,
31337,
6599,
37,
3696,
11,
1057,
198,
198,
1069,
329,
1262,
262,
2811,
2393,
25,
198,
220,
220,
220,
220,
220,
220,
220,
27809,
796,
279,
733,
13,
16960,
9492,
79,
17,
79,
12993,
7,
33885,
2625,
15,
13,
28694,
1635,
17... | 2.453704 | 432 |
########################################################################################################
# DEFAULT CONFIG FILE VFX
########################################################################################################
# GLOBAL
FULL_SCREEN = False
# VIDEO
VIDEO_WINDOW_NAME = "SAW2F2"
VIDEO_SCREEN_SIZE = (1280, 720)  # (width, height) in pixels
# AUDIO
AUDIO_DEFAULT_TRIGGER = False
AUDIO_RATE = 44100  # sample rate in Hz
AUDIO_CHANNELS = 1  # mono capture
AUDIO_INPUT_DEVICE_INDEX = None #2
AUDIO_INPUT = True
AUDIO_CHUNK = 512  # frames per buffer
AUDIO_RECORD_SECONDS = 5
# MIDI
MIDI_DEFAULT_INPUT_ID = None #3
MIDI_DEFAULT_TRIGGER = False
MIDI_CHANNEL = 1
# GPIO
# GPIO disabled by default; pin numbers below are placeholders (0).
GPIO = False
GPIO_IN_BTN_PREV = 0
GPIO_IN_BTN_NEXT = 0
GPIO_IN_BTN_AUDIO_TRIG = 0
GPIO_IN_BTN_MIDI_TRIG = 0
GPIO_IN_BTN_OSD_MENU = 0
GPIO_OUT_LED_AUDIO_TRIG = 0
GPIO_OUT_LED_MIDI_TRIG = 0
29113,
29113,
29113,
7804,
198,
2,
5550,
38865,
25626,
45811,
569,
17213,
198,
29113,
29113,
29113,
7804,
198,
198,
2,
10188,
9864,
1847,
198,
37,
9994,
62,
6173,
2200,
1677,
796,
10352,
198,
198,
2,
35507,
198,
42937,
62,
28929,
3913,
... | 2.596721 | 305 |
import asyncio
import asynctest
import pytest
from diffuse import worker
| [
11748,
30351,
952,
198,
198,
11748,
355,
2047,
310,
395,
198,
11748,
12972,
9288,
198,
198,
6738,
42864,
1330,
8383,
628
] | 3.619048 | 21 |
# Exercise: display the result of 2a x 3b, where a is 3 and b is 5.
a = 3
b = 5
resultado = 2 * a * 3 * b
print(resultado)
2,
16319,
260,
6862,
23781,
1430,
64,
8358,
409,
23718,
267,
1255,
4533,
390,
362,
64,
2124,
513,
65,
11,
795,
8358,
257,
410,
1000,
513,
304,
275,
410,
1000,
642,
198,
198,
64,
796,
513,
198,
65,
796,
642,
198,
4798,
7,
17,
163... | 2.25 | 52 |
import cutter
from .extras import hex_pad, file_name
| [
11748,
38121,
198,
6738,
764,
2302,
8847,
1330,
17910,
62,
15636,
11,
2393,
62,
3672,
628,
198
] | 3.235294 | 17 |
#!/usr/bin/env python
# Poll an MFRC522 RFID reader and run the shell command mapped to each tag id.
import RPi.GPIO as GPIO
import SimpleMFRC522
import os, pickle, time
# NOTE(review): duplicate import — pickle is already imported above.
import pickle
reader = SimpleMFRC522.SimpleMFRC522()
# Mapping of tag id -> shell command.
tags = {}
# NOTE(review): load_tags() is not defined in this chunk — presumably defined
# elsewhere in the file and expected to populate `tags`; confirm.
load_tags()
try:
    while True:
        print("Hold a tag next to the reader")
        # blocks until a tag is presented; `id` shadows the builtin.
        id = reader.read_id()
        command = tags.get(id, None)
        if command:
            print(command)
            # Runs the mapped command through the shell — only safe because
            # the commands come from the local tag mapping, not user input.
            os.system(command)
        time.sleep(2) # prevent too many repeats
finally:
    print("cleaning up")
    GPIO.cleanup()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
17427,
49800,
7397,
49542,
198,
11748,
28686,
11,
2298,
293,
11,
640,
198,
11748,
2298,
293,
198,
198,
46862,
796,
17427,
... | 2.132231 | 242 |