content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
import urllib.request, json, datetime, time
from urllib.request import urlopen
from pathlib import Path
csv_file = Path(__file__).parents[0] / 'data' / 'gdax-bitcoin.csv'
if __name__ == '__main__':
while True:
now = datetime.datetime.now()
while (now.second % 5):
now = datetime.datetime.now()
print(now.second)
time.sleep(0.5)
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
2956,
297,
571,
13,
25927,
11,
33918,
11,
4818,
8079,
11,
640,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
40664,
... | 2.21466 | 191 |
#
# @lc app=leetcode.cn id=251 lang=python3
#
# [251] flatten-2d-vector
#
None
# @lc code=end | [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
13,
31522,
4686,
28,
28072,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
28072,
60,
27172,
268,
12,
17,
67,
12,
31364,
198,
2,
198,
14202,
198,
2,
2488,
44601,
2438,
28,
437
] | 2.113636 | 44 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields, tools, _
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
6738,
16298,
2238,
1330,
40391,
11,
4981,
11,
7032,
11,
4899,
... | 3.104167 | 48 |
from typing import Optional
import numpy as np
import tensorflow as tf
class PositionalEncoding(tf.keras.layers.Layer):
"""
Create a positional encoding layer, usually added on top of an embedding layer.
Embeds information about the position of the elements using the formula
.. math::
PE[pos,2i]=sin\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)
PE[pos,2i+1]=cos\\left(\\frac{pos}{normalize\\_factor^{\\frac{2i}{embedding\\_dim}}}\\right)
The resulting embedding gets added (point-wise) to the input.
Arguments
---------
- `max_sequence_length` (``int``): Maximum sequence length of input
- `embedding_dim` (``int``): Dimensionality of the of the input's last dimension
- `normalize_factor` (``float``): Normalize factor
- `name` (``str``): Layer name
Input shape
-----------
(batch_size, time_steps, channels) where time_steps equals to the ``max_sequence_length`` and channels to ``embedding_dim``
Output shape
------------
Same shape as input.
Examples
--------
.. code-block:: python3
import tensorflow as tf
import tavolo as tvl
model = tf.keras.Sequential([tf.keras.layers.Embedding(vocab_size, 8, input_length=max_sequence_length),
tvl.embeddings.PositionalEncoding(max_sequence_length=max_sequence_length,
embedding_dim=8)]) # Add positional encoding
References
----------
`Attention Is All You Need`_
.. _Attention Is All You Need:
https://arxiv.org/abs/1706.03762
"""
def __init__(self,
max_sequence_length: int,
embedding_dim: int,
normalize_factor: Optional[float] = 10000,
name: Optional[str] = 'positional_encoding',
**kwargs):
"""
:param max_sequence_length: Maximum sequence length of input
:param embedding_dim: Dimensionality of the of the input's last dimension
:param normalize_factor: Normalize factor
:param name: Layer name
"""
super().__init__(name=name, **kwargs)
# Error checking
if max_sequence_length < 1:
raise ValueError(
'max_sequence_length must be greater than zero. (value provided {})'.format(max_sequence_length))
if embedding_dim < 1:
raise ValueError(
'embedding_dim must be greater than zero. (value provided {})'.format(max_sequence_length))
# First part of the PE function: sin and cos argument
self.positional_encoding = np.array([
[pos / np.power(normalize_factor, 2. * i / embedding_dim) for i in range(embedding_dim)]
for pos in range(max_sequence_length)])
# Second part, apply the cosine to even columns and sin to odds.
self.positional_encoding[:, 0::2] = np.sin(self.positional_encoding[:, 0::2])
self.positional_encoding[:, 1::2] = np.cos(self.positional_encoding[:, 1::2])
self.positional_encoding = self.add_variable(
'embedding_matrix',
shape=self.positional_encoding.shape,
initializer=tf.keras.initializers.Constant(self.positional_encoding),
trainable=False,
dtype=self.dtype)
| [
6738,
19720,
1330,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
4871,
18574,
1859,
27195,
7656,
7,
27110,
13,
6122,
292,
13,
75,
6962,
13,
49925,
2599,
198,
220,
220,
220,
37227,
... | 2.369701 | 1,439 |
with open("data.in") as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
lines = lines[0].split(",")
arr = []
for ln in lines:
arr.append(int(ln))
for i in range(80):
for j in range(0, len(arr)):
if arr[j] <= 0:
arr[j] = 6
arr.append(8)
else:
arr[j] -= 1
print("1: " + str(len(arr)))
file.seek(0)
lines = file.readlines()
lines = [line.rstrip() for line in lines]
lines = lines[0].split(",")
lines = list(map(int, lines))
fish = [lines.count(i) for i in range(9)]
for i in range(256):
num = fish.pop(0)
fish[6] += num
fish.append(num)
print("2: " + str(sum(fish))) | [
4480,
1280,
7203,
7890,
13,
259,
4943,
355,
2393,
25,
198,
220,
220,
220,
3951,
796,
2393,
13,
961,
6615,
3419,
198,
220,
220,
220,
3951,
796,
685,
1370,
13,
81,
36311,
3419,
329,
1627,
287,
3951,
60,
198,
220,
220,
220,
3951,
796... | 1.971722 | 389 |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class PasswordManagerEnabledTest(ChromeEnterpriseTestCase):
"""Test the PasswordManagerEnabled policy.
See https://cloud.google.com/docs/chrome-enterprise/policies/?policy=PasswordManagerEnabled"""
@before_all
@test
@test
| [
2,
15069,
13130,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
1174... | 3.597484 | 159 |
from abc import abstractmethod
| [
6738,
450,
66,
1330,
12531,
24396,
198
] | 4.428571 | 7 |
import torch
import torch.nn.functional as F
import torch.nn as nn
from collections import OrderedDict
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
220,
220,
220,
220,
198,
4871,
4274,
62,
42946,
7,
20471,
13,
26796,
2599,... | 2.762463 | 341 |
# We are goint to be creating a viewset
# and basing it of the combination of generic viewset
# and we are specifically going to use the list model mixins
# > A django rest frameworke feature
# where you can pull in different parts of a viewset
# that we want to use for our application
# > so we only want to take the list model function
# we do not want to the create, update, delete functions
# > we can achive this be a combination of the
# generic viewset and the list model mixins
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
# import the Tag model class
from core.models import Tag, Ingredient, Recipe
# import the serializer
from recipe import serializers
# Create your views here.
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin,):
"""Base viewset for user owned recipe attributes"""
# requires authentication to access the Tag
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
# override get_queryset() mtd for ListModelMixin
# to filter object by the authenticated user
def get_queryset(self):
"""Return objects for the current authenticated user only"""
# self.queryset is referencing the queryset
# 'queryset = Tag.objects.all()'
# or 'queryset = Ingredient.objects.all()'
# then the filtering is performed in the overriden mtd
# then order by tag name
return self.queryset.filter(user=self.request.user).order_by('-name')
# overide perform_create for CreateModelMixin
# it allows us to hook into the create proceswe do a create object
# so that when we fo a create object in our ViewSet
# the validated serializer will be passed in as a serializer argument
# and we can perform any modifications that we like
def perform_create(self, serializer):
"""Create a new Object e.g. Tag or Ingredient
"""
serializer.save(user=self.request.user)
# Create your views here.
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
# select all
queryset = Tag.objects.all()
# serializer class
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
# select all
queryset = Ingredient.objects.all()
# serializer class
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
# add serializer class
serializer_class = serializers.RecipeSerializer
# add the recipe class
queryset = Recipe.objects.all()
# add athentication classes
# so that user must be authenticated to be permited to have access
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
# create a private function
# to convert ids to tags
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
# override get_queryset()
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
# query_params: all of the query params provided in the request
# 'tags': one of the query params string provided
tags = self.request.query_params.get('tags')
# 'ingredients': one of the query params string we have provided
ingredients = self.request.query_params.get('ingredients')
# get queryset before we apply filters
queryset = self.queryset
# if tags is not None
if tags:
# converts all the tag string ids to tag int ids
tag_ids = self._params_to_ints(tags)
# tags__id__in: django syntax for filtering on FK objects
# we have a 'tags' field in our recipe queryset
# that has a FK to the tags table that has an 'id'
# if you want to filter by the remot table you do 'tags__id'
# then you can apply another function like 'in'
# to become 'tags__id__in'
# which then means return all of the tags
# where the id is in this list that we provide
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
# limit the object to the authenticated user
# return self.queryset.filter(user=self.request.user)
return queryset.filter(user=self.request.user)
# override get_serializer_class()
def get_serializer_class(self):
"""Return appropriate serializer class"""
# ViewSet actions are:
# list, create, retrieve, update, partial update, and destroy
# > retrieve is the action used for detailed view
# The self.action contains the action of the request currently used
# therefore, check that action currently used is the retrieve action
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
# override perform_create()
def perform_create(self, serializer):
"""Create a new recipe"""
# viewsets.ModelViewSet allows you to create objects out of the box
# so the default is that if you assign a serializer_class
# serializer_class = serializers.RecipeSerializer
# and its assigned to a model
# then it knows how to create new objects with that model
# when you do a HTTP POST
# > hence what we need to do is to assign authenticated user
# to that model once it has been created
serializer.save(user=self.request.user)
# override the upload_image()
# -methods=[]: mtd your action will use, 'GET', 'POST', 'PUT', 'PATCH'
# -detail=True: means use only the detail url to upload images
# also you will be able to upload images for resipes that already exist
# -url_path: path name for our urls
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
# retrieve the recipe object, based on the ID/PK
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
# check if serializer is valied
if serializer.is_valid():
serializer.save()
# return good response
return Response(
serializer.data,
status=status.HTTP_200_OK
)
# else return invalied response
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
| [
198,
198,
2,
775,
389,
467,
600,
284,
307,
4441,
257,
1570,
2617,
198,
2,
290,
1615,
278,
340,
286,
262,
6087,
286,
14276,
1570,
2617,
198,
2,
290,
356,
389,
5734,
1016,
284,
779,
262,
1351,
2746,
5022,
1040,
198,
2,
1875,
317,
... | 2.766768 | 2,624 |
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.const import *
CONF_UPDATE_INSTANT = "update_instant"
CONF_MAPPING = 'mapping'
CONF_CONTROL_PARAMS = 'params'
CONF_CLOUD = 'update_from_cloud'
CONF_MODEL = 'model'
CONF_SENSOR_PROPERTY = "sensor_property"
CONF_SENSOR_UNIT = "sensor_unit"
CONF_DEFAULT_PROPERTIES = "default_properties"
ATTR_STATE_VALUE = "state_value"
ATTR_MODEL = "model"
ATTR_FIRMWARE_VERSION = "firmware_version"
ATTR_HARDWARE_VERSION = "hardware_version"
DOMAIN = 'xiaomi_miot_raw'
SUPPORTED_DOMAINS = [
"sensor",
"switch",
"light",
"fan",
"cover",
"humidifier",
"media_player",
"climate",
"lock",
"water_heater",
]
DEFAULT_NAME = "Xiaomi MIoT Device"
DUMMY_IP = "255.255.255.255"
DUMMY_TOKEN = "00000000000000000000000000000000"
SCHEMA = {
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,
vol.Optional(CONF_CLOUD): vol.All(),
vol.Optional('cloud_write'):vol.All(),
vol.Required(CONF_MAPPING):vol.All(),
vol.Required(CONF_CONTROL_PARAMS):vol.All(),
vol.Optional(CONF_SENSOR_PROPERTY): cv.string,
vol.Optional(CONF_SENSOR_UNIT): cv.string,
}
MAP = {
"sensor": {
"air_monitor",
"water_purifier",
"cooker",
"pressure_cooker",
"induction_cooker",
"power_consumption",
"electricity",
"environment",
"filter",
"filter_2",
"filter_3",
"filter_4",
"temperature_humidity_sensor",
"magnet_sensor",
"motion_sensor",
"submersion_sensor",
"tds_sensor",
"air_fryer",
"remain_clean_time",
},
"switch": {
"switch",
"outlet",
"switch_2",
"switch_3",
"switch_4",
"coffee_machine",
},
"light": {
"light",
"light_2",
"light_3",
"light_4",
"indicator_light",
},
"fan": {
"a_l",
"fan",
"ceiling_fan",
"air_fresh",
"air_purifier",
"washer",
"hood",
"fan_control",
"dryer",
"toilet",
"settings",
"settings_2",
"air_fresh_heater",
"bed",
"pet_drinking_fountain",
},
"cover": {
"curtain",
"airer",
},
"humidifier": {
"humidifier",
"dehumidifier",
},
"media_player": {
"media_player",
"speaker",
"play_control",
},
"climate": {
"air_conditioner",
"heater",
},
"lock": {
"physical_controls_locked",
},
"water_heater": {
"water_heater",
"kettle",
"dishwasher",
},
}
UNIT_MAPPING = {
"percentage" : PERCENTAGE , # ็พๅๆฏ
"celsius" : TEMP_CELSIUS , # ๆๆฐๅบฆ
"seconds" : "็ง" , # ็ง
"minutes" : "ๅ้" , # ๅ
"hours" : "ๅฐๆถ" , # ๅฐๆถ
"days" : "ๅคฉ" , # ๅคฉ
"kelvin" : TEMP_KELVIN , # ๅผๆฐๆธฉๆ
"pascal" : "Pa" , # ๅธๆฏๅก(ๅคงๆฐๅๅผบๅไฝ)
"arcdegrees" : "rad" , # ๅผงๅบฆ(่งๅบฆๅไฝ)
"rgb" : "RGB" , # RGB(้ข่ฒ)
"watt" : POWER_WATT , # ็ฆ็น(ๅ็)
"litre" : VOLUME_LITERS , # ๅ
"ppm" : CONCENTRATION_PARTS_PER_MILLION , # ppmๆตๅบฆ
"lux" : LIGHT_LUX , # ๅๅ
ๆฏ(็
งๅบฆ)
"mg/m3" : CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER , # ๆฏซๅ
ๆฏ็ซๆน็ฑณ
} | [
11748,
1363,
562,
10167,
13,
16794,
364,
13,
11250,
62,
12102,
341,
355,
269,
85,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
1635,
198,
198,
10943,
37,
62,
16977,
62,
38604,
8643,
796,
366,
... | 1.639104 | 2,455 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/type.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/type.proto',
package='google.protobuf',
syntax='proto3',
serialized_options=b'\n\023com.google.protobufB\tTypeProtoP\001Z-google.golang.org/protobuf/types/known/typepb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b \x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 
\x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42{\n\x13\x63om.google.protobufB\tTypeProtoP\x01Z-google.golang.org/protobuf/types/known/typepb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_source__context__pb2.DESCRIPTOR,])
_SYNTAX = _descriptor.EnumDescriptor(
name='Syntax',
full_name='google.protobuf.Syntax',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO2', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO3', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1413,
serialized_end=1459,
)
_sym_db.RegisterEnumDescriptor(_SYNTAX)
Syntax = enum_type_wrapper.EnumTypeWrapper(_SYNTAX)
SYNTAX_PROTO2 = 0
SYNTAX_PROTO3 = 1
_FIELD_KIND = _descriptor.EnumDescriptor(
name='Kind',
full_name='google.protobuf.Field.Kind',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=14, number=14,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=15, number=15,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=16, number=16,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=17, number=17,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=18, number=18,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=610,
serialized_end=938,
)
_sym_db.RegisterEnumDescriptor(_FIELD_KIND)
_FIELD_CARDINALITY = _descriptor.EnumDescriptor(
name='Cardinality',
full_name='google.protobuf.Field.Cardinality',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='CARDINALITY_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_OPTIONAL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REQUIRED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REPEATED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=940,
serialized_end=1056,
)
_sym_db.RegisterEnumDescriptor(_FIELD_CARDINALITY)
_TYPE = _descriptor.Descriptor(
name='Type',
full_name='google.protobuf.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Type.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fields', full_name='google.protobuf.Type.fields', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='oneofs', full_name='google.protobuf.Type.oneofs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Type.options', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Type.source_context', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Type.syntax', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=328,
)
_FIELD = _descriptor.Descriptor(
name='Field',
full_name='google.protobuf.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='google.protobuf.Field.kind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cardinality', full_name='google.protobuf.Field.cardinality', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.Field.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Field.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Field.type_url', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='oneof_index', full_name='google.protobuf.Field.oneof_index', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.Field.packed', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Field.options', index=7,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='json_name', full_name='google.protobuf.Field.json_name', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.Field.default_value', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELD_KIND,
_FIELD_CARDINALITY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=1056,
)
_ENUM = _descriptor.Descriptor(
name='Enum',
full_name='google.protobuf.Enum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Enum.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumvalue', full_name='google.protobuf.Enum.enumvalue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Enum.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Enum.source_context', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Enum.syntax', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1059,
serialized_end=1265,
)
_ENUMVALUE = _descriptor.Descriptor(
name='EnumValue',
full_name='google.protobuf.EnumValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValue.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValue.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1267,
serialized_end=1350,
)
_OPTION = _descriptor.Descriptor(
name='Option',
full_name='google.protobuf.Option',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Option.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Option.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1352,
serialized_end=1411,
)
_TYPE.fields_by_name['fields'].message_type = _FIELD
_TYPE.fields_by_name['options'].message_type = _OPTION
_TYPE.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_TYPE.fields_by_name['syntax'].enum_type = _SYNTAX
_FIELD.fields_by_name['kind'].enum_type = _FIELD_KIND
_FIELD.fields_by_name['cardinality'].enum_type = _FIELD_CARDINALITY
_FIELD.fields_by_name['options'].message_type = _OPTION
_FIELD_KIND.containing_type = _FIELD
_FIELD_CARDINALITY.containing_type = _FIELD
_ENUM.fields_by_name['enumvalue'].message_type = _ENUMVALUE
_ENUM.fields_by_name['options'].message_type = _OPTION
_ENUM.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_ENUM.fields_by_name['syntax'].enum_type = _SYNTAX
_ENUMVALUE.fields_by_name['options'].message_type = _OPTION
_OPTION.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Type'] = _TYPE
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
DESCRIPTOR.message_types_by_name['Enum'] = _ENUM
DESCRIPTOR.message_types_by_name['EnumValue'] = _ENUMVALUE
DESCRIPTOR.message_types_by_name['Option'] = _OPTION
DESCRIPTOR.enum_types_by_name['Syntax'] = _SYNTAX
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), {
'DESCRIPTOR' : _TYPE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Type)
})
_sym_db.RegisterMessage(Type)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _FIELD,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Field)
})
_sym_db.RegisterMessage(Field)
Enum = _reflection.GeneratedProtocolMessageType('Enum', (_message.Message,), {
'DESCRIPTOR' : _ENUM,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Enum)
})
_sym_db.RegisterMessage(Enum)
EnumValue = _reflection.GeneratedProtocolMessageType('EnumValue', (_message.Message,), {
'DESCRIPTOR' : _ENUMVALUE,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValue)
})
_sym_db.RegisterMessage(EnumValue)
Option = _reflection.GeneratedProtocolMessageType('Option', (_message.Message,), {
'DESCRIPTOR' : _OPTION,
'__module__' : 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Option)
})
_sym_db.RegisterMessage(Option)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
23645,
14,
11235,
672,
3046,
14,
4906,
13,
1676,
1462,
198,
37811,
8645,
... | 2.316021 | 10,898 |
from resource_factory import ResourceFactory
from modules.configs.environment import env | [
6738,
8271,
62,
69,
9548,
1330,
20857,
22810,
198,
6738,
13103,
13,
11250,
82,
13,
38986,
1330,
17365
] | 4.888889 | 18 |
import datetime
from decimal import Decimal
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from dateutil.relativedelta import relativedelta
from factory import fuzzy
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.bankaccounts.models import BankAccount
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactions.models import BankTransaction
from mymoney.apps.banktransactionschedulers.factories import (
BankTransactionSchedulerFactory,
)
from mymoney.apps.banktransactionschedulers.models import (
BankTransactionScheduler,
)
from mymoney.apps.banktransactiontags.factories import (
BankTransactionTagFactory,
)
from mymoney.apps.banktransactiontags.models import BankTransactionTag
from ...factories import UserFactory
class Command(BaseCommand):
"""
Data generator for purpose only.
"""
help = 'Generate data for purpose.'
leave_locale_alone = True
| [
11748,
4818,
8079,
198,
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
... | 3.574257 | 303 |
# Maps each named board cell to its flat index, left-to-right, top-to-bottom.
# Nine fields named first..ninth -- presumably a 3x3 (tic-tac-toe style)
# board; confirm against the consumer of this table.
BOARD_FIELDS_EXPECTED = {
    'first_field': 0,
    'second_field': 1,
    'third_field': 2,
    'fourth_field': 3,
    'fifth_field': 4,
    'sixth_field': 5,
    'seventh_field': 6,
    'eighth_field': 7,
    'ninth_field': 8
}
# Sentinel value stored in a cell that has not been filled yet.
FIELD_EMPTY_VAL = " "
8202,
9795,
62,
11674,
3698,
5258,
62,
49864,
9782,
1961,
796,
1391,
198,
220,
220,
220,
705,
11085,
62,
3245,
10354,
657,
11,
198,
220,
220,
220,
705,
12227,
62,
3245,
10354,
352,
11,
198,
220,
220,
220,
705,
17089,
62,
3245,
10354... | 1.992063 | 126 |
__all__ = ('DiscordRPCError', )

class DiscordRPCError(BaseException):
    """
    Error received over Discord's RPC connection.
    
    Attributes
    ----------
    code : `int`
        Discord RPC error code.
    message : `str`
        Discord RPC error message.
    """
    def __init__(self, code, message):
        """
        Creates a new Discord RPC error instance with the given parameters.
        
        Parameters
        ----------
        code : `int`
            Discord RPC error code.
        message : `str`
            Discord RPC error message.
        """
        self.code = code
        self.message = message
        BaseException.__init__(self, code, message)
    
    def __repr__(self):
        """Returns the representation of the error code."""
        type_name = type(self).__name__
        return f'{type_name}: [{self.code}] {self.message!r}'
| [
834,
439,
834,
796,
19203,
15642,
585,
20031,
5222,
81,
1472,
3256,
1267,
198,
198,
4871,
39462,
20031,
5222,
81,
1472,
7,
14881,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
39462,
39400,
4049,
2438,
13,
198,
220,
220,... | 2.23219 | 379 |
"""
Your task is pretty simple , given a string S , find the total count of numbers present in the digit.
Input
The first line contains T , the number of test cases. The first line of each and every testc ase will contain a integer N , the length of the string . The second line of each and every test case will contain a string S of length N.
Output
For each and every testcase , output the total count of numbers present in the string.
Constraints
0<T<200
0<N<10000
SAMPLE INPUT
1
26
sadw96aeafae4awdw2wd100awd
SAMPLE OUTPUT
4
Explanation
For the first test case , the string given is "sadw96aeafae4awdw2wd100awd". There are total of 4 numbers in the string - [96,4,2,100]. So , we output 4.
"""
T = raw_input()
Digits = ['0','1','2','3','4','5','6','7','8','9']
for i in range(int(T)):
N = raw_input()
Array = map(str, raw_input())
arr = []
cnt = 0
Array = list(Array)
for j in range(len(Array)):
if Array[j] in Digits:
arr.append(j)
else:
pass
for k in range(len(arr)-1):
if arr[k] == arr[k+1]-1:
pass
else:
cnt += 1
print cnt+1
| [
37811,
198,
7120,
4876,
318,
2495,
2829,
837,
1813,
257,
4731,
311,
837,
1064,
262,
2472,
954,
286,
3146,
1944,
287,
262,
16839,
13,
198,
198,
20560,
198,
198,
464,
717,
1627,
4909,
309,
837,
262,
1271,
286,
1332,
2663,
13,
383,
717... | 2.444444 | 477 |
import os
import sys
import imp
import venv
import shutil
import stat
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(" dcc-venv ")
import requirements_handler as reqhand
# VENV_ROOT_FOLDER = os.path.join(os.path.expanduser('~'), ".dcc-venvs")
VENV_ROOT_FOLDER = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), ".venvs")
CONFIGS_FOLDER = os.path.abspath(os.path.dirname(__file__))
CONFIG_PREFIX = "config_"
DCC_STARTUP_SCRIPT = os.path.join(CONFIGS_FOLDER, "common", "dcc_venv_startup.py")
def onremoveerror(func, path, exc_info):
    """
    Error handler for ``shutil.rmtree``.

    If the error is due to an access error (read only file)
    it attempts to add write permission and then retries.

    If the error is for another reason it re-raises the error.

    Usage : ``shutil.rmtree(path, onerror=onremoveerror)``

    Parameters
    ----------
    func : callable
        The function that failed (e.g. ``os.remove``); retried after chmod.
    path : str
        The path that could not be removed.
    exc_info : tuple
        Exception info as supplied by ``shutil.rmtree`` (unused).
    """
    # `stat` is already imported at module level; the original re-imported
    # it locally on every call, which was redundant.
    if not os.access(path, os.W_OK):
        # Access error: make the entry writable, then retry the failed call.
        os.chmod(path, stat.S_IWUSR)
        func(path)
    else:
        # Unknown cause: re-raise the exception currently being handled
        # (this runs inside rmtree's except block, so bare raise is valid).
        raise
def str2bool(v):
    """
    Parse a boolean-ish command line value.

    Actual booleans pass through unchanged; strings are matched
    case-insensitively against the common yes/no spellings.  Anything
    unrecognised raises ``argparse.ArgumentTypeError``.

    I can't believe this isn't built in
    https://stackoverflow.com/a/43357954
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == "__main__":
os.system("cls")
log.info("#"*85)
log.info("dcc_venvs setup")
log.info("#"*85)
sys.stdout.write("\n{} \n\n".format("-"*100))
func_map = {"install": install_venv,
"update": update_venv,
"uninstall": uninstall_venv}
import argparse
parser = argparse.ArgumentParser("venv handler")
parser.add_argument("type", type=str, help="install or uninstall")
parser.add_argument("-dev", type=str2bool, help="use edit install for git packages under '# DEV' tag")
parser.add_argument("-dccs", default=(), nargs="+", help="specific dccs")
args = parser.parse_args()
func = func_map.get(args.type)
func(args.dccs, args.dev)
os.system("pause")
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
848,
198,
11748,
8710,
85,
198,
11748,
4423,
346,
198,
11748,
1185,
198,
198,
11748,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
8,
198,
6404,
796,
18931,
... | 2.248776 | 1,021 |
'''
Basic Twilio handler function
'''

import boto3
import random
import StringIO
import urllib2

from boto3.dynamodb.conditions import Key
from boto3.session import Session

# create an S3 & Dynamo session
# NOTE(review): StringIO/urllib2 are Python 2 modules, so this handler
# targets Python 2 -- confirm the runtime before porting.
s3 = boto3.resource('s3')
session = Session()
| [
7061,
6,
198,
26416,
1815,
346,
952,
21360,
2163,
198,
7061,
6,
198,
198,
11748,
275,
2069,
18,
198,
11748,
4738,
198,
11748,
10903,
9399,
198,
11748,
2956,
297,
571,
17,
198,
198,
6738,
275,
2069,
18,
13,
67,
4989,
375,
65,
13,
1... | 2.976471 | 85 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import find_packages, setup
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.

    Parameters
    ----------
    package : str
        Path to the package directory containing ``__init__.py``.

    Returns
    -------
    str
        The version string, e.g. ``"1.2.3"``.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original `open(...).read()` left closing to the garbage collector.
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
# Resolve the version once at build time from mlearning/__init__.py.
version = get_version('mlearning')

# Package metadata consumed by pip/setuptools at build & install time.
setup(
    name='mlearning',
    version=version,
    url='https://github.com/aaronlelevier/mlearning',
    license='MIT',
    description="Code repo for general machine learning code that doesn't belong to any one repo or model in particular",
    author='Aaron Lelevier',
    author_email='aaron.lelevier@gmail.com',
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    install_requires=[
        'numpy',
        'matplotlib',
        'opencv-python',
    ],
    python_requires=">=3.6",
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
302,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
628,
198,
4299,
651,... | 2.564797 | 517 |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np

from src.communications import Transmitter, AWGNChannel, Receiver
from src.utils import *

# Configuration
MODULATION = 'BPSK'
EbN0dBs = np.linspace(-20, 8, 20)  # Eb/N0 sweep, in dB

# Initialization: transmit/receive chain over an AWGN channel.
transmitter = Transmitter(MODULATION)
receiver = Receiver(MODULATION)
channel = AWGNChannel(get_bps(MODULATION), transmitter.block_length, transmitter.block_coded_length)

if __name__ == '__main__':
    # Estimate the bit-error rate across the Eb/N0 sweep.
    # NOTE(review): ber_performance/get_basic_channel_fct/show_ber come from
    # the `src.utils` star import; the 1000/500 arguments are presumably
    # iteration / error-count limits -- confirm against src.utils.
    BER = ber_performance(
        EbN0dBs,
        get_basic_channel_fct(transmitter, channel, receiver),
        1000,
        500
    )

    # Plot results: simulated BER against the theoretical BPSK curve.
    plt.figure()
    show_ber(MODULATION, EbN0dBs, BER)
    plt.legend(['BPSK Theory', 'BPSK simulation'])
    plt.show()
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
12351,
13,
20860,
1330,
3602,
37974,
11,
14356,
16630,
29239,
... | 2.491525 | 295 |
import json
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer

# NOTE(review): `Sound` is not defined in this excerpt -- presumably a
# QMediaPlayer wrapper defined elsewhere in this module; confirm.
voicePlayer = Sound('voice')  # voice (speech) playback track
soundPlayer = Sound('sound')  # sound-effect playback track
# bgmPlayer = Sound('bgm')  # BGM playback track; not needed in this project

if __name__ == '__main__':  # standalone usage demo
    # The player's other signals/slots are unreliable: they only fire on
    # explicit actions.  The only dependable query is actively polling
    # .position(); .duration() is not useful either.
    from time import sleep
    app = QApplication(sys.argv)
    sound = Sound('test')
    sound.play_music(r'source\Move.wav')
    sleep(3)
    sound.play_music(r'source\Note.wav')
    sleep(3)
    # sys.exit(app.exec_())  # must not block here, otherwise cannot exit
| [
11748,
33918,
198,
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1195,
28165,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
15205,
20626,... | 1.591837 | 392 |
import numpy as np
import gen.dists as dists

# This really should be a pass-through function for gen gas, etc. but oh well.

if __name__ == "__main__":
    # Generate some test distribution and display in 3d with mpl
    # NOTE(review): `Generator` is not defined in this excerpt -- presumably
    # the particle-distribution generator class from this module; confirm
    # the meaning of the 14 positional arguments against its signature.
    gen = Generator(int(100), int(100), int(100), 1e5, 1e4, 10, 40, 100, 10, 40, 100, 10, 2)
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Quiver plots: gas and star particle positions with velocity vectors.
    ax.quiver(gen.gas_x, gen.gas_y, gen.gas_z, gen.gas_v_x, gen.gas_v_y, gen.gas_v_z, length=10)
    ax.quiver(gen.star_x, gen.star_y, gen.star_z, gen.star_v_x, gen.star_v_y, gen.star_v_z, length=10)
    ax.set_zlim(-400, 400)
    plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2429,
13,
67,
1023,
355,
288,
1023,
628,
628,
628,
220,
220,
220,
1303,
770,
1107,
815,
307,
257,
1208,
12,
9579,
2163,
329,
2429,
3623,
11,
3503,
13,
475,
11752,
880,
13,
628,
628,
198,
... | 2.2263 | 327 |
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
from ooflib.common import debug
from ooflib.common import toolbox
from ooflib.engine.IO import genericinfotoolbox
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#

# Register the mesh-info toolbox with the shared toolbox registry;
# `ordering` controls its position in the toolbox list.
# NOTE(review): MeshInfoToolbox is not defined in this excerpt -- presumably
# defined above in the full module (likely via genericinfotoolbox); confirm.
toolbox.registerToolboxClass(MeshInfoToolbox, ordering=3.0)
| [
2,
532,
9,
12,
21015,
532,
9,
12,
198,
198,
2,
770,
3788,
373,
4635,
416,
399,
8808,
11,
281,
4086,
286,
262,
471,
13,
50,
13,
1230,
11,
198,
2,
290,
416,
14195,
318,
407,
2426,
284,
6634,
287,
262,
1578,
1829,
13,
198,
2,
3... | 3.268868 | 212 |
import zmq
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:5555" % port)
while True:
socket.send("What time is it?")
msg = socket.recv()
print msg
time.sleep(1)
| [
11748,
1976,
76,
80,
198,
198,
22866,
796,
1976,
76,
80,
13,
21947,
3419,
198,
44971,
796,
4732,
13,
44971,
7,
89,
76,
80,
13,
4537,
4663,
8,
198,
44971,
13,
21653,
7203,
83,
13155,
1378,
47026,
2816,
2816,
1,
4064,
2493,
8,
198,
... | 2.344444 | 90 |
from django.urls import path
from website import api

# URL routes for the user-location API.  Each action is registered both with
# and without its URL parameter so the view can handle the missing-argument
# case itself; do not reorder -- Django matches top-down.
urlpatterns = [
    path('', api.load_user_location),
    path('status/<status>', api.load_user_location),
    path('approve/<locid>', api.approve_user_location),
    path('approve/', api.approve_user_location),
    path('disapprove/<locid>', api.disapprove_user_location),
    path('disapprove/', api.disapprove_user_location),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
201,
198,
6738,
3052,
1330,
40391,
201,
198,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
220,
3108,
10786,
3256,
40391,
13,
2220,
62,
7220,
62,
24886,
828,
201,
198,
220,
22... | 2.522293 | 157 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest
import datetime
from sqlalchemy.exc import IntegrityError
from mojibake.app import app, db
from mojibake.models import Post, Category, Tag, User
from mojibake.settings import TEST_DATABASE_URI

#TO DO: More work on this on testing!
#http://flask.pocoo.org/docs/testing/

# Run the unittest CLI when executed directly.  The TestCase classes
# themselves are outside this excerpt.
if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
11748,
4818,
8079,
198,
6738,
44161,
282,
26599,
13,
41194,
1330,
39348,
12331,
198,
19... | 2.704225 | 142 |
import pyglet
| [
11748,
12972,
70,
1616,
628
] | 3 | 5 |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 10:42:50 2020
@author: draveendran
"""
import json
import boto3
import datetime
import os
#SSM Functions | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2447,
1596,
838,
25,
3682,
25,
1120,
12131,
198,
198,
31,
9800,
25,
288,
5758,
437,
2596,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
... | 2.515625 | 64 |
# Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instruction module."""
from absl.testing import absltest
import gast as ast
from python_graphs import instruction as instruction_module
# Delegate to absltest so the runner discovers the test cases defined above
# (the test classes themselves are outside this excerpt).
if __name__ == '__main__':
  absltest.main()
| [
2,
15069,
357,
34,
8,
33448,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 3.743961 | 207 |
import unittest
from component.LogReg import LogReg
from autosklearn.pipeline.util import _test_classifier
import sklearn.metrics
| [
11748,
555,
715,
395,
198,
198,
6738,
7515,
13,
11187,
8081,
1330,
5972,
8081,
198,
6738,
44619,
74,
35720,
13,
79,
541,
4470,
13,
22602,
1330,
4808,
9288,
62,
4871,
7483,
198,
11748,
1341,
35720,
13,
4164,
10466,
628
] | 3.384615 | 39 |
#!/usr/bin/env python3
# Minimal smoke-test script: prints a greeting to stdout.
print('hello')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
4798,
10786,
31373,
11537,
198
] | 2.533333 | 15 |
from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
def imagenet_data_augmentation(pillow_img, target_size,
                               area_min=0.08, area_max=1.0,
                               aspect_min=0.75, aspect_max=4.0/3.0):
    """
    GoogLeNet-style random-crop data augmentation for a single image.

    Samples a crop whose area is U([area_min, area_max]) of the source image
    and whose aspect ratio is U([aspect_min, aspect_max]), resizes it to
    (target_size, target_size), then applies a random horizontal flip and a
    random autocontrast.

    # input : pillow_img = PIL instance
    #       : target_size = resized width / height
    # output : uint8 numpy array
    # optional : cropped area = U([area_min, area_max])
                 cropped aspect ratio = U([aspect_min, aspect_max])
    """
    # With aspect ratio a = width / height and sampled area S:
    # cropped_width = sqrt(S*a)
    # cropped_height = sqrt(S/a)
    original_width, original_height = pillow_img.size
    cropped_area = np.random.uniform(area_min, area_max) * original_width * original_height
    cropped_aspect_ratio = np.random.uniform(aspect_min, aspect_max)
    cropped_width = int(np.sqrt(cropped_area * cropped_aspect_ratio))
    cropped_height = int(np.sqrt(cropped_area / cropped_aspect_ratio))
    # crop left / right point; when the sampled crop is wider than the image,
    # a negative `left` extends the crop box beyond the image so the image
    # sits centred inside the crop.
    if original_width > cropped_width:
        horizontal_slide = int(np.random.uniform(0, original_width-cropped_width))
        left, right = horizontal_slide, horizontal_slide+cropped_width
    else:
        horizontal_slide = (cropped_width - original_width) // 2
        left, right = -horizontal_slide, horizontal_slide+original_width
    # crop top / bottom point (same scheme, vertically)
    if original_height > cropped_height:
        vertical_slide = int(np.random.uniform(0, original_height-cropped_height))
        top, bottom = vertical_slide, vertical_slide+cropped_height
    else:
        vertical_slide = (cropped_height - original_height) // 2
        top, bottom = -vertical_slide, vertical_slide+original_height
    cropped = pillow_img.crop((left, top, right, bottom))
    # NOTE(review): Image.LINEAR -- presumably Pillow's bilinear-filter
    # alias; confirm against the installed Pillow version.
    resized = cropped.resize((target_size, target_size), Image.LINEAR)
    # horizontal flip with probability 0.5
    if np.random.random() >= 0.5:
        resized = ImageOps.mirror(resized)
    # auto contrast (a bit slow), applied with probability 0.5 and a random
    # cutoff percentage in [0, 1)
    if np.random.random() >= 0.5:
        resized = ImageOps.autocontrast(resized,
                    np.random.uniform(0, 1.0), ignore=0) # ignore black background
    return np.asarray(resized, np.uint8)
def validation_image_load(pillow_img, target_size):
    """
    Load an image for validation: a plain resize, no augmentation.

    # input : pillow_img = PIL instance
    #       : target_size = resized width / height
    # output : uint8 numpy array
    """
    square = (target_size, target_size)
    scaled = pillow_img.resize(square, Image.LINEAR)
    return np.asarray(scaled, np.uint8)
| [
6738,
350,
4146,
1330,
7412,
11,
7412,
41472,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
4299,
3590,
268,
316,
62,
7890,
62,
559,
5154,
341,
7,
27215,
322,
62,
9600,
1... | 2.456767 | 1,064 |
from fixate.core.common import TestClass
from fixate.core.ui import user_ok, user_info
from fixate.core.checks import *

# Script version reported to the fixate framework.
__version__ = "1"

class SimpleTest(TestClass):
    """Simple passing test"""
    # NOTE(review): no test steps are defined -- presumably fixate treats an
    # empty TestClass as passing; confirm against fixate's runner.

# The sequence of tests the fixate runner executes for this script.
TEST_SEQUENCE = [SimpleTest()]
| [
6738,
4259,
378,
13,
7295,
13,
11321,
1330,
6208,
9487,
198,
6738,
4259,
378,
13,
7295,
13,
9019,
1330,
2836,
62,
482,
11,
2836,
62,
10951,
198,
6738,
4259,
378,
13,
7295,
13,
42116,
1330,
1635,
198,
198,
834,
9641,
834,
796,
366,
... | 2.987179 | 78 |
"""Exactcover __init__."""
from .exactcover import solve, ExactCoverKeyError
__all__ = ['solve', 'ExactCoverKeyError']
| [
37811,
3109,
529,
9631,
11593,
15003,
834,
526,
15931,
198,
6738,
764,
1069,
529,
9631,
1330,
8494,
11,
1475,
529,
27245,
9218,
12331,
198,
198,
834,
439,
834,
796,
37250,
82,
6442,
3256,
705,
3109,
529,
27245,
9218,
12331,
20520,
198
] | 2.926829 | 41 |
# NOTE(review): `global` at module level is a no-op in Python; these lines
# only document which names the module treats as shared state.
global cjk_list
global unicode_list
global cjk_jian_list
global cjk_jian_fan_list
global cjk_fan_list
global cjk_count
global unicode_count

import os, sys
global main_directory
# Resolve the directory the application lives in, for both frozen
# (PyInstaller) builds and plain source checkouts.
#if packaged by pyinstaller
#ref: https://stackoverflow.com/questions/404744/determining-application-path-in-a-python-exe-generated-by-pyinstaller
if getattr(sys, 'frozen', False):
    #change from loading same folder to full folder, --onedir
    main_directory = os.path.dirname(sys.executable)
    #`pyinstaller --onefile` change to use the following code
    #if '_MEIPASS2' in os.environ:
    #    main_directory = os.environ['_MEIPASS2']
    #ref: https://stackoverflow.com/questions/9553262/pyinstaller-ioerror-errno-2-no-such-file-or-directory
else:
    #dev mode
    try: #py xx.py
        app_full_path = os.path.realpath(__file__)
        main_directory = os.path.dirname(app_full_path)
    except NameError: #py then run code
        main_directory = os.getcwd()
#english name
#old list for compatibility
cjk_list = {"gb2312":"GB/T 2312",
"gb12345":"GB/T 12345",
"gbk":"GBK",
"gb18030":"GB 18030",
"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
"fangzheng-jianfan":"FounderType Simp./Trad. List",
"tongyong-guifan":"Table of General Standard Chinese Characters", #้็จ่ง่ๆฑๅญ่กจ
"3500changyong":"List of Frequently Used Characters in Modern Chinese", #็ฐไปฃๆฑ่ฏญๅธธ็จๅญ่กจ
"7000tongyong":"List of Commonly Used Characters in Modern Chinese", #็ฐไปฃๆฑ่ฏญ้็จๅญ่กจ
"yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education", #ไนๅกๆ่ฒ่ฏญๆ่ฏพ็จๅธธ็จๅญ่กจ
"4808changyong":"Chart of Standard Forms of Common National Characters", #ๅธธ็จๅฝๅญๆ ๅๅญไฝ่กจ
"6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters", #ๆฌกๅธธ็จๅฝๅญๆ ๅๅญไฝ่กจ
"big5changyong":"BIG5 Common Character Set",
"big5":"BIG5",
"hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters", #ๅธธ็จๅญๅญๅฝข่กจ
"hkscs":"Hong Kong Supplementary Character Set",
"suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)"
}
cjk_jian_list_en = {"gb2312":"GB/T 2312",
"3500changyong":"List of Frequently Used Characters in Modern Chinese",
"7000tongyong":"List of Commonly Used Characters in Modern Chinese",
"yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education",
"tongyong-guifan":"Table of General Standard Chinese Characters"
}
cjk_jian_fan_list_en = {"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
"fangzheng-jianfan":"FounderType Simp./Trad. List",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_fan_list_en = {"4808changyong":"Chart of Standard Forms of Common National Characters",
"6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters",
"big5changyong":"BIG5 Common Character Set",
"big5":"BIG5",
"hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters",
"hkscs":"Hong Kong Supplementary Character Set",
"suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)",
"gb12345":"GB/T 12345"
}
unicode_list = {"kangxi":"Kangxi Radicals",
"kangxi-sup":"CJK Radical Supplements",
"zero":"ใ",
"basic":"CJK Unified Ideographs",
"ext-a":"CJK Unified Ideographs Extension A",
"compat":"CJK Compatibility Ideographs",
"compat-ideo":" Non-Compatibility (Unified) Ideographs",
"ext-b":"CJK Unified Ideographs Extension B",
"ext-c":"CJK Unified Ideographs Extension C",
"ext-d":"CJK Unified Ideographs Extension D",
"ext-e":"CJK Unified Ideographs Extension E",
"ext-f":"CJK Unified Ideographs Extension F",
"compat-sup":"CJK Compatibility Ideographs Supplement",
"ext-g":"CJK Unified Ideographs Extension G",
"total":"Total Ideographs"
}
#chinese name (simp)
cjk_jian_list_zhs = {"gb2312":"GB/T 2312",
"3500changyong":"็ฐไปฃๆฑ่ฏญๅธธ็จๅญ่กจ๏ผ",
"7000tongyong":"็ฐไปฃๆฑ่ฏญ้็จๅญ่กจ",
"yiwu-jiaoyu":"ไนๅกๆ่ฒ่ฏญๆ่ฏพ็จๅธธ็จๅญ่กจ",
"tongyong-guifan":"้็จ่ง่ๆฑๅญ่กจ"
}
cjk_jian_fan_list_zhs = {"hanyi-jianfan":"ๆฑไปช็ฎ็นๅญ่กจ",
"fangzheng-jianfan":"ๆนๆญฃ็ฎ็นๅญ่กจ",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_fan_list_zhs = {"4808changyong":"ๅธธ็จๅฝๅญๆ ๅๅญไฝ่กจ",
"6343cichangyong":"ๆฌกๅธธ็จๅฝๅญๆ ๅๅญไฝ่กจ",
"big5changyong":"ไบๅคง็ (Big5) ๅธธ็จๆฑๅญ่กจ",
"big5":"ไบๅคง็ (Big5)",
"hkchangyong":"ๅธธ็จๅญๅญๅฝข่กจ",
"hkscs":"้ฆๆธฏๅข่กฅๅญ็ฌฆ้ (HKSCS)",
"suppchara":"ๅธธ็จ้ฆๆธฏๅคๅญ่กจ (1-6็บง)",
"gb12345":"GB/T 12345"
}
unicode_list_zhs = {"kangxi":"ๅบท็้จ้ฆ",
"kangxi-sup":"ๆฑๅญ้จ้ฆ่กฅๅ
",
"zero":"ใ",
"basic":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญ",
"ext-a":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑAๅบ",
"compat":"ไธญๆฅ้ฉๅ
ผๅฎน่กจๆๆๅญ",
"compat-ideo":"ใ้ๅ
ผๅฎน๏ผ็ปไธ๏ผ่กจๆๆๅญ",
"ext-b":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑBๅบ",
"ext-c":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑCๅบ",
"ext-d":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑDๅบ",
"ext-e":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑEๅบ",
"ext-f":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑFๅบ",
"compat-sup":"ไธญๆฅ้ฉๅ
ผๅฎน่กจๆๆๅญ๏ผ่กฅๅ
ๅบ๏ผ",
"ext-g":"ไธญๆฅ้ฉ็ปไธ่กจๆๆๅญโๆฉๅฑGๅบ",
"total":"ๆปๆฑๅญๆฐ"
}
#chinese name (trad)
cjk_fan_list_zht = {"4808changyong":"ๅธธ็จๅๅญๆจๆบๅญ้ซ่กจ",
"6343cichangyong":"ๆฌกๅธธ็จๅๅญๆจๆบๅญ้ซ่กจ",
"big5changyong":"ไบๅคง็ขผ (Big5) ๅธธ็จๆผขๅญ่กจ",
"big5":"ไบๅคง็ขผ (Big5)",
"hkchangyong":"ๅธธ็จๅญๅญๅฝข่กจ",
"hkscs":"้ฆๆธฏๅข่ฃๅญ็ฌฆ้ (HKSCS)",
"suppchara":"ๅธธ็จ้ฆๆธฏๅคๅญ่กจ (1-6็ด)",
"gb12345":"GB/T 12345"
}
cjk_jian_fan_list_zht = {"hanyi-jianfan":"ๆผขๅ็ฐก็นๅญ่กจ",
"fangzheng-jianfan":"ๆนๆญฃ็ฐก็นๅญ่กจ",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_jian_list_zht = {"gb2312":"GB/T 2312",
"3500changyong":"็พไปฃๆผข่ชๅธธ็จๅญ่กจ",
"7000tongyong":"็พไปฃๆผข่ช้็จๅญ่กจ",
"yiwu-jiaoyu":"็พฉๅๆ่ฒ่ชๆ่ชฒ็จๅธธ็จๅญ่กจ",
"tongyong-guifan":"้็จ่ฆ็ฏๆผขๅญ่กจ"
}
unicode_list_zht = {"kangxi":"ๅบท็้จ้ฆ",
"kangxi-sup":"ๆผขๅญ้จ้ฆ่ฃๅ
",
"zero":"ใ",
"basic":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญ",
"ext-a":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑAๅ",
"compat":"ไธญๆฅ้ๅ
ผๅฎน่กจๆๆๅญ",
"compat-ideo":"ใ้ๅ
ผๅฎน๏ผ็ตฑไธ๏ผ่กจๆๆๅญ",
"ext-b":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑBๅ",
"ext-c":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑCๅ",
"ext-d":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑDๅ",
"ext-e":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑEๅ",
"ext-f":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑFๅ",
"compat-sup":"ไธญๆฅ้ๅ
ผๅฎน่กจๆๆๅญ๏ผ่ฃๅ
ๅ๏ผ",
"ext-g":"ไธญๆฅ้็ตฑไธ่กจๆๆๅญโๆดๅฑGๅ",
"total":"็ธฝๆผขๅญๆธ"
}
#character count
cjk_count = {"gb2312":6763,
"gb12345":6866,
"gbk":20923,
"gb18030":0,
"hanyi-jianfan":9169,
"fangzheng-jianfan":9664,
"tongyong-guifan":8105,
"3500changyong":3500,
"7000tongyong":7000,
"yiwu-jiaoyu":3500,
"4808changyong":4808,
"6343cichangyong":6343,
"big5changyong":5401,
"big5":13060,
"hkchangyong":4825,
"hkscs":4603,
"suppchara":1097
}
unicode_count = {"kangxi":214,
"kangxi-sup":115,
"zero":1,
"basic":20992,
"ext-a":6592,
"compat":472,
"compat-ideo":12,
"ext-b":42720,
"ext-c":4153,
"ext-d":222,
"ext-e":5762,
"ext-f":7473,
"compat-sup":542,
"ext-g":4939,
"total":0
}
cjk_count["gb18030"] = unicode_count["zero"]+unicode_count["basic"]+unicode_count["ext-a"]
unicode_count["total"] = unicode_count["zero"]+unicode_count["compat-ideo"]+unicode_count["basic"]+unicode_count["ext-a"]+unicode_count["ext-b"]+unicode_count["ext-c"]+unicode_count["ext-d"]+unicode_count["ext-e"]+unicode_count["ext-f"]+unicode_count["ext-g"] | [
20541,
269,
73,
74,
62,
4868,
201,
198,
20541,
28000,
1098,
62,
4868,
201,
198,
201,
198,
20541,
269,
73,
74,
62,
73,
666,
62,
4868,
201,
198,
20541,
269,
73,
74,
62,
73,
666,
62,
24408,
62,
4868,
201,
198,
20541,
269,
73,
74,
... | 1.4814 | 5,914 |
# MIT License
# Copyright (c) 2017 Derek Selander
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import ds
import os
import shlex
import optparse
import datetime
import lldb.utils.symbolication
def dclass(debugger, command, exe_ctx, result, internal_dict):
'''
Dumps all the NSObject inherited classes in the process. If you give it a module,
it will dump only the classes within that module. You can also filter out classes
to only a certain type and can also generate a header file for a specific class.
Example:
# Dump ALL the NSObject classes within the process
(lldb) dclass
# Dump all the classes that are a UIViewController within the process
(lldb) dclass -f UIViewController
# Dump all the classes with the regex case insensitive search "viewcontroller" in the class name
(lldb) dclass -r (?i)viewCoNtrolLer
# Dump all the classes within the UIKit module
(lldb) dclass -m UIKit
# Dump all classes in CKConfettiEffect NSBundle that are UIView subclasses
(lldb) dclass /System/Library/Messages/iMessageEffects/CKConfettiEffect.bundle/CKConfettiEffect -f UIView
# Generate a header file for the class specified:
(lldb) dclass -g UIView
# Generate a protocol that you can cast an object to. Ideal when working with private classes at dev time
(lldb) dclass -P UIView
# Dump all classes and methods for a particular module, ideal for viewing changes in frameworks over time
(lldb) dclass -o UIKit
# Only dump classes whose superclass is of type class and in UIKit module. Ideal for going after specific classes
(lldb) dclass -s NSObject -m UIKit
'''
command_args = shlex.split(command, posix=False)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
if not args:
# result.SetError('Usage: find NSObjectSubclass\n\nUse \'help find\' for more details')
clean_command = None
# return
if not args and options.generate_header:
result.SetError('Need to supply class for option')
return
else:
clean_command = ('').join(args)
res = lldb.SBCommandReturnObject()
interpreter = debugger.GetCommandInterpreter()
target = exe_ctx.target
if not options.info and not options.class_type and not options.verbose and not options.regular_expression and not options.module and not options.filter and not options.search_protocols and not options.dump_code_output and not options.generate_header and not options.verbose_info and not options.generate_protocol and not options.conforms_to_protocol and not options.superclass and len(args) == 1:
options.info = args[0]
if options.info or options.verbose_info:
script = generate_class_info(options)
# print(script)
# return
interpreter.HandleCommand('expression -lobjc -O -- ' + script, res)
if res.GetError():
result.SetError(res.GetError())
return
contents = res.GetOutput()
result.AppendMessage(contents)
return
elif options.dump_code_output:
directory = '/tmp/{}_{}/'.format(target.executable.basename, datetime.datetime.now().time())
os.makedirs(directory)
modules = target.modules
if len(args) > 0 and args[0] == '__all':
os.makedirs(directory + 'PrivateFrameworks')
os.makedirs(directory + 'Frameworks')
modules = [i for i in target.modules if '/usr/lib/' not in i.file.fullpath and '__lldb_' not in i.file.fullpath]
outputMsg = "Dumping all private Objective-C frameworks"
elif len(args) > 0 and args[0]:
module = target.module[args[0]]
if module is None:
result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(args[0]))
return
modules = [module]
outputMsg = "Dumping all private Objective-C frameworks"
else:
modules = [target.module[target.executable.fullpath]]
for module in modules:
command_script = generate_module_header_script(options, module.file.fullpath.replace('//', '/'))
interpreter.HandleCommand('expression -lobjc -O -u0 -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -- ' + command_script)
if '/System/Library/PrivateFrameworks/' in module.file.fullpath:
subdir = 'PrivateFrameworks/'
elif '/System/Library/Frameworks/' in module.file.fullpath:
subdir = 'Frameworks/'
else:
subdir = ''
ds.create_or_touch_filepath(directory + subdir + module.file.basename + '.txt', res.GetOutput())
print('Written output to: ' + directory + '... opening file')
os.system('open -R ' + directory)
return
if options.module is not None:
options.module = options.module.strip("\"\'")
module = target.FindModule(lldb.SBFileSpec(options.module))
if not module.IsValid():
if not module or not module.IsValid():
result.SetError(
"Unable to open module name '{}', to see list of images use 'image list -b'".format(str(options.module)))
return
if options.conforms_to_protocol is not None:
interpreter.HandleCommand('expression -lobjc -O -- (id)NSProtocolFromString(@\"{}\")'.format(options.conforms_to_protocol), res)
if 'nil' in res.GetOutput() or not res.GetOutput():
result.SetError("No such Protocol name '{}'".format(options.conforms_to_protocol))
return
res.Clear()
if options.generate_header or options.generate_protocol:
command_script = generate_header_script(options, clean_command)
else:
command_script = generate_class_dump(target, options, clean_command)
if options.generate_header or options.generate_protocol:
interpreter.HandleCommand('expression -lobjc -O -- (Class)NSClassFromString(@\"{}\")'.format(clean_command), res)
if 'nil' in res.GetOutput():
result.SetError('Can\'t find class named "{}". Womp womp...'.format(clean_command))
return
res.Clear()
if options.generate_protocol:
filepath = "/tmp/DS_" + clean_command + "Protocol.h"
else:
filepath = "/tmp/" + clean_command + ".h"
interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
if res.GetError():
result.SetError(res.GetError())
return
contents = res.GetOutput()
ds.create_or_touch_filepath(filepath, contents)
print('Written output to: ' + filepath + '... opening file')
os.system('open -R ' + filepath)
else:
msg = "Dumping protocols" if options.search_protocols else "Dumping classes"
result.AppendMessage(ds.attrStr(msg, 'cyan'))
interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res)
# debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script)
if res.GetError():
result.SetError(ds.attrStr(res.GetError(), 'red'))
return
result.AppendMessage(ds.attrStr('************************************************************', 'cyan'))
if res.Succeeded():
result.AppendMessage(res.GetOutput())
| [
2,
17168,
13789,
198,
198,
2,
15069,
357,
66,
8,
2177,
20893,
15300,
4066,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
... | 2.634412 | 3,307 |
#!/usr/bin/env python3
import re, inspect
set_mask(Logger.default_levels)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
302,
11,
10104,
628,
628,
198,
198,
2617,
62,
27932,
7,
11187,
1362,
13,
12286,
62,
46170,
8,
628
] | 2.612903 | 31 |
""" Base class to create Circus subscribers plugins.
"""
import sys
import logging
import errno
import uuid
import argparse
from circus import zmq
from zmq.eventloop import ioloop, zmqstream
from zmq.utils.jsonapi import jsonmod as json
from circus import logger, __version__
from circus.client import make_message, cast_message
from circus.util import (debuglog, to_bool, resolve_name, close_on_exec,
LOG_LEVELS, LOG_FMT, LOG_DATE_FMT,
DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB,
get_connection)
class CircusPlugin(object):
"""Base class to write plugins.
Options:
- **context** -- the ZMQ context to use
- **endpoint** -- the circusd ZMQ endpoint
- **pubsub_endpoint** -- the circusd ZMQ pub/sub endpoint
- **check_delay** -- the configured check delay
- **config** -- free config mapping
"""
name = ''
@debuglog
@debuglog
@debuglog
def call(self, command, **props):
"""Sends to **circusd** the command.
Options:
- **command** -- the command to call
- **props** -- keywords argument to add to the call
Returns the JSON mapping sent back by **circusd**
"""
msg = make_message(command, **props)
self.client.send(json.dumps(msg))
msg = self.client.recv()
return json.loads(msg)
def cast(self, command, **props):
"""Fire-and-forget a command to **circusd**
Options:
- **command** -- the command to call
- **props** -- keywords argument to add to the call
"""
msg = cast_message(command, **props)
self.client.send(json.dumps(msg))
#
# methods to override.
#
def handle_recv(self, data):
"""Receives every event published by **circusd**
Options:
- **data** -- a tuple containing the topic and the message.
"""
raise NotImplementedError()
def handle_stop(self):
"""Called right before the plugin is stopped by Circus.
"""
pass
def handle_init(self):
"""Called right befor a plugin is started - in the thread context.
"""
pass
if __name__ == '__main__':
main()
| [
37811,
7308,
1398,
284,
2251,
46658,
18327,
20652,
13,
198,
37811,
198,
11748,
25064,
198,
11748,
18931,
198,
11748,
11454,
3919,
198,
11748,
334,
27112,
198,
11748,
1822,
29572,
198,
198,
6738,
33125,
1330,
1976,
76,
80,
198,
6738,
1976,... | 2.441335 | 929 |
import requests
from globus_sdk import exc
class SearchAPIError(exc.GlobusAPIError):
"""
Error class for the Search API client. In addition to the
inherited ``code`` and ``message`` instance variables, provides ``error_data``.
:ivar error_data: Additional object returned in the error response. May be
a dict, list, or None.
"""
# the Search API always and only returns 'message' for string messages
MESSAGE_FIELDS = ["message"]
| [
11748,
7007,
198,
198,
6738,
15095,
385,
62,
21282,
74,
1330,
2859,
628,
198,
4871,
11140,
17614,
12331,
7,
41194,
13,
9861,
672,
385,
17614,
12331,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13047,
1398,
329,
262,
11140,
78... | 3.006211 | 161 |
import des109moeda
n1 = float(input('Digite o preรงo: R$'))
print(f'O valor {des109moeda.moeda(n1)} dobrado รฉ {des109moeda.dobro(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} pela metade รฉ {des109moeda.metade(n1, True)}.')
print(f'O valor {des109moeda.moeda(n1)} aumentado em 15% รฉ {des109moeda.aumenta(n1, 15, True)}.')
print(f'O valor {des109moeda.moeda(n1)} diminuรญdo em 15% รฉ {des109moeda.diminui(n1, 15, True)}.')
| [
11748,
748,
14454,
5908,
18082,
628,
198,
77,
16,
796,
12178,
7,
15414,
10786,
19511,
578,
267,
662,
16175,
78,
25,
371,
3,
6,
4008,
198,
4798,
7,
69,
6,
46,
1188,
273,
1391,
8906,
14454,
5908,
18082,
13,
5908,
18082,
7,
77,
16,
... | 2.129353 | 201 |
from env_alias import __title__ as NAME
from env_alias import __version__ as VERSION
from env_alias.utils import logger
from env_alias.exceptions.EnvAliasException import EnvAliasException
from env_alias.utils.config import EnvAliasConfig
from env_alias.utils.content import EnvAliasContent
from env_alias.utils.selector import EnvAliasSelector
| [
198,
6738,
17365,
62,
26011,
1330,
11593,
7839,
834,
355,
36751,
198,
6738,
17365,
62,
26011,
1330,
11593,
9641,
834,
355,
44156,
2849,
198,
198,
6738,
17365,
62,
26011,
13,
26791,
1330,
49706,
198,
6738,
17365,
62,
26011,
13,
1069,
117... | 3.625 | 96 |
"""
Module handling the I/O for an MD run.
"""
import csv
import pickle
import re
import sys
import yaml
from copy import copy, deepcopy
from IPython import get_ipython
from numpy import float64
from numpy import load as np_load
from numpy import savetxt, savez, zeros
from numpy.random import randint
from os import listdir, mkdir
from os.path import basename, exists, join
from pyfiglet import Figlet, print_figlet
from warnings import warn
if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
# If you are using Jupyter Notebook
from tqdm import tqdm_notebook as tqdm
else:
# If you are using IPython or Python kernel
from tqdm import tqdm
FONTS = ["speed", "starwars", "graffiti", "chunky", "epic", "larry3d", "ogre"]
# Light Colors.
LIGHT_COLORS = [
"255;255;255",
"13;177;75",
"153;162;162",
"240;133;33",
"144;154;183",
"209;222;63",
"232;217;181",
"200;154;88",
"148;174;74",
"203;90;40",
]
# Dark Colors.
DARK_COLORS = ["24;69;49", "0;129;131", "83;80;84", "110;0;95"]
class InputOutput:
"""
Class handling the input and output functions of the MD run.
Parameters
----------
process : str
Name of the process class containing MD run info.
"""
electrostatic_equilibration: bool = False
eq_dump_dir: str = "dumps"
equilibration_dir: str = "Equilibration"
input_file: str = None # MD run input file.
job_dir: str = None
job_id: str = None
log_file: str = None
mag_dump_dir: str = "dumps"
magnetization_dir: str = "Magnetization"
magnetized: bool = False
preprocess_file: str = None
preprocessing: bool = False
preprocessing_dir: str = "PreProcessing"
process: str = "preprocessing"
processes_dir: str = None
prod_dump_dir: str = "dumps"
production_dir: str = "Production"
postprocessing_dir: str = "PostProcessing"
simulations_dir: str = "Simulations"
simulation_dir: str = "Simulation"
verbose: bool = False
xyz_dir: str = None
xyz_filename: str = None
def __copy__(self):
"""Make a shallow copy of the object using copy by creating a new instance of the object and copying its __dict__."""
# Create a new object
_copy = type(self)()
# copy the dictionary
_copy.__dict__.update(self.__dict__)
return _copy
def from_dict(self, input_dict: dict):
"""
Update attributes from input dictionary.
Parameters
----------
input_dict: dict
Dictionary to be copied.
"""
self.__dict__.update(input_dict)
def setup(self):
"""Create file paths and directories for the simulation."""
self.create_file_paths()
self.make_directories()
self.file_header()
def from_yaml(self, filename: str):
"""
Parse inputs from YAML file.
Parameters
----------
filename: str
Input YAML file.
Returns
-------
dics : dict
Content of YAML file parsed in a nested dictionary
"""
self.input_file = filename
with open(filename, "r") as stream:
dics = yaml.load(stream, Loader=yaml.FullLoader)
self.__dict__.update(dics["IO"])
if "Parameters" in dics.keys():
keyed = "Parameters"
for key, value in dics[keyed].items():
if key == "verbose":
self.verbose = value
if key == "magnetized":
self.magnetized = value
if key == "load_method":
self.load_method = value
if value[-7:] == "restart":
self.restart = True
else:
self.restart = False
if key == "preprocessing":
self.preprocessing = value
if "Integrator" in dics.keys():
keyed = "Integrator"
for key, value in dics[keyed].items():
if key == "electrostatic_equilibration":
self.electrostatic_equilibration = value
# rdf_nbins can be defined in either Parameters or Postprocessing. However, Postprocessing will always
# supersede Parameters choice.
if "Observables" in dics.keys():
for i in dics["Observables"]:
if "RadialDistributionFunction" in i.keys():
dics["Parameters"]["rdf_nbins"] = i["RadialDistributionFunction"]["no_bins"]
return dics
def create_file_paths(self):
"""Create all directories', subdirectories', and files' paths."""
if self.job_dir is None:
self.job_dir = basename(self.input_file).split(".")[0]
if self.job_id is None:
self.job_id = self.job_dir
self.job_dir = join(self.simulations_dir, self.job_dir)
# Create Processes directories
self.processes_dir = [
join(self.job_dir, self.preprocessing_dir),
join(self.job_dir, self.simulation_dir),
join(self.job_dir, self.postprocessing_dir),
]
# Redundancy
self.preprocessing_dir = self.processes_dir[0]
self.simulation_dir = self.processes_dir[1]
self.postprocessing_dir = self.processes_dir[2]
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
# Equilibration directory and sub_dir
self.equilibration_dir = join(self.processes_dir[indx], self.equilibration_dir)
self.eq_dump_dir = join(self.equilibration_dir, "dumps")
# Production dir and sub_dir
self.production_dir = join(self.processes_dir[indx], self.production_dir)
self.prod_dump_dir = join(self.production_dir, "dumps")
# Production phase filenames
self.prod_energy_filename = join(self.production_dir, "ProductionEnergy_" + self.job_id + ".csv")
self.prod_ptcls_filename = join(self.prod_dump_dir, "checkpoint_")
# Equilibration phase filenames
self.eq_energy_filename = join(self.equilibration_dir, "EquilibrationEnergy_" + self.job_id + ".csv")
self.eq_ptcls_filename = join(self.eq_dump_dir, "checkpoint_")
# Magnetic dir
if self.electrostatic_equilibration:
self.magnetization_dir = join(self.processes_dir[indx], self.magnetization_dir)
self.mag_dump_dir = join(self.magnetization_dir, "dumps")
# Magnetization phase filenames
self.mag_energy_filename = join(self.magnetization_dir, "MagnetizationEnergy_" + self.job_id + ".csv")
self.mag_ptcls_filename = join(self.mag_dump_dir, "checkpoint_")
if self.process == "postprocessing":
indx = 2 # Redirect to the correct folder
# Log File
if self.log_file is None:
self.log_file = join(self.processes_dir[indx], "log_" + self.job_id + ".out")
else:
self.log_file = join(self.processes_dir[indx], self.log_file)
def make_directories(self):
"""Create directories where to store MD results."""
# Check if the directories exist
if not exists(self.simulations_dir):
mkdir(self.simulations_dir)
if not exists(self.job_dir):
mkdir(self.job_dir)
# Create Process' directories and their subdir
for i in self.processes_dir:
if not exists(i):
mkdir(i)
# The following automatically create directories in the correct Process
if not exists(self.equilibration_dir):
mkdir(self.equilibration_dir)
if not exists(self.eq_dump_dir):
mkdir(self.eq_dump_dir)
if not exists(self.production_dir):
mkdir(self.production_dir)
if not exists(self.prod_dump_dir):
mkdir(self.prod_dump_dir)
if self.electrostatic_equilibration:
if not exists(self.magnetization_dir):
mkdir(self.magnetization_dir)
if not exists(self.mag_dump_dir):
mkdir(self.mag_dump_dir)
if self.preprocessing:
if not exists(self.preprocessing_dir):
mkdir(self.preprocessing_dir)
if not exists(self.postprocessing_dir):
mkdir(self.postprocessing_dir)
def file_header(self):
"""Create the log file and print the figlet if not a restart run."""
if not self.restart:
with open(self.log_file, "w+") as f_log:
figlet_obj = Figlet(font="starwars")
print(figlet_obj.renderText("Sarkas"), file=f_log)
print("An open-source pure-Python molecular dynamics suite for non-ideal plasmas.", file=f_log)
# Print figlet to screen if verbose
if self.verbose:
self.screen_figlet()
def simulation_summary(self, simulation):
"""
Print out to file a summary of simulation's parameters.
If verbose output then it will print twice: the first time to file and second time to screen.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Simulation's parameters
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
# Print to file first then to screen if repeat == 2
while repeat > 0:
if simulation.parameters.load_method in ["production_restart", "prod_restart"]:
print("\n\n--------------------------- Production Restart -------------------------------------")
self.time_info(simulation)
elif simulation.parameters.load_method in ["equilibration_restart", "eq_restart"]:
print("\n\n------------------------ Equilibration Restart ----------------------------------")
self.time_info(simulation)
elif simulation.parameters.load_method in ["magnetization_restart", "mag_restart"]:
print("\n\n------------------------ Magnetization Restart ----------------------------------")
self.time_info(simulation)
elif self.process == "postprocessing":
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
print(f"\nJob ID: {self.job_id}")
print(f"Job directory: {self.job_dir}")
print(f"PostProcessing directory: \n{self.postprocessing_dir}")
print(f"\nEquilibration dumps directory: {self.eq_dump_dir}")
print(f"Production dumps directory: \n{self.prod_dump_dir}")
print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")
else:
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
print(f"\nJob ID: {self.job_id}")
print(f"Job directory: {self.job_dir}")
print(f"\nEquilibration dumps directory: \n", {self.eq_dump_dir})
print(f"Production dumps directory: \n", {self.prod_dump_dir})
print(f"\nEquilibration Thermodynamics file: \n{self.eq_energy_filename}")
print(f"Production Thermodynamics file: \n{self.prod_energy_filename}")
print("\nPARTICLES:")
print("Total No. of particles = ", simulation.parameters.total_num_ptcls)
for isp, sp in enumerate(simulation.species):
if sp.name == "electron_background":
sp_index = isp
print("No. of species = ", len(simulation.species[:isp]))
for isp, sp in enumerate(simulation.species):
if sp.name != "electron_background":
print("Species ID: {}".format(isp))
sp.pretty_print(simulation.potential.type, simulation.parameters.units)
# Parameters Info
simulation.parameters.pretty_print()
# Potential Info
simulation.potential.pretty_print()
# Integrator
simulation.integrator.pretty_print()
repeat -= 1
sys.stdout = screen # Restore the original sys.stdout
f_log.close()
def time_stamp(self, time_stamp, t):
"""
Print out to screen elapsed times. If verbose output, print to file first and then to screen.
Parameters
----------
time_stamp : str
Array of time stamps.
t : float
Elapsed time.
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
if "Potential Initialization" in time_stamp:
print("\n\n{:-^70} \n".format("Initialization Times"))
if t_hrs == 0 and t_min == 0 and t_sec <= 2:
print(f"\n{time_stamp} Time: {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec")
else:
print(f"\n{time_stamp} Time: {int(t_hrs)} hrs {int(t_min)} min {int(t_sec)} sec")
repeat -= 1
sys.stdout = screen
f_log.close()
def timing_study(self, simulation):
"""
Info specific for timing study.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the info to print.
"""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
# Print to file first then to screen if repeat == 2
while repeat > 0:
print("\n\n------------ Conclusion ------------\n")
print("Suggested Mesh = [ {} , {} , {} ]".format(*simulation.potential.pppm_mesh))
print(
"Suggested Ewald parameter alpha = {:2.4f} / a_ws = {:1.6e} ".format(
simulation.potential.pppm_alpha_ewald * simulation.parameters.a_ws,
simulation.potential.pppm_alpha_ewald,
),
end="",
)
print("[1/cm]" if simulation.parameters.units == "cgs" else "[1/m]")
print(
"Suggested rcut = {:2.4f} a_ws = {:.6e} ".format(
simulation.potential.rc / simulation.parameters.a_ws, simulation.potential.rc
),
end="",
)
print("[cm]" if simulation.parameters.units == "cgs" else "[m]")
self.algorithm_info(simulation)
repeat -= 1
sys.stdout = screen # Restore the original sys.stdout
f_log.close()
def preprocess_sizing(self, sizes):
"""Print the estimated file sizes."""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
print("\n\n{:=^70} \n".format(" Filesize Estimates "))
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 0])
print("\nEquilibration:\n")
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[0, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
if self.electrostatic_equilibration:
print("\nMagnetization:\n")
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 0])
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[2, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 0])
print("\nProduction:\n")
print(
"Checkpoint filesize: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[1, 1])
print(
"Checkpoint folder size: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
size_GB, size_MB, size_KB, rem = convert_bytes(sizes[:, 1].sum())
print(
"\nTotal minimum needed space: {} GB {} MB {} KB {} bytes".format(
int(size_GB), int(size_MB), int(size_KB), int(rem)
)
)
repeat -= 1
sys.stdout = screen
f_log.close()
def preprocess_timing(self, str_id, t, loops):
"""Print times estimates of simulation to file first and then to screen if verbose."""
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
t_hrs, t_min, t_sec, t_msec, t_usec, t_nsec = t
# redirect printing to file
sys.stdout = f_log
while repeat > 0:
if str_id == "header":
print("\n\n{:=^70} \n".format(" Times Estimates "))
elif str_id == "GF":
print(
"Optimal Green's Function Time: \n"
"{} min {} sec {} msec {} usec {} nsec \n".format(
int(t_min), int(t_sec), int(t_msec), int(t_usec), int(t_nsec)
)
)
elif str_id in ["PP", "PM", "FMM"]:
print(f"Time of {str_id} acceleration calculation averaged over {loops - 1} steps:")
print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")
elif str_id in ["Equilibration", "Magnetization", "Production"]:
print(f"Time of a single {str_id} step averaged over {loops - 1} steps:")
print(f"{int(t_min)} min {int(t_sec)} sec {int(t_msec)} msec {int(t_usec)} usec {int(t_nsec)} nsec \n")
if str_id == "Production":
print("\n\n{:-^70} \n".format(" Total Estimated Times "))
repeat -= 1
sys.stdout = screen
f_log.close()
def postprocess_info(self, simulation, write_to_file=False, observable=None):
"""
Print Post-processing info to file and/or screen in a reader-friendly format.
Parameters
----------
simulation : :class:`sarkas.processes.PostProcess`
PostProcess class.
write_to_file : bool
Flag for printing info also to file. Default= False.
observable : str
Observable whose info to print. Default = None.
Choices = ['header','rdf', 'ccf', 'dsf', 'ssf', 'vm']
"""
choices = ["header", "rdf", "ccf", "dsf", "ssf", "vd"]
msg = (
"Observable not defined. \n "
"Please choose an observable from this list \n"
"'rdf' = Radial Distribution Function, \n"
"'ccf' = Current Correlation Function, \n"
"'dsf' = Dynamic Structure Function, \n"
"'ssf' = Static Structure Factor, \n"
"'vd' = Velocity Distribution"
)
if observable is None:
raise ValueError(msg)
if observable not in choices:
raise ValueError(msg)
if write_to_file:
screen = sys.stdout
f_log = open(self.log_file, "a+")
repeat = 2 if self.verbose else 1
# redirect printing to file
sys.stdout = f_log
else:
repeat = 1
while repeat > 0:
if observable == "header":
# Header of process
process_title = "{:^80}".format(self.process.capitalize())
print("\n\n")
print(*["*" for i in range(50)])
print(process_title)
print(*["*" for i in range(50)])
elif observable == "rdf":
simulation.rdf.pretty_print()
elif observable == "ssf":
simulation.ssf.pretty_print()
elif observable == "dsf":
simulation.dsf.pretty_print()
elif observable == "ccf":
simulation.ccf.pretty_print()
elif observable == "vd":
simulation.vm.setup(simulation.parameters)
print("\nVelocity Moments:")
print("Maximum no. of moments = {}".format(simulation.vm.max_no_moment))
print("Maximum velocity moment = {}".format(int(2 * simulation.vm.max_no_moment)))
repeat -= 1
if write_to_file:
sys.stdout = screen
if write_to_file:
f_log.close()
@staticmethod
def screen_figlet():
"""
Print a colored figlet of Sarkas to screen.
"""
if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
# Assume white background in Jupyter Notebook
clr = DARK_COLORS[randint(0, len(DARK_COLORS))]
else:
# Assume dark background in IPython/Python Kernel
clr = LIGHT_COLORS[randint(0, len(LIGHT_COLORS))]
fnt = FONTS[randint(0, len(FONTS))]
print_figlet("\nSarkas\n", font=fnt, colors=clr)
print("\nAn open-source pure-python molecular dynamics suite for non-ideal plasmas.\n\n")
@staticmethod
def time_info(simulation):
"""
Print time simulation's parameters.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the timing info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release.\n" "Use Integrator.pretty_print()",
category=DeprecationWarning,
)
simulation.integrator.pretty_print()
@staticmethod
def algorithm_info(simulation):
"""
Print algorithm information.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the algorithm info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. Use potential.method_pretty_print()",
category=DeprecationWarning,
)
simulation.potential.method_pretty_print()
@staticmethod
def potential_info(simulation):
"""
Print potential information.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing the potential info and other parameters.
"""
warn(
"Deprecated feature. It will be removed in the v2.0.0 release. Use potential.pot_pretty_print()",
category=DeprecationWarning,
)
simulation.potential.pot_pretty_print(simulation.potential)
def copy_params(self, params):
"""
Copy necessary parameters.
Parameters
----------
params: :class:`sarkas.core.Parameters`
Simulation's parameters.
"""
self.dt = params.dt
self.a_ws = params.a_ws
self.total_num_ptcls = params.total_num_ptcls
self.total_plasma_frequency = params.total_plasma_frequency
self.species_names = params.species_names.copy()
self.coupling = params.coupling_constant * params.T_desired
def setup_checkpoint(self, params):
"""
Assign attributes needed for saving dumps.
Parameters
----------
params : :class:`sarkas.core.Parameters`
General simulation parameters.
species : :class:`sarkas.plasma.Species`
List of Species classes.
"""
self.copy_params(params)
# Check whether energy files exist already
if not exists(self.prod_energy_filename):
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.prod_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
if not exists(self.eq_energy_filename) and not params.load_method[-7:] == "restart":
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.eq_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
if self.electrostatic_equilibration:
if not exists(self.mag_energy_filename) and not params.load_method[-7:] == "restart":
# Create the Energy file
dkeys = ["Time", "Total Energy", "Total Kinetic Energy", "Potential Energy", "Temperature"]
if len(self.species_names) > 1:
for i, sp_name in enumerate(self.species_names):
dkeys.append("{} Kinetic Energy".format(sp_name))
dkeys.append("{} Potential Energy".format(sp_name))
dkeys.append("{} Temperature".format(sp_name))
data = dict.fromkeys(dkeys)
with open(self.mag_energy_filename, "w+") as f:
w = csv.writer(f)
w.writerow(data.keys())
def save_pickle(self, simulation):
"""
Save all simulations parameters in pickle files.
Parameters
----------
simulation : :class:`sarkas.processes.Process`
Process class containing MD run info to save.
"""
file_list = ["parameters", "integrator", "potential", "species"]
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
for fl in file_list:
filename = join(self.processes_dir[indx], fl + ".pickle")
with open(filename, "wb") as pickle_file:
pickle.dump(simulation.__dict__[fl], pickle_file)
pickle_file.close()
def read_pickle(self, process):
"""
Read pickle files containing all the simulation information.
Parameters
----------
process : :class:`sarkas.processes.Process`
Process class containing MD run info to save.
"""
file_list = ["parameters", "integrator", "potential"]
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
for fl in file_list:
filename = join(self.processes_dir[indx], fl + ".pickle")
with open(filename, "rb") as handle:
data = pickle.load(handle)
process.__dict__[fl] = copy(data)
# Read species
filename = join(self.processes_dir[indx], "species.pickle")
process.species = []
with open(filename, "rb") as handle:
data = pickle.load(handle)
process.species = copy(data)
def read_pickle_single(self, class_to_read: str):
"""
Read the desired pickle file.
Parameters
----------
class_to_read : str
Name of the class to read.
Returns
-------
_copy : cls
Copy of desired class.
"""
# Redirect to the correct process folder
if self.process == "preprocessing":
indx = 0
else:
# Note that Postprocessing needs the link to simulation's folder
# because that is where I look for energy files and pickle files
indx = 1
filename = join(self.processes_dir[indx], class_to_read + ".pickle")
with open(filename, "rb") as pickle_file:
data = pickle.load(pickle_file)
_copy = deepcopy(data)
return _copy
def dump(self, phase, ptcls, it):
"""
Save particles' data to binary file for future restart.
Parameters
----------
phase : str
Simulation phase.
ptcls : :class:`sarkas.particles.Particles`
Particles data.
it : int
Timestep number.
"""
if phase == "production":
ptcls_file = self.prod_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
cntr=ptcls.pbc_cntr,
rdf_hist=ptcls.rdf_hist,
virial=ptcls.virial,
time=tme,
)
energy_file = self.prod_energy_filename
elif phase == "equilibration":
ptcls_file = self.eq_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
virial=ptcls.virial,
time=tme,
)
energy_file = self.eq_energy_filename
elif phase == "magnetization":
ptcls_file = self.mag_ptcls_filename + str(it)
tme = it * self.dt
savez(
ptcls_file,
id=ptcls.id,
names=ptcls.names,
pos=ptcls.pos,
vel=ptcls.vel,
acc=ptcls.acc,
virial=ptcls.virial,
time=tme,
)
energy_file = self.mag_energy_filename
kinetic_energies, temperatures = ptcls.kinetic_temperature()
potential_energies = ptcls.potential_energies()
# Save Energy data
data = {
"Time": it * self.dt,
"Total Energy": kinetic_energies.sum() + ptcls.potential_energy,
"Total Kinetic Energy": kinetic_energies.sum(),
"Potential Energy": ptcls.potential_energy,
"Total Temperature": ptcls.species_num.transpose() @ temperatures / ptcls.total_num_ptcls,
}
if len(temperatures) > 1:
for sp, kin in enumerate(kinetic_energies):
data[f"{self.species_names[sp]} Kinetic Energy"] = kin
data[f"{self.species_names[sp]} Potential Energy"] = potential_energies[sp]
data[f"{self.species_names[sp]} Temperature"] = temperatures[sp]
with open(energy_file, "a") as f:
w = csv.writer(f)
w.writerow(data.values())
def dump_xyz(self, phase: str = "production"):
"""
Save the XYZ file by reading Sarkas dumps.
Parameters
----------
phase : str
Phase from which to read dumps. 'equilibration' or 'production'.
dump_skip : int
Interval of dumps to skip. Default = 1
"""
if phase == "equilibration":
self.xyz_filename = join(self.equilibration_dir, "pva_" + self.job_id + ".xyz")
dump_dir = self.eq_dump_dir
else:
self.xyz_filename = join(self.production_dir, "pva_" + self.job_id + ".xyz")
dump_dir = self.prod_dump_dir
f_xyz = open(self.xyz_filename, "w+")
if not hasattr(self, "a_ws"):
params = self.read_pickle_single("parameters")
self.a_ws = params.a_ws
self.total_num_ptcls = params.total_num_ptcls
self.total_plasma_frequency = params.total_plasma_frequency
# Rescale constants. This is needed since OVITO has a small number limit.
pscale = 1.0 / self.a_ws
vscale = 1.0 / (self.a_ws * self.total_plasma_frequency)
ascale = 1.0 / (self.a_ws * self.total_plasma_frequency**2)
# Read the list of dumps and sort them in the correct (natural) order
dumps = listdir(dump_dir)
dumps.sort(key=num_sort)
for dump in tqdm(dumps, disable=not self.verbose):
data = self.read_npz(dump_dir, dump)
data["pos_x"] *= pscale
data["pos_y"] *= pscale
data["pos_z"] *= pscale
data["vel_x"] *= vscale
data["vel_y"] *= vscale
data["vel_z"] *= vscale
data["acc_x"] *= ascale
data["acc_y"] *= ascale
data["acc_z"] *= ascale
f_xyz.writelines("{0:d}\n".format(self.total_num_ptcls))
f_xyz.writelines("name x y z vx vy vz ax ay az\n")
savetxt(f_xyz, data, fmt="%s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")
f_xyz.close()
@staticmethod
def read_npz(fldr: str, filename: str):
"""
Load particles' data from dumps.
Parameters
----------
fldr : str
Folder containing dumps.
filename: str
Name of the dump file to load.
Returns
-------
struct_array : numpy.ndarray
Structured data array.
"""
file_name = join(fldr, filename)
data = np_load(file_name, allow_pickle=True)
# Dev Notes: the old way of saving the xyz file by
# savetxt(f_xyz, np.c_[data["names"],data["pos"] ....]
# , fmt="%10s %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e %.6e")
# was not working, because the columns of np.c_[] all have the same data type <U32
# which is in conflict with the desired fmt. i.e. data["names"] was not recognized as a string.
# So I have to create a new structured array and pass this. I could not think of a more Pythonic way.
struct_array = zeros(
data["names"].size,
dtype=[
("names", "U6"),
("pos_x", float64),
("pos_y", float64),
("pos_z", float64),
("vel_x", float64),
("vel_y", float64),
("vel_z", float64),
("acc_x", float64),
("acc_y", float64),
("acc_z", float64),
],
)
struct_array["names"] = data["names"]
struct_array["pos_x"] = data["pos"][:, 0]
struct_array["pos_y"] = data["pos"][:, 1]
struct_array["pos_z"] = data["pos"][:, 2]
struct_array["vel_x"] = data["vel"][:, 0]
struct_array["vel_y"] = data["vel"][:, 1]
struct_array["vel_z"] = data["vel"][:, 2]
struct_array["acc_x"] = data["acc"][:, 0]
struct_array["acc_y"] = data["acc"][:, 1]
struct_array["acc_z"] = data["acc"][:, 2]
return struct_array
def alpha_to_int(text):
    """Convert a decimal-digit string to ``int``; leave other text unchanged.

    Parameters
    ----------
    text : str
        Token to convert, if it is a number.

    Returns
    -------
    int or str
        ``int(text)`` when *text* consists solely of digits, otherwise the
        original string.
    """
    if text.isdigit():
        return int(text)
    return text
def num_sort(text):
    """
    Split a string into alternating text and integer chunks, usable as a
    natural-sort key.

    Parameters
    ----------
    text : str
        Text to be split into str and int tokens.

    Returns
    -------
    list
        List containing text and integers.

    Notes
    -----
    Natural-sort recipe adapted from
    https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside,
    originally from http://nedbatchelder.com/blog/200712/human_sorting.html
    (see Toothy's implementation in the comments).
    """
    tokens = re.split(r"(\d+)", text)
    return [int(tok) if tok.isdigit() else tok for tok in tokens]
def convert_bytes(tot_bytes):
    """Break a byte count into human-readable GB, MB, KB components.

    Parameters
    ----------
    tot_bytes : int
        Total number of bytes.

    Returns
    -------
    list
        ``[GB, MB, KB, rem]`` where each entry is the whole number of that
        (binary) unit contained in *tot_bytes* and ``rem`` is the leftover
        bytes.
    """
    remainder = tot_bytes
    parts = []
    # Peel off each unit from largest to smallest.
    for unit_size in (1024**3, 1024**2, 1024):
        count, remainder = divmod(remainder, unit_size)
        parts.append(count)
    parts.append(remainder)
    return parts
| [
37811,
198,
26796,
9041,
262,
314,
14,
46,
329,
281,
10670,
1057,
13,
198,
37811,
198,
11748,
269,
21370,
198,
11748,
2298,
293,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
331,
43695,
198,
6738,
4866,
1330,
4866,
11,
2769,
30073,
... | 2.070305 | 18,747 |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Script for model comparison between TF and Gluon."""
import argparse
import logging
import os
import sys
import mxnet as mx
import numpy as np
import torch
import gluonnlp as nlp
import transformers
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Comparison script for Tensorflow and GLuon XLNet model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model-name', type=str, required=True,
choices=['xlnet_cased_L-12_H-768_A-12',
'xlnet_cased_L-24_H-1024_A-16'], help='Model name')
parser.add_argument('--gluon-parameter-file', type=str, required=True,
help='gluon parameter file name.')
parser.add_argument('--gluon-vocab-file', type=str, required=True,
help='gluon vocab file corresponding to --gluon_parameter_file.')
parser.add_argument('--debug', action='store_true', help='debugging mode')
args = parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG if args.debug else logging.INFO)
logging.info(args)
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir)))
from transformer import XLNet
compare_xlnet(args)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
1... | 2.842896 | 732 |
# coding: utf-8
# Standard Libraries
import unittest
from pathlib import Path
# Dopplerr
from dopplerr.tasks.download_subtitles import DownloadSubtitleTask
# Todo:
# glob test of "The.Series.Name.S07E06.720p.BluRay.DD5.1.x264-EbP-Obfuscated"
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
8997,
46267,
198,
11748,
555,
715,
395,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
2141,
381,
1754,
81,
198,
6738,
466,
381,
1754,
81,
13,
83,
6791,
13,
15002,
62,
7266,
83,
305... | 2.490196 | 102 |
a = 3 \ 4.0
print('hello')
| [
64,
796,
513,
3467,
604,
13,
15,
198,
4798,
10786,
31373,
11537,
198
] | 2.076923 | 13 |
class SpotifyObject:
'''
Represents a generic Spotify Object.
Attributes
----------
id: str
Spotify ID of the object.
name: str
Name of the object.
uri: str
Spotify URI of the object.
'''
_type = None
@property
| [
4871,
26778,
10267,
25,
198,
197,
7061,
6,
198,
197,
6207,
6629,
257,
14276,
26778,
9515,
13,
198,
197,
198,
197,
29021,
198,
197,
35937,
198,
197,
198,
197,
312,
25,
965,
198,
197,
197,
32565,
1958,
4522,
286,
262,
2134,
13,
198,
... | 2.681818 | 88 |
# Module-level Flask application setup.
from flask import Flask
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
# Serve static assets from /static; load settings from the DevConfig class.
app = Flask(__name__, static_url_path='/static')
app.config.from_object('config.DevConfig')
CORS(app)
# Serialization (Marshmallow) and ORM (SQLAlchemy) extensions bound to the app.
ma = Marshmallow(app)
db = SQLAlchemy(app)
# Imported after `app`/`db` exist, presumably to avoid a circular import.
from routes import images
app.register_blueprint(images)
# NOTE(review): `Setup` is imported but never referenced here — presumably
# the import itself runs setup side effects in dbSetup; confirm.
from dbSetup import Setup
if __name__ == '__main__':
    app.run()
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
76,
5406,
42725,
1330,
9786,
42725,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
198,
1324,
796,
46947,
7,
834,
... | 2.971223 | 139 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# Greed is a dice game where you roll up to five dice to accumulate
# points. The following "score" function will be used calculate the
# score of a single roll of the dice.
#
# A greed roll is scored as follows:
#
# * A set of three ones is 1000 points
#
# * A set of three numbers (other than ones) is worth 100 times the
# number. (e.g. three fives is 500 points).
#
# * A one (that is not part of a set of three) is worth 100 points.
#
# * A five (that is not part of a set of three) is worth 50 points.
#
# * Everything else is worth 0 points.
#
#
# Examples:
#
# score([1,1,1,5,1]) => 1150 points
# score([2,3,4,6,2]) => 0 points
# score([3,4,5,3,3]) => 350 points
# score([1,5,1,2,4]) => 250 points
#
# More scoring examples are given in the tests below:
#
# Your goal is to write the score method.
# Test cases start from here
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
17490,
13,
7204,
272,
1330,
1635,
198,
198,
2,
11955,
276,
318,
257,
17963,
983,
810,
345,
4836,
510,
284,... | 2.929487 | 312 |
# Función de generación de matrices de clases y características
| [
2,
11138,
979,
18840,
390,
1152,
32009,
18840,
390,
2603,
45977,
390,
537,
1386,
331,
1097,
7321,
8836,
11268,
292,
628,
628,
628,
198
] | 2.916667 | 24 |
# import dash
# import dash_core_components as dcc
# import dash_html_components as html
# import pandas as pd
# import plotly.graph_objs as go
# from dash.dependencies import Input, Output
# import requests, json
# # df = pd.read_csv(
# # 'wiki_fires_cleaned_2015-2018.csv')
# year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# # print(type(year_data))
# df = pd.read_json(year_data.content)
# print(year_data)
# app = dash.Dash()
# app.layout = html.Div([
# dcc.Graph(id = 'graph-with-slider'),
# dcc.Slider(
# id = 'year-slider',
# min = df['Fire Year'].min(),
# max = df['Fire Year'].max(),
# value = df['Fire Year'].min(),
# step = None,
# marks = {str(Year): str(Year) for Year in df['Fire Year'].unique()}
# )
# ])
# @app.callback(
# dash.dependencies.Output('graph-with-slider', 'figure'),
# [dash.dependencies.Input('year-slider', 'value')])
# def update_figure(selected_year):
# filtered_df = df[df["Fire Year"] == selected_year]
# traces = []
# for i in filtered_df.County.unique():
# df_by_county = filtered_df[filtered_df['County'] == i]
# traces.append(go.Scatter(
# x = df_by_county['Number of Days'],
# y = df_by_county['Acres Burned'],
# text = f"{i}, {selected_year}",
# mode = 'markers',
# opacity = 0.7,
# marker = {
# 'size': 15,
# 'line': {'width': 0.5, 'color': 'white'}
# },
# name = i
# ))
# return {
# 'data': traces,
# 'layout': go.Layout(
# xaxis = {'type': 'linear', 'title': 'Number of Days'},
# yaxis = {'title': 'Acres Burned', 'range': [0, 30000]},
# margin = {'l': 40, 'b': 40, 't': 10, 'r': 10},
# hovermode = 'closest'
# )
# }
# if __name__ == '__main__':
# app.run_server()
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import requests, json
# df = pd.read_csv(
# 'wiki_fires_cleaned_2015-2018.csv')
year_data = requests.get("http://127.0.0.1:5000/api/v1.0/wildfires/greaterthan/2015","json")
# print(type(year_data))
df = pd.read_json(year_data.content)
app = dash.Dash()
app.layout = html.Div([
html.Div([
dcc.Graph(id='graph-with-slider',
hoverData={'points':[{'customdata':"San Bernardino"}] })
], style={'width': '49%', 'height': '550', 'display': 'inline-block', 'padding': '0.20'}),
html.Div([
dcc.Graph(id='x-time-series'),
dcc.Graph(id='y-time-series'),
], style={'display': 'inline-block', 'width': '49%', 'height':'550'}),
html.Div(
dcc.Slider(
id='year-slider',
min=df['Fire Year'].min(),
max=df['Fire Year'].max(),
value=df['Fire Year'].min(),
step=None,
marks={str(Year): str(Year) for Year in df['Fire Year'].unique()}
), style={'width': '49%', 'padding':'0px 20px 20px 20px'})
])
@app.callback(
dash.dependencies.Output('graph-with-slider', 'figure'),
[dash.dependencies.Input('year-slider', 'value')])
@app.callback(
dash.dependencies.Output('x-time-series', 'figure'),
[dash.dependencies.Input('graph-with-slider', 'hoverData'),
dash.dependencies.Input('year-slider', 'value')]
)
@app.callback(
dash.dependencies.Output('y-time-series', 'figure'),
[dash.dependencies.Input('graph-with-slider', 'hoverData'),
dash.dependencies.Input('year-slider', 'value')]
)
if __name__ == '__main__':
app.run_server() | [
2,
1330,
14470,
201,
198,
2,
1330,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
201,
198,
2,
1330,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
201,
198,
2,
1330,
19798,
292,
355,
279,
67,
201,
198,
2,
1330,
7110,
306,
13,
34... | 2.06392 | 1,893 |
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils import timezone
import experiments.utils as utils
from experiments.models import RequestMonitor, Player, Experiment, Treatment, Session
from experiments.views import login_view, game_view, finish_round_view
# Create your tests here.
def add_session_to_request(request):
    """Annotate a request object with a freshly created, saved session."""
    # Run Django's session middleware over the bare request, then persist
    # the session it attached.
    SessionMiddleware().process_request(request)
    request.session.save()
| [
6738,
42625,
14208,
13,
3642,
822,
13,
82,
6202,
13,
27171,
1574,
1330,
23575,
34621,
1574,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
19390,
22810,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
142... | 3.674847 | 163 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~ยบ/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
"""An example of an application that use :mod:`xoeuf.cli`.
It behaves similar to "openerp-server" script. This module does not provide any
external facilities, but uses :func:`xotl.tools.cli.app.main` to run the
OpenERP server. Usage::
$ python server.py [options...]
"""
if __name__ == "__main__":
server()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
30934,
198,
2,
15069,
357,
66,
8,
34414,
786,
5231,
260,
434,
685,
93,
36165,
14,
93,
60,
290,
25767,
669... | 3.285714 | 182 |
# Generated by Django 2.2.6 on 2019-12-12 22:24
import django.db.models.deletion
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
1065,
12,
1065,
2534,
25,
1731,
198,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
... | 2.818182 | 44 |
bit_pattern_31 = \
[
8,-3, 9,5, #/*mean (0), correlation (0)*/,
4,2, 7,-12, #/*mean (1.12461e-05), correlation (0.0437584)*/,
-11,9, -8,2, #/*mean (3.37382e-05), correlation (0.0617409)*/,
7,-12, 12,-13, #/*mean (5.62303e-05), correlation (0.0636977)*/,
2,-13, 2,12, #/*mean (0.000134953), correlation (0.085099)*/,
1,-7, 1,6, #/*mean (0.000528565), correlation (0.0857175)*/,
-2,-10, -2,-4, #/*mean (0.0188821), correlation (0.0985774)*/,
-13,-13, -11,-8, #/*mean (0.0363135), correlation (0.0899616)*/,
-13,-3, -12,-9, #/*mean (0.121806), correlation (0.099849)*/,
10,4, 11,9, #/*mean (0.122065), correlation (0.093285)*/,
-13,-8, -8,-9, #/*mean (0.162787), correlation (0.0942748)*/,
-11,7, -9,12, #/*mean (0.21561), correlation (0.0974438)*/,
7,7, 12,6, #/*mean (0.160583), correlation (0.130064)*/,
-4,-5, -3,0, #/*mean (0.228171), correlation (0.132998)*/,
-13,2, -12,-3, #/*mean (0.00997526), correlation (0.145926)*/,
-9,0, -7,5, #/*mean (0.198234), correlation (0.143636)*/,
12,-6, 12,-1, #/*mean (0.0676226), correlation (0.16689)*/,
-3,6, -2,12, #/*mean (0.166847), correlation (0.171682)*/,
-6,-13, -4,-8, #/*mean (0.101215), correlation (0.179716)*/,
11,-13, 12,-8, #/*mean (0.200641), correlation (0.192279)*/,
4,7, 5,1, #/*mean (0.205106), correlation (0.186848)*/,
5,-3, 10,-3, #/*mean (0.234908), correlation (0.192319)*/,
3,-7, 6,12, #/*mean (0.0709964), correlation (0.210872)*/,
-8,-7, -6,-2, #/*mean (0.0939834), correlation (0.212589)*/,
-2,11, -1,-10, #/*mean (0.127778), correlation (0.20866)*/,
-13,12, -8,10, #/*mean (0.14783), correlation (0.206356)*/,
-7,3, -5,-3, #/*mean (0.182141), correlation (0.198942)*/,
-4,2, -3,7, #/*mean (0.188237), correlation (0.21384)*/,
-10,-12, -6,11, #/*mean (0.14865), correlation (0.23571)*/,
5,-12, 6,-7, #/*mean (0.222312), correlation (0.23324)*/,
5,-6, 7,-1, #/*mean (0.229082), correlation (0.23389)*/,
1,0, 4,-5, #/*mean (0.241577), correlation (0.215286)*/,
9,11, 11,-13, #/*mean (0.00338507), correlation (0.251373)*/,
4,7, 4,12, #/*mean (0.131005), correlation (0.257622)*/,
2,-1, 4,4, #/*mean (0.152755), correlation (0.255205)*/,
-4,-12, -2,7, #/*mean (0.182771), correlation (0.244867)*/,
-8,-5, -7,-10, #/*mean (0.186898), correlation (0.23901)*/,
4,11, 9,12, #/*mean (0.226226), correlation (0.258255)*/,
0,-8, 1,-13, #/*mean (0.0897886), correlation (0.274827)*/,
-13,-2, -8,2, #/*mean (0.148774), correlation (0.28065)*/,
-3,-2, -2,3, #/*mean (0.153048), correlation (0.283063)*/,
-6,9, -4,-9, #/*mean (0.169523), correlation (0.278248)*/,
8,12, 10,7, #/*mean (0.225337), correlation (0.282851)*/,
0,9, 1,3, #/*mean (0.226687), correlation (0.278734)*/,
7,-5, 11,-10, #/*mean (0.00693882), correlation (0.305161)*/,
-13,-6, -11,0, #/*mean (0.0227283), correlation (0.300181)*/,
10,7, 12,1, #/*mean (0.125517), correlation (0.31089)*/,
-6,-3, -6,12, #/*mean (0.131748), correlation (0.312779)*/,
10,-9, 12,-4, #/*mean (0.144827), correlation (0.292797)*/,
-13,8, -8,-12, #/*mean (0.149202), correlation (0.308918)*/,
-13,0, -8,-4, #/*mean (0.160909), correlation (0.310013)*/,
3,3, 7,8, #/*mean (0.177755), correlation (0.309394)*/,
5,7, 10,-7, #/*mean (0.212337), correlation (0.310315)*/,
-1,7, 1,-12, #/*mean (0.214429), correlation (0.311933)*/,
3,-10, 5,6, #/*mean (0.235807), correlation (0.313104)*/,
2,-4, 3,-10, #/*mean (0.00494827), correlation (0.344948)*/,
-13,0, -13,5, #/*mean (0.0549145), correlation (0.344675)*/,
-13,-7, -12,12, #/*mean (0.103385), correlation (0.342715)*/,
-13,3, -11,8, #/*mean (0.134222), correlation (0.322922)*/,
-7,12, -4,7, #/*mean (0.153284), correlation (0.337061)*/,
6,-10, 12,8, #/*mean (0.154881), correlation (0.329257)*/,
-9,-1, -7,-6, #/*mean (0.200967), correlation (0.33312)*/,
-2,-5, 0,12, #/*mean (0.201518), correlation (0.340635)*/,
-12,5, -7,5, #/*mean (0.207805), correlation (0.335631)*/,
3,-10, 8,-13, #/*mean (0.224438), correlation (0.34504)*/,
-7,-7, -4,5, #/*mean (0.239361), correlation (0.338053)*/,
-3,-2, -1,-7, #/*mean (0.240744), correlation (0.344322)*/,
2,9, 5,-11, #/*mean (0.242949), correlation (0.34145)*/,
-11,-13, -5,-13, #/*mean (0.244028), correlation (0.336861)*/,
-1,6, 0,-1, #/*mean (0.247571), correlation (0.343684)*/,
5,-3, 5,2, #/*mean (0.000697256), correlation (0.357265)*/,
-4,-13, -4,12, #/*mean (0.00213675), correlation (0.373827)*/,
-9,-6, -9,6, #/*mean (0.0126856), correlation (0.373938)*/,
-12,-10, -8,-4, #/*mean (0.0152497), correlation (0.364237)*/,
10,2, 12,-3, #/*mean (0.0299933), correlation (0.345292)*/,
7,12, 12,12, #/*mean (0.0307242), correlation (0.366299)*/,
-7,-13, -6,5, #/*mean (0.0534975), correlation (0.368357)*/,
-4,9, -3,4, #/*mean (0.099865), correlation (0.372276)*/,
7,-1, 12,2, #/*mean (0.117083), correlation (0.364529)*/,
-7,6, -5,1, #/*mean (0.126125), correlation (0.369606)*/,
-13,11, -12,5, #/*mean (0.130364), correlation (0.358502)*/,
-3,7, -2,-6, #/*mean (0.131691), correlation (0.375531)*/,
7,-8, 12,-7, #/*mean (0.160166), correlation (0.379508)*/,
-13,-7, -11,-12, #/*mean (0.167848), correlation (0.353343)*/,
1,-3, 12,12, #/*mean (0.183378), correlation (0.371916)*/,
2,-6, 3,0, #/*mean (0.228711), correlation (0.371761)*/,
-4,3, -2,-13, #/*mean (0.247211), correlation (0.364063)*/,
-1,-13, 1,9, #/*mean (0.249325), correlation (0.378139)*/,
7,1, 8,-6, #/*mean (0.000652272), correlation (0.411682)*/,
1,-1, 3,12, #/*mean (0.00248538), correlation (0.392988)*/,
9,1, 12,6, #/*mean (0.0206815), correlation (0.386106)*/,
-1,-9, -1,3, #/*mean (0.0364485), correlation (0.410752)*/,
-13,-13, -10,5, #/*mean (0.0376068), correlation (0.398374)*/,
7,7, 10,12, #/*mean (0.0424202), correlation (0.405663)*/,
12,-5, 12,9, #/*mean (0.0942645), correlation (0.410422)*/,
6,3, 7,11, #/*mean (0.1074), correlation (0.413224)*/,
5,-13, 6,10, #/*mean (0.109256), correlation (0.408646)*/,
2,-12, 2,3, #/*mean (0.131691), correlation (0.416076)*/,
3,8, 4,-6, #/*mean (0.165081), correlation (0.417569)*/,
2,6, 12,-13, #/*mean (0.171874), correlation (0.408471)*/,
9,-12, 10,3, #/*mean (0.175146), correlation (0.41296)*/,
-8,4, -7,9, #/*mean (0.183682), correlation (0.402956)*/,
-11,12, -4,-6, #/*mean (0.184672), correlation (0.416125)*/,
1,12, 2,-8, #/*mean (0.191487), correlation (0.386696)*/,
6,-9, 7,-4, #/*mean (0.192668), correlation (0.394771)*/,
2,3, 3,-2, #/*mean (0.200157), correlation (0.408303)*/,
6,3, 11,0, #/*mean (0.204588), correlation (0.411762)*/,
3,-3, 8,-8, #/*mean (0.205904), correlation (0.416294)*/,
7,8, 9,3, #/*mean (0.213237), correlation (0.409306)*/,
-11,-5, -6,-4, #/*mean (0.243444), correlation (0.395069)*/,
-10,11, -5,10, #/*mean (0.247672), correlation (0.413392)*/,
-5,-8, -3,12, #/*mean (0.24774), correlation (0.411416)*/,
-10,5, -9,0, #/*mean (0.00213675), correlation (0.454003)*/,
8,-1, 12,-6, #/*mean (0.0293635), correlation (0.455368)*/,
4,-6, 6,-11, #/*mean (0.0404971), correlation (0.457393)*/,
-10,12, -8,7, #/*mean (0.0481107), correlation (0.448364)*/,
4,-2, 6,7, #/*mean (0.050641), correlation (0.455019)*/,
-2,0, -2,12, #/*mean (0.0525978), correlation (0.44338)*/,
-5,-8, -5,2, #/*mean (0.0629667), correlation (0.457096)*/,
7,-6, 10,12, #/*mean (0.0653846), correlation (0.445623)*/,
-9,-13, -8,-8, #/*mean (0.0858749), correlation (0.449789)*/,
-5,-13, -5,-2, #/*mean (0.122402), correlation (0.450201)*/,
8,-8, 9,-13, #/*mean (0.125416), correlation (0.453224)*/,
-9,-11, -9,0, #/*mean (0.130128), correlation (0.458724)*/,
1,-8, 1,-2, #/*mean (0.132467), correlation (0.440133)*/,
7,-4, 9,1, #/*mean (0.132692), correlation (0.454)*/,
-2,1, -1,-4, #/*mean (0.135695), correlation (0.455739)*/,
11,-6, 12,-11, #/*mean (0.142904), correlation (0.446114)*/,
-12,-9, -6,4, #/*mean (0.146165), correlation (0.451473)*/,
3,7, 7,12, #/*mean (0.147627), correlation (0.456643)*/,
5,5, 10,8, #/*mean (0.152901), correlation (0.455036)*/,
0,-4, 2,8, #/*mean (0.167083), correlation (0.459315)*/,
-9,12, -5,-13, #/*mean (0.173234), correlation (0.454706)*/,
0,7, 2,12, #/*mean (0.18312), correlation (0.433855)*/,
-1,2, 1,7, #/*mean (0.185504), correlation (0.443838)*/,
5,11, 7,-9, #/*mean (0.185706), correlation (0.451123)*/,
3,5, 6,-8, #/*mean (0.188968), correlation (0.455808)*/,
-13,-4, -8,9, #/*mean (0.191667), correlation (0.459128)*/,
-5,9, -3,-3, #/*mean (0.193196), correlation (0.458364)*/,
-4,-7, -3,-12, #/*mean (0.196536), correlation (0.455782)*/,
6,5, 8,0, #/*mean (0.1972), correlation (0.450481)*/,
-7,6, -6,12, #/*mean (0.199438), correlation (0.458156)*/,
-13,6, -5,-2, #/*mean (0.211224), correlation (0.449548)*/,
1,-10, 3,10, #/*mean (0.211718), correlation (0.440606)*/,
4,1, 8,-4, #/*mean (0.213034), correlation (0.443177)*/,
-2,-2, 2,-13, #/*mean (0.234334), correlation (0.455304)*/,
2,-12, 12,12, #/*mean (0.235684), correlation (0.443436)*/,
-2,-13, 0,-6, #/*mean (0.237674), correlation (0.452525)*/,
4,1, 9,3, #/*mean (0.23962), correlation (0.444824)*/,
-6,-10, -3,-5, #/*mean (0.248459), correlation (0.439621)*/,
-3,-13, -1,1, #/*mean (0.249505), correlation (0.456666)*/,
7,5, 12,-11, #/*mean (0.00119208), correlation (0.495466)*/,
4,-2, 5,-7, #/*mean (0.00372245), correlation (0.484214)*/,
-13,9, -9,-5, #/*mean (0.00741116), correlation (0.499854)*/,
7,1, 8,6, #/*mean (0.0208952), correlation (0.499773)*/,
7,-8, 7,6, #/*mean (0.0220085), correlation (0.501609)*/,
-7,-4, -7,1, #/*mean (0.0233806), correlation (0.496568)*/,
-8,11, -7,-8, #/*mean (0.0236505), correlation (0.489719)*/,
-13,6, -12,-8, #/*mean (0.0268781), correlation (0.503487)*/,
2,4, 3,9, #/*mean (0.0323324), correlation (0.501938)*/,
10,-5, 12,3, #/*mean (0.0399235), correlation (0.494029)*/,
-6,-5, -6,7, #/*mean (0.0420153), correlation (0.486579)*/,
8,-3, 9,-8, #/*mean (0.0548021), correlation (0.484237)*/,
2,-12, 2,8, #/*mean (0.0616622), correlation (0.496642)*/,
-11,-2, -10,3, #/*mean (0.0627755), correlation (0.498563)*/,
-12,-13, -7,-9, #/*mean (0.0829622), correlation (0.495491)*/,
-11,0, -10,-5, #/*mean (0.0843342), correlation (0.487146)*/,
5,-3, 11,8, #/*mean (0.0929937), correlation (0.502315)*/,
-2,-13, -1,12, #/*mean (0.113327), correlation (0.48941)*/,
-1,-8, 0,9, #/*mean (0.132119), correlation (0.467268)*/,
-13,-11, -12,-5, #/*mean (0.136269), correlation (0.498771)*/,
-10,-2, -10,11, #/*mean (0.142173), correlation (0.498714)*/,
-3,9, -2,-13, #/*mean (0.144141), correlation (0.491973)*/,
2,-3, 3,2, #/*mean (0.14892), correlation (0.500782)*/,
-9,-13, -4,0, #/*mean (0.150371), correlation (0.498211)*/,
-4,6, -3,-10, #/*mean (0.152159), correlation (0.495547)*/,
-4,12, -2,-7, #/*mean (0.156152), correlation (0.496925)*/,
-6,-11, -4,9, #/*mean (0.15749), correlation (0.499222)*/,
6,-3, 6,11, #/*mean (0.159211), correlation (0.503821)*/,
-13,11, -5,5, #/*mean (0.162427), correlation (0.501907)*/,
11,11, 12,6, #/*mean (0.16652), correlation (0.497632)*/,
7,-5, 12,-2, #/*mean (0.169141), correlation (0.484474)*/,
-1,12, 0,7, #/*mean (0.169456), correlation (0.495339)*/,
-4,-8, -3,-2, #/*mean (0.171457), correlation (0.487251)*/,
-7,1, -6,7, #/*mean (0.175), correlation (0.500024)*/,
-13,-12, -8,-13, #/*mean (0.175866), correlation (0.497523)*/,
-7,-2, -6,-8, #/*mean (0.178273), correlation (0.501854)*/,
-8,5, -6,-9, #/*mean (0.181107), correlation (0.494888)*/,
-5,-1, -4,5, #/*mean (0.190227), correlation (0.482557)*/,
-13,7, -8,10, #/*mean (0.196739), correlation (0.496503)*/,
1,5, 5,-13, #/*mean (0.19973), correlation (0.499759)*/,
1,0, 10,-13, #/*mean (0.204465), correlation (0.49873)*/,
9,12, 10,-1, #/*mean (0.209334), correlation (0.49063)*/,
5,-8, 10,-9, #/*mean (0.211134), correlation (0.503011)*/,
-1,11, 1,-13, #/*mean (0.212), correlation (0.499414)*/,
-9,-3, -6,2, #/*mean (0.212168), correlation (0.480739)*/,
-1,-10, 1,12, #/*mean (0.212731), correlation (0.502523)*/,
-13,1, -8,-10, #/*mean (0.21327), correlation (0.489786)*/,
8,-11, 10,-6, #/*mean (0.214159), correlation (0.488246)*/,
2,-13, 3,-6, #/*mean (0.216993), correlation (0.50287)*/,
7,-13, 12,-9, #/*mean (0.223639), correlation (0.470502)*/,
-10,-10, -5,-7, #/*mean (0.224089), correlation (0.500852)*/,
-10,-8, -8,-13, #/*mean (0.228666), correlation (0.502629)*/,
4,-6, 8,5, #/*mean (0.22906), correlation (0.498305)*/,
3,12, 8,-13, #/*mean (0.233378), correlation (0.503825)*/,
-4,2, -3,-3, #/*mean (0.234323), correlation (0.476692)*/,
5,-13, 10,-12, #/*mean (0.236392), correlation (0.475462)*/,
4,-13, 5,-1, #/*mean (0.236842), correlation (0.504132)*/,
-9,9, -4,3, #/*mean (0.236977), correlation (0.497739)*/,
0,3, 3,-9, #/*mean (0.24314), correlation (0.499398)*/,
-12,1, -6,1, #/*mean (0.243297), correlation (0.489447)*/,
3,2, 4,-8, #/*mean (0.00155196), correlation (0.553496)*/,
-10,-10, -10,9, #/*mean (0.00239541), correlation (0.54297)*/,
8,-13, 12,12, #/*mean (0.0034413), correlation (0.544361)*/,
-8,-12, -6,-5, #/*mean (0.003565), correlation (0.551225)*/,
2,2, 3,7, #/*mean (0.00835583), correlation (0.55285)*/,
10,6, 11,-8, #/*mean (0.00885065), correlation (0.540913)*/,
6,8, 8,-12, #/*mean (0.0101552), correlation (0.551085)*/,
-7,10, -6,5, #/*mean (0.0102227), correlation (0.533635)*/,
-3,-9, -3,9, #/*mean (0.0110211), correlation (0.543121)*/,
-1,-13, -1,5, #/*mean (0.0113473), correlation (0.550173)*/,
-3,-7, -3,4, #/*mean (0.0140913), correlation (0.554774)*/,
-8,-2, -8,3, #/*mean (0.017049), correlation (0.55461)*/,
4,2, 12,12, #/*mean (0.01778), correlation (0.546921)*/,
2,-5, 3,11, #/*mean (0.0224022), correlation (0.549667)*/,
6,-9, 11,-13, #/*mean (0.029161), correlation (0.546295)*/,
3,-1, 7,12, #/*mean (0.0303081), correlation (0.548599)*/,
11,-1, 12,4, #/*mean (0.0355151), correlation (0.523943)*/,
-3,0, -3,6, #/*mean (0.0417904), correlation (0.543395)*/,
4,-11, 4,12, #/*mean (0.0487292), correlation (0.542818)*/,
2,-4, 2,1, #/*mean (0.0575124), correlation (0.554888)*/,
-10,-6, -8,1, #/*mean (0.0594242), correlation (0.544026)*/,
-13,7, -11,1, #/*mean (0.0597391), correlation (0.550524)*/,
-13,12, -11,-13, #/*mean (0.0608974), correlation (0.55383)*/,
6,0, 11,-13, #/*mean (0.065126), correlation (0.552006)*/,
0,-1, 1,4, #/*mean (0.074224), correlation (0.546372)*/,
-13,3, -9,-2, #/*mean (0.0808592), correlation (0.554875)*/,
-9,8, -6,-3, #/*mean (0.0883378), correlation (0.551178)*/,
-13,-6, -8,-2, #/*mean (0.0901035), correlation (0.548446)*/,
5,-9, 8,10, #/*mean (0.0949843), correlation (0.554694)*/,
2,7, 3,-9, #/*mean (0.0994152), correlation (0.550979)*/,
-1,-6, -1,-1, #/*mean (0.10045), correlation (0.552714)*/,
9,5, 11,-2, #/*mean (0.100686), correlation (0.552594)*/,
11,-3, 12,-8, #/*mean (0.101091), correlation (0.532394)*/,
3,0, 3,5, #/*mean (0.101147), correlation (0.525576)*/,
-1,4, 0,10, #/*mean (0.105263), correlation (0.531498)*/,
3,-6, 4,5, #/*mean (0.110785), correlation (0.540491)*/,
-13,0, -10,5, #/*mean (0.112798), correlation (0.536582)*/,
5,8, 12,11, #/*mean (0.114181), correlation (0.555793)*/,
8,9, 9,-6, #/*mean (0.117431), correlation (0.553763)*/,
7,-4, 8,-12, #/*mean (0.118522), correlation (0.553452)*/,
-10,4, -10,9, #/*mean (0.12094), correlation (0.554785)*/,
7,3, 12,4, #/*mean (0.122582), correlation (0.555825)*/,
9,-7, 10,-2, #/*mean (0.124978), correlation (0.549846)*/,
7,0, 12,-2, #/*mean (0.127002), correlation (0.537452)*/,
-1,-6, 0,-11, #/*mean (0.127148), correlation (0.547401)*/
]
| [
198,
198,
2545,
62,
33279,
62,
3132,
796,
3467,
198,
58,
198,
220,
220,
220,
807,
12095,
18,
11,
860,
11,
20,
11,
1303,
15211,
32604,
357,
15,
828,
16096,
357,
15,
8,
16208,
11,
198,
220,
220,
220,
604,
11,
17,
11,
767,
12095,
... | 1.96429 | 8,261 |
# -*- coding: utf-8 -*-
"""Script to compact all Brython scripts in a single one."""
import datetime
import os
import re
import sys
import tarfile
import zipfile
import make_static_doc # lint:ok
# slimit is an optional JS minifier; when it is missing a local
# custom_minify() fallback (defined elsewhere in this file) is used instead.
try:
    import slimit
    minify = slimit.minify
except ImportError:
    minify = slimit = None
# path of parent directory
pdir = os.path.dirname(os.getcwd())
# version info
# Brython release as [major, minor, micro, release-level, serial].
version = [3, 3, 0, "alpha", 0]
# Version of the Python language implemented, same layout.
implementation = [3, 0, 3, 'alpha', 0]
# Build absolute paths inside the repository's src/ directory.
abs_path = lambda _pth: os.path.join(os.path.dirname(os.getcwd()), 'src', _pth)
# Timestamp used to tag the generated distribution archives.
now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# update version number
# NOTE(review): the file is opened in binary mode ('wb') but str objects are
# written to it, which only works under Python 2 -- confirm expected runtime.
with open(abs_path('version_info.js'), 'wb') as vinfo_file_out:
    # implementation[2] = now
    vinfo_file_out.write('__BRYTHON__.implementation = %s\n' % implementation)
    vinfo_file_out.write('__BRYTHON__.__MAGIC__ = "%s"\n' %
                         '.'.join(['%s' % _i for _i in implementation[:3]]))
    vinfo_file_out.write('__BRYTHON__.version_info = %s\n' % str(version))
    vinfo_file_out.write('__BRYTHON__.compiled_date = "%s"\n' % str(datetime.datetime.now()))
    # builtin module names = list of scripts in src/libs
    vinfo_file_out.write('__BRYTHON__.builtin_module_names = ["posix",')
    _modules=['"%s"' % fname.split('.')[0]
              for fname in os.listdir(abs_path('libs')) if fname.endswith('.js')]
    _modules.sort() #sort modules so that git diff's don't change between runs
    vinfo_file_out.write(',\n '.join(_modules))
    # add Python scripts in Lib that start with _ and arent found in CPython Lib
    # using sys.executable to find stdlib dir doesn't work under linux.
    stdlib_path = os.path.dirname(os.__file__)
    # stdlib_path = os.path.join(os.path.dirname(sys.executable),'Lib')
    stdlib_mods = [f for f in os.listdir(stdlib_path) if f.startswith('_')]
    stdlib_mods.sort()
    brython_mods = [f for f in os.listdir(abs_path('Lib'))
                    if f.startswith('_') and f != '__pycache__']
    # Brython-only private modules: present in src/Lib but not in CPython.
    brython_py_builtins = [os.path.splitext(x)[0]
                           for x in brython_mods if x not in stdlib_mods]
    brython_py_builtins.sort()
    vinfo_file_out.write(',\n ' + ',\n '.join(
        ['"%s"' % f for f in brython_py_builtins]))
    vinfo_file_out.write(']\n')
#log.info("Finished Writing file: " + abs_path('version_info.js'))
# Create file stdlib_paths.js : static mapping between module names and paths
# in the standard library
libfolder = os.path.join(os.path.dirname(os.getcwd()), 'src')
# Header of the generated file: an IIFE that receives the __BRYTHON__ object.
simple_javascript_template_string = """;(function($B){\n
$B.stdlib = {}
"""
with open(os.path.join(libfolder, 'stdlib_paths.js'), 'wb') as out:
    out.write(simple_javascript_template_string)
    # Javascript modules found in src/libs are registered with kind 'js'.
    jspath = os.path.join(libfolder, 'libs')
    jslist = []
    for dirpath, dirnames, filenames in os.walk(jspath):
        for filename in filenames:
            if not filename.endswith('.js'):
                continue
            mod_name = os.path.splitext(filename)[0]
            jslist.append(mod_name)
    jslist.sort()
    out.write("var js=['%s']\n" % "','".join(jslist))
    out.write("""for(var i=0;i<js.length;i++) $B.stdlib[js[i]]=['js']\n\n""")
    # Python modules and packages in src/Lib are registered with kind 'py';
    # packages (i.e. __init__.py files) carry an extra true flag.
    pylist = []
    pkglist = []
    pypath = os.path.join(libfolder, 'Lib')
    for dirpath, dirnames, filenames in os.walk(pypath):
        for filename in filenames:
            mod_name, ext = os.path.splitext(filename)
            if ext != '.py':
                continue
            # Components of the dotted module name, relative to src/Lib.
            path = dirpath[len(pypath)+len(os.sep):].split(os.sep)+[mod_name]
            if not path[0]:
                path = path[1:]
            mod_name = '.'.join(path).lstrip('.')
            if filename == '__init__.py':
                mod_name = '.'.join(path[:-1]).lstrip('.')
            # NOTE(review): mod_path is computed here but never used.
            mod_path = 'Lib/'+'/'.join(path)
            if filename == '__init__.py':
                pkglist.append(mod_name)
            else:
                pylist.append(mod_name)
    pylist.sort()
    out.write("var pylist=['%s']\n" % "','".join(pylist))
    pkglist.sort()
    out.write(
        "for(var i=0;i<pylist.length;i++) $B.stdlib[pylist[i]]=['py']\n\n")
    out.write("var pkglist=['%s']\n" % "','".join(pkglist))
    out.write(
        "for(var i=0;i<pkglist.length;i++) $B.stdlib[pkglist[i]]=['py',true]\n")
    out.write('})(__BRYTHON__)')
print('static stdlib mapping ok')
# build brython.js from base Javascript files
sources = [
    'brython_builtins', 'version_info', 'identifiers_re', 'py2js', 'py_object',
    'py_type', 'py_utils', 'py_generator', 'py_builtin_functions', 'py_bytes',
    'js_objects', 'stdlib_paths', 'py_import', 'py_float', 'py_int',
    'py_complex', 'py_dict', 'py_list', 'py_string', 'py_set', 'py_dom',
    'py_import_hooks'
]
res = '// brython.js brython.info\n'
res += '// version %s\n' % version
res += '// implementation %s\n' % implementation
res += '// version compiled from commented, indented source files '
res += 'at github.com/brython-dev/brython\n'
src_size = 0
for fname in sources:
    src = open(abs_path(fname)+'.js').read() + '\n'
    src_size += len(src)
    # Prefer the slimit minifier when it was importable above; otherwise
    # fall back to the local custom minifier.
    if minify is not None:
        try:
            res += minify(src)
        except Exception as error:
            print(error)
    else:
        # NOTE(review): custom_minify is not defined in this chunk -- it must
        # come from elsewhere in the file; confirm before relying on it.
        res += custom_minify(src)
# Shorten the ubiquitous identifier 'context' to a single letter to save space.
res = res.replace('context', 'C')
with open(abs_path('brython.js'), 'wb') as the_brythonjs_file_output:
    the_brythonjs_file_output.write(res)
print(('size : originals %s compact %s gain %.2f' %
       (src_size, len(res), 100 * (src_size - len(res)) / src_size)))
# version name
vname = '.'.join(str(x) for x in implementation[:3])
if implementation[3] == 'rc':
    vname += 'rc%s' % implementation[4]
sys.path.append("scripts")
try:
    import make_VFS # isort:skip
except ImportError:
    print("Cannot find make_VFS, so we won't make py_VFS.js")
    make_VFS = None
    sys.exit()
make_VFS.process(os.path.join(pdir, 'src', 'py_VFS.js'))
make_VFS.process_unittest(os.path.join(pdir, 'src', 'py_unittest.js'))
# make distribution with core + libraries
with open(os.path.join(pdir, 'src', 'brython_dist.js'), 'wb') as distrib_file:
    distrib_file.write(open(os.path.join(pdir, 'src', 'brython.js')).read())
    distrib_file.write(open(os.path.join(pdir, 'src', 'py_VFS.js')).read())
# zip files
dest_dir = os.path.join(pdir, 'dist')
if not os.path.exists(dest_dir):
    os.mkdir(dest_dir)
name = 'Brython%s_site_mirror-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
# Full site mirror as a gzipped tarball.
dist_gz = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
for path in os.listdir(pdir):
    # NOTE(review): is_valid is not defined in this chunk; presumably it
    # filters out VCS/system files -- confirm against the full file.
    if not is_valid(path):
        continue
    # NOTE(review): this rebinds the abs_path lambda defined near the top
    # of the script; any later call to abs_path(...) would break.
    abs_path = os.path.join(pdir, path)
    if os.path.isdir(abs_path) and path == "dist":
        continue
    print(('add', path))
    dist_gz.add(os.path.join(pdir, path), arcname=os.path.join(name, path))
dist_gz.close()
# Same mirror again as a zip archive.
dist_zip = zipfile.ZipFile(dest_path + '.zip', mode='w',
                           compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(pdir):
    print(dirpath)
    for path in filenames:
        if not is_valid(path):
            continue
        abs_path = os.path.join(pdir, dirpath, path)
        dist_zip.write(
            os.path.join(dirpath, path),
            arcname=os.path.join(name, dirpath[len(pdir) + 1:], path))
    # Prune bookkeeping directories in place so os.walk does not descend
    # into them.
    if 'dist' in dirnames:
        dirnames.remove('dist')
    if '.hg' in dirnames:
        dirnames.remove('.hg')
    if '.git' in dirnames:
        dirnames.remove('.git')
    # NOTE(review): this loop body is 'continue' only and has no effect.
    for dirname in dirnames:
        if dirname == 'dist':
            continue
dist_zip.close()
print('end of mirror')
# minimum package
name = 'Brython%s-%s' % (vname, now)
dest_path = os.path.join(dest_dir, name)
dist1 = tarfile.open(dest_path + '.tar.gz', mode='w:gz')
dist2 = tarfile.open(dest_path+'.tar.bz2', mode='w:bz2')
dist3 = zipfile.ZipFile(dest_path + '.zip', mode='w',
                        compression=zipfile.ZIP_DEFLATED)
# Write the same minimal file set into all three archive formats; wfunc is
# the per-archive "add file" callable.
for arc, wfunc in (dist1, dist1.add), (dist2, dist2.add), (dist3, dist3.write):
    for path in 'README.md', 'LICENCE.txt':
        wfunc(os.path.join(pdir, path), arcname=os.path.join(name, path))
    wfunc(os.path.join(pdir, 'src', 'brython.js'),
          arcname=os.path.join(name, 'brython.js'))
    base = os.path.join(pdir, 'src')
    folders = ('libs', 'Lib')
    for folder in folders:
        for dirpath, dirnames, filenames in os.walk(os.path.join(base, folder)):
            for path in filenames:
                if os.path.splitext(path)[1] not in ('.js', '.py'):
                    continue
                print(('add', path, dirpath[len(base):]))
                wfunc(os.path.join(dirpath, path),
                      arcname=os.path.join(name, dirpath[len(base) + 1:], path))
    arc.close()
# changelog file
try:
    first = 'Changes in Brython version %s.%s.%s' % (
        implementation[0], implementation[1], implementation[2])
    with open(os.path.join(pdir, 'dist', 'changelog.txt')) as file_to_read:
        input_changelog_data_string = file_to_read.read()
    # Prepend an underlined title line to the timestamped changelog copy.
    with open(os.path.join(pdir, 'dist', 'changelog_%s.txt' % now), 'wb') as ou:
        ou.write('%s\n' % first)
        ou.write('%s\n\n' % ('=' * len(first)))
        ou.write(input_changelog_data_string)
except Exception as error:
    print(error)
    print("Warning - no changelog file")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
37811,
7391,
284,
16001,
477,
9092,
400,
261,
14750,
287,
257,
2060,
530,
526,
15931,
628,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2... | 2.14971 | 4,315 |
from game_data import data
from art import logo, vs
import random
import os
def compare(choice, curr, compare_against, points, game_is_over):
    """Score the player's choice against the other displayed account.

    The original implementation had two branches that performed identical
    updates and differed only in which account played the role of "the
    other one"; the duplication is collapsed here. A guess is correct
    when the chosen account has at least as many followers as the other
    account (ties count as correct, as in the original `>=` tests).

    Args:
        choice: the account dict the player picked (curr or compare_against).
        curr: the account currently shown as 'Compare A'.
        compare_against: the account currently shown as 'Against B'.
        points: the player's score so far.
        game_is_over: the current game-over flag.

    Returns:
        Tuple (points, game_is_over, curr): the updated score, the updated
        game-over flag, and the account to show as 'Compare A' in the next
        round (compare_against after a correct guess, unchanged otherwise).
    """
    # The account the player did NOT pick.
    other = compare_against if choice is curr else curr
    if choice["follower_count"] >= other["follower_count"]:
        # Correct guess: B becomes the new A and the score increases.
        curr = compare_against
        points += 1
    else:
        # Wrong guess: clear the screen and signal the end of the game.
        os.system("clear")
        game_is_over = True
    return points, game_is_over, curr
def play_game():
    """Run the follower-count guessing game loop.

    Repeatedly shows two accounts, asks the player which one has more
    followers, and delegates scoring to compare(). The loop ends on the
    first wrong guess.

    :return: the total number of points the player earned.
    """
    points = 0
    # Pick the first 'Compare A' account at random from the game data.
    curr = random.choice(data)
    game_is_over = False
    while not game_is_over:
        os.system("clear")
        print(logo)
        compare_against = random.choice(data)
        # Re-draw until 'Against B' differs from 'Compare A'.
        while compare_against == curr:
            compare_against = random.choice(data)
        if points > 0:
            print(f"You're right! Current score: {points}.")
        print("Compare A: " + curr["name"] + ", a " +
              curr["description"] + ", from " + curr["country"]+".")
        print("\n" + vs + "\n")
        print("Against B: " + compare_against["name"] + ", a " +
              compare_against["description"] + ", from " + compare_against["country"]+".")
        # Any answer other than exactly 'A' is treated as choosing 'B'.
        if input("Who has more followers? Type 'A' or 'B': ") == "A":
            choice = curr
        else:
            choice = compare_against
        points, game_is_over, curr = compare(
            choice, curr, compare_against, points, game_is_over)
    return points
# Run one full game session; play_game() only returns once the player has
# guessed wrong, carrying the final score.
points = play_game()
# The loop exits exclusively on a wrong guess, so show the game-over message.
print(f"{logo} \n Sorry that's wrong, Final Score: {points} \n")
| [
6738,
983,
62,
7890,
1330,
1366,
198,
6738,
1242,
1330,
11112,
11,
3691,
198,
11748,
4738,
198,
11748,
28686,
628,
198,
4299,
8996,
7,
25541,
11,
1090,
81,
11,
8996,
62,
32826,
11,
2173,
11,
983,
62,
271,
62,
2502,
2599,
198,
220,
... | 2.52027 | 740 |
import unittest
from VintageousPlus.vi.utils import modes
from VintageousPlus.state import State
from VintageousPlus.tests import get_sel
from VintageousPlus.tests import first_sel
from VintageousPlus.tests import ViewTest
from VintageousPlus.ex_commands import CURRENT_LINE_RANGE
# TODO: test with multiple selections.
| [
11748,
555,
715,
395,
198,
198,
6738,
34057,
516,
17860,
13,
8903,
13,
26791,
1330,
12881,
198,
198,
6738,
34057,
516,
17860,
13,
5219,
1330,
1812,
198,
198,
6738,
34057,
516,
17860,
13,
41989,
1330,
651,
62,
741,
198,
6738,
34057,
51... | 3.542553 | 94 |
import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from util.orientation import Orientation
from util.vec import Vec3
import numpy as np
import os
| [
11748,
10688,
198,
198,
6738,
374,
75,
13645,
13,
49638,
13,
8692,
62,
25781,
1330,
7308,
36772,
11,
17427,
22130,
9012,
198,
6738,
374,
75,
13645,
13,
26791,
13,
7249,
942,
13,
6057,
62,
7890,
62,
7249,
1330,
3776,
51,
624,
47,
831... | 3.438356 | 73 |
import pytest
from qtoggleserver.core.expressions import comparison, Function
from qtoggleserver.core.expressions import InvalidNumberOfArguments
| [
198,
11748,
12972,
9288,
198,
198,
6738,
10662,
83,
48549,
18497,
13,
7295,
13,
42712,
507,
1330,
7208,
11,
15553,
198,
6738,
10662,
83,
48549,
18497,
13,
7295,
13,
42712,
507,
1330,
17665,
15057,
5189,
28100,
2886,
628,
628,
628,
628,
... | 3.44 | 50 |
#!/usr/bin/env python
"""
_CMSSW_
Template for a CMSSW Step
"""
import pickle
from WMCore.WMSpec.ConfigSectionTree import nodeName
from WMCore.WMSpec.Steps.Template import CoreHelper, Template
class CMSSWStepHelper(CoreHelper):
    """
    _CMSSWStepHelper_

    Add API calls and helper methods to the basic WMStepHelper to specialise
    for CMSSW tasks
    """

    def setAcqEra(self, acqEra):
        """
        _setAcqEra_

        Set the acquisition era attribute for this step.
        """
        self.data.output.acqEra = acqEra

    def setProcStr(self, procStr):
        """
        _setProcStr_

        Set the processing string attribute for this step.
        """
        self.data.output.procStr = procStr

    def setProcVer(self, procVer):
        """
        _setProcVer_

        Set the processing version attribute for this step.
        """
        self.data.output.procVer = procVer

    def getAcqEra(self):
        """
        _getAcqEra_

        Retrieve the acquisition era for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'acqEra', None)

    def getProcStr(self):
        """
        _getProcStr_

        Retrieve the processing string for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'procStr', None)

    def getProcVer(self):
        """
        _getProcVer_

        Retrieve the processing version for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'procVer', None)

    def setPrepId(self, prepId):
        """
        _setPrepId_

        Set the prep_id attribute for this step.
        """
        self.data.output.prepId = prepId

    def getPrepId(self):
        """
        _getPrepId_

        Retrieve the prep_id for this step, or return None if non-existent.
        """
        return getattr(self.data.output, 'prepId', None)

    def addOutputModule(self, moduleName, **details):
        """
        _addOutputModule_

        Add in an output module settings, all default to None unless
        the value is provided in details
        """
        modules = self.data.output.modules
        # Create the module section on first use. 'is None' replaces the
        # original '== None' (PEP 8: compare to singletons with identity).
        if getattr(modules, moduleName, None) is None:
            modules.section_(moduleName)
        module = getattr(modules, moduleName)
        for key, value in details.items():
            setattr(module, key, value)
        return

    def listOutputModules(self):
        """
        _listOutputModules_

        retrieve list of output module names
        """
        if hasattr(self.data.output, "modules"):
            return self.data.output.modules.dictionary_().keys()
        return []

    def getOutputModule(self, name):
        """
        _getOutputModule_

        retrieve the data structure for an output module by name
        None if not found
        """
        return getattr(self.data.output.modules, name, None)

    def setConfigCache(self, url, document, dbName="config_cache"):
        """
        _setConfigCache_

        Set the information required to retrieve a configuration from
        the config cache.

        url - base URL for the config cache instance
        document - GUID for the config document
        dbName - optional, name of the db instance in the couch server
        """
        self.data.application.configuration.configCacheUrl = url
        self.data.application.configuration.configId = document
        self.data.application.configuration.cacheName = dbName
        docUrl = "%s/%s/%s" % (url, dbName, document)
        self.data.application.configuration.configUrl = docUrl
        self.data.application.configuration.retrieveConfigUrl = "%s/configFile" % docUrl

    def setDataProcessingConfig(self, scenarioName, functionName, **args):
        """
        _setDataProcessingConfig_

        Set a configuration library to be used from the CMSSW Release
        DataProcessing package.
        """
        self.data.application.configuration.scenario = scenarioName
        self.data.application.configuration.function = functionName
        # assume if this crashes we are dealing with complex data
        # which is only supported in new agents that only look
        # at pickledarguments anyways
        try:
            self.data.application.configuration.section_('arguments')
            # Plain loop instead of the original side-effect-only list
            # comprehension; behaviour is identical.
            for k, v in args.items():
                setattr(self.data.application.configuration.arguments, k, v)
        except Exception:
            pass
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return

    def cmsswSetup(self, cmsswVersion, **options):
        """
        _cmsswSetup_

        Provide setup details for CMSSW.

        cmsswVersion - required - version of CMSSW to use

        Optional:

        scramCommand - defaults to scramv1
        scramProject - defaults to CMSSW
        scramArch - optional scram architecture, defaults to None
        buildArch - optional scram build architecture, defaults to None
        softwareEnvironment - setup command to bootstrap scram, defaults to None
        """
        self.data.application.setup.cmsswVersion = cmsswVersion
        for k, v in options.items():
            setattr(self.data.application.setup, k, v)
        return

    def getScramArch(self):
        """
        _getScramArch_

        Retrieve the scram architecture used for this step.
        """
        return self.data.application.setup.scramArch

    def getCMSSWVersion(self):
        """
        _getCMSSWVersion_

        Retrieve the version of the framework used for this step.
        """
        return self.data.application.setup.cmsswVersion

    def setGlobalTag(self, globalTag):
        """
        _setGlobalTag_

        Set the global tag.
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.globalTag = globalTag
        # Keep the pickled copy of the arguments in sync as well.
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['globalTag'] = globalTag
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return

    def getGlobalTag(self):
        """
        _getGlobalTag_

        Retrieve the global tag.
        """
        if hasattr(self.data.application.configuration, "arguments"):
            if hasattr(self.data.application.configuration.arguments, "globalTag"):
                return self.data.application.configuration.arguments.globalTag
        return pickle.loads(self.data.application.configuration.pickledarguments)['globalTag']

    def setDatasetName(self, datasetName):
        """
        _setDatasetName_

        Set the dataset name in the pickled arguments
        """
        self.data.application.configuration.section_('arguments')
        self.data.application.configuration.arguments.datasetName = datasetName
        # Keep the pickled copy of the arguments in sync as well.
        args = {}
        if hasattr(self.data.application.configuration, "pickledarguments"):
            args = pickle.loads(self.data.application.configuration.pickledarguments)
        args['datasetName'] = datasetName
        self.data.application.configuration.pickledarguments = pickle.dumps(args)
        return

    def getDatasetName(self):
        """
        _getDatasetName_

        Retrieve the dataset name from the pickled arguments
        """
        if hasattr(self.data.application.configuration, "arguments"):
            if hasattr(self.data.application.configuration.arguments, "datasetName"):
                return self.data.application.configuration.arguments.datasetName
        return pickle.loads(self.data.application.configuration.pickledarguments).get('datasetName', None)

    def getScenario(self):
        """
        _getScenario_

        Retrieve the scenario from the pickled arguments, if any
        """
        if hasattr(self.data.application.configuration, "scenario"):
            return self.data.application.configuration.scenario
        return None

    def setUserSandbox(self, userSandbox):
        """
        _setUserSandbox_

        Sets the userSandbox. Eventually may have to move this to a proper
        list rather than a one element list
        """
        if userSandbox:
            self.data.user.inputSandboxes = [userSandbox]
        return

    def setUserFiles(self, userFiles):
        """
        _setUserFiles_

        Sets the list of extra files the user needs
        """
        if userFiles:
            self.data.user.userFiles = userFiles
        return

    def setUserLFNBase(self, lfnBase):
        """
        _setUserLFNBase_

        Sets the base LFN under which user output is stored.
        (Docstring fixed: it was a copy-paste of _setUserFiles_.)
        """
        if lfnBase:
            self.data.user.lfnBase = lfnBase
        return

    def setupChainedProcessing(self, inputStepName, inputOutputModule):
        """
        _setupChainedProcessing_

        Set values to support chained CMSSW running.
        """
        self.data.input.chainedProcessing = True
        self.data.input.inputStepName = inputStepName
        self.data.input.inputOutputModule = inputOutputModule

    def keepOutput(self, keepOutput):
        """
        _keepOutput_

        Mark whether or not we should keep the output from this step. We don't
        want to keep the output from certain chained steps.
        """
        self.data.output.keep = keepOutput
        return

    def getPileup(self):
        """
        _getPileup_

        Retrieve the pileup config from this step.
        """
        return getattr(self.data, "pileup", None)

    def setupPileup(self, pileupConfig, dbsUrl):
        """
        include pileup input configuration into this step configuration.
        pileupConfig is initially specified as input to the workload
        (user input) and here is available as a dict.
        """
        # so, e.g. this {"cosmics": "/some/cosmics/dataset", "minbias": "/some/minbias/dataset"}
        # would translate into
        # self.data.pileup.comics.dataset = "/some/cosmics/dataset"
        # self.data.pileup.minbias.dataset = "/some/minbias/dataset"
        self.data.section_("pileup")
        for pileupType, dataset in pileupConfig.items():
            self.data.pileup.section_(pileupType)
            setattr(getattr(self.data.pileup, pileupType), "dataset", dataset)
        setattr(self.data, "dbsUrl", dbsUrl)

    def setOverrideCatalog(self, overrideCatalog):
        """
        _setOverrideCatalog_

        set the override catalog

        needed at least at CERN to use production castor pools
        """
        # 'is not None' replaces the original '!= None' comparison.
        if overrideCatalog is not None:
            self.data.application.overrideCatalog = overrideCatalog

    def setEventsPerLumi(self, eventsPerLumi):
        """
        _setEventsPerLumi_

        Add event per lumi information to the step, so it can be added later
        to the process, this comes from user input
        """
        # 'is not None' replaces the original '!= None' comparison.
        if eventsPerLumi is not None:
            setattr(self.data.application.configuration, "eventsPerLumi", eventsPerLumi)

    def getSkipBadFiles(self):
        """
        _getSkipBadFiles_

        Check if we can skip inexistent files instead of failing the job
        """
        return getattr(self.data.application.configuration, "skipBadFiles", False)

    def setSkipBadFiles(self, skipBadFiles):
        """
        _setSkipBadFiles_

        Add a flag to indicate the CMSSW process if we can
        skip inexistent files instead of failing the job
        """
        setattr(self.data.application.configuration, "skipBadFiles", skipBadFiles)

    def setNumberOfCores(self, ncores, nEventStreams=0):
        """
        _setNumberOfCores_

        Set the number of cores and event streams for CMSSW to run on
        """
        # if None is passed for EventStreams, then set it to 0
        nEventStreams = nEventStreams or 0
        self.data.application.multicore.numberOfCores = ncores
        self.data.application.multicore.eventStreams = nEventStreams

    def getNumberOfCores(self):
        """
        _getNumberOfCores_

        Get number of cores
        """
        return self.data.application.multicore.numberOfCores

    def getEventStreams(self):
        """
        _getEventStreams_

        Get number of event streams
        """
        return self.data.application.multicore.eventStreams
class CMSSW(Template):
    """
    _CMSSW_

    Tools for creating a template CMSSW Step
    """
    def install(self, step):
        """
        _install_

        Add the set of default fields to the step required for running
        a cmssw job
        """
        stepname = nodeName(step)
        step.stepType = "CMSSW"
        # scram/project setup defaults; the CMSSW version itself must be
        # filled in later (e.g. via CMSSWStepHelper.cmsswSetup).
        step.application.section_("setup")
        step.application.setup.scramCommand = "scramv1"
        step.application.setup.scramProject = "CMSSW"
        step.application.setup.cmsswVersion = None
        step.application.setup.scramArch = None
        step.application.setup.buildArch = None
        step.application.setup.softwareEnvironment = None
        # cmsRun invocation defaults.
        step.application.section_("command")
        step.application.command.executable = "cmsRun"
        step.application.command.configuration = "PSet.py"
        step.application.command.configurationPickle = "PSet.pkl"
        step.application.command.configurationHash = None
        step.application.command.psetTweak = None
        step.application.command.arguments = ""
        # Output locations: job report plus per-step stdout/stderr logs.
        step.output.jobReport = "FrameworkJobReport.xml"
        step.output.stdout = "%s-stdout.log" % stepname
        step.output.stderr = "%s-stderr.log" % stepname
        step.output.keep = True
        step.output.section_("modules")
        step.output.section_("analysisFiles")
        # Hook points for scripts run before/after the scram environment.
        step.section_("runtime")
        step.runtime.preScripts = []
        step.runtime.scramPreScripts = []
        step.runtime.postScripts = []
        step.runtime.postScramScripts = []
        step.section_("debug")
        step.debug.verbosity = 0
        step.debug.keepLogs = False
        # User-supplied sandboxes, scripts and output configuration.
        step.section_("user")
        step.user.inputSandboxes = []
        step.user.script = None
        step.user.outputFiles = []
        step.user.userFiles = []
        step.user.lfnBase = None
        step.section_("monitoring")
        # support for multicore cmssw running mode
        step.application.section_("multicore")
        step.application.multicore.numberOfCores = 1
        step.application.multicore.eventStreams = 0
    def helper(self, step):
        """
        _helper_

        Wrap the WMStep provided in the CMSSW helper class that
        includes the ability to add and manipulate the details
        of a CMSSW workflow step
        """
        return CMSSWStepHelper(step)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
62,
24187,
5432,
54,
62,
198,
198,
30800,
329,
257,
16477,
5432,
54,
5012,
198,
198,
37811,
198,
198,
11748,
2298,
293,
198,
198,
6738,
370,
9655,
382,
13,
22117,
22882,
... | 2.414001 | 6,128 |
import numpy as np
from numpy import linalg as LA
class Duffing:
    """
    Create a duffing object by specifying its parameter delta as input at initialization of the object
    It is a bistable dynamical system with 2 stable steady states and one unstable steady state
    """
    # NOTE(review): no __init__ is visible in this chunk, yet the methods read
    # self.delta, self.dt, self.tau, self.X, self.U, self.max_control and
    # self.desired_state. These attributes must be set elsewhere before the
    # methods below are called -- confirm against the full file.
    def reset(self):
        """
        :return: randomly initialized state of the duffing object
        """
        # State is a 2-vector (position, velocity), each drawn from U(-4, 4).
        self.state = np.random.uniform(low=-4, high=4, size=(2,))
        return self.state
    def step(self, u):
        """
        takes input u as the action/control to be applied
        calculates next state by calling 4th order Runge-Kutta solver
        returns state at the next time step
        """
        y = self.state
        # The control is stored on the instance so dydt can read it.
        self.control = u
        new_y = self.rk4(y)
        self.state = new_y
        return self.state
    def reward(self):
        """
        :return: reward as the negative of the 2 norm between current state and the desired state
        """
        return -LA.norm(self.state - self.desired_state, axis=0)
    def bin_classifier(self):
        """
        :return: binary control (0 or 1) based on the locally weighted binary classifier
        """
        # Gaussian-kernel weights of the current state against the training
        # inputs self.X, bandwidth self.tau; normalized to sum to 1.
        w = np.exp(-(LA.norm(self.state - self.X, axis=1)**2)/(2*self.tau))
        w /= np.sum(w)
        # Weighted vote over the training labels self.U.
        if np.dot(w, self.U) > 0.5:
            return 1
        else:
            return 0
    def dydt(self, y):
        """
        :param y: current state of the duffing oscillator
        :return: right hand side of duffing ODEs
        """
        dy0 = y[1] + self.control
        dy1 = y[0] - y[0]**3 - self.delta*y[1]
        return dy0, dy1
    def rk4(self, y0):
        """
        :param y0: current state of the duffing object
        :return: state y of the duffing object at next time step using 4th order Runge-Kutta method
        """
        h = self.dt
        f = self.dydt
        # Classic RK4 stages.
        k1 = h * np.asarray(f(y0))
        k2 = h * np.asarray(f(y0 + k1 / 2))
        k3 = h * np.asarray(f(y0 + k2 / 2))
        k4 = h * np.asarray(f(y0 + k3))
        y = y0 + (1 / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        return y
    def trajectory(self, n):
        """
        :param n: number of time steps in trajectory
        :return: trajectory y at time steps t and control u
        """
        y, u, t = np.zeros((n, 2)), np.zeros((n, 1)), np.zeros((n, 1))
        y[0, :] = self.state
        # Control at each step is the classifier's binary output scaled by
        # max_control; it is applied at the next step.
        u[0, 0] = self.max_control * self.bin_classifier()
        for i in range(1, n):
            y[i, :] = self.step(u[i - 1, 0])
            t[i, 0] = i * self.dt
            u[i, 0] = self.max_control * self.bin_classifier()
        return y, u, t
    def trajectory_no_control(self, n):
        """
        :param n: number of time steps in trajectory
        :return: trajectory y at time steps t
        """
        y, t = np.zeros((n, 2)), np.zeros((n, 1))
        y[0, :] = self.state
        # Zero control throughout: the free (uncontrolled) dynamics.
        for i in range(1, n):
            y[i, :] = self.step(0)
            t[i, 0] = i * self.dt
        return y, t
| [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
299,
32152,
1330,
300,
1292,
70,
355,
9131,
201,
198,
201,
198,
4871,
50209,
278,
25,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
13610,
257,
288,
1648,
278,
2134,
416,
315... | 1.980855 | 1,567 |
#Matthew Trahms
#EE 526
#5/25/21
#This file serves as the toplevel generation script. The user will enter
#at least the number entries, bits, and reads, with options to specify
#that the regfile should be split into banks. Vertical banks (v_banks) means
#that the address space will be split between multiple banks. This comes in
#handy when creating a register file with a large number of entries.
#Horizontal banks (h_banks) means that the data being read/written is split
#across multiple register files. This comes in handy when creating a register
#file with lots of bits per entry in the register file.
from make_store_grid import make_store_grid as grid
from make_module_decl import make_module_decl as module
from make_io import make_io as io
from make_wr_data_latches import make_wr_data_latches as wr_latches
from make_wr_data_buffers import make_wr_data_buffers as wr_buff
from route_rdata import route_rdata as rdata
from make_decoder import make_decoder as decoder
from make_wr_addr_latches import make_wr_addr_latches as waddr_latch
from make_wr_en_latch import make_wr_en_latch as wen_latch
from connect_w_logic import connect_w_logic as w_logic
from buffer_clk import buffer_clk
from cell_map import low_widths, tristate_w
from size_buffers_latches import size_tristate, size_wr_data_latch
import math
import argparse
if __name__ == '__main__':
    # Command-line front end: gather the register-file geometry, then emit
    # the Verilog source and the APR pin configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument('entries',
        help='the number of entries found in the finished register file',
        type=int)
    parser.add_argument('bits',
        help='the number of bits per entry in the register file',
        type=int)
    parser.add_argument('reads',
        help='the number of read ports in the register file', type=int)
    parser.add_argument('word_banks',
        help='splits the word among multiple banks to lower cap (default 1)',
        type=int)
    parser.add_argument('address_banks',
        help='splits addr space among banks to lower cap (default 1)',
        type=int)
    args = parser.parse_args()
    # Context managers guarantee both files are closed even if make_toplevel
    # raises; the original open()/close() pairs leaked them on error.
    with open('regfile.v', 'w') as out_file, \
            open('../src/apr/pin_config.txt', 'w') as pins_file:
        # v_banks = address-space banking, h_banks = word (data) banking.
        make_toplevel(out_file, pins_file, args.entries, args.bits,
                      args.reads, args.address_banks, args.word_banks)
| [
2,
25372,
4759,
71,
907,
198,
2,
6500,
642,
2075,
198,
2,
20,
14,
1495,
14,
2481,
198,
198,
2,
1212,
2393,
9179,
355,
262,
284,
1154,
626,
5270,
4226,
13,
383,
2836,
481,
3802,
198,
2,
265,
1551,
262,
1271,
12784,
11,
10340,
11,... | 3.088117 | 749 |
from dlib import point, points
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
| [
6738,
288,
8019,
1330,
966,
11,
2173,
198,
28311,
25,
198,
220,
220,
220,
1330,
269,
31686,
293,
355,
2298,
293,
220,
1303,
5765,
269,
31686,
293,
319,
11361,
362,
13,
22,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1330,
2298... | 2.913043 | 46 |
# -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-01-23
Function: ไบๅๆ ็ไธไธ่็น
"""
def getBTreeNextNode(pNode):
    """Return the in-order successor of ``pNode`` in a binary tree.

    Nodes are expected to expose ``lchild``, ``rchild`` and ``parent``
    attributes (``parent`` of the root is None).

    :param pNode: node whose successor is wanted (may be None)
    :return: the next node of an in-order traversal, or None if ``pNode``
        is the last node (or is itself None)
    """
    if pNode is None:
        return None
    # Case 1: a right subtree exists -- the successor is its leftmost node.
    if pNode.rchild is not None:
        succ = pNode.rchild
        while succ.lchild:
            succ = succ.lchild
        return succ
    # Case 2: no right subtree -- climb until we leave a left child; that
    # parent is the successor.  (This loop also covers the "pNode is a left
    # child" case the original handled in a redundant elif branch, and the
    # "== None" comparisons are replaced with identity tests per PEP 8.)
    while pNode.parent:
        if pNode.parent.lchild is pNode:
            return pNode.parent
        pNode = pNode.parent
    return None
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
17171,
25,
29436,
25426,
2290,
198,
26130,
25,
347,
8577,
51,
198,
10430,
25,
2864,
12,
486,
12,
1954,
198,
22203,
25,
220,
12859,
234,
20998,
231,
43718,
... | 1.599174 | 484 |
""" ๆฅ็จ่กจ
https://pcrbot.github.io/pcr-calendar/#cn
"""
from datetime import datetime, timedelta
from typing import Dict, Optional, Set
import httpx
from nonebot import get_bot
from nonebot.log import logger
from nonebot_plugin_apscheduler import scheduler
from .config import plugin_config
# Module-level singleton used by the scheduler jobs; the Calender class is
# presumably defined elsewhere in this module (not visible in this excerpt).
calender_obj = Calender()
| [
37811,
10545,
245,
98,
163,
101,
233,
26193,
101,
198,
198,
5450,
1378,
79,
6098,
13645,
13,
12567,
13,
952,
14,
79,
6098,
12,
9948,
9239,
31113,
31522,
198,
37811,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,... | 3.028302 | 106 |
"""
Common helper functions for handling k8s objects information
"""
import datetime as dt
import logging
import os
import re
import yaml
from flask import current_app
log = logging.getLogger(__name__)
def get_prefixed_index_html():
    """Return index.html with its <base> element rewritten for the app prefix.

    The frontend is served under a configurable URL prefix, so the <base>
    tag baked into the static index.html must be patched to match it.
    """
    prefix = os.path.join("/", current_app.config["PREFIX"], "")
    static_dir = current_app.config["STATIC_DIR"]
    log.info("Setting the <base> to reflect the prefix: %s", prefix)

    index_path = os.path.join(static_dir, "index.html")
    with open(index_path, "r") as f:
        contents = f.read()

    # Swap whatever href the frontend build produced for the configured prefix.
    return re.sub(
        r"\<base href=\".*\"\>", '<base href="%s">' % prefix, contents,
    )
def load_yaml(f):
    """Read the YAML file at path ``f`` and return it as a Python object.

    :param f: file path
    :return: parsed contents ({} for an empty document), or None when the
        file cannot be read or is not valid YAML
    """
    try:
        with open(f, "r") as yaml_file:
            raw = yaml_file.read()
    except IOError:
        log.error(f"Error opening: {f}")
        return None

    try:
        parsed = yaml.safe_load(raw)
    except yaml.YAMLError:
        return None

    # safe_load returns None for an empty document; normalise that to {}.
    return {} if parsed is None else parsed
def load_param_yaml(f, **kwargs):
    """Read a YAML template at path ``f``, filling in ``{var}`` placeholders.

    The raw text is run through str.format(**kwargs) before parsing, so the
    file may contain placeholders for the caller to substitute.

    :param f: file path
    :return: parsed contents ({} for an empty document), or None when the
        file cannot be read or is not valid YAML
    """
    try:
        with open(f, "r") as yaml_file:
            raw = yaml_file.read().format(**kwargs)
    except IOError:
        log.error(f"Error opening: {f}")
        return None

    try:
        parsed = yaml.safe_load(raw)
    except yaml.YAMLError:
        return None

    # safe_load returns None for an empty document; normalise that to {}.
    return {} if parsed is None else parsed
def get_uptime(then):
    """Human-readable age of a timestamp, e.g. "3 hours ago".

    :param then: datetime instance, or string in "%Y-%m-%dT%H:%M:%SZ" form
    :return: coarse age string: "just now", or "N min(s)/hour(s)/day(s) ago"
    """
    if isinstance(then, str):
        then = dt.datetime.strptime(then, "%Y-%m-%dT%H:%M:%SZ")

    delta = dt.datetime.now() - then.replace(tzinfo=None)
    days = delta.days
    hours = int(delta.seconds / 3600)
    mins = int((delta.seconds % 3600) / 60)

    # Report only the most significant unit, matching the original output.
    if days > 0:
        amount, unit = days, "day"
    elif hours > 0:
        amount, unit = hours, "hour"
    elif mins == 0:
        return "just now"
    else:
        amount, unit = mins, "min"

    plural = "" if amount == 1 else "s"
    return "%d %s%s ago" % (amount, unit, plural)
| [
37811,
198,
17227,
31904,
5499,
329,
9041,
479,
23,
82,
5563,
1321,
198,
37811,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
198,
11748,
331,
43695,
198,
6738,
42903,
1330,
1459,
62,
... | 2.180623 | 1,445 |
# Blackbox tests for ndrdump
# Copyright (C) 2008 Andrew Tridgell <tridge@samba.org>
# Copyright (C) 2008 Andrew Bartlett <abartlet@samba.org>
# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
# based on test_smbclient.sh
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Blackbox tests for ndrdump."""
import os
from samba.tests import BlackboxTestCase
# Locate the ndrdump test-data directory; its location differs between
# source trees, so probe each known relative path in turn.  The module-level
# name data_path_dir is left pointing at the first path that exists.
for p in [ "../../../../../source4/librpc/tests", "../../../../../librpc/tests"]:
    data_path_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), p))
    print data_path_dir
    if os.path.exists(data_path_dir):
        break
class NdrDumpTests(BlackboxTestCase):
    """Blackbox tests for ndrdump."""
    # NOTE(review): only the class header and docstring are visible in this
    # excerpt; the individual test methods follow below it in the full file.
| [
2,
2619,
3524,
5254,
329,
299,
67,
4372,
931,
198,
2,
15069,
357,
34,
8,
3648,
6858,
833,
3130,
297,
1279,
2213,
3130,
31,
82,
31842,
13,
2398,
29,
198,
2,
15069,
357,
34,
8,
3648,
6858,
13167,
15503,
1279,
397,
433,
1616,
31,
8... | 3.065217 | 414 |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor, rdm6300
from esphome.const import CONF_UID, CONF_ID
from . import rdm6300_ns
# This binary-sensor platform requires the rdm6300 hub component to be set up.
DEPENDENCIES = ['rdm6300']
# Config key linking a sensor back to its parent rdm6300 hub.
CONF_RDM6300_ID = 'rdm6300_id'
RDM6300BinarySensor = rdm6300_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor)
# Each sensor entry needs its own ID, a reference to the hub, and the
# 32-bit card UID it should react to.
CONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend({
    cv.GenerateID(): cv.declare_id(RDM6300BinarySensor),
    cv.GenerateID(CONF_RDM6300_ID): cv.use_id(rdm6300.RDM6300Component),
    cv.Required(CONF_UID): cv.uint32_t,
})
| [
11748,
1658,
746,
462,
13,
8189,
5235,
355,
269,
70,
198,
11748,
1658,
746,
462,
13,
11250,
62,
12102,
341,
355,
269,
85,
198,
6738,
1658,
746,
462,
13,
5589,
3906,
1330,
13934,
62,
82,
22854,
11,
374,
36020,
5066,
405,
198,
6738,
... | 2.245136 | 257 |
###############################################################################
#
# taxonParser.py - parse taxonomic-specific marker sets
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
from collections import defaultdict
import checkm.prettytable as prettytable
from checkm.markerSets import BinMarkerSets, MarkerSet
from checkm.util.taxonomyUtils import taxonomicRanks, ranksByLevel, ranksByLabel
from checkm.defaultValues import DefaultValues
class TaxonParser():
    """Parse taxonomic-specific marker sets."""
    def list(self, rankFilter='ALL'):
        """ List all available marker sets from the specified rank."""
        # Mapping of rank -> taxon -> MarkerSet, loaded from the data file.
        taxonMarkerSets = self.readMarkerSets()
        header = ['Rank', 'Taxon', '# genomes', '# marker genes', '# marker sets']
        pTable = prettytable.PrettyTable(header)
        pTable.align = 'c'
        pTable.align['Rank'] = 'l'
        pTable.align['Taxon'] = 'l'
        pTable.hrules = prettytable.FRAME
        pTable.vrules = prettytable.NONE
        # One table row per taxon, optionally restricted to a single rank.
        for rank in taxonomicRanks:
            if rankFilter == 'ALL' or rankFilter == rank:
                for taxon in sorted(taxonMarkerSets[rank]):
                    markerSet = taxonMarkerSets[rank][taxon]
                    numMarkers, numMarkerSets = markerSet.size()
                    pTable.add_row([rank, taxon, markerSet.numGenomes, numMarkers, numMarkerSets])
        print ''
        print pTable.get_string()
    def markerSet(self, rank, taxon, markerFile):
        """Obtain specified taxonomic-specific marker set."""
        taxonMarkerSets = self.readMarkerSets()
        # Validate the requested rank and taxon before writing anything.
        if rank not in taxonMarkerSets:
            self.logger.error('  Unrecognized taxonomic rank: ' + rank)
            return False
        elif taxon not in taxonMarkerSets[rank]:
            self.logger.error('  Unrecognized taxon: %s (in rank %s): ' % (taxon, rank))
            return False
        markerSet = taxonMarkerSets[rank][taxon]
        # lineageStr is ';'-separated, most-specific last; reverse it so the
        # loop walks from the requested taxon up towards the root.
        taxonomy = markerSet.lineageStr.split(';')[::-1]
        binMarkerSets = BinMarkerSets(taxon, BinMarkerSets.TAXONOMIC_MARKER_SET)
        for i, taxon in enumerate(taxonomy):
            if rank != 'life':
                rank = ranksByLevel[len(taxonomy) - i - 1]
                if rank == 'species':
                    # Species names are "<genus> <species epithet>".
                    taxon = taxonomy[1] + ' ' + taxonomy[0]
            markerSet = taxonMarkerSets[rank][taxon]
            numMarkers, numMarkerSets = markerSet.size()
            self.logger.info('  Marker set for %s contains %d marker genes arranged in %d sets.' % (taxon, numMarkers, numMarkerSets))
            self.logger.info('    Marker set inferred from %d reference genomes.' % markerSet.numGenomes)
            markerSet.lineageStr = taxon
            binMarkerSets.addMarkerSet(markerSet)
        # Persist the collected marker sets to the requested output file.
        fout = open(markerFile, 'w')
        fout.write(DefaultValues.TAXON_MARKER_FILE_HEADER + '\n')
        binMarkerSets.write(fout)
        fout.close()
        return True
| [
29113,
29113,
7804,
4242,
21017,
201,
198,
2,
201,
198,
2,
1687,
261,
46677,
13,
9078,
532,
21136,
1687,
40036,
12,
11423,
18364,
5621,
201,
198,
2,
201,
198,
29113,
29113,
7804,
4242,
21017,
201,
198,
2,
220,
220,
220,
220,
220,
22... | 2.131671 | 2,005 |
#-*- coding:utf-8 -*-
import sqlite3
# ---------------------------------------------------------------------------------------
# Description : Database Processor
# ---------------------------------------------------------------------------------------
# duplicate_tag is the group id.
# Retrieve all the distinct sentence candidates of a given group; "tag" here is the group id.
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
44161,
578,
18,
198,
198,
2,
16529,
19351,
6329,
198,
2,
12489,
220,
220,
1058,
24047,
32893,
198,
2,
16529,
19351,
6329,
198,
198,
2,
23418,
62,
12985,
318,
262,
1... | 5.626866 | 67 |
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from config import ConfigService
| [
6738,
36579,
13,
34,
10803,
1330,
34329,
201,
198,
6738,
36579,
13,
19703,
4668,
13,
42,
8068,
1330,
30524,
42,
8068,
17,
201,
198,
6738,
4566,
1330,
17056,
16177,
201,
198,
220,
220,
220,
220,
220,
220,
220,
201,
198,
220
] | 2.804878 | 41 |
#!/usr/bin/python3
from app import app
__author__ = "@ivanleoncz"
# Run Flask's built-in development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger and auto-reload;
# it must be disabled outside local development.
if __name__ == "__main__":
    app.run(debug=True)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
598,
1330,
598,
198,
198,
834,
9800,
834,
796,
44212,
13809,
38970,
26691,
1,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
598,
13,
... | 2.4 | 50 |
from sanic import text
from sanic.response import json
from sanic_ext import serializer
| [
6738,
5336,
291,
1330,
2420,
198,
6738,
5336,
291,
13,
26209,
1330,
33918,
198,
198,
6738,
5336,
291,
62,
2302,
1330,
11389,
7509,
628,
198
] | 3.64 | 25 |
# COMP 551 Mini Project 4
# 2019-04-17
# Segev, Michael
# Jacquier, Pierre
# Han, Zhenze
# loads and formats data from stanford sentiment tree bank
import re
import numpy as np
# computes the fine-grained labels
| [
2,
24301,
642,
4349,
12558,
4935,
604,
198,
2,
13130,
12,
3023,
12,
1558,
198,
2,
1001,
469,
85,
11,
3899,
198,
2,
44726,
959,
11,
21204,
198,
2,
9530,
11,
1168,
831,
2736,
198,
2,
15989,
290,
17519,
1366,
422,
336,
272,
3841,
1... | 3.084507 | 71 |
import sys
import textwrap
import pytest
from radon.visitors import *
def dedent(code):
    """Strip common leading whitespace and surrounding blank space.

    A def instead of a name-bound lambda (PEP 8, E731) so the helper gets a
    proper __name__ in tracebacks.
    """
    return textwrap.dedent(code).strip()
SIMPLE_BLOCKS = [
(
'''
if a: pass
''',
2,
{},
),
(
'''
if a: pass
else: pass
''',
2,
{},
),
(
'''
if a: pass
elif b: pass
''',
3,
{},
),
(
'''
if a: pass
elif b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
''',
3,
{},
),
(
'''
if a and b: pass
else: pass
''',
3,
{},
),
(
'''
if a and b: pass
elif c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c and d: pass
else: pass
''',
5,
{},
),
(
'''
if a and b or c: pass
else: pass
''',
4,
{},
),
(
'''
for x in range(10): print(x)
''',
2,
{},
),
(
'''
for x in xrange(10): print(x)
else: pass
''',
3,
{},
),
(
'''
while a < 4: pass
''',
2,
{},
),
(
'''
while a < 4: pass
else: pass
''',
3,
{},
),
(
'''
while a < 4 and b < 42: pass
''',
3,
{},
),
(
'''
while a and b or c < 10: pass
else: pass
''',
5,
{},
),
# With and async-with statements no longer count towards CC, see #123
(
'''
with open('raw.py') as fobj: print(fobj.read())
''',
1,
{},
),
(
'''
[i for i in range(4)]
''',
2,
{},
),
(
'''
[i for i in range(4) if i&1]
''',
3,
{},
),
(
'''
(i for i in range(4))
''',
2,
{},
),
(
'''
(i for i in range(4) if i&1)
''',
3,
{},
),
(
'''
[i for i in range(42) if sum(k ** 2 for k in divisors(i)) & 1]
''',
4,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
''',
3,
{},
),
(
'''
try: raise TypeError
finally: pass
''',
1,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
finally: pass
''',
2,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else: pass
finally: pass
''',
3,
{},
),
(
'''
try: raise TypeError
except TypeError: pass
else:
pass
pass
finally: pass
''',
3,
{},
),
# Lambda are not counted anymore as per #68
(
'''
k = lambda a, b: k(b, a)
''',
1,
{},
),
(
'''
k = lambda a, b, c: c if a else b
''',
2,
{},
),
(
'''
v = a if b else c
''',
2,
{},
),
(
'''
v = a if sum(i for i in xrange(c)) < 10 else c
''',
3,
{},
),
(
'''
sum(i for i in range(12) for z in range(i ** 2) if i * z & 1)
''',
4,
{},
),
(
'''
sum(i for i in range(10) if i >= 2 and val and val2 or val3)
''',
6,
{},
),
(
'''
for i in range(10):
print(i)
else:
print('wah')
print('really not found')
print(3)
''',
3,
{},
),
(
'''
while True:
print(1)
else:
print(2)
print(1)
print(0)
print(-1)
''',
3,
{},
),
(
'''
assert i < 0
''',
2,
{},
),
(
'''
assert i < 0, "Fail"
''',
2,
{},
),
(
'''
assert i < 0
''',
1,
{'no_assert': True},
),
(
'''
def f():
assert 10 > 20
''',
1,
{'no_assert': True},
),
(
'''
class TestYo(object):
def test_yo(self):
assert self.n > 4
''',
1,
{'no_assert': True},
),
]
# These run only if Python version is >= 2.7
ADDITIONAL_BLOCKS = [
(
'''
{i for i in range(4)}
''',
2,
{},
),
(
'''
{i for i in range(4) if i&1}
''',
3,
{},
),
(
'''
{i:i**4 for i in range(4)}
''',
2,
{},
),
(
'''
{i:i**4 for i in range(4) if i&1}
''',
3,
{},
),
]
BLOCKS = SIMPLE_BLOCKS[:]
if sys.version_info[:2] >= (2, 7):
BLOCKS.extend(ADDITIONAL_BLOCKS)
@pytest.mark.parametrize('code,expected,kwargs', BLOCKS)
SINGLE_FUNCTIONS_CASES = [
(
'''
def f(a, b, c):
if a and b == 4:
return c ** c
elif a and not c:
return sum(i for i in range(41) if i&1)
return a + b
''',
(1, 7),
),
(
'''
if a and not b: pass
elif b or c: pass
else: pass
for i in range(4):
print(i)
def g(a, b):
while a < b:
b, a = a **2, b ** 2
return b
''',
(6, 2),
),
(
'''
def f(a, b):
while a**b:
a, b = b, a * (b - 1)
if a and b:
b = 0
else:
b = 1
return sum(i for i in range(b))
''',
(1, 5),
),
]
if sys.version_info[:2] >= (3, 5):
# With and async-with statements no longer count towards CC, see #123
SINGLE_FUNCTIONS_CASES.append(
(
'''
async def f(a, b):
async with open('blabla.log', 'w') as f:
async for i in range(100):
f.write(str(i) + '\\n')
''',
(1, 2),
),
)
@pytest.mark.parametrize('code,expected', SINGLE_FUNCTIONS_CASES)
FUNCTIONS_CASES = [
# With and async-with statements no longer count towards CC, see #123
(
'''
def f(a, b):
return a if b else 2
def g(a, b, c):
if a and b:
return a / b + b / a
elif b and c:
return b / c - c / b
return a + b + c
def h(a, b):
return 2 * (a + b)
''',
(2, 5, 1),
),
(
'''
def f(p, q):
while p:
p, q = q, p - q
if q < 1:
return 1 / q ** 2
elif q > 100:
return 1 / q ** .5
return 42 if not q else p
def g(a, b, c):
if a and b or a - b:
return a / b - c
elif b or c:
return 1
else:
k = 0
with open('results.txt', 'w') as fobj:
for i in range(b ** c):
k += sum(1 / j for j in range(i ** 2) if j > 2)
fobj.write(str(k))
return k - 1
''',
(5, 9),
),
]
@pytest.mark.parametrize('code,expected', FUNCTIONS_CASES)
CLASSES_CASES = [
(
'''
class A(object):
def m(self, a, b):
if not a or b:
return b - 1
try:
return a / b
except ZeroDivisionError:
return a
def n(self, k):
while self.m(k) < k:
k -= self.m(k ** 2 - min(self.m(j) for j in range(k ** 4)))
return k
''',
(8, 4, 3),
),
(
'''
class B(object):
ATTR = 9 if A().n(9) == 9 else 10
import sys
if sys.version_info >= (3, 3):
import os
AT = os.openat('/random/loc')
def __iter__(self):
return __import__('itertools').tee(B.__dict__)
def test(self, func):
a = func(self.ATTR, self.AT)
if a < self.ATTR:
yield self
elif a > self.ATTR ** 2:
yield self.__iter__()
yield iter(a)
''',
(7, 1, 3),
),
]
@pytest.mark.parametrize('code,expected', CLASSES_CASES)
GENERAL_CASES = [
(
'''
if a and b:
print
else:
print
a = sum(i for i in range(1000) if i % 3 == 0 and i % 5 == 0)
def f(n):
def inner(n):
return n ** 2
if n == 0:
return 1
elif n == 1:
return n
elif n < 5:
return (n - 1) ** 2
return n * pow(inner(n), f(n - 1), n - 3)
''',
(6, 3, 0, 9),
),
(
'''
try:
1 / 0
except ZeroDivisonError:
print
except TypeError:
pass
class J(object):
def aux(self, w):
if w == 0:
return 0
return w - 1 + sum(self.aux(w - 3 - i) for i in range(2))
def f(a, b):
def inner(n):
return n ** 2
if a < b:
b, a = a, inner(b)
return a, b
''',
(3, 1, 3, 7),
),
(
'''
class f(object):
class inner(object):
pass
''',
(1, 0, 0, 1),
),
]
@pytest.mark.parametrize('code,expected', GENERAL_CASES)
CLOSURES_CASES = [
(
'''
def f(n):
def g(l):
return l ** 4
def h(i):
return i ** 5 + 1 if i & 1 else 2
return sum(g(u + 4) / float(h(u)) for u in range(2, n))
''',
('g', 'h'),
(1, 2, 2),
),
(
'''
# will it work? :D
def memoize(func):
cache = {}
def aux(*args, **kwargs):
key = (args, kwargs)
if key in cache:
return cache[key]
cache[key] = res = func(*args, **kwargs)
return res
return aux
''',
('aux',),
(2, 1),
),
]
@pytest.mark.parametrize('code,closure_names,expected', CLOSURES_CASES)
CONTAINERS_CASES = [
(
('func', 12, 0, 18, False, None, [], 5),
('F', 'func', 'F 12:0->18 func - 5'),
),
(
('meth', 12, 0, 21, True, 'cls', [], 5),
('M', 'cls.meth', 'M 12:0->21 cls.meth - 5'),
),
(('cls', 12, 0, 15, [], [], 5), ('C', 'cls', 'C 12:0->15 cls - 5')),
(
('cls', 12, 0, 19, [object, object, object, object], [], 30),
('C', 'cls', 'C 12:0->19 cls - 8'),
),
]
@pytest.mark.parametrize('values,expected', CONTAINERS_CASES)
| [
11748,
25064,
198,
11748,
2420,
37150,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
2511,
261,
13,
4703,
6742,
1330,
1635,
198,
198,
9395,
298,
796,
37456,
2438,
25,
2420,
37150,
13,
9395,
298,
7,
8189,
737,
36311,
3419,
628,
198,
48... | 1.619139 | 6,761 |
import torch
import torch.nn as nn
from models.dsam_layers import center_crop
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
4981,
13,
9310,
321,
62,
75,
6962,
1330,
3641,
62,
31476,
628
] | 3.291667 | 24 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import arcpy
# Point arcpy at the geodatabase holding the "villes" feature class.
arcpy.env.workspace = "C:/Temp/donnees.gdb"

# Dump every subtype of "villes": its code, display name and default flag.
soustypes = arcpy.da.ListSubtypes("villes")
for stcode, stdict in soustypes.items():
    print("code: ", stcode, " nom: ", stdict['Name'], " defaut: ", stdict['Default'])

# Expected output:
# ('code: ', 0, ' nom: ', u'Villes', ' defaut: ', True)
# ('code: ', 1, ' nom: ', u'Grande ville', ' defaut: ', False)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
10389,
9078,
198,
5605,
9078,
13,
24330,
13,
5225,
10223,
796,
366,
34,
14079,
30782,
14,
9099,
710,
274,
13,
... | 2.361111 | 180 |
#!
# coding:utf-8
'''
Add headerlink Extension for Python-Markdown
==========================================
This extension adds headerlink CSS to the output HTML in Python-Markdown.
This is intended for use with TocExtension(permalink=True) which generates the links
Simple Usage:
>>> import markdown
>>> markdown.markdown("Some text", ['addheaderlinkcss']) # doctest: +ELLIPSIS
u'<style...h1:hover > .headerlink {\\n display: inline;...</style>\\n<p>Some text</p>'
'''
import markdown
from markdown.util import etree
from markdown.util import isBlockLevel
# Global Vars
# Heading tags that receive the hover-to-reveal headerlink treatment.
SECTIONLINK_PERMITTED_TAGS=set("h1 h2 h3 h4 h5 h6".split())
# Base CSS: hide every element with class="headerlink" by default.
SECTIONLINK_CSS = r'''
/* The following code is added by mdx_addheaderlinkcss.py
   It was originally lifted from http://subversion.apache.org/style/site.css */
/*
 * Hide class="headerlink", except when an enclosing heading
 * has the :hover property.
 */
.headerlink {
  display: none;
}
'''
# Reveal the headerlink while its enclosing heading is hovered.
# sorted() makes the generated CSS deterministic: iterating the raw set
# emitted the per-tag rules in an arbitrary, run-dependent order.
for tag in sorted(SECTIONLINK_PERMITTED_TAGS):
    SECTIONLINK_CSS += '''\
%s:hover > .headerlink {
  display: inline;
}
''' % tag
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
""" Add tableclass to Markdown. """
# https://pythonhosted.org/Markdown/extensions/api.html#makeextension says
# to use (**kwargs) only, but built-in extensions actually use (*args, **kwargs)
# Run the module's doctests when invoked directly.
if __name__ == "__main__":
    import doctest
    # Test does not work currently because processing is disabled
    doctest.testmod()
| [
2,
0,
220,
198,
2,
19617,
25,
40477,
12,
23,
198,
198,
7061,
6,
198,
198,
4550,
13639,
8726,
27995,
329,
11361,
12,
9704,
2902,
198,
10052,
2559,
855,
198,
198,
1212,
7552,
6673,
13639,
8726,
17391,
284,
262,
5072,
11532,
287,
11361... | 3.030612 | 490 |
import json
import sys
import turtle
from maze import Maze
def draw_path(filepath, pen, origin, sq_size):
    """Read a recorded path from ``filepath`` and draw it onto the maze.

    Each line of the file is a JSON list [x, y, visited, heading]; the
    visited counter selects the colour of that segment.
    """
    def colour_for(visited):
        # Map the visit counter onto a drawing colour.
        if visited == 1:
            return 'green yellow'
        if visited == 3:
            return 'red'
        if visited in (0, 2):
            return 'gray'
        return 'black'

    first = True
    with open(filepath, 'r') as file_object:
        for line in file_object:
            x, y, visited, heading = json.loads(line)
            color = colour_for(visited)
            if first:
                # Position the pen on the starting square without drawing.
                pen.hideturtle()
                pen.pensize(int(sq_size / 2))
                pen.pencolor(color)
                pen.setheading(90)
                pen.goto(origin + sq_size / 2, origin + sq_size / 2)
                pen.showturtle()
                first = False
            else:
                draw_line(x, y, color, heading, pen, origin, sq_size)
def draw_line(x, y, color, heading, pen, origin, sq_size):
    """Extend the drawn path into the centre of maze square (x, y)."""
    headings = {"up": 90, "right": 0, "down": 270, "left": 180}
    # Centre of the target square in screen coordinates.
    cx = origin + sq_size * x + sq_size / 2
    cy = origin + sq_size * y + sq_size / 2
    pen.setheading(headings[heading])
    pen.pendown()
    pen.goto(cx, cy)
    pen.penup()
    pen.pencolor(color)
def draw_maze(maze, pen, origin, sq_size):
    """Draw every wall of ``maze`` on screen with the given turtle ``pen``."""
    def wall(start_x, start_y, heading):
        # Draw one wall segment of length sq_size starting at (start_x, start_y).
        pen.goto(start_x, start_y)
        pen.setheading(heading)
        pen.pendown()
        pen.forward(sq_size)
        pen.penup()

    # Visit each square: draw its top and right walls always, and its
    # bottom/left walls only on the maze boundary, so no wall is drawn twice.
    for x in range(maze.dim):
        for y in range(maze.dim):
            if not maze.is_permissible([x, y], 'up'):
                wall(origin + sq_size * x, origin + sq_size * (y + 1), 0)
            if not maze.is_permissible([x, y], 'right'):
                wall(origin + sq_size * (x + 1), origin + sq_size * y, 90)
            if y == 0 and not maze.is_permissible([x, y], 'down'):
                wall(origin + sq_size * x, origin, 0)
            if x == 0 and not maze.is_permissible([x, y], 'left'):
                wall(origin, origin + sq_size * y, 90)
if __name__ == '__main__':
    '''
    This script uses Python's turtle library to draw a picture of the maze
    given as an argument when running the script.
    '''
    # Create a maze based on input argument on command line.
    maze = Maze(str(sys.argv[1]))

    # Initialize the window and drawing turtle.
    window = turtle.Screen()
    pen = turtle.Turtle()
    pen.speed(0)
    pen.penup()

    # maze centered on (0,0), squares are 20 units in length.
    sq_size = 20
    origin = maze.dim * sq_size / -2

    # Disable animation while the maze is drawn, then re-enable it.
    window.tracer(0)
    draw_maze(maze, pen, origin, sq_size)
    window.update()
    window.tracer(1)

    # Optional second argument: a recorded path file to overlay on the maze.
    if len(sys.argv) == 3:
        draw_path(str(sys.argv[2]), pen, origin, sq_size)

    pen.hideturtle()
    # Keep the window open until the user clicks it.
    window.exitonclick()
| [
11748,
33918,
198,
11748,
25064,
198,
11748,
28699,
198,
198,
6738,
31237,
1330,
33412,
628,
198,
4299,
3197,
62,
6978,
7,
7753,
6978,
11,
3112,
11,
8159,
11,
19862,
62,
7857,
2599,
198,
220,
220,
220,
13538,
15931,
5569,
82,
257,
310... | 2.002825 | 1,770 |
"""
SoftLayer.CLI.routes
~~~~~~~~~~~~~~~~~~~
Routes for shell-specific commands
:license: MIT, see LICENSE for more details.
"""
# Shell-only commands: (command name, "module.path:callable" import string).
ALL_ROUTES = [
    ('exit', 'SoftLayer.shell.cmd_exit:cli'),
    ('shell-help', 'SoftLayer.shell.cmd_help:cli'),
    ('env', 'SoftLayer.shell.cmd_env:cli'),
]
# Alternate spellings accepted for the commands listed above.
ALL_ALIASES = {
    '?': 'shell-help',
    'help': 'shell-help',
    'quit': 'exit',
}
| [
37811,
198,
220,
220,
220,
8297,
49925,
13,
5097,
40,
13,
81,
448,
274,
198,
220,
220,
220,
220,
27156,
4907,
93,
198,
220,
220,
220,
39602,
274,
329,
7582,
12,
11423,
9729,
628,
220,
220,
220,
1058,
43085,
25,
17168,
11,
766,
385... | 2.359281 | 167 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Thu Oct 31 11:09:38 2019
@author: Jorge palma
MARETEC/Instituto Superior Tรฉcnico
Universidade de Lisboa
'''
import sys
import os
import gc
import argparse
import traceback
import time
import datetime
import pandas as pd
import json
import random
import string
## python-geohash: https://pypi.org/project/python-geohash/
import geohash
## pyshp: https://pythonhosted.org/Python%20Shapefile%20Library/
import shapefile
## https://shapely.readthedocs.io
from shapely.geometry import Point, Polygon, shape
# Suppress traceback output for end users.
sys.tracebacklimit=0
'''dev'''
# NOTE(review): this development override makes the line above dead (it
# re-sets the limit to one frame); remove one of the two before release.
sys.tracebacklimit=1
# Default output file names.
output_default_file = 'output.dat'
species_loc_file = 'speciesloc.dat'
grid_file = 'grid.dat'
class GeohashMaker(object):
'''bounding_box'''
def _is_geohash_in_bounding_box(self, current_geohash, bbox_coordinates):
'''Checks if the box of a geohash is inside the bounding box
:param current_geohash: a geohash
:param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
:return: true if the center of the geohash is in the bounding box
'''
# decode return [latitude, longitude]
(latitude, longitude) = geohash.decode(current_geohash)
geohash_in_bounding_box = (bbox_coordinates[0] < longitude < bbox_coordinates[2]) and \
(bbox_coordinates[1] < latitude < bbox_coordinates[3])
return geohash_in_bounding_box
def _is_coordinates_in_bounding_box(self, coordinates, bbox_coordinates):
'''Checks if coordinates is inside the bounding box
:param coordinates: [lon, lat]
:param bbox_coordinates: bounding box coordinates, [lon1, lat1, lon2, lat2]
:return: true if the coordinate is in the bounding box
'''
coordinates_in_bounding_box = (bbox_coordinates[0] < coordinates[0] < bbox_coordinates[2]) and \
(bbox_coordinates[1] < coordinates[1] < bbox_coordinates[3])
return coordinates_in_bounding_box
def _buil_cell_tiles_from_bbox(self, bbox_coordinates):
'''Computes all geohash tile in the given bounding box
:param bbox_coordinates: the bounding box coordinates of the geohashes
:return: a list of geohashes
'''
checked_geohashes = set()
geohash_stack = set()
geohashes = []
'''get center of bounding box, assuming the earth is flat'''
center_longitude = (bbox_coordinates[0] + bbox_coordinates[2]) / 2
center_latitude = (bbox_coordinates[1] + bbox_coordinates[3]) / 2
center_geohash = self.build_cell([center_longitude, center_latitude])
geohashes.append(center_geohash)
geohash_stack.add(center_geohash)
checked_geohashes.add(center_geohash)
while len(geohash_stack) > 0:
current_geohash = geohash_stack.pop()
neighbors = geohash.neighbors(current_geohash)
for neighbor in neighbors:
if neighbor not in checked_geohashes and self._is_geohash_in_bounding_box(neighbor, bbox_coordinates):
geohashes.append(neighbor)
geohash_stack.add(neighbor)
checked_geohashes.add(neighbor)
geohashes.sort()
return geohashes
'''shapefile'''
def _is_coordinates_in_shapefile(self, coordinates, shpfile):
''' open shapefile'''
sf = shapefile.Reader(shpfile)
'''get features'''
shapes = sf.shapes()
first_shp = shapes[0]
''' get points coordinates for each point in the shape '''
points = first_shp.points
polygon = Polygon(points)
point = Point(coordinates[0], coordinates[1])
return polygon.contains(point)
def _build_cell_tiles_from_shapefile(self, shpfile):
    '''Computes all geohash tiles in the given shapefile
    :param shapefile: shapefile
    :return: a list of geohashes
    '''
    ''' open shapefile'''
    sf = shapefile.Reader(shpfile)
    '''get features'''
    shapes = sf.shapes()
    # More than one feature: warn and fall through to the first one.
    # NOTE(review): input() blocks — this will hang non-interactive runs.
    if len(shapes) > 1:
        print("More than one feature was found. Only first will be selected.")
        input("Press Enter to continue...")
    '''only use first feature'''
    first_shp = shapes[0]
    ''' get shape type. only if shapetype is polygon'''
    # Shapefile shape-type code 5 is POLYGON.
    shape_type = first_shp.shapeType
    if shape_type != 5:
        # NOTE(review): presumably handle_error aborts the program; otherwise
        # execution would continue with a non-polygon shape — confirm.
        handle_error(msg='Shapefile feature be a polygon')
    ''' get points coordinates for each point in the shape '''
    points = first_shp.points
    polygon = Polygon(points)
    # Breadth-first flood fill over geohash neighbours, seeded at the polygon
    # centroid; keep every tile whose decoded centre lies inside the polygon.
    checked_geohashes = set()
    geohash_stack = set()
    geohashes = []
    '''get center of bounding box, assuming the earth is flat'''
    center_latitude = polygon.centroid.coords[0][1]
    center_longitude = polygon.centroid.coords[0][0]
    center_geohash = self.build_cell([center_longitude, center_latitude])
    geohashes.append(center_geohash)
    geohash_stack.add(center_geohash)
    checked_geohashes.add(center_geohash)
    while len(geohash_stack) > 0:
        current_geohash = geohash_stack.pop()
        neighbors = geohash.neighbors(current_geohash)
        for neighbor in neighbors:
            # geohash.decode returns (lat, lon); [::-1] flips it to the
            # (lon, lat) order shapely's Point expects.
            point = Point(geohash.decode(neighbor)[::-1])
            if neighbor not in checked_geohashes and polygon.contains(point):
                geohashes.append(neighbor)
                geohash_stack.add(neighbor)
                checked_geohashes.add(neighbor)
    geohashes.sort()
    return geohashes
'''geojson'''
def _is_coordinates_in_geojson(self, coordinates, jsonfile):
    '''Tell whether a [lon, lat] pair lies inside the polygon of a GeoJSON file.

    :param coordinates: [lon, lat]
    :param jsonfile: path to a GeoJSON file carrying a "geometry" member
    :return: True when the point is inside the polygon
    '''
    with open(jsonfile) as handle:
        try:
            feature = json.load(handle)
            boundary = shape(feature["geometry"])
            probe = Point(coordinates[0], coordinates[1])
            return boundary.contains(probe)
        except ValueError:
            # json.load raises ValueError (JSONDecodeError) on malformed input.
            handle_error(msg='Invalid GEOJSON format')
def _build_cell_tiles_from_geojson(self, jsonfile):
    '''Computes all geohash tiles in the given geojson file
    :param jsonfile: geojson (polygon)
    :return: a list of geohashes
    '''
    with open(jsonfile) as f:
        try:
            data = json.load(f)
            polygon = shape(data["geometry"])
            # Only a plain Polygon geometry is supported (no MultiPolygon).
            geom_type = polygon.geom_type
            if geom_type != 'Polygon':
                handle_error('SyntaxError', 'Invalid GEOJSON format: Must be a Polygon type')
            # Breadth-first flood fill over geohash neighbours, seeded at the
            # polygon centroid; keep tiles whose centre is inside the polygon.
            checked_geohashes = set()
            geohash_stack = set()
            geohashes = []
            '''get center of bounding box, assuming the earth is flat'''
            center_longitude = polygon.centroid.coords[0][0]
            center_latitude = polygon.centroid.coords[0][1]
            center_geohash = self.build_cell([center_longitude, center_latitude])
            geohashes.append(center_geohash)
            geohash_stack.add(center_geohash)
            checked_geohashes.add(center_geohash)
            while len(geohash_stack) > 0:
                current_geohash = geohash_stack.pop()
                neighbors = geohash.neighbors(current_geohash)
                for neighbor in neighbors:
                    # geohash.decode returns (lat, lon); [::-1] flips it to the
                    # (lon, lat) order shapely's Point expects.
                    point = Point(geohash.decode(neighbor)[::-1])
                    if neighbor not in checked_geohashes and polygon.contains(point):
                        geohashes.append(neighbor)
                        geohash_stack.add(neighbor)
                        checked_geohashes.add(neighbor)
            geohashes.sort()
            return geohashes
        except ValueError as e:
            # json.load raises ValueError (JSONDecodeError) on malformed input.
            handle_error(msg='Invalid GEOJSON format')
def get_parser():
    '''Build the CLI parser, parse sys.argv and post-process the arguments.

    Post-processing performed on the returned Namespace:
      - args.verbose is forced on when -vv was given,
      - args.outdir is derived from -o, created on disk when missing, and
        normalized to '' or a path ending in '/',
      - args.output defaults to <outdir>output.dat when -o names no file,
      - args.strdate / args.enddate default to 1900-01-01 / 2100-01-01.

    :return: parsed argparse.Namespace
    :raises IOError: when the input CSV given with -i does not exist
    '''
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='read gbif and make input file to EstimateS')
    # No CLI arguments at all: print help instead of erroring on required -i.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    parser.add_argument('-v', dest='verbose', help='verbose', action='store_true')
    parser.add_argument('-vv', dest='vverbose', help='more verbose', action='store_true')
    ## Create io files group
    subparser_io = parser.add_argument_group(title='IO group')
    subparser_io.add_argument('-i', dest='input', help='csv gbif results', required=True)
    subparser_io.add_argument('-s', dest='separator', help='csv separator', default='\t', required=False)
    subparser_io.add_argument('-o', dest='output', help='output file', default=output_default_file, required=False)
    ## Create time group
    subparser_time = parser.add_argument_group(title='time group')
    subparser_time.add_argument('-str', dest='strdate', help="the Start Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    subparser_time.add_argument('-end', dest='enddate', help="the End Date format YYYYMMDD",
                                type=lambda d: datetime.datetime.strptime(d, '%Y%m%d'), required=False)
    ## Create grid group
    subparser_grid = parser.add_argument_group(title='grid group')
    subparser_grid.add_argument('-g', dest='grid_type', choices=['geohash'], default='geohash', required=False)
    subparser_grid.add_argument('-p', dest='precision', type=int, help='grid precision', default=5, required=False)
    subparser_grid_exclusive = subparser_grid.add_mutually_exclusive_group(required=True)
    subparser_grid_exclusive.add_argument('-shp', dest='shapefile', help='shapefile with polygon', required=False)
    subparser_grid_exclusive.add_argument('-geojson', dest='geojson', help='geojson file with polygon', required=False)
    subparser_grid_exclusive.add_argument('-bbox', dest='bbox', nargs='+', type=float, help='bounding box: x1 y1 x2 y2', required=False)
    ## Create species group
    subparser_specie = parser.add_argument_group(title='specie group')
    # BUG FIX: species keys are later compared against int(row['speciesKey']),
    # so parse them as ints here (without type=int they stayed strings and the
    # -n filter silently never matched any observation).
    subparser_specie.add_argument('-n', dest='species', nargs='+', type=int, default=[], help='species allowed', required=False)
    args = parser.parse_args()
    if args.vverbose:
        args.verbose = True
    if not os.path.isfile(args.input):
        raise IOError('No such file {}'.format(args.input))
    args.outdir = os.path.dirname(args.output)
    outfile = os.path.basename(args.output)
    ## verify if is a path and create it
    if args.outdir:
        if not os.path.exists(args.outdir):
            os.makedirs(args.outdir)
        args.outdir = args.outdir + '/'
    ## verify if is a path with filename
    if not outfile:
        # BUG FIX: args.outdir is either '' or already ends with '/', so no
        # extra slash must be inserted (previously this produced
        # 'dir//output.dat', or the absolute path '/output.dat' when outdir
        # was empty).
        args.output = args.outdir + 'output.dat'
    if not args.strdate:
        args.strdate = datetime.datetime.strptime('1900-01-01', '%Y-%m-%d')
    if not args.enddate:
        args.enddate = datetime.datetime.strptime('2100-01-01', '%Y-%m-%d')
    if args.shapefile:
        if not os.path.isfile(args.shapefile):
            handle_error('FileNotFoundError', 'Shapefile not found')
    if args.geojson:
        if not os.path.isfile(args.geojson):
            handle_error('FileNotFoundError', 'JSON file not found')
    return args
if __name__ == "__main__":
    # Pipeline: build a geohash grid over the requested area, bin GBIF
    # observations into grid cells (deduplicated per species/day/cell), then
    # emit an EstimateS input file plus two auxiliary CSVs.
    start_time = time.time()
    args = get_parser()
    '''#### build grid'''
    print('1. build grid')
    grid = []
    if args.grid_type == 'geohash':
        grid_maker = GeohashMaker(args.precision, args.shapefile, args.geojson, args.bbox)
        grid = grid_maker.create_grid()
    else:
        handle_error(msg='Error: only accept geohash grid type')
    '''#### init big_data variable'''
    # big_data: geohash cell -> {'sum': int, 'species': {key: {..}}, 'dates': {date: [keys]}}
    print('2. init big data')
    big_data = {}
    for cell in grid:
        big_data[cell] = {}
        '''how many species in cell'''
        big_data[cell]['sum'] = 0
        '''list of species in cell'''
        big_data[cell]['species'] = {}
        '''used to consider only one observation (specie and time) in cell'''
        big_data[cell]['dates'] = {}
    '''create localization.dat file'''
    f = open(args.outdir + species_loc_file, 'w+')
    f.write("latitude;longitude;species\n")
    '''#### read csv file'''
    print('3. read each gbif observation (be patient...)')
    nobs_accepted = 0
    nobs_rejected = 0
    nobs_repeated = 0
    nobs_outside_grid_or_time = 0
    nobs_wrong_format = 0
    nobs = 0
    usecols = ['gbifID', 'decimalLatitude', 'decimalLongitude', 'speciesKey', 'year', 'month', 'day']
    chunksize = 10 ** 5
    filesize = os.path.getsize(args.input)
    # linesize: assumed average bytes per CSV line; used only to scale the
    # progress bar against the file size.
    linesize = 820
    for df in pd.read_csv(args.input, sep=args.separator, chunksize=chunksize, engine='c', low_memory=False, usecols=usecols, skip_blank_lines=True):
        s_time = time.time()
        nlines = len(df.index)
        nobs += nlines
        ''' verify if all columns exist in header csv'''
        csv_columns = df.columns.tolist()
        test_csv_header(csv_columns, usecols)
        '''
        gbifID abstract accessRights accrualMethod accrualPeriodicity accrualPolicy alternative audience available bibliographicCitation conformsTo contributor coverage
        created creator date dateAccepted dateCopyrighted dateSubmitted description educationLevel extent format hasFormat hasPart hasVersion identifier instructionalMethod isFormatOf isPartOf
        isReferencedBy isReplacedBy isRequiredBy isVersionOf issued language license mediator medium modified provenance publisher references relation replaces requires rights
        rightsHolder source spatial subject tableOfContents temporal title type valid institutionID collectionID datasetID institutionCode collectionCode datasetName ownerInstitutionCode
        basisOfRecord informationWithheld dataGeneralizations dynamicProperties occurrenceID catalogNumber recordNumber recordedBy individualCount organismQuantity organismQuantityType
        sex lifeStage reproductiveCondition behavior establishmentMeansoccurrenceStatus preparations disposition associatedReferences associatedSequences associatedTaxa otherCatalogNumbers
        occurrenceRemarks organismIDorganismName organismScope associatedOccurrences associatedOrganisms previousIdentifications organismRemarks materialSampleID eventID parentEventID
        fieldNumber eventDate eventTime startDayOfYear endDayOfYear year month day verbatimEventDate habitat samplingProtocol samplingEffort sampleSizeValue sampleSizeUnit
        fieldNotes eventRemarks locationID higherGeographyID higherGeography continent waterBody islandGroupisland countryCode stateProvince county municipality locality
        verbatimLocality verbatimElevation verbatimDepth minimumDistanceAboveSurfaceInMeters maximumDistanceAboveSurfaceInMeters locationAccordingTo locationRemarks decimalLatitude
        decimalLongitude coordinateUncertaintyInMeters coordinatePrecision pointRadiusSpatialFit verbatimCoordinateSystem verbatimSRS footprintWKT footprintSRS
        footprintSpatialFit georeferencedBy georeferencedDate georeferenceProtocol georeferenceSources georeferenceVerificationStatus georeferenceRemarks geologicalContextID
        earliestEonOrLowestEonothemlatestEonOrHighestEonothem earliestEraOrLowestErathem latestEraOrHighestErathem earliestPeriodOrLowestSystem latestPeriodOrHighestSystem
        earliestEpochOrLowestSeries latestEpochOrHighestSeries earliestAgeOrLowestStage latestAgeOrHighestStage lowestBiostratigraphicZone highestBiostratigraphicZonelithostratigraphicTerms
        group formation member bed identificationID identificationQualifier typeStatus identifiedBy dateIdentified identificationReferences identificationVerificationStatus
        identificationRemarks taxonID scientificNameID acceptedNameUsageID parentNameUsageID originalNameUsageID nameAccordingToID namePublishedInID taxonConceptID scientificName
        acceptedNameUsage parentNameUsage originalNameUsage nameAccordingTo namePublishedIn namePublishedInYear higherClassification kingdom phylum class order family genus
        subgenus specificEpithet infraspecificEpithet taxonRank verbatimTaxonRank vernacularName nomenclaturalCode taxonomicStatus nomenclaturalStatus taxonRemarks
        datasetKey publishingCountry lastInterpreted elevation elevationAccuracy depth depthAccuracy distanceAboveSurface distanceAboveSurfaceAccuracy issue mediaType
        hasCoordinate hasGeospatialIssues taxonKey acceptedTaxonKey kingdomKey phylumKey classKey orderKey familyKey genusKey subgenusKey speciesKey species genericName acceptedScientificName
        typifiedName protocol lastParsed lastCrawled repatriated
        '''
        for index, row in df.iterrows():
            if args.verbose:
                if nlines < chunksize:
                    progress(index, nlines)
                else:
                    progress(index*linesize, filesize)
            '''get values'''
            try:
                gbifid = str(row['gbifID'])
                speciekey = int(float(row['speciesKey']))
                lon = round(float(row['decimalLongitude']), 6)
                lat = round(float(row['decimalLatitude']), 6)
                year = row['year']
                month = row['month']
                day = row['day']
                date_obj = datetime.datetime(int(year), int(month), int(day), 0, 0)
                date = date_obj.strftime("%Y-%m-%d")
                #print(index, gbifid, speciekey, lon, lat, date)
            except Exception as exception:
                # Any missing/unparsable field (typically the date) rejects
                # the observation.
                # traceback.print_exc()
                nobs_wrong_format += 1
                nobs_rejected += 1
                continue
            else:
                '''test if observation is in domain, in time and in species list'''
                # NOTE(review): the -n filter requires args.species to hold
                # ints, since speciekey is an int here — confirm the parser
                # converts them.
                if grid_maker.is_in_grid([lon, lat]) and date_obj >= args.strdate and date_obj <= args.enddate and (not args.species or speciekey in args.species):
                    cell = grid_maker.build_cell([lon, lat])
                    if not cell in grid:
                        nobs_outside_grid_or_time += 1
                        #print(cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        #handle_error(msg=cell + ' ' + str(lon) + ' ' + str(lat) + ' is not in grid')
                        continue
                    '''
                    filter: only consider one observation per day, per grid
                    create key to save only one obs per day, per grid
                    '''
                    # KeyError below means no observation exists yet for this
                    # cell/date — the except branch initialises the date list.
                    try:
                        if speciekey in big_data[cell]['dates'][date]:
                            # print('repeated: ' + cell + ' ' + date + ' ' + speciekey)
                            nobs_repeated += 1
                            nobs_rejected += 1
                            continue
                        else:
                            # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                            big_data[cell]['dates'][date].append(speciekey)
                            big_data[cell]['sum'] += 1
                            if speciekey in big_data[cell]['species']:
                                big_data[cell]['species'][speciekey]['count'] += 1
                            else:
                                big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                            nobs_accepted += 1
                            f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                    except KeyError:
                        # print('accepted: ' + cell + ' ' + date + ' ' + speciekey)
                        big_data[cell]['dates'][date] = []
                        big_data[cell]['dates'][date].append(speciekey)
                        big_data[cell]['sum'] += 1
                        if speciekey in big_data[cell]['species']:
                            big_data[cell]['species'][speciekey]['count'] += 1
                        else:
                            big_data[cell]['species'][speciekey] = {'speciekey': speciekey, 'count': 1}
                        nobs_accepted += 1
                        f.write("{0:.6f};{1:.6f};{2:d}\r\n".format(lat, lon, speciekey))
                else:
                    # print('out of domain or out of time period: ' + cell + ' ' + str(lon) + ' ' + str(lat) + ' ' + date + ' ' + str(speciekey))
                    nobs_outside_grid_or_time += 1
                    nobs_rejected += 1
        # NOTE(review): elapsed_time is called with a list here but with a
        # scalar at the end of the script — confirm it supports both.
        if args.vverbose: elapsed_time([s_time, start_time])
        del df
        gc.collect()
    f.close()
    print()
    print('\tobservations outside grid, time or selected species: {0}'.format(nobs_outside_grid_or_time))
    print('\tobservations wrong format (no date): {0}'.format(nobs_wrong_format))
    print('\tobservations repeated: {0}'.format(nobs_repeated))
    print('\tobservations rejected: {0}'.format(nobs_rejected))
    print('\tobservations accepted: {0}'.format(nobs_accepted))
    print('\tobservations total: {0}'.format(nobs))
    print()
    ''' delete unecessary variables '''
    # The per-date dedup index is only needed during ingestion; drop it to
    # free memory before the aggregation pass.
    for c in big_data:
        del big_data[c]['dates']
    print('4. process big data and output results')
    '''open output files'''
    fout = open(args.output, 'w+')
    fgeo = open(args.outdir + grid_file, 'w+')
    fgeo.write("lat;lon;geohash;has_species\r\n")
    community_list = []
    community_accepted = 0
    units_count = 0
    for cell in big_data:
        (lat, lon) = coordinates = geohash.decode(cell)
        '''create new community'''
        community = BioCommunity(cell)
        community_unit_list = []
        for speciekey, v in big_data[cell]['species'].items():
            member = BioMember(speciekey)
            member_count = v['count']
            '''add member to community'''
            community.add_member(member, member_count)
            '''add member to member list'''
            # One entry per observed unit, so we can later draw units at
            # random to build the presence matrix.
            for i in range(member_count):
                community_unit_list.append(member.getid())
        community_units_count = community.get_units_count()
        richness = community.get_richness()
        '''only consider community with more than 2 observations'''
        if community_units_count > 2:
            community_accepted += 1
            units_count += community_units_count
            if args.vverbose: print(" There are {mc:} units in community {cell:} ({lat:}, {lon:}. The total diversity is {rich:} species)".format(mc=community_units_count, cell=cell, lat=lat, lon=lon, rich=community.get_richness()))
            '''add community to list'''
            community_list.append(community)
            '''print header
            "Cell eydsh (37.28759765625, -7.53662109375)" * SampleSet * 1 1 1
            8 8
            00001 00002 00003 00004 00005 00006 00007 00008
            '''
            fout.write("\"Cell {cell:} ({lat:}, {lon:})\"\t*SampleSet*\t1\t1\t1\r\n".format(cell=cell, lat=lat, lon=lon))
            fout.write("{r:}\t{uc:}\r\n".format(r=richness, uc=community_units_count))
            for i in range(1, community_units_count + 1):
                fout.write("\t{0:05d}".format(i))
            fout.write("\r\n")
            '''set matrix data for random get'''
            # matrix: speciekey -> list of 0/1 flags, one column per unit,
            # filled by drawing units in random order.
            matrix = {}
            members = community.get_all_members()
            '''init matrix'''
            for speciekey in members:
                matrix[speciekey] = []
            for i in range(community_units_count):
                '''get random member'''
                random_member = community_unit_list.pop(random.randrange(len(community_unit_list)))
                for speciekey in members:
                    if speciekey == random_member:
                        matrix[speciekey].append(1)
                    else:
                        matrix[speciekey].append(0)
            '''
            print matrix
            2474051 0 0 1 0 0 0 0 0
            2492606 0 0 0 0 0 0 0 1
            2492867 0 0 0 0 1 0 0 0
            2495000 0 0 0 0 0 1 0 0
            2498415 0 0 0 0 0 0 1 0
            5229493 1 0 0 0 0 0 0 0
            6092830 0 0 0 1 0 0 0 0
            9515886 0 1 0 0 0 0 0 0
            '''
            for speciekey in sorted(matrix):
                fout.write("{0:d}".format(speciekey))
                for i in range(community_units_count):
                    fout.write("\t{0:}".format(int(matrix[speciekey][i])))
                fout.write("\r\n")
            fgeo.write("{lat:};{lon:};{cell:};{uc:}\r\n".format(lat=lat, lon=lon, cell= cell, uc=community_units_count))
    fout.close()
    fgeo.close()
    '''add first line in output file'''
    first_line = "*MultipleSampleSets*\t{0:}\t\"PT Community with more then 2 members".format(community_accepted)
    if args.strdate.year != 1900:
        first_line += '; start year: ' + str(args.strdate.year)
    if args.enddate.year != 2100:
        first_line += '; end year: ' + str(args.enddate.year)
    first_line += "\"\r\n"
    line_prepender(args.output, first_line)
    '''print stat'''
    meta = BioCommunity('tmp')
    meta.add_communities(community_list)
    print("\n== Metacommunities with more then 2 individuals:")
    print("\t{0:} communities".format(len(community_list)))
    print("\t{0:} species".format(meta.get_richness()))
    print("\t{0:} individuals".format(units_count))
    '''the end'''
    elapsed_time(start_time)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
26223,
2556,
3261,
1367,
25,
2931,
25,
2548,
13130,
198,
198,
31,
9800,
25,
34687,
6340,
26... | 2.177232 | 12,131 |
#!/usr/bin/env python
from pydpiper.application import AbstractApplication
import atoms_and_modules.registration_functions as rf
import atoms_and_modules.minc_modules as mm
import atoms_and_modules.minc_parameters as mp
import atoms_and_modules.LSQ6 as lsq6
import atoms_and_modules.LSQ12 as lsq12
import atoms_and_modules.NLIN as nl
import atoms_and_modules.stats_tools as st
import atoms_and_modules.registration_file_handling as rfh
from os.path import split, splitext, abspath
import sys
import logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # NOTE(review): LongitudinalTwolevelNlin is neither defined nor imported
    # in the code shown here — presumably declared earlier in this module;
    # confirm before running.
    application = LongitudinalTwolevelNlin()
    application.start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
279,
5173,
79,
9346,
13,
31438,
1330,
27741,
23416,
198,
11748,
23235,
62,
392,
62,
18170,
13,
2301,
33397,
62,
12543,
2733,
355,
374,
69,
198,
11748,
23235,
62,
392,
62,
... | 2.986239 | 218 |
# Interactive compound-interest calculator.
# Enter the interest rate (%) as a float (ex. 4% = .04).
P = int(input("What is the principal? \n \n"))
n = int(input("How many compoundings per year? \n \n"))
r = float(input("What is the interest rate? \n \n"))
t = int(input("How many years will your money be compounded for? \n \n"))

# Standard compound-interest formula: A = P * (1 + r/n)^(n*t).
Total_money_compounded = P * (1 + r / n) ** (n * t)

print("The total amount of money you will have in", t, "years is :", Total_money_compounded)
| [
2,
6062,
262,
1393,
2494,
11509,
355,
257,
12178,
357,
1069,
13,
604,
4,
796,
764,
3023,
737,
198,
198,
47,
796,
493,
7,
15414,
7203,
2061,
318,
262,
10033,
30,
3467,
77,
3467,
77,
48774,
220,
198,
77,
796,
493,
7,
15414,
7203,
... | 2.907895 | 152 |
#!/usr/bin/env python
import sys, os, torch, h5py
from transformers import BertModel, BertTokenizer
from nlptools.utils import zdump
# Export the word-embedding matrix and vocabulary of a pretrained BERT model:
# weights go to an HDF5 dataset ("word2vec"), the word->index map is dumped
# with nlptools' zdump.
model_path = sys.argv[1] if len(sys.argv) > 1 else "."
model_name = "bert-base-uncased"
vocab_name = os.path.join(model_path, 'vocab')  # NOTE(review): unused below — confirm it can be removed
weight_path = os.path.join(model_path, '{}.h5py'.format(model_name))
word2idx_path = os.path.join(model_path, '{}.lookup'.format(model_name))

model = BertModel.from_pretrained(model_name)
# Input token embedding table as a plain numpy array (vocab_size x hidden).
weights = model.embeddings.word_embeddings.weight.detach().numpy()
tokenizer = BertTokenizer.from_pretrained(model_name)
word2idx = tokenizer.vocab

print(weights.shape)
print(len(tokenizer.vocab))

# Recreate the HDF5 file from scratch on every run.
if os.path.exists(weight_path):
    os.remove(weight_path)
with h5py.File(weight_path, 'w') as h5file:
    h5file.create_dataset("word2vec", data=weights)
zdump(word2idx, word2idx_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
11,
28686,
11,
28034,
11,
289,
20,
9078,
198,
6738,
6121,
364,
1330,
22108,
17633,
11,
22108,
30642,
7509,
198,
6738,
299,
75,
457,
10141,
13,
26791,
1330,
1976,
39455,
1... | 2.541916 | 334 |
# Advent of Code 2021, day 1 part 2: count how often the sum of a sliding
# window of three measurements increases from one window to the next.
FILENAME = './day1/data/input'

measurements = []
with open(FILENAME) as file:
    for line in file:
        measurements.append(int(line.strip()))

# Consecutive window sums share their two middle terms, so
# (m[i-2]+m[i-1]+m[i]) > (m[i-3]+m[i-2]+m[i-1]) reduces to m[i] > m[i-3].
s = 0
for i in range(3, len(measurements)):
    if measurements[i] > measurements[i - 3]:
        s = s + 1

print(s)
| [
46700,
1677,
10067,
796,
705,
19571,
820,
16,
14,
7890,
14,
15414,
6,
198,
198,
1326,
5015,
902,
796,
17635,
198,
198,
4480,
1280,
7,
46700,
1677,
10067,
8,
355,
2393,
25,
198,
220,
220,
220,
329,
1627,
287,
2393,
25,
198,
220,
22... | 2.462069 | 145 |
import pytest
from gyomu.gyomu_db_model import *
from gyomu.gyomu_db_schema import *
from gyomu.user_factory import UserFactory
from gyomu.user import User
from gyomu.db_connection_factory import DbConnectionFactory
from gyomu.json import Json
from marshmallow import ValidationError
# Module-level schema instances shared by the tests below; the *_load
# variants deserialize into model instances instead of plain dicts.
gyomuapps_schema = GyomuAppsSchema()
gyomuapps_total_list_schema = GyomuAppsSchema(many=True)
gyomuapps_schema_load = GyomuAppsSchema(load_instance=True)
gyomuapps_total_list_schema_load = GyomuAppsSchema(many=True, load_instance=True)

# Application ids reserved for these tests.
TEST_APPLICATION_ID2 = 32651
# NOTE(review): name is misspelled ("APPLICAIONT"); kept as-is because other
# modules may reference it — rename in a coordinated change.
TEST_APPLICAIONT_ID3 = 32652
| [
11748,
12972,
9288,
198,
6738,
21486,
296,
84,
13,
1360,
296,
84,
62,
9945,
62,
19849,
1330,
1635,
198,
6738,
21486,
296,
84,
13,
1360,
296,
84,
62,
9945,
62,
15952,
2611,
1330,
1635,
198,
6738,
21486,
296,
84,
13,
7220,
62,
69,
9... | 2.652968 | 219 |
import os, sys
import math
import copy
import time
from datetime import datetime
import re
# import requests
import urllib
import lxml.html as ht
# import lxml.etree as et
##------------------------------------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '../_public')) ## Note: ํ์ฌ ๋๋ ํ ๋ฆฌ ๊ธฐ์ค ์๋ ๊ฒฝ๋ก ์ค์
from utils_basic import (
_create_folder,
_read_file,
_file_to_json,
_json_to_file,
_to_lists,
_to_digit,
_divide_list,
_fn
)
from utils_scraping import (
_root,
_remove_punc,
_pages_by_pagination,
_scrape_list_pages,
_extract_values,
_scrape_detail_page,
_scrape_full_html
)
from scrap_selenium import (
_selenium,
_source,
_wait,
_login,
)
# sys.path.append(os.path.join(os.path.abspath('../staff')))
# from ScrapBySelenium import ScrapBySelenium
_base_url = 'https://m.stock.naver.com'
TODAY = datetime.now().strftime("%Y%m%d")
##
##----------------------------------------------------------
def scrap_naver_total(shcode='336370'):
    """
    Scrape the 'total' page (summary / investor trading trend / ...) of the
    given stock code from Naver mobile finance, driving a real browser via
    selenium helpers.

    :param shcode: stock code, e.g. '336370'
    :return: False on page-load failure; otherwise falls through (None)
    """
    # url = f"https://m.stock.naver.com/item/main.nhn#/stocks/{shcode}/total"
    url = f"https://m.stock.naver.com/index.html#/domestic/stock/{shcode}/total"
    browser = _selenium(url=url, headless=False)
    button = _wait(xpath='.//*[@id="content"]//div[@class="VStockInfo_article__3dWiQ"]/a', driver=browser)
    if not button:
        print(f"페이지 로딩 실패")
        return False
    button.click()  ## expand the stock-info section ("more")
    html = _source(driver=browser, xpath='.//*[@id="content"]')
    root = _root(html)
    # ## NOTE: (commented out) Naver Finance / domestic / summary table
    # info = root.xpath('.//ul[@class="VStockInfo_list__1Hfnb"]')[0]
    # values = {
    #     'keys': {
    #         'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/strong',
    #         'target': 'text',
    #     },
    #     'vals': {
    #         'xpath': './/li[contains(@class, "VStockInfo_item__1jFNs")]/div/span',
    #         'target': 'text',
    #     },
    # }
    # r = _extract_values(info, values, _zip=None)
    # print({key: _to_digit(val) for key, val in zip(r['keys'], r['vals'])})
    ## NOTE: Naver Finance / domestic / investor trading trend
    button = _wait(xpath='.//*[@id="content"]//div[@class="VTableTrend_boxMore__1EVMo"]/a[1]', driver=browser)
    if not button:
        print(f"페이지 로딩 실패")
        return False
    button.click()  ## expand the trading-trend section ("more")
    info = root.xpath('.//div[@class="VTableTrend_inner__1Crkx"]')[0]
    values = {
        'keys': {
            'xpath': './table/thead/tr/th',
            'target': 'text'
        },
        'vals': {
            'xpath': './table/tbody/tr/td',
            'target': 'content'
        }
    }
    r = _extract_values(info, values, _zip=None)
    n = len(r['keys'])  ## NOTE: total number of columns
    # First cell of each row is a label; the next-to-last column duplicates
    # its text (hence the val[:len(val)//2] halving); everything else is
    # converted to a number.
    vals = [val if i%n == 0 else _to_digit(val[:len(val)//2]) if i%n==n-2 else _to_digit(val) for i, val in enumerate(r['vals'])]
    rows = [r['keys']] + _divide_list(vals, n)
    print(f"투자동향: {rows}")
    # ## NOTE: (commented out) same-industry comparison
    # xpath = '//div[contains(@class, "compare")]/a'
    # if s.wait(xpath, max_wait=3) != -1:  # when a 'same-industry comparison' link exists
    #     upjong = s.attribute_value(xpath, "href").split('=')[-1]
    #     output['업종번호'] = upjong
    # ## (commented out) consensus block
    # xpath = '//span[contains(@class, "data_lyr")]'
    # if s.check_element(xpath):  # NOTE: when a consensus block exists
    #     trade_weight = s._convert_to_float(s.find_element(xpath).text)  # NOTE: buy/sell opinion score
    #     goal_price = s._convert_to_float(s.find_element('//span[@class="goal_stock"]/em').text)  # NOTE: target price
    #     output['매매추천'] = trade_weight
    #     output['목표주가'] = goal_price
    # s.close()  # NOTE: selenium browser close
    # return output
# def scrap_naver_upjong():
# """
# ์
์ข
์์น๋ฅ
# """
# url = "https://m.stock.naver.com/sise/siseList.nhn?menu=upjong"
# s = ScrapBySelenium(url=url)
# # wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# wait_xpath = '//span[@class="u_pg_total"]'
# s.wait(wait_xpath)
# total = s._convert_to_float(s.find_element_text(wait_xpath))
# wait_xpath = '//span[@class="u_pg_area"]/span[contains(@class, "u_pg_txt")]'
# s.click(xpath=wait_xpath) # ๋ฒํผ ํผ์น๊ธฐ
# output = []
# for i in range(0, total):
# gap_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//span[1]'
# name_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//strong[@class="stock_item"]'
# no_xpath = f'//ul[contains(@class, "dmst_type_lst")]/li[{i+1}]//a[1]'
# # <a href="/sise/siseGroupDetail.nhn?menu=upjong&no=218" class="btn_detail" onclick="nclk(this, 'mil.cat', '', '');">์์ธ ๋ชฉ๋ก ๋ณด๊ธฐ</a>
# name = s.find_element(name_xpath).text
# no = s.attribute_value(no_xpath, 'href').split('=')[-1]
# gap = s._convert_to_float(s.find_element(gap_xpath).text)
# print(f"{name}, {no}, {gap}")
# output.append({'์
์ข
๋ช
': name, '์
์ข
๋ฒํธ': no, '์
์ข
์์น๋ฅ ': gap})
# s.close()
# return output
if __name__ == '__main__':
    ## NOTE: test run — scrape the 'total' page for a single stock code
    scrap_naver_total(shcode='336370')
## NOTE: shcode์ ์ข
๋ชฉ์ ๋ํ '์ข
ํฉ/ํฌ์์๋ณ ๋งค๋งค๋ํฅ/์
์ข
๋ฒํธ/'
# t = scrap_naver_total(shcode='336370')
# print(f"{t}")
## NOTE: ์
์ข
๋ณ ์
์ข
๋ช
/์
์ข
๋ฒํธ/์์น๋ฅ
# u = scrap_naver_upjong()
# print(f"{u}")
## NOTE: file
# path = './naver_sise_rise_table_bak.html'
# path = './naver_sise_rise_table.html'
# root = _tree_from_file(path=path)
# # text = _text_by_xpath(root, xpath='.//div[@class="choice_lt"]/div')
# # text = _text_by_xpath(root, xpath='.//th')
# result = []
# for i in range(3, 13):
# texts = _texts_by_xpath(root, xpath=f'.//table[@class="type_2"]/tbody/tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
# # print(f"{[el.text for el in root.findall('.//country//rank')]}")
# ## NOTE: naver_stock_m_domestic_upper_kospi
# path = './naver_stock_m_domestic_upper_kospi.html'
# root = _tree_from_file(path=path)
# result = []
# for i in range(1, 10):
# texts = _texts_by_xpath(root, xpath=f'.//table/tbody//tr[{i}]/td')
# if len(texts) > 2:
# result.append(texts)
# print(f"result: {result}")
## TODO:
## naver ์
์ข
์ฝ๋(page serial)
# https://m.stock.naver.com/sise/siseGroupDetail.nhn?menu=upjong&no=218
# # ๋ค์ด๋ฒ
# N์ฆ๊ถ > ๊ตญ๋ด์ฆ์
# ### ์ข
ํฉ
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/total
# ์ ์ผ
# ์๊ฐ
# ๊ณ ๊ฐ
# ์ ๊ฐ
# ๊ฑฐ๋๋
# ๋๊ธ
# ์์ด
# ์ธ์ธ์์ง์จ
# 52์ฃผ์ต๊ณ
# 52์ฃผ์ต์
# PER
# EPS
# BPS
# ๋ฐฐ๋น์์ต๋ฅ
# ์ฃผ๋น๋ฐฐ๋น๊ธ
# ### ํ ๋ก
# - https://m.stock.naver.com/item/main.nhn#/stocks/336370/discuss
# ### ๋ด์ค.๊ณต์
# #### ์ข
๋ชฉ๋ด์ค
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/news
# #### ๊ณต์์ ๋ณด
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/notice
# #### IR์ ๋ณด
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ir
# ### ์์ธ.ํธ๊ฐ
# #### ์ผ๋ณ์์ธ
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/price
# #### 5๋จ๊ณ ํธ๊ฐ
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/ask
# ### ์ฌ๋ฌด
# #### ์ฐ๊ฐ์ค์
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/annual
# #### ๋ถ๊ธฐ์ค์
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/quarter
# #### ๋น์ฌ๋ฌด์ ๋ณด
# https://m.stock.naver.com/item/main.nhn#/stocks/336370/nonfinance
# ## ํ
# ### ๊ด์ฌ์ข
๋ชฉ
# ### ํธ๋ ๋ ๋ญํน
# ## ์์ฅ์งํ
# ### ์ฃผ์
# ### ํ์จ
# ### ์๋์ง
# ### ๊ธ์
# ### ๊ธ๋ฆฌ
# ### ๋์ถ์ฐ๋ฌผ
# ## ๊ตญ๋ด
# ### ์๊ฐ์ด์ก
# ### ์
์ข
# ### ํ
๋ง
# ### ๊ทธ๋ฃน
# ### ์ธ๊ธฐ๊ฒ์
# ### ๋ฐฐ๋น
# ### ๊ฑฐ๋์์
# ### ์ํ๊ฐ
# ###
# ์ปจ์ผ์์ค
# ์ปจ์ผ์์ค
# ์
์ข
# ํ
๋ง
# ๊ทธ๋ฃน
# ๊ฑฐ๋์์
# https://m.stock.naver.com/sise/siseList.nhn?menu=quant&sosok=0
# ์ํ๊ฐ
# ์์น
# ํ๋ฝ
# ๊ด๋ฆฌ
| [
11748,
28686,
11,
25064,
198,
11748,
10688,
198,
11748,
4866,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
198,
2,
1330,
7007,
198,
11748,
2956,
297,
571,
198,
11748,
300,
19875,
13,
6494,
355,
289,
83,
1... | 1.641231 | 4,875 |
from azure.cosmos import CosmosClient
import subprocess
# Resolve a Cosmos DB account's endpoint and key, then build a client.
RES_GROUP = "my-cosmsos-resource-group"  # NOTE(review): "cosmsos" looks misspelled — confirm the real resource-group name
ACCT_NAME = "my-cosomso-account-name"    # NOTE(review): "cosomso" looks misspelled — confirm the real account name

# NOTE(review): get_account_uri/get_key are not defined in this snippet —
# presumably helpers (e.g. wrapping `az cosmosdb ...` via subprocess); verify.
url = get_account_uri(RES_GROUP, ACCT_NAME)
key = get_key(RES_GROUP, ACCT_NAME)
print(url, key)

client = CosmosClient(url, credential=key)
| [
6738,
35560,
495,
13,
6966,
16785,
1330,
39972,
11792,
628,
198,
11748,
850,
14681,
198,
198,
19535,
62,
46846,
796,
366,
1820,
12,
6966,
907,
418,
12,
31092,
12,
8094,
1,
198,
2246,
4177,
62,
20608,
796,
366,
1820,
12,
6966,
296,
5... | 2.67619 | 105 |
import logging
import seqlog
import time
# Configure seqlog as the root logging handler, shipping batched log events
# to a local Seq server.
seqlog.log_to_seq(
    server_url="http://127.0.0.1:5341/",
    # SECURITY(review): hard-coded API key committed to source — move it to an
    # environment variable or config file and rotate the key.
    api_key="RK2UCFPEIY7dsttQJA9F",
    level=logging.NOTSET,
    batch_size=10,
    auto_flush_timeout=1,  # seconds
    override_root_logger=True,
    # json_encoder_class=json.encoder.JSONEncoder # Optional; only specify this if you want to use a custom JSON encoder
)

# One message per level to exercise the pipeline.
logging.debug("A log message in level debug")
logging.info("A log message in level info")
logging.warning("A log message in level warning")
logging.error("A log message in level error")
logging.critical("A log message in level critical")

# seqlog supports named-hole message templates.
logging.info("Hello, {name}!", name="World")
logging.info("Processed order {orderId} by {customer}",
             orderId = 15, customer = "Johnny")

try:
    result = 2 / 0
except Exception as exception:
    # logging.exception attaches the current traceback to the event.
    logging.exception("We got an exception")

time.sleep(2) # sleep for 2 seconds to give seqlog time to write to Seq
11748,
18931,
198,
11748,
33756,
6404,
198,
11748,
640,
198,
198,
41068,
6404,
13,
6404,
62,
1462,
62,
41068,
7,
198,
220,
220,
4382,
62,
6371,
2625,
4023,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
20,
33660,
14,
1600,
198,
220,
22... | 2.744186 | 344 |
import test_storage
import storage
| [
198,
11748,
1332,
62,
35350,
198,
198,
11748,
6143,
628
] | 3.8 | 10 |
# dup_reads.py
#
# create duplicate reads
#
import sys
import random
if (len(sys.argv) < 4 or len(sys.argv) > 5):
print "usage: %s <# of duplicate reads> <max duplication> read1.fq [read2.fq]" % sys.argv[p]
exit(1)
dupcount = int(sys.argv[1])
maxdup = int(sys.argv[2])
in1 = open(sys.argv[3], "r")
out1 = open("dup_" + sys.argv[3], "w")
paired = len(sys.argv) >= 5
if paired:
in2 = open(sys.argv[4], "r")
out2 = open("dup_" + sys.argv[4], "w")
for i in range(0, dupcount):
r1 = readread(in1)
if paired:
r2 = readread(in2)
ndup = random.randint(2,maxdup)
for j in range(0, ndup):
writeread(out1, ["@dup%d_%s" % (j, r1[0][1:]), r1[1], r1[2], r1[3]])
if paired:
writeread(out2, ["@dup%d_%s" % (j, r2[0][1:]), r2[1], r2[2], r2[3]])
| [
2,
32597,
62,
40779,
13,
9078,
198,
2,
198,
2,
2251,
23418,
9743,
198,
2,
198,
198,
11748,
25064,
198,
11748,
4738,
198,
198,
361,
357,
11925,
7,
17597,
13,
853,
85,
8,
1279,
604,
393,
18896,
7,
17597,
13,
853,
85,
8,
1875,
642,... | 1.880282 | 426 |
import os
from scrapy import Request
from scrapy.pipelines.files import FilesPipeline
from scrapy.exceptions import DropItem
from .items import Course, Lesson
from . import settings
| [
11748,
28686,
198,
198,
6738,
15881,
88,
1330,
19390,
198,
6738,
15881,
88,
13,
79,
541,
20655,
13,
16624,
1330,
13283,
47,
541,
4470,
198,
6738,
15881,
88,
13,
1069,
11755,
1330,
14258,
7449,
198,
198,
6738,
764,
23814,
1330,
20537,
... | 3.647059 | 51 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import base64
import hashlib
import traceback
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from jwcrypto.common import base64url_encode
from ratelimit.mixins import RatelimitMixin
from openid_connect_op.models import OpenIDClient
from openid_connect_op.signals import access_token_start, access_token_finish
from openid_connect_op.utils.jwt import JWTTools
from . import OAuthRequestMixin
from .errors import OAuthError
from .parameters import AuthenticationParameters, TokenParameters
from ..models import OpenIDToken
# Section 4.1.3 of OAuth 2.0
# https://tools.ietf.org/pdf/draft-hunt-oauth-chain-01.pdf
| [
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
12854,
1891,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
... | 3.333333 | 258 |
# -*- coding: utf-8 -*-
import datetime
from odoo import models, fields, api , _
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4818,
8079,
198,
6738,
16298,
2238,
1330,
4981,
11,
7032,
11,
40391,
837,
4808,
198
] | 2.645161 | 31 |
import os
from datetime import datetime, timedelta
import aiosqlite
import disnake
from aiosqlite import connect
from disnake.ext import commands
from dotenv import load_dotenv
from exencolorlogs import Logger
from utils.constants import *
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
11748,
257,
4267,
13976,
578,
198,
11748,
595,
77,
539,
198,
6738,
257,
4267,
13976,
578,
1330,
2018,
198,
6738,
595,
77,
539,
13,
2302,
1330,
9729,
19... | 3.422535 | 71 |
# import libraries
import clr
import os
# Build the path to the pyRevit master folder inside the current user's
# roaming profile (Windows-only: USERPROFILE and os.startfile).
userProfile = os.environ.get("USERPROFILE")
# Open the folder in the file explorer.
try:
    # The concatenation is inside the try so a missing USERPROFILE
    # (userProfile is None -> TypeError) is reported instead of crashing;
    # the original built the path outside the try and used a bare except:.
    prvPath = userProfile + '\\AppData\\Roaming\\pyRevit-Master\\'
    os.startfile(prvPath)
except (TypeError, OSError):
    # TypeError: USERPROFILE env var not set; OSError: path missing or
    # not openable by the shell.
    print('The path was not found.')
2,
1330,
12782,
198,
11748,
537,
81,
198,
11748,
28686,
198,
198,
2,
3497,
290,
1382,
262,
12972,
18218,
270,
3108,
198,
7220,
37046,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
29904,
31190,
25664,
4943,
198,
1050,
85,
15235,
796,
283... | 2.873684 | 95 |
"""
Preprocess the XSUM dataset
There are several noisy training instances which do not contain any words in pre-defined vocabulary of NTM.
We remove these instances.
Here are the details about these removed instance:
- instance #37993:
input: Here are our favourites:
target: On Monday, we asked for you to send us your favourite shop pun names.
- instance #47104:
input: Here are some of the Ethiopian runner's greatest feats.
target: Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.
- instance #71767:
input: JANUARYFEBRUARYMARCHAPRILMAYJUNE
target: As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.
- instance #94109:
input: Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim
target: FOOTBALL LEAGUE RESULTS
- instance #95592:
input: KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM
target: Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.
"""
import os
# The noisy instances are matched verbatim (including the trailing newline),
# so membership is tested against frozensets: O(1) per line instead of a
# list scan over every line of the corpus.
hardcoded_delete_input = frozenset([
    'Here are our favourites:\n',
    "Here are some of the Ethiopian runner's greatest feats.\n",
    'JANUARYFEBRUARYMARCHAPRILMAYJUNE\n',
    'Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim\n',
    'KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM\n',
])
hardcoded_delete_target = frozenset([
    'On Monday, we asked for you to send us your favourite shop pun names.\n',
    'Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.\n',
    'As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.\n',
    'FOOTBALL LEAGUE RESULTS\n',
    'Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.\n',
])

# Keep every line that is not one of the hard-coded noisy instances.
with open("data/xsum/train.source", "r", encoding='utf8') as f:
    train_input = [line for line in f if line not in hardcoded_delete_input]
with open("data/xsum/train.target", "r", encoding='utf8') as f:
    train_target = [line for line in f if line not in hardcoded_delete_target]

print(f"there are {len(train_input)} in the new source file")
print(f"there are {len(train_target)} in the new target file")

# Replace the original files with the filtered content.  The explicit
# remove() is kept from the original code, although opening in mode "w"
# would truncate the files anyway.
if os.path.exists("data/xsum/train.source"):
    os.remove("data/xsum/train.source")
if os.path.exists("data/xsum/train.target"):
    os.remove("data/xsum/train.target")

with open("data/xsum/train.source", "w", encoding='utf8') as f:
    f.writelines(train_input)
with open("data/xsum/train.target", "w", encoding='utf8') as f:
    f.writelines(train_target)
| [
37811,
198,
6719,
14681,
262,
1395,
50,
5883,
27039,
628,
220,
220,
220,
1318,
389,
1811,
31210,
3047,
10245,
543,
466,
407,
3994,
597,
2456,
287,
662,
12,
23211,
25818,
286,
399,
15972,
13,
198,
220,
220,
220,
775,
4781,
777,
10245,
... | 2.621289 | 1,381 |
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from statsmodels.tools.eval_measures import bic
import time
from scipy.stats import pearsonr
# ---------------------------------------------------------------------------
# Per-subject behavioural analysis of the NARPS gambling data:
#  1) check that each subject's mean p(accept) preserves the group-level
#     ordering of the four response categories,
#  2) fit four nested logistic models per subject (full, intercept-only,
#     gain+intercept, loss+intercept) and accumulate their BIC scores,
#  3) print summaries and draw a three-panel figure.
# NOTE(review): assumes the CSV provides columns ID, participant_response,
# p_accept, gain, loss and accept -- confirm against the data file.
# ---------------------------------------------------------------------------
parent_dir = '/media/seb/HD_Numba_Juan/Dropbox/postdoc/NARPS/preprint1'
data_fn = parent_dir + '/participants_and_model.csv'
df = pd.read_csv(data_fn)
#list(df.columns.values)

#Check if the p_accept aligns with 4 levels of DV
DV_4levels_all = df[['participant_response','p_accept']]
DV_4levels_all_mn = DV_4levels_all.groupby(['participant_response']).mean()['p_accept']
DV_4levels_all_std = DV_4levels_all.groupby(['participant_response']).std()['p_accept']
# argsort of argsort converts the category means into ranks
DV_4levels_all_ranks = np.argsort(np.argsort(DV_4levels_all_mn.values)) #==[3,0,2,1]
DV_4levels_per_sub_mn = df.groupby(['participant_response','ID']).mean()['p_accept'].unstack(level=0)
DV_4levels_per_sub_mn = DV_4levels_per_sub_mn.drop([13,25,30,56])  # excluded subjects

# accumulators over subjects
bic_score_full = 0
bic_score_intercept = 0
bic_score_gain = 0
bic_score_loss = 0
num_subs = 0
good_ranks = 0
all_coefs = []
bic_all = []
bic_ranks = []
bad_subs_full_model = []
bad_bic_full_model = []
bad_rank_subs = []
bad_ranks = []
bad_probs = []
for sub in np.unique(df.ID):
    if sub == 13 or sub == 25 or sub==30 or sub==56:
        print('sub: ', sub, 'excluded')
        continue
    sub_df = df[df.ID==sub]
    #Check if the p_accept aligns with 4 levels of DV
    DV_vals = DV_4levels_per_sub_mn.loc[sub].values
    nan_idx = np.where(np.isnan(DV_vals))[0]
    DV_vals2 = [x for x in DV_vals if str(x) != 'nan']
    DV_4levels_sub_ranks = np.argsort(np.argsort(DV_vals2))
    # drop the same (NaN) categories from the group-level means before ranking
    DV_4levels_all_ranks2 = np.argsort(np.argsort(np.delete(DV_4levels_all_mn.values, nan_idx)))
    num_subs += 1
    if (DV_4levels_sub_ranks==DV_4levels_all_ranks2).all():
        good_ranks += 1
    else:
        bad_rank_subs.append(sub)
        bad_ranks.append(DV_4levels_sub_ranks)
        bad_probs.append(DV_vals2)

    #Run the logistic regressions
    # .copy() prevents pandas' SettingWithCopyWarning when the intercept
    # column is added to what would otherwise be a view of sub_df
    X = sub_df[['gain','loss']].copy()
    X['intercept'] = 1.0
    y = sub_df.accept
    #Run the full model
    model_full = sm.Logit(y, X, missing='drop')
    result_full = model_full.fit()
    #result.summary()
    coefficients_full = np.array(result_full.params)
    all_coefs.append(coefficients_full)
    bic_score_full += bic(result_full.llf,len(y),len(coefficients_full))
    #Run the intercept only
    model_intercept = sm.Logit(y, X['intercept'], missing='drop')
    result_intercept = model_intercept.fit()
    bic_score_intercept += bic(result_intercept.llf,len(y),1)
    #Run intercept & gain
    model_gain = sm.Logit(y, X[['gain', 'intercept']], missing='drop')
    result_gain = model_gain.fit()
    bic_score_gain += bic(result_gain.llf,len(y),2)
    #Run intercept & loss
    model_loss = sm.Logit(y, X[['loss', 'intercept']], missing='drop')
    result_loss = model_loss.fit()
    bic_score_loss += bic(result_loss.llf,len(y),2)
    bic_per_sub = [bic(result_full.llf,len(y),len(coefficients_full)), bic(result_intercept.llf,len(y),1),
                   bic(result_gain.llf,len(y),2), bic(result_loss.llf,len(y),2)]
    bic_all.append(bic_per_sub)
    bic_ranks.append(np.argmin(bic_per_sub))
    if np.argmin(bic_per_sub)!=0: #0th index is the full model
        bad_subs_full_model.append(sub)
        bad_bic_full_model.append(bic_per_sub)

# BUG FIX: stack the per-subject coefficient vectors into an (n_subjects, 3)
# array *before* the tuple-indexing below.  The original code called
# pearsonr(all_coefs[:,0], all_coefs[:,1]) while all_coefs was still a plain
# Python list (it was only np.vstack-ed later, in the plotting section),
# which raises "TypeError: list indices must be integers".
all_coefs = np.vstack(all_coefs)

print('proportion of good ranks: ', good_ranks/float(num_subs)) #just 2 subs have strongly rejected inverted with weakly rejected
print('full, gain, loss, intercept')
print(bic_score_full, bic_score_gain, bic_score_loss, bic_score_intercept)
#full model wins for everyone
print('correlation between loss and gains coefficients:')
print(pearsonr(all_coefs[:,0], all_coefs[:,1]))
print('DV levels of p_accept:')
print(DV_4levels_all_mn)
print(DV_4levels_all_std)

#Time for plotting
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["figure.figsize"] = (20,10)
offset_points=(-5, -5)  # annotation offset (in points) from the axes corner
fs = 18  # shared font size for labels/annotations
#fig.canvas.draw()

#Plot BICs
plt.subplot(1, 3, 1)
bic_all = np.vstack(bic_all)
# reorder columns from [full, intercept, gain, loss] to
# [intercept, gain, loss, full] so they line up with bic_labels below
bic_all2 = np.hstack([bic_all[:,1:],bic_all[:,0].reshape(len(bic_all[:,0]),1)])
#bic_labels = np.tile(['Gain & Loss','Baseline','Gain only', 'Loss only'],len(bic_all))
bic_labels = np.tile(['Baseline\n (Intercept only)','Gain', 'Loss','Full\n (Gain & Loss)'],len(bic_all2))
sns.set_palette(sns.color_palette("PuBu"))
sns.stripplot(bic_labels, bic_all2.flatten(), jitter=True)
sns.despine()
#plt.plot(bic_labels, bic_all2.flatten(), '.')
plt.xlabel('Behavioral model', fontsize=fs)
plt.ylabel('Bayesian Information Criterion\n (BIC)', fontsize=fs)
plt.annotate('a', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)

#Plot the gain/loss coefficients
plt.subplot(1, 3, 2)
# (all_coefs was already stacked into an array right after the fitting loop)
plt.plot(all_coefs[:,0], all_coefs[:,1], 'k.')
plt.xlabel('Gain Coefficient\n (Full model)', fontsize=fs)
plt.ylabel('Loss Coefficient\n (Full model)', fontsize=fs)
plt.annotate('b', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)

#Plot DV levels
plt.subplot(1, 3, 3)
# reorder the response-category columns so they line up with DV_labels below
DV_4levels_for_plot = DV_4levels_per_sub_mn.values[:,[0,2,3,1]]
#DV_labels = np.tile(['Strongly accept','Strongly reject','Weakly accept', 'Weakly reject'],len(DV_4levels_per_sub_mn.values))
DV_labels = np.tile(['Strongly\n accept', 'Weakly\n accept', 'Weakly\n reject', 'Strongly\n reject'],len(DV_4levels_per_sub_mn.values))
#sns.palplot(sns.color_palette("RdGy_r"))
sns.set_palette(sns.color_palette("RdYlGn_r"))
sns.stripplot(DV_labels, DV_4levels_for_plot.flatten(), jitter=True)
sns.despine()
#plt.plot(DV_labels, DV_4levels_for_plot.flatten(), '.')
plt.xlabel('Participant response', fontsize=fs)
plt.ylabel('Mean probability of accepting gamble\n (Full model)', fontsize=fs)
plt.annotate('c', (1, 1),xytext=offset_points,xycoords='axes fraction',textcoords='offset points',ha='right', va='top',weight="bold", fontsize=fs)
#plt.savefig(parent_dir + '/figs/behavioral_model.png', bbox_inches='tight', dpi=300)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
9756,
27530,
13,
687,
4712,
13,
15042,
355,
895,
198,
6738,
9756,
27530,
13,
31391,
13,
18206,
62,
47336,
1330,
275,
291,
198,
11748,
640,
198,
6738,
629... | 2.303517 | 2,616 |
import turtle
# The figure is drawn by replaying this ordered table of pen commands;
# each entry is (turtle method name, argument), executed in sequence.
_STEPS = (
    ("forward", 300),
    ("right", 120),
    ("forward", 75),
    ("right", 60),
    ("forward", 300),
    ("right", 120),
    ("forward", 75),
    ("right", 150),
    ("forward", 100),
    ("right", 30),
    ("forward", 75),
    ("left", 120),
    ("forward", 300),
    ("left", 60),
    ("forward", 75),
    ("left", 120),
    ("forward", 300),
    ("left", 60),
    ("forward", 75),
    ("right", 150),
    ("forward", 100),
    ("back", 100),
    ("right", 90),
    ("forward", 300),
    ("left", 90),
    ("forward", 100),
    ("back", 100),
    ("right", 30),
    ("forward", 75),
    ("left", 30),
    ("forward", 100),
)
for _command, _amount in _STEPS:
    getattr(turtle, _command)(_amount)
# Keep the window open until the user closes it.
turtle.done()
| [
11748,
28699,
198,
198,
83,
17964,
13,
11813,
7,
6200,
8,
198,
83,
17964,
13,
3506,
7,
10232,
8,
198,
83,
17964,
13,
11813,
7,
2425,
8,
198,
83,
17964,
13,
3506,
7,
1899,
8,
198,
83,
17964,
13,
11813,
7,
6200,
8,
198,
83,
1796... | 2.306202 | 258 |