id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1663002 | <filename>src/server/wsgi/shopster/commodity/serializer.py
from rest_framework import serializers
from rest_framework import serializers
from .models import Product, Order, Order_Item, Category
class ProductSerializer(serializers.ModelSerializer):
    """Serializes Product model instances for the REST API."""
    class Meta:
        model = Product
        # DRF >= 3.3 requires an explicit `fields`/`exclude` declaration;
        # '__all__' matches the legacy implicit behavior of exposing
        # every model field.
        fields = '__all__'
class OrderItemSerializer(serializers.ModelSerializer):
    # NOTE(review): a second `OrderItemSerializer` is defined later in this
    # module and shadows this one for later references; however,
    # `OrderSerializer.products` binds *this* class at import time.
    # Meta declares no `fields`, which modern DRF rejects -- confirm which
    # definition is intended and delete the other.
    class Meta:
        model = Order_Item
# class OrderSerializer(serializers.ModelSerializer):
# class Meta:
# model = Order
# fields = ('order_id', 'product', 'price', 'ordered_on', 'ordered_by')
# # fields = ('id', 'title', 'code', 'linenos', 'language', 'style')
# # Create OderedItems and add it Order
class OrderSerializer(serializers.ModelSerializer):
    """Serializes an Order together with its nested order items."""
    # NOTE(review): at class-definition time this binds the *first*
    # OrderItemSerializer in the module (the one without Meta.fields),
    # not the later redefinition -- verify which is intended.
    products = OrderItemSerializer(many=True)

    class Meta:
        model = Order
        fields = ('order_id', 'products', 'price', 'ordered_on',
                  'ordered_by', 'is_completed', 'status')

    def create(self, validated_data):
        """Create an Order plus one Order_Item per entry in 'products'.

        The order's price is the sum of quantity * price over all items,
        and every created item is linked back to the new order.
        """
        ordered_products = validated_data.pop('products')
        order = Order.objects.create(**validated_data)
        order.price = 0
        for item_data in ordered_products:
            order.price += item_data["quantity"] * item_data["price"]
            ordered_item = Order_Item.objects.create(**item_data)
            # Link the item back to its order directly; Order.objects.create
            # has already persisted the order, so its primary key exists and
            # there is no need to re-fetch it (the original did a second
            # loop with Order.objects.get for this).
            ordered_item.order_id = order
            ordered_item.save()
            order.products.add(ordered_item)
        order.save()
        return order

    def update(self, instance, validated_data):
        """Update an order's status and propagate it to all of its items."""
        instance.status = validated_data['status']
        instance.save()
        # Bug fix: the original called Order_Item.objects.get(order_id=instance)
        # inside the loop, which raises MultipleObjectsReturned for orders
        # with more than one item and never used the loop variable. Update
        # every item attached to this order instead.
        for product in Order_Item.objects.filter(order_id=instance):
            product.status = instance.status
            product.save()
        return instance
class PostOrderSerializer(serializers.ModelSerializer):
    # Placeholder serializer; not implemented yet (no Meta/fields defined).
    pass
class OrderItemSerializer(serializers.ModelSerializer):
    # NOTE(review): this redefines the OrderItemSerializer declared earlier
    # in this module. OrderSerializer.products was bound to the earlier
    # class at import time, so that nested field does NOT use this
    # definition -- only references made after this point do.
    class Meta:
        model = Order_Item
        fields = ('product_id', 'ordered_item', 'price',
                  'name', 'quantity', 'status')
class CategorySerializer(serializers.ModelSerializer):
    """Serializes Category model instances for the REST API."""
    class Meta:
        model = Category
        # DRF >= 3.3 requires an explicit `fields`/`exclude` declaration;
        # '__all__' matches the legacy implicit behavior.
        fields = '__all__'
| StarcoderdataPython |
41893 | <filename>ib/ext/cfg/EWrapperMsgGenerator.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" ib.ext.cfg.EWrapperMsgGenerator -> config module for EWrapperMsgGenerator.java.
"""
# Source lines prepended to the generated Python module: the imports that
# the translated EWrapperMsgGenerator code depends on.
modulePreamble = [
    'from ib.ext.AnyWrapperMsgGenerator import AnyWrapperMsgGenerator',
    'from ib.ext.Util import Util',
]
| StarcoderdataPython |
106571 | """Implementation of a CNN for classfication with VGG Encoder."""
import torch
import torchvision.models as models
from torch.nn import CrossEntropyLoss, Linear, Module
from src.utils.mapper import configmapper
# Maps the config's vgg_version string to the corresponding torchvision
# constructor; "_bn"-suffixed keys select the batch-normalized variants.
STR_MODEL_MAPPING = {
    "11": models.vgg11,
    "13": models.vgg13,
    "16": models.vgg16,
    "19": models.vgg19,
    "11_bn": models.vgg11_bn,
    "13_bn": models.vgg13_bn,
    "16_bn": models.vgg16_bn,
    "19_bn": models.vgg19_bn,
}
@configmapper.map("models", "vgg")
class Vgg(Module):
    """Image classifier built on a torchvision VGG backbone.

    Reads from ``config``: ``vgg_version`` ("11"/"13"/"16"/"19"),
    ``batch_norm``, ``pretrained``, ``num_layers_freeze`` and
    ``num_classes``.
    """

    def __init__(self, config):
        super(Vgg, self).__init__()
        vgg_version = config.vgg_version
        num_layers = int(vgg_version)

        # config.num_layers_freeze must be smaller than the number of
        # layers in the network.
        num_layers_freeze = config.num_layers_freeze
        # Bug fix: the original message said "greater than", contradicting
        # the `<` check it accompanies.
        assert num_layers_freeze < int(vgg_version), (
            "(num_layers_freeze) should be less than (number of layers in network). num_layers_freeze = "
            + str(num_layers_freeze)
            + " and number of layers in network = "
            + vgg_version
        )

        if config.batch_norm:
            vgg_version += "_" + "bn"
        assert (
            vgg_version in STR_MODEL_MAPPING
        ), 'VGG version incorrect, should be in ["11","13","16","19"]'

        # Load the (optionally pretrained) torchvision model.
        self.model = STR_MODEL_MAPPING[vgg_version](pretrained=config.pretrained)

        # Split the freeze budget between the convolutional encoder and the
        # 3-linear-layer classifier head (encoder layers are frozen first;
        # any remainder spills into the classifier).
        num_cls_layers_freeze = 3 - num_layers + num_layers_freeze
        if num_cls_layers_freeze > 0:
            num_enc_layers_freeze = num_layers_freeze - num_cls_layers_freeze
        else:
            num_cls_layers_freeze = 0
            num_enc_layers_freeze = num_layers_freeze

        # With batch norm each encoder block contributes 4 parameter tensors
        # (conv weight/bias + BN weight/bias); otherwise 2 -- hence `mod`.
        if config.batch_norm:
            self.model.features = self.freeze_layers(
                self.model.features, num_enc_layers_freeze, 4
            )
        else:
            self.model.features = self.freeze_layers(
                self.model.features, num_enc_layers_freeze, 2
            )
        self.model.classifier = self.freeze_layers(
            self.model.classifier, num_cls_layers_freeze, 2
        )

        # Replace the final linear layer to match the target class count.
        in_features_dim = self.model.classifier[-1].in_features
        self.model.classifier[-1] = Linear(in_features_dim, config.num_classes)

        self.loss_fn = CrossEntropyLoss()

    def freeze_layers(self, model, num_layers_freeze_param, mod):
        """Freeze the first `num_layers_freeze_param` logical layers of `model`.

        `mod` is the number of parameter tensors per logical layer (see the
        call sites: 4 with batch norm, 2 without).
        """
        ct_unique = 0  # index of the logical layer currently being visited
        k = 0          # index of the parameter tensor
        for name, param in model.named_parameters():
            if k % mod == 0:
                ct_unique += 1
            if param.requires_grad and ct_unique <= num_layers_freeze_param:
                param.requires_grad = False
            k += 1
        return model

    def forward(self, image, labels=None):
        """Return (loss, logits) when labels are given, else just logits."""
        logits = self.model(image)
        if labels is not None:
            loss = self.loss_fn(logits, labels)
            return loss, logits
        return logits
| StarcoderdataPython |
92438 | from decimal import Decimal
from typing import Any, Optional
from freshbooks.api.accounting import AccountingResource
from freshbooks.api.resource import HttpVerbs
from freshbooks.errors import FreshBooksError
from freshbooks.models import Result
class EventsResource(AccountingResource):
    """Handles resources under the `/events` endpoints.

    These are handled almost similarly to `/accounting` endpoints.
    Refer to `freshbooks.api.accounting.AccountingResource`.
    """

    def _get_url(self, account_id: str, resource_id: Optional[int] = None) -> str:
        """Build the events URL, optionally targeting a single resource."""
        base = f"{self.base_url}/events/account/{account_id}/{self.accounting_path}"
        if resource_id:
            return f"{base}/{resource_id}"
        return base

    def _request(self, url: str, method: str, data: Optional[dict] = None) -> Any:
        """Issue the HTTP call and unwrap the standard response envelope."""
        response = self._send_request(url, method, data)

        status = response.status_code
        if status == 200 and method == HttpVerbs.HEAD:  # pragma: no cover
            # no content returned from a HEAD
            return
        if status == 204 and method == HttpVerbs.DELETE:
            return {}

        try:
            content = response.json(parse_float=Decimal)
        except ValueError:
            raise FreshBooksError(status, "Failed to parse response", raw_response=response.text)
        if status >= 400:
            message, code = self._extract_error(content)
            raise FreshBooksError(status, message, error_code=code, raw_response=content)
        if "response" not in content:
            raise FreshBooksError(status, "Returned an unexpected response", raw_response=response.text)
        return content["response"]["result"]

    def verify(self, account_id: str, resource_id: int, verifier: str) -> Result:
        """Verify webhook callback by making a put request.

        Args:
            account_id: The alpha-numeric account id
            resource_id: Id of the resource to update
            verifier: The string verifier received by the webhook callback URI

        Returns:
            Result: Result object with the resource's response data.

        Raises:
            FreshBooksError: If the call is not successful.
        """
        payload = {self.single_name: {"verifier": verifier}}
        response = self._request(self._get_url(account_id, resource_id), HttpVerbs.PUT, data=payload)
        return Result(self.single_name, response)

    def resend_verification(self, account_id: str, resource_id: int) -> Result:
        """Tell FreshBooks to resend the verification webhook for the callback.

        Args:
            account_id: The alpha-numeric account id
            resource_id: Id of the resource to update

        Returns:
            Result: Result object with the resource's response data.

        Raises:
            FreshBooksError: If the call is not successful.
        """
        payload = {self.single_name: {"resend": True}}
        response = self._request(self._get_url(account_id, resource_id), HttpVerbs.PUT, data=payload)
        return Result(self.single_name, response)
| StarcoderdataPython |
1763551 | <filename>twitter_app/twitter_bot/views.py
from django.shortcuts import render
import tweepy, requests
import sys, requests, json, time, os
from django.contrib.messages.views import messages
from .forms import InputForm
from django.conf import settings
# Twitter API credentials are read from Django settings (which should in
# turn load them from the environment, not source control).
CONSUMER_KEY = settings.CONSUMER_KEY
CONSUMER_SECRET = settings.CONSUMER_SECRET
ACCESS_TOKEN = settings.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = settings.ACCESS_TOKEN_SECRET

# Module-level tweepy client shared by all views in this module.
auth = tweepy.OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def unfollowers(request):
    """Render a page listing accounts the given user follows that do not
    follow back, plus how long the lookup took.

    Expects a POSTed InputForm whose `user_input` is a Twitter username.
    """
    form = InputForm()
    if request.method == "POST":
        try:
            form = InputForm(request.POST)
            if form.is_valid():
                username = form.cleaned_data['user_input']
                starttime = time.time()
                user = api.get_user(username)
                user_id = user._json['id']
                followed_users_ids = api.friends_ids(user_id)
                followers_ids = api.followers_ids(user_id)
                # Symmetric difference of followed vs followers ids.
                difference_list = diff(followed_users_ids, followers_ids)
                counter = 0
                counter_2 = 0
                a = []
                # Twitter's lookup_users accepts at most 100 ids per call,
                # so the id list is processed in chunks of 100.
                for i in range(len(difference_list)//100+1):
                    counter = i*100
                    counter_2 += 100
                    a.append(api.lookup_users(difference_list[counter:counter_2]))
                nons_list = []
                for i in a:
                    for j in i:
                        nons_list.append(j._json['id'])
                # Keep only ids that are not among the user's followers
                # (presumably filtering out the followers' half of the
                # symmetric difference -- TODO confirm intent).
                unfollowers_ids_list = list(set(nons_list) - set(followers_ids))
                counter_3 = 0
                counter_4 = 0
                b = []
                # Same 100-id chunking for the final screen-name lookup.
                for i in range(len(unfollowers_ids_list)//100+1):
                    counter_3 = i*100
                    counter_4 += 100
                    b.append(api.lookup_users(unfollowers_ids_list[counter_3:counter_4]))
                unfollowers_list = []
                times = time.time()-starttime  # elapsed seconds for the lookups
                for i in b:
                    for j in i:
                        unfollowers_list.append(j._json['screen_name'])
                return render(request, 'twitter_bot/nonfollowers.html', {'form':form, 'unfollowers_list':unfollowers_list, 'times':times})
        except tweepy.error.TweepError:
            # Message (Turkish): "There is no user with this username".
            messages.error(request,'Bu kullanıcı adına sahip birisi yok')
            return render(request,'twitter_bot/nonfollowers.html', {'form':form})
    return render(request,'twitter_bot/nonfollowers.html',{'form':form})
def diff(li1, li2):
    """Return the symmetric difference of two lists (element order is not
    guaranteed, matching the original set-based implementation)."""
    return list(set(li1) ^ set(li2))
| StarcoderdataPython |
14391 | <filename>ssd_project/functions/multiboxloss.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt as sqrt
import collections
import numpy as np
import itertools
from ssd_project.utils.utils import *
from ssd_project.utils.global_variables import *
device = DEVICE
class MultiBoxLoss(nn.Module):
    """
    For our SSD we use a unique loss function called MultiBoxLoss.
    The loss branches into:
    1. Localization loss from the predicted bounding boxes w.r.t. the ground-truth objects
    2. Confidence loss from the predicted class scores w.r.t. the ground-truth object classes
    """

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
        super(MultiBoxLoss, self).__init__()
        self.priors_cxcy = priors_cxcy
        self.priors_xy = decode_center_size(self.priors_cxcy)
        self.threshold = threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.alpha = alpha

        # L1 loss for the predicted localizations w.r.t. ground truth.
        # NOTE(review): the attribute is named smooth_l1 but nn.L1Loss is
        # plain L1; the SSD paper uses Smooth L1 -- confirm intent.
        self.smooth_l1 = nn.L1Loss()
        # Per-element cross entropy; reduction is applied manually below.
        # Bug fix: `reduce=False` is deprecated -- `reduction='none'` is the
        # equivalent modern spelling.
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')

    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        Each time the model predicts new localization and confidence scores,
        they are compared to the ground truth objects and classes.

        Args:
            predicted_locs: predicted localizations w.r.t. prior-boxes. Shape: (batch_size, 8732, 4)
            predicted_scores: per-class confidence scores for each box. Shape: (batch_size, 8732, n_classes)
            boxes: ground truth objects per image. Shape: (batch_size)
            labels: ground truth classes per image. Shape: (batch_size)

        Returns:
            Loss - a scalar
        """
        # (The docstring above was originally placed after this statement,
        # so it was never an actual docstring.)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        batch_size = predicted_locs.size(0)
        num_priors = self.priors_cxcy.size(0)
        num_classes = predicted_scores.size(2)

        assert num_priors == predicted_locs.size(1) == predicted_scores.size(1)

        true_locs, true_classes = self.match_priors_objs(boxes, labels, num_priors, num_classes, batch_size)

        # Identify priors that are positive (object/non-background)
        non_bck_priors = true_classes != 0  # (N, 8732)

        # LOCALIZATION LOSS
        # Computed only over positive (non-background) priors
        loc_loss = self.smooth_l1(predicted_locs[non_bck_priors], true_locs[non_bck_priors])  # (), scalar

        # CONFIDENCE LOSS
        # Computed over positive priors and the most difficult (hardest)
        # negative priors in each image (hard negative mining).
        num_positives = non_bck_priors.sum(dim=1)  # (N)
        num_hard_negatives = self.neg_pos_ratio * num_positives  # (N)

        # First, find the loss for all priors
        confidence_loss = self.cross_entropy(predicted_scores.view(-1, num_classes), true_classes.view(-1))  # (N * 8732)
        confidence_loss = confidence_loss.view(batch_size, num_priors)  # (N, 8732)

        # We already know which priors are positive
        confidence_loss_non_bck = confidence_loss[non_bck_priors]

        # Next, find which priors are hard-negative: sort ONLY negative
        # priors in each image by decreasing loss, take top n_hard_negatives.
        confidence_loss_negative = confidence_loss.clone()  # (N, 8732)
        confidence_loss_negative[non_bck_priors] = 0.  # positives never rank among hard negatives
        confidence_loss_negative, _ = confidence_loss_negative.sort(dim=1, descending=True)  # (N, 8732)
        hardness_ranks = torch.LongTensor(range(num_priors)).unsqueeze(0).expand_as(confidence_loss_negative).to(device)  # (N, 8732)
        hard_negatives = hardness_ranks < num_hard_negatives.unsqueeze(1)  # (N, 8732)
        confidence_loss_hard_neg = confidence_loss_negative[hard_negatives]  # (sum(n_hard_negatives))

        # As in the paper, averaged over positive priors only, although
        # computed over both positive and hard-negative priors.
        conf_loss = (confidence_loss_hard_neg.sum() + confidence_loss_non_bck.sum()) / num_positives.sum().float()

        # TOTAL LOSS
        return conf_loss + self.alpha * loc_loss

    def match_priors_objs(self, boxes, labels, num_priors, num_classes, batch_size):
        """
        Helper: assign a class to each prior by Jaccard overlap with the
        ground-truth objects; priors below the overlap threshold (0.5) are
        labelled background (class 0).
        """
        true_locs = torch.zeros((batch_size, num_priors, 4), dtype=torch.float).to(device)  # (batch_size, 8732, 4)
        true_classes = torch.zeros((batch_size, num_priors), dtype=torch.long).to(device)  # (batch_size, 8732)

        for i, bboxes_img in enumerate(boxes):
            # For each image, compute jaccard overlap between its ground
            # truth objects and all priors.
            num_objects = bboxes_img.size(0)
            obj_prior_overlap = jaccard_overlap(bboxes_img, self.priors_xy)  # (num_objects, 8732)

            # Best object per prior
            overlap_prior, obj_prior = obj_prior_overlap.max(dim=0)  # (8732)
            # Best prior per object
            overlap_obj, prior_obj = obj_prior_overlap.max(dim=1)  # (num_objects)

            # Ensure every object keeps its best prior (overlap forced to 1
            # so it can never be thresholded out below).
            obj_prior[prior_obj] = torch.LongTensor(range(num_objects)).to(device)
            overlap_prior[prior_obj] = 1

            # Label each prior; sub-threshold overlaps become background.
            label_prior = labels[i][obj_prior]
            label_prior[overlap_prior < self.threshold] = 0
            label_prior = label_prior.squeeze()
            true_classes[i] = label_prior

            # Encode boxes w.r.t. the prior-box format
            true_locs[i] = encode_xy_to_gcxgcy(bboxes_img[obj_prior], self.priors_cxcy)

        return true_locs, true_classes
| StarcoderdataPython |
81142 | <reponame>tobby2002/python-sandbox
import matplotlib.pyplot as plt
import pandas as pd
# http://queirozf.com/entries/pandas-dataframe-plot-examples-with-matplotlib-pyplot
# Sample data used by most of the examples below.
df = pd.DataFrame({
    'name': ['john','mary','peter','jeff','bill','lisa','jose'],
    'age': [23,78,22,19,45,33,20],
    'gender': ['M','F','M','M','M','F','M'],
    'state': ['california','dc','california','dc','california','texas','texas'],
    'num_children': [2,0,0,3,2,1,4],
    'num_pets': [5,1,0,5,2,2,3]
})
print(df)

# 1. a scatter plot comparing num_children and num_pets
df.plot(kind='scatter', x='num_children', y='num_pets', color='red')
plt.show()
# plt.savefig('output.png')

# 2. a simple bar plot (the original comment said "line plot", but
#    kind='bar' is used here)
df.plot(kind='bar', x='name', y='age')
plt.show()

# 3. two series on the same axes; gca stands for 'get current axis'
plt.clf()
ax = plt.gca()
df.plot(kind='line', x='name', y='num_children', ax=ax)
df.plot(kind='line', x='name', y='num_pets', color='red', ax=ax)
plt.show()

# 4. Bar plot with group by (distinct names per state)
plt.clf()
df.groupby('state')['name'].nunique().plot(kind='bar')
plt.show()

# 5. Stacked bar plot with group by
# Example: plot count by category as a stacked column:
# create a dummy variable and do a two-level group-by based on it:
# fix the x axis label and the legend
df.assign(dummy = 1).groupby(
    ['dummy','state']
).size().to_frame().unstack().plot(kind='bar',stacked=True,legend=False)
plt.title('Number of records by State')
# otherwise the x label shows up as 'dummy'
plt.xlabel('state')
# disable ticks in the x axis
plt.xticks([])
# fix the legend
current_handles, _ = plt.gca().get_legend_handles_labels()
reversed_handles = reversed(current_handles)
labels = reversed(df['state'].unique())
plt.legend(reversed_handles,labels,loc='lower right')
plt.show()

# 6. Stacked bar plot with group by, normalized to 100%
# A plot where the columns sum up to 100%.
# Similar to the example above but:
#   - normalize the values by dividing by the total amounts
#   - use percentage tick labels for the y axis
# Example: Plot percentage count of records by state
import matplotlib.ticker as mtick

# create dummy variable then group by that
# set the legend to false because we'll fix it later
df.assign(dummy = 1).groupby(
    ['dummy','state']
).size().groupby(level=0).apply(
    lambda x: 100 * x / x.sum()
).to_frame().unstack().plot(kind='bar',stacked=True,legend=False)
# otherwise the x label shows up as 'dummy'
plt.xlabel('state')
# disable ticks in the x axis
plt.xticks([])
# fix the legend or it'll include the dummy variable
current_handles, _ = plt.gca().get_legend_handles_labels()
reversed_handles = reversed(current_handles)
correct_labels = reversed(df['state'].unique())
plt.legend(reversed_handles,correct_labels)
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter())
plt.show()

# 7. Stacked bar plot with two-level group by
# Just do a normal groupby() and call unstack():
df.groupby(['state','gender']).size().unstack().plot(kind='bar',stacked=True)
plt.show()
# Another example: count the people by gender, splitting by state:
df.groupby(['gender','state']).size().unstack().plot(kind='bar',stacked=True)
plt.show()

# 8. Stacked bar plot with two-level group by, normalized to 100%
# Sometimes you are only ever interested in the distributions, not raw amounts:
df.groupby(['gender','state']).size().groupby(level=0).apply(
    lambda x: 100 * x / x.sum()
).unstack().plot(kind='bar',stacked=True)
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter())
plt.show()

# 9. Plot histogram of column values (explicit bin edges)
df[['age']].plot(kind='hist',bins=[0,20,40,60,80,100],rwidth=0.8)
plt.show()

# 10. Plot date histogram
# To plot the number of records per unit of time, first convert the date
# column to datetime using pandas.to_datetime().
# NOTE: this rebinds `df`, replacing the sample frame used above.
df = pd.DataFrame({
    'name':[
        'john','lisa','peter','carl','linda','betty'
    ],
    'date_of_birth':[
        '01/21/1988','03/10/1977','07/25/1999','01/22/1977','09/30/1968','09/15/1970'
    ]
})
# Now convert the date column into datetime type and use plot(kind='hist'):
df['date_of_birth'] = pd.to_datetime(df['date_of_birth'],infer_datetime_format=True)
plt.clf()
# Histogram over the birth months.
df['date_of_birth'].map(lambda d: d.month).plot(kind='hist')
plt.show() | StarcoderdataPython |
3299397 | <reponame>jiaju-yang/leetcode
#
# @lc app=leetcode id=139 lang=python3
#
# [139] Word Break
#
from typing import List
# @lc code=start
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if `s` can be segmented into words from `wordDict`.

        Classic DP over prefix lengths, with an early exit once no word is
        long enough to extend the rightmost reachable position.
        """
        n = len(s)
        reachable = [True] + [False] * n  # reachable[i]: s[:i] is segmentable
        longest = max(len(w) for w in wordDict)
        frontier = 0  # rightmost index known to be reachable
        for end in range(1, n + 1):
            if end > frontier + longest:
                break  # no word can bridge the gap; unreachable forever
            for word in wordDict:
                start = end - len(word)
                if start >= 0 and reachable[start] and s[start:end] == word:
                    reachable[end] = True
                    frontier = end
                    break
        return reachable[n]
# @lc code=end
solve = Solution().wordBreak
def test_default():
    """Examples taken from the LeetCode problem statement."""
    assert solve('leetcode', ['leet', 'code'])
    assert solve('applepenapple', ['apple', 'pen'])
    assert not solve('catsandog', ['cats', 'dog', 'sand', 'and', 'cat'])
def test_corner_cases():
    """Single-character strings and minimal dictionaries."""
    assert solve('a', ['a'])
    assert solve('aa', ['a'])
    assert not solve('a', ['b'])
    assert solve('a', ['a', 'b'])
| StarcoderdataPython |
106716 | <reponame>DeepLearnI/atlas
def load_parameters(log_parameters=True):
    """Load job parameters from foundations_job_parameters.json.

    Returns an empty dict when the file does not exist. When
    `log_parameters` is True, the loaded values are also recorded via
    log_params().
    """
    try:
        parameters = _parsed_json(_raw_json_from_parameters_file())
        if log_parameters:
            log_params(parameters)
        return parameters
    except FileNotFoundError:
        return {}
def flatten_parameter_dictionary(param_dictionary):
    """Flatten a nested dict of parameters into a single-level dict.

    Scalars (str/int/float/None) are kept as-is; nested dicts and lists
    are expanded recursively with key-path-derived names.
    """
    flattened = {}
    for key, value in param_dictionary.items():
        if _is_scalar_value(value):
            flattened[key] = value
        elif isinstance(value, dict):
            flattened.update(_flatten_dict_value(key, value))
        else:
            flattened.update(_flatten_list_value(key, value))
    return flattened
def log_param(key, value):
    """Log one parameter for the running job; warns (no-op) outside a job."""
    from foundations.utils import log_warning_if_not_running_in_job
    log_warning_if_not_running_in_job(_log_param_in_running_job, key, value)
def _log_param_in_running_job(key, value):
    """Record a single parameter for the currently running job.

    Registers the parameter name at the project level and stores the
    value (plus input-parameter metadata) under the job's Redis keys.
    """
    from foundations_contrib.global_state import current_foundations_job, redis_connection

    project_name = current_foundations_job().project_name
    job_id = current_foundations_job().job_id

    _insert_parameter_name_into_projects_params_set(redis_connection, project_name, key)
    _insert_input_parameter_name_into_projects_input_params_set(redis_connection, project_name, key)
    _insert_parameter_value_into_job_run_data(redis_connection, job_id, key, value)
    _insert_input_parameter_name_into_job_input_parameter_data(redis_connection, job_id, key)
def log_params(parameters):
    """Flatten a (possibly nested) parameter dict and log each leaf value."""
    for key, value in flatten_parameter_dictionary(parameters).items():
        log_param(key, value)
def _insert_parameter_name_into_projects_params_set(redis_connection, project_name, key):
    """Register `key` in the project's set of job parameter names."""
    _insert_parameter_name_into_specified_projects_params_set('job_parameter_names', redis_connection, project_name, key)
def _insert_parameter_value_into_job_run_data(redis_connection, job_id, key, value):
    """Merge {key: value} into the job's JSON-encoded parameter hash in Redis.

    NOTE(review): read-modify-write without a transaction; concurrent
    writers could lose updates -- confirm single-writer assumption.
    """
    import json
    job_params_key = f'jobs:{job_id}:parameters'
    serialized_job_params = redis_connection.get(job_params_key)
    # Missing key -> start from an empty dict.
    job_params = _deserialized_job_params(json.loads, serialized_job_params)
    job_params[key] = value
    redis_connection.set(job_params_key, json.dumps(job_params))
def _insert_input_parameter_name_into_projects_input_params_set(redis_connection, project_name, key):
    """Register `key` in the project's set of input parameter names."""
    _insert_parameter_name_into_specified_projects_params_set('input_parameter_names', redis_connection, project_name, key)
def _insert_parameter_name_into_specified_projects_params_set(set_name, redis_connection, project_name, key):
    """Add `key` to the Redis set `projects:<project_name>:<set_name>`."""
    redis_connection.sadd(f'projects:{project_name}:{set_name}', key)
def _insert_input_parameter_name_into_job_input_parameter_data(redis_connection, job_id, key):
    """Append input-parameter metadata for `key` to the job's serialized list."""
    from foundations_internal.foundations_serializer import dumps, loads

    job_params_key = f'jobs:{job_id}:input_parameters'
    serialized_job_params = redis_connection.get(job_params_key)
    job_params = _deserialized_job_params(loads, serialized_job_params, default_type=list)
    # 'stageless' marks parameters logged outside of a pipeline stage.
    job_params.append({'argument': {'name': key, 'value': {'type': 'dynamic', 'name': key}}, 'stage_uuid': 'stageless'})
    redis_connection.set(job_params_key, dumps(job_params))
def _deserialized_job_params(deserialize_callback, serialized_job_params, default_type=dict):
if serialized_job_params is None:
return default_type()
else:
return deserialize_callback(serialized_job_params)
def _is_scalar_value(value):
return isinstance(value, str) or isinstance(value, int) or isinstance(value, float) or value is None
def _flatten_dict_value(param_key, param_value):
    """Flatten a nested dict value, prefixing nested keys with `param_key`.

    An empty dict collapses to {param_key: None}.
    """
    if not param_value:
        return {param_key: None}
    prefixed = {
        f'{param_key}_{nested_key}': nested_value
        for nested_key, nested_value in param_value.items()
    }
    return flatten_parameter_dictionary(prefixed)
def _flatten_list_value(param_key, param_value):
    """Flatten a list value into indexed keys ('<key>_0', '<key>_1', ...).

    An empty list collapses to {param_key: None}.
    """
    if not param_value:
        return {param_key: None}
    indexed_keys = _list_of_keys(param_key, len(param_value))
    return flatten_parameter_dictionary(dict(zip(indexed_keys, param_value)))
def _list_of_keys(key, length_of_list_value):
return ['{}_{}'.format(key, list_index) for list_index in range(length_of_list_value)]
def _raw_json_from_parameters_file():
    """Read and return the raw text of foundations_job_parameters.json.

    Raises FileNotFoundError when the file is absent (handled by callers).
    """
    with open('foundations_job_parameters.json', 'r') as parameters_file:
        return parameters_file.read()
def _parsed_json(file_contents):
import json
if file_contents == '':
return {}
else:
return json.loads(file_contents) | StarcoderdataPython |
3210766 | <reponame>Penchekrak/Distilling-Object-Detectors
from .VOC import VOC
| StarcoderdataPython |
1654325 | <reponame>Khufos/10FastFingersBot
"Version Python 3.8.7 64bits"
"Você precisa de duas bibliotecas Selenium e pyautogui"
"10 fast fingers" "TRADUÇÃO , DEDOS RAPIDOS."
'''WebDriver é uma ferramenta de código aberto para teste automatizado de aplicativos da web em vários navegadores.
Ele fornece recursos para navegar para páginas da web, entrada do usuário, execução de JavaScript e muito mais.
ChromeDriver é um servidor autônomo que implementa o padrão W3C WebDriver .
ChromeDriver está disponível para Chrome em Android e Chrome em Desktop
(Mac, Linux, Windows e ChromeOS). '''
from selenium import webdriver
import pyautogui as cursor
from time import sleep

# Launch Chrome via the bundled chromedriver and open the Portuguese
# 10fastfingers typing test.
driver = webdriver.Chrome(executable_path=r'./chromedriver.exe')
driver.get('https://10fastfingers.com/typing-test/portuguese')
sleep(5)  # wait for the page to finish loading

# NOTE(review): hard-coded screen coordinates; presumably clicks the typing
# input box -- breaks on other resolutions/window layouts. TODO confirm.
cursor.moveTo(x=781, y=395, duration=0.1)
cursor.click()

# The test's word list is embedded in the page as a '|'-separated string.
word_list = driver.execute_script("return document.getElementById('wordlist').innerHTML")
words = word_list.split("|")

# Type each word followed by a space, pausing briefly between words.
for word in words:
    driver.find_element_by_id("inputfield").send_keys(word+ " ")
    sleep(0.15)
1779281 | <reponame>PacktPublishing/Boosting-Machine-Learning-Models-in-Python
"""
Voting classifier example, by default it's set up for
majority/hard voting mode.
"""
from section1_video5_data import get_data
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, VotingClassifier
from sklearn.tree import DecisionTreeClassifier
seed=123

# Load prepared diabetes data (features X, labels Y).
X, Y = get_data('../data/video1_diabetes.csv')

# Individual base classifiers.
c1 = RandomForestClassifier(n_estimators=10, random_state=seed)
c2 = ExtraTreesClassifier(n_estimators=10, random_state=seed)
c3 = AdaBoostClassifier(n_estimators=10, random_state=seed)

# Voting ensemble over the three classifiers (hard/majority voting by
# default, as noted in the module docstring).
model = VotingClassifier([('c1', c1), ('c2', c2), ('c3', c3)])

# Fit each base model and score it with 10-fold cross validation.
results_c=[]
models_c=[c1, c2, c3]
for c in models_c:
    results = c.fit(X, Y)
    results_kfold = model_selection.cross_val_score(c, X, Y, cv=10)
    results_c.append(results_kfold)

# Fit the ensemble model.
results_model = model.fit(X, Y)

# Validate the performance of the ensemble model
# using 10-fold Cross Validation.
results_kfold_model = model_selection.cross_val_score(model, X, Y, cv=10)

# Report mean CV accuracy per base model, then for the ensemble.
for i, rc in enumerate(results_c):
    print('{:s}\t{:2.2f}%'.format(models_c[i].__class__.__name__, rc.mean()*100))
print("{:s}\t{:2.2f}%".format(model.__class__.__name__, results_kfold_model.mean()*100))
| StarcoderdataPython |
4813752 |
import mpmath
__all__ = ['yeo_johnson', 'inv_yeo_johnson']
def yeo_johnson(x, lmbda):
    r"""
    Yeo-Johnson transformation of x.

    See https://en.wikipedia.org/wiki/Power_transform#Yeo%E2%80%93Johnson_transformation
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        lmbda = mpmath.mpf(lmbda)
        if x >= 0:
            # Non-negative branch: log1p at lambda == 0, power form otherwise.
            if lmbda == 0:
                return mpmath.log1p(x)
            return mpmath.expm1(lmbda * mpmath.log1p(x)) / lmbda
        # Negative branch: -log1p(-x) at lambda == 2, power form otherwise.
        if lmbda == 2:
            return -mpmath.log1p(-x)
        two_minus_lmbda = 2 - lmbda
        return -mpmath.expm1(two_minus_lmbda * mpmath.log1p(-x)) / two_minus_lmbda
def inv_yeo_johnson(x, lmbda):
    """
    Inverse Yeo-Johnson transformation.

    See https://en.wikipedia.org/wiki/Power_transform#Yeo%E2%80%93Johnson_transformation
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        lmbda = mpmath.mpf(lmbda)
        if x >= 0:
            # Non-negative branch: expm1 at lambda == 0, power form otherwise.
            if lmbda == 0:
                return mpmath.expm1(x)
            return mpmath.expm1(mpmath.log1p(lmbda * x) / lmbda)
        # Negative branch: -expm1(-x) at lambda == 2, power form otherwise.
        if lmbda == 2:
            return -mpmath.expm1(-x)
        two_minus_lmbda = 2 - lmbda
        return -mpmath.expm1(mpmath.log1p(-two_minus_lmbda * x) / two_minus_lmbda)
| StarcoderdataPython |
19348 | <reponame>littlepea/django-auction<filename>auction/models/bidbasket.py<gh_stars>1-10
import importlib  # NOTE(review): appears unused here; presumably used by load_class internally -- confirm.
from django.conf import settings
from auction.utils.loader import load_class

# Dotted path of the BidBasket model implementation; overridable via the
# AUCTION_BIDBASKET_MODEL Django setting (defaults to the bundled model).
AUCTION_BIDBASKET_MODEL = getattr(settings, 'AUCTION_BIDBASKET_MODEL',
                                  'auction.models.defaults.BidBasket')

# Resolved class exposed to the rest of the app as `BidBasket`.
BidBasket = load_class(AUCTION_BIDBASKET_MODEL, 'AUCTION_BIDBASKET_MODEL')
66279 | ### The point of this module is that,
### when you import it, you get the "vendor" directory
### on your python's sys.path.
import sys
import os.path
import site
already_vendorified = False
def vendorify():
    """Add this module's directory (the "vendor" dir) to sys.path, at the
    front, so vendored packages win over system-installed ones. Idempotent:
    subsequent calls are no-ops."""
    global already_vendorified
    if already_vendorified:
        return

    root = os.path.dirname(os.path.abspath(__file__))
    previous_sys_path = list(sys.path)
    site.addsitedir(os.path.join(root, '.'))

    # site.addsitedir appends entries; collect the newly added ones and
    # move them to the front of sys.path.
    newly_added = []
    for entry in list(sys.path):
        if entry not in previous_sys_path:
            newly_added.append(entry)
            sys.path.remove(entry)
    sys.path[:0] = newly_added

    # Remember that we have already vendorified.
    already_vendorified = True
| StarcoderdataPython |
56149 | from django import template
from core.models import Order
from django.contrib.auth.decorators import login_required
register = template.Library()
@login_required
@register.simple_tag
def product_cart_item_count(user,slug):
    """Template tag: quantity of the product identified by `slug` in the
    given user's open (not yet ordered) cart; 0 when absent.

    NOTE(review): decorators apply bottom-up, so `register.simple_tag`
    registers the undecorated function and `login_required` only wraps the
    module-level name; also `login_required` expects a view taking a
    request, not a template tag taking a user -- confirm this stacking is
    intentional.
    """
    obj = Order.objects.filter(user__username=user,ordered=False)
    if obj.exists():
        obj2 = obj[0].items.filter(item__slug=slug)
        if obj2.exists():
            return obj2[0].quantity
    return 0
109130 | #!/home/nitin/Learn/Repositories/Github/WebApps/SimpleIsBetterThanComplex.com/myproject/.env/bin/python3
from django.core import management

if __name__ == "__main__":
    # Delegate to Django's command-line utility (argv-based dispatch).
    management.execute_from_command_line()
| StarcoderdataPython |
1691320 | <filename>src/final/config.py
# +
from sqlalchemy import create_engine
import psycopg2
def dbconfig():
    """Create and return a SQLAlchemy engine for the hosted Postgres DB.

    NOTE(review): connection details are hard-coded in source (the password
    shown here is a placeholder); move credentials to environment variables
    or a secrets store before deploying.
    """
    db = {"host": "affordablehousing.a2hosted.com",
          "port": 5432,
          "database": "afford31_housing",
          "user": "afford31_siads",
          "pass": "<PASSWORD>"}
    # Standard SQLAlchemy URL: postgresql://user:pass@host:port/database
    pg_string = f"postgresql://{db['user']}:{db['pass']}@{db['host']}:{db['port']}/{db['database']}"
    database_engine = create_engine(pg_string, echo = False)
    return database_engine
# -
def basedir():
    """Return the process's current working directory (used as the app's
    base path)."""
    import os
    return os.getcwd()
| StarcoderdataPython |
10361 | # -*- coding: utf-8 -*-
'''
Copyright 2012 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
templates = {
'static_link': '''
\t@$(AR) rcs %(lib)s %(obj)s
\t@echo " [\033[33m\033[1mAR\033[0m] - \033[37m\033[1m%(obj)s\033[0m to \033[37m\033[1m%(lib)s\033[0m"''',
'c_obj_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'asm_obj_ruler': '''%(obj)s: %(source)s
\t@$(AS) $(ASFLAGS) -o %(obj)s %(source)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mAS\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'c_asm_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_obj_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_asm_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'avr-main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <avr/sleep.h>
int main(void) {
for(;;)
sleep_mode();
return 0;
}''',
'main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <Arduino.h>
/**
* Setup of the firmware
**/
void setup() {
}
/**
* Schedule events for firmware program
**/
void loop() {
delay(250);
}''',
'Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Defines of Arduino
ARDUINO_HOME=%(sdk_home)s
ARDUINO_CORE=$(ARDUINO_HOME)/hardware/arduino/cores
ARDUINO_VARIANT=$(ARDUINO_HOME)/hardware/arduino/variants/%(variant)s
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I$(ARDUINO_CORE)/arduino -I$(ARDUINO_VARIANT) -I$(ARDUINO_CORE) -I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
ARDUINO=%(sdk_version)s
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -MMD -DARDUINO=$(ARDUINO) \\
-fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
OBJ=%(obj_dep)s
CORE_OBJ=%(core_obj_dep)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
CORE_LIB=binary/core.a
LIB_DEPS=%(lib_deps)s
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-C --mcu=$(MCU)
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(CORE_LIB) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(CORE_LIB) $(LIB_DEPS) -o $(AOUT)
$(CORE_LIB): $(CORE_OBJ)%(core_ruler)s
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
%(core_asm_rulers)s
%(core_obj_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\trm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
''',
'avr-Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
ASM=%(asm_dep)s
OBJ=%(obj_dep)s
LIB_DEPS=%(lib_deps)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-A
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(LIB_DEPS) -o $(AOUT)
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\t@rm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
'''
}
| StarcoderdataPython |
# Echo numbered test cases: read one integer per line until a 0 sentinel,
# printing "Case <k>: <value>" for each non-zero value read.
i = 0
while True:
    a = int(input())
    if a == 0:
        break
    i += 1
    # BUGFIX: the Python-2-only `print "..." % (...)` statement is a syntax
    # error on Python 3; the call form below works on both 2 and 3.
    print("Case %d: %d" % (i, a))
| StarcoderdataPython |
1732384 | <reponame>Jemie-Wang/ECE5725_Project-Reptile_Monitoring_System
#!/usr/bin/env python
# Capture data from a DHT11 sensor and save it on a database
import time
import sqlite3
import board
import adafruit_dht
import RPi.GPIO as GPIO
import os
dbname='../sensorData.db'  # SQLite file the readings are appended to
sampleFreq = 10 # time in seconds between stored samples
dhtDevice = adafruit_dht.DHT11(board.D6)  # DHT11 sensor wired to GPIO pin D6
# get data from DHT sensor
def getDHTdata():
    """Read one temperature/humidity sample from the DHT11 sensor.

    Returns:
        (temp, hum): temperature in degrees C rounded to 0.1, and relative
        humidity in % rounded to an integer. Either may be None when the
        read failed (DHT11 reads fail routinely).
    """
    temp = None
    hum = None
    try:
        temp = dhtDevice.temperature
        hum = dhtDevice.humidity
        if hum is not None and temp is not None:
            hum = round(hum)
            temp = round(temp, 1)
            print(
                "Temp: {:.1f} C Humidity: {}% ".format(
                    temp, hum
                )
            )
            # out-of-range alert: fire off the SMS-notification script
            if temp > 25 or hum < 5:
                os.system('python /home/pi/project/logSensor/textMessage/sensorText.py')
    except RuntimeError:
        # DHT reads fail often; back off briefly and let the caller retry
        time.sleep(2.0)
    except OverflowError as error:
        # BUGFIX: this handler was listed *after* `except Exception` and was
        # therefore unreachable (OverflowError is a subclass of Exception).
        print("meet error" + str(error))
    except Exception:
        # unexpected failure: release the device, then propagate unchanged
        dhtDevice.exit()
        raise
    return temp, hum
# log sensor data on database
def logData(temp, hum):
    """Append one reading to the DHT_data table, timestamped with local time."""
    db = sqlite3.connect(dbname)
    db.cursor().execute("INSERT INTO DHT_data values(datetime('now','localtime'), (?), (?))", (temp, hum))
    db.commit()
    db.close()
# main function
def main():
    """Poll the sensor forever, persisting each good reading.

    Sleeps sampleFreq seconds after a successful sample; failed reads
    (None values) are retried immediately.
    """
    while True:
        t, h = getDHTdata()
        if t is not None and h is not None:
            logData(t, h)
            time.sleep(sampleFreq)
# ------------ Execute program
main()
# NOTE(review): main() loops forever and never returns; an uncaught exception
# would also skip this line -- so this cleanup is effectively unreachable.
GPIO.cleanup()
| StarcoderdataPython |
1778931 | #!/usr/bin/python
import sys
import xml.etree.ElementTree as ET
import re
import os
import datetime
import shutil
import time
import glob
import cgi
import cgitb
import sqlite3
import subprocess
import random
from ast import literal_eval as make_tuple
import math
MAX_RESPONCE_ITEMS = int(10000)  # hard cap on <QRItem> elements listed per query
MODE = "fast" #fast / robust
# Student-t critical values indexed by two-sided confidence level
T_Table = {0.95:1.96, 0.99:2.576, 0.999:3.291}
# single-letter failure-mode codes stored in the DB -> human-readable names
failuremodes_alias = dict([('M', 'Masked_Fault'), ('L', 'Latent_Fault'), ('S', 'Signalled_Failure'), ('C', 'Silent_Data_Corruption')])
# (SQL column expression, HTTP/XML attribute name, quote-words-in-selector?) triples
fields = [ ('M.Label', 'model', True),
           ('I.ID', 'eind', False),
           ('T.NodeFullPath', 'target', True),
           ('T.Macrocell', 'instancetype', True) ,
           ('I.FaultModel', 'faultmodel', True),
           ('I.ForcedValue', 'forcedvalue', True),
           ('I.InjectionTime', 'injectiontime', False),
           ('I.InjectionDuration', 'injectionduration', False),
           ('I.ObservationTime', 'observationtime', False),
           ('I.FailureMode', 'failuremode', True),
           ('I.ErrorCount', 'errorcount', False),
           ('I.TrapCode', 'trapcode', True),
           ('I.FaultToFailureLatency', 'latencyfaultfailure', False),
           ('I.Dumpfile', 'dumpfile', True) ]
def inv_dict(indict):
    """Return a new dict mapping each value of *indict* back to its key.

    Values must be hashable; if values repeat, the last key seen wins.
    """
    # BUGFIX: dict.iteritems() exists only on Python 2; items() works on both.
    return {v: k for k, v in indict.items()}
def get_sample_size(error_margin, confidence_level, N, P=0.5):
    """Required sample size for a finite population N at the given
    error margin and confidence level (proportion estimate P)."""
    t = T_Table[confidence_level]
    e_sq = error_margin * error_margin
    t_pq = t * t * P * (1 - P)
    return N / (1 + e_sq * (N - 1) / t_pq)
def get_error_margin(sample_size, confidence_level, P=0.5, N=None):
    """Error margin of a sample of the given size; applies the
    finite-population correction when the population size N is given."""
    t = T_Table[confidence_level]
    pq = P * (1 - P)
    if N is None:
        return t * math.sqrt(pq / sample_size)
    return t * math.sqrt(pq * (N - sample_size) / (sample_size * (N - 1)))
def get_selector(f, k_httpt, k_selector_alias, enquote = False):
    # Build one SQL WHERE-clause fragment from an HTTP form field.
    #   f:               cgi.FieldStorage with the request parameters
    #   k_httpt:         form key to read
    #   k_selector_alias: SQL column/alias the value constrains
    #   enquote:         quote each word of the value (keeping and/or/not bare)
    # Returns '' when the field is absent/empty.
    # NOTE(review): values are concatenated straight into SQL -- injection
    # risk if the form input is untrusted; verify upstream sanitization.
    v = f.getvalue(k_httpt,'')
    if k_httpt == 'failuremode':
        # the form carries the human-readable name; map back to the DB code
        v = inv_dict(failuremodes_alias)[v]
    if v == '': return('')
    elif v.find(':')>=0:
        # "low:high" syntax selects an exclusive numeric range
        s = v.split(':')
        v = k_selector_alias + '>' + s[0] + ' AND ' + k_selector_alias + '<' + s[1]
        return(v)
    else:
        if enquote: v = ' '.join(r'"{}"'.format(word) if word.lower() not in ['and','or','not'] else word for word in v.split())
        return(' '+k_selector_alias+'='+v)
def xml_markup(attrlist, valuelist, emptyfields=()):
    """Render query rows as <QRItem> XML elements preceded by a <Summary> tag.

    attrlist:    attribute names, one per column of each row in valuelist.
    valuelist:   rows to render; at most MAX_RESPONCE_ITEMS are listed.
    emptyfields: attribute names whose values are suppressed (emitted as "").
                 (Changed from a mutable default [] to an immutable tuple.)
    Returns the summary + item markup terminated by the 'stop' sentinel,
    or 'Error' when attrlist does not match the row width.
    """
    if len(attrlist) != len(valuelist[0]):
        return('Error')
    res = ''
    n = min(len(valuelist), MAX_RESPONCE_ITEMS)
    for v in range(n):
        res += '\n\n\t<QRItem'
        for i in range(len(attrlist)):
            val = str(valuelist[v][i]) if not attrlist[i] in emptyfields else ""
            res += '\n\t\t' + attrlist[i] + '=\"' + val + '\"'
        res += '/>'
    summarytag = '\n\n<Summary \nmessage=\"'
    summarytag += 'Selected ' + str(len(valuelist)) + ' items, showed ' + str(n) + ' items\"' if n < len(valuelist) else '\"'
    if n < len(valuelist):
        # listing truncated: include failure-mode percentages over the FULL
        # selection so the client still sees the complete distribution
        if 'failuremode' in attrlist:
            ind = attrlist.index('failuremode')
            fmodes = dict()
            for row in valuelist:
                t = row[ind]
                # BUGFIX: the first occurrence used to be initialised to 0,
                # undercounting every failure mode by one
                fmodes[t] = fmodes.get(t, 0) + 1
            total = sum(fmodes.values())
            for k, v in fmodes.items():
                summarytag += '\n\t' + k + '=\"' + str('%.2f' % (v*100.0/total)) + '\"'
    summarytag += ' />'
    return(summarytag + '\n\n' + res+'stop')
class DesignNode:
    # Node of the design-hierarchy tree; accumulates failure-mode counts
    # ('c' = silent data corruption, 's' = signalled, 'm' = masked, 'l' = latent).
    def __init__(self, tname):
        self.name = tname
        # absolute counts per failure mode
        self.fmc = {'c': 0, 's': 0, 'm': 0, 'l': 0}
        # counts as percentages of a reference node (filled by normalize())
        self.fmc_percentage = {'c': 0.0, 's': 0.0, 'm': 0.0, 'l': 0.0}
        self.children = []
    def append(self, pth, fm):
        # Walk/extend the tree along path *pth* (a list of node names,
        # consumed destructively), incrementing failure-mode *fm* on
        # every node along the path.
        cnode = pth.pop(0)
        xnode = None
        for c in self.children:
            if c.name == cnode:
                xnode = c
                break
        if xnode == None:
            xnode = DesignNode(cnode)
            self.children.append(xnode)
        xnode.fmc[fm] += 1
        if len(pth) > 0:
            xnode.append(pth, fm)
    def normalize(self, relnode):
        # Express this subtree's counts as percentages of *relnode*'s totals.
        # NOTE(review): divides by relnode's grand total -- assumes it is
        # non-zero; confirm callers guarantee at least one counted fault.
        total = relnode.fmc['c'] + relnode.fmc['s'] + relnode.fmc['m'] + relnode.fmc['l']
        self.fmc_percentage['c'] = (self.fmc['c'] * 100.0)/total
        self.fmc_percentage['s'] = (self.fmc['s'] * 100.0)/total
        self.fmc_percentage['m'] = (self.fmc['m'] * 100.0)/total
        self.fmc_percentage['l'] = (self.fmc['l'] * 100.0)/total
        for c in self.children:
            c.normalize(relnode)
    def to_JSON(self):
        # Serialize this subtree (counts + percentages + children) as JSON.
        res = '{\n\"name\": \"' + self.name + '\"' + ',\n\"m\": \"' + str(self.fmc['m']) + '\",\n' + '\"l\": \"' + str(self.fmc['l']) + '\",\n' + '\"s\": \"' + str(self.fmc['s']) + '\",\n' + '\"c\": \"' + str(self.fmc['c']) + '\"'
        res += ',\n\"c_p\": \"' + str('%.2f' % self.fmc_percentage['c']) + '\",\n\"s_p\": \"'+ str('%.2f' % self.fmc_percentage['s']) + '\",\n\"m_p\": \"'+ str('%.2f' % self.fmc_percentage['m']) + '\",\n\"l_p\": \"'+ str('%.2f' % self.fmc_percentage['l']) + '\"'
        if len(self.children) > 0:
            res += ',\n\"children\": ['
            for i in range(0,len(self.children),1):
                res += self.children[i].to_JSON()
                if i < len(self.children) - 1 :
                    res += ',\n'
            res += '\n]'
        res += '\n}'
        return(res)
    def to_HTML(self, level=0):
        # Render this subtree as HTML table rows, indenting names by *level*.
        tab = ''
        for i in range(0, level, 1): tab += " |"
        res = "\n<tr>" + "<td><pre>" + tab+ self.name + "</td></pre>"+ "<td><pre>" + str(self.fmc['m']) + "</td></pre>"+ "<td><pre>" + str(self.fmc['l']) + "</td></pre>"+ "<td><pre>" + str(self.fmc['s']) + "</td></pre>"+ "<td><pre>" + str(self.fmc['c']) + "</td></pre>"
        res += "<td><pre>" + str('%.2f' % self.fmc_percentage['m']) + "</td></pre>"+ "<td><pre>" + str('%.2f' % self.fmc_percentage['l']) + "</td></pre>"+ "<td><pre>" + str('%.2f' % self.fmc_percentage['s']) + "</td></pre>"+ "<td><pre>" + str('%.2f' % self.fmc_percentage['c']) + "</td></pre>"+"</tr>"
        for c in self.children:
            res += c.to_HTML(level+1)
        return(res)
# Split the `fields` table into parallel lists: SQL column expressions,
# XML/HTTP attribute names, and a column -> attribute lookup map.
sql_fields = []
req_fields = []
fdict = dict()
for i in fields:
    sql_fields.append(i[0])
    req_fields.append(i[1])
    fdict[i[0]] = i[1]
log = open('log.txt','a')
try:
    form = cgi.FieldStorage()
    # Build a filesystem-safe cache key from all query parameters
    signature = ''
    for k in form.keys():
        if not k in ['action', 'cache']:
            signature +='_' + k + '=' + re.sub("[^a-zA-Z0-9_]","-",form.getvalue(k))
    if not os.path.exists(os.path.join(os.getcwd(), 'cache')):
        os.mkdir(os.path.join(os.getcwd(), 'cache'))
    log.write(signature)
    populationsize = None
    samplesize_max = None
    # Summary.xml (when present) gives the fault population size and an
    # upper bound on how many rows may be sampled for this experiment
    if os.path.exists('Summary.xml'):
        tree = ET.parse('Summary.xml').getroot()
        for i in tree.findall('Experiment'):
            if i.get('HDLModel','')==form.getvalue('model') and i.get('FaultModel','')==form.getvalue('faultmodel') and i.get('macrocell','')==form.getvalue('instancetype'):
                populationsize = float(i.get('population','0'))
                samplesize_max = int(i.get('samplesize','0'))
    log.write('\n\nPopulation size: {0}, \nSamplesize={1}'.format(str(populationsize), str(samplesize_max)))
    if not os.path.exists(os.path.join(os.getcwd(), 'cache', signature)):
        os.mkdir(os.path.join(os.getcwd(), 'cache', signature))
    connection = sqlite3.connect(glob.glob('*.db')[0])
    cursor = connection.cursor()
    query = """ SELECT @FIELDS
FROM Injections I
JOIN Models M ON I.ModelID = M.ID
JOIN Targets T ON I.TargetID = T.ID
"""
    # Translate each recognised form parameter into a WHERE-clause term
    selector = ['I.Status != \"E\"']
    for i in fields:
        if form.has_key(i[1]):
            selector.append(get_selector(form, i[1], i[0] , i[2]))
    valid_sel_appended = 0
    for c in selector:
        if c != '':
            if valid_sel_appended == 0:
                query += ' WHERE '
            else:
                query += ' AND '
            query += c
            valid_sel_appended += 1
    if samplesize_max!=None and samplesize_max>0:
        query+="\nLIMIT {0}".format(str(samplesize_max))
    sampling_mode = (form.getvalue('samplesize','').replace(' ', '') != '')
    if sampling_mode:
        # Draw a random sample of row indices (reproducible via 'randseed')
        cursor.execute(query.replace('@FIELDS','COUNT(*)'))
        c = cursor.fetchone()
        population_size = int(c[0])
        if 'randseed' in form.keys():
            if form.getvalue('randseed').isdigit():
                random.seed(int(form.getvalue('randseed')))
        sample_indicies = random.sample(range(0,population_size), int(form.getvalue('samplesize')) )
        # sorted descending so the next wanted index is always at the tail
        sample_indicies.sort(reverse=True)
        log.write('\nPopulation size: {0}\nSamples [{1}] = {2}'.format(population_size, len(sample_indicies), '\n'.join([str(i) for i in sample_indicies])))
    log.write('\n\n'+query)
    log.flush()
    cursor.execute(query.replace('@FIELDS', ', '.join(sql_fields)))
    #build list of first N rows to show and statistics
    i=0
    listed_ind = 0
    sampled_ind = 0
    listing_content = ''
    statistic_content = '\n\n<Summary '
    fmodes = dict()
    fmodefield_index = req_fields.index('failuremode')
    pathfield_index = req_fields.index('target')
    DesignTree = DesignNode('Root')
    pathsep = re.compile('[/_\.\(\)\[\]]')
    # Stream rows in chunks; build the listing, the failure-mode histogram,
    # and the per-design-unit distribution tree in one pass
    while True:
        rows = cursor.fetchmany(50000)
        if not rows:
            break
        for c in rows:
            if i&0xFFFF == 0:
                log.write('\nIndex = {0}'.format(str(i)))
                log.flush()
            stat_flag = False
            list_flag = False
            #if sampling mode: list item and include into statistics IF it's index has been selected for sampling
            if sampling_mode:
                if len(sample_indicies) > 0:
                    if i == sample_indicies[-1]:
                        list_flag = (listed_ind < MAX_RESPONCE_ITEMS)
                        stat_flag = True
                        sample_indicies.pop()
            #otherwise - list just first MAX_RESPONCE_ITEMS, but compute statistics for the whole set
            else:
                stat_flag = True
                if i < MAX_RESPONCE_ITEMS:
                    list_flag = True
            if list_flag:
                listing_content += '\n\n\t<QRItem'
                for j in range(0,len(req_fields),1):
                    listing_content += '\n\t\t{0} = \"{1}\"'.format(req_fields[j], str(c[j]))
                listing_content += '/>'
                listed_ind += 1
            if stat_flag:
                t = c[fmodefield_index]
                if t in fmodes:
                    fmodes[t] += 1
                else:
                    fmodes[t] = 1
                sampled_ind += 1
                #Update the distribution tree
                pth = []
                for p in re.split(pathsep, c[pathfield_index].replace('{','').replace('}','').replace('\\','')):
                    if p != '':
                        pth.append(p)
                DesignTree.append(pth, t.lower())
            i+=1
    statistic_content += '\nmessage=\"Items: Retrieved {0}, Sampled {1}, listed {2}, \"'.format(str(i), str(sampled_ind), str(listed_ind))
    total = 0
    for k, v in fmodes.items():
        total += v
        log.write('\n k: {0} = v: {1}'.format(k, str(v)))
    # Per failure mode: absolute count, 95%-confidence error margin, percentage
    for k, v in fmodes.items():
        if k in failuremodes_alias:
            statistic_content += '\n\t' + failuremodes_alias[k] + '_abs=\"' + str(v) + '\"'
            statistic_content += '\n\t' + failuremodes_alias[k] + '_err=\"' + str( '%.2f' % (100*get_error_margin(total, 0.95, v*1.0/total, populationsize)) ) + '\"'
            statistic_content += '\n\t' + failuremodes_alias[k] + '=\"' + str('%.2f' % (v*100.0/total)) + '\"'
    statistic_content += ' />'
    DesignTree.fmc = DesignTree.children[0].fmc
    DesignTree.normalize(DesignTree)
    #result for action = search
    with open(os.path.join(os.getcwd(), 'cache', signature,'search.xml'), 'w') as cachefile:
        cachefile.write('<?xml version="1.0" encoding="UTF-8"?>\n<data>' + statistic_content + '\n\n' + listing_content + "\n\n</data>")
    with open(os.path.join(os.getcwd(), 'cache', signature,'distree.json'), 'w') as cachefile:
        cachefile.write('[' + DesignTree.to_JSON() + ']')
    with open(os.path.join(os.getcwd(), 'cache', signature,'distree.html'), 'w') as cachefile:
        cachefile.write("<table> <th><pre>Design Unit</pre></th> <th><pre>Masked, Abs</pre></th> <th><pre>Latent, Abs</pre> <th><pre>Signaled Failure, Abs</pre></th> <th><pre>SDC, Abs</pre> <th><pre>Masked, %</pre> <th><pre>Latent, %</pre> <th><pre>Signaled Failure, %</pre></th> <th><pre>SDC, %</pre>" + DesignTree.to_HTML() + "</table>")
    #return result for requested action
    if form.getvalue('action','').find('search') >= 0:
        with open(os.path.join(os.getcwd(), 'cache', signature,'search.xml'), 'r') as f:
            result = f.read()
    elif form.getvalue('action','').find('gedistree') >= 0:
        if form.getvalue('action','').find('JSON') >= 0:
            with open(os.path.join(os.getcwd(), 'cache', signature,'distree.json'), 'r') as f:
                result = f.read()
        else:
            with open(os.path.join(os.getcwd(), 'cache', signature,'distree.html'), 'r') as f:
                result = f.read()
    print "Status: 200 \r\n"
    print result
except Exception as e:
    log.write(str(e))
finally:
    log.write('Finished')
    log.close()
| StarcoderdataPython |
4810160 | <reponame>alexanderlopoukhov/CDMSouffleur
import os
import pandas as pd
from pathlib import Path
from pyspark.sql.utils import AnalysisException
from cdm_souffleur.utils.utils import spark
from cdm_souffleur.utils.constants import VOCABULARY_DESCRIPTION_PATH
from cdm_souffleur.utils.utils import Database
def load_vocabulary(path=r'D:\vocabulary\\'):
    # meanwhile not in use - only DB or direct file
    """Register every ATHENA vocabulary CSV under *path* as a Spark temp view.

    :param path: directory containing the files downloaded from ATHENA
    :return: list of table (view) names that were registered
    """
    registered = []
    for filename in os.listdir(path):
        if not filename.endswith('.csv'):
            continue
        table = filename.replace('.csv', '')
        frame = spark().read.csv(str(Path(path) / filename), sep='\t',
                                 header=True, inferSchema=True)
        frame.createOrReplaceTempView(table)
        registered.append(table)
    return registered
def return_lookup_list():
    """Return the list of ATHENA vocabulary_id values.

    Falls back to the bundled description file when no DB engine is set up.
    """
    if Database().get_engine() is None:
        description = pd.read_csv(VOCABULARY_DESCRIPTION_PATH, sep='\t')
        return description['vocabulary_id'].values.tolist()
    with Database().get_engine().connect() as con:
        result = con.execute("select vocabulary_id from vocabulary")
        return [record[0] for record in result]
def return_domain_list():
    """Return every domain_id from the ATHENA `domain` table."""
    with Database().get_engine().connect() as con:
        result = con.execute('select domain_id from domain')
        ids = [record[0] for record in result]
    return ids
def return_concept_class_list():
    """Return every concept_class_id from the ATHENA `concept_class` table."""
    with Database().get_engine().connect() as con:
        result = con.execute("select concept_class_id from concept_class")
        ids = [record[0] for record in result]
    return ids
def find_domain(column_name, table_name):
    """Find target (domain) information by source code.

    :param column_name: source code column name
    :param table_name: table where the source code is located
    :return: pandas DataFrame with the query result
    both vocabulary and report should be loaded to spark warehouse
    """
    db = Database().get_engine()
    # BUGFIX: close the SQL template file instead of leaking the handle
    with open('model/sources/SQL', 'r') as sql_file:
        sql = sql_file.read()
    # TODO: with few PC's should be used sql_broadcast instead sql
    # TODO: is it client-server or task cluster App?
    # (removed a no-op `except AnalysisException: raise` re-raise wrapper;
    # exceptions propagate unchanged either way)
    return pd.read_sql(sql.format(column_name, table_name), con=db)
if __name__ == '__main__':
    # TODO: detect configuration of PC and create effective entry point
    # cores = os.cpu_count()
    # init_spark()
    from cdm_souffleur.model.source_schema import load_report
    #load_report()
    # Smoke test: print the available vocabulary ids
    print(return_lookup_list())
    #find_domain('dx1', 'facility_header').show()
    # print(find_domain.__doc__)
| StarcoderdataPython |
4823082 | <gh_stars>1-10
"""Adds repositories/archives."""
########################################################################
# DO NOT EDIT THIS FILE unless you are inside the
# https://github.com/3rdparty/eventuals-grpc-examples repository. If you
# encounter it anywhere else it is because it has been copied there in
# order to simplify adding transitive dependencies. If you want a
# different version of eventuals-grpc-examples follow the Bazel build
# instructions at https://github.com/3rdparty/eventuals-grpc-example.
########################################################################
load("//3rdparty/eventuals-grpc:repos.bzl", eventuals_grpc_repos = "repos")
def repos(external = True, repo_mapping = {}):
    """Adds the repositories needed by eventuals-grpc-examples.

    Args:
      external: unused here; kept for API compatibility with callers.
      repo_mapping: forwarded to the underlying repository rules.
    """
    eventuals_grpc_repos(
        repo_mapping = repo_mapping,
    )
| StarcoderdataPython |
1746602 | <filename>wce_triage/setup/install_boot.py
#!/usr/bin/python3
#
#
import os, sys, subprocess
# The commands below modify the system boot configuration; refuse to run
# without root privileges.
if os.getuid() != 0:
    print("***** install_boot would only work as root *****")
    sys.exit(1)
# Regenerate the GRUB configuration for the installed system
subprocess.run(['update-grub'])
# Rebuild the initramfs for the current kernel
subprocess.run(['update-initramfs', '-u'])
# Create /ro and /rw mount points (presumably for an overlay root -- confirm);
# mkdir prints an error but the script continues if they already exist
subprocess.run(['mkdir', '/ro'])
subprocess.run(['mkdir', '/rw'])
| StarcoderdataPython |
3289931 | import sqlite3
# Demo: read, insert into, and re-read the `food` table of test.db.
# NOTE: sqlite3's `with connection:` commits/rolls back on exit but does
# NOT close the connection.
with sqlite3.connect("test.db") as conn:
    # initial contents
    all_food = conn.execute("SELECT * FROM food")
    for food in all_food:
        print(food)
    # add one row and commit it explicitly
    conn.execute("INSERT INTO food (name, price) VALUES ('salad', 7.77)")
    conn.commit()
    # show the table again, now including the new row
    all_food_again = conn.execute("SELECT * FROM food")
    for food in all_food_again:
        print(food)
| StarcoderdataPython |
110200 | <gh_stars>0
from django.urls import include, path
from rapidsms.backends.kannel import views
# URL routes: RapidSMS auth pages plus the two Kannel SMS-gateway endpoints.
urlpatterns = (
    # login/logout views bundled with RapidSMS
    path('account/', include('rapidsms.urls.login_logout')),
    # Kannel posts delivery reports (DLRs) here
    path('delivery-report/',
         views.DeliveryReportView.as_view(),
         name='kannel-delivery-report'),
    # incoming messages from the Kannel backend
    path('backend/kannel/',
         views.KannelBackendView.as_view(backend_name='kannel-backend'),
         name='kannel-backend'),
)
| StarcoderdataPython |
4838142 | # 1089 - Loop Musical
# https://www.urionlinejudge.com.br/judge/pt/problems/view/1089
def peaks(sample):
    """Yield every strict local extremum of *sample*, treating it as circular
    (the first and last elements are neighbours)."""
    size = len(sample)
    for idx, value in enumerate(sample):
        before = sample[idx - 1]
        after = sample[(idx + 1) % size]
        # strict local maximum or strict local minimum
        if (before < value > after) or (before > value < after):
            yield value
def main():
    """Read test cases until a 0 count line; print each sample's peak count."""
    # iter(callable, sentinel) keeps reading integers until one equals 0
    for _ in iter(lambda: int(input()), 0):
        notes = [int(tok) for tok in input().split()]
        print(sum(1 for _ in peaks(notes)))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1758557 | """
---------------------------------------------------------------------------------
The main shine server running as backend and waiting
for incoming Game connections to be served
- using the twisted library
by <NAME>
(c) 2017 ducandu GmbH
---------------------------------------------------------------------------------
"""
import sys
# import inspect
from threading import Thread
import logging as log
# twisted server stuff
# from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet import reactor
# import ZODB
# import transaction
# import persistent.mapping
# import aiopening
# import server_pyrate # need to import this to be able to read subclasses of WorldManager
import server_protocol
def get_all_subclasses(_class):
    """Return all transitive subclasses of *_class*: direct subclasses first,
    then each direct subclass's own subtree, depth-first and in order."""
    direct = _class.__subclasses__()
    collected = list(direct)
    for sub in direct:
        collected.extend(get_all_subclasses(sub))
    return collected
# collects all of aiopening's AlgorithmOld classes and stores them in two structures:
# a) a JSON list that can be sent (as is) back to client asking for "list algorithms"
# b) a algos_by_name where we can lookup (by AlgorithmOld class name),
# which methods are clientcallable (field: 'methodNames') and the class itself (field: 'class')
"""def collect_algorithm_class_info():
algos = get_all_subclasses(aiopening.AlgorithmOld)
json_algo_class_list = []
algo_dict_by_name = {} # key: WorldManager class name; values: list of ClientCallableMethods (method names)
# collect each WM's name and pyrateclientcallable methods to the returned json
for algo in algos:
methods = inspect.getmembers(algo, predicate=inspect.ismethod)
client_callable_method_names = []
for m in methods:
if hasattr(m, "_IsShineClientCallable"):
client_callable_method_names.append(m.__name__)
json_algo_class_list.append({"name": algo.__name__, "setupMethods": methods})
algo_dict_by_name[algo.__name__] = {"methodNames": client_callable_method_names, "class": algo}
return algos_by_name, json_algo_class_list
"""
# the command line prompt for shutdown or other commands
def command_prompt(_factory):
while True:
cmd = str(input(">"))
if cmd == "exit":
# shutdown all protocol (client) connections
_factory.shutdown_all()
# stop the reactor
reactor.stop()
return # ends the thread
# main server code
# - run server in main thread
# - spawn off one controller thread that serves as a command prompt (e.g. to shutdown the server gracefully)
if __name__ == "__main__":
# setup logging
log.basicConfig(filename='log/server.log', level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
# figure out our listening port
port = 0
try:
if sys.argv[1]:
port = int(sys.argv[1])
except IndexError:
port = 0
if port == 0:
port = 2017
log.info("PyRATE server started. Listening for incoming web-socket connections on port %d ..." % port)
# get all pyrate WorldManagers and all of every WorldManager's ClientCallableMethods
# algos_by_name, json_algo_list = collect_algorithm_class_info()
# log.info("json_algo_list was compiled as %s" % json_algo_list)
# connect to the ZODB and store the connection object in this factory object for all protocols to be able to access the DB # ZODB!
# connection = ZODB.connection('data/shine_server_data.fs') # ZODB!
zoRoot = {} # connection.root # ZODB!
# log.info("connected to ZODB") # ZODB!
# create empty tree structure if doesn't exist yet in DB
# if not hasattr(zoRoot, "Users"): # ZODB!
if "Users" not in zoRoot:
log.info("created ZODB root.Users persistent dict for UserRecord object storage")
# zoRoot.Users = {"sven": server_protocol.UserRecord("sven")} # persistent.mapping.PersistentMapping() # ZODB!
zoRoot["Users"] = {"sven": server_protocol.UserRecord("sven")} # persistent.mapping.PersistentMapping()
# setup the TCP server and start listening
factory = server_protocol.ShineProtocolFactory(reactor, "ws://localhost:%d" % port, zoRoot) # , algos_by_name, json_algo_list)
# determine our protocol to be generated on each call to buildProtocol
factory.protocol = server_protocol.ShineProtocol
# endpoint = TCP4ServerEndpoint(reactor, port)
# endpoint.listen(fact)
# before we start the "run", start a command prompt thread (+ queue) in order to be later able to shut down the server
# commandQ = Queue() # the command queue that we'll listen on
commandT = Thread(target=command_prompt, args=(factory,))
log.info("starting command prompt thread")
commandT.start()
# if "sven" in root.Users:
# print("main thread: root.Users['sven'] = %s" % str(root.Users['sven']))
# start the reactor (this will block until reactor.stop() is called)
print("starting reactor.run()")
print("type 'exit' to quit server")
reactor.listenTCP(port, factory)
reactor.run()
# we were stopped by the command prompt thread
# - commit all transactions to DB before we go down
# print("exiting server: committing all ZODB transactions and shutting down") # ZODB!
print("exiting server: shutting down")
# transaction.commit() # ZODB!
# connection.close() # ZODB!
| StarcoderdataPython |
4811557 | """
Testing the exponential map
"""
import sys
import spin
import numpy as np
import unittest
import csb.numeric as csb
import matplotlib.pylab as plt
from params import ExponentialMap
from scipy.linalg import logm
from spin.rotation import skew_matrix
from littlehelpers import make_title
class TestExpMap(unittest.TestCase):
    """Checks spin.ExponentialMap against alternative rotation constructions
    and random sampling."""
    # absolute tolerance for rotation-distance / matrix comparisons
    tol = 1e-10
    def test_matrix(self):
        # Build the same rotation several ways (Cython from dofs, Cython
        # from matrix, pure-Python, axis-angle) and check they all agree.
        dofs = spin.ExponentialMap.random()
        rot = spin.ExponentialMap(dofs)
        rot2 = spin.ExponentialMap.from_rotation(rot)
        rot3 = ExponentialMap(dofs)
        print(make_title('Cython (from dofs)'))
        print(rot)
        print(make_title('Cython (from matrix)'))
        print(rot2)
        print(make_title('Python (from dofs)'))
        print(rot3)
        axis, angle = rot.axis_angle
        r, theta, phi = csb.polar3d(axis)
        axisangle = spin.AxisAngle([theta, phi, angle])
        rot4 = spin.Rotation(csb.rotation_matrix(axis, -angle))
        self.assertTrue(spin.distance(rot, rot2) < self.tol)
        self.assertTrue(spin.distance(rot, rot4) < self.tol)
        self.assertTrue(spin.distance(rot3, rot4) < self.tol)
        self.assertTrue(spin.distance(rot2, axisangle) < self.tol)
    def test_params(self):
        """
        Compute skewmatrix and dofs
        """
        # Compare the matrix logarithm of a rotation with the skew matrix of
        # its axis*angle vector, and with two closed-form reconstructions.
        rot = spin.ExponentialMap()
        R = rot.matrix
        B = logm(R).real
        print(np.round(B, 3))
        axis, theta = rot.axis_angle
        b = axis * theta
        C = 0.5 * (R - R.T) * theta / np.sin(theta)
        c = np.array([-C[1,2], C[0,2], -C[0,1]])
        c = 0.5 * theta / np.sin(theta) * np.array([R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1]])
        self.assertTrue(np.fabs(np.linalg.norm(axis)-1) < self.tol)
        self.assertTrue(np.fabs(B - skew_matrix(b)).max() < self.tol)
        self.assertTrue(np.fabs(B - C).max() < self.tol)
        self.assertTrue(np.linalg.norm(c-b) < self.tol)
        self.assertTrue(np.linalg.norm(c-spin.ExponentialMap.from_rotation(R).dofs) < self.tol)
    def test_random(self):
        # Sample many random rotations and plot histograms of their exp-map
        # dofs plus the rotation-angle density (visual check, no assertions).
        n = int(1e5)
        R = spin.random_rotation(n)
        dofs = np.array([spin.ExponentialMap.from_rotation(r).dofs for r in R]).T
        names = ('x','y','z')
        fig, axes = plt.subplots(1,4,figsize=(16,4))
        kw_hist = dict(alpha=0.2, color='k', bins=50)
        kw_plot = dict(alpha=0.7, color='r', lw=3)
        # matplotlib renamed hist's `normed` kwarg to `density`
        kw_hist['normed' if sys.version_info[0] == 2 else 'density'] = True
        for name, values, ax in zip(names, dofs, axes):
            ax.hist(values,label=r'${0}$'.format(name), **kw_hist)
            ax.legend()
        x = spin.RotationAngle.axis(200)
        ax = axes[-1]
        ax.hist(np.linalg.norm(dofs,axis=0), **kw_hist)
        ax.plot(x, spin.RotationAngle.prob(x), **kw_plot)
        fig.tight_layout()
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
73323 | <reponame>Preen1/Antipetros_Discord_Bot<filename>antipetros_discordbot/utility/enums.py
# region [Imports]
# * Standard Library Imports -->
from enum import Enum, Flag, auto
# endregion[Imports]
class RequestStatus(Enum):
    """HTTP status codes distinguished by the bot's request handling."""
    Ok = 200
    NotFound = 404
    NotAuthorized = 401  # HTTP 401 ("Unauthorized")
class WatermarkPosition(Flag):
    """Watermark placement flags; combined with `|` into the placements
    listed in WATERMARK_COMBINATIONS."""
    Top = auto()
    Bottom = auto()
    Left = auto()
    Right = auto()
    Center = auto()
# The nine placements considered valid: four corners, four edge midpoints,
# and the exact center (Center | Center is just Center).
WATERMARK_COMBINATIONS = {WatermarkPosition.Left | WatermarkPosition.Top,
                          WatermarkPosition.Left | WatermarkPosition.Bottom,
                          WatermarkPosition.Right | WatermarkPosition.Top,
                          WatermarkPosition.Right | WatermarkPosition.Bottom,
                          WatermarkPosition.Center | WatermarkPosition.Top,
                          WatermarkPosition.Center | WatermarkPosition.Bottom,
                          WatermarkPosition.Center | WatermarkPosition.Left,
                          WatermarkPosition.Center | WatermarkPosition.Right,
                          WatermarkPosition.Center | WatermarkPosition.Center}
class DataSize(Enum):
    """Binary size units; each member's value is the unit's size in bytes."""
    Bytes = 1024 ** 0
    KiloBytes = 1024 ** 1
    MegaBytes = 1024 ** 2
    GigaBytes = 1024 ** 3
    TerraBytes = 1024 ** 4

    @property
    def short_name(self):
        """Lower-case abbreviation: 'b', 'kb', 'mb', 'gb' or 'tb'."""
        if self.name == "Bytes":
            return 'b'
        return self.name[0].lower() + 'b'

    def convert(self, in_bytes: int, round_digits=3, annotate=False):
        """Convert a raw byte count into this unit.

        Returns the rounded number, or a '<number> <short_name>' string
        when annotate is True.
        """
        amount = round(in_bytes / self.value, ndigits=round_digits)
        if annotate is True:
            return str(amount) + ' ' + self.short_name
        return amount
| StarcoderdataPython |
4813941 | <gh_stars>1-10
# Copyright 2020 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import MagicMock
from beakerx.forms.easyforms import EasyForm
from beakerx_base import BeakerxText, BeakerxPassword, BeakerxTextArea, BeakerxButton
class TestEasyForms(unittest.TestCase):
    """Unit tests for the EasyForm container widget and its add* helpers."""

    def test_easyforms(self):
        """A freshly constructed EasyForm is unnamed and empty."""
        # given
        # when
        ef = EasyForm()
        # then
        self.assertEqual(ef.easyFormName, "")
        self.assertEqual(len(ef.children), 0)
        self.assertEqual(len(ef.components), 0)

    def test_easyform_name(self):
        """The constructor argument becomes the form's display name."""
        # given
        # when
        ef = EasyForm("Hello EasyForm!")
        # then
        self.assertEqual(ef.easyFormName, "Hello EasyForm!")

    def test_easyform_add_text_field(self):
        """addTextField registers a BeakerxText child addressable by name."""
        # given
        ef = EasyForm()
        # when
        ef.addTextField('first', width=10)
        ef['first'] = 'First'
        # then
        self.assertEqual(len(ef.children), 1)
        self.assertIsInstance(ef.children[0], BeakerxText)
        self.assertIn('first', ef.components)
        self.assertEqual(ef.components['first'].value, 'First')
        self.assertEqual(ef.components['first'].description, 'first')
        self.assertEqual(ef.components['first'].size, 10)

    def test_easyform_add_password_field(self):
        """addPasswordField registers a BeakerxPassword with an empty value."""
        # given
        ef = EasyForm()
        # when
        ef.addPasswordField("Password Field", width=10)
        # then
        self.assertEqual(len(ef.children), 1)
        self.assertIsInstance(ef.children[0], BeakerxPassword)
        self.assertIn('Password Field', ef.components)
        p = ef.components['Password Field']
        self.assertEqual(p.description, 'Password Field')
        self.assertEqual(p.value, '')
        self.assertEqual(p.size, 10)

    def test_easyform_add_text_area(self):
        """addTextArea maps width/height onto the widget's cols/rows."""
        # given
        ef = EasyForm()
        # when
        ef.addTextArea("Text Area", width=10, height=5)
        # then
        self.assertEqual(len(ef.children), 1)
        self.assertIsInstance(ef.children[0], BeakerxTextArea)
        self.assertIn('Text Area', ef.components)
        ta = ef.components['Text Area']
        self.assertEqual(ta.description, 'Text Area')
        self.assertEqual(ta.value, '')
        self.assertEqual(ta.placeholder, '')
        self.assertEqual(ta.cols, 10)
        self.assertEqual(ta.rows, 5)

    def test_easyform_add_button(self):
        """Buttons become children (not named components) and clicking fires
        their actionPerformed callback."""
        # given
        ef = EasyForm()
        # when
        b = ef.addButton('OK', tag='tag')
        b.actionPerformed = MagicMock()
        # then
        self.assertEqual(len(ef.children), 1)
        self.assertEqual(len(ef.components), 0)
        self.assertIsInstance(ef.children[0], BeakerxButton)
        b = ef.children[0]
        self.assertEqual(b.tag, 'tag')
        self.assertFalse(b.actionPerformed.called)
        b.click()
        self.assertTrue(b.actionPerformed.called)

    def test_easyform_add_list(self):
        """addList supports multi-select and row-count variants."""
        # given
        ef = EasyForm()
        # when
        l1 = ef.addList('List 1', ["a", "b", "c"])
        l2 = ef.addList('List 2', ["a", "b", "c"], multi=False)
        l3 = ef.addList('List 3', ["a", "b", "c"], rows=2)
        # then
        print(ef.components.keys())
        self.assertEqual(len(ef.children), 3)
        print((l1, l2, l3))
| StarcoderdataPython |
3339497 | import logging
import sys
import time
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
class MaxRetriesExceededError(Exception):
    """Raised by ExponentialRetry.run when every backoff attempt has failed."""
    pass
def wait(seconds, tick=12):
    """
    Block for *seconds* seconds while drawing an animated spinner on stdout.

    :param seconds: The number of seconds to wait.
    :param tick: Spinner frames rendered per second.
    """
    frames = '|/-\\'
    elapsed = 0
    while elapsed < seconds:
        # One full second of animation per outer iteration.
        for step in range(tick):
            sys.stdout.write("\r" + frames[step % len(frames)])
            sys.stdout.flush()
            time.sleep(1/tick)
        elapsed += 1
    # Move the cursor back to the line start so the spinner is overwritten.
    sys.stdout.write("\r")
    sys.stdout.flush()
class ExponentialRetry:
    """Retries a callable with exponential backoff on one specific AWS error code."""

    def __init__(self, func, error_code, max_sleep=32):
        # func: the callable to retry.
        # error_code: the botocore error code that triggers a retry; any other
        #             ClientError is re-raised immediately.
        # max_sleep: cap (seconds) on a single backoff sleep; exceeding it aborts.
        self.func = func
        self.error_code = error_code
        self.max_sleep = max_sleep

    def run(self, *func_args, **func_kwargs):
        """
        Retries self.func with a simple exponential backoff algorithm.
        This is necessary when AWS is not yet ready to perform an action because all
        resources have not been fully deployed.

        :param func_args: The positional arguments to pass to the function.
        :param func_kwargs: The keyword arguments to pass to the function.
        :return: The return value of the retried function.
        :raises MaxRetriesExceededError: when the backoff budget is exhausted.
        """
        sleepy_time = 1
        func_return = None
        # NOTE(review): a legitimate None return from self.func is
        # indistinguishable from "not yet succeeded" and keeps the loop retrying.
        while sleepy_time <= self.max_sleep and func_return is None:
            try:
                func_return = self.func(*func_args, **func_kwargs)
                logger.info("Ran %s, got %s.", self.func.__name__, func_return)
            except ClientError as error:
                if error.response['Error']['Code'] == self.error_code:
                    print(f"Sleeping for {sleepy_time} to give AWS time to "
                          f"connect resources.")
                    time.sleep(sleepy_time)
                    sleepy_time = sleepy_time*2
                else:
                    logger.error(
                        "%s raised an error and cannot be retried.", self.func.__name__)
                    raise
        if sleepy_time > self.max_sleep:
            raise MaxRetriesExceededError(
                f"{self.func.__name__} exceeded the allowable number of retries.")
        return func_return
| StarcoderdataPython |
# Compose and print a greeting from a first and last name.
fist_name = "ada"
last_name = "lovelace"
full_name = " ".join([fist_name, last_name])
message = "Olá, " + full_name.title() + "!"
print(message)
| StarcoderdataPython |
145889 | from vk_api.longpoll import VkLongPoll, VkEventType
import vk_api
import dialogflow_v2 as dialogflow
import random
import os
import logging
import logging.config
from dotenv import load_dotenv
# Load secrets from a local .env file into the process environment.
load_dotenv()
VK_TOKEN = os.getenv('VK_TOKEN')  # VK community access token
PROJECT_ID = os.getenv('PROJECT_ID')  # Dialogflow (GCP) project id
# NOTE(review): read here but never referenced again in this file — presumably
# the Dialogflow client reads the env var directly; confirm.
GOOGLE_APPLICATION_CREDENTIALS = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
logger = logging.getLogger('vk_bot')
def detect_intent_texts(project_id, session_id, text, language_code):
    """Send *text* to Dialogflow and return the fulfilment text.

    Returns None when Dialogflow answers with its fallback intent (i.e. the
    bot has no meaningful reply).
    """
    client = dialogflow.SessionsClient()
    session_path = client.session_path(project_id, session_id)
    query = dialogflow.types.QueryInput(
        text=dialogflow.types.TextInput(text=text, language_code=language_code))
    response = client.detect_intent(session=session_path, query_input=query)
    result = response.query_result
    if result.intent.is_fallback:
        return None
    return result.fulfillment_text
def reply_message(event, vk_api):
    """Answer one incoming VK message via Dialogflow; stays silent on fallback.

    :param event: VK longpoll message event (uses .user_id and .text).
    :param vk_api: Authorised VK API client used to send the reply.
    """
    # Bind `answer` up front: previously an exception in detect_intent_texts
    # left it unbound and the `if not answer` below raised NameError.
    answer = None
    try:
        answer = detect_intent_texts(
            PROJECT_ID, event.user_id, event.text, 'ru')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the failure is still logged with traceback.
        logger.exception('detect intent not working')
    if not answer:
        logger.info(f"message: {event.text}, not answered")
    else:
        vk_api.messages.send(
            user_id=event.user_id,
            message=answer,
            random_id=random.randint(1, 1000)
        )
        logger.info(f"message: {event.text}, answered: {answer}")
def start_vk_bot():
    """Run the VK longpoll loop forever, replying to every direct message."""
    session = vk_api.VkApi(token=VK_TOKEN)
    api_client = session.get_api()
    longpoll = VkLongPoll(session)
    for event in longpoll.listen():
        if event.type == VkEventType.MESSAGE_NEW and event.to_me:
            reply_message(event, api_client)
# Entry point: start the bot's event loop when run as a script.
if __name__ == "__main__":
    start_vk_bot()
| StarcoderdataPython |
1651039 | import pickle as pkl
import numpy as np
import time
# Merge 60 pickled (data, label) shards into one dict of left/right/keep
# arrays, downsampling the "keep" (label==0) class per shard.
t1 = time.time()
gsan_keep_data_list, gsan_right_data_list,gsan_left_data_list = [], [], []
for i in range(60):
    with open(f"new_data/new_data_{i}.pkl","rb") as f:
        _ = pkl.load(f)
    data = _['data']
    label = _['label']
    gsan_left_number = gsan_right_number = gsan_keep_number = 0
    if (label == 1).any():
        gsan_left_data = data[label==1]
        gsan_left_data_list.append(gsan_left_data)
        gsan_left_number = gsan_left_data.shape[0]
    if (label == -1).any():
        gsan_right_data = data[label==-1]
        gsan_right_data_list.append(gsan_right_data)
        gsan_right_number = gsan_right_data.shape[0]
    if (label == 0).any():
        # Keep at most 10x the larger minority-class count from this shard.
        gsan_keep_number = max(gsan_left_number,gsan_right_number)*10
        gsan_keep_data = data[label==0]
        gsan_keep_data_list.append(gsan_keep_data[:gsan_keep_number])
    # NOTE(review): if a shard lacks one of the labels, the corresponding
    # gsan_*_data variable here is stale from a previous iteration (or unbound
    # on the first shard, raising NameError) — confirm all shards have all labels.
    print(i,gsan_left_data.shape,gsan_right_data.shape,gsan_keep_data.shape)
gsan_keep_data_array = np.vstack(gsan_keep_data_list)
gsan_right_data_array = np.vstack(gsan_right_data_list)
gsan_left_data_array = np.vstack(gsan_left_data_list)
print("Totally: ",gsan_left_data_array.shape,gsan_right_data_array.shape,gsan_keep_data_array.shape)
# Swap axes 1 and 2 of each 4-D stack before saving.
gsan_left_data_array = np.transpose(gsan_left_data_array,axes=(0,2,1,3))
gsan_right_data_array = np.transpose(gsan_right_data_array,axes=(0,2,1,3))
gsan_keep_data_array = np.transpose(gsan_keep_data_array,axes=(0,2,1,3))
print("Transposed:",gsan_left_data_array.shape,gsan_right_data_array.shape,gsan_keep_data_array.shape)
total = {
    "right":gsan_right_data_array,
    "left":gsan_left_data_array,
    "keep":gsan_keep_data_array
}
with open("new_data/total.pkl","wb") as f:
    pkl.dump(total,f)
# with open("new_data/left.pkl","wb") as f:
#     pkl.dump(gsan_left_data_array,f)
# with open("new_data/right.pkl","wb") as f:
#     pkl.dump(gsan_right_data_array,f)
# with open("new_data/keep.pkl","wb") as f:
#     pkl.dump(gsan_keep_data_array,f)
t2 = time.time()
print(f"time : {t2-t1:.2f}")
1656430 | from .testcases import *
from .utils import *
| StarcoderdataPython |
98664 | <filename>server/ec2-coordinator-app/src/initial_server/wait_for_sync_completion.py
import time
from loguru import logger
from library import ssh, geth_status
def wait(instance_dns, instance_type, datadir_mount, data_dir,
         debug_run, interrupt_avail_pct, status_interval_secs):
    """Poll geth on the given instance until its sync stops or is interrupted.

    Tracks the highest block numbers seen so far, mirrors them to a file on the
    instance, and sends SIGINT to geth when disk space runs low (or early in a
    debug run). Returns (status, instance_type, avail_pct, detail) from the
    last poll.
    """
    logger.info(f"Monitoring geth synchronisation. This should take several hours to complete...")
    status_count = 0
    # Running maxima: individual polls can report transient -1 / 0 values.
    max_perc_block = -1
    max_current_block = -1
    max_highest_block = -1
    while True:
        if debug_run:
            logger.warning(f"debug_run set to True; will interrupt sync prematurely!")
        status_count += 1
        status, avail_pct, detail, perc_block, highest_block, current_block = \
            geth_status.status(instance_dns, datadir_mount, data_dir)
        if perc_block > max_perc_block:
            max_perc_block = perc_block
        if highest_block > max_highest_block:
            max_highest_block = highest_block
        if current_block > max_current_block:
            max_current_block = current_block
        logger.info(f"\nGETH STATUS #{status_count} ({instance_type}, {avail_pct:.2f}% disk available, "
                    f"{max_current_block:,} current, {max_highest_block:,} highest, {max_perc_block:.2f}% blocks):\n"
                    + "\n".join(detail))
        # Persist progress on the instance so it survives this monitor dying.
        if max_current_block >= 0 and max_highest_block > 0:
            ssh.run(instance_dns, f"echo \"{max_current_block},{max_highest_block},{max_perc_block:.2f}%\""
                                  f" > /home/ec2-user/geth_block_info.txt")
        if status.name.startswith("stopped"):
            logger.info(f"Exiting monitoring due to geth status {status}")
            break
        if avail_pct < interrupt_avail_pct:
            # TODO: review the need to interrupt on low disk
            pid = ssh.geth_sigint(instance_dns)
            logger.info("Disk free:\n" + ssh.df(instance_dns, human=True))
            logger.info("Disk usage:\n" + ssh.du(instance_dns, human=True))
            logger.error(f"Interrupting geth process {pid} due to only {avail_pct:.2f}% avaiable on volume")
            break
        if debug_run and perc_block > 1.0:
            logger.warning(f"Prematurely interrupting geth process in debug case for testing (perc_block {perc_block:.2f}%)...")
            ssh.geth_sigint(instance_dns)
        time.sleep(status_interval_secs)
    return status, instance_type, avail_pct, detail
| StarcoderdataPython |
3341987 | <gh_stars>0
from Statistics.SampleMean import sampleMean
from Statistics.Proportion import proportion
from Calculators.Subtraction import subtraction
from Calculators.Division import division
from Calculators.Multiplication import multiplication
def var_sample_proportion(data):
    """Estimate the variance of the sample proportion of values above 64,
    computed over the first 999 observations of *data*.

    Returns p*q / (n - 1) rounded to 6 decimal places, where p is the
    proportion of values > 64 and q = 1 - p.
    """
    # NOTE(review): [0:999] takes 999 elements — confirm 1000 wasn't intended.
    sample_data = data[0:999]
    samp_prop_data = []
    for x in sample_data:
        # 64 is the hard-coded success threshold for this statistic.
        if x > 64:
            samp_prop_data.append(x)
    samp_len = len(samp_prop_data)
    samp_len_data = len(sample_data)
    p = round(samp_len / samp_len_data, 6)
    # q = 1 - p, via the project's Calculators helpers.
    q = subtraction(1, p)
    return round(multiplication(p, q) / (samp_len_data - 1), 6)
| StarcoderdataPython |
4832278 | <reponame>motrom/kittitracking-pdfmht
# -*- coding: utf-8 -*-
""" taken from
https://github.com/utiasSTARS/pykitti/blob/master/pykitti/odometry.py """
"""Provides helper methods for loading and parsing KITTI data."""
from collections import namedtuple
import numpy as np
#from PIL import Image
__author__ = "<NAME>"
__email__ = "<EMAIL>"

# Per dataformat.txt
# Raw KITTI OXTS record: GPS position, attitude, velocities, accelerations,
# angular rates, accuracies and status flags, in file-column order.
OxtsPacket = namedtuple('OxtsPacket',
                        'lat, lon, alt, ' +
                        'roll, pitch, yaw, ' +
                        'vn, ve, vf, vl, vu, ' +
                        'ax, ay, az, af, al, au, ' +
                        'wx, wy, wz, wf, wl, wu, ' +
                        'pos_accuracy, vel_accuracy, ' +
                        'navstat, numsats, ' +
                        'posmode, velmode, orimode')

# Bundle into an easy-to-access structure
# Pairs a raw packet with its derived 4x4 world-from-IMU pose matrix.
OxtsData = namedtuple('OxtsData', 'packet, T_w_imu')
def subselect_files(files, indices):
    """Return the subset of *files* at the given *indices*.

    On any lookup failure (indices is None / not iterable, or an index is out
    of range) the original *files* sequence is returned unchanged.
    """
    try:
        files = [files[i] for i in indices]
    except (TypeError, IndexError, KeyError):
        # Narrowed from a bare `except:` so control-flow exceptions such as
        # KeyboardInterrupt are no longer swallowed; invalid selections still
        # fall back to the full list.
        pass
    return files
def rotx(t):
    """3x3 rotation matrix for angle *t* (radians) about the x-axis."""
    ct = np.cos(t)
    st = np.sin(t)
    return np.array([[1, 0, 0],
                     [0, ct, -st],
                     [0, st, ct]])
def roty(t):
    """3x3 rotation matrix for angle *t* (radians) about the y-axis."""
    ct = np.cos(t)
    st = np.sin(t)
    return np.array([[ct, 0, st],
                     [0, 1, 0],
                     [-st, 0, ct]])
def rotz(t):
    """3x3 rotation matrix for angle *t* (radians) about the z-axis."""
    ct = np.cos(t)
    st = np.sin(t)
    return np.array([[ct, -st, 0],
                     [st, ct, 0],
                     [0, 0, 1]])
def transform_from_rot_trans(R, t):
    """Build a 4x4 homogeneous transform from rotation matrix R and translation t."""
    top_rows = np.hstack([R.reshape(3, 3), t.reshape(3, 1)])
    return np.vstack((top_rows, [0, 0, 0, 1]))
def read_calib_file(filepath):
    """Read a KITTI calibration file and parse it into a dictionary.

    Each `key: v1 v2 ...` line becomes a float ndarray entry. Lines whose
    values are not numeric (e.g. calibration dates) are skipped, as are blank
    or otherwise colon-less lines.
    """
    data = {}
    with open(filepath, 'r') as f:
        for line in f.readlines():
            # Skip blank / colon-less lines: previously `line.split(':', 1)`
            # produced a single element and the unpacking raised an uncaught
            # ValueError (the except below only guards the float conversion).
            if ':' not in line:
                continue
            key, value = line.split(':', 1)
            # The only non-float values in these files are dates, which
            # we don't care about anyway
            try:
                data[key] = np.array([float(x) for x in value.split()])
            except ValueError:
                pass
    return data
def pose_from_oxts_packet(packet, scale):
    """Helper method to compute a SE(3) pose matrix from an OXTS packet.

    Returns (R, t): the 3x3 rotation from the packet's roll/pitch/yaw and the
    translation from a Mercator projection of its lat/lon plus altitude.
    """
    earth_radius = 6378137.  # earth radius (approx.) in meters

    # Mercator projection for the planar translation; altitude is used as-is.
    t = np.array([
        scale * packet.lon * np.pi * earth_radius / 180.,
        scale * earth_radius * np.log(np.tan((90. + packet.lat) * np.pi / 360.)),
        packet.alt,
    ])

    # Euler angles to rotation matrix, composed as Rz(yaw) * Ry(pitch) * Rx(roll).
    R = rotz(packet.yaw).dot(roty(packet.pitch).dot(rotx(packet.roll)))

    return R, t
def load_oxts_packets_and_poses(oxts_files):
    """Generator to read OXTS ground truth data.

       Poses are given in an East-North-Up coordinate system
       whose origin is the first GPS position.

    :param oxts_files: iterable of OXTS text-file paths, read in order.
    :return: list of OxtsData(packet, T_w_imu) entries.
    """
    # Scale for Mercator projection (from first lat value)
    scale = None
    # Origin of the global coordinate system (first GPS position)
    origin = None

    oxts = []
    for filename in oxts_files:
        with open(filename, 'r') as f:
            for line in f.readlines():
                line = line.split()
                # Last five entries are flags and counts
                line[:-5] = [float(x) for x in line[:-5]]
                line[-5:] = [int(float(x)) for x in line[-5:]]

                packet = OxtsPacket(*line)

                if scale is None:
                    scale = np.cos(packet.lat * np.pi / 180.)
                    print("GPS/IMU scale {:f}".format(scale))

                R, t = pose_from_oxts_packet(packet, scale)

                if origin is None:
                    origin = t
                    # BUG FIX: `origin` is a length-3 ndarray and
                    # "{:f}".format(ndarray) raises TypeError; format the three
                    # components individually, matching load_oxt() below.
                    print("GPS/IMU origin {:f} {:f} {:f}".format(*origin))

                T_w_imu = transform_from_rot_trans(R, t - origin)

                oxts.append(OxtsData(packet, T_w_imu))

    return oxts
def load_oxt(filename):
    """Single-file variant of load_oxts_packets_and_poses: parses one OXTS file
    and returns only the 4x4 world-from-IMU pose matrices (no packets).

    Poses are East-North-Up, with the origin at the file's first GPS position.
    """
    scale = None
    # Origin of the global coordinate system (first GPS position)
    origin = None
    oxts = []
    with open(filename, 'r') as f:
        for line in f.readlines():
            line = line.split()
            # Last five entries are flags and counts
            line[:-5] = [float(x) for x in line[:-5]]
            line[-5:] = [int(float(x)) for x in line[-5:]]
            packet = OxtsPacket(*line)
            if scale is None:
                # Mercator scale comes from the first latitude in the file.
                scale = np.cos(packet.lat * np.pi / 180.)
                print("GPS/IMU scale {:f}".format(scale))
            R, t = pose_from_oxts_packet(packet, scale)
            if origin is None:
                origin = t
                print("GPS/IMU origin {:f} {:f} {:f}".format(*origin))
            T_w_imu = transform_from_rot_trans(R, t - origin)
            oxts.append(T_w_imu)
    return oxts
def loadSelfTransformations(oxt_file):
    """Per-frame ego-motion for one OXTS file: entry k maps frame k-1
    coordinates into frame k (identity for the first frame)."""
    poses = load_oxt(oxt_file)
    transforms = [np.eye(4)]
    for idx in range(1, len(poses)):
        step = np.linalg.inv(poses[idx]).dot(poses[idx - 1])
        transforms.append(step)
        # Sanity check: consecutive frames keep nearly the same vertical axis.
        assert step[2, 2] > .995
    return transforms
# Demo/debug harness: replays one KITTI tracking scene, sanity-checks the
# frame-to-frame ego-motion, and overlays a steering/speed arrow on each image.
if __name__ == '__main__':
    from imageio import imread
    from cv2 import imshow, waitKey, destroyWindow
    def clear(): destroyWindow('a')

    oxt_file = '../tracking/training/oxts/{:04d}.txt'
    img_files = '../tracking/training/image_02/{:04d}/{:06d}.png'
    lidar_files = '../tracking/training/velodyne/{:04d}/{:06d}.bin'
    scene_idx = 1
    files = range(400)#range(154)

    poses = load_oxt(oxt_file.format(scene_idx))
    # White 80x160 canvas the steering arrow is drawn onto each frame.
    draw_on_img = np.zeros((80,160,3), dtype=np.uint8) + 255
    speeds = []
    for file_idx in files[1:]:
        img = imread(img_files.format(scene_idx, file_idx))[:,:,::-1]
        # Relative motion from the previous frame into the current one.
        pose = np.linalg.inv(poses[file_idx-1]).dot(poses[file_idx])
        assert abs(pose[2,3]) < .2 # no up-down motion
        assert abs(pose[1,3]) < .5 # no significant lateral motion
        assert pose[0,3] > -.2 # not moving backwards
        assert pose[0,3] < 4. # not very high speed
        assert pose[2,2] > .99 # nearly same vertical orientation
        assert pose[0,0] > .96 # mostly in same direction
        # check whether angle approximations work
        angleapprox1 = np.arctan2(pose[0,1], pose[0,0])
        angleapprox2 = np.arctan2(-pose[1,0], pose[1,1])
        assert abs(angleapprox1-angleapprox2) < .03
        speed_instant = np.hypot(pose[0,3], pose[1,3])
        speed = speed_instant * 10. # convert to m/s
        speeds.append(speed)
        # direction_correct = np.hypot(pose[0,0], pose[0,1])
        # direction_cos = pose[0,0] / direction_correct
        # direction_sin = pose[0,1] / direction_correct
        # Heading change normalised by distance to approximate a wheel angle.
        wheel_angle = np.arctan2(pose[0,1], pose[0,0]) * 2. / max(.1, speed_instant)
        wheel_cos = np.cos(wheel_angle)
        wheel_sin = np.sin(wheel_angle)
        # draw arrow indicate distance, with color indicating speed
        speed_color = np.array((255-speed*8, speed*10, speed*2))
        speed_color = np.minimum(np.maximum(speed_color, 0), 255).astype(np.uint8)
        angle_shake = 2./80
        draw_on_img2 = draw_on_img.copy()
        for x in range(int(80*wheel_cos)):
            ylo = 80 + int((80-x)*(wheel_sin-angle_shake))
            yhi = 80 + int((80-x)*(wheel_sin+angle_shake)) + 1
            draw_on_img2[x,ylo:yhi] = speed_color
        img[:80,:160] = draw_on_img2
        imshow('a', img)
        # 'q' quits the replay early.
        if waitKey(100) == ord('q'):
            break
    clear()
3282032 | <reponame>sethah/allencv
from allencv.common.testing import AllenCvTestCase, ModelTestCase
from allencv.data.dataset_readers import PairedImageReader
from allencv.models import SemanticSegmentationModel
from allencv.modules.image_encoders import ResnetEncoder, FPN
from allencv.modules.image_decoders import BasicDecoder
class TestSemanticSegmentation(ModelTestCase):
    """End-to-end smoke test for the semantic segmentation model config."""

    def test_basic_experiment(self):
        """Model built from the fixture experiment can train, save and reload."""
        data_directory = AllenCvTestCase.FIXTURES_ROOT / "data" / "image_mask_reader"
        self.set_up_model(AllenCvTestCase.FIXTURES_ROOT / 'semantic_segmentation' / 'experiment.jsonnet',
                          data_directory)
        self.ensure_model_can_train_save_and_load(self.param_file)
| StarcoderdataPython |
1708450 | import os
import googleapiclient.discovery
import requests
import json
from private import private
class RouteData:
    """ General catch all for all the functions and data for the main.py """

    # User-facing message shown when no selection has been made yet.
    selection_danger = "Please submit a selection to view the other pages."

    # Fallback text shown when the Wikipedia microservice cannot be parsed.
    wiki_err = """Currently, either the service provider is down, \
Wikipedia is down, or there are multiple pages unable to be parsed for this language.
    Apologies for the inconvenience!
    """

    # Placeholder content for unimplemented syntax pages.
    syntax_err = ["#Unfortunately, syntax details for this concept is not yet implemented."]

    def try_passer(self, setting, message):
        """ Attempt to fill in the message data to be returned as the target accordingly.
            This method eliminates the code try-except simple data insertion code smell
        """
        # NOTE(review): Python evaluates arguments eagerly at the call site, so
        # `setting` is already a value here and `target = setting` can never
        # raise — the except branch is dead code, and failing expressions like
        # youtube1[i]["id"]["videoId"] crash BEFORE this method runs. Consider
        # passing a zero-argument callable instead.
        try:
            target = setting
        except:
            target = message
        return target

    def get_videos(self, language, concept):
        """ Per the youtube API documentation.
            Note that this code is based on the official documentation to make a query.
            Pass in the language and concept to be queried
        """
        os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"

        api_service = "youtube"
        api_ver = "v3"
        API_KEY = private.PrivateKeys().youtube()
        youtube = googleapiclient.discovery.build(api_service, api_ver, developerKey = API_KEY)
        req = youtube.search().list(
            part="snippet",
            maxResults=5,
            q=language + ' ' + concept
        )
        # Get only the items list
        res = req.execute()["items"]
        return res

    def process_vid_ids(self, session):
        """ Get the session dictionary and process it into its own dictionary sets per the IDS"""
        youtube_key1 = {}
        youtube_key2 = {}
        # Default video id used when a result has no usable videoId.
        default_video = "zOjov-2OZ0E"
        youtube1 = self.get_videos(session["language"], session["topic"])
        youtube2 = self.get_videos(session["language2"], session["topic"])

        # The items are in a list, since I am only requesting 5, look range up to 5.
        # This is also increase I decide to increase the amount of IDS to be queried later, my own restriction is
        # up to 5 results for my own requests.
        for each_value in range(5):
            # Use the numbers as a key.
            youtube_key1.update({str(each_value): self.try_passer(youtube1[each_value]["id"]["videoId"], default_video)})
            youtube_key2.update({str(each_value): self.try_passer(youtube2[each_value]["id"]["videoId"], default_video)})

        data = {
            "youtube1": youtube_key1,
            "youtube2": youtube_key2
        }
        return data

    def get_wikipedia(self, query):
        """ Fetch wikipedia data
            Use of group mate's microservice from:
            Thomas, <NAME>
        """
        url = "https://cs361-wiki-app.herokuapp.com/?search="
        sub_query = "_programming_language"
        # Fixes a bug with symbols
        conversion = {
            "C#": "c_sharp",
            "C++": "c_plus_plus"
        }
        if query in conversion:
            query = conversion[query]
        base_query = query + sub_query
        wikipedia_search = url + base_query
        json_query = requests.get(wikipedia_search)
        return json.loads(json_query.content)[base_query]
3344544 | <gh_stars>0
# Generated by Django 2.1.2 on 2018-12-09 13:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see file header); the literal
    # datetime below was captured at generation time, which is normal for a
    # baked-in field default. Do not hand-edit applied migrations.

    dependencies = [
        ('coupons', '0010_auto_20181209_2135'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='due_date',
            field=models.DateTimeField(default=datetime.datetime(2019, 3, 9, 22, 42, 57, 437522)),
        ),
    ]
| StarcoderdataPython |
1725138 | <reponame>jannschu/mkdocs-section-index
import collections
import logging
import mkdocs.utils
from jinja2 import Environment
from mkdocs.plugins import BasePlugin
from mkdocs.structure.nav import Navigation, Section
from mkdocs.structure.pages import Page
from . import SectionPage, rewrites
__all__ = ["SectionIndexPlugin"]
# Plugin logger, routed through MkDocs' warning filter so warnings surface
# in `mkdocs build --strict`.
log = logging.getLogger(f"mkdocs.plugins.{__name__}")
log.addFilter(mkdocs.utils.warning_filter)
class SectionIndexPlugin(BasePlugin):
    """MkDocs plugin that lets a section's first untitled page act as the
    clickable index page of that section."""

    def on_nav(self, nav: Navigation, config, files) -> Navigation:
        """Walk the nav tree breadth-first and promote each section whose first
        child is an untitled page into a SectionPage owning its former peers."""
        todo = collections.deque((nav.items,))
        while todo:
            items = todo.popleft()
            for i, section in enumerate(items):
                if not isinstance(section, Section) or not section.children:
                    continue
                todo.append(section.children)
                page = section.children[0]
                if not isinstance(page, Page):
                    continue
                assert not page.children
                if not page.title and page.url:
                    # The page becomes a section-page.
                    page.__class__ = SectionPage
                    page.is_section = page.is_page = True
                    page.title = section.title
                    # The page leaves the section but takes over children that used to be its peers.
                    section.children.pop(0)
                    page.children = section.children
                    for child in page.children:
                        child.parent = page
                    # The page replaces the section; the section will be garbage-collected.
                    items[i] = page
        # Remember the nav we produced so on_page_context can detect overwrites.
        self._nav = nav
        return nav

    def on_env(self, env: Environment, config, files) -> Environment:
        """Wrap the theme's template loader so templates get rewritten on load."""
        env.loader = self._loader = rewrites.TemplateRewritingLoader(env.loader)
        return env

    def on_page_context(self, context, page, config, nav):
        """Warn once if another plugin rebuilt the nav after ours ran."""
        if nav != self._nav:
            self._nav = nav
            log.warning(
                "It seems that the effects of section-index plugin have been lost, because another MkDocs plugin re-wrote the nav! "
                "Re-order `plugins` in mkdocs.yml so that 'section-index' appears closer to the end."
            )

    def on_post_build(self, config):
        """Warn when no supported theme template was detected during the build."""
        if not self._loader.found_supported_theme:
            log.warning(
                "section-index plugin couldn't detect a supported theme to adapt. "
                "It probably won't work as expected. "
                "See https://github.com/oprypin/mkdocs-section-index#theme-support"
            )
| StarcoderdataPython |
1661163 | <reponame>urushiyama/DeUI
from .element import Element
from ..attribute.composite import Common
from ..attribute import (
AttributeRenderer,
Disabled, Label, Selected, Value
)
class Option(Element):
    """
    Represents option for select box.

    Renders as the HTML <option> tag with the common attributes plus
    disabled/label/selected/value.
    """
    # Attributes this element accepts when rendered.
    attribute_renderer = AttributeRenderer(
        *Common(),
        Disabled(),
        Label(),
        Selected(),
        Value()
    )

    def __str__(self):
        # The HTML tag name for this element.
        return "option"
| StarcoderdataPython |
3236624 | <reponame>tpvt99/rl-course<filename>hw1/behavior_cloning.py
import os
import tensorflow as tf
import numpy as np
import gym
import mujoco_py
import pickle
import time
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from load_policy_v2 import ExpertPolicy
NUM_ROLLOUTS = 20  # expert rollouts collected when generating training data
ENV_NAME = "Humanoid-v2"  # gym/MuJoCo environment used throughout this script
MAX_STEPS = 1000  # step cap per rollout episode
BATCH_SIZE = 64  # minibatch size for behaviour-cloning training
class MyModel(Model):
    """Small MLP policy: two tanh hidden layers (128, 64) and a linear output.

    The 17-unit output matches the action dimensionality used by this script
    (presumably Humanoid-v2's action space — confirm if the env changes).
    """
    def __init__(self):
        super(MyModel, self).__init__()
        # NOTE: attribute names d1/d2/d3 are part of the checkpoint layout
        # (save_weights/load_weights key on them) — do not rename.
        self.d1 = Dense(128, activation=tf.nn.tanh)
        self.d2 = Dense(64, activation=tf.nn.tanh)
        self.d3 = Dense(17)

    @tf.function
    def call(self, inputs, training = True):
        """Forward pass: observations in, raw (unsquashed) actions out."""
        x = self.d1(inputs)
        x = self.d2(x)
        return self.d3(x)
class Meter:
    """Running-average tracker: remembers the last value, the sample count,
    the running sum and the mean."""

    def __init__(self):
        self.val = 0    # most recently recorded value
        self.count = 0  # total number of samples seen
        self.avg = 0    # running mean of all samples
        self.sum = 0    # running total

    def update(self, val, count=1):
        """Record *val* observed *count* times and refresh the running mean."""
        self.val = val
        self.count += count
        self.sum += val * count
        self.avg = self.sum / self.count
class BehaviorCloneing():
    """Behaviour cloning: regress a Keras policy onto expert (obs, action)
    pairs, generating the expert dataset with the expert policy if needed."""

    def __init__(self, model):
        ## Constants
        self.num_rollouts = NUM_ROLLOUTS
        self.env_name = ENV_NAME
        self.max_steps = MAX_STEPS
        self.batch_size = BATCH_SIZE
        ##
        # Expert policy used only when expert data must be (re)generated.
        self.policy_fn = ExpertPolicy("experts/" + self.env_name + ".pkl")
        self.ds = self.load_data("expert_data/" + self.env_name + ".pkl")
        self.model = model
        self.initialization()

    def start(self):
        """Train from scratch for 200 epochs, then save the weights."""
        template = "Epoch[{0}/{1}], Loss: {2:.3f}"
        for epoch in range(200):
            for index, data in enumerate(self.ds):
                observations, actions = data
                self.training(observations, actions)
            print(template.format(epoch + 1, 200, self.meter.avg))
        print("Saving the model")
        self.model.save_weights("bc_policy/bc_model", save_format="tf")

    def keep_training(self):
        """Resume training: build the model with one forward pass (required
        before load_weights on a subclassed model), load the checkpoint, then
        train another 200 epochs and save."""
        template = "Epoch[{0}/{1}], Loss: {2:.3f}"
        for obs, data in self.ds:
            self.model.apply(obs[None,0])
            break
        print("Load weight")
        self.model.load_weights("bc_policy/bc_model")
        for epoch in range(200):
            for index, data in enumerate(self.ds):
                observations, actions = data
                self.training(observations, actions)
            print(template.format(epoch + 1, 200, self.meter.avg))
        self.model.save_weights("bc_policy/bc_model", save_format="tf")

    def initialization(self):
        """Create optimizer, metrics and the loss meter.

        NOTE(review): loss_object and the four accuracy/loss metrics are never
        used by training() below (it computes an L2 loss directly) — they look
        like leftovers from a classification template.
        """
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.Adam()
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')
        self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
        self.meter = Meter()

    def load_data(self, filename):
        """Return a shuffled, batched tf.data.Dataset of expert (obs, action)
        pairs — loading them from *filename* when a complete cache exists,
        otherwise rolling out the expert policy and caching the result."""
        # flag stays True only if the cached file exists AND holds the
        # expected num_rollouts * max_steps observations.
        flag = True
        try:
            with open(filename, "rb") as f:
                data = pickle.loads(f.read())
                observations = data['observations'].astype(np.float32)
                actions = data['actions'].astype(np.float32)
            if observations.shape[0] != (self.num_rollouts * self.max_steps):
                flag = False
        except FileNotFoundError:
            flag = False
        if flag == True:
            print("Expert data is generated. Done")
            observations = observations.reshape(-1, observations.shape[-1])
            actions = actions.reshape(-1, actions.shape[-1])
            ds = tf.data.Dataset.from_tensor_slices((observations, actions)).shuffle(observations.shape[0]).batch(self.batch_size)
            return ds
        else:
            print("Generating new expert data")
            env = gym.make(self.env_name)
            returns = []
            observations = []
            actions = []
            for i in range(self.num_rollouts):
                print('iter', i)
                obs = env.reset()
                done = False
                totalr = 0.
                steps = 0
                while not done:
                    # Query the expert for each observation and record the pair.
                    action = self.policy_fn(obs[None, :].astype(np.float32))
                    observations.append(obs)
                    actions.append(action)
                    obs, r, done, _ = env.step(action)
                    totalr += r
                    steps += 1
                    env.render()
                    if steps % 100 == 0: print("%i/%i" % (steps, self.max_steps))
                    if steps >= self.max_steps:
                        break
                returns.append(totalr)
            print('returns', returns)
            print('mean return', np.mean(returns))
            print('std of return', np.std(returns))
            # Cache the rollouts so subsequent runs take the fast path above.
            expert_data = {'observations': np.array(observations),
                           'actions': np.array(actions)}
            if not os.path.isdir("expert_data"):
                os.makedirs("expert_data")
            with open(os.path.join('expert_data', self.env_name + '.pkl'), 'wb') as f:
                pickle.dump(expert_data, f, pickle.HIGHEST_PROTOCOL)
            observations = np.array(observations).astype(np.float32)
            actions = np.array(actions).astype(np.float32)
            observations = observations.reshape(-1, observations.shape[-1])
            actions = actions.reshape(-1, actions.shape[-1])
            ds = tf.data.Dataset.from_tensor_slices((observations, actions)).shuffle(observations.shape[0]).batch(self.batch_size)
            return ds
            # normalizing data
            #obs_mean = np.mean(observations, axis = 0)
            #obs_meansq = np.mean(np.square(observations), axis = 0)
            #obs_std = np.sqrt(np.maximum(0, obs_meansq - np.square(obs_mean)))
            #observations = (observations - obs_mean)/ (obs_std+ 1e-6)
            #actions = actions.reshape(-1, 17)

    def training(self, observations, actions):
        """One gradient step minimising the mean L2 distance between the
        model's predicted actions and the expert actions."""
        with tf.GradientTape() as tape:
            predictions = self.model(observations)
            loss = tf.reduce_mean(tf.nn.l2_loss(predictions-actions))
            self.meter.update(loss.numpy())
        gradients = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
def test():
    """Evaluate the saved behaviour-cloning policy for 100 rendered episodes
    and print per-episode returns plus their mean and standard deviation."""
    # NOTE(review): this first `returns = []` is dead — it is reassigned below.
    returns = []
    env_name = ENV_NAME
    env = gym.make(env_name)
    model = MyModel()
    print("Loading and building policy of " + env_name)
    # Load one cached expert observation purely to build the model's weights
    # (a subclassed Keras model needs a forward pass before load_weights).
    with open("expert_data/" + env_name + ".pkl", "rb") as f:
        data = pickle.loads(f.read())
        observations = data['observations'].astype(np.float32)
        actions = data['actions'].astype(np.float32)
        print(actions.shape)
    # obs_mean = np.mean(observations, axis=0)
    # obs_meansq = np.mean(np.square(observations), axis=0)
    # obs_std = np.sqrt(np.maximum(0, obs_meansq - np.square(obs_mean)))
    # observations = (observations - obs_mean) / (obs_std + 1e-6)
    model.apply(observations[:1])
    model.load_weights("bc_policy/bc_model")
    print("Built and loaded")
    max_steps = MAX_STEPS
    returns = []
    for _ in range(100):
        done = False
        totalr = 0
        obs = env.reset()
        step = 0
        # for z in range(1000):
        while not done:
            action = model(obs[None, :].astype(np.float32))
            obs, r, done, _ = env.step(action)
            env.render()
            totalr += r
            step += 1
            if step % 100 == 0: print("Iter {} / {}".format(step, max_steps))
            if step >= max_steps: break
        returns.append(totalr)
    print('returns', returns)
    print('mean return', np.mean(returns))
    print('std of return', np.std(returns))
# Script entry point: resume training the cloned policy, then evaluate it.
if __name__ == "__main__":
    model = MyModel()
    bc = BehaviorCloneing(model)
    bc.keep_training()
    test()
| StarcoderdataPython |
1637951 | <reponame>nhsuk/nhsuk-content-store
import imghdr
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.text import slugify
from wagtail.wagtailimages.models import Image as WagtailImage
class Image(WagtailImage):
    """Wagtail image extended with a caption, an auto-generated slug and a
    version counter that increments on every save."""

    # Optional caption rendered below the image.
    caption = models.CharField(
        max_length=255, blank=True,
        help_text='Optional. It will be displayed below the image.'
    )
    # Auto-generated in save(); not meant to be edited by hand.
    slug = models.SlugField(
        allow_unicode=True,
        max_length=255
    )
    # Incremented on each save of an existing image (cache-busting).
    version = models.IntegerField(default=1)

    @property
    def alt(self):
        # The image title doubles as the alt text exposed through the API.
        return self.title

    admin_form_fields = (
        'title',
        'file',
        'collection',
        'caption',
        'tags',
        'focal_point_x',
        'focal_point_y',
        'focal_point_width',
        'focal_point_height',
    )

    api_fields = [
        'alt', 'caption', 'slug', 'version', 'width', 'height'
    ]

    def save(self, *args, **kwargs):
        """Regenerate the slug (slugified title + random postfix + detected
        extension), bump the version for existing rows, then save normally."""
        # generate slug
        self._random_slug_postfix = get_random_string(4)
        # Re-open the file so imghdr can sniff the real image type from bytes.
        # NOTE(review): imghdr is deprecated and removed in Python 3.13 —
        # plan a replacement (e.g. sniffing via Pillow) before upgrading.
        self.file.open()
        self.slug = '{}-{}.{}'.format(
            slugify(self.title.rsplit('.', 1)[0])[:50],
            self._random_slug_postfix,
            (imghdr.what(self.file) or 'jpg')
        )
        # increase version number
        if self.id:
            self.version = self.version + 1
        return super().save(*args, **kwargs)
| StarcoderdataPython |
4836526 | <gh_stars>1-10
import os
import re
import fnmatch
import string
import bpy
p = os.path
def plugin_root():
    """Return the directory containing this add-on's source file."""
    return os.path.dirname(__file__)
def gen_root():
    """Return the add-on's "gen" directory (generated assets)."""
    return os.path.join(plugin_root(), "gen")
proot = None
def project_root():
    """Absolute project root: the parent of the .blend's directory, unless
    overridden via the module-level `proot`."""
    default_root = p.join(bpy.path.abspath('//'), p.pardir)
    chosen = proot if proot else default_root
    return p.abspath(chosen)
def project_name():
    """Extract the single-quoted appName value from the project build.gradle.

    Returns None when no appName line is present.
    """
    gradle_path = p.join(project_root(), "build.gradle")
    with open(gradle_path) as gradle:
        for ln in gradle:
            if "appName" in ln:
                return ln.split("'")[1]
def set_file_line(file_path, line_num, text):
    """Overwrite line `line_num` (1-based) of the file with `text`."""
    with open(file_path, 'r') as src:
        contents = src.readlines()
    contents[line_num - 1] = '{}\n'.format(text)
    with open(file_path, 'w') as dst:
        dst.writelines(contents)
def get_file_line(file_path, line_num):
    """Return line `line_num` (1-based) from the file, newline included."""
    with open(file_path, 'r') as src:
        contents = src.readlines()
    return contents[line_num - 1]
def set_file_var(file_path, var_name, value):
    """Rewrite every `var_name = ...` line in the file as `var_name = <value>;`.

    Lines are matched by containing "var_name ="; the right-hand side is
    replaced wholesale.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for i, ln in enumerate(lines):
        if var_name + " =" in ln:
            # Split only on the FIRST '=' -- the old unbounded split raised
            # ValueError on lines whose RHS itself contained an '=' (e.g. ==).
            lhs, _ = ln.split('=', 1)
            lines[i] = '= '.join([lhs, value + ';\n'])
    with open(file_path, 'w') as f:
        f.writelines(lines)
def remove_lines_containing(file_path, pattern):
    """Delete every line of the file that contains `pattern`."""
    with open(file_path, 'r') as src:
        kept = [ln for ln in src if pattern not in ln]
    with open(file_path, 'w') as dst:
        dst.writelines(kept)
def insert_lines_after(file_path, pattern, new_lines):
    """Insert `new_lines` (given without trailing newlines) immediately after
    the first line containing `pattern`; leave the file untouched when the
    pattern is absent.

    Fixes two defects of the previous version: an UnboundLocalError on empty
    files, and the insertion being silently skipped when the pattern matched
    the LAST line (its index collided with the not-found sentinel).
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if pattern in line:
            break
    else:
        # Pattern not found (or file empty): nothing to do.
        return
    insert_at = i + 1
    stamped = [l + '\n' for l in new_lines]
    lines = lines[:insert_at] + stamped + lines[insert_at:]
    with open(file_path, 'w') as f:
        f.writelines(lines)
def replace_line_containing(file_path, pattern, new_line):
    """Replace the first line containing `pattern` with `new_line`; leave the
    file untouched when the pattern is absent.

    The previous version clobbered the LAST line whenever the pattern was
    missing (the loop index fell through), and raised UnboundLocalError on
    empty files.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        if pattern in line:
            lines[i] = new_line + '\n'
            break
    else:
        # Pattern not found: do not rewrite the file at all.
        return
    with open(file_path, 'w') as f:
        f.writelines(lines)
def in_bdx_project():
    """True when the project has the expected android/assets/bdx directory."""
    bdx_assets = p.join(project_root(), "android", "assets", "bdx")
    return p.isdir(bdx_assets)
def dict_delta(d, dp):
    """Return the entries of `dp` whose keys are not present in `d`."""
    return {key: val for key, val in dp.items() if key not in d}
def src_root(project="core", target_file="BdxApp.java"):
    """Directory under <project>/src that contains `target_file` (or None)."""
    search_base = p.join(project_root(), project, "src")
    for root, dirs, files in os.walk(search_base):
        if target_file in files:
            return root
def package_name():
    """Java package of BdxApp.java, read from its `package x.y.z;` header."""
    bdx_app = p.join(src_root(), "BdxApp.java")
    with open(bdx_app, 'r') as f:
        first_line = f.readline()
    _, declared = first_line.split()
    return declared[:-1]  # drop the trailing ';'
def angel_code(path_to_fnt):
    """
    Parse an AngelCode bitmap-font (.fnt) file.

    Returns a dict keyed by tag name ("info", "common", ...) plus a "char"
    sub-dict mapping character id -> attribute dict.
    """
    import ast  # local import: used for safe literal parsing below

    def line_to_items(line):
        # Tokenize: quoted strings stay intact; commas/whitespace separate.
        words = re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', line)
        # Bare words (the tag itself) get a dummy "0" value; split only on
        # the first '=' so quoted values containing '=' don't break unpacking.
        items = [w.split('=', 1) if '=' in w else (w, "0")
                 for w in words]
        # ast.literal_eval instead of eval: same result for the int/string
        # literals .fnt files contain, but cannot execute arbitrary code.
        return [(k, ast.literal_eval(v)) for k, v in items]

    ac = {"char": {}}
    with open(path_to_fnt, "r") as f:
        for l in f:
            (key, _), *data = line_to_items(l)
            if key == "char":
                (_, char_id), *rest = data
                ac["char"][char_id] = dict(rest)
            else:
                ac[key] = dict(data)
    return ac
def listdir(path_to_dir, recursive=False, full_path=True, pattern="*", files_only=False, dirs_only=False):
    """List entries under `path_to_dir` whose names glob-match `pattern`.

    With neither flag set, each level yields directories before files
    (os.walk tuple order). Non-recursive mode stops after the top level.
    """
    matches = []
    if files_only:
        wanted = [2]          # files element of the os.walk tuple
    elif dirs_only:
        wanted = [1]          # dirs element
    else:
        wanted = [1, 2]
    for entry in os.walk(path_to_dir):
        root = entry[0]
        for idx in wanted:
            for name in entry[idx]:
                if fnmatch.fnmatch(name, pattern):
                    matches.append(p.join(root, name) if full_path else name)
        if not recursive:
            break
    return matches
def gradle_cache_root():
    """Path of gradle's artifact cache: ~/.gradle/caches/modules-2/files-2.1."""
    home = p.expanduser('~')
    return p.join(home, ".gradle", "caches", "modules-2", "files-2.1")
def find_file(pattern, path):
    """First file under `path` whose NAME glob-matches `pattern`, or None."""
    for root, _dirs, files in os.walk(path):
        for candidate in files:
            if fnmatch.fnmatch(candidate, pattern):
                return p.join(root, candidate)
def libgdx_version():
    """Single-quoted value of the gdxVersion property in build.gradle.

    Falls back to line 21 (its historical position) when no line mentions
    gdxVersion.
    """
    gradle_file = p.join(project_root(), "build.gradle")
    line_number = 21
    with open(gradle_file, "r") as build_gradle:
        for i, line in enumerate(build_gradle.readlines()):
            if "gdxVersion" in line:
                line_number = i + 1
                break
    _, version, *_ = get_file_line(gradle_file, line_number).split("'")
    return version
def internal_java_package():
    """Package declared in the first text block stored in the .blend.

    Returns None when the blend has no internal texts or no package line.
    """
    texts = list(bpy.data.texts.values())
    if not texts:
        return None
    for text_line in texts[0].lines:
        body = text_line.body
        if body.startswith("package "):
            pkg = body.split(" ")[1]
            return pkg[:-1] if body.endswith(";") else pkg
def in_packed_bdx_blend():
    # A saved .blend that embeds java sources counts as a "packed" BDX blend.
    # Note: returns the package string (truthy) rather than a strict bool.
    return bpy.data.is_saved and internal_java_package()
def split_path(path):
    """Recursively split a (relative) path into its components."""
    head, tail = p.split(path)
    if not head:
        return [tail]
    return split_path(head) + [tail]
def save_internal_java_files(to_dir, overwrite=True):
    """Write every internal text block of the .blend out to `to_dir`.

    Existing files are skipped unless `overwrite` is set. Returns the list
    of paths written.
    """
    written = []
    for text in bpy.data.texts.values():
        target = p.join(to_dir, text.name)
        if not overwrite and p.exists(target):
            continue
        written.append(target)
        with open(target, 'w') as f:
            f.write(text.as_string())
    return written
def str_to_valid_java_class_name(input_string):
    """Mangle an arbitrary string into a valid Java class name.

    Letters, digits and '_' pass through; any other character becomes
    '_<codepoint>_'. The result is prefixed with 'i' so it always starts
    with a letter.
    """
    valid = string.ascii_letters + string.digits + '_'
    pieces = ['i']  # first character must be a letter
    for ch in input_string:
        pieces.append(ch if ch in valid else '_{}_'.format(ord(ch)))
    return "".join(pieces)
| StarcoderdataPython |
1614886 | ///Under dev...ML language convertor with python | StarcoderdataPython |
86734 | <gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ShippingMethodArgs', 'ShippingMethod']
@pulumi.input_type
class ShippingMethodArgs:
    # Generated input bag (tfgen): constructor arguments for a ShippingMethod,
    # each mirrored by a @pulumi.getter/setter pair below. Do not hand-edit.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 is_default: Optional[pulumi.Input[bool]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 localized_description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 tax_category_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ShippingMethod resource.
        :param pulumi.Input[bool] is_default: One shipping method in a project can be default
        :param pulumi.Input[str] key: User-specific unique identifier for the shipping method
        :param pulumi.Input[Mapping[str, Any]] localized_description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: A Cart predicate which can be used to more precisely select a shipping method for a cart
        :param pulumi.Input[str] tax_category_id: ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        # Only set attributes that were actually provided, so unset fields
        # stay absent from the serialized inputs.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if is_default is not None:
            pulumi.set(__self__, "is_default", is_default)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if localized_description is not None:
            pulumi.set(__self__, "localized_description", localized_description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if predicate is not None:
            pulumi.set(__self__, "predicate", predicate)
        if tax_category_id is not None:
            pulumi.set(__self__, "tax_category_id", tax_category_id)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> Optional[pulumi.Input[bool]]:
        """
        One shipping method in a project can be default
        """
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        User-specific unique identifier for the shipping method
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="localizedDescription")
    def localized_description(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "localized_description")

    @localized_description.setter
    def localized_description(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "localized_description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def predicate(self) -> Optional[pulumi.Input[str]]:
        """
        A Cart predicate which can be used to more precisely select a shipping method for a cart
        """
        return pulumi.get(self, "predicate")

    @predicate.setter
    def predicate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predicate", value)

    @property
    @pulumi.getter(name="taxCategoryId")
    def tax_category_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        return pulumi.get(self, "tax_category_id")

    @tax_category_id.setter
    def tax_category_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tax_category_id", value)
@pulumi.input_type
class _ShippingMethodState:
    # Generated state bag (tfgen): same fields as ShippingMethodArgs plus the
    # provider-maintained `version` output. Do not hand-edit.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 is_default: Optional[pulumi.Input[bool]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 localized_description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 tax_category_id: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering ShippingMethod resources.
        :param pulumi.Input[bool] is_default: One shipping method in a project can be default
        :param pulumi.Input[str] key: User-specific unique identifier for the shipping method
        :param pulumi.Input[Mapping[str, Any]] localized_description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: A Cart predicate which can be used to more precisely select a shipping method for a cart
        :param pulumi.Input[str] tax_category_id: ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if is_default is not None:
            pulumi.set(__self__, "is_default", is_default)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if localized_description is not None:
            pulumi.set(__self__, "localized_description", localized_description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if predicate is not None:
            pulumi.set(__self__, "predicate", predicate)
        if tax_category_id is not None:
            pulumi.set(__self__, "tax_category_id", tax_category_id)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> Optional[pulumi.Input[bool]]:
        """
        One shipping method in a project can be default
        """
        return pulumi.get(self, "is_default")

    @is_default.setter
    def is_default(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_default", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        User-specific unique identifier for the shipping method
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="localizedDescription")
    def localized_description(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "localized_description")

    @localized_description.setter
    def localized_description(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "localized_description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def predicate(self) -> Optional[pulumi.Input[str]]:
        """
        A Cart predicate which can be used to more precisely select a shipping method for a cart
        """
        return pulumi.get(self, "predicate")

    @predicate.setter
    def predicate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predicate", value)

    @property
    @pulumi.getter(name="taxCategoryId")
    def tax_category_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        return pulumi.get(self, "tax_category_id")

    @tax_category_id.setter
    def tax_category_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tax_category_id", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "version", value)
class ShippingMethod(pulumi.CustomResource):
    # Generated resource wrapper (tfgen). The two @overload __init__
    # signatures allow construction from either an args object or kwargs;
    # the real __init__ dispatches to _internal_init. Do not hand-edit.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 is_default: Optional[pulumi.Input[bool]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 localized_description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 tax_category_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a ShippingMethod resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] is_default: One shipping method in a project can be default
        :param pulumi.Input[str] key: User-specific unique identifier for the shipping method
        :param pulumi.Input[Mapping[str, Any]] localized_description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: A Cart predicate which can be used to more precisely select a shipping method for a cart
        :param pulumi.Input[str] tax_category_id: ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[ShippingMethodArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a ShippingMethod resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param ShippingMethodArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which overload the caller used, then delegate.
        resource_args, opts = _utilities.get_resource_args_opts(ShippingMethodArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 is_default: Optional[pulumi.Input[bool]] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 localized_description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 predicate: Optional[pulumi.Input[str]] = None,
                 tax_category_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ShippingMethodArgs.__new__(ShippingMethodArgs)

            __props__.__dict__["description"] = description
            __props__.__dict__["is_default"] = is_default
            __props__.__dict__["key"] = key
            __props__.__dict__["localized_description"] = localized_description
            __props__.__dict__["name"] = name
            __props__.__dict__["predicate"] = predicate
            __props__.__dict__["tax_category_id"] = tax_category_id
            # `version` is provider-maintained output; never set on create.
            __props__.__dict__["version"] = None
        super(ShippingMethod, __self__).__init__(
            'commercetools:index/shippingMethod:ShippingMethod',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            is_default: Optional[pulumi.Input[bool]] = None,
            key: Optional[pulumi.Input[str]] = None,
            localized_description: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            predicate: Optional[pulumi.Input[str]] = None,
            tax_category_id: Optional[pulumi.Input[str]] = None,
            version: Optional[pulumi.Input[int]] = None) -> 'ShippingMethod':
        """
        Get an existing ShippingMethod resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] is_default: One shipping method in a project can be default
        :param pulumi.Input[str] key: User-specific unique identifier for the shipping method
        :param pulumi.Input[Mapping[str, Any]] localized_description: [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        :param pulumi.Input[str] predicate: A Cart predicate which can be used to more precisely select a shipping method for a cart
        :param pulumi.Input[str] tax_category_id: ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _ShippingMethodState.__new__(_ShippingMethodState)

        __props__.__dict__["description"] = description
        __props__.__dict__["is_default"] = is_default
        __props__.__dict__["key"] = key
        __props__.__dict__["localized_description"] = localized_description
        __props__.__dict__["name"] = name
        __props__.__dict__["predicate"] = predicate
        __props__.__dict__["tax_category_id"] = tax_category_id
        __props__.__dict__["version"] = version
        return ShippingMethod(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Output[Optional[bool]]:
        """
        One shipping method in a project can be default
        """
        return pulumi.get(self, "is_default")

    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[Optional[str]]:
        """
        User-specific unique identifier for the shipping method
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter(name="localizedDescription")
    def localized_description(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
        """
        [LocalizedString](https://docs.commercetools.com/api/types#localizedstring)
        """
        return pulumi.get(self, "localized_description")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def predicate(self) -> pulumi.Output[Optional[str]]:
        """
        A Cart predicate which can be used to more precisely select a shipping method for a cart
        """
        return pulumi.get(self, "predicate")

    @property
    @pulumi.getter(name="taxCategoryId")
    def tax_category_id(self) -> pulumi.Output[Optional[str]]:
        """
        ID of a [Tax Category](https://docs.commercetools.com/api/projects/taxCategories#taxcategory)
        """
        return pulumi.get(self, "tax_category_id")

    @property
    @pulumi.getter
    def version(self) -> pulumi.Output[int]:
        return pulumi.get(self, "version")
| StarcoderdataPython |
3301783 | <reponame>RaminMammadzada/security-webcam<filename>security webcam.py
#if opencv was installed through home-brew on mac
#sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import time
import datetime
from sendEmail import sendThroughEmail
from stringToImage import convertToImage
def rgb2gray(rgb_image):
    # Convert a BGR frame (cv2.VideoCapture output) to single-channel
    # grayscale with OpenCV's native conversion.
    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2GRAY)
    return gray_image
def rgb2gray1(rgb_image):
    """Manual grayscale conversion using the ITU-R BT.601 luma weights.

    Returns a float array of shape (rows, cols). The channel-0 weight was
    0.02989 -- a typo of the 0.2989 coefficient the original's own comment
    cites (~0.3/0.59/0.11 split); it is corrected here. The per-pixel Python
    loop is replaced by a vectorized expression.

    NOTE(review): frames from cv2 are BGR, so channel 0 is blue; the weights
    here assume RGB channel order -- confirm which is intended.
    """
    channels = rgb_image.astype(np.float64)
    gray_image = (channels[:, :, 0] * 0.2989
                  + channels[:, :, 1] * 0.5870
                  + channels[:, :, 2] * 0.1140)
    return gray_image
def difference(img1_gray, img2_gray, row, column):
    """Count coarse-grid sample points whose intensities differ noticeably.

    Samples every 10th pixel in each axis within the first `row` x `column`
    region, normalizes to [0, 1] by dividing by 255, and counts samples whose
    absolute difference exceeds 0.17. Vectorized replacement for the original
    per-pixel loop (which also wrapped scalars in redundant np.sum calls);
    the debug prints of row/column were dropped.
    """
    grid_a = img1_gray[0:row:10, 0:column:10] / 255
    grid_b = img2_gray[0:row:10, 0:column:10] / 255
    return int(np.count_nonzero(np.abs(grid_a - grid_b) > 0.17))
def add_timestamp():
    # Render the current wall-clock time to "date_image.png" (side effect of
    # convertToImage), then stamp it onto the bottom-right corner of
    # "simple_image.jpg" and save the result as "main_image.jpg".
    time_now = str ( datetime.datetime.now() )
    convertToImage(time_now)
    simple_image = Image.open("simple_image.jpg")
    pix_simple_image = simple_image.load()
    date_image = Image.open("date_image.png")
    pix_date_image = date_image.load()
    # NOTE(review): PIL's Image.size is (width, height), so these "row"/"col"
    # names actually hold width and height; pixel access below is [x, y].
    row_of_simple_image, col_of_simple_image = simple_image.size
    row_of_date_image, col_of_date_image = date_image.size
    for a in range( (row_of_simple_image - row_of_date_image), row_of_simple_image, 1):
        for b in range( (col_of_simple_image - col_of_date_image), col_of_simple_image, 1):
            # Copy only bright pixels (channel 1 > 230), treating the rest of
            # the timestamp image as transparent background.
            if (pix_date_image[a-(row_of_simple_image - row_of_date_image),b-(col_of_simple_image - col_of_date_image)])[1] > 230:
                pix_simple_image[a,b] = pix_date_image[a-(row_of_simple_image - row_of_date_image),b-(col_of_simple_image - col_of_date_image)]
    simple_image.save("main_image.jpg")
def mainFunction():
    # Motion-detection loop: grab two frames one second apart, count changed
    # coarse-grid sample points, and on sufficient motion save a timestamped
    # snapshot and send the alert email, then exit.
    cam = cv2.VideoCapture(0)
    diff = 0
    while True:
        img1 = cam.read()[1]
        img1_gray = rgb2gray(img1)
        time.sleep(1)  # gap between the two compared frames
        img2 = cam.read()[1]
        img2_gray = rgb2gray(img2)
        #cv2.imshow("Window",img2)
        row, column = img2_gray.shape
        diff = np.abs(difference(img1_gray, img2_gray, row, column))
        print("counter: ", diff)
        # More than 10 changed sample points counts as motion.
        if diff > 10:
            print(diff)
            cv2.imwrite("simple_image.jpg", img1)
            add_timestamp()
            plt.imshow(img2_gray)
            # NOTE(review): presumably emails the stamped snapshot -- confirm
            # against the sendEmail helper module.
            sendThroughEmail()
            plt.show()
            break
if __name__ == "__main__":
    # Guarded so that importing this module no longer starts the camera loop.
    mainFunction()
62254 | import tornado.gen
from .base import BaseApiHandler
from ..tasks import cel
class TaskHandler(BaseApiHandler):
    """REST endpoint exposing the status/result of a single Celery task."""

    @tornado.gen.coroutine
    def get(self, task_id):
        # Fetch the task metadata from the Celery result backend and reply
        # with a trimmed JSON payload: {"result": ..., "status": ...}.
        data = yield self.get_task_meta(task_id)
        result_data = {'result': data['result'], 'status': data['status']}
        self.finish(result_data)

    @staticmethod
    @tornado.gen.coroutine
    def get_task_meta(task_id):
        # NOTE(review): this coroutine body never yields -- it resolves
        # immediately with the (blocking) backend lookup's return value.
        return cel.backend.get_task_meta(task_id)
| StarcoderdataPython |
3283173 | from flask import Flask, render_template, request
import sys
import requests
from PIL import Image
import numpy as np
app = Flask(__name__)
def get_img_array(request, img_key):
    """Decode the uploaded file stored under `img_key` into a nested
    height x width x 3 list of RGB pixel values."""
    upload = request.files[img_key]
    rgb = Image.open(upload).convert('RGB')
    width, height = rgb.size
    pixels = np.array(rgb.getdata()).reshape(height, width, 3)
    return pixels.tolist()
@app.route("/", methods=["POST", "GET"])
def mainm():
    """Front page: GET renders the upload form; POST decodes the submitted
    images and forwards the style-transfer job to the maindb service."""
    print("[simplequery.py] mainm() being called...")
    if request.method != "POST":
        return render_template("index.html")
    # Submit button clicked: collect form fields and uploaded images.
    print("[simplequery.py] Request received...")
    form = request.form
    style_img_fn = form["style_img_fn"]
    content_img_fn = form["content_img_fn"]
    style_weight = form["style_weight"]
    print("[simplequery.py] Request texts parsed...")
    style_img = get_img_array(request, "style_img")
    content_img = get_img_array(request, "content_img")
    print("[simplequery.py] Request files parsed...")
    # Forward everything to maindb.py and relay its raw response body.
    print("[simplequery.py] Downstream request being made...")
    payload = {
        "style_img_fn": style_img_fn,
        "content_img_fn": content_img_fn,
        "style_img": style_img,
        "content_img": content_img,
        "style_weight": style_weight,
    }
    resp = requests.post(url=db_url, json=payload)
    print("[simplequery.py] Response returned...")
    return resp.content
if __name__ == "__main__":
    print("[simplequery.py] Running simplequery.py...")
    # The database service always listens on port 8082; its host may be
    # passed as argv[1]. NOTE(review): only the fallback URL carries a
    # trailing slash -- confirm the maindb route accepts both forms.
    db_host = sys.argv[1] if len(sys.argv) == 2 else None
    if db_host is not None:
        db_url = "http://" + db_host + ":8082"
    else:
        db_url = "http://0.0.0.0:8082/"
    app.run(host="0.0.0.0", port=8081, debug=True)
| StarcoderdataPython |
3263513 | from onelang_core import *
import OneLang.One.Ast.Types as types
import OneLang.One.ITransformer as iTrans
class CollectInheritanceInfo:
    """AST pass that links each field/method to the interface members it
    implements and records override relationships between classes."""

    def __init__(self):
        self.name = "CollectInheritanceInfo"
        # C# fix
        self.name = "CollectInheritanceInfo"

    def visit_class(self, cls_):
        all_base_iintfs = cls_.get_all_base_interfaces()
        # Partition base types: pure interfaces vs. base classes (self excluded).
        intfs = list(filter(lambda x: x != None, list(map(lambda x: x if isinstance(x, types.Interface) else None, all_base_iintfs))))
        clses = list(filter(lambda x: x != None and x != cls_, list(map(lambda x: x if isinstance(x, types.Class) else None, all_base_iintfs))))
        for field in cls_.fields:
            # Same-named fields declared on any base interface.
            field.interface_declarations = list(filter(lambda x: x != None, list(map(lambda x: next(filter(lambda f: f.name == field.name, x.fields), None), intfs))))
        for method in cls_.methods:
            # Same-named methods declared on any base interface.
            method.interface_declarations = list(filter(lambda x: x != None, list(map(lambda x: next(filter(lambda m: m.name == method.name, x.methods), None), intfs))))
            # First same-named method found on a base class is the override
            # target; link it both ways.
            method.overrides = next(filter(lambda x: x != None, list(map(lambda x: next(filter(lambda m: m.name == method.name, x.methods), None), clses))), None)
            if method.overrides != None:
                method.overrides.overridden_by.append(method)

    def visit_files(self, files):
        # Entry point: visit every class of every file.
        for file in files:
            for cls_ in file.classes:
                self.visit_class(cls_)
# should be able to remove this try block when we drop OpenMM < 7.6
try:
    import openmm as mm
    from openmm import unit
except ImportError:
    # OpenMM < 7.6 shipped under the `simtk` namespace; fall back to it.
    try:
        from simtk import openmm as mm
        from simtk import unit  # -no-cov-
    except ImportError:
        # Neither import path available: expose sentinels so callers can
        # feature-test HAS_OPENMM instead of crashing at import time.
        HAS_OPENMM = False
        mm = None
        unit = None
    else:  # -no-cov-
        HAS_OPENMM = True
else:
    HAS_OPENMM = True
| StarcoderdataPython |
1626002 | <gh_stars>0
import numpy as np
import math
extraNumber = 4 * math.pi * pow(10,-7)
def rodSpeed():
    """Prompt for rod-on-rails parameters and normalize their units.

    NOTE(review): every candidate speed formula below is commented out, so
    after gathering input the function currently computes and prints nothing.
    The dead code is preserved unchanged pending a decision on which formula
    was intended.
    """
    mass = float(input("Input mass (g): "))
    resistance = float(input("Input the resistance (Ω): "))
    distance = float(input("Input distance apart (cm): "))
    magField = float(input("Input the magnetic Field (T): "))
    emf = float(input("Input the EMF (V): "))
    time = float(input("Input the time (s): "))
    distance = distance / 100  # cm -> m
    mass = mass / 1000         # g -> kg
    #speed = ((emf/(magField*distance)))
    #speed = (emf/(magField*distance)) * (1 - (pow(math.e, (-1* ((pow(distance,2) * pow(magField,2))/(mass*resistance))*time))))
    #speed = (math.exp(((pow(time,2)*pow(magField,2))/(mass*resistance))*time))
    #print(speed)
if __name__ == "__main__":
    # Guarded so that importing this module no longer blocks on stdin prompts.
    rodSpeed()
| StarcoderdataPython |
3237594 | from output.models.sun_data.elem_decl.nillable.nillable00101m.nillable00101m1_xsd.nillable00101m1 import Root
__all__ = [
"Root",
]
| StarcoderdataPython |
1785152 | from .part import Part
from .primitives import Long, Int
class Stat(Part):
    """
    Znode stat structure

    Contains attributes:

    - **created_zxid** The zxid of the change that created this znode.
    - **last_modified_zxid** The zxid of the change that last modified
      this znode.
    - **created** The time in milliseconds from epoch when this znode
      was created.
    - **modified** The time in milliseconds from epoch when this znode
      was last modified.
    - **version** The number of changes to the data of this znode.
    - **child_version** The number of changes to the children of this znode.
    - **acl_version** The number of changes to the ACL of this znode.
    - **ephemeral_owner** The session id of the owner of this znode
      if the znode is an ephemeral node. If it is not an ephemeral node,
      it will be zero.
    - **data_length** The length of the data field of this znode.
    - **num_children** The number of children of this znode.
    - **last_modified_children** The zxid of the change that last modified
      this znode children.
    """
    # NOTE(review): tuple order presumably mirrors the record's serialized
    # layout -- keep it in sync with the wire protocol.
    parts = (
        ("created_zxid", Long),
        ("last_modified_zxid", Long),
        ("created", Long),
        ("modified", Long),
        ("version", Int),
        ("child_version", Int),
        ("acl_version", Int),
        ("ephemeral_owner", Long),
        ("data_length", Int),
        ("num_children", Int),
        ("last_modified_children", Long),
    )
class StatPersisted(Part):
    """
    Znode stat structure without the derived counters (data_length,
    num_children) present in Stat; presumably the persisted on-disk form --
    confirm against usage.
    """
    parts = (
        ("created_zxid", Long),
        ("last_modified_zxid", Long),
        ("created", Long),
        ("modified", Long),
        ("version", Int),
        ("child_version", Int),
        ("acl_version", Int),
        ("ephemeral_owner", Long),
        ("last_modified_children", Long),
    )
| StarcoderdataPython |
3320268 | from exercise344 import *
def validso3(S, e):
    """Return True when the 3x3 matrix S is within error e of being a
    skew-symmetric matrix (an element of so(3)).

    A skew-symmetric matrix has the form

        0  -x3   x2
        x3   0  -x1
       -x2  x1   0

    i.e. S == -S^T, so membership reduces to every entry of S + S^T lying
    within e of zero. This single check covers both the paired off-diagonal
    terms and the (necessarily zero) diagonal that the original verified
    separately.
    """
    A = np.asarray(S, dtype=float)
    residual = np.abs(A + A.T)
    return bool((residual <= e).all())
# testing so3: the sample matrix below is exactly skew-symmetric, so
# validso3 should report True for any non-negative tolerance e.
S = [[0, -1, 2],
     [1, 0, -3],
     [-2, 3, 0]]
e = 0.1
out = validso3(S, e)
#print(out)
| StarcoderdataPython |
1708750 | <filename>LeetCodeSolutions/python/22_Generate_Parentheses.py
class Solution(object):
    def generateParenthesis(self, n):
        """Generate all well-formed strings of n pairs of parentheses.

        :type n: int
        :rtype: List[str]
        """
        out = []

        def backtrack(prefix, opens_left, closes_left):
            # All parentheses placed: record the completed string.
            if opens_left == 0 and closes_left == 0:
                out.append(prefix)
                return
            # An opening bracket is always legal while any remain.
            if opens_left > 0:
                backtrack(prefix + '(', opens_left - 1, closes_left)
            # A closing bracket is legal only when it has an unmatched '('.
            if closes_left > opens_left:
                backtrack(prefix + ')', opens_left, closes_left - 1)

        backtrack('', n, n)
        return out
n = 3
# print() call form: the old Python-2-only `print x` statement is a syntax
# error under Python 3; the parenthesized form behaves identically on both.
print(Solution().generateParenthesis(n))
1764294 | bl_info = {
"name": "OSVR_Analog",
"category": "Object",
}
import bpy
from bpy.types import Operator
# from ClientKit import ClientKit
class OSVR_Analog(Operator):
    """OSVR_Analog"""  # blender tooltip for menu items and buttons
    bl_idname = "object.osvr_analog"  # unique identifier for buttons and menu items to reference
    bl_label = "OSVR_Analog"  # displays name in the interface
    bl_options = {'REGISTER', 'UNDO'}  # enable undo for the operator

    def execute(self, context):  # execute() is called by blender when running the operator
        # Attach a float game property named "analog" (initialized to 0) to
        # the active object, then add a logic sensor and Python controller.
        obj = context.scene.objects.active
        bpy.ops.object.game_property_new(type="FLOAT",name="analog")
        prop = obj.game.properties["analog"];
        prop.value = 0
        # NOTE(review): bpy operator calls normally take keyword arguments
        # (type=..., name=...); the positional form below looks suspect --
        # confirm against the bpy.ops.logic API.
        bpy.ops.logic.sensor_add("ALWAYS", "OSVR-Analog-Sensor")
        bpy.ops.logic.controller_add(type="PYTHON", name="OSVR-Analog-Controller")
        return {'FINISHED'}  # lets blender know the operator finished successfully
def register():
    # Called by Blender when the add-on is enabled; exposes the operator as
    # bpy.ops.object.osvr_analog (per bl_idname).
    bpy.utils.register_class(OSVR_Analog)
def unregister():
    # Called by Blender when the add-on is disabled; removes the operator.
    bpy.utils.unregister_class(OSVR_Analog)
# allows the script to be run directly from blender's text editor
# without having to install the addon (when installed, Blender invokes
# register()/unregister() itself on enable/disable)
if __name__ == "__main__":
    register()
| StarcoderdataPython |
43354 | <filename>src/data/common.py
from typing import Optional, List, TypeVar, Iterable
import re
# Dict keys shared by the example-construction code.
PREM_KEY = 'premise'
HYPO_KEY = 'hypothesis'
LABEL_KEY = 'label'
SENT_KEY = 'sentence'
ANTI_KEY = 'neg_sentence'
MASKED_SENT_KEY = 'masked_sentence'
MASKED_ANTI_KEY = 'masked_neg_sentence'

# Sentence templates asserting that the premise entails the hypothesis.
# Placeholders: {prem}/{hypo} are the premise/hypothesis verb phrases;
# {pal}/{par} and {hal}/{har} are presumably the text to the left/right of
# each verb phrase — confirm against the formatting callers.
PATTERNS = [
    "{pal} {prem} {par}, which means that {hal} {hypo} {har}.",
    "It is not the case that {hal} {hypo} {har}, let alone that {pal} {prem} {par}.",
    "{hal} {hypo} {har} because {pal} {prem} {par}.",
    "{pal} {prem} {par} because {hal} {hypo} {har}.",
    "{hal} {hypo} {har}, which means that {pal} {prem} {par}."
]

# Aligned with PATTERNS by index: per-template (premise, hypothesis) flags,
# presumably marking which verb phrase must be negated when instantiating
# the template.
NEGATION_NECESSARY = [
    (False, False),
    (False, False),
    (False, False),
    (True, True),
    (True, True)
]

# Sentence templates asserting that the premise does NOT entail the hypothesis.
ANTIPATTERNS = [
    "It is not sure that {hal} {hypo} {har} just because {pal} {prem} {par}.",
    "{pal} {prem} {par}. This does not mean that {hal} {hypo} {har}.",
    "The fact that {pal} {prem} {par} does not necessarily mean that {hal} {hypo} {har}.",
    "Even if {pal} {prem} {par}, {hal} maybe {hypo} {har}.",
    "Just because {pal} {prem} {par}, it might still not be true that {hal} {hypo} {har}."
]

# Negation flags for ANTIPATTERNS, aligned by index.
ANTI_NEGATION_NECESSARY = [
    (False, False),
    (False, False),
    (False, False),
    (False, True),
    (False, False)
]
def choose_examples(examples_A, examples_B, is_reversed: bool):
    """Return the first example from each list, swapping A and B when reversed."""
    first, second = (examples_B, examples_A) if is_reversed else (examples_A, examples_B)
    return first[0], second[0]
def negate(verb_phrase: str) -> str:
    """Negate an English verb phrase.

    Examples:
        'is happy'  -> 'is not happy'
        'runs fast' -> 'does not run fast'
        'run fast'  -> 'do not run fast'
    """
    tokens = re.split(r'\s+', verb_phrase)
    if tokens[0] in ['is', 'are', 'were', 'was']:
        # Copula: insert "not" directly after it.
        new_tokens = tokens[:1] + ['not'] + tokens[1:]
    elif tokens[0].endswith('s'):
        # Third-person singular: drop the trailing 's' ("runs" -> "does not run").
        new_tokens = ['does', 'not', tokens[0][:-1]] + tokens[1:]
    else:
        # Base/plural form: keep the verb intact ("run" -> "do not run").
        # BUG FIX: the original also stripped the last character here,
        # producing "do not ru fast" for "run fast".
        new_tokens = ['do', 'not', tokens[0]] + tokens[1:]
    return ' '.join(new_tokens)
def mask_equivalent(self, string: str, mask_token, tokenizer, add_space=True) -> str:
    """Return a run of mask tokens that tokenizes to the same length as `string`.

    NOTE(review): `self` is unused — this looks like a method copied out of a
    class; confirm callers before dropping the parameter.
    """
    separator = ' ' if add_space else ''
    probe = mask_token + separator + string.strip()
    # Subtract one for the leading mask token that was prepended to the probe.
    token_count = len(tokenizer.encode(probe, add_special_tokens=False)) - 1
    return " ".join([mask_token] * token_count)
def load_patterns(pattern_file: str, best_k_patterns: Optional[int]) -> List[str]:
    """Read "<score>\\t<pattern>" lines, keeping at most the first
    `best_k_patterns` patterns (all of them when None)."""
    patterns: List[str] = []
    with open(pattern_file) as handle:
        for raw_line in handle:
            _score, pattern = raw_line.strip().split('\t')
            patterns.append(pattern)
            # Comparison is always False when best_k_patterns is None.
            if len(patterns) == best_k_patterns:
                break
    return patterns
T = TypeVar('T')


def chunks(lst: List[T], n: int) -> Iterable[List[T]]:
    """Yield successive n-sized chunks from lst; the final chunk may be shorter."""
    for offset in range(0, len(lst), n):
        chunk = lst[offset:offset + n]
        yield chunk
| StarcoderdataPython |
1761074 | <filename>classification/classification_test.py<gh_stars>1-10
import os
import os.path as osp
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data as data
from tqdm import tqdm
def seed_worker(worker_id):
    """DataLoader worker-init hook: seed numpy/random from torch's base seed
    so draws are reproducible per worker."""
    derived_seed = torch.initial_seed() % 2 ** 32
    random.seed(derived_seed)
    np.random.seed(derived_seed)
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from classifier import MLPClassifier
from dataset.modelnet40 import LatentCapsulesModelNet40, LatentVectorsModelNet40
from utils.utils import initialize_main, load_model_for_evaluation
def main():
    """Evaluate a trained MLP classifier on saved ModelNet40 latent codes and
    append the accuracy to <save_folder>/accuracy.txt."""
    args, logdir = initialize_main()

    # set seed
    torch.manual_seed(args["seed"])
    random.seed(args["seed"])
    np.random.seed(args["seed"])

    # save_results folder
    save_folder = osp.join(logdir, args["save_folder"])
    if not osp.exists(save_folder):
        os.makedirs(save_folder)

    # device
    device = torch.device(args["device"])

    # root
    # NOTE(review): this unconditionally overwrites any configured "root"
    # with the test-split latent vectors under the log directory.
    args["root"] = osp.join(logdir, "latent_codes/model/modelnet40-test/saved_latent_vectors.npz")

    # dataloader: the file extension decides which dataset wrapper is used
    if args["root"].endswith(".npz"):
        dset = LatentVectorsModelNet40(args["root"])  # root is a npz file
    elif args["root"].endswith(".h5"):
        dset = LatentCapsulesModelNet40(args["root"])
    else:
        raise Exception("Unknown dataset.")
    loader = data.DataLoader(
        dset,
        batch_size=args["batch_size"],
        pin_memory=args["pin_memory"],
        num_workers=args["num_workers"],
        shuffle=args["shuffle"],
        worker_init_fn=seed_worker,
    )

    # classifier
    classifier = MLPClassifier(
        args["input_size"], args["output_size"], args["dropout_p"], [int(i) for i in args["hidden_sizes"].split(",")]
    ).to(device)
    # model_path may be usable as given or relative to save_folder; try both.
    # NOTE(review): the bare except also hides unrelated load errors.
    try:
        classifier = load_model_for_evaluation(classifier, args["model_path"])
    except:
        classifier = load_model_for_evaluation(classifier, osp.join(save_folder, args["model_path"]))

    # test main: count correct argmax predictions over the whole loader
    num_true = 0
    with torch.no_grad():
        for _, (batch, labels) in tqdm(enumerate(loader)):
            batch = batch.to(device)
            labels = labels.to(device).squeeze().type(torch.long)
            predicted_labels = torch.argmax(F.softmax(classifier(batch), dim=-1), dim=-1)
            num_true += (predicted_labels == labels).sum().item()

    # report
    print("Model: ", save_folder)
    log = "Accuracy: {}\n".format(num_true / len(dset))
    acc = num_true * 1.0 / len(dset)
    print(log)
    with open(os.path.join(save_folder, "accuracy.txt"), "a") as fp:
        fp.write("{} ".format(acc))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3383207 | <reponame>Fogapod/information_security_pract_8<gh_stars>0
import sys
import string
from PIL import Image
# Characters that may be embedded; each is encoded via ord() over 8 bits.
SUPPORTED_CHARS = string.printable
BITS_PER_LETTER = 8
def _embed_bit(color: int, bit: bool) -> int:
    """Return `color` adjusted so its parity encodes `bit` (odd = 1, even = 0)."""
    if (color % 2 == 0) == bit:
        # Parity is wrong: nudge by one, stepping down only at the 255 ceiling
        # so the channel value never overflows a byte.
        return color - 1 if color == 255 else color + 1
    return color


def main():
    """Hide argv[2:] in a BMP image by encoding one bit per RGB channel value.

    Bits are stored most-significant first as the parity of successive
    R, G, B channel values (alpha is left untouched).  The result is saved
    next to the source image with an ``_enc`` suffix.
    """
    if len(sys.argv) < 3:
        print("Not enough arguments provided")
        sys.exit(1)

    src_image_path = sys.argv[1]
    text = " ".join(sys.argv[2:])

    if (unsupported := [c for c in text if c not in SUPPORTED_CHARS]):
        print(f"Unsupported letters: {''.join(unsupported)}")
        sys.exit(1)

    if not src_image_path.lower().endswith(".bmp"):
        print("Please, provide a BMP image")
        sys.exit(1)

    try:
        src_image = Image.open(src_image_path).convert("RGBA")
    except Exception as e:
        print(f"Error opening image: {e}")
        sys.exit(1)

    src_width, src_height = src_image.size
    # Three usable channels (R, G, B) per pixel.
    if src_width * src_height * 3 < len(text) * BITS_PER_LETTER:
        print("Not enough pixels to encode data")
        sys.exit(1)

    x = 0
    y = 0
    # Channel cursor: 0 = R, 1 = G, 2 = B.
    current_color = 0
    for char in text:
        char_value = ord(char)
        # Most-significant bit first.
        for bit_pos in reversed(range(BITS_PER_LETTER)):
            bit = bool(char_value & (0b1 << bit_pos))
            pixel = list(src_image.getpixel((x, y)))
            # The original re-defined the bit-hiding closure on every single
            # bit; it is hoisted to the module-level _embed_bit helper.
            pixel[current_color] = _embed_bit(pixel[current_color], bit)
            src_image.putpixel((x, y), tuple(pixel))

            if current_color == 2:
                # advance coords when we are at BLUE value (3rd)
                x += 1
                if x == src_width:
                    x = 0
                    y += 1
                current_color = 0
            else:
                current_color += 1

    new_image_path = f"{src_image_path.partition('.')[0]}_enc.bmp"
    src_image.save(new_image_path)
    print(f"Wrote {len(text)} characters to {new_image_path}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
37323 | import os
import tests
from tests import at_most, compile, savefile
import subprocess
node_present = True
erlang_present = True

# Probe for optional interpreters; the corresponding tests become no-ops
# when the binary is missing.
if os.system("node -v >/dev/null 2>/dev/null") != 0:
    print " [!] ignoring nodejs tests"
    node_present = False

if (os.system("erl -version >/dev/null 2>/dev/null") != 0 or
        os.system("which escript >/dev/null 2>/dev/null") != 0):
    print " [!] ignoring erlang tests"
    erlang_present = False
sleep_sort_script='''\
#!/bin/bash
echo "Unsorted: $*"
function f() {
sleep "$1"
echo -n "$1 "
}
while [ -n "$1" ]; do
f "$1" &
shift
done
wait
echo
'''
class SingleProcess(tests.TestCase):
    """Single-process sleep/timeout tests run under the time-compression wrapper."""

    @at_most(seconds=2)
    def test_bash_sleep(self):
        self.system("sleep 10")

    @at_most(seconds=2)
    def test_bash_bash_sleep(self):
        self.system("bash -c 'sleep 120;'")

    @at_most(seconds=2)
    def test_python2_sleep(self):
        self.system('python2 -c "import time; time.sleep(10)"')

    @at_most(seconds=2)
    def test_python2_select(self):
        self.system('python2 -c "import select; select.select([],[],[], 10)"')

    @at_most(seconds=2)
    def test_python2_poll(self):
        self.system('python2 -c "import select; select.poll().poll(10000)"')

    @at_most(seconds=2)
    def test_python2_epoll(self):
        self.system('python2 -c "import select; select.epoll().poll(10000)"')

    @at_most(seconds=2)
    def test_node_epoll(self):
        if node_present:
            self.system('node -e "setTimeout(function(){},10000);"')

    def test_bad_command(self):
        self.system('command_that_doesnt exist',
                    returncode=127, ignore_stderr=True)

    def test_return_status(self):
        self.system('python2 -c "import sys; sys.exit(188)"', returncode=188)
        # -1 wraps to an unsigned 8-bit exit status of 255.
        self.system('python2 -c "import sys; sys.exit(-1)"', returncode=255)

    @at_most(seconds=2)
    @compile(code='''
#include <unistd.h>
int main() {
    sleep(10);
    return(0);
}''')
    def test_c_sleep(self, compiled=None):
        self.system(compiled)

    @at_most(seconds=2)
    @compile(code='''
#include <time.h>
int main() {
    struct timespec ts = {1, 0};
    nanosleep(&ts, NULL);
    return(0);
}''')
    def test_c_nanosleep(self, compiled=None):
        self.system(compiled)

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K true -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep_smp(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp enable +A30 +K false -noinput
-export([main/1]).
main(_) ->
    timer:sleep(10*1000),
    halt(0).
''')
    def test_erlang_sleep_smp_no_epoll(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="erl", text='''\
#!/usr/bin/env escript
%%! -smp disable +A1 +K true -noinput
-export([main/1]).
main(_) ->
    self() ! msg,
    proc(10),
    receive
        _ -> ok
    end.
proc(0) ->
    receive
        _ -> halt(0)
    end;
proc(N) ->
    Pid = spawn(fun () -> proc(N-1) end),
    receive
        _ -> timer:sleep(1000),
             Pid ! msg
    end.
''')
    def test_erlang_process_staircase(self, filename=None):
        if erlang_present:
            self.system("escript %s" % (filename,))

    @at_most(seconds=2)
    def test_perl_sleep(self):
        self.system("perl -e 'sleep 10'")

    @at_most(seconds=5)
    @savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort_large_values(self, filename=None):
        # Renamed from test_sleep_sort: a second method below had the same
        # name and silently shadowed this one, so it never ran.
        self.system("bash %s 1 12 1231 123213 13212 > /dev/null" % (filename,))

    @at_most(seconds=5)
    @savefile(suffix="sh", text=sleep_sort_script)
    def test_sleep_sort(self, filename=None):
        self.system("bash %s 5 3 6 3 6 3 1 4 7 > /dev/null" % (filename,))

    @at_most(seconds=10)
    def test_parallel_sleeps(self):
        # Three concurrent children must see virtual timestamps 0s/60s/120s apart.
        for i in range(10):
            stdout = self.system(' -- '.join(['bash -c "date +%s"',
                                              'bash -c "sleep 60; date +%s"',
                                              'bash -c "sleep 120; date +%s"']),
                                 capture_stdout=True)
            a, b, c = [int(l) for l in stdout.split()]
            assert 55 < (b - a) < 65, str(b-a)
            assert 55 < (c - b) < 65, str(c-b)
            assert 110 < (c - a) < 130, str(c-a)

    @at_most(seconds=3)
    def test_file_descriptor_leak(self):
        # The wrapper must not leave extra fds open in deeply forked children.
        out = subprocess.check_output("ls /proc/self/fd", shell=True)
        normal_fds = len(out.split('\n'))
        stdout = self.system(' -- '.join(['sleep 1',
                                          'sleep 60',
                                          'sleep 120',
                                          'bash -c "sleep 180; ls /proc/self/fd"']),
                             capture_stdout=True)
        after_fork_fds = len(stdout.split('\n'))
        assert normal_fds == after_fork_fds

    @at_most(seconds=4)
    def test_2546_wraparound(self):
        # 55 * 315360000s pushes virtual time past the 32-bit time_t limit (2038).
        if os.uname()[4] == "x86_64":
            stdout = self.system("bash -c 'for i in `seq 1 55`; do sleep 315360000; done; date +%Y'",
                                 capture_stdout=True)
            assert int(stdout) > 2500
# Allow running this test module directly without a runner.
if __name__ == '__main__':
    import unittest
    unittest.main()
| StarcoderdataPython |
1717739 | <gh_stars>0
#!/usr/bin/env python
# coding:utf-8
import sys, os, re
import distutils.core, py2exe
import optparse
import shutil
import zipfile
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="0.64.1.0"
processorArchitecture="x86"
name="Controls"
type="win32"
/>
<description>Test Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.30729.4918"
processorArchitecture="X86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
class Py2exe(py2exe.build_exe.py2exe):
    """A py2exe which archive *.py files to zip"""

    def make_lib_archive(self, zip_filename, base_dir, files,
                         verbose=0, dry_run=0):
        from distutils.dir_util import mkpath
        if not self.skip_archive:
            # Like distutils "make_archive", but we can specify the files
            # to include, and the compression to use - default is
            # ZIP_STORED to keep the runtime performance up. Also, we
            # don't append '.zip' to the filename.
            mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
            if self.compressed:
                compression = zipfile.ZIP_DEFLATED
            else:
                compression = zipfile.ZIP_STORED
            if not dry_run:
                z = zipfile.ZipFile(zip_filename, "w",
                                    compression=compression)
                for f in files:
                    try:
                        # Prefer the original source file found on sys.path
                        # (f[:-1] strips the trailing 'c' of a '.pyc' name);
                        # .next() on the generator is Python 2 only — this
                        # build script targets Python 2 / py2exe.
                        z.write((os.path.join(x, f[:-1]) for x in sys.path if os.path.isfile(os.path.join(x, f[:-1]))).next(), f[:-1])
                    except:
                        # Deliberate best-effort fallback: also catches the
                        # StopIteration raised when no source file is found,
                        # falling back to the byte-compiled file in base_dir.
                        z.write(os.path.join(base_dir, f), f)
                z.close()
            return zip_filename
        else:
            # Don't really produce an archive, just copy the files.
            from distutils.file_util import copy_file
            destFolder = os.path.dirname(zip_filename)
            for f in files:
                d = os.path.dirname(f)
                if d:
                    mkpath(os.path.join(destFolder, d), verbose=verbose, dry_run=dry_run)
                copy_file(
                    os.path.join(base_dir, f),
                    os.path.join(destFolder, f),
                    preserve_mode=0,
                    verbose=verbose,
                    dry_run=dry_run
                )
            return '.'
def optparse_options_to_dist_options(filename, options):
    """Translate parsed optparse options into distutils/py2exe setup() kwargs."""
    basename = os.path.splitext(os.path.basename(filename))[0]
    mode = 'windows' if options.windowed else 'console'

    # Per-executable target description.
    target = {
        'script': filename,
        'description': options.description or 'https://github.com/goagent/pybuild',
        'version': options.version or '1.0.0.0',
        'name': options.name or basename,
        'company_name': options.company or 'goagent.org',
        'copyright': options.copyright or 'GPL License',
        'icon_resources': [(1, options.icon)] if options.icon else [],
        # GUI builds get the side-by-side manifest embedded as a resource.
        'other_resources': [(RT_MANIFEST, 1, manifest_template % dict(prog=basename))] if mode == 'windows' else [],
    }

    # Global py2exe builder options.
    builder_options = {
        'dist_dir': 'dist',
        'compressed': 1,
        'optimize': 1,
        'dll_excludes': ['w9xpopen.exe', 'MSVCP90.dll', 'mswsock.dll', 'powrprof.dll'],
        'ascii': options.ascii or False,
        'bundle_files': options.bundle or 1,
        'excludes': options.excludes.split(',') or [],
    }

    return {
        mode: [target],
        'zipfile': options.zipfile,
        'options': {'py2exe': builder_options},
        'cmdclass': {'py2exe': Py2exe},
    }
def finalize(windows=None, console=None, service=None, com_server=None, ctypes_com_server=None, zipfile=None, options=None, cmdclass=None):
    """Clean up after setup(): move the bundled exe (and zip) out of dist/.

    Accepts the same dict produced by optparse_options_to_dist_options via
    finalize(**dist_options).
    NOTE(review): the `zipfile` parameter shadows the stdlib module name.
    """
    shutil.rmtree('build')
    # Exactly one of the five target lists is expected to be set; take its
    # first (and only) target dict.
    mode = [x for x in (windows, console, service, com_server, ctypes_com_server) if x is not None][0][0]
    py2exe_options = options['py2exe']
    basename = os.path.splitext(os.path.basename(mode['script']))[0]
    if py2exe_options['bundle_files'] == 1:
        # Single-file build: relocate the exe (plus the zip, if any) to the
        # current directory and drop the now-empty dist dir.
        dist_files = ['%s.exe' % basename]
        if zipfile is not None:
            dist_files += [zipfile]
        dist_dir = py2exe_options.get('dist_dir', 'dist')
        for filename in dist_files:
            shutil.move(os.path.join(dist_dir, filename), filename)
        shutil.rmtree(dist_dir)
def main():
    """Parse command-line options and drive the py2exe build (Python 2 only)."""
    parser = optparse.OptionParser(usage='usage: %prog [options] filename')
    parser.add_option("-w", "--windowed", dest="windowed", action="store_true", default=False, help="Use the Windows subsystem executable.")
    parser.add_option("-a", "--ascii", dest="ascii", action="store_true", default=False, help="do not include encodings.")
    parser.add_option("-b", "--bundle", dest="bundle", type="int", metavar="LEVEL", help="produce a bundle_files deployment.")
    parser.add_option("-v", "--version", dest="version", type="string", metavar="number", help="add version number to the executable.")
    parser.add_option("-d", "--description", dest="description", type="string", help="add description to the executable.")
    parser.add_option("-C", "--copyright", dest="copyright", type="string", help="add copyright to the executable.")
    parser.add_option("-n", "--name", dest="name", type="string", help="add name string to the executable.")
    parser.add_option("-c", "--company", dest="company", type="string", help="add company string to the executable.")
    parser.add_option("-i", "--icon" , dest="icon", type="string", metavar="file.ico", help="add file.ico to the executable's resources.")
    parser.add_option("-z", "--zipfile", dest="zipfile", type="string", metavar="file.zip", help="add file.zip to the extra resources.")
    parser.add_option("-x", "--excludes", dest="excludes", type="string", default='', help="py2exe excludes packages.")
    options, args = parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        sys.exit(0)
    else:
        print options, args
    filename = args[0]
    dist_options = optparse_options_to_dist_options(filename, options)
    print dist_options
    # Pretend "py2exe -q" was passed on the command line for distutils.
    sys.argv[1:] = ['py2exe', '-q']
    distutils.core.setup(**dist_options)
    finalize(**dist_options)
# Python >= 2.6 links against the VC90 CRT, which must be redistributed
# alongside the executable.
if sys.version_info[:2] > (2, 5):
    print "you need vc2008redist['Microsoft.VC90.CRT.manifest', 'msvcr90.dll']"

if __name__ == '__main__':
    main()
77575 | <filename>data_collection/gazette/spiders/pi_teresina.py
import datetime
from urllib.parse import urlencode
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class PiTeresina(BaseGazetteSpider):
    """Scrapes the official gazette listing of Teresina (PI)."""

    TERRITORY_ID = "2211001"
    name = "pi_teresina"
    allowed_domains = ["dom.pmt.pi.gov.br"]
    start_date = datetime.date(2005, 1, 7)

    def start_requests(self):
        # Request page 1 of the listing filtered to the spider's date window;
        # parse() then follows the pagination links.
        query = urlencode(
            {
                "pagina": 1,
                "filtra_data": self.start_date.strftime("%d/%m/%Y"),
                "filtra_dataf": self.end_date.strftime("%d/%m/%Y"),
            }
        )
        yield scrapy.Request(
            f"http://dom.pmt.pi.gov.br/lista_diario.php?{query}",
        )

    def parse(self, response):
        for row in response.css("tbody tr"):
            edition = row.xpath(".//td[1]/text()").get()
            raw_date = row.xpath(".//td[2]/text()").get()
            parsed_date = datetime.datetime.strptime(raw_date, "%d/%m/%Y").date()
            pdf_urls = row.css("a::attr(href)").getall()
            yield Gazette(
                date=parsed_date,
                edition_number=edition,
                file_urls=pdf_urls,
                is_extra_edition=False,
                power="executive",
            )

        # Follow every pagination link; scrapy's dupefilter prunes revisits.
        for page_link in response.css("a.paginacao::attr(href)").getall():
            yield scrapy.Request(response.urljoin(page_link))
| StarcoderdataPython |
1701680 |
from pathlib import Path
import unittest
import numpy as np
from bgen.reader import BgenFile
from tests.utils import load_gen_data
class TestBgenFile(unittest.TestCase):
    ''' class to make sure BgenFile works correctly
    '''

    @classmethod
    def setUpClass(cls):
        # Reference variants parsed from the matching GEN-format file; the
        # fetch tests compare bgen query results against this list.
        cls.gen_data = load_gen_data()

    def setUp(self):
        ''' set path to folder with test data
        '''
        self.folder = Path(__file__).parent / "data"

    def test_context_handler_closed_bgen_samples(self):
        ''' no samples available from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile.samples) > 0)

        # After the context manager exits, every accessor must raise ValueError.
        with self.assertRaises(ValueError):
            bfile.samples

    def test_context_handler_closed_bgen_varids(self):
        ''' no varids available from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile.varids()) > 0)

        with self.assertRaises(ValueError):
            bfile.varids()

    def test_context_handler_closed_bgen_rsids(self):
        ''' no rsids available from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile.rsids()) > 0)

        with self.assertRaises(ValueError):
            bfile.rsids()

    def test_context_handler_closed_bgen_positions(self):
        ''' no positions available from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile.positions()) > 0)

        with self.assertRaises(ValueError):
            bfile.positions()

    def test_context_handler_closed_bgen_length(self):
        ''' error raised if accessing length of exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile) > 0)

        with self.assertRaises(ValueError):
            len(bfile)

    def test_context_handler_closed_bgen_slice(self):
        ''' error raised if slicing variant from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile) > 0)

        with self.assertRaises(ValueError):
            var = bfile[0]

    def test_context_handler_closed_bgen_at_position(self):
        ''' error raised if getting variant at position from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile) > 0)

        with self.assertRaises(ValueError):
            var = bfile.at_position(100)

    def test_context_handler_closed_bgen_with_rsid(self):
        ''' error raised if getting variant with rsid from exited BgenFile
        '''
        path = self.folder / 'example.16bits.zstd.bgen'
        with BgenFile(path) as bfile:
            self.assertTrue(len(bfile) > 0)

        with self.assertRaises(ValueError):
            var = bfile.with_rsid('rs111')

    def test_fetch(self):
        ''' can fetch variants within a genomic region
        '''
        # NOTE(review): start/stop are unused in this test; only the
        # index check and the empty-chromosome case are exercised here.
        chrom, start, stop = '01', 5000, 50000
        bfile = BgenFile(self.folder / 'example.16bits.bgen')
        self.assertTrue(bfile._check_for_index(str(self.folder / 'example.16bits.bgen')))
        self.assertTrue(list(bfile.fetch('02')) == [])

    def test_fetch_whole_chrom(self):
        ''' fetching just with chrom gives all variants on chromosome
        '''
        chrom, start, stop = '01', 5000, 50000
        bfile = BgenFile(self.folder / 'example.16bits.bgen')

        # test fetching a whole chromosome
        sortkey = lambda x: (x.chrom, x.pos)
        for x, y in zip(sorted(bfile.fetch(chrom), key=sortkey), sorted(self.gen_data, key=sortkey)):
            self.assertEqual(x.rsid, y.rsid)
            self.assertEqual(x.chrom, y.chrom)
            self.assertEqual(x.pos, y.pos)

    def test_fetch_after_position(self):
        ''' fetching variants with chrom and start gives all variants after pos
        '''
        chrom, start, stop = '01', 5000, 50000
        bfile = BgenFile(self.folder / 'example.16bits.bgen')

        sortkey = lambda x: (x.chrom, x.pos)
        gen_vars = [x for x in sorted(self.gen_data, key=sortkey) if start <= x.pos]
        for x, y in zip(sorted(bfile.fetch(chrom, start), key=sortkey), gen_vars):
            self.assertEqual(x.rsid, y.rsid)
            self.assertEqual(x.chrom, y.chrom)
            self.assertEqual(x.pos, y.pos)

    def test_fetch_in_region(self):
        ''' fetching variants with chrom, start, stop gives variants in region
        '''
        chrom, start, stop = '01', 5000, 50000
        bfile = BgenFile(self.folder / 'example.16bits.bgen')

        sortkey = lambda x: (x.chrom, x.pos)
        gen_vars = [x for x in sorted(self.gen_data, key=sortkey) if start <= x.pos <= stop]
        for x, y in zip(sorted(bfile.fetch(chrom, start, stop), key=sortkey), gen_vars):
            self.assertEqual(x.rsid, y.rsid)
            self.assertEqual(x.chrom, y.chrom)
            self.assertEqual(x.pos, y.pos)

        # check that we don't get any variants in a region without any
        self.assertEqual(list(bfile.fetch(chrom, start * 1000, stop * 1000)), [])
| StarcoderdataPython |
def reverse(x: int) -> int:
    """Return x with its decimal digits reversed, preserving the sign."""
    sign = -1 if x < 0 else 1
    magnitude = abs(x)
    reversed_value = 0
    while magnitude:
        magnitude, digit = divmod(magnitude, 10)
        reversed_value = reversed_value * 10 + digit
    return sign * reversed_value


assert reverse(123) == 321
assert reverse(-123) == -321
| StarcoderdataPython |
1727240 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#M3 -- Meka Robotics Robot Components
#Copyright (C) 2010 Meka Robotics
#Author: <EMAIL> (<NAME>)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with M3. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import roslib; roslib.load_manifest('meka_ik')
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from meka_ik.srv import *
from PyKDL import *
from m3.ik_axis import M3IKAxis
import numpy as nu
def ik_test_client():
    """Call the 'meka_ik' ROS service once with zeroed inputs and print the reply.

    Python 2 / rospy code (uses the old `except X, e:` syntax).
    """
    rospy.wait_for_service('meka_ik')
    try:
        # NOTE(review): `tmp` is unused — presumably a leftover sanity check
        # that the MekaIK service type instantiates.
        tmp = MekaIK()
        ik = rospy.ServiceProxy('meka_ik', MekaIK)
        # right arm; zeroed 3-vector position and orientation targets;
        # seven zeroed seed joint angles
        resp1 = ik('right_arm',[0]*3,[0]*3,[0]*7)
        print resp1.success, resp1.angles_solution
        #return resp1.sum
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e

if __name__ == "__main__":
    ik_test_client()
| StarcoderdataPython |
3349732 | from simbatch.core import core as batch
import pytest
@pytest.fixture(scope="module")
def sib():
# TODO pytest-datadir pytest-datafiles vs ( path.dirname( path.realpath(sys.argv[0]) )
sib = batch.SimBatch(5, ini_file="config_tests.ini")
return sib
def test_prepare_data_directory_by_delete_all_files(sib):
    # Wipe stored task data so the following (order-dependent) tests start
    # from a clean slate.
    assert sib.sts.store_data_mode is not None
    if sib.sts.store_data_mode == 1:
        assert sib.comfun.path_exists(sib.sts.store_data_json_directory) is True
    else:
        # PRO version with sql
        pass
    # sib.tsk.clear_all_tasks_data(clear_stored_data=True)
    sib.tsk.delete_json_tasks_file()

def test_no_task_data(sib):
    # After deletion, the JSON tasks file must be gone.
    assert len(sib.sts.store_data_json_directory) > 0
    assert len(sib.sts.JSON_TASKS_FILE_NAME) > 0
    assert sib.comfun.file_exists(sib.sts.store_data_json_directory + sib.sts.JSON_TASKS_FILE_NAME) is False

def test_create_example_tasks_data(sib):
    # Creating the sample tasks saves them and returns their checksum.
    assert sib.tsk.create_example_tasks_data(do_save=True) == sib.tsk.sample_data_checksum
    assert sib.tsk.sample_data_checksum is not None
    assert sib.tsk.sample_data_total is not None
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total

def test_exist_proj_data(sib):
    assert sib.comfun.file_exists(sib.sts.store_data_json_directory + sib.sts.JSON_TASKS_FILE_NAME) is True

def test_clear_all_tasks_data(sib):
    # Clearing only in-memory data; the JSON file written above survives.
    assert sib.tsk.clear_all_tasks_data() is True
    assert sib.tsk.total_tasks == 0
    assert len(sib.tsk.tasks_data) == 0

def test_json_schemas_data(sib):
    # The stored JSON document must have a top-level "tasks" key.
    assert sib.sts.store_data_mode is not None
    if sib.sts.store_data_mode == 1:
        json_file = sib.sts.store_data_json_directory + sib.sts.JSON_TASKS_FILE_NAME
        json_tasks = sib.comfun.load_json_file(json_file)
        json_keys = json_tasks.keys()
        assert ("tasks" in json_keys) is True
def test_get_none_index_from_id(sib):
    # No tasks loaded yet, so id lookup returns None.
    assert sib.tsk.get_index_by_id(2) is None

def test_load_tasks_from_json(sib):
    json_file = sib.sts.store_data_json_directory + sib.sts.JSON_TASKS_FILE_NAME
    assert sib.comfun.file_exists(json_file) is True
    assert sib.tsk.load_tasks_from_json(json_file=json_file) is True
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total

def test_get2_index_from_id(sib):
    # Sample task ids are 1-based, indices 0-based: id 2 sits at index 1.
    assert sib.tsk.get_index_by_id(2) == 1

def test_load_schemas(sib):
    # Clearing and reloading through the default loader restores the data.
    assert sib.tsk.clear_all_tasks_data() is True
    assert sib.tsk.total_tasks == 0
    assert sib.tsk.load_tasks() is True

def test_get3_index_from_id(sib):
    assert sib.tsk.get_index_by_id(2) == 1
    assert sib.tsk.get_index_by_id(3) == 2

def test_total_tasks(sib):
    assert sib.tsk.total_tasks == sib.tsk.sample_data_total
    assert len(sib.tsk.tasks_data) == sib.tsk.sample_data_total

def test_update_current_from_id(sib):
    # update_current_from_id returns the resolved index and sets the
    # current id/index/task triplet.
    assert sib.tsk.current_task_id is None
    assert sib.tsk.current_task_index is None
    assert sib.tsk.update_current_from_id(2) == 1
    assert sib.tsk.current_task_id == 2
    assert sib.tsk.current_task_index == 1
    assert sib.tsk.current_task.task_name == "tsk 2"

def test_update_current_from_index(sib):
    # update_current_from_index returns the resolved id (the inverse lookup).
    sib.tsk.current_task_id = None
    sib.tsk.current_task_index = None
    assert sib.tsk.update_current_from_index(2) == 3
    assert sib.tsk.current_task_id == 3
    assert sib.tsk.current_task_index == 2
    assert sib.tsk.current_task.task_name == "tsk 3"
def test_current_task_details(sib):
    # Field-by-field check of sample task id 3 (made current by the
    # preceding test).
    assert sib.tsk.current_task.id == 3
    assert sib.tsk.current_task.task_name == "tsk 3"
    assert sib.tsk.current_task.state_id == 1
    assert sib.tsk.current_task.state == "INIT"
    assert sib.tsk.current_task.project_id == 2
    assert sib.tsk.current_task.schema_id == 3
    assert sib.tsk.current_task.sequence == "02"
    assert sib.tsk.current_task.shot == "004"
    assert sib.tsk.current_task.take == "b"
    assert sib.tsk.current_task.sim_frame_start == 7
    assert sib.tsk.current_task.sim_frame_end == 28
    assert sib.tsk.current_task.prev_frame_start == 8
    assert sib.tsk.current_task.prev_frame_end == 22
    assert sib.tsk.current_task.schema_ver == 4
    assert sib.tsk.current_task.task_ver == 5
    assert sib.tsk.current_task.queue_ver == 6
    assert sib.tsk.current_task.options == "o"
    assert sib.tsk.current_task.user_id == 1
    assert sib.tsk.current_task.priority == 8
    assert sib.tsk.current_task.description == "d"

def test_remove_single_schema_by_id(sib):
    # NOTE(review): name says "schema" but these operate on tasks.
    assert sib.tsk.remove_single_task(id=1) is True
    assert sib.tsk.total_tasks == 4
    assert len(sib.tsk.tasks_data) == 4

def test_remove_single_schema_by_index(sib):
    assert sib.tsk.remove_single_task(index=1) is True
    assert sib.tsk.total_tasks == 3
    assert len(sib.tsk.tasks_data) == 3

def test_print_current(sib):
    # Smoke tests: just make sure the print helpers do not raise.
    sib.tsk.print_current()

def test_print_all(sib):
    sib.tsk.print_all()
| StarcoderdataPython |
172309 | #Neural net analysis of phyllosphere data
#Code adapted from https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/
#Contents of code:
#Loading and examining data
#Feature selection
#Feature engineering: dummy variable creation, NA imputation, scaling/centering
#Model definition and compilation
#Checking model performance (standard test/train split)
import pandas as pd
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# from keras.wrappers.scikit_learn import KerasRegressor
# from sklearn.model_selection import cross_val_score
# from sklearn.model_selection import KFold
# from sklearn.preprocessing import StandardScaler
# from sklearn.pipeline import Pipeline
#Load data
data1 = pd.read_csv('../processedData/16smetadat_wrangled_for_post_modeling_analysis.csv')

#Explore the data
# NOTE(review): `data1.head` is missing call parentheses, so this line is a
# no-op attribute access rather than showing the first rows.
data1.head
data1.info()
data1.describe() #This is like R's summary command when called on numerical data
data1['taxon.x'].value_counts()
#data.columns

# Specific leaf area = leaf area per unit dry mass extracted.
data1['sla'] = data1['area_cm2'] / data1['mass_extracted_g']

#Handy code for making histograms
# import matplotlib.pyplot as plt
# data.hist(bins=50,figsize=(20,15))
# plt.show()
#Select the columns that I want to use as features
#Avoiding things that are likely duplicative or boring
#TIP: Note the use of slashes here to improve readability, and allow me to
#quickly remove certain features.
#I am removing those categorical features that need dummy coding
# NOTE(review): the trailing backslashes are redundant inside the list
# brackets (implicit line continuation applies); kept for easy commenting-out
# of individual features.
cols=["area_cm2",\
"absorbance_420",\
"absorbance_940",\
"Ambient_Humidity",\
"Ambient_Temperature",\
"B",\
"circumStem",\
"compartment.y",\
"contactless_temp",\
"deadDown",\
"densitometer.y",\
"ecs_initial",\
"ecs_max",
"elev_m",\
"FmPrime",\
"FoPrime",\
"Fs",\
"FvP.FmP",\
"G",\
"gH.",\
"height_sample",\
"julianDate",\
"Latitude",\
"Leaf_Temp_Differential",\
"LEF",\
"leaves_extracted",\
"Light_Intensity..PAR.",\
"lifehistory",\
"Longitude",\
"mass_extracted_g",\
"MEM1.y",\
"MEM2.y",\
"NPQt_MPF",\
"phenology",\
"Phi2",\
"PhiNO",\
"PhiNPQ",\
"plant_vol",\
"pressure",\
"qL",\
"R",\
"Rel_Chl_intensity",\
"Relative_Chlorophyll",\
"RFd",\
"shannons_flora.y",\
"shrubRich",\
"sidePlantSampled",\
"sla",\
"slope_perc",\
"SPAD_420_intensity",\
"SPAD_420",\
"thickness",\
"TimeofDay",\
"taxon_final",\
"toughness",\
"treeRich",\
"waterRetention"\
# "NPQt",\
,"shannonsISD"
]
#Just the hot shit
data = data1[cols]
#####################################################################
# Do one hot encoding, conversion to numeric, and scaling/centering #
#####################################################################
#Note that you can do a lot of this inside the model, which is a bit better
#since the model can be ported more easily, since wrangling is internal. I am
#not doing this for the time being.
#First we figure out which features are numeric and which are not
num_features = []
categorical_features = []
for i in data:
if data[i].dtype.kind in 'iufc':
num_features.extend([i])
else:
categorical_features.extend([i])
#The 'iufc' thing means: i int (signed), u unsigned int, f float, c complex.
#Note I explored the pipeline options with sklearn and decided against
#for this use case.
from sklearn.preprocessing import StandardScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer

# Impute missing numeric data; IterativeImputer's default estimator is a
# (Bayesian) ridge regression. transform() returns a plain numpy array, so we
# convert back to a pandas DataFrame further below.
imp = IterativeImputer(max_iter=50, verbose=0)
imp.fit(data[num_features])
imputed_df = imp.transform(data[num_features])

# Scale/center the numeric features (zero mean, unit variance).
scaler = StandardScaler()
scaled_data = scaler.fit_transform(imputed_df)
imputed_scaled_df = pd.DataFrame(scaled_data, columns=data[num_features].columns)

# Make one-hot encoded categorical variables.
taxa = pd.get_dummies(data['taxon_final'])
habit = pd.get_dummies(data['lifehistory'])
compartment = pd.get_dummies(data['compartment.y'])
phenology = pd.get_dummies(data['phenology'])

# Handy way to concatenate data frames: axis=1 adds columns (fields) rather
# than rows; pd.concat can do pretty smart merging, see help.
X = pd.concat([imputed_scaled_df, taxa, habit, compartment, phenology], axis=1)

# Do a final check for Null/NA: print the True/False counts per column.
Xbool = X.isnull()
for i in Xbool:
    print(Xbool[i].value_counts())

# Write the transformed data to disk for future use, keeping the first nine
# identifier columns from the original table alongside the features.
towrite = pd.concat([data1.iloc[:,0:9], X], axis = 1)
towrite.to_csv(path_or_buf=("../processedData/imputed_scaled_16S_metadata.csv"))
| StarcoderdataPython |
3265099 | <reponame>HaoTy/qore
""" The pseudoflow algorithm for open-pit mining problems.
See https://hochbaum.ieor.berkeley.edu/html/pub/Hochbaum-OR.pdf
"""
from networkx import DiGraph
from numpy import MAXDIMS
from pseudoflow import hpf
class Pseudoflow:
    """Wrapper around the Hochbaum pseudoflow (HPF) min-cut solver."""

    def __init__(self, MAX_FLOW: int = 1000000) -> None:
        # Configured flow cap; exposed read-only via the MAX_FLOW property.
        self._MAX_FLOW = MAX_FLOW

    @property
    def MAX_FLOW(self):
        """Read-only access to the configured maximum-flow constant."""
        return self._MAX_FLOW

    def run(self, graph, source, sink, verbose=False) -> str:
        """Solve the min-cut on *graph* and return the cut as a bitstring.

        The source and sink entries of the cut assignment are discarded;
        the remaining node labels are reversed and joined into a string.
        """
        _, cuts, info = hpf(graph, source, sink, const_cap="const")
        if verbose:
            print(info)
        # cuts maps node -> [membership]; drop the first (source) and last
        # (sink) entries so only the real problem nodes remain.
        memberships = [entry[0] for entry in cuts.values()][1:-1]
        # Reverse the node order and render each 0/1 label as a character.
        return "".join(str(bit) for bit in reversed(memberships))
| StarcoderdataPython |
3313568 | <reponame>dalvarezperez/umse
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* UMSE Antivirus Agent Example
* Author: <NAME> <<EMAIL>[at]gmail[dot]com>
* Module: Main
* Description: This module launch the "UMSE Antivirus Agent Example" System Try Icon.
*
* Copyright (c) 2019-2020. The UMSE Authors. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from trayicon import SystemTrayIcon
def main():
    """Launch the UMSE Antivirus Agent system-tray icon and run the Qt loop."""
    app = QtWidgets.QApplication(sys.argv)
    parent_widget = QtWidgets.QWidget()
    tray_icon = SystemTrayIcon(QtGui.QIcon("resources\\agent.ico"), parent_widget)
    tray_icon.show()
    # Block until the Qt event loop exits, then propagate its exit code.
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
#!/usr/bin/python3
from sorters.sort_base import sort_base
from sort_util.data_tools import data_store
class bubble_sort(sort_base):
    """Bubble sort: repeatedly swap adjacent out-of-order elements until a
    full pass completes with no swaps."""

    def __init__(self) -> None:
        super().__init__()

    def name(self) -> str:
        """Human-readable name of this sorting algorithm."""
        return 'Bubble'

    def _do_sort(self, data: data_store) -> None:
        """Sort *data* in place, redrawing once per full pass.

        Fix: the pass-completion flag was named ``sorted``, shadowing the
        ``sorted`` builtin; renamed to ``is_sorted``.
        """
        is_sorted = False
        while not is_sorted:
            is_sorted = True
            for i in range(data.size() - 1):
                if data.is_greater_than(i, i + 1):
                    # skip_draw defers rendering until the end of the pass.
                    data.swap(i, i + 1, skip_draw=True)
                    is_sorted = False
            data.draw()
| StarcoderdataPython |
6797 | <filename>keystoneclient/auth/identity/v3/federated.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
import six
from keystoneclient.auth.identity.v3 import base
from keystoneclient.auth.identity.v3 import token
__all__ = ['FederatedBaseAuth']
@six.add_metaclass(abc.ABCMeta)
class FederatedBaseAuth(base.BaseAuth):
    """Base class for v3 federated authentication plugins.

    Subclasses implement the protocol-specific exchange that yields an
    unscoped token; this base class then rescopes that token to any
    requested project/domain/trust target.
    """

    # Plugin class used to exchange the unscoped federated token for a
    # scoped one in get_auth_ref().
    rescoping_plugin = token.Token

    def __init__(self, auth_url, identity_provider, protocol, **kwargs):
        """Class constructor accepting following parameters:

        :param auth_url: URL of the Identity Service
        :type auth_url: string

        :param identity_provider: name of the Identity Provider the client
                                  will authenticate against. This parameter
                                  will be used to build a dynamic URL used to
                                  obtain unscoped OpenStack token.
        :type identity_provider: string

        :param protocol: name of the federation protocol used to build the
                         federated authentication URL.
        :type protocol: string
        """
        super(FederatedBaseAuth, self).__init__(auth_url=auth_url, **kwargs)
        self.identity_provider = identity_provider
        self.protocol = protocol

    @classmethod
    def get_options(cls):
        # Extend the base plugin's oslo.config options with the two
        # federation-specific settings.
        options = super(FederatedBaseAuth, cls).get_options()
        options.extend([
            cfg.StrOpt('identity-provider',
                       help="Identity Provider's name"),
            cfg.StrOpt('protocol',
                       help='Protocol for federated plugin'),
        ])
        return options

    @property
    def federated_token_url(self):
        """Full URL where authorization data is sent."""
        values = {
            'host': self.auth_url.rstrip('/'),
            'identity_provider': self.identity_provider,
            'protocol': self.protocol
        }
        url = ("%(host)s/OS-FEDERATION/identity_providers/"
               "%(identity_provider)s/protocols/%(protocol)s/auth")
        url = url % values
        return url

    def _get_scoping_data(self):
        # Scoping targets gathered from the base plugin's attributes; when
        # every value is falsy, get_auth_ref() leaves the token unscoped.
        return {'trust_id': self.trust_id,
                'domain_id': self.domain_id,
                'domain_name': self.domain_name,
                'project_id': self.project_id,
                'project_name': self.project_name,
                'project_domain_id': self.project_domain_id,
                'project_domain_name': self.project_domain_name}

    def get_auth_ref(self, session, **kwargs):
        """Authenticate retrieve token information.

        This is a multi-step process where a client does federated authn
        receives an unscoped token.

        If an unscoped token is successfully received and scoping information
        is present then the token is rescoped to that target.

        :param session: a session object to send out HTTP requests.
        :type session: keystoneclient.session.Session

        :returns: a token data representation
        :rtype: :py:class:`keystoneclient.access.AccessInfo`
        """
        auth_ref = self.get_unscoped_auth_ref(session)
        scoping = self._get_scoping_data()
        if any(scoping.values()):
            # Re-authenticate with the plain token plugin to obtain a token
            # scoped to the requested target.
            token_plugin = self.rescoping_plugin(self.auth_url,
                                                 token=auth_ref.auth_token,
                                                 **scoping)
            auth_ref = token_plugin.get_auth_ref(session)
        return auth_ref

    @abc.abstractmethod
    def get_unscoped_auth_ref(self, session, **kwargs):
        """Fetch unscoped federated token."""
| StarcoderdataPython |
1700474 | <filename>util_scripts/pysystestxml_upgrader.py
# PySys System Test Framework, Copyright (C) 2006-2021 <NAME>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
A standalone utility script that recursively upgrades PySys tests from the old pysystest.xml+run.py format
to the new pysystest.py format. XML comments are copied into the new file, and there is support for using
your version control system's "move" commands to avoid losing the history of your existing run.py files.
"""
import os.path, stat, getopt, logging, traceback, sys
import inspect
import xml.dom.minidom
import re
# XML comments matching any of these boilerplate strings are skipped when
# copying comments into the generated pysystest.py.
IGNORED_COMMENTS = [
    r"""<skipped reason=""/>""",
    r"""uncomment this to skip the test:
<skipped reason=""/>""",
    r"""To skip the test, uncomment this (and provide a reason): <skipped reason=""/>""",
    r"""To provide a bug/story/requirement id for requirements tracing, uncomment this: <requirement id=""/>""",
]

# Ruler comment inserted under __pysys_title__ as a line-length guide.
LINE_LENGTH_GUIDE = '=' * 80

# Defaults applied when pysystest.xml omits the authors/created attributes.
DEFAULT_AUTHORS = ""
DEFAULT_CREATED = None

# Module-level accumulator of warnings produced across all converted tests.
allwarnings = []
def xmlToPy(xmlpath):
    """Convert one pysystest.xml descriptor into pysystest.py descriptor text.

    Parses the XML file at *xmlpath* and returns a ``(py, comments)`` tuple:
    the generated ``__pysys_*__`` descriptor section and the list of XML
    comments copied across. Conversion warnings are appended to the
    module-level ``allwarnings`` list (prefixed into the output as comments).
    """
    pypath = xmlpath[:-4]+'.py'

    # Accumulators filled in by the DOM walk below.
    d = {}
    comments = []
    groupsinherit = ['true']
    modesinherit = ['true']
    oldmodes = []
    d['user_data'] = {}
    warnings = []

    doc = xml.dom.minidom.parse(xmlpath)
    root = doc.getElementsByTagName('pysystest')[0]

    def visitNode(n):
        # Depth-first walk of the DOM, harvesting comments and the known
        # descriptor elements into the accumulators above.
        if n.nodeType==n.COMMENT_NODE:
            t = re.sub(' +$', '', inspect.cleandoc(n.data).strip().replace('\t', '    '), flags=re.MULTILINE)
            if t and t not in IGNORED_COMMENTS: comments.append(t)
            return
        if n.nodeType!=n.ELEMENT_NODE: return

        # Extract the (text + CDATA) content of this element.
        t = u''
        for cn in n.childNodes:
            if (cn.nodeType in [n.TEXT_NODE, n.CDATA_SECTION_NODE]) and cn.data:
                t += cn.data
        t = t.strip()

        tag = n.tagName
        if tag == 'pysystest':
            d['authors'] = n.getAttribute('authors') or DEFAULT_AUTHORS
            d['created'] = n.getAttribute('created') or DEFAULT_CREATED
            d['type'] = n.getAttribute('type')
            if n.getAttribute('state') == 'skipped':
                # Bug fix: this previously did "n['skipped_reason'] = ...",
                # which item-assigns a DOM node and raises TypeError; the
                # reason belongs in the descriptor dict d.
                d['skipped_reason'] = 'Skipped (reason not specified)'
            elif n.getAttribute('state') == 'deprecated':
                warnings.append(f'state=deprecated was removed during migration since it is no longer supported in pysystest.py')
        elif tag == 'id-prefix':
            if t:
                warnings.append(f'Ignored id-prefix="{t}" as these are only supported at the pysysdirconfig.xml level for pysystest.py files')
        elif tag.replace('-dir','') in 'title,purpose,input,output,reference'.split(','):
            d[tag.replace('-dir','')] = t
        elif tag == 'skipped':
            d['skipped_reason'] = n.getAttribute('reason')
        elif tag == 'groups':
            groupsinherit = [(n.getAttribute('inherit') or 'true').lower()]
            if n.getAttribute('groups'):
                d['groups'] = n.getAttribute('groups')
        elif tag == 'group':
            if t: d['groups'] = d.get('groups','')+','+t
        elif tag == 'modes':
            if t.startswith('lambda'):
                # Already a new-style modes lambda; keep it verbatim.
                d['modes'] = t
            else:
                modesinherit = [(n.getAttribute('inherit') or 'true').lower()]
        elif tag == 'mode':
            if t: oldmodes.append(t)
        elif tag == 'execution-order':
            d['execution_order_hint'] = n.getAttribute('hint')
        elif tag == 'class':
            d['python_class'] = n.getAttribute('name')
            if d.get('python_class')=='PySysTest': del d['python_class']
            d['python_module'] = n.getAttribute('module')
            if d.get('python_module') in ['run', 'run.py']: del d['python_module']
        elif tag == 'user-data':
            d['user_data'][n.getAttribute('name')] = n.getAttribute('value') or t
        elif tag == 'requirement':
            if n.getAttribute('id'):
                d['requirements'] = d.get('requirements', [])+[n.getAttribute('id')]
        elif tag in 'description,classification,data,traceability,requirements'.split(','):
            pass  # structural container elements with nothing to extract
        else:
            assert False, 'Unexpected element: %s'%tag
        for cn in n.childNodes: visitNode(cn)
    visitNode(root)

    # The old type=manual attribute becomes membership of the "manual" group.
    if d.pop('type','') == 'manual' and 'manual' not in d.get('groups',''):
        d['groups'] = d.get('groups','')+',manual'
        warnings.append(f'type=manual was converted to a group during migration since the type= attribute is no longer supported in pysystest.py')
    if d.get('groups') or groupsinherit[0].lower()!='true': d['groups'] = ', '.join(x.strip() for x in d.get('groups','').split(',') if x.strip())+'; inherit='+groupsinherit[0]
    if d.get('user_data'): d['user_data'] = repr(d['user_data'])
    doc.unlink()

    if oldmodes:
        if any(m[0].islower() for m in oldmodes):
            warnings.append(f'Some modes in this test start with a lowercase letter; these will be renamed to start with a capital letter (unless the enforceModeCapitalization=false project property is specified) - {oldmodes}')
        # Old-style <mode> entries become a new-style modes lambda.
        modes = 'lambda helper: '
        if modesinherit[0].lower() == 'true':
            modes += "helper.inheritedModes + "
        modes += repr([{'mode':m} for m in oldmodes])
        d['modes'] = modes

    def cleanIndentation(x):
        # Normalize leading whitespace: strip the common indent, convert each
        # run of 8 leading spaces to a tab, then indent the block one level.
        x = inspect.cleandoc(x)
        x = re.sub('^ +', lambda m: '\t'*(len(m.group(0))//8), x, flags=re.MULTILINE)
        return x.replace("\n","\n\t")

    # Emit the __pysys_*__ descriptor lines in their conventional order.
    py = f'__pysys_title__ = r""" {d.pop("title", "")} """\n'
    py += f'# {LINE_LENGTH_GUIDE}\n\n'
    py += f'__pysys_purpose__ = r""" {cleanIndentation(d.pop("purpose", ""))}\n\t"""\n\n'

    value = d.pop('id-prefix', None)
    # Bug fix: the f-string below was previously evaluated and discarded
    # (missing "py +="), so any id-prefix value was silently lost.
    if value: py += f'__pysys_id_prefix__ = "{value}"\n'

    py += f'__pysys_authors__ = "{d.pop("authors","")}"\n'
    value = d.pop("created", None)
    if value: py += f'__pysys_created__ = "{value}"\n\n'

    value = d.pop("requirements", None)
    if value: py += f'__pysys_traceability_ids__ = "{", ".join(value)}"\n'
    py += f'{"#" if not d.get("groups") else ""}__pysys_groups__ = "{d.pop("groups","myGroup; inherit=true")}"\n'
    value = d.pop('modes', None)
    if value:
        py += f'__pysys_modes__ = r""" {cleanIndentation(value)} """'+'\n\n'

    value = d.pop('execution_order_hint', None)
    if value: py += f'__pysys_execution_order_hint__ = {value}\n'

    for x in [
        '__pysys_python_class__',
        '__pysys_python_module__',
        '__pysys_input_dir__',
        '__pysys_reference_dir__',
        '__pysys_output_dir__',
    ]:
        key = x.replace('__pysys_','').replace('__','').replace('_dir','')
        value = d.pop(key, None)
        if value:
            py += f'{x} = "{value}"\n'

    value = d.pop('user_data', None)
    if value: py += f'__pysys_user_data__ = r""" {value} """\n\n'

    py += f'{"#" if not d.get("skipped_reason") else ""}__pysys_skipped_reason__ = "{d.pop("skipped_reason","Skipped until Bug-1234 is fixed")}"\n\n'

    assert not d, 'Internal error - unexpected items: %s'%repr(d)

    # Add warnings as comments in the generated file.
    for w in warnings:
        allwarnings.append(f'{w}: {pypath}')
        # Bug fix: this line was missing its f-string prefix, so the literal
        # text "{w}" was written instead of the warning message.
        py += f'# Warning from pysystest.xml->pysystest.py conversion: {w}'+'\n'
    if warnings: py += '\n'

    if comments:
        py += '# Comments copied from pysystest.xml (but original position of comments not retained):'+'\n\n'
        for c in comments:
            py += '# '+c.replace('\n', '\n# ')+'\n\n'
        allwarnings.append(f'XML comments were copied to end of the pysystest.py descriptor section but they probably need moving to the right position: {pypath}')

    return py, comments
def upgradeMain(args):
    """Upgrade every pysystest.xml+run.py test under the current directory
    to the single-file pysystest.py format.

    :param args: command-line arguments (excluding the program name): an
        optional ``--dry-run`` flag plus the delete and rename commands to
        use, e.g. ``["git rm", "git mv"]``.
    :return: a non-zero exit code on usage error, else None.
    """
    options = [x for x in args if x.startswith('-')]
    args = [x for x in args if not x.startswith('-')]
    dryrun = '--dry-run' in options
    if (options and not dryrun) or len(args) != 2:
        # Bug fix: the original used "'...'%options" with no %s placeholder,
        # which raised TypeError whenever an unknown option was present.
        print('Unknown options or missing arguments: %s' % options)
        print('')
        print('Automatically upgrade pysystest.xml+run.py all tests under the current directory to pysystest.py')
        print('Usage:')
        print('pysystestxml_upgrader.py [--dry-run] "DELETE CMD" "RENAME CMD"')
        print('')
        print('For example:')
        print('   pysystestxml_upgrader.py "rm" "mv"')
        print('   pysystestxml_upgrader.py "del" "move"')
        print('   pysystestxml_upgrader.py "svn rm" "svn mv"')
        print('   pysystestxml_upgrader.py "git rm" "git mv"')
        print('')
        # Typo fix: "cooy" -> "copy".
        print('Be sure to avoid having uncommitted changes in your working copy before running this script.')
        print('This script uses tabs not spaces for indentation; fix up afterwards if you prefer spaces.')
        return 1
    deleter, renamer = args
    print(f'dry-run = {dryrun}, delete="{deleter}", rename="{renamer}"')

    count = 0
    errors = []
    allcomments = {}
    for (dirpath, dirnames, filenames) in os.walk('.'):
        dirnames.sort() # do this in a deterministic order
        if not ('pysystest.xml' in filenames and 'run.py' in filenames): continue
        print('Upgrading: %s'%dirpath)
        assert 'pysystest.py' not in filenames, dirpath

        with open(f'{dirpath+os.sep}run.py', 'rb') as f:
            runpy = f.read()
        xmlpath = os.path.normpath(f'{dirpath+os.sep}pysystest.xml')
        pysystestpath = xmlpath[:-4]+'.py'
        try:
            pydescriptor, comments = xmlToPy(xmlpath)
        except Exception as ex:
            traceback.print_exc()
            errors.append(f'Failed to extract descriptor from {xmlpath} - {ex}')
            continue
        for c in comments:
            allcomments[c] = allcomments.get(c, 0)+1

        # Work out which encoding the combined file will effectively use:
        # ASCII if possible, else whatever PEP 263 coding comment (if any)
        # run.py declares; warn when the UTF-8 descriptor may clash with it.
        runpyencoding = None
        try:
            pydescriptor.encode('ascii')
        except Exception as ex:
            runpyencoding = re.search(r"[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)".encode('ascii'), runpy)
            if runpyencoding: runpyencoding = runpyencoding.group(1)
            runpyencoding = (runpyencoding or b'').decode('ascii').upper()
            if runpyencoding != 'UTF-8':
                try:
                    runpy.decode('ascii')
                except:
                    pass
                else:
                    runpyencoding = 'ASCII'
                allwarnings.append(f'Non-ASCII characters found in descriptor will be added as UTF-8 to pysystest.py which uses encoding={runpyencoding or "unknown"}; this may need fixing up manually: {pysystestpath}')

        if dryrun: print(pydescriptor.replace('\t', '<tab>'))
        if not dryrun:
            # Rename run.py first (preserving VCS history), then prepend the
            # descriptor, then delete the old XML file.
            if os.system(f'{renamer} {dirpath+os.sep}run.py {dirpath+os.sep}pysystest.py') != 0:
                errors.append(f'Failed to rename run.py to {pysystestpath}, aborting')
                break
            with open(pysystestpath, 'wb') as f:
                f.write(pydescriptor.replace('\n', os.linesep).encode(runpyencoding or 'UTF-8'))
                f.write(runpy)
            if os.system(f'{deleter} {xmlpath}') != 0:
                errors.append(f'Failed to delete {xmlpath}, aborting')
                break
        sys.stdout.flush()
        count += 1

    # Summarize the run into a log file, then echo the log to stdout.
    with open('pysys_upgrader.log', 'w') as log:
        log.write(f'\nSuccessfully upgraded {count} tests under {os.getcwd()}\n')
        if allcomments:
            log.write(f'\n{len(allcomments)} unique comments found (more frequent last)\n')
            for c in sorted(allcomments, key=lambda x: allcomments[x]): log.write(f' - {allcomments[c]} occurrences of: """{c}"""\n\n')
        log.write(f'\n{len(allwarnings)} warnings\n')
        for w in allwarnings: log.write(f'   {w}\n')
        log.write(f'\n{len(errors)} errors\n')
        for e in errors: log.write(f'   {e}\n')
    with open('pysys_upgrader.log', 'r') as log:
        sys.stdout.write(log.read())
# Run only when executed as a script, so that importing this module (e.g.
# for testing) does not immediately walk and rewrite the working directory.
if __name__ == '__main__':
    sys.exit(upgradeMain(sys.argv[1:]) or 0)
| StarcoderdataPython |
from django.db.models.signals import post_save
from django.dispatch import receiver
from researchhub_case.constants.case_constants import APPROVED, INITIATED
from researchhub_case.models import AuthorClaimCase
from researchhub_case.utils.author_claim_case_utils import (
get_new_validation_token,
reward_author_claim_case,
send_validation_email,
)
from user.utils import merge_author_profiles
from utils import sentry
@receiver(
    post_save,
    sender=AuthorClaimCase,
    dispatch_uid='author_claim_case_post_create_signal',
)
def author_claim_case_post_create_signal(
    created,
    instance,
    sender,
    update_fields,
    **kwargs
):
    """On first save of an INITIATED claim case with no token yet, generate
    a validation token and email it to the requestor."""
    needs_token = (
        created
        and instance.status == INITIATED
        and instance.validation_token is None
    )
    if not needs_token:
        return
    try:
        generated_time, token = get_new_validation_token()
        instance.token_generated_time = generated_time
        instance.validation_token = token
        # Intentionally send the email before incrementing the attempt count.
        send_validation_email(instance)
        instance.validation_attempt_count += 1
        instance.save()
    except Exception as exception:
        sentry.log_error(exception)
@receiver(
    post_save,
    sender=AuthorClaimCase,
    dispatch_uid='merge_author_upon_approval',
)
def merge_author_upon_approval(
    created,
    instance,
    sender,
    update_fields,
    **kwargs
):
    """When an existing, token-validated claim case reaches APPROVED against
    an unclaimed author profile, reward the requestor and merge the target
    profile into theirs."""
    eligible = (
        created is not True
        and instance.status == APPROVED
        and instance.validation_token is not None
        and instance.target_author.user is None
    )
    if not eligible:
        return
    try:
        # Reward first, then merge, then repoint the case at the merged
        # (requestor's) profile.
        requestor_author = instance.requestor.author_profile
        claimed_papers = instance.target_author.authored_papers.all()
        reward_author_claim_case(requestor_author, claimed_papers)
        merge_author_profiles(requestor_author, instance.target_author)
        instance.target_author = requestor_author
        instance.save()
    except Exception as exception:
        print("merge_author_upon_approval: ", exception)
        sentry.log_error(exception)
| StarcoderdataPython |
162750 | <gh_stars>1-10
# 序列化是将一个数据结构或者对象转换为连续的比特位的操作,
# 进而可以将转换后的数据存储在一个文件或者内存中,
# 同时也可以通过网络传输到另一个计算机环境,
# 采取相反方式重构得到原数据。
# 请设计一个算法来实现二叉树的序列化与反序列化。
# 这里不限定你的序列 / 反序列化算法执行逻辑,
# 你只需要保证一个二叉树可以被序列化为一个字符串,
# 并且将这个字符串反序列化为原始的树结构。
# 示例:
# 你可以将以下二叉树:
# 1
# / \
# 2 3
# / \
# 4 5
# 序列化为 "[1,2,3,null,null,4,5]"
# 提示:
# 这与 LeetCode 目前使用的方式一致,
# 详情请参阅 LeetCode 序列化二叉树的格式。
# 你并非必须采取这种方式,
# 你也可以采用其他的方法解决这个问题。
# 说明: 不要使用类的成员 / 全局 / 静态变量来存储状态,
# 你的序列化和反序列化算法应该是无状态的。
# Definition for a binary tree node.
class TreeNode(object):
    """A binary tree node holding a value and left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Codec:
    """Serialize/deserialize a binary tree via a level-order (BFS) listing.

    The wire format is the ``str()`` of a Python list, e.g.
    ``"[1, 2, 3, None, None, 4, 5]"``, with the trailing run of ``None``
    placeholders trimmed off.
    """

    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        if root is None:
            return "[]"
        values = []
        pending = [root]
        # Breadth-first walk; missing children are recorded as None so the
        # positions of later nodes line up for deserialization.
        while pending:
            node = pending.pop(0)
            if isinstance(node, TreeNode):
                values.append(node.val)
            else:
                values.append(node)
            if node is not None:
                pending.append(node.left)
                pending.append(node.right)
        # Trim the trailing run of None placeholders.
        while values and values[-1] is None:
            values.pop()
        return str(values)

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        values = eval(data)  # parse the list literal (trusted input only)
        if not values:
            return None
        root = TreeNode(values[0])
        pending = [root]
        index = 1
        # Breadth-first reconstruction mirrors the serialization order: each
        # dequeued parent consumes the next two slots (left, then right),
        # and only non-None children are enqueued for further expansion.
        while pending:
            parent = pending.pop(0)
            if index < len(values):
                if values[index] is not None:
                    parent.left = TreeNode(values[index])
                    pending.append(parent.left)
                index += 1
            if index < len(values):
                if values[index] is not None:
                    parent.right = TreeNode(values[index])
                    pending.append(parent.right)
                index += 1
        return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
if __name__ == "__main__":
    # Build the sample tree (node values equal their labels):
    #        0
    #       / \
    #      1   2
    #     / \   \
    #    3   4   5
    #           / \
    #          6   7
    #             /
    #            8
    nodes = [TreeNode(i) for i in range(9)]
    nodes[0].left, nodes[0].right = nodes[1], nodes[2]
    nodes[1].left, nodes[1].right = nodes[3], nodes[4]
    nodes[2].right = nodes[5]
    nodes[5].left, nodes[5].right = nodes[6], nodes[7]
    nodes[7].left = nodes[8]

    # Round-trip the tree through the codec and show the wire format.
    codec = Codec()
    tree_str = codec.serialize(nodes[0])
    tree = codec.deserialize(tree_str)
    print(tree_str)

    # Level-order traversal of the reconstructed tree.
    frontier = [tree]
    while frontier:
        node = frontier.pop(0)
        print(node.val, end=" -> ")
        if node.left:
            frontier.append(node.left)
        if node.right:
            frontier.append(node.right)
| StarcoderdataPython |
3263605 | <filename>s13_debug_unittest/bug.py<gh_stars>10-100
# Sample input mixing odd, even, negative, and zero values.
numbers = [1, 2, 3, 4, 10, -4, -7, 0]
def all_even(num_list):
    """Return the even numbers from *num_list*, preserving their order.

    Uses a list comprehension instead of the manual append loop, and fixes
    the PEP 8 spacing around ``%``.
    """
    return [number for number in num_list if number % 2 == 0]
# Expected output: [2, 4, 10, -4, 0]
# Fix: stripped trailing extraction junk that made this line a syntax error.
print(all_even(numbers))
68405 | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the classroom page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import classroom_services
from core.domain import config_domain
from core.domain import topic_fetchers
import feconf
class ClassroomPage(base.BaseHandler):
    """Renders the classroom page."""

    @acl_decorators.does_classroom_exist
    def get(self, _):
        """Handles GET requests.

        The classroom URL fragment is validated by the decorator; this
        handler only serves the page template (data is fetched separately
        via ClassroomDataHandler).
        """
        self.render_template('classroom-page.mainpage.html')
class ClassroomDataHandler(base.BaseHandler):
    """Manages the data that needs to be displayed to a learner on the classroom
    page.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.does_classroom_exist
    def get(self, classroom_url_fragment):
        """Handles GET requests."""
        classroom = classroom_services.get_classroom_by_url_fragment(
            classroom_url_fragment)
        summaries = topic_fetchers.get_multi_topic_summaries(
            classroom.topic_ids)
        rights = topic_fetchers.get_multi_topic_rights(classroom.topic_ids)

        # Pair each topic summary with its rights object (the two lists are
        # parallel, both keyed by topic_ids); skip topics with no summary
        # and annotate the rest with their publication state.
        topic_summary_dicts = []
        for summary, topic_rights in zip(summaries, rights):
            if summary is None:
                continue
            summary_dict = summary.to_dict()
            summary_dict['is_published'] = topic_rights.topic_is_published
            topic_summary_dicts.append(summary_dict)

        self.values.update({
            'topic_summary_dicts': topic_summary_dicts,
            'topic_list_intro': classroom.topic_list_intro,
            'course_details': classroom.course_details,
            'name': classroom.name
        })
        self.render_json(self.values)
class ClassroomPromosStatusHandler(base.BaseHandler):
    """The handler for checking whether the classroom promos are enabled."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    # This prevents partially logged in user from being logged out
    # during user registration.
    REDIRECT_UNFINISHED_SIGNUPS = False

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests by returning the current value of the
        CLASSROOM_PROMOS_ARE_ENABLED config property as JSON."""
        self.render_json({
            'classroom_promos_are_enabled': (
                config_domain.CLASSROOM_PROMOS_ARE_ENABLED.value)
        })
class DefaultClassroomRedirectPage(base.BaseHandler):
    """Redirects to the default classroom page."""

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests by redirecting the learner to the default
        classroom's /learn/<url_fragment> page."""
        self.redirect('/learn/%s' % constants.DEFAULT_CLASSROOM_URL_FRAGMENT)
| StarcoderdataPython |
from Core.IFactory import IFactory
from Regs.Block_D import RD140
class RD140Factory(IFactory):
    """Factory that builds RD140 (Block D) register objects."""

    def create_block_object(self, line):
        """Create an RD140 from *line*, remember it on the factory, and
        return it."""
        block = RD140()
        block.reg_list = line
        self.rd140 = block
        return block
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# License: Apache License 2.0
import os
import platform
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
def list_cpp_files(package_dir='wikipedia2vec'):
    """Collect the Cython sources under *package_dir* for Extension building.

    Returns a list of ``(dotted_module_name, kwargs)`` pairs, one per
    ``.pyx`` file found, where *kwargs* carries the source path and the
    platform-appropriate C++11 compile/link flags.
    """
    # MSVC needs no flags; clang on macOS additionally needs libc++.
    if sys.platform.startswith("win"):
        compile_args = []
        link_args = []
    elif platform.system() == 'Darwin':
        compile_args = ['-Wno-unused-function', '-std=c++11', '-stdlib=libc++']
        link_args = ['-std=c++11', '-stdlib=libc++']
    else:
        compile_args = ['-Wno-unused-function', '-std=c++11']
        link_args = ['-std=c++11']

    extensions = []
    for dir_name, _, file_names in os.walk(package_dir):
        for file_name in file_names:
            stem, extension = os.path.splitext(file_name)
            if extension != '.pyx':
                continue
            # Turn the directory path into a dotted module name.
            dotted_name = '.'.join(dir_name.split(os.sep) + [stem])
            extensions.append((dotted_name, dict(
                sources=[os.path.join(dir_name, file_name)],
                language='c++',
                extra_compile_args=compile_args,
                extra_link_args=link_args,
            )))
    return extensions
# Copied from https://github.com/RaRe-Technologies/gensim/blob/master/setup.py
class custom_build_ext(build_ext):
    """build_ext subclass that defers importing numpy until build time.

    This allows ``setup.py`` to be executed (e.g. for metadata queries)
    before numpy is installed; the numpy include directory is only needed
    when the extensions are actually compiled.
    """

    def finalize_options(self):
        build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        # https://docs.python.org/2/library/__builtin__.html#module-__builtin__
        # __builtins__ may be a dict or a module depending on how this module
        # is imported, so handle both representations.
        if isinstance(__builtins__, dict):
            __builtins__["__NUMPY_SETUP__"] = False
        else:
            __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Package metadata and build configuration. Cython/C++ extension modules are
# discovered by list_cpp_files() and compiled via custom_build_ext (which
# defers the numpy import until build time).
setup(
    name='wikipedia2vec',
    version='1.0.4',
    description='A tool for learning vector representations of words and entities from Wikipedia',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://wikipedia2vec.github.io/',
    packages=find_packages(exclude=('tests*',)),
    cmdclass=dict(build_ext=custom_build_ext),
    ext_modules=[Extension(module_name, **kwargs) for (module_name, kwargs) in list_cpp_files()],
    include_package_data=True,
    # Installs the "wikipedia2vec" command-line tool.
    entry_points={
        'console_scripts': [
            'wikipedia2vec=wikipedia2vec.cli:cli',
        ]
    },
    keywords=['wikipedia', 'embedding', 'wikipedia2vec'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=[
        'click',
        'jieba',
        'joblib',
        'lmdb',
        'marisa-trie',
        'mwparserfromhell',
        'numpy',
        'scipy',
        'six',
        'tqdm',
    ],
    # numpy must be present at build time for custom_build_ext above.
    setup_requires=['numpy'],
    tests_require=['nose'],
    test_suite='nose.collector',
)
| StarcoderdataPython |
1677811 | <reponame>AccelByte/accelbyte-python-sdk
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..models import ModelsBatchDownloadLogsRequest
from ..models import ModelsListTerminatedServersResponse
from ..models import ResponseError
from ..operations.all_terminated_servers import BatchDownloadServerLogs
from ..operations.all_terminated_servers import ListAllTerminatedServers
@same_doc_as(BatchDownloadServerLogs)
def batch_download_server_logs(body: ModelsBatchDownloadLogsRequest, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated thin wrapper: build the BatchDownloadServerLogs operation and
    # execute it synchronously. The docstring is copied from the operation
    # class by @same_doc_as, so none is defined here.
    request = BatchDownloadServerLogs.create(
        body=body,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(BatchDownloadServerLogs)
async def batch_download_server_logs_async(body: ModelsBatchDownloadLogsRequest, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated thin wrapper: async variant of batch_download_server_logs;
    # identical request construction, executed via run_request_async.
    request = BatchDownloadServerLogs.create(
        body=body,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ListAllTerminatedServers)
def list_all_terminated_servers(deployment: Optional[str] = None, end_date: Optional[str] = None, game_mode: Optional[str] = None, limit: Optional[int] = None, namespace: Optional[str] = None, next_: Optional[str] = None, party_id: Optional[str] = None, pod_name: Optional[str] = None, previous: Optional[str] = None, provider: Optional[str] = None, region: Optional[str] = None, session_id: Optional[str] = None, start_date: Optional[str] = None, user_id: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated thin wrapper: forward every optional filter/pagination
    # parameter to the ListAllTerminatedServers operation and execute it
    # synchronously. Docstring supplied by @same_doc_as.
    request = ListAllTerminatedServers.create(
        deployment=deployment,
        end_date=end_date,
        game_mode=game_mode,
        limit=limit,
        namespace=namespace,
        next_=next_,
        party_id=party_id,
        pod_name=pod_name,
        previous=previous,
        provider=provider,
        region=region,
        session_id=session_id,
        start_date=start_date,
        user_id=user_id,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(ListAllTerminatedServers)
async def list_all_terminated_servers_async(deployment: Optional[str] = None, end_date: Optional[str] = None, game_mode: Optional[str] = None, limit: Optional[int] = None, namespace: Optional[str] = None, next_: Optional[str] = None, party_id: Optional[str] = None, pod_name: Optional[str] = None, previous: Optional[str] = None, provider: Optional[str] = None, region: Optional[str] = None, session_id: Optional[str] = None, start_date: Optional[str] = None, user_id: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async variant of list_all_terminated_servers; awaits the shared async runner.
    # NOTE: generated code (see file header) -- regenerate instead of hand-editing.
    request = ListAllTerminatedServers.create(
        deployment=deployment,
        end_date=end_date,
        game_mode=game_mode,
        limit=limit,
        namespace=namespace,
        next_=next_,
        party_id=party_id,
        pod_name=pod_name,
        previous=previous,
        provider=provider,
        region=region,
        session_id=session_id,
        start_date=start_date,
        user_id=user_id,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
| StarcoderdataPython |
4812911 | # wikimedia functions .wikimedia.py
"""
collection of existing Python tools and Wikimedia/Data endpoints
"""
import os
import pandas as pd
import xml.etree.ElementTree as ET
from amidict import Resources
# SPARQL keywords
WIKIDATA_QUERY_URL = 'https://query.wikidata.org/sparql'
RESULTS_NS = "http://www.w3.org/2005/sparql-results#"
RESULTS_NS_BR = "{"+RESULTS_NS+"}"
# query
FORMAT = 'format'
QUERY = 'query'
# parsing
XSD_DEC = "http://www.w3.org/2001/XMLSchema#decimal"
DATATYPE = "datatype"
TYPE = "type"
LITERAL = "literal"
NAME = "name"
URI = "uri"
VALUE = "value"
JSON = "json"
XML_LANG = "xml:lang"
HEAD = "head"
VARS = "vars"
BINDING = "binding"
BINDINGS = "bindings"
RESULT = "result"
RESULTS = "results"
#test query
TEST_QUERY = """
SELECT
?countryLabel ?population ?area
# ?medianIncome ?age
WHERE {
?country wdt:P463 wd:Q458.
OPTIONAL { ?country wdt:P1082 ?population }
OPTIONAL { ?country wdt:P2046 ?area }
OPTIONAL { ?country wdt:P3529 ?medianIncome }
OPTIONAL { ?country wdt:P571 ?inception.
BIND(year(now()) - year(?inception) AS ?age)
}
SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
# LIMIT 3
"""
# this fails - bad syntax
TEST_QUERY1 = """
SELECT
?item ?itemLabel ?GRINid ?itemAltLabel
WHERE {
?item wdt:P31 wd:Q16521.
?item wdt:P105 wd:Q34740.
?item wdt:P1421 ?GRINid.
}
SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
# LIMIT 3
"""
TEST_QUERY2 = """
SELECT
?item ?itemLabel ?GRINid ?itemAltLabel
WHERE {
?item wdt:P31 wd:Q16521.
?item wdt:P105 wd:Q34740.
?item wdt:P1421 ?GRINid.
SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
LIMIT 20
"""
PYAMIDICT, RESOURCE_DIR, DICT202011, TEMP_DIR, DICTIONARY_TOP = Resources().get_resources()
class WikimediaLib():
    """Query Wikidata's SPARQL endpoint and convert results to pandas.

    Two input paths are supported:
    * live queries via post_sparql()/submit_process_sparql() (JSON results)
    * previously saved SPARQL XML result files via read_sparql_xml_to_pandas()

    Column names always come from the SPARQL SELECT variable names.
    """

    def __init__(self):
        pass

    def help(self):
        """Print a placeholder help message."""
        print("help for Wikimedia routines NYI")

    def read_sparql_xml_to_pandas(self, file):
        """Parse a SPARQL XML results *file* into a pandas DataFrame.

        Expected document shape::

            <?xml version='1.0' encoding='UTF-8'?>
            <sparql xmlns='http://www.w3.org/2005/sparql-results#'>
              <head>
                <variable name='wikidata'/>
                ...
              </head>
              <results>
                <result>
                  <binding name='wikidata'>
                    <uri>http://www.wikidata.org/entity/Q889</uri>
                  </binding>
                  ...
                  <binding name='term'>
                    <literal xml:lang='en'>Afghanistan</literal>
                  </binding>
                </result>

        Returns None when the file cannot be read or parsed.
        """
        print("read_sparql_xml")
        try:
            root = ET.parse(file).getroot()
        # was a bare `except:`; only I/O and parse failures should be swallowed
        except (OSError, ET.ParseError):
            print("cannot parse XML", file)
            return None
        result_list = root.findall("./*/" + RESULTS_NS_BR + RESULT)
        rowdata = []
        for result in result_list:
            binding_list = result.findall("./" + RESULTS_NS_BR + BINDING)
            new_row_dict = {}
            for binding in binding_list:
                name = binding.attrib[NAME]
                child = list(binding)[0]
                # tags arrive as "{namespace}localname"; split off the namespace
                parts = child.tag.partition("}")
                if child.tag.startswith("{"):
                    tag = parts[2]
                    namespace = parts[0][1:]
                else:
                    tag = child.tag
                    namespace = None
                if namespace is not None and namespace != RESULTS_NS:
                    print("unexpected namespace in sparql xml", namespace,
                          "expected", RESULTS_NS)
                    continue
                val = child.text
                # only <uri> and <literal> cells are currently understood;
                # anything else is reported but still stored as raw text
                if tag not in (URI, LITERAL):
                    print("unknown tag", tag)
                new_row_dict[name] = val
            rowdata.append(new_row_dict)
        return pd.DataFrame(rowdata)

    def post_sparql(self, query, format="json"):
        """Send *query* to the Wikidata endpoint; see post_request()."""
        return self.post_request(WIKIDATA_QUERY_URL, query, format)

    def post_request(self, url, query, format=JSON):
        """Send *query* to *url* and return the decoded JSON reply.

        Returns None for an empty query, a non-200 response, a non-JSON
        *format*, or a connection failure.
        https://requests.readthedocs.io/en/master/ HTTP for humans
        """
        import requests
        if not query:
            print("empty query")
            return None
        try:
            req = requests.get(url, params={FORMAT: format, QUERY: query})
            print("req", req, req.status_code)
            if str(req.status_code) != "200":
                print("HTTP error: ", req.status_code)
                return None
            return req.json() if format == JSON else None
        except requests.exceptions.ConnectionError:
            print("Cannot connect")
            return None

    def test_query_wikipedia(self):
        """Ad-hoc experiments with the `wikipedia` search library.

        https://wikipedia.readthedocs.io/en/latest/code.html
        """
        import wikipedia as wp
        print("Bear", wp.search("bear"))
        print("reality_summary", wp.summary("reality checkpoint"))
        # print("pmr_page", wp.page(title="<NAME>", preload=True))
        page = wp.WikipediaPage(title="Ocimum_kilimandscharicum", preload=True)
        print("categories", page.categories,
              ## these are quite large
              "\n", "content", page.content,
              # "\n", page.coordinates,
              # "\n", "html", page.html,
              # "\n", "images", page.images,
              # "\n", "links", page.links
              )

    # background reading: https://janakiev.com/blog/wikidata-mayors/

    def submit_process_sparql(self, query):
        """Submit *query* to Wikidata and tabulate the results.

        Uses the SPARQL SELECT names as column names.
        *query* SPARQL query (assumed correct)
        Returns a DataFrame (columns from SPARQL SELECT) or None if failed.
        """
        # the original created a second WikimediaLib here; `self` already
        # provides the needed methods (and pandas is imported at module level)
        query_results_dict = self.post_sparql(query)
        if query_results_dict is None:
            print("no results from SPARQL")
            return None
        # reply layout: {"head": {"vars": [...]}, "results": {"bindings": [...]}}
        head_dict = query_results_dict[HEAD]
        colhead_array = head_dict[VARS]
        print("column headings", colhead_array)
        results_dict = query_results_dict[RESULTS]
        bindings = results_dict[BINDINGS]
        return self.create_data_frame_from_bindings(bindings, colhead_array)

    def create_data_frame_from_bindings(self, bindings, colhead_array):
        """Convert SPARQL JSON *bindings* rows into a DataFrame.

        Columns are *colhead_array*; xsd:decimal literals become floats,
        cells absent from a row become None.
        """
        rowdata = []
        for row_dict in bindings:
            new_row_dict = {}
            for colhead in colhead_array:
                val = None
                if colhead in row_dict:
                    cell_dict = row_dict[colhead]
                    datatype_ = cell_dict.get(DATATYPE, None)
                    type_ = cell_dict.get(TYPE, None)
                    # there may be other types of output - don't know
                    val = cell_dict.get(VALUE, None)
                    if type_ == LITERAL:
                        if XSD_DEC == datatype_:
                            val = float(cell_dict.get(VALUE))
                    elif type_ == URI:
                        pass
                    else:
                        print("Cannot parse type = ", type_, cell_dict)
                new_row_dict[colhead] = val
            rowdata.append(new_row_dict)
        return pd.DataFrame(rowdata)

    def analyze(self, sparql_xml_file, term="term", desc="wikidataDescription"):
        """Load a saved SPARQL XML file and run both analysis passes."""
        df = self.read_sparql_xml_to_pandas(sparql_xml_file)
        self.analyze_general(df, term=term, desc=desc)
        self.analyze_specific(df, desc=desc)

    def analyze_general(self, df, desc="description", term="term"):
        """Print an overview of *df* (full frame, columns, term/description)."""
        print("\n", "cevspql_df", df, "\n")
        print("cols", df.columns)
        print(df[term], df[desc])
        return df

    def analyze_specific(self, df, desc):
        """Derive a lower-cased 'region' column from the text after ' in '
        in the *desc* column and print its distribution."""
        region = "region"
        df[region] = df[desc].str.partition(' in ')[2].str.lower()
        print(region, df[region])
        print(df.sort_values(by=[region]))
        print(df[region].value_counts())
        # fig, ax = plt.subplots()
        # df[region].value_counts().plot(ax=ax, kind='bar')
def main():
    """Demo driver: run TEST_QUERY2 against Wikidata, then analyze a
    previously saved SPARQL XML dump.

    Fixes: the original contained a stray triple-quoted string that looked
    like a disabled early return, and imported matplotlib without using it.
    """
    wm = WikimediaLib()
    wm.help()
    print("running query2")
    df0 = wm.submit_process_sparql(query=TEST_QUERY2)
    print("cevspql_df", df0)
    sparql_xml_file = os.path.join(
        DICTIONARY_TOP, "openVirus202011/country/work/sparql_final_dict.xml")
    wm.analyze(sparql_xml_file)
    # wm.test_query_wikipedia()  # interactive experiment; needs `wikipedia` pkg
    print("end of wikipedia main")
#========================
if __name__ == "__main__":
    main()
# BUG FIX: the previous `else: main()` branch ran main() whenever this
# module was *imported*, triggering network calls as an import side effect.
| StarcoderdataPython |
3382664 | <filename>examples/seedpython/scripts/sorting/quick_sort.py
# [[ Data ]]
a = [8, 1, 0, 5, 6, 3, 2, 4, 7, 1]
# [[ Index(start, end) ]]
def partition(start, end, a):
    # Partition a[start..end] (inclusive) around the pivot a[start], in place.
    # On return the pivot sits at the returned index with all elements <= pivot
    # to its left.  The [[ ... ]] annotations are preserved verbatim; they
    # appear to be markers for an external tool -- do not remove.
    # [[ Index ]]
    pivot_index = start
    # [[ Save ]]
    pivot = a[pivot_index]
    while start < end:
        # advance past elements that belong on the left of the pivot
        while start < len(a) and a[start] <= pivot:
            start += 1
        # retreat past elements that belong on the right of the pivot
        while a[end] > pivot:
            end -= 1
        if (start < end):
            # [[ Swap(start, end) ]]
            a[start], a[end] = a[end], a[start]
    # place the pivot into its final position
    # [[ Swap(end, pivot_index) ]]
    a[end], a[pivot_index] = a[pivot_index], a[end]
    return end
# [[ Bounds(start, end) ]]
def quick_sort(start, end, a):
    # Recursively quicksort a[start..end] in place (bounds are inclusive).
    if start < end:
        mid = partition(start, end, a)
        quick_sort(start, mid - 1, a)
        quick_sort(mid + 1, end, a)
quick_sort(0, len(a) - 1, a)
print(a)
| StarcoderdataPython |
85828 | <reponame>dolong2110/Algorithm-By-Problems-Python
from typing import Optional
# Definition for a binary tree node.
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node with an isCousins helper (LeetCode 993)."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val      # node payload
        self.left = left    # left child or None
        self.right = right  # right child or None

    # BUG FIX: the annotation must be the string "TreeNode" -- the bare name
    # is evaluated at method-definition time, before the class exists,
    # which raised NameError when this module was imported.
    def isCousins(self, root: Optional["TreeNode"], x: int, y: int) -> bool:
        """Return True iff the nodes valued *x* and *y* are cousins:
        same depth, different parents.

        Returns False when either value is absent from the tree (the
        original raised IndexError in that case).
        """
        found = []  # (depth, parent_value) for each match, preorder

        def visit(node: Optional["TreeNode"], depth: int, parent_val: int):
            if node is None:
                return
            if node.val == x or node.val == y:
                found.append((depth, parent_val))
            visit(node.left, depth + 1, node.val)
            visit(node.right, depth + 1, node.val)

        visit(root, 0, -1)  # -1 marks "no parent" for the root
        if len(found) != 2:
            return False
        (d1, p1), (d2, p2) = found
        return d1 == d2 and p1 != p2
69155 | from joblib import delayed, Parallel
import os
import sys
import glob
from tqdm import tqdm
import cv2
import argparse
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def str2bool(s):
    """Parse 'true'/'false' (any letter case) into a bool (argparse helper).

    Raises ValueError for anything else.
    """
    normalized = s.lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    raise ValueError('Need bool; got %r' % s)
def extract_video_opencv(v_path, f_root, dim=240):
    """Extract every frame of one video to JPEG files.

    v_path: path of a single video, laid out as .../<class>/<name>.<ext>
    f_root: root dir; frames go to <f_root>/<class>/<name>/image_%05d.jpg
    dim: target length of the shorter side after resizing
    """
    v_class = v_path.split('/')[-2]
    v_name = os.path.basename(v_path)[0:-4]
    out_dir = os.path.join(f_root, v_class, v_name)
    # exist_ok avoids the check-then-create race when many workers
    # materialise the same class directory in parallel
    os.makedirs(out_dir, exist_ok=True)

    vidcap = cv2.VideoCapture(v_path)
    try:
        nb_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
        height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
        if width == 0 or height == 0:
            # unreadable/corrupt video -- skip it
            print(v_path, 'not successfully loaded, drop ..')
            return
        new_dim = resize_dim(width, height, dim)
        success, image = vidcap.read()
        count = 1
        while success:
            image = cv2.resize(image, new_dim, interpolation=cv2.INTER_LINEAR)
            # quality from 0-100, 95 is default, high is good
            cv2.imwrite(os.path.join(out_dir, 'image_%05d.jpg' % count), image,
                        [cv2.IMWRITE_JPEG_QUALITY, 80])
            success, image = vidcap.read()
            count += 1
        if nb_frames > count:
            print('/'.join(out_dir.split('/')[-2::]),
                  'NOT extracted successfully: %df/%df' % (count, nb_frames))
    finally:
        # BUG FIX: the original leaked the capture handle when the video
        # failed to load (early return before vidcap.release())
        vidcap.release()
def resize_dim(w, h, target):
    """Scale (w, h) so the shorter side equals *target*, keeping aspect ratio."""
    if w < h:
        # portrait: width is the short side
        return (int(target), int(target * h / w))
    # landscape or square: height is the short side
    return (int(target * w / h), int(target))
def main_UCF101(v_root, f_root):
    # Extract frames for every .avi under v_root (one subdir per action
    # class) into f_root, 32 videos at a time via joblib.
    print('extracting UCF101 ... ')
    print('extracting videos from %s' % v_root)
    print('frame save to %s' % f_root)
    if not os.path.exists(f_root): os.makedirs(f_root)
    v_act_root = glob.glob(os.path.join(v_root, '*/'))
    print(len(v_act_root))
    for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
        v_paths = glob.glob(os.path.join(j, '*.avi'))
        v_paths = sorted(v_paths)
        Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_HMDB51(v_root, f_root):
    # Same per-class .avi extraction as main_UCF101, for the HMDB51 layout.
    print('extracting HMDB51 ... ')
    print('extracting videos from %s' % v_root)
    print('frame save to %s' % f_root)
    if not os.path.exists(f_root): os.makedirs(f_root)
    v_act_root = glob.glob(os.path.join(v_root, '*/'))
    for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
        v_paths = glob.glob(os.path.join(j, '*.avi'))
        v_paths = sorted(v_paths)
        Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_JHMDB(v_root, f_root):
    # Same per-class .avi extraction as main_UCF101, for the JHMDB layout.
    print('extracting JHMDB ... ')
    print('extracting videos from %s' % v_root)
    print('frame save to %s' % f_root)
    if not os.path.exists(f_root): os.makedirs(f_root)
    v_act_root = glob.glob(os.path.join(v_root, '*/'))
    for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
        v_paths = glob.glob(os.path.join(j, '*.avi'))
        v_paths = sorted(v_paths)
        Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_kinetics400(v_root, f_root, dim=128):
    # Extract frames for the Kinetics400 train and val splits.
    # Skips any class directory that already exists under f_root, so a
    # crashed run can be resumed (delete the last, partial class first).
    print('extracting Kinetics400 ... ')
    for basename in ['train', 'val']:
        v_root_real = v_root + '/' + basename
        if not os.path.exists(v_root_real):
            print('Wrong v_root'); sys.exit()
        f_root_real = f_root + '/' + basename
        print('Extract to: \nframe: %s' % f_root_real)
        if not os.path.exists(f_root_real):
            os.makedirs(f_root_real)
        v_act_root = glob.glob(os.path.join(v_root_real, '*/'))
        v_act_root = sorted(v_act_root)
        # if resume, remember to delete the last video folder
        for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
            v_paths = glob.glob(os.path.join(j, '*.mp4'))
            v_paths = sorted(v_paths)
            # for resume:
            v_class = j.split('/')[-2]
            out_dir = os.path.join(f_root_real, v_class)
            if os.path.exists(out_dir): print(out_dir, 'exists!'); continue
            print('extracting: %s' % v_class)
            # dim = 150 (crop to 128 later) or 256 (crop to 224 later)
            Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root_real, dim=dim) for p in tqdm(v_paths, total=len(v_paths)))
def main_Panasonic(v_root, f_root, dim=240):
    """Extract frames for every .mkv under v_root into f_root.

    dim: shorter-side size forwarded to extract_video_opencv.  Added
    because the __main__ block calls this with dim=256, which previously
    raised TypeError (the parameter did not exist); the default matches
    the extractor's own default, so existing callers are unaffected.
    """
    print('extracting Panasonic ... ')
    print('extracting videos from %s' % v_root)
    print('frame save to %s' % f_root)
    if not os.path.exists(f_root):
        os.makedirs(f_root)
    v_act_root = glob.glob(os.path.join(v_root, '*/'))
    print(len(v_act_root))
    for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
        v_paths = sorted(glob.glob(os.path.join(j, '*.mkv')))
        Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root, dim=dim)
                            for p in tqdm(v_paths, total=len(v_paths)))
if __name__ == '__main__':
    # v_root is the video source path, f_root is where to store frames
    # edit 'your_path' here:
    #dataset_path = '/vision/u/nishantr/data'
    # One boolean flag per dataset; any combination may be extracted in one run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ucf101', default=False, type=str2bool)
    parser.add_argument('--jhmdb', default=False, type=str2bool)
    parser.add_argument('--hmdb51', default=False, type=str2bool)
    parser.add_argument('--kinetics', default=False, type=str2bool)
    parser.add_argument('--panasonic', default=False, type=str2bool)
    parser.add_argument('--dataset_path', default='/scr/nishantr/data', type=str)
    parser.add_argument('--dim', default=128, type=int)
    args = parser.parse_args()
    dataset_path = args.dataset_path
    if args.ucf101:
        main_UCF101(v_root=dataset_path + '/ucf101/videos/', f_root=dataset_path + '/ucf101/frame/')
    if args.jhmdb:
        main_JHMDB(v_root=dataset_path + '/jhmdb/videos/', f_root=dataset_path + '/jhmdb/frame/')
    if args.hmdb51:
        main_HMDB51(v_root=dataset_path+'/hmdb/videos', f_root=dataset_path+'/hmdb/frame')
    if args.panasonic:
        # NOTE(review): confirm main_Panasonic accepts a dim keyword argument
        main_Panasonic(v_root=dataset_path+'/action_split_data/V1.0', f_root=dataset_path+'/frame', dim=256)
    if args.kinetics:
        # Kinetics supports only --dim 128 or 256; each writes a distinct tree.
        if args.dim == 256:
            main_kinetics400(
                v_root=dataset_path + '/kinetics/video', f_root=dataset_path + '/kinetics/frame256', dim=args.dim
            )
        else:
            assert args.dim == 128, "Invalid dim: {}".format(args.dim)
            main_kinetics400(v_root=dataset_path+'/kinetics/video', f_root=dataset_path+'/kinetics/frame', dim=128)
    # main_kinetics400(v_root='your_path/Kinetics400_256/videos',
    #                  f_root='your_path/Kinetics400_256/frame', dim=256)
1681813 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME> <vargash1>
# @Date: Sunday, April 10th 2016, 11:25:34 pm
# @Email: <EMAIL>
# @Last modified by: vargash1
# @Last modified time: Sunday, April 10th 2016, 11:28:31 pm
import os
from setuptools import setup
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used by setup() below to load README.md as the long description.
    """
    # BUG FIX: the original left the file handle open until garbage
    # collection; the context manager closes it deterministically.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata for vRaspi; long_description is pulled from README.md.
setup(
    name = "vRaspi",
    version = "0.0.0",
    author = "<NAME>",
    author_email = "<EMAIL>",
    description = ("vRaspi"),
    license = "MIT",
    url = "https://github.com/vargash1/vraspi",
    packages = ['vraspi'],
    package_dir = {'vraspi':'vraspi'},
    long_description = read('README.md'),
    # entry_points = {
    #     'console_scripts': [
    #         'arithcli=arithmos.arithcli:main',
    #     ],
    # },
)
| StarcoderdataPython |
1742550 | <filename>noise/dh/keypair.py<gh_stars>1-10
class KeyPair(object):
    """Read-only pairing of a DH public key with its private key."""

    def __init__(self, public_key, private_key):
        """
        :param public_key: the public half of the pair
        :type public_key: noise.dh.public.PublicKey
        :param private_key: the private half of the pair
        :type private_key: noise.dh.private.PrivateKey
        """
        self._pub = public_key
        self._priv = private_key

    @property
    def public(self):
        """:rtype: noise.dh.public.PublicKey"""
        return self._pub

    @property
    def private(self):
        """:rtype: noise.dh.private.PrivateKey"""
        return self._priv
| StarcoderdataPython |
3378546 | from ntlm import HTTPNtlmAuthHandler
| StarcoderdataPython |
3203988 | import numpy as np
import part1
if __name__ == "__main__":
    # Advent of Code 2018 day 3 part 2: find the single claim whose every
    # square inch is covered by exactly one claim on the board.
    board, claims = part1.create_board("input.txt")
    unique_id = None  # id of the non-overlapping claim, once found
    for claim_id, x0, y0, width, height in claims:
        # the claim is unique iff each cell it covers has a claim count of 1
        # (plain range() replaces np.arange -- no numpy needed for int steps,
        #  and all() replaces the nested flag-and-break pyramid)
        unique = all(
            board[y][x][0] == 1
            for y in range(y0, y0 + height)
            for x in range(x0, x0 + width)
        )
        if unique:
            unique_id = claim_id
            break  # the puzzle guarantees exactly one answer
    print("Answer:", unique_id)
| StarcoderdataPython |
1618310 | import suspect
import numpy
def test_null_transform():
    # A zero frequency/phase transform must still return an MRSData instance.
    fid = numpy.ones(128, 'complex')
    data = suspect.MRSData(fid, 1.0 / 128, 123)
    transformed_data = suspect.processing.frequency_correction.transform_fid(data, 0, 0)
    assert type(transformed_data) == suspect.MRSData
def test_water_peak_alignment_misshape():
    # The alignment must also accept a (1, N)-shaped FID, not just (N,).
    spectrum = numpy.zeros(128, 'complex')
    spectrum[0] = 1
    fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
    for i in range(fids.shape[0]):
        # roll the synthetic peak i bins, so the expected shift is exactly i
        rolled_spectrum = numpy.roll(spectrum, i)
        fids[i] = numpy.fft.ifft(rolled_spectrum)
        current_fid = numpy.reshape(fids[i], (1, 128))
        frequency_shift = suspect.processing.frequency_correction.residual_water_alignment(current_fid)
        numpy.testing.assert_almost_equal(frequency_shift, i)
def test_water_peak_alignment():
    # Detected shift must equal the synthetic roll applied to the water peak.
    spectrum = numpy.zeros(128, 'complex')
    spectrum[0] = 1
    fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
    for i in range(fids.shape[0]):
        rolled_spectrum = numpy.roll(spectrum, i)
        fids[i] = numpy.fft.ifft(rolled_spectrum)
        frequency_shift = suspect.processing.frequency_correction.residual_water_alignment(fids[i])
        numpy.testing.assert_almost_equal(frequency_shift, i)
def test_spectral_registration():
    # Spectral registration must recover a known frequency offset between
    # two synthetic Gaussian FIDs.
    time_axis = numpy.arange(0, 0.512, 5e-4)
    target_fid = suspect.MRSData(suspect.basis.gaussian(time_axis, 0, 0, 50.0), 5e-4, 123)
    for i in range(1, 15):
        input_fid = suspect.MRSData(suspect.basis.gaussian(time_axis, i, 0, 50.0), 5e-4, 123)
        frequency_shift, phase_shift = suspect.processing.frequency_correction.spectral_registration(input_fid, target_fid)
        numpy.testing.assert_allclose(frequency_shift, i)
def test_compare_frequency_correction():
    # On real twix data the two correction methods should agree to within
    # one spectral bin (atol = df).
    test_data = suspect.io.load_twix("tests/test_data/siemens/twix_vb.dat")
    test_data = test_data.inherit(numpy.average(test_data, axis=1, weights=suspect.processing.channel_combination.svd_weighting(numpy.average(test_data, axis=0))))
    sr_target = test_data[0]
    for i in range(test_data.shape[0]):
        current_fid = test_data[i]
        wpa_fs = suspect.processing.frequency_correction.residual_water_alignment(current_fid)
        sr_fs = suspect.processing.frequency_correction.spectral_registration(current_fid, sr_target)[0]
        numpy.testing.assert_allclose(wpa_fs, sr_fs, atol=current_fid.df)
def test_frequency_transform():
    # Applying the opposite shift (-i) must restore the original spectrum.
    spectrum = numpy.zeros(128, 'complex')
    spectrum[0] = 1
    for i in range(16):
        rolled_spectrum = numpy.roll(spectrum, i)
        fid = suspect.MRSData(numpy.fft.ifft(rolled_spectrum), 1.0 / 128, 123)
        transformed_fid = suspect.processing.frequency_correction.transform_fid(fid, -i, 0)
        transformed_spectrum = numpy.fft.fft(transformed_fid)
        numpy.testing.assert_almost_equal(transformed_spectrum, spectrum)
def test_apodize():
    # Gaussian apodization with 8*df broadening: the half-maximum should land
    # on bin 4, and the spectrum integral must be preserved.
    data = suspect.MRSData(numpy.ones(1024), 5e-4, 123.456)
    raw_spectrum = numpy.fft.fft(data)
    apodized_data = suspect.processing.apodize(data, suspect.processing.gaussian_window, {"line_broadening": data.df * 8})
    spectrum = numpy.fft.fft(apodized_data)
    numpy.testing.assert_allclose(spectrum[4].real, 0.5 * numpy.amax(spectrum), rtol=0.01)
    numpy.testing.assert_allclose(numpy.sum(spectrum), numpy.sum(raw_spectrum))
def test_gaussian_denoising():
    # constant signal denoised should be the same as original
    data = numpy.ones(128)
    denoised_data = suspect.processing.denoising.sliding_gaussian(data, 11)
    numpy.testing.assert_almost_equal(data, denoised_data)
def test_water_suppression():
    # HSVD asked for 4 components must return exactly 4, and the components
    # must be reconstructable into an FID on the data's time axis.
    data = suspect.io.load_twix("tests/test_data/siemens/twix_vb.dat")
    channel_combined_data = data.inherit(numpy.average(data, axis=1))
    components = suspect.processing.water_suppression.hsvd(channel_combined_data[10], 4, int(data.np / 2))
    fid = suspect.processing.water_suppression.construct_fid(components, data.time_axis())
    assert len(components) == 4
| StarcoderdataPython |
40273 | import pandas as pd
import matplotlib.pyplot as plt
from data import games
# Select the play-by-play rows and name their columns.
plays = games[games['type']=='play']
plays.columns= ['type','inning','team', 'player', 'count','pitches','event', 'game_id', 'year']
#print (plays)
# Keep only hit events: S (single, but not SB = stolen base), D, T, HR.
hits = plays.loc[plays['event'].str.contains('^(?:S(?!B)|D|T|HR)'), ['inning','event']]
#print(hits)
#plays.columns = ['inning', 'event']
#attendance.loc[:, 'attendance']= pd.to_numeric(attendance.loc[:, 'attendance'])
hits.loc[:, 'inning']= pd.to_numeric(hits.loc[:, 'inning'])
print (hits)
# Collapse each raw event code into one of four hit-type labels.
replacements= {r'^S(.*)': 'single', r'^D(.*)': 'double', r'^T(.*)': 'triple', r'^HR(.*)': 'hr'}
#this is just an array, with now converted 'event' called hit_type
hit_type= hits['event'].replace(replacements, regex=True)
#print(hit_type)
#add hit_type into hits matrix,
#now we have ['inning', 'event','hit_type']
hits= hits.assign(hit_type=hit_type)
#print (hits)
'''
In one line of code, group the hits DataFrame by inning and hit_type,
call size() to count the number of hits per inning,
and then reset the index of the resulting DataFrame.
'''
hits = hits.groupby(['inning','hit_type']).size().reset_index(name= 'count')
#how does it know the reset_index is the size()?
#hits = hits.reset_index(name= 'count')
#print (hits)
# Make hit_type an ordered categorical so bars stack single->double->triple->hr.
hits['hit_type']= pd.Categorical(hits['hit_type'], ['single', 'double', 'triple', 'hr'])
#sort_values need parameter 'by=[column1, column2, ...]', 'by=' is optional
hits= hits.sort_values(['inning','hit_type'])
#print (hits)
# Pivot to one row per inning, one column per hit type, then draw a stacked bar chart.
hits= hits.pivot(index='inning', columns='hit_type',values='count')
#print (hits)
hits.plot.bar(stacked= True)
plt.show()
| StarcoderdataPython |
1674257 | import pytest
pytest.importorskip("requests")
pytest.importorskip("requests.exceptions")
def test_load_module():
    # Smoke test: importing the module must not raise.
    __import__("modules.contrib.getcrypto")
| StarcoderdataPython |
1634871 | <filename>Python3/OTUS/lesson04/lesson04-1.py
class MyIterable:
    """An iterator over the integers [start, stop) that can be rewound."""

    def __init__(self, start, stop):
        """Validate the bounds and position the cursor at *start*.

        Raises ValueError unless stop > start.
        """
        if stop <= start:
            raise ValueError('Start has to be < than stop')
        self.start = start
        self.stop = stop
        self.reset()

    def __iter__(self):
        # An iterator is its own iterable.
        return self

    def __next__(self):
        if self.current >= self.stop:
            raise StopIteration
        value = self.current
        self.current += 1
        return value

    def reset(self):
        """Rewind the cursor back to the first value."""
        self.current = self.start
# Demo: exhaust the iterator once, then show that reset() rewinds it.
it = MyIterable(stop=3, start=1)
for i in it:
    print(i, end=' ')
print()
print(iter(it))
it.reset()
iterable = iter(it)
print(next(iterable))
# iter(it) returns the same object, so resetting it rewinds `it` as well
iterable.reset()
print(next(iterable))
# print(next(it))
# print(next(it))
# print(next(it))
| StarcoderdataPython |
1727370 | # Generated by Django 3.2.8 on 2021-12-06 05:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 3.2.8): creates the
    postsManager and Post tables.  Generated code -- prefer regenerating
    via makemigrations over hand edits."""
    initial = True
    dependencies = [
        # Post.author_id references the project's configured user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='postsManager',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('post_id', models.CharField(default=uuid.uuid4, editable=False, max_length=100, primary_key=True, serialize=False)),
                ('title', models.CharField(default='', max_length=100, verbose_name='title')),
                ('visibility', models.CharField(choices=[('PUBLIC', 'PUBLIC'), ('PRIVATE', 'PRIVATE')], default='PUBLIC', max_length=8)),
                ('description', models.CharField(blank=True, max_length=100, verbose_name='description')),
                ('content', models.TextField(blank=True, verbose_name='content')),
                ('contentType', models.CharField(choices=[('text/markdown', 'text/markdown'), ('text/plain', 'text/plain'), ('application/base64', 'application/base64'), ('image/png;base64', 'image/png;base64'), ('image/jpeg;base64', 'image/jpeg;base64')], default='text/plain', max_length=20)),
                ('source', models.URLField(editable=False)),
                ('origin', models.URLField(editable=False)),
                ('unlisted', models.BooleanField(default=False)),
                ('published', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('count', models.IntegerField(default=0)),
                ('categories', models.CharField(max_length=200)),
                ('author_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_author', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
13486 | <gh_stars>10-100
""" Module to take a water_level reading."""
# Raspi-sump, a sump pump monitoring system.
# <NAME>
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html
try:
import ConfigParser as configparser # Python2
except ImportError:
import configparser # Python3
from hcsr04sensor import sensor
from raspisump import log, alerts, heartbeat
# Load all settings from the fixed raspisump.conf location at import time.
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")
configs = {
    "critical_water_level": config.getint("pit", "critical_water_level"),
    "pit_depth": config.getint("pit", "pit_depth"),
    "temperature": config.getint("pit", "temperature"),
    "trig_pin": config.getint("gpio_pins", "trig_pin"),
    "echo_pin": config.getint("gpio_pins", "echo_pin"),
    "unit": config.get("pit", "unit"),
}
# If item in raspisump.conf add to configs dict. If not provide defaults.
try:
    configs["alert_when"] = config.get("pit", "alert_when")
except configparser.NoOptionError:
    configs["alert_when"] = "high"
try:
    configs["heartbeat"] = config.getint("email", "heartbeat")
except configparser.NoOptionError:
    configs["heartbeat"] = 0
def initiate_heartbeat():
    """Run the heartbeat-email check when enabled in raspisump.conf."""
    if configs["heartbeat"] == 1:
        heartbeat.determine_if_heartbeat()
def water_reading():
    """Initiate a water level reading."""
    pit_depth = configs["pit_depth"]
    trig_pin = configs["trig_pin"]
    echo_pin = configs["echo_pin"]
    temperature = configs["temperature"]
    unit = configs["unit"]
    # hcsr04sensor drives the ultrasonic sensor on the configured GPIO pins
    value = sensor.Measurement(trig_pin, echo_pin, temperature, unit)
    try:
        raw_distance = value.raw_distance(sample_wait=0.3)
    except SystemError:
        log.log_errors(
            "**ERROR - Signal not received. Possible cable or sensor problem."
        )
        # NOTE(review): exit() comes from the site module; sys.exit() would be
        # the conventional choice for a non-interactive script -- confirm.
        exit(0)
    # depth = pit depth minus measured distance, rounded to one decimal
    return round(value.depth(raw_distance, pit_depth), 1)
def water_depth():
    """Take one reading, log it, and trigger alerts/heartbeat as configured.

    A negative reading is clamped to 0.0 before logging.  An alert fires
    when the depth crosses critical_water_level in the direction selected
    by the 'alert_when' setting ('high' or 'low').
    """
    critical_water_level = configs["critical_water_level"]
    # renamed local: the original bound `water_depth`, shadowing this function
    depth = water_reading()
    if depth < 0.0:
        depth = 0.0
    log.log_reading(depth)
    if depth > critical_water_level and configs["alert_when"] == "high":
        alerts.determine_if_alert(depth)
    elif depth < critical_water_level and configs["alert_when"] == "low":
        alerts.determine_if_alert(depth)
    initiate_heartbeat()
| StarcoderdataPython |
188713 | <filename>dbsetup.py
# -*- coding: utf-8 -*-
# swtstore->dbsetup.py
# Create and setup databases for the first time run of the application
import sys
import os
# Get the path to the base directory of the app
BASE_DIR = os.path.join(os.path.dirname(__file__))
# append the path to the WSGI env path
sys.path.insert(0, BASE_DIR)
# Import and create the app; also get the db instance from the current app
from swtstore import create_app, getDBInstance
# Build the Flask app so the SQLAlchemy instance is bound to it.
app = create_app()
db = getDBInstance()
# Import all modules which represents a SQLAlchemy model;
# they have corresponding tables that are needed to be created
from swtstore.classes.models import Sweet, Context, Client
from swtstore.classes.models import User, Group, Membership
if __name__ == '__main__':
    # Create them!
    db.create_all()
| StarcoderdataPython |
1753351 | <reponame>spkuehl/circuitpython
import sys
import json
# Map start block to current allocation info.
# Map start block to current allocation info.
current_heap = {}
# Completed (and, at EOF, still-live) allocations, in completion order.
allocation_history = []
# Call tree of net block counts, keyed by frame file location; see change_root.
root = {}
def change_root(trace, size):
    """Accumulate *size* blocks along *trace* into the module-level call
    tree ``root``.

    *trace* is ordered innermost-frame-first; the tree is keyed outermost
    first, so the trace is walked in reverse.  Each node records the net
    block count for its frame plus a ``subcalls`` subtree.
    """
    node = root
    for frame in reversed(trace):
        location = frame[1]
        entry = node.setdefault(location, {
            "blocks": 0,
            "file": location,
            "function": frame[2],
            "subcalls": {},
        })
        entry["blocks"] += size
        node = entry["subcalls"]
# --- running statistics over the gdb log ---------------------------------
total_actions = 0
non_single_block_streak = 0   # consecutive allocations larger than one block
max_nsbs = 0                  # longest such streak seen since last report
last_action = None
last_total_actions = 0
count = 0                     # number of gc.collect reports printed
actions = {}                  # action-name -> count since last report
last_ticks_ms = 0
ticks_ms = 0
block_sizes = {}              # allocation size -> count since last report
allocation_sources = {}       # outermost caller -> count since last report
with open(sys.argv[1], "r") as f:
    # skip the log preamble up to the first blank line
    for line in f:
        if not line.strip():
            break
    # each iteration consumes one breakpoint hit: a backtrace followed by
    # the gdb-printed block address and size ($ lines)
    for line in f:
        action = None
        if line.startswith("Breakpoint 2"):
            break
        next(f) # throw away breakpoint code line
        # print(next(f)) # first frame
        block = 0
        size = 0
        trace = []
        for line in f:
            # print(line.strip())
            if line[0] == "#":
                # backtrace frame: (address, file:line, function)
                frame = line.strip().split()
                if frame[1].startswith("0x"):
                    trace.append((frame[1], frame[-1], frame[3]))
                else:
                    trace.append(("0x0", frame[-1], frame[1]))
            elif line[0] == "$":
                #print(line.strip().split()[-1])
                block = int(line.strip().split()[-1][2:], 16)
                next_line = next(f)
                size = int(next_line.strip().split()[-1][2:], 16)
                # next_line = next(f)
                # ticks_ms = int(next_line.strip().split()[-1][2:], 16)
            if not line.strip():
                break
        # classify the event by whether the block address is already live
        action = "unknown"
        if block not in current_heap:
            current_heap[block] = {"start_block": block, "size": size, "start_trace": trace, "start_time": total_actions}
            action = "alloc"
            if size == 1:
                max_nsbs = max(max_nsbs, non_single_block_streak)
                non_single_block_streak = 0
            else:
                non_single_block_streak += 1
            #change_root(trace, size)
            if size not in block_sizes:
                block_sizes[size] = 0
            source = trace[-1][-1]
            if source not in allocation_sources:
                print(trace)
                allocation_sources[source] = 0
            allocation_sources[source] += 1
            block_sizes[size] += 1
        else:
            # known block: either resized (realloc) or released (free/sweep)
            alloc = current_heap[block]
            alloc["end_trace"] = trace
            alloc["end_time"] = total_actions
            change_root(alloc["start_trace"], -1 * alloc["size"])
            if size > 0:
                action = "realloc"
                current_heap[block] = {"start_block": block, "size": size, "start_trace": trace, "start_time": total_actions}
                #change_root(trace, size)
            else:
                action = "free"
                if trace[0][2] == "gc_sweep":
                    action = "sweep"
                    non_single_block_streak = 0
                # a collection boundary: dump and reset the per-interval stats
                if (trace[3][2] == "py_gc_collect" or (trace[3][2] == "gc_deinit" and count > 1)) and last_action != "sweep":
                    print(ticks_ms - last_ticks_ms, total_actions - last_total_actions, "gc.collect", max_nsbs)
                    print(actions)
                    print(block_sizes)
                    print(allocation_sources)
                    actions = {}
                    block_sizes = {}
                    allocation_sources = {}
                    if count % 2 == 0:
                        print()
                    count += 1
                    last_total_actions = total_actions
                    last_ticks_ms = ticks_ms
                    max_nsbs = 0
                del current_heap[block]
                alloc["end_cause"] = action
                allocation_history.append(alloc)
        if action not in actions:
            actions[action] = 0
        actions[action] += 1
        last_action = action
        #print(total_actions, non_single_block_streak, action, block, size)
        total_actions += 1
# final report, then record blocks still live at end-of-log
print(actions)
print(max_nsbs)
print()
for alloc in current_heap.values():
    alloc["end_trace"] = ""
    alloc["end_time"] = total_actions
    allocation_history.append(alloc)
def print_frame(frame, indent=0):
    """Recursively print a call-tree level of allocation counts.

    *frame* maps a source location to ``{"blocks", "function", "subcalls"}``;
    entries with zero blocks and the allocator's own internals are skipped.
    Children are printed beneath their parent with two extra indent columns.
    """
    for location in sorted(frame):
        entry = frame[location]
        skip = (not entry["blocks"]
                or location.startswith(("../py/malloc.c", "../py/gc.c")))
        if skip:
            continue
        pad = " " * (indent - 1)
        print(pad, location, entry["function"], entry["blocks"], "blocks")
        print_frame(entry["subcalls"], indent + 2)
# print_frame(root)
# total_blocks = 0
# for key in sorted(root):
# total_blocks += root[key]["blocks"]
# print(total_blocks, "total blocks")
# with open("allocation_history.json", "w") as f:
# json.dump(allocation_history, f)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.