added stringdate 2024-11-18 17:59:49 2024-11-19 03:44:43 | created int64 0 2,086B | id stringlengths 40 40 | int_score int64 2 5 | metadata dict | score float64 2.31 5.34 | source stringclasses 1
value | text stringlengths 259 23.5k | num_lines int64 16 648 | avg_line_length float64 15 60.9 | max_line_length int64 31 179 | ast_depth int64 8 40 | length int64 101 3.8k | lang stringclasses 1
value | sast_codeql_findings stringlengths 2 276k | sast_codeql_findings_count int64 0 33 | sast_codeql_success bool 1
class | sast_codeql_error stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2024-11-18T18:05:43.388874+00:00 | 1,567,036,262,000 | 912041d1cee909d29a9b46991268ac30470556ff | 3 | {
"blob_id": "912041d1cee909d29a9b46991268ac30470556ff",
"branch_name": "refs/heads/master",
"committer_date": 1567036262000,
"content_id": "c6d2536833fc35c3e025cd9d5cea56298f7fe360",
"detected_licenses": [
"BSD-3-Clause"
],
"directory_id": "ca5fc43049f94a794d90a561fd8126f02b603599",
"extension": "py",
"filename": "alias.py",
"fork_events_count": 0,
"gha_created_at": 1469110078000,
"gha_event_created_at": 1527068726000,
"gha_language": "Python",
"gha_license_id": "BSD-3-Clause",
"github_id": 63874745,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2632,
"license": "BSD-3-Clause",
"license_type": "permissive",
"path": "/i3py/core/features/alias.py",
"provenance": "stack-edu-0054.json.gz:568753",
"repo_name": "Exopy/i3py",
"revision_date": 1567036262000,
"revision_id": "6f004d3e2ee2b788fb4693606cc4092147655ce1",
"snapshot_id": "32d9ee343d21d275680a2d030b660a80960e99ac",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/Exopy/i3py/6f004d3e2ee2b788fb4693606cc4092147655ce1/i3py/core/features/alias.py",
"visit_date": "2022-02-18T21:51:16.423188"
} | 2.5625 | stackv2 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2017 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Feature whose value is mapped to another Feature.
"""
from types import MethodType
from typing import Any, Dict, Callable
from ..abstracts import AbstractHasFeatures
from .feature import Feature, get_chain, set_chain
GET_DEF =\
"""def get(self, driver):
return {}
"""
SET_DEF =\
"""def set(self, driver, value):
{} = value
"""
class Alias(Feature):
"""Feature whose value is mapped to another Feature.
Parameters
----------
alias : str
Path to the feature to which the alias refers to. The path should be
dot separated and use leading dots to access to parent features.
settable: bool, optional
Boolean indicating if the alias can be used to set the value of the
aliased feature.
"""
def __init__(self, alias: str, settable: bool=False) -> None:
super(Alias, self).__init__(True, settable if settable else None)
accessor = 'driver.' + '.'.join([p if p else 'parent'
for p in alias.split('.')])
defs = GET_DEF.format(accessor)
if settable:
defs += '\n' + SET_DEF.format(accessor)
loc: Dict[str, Callable] = {}
exec(defs, globals(), loc)
self.get = MethodType(loc['get'], self) # type: ignore
if settable:
self.set = MethodType(loc['set'], self) # type: ignore
def post_set(self, driver: AbstractHasFeatures, value: Any, i_value: Any,
response: Any):
"""Re-implemented here as an Alias does not need to do anything
by default.
"""
pass
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _get(self, driver: AbstractHasFeatures):
"""Re-implemented so that Alias never use the cache.
"""
with driver.lock:
return get_chain(self, driver)
def _set(self, driver: AbstractHasFeatures, value: Any):
"""Re-implemented so that Alias never uses the cache.
"""
with driver.lock:
set_chain(self, driver, value)
| 90 | 28.24 | 79 | 16 | 535 | python | [] | 0 | true | |
2024-11-18T18:05:43.651051+00:00 | 1,515,988,614,000 | 3c76f351fc6cfd2597b6e8471e52ac18606ae091 | 2 | {
"blob_id": "3c76f351fc6cfd2597b6e8471e52ac18606ae091",
"branch_name": "refs/heads/master",
"committer_date": 1515988614000,
"content_id": "472cbf590142d1d67a62dfdc14e3622b830d60a2",
"detected_licenses": [
"MIT"
],
"directory_id": "ef5bec8d5d6875b3124b87ab799141377b969ffe",
"extension": "py",
"filename": "models.py",
"fork_events_count": 0,
"gha_created_at": 1515879807000,
"gha_event_created_at": 1515988136000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 117380754,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1375,
"license": "MIT",
"license_type": "permissive",
"path": "/borrow_me/app/models.py",
"provenance": "stack-edu-0054.json.gz:568756",
"repo_name": "bobheadxi/borrow-me",
"revision_date": 1515988614000,
"revision_id": "9a32a3dee1217d89ee84901de7d71a9da09af319",
"snapshot_id": "4cc938b866e52db7d0f3591925176c0a8c33a7ab",
"src_encoding": "UTF-8",
"star_events_count": 2,
"url": "https://raw.githubusercontent.com/bobheadxi/borrow-me/9a32a3dee1217d89ee84901de7d71a9da09af319/borrow_me/app/models.py",
"visit_date": "2021-09-04T03:10:34.604830"
} | 2.328125 | stackv2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class Item(models.Model):
'''
An item for loan
'''
item_type = models.CharField(max_length=30, blank=False)
location = models.CharField(max_length=30, blank=False)
created_at = models.DateTimeField(auto_now_add=True, blank=False, editable=False)
return_at = models.DateTimeField(blank=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=False)
karma = models.IntegerField(blank=False)
image = models.TextField(max_length=50, blank=False)
description = models.CharField(max_length=50)
lat = models.DecimalField(max_digits=10, decimal_places=4, blank=False)
lon = models.DecimalField(max_digits=10, decimal_places=4, blank=False)
available = models.BooleanField(blank=False, default=True) # true if available
borrowed_by = models.ForeignKey(User, blank=True, null=True, related_name='%(class)s_borrowed_by')
borrowed_at = models.DateTimeField(blank=True, null=True)
returned_at = models.DateTimeField(blank=True, null=True)
class Profile(models.Model):
'''
Adds karma field to user. Each user has a Profile
'''
user = models.OneToOneField(User, on_delete=models.CASCADE)
karma = models.IntegerField(blank=False, default=10)
| 34 | 39.44 | 102 | 10 | 300 | python | [] | 0 | true | |
2024-11-18T18:05:43.726888+00:00 | 1,610,109,030,000 | ee5c70ca3ae2e11037aba30e5e217e520133016b | 3 | {
"blob_id": "ee5c70ca3ae2e11037aba30e5e217e520133016b",
"branch_name": "refs/heads/master",
"committer_date": 1610109030000,
"content_id": "c7214a1d9e46a6d41202bffcb1bb62dea93f5df0",
"detected_licenses": [
"Apache-2.0",
"Python-2.0"
],
"directory_id": "e75e87f4114edf2e769e1502134a4f866f28ca78",
"extension": "py",
"filename": "timeseries.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2619,
"license": "Apache-2.0,Python-2.0",
"license_type": "permissive",
"path": "/energyquantified/api/timeseries.py",
"provenance": "stack-edu-0054.json.gz:568757",
"repo_name": "MPS-SOLUTIONS/eq-python-client",
"revision_date": 1610109030000,
"revision_id": "6dc65f5c16881b27fc22c9f461440c327997861e",
"snapshot_id": "c784a301c2cdfbffcb4b0232fe2cc5dff2683ddd",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/MPS-SOLUTIONS/eq-python-client/6dc65f5c16881b27fc22c9f461440c327997861e/energyquantified/api/timeseries.py",
"visit_date": "2023-04-14T01:40:19.660361"
} | 2.796875 | stackv2 | from .base import BaseAPI
from ..metadata import CurveType
from ..parser.timeseries import parse_timeseries
# Tuple of supported values for Curve.curve_type in the time series API
CURVE_TYPES = (CurveType.TIMESERIES, CurveType.SCENARIO_TIMESERIES)
class TimeseriesAPI(BaseAPI):
"""
Time series API operations. Access these operations via an
instance of the :py:class:`energyquantified.EnergyQuantified` class:
>>> eq = EnergyQuantified(api_key="aaaa-bbbb-cccc-dddd")
>>> eq.timeseries.load(curve, begin, end)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load(
self,
curve,
begin=None,
end=None,
frequency=None,
aggregation=None,
hour_filter=None):
"""
Load time series data for a :py:class:`energyquantified.metadata.Curve`.
This operation works for curves with
``curve_type = TIMESERIES | SCENARIO_TIMESERIES`` only.
:param curve: The curve or curve name
:type curve: :py:class:`energyquantified.metadata.Curve`, str
:param begin: The begin date-time
:type begin: date, datetime, str, required
:param end: The end date-time
:type end: date, datetime, str, required
:param frequency: Set the preferred frequency for aggregations, defaults to None
:type frequency: Frequency, optional
:param aggregation: The aggregation method (i.e. AVERAGE, MIN, MAX),\
has no effect unless *frequency* is provided, defaults to AVERAGE
:type aggregation: Aggregation, optional
:param hour_filter: Filters on hours to include (i.e. BASE, PEAK),\
has no effect unless *frequency* is provided, defaults to BASE
:type hour_filter: Filter, optional
:return: A time series
:rtype: :py:class:`energyquantified.data.Timeseries`
"""
# Build URL
safe_curve = self._urlencode_curve_name(curve, curve_types=CURVE_TYPES)
url = f"/timeseries/{safe_curve}/"
# Parameters
params = {}
self._add_datetime(params, "begin", begin, required=True)
self._add_datetime(params, "end", end, required=True)
self._add_frequency(params, "frequency", frequency)
if "frequency" in params:
self._add_aggregation(params, "aggregation", aggregation)
self._add_filter(params, "hour-filter", hour_filter)
# HTTP request
response = self._get(url, params=params)
return parse_timeseries(response.json())
| 66 | 38.68 | 88 | 12 | 595 | python | [] | 0 | true | |
2024-11-18T18:05:43.895243+00:00 | 1,619,674,139,000 | c814a34864a4158acf4e1abbbb583376f3efad66 | 3 | {
"blob_id": "c814a34864a4158acf4e1abbbb583376f3efad66",
"branch_name": "refs/heads/master",
"committer_date": 1619674139000,
"content_id": "8b45c9393b96c8f7482a7754a3078e8eb2a7bbc4",
"detected_licenses": [
"MIT"
],
"directory_id": "4790aa158050f52b2f0fa990467774c5276b4bc8",
"extension": "py",
"filename": "utils_math.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6388,
"license": "MIT",
"license_type": "permissive",
"path": "/utils/utils_math.py",
"provenance": "stack-edu-0054.json.gz:568759",
"repo_name": "yff12345/bsc_lcs",
"revision_date": 1619674139000,
"revision_id": "4076fd40656efee3365f10d4e2fd7e7d88d524a4",
"snapshot_id": "6024a6dd0ed82e35dd2acd8def1b9d480c84d28e",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/yff12345/bsc_lcs/4076fd40656efee3365f10d4e2fd7e7d88d524a4/utils/utils_math.py",
"visit_date": "2023-04-12T13:47:02.626502"
} | 2.640625 | stackv2 | import numpy as np
import torch
import torch.nn.functional as F
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return -torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, temp=1, eps=1e-10, dim=-1):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + gumbel_noise
return F.softmax(y / temp, dim=dim)
def gumbel_softmax(logits, temp=1, hard=False, eps=1e-10, dim=-1):
"""
NOTE: Stolen from https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
Added dimension selection feature.
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, temp=temp, eps=eps, dim=dim)
if hard:
shape = logits.size()
_, idx = y_soft.max(dim=dim, keepdim=True)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros_like(y_soft)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(dim, idx, 1.0)
y = (y_hard - y_soft).detach() + y_soft
else:
y = y_soft
return y
def threshold_sampling(logits, threshold=0.5, hard=False):
"""
Omit Gumbel sampling for deterministic sampling.
"""
y_soft = torch.sigmoid(logits)
y_hard = y_soft.ge(threshold).to(y_soft.device, dtype=torch.float32)
y = (y_hard - y_soft).detach() + y_soft
return y
def threshold_sampling_v2(logits, threshold=0.5, hard=False):
"""
Omit Gumbel sampling for deterministic sampling.
V2 different: no sigmoid in sampling function (sigmoid is applied at logit function)
"""
# y_soft = torch.sigmoid(logits)
y_soft = logits
y_hard = y_soft.ge(threshold).to(y_soft.device, dtype=torch.float32)
y = (y_hard - y_soft).detach() + y_soft
return y
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def encode_onehot(labels):
classes = set(labels)
classes_dict = {
c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)
}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
triu_idx = triu_idx[get_offdiag_indices(num_nodes)]
return triu_idx.nonzero()
def get_tril_offdiag_indices(num_nodes):
"""Linear tril (lower) indices w.r.t. vector of off-diagonal elements."""
tril_idx = torch.zeros(num_nodes * num_nodes)
tril_idx[get_tril_indices(num_nodes)] = 1.
tril_idx = tril_idx[get_offdiag_indices(num_nodes)]
return tril_idx.nonzero()
def mat_to_offdiag(inputs, num_atoms, num_edge_types):
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms]).astype(np.int32)
num_edges = (num_atoms * num_atoms) - num_atoms
if not inputs.is_contiguous():
inputs = inputs.contiguous()
inputs = inputs.view(-1, num_edge_types, num_atoms * num_atoms)
inputs = torch.transpose(inputs, 2, 1)
off_diag_idx = torch.LongTensor(off_diag_idx)
if inputs.is_cuda:
off_diag_idx = off_diag_idx.cuda()
mat_offdiag = torch.index_select(inputs, 1, off_diag_idx).contiguous()
return mat_offdiag
def offdiag_to_mat(inputs, num_nodes):
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),
[num_nodes, num_nodes]).astype(np.int32)
batch_size = inputs.size(0)
edge_types = inputs.size(2)
output = torch.zeros((batch_size, num_nodes * num_nodes, edge_types))
if inputs.is_cuda:
output = output.cuda()
output[:, off_diag_idx, :] = inputs
output = output.view(batch_size, num_nodes, num_nodes, edge_types)
return output
def sample_graph(logits, args):
if args.deterministic_sampling:
edges = threshold_sampling(logits, threshold=args.threshold)
else:
edges = gumbel_softmax(logits, temp=args.temp, hard=args.hard)
return edges
def sample_graph_v2(logits, args):
if args.deterministic_sampling:
edges = threshold_sampling_v2(logits, threshold=args.threshold)
else:
edges = gumbel_softmax(logits, temp=args.temp, hard=args.hard)
return edges
| 201 | 30.78 | 118 | 16 | 1,799 | python | [] | 0 | true | |
2024-11-18T18:05:44.180405+00:00 | 1,487,096,748,000 | f66444c9b20297e96d451b5216019f2f5e1608a2 | 3 | {
"blob_id": "f66444c9b20297e96d451b5216019f2f5e1608a2",
"branch_name": "refs/heads/master",
"committer_date": 1487096748000,
"content_id": "8d9ac11787edbcb15bd3b63115a95a1be7e3836b",
"detected_licenses": [
"BSD-3-Clause"
],
"directory_id": "1ef0b40be50ab0094b8bdf493dddbc1b2e8935cb",
"extension": "py",
"filename": "thermo3_example.py",
"fork_events_count": 1,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 64587897,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 585,
"license": "BSD-3-Clause",
"license_type": "permissive",
"path": "/examples/thermo3_example.py",
"provenance": "stack-edu-0054.json.gz:568762",
"repo_name": "francois-berder/PyLetMeCreate",
"revision_date": 1487096748000,
"revision_id": "91862a6b9cebe0123e9e98f2b1a3a54378751b2a",
"snapshot_id": "3132f69782f40e7a000d799dfa8eb6bb8f79bf05",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/francois-berder/PyLetMeCreate/91862a6b9cebe0123e9e98f2b1a3a54378751b2a/examples/thermo3_example.py",
"visit_date": "2021-01-20T20:03:20.663217"
} | 3.046875 | stackv2 | #!/usr/bin/env python3
"""This example shows how to use the Thermo3 Click wrapper of the LetMeCreate
library.
It reads the temperature from the sensor and exits.
The Thermo3 Click must be inserted in Mikrobus 1 before running this program.
"""
from letmecreate.core import i2c
from letmecreate.core.common import MIKROBUS_1
from letmecreate.click import thermo3
# Initialise I2C on Mikrobus 1
i2c.init()
i2c.select_bus(MIKROBUS_1)
# Read temperature
thermo3.enable(0)
print('{} degrees celsius'.format(thermo3.get_temperature()))
thermo3.disable()
# Release I2C
i2c.release()
| 27 | 20.67 | 77 | 9 | 164 | python | [] | 0 | true | |
2024-11-18T18:05:44.243598+00:00 | 1,616,379,521,000 | e499f5a96555c30d62ba84e6a2a828f038b81458 | 4 | {
"blob_id": "e499f5a96555c30d62ba84e6a2a828f038b81458",
"branch_name": "refs/heads/master",
"committer_date": 1616379521000,
"content_id": "fd1fd060ee4d78f53d8ddd4c089fe6243cc28c26",
"detected_licenses": [
"BSD-3-Clause"
],
"directory_id": "ec93898c4792f3eb920eaa0159cced2cc6d5274d",
"extension": "py",
"filename": "rename.py",
"fork_events_count": 1,
"gha_created_at": 1592644637000,
"gha_event_created_at": 1616379544000,
"gha_language": "C++",
"gha_license_id": "BSD-3-Clause",
"github_id": 273676092,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 837,
"license": "BSD-3-Clause",
"license_type": "permissive",
"path": "/web/rename.py",
"provenance": "stack-edu-0054.json.gz:568763",
"repo_name": "JackKuo-tw/BESSGreatFirewall",
"revision_date": 1616379521000,
"revision_id": "5a2d814df8ce508263eba1fcd4a2641c8f625402",
"snapshot_id": "95e0d80a646fd7adee409bc9ae03d3de42d4ac16",
"src_encoding": "UTF-8",
"star_events_count": 2,
"url": "https://raw.githubusercontent.com/JackKuo-tw/BESSGreatFirewall/5a2d814df8ce508263eba1fcd4a2641c8f625402/web/rename.py",
"visit_date": "2023-04-21T16:47:11.335247"
} | 3.703125 | stackv2 | # shuffle file and rename
import os
from random import shuffle
# Function to rename multiple files
def main():
num = []
for count in range(200):
if count < 10:
dst = "00" + str(count) + ".jpg"
num.append(dst)
elif count < 100 and count > 9:
dst = "0" + str(count) + ".jpg"
num.append(dst)
else:
dst = str(count) + ".jpg"
num.append(dst)
shuffle(num)
for count, filename in enumerate(os.listdir("./data/")):
src ='./data/'+ filename
dst ='./data/'+ num[count]
# rename() function will
# rename all the files
os.rename(src, dst)
# Driver Code
if __name__ == '__main__':
# Calling main() function
main() | 31 | 25.06 | 61 | 15 | 205 | python | [] | 0 | true | |
2024-11-18T18:05:44.308384+00:00 | 1,578,981,575,000 | 8d0411d1a3851550c8d731400213ff48f1c3b0c7 | 2 | {
"blob_id": "8d0411d1a3851550c8d731400213ff48f1c3b0c7",
"branch_name": "refs/heads/master",
"committer_date": 1578981575000,
"content_id": "e86b6be15675ecfb59b4c74485893d83221a5f43",
"detected_licenses": [
"MIT"
],
"directory_id": "8142e588999503979e1cd9d1989cdd43dc9417d6",
"extension": "py",
"filename": "losses.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11390,
"license": "MIT",
"license_type": "permissive",
"path": "/evkit/utils/losses.py",
"provenance": "stack-edu-0054.json.gz:568764",
"repo_name": "lilujunai/side-tuning",
"revision_date": 1578981575000,
"revision_id": "dea345691fb7ee0230150fe56ddd644efdffa6ac",
"snapshot_id": "07a0e2015fcb7f3699f749fb233b95ceba449277",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/lilujunai/side-tuning/dea345691fb7ee0230150fe56ddd644efdffa6ac/evkit/utils/losses.py",
"visit_date": "2021-02-26T23:52:13.468928"
} | 2.390625 | stackv2 | from evkit.models.taskonomy_network import TaskonomyDecoder
from tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS, FEED_FORWARD_TASKS
import torch
import torch.nn.functional as F
def softmax_cross_entropy(inputs, target, weight=None, cache={}, size_average=None, ignore_index=-100,
reduce=None, reduction='mean'):
cache['predictions'] = inputs
cache['labels'] = target
if len(target.shape) == 2: # unsqueeze one-hot representation
target = torch.argmax(target, dim=1)
loss = F.cross_entropy(inputs, target, weight)
# when working with 2D data, cannot use spatial weight mask, it becomes categorical/class
return {'total': loss, 'xentropy': loss}
def heteroscedastic_normal(mean_and_scales, target, weight=None, cache={}, eps=1e-2):
mu, scales = mean_and_scales
loss = (mu - target)**2 / (scales**2 + eps) + torch.log(scales**2 + eps)
# return torch.sum(weight * loss) / torch.sum(weight) if weight is not None else loss.mean()
loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
return {'total': loss, 'nll': loss}
def heteroscedastic_double_exponential(mean_and_scales, target, weight=None, cache={}, eps=5e-2):
mu, scales = mean_and_scales
loss = torch.abs(mu - target) / (scales + eps) + torch.log(2.0 * (scales + eps))
loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
return {'total': loss, 'nll': loss}
def weighted_mse_loss(inputs, target, weight=None, cache={}):
losses = {}
cache['predictions'] = inputs
cache['labels'] = target
if weight is not None:
# sq = (inputs - target) ** 2
# weightsq = torch.sum(weight * sq)
loss = torch.mean(weight * (inputs - target) ** 2)/torch.mean(weight)
else:
loss = F.mse_loss(inputs, target)
return {'total': loss, 'mse': loss}
weighted_l2_loss = weighted_mse_loss
def weighted_l1_loss(inputs, target, weight=None, cache={}):
target = target.float()
if weight is not None:
loss = torch.mean(weight * torch.abs(inputs - target))/torch.mean(weight)
else:
loss = F.l1_loss(inputs, target)
return {'total': loss, 'l1': loss}
def perceptual_l1_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
if weight is not None:
loss = torch.mean(weight * torch.abs(inputs_decoded - targets_decoded))/torch.mean(weight)
else:
loss = F.l1_loss(inputs_decoded, targets_decoded)
return {'total': loss, 'perceptual_l1': loss}
return runner
def perceptual_l2_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
if weight is not None:
loss = torch.mean(weight * (inputs_decoded - targets_decoded) ** 2)/torch.mean(weight)
else:
loss = F.mse_loss(inputs_decoded, targets_decoded)
return {'total': loss, 'perceptual_mse': loss}
return runner
def dense_softmax_cross_entropy_loss(inputs, targets, cache={}): # these should be logits (batch_size, n_class)
batch_size, _ = targets.shape
losses = {}
losses['final'] = -1. * torch.sum(torch.softmax(targets.float(), dim=1) * F.log_softmax(inputs.float(), dim=1)) / batch_size
losses['standard'] = losses['final']
return losses
def dense_cross_entropy_loss_(inputs, targets): # these should be logits (batch_size, n_class)
batch_size, _ = targets.shape
return -1. * torch.sum(targets * F.log_softmax(inputs, dim=1)) / batch_size
# def dense_softmax_cross_entropy(inputs, targets, weight=None, cache={}):
# assert weight == None
# cache['predictions'] = inputs
# cache['labels'] = targets
# # print(targets.shape)
# batch_size, _ = targets.shape
# loss = -1. * torch.sum(torch.softmax(targets, dim=1) * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
# return {'total': loss, 'xentropy': loss}
def dense_softmax_cross_entropy(inputs, targets, weight=None, cache={}):
assert weight is None
cache['predictions'] = inputs
cache['labels'] = targets
batch_size, _ = targets.shape
loss = -1. * torch.sum(torch.softmax(targets.detach(), dim=1) * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
return {'total': loss, 'xentropy': loss}
def dense_cross_entropy(inputs, targets, weight=None, cache={}):
assert weight == None
cache['predictions'] = inputs
cache['labels'] = targets
batch_size, _ = targets.shape
loss = -1. * torch.sum(targets.detach() * F.log_softmax(inputs, dim=1)) / batch_size
# loss = F.mse_loss(inputs, targets.detach())
return {'total': loss, 'xentropy': loss}
def perceptual_cross_entropy_loss(decoder_path, bake_decodings):
task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
checkpoint = torch.load(decoder_path)
decoder.load_state_dict(checkpoint['state_dict'])
decoder.cuda()
decoder.eval()
print(f'Loaded decoder from {decoder_path} for perceptual loss')
def runner(inputs, target, weight=None, cache={}):
# the last arguments are so we can 'cache' and pass the decodings outside
inputs_decoded = decoder(inputs)
targets_decoded = target if bake_decodings else decoder(target)
cache['predictions'] = inputs_decoded
cache['labels'] = targets_decoded
return dense_softmax_cross_entropy_loss_(inputs_decoded, targets_decoded)
return runner
def identity_regularizer(loss_fn, model):
def runner(inputs, target, weight=None, cache={}):
losses = loss_fn(inputs, target, weight, cache)
return losses
return runner
def transfer_regularizer(loss_fn, model, reg_loss_fn='F.l1_loss', coef=1e-3):
def runner(inputs, target, weight=None, cache={}):
orig_losses = loss_fn(inputs, target, weight, cache)
#if isinstance(model, PolicyWithBase):
if type(model).__name__ == "PolicyWithBase":
# Imitation Learning - retreive encodings via the cache
assert 'base_encoding' in cache and 'transfered_encoding' in cache, f'cache is missing keys {cache.keys()}'
regularization_loss = 0
for base_encoding, transfered_encoding in zip(cache['base_encoding'], cache['transfered_encoding']):
regularization_loss += eval(reg_loss_fn)(model.base.perception_unit.sidetuner.net.transfer_network(base_encoding), transfered_encoding)
else:
# Vision Transfers - retreive encodings directly from model attributes
# (cannot do this for IL due to the FrameStacked being iterative)
assert isinstance(model.side_output, torch.Tensor), 'Cannot regularize side network if it is not used'
regularization_loss = eval(reg_loss_fn)(model.transfer_network(model.base_encoding), model.transfered_encoding)
orig_losses.update({
'total': orig_losses['total'] + coef * regularization_loss,
'weight_tying': regularization_loss,
})
return orig_losses
return runner
def perceptual_regularizer(loss_fn, model, coef=1e-3, decoder_path=None, use_transfer=True, reg_loss_fn='F.mse_loss'):
    """Wrap `loss_fn`, adding a perceptual (decoder-space) regularization term.

    Compares decoded base encodings E(x) against decoded tied encodings:
    T(E(x) + S(x)) when `use_transfer`, otherwise E(x) + S(x) directly.
    Recall, decoder requires unnormalized inputs!

    Args:
        loss_fn: base loss callable `(inputs, target, weight, cache) -> dict`.
        model: the model; a `PolicyWithBase` (IL) exposes encodings via `cache`,
            other models via attributes (`base_encoding`, etc.).
        coef: weight of the regularization term added to `total`.
        decoder_path: checkpoint of a TaskonomyDecoder (required).
        use_transfer: compare against `transfered_encoding` vs `merged_encoding`.
        reg_loss_fn: nominal loss name; NOTE it is always overridden below based
            on the task type (kept in the signature for interface compatibility).
    Returns:
        A `runner(inputs, target, weight=None, cache=None)` loss callable.
    """
    assert decoder_path is not None, 'Pass in a decoder to which to transform our parameters and regularize on'
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=task in FEED_FORWARD_TASKS)
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    if task in FEED_FORWARD_TASKS:
        reg_loss_fn = "dense_softmax_cross_entropy_loss_"
    else:
        reg_loss_fn = "F.l1_loss"
    # Resolve the loss-function name to a callable once, instead of eval()-ing
    # a string on every runner call.
    reg_loss = eval(reg_loss_fn)
    print(f'Loaded decoder from {decoder_path} for perceptual loss')
    def runner(inputs, target, weight=None, cache=None):
        # BUG FIX: original used a mutable default `cache={}` shared across calls.
        if cache is None:
            cache = {}
        orig_losses = loss_fn(inputs, target, weight, cache)
        if type(model).__name__ == "PolicyWithBase":
            # Imitation Learning - retreive encodings via the cache
            assert 'base_encoding' in cache, f'cache is missing base {cache.keys()}'
            key = 'transfered_encoding' if use_transfer else 'merged_encoding'
            assert key in cache, f'cache is missing tied {cache.keys()}'
            regularization_loss = 0
            for base_encoding, tied_encoding in zip(cache['base_encoding'], cache[key]):
                regularization_loss += reg_loss(decoder(base_encoding), decoder(tied_encoding))
        else:
            # Vision Transfers - retreive encodings directly from model attributes
            # (cannot do this for IL due to the FrameStacked being iterative)
            assert isinstance(model.side_output, torch.Tensor), 'Cannot regularize side network if it is not used'
            if use_transfer:
                tied_encoding = model.transfered_encoding
            else:
                tied_encoding = model.merged_encoding
            # BUG FIX: original assigned into an undefined `losses` dict and then
            # called the *string* reg_loss_fn as a function; compute once here.
            regularization_loss = reg_loss(decoder(model.base_encoding), decoder(tied_encoding))
        orig_losses.update({
            'total': orig_losses['total'] + coef * regularization_loss,
            'weight_tying': regularization_loss,
        })
        return orig_losses
    return runner
| 235 | 47.47 | 151 | 20 | 2,680 | python | [] | 0 | true | |
2024-11-18T18:05:44.364393+00:00 | 1,611,199,867,000 | ee5b19626616ec7568a5cb774bc5a76529f3c51e | 3 | {
"blob_id": "ee5b19626616ec7568a5cb774bc5a76529f3c51e",
"branch_name": "refs/heads/main",
"committer_date": 1611199867000,
"content_id": "18f2c7b9a15197f4a4144413fbc6b994a8d3c99f",
"detected_licenses": [
"MIT"
],
"directory_id": "6803e1834d76c7a9c1fd2484bbfb438615c341a2",
"extension": "py",
"filename": "d22b.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 331503553,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2146,
"license": "MIT",
"license_type": "permissive",
"path": "/d22b.py",
"provenance": "stack-edu-0054.json.gz:568765",
"repo_name": "jogloran/advent-of-code-2020",
"revision_date": 1611199867000,
"revision_id": "9804f1eb8d94c991d9aa3348f01f4bf65c195849",
"snapshot_id": "50b2eb208f10ac9c832dd35cd3b07d8b27d09ad2",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/jogloran/advent-of-code-2020/9804f1eb8d94c991d9aa3348f01f4bf65c195849/d22b.py",
"visit_date": "2023-02-22T00:52:13.546412"
} | 2.921875 | stackv2 | from more_itertools import split_at
# Split the input into the two blank-line-separated player deck sections.
grps = split_at(map(str.rstrip, open('d22.txt')), pred=lambda e: e == '')
grps = list(grps)
# Drop the "Player N:" header line of each section and parse card values.
grp1 = list(map(int, grps[0][1:]))
grp2 = list(map(int, grps[1][1:]))
# Winner markers returned by match().
FIRST_DECK_WON = 0
SECOND_DECK_WON = 1
# Silence the verbose per-round tracing; keep the real print for the answer.
old_print=print
print=lambda *args: None
def match(grp1, grp2, depth=0):
    """Play one game of Recursive Combat (AoC 2020 day 22 part 2).

    Mutates grp1/grp2 in place and returns FIRST_DECK_WON or SECOND_DECK_WON.
    `depth` is only used for trace output (game numbering).
    """
    # Seen (deck1, deck2) states in *this* game; a repeat ends the game
    # immediately in player 1's favour.
    memo = set()
    print('Match')
    print('-' * 30)
    round = 1
    while grp1 and grp2:
        print(f'Round {round} (Game {depth + 1})'); round += 1
        print(f"Player 1's deck: {','.join(map(str,grp1))}")
        print(f"Player 2's deck: {','.join(map(str,grp2))}\n")
        if (tuple(grp1), tuple(grp2)) in memo:
            print('Game repeat detected')
            return FIRST_DECK_WON
        # Both players hold at least as many cards as their top card's value:
        # the round winner is decided by a recursive sub-game on copied decks.
        if len(grp1) > grp1[0] and len(grp2) > grp2[0]:
            memo.add((tuple(grp1), tuple(grp2)))
            # NOTE(review): the leading [:] copy is redundant — the slice
            # already produces a new list.
            which_deck_won = match(grp1[:][1:1+grp1[0]], grp2[:][1:1+grp2[0]], depth+1)
            print(f"Returning from sub-game {depth+1}")
            print("<" * 30)
            if which_deck_won == FIRST_DECK_WON:
                print(f'Player 1 won sub-game {depth+1}')
                # if player 1 wins, then the order of cards added to player 1's deck
                # is P1's winning card, _then_ P2's losing card
                grp1.append(grp1[0])
                grp1.append(grp2[0])
            else:
                print(f'Player 2 won sub-game {depth+1}')
                grp2.append(grp2[0])
                grp2.append(grp1[0])
        elif grp1[0] < grp2[0]:
            # p2 wins the round on the higher card
            memo.add((tuple(grp1), tuple(grp2)))
            grp2.append(grp2[0])
            grp2.append(grp1[0])
        else:
            # p1 wins the round on the higher card
            memo.add((tuple(grp1), tuple(grp2)))
            grp1.append(grp1[0])
            grp1.append(grp2[0])
        # Remove the two cards just played from the tops of both decks.
        del grp1[0]
        del grp2[0]
    winner = SECOND_DECK_WON if not grp1 else FIRST_DECK_WON
    return winner
# Play the full game, then score the winning deck: bottom card is worth 1,
# next 2, ... top card worth len(deck).
winner = match(grp1, grp2)
winner = grp2 if winner == SECOND_DECK_WON else grp1
pts = sum((len(winner) - pos) * val for pos, val in enumerate(winner))
# Use the saved builtin print, since `print` was rebound to a no-op above.
old_print(pts)
# return (SECOND_DECK_WON if not grp1 else FIRST_DECK_WON), pts
2024-11-18T18:05:44.494368+00:00 | 1,636,657,919,000 | fa744d53462754fe0a60b43129fa4253a93b8640 | 2 | {
"blob_id": "fa744d53462754fe0a60b43129fa4253a93b8640",
"branch_name": "refs/heads/main",
"committer_date": 1636657919000,
"content_id": "0e1e3551fdc51aeacbd45c453caf0fe56abd7d37",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "d29ad540b82d834509697f65b98ccd54514bb51d",
"extension": "py",
"filename": "eink-sim.py",
"fork_events_count": 0,
"gha_created_at": 1636655702000,
"gha_event_created_at": 1636655703000,
"gha_language": null,
"gha_license_id": "Apache-2.0",
"github_id": 427104949,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2357,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/eink/eink-sim.py",
"provenance": "stack-edu-0054.json.gz:568766",
"repo_name": "phil-d-wilson/balena-health",
"revision_date": 1636657919000,
"revision_id": "b169b26bc50afe9d47027967c15cf0f77ac39fd0",
"snapshot_id": "24989e072fd062e4dfee6195539bfca5c4933e08",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/phil-d-wilson/balena-health/b169b26bc50afe9d47027967c15cf0f77ac39fd0/eink/eink-sim.py",
"visit_date": "2023-09-02T15:20:20.420228"
} | 2.453125 | stackv2 | #!/usr/local/bin/python
# -*- coding:utf-8 -*-
import os
import logging
import time
import atexit
import json
import random
from threading import Timer
import paho.mqtt.client as mqtt
logging.basicConfig(level=logging.INFO)
# Runtime configuration, overridable via environment variables.
bpmThreshold = int(os.getenv("BPM_THRESHOLD", "80"))  # alarm above this BPM
mqttRetryPeriod = int(os.getenv("MQTT_RETRY_PERIOD", "30")) # seconds
simulationMode = os.getenv("SIMULATION_MODE", "false")
# Tracks broker connectivity; toggled by the connect/disconnect callbacks.
mqttConnectedFlag = False
client = mqtt.Client()
def on_connect(client, userdata, flags, rc):
    "MQTT connect callback: subscribe to the data topic and mark the link up."
    global mqttConnectedFlag
    logging.info("MQTT connection established, subscribing to the 'balena' topic")
    client.subscribe("balena")
    mqttConnectedFlag = True
def on_disconnect(client, userdata, rc):
    "MQTT disconnect callback: mark the link down so main() will reconnect."
    global mqttConnectedFlag
    mqttConnectedFlag = False
    logging.info("MQTT disconnect detected")
def on_message(client, userdata, message):
    """Handle an incoming MQTT message on the 'balena' topic.

    The payload is JSON with a 'bpm' key; values at or above bpmThreshold
    are logged as an alarm, lower values as plain info.
    """
    strPayload = str(message.payload.decode("utf-8"))
    if message.topic == "balena":
        bpm = json.loads(strPayload)["bpm"]
        if bpm < bpmThreshold:
            logging.info("BPM received: {0}".format(bpm))
        else:
            # BUG FIX: logging.warn is a deprecated alias; use logging.warning.
            logging.warning("Alarm Triggering Event! BPM received: {0}".format(bpm))
def main():
    """Log the active configuration, wire MQTT callbacks, and keep the
    broker connection alive forever (reconnecting when the flag drops)."""
    # Give the device state time to settle
    time.sleep(5)
    logging.info(
        "Applying Config: "
        + json.dumps(
            {
                "simulationMode": simulationMode,
                "bpmThreshold": bpmThreshold,
                "mqttRetryPeriod": mqttRetryPeriod,
            }
        )
    )
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    while True:
        if not mqttConnectedFlag:
            logging.info(
                "Attempting to establish an MQTT connection at mqtt://localhost:1883"
            )
            try:
                # loop_start() spawns paho's network thread; the connect
                # callback flips mqttConnectedFlag on success.
                client.connect("localhost", 1883, 60)
                client.loop_start()
            except Exception as e:
                logging.error("MQTT connection error: {0}".format(str(e)))
            # Back off before the next connection attempt.
            time.sleep(mqttRetryPeriod)
        else:
            time.sleep(2)
def exit_handler():
    "Cleanly drop the MQTT connection and stop its network loop on exit."
    logging.info("Exiting...")
    client.disconnect()
    client.loop_stop()
atexit.register(exit_handler)
if __name__ == "__main__":
    try:
        main()
    except IOError as e:
        # I/O failures (e.g. display/socket errors) are fatal: log and exit.
        logging.error(e)
        exit(1)
| 94 | 24.07 | 85 | 18 | 526 | python | [] | 0 | true | |
2024-11-18T18:05:44.612828+00:00 | 1,692,088,231,000 | ea424e926ccd447792ed52f737b5a7a6c624043a | 2 | {
"blob_id": "ea424e926ccd447792ed52f737b5a7a6c624043a",
"branch_name": "refs/heads/master",
"committer_date": 1692088231000,
"content_id": "4a7bb7a8e088e4cbe8723325ef44f7da19e4de8b",
"detected_licenses": [
"MIT"
],
"directory_id": "586c97e81b448d9f4c1525205eaccc727f789ee7",
"extension": "py",
"filename": "concurrency.py",
"fork_events_count": 14,
"gha_created_at": 1461247685000,
"gha_event_created_at": 1694318776000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 56778863,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1130,
"license": "MIT",
"license_type": "permissive",
"path": "/src/buildercore/concurrency.py",
"provenance": "stack-edu-0054.json.gz:568768",
"repo_name": "elifesciences/builder",
"revision_date": 1692088231000,
"revision_id": "7de9bb53c7e6a447a075a66023815166ea54092f",
"snapshot_id": "33542171fd43a454d8c45feae181037ff414874d",
"src_encoding": "UTF-8",
"star_events_count": 12,
"url": "https://raw.githubusercontent.com/elifesciences/builder/7de9bb53c7e6a447a075a66023815166ea54092f/src/buildercore/concurrency.py",
"visit_date": "2023-08-16T11:22:40.684539"
} | 2.328125 | stackv2 | "Bit of a floating module, I guess to avoid circular dependencies. Needs to be reconciled somehow."
from . import bluegreen, bluegreen_v2, context_handler, cloudformation
def concurrency_for(stackname, concurrency_name):
    """concurrency default is to perform updates one machine at a time.
    Concurrency can be:
    - serial: one at a time
    - parallel: all together
    - blue-green: 50% at a time
    requires `stackname` to exist on the filesystem, see `src.decorators.requires_aws_stack_template`."""
    concurrency_names = ['serial', 'parallel', 'blue-green']
    # No preference given: default to updating all machines together.
    if concurrency_name is None:
        return 'parallel'
    if concurrency_name in ('serial', 'parallel'):
        return concurrency_name
    if concurrency_name == 'blue-green':
        context = context_handler.load_context(stackname)
        # Legacy ELB v1 templates use the original blue/green implementation.
        if cloudformation.template_using_elb_v1(stackname):
            return bluegreen.BlueGreenConcurrency(context['aws']['region'])
        return bluegreen_v2.do
    raise ValueError("Concurrency %s is not supported. Supported models: %s" % (concurrency_name, concurrency_names))
2024-11-18T18:05:45.004802+00:00 | 1,553,968,313,000 | 3d4e3f0ca608cdd90cf10c1b9ff48162f1894a9c | 3 | {
"blob_id": "3d4e3f0ca608cdd90cf10c1b9ff48162f1894a9c",
"branch_name": "refs/heads/master",
"committer_date": 1553968313000,
"content_id": "a401326a18810b96c3f274a9ca12b3d5b36fbe8f",
"detected_licenses": [
"MIT"
],
"directory_id": "8f501d223fa7942a7994cd5b7788626386bbe1ea",
"extension": "py",
"filename": "crypto.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 178590686,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1676,
"license": "MIT",
"license_type": "permissive",
"path": "/djangoless_signing/crypto.py",
"provenance": "stack-edu-0054.json.gz:568774",
"repo_name": "FKLC/djangoless-signing",
"revision_date": 1553968313000,
"revision_id": "8718f74275247c83b54d8753e3f8ffd9f2e8cf5a",
"snapshot_id": "0ed3d4c561a4ade334472501451986b3e339e2c3",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/FKLC/djangoless-signing/8718f74275247c83b54d8753e3f8ffd9f2e8cf5a/djangoless_signing/crypto.py",
"visit_date": "2020-05-03T10:57:02.512512"
} | 2.765625 | stackv2 | # source: https://github.com/django/django/blob/master/django/utils/crypto.py
"""
Django's standard crypto functions and utilities.
"""
import hashlib
import hmac
import random
import time
from .encoding import force_bytes
# Use the system PRNG if possible; note this rebinds the module name `random`
# to a SystemRandom instance, shadowing the imported module.
try:
    random = random.SystemRandom()
    using_sysrandom = True
except NotImplementedError:
    import warnings
    warnings.warn(
        "A secure pseudo-random number generator is not available "
        "on your system. Falling back to Mersenne Twister."
    )
    # Mersenne Twister is NOT cryptographically secure; callers can check
    # this flag to decide whether extra entropy mixing is needed.
    using_sysrandom = False
def salted_hmac(key_salt, value, secret):
    """
    Return the HMAC-SHA1 of 'value', using a key derived from key_salt and
    the given secret.

    A different key_salt should be passed in for every application of HMAC
    so that derived keys never collide between uses.
    """
    salt_bytes = force_bytes(key_salt)
    secret_bytes = force_bytes(secret)
    # Derive a per-application key from the base secret via SHA1.
    # NOTE(review): SHA1 here mirrors Django's historic scheme for key
    # separation; it is not used to hash the sensitive value itself.
    derived_key = hashlib.sha1(salt_bytes + secret_bytes).digest()
    # If the combined input were longer than SHA1's block size, hmac would
    # hash the key itself anyway; deriving explicitly makes the behaviour
    # identical in *all* cases.
    return hmac.new(derived_key, msg=force_bytes(value), digestmod=hashlib.sha1)
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    left = force_bytes(val1)
    right = force_bytes(val2)
    # Timing-safe comparison to avoid leaking match length via early exit.
    return hmac.compare_digest(left, right)
| 51 | 31.86 | 80 | 11 | 389 | python | [{"finding_id": "codeql_py/weak-sensitive-data-hashing_5c97c29f46b37af3_d1c117d7", "tool_name": "codeql", "rule_id": "py/weak-sensitive-data-hashing", "finding_type": "path-problem", "severity": "medium", "confidence": "high", "message": "[Sensitive data (secret)](1) is used in a hashing algorithm (SHA1) that is insecure.", "remediation": "", "location": {"file_path": "unknown", "line_start": 40, "line_end": null, "column_start": 24, "column_end": 41, "code_snippet": ""}, "cwe_id": "CWE-327", "cwe_name": null, "cvss_score": null, "cvss_vector": null, "owasp_category": null, "references": [], "fingerprint": null, "tags": ["security", "external/cwe/cwe-327", "external/cwe/cwe-328", "external/cwe/cwe-916"], "raw_output": {"ruleId": "py/weak-sensitive-data-hashing", "ruleIndex": 29, "rule": {"id": "py/weak-sensitive-data-hashing", "index": 29}, "message": {"text": "[Sensitive data (secret)](1) is used in a hashing algorithm (SHA1) that is insecure."}, "locations": [{"physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 40, "startColumn": 24, "endColumn": 41}}}], "partialFingerprints": {"primaryLocationLineHash": "ecba43b32cc4c068:1", "primaryLocationStartColumnFingerprint": "19"}, "codeFlows": [{"threadFlows": [{"locations": [{"location": {"physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 35, "startColumn": 14, "endColumn": 33}}, "message": {"text": "ControlFlowNode for force_bytes()"}}}, {"location": {"physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 35, "startColumn": 5, "endColumn": 11}}, "message": {"text": "ControlFlowNode for secret"}}}, {"location": {"physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 40, "startColumn": 24, "endColumn": 41}}, 
"message": {"text": "ControlFlowNode for BinaryExpr"}}}]}]}], "relatedLocations": [{"id": 1, "physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 35, "startColumn": 14, "endColumn": 33}}, "message": {"text": "Sensitive data (secret)"}}, {"physicalLocation": {"artifactLocation": {"uri": "code.py", "uriBaseId": "%SRCROOT%", "index": 0}, "region": {"startLine": 35, "startColumn": 14, "endColumn": 33}}}]}}] | 1 | true | |
2024-11-18T18:05:45.157547+00:00 | 1,430,934,893,000 | 9936379ea8ae076b9b0c4ce5b322d8a12497e38a | 2 | {
"blob_id": "9936379ea8ae076b9b0c4ce5b322d8a12497e38a",
"branch_name": "refs/heads/master",
"committer_date": 1431212320000,
"content_id": "92a5167070c5a64bd70518f188bd927997a14043",
"detected_licenses": [
"MIT"
],
"directory_id": "7492f373430262e8ba95c4cc52517ed23107dc67",
"extension": "py",
"filename": "dns_formatter.py",
"fork_events_count": 0,
"gha_created_at": 1426193181000,
"gha_event_created_at": 1426193181000,
"gha_language": null,
"gha_license_id": null,
"github_id": 32101544,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4191,
"license": "MIT",
"license_type": "permissive",
"path": "/dns_formatter.py",
"provenance": "stack-edu-0054.json.gz:568776",
"repo_name": "sargon/icvpn-scripts",
"revision_date": 1430934893000,
"revision_id": "89989365ebbfcd6bbdf7325a25e14465723bc327",
"snapshot_id": "6356ef80baabb7511e3132af1c9a91acaf29e427",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/sargon/icvpn-scripts/89989365ebbfcd6bbdf7325a25e14465723bc327/dns_formatter.py",
"visit_date": "2021-01-17T22:54:39.893859"
} | 2.359375 | stackv2 | from formatter import Formatter
from textwrap import dedent
from socket import AF_INET, AF_INET6, inet_pton, error as socket_error
def try_inet_pton(af, ip):
    "Return True when `ip` parses as a valid address for family `af`."
    try:
        inet_pton(af, ip)
    except socket_error:
        return False
    return True
class _DNSFormatter(Formatter):
    """Shared base for DNS config formatters.

    Filters each community's nameservers (optionally by address family) and
    hands usable (domains, servers) pairs to a subclass's _format_config.
    """
    # Server filters selectable via --filter.
    filters = {
        "v4": lambda value: try_inet_pton(AF_INET, value),
        "v6": lambda value: try_inet_pton(AF_INET6, value),
    }
    def populate_argument_parser(self, parser):
        "Register the --filter CLI option."
        parser.add_argument(
            "--filter",
            dest="filter",
            help="""Only include certain servers.
                    Possible choices: %s
                 """ %
            ", ".join(self.filters.keys()),
            choices=list(self.filters.keys()))
    def _map_communities(self, arguments, communities):
        """Return (community, data) items; data is None when a community has
        no usable domains/servers, otherwise {'domains': ..., 'servers': ...}.
        """
        # BUG FIX: the original referenced the undefined names
        # `filters[options.filter]` (instead of self.filters / arguments)
        # and filtered the server list twice.
        active = [self.filters[arguments.filter]] if arguments.filter else []
        filtered = dict()
        for community, data in communities:
            try:
                domains = data['domains']
                nameservers = data['nameservers']
            except (TypeError, KeyError):
                # Community entry without DNS data: skip entirely.
                continue
            servers = [s for s in nameservers if all(f(s) for f in active)]
            if len(domains) == 0 or len(servers) == 0:
                filtered[community] = None
            else:
                filtered[community] = dict({'domains': domains, 'servers': servers})
        return filtered.items()
    def generate_config(self, arguments, communities):
        "Emit config for every community, delegating to _format_config."
        communities = self._map_communities(arguments, communities)
        for community, data in communities:
            self.add_comment(community)
            if data is None:
                self.add_comment("No valid domains found")
            else:
                self._format_config(data['domains'], data['servers'])
class DnsmasqFormatter(_DNSFormatter):
    "Emits dnsmasq 'server=/domain/address' forwarding lines."
    def _format_config(self, domains, servers):
        "Append one forwarding line per (domain, server) pair."
        self.config.extend("server=/%s/%s" % (domain, server)
                           for domain in domains
                           for server in servers)
class BindFormatter(_DNSFormatter):
    "Emits BIND static-stub zone stanzas."
    def _format_config(self, domains, servers):
        # One zone block per domain; all servers listed in each block.
        for domain in domains:
            self.config.append(dedent("""
                zone "%s" {
                    type static-stub;
                    server-addresses { %s; };
                };
                """ % (domain, "; ".join(servers))).lstrip())
class BindForwardFormatter(_DNSFormatter):
    "Emits BIND forward-only zone stanzas."
    def _format_config(self, domains, servers):
        # One zone block per domain; all servers listed as forwarders.
        for domain in domains:
            self.config.append(dedent("""
                zone "%s" {
                    type forward;
                    forwarders { %s; };
                    forward only;
                };
                """ % (domain, "; ".join(servers))).lstrip())
class UnboundForwardFormatter(_DNSFormatter):
    """Emits unbound configuration: a `server:` section with per-domain
    local-zone/domain-insecure directives, followed by forward-zone blocks."""
    def generate_config(self, arguments, communities):
        communities = self._map_communities(arguments, communities)
        # forward-zone blocks must come after the whole server: section,
        # so they are collected separately and appended at the end.
        buffer = []
        self.add_comment(
            """
            This file is automatically generated.
            """)
        self.config.append('server:')
        self.config.append('\tlocal-zone: "10.in-addr.arpa" nodefault')
        for community, data in communities:
            if data is None:
                self.add_comment("No valid domains found")
                continue
            self.config.append('\n\t# %s' % community)
            for domain in data['domains']:
                if domain.endswith('.arpa'):
                    # Reverse zones: disable unbound's built-in blocking.
                    self.config.append('\tlocal-zone: "%s" nodefault' % domain)
                else:
                    # Forward zones: skip DNSSEC validation for these domains.
                    self.config.append('\tdomain-insecure: "%s"' % domain)
            buffer.append('\n#\n# %s\n#\n' % community)
            for domain in data['domains']:
                buffer.append('forward-zone:')
                buffer.append('\tname: "%s"' % domain)
                for server in data['servers']:
                    buffer.append('\tforward-addr: %s' % server)
        self.config = self.config + buffer
| 139 | 29.15 | 85 | 20 | 851 | python | [] | 0 | true | |
2024-11-18T18:05:45.216591+00:00 | 1,520,583,750,000 | 7b396ee7a42a3d6d13a2ce05911851c1a7280a99 | 2 | {
"blob_id": "7b396ee7a42a3d6d13a2ce05911851c1a7280a99",
"branch_name": "refs/heads/master",
"committer_date": 1520587886000,
"content_id": "834c4ebd683a9b3093dc1ad00ad760381f8f8b35",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "16a223d5cb6a96e1b6a9aa84636670fa7ddf7fda",
"extension": "py",
"filename": "environment.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2121,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/smarthome-web-portal/DB/api/environment.py",
"provenance": "stack-edu-0054.json.gz:568777",
"repo_name": "charliexp/SmartHome-Demo",
"revision_date": 1520583750000,
"revision_id": "27305be559f73cdf71629353df8d059664ce9053",
"snapshot_id": "b874a5fe68cb0704ea0af6edae6552a30ddfafac",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/charliexp/SmartHome-Demo/27305be559f73cdf71629353df8d059664ce9053/smarthome-web-portal/DB/api/environment.py",
"visit_date": "2020-03-17T01:31:41.203972"
} | 2.453125 | stackv2 | # -*- coding: utf-8 -*-
"""
CRUD operation for environment model
"""
from DB.api import database
from DB import exception
from DB.models import Environment
from DB.api import dbutils as utils
RESP_FIELDS = ['id', 'resource', 'resource_id', 'temperature', 'humidity', 'pressure', 'uv_index', 'created_at']
SRC_EXISTED_FIELD = {'id': 'id',
# 'uuid': 'uuid',
'temperature': 'temperature',
'humidity': 'humidity',
'pressure': 'pressure',
'uv_index': 'uv_index',
'resource_id': 'resource_id',
'created_at': 'created_at'
}
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS)
def new(session, src_dic, content=None):
    """Insert a new Environment row populated from `src_dic`.

    BUG FIX: the original used a mutable default argument (`content={}`)
    and mutated it, so stale keys leaked between calls; default to a fresh
    dict per call instead. Passing an explicit dict still works as before.
    """
    if content is None:
        content = {}
    for k, v in SRC_EXISTED_FIELD.items():
        content[k] = src_dic.get(v, None)
    return utils.add_db_object(session, Environment, **content)
def _get_env(session, resource_id, order_by=[], limit=None, **kwargs):
    """List Environment rows for one resource id (int) or several (list)."""
    if isinstance(resource_id, int):
        id_filter = {'eq': resource_id}
    elif isinstance(resource_id, list):
        id_filter = {'in': resource_id}
    else:
        raise exception.InvalidParameter('parameter resource id format are not supported.')
    return utils.list_db_objects(session, Environment, order_by=order_by,
                                 limit=limit, resource_id=id_filter, **kwargs)
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) # wrap the raw DB object into dict
def get_env_by_gateway_uuid(session, resource_id):
    # Return all environment readings for the given resource id(s).
    return _get_env(session, resource_id)
# get the latest data if exists
@database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) # wrap the raw DB object into dict
def get_latest_by_gateway_uuid(session, resource_id):
    "Return the newest environment reading for the resource, or None."
    rows = _get_env(session, resource_id, order_by=[('id', True)], limit=1)
    if rows:
        return rows[0]
    return None
if __name__ == '__main__':
    # print Button.resource.property.primaryjoin
    # BUG FIX: Python-2-only `print x` statement replaced with the call
    # form, which behaves identically on Python 2 and is valid on Python 3.
    print(get_latest_by_gateway_uuid(resource_id=2))
| 58 | 34.57 | 112 | 13 | 463 | python | [] | 0 | true | |
2024-11-18T18:05:45.285112+00:00 | 1,656,655,324,000 | a1276ef6fdb8b15fbc036f607af938a8167a8e55 | 2 | {
"blob_id": "a1276ef6fdb8b15fbc036f607af938a8167a8e55",
"branch_name": "refs/heads/master",
"committer_date": 1656655324000,
"content_id": "3a1c19d1fff76ffb23b49e8d8e650ef5b7d3353f",
"detected_licenses": [
"MIT"
],
"directory_id": "17887c1493c24e232de6ed65f4a7a1a715dad5de",
"extension": "py",
"filename": "user.py",
"fork_events_count": 2,
"gha_created_at": 1565768141000,
"gha_event_created_at": 1656655325000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 202299060,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14000,
"license": "MIT",
"license_type": "permissive",
"path": "/userman/user.py",
"provenance": "stack-edu-0054.json.gz:568778",
"repo_name": "NationalGenomicsInfrastructure/userman",
"revision_date": 1656655324000,
"revision_id": "47d81d0db016b2662b794a276b29938869fbde18",
"snapshot_id": "6b63545e36df8221cb561c84bb2a73adc09abe70",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/NationalGenomicsInfrastructure/userman/47d81d0db016b2662b794a276b29938869fbde18/userman/user.py",
"visit_date": "2022-07-05T20:50:54.094920"
} | 2.390625 | stackv2 | " Userman: User handlers. "
import json
import logging
import tornado.web
import pycountry
import requests
from . import constants
from . import settings
from . import utils
from .saver import DocumentSaver
from .requesthandler import RequestHandler
class UserSaver(DocumentSaver):
    "Saver that validates, normalizes and persists user documents."
    doctype = constants.USER
    def initialize(self):
        "Set defaults for a brand-new user document."
        self['status'] = constants.PENDING
        self['services'] = []
        self['teams'] = []
        self['created'] = utils.timestamp()
    def check_email(self, value):
        """Raise ValueError if given email value has wrong format.
        Raise KeyError if the value conflicts with another."""
        if not value:
            raise ValueError('email must be a non-empty value')
        # Unchanged email: nothing further to validate.
        if value == self.doc.get('email'): return
        if '/' in value:
            raise ValueError("slash '/' disallowed in email")
        parts = value.split('@')
        if len(parts) != 2:
            # NOTE(review): "correcly" is a typo in this user-facing message;
            # left untouched in this doc-only change.
            raise ValueError("at-sign '@' not used correcly in email")
        if len(parts[1].split('.')) < 2:
            raise ValueError('invalid domain name part in email')
        # Uniqueness check against the 'user/email' DB view.
        if len(list(self.db.view('user/email')[value])) > 0:
            raise KeyError("email already in use")
    def check_username(self, value):
        """Raise ValueError if the given username has wrong format.
        Raise KeyError if the value conflicts with another."""
        # Username is optional; empty/unchanged values pass.
        if not value: return
        if value == self.doc.get('username'): return
        if '/' in value:
            raise ValueError("slash '/' disallowed in username")
        if '@' in value:
            raise ValueError("at-sign '@' disallowed in username")
        if len(list(self.db.view('user/username')[value])) > 0:
            raise KeyError('username already in use')
    def convert_email(self, value):
        "Convert email value to lower case."
        return value.lower()
    def check_password(self, value):
        "Check password quality."
        utils.check_password_quality(value)
    def convert_password(self, value):
        # Passwords are stored hashed, never in clear text.
        return utils.hashed_password(value)
    def check_status(self, value):
        "Check status value."
        if value not in constants.STATUSES:
            raise ValueError('invalid status value')
class UserMixin(object):
    "Access-control helpers shared by the user request handlers."
    def may_access_user(self, user):
        "Return True when the logged-in user is an admin or the account owner."
        current = self.current_user
        if not current:
            return False
        if self.is_admin():
            return True
        return user['email'] == current['email']
    def check_access_user(self, user):
        "Raise HTTP 403 unless the current user may access `user`."
        if not self.may_access_user(user):
            raise tornado.web.HTTPError(403, 'you may not access user')
class User(UserMixin, RequestHandler):
    "Display a user account."
    @tornado.web.authenticated
    def get(self, email):
        user = self.get_user(email)
        # Only admins or the account owner may view the page.
        self.check_access_user(user)
        services = [self.get_service(n) for n in user['services']]
        teams = [self.get_team(n) for n in user['teams']]
        self.render('user.html',
                    user=user,
                    services=services,
                    teams=teams,
                    logs=self.get_logs(user['_id']))
class UserEdit(UserMixin, RequestHandler):
    "Edit a user account."
    @tornado.web.authenticated
    def get(self, email):
        "Render the edit form for the given account."
        user = self.get_user(email)
        self.check_access_user(user)
        teams = [self.get_team(n) for n in user['teams']]
        leading = [t for t in teams if email in t['leaders']]
        self.render('user_edit.html',
                    user=user,
                    services=self.get_all_services(),
                    teams=teams,
                    leading=leading,
                    countries=sorted([c.name for c in pycountry.countries]))
    @tornado.web.authenticated
    def post(self, email):
        "Apply the submitted edits; role/services changes require admin."
        self.check_xsrf_cookie()
        user = self.get_user(email)
        self.check_access_user(user)
        with UserSaver(doc=user, rqh=self) as saver:
            # Only admins may change role and service membership.
            if self.is_admin():
                role = self.get_argument('role', None)
                if role in constants.ROLES:
                    saver['role'] = role
                saver['services'] = self.get_arguments('service')
            saver['username'] = self.get_argument('username', None)
            saver['name'] = self.get_argument('name')
            saver['department'] = self.get_argument('department', None)
            saver['university'] = self.get_argument('university', None)
            saver['country'] = self.get_argument('country')
        self.redirect(self.reverse_url('user', user['email']))
class UserApproveMixin(object):
    "Mixin to factor out common approval code."
    def approve_user(self, user):
        """Approve the given user: store a time-limited activation code and
        email the activation instructions to the account's address."""
        assert self.is_admin()
        with UserSaver(doc=user, rqh=self) as saver:
            activation_code = utils.get_iuid()
            deadline = utils.timestamp(days=settings['ACTIVATION_PERIOD'])
            saver['activation'] = dict(code=activation_code, deadline=deadline)
            saver['status'] = constants.APPROVED
        url = self.get_absolute_url('user_activate')
        url_with_params = self.get_absolute_url('user_activate',
                                                email=user['email'],
                                                activation_code=activation_code)
        # BUG FIX: the template file was opened without ever being closed;
        # use a context manager so the handle is released promptly.
        with open(settings['ACTIVATION_EMAIL']) as template_file:
            text = template_file.read().format(
                period=settings['ACTIVATION_PERIOD'],
                url=url,
                url_with_params=url_with_params,
                email=user['email'],
                activation_code=activation_code)
        self.send_email(user,
                        self.current_user,
                        'Userman account activation',
                        text)
        self.redirect(self.reverse_url('user', user['email']))
class UserCreate(UserApproveMixin, RequestHandler):
    """Create a user account. Anyone may do this.
    If non-admin, then send an email to the admin requesting approval review.
    If admin, approve immediately and send that email instead."""
    def get(self):
        "Display the user account creation form."
        self.render('user_create.html',
                    countries=sorted([c.name for c in pycountry.countries]))
    def post(self):
        "Create the user account."
        self.check_xsrf_cookie()
        # Some fields initialized by UserSaver
        with UserSaver(rqh=self) as saver:
            saver['email'] = self.get_argument('email')
            saver['username'] = self.get_argument('username', None)
            saver['role'] = constants.USER
            saver['name'] = self.get_argument('name')
            saver['department'] = self.get_argument('department', None)
            saver['university'] = self.get_argument('university', None)
            saver['country'] = self.get_argument('country')
            # New accounts are pre-subscribed to all public services.
            saver['services'] = [r.key for r in self.db.view('service/public')]
        user = saver.doc
        if self.is_admin(): # Admin creator: activate immediately.
            self.approve_user(user)
        else: # Non-admin creator: require review by an admin.
            text = "Review Userman account {email} for approval: {url}".format(
                email=user['email'],
                url=self.get_absolute_url('user', user['email']))
            for admin in self.get_admins():
                self.send_email(admin,
                                admin,
                                'Review Userman account for approval',
                                text)
            url = self.reverse_url('user_acknowledge', user['email'])
            self.redirect(url)
class UserAcknowledge(RequestHandler):
    """Acknowledge the creation of the user account.
    Explain what is going to happen."""
    def get(self, name):
        "Render the acknowledgement page; 409 unless the account is pending."
        user = self.get_user(name)
        if user['status'] == constants.PENDING:
            self.render('user_acknowledge.html', user=user)
        else:
            raise tornado.web.HTTPError(409, 'account not pending')
class UserApprove(UserApproveMixin, RequestHandler):
    "Approve a user account; email the activation code."
    @tornado.web.authenticated
    def post(self, name):
        "Admin-only: approve a pending account (409 otherwise)."
        self.check_xsrf_cookie()
        self.check_admin()
        user = self.get_user(name)
        if user['status'] != constants.PENDING:
            raise tornado.web.HTTPError(409, 'account not pending')
        self.approve_user(user)
class UserBlock(RequestHandler):
    "Block a user account."
    @tornado.web.authenticated
    def post(self, name):
        "Admin-only: set the account status to blocked (no-op if already blocked)."
        self.check_xsrf_cookie()
        self.check_admin()
        user = self.get_user(name)
        if user['status'] != constants.BLOCKED:
            # Admin accounts cannot be blocked.
            if user['role'] == constants.ADMIN:
                raise tornado.web.HTTPError(409, 'cannot block admin account')
            with UserSaver(doc=user, rqh=self) as saver:
                saver['status'] = constants.BLOCKED
        self.redirect(self.reverse_url('user', user['email']))
class UserUnblock(RequestHandler):
    "Unblock a user account."
    @tornado.web.authenticated
    def post(self, name):
        "Admin-only: set the account status to active (no-op if already active)."
        self.check_xsrf_cookie()
        self.check_admin()
        user = self.get_user(name)
        if user['status'] != constants.ACTIVE:
            with UserSaver(doc=user, rqh=self) as saver:
                saver['status'] = constants.ACTIVE
        self.redirect(self.reverse_url('user', user['email']))
class UserActivate(RequestHandler):
    "Activate the user account, setting the password."
    def get(self):
        "Show the activation form, pre-filled from query parameters."
        self.render('user_activate.html',
                    email=self.get_argument('email', ''),
                    activation_code=self.get_argument('activation_code', ''))
    def post(self):
        """Validate email, activation code and new password, then activate
        the account and log the user in. On any validation error the form
        is re-rendered with the error message."""
        self.check_xsrf_cookie()
        email = self.get_argument('email', None)
        activation_code = self.get_argument('activation_code', None)
        try:
            if not email:
                raise ValueError('missing email')
            if not activation_code:
                raise ValueError('missing activation code')
            password = self.get_argument('password', '')
            utils.check_password_quality(password)
            confirm_password = self.get_argument('confirm_password', '')
            if password != confirm_password:
                raise ValueError('passwords do not match')
            # Deliberately vague: do not reveal which part was wrong.
            message = 'no such user, or invalid or expired activation code'
            try:
                user = self.get_user(email)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; catch Exception instead.
            except Exception:
                raise ValueError(message)
            activation = user.get('activation', dict())
            if activation.get('code') != activation_code:
                raise ValueError(message)
            if activation.get('deadline', '') < utils.timestamp():
                raise ValueError(message)
            with UserSaver(doc=user, rqh=self) as saver:
                del saver['activation']
                saver['password'] = password
                saver['status'] = constants.ACTIVE
            # Log the newly activated user in.
            self.set_secure_cookie(constants.USER_COOKIE_NAME, email)
            self.redirect(self.reverse_url('user', email))
        except ValueError as msg:
            self.render('user_activate.html',
                        error=str(msg),
                        email=email,
                        activation_code=activation_code)
class UserReset(RequestHandler):
    """Reset the user password, sending out a fresh activation code.

    POST scrambles the stored password, attaches a new activation code
    with a deadline, and emails activation instructions to the user.
    Failures are deliberately silent towards the client so the existence
    of an email address is not revealed.
    """

    def get(self):
        self.render('user_reset.html')

    def post(self):
        self.check_xsrf_cookie()
        try:
            user = self.get_user(self.get_argument('email'))
            if user.get('status') not in (constants.APPROVED, constants.ACTIVE):
                raise ValueError('account status not active')
            with UserSaver(doc=user, rqh=self) as saver:
                activation_code = utils.get_iuid()
                deadline = utils.timestamp(days=settings['ACTIVATION_PERIOD'])
                saver['activation'] = dict(code=activation_code, deadline=deadline)
                # Scramble the password so the old one can no longer be used.
                saver['password'] = utils.get_iuid()
                saver['status'] = constants.APPROVED
            url = self.get_absolute_url('user_activate')
            url_with_params = self.get_absolute_url('user_activate',
                                                    email=user['email'],
                                                    activation_code=activation_code)
            # Bug fix: open().read() leaked the file handle; the context
            # manager guarantees it is closed.
            with open(settings['RESET_EMAIL']) as infile:
                template = infile.read()
            text = template.format(
                period=settings['ACTIVATION_PERIOD'],
                url=url,
                url_with_params=url_with_params,
                email=user['email'],
                activation_code=activation_code)
            self.send_email(user,
                            self.get_admins()[0],  # Arbitrarily the first admin
                            'Userman account password reset',
                            text)
        except (tornado.web.HTTPError, ValueError) as msg:
            logging.debug("account reset error: %s", msg)
        self.redirect(self.reverse_url('home'))
class Users(RequestHandler):
    """List of all user accounts (admins only)."""

    @tornado.web.authenticated
    def get(self):
        self.check_admin()
        rows = self.db.view('user/email', include_docs=True)
        self.render('users.html', users=[row.doc for row in rows])
class UsersPending(RequestHandler):
    """List of pending user accounts (admins only)."""

    @tornado.web.authenticated
    def get(self):
        self.check_admin()
        rows = self.db.view('user/pending', include_docs=True)
        self.render('users_pending.html', users=[row.doc for row in rows])
class UsersBlocked(RequestHandler):
    """List of blocked user accounts (admins only)."""

    @tornado.web.authenticated
    def get(self):
        self.check_admin()
        rows = self.db.view('user/blocked', include_docs=True)
        self.render('users_blocked.html', users=[row.doc for row in rows])
| 366 | 37.25 | 84 | 18 | 2,806 | python | [] | 0 | true | |
2024-11-18T18:05:45.516829+00:00 | 1,555,971,647,000 | 802f1ee328eed72d7ebaefc238221a6e2473c2a0 | 3 | {
"blob_id": "802f1ee328eed72d7ebaefc238221a6e2473c2a0",
"branch_name": "refs/heads/master",
"committer_date": 1555971647000,
"content_id": "839d3604c6ec26c623a189a8fa3d34686739a921",
"detected_licenses": [
"MIT"
],
"directory_id": "3487e0a57626fb77b5e7a2b8ca05967d091bfd46",
"extension": "py",
"filename": "logger.py",
"fork_events_count": 0,
"gha_created_at": 1547464283000,
"gha_event_created_at": 1553191368000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 165647602,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license": "MIT",
"license_type": "permissive",
"path": "/services/api/flask_app/logger.py",
"provenance": "stack-edu-0054.json.gz:568782",
"repo_name": "jakkso/flask_translator",
"revision_date": 1555971647000,
"revision_id": "eae2f53e611c15f1c43d34bee1bfb047e66e9fad",
"snapshot_id": "b08d71b6332ba8600dd1ab3c117c19fc76b3fe8a",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/jakkso/flask_translator/eae2f53e611c15f1c43d34bee1bfb047e66e9fad/services/api/flask_app/logger.py",
"visit_date": "2020-04-16T13:54:42.759605"
} | 2.875 | stackv2 | """
"""
import logging
from flask import request
class RequestFormatter(logging.Formatter):
    """Formatter that injects the current Flask request's URL and remote
    address into every log record, so the format string can reference
    %(url)s and %(remote_addr)s."""

    def format(self, record):
        record.url, record.remote_addr = request.url, request.remote_addr
        return super().format(record)
def fmt(method: str, username: str, description: str) -> str:
    """Build a uniform log message line.

    :param method: HTTP method name
    :param username: name of the acting user
    :param description: event description
    :return: formatted log message string
    """
    parts = (f"Method: {method}", f"Username: `{username}`", description)
    return " | ".join(parts)
# Module-level formatter instance: the first line records who requested
# which URL, the second carries the usual level/module/message fields.
formatter = RequestFormatter(
    "[%(asctime)s] %(remote_addr)s requested %(url)s\n"
    "%(levelname)s in %(module)s: %(message)s"
)
| 30 | 21.67 | 71 | 11 | 156 | python | [] | 0 | true | |
2024-11-18T18:05:45.845735+00:00 | 1,546,505,158,000 | 3a16caa3024ddf7a923ceb439c9222a19d791635 | 4 | {
"blob_id": "3a16caa3024ddf7a923ceb439c9222a19d791635",
"branch_name": "refs/heads/master",
"committer_date": 1546505158000,
"content_id": "fe1b402847974895a719e2409dad259e001e7416",
"detected_licenses": [
"MIT"
],
"directory_id": "8bec264dedd43697074890e3e4ac2368f007dae4",
"extension": "py",
"filename": "createcsv.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 163937146,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license": "MIT",
"license_type": "permissive",
"path": "/createcsv.py",
"provenance": "stack-edu-0054.json.gz:568786",
"repo_name": "ananasbaba/GmailAutomation",
"revision_date": 1546505158000,
"revision_id": "58003fbcd8eb9c5612c6c7eac4b1db7211367913",
"snapshot_id": "7739eb2486e82e11cdcb96f46d3fddc3a0b3958f",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/ananasbaba/GmailAutomation/58003fbcd8eb9c5612c6c7eac4b1db7211367913/createcsv.py",
"visit_date": "2020-04-14T15:47:12.065367"
} | 3.609375 | stackv2 | import csv
def adresswriter(name, email):
    """Append one (name, email) row to data.csv.

    :param name: contact name(s) for the row
    :param email: email address for the row
    """
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows. Plain append mode suffices
    # ('a+' additionally opened the file for reading, which was unused).
    with open('data.csv', 'a', newline='') as csvfile:
        filewriter = csv.writer(csvfile, delimiter=',', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        filewriter.writerow([name, email])
# Interactive loop: keep collecting address-book entries until the
# process is interrupted; report the running count after each entry.
entry_count = 0
while True:
    email_address = input('enter email address: ')
    contact_name = input('enter name/names: ')
    adresswriter(contact_name, email_address)
    entry_count += 1
    print('No of entries={number}'.format(number=entry_count))
| 16 | 25.06 | 96 | 12 | 114 | python | [] | 0 | true | |
2024-11-18T18:05:45.896623+00:00 | 1,459,329,252,000 | 6332ead8e3decd5eb3750b8a88834aa153641bce | 2 | {
"blob_id": "6332ead8e3decd5eb3750b8a88834aa153641bce",
"branch_name": "refs/heads/master",
"committer_date": 1459329252000,
"content_id": "af7fee61b0d5426e89b929f3901a907d2549829d",
"detected_licenses": [
"MIT"
],
"directory_id": "1d63cf9c51f14b70940263150cd93e3896c2ae55",
"extension": "py",
"filename": "shape.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 50328035,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1187,
"license": "MIT",
"license_type": "permissive",
"path": "/ivf/datasets/shape.py",
"provenance": "stack-edu-0054.json.gz:568787",
"repo_name": "tody411/ImageViewerFramework",
"revision_date": 1459329252000,
"revision_id": "5c183c34e65494b6af1287e70152b995a868c6ac",
"snapshot_id": "07393eee32ee4488578da3133777959d043c9f7a",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/tody411/ImageViewerFramework/5c183c34e65494b6af1287e70152b995a868c6ac/ivf/datasets/shape.py",
"visit_date": "2021-01-10T02:44:50.435043"
} | 2.421875 | stackv2 | # -*- coding: utf-8 -*-
## @package ivf.datasets.threed_model
#
# ivf.datasets.threed_model utility package.
# @author tody
# @date 2016/02/18
import os
from ivf.datasets.datasets import datasetRootDir, subDirectory, datasetFiles, datasetFileNames
def shapeDatasetDir():
    """Return the path of the "3dmodel" dataset directory."""
    root = datasetRootDir()
    return os.path.join(root, "3dmodel")
def shapeNames():
    """Return the shape names (PNG file names without extension)."""
    png_files = datasetFileNames(shapeDatasetDir(), file_filter=".png")
    return [os.path.splitext(png)[0] for png in png_files]
def shapeFiles():
    """Return the PNG shape files in the dataset directory."""
    dataset_dir = shapeDatasetDir()
    return datasetFiles(dataset_dir, file_filter=".png")
def shapeFile(shape_name):
    """Return the PNG file path for the given shape name."""
    file_name = shape_name + ".png"
    return os.path.join(shapeDatasetDir(), file_name)
def shapeResultsDir():
    """Return the "results" subdirectory of the shape dataset directory."""
    dataset_dir = shapeDatasetDir()
    return subDirectory(dataset_dir, "results")
def shapeResultDir(result_name):
    """Return the subdirectory holding the named result."""
    results_root = shapeResultsDir()
    return subDirectory(results_root, result_name)
def shapeResultFiles(result_name):
    """Return the PNG files stored for the named result."""
    return datasetFiles(shapeResultDir(result_name), file_filter=".png")
def shapeResultFile(result_name, data_name, file_ext=".png"):
    """Return the file path for one data item of the named result."""
    file_name = data_name + file_ext
    return os.path.join(shapeResultDir(result_name), file_name)
| 44 | 25.98 | 94 | 11 | 282 | python | [] | 0 | true | |
2024-11-18T18:05:46.003289+00:00 | 1,622,992,309,000 | 8040661275f659e441d8d8ed5d98cb211cfd746e | 2 | {
"blob_id": "8040661275f659e441d8d8ed5d98cb211cfd746e",
"branch_name": "refs/heads/master",
"committer_date": 1622992309000,
"content_id": "2e87b042e559de792219aa4c9b318e1d4b55f78e",
"detected_licenses": [
"MIT"
],
"directory_id": "16b56c2b25126fd11a9e191f79473d13c33e6013",
"extension": "py",
"filename": "prep_data_for_EBM.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4657,
"license": "MIT",
"license_type": "permissive",
"path": "/prep_data_for_EBM.py",
"provenance": "stack-edu-0054.json.gz:568789",
"repo_name": "rnaimehaom/rxn-ebm",
"revision_date": 1622992309000,
"revision_id": "8480822d0d8ad74e46edf693ad1cdc787291f422",
"snapshot_id": "246cf57bd00db17b6ba0c6ebc20d607010825f39",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/rnaimehaom/rxn-ebm/8480822d0d8ad74e46edf693ad1cdc787291f422/prep_data_for_EBM.py",
"visit_date": "2023-05-15T06:56:29.278776"
} | 2.34375 | stackv2 | import argparse
import os
from rxnebm.data.preprocess import clean_smiles, prep_crem, prep_nmslib, smi_to_fp
def parse_args():
"""This is directly copied from trainEBM.py"""
parser = argparse.ArgumentParser("trainEBM.py")
parser.add_argument(
"--train_from_scratch",
help="Whether to train from scratch (True) or resume (False)",
type=bool,
default=True,
)
# file paths
parser.add_argument(
"--raw_smi_pre",
help="File prefix of original raw rxn_smi csv",
type=str,
default="schneider50k_raw",
)
parser.add_argument(
"--clean_smi_pre",
help="File prefix of cleaned rxn_smi pickle",
type=str,
default="50k_clean_rxnsmi_noreagent",
)
parser.add_argument(
"--raw_smi_root",
help="Full path to folder containing raw rxn_smi csv",
type=str,
)
parser.add_argument(
"--clean_smi_root",
help="Full path to folder that will contain cleaned rxn_smi pickle",
type=str,
)
# args for clean_rxn_smis_all_phases
parser.add_argument(
"--dataset_name",
help='Name of dataset: "50k", "STEREO" or "FULL"',
type=str,
default="50k",
)
parser.add_argument(
"--split_mode",
help='Whether to keep rxn_smi with multiple products: "single" or "multi"',
type=str,
default="multi",
)
parser.add_argument(
"--lines_to_skip", help="Number of lines to skip", type=int, default=1
)
parser.add_argument(
"--keep_reag",
help="Whether to keep reagents in output SMILES string",
type=bool,
default=False,
)
parser.add_argument(
"--keep_all_rcts",
help="Whether to keep all rcts even if they don't contribute atoms to product",
type=bool,
default=False,
)
parser.add_argument(
"--remove_dup_rxns",
help="Whether to remove duplicate rxn_smi",
type=bool,
default=True,
)
parser.add_argument(
"--remove_rct_mapping",
help="Whether to remove atom map if atom in rct is not in product",
type=bool,
default=True,
)
parser.add_argument(
"--remove_all_mapping",
help="Whether to remove all atom map",
type=bool,
default=True,
)
parser.add_argument(
"--save_idxs",
help="Whether to save all bad indices to a file in same dir as clean_smi",
type=bool,
default=True,
)
parser.add_argument(
"--parallelize",
help="Whether to parallelize computation across all available cpus",
type=bool,
default=True,
)
# args for get_uniq_mol_smis_all_phases: rxn_smi_file_prefix is same as
# clean_smi_pre, root is same as clean_smi_root
parser.add_argument(
"--mol_smi_filename",
help="Filename of output pickle file of all unique mol smis",
type=str,
default="50k_mol_smis",
)
parser.add_argument(
"--save_reags",
help="Whether to save unique reagent SMILES strings as separate file",
type=bool,
default=False,
)
return parser.parse_args()
def prepare_data(args):
# TODO: parse all arguments
if args.clean_smi_root:
print(f"Making dir {args.clean_smi_root}")
os.makedirs(args.clean_smi_root, exist_ok=True)
# TODO: add all arguments
clean_smiles.clean_rxn_smis_50k_all_phases(
input_file_prefix=args.raw_smi_pre,
output_file_prefix=args.clean_smi_pre,
dataset_name=args.dataset_name,
lines_to_skip=args.lines_to_skip,
keep_all_rcts=args.keep_all_rcts,
remove_dup_rxns=args.remove_dup_rxns,
remove_rct_mapping=args.remove_rct_mapping,
remove_all_mapping=args.remove_all_mapping,
)
clean_smiles.remove_overlapping_rxn_smis(
rxn_smi_file_prefix=args.clean_smi_pre,
root=args.clean_smi_root,
)
clean_smiles.get_uniq_mol_smis_all_phases(
rxn_smi_file_prefix=args.clean_smi_pre,
root=args.clean_smi_root,
output_filename=args.mol_smi_filename,
save_reagents=args.save_reags,
)
smi_to_fp.gen_count_mol_fps_from_file()
smi_to_fp.gen_lookup_dicts_from_file()
prep_nmslib.build_and_save_index()
prep_crem.gen_crem_negs(
num_neg=150, max_size=3, radius=2, frag_db_filename="replacements02_sa2.db"
)
print("\nSuccessfully prepared required data!")
print("#" * 50 + "\n\n")
if __name__ == "__main__":
args = parse_args()
prepare_data(args)
| 160 | 28.11 | 87 | 12 | 1,127 | python | [] | 0 | true | |
2024-11-18T18:05:46.316567+00:00 | 1,680,854,992,000 | 28acc4967de2f8d78443da54b955b2829bc6956a | 2 | {
"blob_id": "28acc4967de2f8d78443da54b955b2829bc6956a",
"branch_name": "refs/heads/master",
"committer_date": 1680854992000,
"content_id": "4d2cb48436d0ccb9e065fc11bbf615ca06821d73",
"detected_licenses": [
"MIT"
],
"directory_id": "c9fe7a29cceffeed3823609b5f4b2dfe401c2e19",
"extension": "py",
"filename": "bar_yaxis_formatter.py",
"fork_events_count": 627,
"gha_created_at": 1556196230000,
"gha_event_created_at": 1616407070000,
"gha_language": "HTML",
"gha_license_id": "MIT",
"github_id": 183432412,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license": "MIT",
"license_type": "permissive",
"path": "/Bar/bar_yaxis_formatter.py",
"provenance": "stack-edu-0054.json.gz:568792",
"repo_name": "pyecharts/pyecharts-gallery",
"revision_date": 1680854992000,
"revision_id": "2ced79907ec2c16af50b18457ba615898e65e126",
"snapshot_id": "79ece0cc4fb037ceb36346a8e4558a8522265e3a",
"src_encoding": "UTF-8",
"star_events_count": 1042,
"url": "https://raw.githubusercontent.com/pyecharts/pyecharts-gallery/2ced79907ec2c16af50b18457ba615898e65e126/Bar/bar_yaxis_formatter.py",
"visit_date": "2023-04-15T05:21:14.544668"
} | 2.453125 | stackv2 | from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.faker import Faker
c = (
Bar()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values())
.add_yaxis("商家B", Faker.values())
.set_global_opts(
title_opts=opts.TitleOpts(title="Bar-Y 轴 formatter"),
yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(formatter="{value} /月")),
)
.render("bar_yaxis_formatter.html")
)
| 16 | 27.19 | 88 | 17 | 123 | python | [] | 0 | true | |
2024-11-18T18:05:47.825342+00:00 | 1,523,226,785,000 | d92bff9d07c93b4bf2cb825a0b311a0f571da528 | 3 | {
"blob_id": "d92bff9d07c93b4bf2cb825a0b311a0f571da528",
"branch_name": "refs/heads/master",
"committer_date": 1523226785000,
"content_id": "7a17779f3b0ab3aa1b6654552ffcf1072539d160",
"detected_licenses": [
"MIT"
],
"directory_id": "d91b9e75b6832aa8998c6acd7d61f150fa08f23e",
"extension": "py",
"filename": "railfence.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 123343316,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1564,
"license": "MIT",
"license_type": "permissive",
"path": "/railfence.py",
"provenance": "stack-edu-0054.json.gz:568797",
"repo_name": "rawalshree/classical-ciphers",
"revision_date": 1523226785000,
"revision_id": "fce82af1e01901447c4d5f4b54b047c69b681f8e",
"snapshot_id": "0b2900b449236cc16d8e912766f974a3a05999de",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/rawalshree/classical-ciphers/fce82af1e01901447c4d5f4b54b047c69b681f8e/railfence.py",
"visit_date": "2021-01-25T10:13:35.611182"
} | 3.09375 | stackv2 | '''
Owner - Rawal Shree
Email - rawalshreepal000@gmail.com
Github - https://github.com/rawalshree
'''
import math
global plain
global cipher
global Success
Success = False
plain = ""
cipher = ""
class Railfence:
def setKey(self, key):
global Success
try:
self.key = int(key)
if self.key > 0:
Success = True
except:
pass
def encryption(self, plainText):
global cipher, Success
self.plainText = plainText
if Success:
for x in range(self.key):
for y in range(x, len(self.plainText), self.key):
cipher += self.plainText[y]
return cipher
else:
print("Invalid Key")
return self.plainText
def decryption(self, cipherText):
global plain
self.cipherText = cipherText
if Success:
diff = len(self.cipherText) % self.key
width = int(math.ceil(len(self.cipherText) / (self.key * 1.0)))
for x in range(width):
z = x
while z < len(self.cipherText) and len(plain) < len(self.cipherText):
if (z < width * diff) or diff == 0:
plain += self.cipherText[z]
z += width
else:
plain += self.cipherText[z]
z += width - 1
return plain
else:
print("Invalid Key")
return self.plainText | 66 | 22.71 | 85 | 19 | 346 | python | [] | 0 | true | |
2024-11-18T18:05:47.942427+00:00 | 1,693,585,381,000 | ba30087c38abe46c4dc4415c1a63caaf047b5422 | 3 | {
"blob_id": "ba30087c38abe46c4dc4415c1a63caaf047b5422",
"branch_name": "refs/heads/main",
"committer_date": 1693585381000,
"content_id": "fcda9904a9e9694b8e8a4952261b2681baa39a3e",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"directory_id": "d51f530078404a24f1844b53b037bbfed238a784",
"extension": "py",
"filename": "rekfunction.py",
"fork_events_count": 6037,
"gha_created_at": 1471547217000,
"gha_event_created_at": 1694710322000,
"gha_language": "Java",
"gha_license_id": "Apache-2.0",
"github_id": 66023605,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1751,
"license": "Apache-2.0,MIT",
"license_type": "permissive",
"path": "/applications/photo-asset-manager/cdk/rekognition_photo_analyzer/rekfunction.py",
"provenance": "stack-edu-0054.json.gz:568799",
"repo_name": "awsdocs/aws-doc-sdk-examples",
"revision_date": 1693585381000,
"revision_id": "dec41fb589043ac9d8667aac36fb88a53c3abe50",
"snapshot_id": "fff85d1b4119fe3331174396a5723c7f054203eb",
"src_encoding": "UTF-8",
"star_events_count": 8240,
"url": "https://raw.githubusercontent.com/awsdocs/aws-doc-sdk-examples/dec41fb589043ac9d8667aac36fb88a53c3abe50/applications/photo-asset-manager/cdk/rekognition_photo_analyzer/rekfunction.py",
"visit_date": "2023-09-03T19:50:57.809260"
} | 2.5625 | stackv2 | # This code is a sample only. Not for use in production.
#
# Author: Katreena Mullican
# Contact: mullicak@amazon.com
#
import boto3
import os
import json
s3 = boto3.client('s3')
rekognition = boto3.client('rekognition')
dynamodb = boto3.client('dynamodb')
def handler(event, context):
    """Label an S3-uploaded image with Rekognition and persist the results.

    Triggered by an S3 object event. Writes the raw label JSON to
    ``results/<name>.json`` in the same bucket and stores the label
    names in the DynamoDB table named by the TABLE_NAME env variable.
    NOTE(review): S3 event keys are URL-encoded; keys containing spaces
    or special characters may need unquote_plus — confirm.

    :param event: S3 event notification (first record's key is used)
    :param context: Lambda context object (unused)
    :raises Exception: re-raises any failure after logging it
    """
    bucket_name = (os.environ['BUCKET_NAME'])
    key = event['Records'][0]['s3']['object']['key']
    image = {
        'S3Object': {
            'Bucket': bucket_name,
            'Name': key
        }
    }
    try:
        # Calls Amazon Rekognition DetectLabels API to classify images in Amazon Simple Storage Service (Amazon S3).
        response = rekognition.detect_labels(Image=image, MaxLabels=10, MinConfidence=70)
        # Print response to console, visible with Amazon CloudWatch logs.
        print(key,response["Labels"])
        # Write results to JSON file in bucket results folder.
        json_labels = json.dumps(response["Labels"])
        filename = os.path.basename(key)
        filename_prefix = os.path.splitext(filename)[0]
        obj = s3.put_object(Body=json_labels, Bucket=bucket_name, Key="results/"+filename_prefix+".json")
        # Parse the JSON for Amazon DynamoDB.
        db_result = []
        db_labels = json.loads(json_labels)
        for label in db_labels:
            db_result.append(label["Name"])
        # Write results to DynamoDB.
        dynamodb.put_item(TableName=(os.environ['TABLE_NAME']),
            Item = {
                'image_name':{'S': key},
                'labels':{'S': str(db_result)}
            }
        )
        return response
    except Exception as e:
        # Log and re-raise so the Lambda invocation is marked as failed.
        print(e)
        print("Error processing object {} from bucket {}. ".format(key, bucket_name))
        raise e
2024-11-18T18:05:48.482099+00:00 | 1,633,365,751,000 | bbefe569b43449a354c0f0f39fc14435447c2456 | 3 | {
"blob_id": "bbefe569b43449a354c0f0f39fc14435447c2456",
"branch_name": "refs/heads/main",
"committer_date": 1633365751000,
"content_id": "324904ce1c6219304616d7ef816834d92dec3937",
"detected_licenses": [
"MIT",
"MIT-0"
],
"directory_id": "97cfb911dd043a130a5467aceb4726bdd3ef5c4b",
"extension": "py",
"filename": "debugger.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2668,
"license": "MIT,MIT-0",
"license_type": "permissive",
"path": "/samples/debugger/debugger.py",
"provenance": "stack-edu-0054.json.gz:568804",
"repo_name": "rssnjee/cloudwatch-custom-widgets-samples",
"revision_date": 1633365751000,
"revision_id": "772d5d136fa89b4b5fb50de17ddbb2e7aa10a6ea",
"snapshot_id": "fe192be81d931941a9af3f62453def1915a69200",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/rssnjee/cloudwatch-custom-widgets-samples/772d5d136fa89b4b5fb50de17ddbb2e7aa10a6ea/samples/debugger/debugger.py",
"visit_date": "2023-08-03T03:22:39.676174"
} | 2.765625 | stackv2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# CloudWatch Custom Widget sample: simple debugger, displays form and parameters passed to widget
import json
import os
DOCS = """
## Custom Widget Debugger
A simpler "debugger" custom widget that prints out:
* the Lambda **event** oject including the **widgetContext** parameter passed to the widget by CloudWatch Dashboards
* the Lambda **context** object
* the Lambda enivronment variables
### Widget parameters
Just pass in any parameters you want to see how they are sent to the Lambda script.
### Example parameters
``` yaml
---
param1: value
param2:
- entry1
- entry2
param3:
sub: 7
```"""
def lambda_handler(event, context):
    """Render the Custom Widget debugger.

    Returns DOCS when CloudWatch sends a ``describe`` event; otherwise
    returns HTML echoing the submitted form values plus the Lambda
    event, context and environment variables.
    """
    if 'describe' in event:
        return DOCS

    # Local import keeps the module's import block untouched.
    import html

    form = event['widgetContext']['forms']['all']
    # Security fix: the form values and the pretty-printed dumps are
    # user-controlled and were previously injected into the markup
    # verbatim (XSS); escape everything before interpolation. Also
    # renamed the local from 'input' to avoid shadowing the builtin.
    form_input = html.escape(form.get('input', ''), quote=True)
    stage = html.escape(form.get('stage', 'prod'), quote=True)

    prettyEvent = html.escape(json.dumps(event, indent=4, sort_keys=True))
    prettyContext = html.escape(
        json.dumps(context.__dict__, indent=4, sort_keys=True, default=str))
    prettyEnv = ""
    for key, val in os.environ.items():
        prettyEnv += f"{key}={val}\n"
    prettyEnv = html.escape(prettyEnv)

    return f"""
    <form>
        <table>
            <tr>
                <td>Input</td>
                <td><input name="input" value="{form_input}"></td>
                <td>Stage</td>
                <td><input type="radio" name="stage" id="integ" value="integ" {"checked" if stage == "integ" else ""}><label for="integ">Integ</label></td>
                <td><input type="radio" name="stage" id="prod" value="prod" {"checked" if stage == "prod" else ""}><label for="prod">Prod</label></td>
                <td>
                    <a class="btn">Popup</a>
                    <cwdb-action action="html" display="popup">
                        <h1>Form values:</h1>
                        <table>
                            <tr><td>Input:</td><td><b>{form_input}</b></td></tr>
                            <tr><td>Stage:</td><td><b>{stage}</b></td></tr>
                        </table>
                    </cwdb-action>
                </td>
                <td>
                    <a class="btn btn-primary">Submit</a>
                    <cwdb-action action="call" endpoint="{context.invoked_function_arn}"></cwdb-action>
                </td>
            </tr>
        </table>
    </form>
    <p>
    <h1>event</h1>
    <pre>{prettyEvent}</pre>
    <h1>context</h1>
    <pre>{prettyContext}</pre>
    <h1>Lambda environment variables</h1>
    <pre>{prettyEnv}</pre>
"""
| 75 | 34.57 | 159 | 10 | 648 | python | [] | 0 | true | |
2024-11-18T18:05:48.540417+00:00 | 1,616,777,238,000 | 768cf9044f56568dd13f1f85b888f28e1c866a05 | 2 | {
"blob_id": "768cf9044f56568dd13f1f85b888f28e1c866a05",
"branch_name": "refs/heads/master",
"committer_date": 1616777238000,
"content_id": "28db1e9337930026c4341405595326ba923c402f",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "c7739a60eac9492e4b34f062ccd495955ccc9531",
"extension": "py",
"filename": "utils.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/connect/cli/core/utils.py",
"provenance": "stack-edu-0054.json.gz:568805",
"repo_name": "d3rky/connect-cli",
"revision_date": 1616777238000,
"revision_id": "ddf70e135033d1baef036c822601da539ea4fe0b",
"snapshot_id": "fec1629f34bae14c5fbb8404b2cb0c7c4cc29beb",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/d3rky/connect-cli/ddf70e135033d1baef036c822601da539ea4fe0b/connect/cli/core/utils.py",
"visit_date": "2021-05-24T14:34:34.598244"
} | 2.40625 | stackv2 | # -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import json
import subprocess
import click
def continue_or_quit():
while True:
click.echo('')
click.echo("Press 'c' to continue or 'q' to quit ", nl=False)
c = click.getchar()
click.echo()
if c == 'c':
return True
if c == 'q':
return False
def check_for_updates(*args):
try:
out = subprocess.check_output(['pip', 'list', '-o', '--format', 'json'])
data = json.loads(out)
me = next(filter(lambda p: p['name'] == 'connect-cli', data))
click.secho(
f'\nYou are running CloudBlue Connect CLI version {me["version"]}. '
f'A newer version is available: {me["latest_version"]}.\n',
fg='yellow',
)
except (subprocess.CalledProcessError, StopIteration):
pass
| 34 | 27.85 | 80 | 16 | 241 | python | [] | 0 | true | |
2024-11-18T18:05:48.758111+00:00 | 1,580,765,531,000 | d4312ef8c150274407c0e4854c1de02e54551411 | 4 | {
"blob_id": "d4312ef8c150274407c0e4854c1de02e54551411",
"branch_name": "refs/heads/master",
"committer_date": 1580765531000,
"content_id": "1335b966db57c4859ac7768080db7b96935d44fa",
"detected_licenses": [
"MIT"
],
"directory_id": "2575ee8d1deff4f24b2dc10a392def0647f3a323",
"extension": "py",
"filename": "2-SUM problem.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 224192743,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license": "MIT",
"license_type": "permissive",
"path": "/Course 2/week 4/2-SUM problem.py",
"provenance": "stack-edu-0054.json.gz:568807",
"repo_name": "GJuceviciute/Algorithms-Specialization-Coursera",
"revision_date": 1580765531000,
"revision_id": "fc86be22b9034c8914310574468041b64188437f",
"snapshot_id": "1b56bc3a13586c2e20fce8a37d09b2ea35af60ca",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/GJuceviciute/Algorithms-Specialization-Coursera/fc86be22b9034c8914310574468041b64188437f/Course 2/week 4/2-SUM problem.py",
"visit_date": "2020-09-19T03:10:40.023932"
} | 3.59375 | stackv2 | # Solving the 2-SUM problem using a hash table
# prints out the number of target sums t in a given range that have at least one pair of numbers adding up to it
from tqdm import tqdm
h = {}
with open('2sum.txt', 'r') as doc:
for line in doc:
h[int(line)] = 1
c = 0
for t in tqdm(range(-10000, 10001)):
for key in h:
if t - key in h and t - key != key:
c += 1
break
print(c)
| 18 | 22.61 | 112 | 11 | 132 | python | [] | 0 | true | |
2024-11-18T18:05:48.834403+00:00 | 1,605,371,254,000 | fe0ea0c87e90612db5a6624c3317b7ca97d31d0a | 3 | {
"blob_id": "fe0ea0c87e90612db5a6624c3317b7ca97d31d0a",
"branch_name": "refs/heads/main",
"committer_date": 1605371254000,
"content_id": "3b88a5d19bcd5467e8414d5ba19790618bb9c499",
"detected_licenses": [
"MIT"
],
"directory_id": "ab4f947fa7d70247a3a71b557c1dcb9de4da232f",
"extension": "py",
"filename": "display.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 311646959,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1459,
"license": "MIT",
"license_type": "permissive",
"path": "/display.py",
"provenance": "stack-edu-0054.json.gz:568808",
"repo_name": "hateganfloringeorge/my_sudoku",
"revision_date": 1605371254000,
"revision_id": "7d2c96ee3ac485bc078b11e9f7ea9b4307e39696",
"snapshot_id": "a0d7df44c64f149f5583bf4a68866bbfb5b64239",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/hateganfloringeorge/my_sudoku/7d2c96ee3ac485bc078b11e9f7ea9b4307e39696/display.py",
"visit_date": "2023-01-09T00:20:12.978750"
} | 3.3125 | stackv2 | import pygame as pg
class Display:
"""
docstring
"""
def __init__(self, game, screen_width, screen_height, offset_height, background_color):
self.screen_height = screen_height
self.screen_width = screen_width
self.screen = pg.display.set_mode((screen_width, screen_height))
self.offset_height = offset_height
self.offset_width = (screen_width - game.cell_size * 9) / 2
self.background_color = background_color
self.game = game
def draw_screen(self):
self.screen.fill(self.background_color)
cell_h = self.game.cell_size
for i in range(10):
thickness = self.game.line_thickness
if i % 3 == 0:
thickness = self.game.line_thickness * 2
pg.draw.line(self.screen, self.game.line_color,
pg.Vector2(
((i * cell_h) + self.offset_width, self.offset_height)),
pg.Vector2((i * cell_h) + self.offset_width, 9 * cell_h + self.offset_height), thickness)
pg.draw.line(self.screen, cell_h,
pg.Vector2(self.offset_width,
(i * cell_h) + self.offset_height),
pg.Vector2(9 * cell_h + self.offset_width, (i * cell_h) + self.offset_height), thickness)
self.game.draw_numbers(
self.screen, self.offset_width, self.offset_height)
| 35 | 40.69 | 114 | 17 | 306 | python | [] | 0 | true | |
2024-11-18T18:05:49.936607+00:00 | 1,543,693,800,000 | 9fe6f61b04a321be9e7ea0318a488ccb8faf3608 | 3 | {
"blob_id": "9fe6f61b04a321be9e7ea0318a488ccb8faf3608",
"branch_name": "refs/heads/master",
"committer_date": 1543693800000,
"content_id": "d497ea43363e9a4a6a373fd45e7f54930340d0ad",
"detected_licenses": [
"MIT"
],
"directory_id": "2314eedcc18cb6915241b9f01cf6b37aab3dd580",
"extension": "py",
"filename": "degreeconverter.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 158262145,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 867,
"license": "MIT",
"license_type": "permissive",
"path": "/ev3dev2/utility/degreeconverter.py",
"provenance": "stack-edu-0054.json.gz:568809",
"repo_name": "alex93p/ev3dev-robopy",
"revision_date": 1543693800000,
"revision_id": "4f7b37e78387dc7b0da9ca196154351e821bd628",
"snapshot_id": "80ee472b80f382adf9829002d53d55633261e2e1",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/alex93p/ev3dev-robopy/4f7b37e78387dc7b0da9ca196154351e821bd628/ev3dev2/utility/degreeconverter.py",
"visit_date": "2020-04-07T09:42:28.364920"
} | 2.859375 | stackv2 | #!/usr/bin/env python3
from ev3dev2.utility.global_variable import *
def degrees_sollevatore(direction, degrees):
    """Convert lifter ("sollevatore") degrees into motor degrees.

    The sign of the result encodes the direction: moving UP requires the
    motor to spin in the negative direction.
    """
    # conversion = 8600
    sign = -1 if direction is UP else 1
    # Gear ratio lifter -> motor is 1:24.
    return sign * degrees * 24
def degrees_braccio(direction, degrees):
    """Convert arm ("braccio") degrees into motor degrees.

    The sign of the result encodes the direction: FORWARD is positive,
    anything else is negative.
    """
    # conversion = 1310
    sign = 1 if direction is FORWARD else -1
    # Gear ratio arm -> motor is 1:4.
    return sign * degrees * 4
def degrees_base(position, direction, degrees):
    """Convert a base rotation into motor degrees, clamped to the travel limits.

    position: current base position, compared against MIN_BASE/MAX_BASE.
    direction: CLOCKWISE (positive) or the opposite constant (negative).
    degrees: requested rotation magnitude; gear ratio base -> motor is 1:3.
    """
    # Commands for the base: counter-clockwise is the negative direction.
    # conversion = 1085
    command = -1
    if direction is CLOCKWISE:
        command = 1
    degrees *= command
    # Within limits: return the full scaled rotation.
    if MIN_BASE <= (position + (degrees * 3)) <= MAX_BASE:
        return degrees * 3
    elif (position + (degrees * 3)) < MIN_BASE:
        # NOTE(review): this under-run branch returns the remaining travel
        # unscaled, while the over-run branch below scales by 3 — confirm
        # the asymmetry is intentional.
        return MIN_BASE - position
    else:
        return (MAX_BASE - position) * 3
| 36 | 23.08 | 58 | 11 | 241 | python | [] | 0 | true | |
2024-11-18T18:05:50.236363+00:00 | 1,486,398,951,000 | 61ffdc477f0ecc2b6c07b3616e7042bc8e68f905 | 2 | {
"blob_id": "61ffdc477f0ecc2b6c07b3616e7042bc8e68f905",
"branch_name": "refs/heads/master",
"committer_date": 1486398951000,
"content_id": "35e82aa8efe4e5103e720972d8ec213a833c170c",
"detected_licenses": [
"ISC"
],
"directory_id": "e781a2a5dbd55bf7591fec9efb50b3bc3bad5167",
"extension": "py",
"filename": "models.py",
"fork_events_count": 2,
"gha_created_at": 1455898537000,
"gha_event_created_at": 1504366968000,
"gha_language": "TypeScript",
"gha_license_id": null,
"github_id": 52099994,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4333,
"license": "ISC",
"license_type": "permissive",
"path": "/server/whatsbuzz/models.py",
"provenance": "stack-edu-0054.json.gz:568811",
"repo_name": "nirgn975/WhatsBuzz",
"revision_date": 1486398951000,
"revision_id": "3c189ffb91ea707325d753e726d8433061f085e6",
"snapshot_id": "7b54934d464c14e9f4cd7ad9fb1fc03b271866d2",
"src_encoding": "UTF-8",
"star_events_count": 5,
"url": "https://raw.githubusercontent.com/nirgn975/WhatsBuzz/3c189ffb91ea707325d753e726d8433061f085e6/server/whatsbuzz/models.py",
"visit_date": "2022-10-28T03:23:53.632185"
} | 2.328125 | stackv2 | import uuid
from django.db import models
from ckeditor.fields import RichTextField
from django.utils.translation import ugettext_lazy as _
class AgeCategories(object):
    """(value, label) choices for ``Post.age_categories``."""
    choices = (
        ('default', _('Default')),
        ('children', _('Children')),
        ('young', _('Young')),
        ('adults', _('Adults')),
    )
class FacebookUserName(object):
    """(value, label) choices for ``FacebookUsername.username`` — which part
    of the player's Facebook name to render on the game image."""
    choices = (
        ('empty', ''),
        ('first_name', _('First Name')),
        ('last_name', _('Last Name')),
        ('full_name', _('First and Last Name')),
    )
class UserNameAlign(object):
    """(value, label) choices for ``FacebookUsername.text_align``."""
    choices = (
        ('left', _('Left')),
        ('center', _('Center')),
        ('right', _('Right')),
    )
class Tags(models.Model):
    """A free-form label attachable to posts (see ``Post.tags``)."""
    name = models.CharField(_('tag'), max_length=255)

    def __str__(self):
        return "{0}".format(self.name)

    class Meta:
        verbose_name = _('Tag')
        verbose_name_plural = _('Tags')
class Post(models.Model):
    """
    The basic fields for every post.

    Concrete base model: ``Trend`` and ``FacebookGame`` inherit from it
    (multi-table inheritance).
    """
    # Stable external identifier, safe to expose in URLs.
    unique_id = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    title = models.CharField(_('title'), max_length=255, blank=True)
    body = RichTextField(_('body'), blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    banner_image = models.ImageField(_('banner image'), upload_to='posts/%Y/%m/%d/', blank=True)
    buzz = models.BooleanField(_('buzz'), default=False)
    age_categories = models.CharField(_('age categories'), max_length=25, choices=AgeCategories.choices,
                                      default='default')
    publish = models.DateTimeField(_('publish'), null=True)
    # BUG FIX: for ManyToManyField the second positional argument is
    # related_name, not verbose_name (unlike plain fields), so the original
    # `ManyToManyField(Tags, _('tags'), ...)` set a lazy string as the
    # reverse accessor name. Pass verbose_name as a keyword instead.
    tags = models.ManyToManyField(Tags, verbose_name=_('tags'), blank=True)

    # NOTE(review): REQUIRED_FIELDS is an auth-model convention; Django
    # ignores it on ordinary models — confirm it is read elsewhere.
    REQUIRED_FIELDS = ['title', 'body', 'banner_image', 'publish']
class Trend(Post):
    """
    An article like post.
    """
    # Free-form code payload shown with the trend (presumably embed
    # markup — confirm against the templates that render it).
    code = models.TextField(_('code'))

    class Meta:
        verbose_name = _('Trend')
        verbose_name_plural = _('Trends')
class FacebookGame(Post):
    """
    Facebook Game contains all the facebook related entities.

    Adds no fields of its own; related models (images, username overlay,
    profile-image placement) point at it via ForeignKey.
    """
    class Meta:
        verbose_name = _('Facebook Game')
        verbose_name_plural = _('Facebook Games')
class FacebookGamesImage(models.Model):
    """
    All the background images for a Facebook Game.
    """
    # NOTE(review): pre-2.0 ForeignKey signature (no on_delete; required from
    # Django 2.0). The singular related_name 'background_image' names a
    # one-to-many reverse accessor — confirm both are intended.
    post = models.ForeignKey(FacebookGame, related_name='background_image')
    background_image = models.ImageField(_('background image'), upload_to='posts/%Y/%m/%d/', blank=True)

    class Meta:
        verbose_name = _('facebook background image')
        verbose_name_plural = _('facebook background images')
class FacebookUsername(models.Model):
    """
    The username options for a Facebook Game.

    Describes where and how the player's Facebook name is drawn onto the
    game image: position (x, y), colour, font size and alignment.
    """
    # NOTE(review): pre-2.0 ForeignKey signature (no on_delete).
    post = models.ForeignKey(FacebookGame, related_name='facebook_username')
    username = models.CharField(_('username'), max_length=255, choices=FacebookUserName.choices, default='empty')
    x = models.PositiveIntegerField(_('x'))
    y = models.PositiveIntegerField(_('y'))
    color = models.CharField(_('color'), max_length=255)
    font_size = models.PositiveIntegerField(_('font_size'))
    # NOTE(review): max_length=225 looks like a typo for 255 — harmless, but confirm.
    text_align = models.CharField(_('text align'), max_length=225, choices=UserNameAlign.choices, default='center')

    class Meta:
        verbose_name = _('facebook username')
        verbose_name_plural = _('facebook usernames')
class FacebookProfileImage(models.Model):
    """
    The facebook profile image of the user, for a Facebook Game.

    Stores the rectangle (x, y, width, height) where the player's profile
    picture is pasted onto the game image.
    """
    # NOTE(review): pre-2.0 ForeignKey signature (no on_delete).
    post = models.ForeignKey(FacebookGame, related_name='facebook_profile_image')
    width = models.PositiveIntegerField(_('width'))
    height = models.PositiveIntegerField(_('height'))
    x = models.PositiveIntegerField(_('x'))
    y = models.PositiveIntegerField(_('y'))

    class Meta:
        verbose_name = _('facebook profile image')
        verbose_name_plural = _('facebook profile images')
class User(models.Model):
    """
    All the facebook users that ever logged in to the site.
    """
    # Facebook access token.
    # NOTE(review): stored in plaintext — consider encrypting at rest.
    token = models.TextField()
    # NOTE(review): Django ignores max_length on TextField (except in forms);
    # CharField may have been intended here.
    user_id = models.TextField(_('user_id'), max_length=225)
    name = models.CharField(_('name'), max_length=225)
    created_at = models.DateTimeField(auto_now_add=True)
    last_time_visit = models.DateTimeField(_('last_time_visit'))

    class Meta:
        verbose_name = _('Facebook User')
        verbose_name_plural = _('Facebook Users')
| 138 | 30.4 | 115 | 11 | 902 | python | [] | 0 | true | |
2024-11-18T18:05:50.435877+00:00 | 1,632,236,428,000 | c07cc5f5412176eb9a5a9ee266c8c34e3363267d | 3 | {
"blob_id": "c07cc5f5412176eb9a5a9ee266c8c34e3363267d",
"branch_name": "refs/heads/main",
"committer_date": 1632236428000,
"content_id": "40254bbd782cebf888602efbcbafda1a4f93f1e8",
"detected_licenses": [
"MIT"
],
"directory_id": "e93c6e93f612bca7f192adf539b4f489ad114ab5",
"extension": "py",
"filename": "quokka_worker.py",
"fork_events_count": 0,
"gha_created_at": 1632462673000,
"gha_event_created_at": 1632462674000,
"gha_language": null,
"gha_license_id": "MIT",
"github_id": 409847518,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2398,
"license": "MIT",
"license_type": "permissive",
"path": "/m10_messaging_worker/quokka_worker.py",
"provenance": "stack-edu-0054.json.gz:568814",
"repo_name": "be1iever/python-52-weeks",
"revision_date": 1632236428000,
"revision_id": "185d8b3147c6bfb069d58e4933b74792081bf8f2",
"snapshot_id": "8d57a10af9c0f5309ba21a9503a8fdf4bd82840c",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/be1iever/python-52-weeks/185d8b3147c6bfb069d58e4933b74792081bf8f2/m10_messaging_worker/quokka_worker.py",
"visit_date": "2023-08-19T08:21:45.330447"
} | 2.625 | stackv2 | # ---- Worker application --------------------------------
import json
import os
import pika
from CaptureThread import CaptureThread
from PortscanThread import PortscanThread
from TracerouteThread import TracerouteThread
# Work-type identifiers; must match the values the quokka server publishes.
CAPTURE = "capture"
PORTSCAN = "portscan"
TRACEROUTE = "traceroute"

# Packet capture and scanning need raw-socket privileges, so refuse to
# start unless running as root.
if os.geteuid() != 0:
    exit("You must have root privileges to run this script, try using 'sudo'.")
def start_receiving():
    """Connect to the local RabbitMQ broker and consume work requests forever.

    Blocks in pika's consuming loop; Ctrl-C shuts the channel and
    connection down cleanly and exits the process.
    """
    print(f"Worker: starting rabbitmq, listening for work requests")

    conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    ch = conn.channel()

    queue_name = "quokka-worker"
    ch.queue_declare(queue=queue_name, durable=True)
    print(f"\n\n [*] Worker: waiting for messages on queue: {queue_name}.")

    # Deliver at most one unacked message at a time to this worker.
    ch.basic_qos(prefetch_count=1)
    ch.basic_consume(on_message_callback=receive_work_request, queue=queue_name)

    try:
        ch.start_consuming()
    except KeyboardInterrupt:
        print(f"\n\n\n---> Worker: shutting down")
        ch.close()
        conn.close()
        exit()
def receive_work_request(capture_channel, method, _, body):
    """pika delivery callback: ack, validate the JSON payload, dispatch.

    The message is acknowledged immediately; malformed or unknown work
    requests are logged and dropped.
    """
    capture_channel.basic_ack(delivery_tag=method.delivery_tag)

    work_info = json.loads(body)

    # Reject payloads without a work_type key.
    if "work_type" not in work_info:
        print(f" !!! Received work request with no work_type: {work_info}")
        return

    # Reject unknown work types.
    if work_info["work_type"] not in (CAPTURE, PORTSCAN, TRACEROUTE):
        print(f" !!! Received work request for unknown work_type: {work_info['work_type']}")
        return

    print(f"Received work: {work_info['work_type']} full work info: {work_info}")
    process_work_request(work_info["work_type"], work_info)
    print("\n\n [*] Worker: waiting for messages.")
def process_work_request(work_type, work_info):
    """Spawn the worker thread class that matches *work_type*.

    The quokka server address defaults to localhost:5001 when the request
    does not carry one.
    """
    quokka = work_info.get("quokka", "localhost:5001")

    # Dispatch table: work type -> thread class handling it.
    dispatch = {
        CAPTURE: CaptureThread,
        PORTSCAN: PortscanThread,
        TRACEROUTE: TracerouteThread,
    }
    thread_cls = dispatch.get(work_type)
    if thread_cls is None:
        print(f" !!! Invalid work_type: {work_type}, should have been caught earlier")
        return

    thread_cls(quokka, work_info).start()
# Script entry point: run the blocking consumer loop.
if __name__ == "__main__":
    start_receiving()
| 84 | 27.55 | 92 | 13 | 574 | python | [] | 0 | true | |
2024-11-18T18:05:50.494597+00:00 | 1,600,457,101,000 | cc71e14c4713664a632c671fd7c0cf4949920d0c | 3 | {
"blob_id": "cc71e14c4713664a632c671fd7c0cf4949920d0c",
"branch_name": "refs/heads/master",
"committer_date": 1600457101000,
"content_id": "06c4f4e947ff23b5c316f3fc9d3ff2755fad8197",
"detected_licenses": [
"MIT"
],
"directory_id": "202d5846883e772a5ffc96f7ee72878c11b6b7cf",
"extension": "py",
"filename": "Convolution.py",
"fork_events_count": 1,
"gha_created_at": 1600358426000,
"gha_event_created_at": 1600457015000,
"gha_language": null,
"gha_license_id": "MIT",
"github_id": 296375390,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6096,
"license": "MIT",
"license_type": "permissive",
"path": "/Convolution.py",
"provenance": "stack-edu-0054.json.gz:568815",
"repo_name": "Semana-Tec-AD2020-GDL-Equipo-6/Convolution",
"revision_date": 1600457101000,
"revision_id": "3d75696ae08443448f565c6feb45c4317610f299",
"snapshot_id": "778bd410a702206cda339af265689e0c8e92841a",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/Semana-Tec-AD2020-GDL-Equipo-6/Convolution/3d75696ae08443448f565c6feb45c4317610f299/Convolution.py",
"visit_date": "2022-12-15T18:10:21.673671"
} | 2.875 | stackv2 | import numpy
import argparse
import os
import time
import dlib
import cv2 as cv
from math import hypot
from face import face_tracker
# Command-line arguments: an optional input file, or a camera source index.
parser = argparse.ArgumentParser(description = 'Image Convolution')
parser.add_argument('-f','--file', default="", help="Introduce file name.")
parser.add_argument('-i','--cameraSource', default=0, help="Introduce number or camera pathm default is 0 (default cam)")
args = vars(parser.parse_args())

if args["file"] != "":  # A file was given: run Sobel once and save the result.
    image = cv.imread(args["file"])
    # Sobel kernels: horizontal (x) and vertical (y) edge detection.
    kernelx = numpy.array(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]), numpy.float32)
    kernely = numpy.array(([-1, -2, -1], [0, 0, 0], [1, 2, 1]), numpy.float32)
    # Apply each kernel via 2D convolution.
    outputx = cv.filter2D(image, -1, kernelx)
    outputy = cv.filter2D(image, -1, kernely)
    output = cv.add(outputx, outputy)  # arithmetic sum of the two edge images
    if not os.path.exists('out'):  # create the "out" folder if missing
        os.makedirs('out')
    # Save the result as PNG; [:-4] strips the original 3-char extension + dot.
    cv.imwrite("out/"+args["file"][:-4]+".png", output)
else:  # No file: run interactively on the live camera.
    variant = 0  # which of the 4 effects is currently applied
    blur = False
    script_start_time = time.time()  # start the program timer
    # NOTE(review): int(...) assumes cameraSource is a numeric index; a device
    # path string (as the help text suggests) would raise ValueError — confirm.
    vid = cv.VideoCapture(int(args["cameraSource"]),cv.CAP_DSHOW)

    # Loading Camera and Nose image and Creating mask
    l_image = cv.imread("lightning_emoji.png")
    success, frame = vid.read()
    rens = frame.shape[1]  # frame width in pixels
    cols = frame.shape[0]  # frame height in pixels
    face_mask = numpy.zeros((rens,cols), numpy.uint8)

    # Loading Face Detector
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    while(vid.isOpened()):  # while the camera is in use
        success, frame = vid.read()  # capture one frame
        if not success:  # capture failed
            continue  # retry
        if frame is None:  # null frame
            continue  # retry
        if(blur):  # noise reduction enabled
            frame = cv.medianBlur(frame, 5)  # median filter to reduce noise

        if(variant % 4 == 0):  # original image
            kernel = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 0]), numpy.float32)  # identity: ignores neighbouring pixels
            output = cv.filter2D(frame, -1, kernel)  # apply the identity kernel
            output = cv.putText(output,'Original',(20,30), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0),2)
        elif(variant % 4 == 1):  # convolution, Sobel operator
            # Weighs the pixels to the left and right and subtracts them.
            kernelx = numpy.array(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]), numpy.float32)
            # Weighs the pixels above and below and subtracts them.
            kernely = numpy.array(([-1, -2, -1], [0, 0, 0], [1, 2, 1]), numpy.float32)
            # Apply the convolution filters.
            outputx = cv.filter2D(frame, -1, kernelx)
            outputy = cv.filter2D(frame, -1, kernely)
            output = cv.add(outputx, outputy)  # arithmetic sum of the images
            output = cv.putText(output,'Sobel',(20,30), cv.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255),2)
        elif(variant % 4 == 2):  # boost image saturation
            kernel = numpy.array(([0, 1, 0], [1, 1, 1], [0, 1, 0]), numpy.float32)  # saturates the centre pixel with its neighbours
            output = cv.filter2D(frame, -1, kernel)  # apply the saturation filter
            output = cv.putText(output,'Saturacion',(20,30), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0),2)
        else:  # face-tracking overlay effect
            face_mask.fill(0)
            gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            faces = detector(frame)
            output = face_tracker(frame, l_image,rens,cols,face_mask, detector, predictor, gray_frame, faces)
            output = cv.putText(output,'Rayito emprendedor',(20,30), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0),2)
        #print(output.shape)
        # On-screen help text, drawn twice with a 2px offset for a shadow effect.
        output = cv.putText(output,'Espacio - Cambiar filtro',(18,cols-18), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0),1)
        output = cv.putText(output,'Espacio - Cambiar filtro',(20,cols-20), cv.FONT_HERSHEY_PLAIN, 1, (255,255,255),1)
        output = cv.putText(output,'B - Reduccion de ruido',(18,cols-38), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0),1)
        # Green when blur is active, red otherwise.
        if(blur):
            output = cv.putText(output,'B - Reduccion de ruido',(20,cols-40), cv.FONT_HERSHEY_PLAIN, 1, (70,255,70),1)
        else:
            output = cv.putText(output,'B - Reduccion de ruido',(20,cols-40), cv.FONT_HERSHEY_PLAIN, 1, (20,20,255),1)
        # White text on the (dark) Sobel view, black elsewhere.
        if variant % 4 == 1:
            output = cv.putText(output,'Esc - Salir',(rens-100,20), cv.FONT_HERSHEY_PLAIN, 1, (255,255,255),1)
        else:
            output = cv.putText(output,'Esc - Salir',(rens-100,20), cv.FONT_HERSHEY_PLAIN, 1, (0,0,0),1)
        if(frame is None):
            continue
        cv.imshow('frame', output)  # show the output in a window
        k = cv.waitKey(10)
        if k == 27:  # Esc pressed: quit
            break
        elif k == 98:  # "b" pressed: toggle noise reduction
            if blur:
                blur = False
            else:
                blur = True
        elif k == 32:  # space pressed: next effect
            variant+=1
    vid.release()  # release the camera
    cv.destroyAllWindows()  # close all windows
    # Show total run time. NOTE(review): indentation was lost in this dump;
    # placed inside the camera branch since script_start_time is only
    # defined there — confirm against the original file.
    print('Script took %f seconds.' % (time.time() - script_start_time))
| 127 | 45.95 | 136 | 17 | 1,788 | python | [] | 0 | true | |
2024-11-18T18:05:50.786093+00:00 | 1,425,253,712,000 | e5af85924f64e5cd20cb4634d93cd34727db9ef4 | 3 | {
"blob_id": "e5af85924f64e5cd20cb4634d93cd34727db9ef4",
"branch_name": "refs/heads/master",
"committer_date": 1425253712000,
"content_id": "dc44476f8f2550a6c47e17db2b6c9b10a75475de",
"detected_licenses": [
"BSD-3-Clause"
],
"directory_id": "769fb250df521e23a045940ad41b14d4e0b8d835",
"extension": "py",
"filename": "base.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 666,
"license": "BSD-3-Clause",
"license_type": "permissive",
"path": "/alveolocal/base.py",
"provenance": "stack-edu-0054.json.gz:568817",
"repo_name": "gvlawrence/alveolocal",
"revision_date": 1425253712000,
"revision_id": "5fe1df1ce533b0d5c2518b02f476292e98188e15",
"snapshot_id": "db4669d9f64050505194d1dde2b6706997cc2fce",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/gvlawrence/alveolocal/5fe1df1ce533b0d5c2518b02f476292e98188e15/alveolocal/base.py",
"visit_date": "2020-12-25T22:48:30.908798"
} | 2.90625 | stackv2 | from rdflib.graph import Graph
import os
class Store(object):
    """In-memory RDF store populated from a directory tree of RDF files."""

    def attach_directory(self, dirname):
        """Recursively load every .rdf / .n3 file under *dirname* into a
        single rdflib Graph and return the number of triples loaded.
        """
        self.graph = Graph()
        self.basedir = dirname
        # NOTE(review): .rdf files in this corpus are parsed as Turtle
        # despite the extension — confirm that matches the data.
        suffix_formats = ((".rdf", "turtle"), (".n3", "n3"))
        for dirpath, _, filenames in os.walk(dirname):
            for filename in filenames:
                for suffix, fmt in suffix_formats:
                    if filename.endswith(suffix):
                        self.graph.parse(os.path.join(dirpath, filename), format=fmt)
                        break
        return len(self.graph)
2024-11-18T18:05:50.891605+00:00 | 1,674,337,881,000 | a1d4e937240c69fa96324322c0525f937a87e6a7 | 3 | {
"blob_id": "a1d4e937240c69fa96324322c0525f937a87e6a7",
"branch_name": "refs/heads/master",
"committer_date": 1674337881000,
"content_id": "d89f33ca7a413fe1b1336e1fa59e3c8e77de92bf",
"detected_licenses": [
"MIT"
],
"directory_id": "d5d1e33f783568205d7883059fcd40a68c10a7f1",
"extension": "py",
"filename": "read_rules.py",
"fork_events_count": 4,
"gha_created_at": 1476391631000,
"gha_event_created_at": 1672781548000,
"gha_language": "JavaScript",
"gha_license_id": "NOASSERTION",
"github_id": 70846882,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2566,
"license": "MIT",
"license_type": "permissive",
"path": "/trainer/management/commands/read_rules.py",
"provenance": "stack-edu-0054.json.gz:568819",
"repo_name": "tthelen/interpunct",
"revision_date": 1674337881000,
"revision_id": "cfc69e345b9cf83026ca1767225af207ebb497e7",
"snapshot_id": "b715a13784bcb977a1125b5f871db861467a197f",
"src_encoding": "UTF-8",
"star_events_count": 3,
"url": "https://raw.githubusercontent.com/tthelen/interpunct/cfc69e345b9cf83026ca1767225af207ebb497e7/trainer/management/commands/read_rules.py",
"visit_date": "2023-01-24T18:18:58.074846"
} | 2.625 | stackv2 | from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from trainer.models import Rule, User
import re
class Command(BaseCommand):
    """Management command: import/update comma rules from data/kommaregeln.csv.

    With --clean it first wipes all rules and all non-admin users, which
    invalidates historical data.
    """
    help = 'Reads update rule set vom kommaregeln.csv'

    def add_arguments(self, parser):
        """Register the optional --clean flag."""
        # Named (optional) arguments
        parser.add_argument(
            '--clean',  # name of the argument
            action='store_true',  # store value as True
            dest='clean',  # name of the attribute
            default=False,  # default value if not specified
            help='Delete all existing rules before importing (makes historical data useless)',  # help text
        )

    def handle(self, *args, **options):
        """Read the CSV and create/update one Rule per data row.

        Expected columns: 0=code, 1=mode ('darf nicht'/'kann'/'muss'),
        2=description, 3=rule text, 4-8=up to five example sentences.
        """
        if options['clean']:
            Rule.objects.all().delete()
            # print warning
            self.stdout.write(self.style.WARNING('All existing rules deleted! Database is useless now and has to refreched completely.'))
            # delete all non-admin users
            User.objects.filter(django_user__is_superuser=False).delete()
            self.stdout.write(self.style.WARNING('All non-admin users deleted!'))

        import csv
        # read excel-created csv file
        with open('data/kommaregeln.csv', newline='', encoding='utf8') as csvfile:
            csvfile.readline()  # skip first line (header)
            reader = csv.reader(csvfile, delimiter=';', quotechar='"')
            for row in reader:
                if row[0] == '':  # empty line
                    continue  # skip empty lines
                # Update the rule if its code already exists, else create it.
                try:
                    r = Rule.objects.get(code=row[0])
                    self.stdout.write(self.style.SUCCESS('Update rule "%s"' % row[0]))
                except ObjectDoesNotExist:
                    r = Rule()
                    r.code = row[0]
                    self.stdout.write(self.style.SUCCESS('Create rule "%s"' % row[0]))
                # Mode is stored as the index into this fixed label list.
                r.mode = ['darf nicht', 'kann', 'muss'].index(row[1])
                r.description = row[2]
                r.rule = row[3]
                # Join the example columns into one HTML paragraph block.
                exs = "</p><p>".join(row[4:9])
                if exs:
                    r.example = "<p>"+exs+"</p>"
                else:
                    r.example = ""
                # replace kommas
                r.example = re.sub(r"\(,\)", "[,]", r.example)  # optional comma displayed as [,]
                r.example = re.sub(r"\([^)]+\)", "", r.example)  # remove all other rule parts
                r.save()
        self.stdout.write(self.style.SUCCESS('Successfully updated rules.'))
| 59 | 42.49 | 137 | 20 | 544 | python | [] | 0 | true | |
2024-11-18T18:05:50.995077+00:00 | 1,681,380,941,000 | e318ef477e6416a2771f439d3ccca329e22e093b | 3 | {
"blob_id": "e318ef477e6416a2771f439d3ccca329e22e093b",
"branch_name": "refs/heads/master",
"committer_date": 1681380941000,
"content_id": "ac6bb68abe5cfe57022afe0a29bb34005b6b4d36",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "af84dbfbdca0ee1a354924881b6578c37a66efcf",
"extension": "py",
"filename": "NCF.py",
"fork_events_count": 0,
"gha_created_at": 1498319300000,
"gha_event_created_at": 1688678201000,
"gha_language": "Jupyter Notebook",
"gha_license_id": "Apache-2.0",
"github_id": 95307122,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10756,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/ML/DL/ncf/NCF.py",
"provenance": "stack-edu-0054.json.gz:568820",
"repo_name": "Johnwei386/Warehouse",
"revision_date": 1681380941000,
"revision_id": "77da078a176930c0107431b7a0ff7b01d6634ba7",
"snapshot_id": "96db3b3b7c258b41688395942f766c2f4299aa56",
"src_encoding": "UTF-8",
"star_events_count": 3,
"url": "https://raw.githubusercontent.com/Johnwei386/Warehouse/77da078a176930c0107431b7a0ff7b01d6634ba7/ML/DL/ncf/NCF.py",
"visit_date": "2023-07-19T22:12:33.331111"
} | 2.828125 | stackv2 | # _*_ coding:utf8 _*_
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
#import ncf.metrics
import metrics
class NCF(object):
def __init__(self, embed_size, user_size, item_size, lr,
             optim, initializer, loss_func, activation_func,
             regularizer_rate, iterator, topk, dropout, is_training):
    """
    Important Arguments.

    embed_size: The final embedding size for users and items.
    optim: The optimization method chosen in this model.
    initializer: The initialization method.
    loss_func: Loss function, we choose the cross entropy.
    regularizer_rate: L2 is chosen, this represents the L2 rate.
    iterator: Input dataset.
    topk: For evaluation, computing the topk items.
    """
    self.embed_size = embed_size  # e.g. 16
    self.user_size = user_size  # number of users, e.g. 1508
    self.item_size = item_size  # number of items, e.g. 2071
    self.lr = lr  # learning rate
    self.initializer = initializer  # name string, resolved in inference()
    self.loss_func = loss_func  # name string, resolved in inference()
    self.activation_func = activation_func  # name string, resolved in inference()
    self.regularizer_rate = regularizer_rate
    self.optim = optim  # name string, resolved in inference()
    self.topk = topk  # e.g. 10
    self.dropout = dropout
    self.is_training = is_training
    self.iterator = iterator
def get_data(self):
    """Pull the next (user, item, label) batch tensors from the iterator."""
    sample = self.iterator.get_next()  # next element of the tf.data Dataset
    self.user = sample['user']
    self.item = sample['item']
    # Cast labels to float32 for the sigmoid cross-entropy loss.
    self.label = tf.cast(sample['label'],tf.float32)
def inference(self):
    # Resolve the string-named settings (initializer, activation, loss,
    # optimizer) into the actual TF objects/functions.
    """ Initialize important settings """
    self.regularizer = tf.contrib.layers.l2_regularizer(self.regularizer_rate)

    if self.initializer == 'Normal':
        self.initializer = tf.truncated_normal_initializer(stddev=0.01)
    elif self.initializer == 'Xavier_Normal':
        self.initializer = tf.contrib.layers.xavier_initializer()
    else:
        self.initializer = tf.glorot_uniform_initializer()

    if self.activation_func == 'ReLU':
        self.activation_func = tf.nn.relu
    elif self.activation_func == 'Leaky_ReLU':
        self.activation_func = tf.nn.leaky_relu
    elif self.activation_func == 'ELU':
        self.activation_func = tf.nn.elu

    # NOTE(review): no fallback branch here — an unrecognised loss_func
    # string stays a string and would fail later in create_model().
    if self.loss_func == 'cross_entropy':
        self.loss_func = tf.nn.sigmoid_cross_entropy_with_logits

    if self.optim == 'SGD':
        self.optim = tf.train.GradientDescentOptimizer(self.lr,name='SGD')
    elif self.optim == 'RMSProp':
        self.optim = tf.train.RMSPropOptimizer(self.lr,
                                               decay=0.9,
                                               momentum=0.0,
                                               name='RMSProp')
    elif self.optim == 'Adam':
        self.optim = tf.train.AdamOptimizer(self.lr, name='Adam')
def create_model(self):
    """Build the NeuMF graph: a GMF branch and an MLP branch whose outputs
    are concatenated into a single logit per (user, item) pair."""
    with tf.name_scope('input'):
        # One-hot user matrix, size N x user_size (N = samples in the batch).
        self.user_onehot = tf.one_hot(self.user,self.user_size,name='user_onehot')
        # One-hot item matrix, size N x item_size.
        self.item_onehot = tf.one_hot(self.item,self.item_size,name='item_onehot')

    with tf.name_scope('embed'):
        # Dense embedding layers: the one-hot inputs are projected down to
        # embed_size units with the configured activation, initializer and
        # L2 regularizer. Separate embeddings for the GMF and MLP branches.
        # [N x user_size] x [user_size x embed_size] = N x embed_size
        self.user_embed_GMF = tf.layers.dense(inputs = self.user_onehot,
                                              units = self.embed_size,
                                              activation = self.activation_func,
                                              kernel_initializer=self.initializer,
                                              kernel_regularizer=self.regularizer,
                                              name='user_embed_GMF')
        # [N x item_size] x [item_size x embed_size] = N x embed_size
        self.item_embed_GMF = tf.layers.dense(inputs=self.item_onehot,
                                              units=self.embed_size,
                                              activation=self.activation_func,
                                              kernel_initializer=self.initializer,
                                              kernel_regularizer=self.regularizer,
                                              name='item_embed_GMF')
        # [N x user_size] x [user_size x embed_size] = N x embed_size
        self.user_embed_MLP = tf.layers.dense(inputs=self.user_onehot,
                                              units=self.embed_size,
                                              activation=self.activation_func,
                                              kernel_initializer=self.initializer,
                                              kernel_regularizer=self.regularizer,
                                              name='user_embed_MLP')
        # [N x item_size] x [item_size x embed_size] = N x embed_size
        self.item_embed_MLP = tf.layers.dense(inputs=self.item_onehot,
                                              units=self.embed_size,
                                              activation=self.activation_func,
                                              kernel_initializer=self.initializer,
                                              kernel_regularizer=self.regularizer,
                                              name='item_embed_MLP')

    with tf.name_scope("GMF"):
        # Element-wise product of the two GMF embeddings; output keeps the
        # [N x embed_size] shape.
        self.GMF = tf.multiply(self.user_embed_GMF, self.item_embed_GMF,name='GMF')

    # Multi-layer perceptron tower.
    with tf.name_scope("MLP"):
        # Concatenate user and item MLP embeddings along the feature axis:
        # [N x embed] ++ [N x embed] = [N x 2*embed]
        self.interaction = tf.concat([self.user_embed_MLP, self.item_embed_MLP],
                                     axis=-1, name='interaction')
        print(self.interaction.shape)
        # [N x 2e] x [2e x 2e] = [N x 2e]
        self.layer1_MLP = tf.layers.dense(inputs=self.interaction,
                                          units=self.embed_size * 2,
                                          activation=self.activation_func,
                                          kernel_initializer=self.initializer,
                                          kernel_regularizer=self.regularizer,
                                          name='layer1_MLP')
        # Dropout regularization after each MLP layer.
        self.layer1_MLP = tf.layers.dropout(self.layer1_MLP, rate=self.dropout)
        print(self.layer1_MLP.shape)
        # [N x 2e] x [2e x e] = [N x e]
        self.layer2_MLP = tf.layers.dense(inputs=self.layer1_MLP,
                                          units=self.embed_size,
                                          activation=self.activation_func,
                                          kernel_initializer=self.initializer,
                                          kernel_regularizer=self.regularizer,
                                          name='layer2_MLP')
        self.layer2_MLP = tf.layers.dropout(self.layer2_MLP, rate=self.dropout)
        print(self.layer2_MLP.shape)
        # [N x e] x [e x e/2] = [N x e/2]
        self.layer3_MLP = tf.layers.dense(inputs=self.layer2_MLP,
                                          units=self.embed_size // 2,
                                          activation=self.activation_func,
                                          kernel_initializer=self.initializer,
                                          kernel_regularizer=self.regularizer,
                                          name='layer3_MLP')
        self.layer3_MLP = tf.layers.dropout(self.layer3_MLP, rate=self.dropout)
        print(self.layer3_MLP.shape)

    # Prediction head: fuse both branches into a single logit.
    with tf.name_scope('concatenation'):
        # [N x e] ++ [N x e/2] = [N x 3e/2]
        self.concatenation = tf.concat([self.GMF,self.layer3_MLP],
                                       axis=-1,name='concatenation')
        # [N x 3e/2] x [3e/2 x 1] = [N x 1], no activation (raw logits)
        self.logits = tf.layers.dense(inputs= self.concatenation,
                                      units = 1,
                                      activation=None,
                                      kernel_initializer=self.initializer,
                                      kernel_regularizer=self.regularizer,
                                      name='predict')
        print(self.logits.shape)
        # Flatten the [N x 1] logit matrix to shape (N,).
        self.logits_dense = tf.reshape(self.logits,[-1])
        print(self.logits_dense.shape)

    with tf.name_scope("loss"):
        self.loss = tf.reduce_mean(self.loss_func(
            labels=self.label, logits=self.logits_dense, name='loss'))

    with tf.name_scope("optimzation"):
        self.optimzer = self.optim.minimize(self.loss)
def eval(self):
    """Build the evaluation op: indices of the top-k scored items."""
    with tf.name_scope("evaluation"):
        # Keep the batch's item ids so step() can map indices back to items.
        self.item_replica = self.item
        _, self.indice = tf.nn.top_k(tf.sigmoid(self.logits_dense), self.topk)
def summary(self):
    """ Create summaries to write on tensorboard. """
    self.writer = tf.summary.FileWriter('./graphs/NCF', tf.get_default_graph())
    with tf.name_scope("summaries"):
        # Track the loss both as a scalar and as a histogram.
        tf.summary.scalar('loss', self.loss)
        tf.summary.histogram('histogram loss', self.loss)
        self.summary_op = tf.summary.merge_all()
def build(self):
    """Assemble the full model: data pipeline, settings, graph, eval op,
    summaries and a saver. Order matters — each step feeds the next."""
    self.get_data()
    self.inference()
    self.create_model()
    self.eval()
    self.summary()
    self.saver = tf.train.Saver(tf.global_variables())
def step(self, session, step):
    """Train the model step by step.

    In training mode: run one optimization step and log summaries under
    the given global step; returns None (implicitly).
    In evaluation mode: return (prediction, item) — the batch's top-k
    predicted item ids and the batch's item ids.
    """
    if self.is_training:
        loss, optim, summaries = session.run(
            [self.loss, self.optimzer, self.summary_op])
        self.writer.add_summary(summaries, global_step=step)
    else:
        indice, item = session.run([self.indice, self.item_replica])
        # Map the top-k indices back to the actual item ids of this batch.
        prediction = np.take(item, indice)
        return prediction, item
2024-11-18T18:05:51.110968+00:00 | 1,616,895,046,000 | 8423c4bedaae40da0e6d2c2b43c48f56b89c5965 | 3 | {
"blob_id": "8423c4bedaae40da0e6d2c2b43c48f56b89c5965",
"branch_name": "refs/heads/master",
"committer_date": 1616895046000,
"content_id": "021e70692c5f2184c7f31c5af6a41e9a4edd76f2",
"detected_licenses": [
"MIT"
],
"directory_id": "2f5784360ec61fb19d28ca4d115c967ab415ee3d",
"extension": "py",
"filename": "servidor-UDP.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 342468606,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license": "MIT",
"license_type": "permissive",
"path": "/cliente-servidor-UDP/servidor-UDP.py",
"provenance": "stack-edu-0054.json.gz:568822",
"repo_name": "claubermartins/Information-Security-with-Python",
"revision_date": 1616895046000,
"revision_id": "33f8e495fb42b0eed37d4c58ca5f80ca20fc7965",
"snapshot_id": "a14fc319924a9386d91618a3b49c40e797e65102",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/claubermartins/Information-Security-with-Python/33f8e495fb42b0eed37d4c58ca5f80ca20fc7965/cliente-servidor-UDP/servidor-UDP.py",
"visit_date": "2023-04-06T22:01:13.193035"
} | 2.8125 | stackv2 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 01:56:34 2021
@author: Clauber
"""
import socket
# Connection object: IPv4 / UDP (datagram) socket.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Socket criado com socesso!")
host = 'localhost'
port = 5432
# Bind the socket to the host and port so it can receive datagrams.
s.bind((host, port))
mensagem = '\n Servidor: Oi cliente, estou bem.'
# Serve forever: receive up to 4096 bytes per datagram via the socket.
while 1:
    dados, end = s.recvfrom(4096)
    if dados:
        print('Servidor enviando mensagem...')
        # Echo the client's data back with the server reply appended,
        # packaged as UDP packets addressed to the sender.
        s.sendto(dados + (mensagem.encode()), end)
| 24 | 26.92 | 88 | 13 | 209 | python | [] | 0 | true | |
2024-11-18T18:05:51.163839+00:00 | 1,613,149,366,000 | d287c681a0f37bd86150a1f567579bdd0faf790a | 4 | {
"blob_id": "d287c681a0f37bd86150a1f567579bdd0faf790a",
"branch_name": "refs/heads/main",
"committer_date": 1613149366000,
"content_id": "efd89e6d6665463231e2f25b858433bb8e13635e",
"detected_licenses": [
"MIT"
],
"directory_id": "a9edcd7c994d0ba4a10be1bebd4d91600337483b",
"extension": "py",
"filename": "grapher.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1038,
"license": "MIT",
"license_type": "permissive",
"path": "/constructor/grapher.py",
"provenance": "stack-edu-0054.json.gz:568823",
"repo_name": "jeannadark/search_alchemy",
"revision_date": 1613149366000,
"revision_id": "2fd1360a7ea81ab12159d4f55877ab1a1ec870c8",
"snapshot_id": "9b77f4568273840d745254cff1deedca38f82183",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/jeannadark/search_alchemy/2fd1360a7ea81ab12159d4f55877ab1a1ec870c8/constructor/grapher.py",
"visit_date": "2023-03-04T16:42:57.042743"
} | 3.859375 | stackv2 | from collections import defaultdict
class UGraph:
"""Constructs an undirected weighted graph."""
def __init__(self):
self.graph = defaultdict(list)
def add_edge(self, src: int, dest: int, w: int) -> None:
"""Add weighted edges between source and destination vertices.
:param src: source vertex
:type src: int
:param dest: destination vertex
:type dest: int
:param w: weight of the edge between src and dest
:type w: int
"""
w = w / 100 # fix for square size
self.graph[src].append((dest, w))
self.graph[dest].append((src, w))
def get_weighted_neighbors(self, v_id: int) -> list:
"""Return the list of a vertex's neighbors & the respective edge weights.
Each neighbor is a tuple of type (neighbor_id, weight).
:param v_id: a given vertex id
:type v_id: int
:return: list of neighbors with weights
:rtype: list
"""
return self.graph[v_id]
| 35 | 28.66 | 81 | 11 | 248 | python | [] | 0 | true | |
2024-11-18T18:05:51.327859+00:00 | 1,605,611,951,000 | 3379b7bf8c895c1db53f826ee6b01d695c771ecb | 3 | {
"blob_id": "3379b7bf8c895c1db53f826ee6b01d695c771ecb",
"branch_name": "refs/heads/master",
"committer_date": 1605611951000,
"content_id": "c6533c22e590309dc2d05c216acfdfcf160affbc",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "05a3a2e348f2c452e48ea331b4c5effc18543128",
"extension": "py",
"filename": "directional_bias.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": null,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6051,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/directional_bias.py",
"provenance": "stack-edu-0054.json.gz:568825",
"repo_name": "bymavis/neural-anisotropy-directions",
"revision_date": 1605611951000,
"revision_id": "45df872f16fff850537afc611fa8606e5e5c19eb",
"snapshot_id": "265ac2d58eb4bc82c237291faaf34eb173ce2b46",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/bymavis/neural-anisotropy-directions/45df872f16fff850537afc611fa8606e5e5c19eb/directional_bias.py",
"visit_date": "2023-03-15T14:45:59.519792"
} | 2.75 | stackv2 | import numpy as np
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
from utils import train
class DirectionalLinearDataset(data.Dataset):
def __init__(self,
v,
num_samples=10000,
sigma=3,
epsilon=1,
shape=(1, 32, 32)
):
self.v = v
self.num_samples = num_samples
self.sigma = sigma
self.epsilon = epsilon
self.shape = shape
self.data, self.targets = self._generate_dataset(self.num_samples)
super()
def __getitem__(self, index):
img, target = self.data[index], int(self.targets[index])
return img, target
def __len__(self):
return self.num_samples
def _generate_dataset(self, n_samples):
if n_samples > 1:
data_plus = self._generate_samples(n_samples // 2 + n_samples % 2, 0).astype(np.float32)
labels_plus = np.zeros([n_samples // 2 + n_samples % 2]).astype(np.long)
data_minus = self._generate_samples(n_samples // 2, 1).astype(np.float32)
labels_minus = np.ones([n_samples // 2]).astype(np.long)
data = np.r_[data_plus, data_minus]
labels = np.r_[labels_plus, labels_minus]
else:
data = self._generate_samples(1, 0).astype(np.float32)
labels = np.zeros([1]).astype(np.long)
return torch.from_numpy(data), torch.from_numpy(labels)
def _generate_samples(self, n_samples, label):
data = self._generate_noise_floor(n_samples)
sign = 1 if label == 0 else -1
data = sign * self.epsilon / 2 * self.v[np.newaxis, :] + self._project_orthogonal(data)
return data
def _generate_noise_floor(self, n_samples):
shape = [n_samples] + self.shape
data = self.sigma * np.random.randn(*shape)
return data
def _project(self, x):
proj_x = np.reshape(x, [x.shape[0], -1]) @ np.reshape(self.v, [-1, 1])
return proj_x[:, :, np.newaxis, np.newaxis] * self.v[np.newaxis, :]
def _project_orthogonal(self, x):
return x - self._project(x)
def generate_synthetic_data(v,
num_train=10000,
num_test=10000,
sigma=3,
epsilon=1,
shape=(1, 32, 32),
batch_size=128):
trainset = DirectionalLinearDataset(v,
num_samples=num_train,
sigma=sigma,
epsilon=epsilon,
shape=shape)
testset = DirectionalLinearDataset(v,
num_samples=num_train,
sigma=sigma,
epsilon=epsilon,
shape=shape)
trainloader = DataLoader(trainset,
shuffle=True,
pin_memory=True,
num_workers=2,
batch_size=batch_size)
testloader = DataLoader(testset,
shuffle=False,
pin_memory=True,
num_workers=2,
batch_size=batch_size
)
return trainloader, testloader, trainset, testset
if __name__ == '__main__':
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from models import TransformLayer # It normalizes the data to have prespecified mean and stddev
from models import LeNet
v = torch.zeros([1, 32, 32]) # Create empty vector
v_fft = torch.rfft(v, signal_ndim=2)
v_fft[0, 3, 4, 1] = 1 # Select coordinate in fourier space
v = torch.irfft(v_fft, signal_ndim=2, signal_sizes=[32, 32])
v = v / v.norm()
trainloader, testloader, trainset, testset = generate_synthetic_data(v.numpy(),
num_train=10000,
num_test=10000,
sigma=3,
epsilon=1,
shape=[1, 32, 32],
batch_size=128)
v = np.random.randn(1, 32, 32)
v = v / np.linalg.norm(v)
trainloader, testloader, trainset, testset = generate_synthetic_data(v,
num_train=10000,
num_test=10000,
sigma=3,
epsilon=1,
shape=[1, 32, 32],
batch_size=128)
# net = LogReg(input_dim=32 * 32, num_classes=2)
# net = VGG11_bn(num_channels=1, num_classes=2)
# net = ResNet18(num_channels=1, num_classes=2)
# net = DenseNet121(num_channels=1, num_classes=2)
net = LeNet(num_channels=1, num_classes=2)
net = net.to(DEVICE)
trained_model = train(model=net,
trans=TransformLayer(mean=torch.tensor(0., device=DEVICE),
std=torch.tensor(1., device=DEVICE)),
trainloader=trainloader,
testloader=testloader,
epochs=20,
max_lr=0.5,
momentum=0,
weight_decay=0
) | 151 | 39.08 | 100 | 17 | 1,218 | python | [] | 0 | true | |
2024-11-18T18:05:51.617780+00:00 | 1,598,241,307,000 | eb57e4f1d5091808917207f41e715581d3336e27 | 4 | {
"blob_id": "eb57e4f1d5091808917207f41e715581d3336e27",
"branch_name": "refs/heads/master",
"committer_date": 1598241307000,
"content_id": "9322317e181b1e0888b7b65df44e8faa774e820c",
"detected_licenses": [
"MIT"
],
"directory_id": "973fc8ee2cde8a7379cc49ee216b96e8b3d173d4",
"extension": "py",
"filename": "pyingest_util.py",
"fork_events_count": 0,
"gha_created_at": 1594982351000,
"gha_event_created_at": 1598241308000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 280398006,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1299,
"license": "MIT",
"license_type": "permissive",
"path": "/coralinede/auto_detect_df/pyingest_util.py",
"provenance": "stack-edu-0054.json.gz:568828",
"repo_name": "coralinetech/coralinede",
"revision_date": 1598241307000,
"revision_id": "018af0af3dd929dba718144a0e65a4e13004560b",
"snapshot_id": "9ee17126e034f331c769922e2b16def328d233b5",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/coralinetech/coralinede/018af0af3dd929dba718144a0e65a4e13004560b/coralinede/auto_detect_df/pyingest_util.py",
"visit_date": "2022-12-04T17:27:36.212573"
} | 3.734375 | stackv2 | import os
import re
from operator import itemgetter
def get_clean_col_and_delimiter(file_path):
"""
detect delimiter in text file by glancing at first row
:param file_path: path of text file (str)
:return:
number of delimiters that are detected (int)
delimiter (str)
"""
first_line = None
with open(file_path, encoding='utf8') as file:
first_line = file.readline()
list_of_count_delimiters = [
(',', first_line.count(',')),
('|', first_line.count('|')),
('\t', first_line.count('\t'))
]
delimiter = max(list_of_count_delimiters, key=itemgetter(1))[0]
arr_header = get_simplify_column_name(first_line, delimiter)
return arr_header, delimiter
def get_simplify_column_name(column_name, delimiter):
"""
simplify all the column name by removing all non-alphabetic character
except number, and converting name into snake case
:param column_name: column name (str)
:param deliemter: delimiter that detected from detect_delimiter() (str)
:return:
array of preprocessed column name (array)
"""
temp_string = column_name.lower()
arr_header = re.sub('[^{}_A-Za-z0-9 ]+'.format(delimiter), '', temp_string).replace(' ', '_').split(delimiter)
return arr_header | 45 | 27.89 | 114 | 15 | 306 | python | [] | 0 | true | |
2024-11-18T18:05:51.739998+00:00 | 1,563,480,494,000 | c361e9d07137271c12584cfd09f0345a994daa93 | 3 | {
"blob_id": "c361e9d07137271c12584cfd09f0345a994daa93",
"branch_name": "refs/heads/master",
"committer_date": 1563480494000,
"content_id": "2a7fe80f59976e8fd14f16295693dbf692472883",
"detected_licenses": [
"MIT"
],
"directory_id": "d7f7c31da748f0b7d02646fc26315c479035822d",
"extension": "py",
"filename": "chords.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 197644286,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1217,
"license": "MIT",
"license_type": "permissive",
"path": "/chords.py",
"provenance": "stack-edu-0054.json.gz:568829",
"repo_name": "dorfire/chord-page-maker",
"revision_date": 1563480494000,
"revision_id": "508e55afaea533308e27e60b8d8580fa82d35402",
"snapshot_id": "56c8d7a8cad8cea02dbe30b561e0c1fe9442c54a",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/dorfire/chord-page-maker/508e55afaea533308e27e60b8d8580fa82d35402/chords.py",
"visit_date": "2020-06-22T05:21:52.910305"
} | 2.546875 | stackv2 | from models import ChordDesc
import asyncio, aiohttp
from urllib.parse import quote
from lxml import html
import logging
HTTP_OK = 200
JGUITAR_BASE_URL = 'https://jguitar.com'
JGUITAR_SEARCH_URL_FMT = JGUITAR_BASE_URL + '/chordsearch?chordsearch={}'
async def _get_chord_image_urls(client: aiohttp.ClientSession, chord: ChordDesc):
url = JGUITAR_SEARCH_URL_FMT.format(quote(chord.name))
logging.debug('GET "%s"', url)
async with client.get(url) as res:
if res.status != HTTP_OK:
logging.error('Request to "%s" failed with status=%d', url, res.status)
return []
page = html.document_fromstring(await res.text())
return [
JGUITAR_BASE_URL + img.attrib['src']
for img in page.xpath('//img')
if 'chordshape' in img.attrib['src']
]
async def _gather_chord_image_url_lists(chords: list):
# Schedule requests concurrently
async with aiohttp.ClientSession() as client:
getters = [_get_chord_image_urls(client, chord) for chord in chords]
return await asyncio.gather(*getters)
def get_chord_image_lists(chords: list):
request_results = asyncio.run(_gather_chord_image_url_lists(chords))
logging.info('Gathered results from %d requests', len(request_results))
return request_results
| 39 | 30.21 | 81 | 13 | 319 | python | [] | 0 | true | |
2024-11-18T18:05:51.782526+00:00 | 1,633,726,686,000 | 3a6d8286d4d20bf8768da44caf705aa605abdc20 | 2 | {
"blob_id": "3a6d8286d4d20bf8768da44caf705aa605abdc20",
"branch_name": "refs/heads/main",
"committer_date": 1633726686000,
"content_id": "d29153fc0cfdf84211c4815513f890665658cb6d",
"detected_licenses": [
"Apache-2.0"
],
"directory_id": "c7b3f765302d4737ae8bc6e3973bce900d0a93cd",
"extension": "py",
"filename": "engine.py",
"fork_events_count": 0,
"gha_created_at": 1632863154000,
"gha_event_created_at": 1633086974000,
"gha_language": "Python",
"gha_license_id": "Apache-2.0",
"github_id": 411438402,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5388,
"license": "Apache-2.0",
"license_type": "permissive",
"path": "/wakeword/engine.py",
"provenance": "stack-edu-0054.json.gz:568830",
"repo_name": "rishiraj/heychinki",
"revision_date": 1633726686000,
"revision_id": "413e8fbf2b7b4b1c041575ab17a43b12bade4b00",
"snapshot_id": "0033fc0d1ba92e325244216bc986d332b3d497c3",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/rishiraj/heychinki/413e8fbf2b7b4b1c041575ab17a43b12bade4b00/wakeword/engine.py",
"visit_date": "2023-08-23T10:36:52.050510"
} | 2.453125 | stackv2 | import pyaudio
import threading
import time
import argparse
import wave
import torchaudio
import torch
import numpy as np
from neuralnet.dataset import get_featurizer
from threading import Event
class Listener:
def __init__(self, sample_rate=8000, record_seconds=2):
self.chunk = 1024
self.sample_rate = sample_rate
self.record_seconds = record_seconds
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paInt16,
channels=1,
rate=self.sample_rate,
input=True,
output=True,
frames_per_buffer=self.chunk)
def listen(self, queue):
while True:
data = self.stream.read(self.chunk , exception_on_overflow=False)
queue.append(data)
time.sleep(0.01)
def run(self, queue):
thread = threading.Thread(target=self.listen, args=(queue,), daemon=True)
thread.start()
print("\nWake Word Engine is now listening... \n")
class WakeWordEngine:
def __init__(self, model_file):
self.listener = Listener(sample_rate=8000, record_seconds=2)
self.model = torch.jit.load(model_file)
self.model.eval().to('cpu') #run on cpu
self.featurizer = get_featurizer(sample_rate=8000)
self.audio_q = list()
def save(self, waveforms, fname="wakeword_temp"):
wf = wave.open(fname, "wb")
# set the channels
wf.setnchannels(1)
# set the sample format
wf.setsampwidth(self.listener.p.get_sample_size(pyaudio.paInt16))
# set the sample rate
wf.setframerate(8000)
# write the frames as bytes
wf.writeframes(b"".join(waveforms))
# close the file
wf.close()
return fname
def predict(self, audio):
with torch.no_grad():
fname = self.save(audio)
waveform, _ = torchaudio.load(fname, normalization=False) # don't normalize on train
mfcc = self.featurizer(waveform).transpose(1, 2).transpose(0, 1)
# TODO: read from buffer instead of saving and loading file
# waveform = torch.Tensor([np.frombuffer(a, dtype=np.int16) for a in audio]).flatten()
# mfcc = self.featurizer(waveform).transpose(0, 1).unsqueeze(1)
out = self.model(mfcc)
pred = torch.round(torch.sigmoid(out))
return pred.item()
def inference_loop(self, action):
while True:
if len(self.audio_q) > 15: # remove part of stream
diff = len(self.audio_q) - 15
for _ in range(diff):
self.audio_q.pop(0)
action(self.predict(self.audio_q))
elif len(self.audio_q) == 15:
action(self.predict(self.audio_q))
time.sleep(0.05)
def run(self, action):
self.listener.run(self.audio_q)
thread = threading.Thread(target=self.inference_loop,
args=(action,), daemon=True)
thread.start()
class DemoAction:
"""This demo action will just randomly say Arnold Schwarzenegger quotes
args: sensitivty. the lower the number the more sensitive the
wakeword is to activation.
"""
def __init__(self, sensitivity=10):
# import stuff here to prevent engine.py from
# importing unecessary modules during production usage
import os
import subprocess
import random
from os.path import join, realpath
self.random = random
self.subprocess = subprocess
self.detect_in_row = 0
self.sensitivity = sensitivity
folder = realpath(join(realpath(__file__), '..', '..', '..', 'fun', 'arnold_audio'))
self.arnold_mp3 = [
os.path.join(folder, x)
for x in os.listdir(folder)
if ".wav" in x
]
def __call__(self, prediction):
if prediction == 1:
self.detect_in_row += 1
if self.detect_in_row == self.sensitivity:
self.play()
self.detect_in_row = 0
else:
self.detect_in_row = 0
def play(self):
filename = self.random.choice(self.arnold_mp3)
try:
print("playing", filename)
self.subprocess.check_output(['play', '-v', '.1', filename])
except Exception as e:
print(str(e))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="demoing the wakeword engine")
parser.add_argument('--model_file', type=str, default=None, required=True,
help='optimized file to load. use optimize_graph.py')
parser.add_argument('--sensitivty', type=int, default=10, required=False,
help='lower value is more sensitive to activations')
args = parser.parse_args()
wakeword_engine = WakeWordEngine(args.model_file)
action = DemoAction(sensitivity=10)
print("""\n*** Make sure you have sox installed on your system for the demo to work!!!
If you don't want to use sox, change the play function in the DemoAction class
in engine.py module to something that works with your system.\n
""")
# action = lambda x: print(x)
wakeword_engine.run(action)
threading.Event().wait()
| 155 | 33.76 | 98 | 17 | 1,221 | python | [] | 0 | true | |
2024-11-18T18:05:51.834545+00:00 | 1,462,411,624,000 | cf54a28b454191968d424270b14794bdf4c83365 | 2 | {
"blob_id": "cf54a28b454191968d424270b14794bdf4c83365",
"branch_name": "refs/heads/master",
"committer_date": 1462411624000,
"content_id": "9f667026d03e5e368767c3dcf424cf6df14db21f",
"detected_licenses": [
"MIT"
],
"directory_id": "6071f5f8c3bb1e0b6ecc280664058cf74e0e93e1",
"extension": "py",
"filename": "readit.py",
"fork_events_count": 2,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 55791765,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2060,
"license": "MIT",
"license_type": "permissive",
"path": "/readit.py",
"provenance": "stack-edu-0054.json.gz:568831",
"repo_name": "haupt235/waldo",
"revision_date": 1462411624000,
"revision_id": "8c5e8693fb8eb98076e42f1dbdfbd6a8d2699044",
"snapshot_id": "763726de0aa62f95173f56182b7266569d394145",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/haupt235/waldo/8c5e8693fb8eb98076e42f1dbdfbd6a8d2699044/readit.py",
"visit_date": "2021-01-10T10:40:34.584790"
} | 2.484375 | stackv2 | import pdb
def readit(filename):
data = open(filename, 'r+')
line = data.readline()
i=1
event_log = {}
while line != '':
if 'event' in line:
line = data.readline()
line = data.readline()
line = data.readline()
line = data.readline()
event = []
while (line.find('event') == -1) & (line.find('summary') == -1):
event.append(line)
line = data.readline()
if event != []:
event_log.update({'particle'+str(i) : event})
i=i+1
else: line = data.readline()
return event_log
def structure(filename):
event_log = readit(filename)
i = 1
j=1
sad = {}
while i <= len(event_log):
events = []
while j < len(event_log['particle'+str(i)]):
dp = event_log['particle'+str(i)][j].split()
events.append({'cell': dp[1], 'x': dp[2], 'y': dp[3], 'z': dp[4], 'u':dp[5], 'v': dp[6], 'w': dp[7], 'erg' :dp[8], 'wgt':dp[9]})
j=j+1
sad.update({'particle'+str(i): events})
i=i+1
return(sad)
def vtk_file(events, event_title):
file_name = event_title + ".vtk"
vtk_file = open(file_name,"w+")
num_events = 0
for event in events:
if event["cell"] != events[events.index(event)-1]["cell"]:
num_events += 1
vtk_file.write("# vtk DataFile Version 3.0 \nvtk output\nASCII\nDATASET POLYDATA\nPOINTS " + str(num_events) + " float\n")
for event in events:
if event["cell"] != events[events.index(event)-1]["cell"]:
vtk_file.write(event["x"] + " " + event["y"] + " " + event["z"] + "\n")
num_lines = num_events - 1
vtk_file.write("LINES " + str(num_lines) + " " + str(3*num_lines) + "\n")
for i in range(num_events-1):
vtk_file.write("2 " + str(i) + " " + str(i+1) + "\n")
vtk_file.write("CELL_DATA " + str(num_lines) + "\n")
vtk_file.write("scalars cellvar float\nLOOKUP_TABLE default\n")
vtk_file.write("2.0 2.4 2.1 2.2 2.3\n")
vtk_file.write("POINT_DATA " + str(num_events) + "\n")
vtk_file.write("scalars pointvar float\nLOOKUP_TABLE default\n")
vtk_file.write("1.2 1.3 1.4 1.5")
def vtk_builder(readable):
for event_titles in readable:
vtk_file(readable[event_titles], event_titles)
| 65 | 30.69 | 131 | 18 | 666 | python | [] | 0 | true | |
2024-11-18T18:05:51.893426+00:00 | 1,627,356,144,000 | 0e0c4b7fa36f3a02ffa8eda95febeb7d2d92704a | 2 | {
"blob_id": "0e0c4b7fa36f3a02ffa8eda95febeb7d2d92704a",
"branch_name": "refs/heads/master",
"committer_date": 1627356144000,
"content_id": "d076586c733abd1d56e4332b4acebfd4b0049418",
"detected_licenses": [
"MIT"
],
"directory_id": "917b080572da1d0572bd7322080962aae96c3534",
"extension": "py",
"filename": "banner.py",
"fork_events_count": 5,
"gha_created_at": 1597242886000,
"gha_event_created_at": 1619469913000,
"gha_language": "Python",
"gha_license_id": "MIT",
"github_id": 287036975,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1187,
"license": "MIT",
"license_type": "permissive",
"path": "/wpscan_out_parse/parser/components/_parts/banner.py",
"provenance": "stack-edu-0054.json.gz:568832",
"repo_name": "tristanlatr/wpscan_out_parse",
"revision_date": 1627356144000,
"revision_id": "737a7e9b5f47c7e433351b7a9c0b55d76f173e25",
"snapshot_id": "90bff72cf43cc718eb6daa1c183180cb13c33e24",
"src_encoding": "UTF-8",
"star_events_count": 8,
"url": "https://raw.githubusercontent.com/tristanlatr/wpscan_out_parse/737a7e9b5f47c7e433351b7a9c0b55d76f173e25/wpscan_out_parse/parser/components/_parts/banner.py",
"visit_date": "2023-06-24T18:44:50.969064"
} | 2.34375 | stackv2 | from typing import Any, Dict, Sequence, List
from ...base import Component
class Banner(Component):
def __init__(self, data: Dict[str, Any], *args: Any, **kwargs: Any):
"""From https://github.com/wpscanteam/wpscan/blob/master/app/views/json/core/banner.erb"""
super().__init__(data, *args, **kwargs)
self.description:str = self.data.get("description", None)
self.version:str = self.data.get("version", None)
self.authors:List[str] = self.data.get("authors", None)
self.sponsor:str = self.data.get("sponsor", None) or self.data.get(
"sponsored_by", None
)
def get_infos(self)-> Sequence[str]:
info = "Scanned with {}".format(self.description)
info += "\nVersion: {}".format(self.version)
if self.show_all_details:
info += "\nAuthors: {}".format(", ".join(self.authors))
if self.sponsor:
info += "\nSponsor: {}".format(self.sponsor)
return [info]
def get_warnings(self)-> Sequence[str]:
"""Return empty list"""
return []
def get_alerts(self)-> Sequence[str]:
"""Return empty list"""
return []
| 34 | 33.91 | 98 | 16 | 271 | python | [] | 0 | true | |
2024-11-18T18:05:52.011974+00:00 | 1,512,495,654,000 | 66e2f97893448058da44f94b9fe7bbe49c289692 | 2 | {
"blob_id": "66e2f97893448058da44f94b9fe7bbe49c289692",
"branch_name": "refs/heads/master",
"committer_date": 1512496294000,
"content_id": "a60712a4aa2b2ef2643052a13d8fd0ad25620334",
"detected_licenses": [
"MIT"
],
"directory_id": "5a49a78056a7f3bb7d5b4560eccaca878c58e999",
"extension": "py",
"filename": "model2vis.py",
"fork_events_count": 0,
"gha_created_at": 1512697467000,
"gha_event_created_at": 1512697467000,
"gha_language": null,
"gha_license_id": null,
"github_id": 113517900,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 932,
"license": "MIT",
"license_type": "permissive",
"path": "/scripts/model2vis.py",
"provenance": "stack-edu-0054.json.gz:568834",
"repo_name": "yilei/CoMET",
"revision_date": 1512495654000,
"revision_id": "a927ab2c1b54efa05c317c5baf14223c8802238a",
"snapshot_id": "371632e77e4c4e20d95c424f60ea52dd77774211",
"src_encoding": "UTF-8",
"star_events_count": 0,
"url": "https://raw.githubusercontent.com/yilei/CoMET/a927ab2c1b54efa05c317c5baf14223c8802238a/scripts/model2vis.py",
"visit_date": "2021-08-24T05:03:52.307155"
} | 2.484375 | stackv2 | # coding=utf-8
import json
import h5py
def get_weights_from_hdf5_group(hfg):
layer_names = [n.decode('utf8') for n in hfg.attrs['layer_names']]
weight_dict = dict()
for name in layer_names:
g = hfg[name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = [g[weight_name].value.tolist() for weight_name in weight_names]
weight_names = map(lambda x: x.split('/')[-1].replace(':0', ''), weight_names)
weight_dict[name] = dict(zip(weight_names, weight_values))
return weight_dict
filename = 'models/acetyl/100_30_34_1_1_DeepCoFAM.model'
with h5py.File(filename, mode='r') as hf:
model_config = hf.attrs['model_config'].decode('utf8')
model_weights = json.dumps(get_weights_from_hdf5_group(hf['model_weights']))
with open('test.model.json', 'w') as f:
f.write("""{"config": %s, "weights": %s}""" % (model_config, model_weights))
| 27 | 33.52 | 87 | 17 | 259 | python | [] | 0 | true | |
2024-11-18T18:05:52.066530+00:00 | 1,454,423,768,000 | 3ab9a582339c814f2fba0bef3e604bfb9c60abbe | 3 | {
"blob_id": "3ab9a582339c814f2fba0bef3e604bfb9c60abbe",
"branch_name": "refs/heads/master",
"committer_date": 1454423768000,
"content_id": "10fb2ae5eee7e9c0f2784e3e971476c5d7dd744c",
"detected_licenses": [
"MIT"
],
"directory_id": "178e21d6a361dd487eece27c22da6ccd9f301b29",
"extension": "py",
"filename": "base.py",
"fork_events_count": 0,
"gha_created_at": null,
"gha_event_created_at": null,
"gha_language": null,
"gha_license_id": null,
"github_id": 50801110,
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 812,
"license": "MIT",
"license_type": "permissive",
"path": "/bitlo/services/base.py",
"provenance": "stack-edu-0054.json.gz:568835",
"repo_name": "ilmesi/bitlo",
"revision_date": 1454423768000,
"revision_id": "d9fec6f16b9d1bc409e25cc1824f76c9c9089522",
"snapshot_id": "7c57f6cd7b593287de5e03400b8b07ad7138312a",
"src_encoding": "UTF-8",
"star_events_count": 1,
"url": "https://raw.githubusercontent.com/ilmesi/bitlo/d9fec6f16b9d1bc409e25cc1824f76c9c9089522/bitlo/services/base.py",
"visit_date": "2021-01-10T09:42:48.210463"
} | 2.609375 | stackv2 |
class ServiceBase(object):
@classmethod
def balance(cls, address, confirmations=0):
"""
Returns the amount of satoshis on the address
with the specified number of confirmations.
"""
raise NotImplementedError
@classmethod
def block_height(cls):
"""
Returns the height of the block.
"""
raise NotImplementedError
@classmethod
def transactions_for_address(cls, address, confirmations=0):
"""
Returns all the Tx related to the address
"""
raise NotImplementedError
@classmethod
def send(cls, from_address, to, private, amount_in_satoshis):
raise NotImplementedError
@classmethod
def unspents(cls, address, confirmations=0):
raise NotImplementedError
| 32 | 24.38 | 65 | 9 | 161 | python | [] | 0 | true |
End of preview. Expand in Data Studio
README.md exists but content is empty.
- Downloads last month
- 30