seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
9891659608 | import contextlib
import pprint
import sys
import traceback
import types
import typing
from seleniumwire.thirdparty.mitmproxy import controller, eventsequence, exceptions
from seleniumwire.thirdparty.mitmproxy import flow
from . import ctx
def _get_name(itm):
return getattr(itm, "name", itm.__class__.__name__.lower())
def cut_traceback(tb, func_name):
    """
    Cut off a traceback at the function with the given name.
    The frame belonging to *func_name* itself is excluded from the result.

    Args:
        tb: traceback object, as returned by sys.exc_info()[2]
        func_name: function name to cut at

    Returns:
        The reduced traceback, or the original traceback when *func_name*
        does not appear anywhere in the chain.
    """
    remaining = tb
    for frame_summary in traceback.extract_tb(tb):
        # Drop one frame, then check whether the frame we just dropped
        # was the one we were looking for.
        remaining = remaining.tb_next
        if frame_summary.name == func_name:
            break
    return remaining or tb
@contextlib.contextmanager
def safecall():
    """
    Run the wrapped code, logging any unexpected exception via ctx.log
    instead of letting it propagate. AddonHalt and OptionsError are
    control-flow signals for the addon machinery and are re-raised as-is.
    """
    try:
        yield
    except (exceptions.AddonHalt, exceptions.OptionsError):
        raise
    except Exception:
        etype, value, tb = sys.exc_info()
        # Trim frames above invoke_addon so the log shows only addon code.
        tb = cut_traceback(tb, "invoke_addon")
        formatted = "".join(traceback.format_exception(etype, value, tb))
        ctx.log.error("Error handling request\n%s" % formatted)
class Loader:
    """
    A loader object is passed to the load() event when addons start up.
    It lets an addon declare options and commands on the master.
    """

    def __init__(self, master):
        self.master = master

    def add_option(
        self,
        name: str,
        typespec: type,
        default: typing.Any,
        help: str,
        choices: typing.Optional[typing.Sequence[str]] = None
    ) -> None:
        """
        Add an option to mitmproxy.

        Help should be a single paragraph with no linebreaks - it will be
        reflowed by tools. Information on the data type should be omitted -
        it will be generated and added by tools as needed.

        Re-declaring an option with an identical signature is a no-op;
        re-declaring it with a different signature logs a warning and
        overrides the existing option.
        """
        options = self.master.options
        if name in options:
            current = options._options[name]
            unchanged = (
                current.name == name
                and current.typespec == typespec
                and current.default == default
                and current.help == help
                and current.choices == choices
            )
            if unchanged:
                return
            ctx.log.warn("Over-riding existing option %s" % name)
        options.add_option(name, typespec, default, help, choices)

    def add_command(self, path: str, func: typing.Callable) -> None:
        """Register *func* as a command reachable at *path*."""
        self.master.commands.add(path, func)
def traverse(chain):
    """
    Recursively walk an addon chain, yielding each addon followed by its
    sub-addons (depth-first, via an optional ``addons`` attribute).
    """
    for addon in chain:
        yield addon
        if hasattr(addon, "addons"):
            yield from traverse(addon.addons)
class AddonManager:
    """
    Holds the chain of active addons, keeps a name -> addon lookup, and
    dispatches lifecycle and option events to every addon (and sub-addon).
    """

    def __init__(self, master):
        self.lookup = {}  # addon name -> addon instance (includes sub-addons)
        self.chain = []   # top-level addons, in invocation order
        self.master = master
        # Re-dispatch option changes to all addons as "configure" events.
        master.options.changed.connect(self._configure_all)

    def _configure_all(self, options, updated):
        # Signal receiver for options.changed.
        self.trigger("configure", updated)

    def clear(self):
        """
        Remove all addons.
        """
        for a in self.chain:
            self.invoke_addon(a, "done")
        self.lookup = {}
        self.chain = []

    def get(self, name):
        """
        Retrieve an addon by name. Addon names are equal to the .name
        attribute on the instance, or the lower case class name if that
        does not exist.
        """
        return self.lookup.get(name, None)

    def register(self, addon):
        """
        Register an addon, call its load event, and then register all its
        sub-addons. This should be used by addons that dynamically manage
        addons.
        If the calling addon is already running, it should follow with
        running and configure events. Must be called within a current
        context.
        """
        # First pass: fail fast on name collisions before mutating any state.
        for a in traverse([addon]):
            name = _get_name(a)
            if name in self.lookup:
                raise exceptions.AddonManagerError(
                    "An addon called '%s' already exists." % name
                )
        l = Loader(self.master)
        self.invoke_addon(addon, "load", l)
        # Only after a successful load: publish names and collect commands.
        for a in traverse([addon]):
            name = _get_name(a)
            self.lookup[name] = a
        for a in traverse([addon]):
            self.master.commands.collect_commands(a)
        self.master.options.process_deferred()
        return addon

    def add(self, *addons):
        """
        Add addons to the end of the chain, and run their load event.
        If any addon has sub-addons, they are registered.
        """
        for i in addons:
            self.chain.append(self.register(i))

    def remove(self, addon):
        """
        Remove an addon and all its sub-addons.
        If the addon is not in the chain - that is, if it's managed by a
        parent addon - it's the parent's responsibility to remove it from
        its own addons attribute.
        """
        for a in traverse([addon]):
            n = _get_name(a)
            if n not in self.lookup:
                raise exceptions.AddonManagerError("No such addon: %s" % n)
            # Identity comparison: remove this exact instance from the chain.
            self.chain = [i for i in self.chain if i is not a]
            del self.lookup[_get_name(a)]
        self.invoke_addon(addon, "done")

    def __len__(self):
        return len(self.chain)

    def __str__(self):
        return pprint.pformat([str(i) for i in self.chain])

    def __contains__(self, item):
        name = _get_name(item)
        return name in self.lookup

    async def handle_lifecycle(self, name, message):
        """
        Handle a lifecycle event.
        """
        if not hasattr(message, "reply"):  # pragma: no cover
            raise exceptions.ControlException(
                "Message %s has no reply attribute" % message
            )
        # We can use DummyReply objects multiple times. We only clear them up on
        # the next handler so that we can access value and state in the
        # meantime.
        if isinstance(message.reply, controller.DummyReply):
            message.reply.reset()
        self.trigger(name, message)
        if message.reply.state == "start":
            # No addon handled the reply: acknowledge and commit ourselves.
            message.reply.take()
            if not message.reply.has_message:
                message.reply.ack()
            message.reply.commit()
            if isinstance(message.reply, controller.DummyReply):
                message.reply.mark_reset()
        if isinstance(message, flow.Flow):
            self.trigger("update", [message])

    def invoke_addon(self, addon, name, *args, **kwargs):
        """
        Invoke an event on an addon and all its children.
        """
        if name not in eventsequence.Events:
            raise exceptions.AddonManagerError("Unknown event: %s" % name)
        for a in traverse([addon]):
            func = getattr(a, name, None)
            if func:
                if callable(func):
                    func(*args, **kwargs)
                elif isinstance(func, types.ModuleType):
                    # we gracefully exclude module imports with the same name as hooks.
                    # For example, a user may have "from mitmproxy import log" in an addon,
                    # which has the same name as the "log" hook. In this particular case,
                    # we end up in an error loop because we "log" this error.
                    pass
                else:
                    raise exceptions.AddonManagerError(
                        "Addon handler {} ({}) not callable".format(name, a)
                    )

    def trigger(self, name, *args, **kwargs):
        """
        Trigger an event across all addons.
        """
        for i in self.chain:
            try:
                # safecall logs unexpected errors; AddonHalt stops the chain.
                with safecall():
                    self.invoke_addon(i, name, *args, **kwargs)
            except exceptions.AddonHalt:
                return
| wkeeling/selenium-wire | seleniumwire/thirdparty/mitmproxy/addonmanager.py | addonmanager.py | py | 8,065 | python | en | code | 1,689 | github-code | 90 |
# Print n rows; row r contains the digits r, r-1, ..., 1 with no separators.
# For n = 4 the output is:
# 1
# 21
# 321
# 4321
n = int(input())
for row in range(1, n + 1):
    for value in range(row, 0, -1):
        print(value, end="")
    print("")
# Read n and a list of n integers; print the sorted list, then the list of
# differences between consecutive sorted elements.
n = int(input())
numbers = sorted(map(int, input().split()))
print('Sorted List:')
print(numbers)
increments = [numbers[i + 1] - numbers[i] for i in range(n - 1)]
print('Sequence of increments:')
print(increments)
24552724577 | from ... import core as c
from ... import ctrlstru as cs
from ... import eudlib as sf
def InlineCodifyBinaryTrigger(bTrigger):
    """ Inline codify raw(binary) trigger data.
    For minimal protection, eudplib make some of the trig-triggers to
    eudplib trigger. This function makes eudplib trigger out of raw
    binary trigger stream.
    :param bTrigger: Binary trigger data
    :returns: (tStart, tEnd) pair, as being used by tEnd
    """
    # 1. Get executing players of the trigger.
    # If all player executes it, then pass it
    # NOTE(review): offset 320 + 2048 + 4 appears to be the start of the
    # trigger's player-execution flags; slot 17 presumably means "all
    # players" — confirm against the SCX trigger layout.
    if bTrigger[320 + 2048 + 4 + 17] != 0:
        playerExecutesTrigger = [True] * 8
    else:  # Should check manually
        playerExecutesTrigger = [False] * 8
        # By player
        for player in range(8):
            if bTrigger[320 + 2048 + 4 + player] != 0:
                playerExecutesTrigger[player] = True
        # By force
        playerForce = [0] * 8
        for player in range(8):
            playerForce[player] = c.GetPlayerInfo(player).force
        for force in range(4):
            if bTrigger[320 + 2048 + 4 + 18 + force] != 0:
                # Every player belonging to this force executes the trigger.
                for player in range(8):
                    if playerForce[player] == force:
                        playerExecutesTrigger[player] = True
    # 2. Create function body
    if c.PushTriggerScope():
        tStart = c.RawTrigger(actions=c.SetDeaths(0, c.SetTo, 0, 0))
        cp = sf.f_getcurpl()
        # Dispatch on the current player: only flagged players run the
        # embedded raw trigger.
        cs.EUDSwitch(cp)
        for player in range(8):
            if playerExecutesTrigger[player]:
                if cs.EUDSwitchCase()(player):
                    c.RawTrigger(trigSection=bTrigger)
                    cs.EUDBreak()
        cs.EUDEndSwitch()
        tEnd = c.RawTrigger()
    c.PopTriggerScope()
    return (tStart, tEnd)
| phu54321/eudplib | eudplib/maprw/inlinecode/btInliner.py | btInliner.py | py | 1,770 | python | en | code | 13 | github-code | 90 |
24808379571 | """
Hourglass network inserted in the pre-activated Resnet
Use lr=0.01 for current version
(c) Yichao Zhou (VanishingNet)
(c) Yichao Zhou (LCNN)
(c) YANG, Wei
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["HourglassNet", "hg"]
class Bottleneck2D(nn.Module):
    """
    Pre-activation bottleneck block:
    (bn-relu-conv1x1) -> (bn-relu-conv3x3) -> (bn-relu-conv1x1) + shortcut.

    Input has `inplanes` channels; output has `planes * expansion` channels.
    When the shortcut needs a shape/channel change, pass a `resample` module.
    """

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, resample=None):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck2D.expansion, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.resample = resample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity unless a resample module adapts the input.
        shortcut = x if self.resample is None else self.resample(x)
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        return out + shortcut
class Hourglass(nn.Module):
    """
    One recursive hourglass module: downsample `depth` times, process at the
    bottleneck, then upsample back, adding skip connections at every level.
    """

    def __init__(self, block, num_blocks, planes, depth):
        super(Hourglass, self).__init__()
        self.depth = depth  # 4
        self.block = block
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)  # _make_hour_glass(bottle,1,128,4)

    def _make_residual(self, block, num_blocks, planes):
        # A chain of `num_blocks` residual blocks at constant channel width.
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(planes * block.expansion, planes))
        return nn.Sequential(*layers)

    def _make_hour_glass(self, block, num_blocks, planes, depth):
        # hg[i] holds 3 residual chains (skip, down, up); the innermost level
        # (i == 0) gets a 4th chain used at the recursion's bottom.
        hg = []
        for i in range(depth):
            res = []
            for j in range(3):
                res.append(self._make_residual(block, num_blocks, planes))
            if i == 0:
                res.append(self._make_residual(block, num_blocks, planes))
            hg.append(nn.ModuleList(res))
        return nn.ModuleList(hg)

    def _hour_glass_forward(self, n, x):
        # Index order in self.hg[n-1] matters: [0]=skip, [1]=down, [2]=up,
        # [3]=bottom (innermost level only).
        up1 = self.hg[n - 1][0](x)
        low1 = F.max_pool2d(x, 2, stride=2)
        low1 = self.hg[n - 1][1](low1)
        if n > 1:
            low2 = self._hour_glass_forward(n - 1, low1)
        else:
            low2 = self.hg[n - 1][3](low1)
        low3 = self.hg[n - 1][2](low2)
        # Upsample back to the skip branch's resolution and merge.
        up2 = F.interpolate(low3, scale_factor=2)
        out = up1 + up2
        return out

    def forward(self, x):
        return self._hour_glass_forward(self.depth, x)
class HourglassNet(nn.Module):
    '''
    Stacked-hourglass backbone: conv1-bn-relu stem, residual stages, then
    `num_stacks` hourglass modules, each followed by a residual chain, a 1x1
    "fc" block and a prediction head. Intermediate predictions are fed back
    into the trunk between stacks.
    '''

    def __init__(self, planes, block, head, depth, num_stacks, num_blocks):
        super(HourglassNet, self).__init__()
        self.name = 'hourglassnet'
        self.inplanes = 64
        self.num_feats = 256
        self.num_stacks = num_stacks  # 1
        self.num_channels = planes
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, 1)
        self.layer3 = self._make_residual(block, self.inplanes, 1)
        self.layer4 = self._make_residual(block, self.num_feats, 1)
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # build hourglass modules
        ch = self.num_feats * block.expansion  # 256 * 2 = 512
        hg, res, fc, score, fc_, score_ = [], [], [], [], [], []
        for i in range(num_stacks):
            hg.append(Hourglass(block, num_blocks, self.num_feats, depth))
            res.append(self._make_residual(block, self.num_feats, num_blocks))
            fc.append(self._make_fc(ch, ch))
            score.append(head(ch, planes))
            # Inter-stack remix layers exist only between stacks, not after
            # the last one.
            if i < num_stacks - 1:
                fc_.append(nn.Conv2d(ch, ch, kernel_size=1))
                score_.append(nn.Conv2d(planes, ch, kernel_size=1))
        self.hg = nn.ModuleList(hg)
        self.res = nn.ModuleList(res)
        self.fc = nn.ModuleList(fc)
        self.score = nn.ModuleList(score)
        self.fc_ = nn.ModuleList(fc_)
        self.score_ = nn.ModuleList(score_)

    def _make_residual(self, block, planes, blocks, stride=1):
        # First block may need a 1x1 conv to match channels/stride on the
        # shortcut; subsequent blocks keep shape.
        resample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            resample = nn.Conv2d(
                self.inplanes, planes * block.expansion, kernel_size=1, stride=stride
            )
        layers = [block(self.inplanes, planes, stride, resample)]
        self.inplanes = planes * block.expansion
        for i in range(blocks - 1):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        # NOTE(review): BatchNorm2d is built with `inplanes`, not `outplanes`.
        # Harmless for the current calls (always ch -> ch) but looks like a
        # latent bug if ever called with inplanes != outplanes — confirm.
        return nn.Sequential(
            nn.Conv2d(inplanes, outplanes, kernel_size=1),
            nn.BatchNorm2d(inplanes),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        out = []
        # (8,3,512,512)
        x = self.conv1(x)
        # (8,64,256,256)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        # (8,128,256,256)
        x = self.maxpool(x)
        # (8,128,128,128)
        x = self.layer2(x)
        # (8,128,128,128)
        x = self.layer3(x)
        # (8,256,128,128)
        for i in range(self.num_stacks):
            y = self.hg[i](x)
            y = self.res[i](y)
            y = self.fc[i](y)
            score = self.score[i](y)
            score = self.maxpool(score)
            out.append(score)
            # Mix the stack's features and prediction back into the trunk.
            if i < self.num_stacks - 1:
                fc_ = self.fc_[i](y)
                score_ = self.score_[i](score)
                x = x + fc_ + score_
        # Predictions are returned last-stack first.
        return out[::-1]
def hg(**kwargs):
    """
    Build a HourglassNet from keyword arguments.

    Required: planes, depth, num_stacks, num_blocks.
    Optional: head — a callable (in_ch, out_ch) -> nn.Module; defaults to a
    1x1 convolution.
    """
    head = kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1))
    return HourglassNet(
        planes=kwargs["planes"],
        block=Bottleneck2D,
        head=head,
        depth=kwargs["depth"],
        num_stacks=kwargs["num_stacks"],
        num_blocks=kwargs["num_blocks"],
    )
def main():
    """Smoke test: build a small single-stack network."""
    # BUG FIX: hg() reads kwargs["planes"] unconditionally, so the previous
    # call hg(depth=2, num_stacks=1, num_blocks=1) raised KeyError('planes').
    hg(planes=16, depth=2, num_stacks=1, num_blocks=1)


if __name__ == "__main__":
    main()
| PJLab-ADG/SensorsCalibration | SensorX2car/camera2car/auto_calib/models/hourglass_pose.py | hourglass_pose.py | py | 6,582 | python | en | code | 1,730 | github-code | 90 |
14252184650 | from django.contrib import admin
from .models import Question, Choice
#copying template from 'C:\Users\LCM\pythonprojects\django\env\lib\python3.6\site-packages\django\contrib\admin\templates\admin'
# class ChoiceInline(admin.StackedInline):
# model = Choice
# extra = 3
# make this more compact -> TabularInline
# Compact, table-style inline editor for Choice rows on the Question page.
class ChoiceInline(admin.TabularInline):
    model = Choice
    # Number of empty extra choice forms shown by default.
    extra = 3
class QuestionAdmin(admin.ModelAdmin):
    # fields = ['pub_date', 'question_text']
    # Group the edit form into sections; the date section starts collapsed.
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date'],
                              'classes': ['collapse']}),
    ]
    inlines = [ChoiceInline]
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']  # that extra filter tab on the right that seems quite meaningless in some cases
    search_fields = ['question_text']  # adds the search box at the top of the list
# Choices are edited inline on Question, so Choice is not registered separately.
admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice)  # on the separate screen
| viridis45/Python_CodingExercise | forms/django_review/testdjango/polls/admin.py | admin.py | py | 1,088 | python | en | code | 0 | github-code | 90 |
28122104488 | from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.utils import timezone
from django.utils.encoding import smart_text
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from haleygg.models import League
from haleygg.models import Map
from haleygg.models import Match
from haleygg.models import Player
from haleygg.models import PlayerTuple
class LeagueSerializer(serializers.ModelSerializer):
    """CRUD serializer for League; type and start_date are optional on input."""

    class Meta:
        model = League
        fields = ["id", "name", "type", "start_date"]
        extra_kwargs = {"type": {"required": False}, "start_date": {"required": False}}
class MapSerializer(serializers.ModelSerializer):
    """CRUD serializer for Map, including its image and type."""

    class Meta:
        model = Map
        fields = ["id", "name", "image", "type"]
class PlayerSerializer(serializers.ModelSerializer):
    """CRUD serializer for Player; joined_date defaults to the current date."""

    # BUG FIX: the previous default was `timezone.now().date` — the bound
    # method of a datetime captured once at import time, so every new player
    # silently got the server start-up date. A zero-argument lambda defers
    # evaluation to save time.
    joined_date = serializers.DateField(default=lambda: timezone.now().date())

    class Meta:
        model = Player
        fields = ["id", "name", "favorate_race", "joined_date", "career"]
        extra_kwargs = {
            "joined_date": {"required": False},
            "career": {"required": False},
        }
class PlayerTupleListSerializer(serializers.ListSerializer):
    """Bulk validation/create/update for the player tuples of one match."""

    def validate(self, player_tuples):
        # Collect all problems and raise once at the end.
        error_msg = []
        # A single tuple means a melee (1v1) match: races are then mandatory.
        is_melee_match = len(player_tuples) == 1
        if is_melee_match:
            if not player_tuples[0]["winner_race"]:
                error_msg.append({"winner_race": "๊ฐ์ธ์ ์ ํ๋ ์ด์ด์ ์ข์กฑ๊ฐ์ด ํ์ํฉ๋๋ค."})
            if not player_tuples[0]["loser_race"]:
                error_msg.append({"loser_race": "๊ฐ์ธ์ ์ ํ๋ ์ด์ด์ ์ข์กฑ๊ฐ์ด ํ์ํฉ๋๋ค."})
        # Each player may appear at most once across all tuples, and must exist.
        players = []
        for player_tuple in player_tuples:
            winner_name = player_tuple.get("winner").name
            if not Player.objects.filter(name__iexact=winner_name).exists():
                error_msg.append({"winner": f"ํ๋ ์ด์ด {winner_name}์ ์กด์ฌํ์ง ์์ต๋๋ค."})
            else:
                if winner_name in players:
                    error_msg.append({"winner": f"ํ๋ ์ด์ด {winner_name}๊ฐ ์ค๋ณต๋์์ต๋๋ค."})
                players.append(winner_name)
            loser_name = player_tuple.get("loser").name
            if not Player.objects.filter(name__iexact=loser_name).exists():
                error_msg.append({"loser": f"ํ๋ ์ด์ด {loser_name}์ ์กด์ฌํ์ง ์์ต๋๋ค."})
            else:
                if loser_name in players:
                    error_msg.append({"loser": f"ํ๋ ์ด์ด {loser_name}๊ฐ ์ค๋ณต๋์์ต๋๋ค."})
                players.append(loser_name)
        if error_msg:
            raise serializers.ValidationError(error_msg)
        return player_tuples

    def create(self, validated_data, match):
        # Build all PlayerTuple rows for *match* in a single bulk insert.
        player_tuples = []
        for item in validated_data:
            player_tuples.append(
                PlayerTuple(
                    match=match,
                    winner=item["winner"],
                    loser=item["loser"],
                    winner_race=item.get("winner_race"),
                    loser_race=item.get("loser_race"),
                )
            )
        return PlayerTuple.objects.bulk_create(player_tuples)

    def update(self, instance, validated_data):
        # Map existing tuples and incoming payloads by primary key; the two
        # key sets must match exactly.
        self.player_tuple_mapping = {
            player_tuple.id: player_tuple for player_tuple in instance
        }
        data_mapping = {data.get("id"): data for data in validated_data}
        different_ids = set(self.player_tuple_mapping.keys() - data_mapping.keys())
        if different_ids:
            raise serializers.ValidationError(
                {"player_tuples_id": f"{different_ids}์ ํด๋นํ๋ ๊ฐ์ด ์์ต๋๋ค."}
            )
        # self.has_changed lets the caller know whether anything was modified.
        self.has_changed = False
        for player_id, data in data_mapping.items():
            player_tuple_instance = self.find_player_tuple_from_instance(player_id)
            if (
                player_tuple_instance.winner.name != data["winner"].name
                or player_tuple_instance.winner_race != data["winner_race"]
                or player_tuple_instance.loser.name != data["loser"].name
                or player_tuple_instance.loser_race != data["loser_race"]
            ):
                self.has_changed = True
                player_tuple_instance.winner = data["winner"]
                player_tuple_instance.winner_race = data["winner_race"]
                player_tuple_instance.loser = data["loser"]
                player_tuple_instance.loser_race = data["loser_race"]
        PlayerTuple.objects.bulk_update(
            instance, ["winner", "winner_race", "loser", "loser_race"]
        )
        return instance

    def find_player_tuple_from_instance(self, player_id):
        # Lookup built in update(); valid only during an update() call.
        return self.player_tuple_mapping[player_id]
class CreatableSlugRelatedField(serializers.SlugRelatedField):
    """SlugRelatedField that creates the related object on the fly when the
    slug does not exist (matching is case-insensitive)."""

    def to_internal_value(self, data):
        try:
            # Case-insensitive get_or_create; on creation the slug is stored
            # exactly as the caller spelled it.
            return self.get_queryset().get_or_create(
                **{f"{self.slug_field}__iexact": str(data)},
                defaults={self.slug_field: str(data)},
            )[0]
        except ObjectDoesNotExist:
            self.fail(
                "does_not_exist", slug_name=self.slug_field, value=smart_text(data)
            )
        except (TypeError, ValueError):
            self.fail("invalid")
class PlayerTupleSerializer(serializers.ModelSerializer):
    """One winner/loser pair of a match; validated in bulk via
    PlayerTupleListSerializer."""

    # Race choices; the empty pair allows "unspecified".
    RACE_LIST = [
        ("P", "Protoss"),
        ("T", "Terran"),
        ("Z", "Zerg"),
        ("R", "Random"),
        ("", ""),
    ]
    # Players are referenced by name and auto-created when missing.
    winner = CreatableSlugRelatedField(queryset=Player.objects.all(), slug_field="name")
    loser = CreatableSlugRelatedField(queryset=Player.objects.all(), slug_field="name")
    winner_race = serializers.ChoiceField(
        allow_null=True, choices=RACE_LIST, default="", required=False
    )
    loser_race = serializers.ChoiceField(
        allow_null=True, choices=RACE_LIST, default="", required=False
    )

    class Meta:
        model = PlayerTuple
        fields = [
            "id",
            "winner",
            "winner_race",
            "loser",
            "loser_race",
        ]
        extra_kwargs = {
            # id is writable so the list serializer's update() can match
            # incoming payloads to existing rows by primary key.
            "id": {"read_only": False, "required": False},
        }
        list_serializer_class = PlayerTupleListSerializer
class MatchListSerializer(serializers.ListSerializer):
    """Bulk-create support for matches."""

    def validate(self, matches):
        # Reject duplicated (league, title) pairs inside the same batch;
        # duplicates against the database are handled by the child
        # serializer's UniqueTogetherValidator.
        match_names = []
        for match in matches:
            match_name = match["league"].name + match["title"]
            if match_name in match_names:
                raise serializers.ValidationError("์ด๋ฏธ league์ title์ด ๋์ผํ ๋ฐ์ดํฐ๊ฐ ์กด์ฌํฉ๋๋ค.")
            match_names.append(match_name)
        return matches
class MatchSerializer(serializers.ModelSerializer):
    """Serializer for a Match together with its nested player tuples."""

    league = serializers.SlugRelatedField(
        queryset=League.objects.all(), slug_field="name"
    )
    # Maps are auto-created by name when missing.
    map = CreatableSlugRelatedField(queryset=Map.objects.all(), slug_field="name")
    player_tuples = PlayerTupleSerializer(
        many=True, required=True, allow_empty=False, min_length=1
    )

    class Meta:
        model = Match
        fields = [
            "id",
            "league",
            "date",
            "title",
            "map",
            "miscellaneous",
            "player_tuples",
        ]
        extra_kwargs = {
            "date": {"required": True},
            "title": {"required": True},
            "miscellaneous": {"required": False, "allow_blank": True},
        }
        validators = [
            UniqueTogetherValidator(
                queryset=Match.objects.all(),
                fields=["league", "title"],
                message="์ด๋ฏธ league์ title์ด ๋์ผํ ๋ฐ์ดํฐ๊ฐ ์กด์ฌํฉ๋๋ค.",
            ),
        ]
        list_serializer_class = MatchListSerializer

    def create(self, validated_data):
        # Helper methods below stash intermediate state on self.
        self.get_data_from_validated_data(validated_data=validated_data)
        # Match and its player tuples are created in one transaction.
        with transaction.atomic():
            self.create_match()
            self.create_player_tuples()
        return self.match

    def get_data_from_validated_data(self, validated_data):
        # Unpack the validated payload into attributes used by create_match().
        self.league = validated_data.get("league")
        self.map = validated_data.get("map")
        self.date = validated_data.get("date")
        self.title = validated_data.get("title")
        self.miscellaneous = validated_data.get("miscellaneous")
        self.player_tuples = validated_data.pop("player_tuples")

    def create_match(self):
        self.match = Match.objects.create(
            league=self.league,
            date=self.date,
            title=self.title,
            map=self.map,
            miscellaneous=self.miscellaneous,
        )

    def create_player_tuples(self):
        # Delegates bulk creation to the list serializer.
        player_serializer = PlayerTupleSerializer(many=True)
        self.player_tuples_instance = player_serializer.create(
            validated_data=self.player_tuples, match=self.match
        )

    def update(self, instance, validated_data):
        player_tuples_validated_data = validated_data.pop("player_tuples")
        player_tuples_instance = instance.get_related_player_tuples()
        self.player_serializer = PlayerTupleSerializer(many=True)
        # Update the tuples and the match itself atomically.
        with transaction.atomic():
            self.player_tuples_instance = self.player_serializer.update(
                instance=player_tuples_instance,
                validated_data=player_tuples_validated_data,
            )
            instance = super().update(instance=instance, validated_data=validated_data)
        return instance
class WinRatioByRaceSerializer(serializers.Serializer):
    """Read-only counts of cross-race wins (e.g. Protoss beating Terran)."""

    protoss_wins_to_terran_count = serializers.IntegerField()
    protoss_wins_to_zerg_count = serializers.IntegerField()
    terran_wins_to_protoss_count = serializers.IntegerField()
    terran_wins_to_zerg_count = serializers.IntegerField()
    zerg_wins_to_protoss_count = serializers.IntegerField()
    zerg_wins_to_terran_count = serializers.IntegerField()
class PlayerMatchSummarySerializer(WinRatioByRaceSerializer):
    """Per-player match statistics: mirror-race results, losses, and melee /
    top-and-bottom totals, on top of the inherited cross-race win counts."""

    # Mirror matchups (same race on both sides).
    protoss_wins_to_protoss_count = serializers.IntegerField()
    terran_wins_to_terran_count = serializers.IntegerField()
    zerg_wins_to_zerg_count = serializers.IntegerField()
    # Loss counts, by the player's race vs the opponent's race.
    protoss_loses_to_protoss_count = serializers.IntegerField()
    protoss_loses_to_terran_count = serializers.IntegerField()
    protoss_loses_to_zerg_count = serializers.IntegerField()
    terran_loses_to_protoss_count = serializers.IntegerField()
    terran_loses_to_terran_count = serializers.IntegerField()
    terran_loses_to_zerg_count = serializers.IntegerField()
    zerg_loses_to_protoss_count = serializers.IntegerField()
    zerg_loses_to_terran_count = serializers.IntegerField()
    zerg_loses_to_zerg_count = serializers.IntegerField()
    # Totals by match type.
    winning_melee_matches_count = serializers.IntegerField()
    losing_melee_matches_count = serializers.IntegerField()
    winning_top_and_bottom_matches_count = serializers.IntegerField()
    losing_top_and_bottom_matches_count = serializers.IntegerField()
class MapStatisticsSerializer(WinRatioByRaceSerializer):
    """Per-map cross-race win counts plus the total number of matches played."""

    total_matches_count = serializers.IntegerField()
class PlayerRankValueSerializer(serializers.Serializer):
    """A single (player name, ranking value) entry for leaderboards."""

    name = serializers.CharField()
    value = serializers.IntegerField()
class PlayerComparisonSerializer(serializers.Serializer):
    """Head-to-head win/loss counts between two players."""

    win_count = serializers.IntegerField()
    # BUG FIX: this was `serializers.ImageField()` — an obvious field-type
    # typo; a loss count is an integer like its sibling win_count.
    lose_count = serializers.IntegerField()
| 10cheon00/-deprecated--haleygg_server_v2 | haleygg/serializers.py | serializers.py | py | 11,438 | python | en | code | 0 | github-code | 90 |
# Process q queries on string s. Query "1" reverses the string; query
# "2 f c" adds character c at the front (f=1) or back (f=2). Instead of
# reversing eagerly, only the reversal count's parity is tracked and the
# roles of "front" and "back" are swapped while the parity is odd; the
# final answer is reversed once at the end if needed.
s = input()
q = int(input())
f1 = 1  # unused
f2 = 2  # unused
cnt = 0  # number of type-1 (reverse) queries seen so far
st = ""  # characters accumulated in front of s (pre-reversal orientation)
se = ""  # characters accumulated behind s
for _ in range(q):
    q = input().split()  # NOTE: shadows the query count; safe only because the loop bound is already fixed
    if int(q[0]) == 1:
        cnt += 1
    elif int(q[0]) == 2:
        if cnt % 2 == 0:
            if int(q[1]) == 1:
                st = q[2] + st
            elif int(q[1]) == 2:
                se += q[2]
        else:
            # String is currently "flipped": front and back swap roles.
            if int(q[1]) == 1:
                se += q[2]
            elif int(q[1]) == 2:
                st = q[2] + st
ans = st + s + se
if cnt % 2 == 0:
    print(ans)
else:
    # Odd number of reversals: emit the reversed string.
    ans = list(ans)
    ans.reverse()
    ans = "".join(ans)
    print(ans)
11201571244 | from collections import defaultdict
from typing import cast
from unittest.mock import patch
import pytest
from aioqzone.type.internal import PersudoCurkey
from aioqzone_feed.type import FeedContent
from qqqr.utils.net import ClientAdapter
from qzemoji.utils import build_html
from telegram import Bot
from telegram.error import BadRequest, TimedOut
from qzone3tg.bot.queue import MsgQueue, QueueEvent, is_atoms, is_mids
from qzone3tg.bot.splitter import FetchSplitter
from qzone3tg.type import FeedPair
from . import FakeBot, fake_feed, fake_media
pytestmark = pytest.mark.asyncio
class Ihave0(QueueEvent):
    """Test hook: a feed whose first entity text is "0" pretends to be
    already sent (message id [0]); others are unknown."""

    async def GetMid(self, feed: FeedContent) -> list[int] | None:
        if feed.entities[0].con == "0":  # type: ignore
            return [0]

    async def reply_markup(self, feed, need_forward: bool):
        # Markup 2 for the feed itself; markup 1 for the forward part when present.
        return FeedPair(2, 1 if need_forward else None)
@pytest.fixture
def fake_bot():
    # In-memory Bot replacement that records every send call in .log.
    return FakeBot()
@pytest.fixture
def queue(client: ClientAdapter, fake_bot: Bot):
    # MsgQueue under test, wired to the fake bot and the Ihave0 hook.
    q = MsgQueue(fake_bot, FetchSplitter(client), defaultdict(int))
    q.register_hook(Ihave0())
    return q
class TestQueue:
    async def test_add(self, queue: MsgQueue):
        # A "0" feed is resolved to message ids (already sent); any other
        # feed is scheduled as atoms; forwards are tracked separately.
        queue.new_batch(0)
        f = fake_feed(0)
        queue.add(0, f)
        assert len(queue.q) == 1
        await queue.wait(PersudoCurkey(f.uin, f.abstime))
        assert queue.q[f].feed and is_mids(queue.q[f].feed)
        # Re-adding the same feed (even under another bid) must not duplicate it.
        queue.add(1, f)
        await queue.wait(PersudoCurkey(f.uin, f.abstime))
        assert len(queue.q) == 1
        f = fake_feed(1)
        f.uin = 1
        queue.add(0, f)
        assert len(queue.q) == 2
        await queue.wait(PersudoCurkey(f.uin, f.abstime))
        assert queue.q[f].feed and is_atoms(queue.q[f].feed)
        # Feed forwarding an "already sent" feed: feed -> atoms, forward -> mids.
        f = fake_feed(2)
        f.uin = 2
        f.forward = fake_feed(0)
        queue.add(0, f)
        assert len(queue.q) == 3
        await queue.wait(PersudoCurkey(f.uin, f.abstime))
        assert queue.q[f].feed and is_atoms(queue.q[f].feed)
        assert queue.q[f].forward and is_mids(queue.q[f].forward)

    async def test_send_norm(self, queue: MsgQueue):
        # Three feeds with increasing abstime must be sent in order.
        queue.new_batch(1)
        for i in range(3):
            f = fake_feed(i + 1)
            f.abstime = i * 1000
            queue.add(1, f)
        await queue.send_all()
        assert queue.sending is None
        bot = cast(FakeBot, queue.bot)
        assert len(bot.log) == 3
        assert "".join(i[2][-1] for i in bot.log) == "123"

    async def test_drop_dup_feed(self, queue: MsgQueue):
        # Same uin + same content at different abstimes is a duplicate and is
        # dropped; a different uin with the same content is not.
        queue.new_batch(3)
        f = fake_feed(1)
        queue.add(3, f)
        f = fake_feed(1)
        f.abstime = 1
        queue.add(3, f)
        f = fake_feed(1)
        f.uin = f.abstime = 2
        queue.add(3, f)
        f = fake_feed(1)
        f.abstime = 3
        queue.add(3, f)
        await queue.send_all()
        bot = cast(FakeBot, queue.bot)
        assert len(bot.log) == 2
        assert sum((i.feed for i in queue.q.values()), []) == [1, 1, 2, 1]

    @pytest.mark.parametrize(
        ["feed", "forward", "markups"],
        [
            (fake_feed(2), fake_feed(1), [1, 2]),
            (fake_feed(2), fake_feed(0), [2]),
        ],
    )
    async def test_reply_markup(self, queue: MsgQueue, feed, forward, markups: list[int]):
        # Per Ihave0.reply_markup, the forward gets markup 1 and the feed
        # gets markup 2; a forward of "0" is already sent, so only the feed
        # message (markup 2) goes out.
        queue.new_batch(2)
        feed.forward = forward
        queue.add(2, feed)
        await queue.send_all()
        assert queue.sending is None
        bot = cast(FakeBot, queue.bot)
        assert len(bot.log) == len(markups)
        for i, markup in zip(bot.log, markups):
            assert i[-1]["reply_markup"] == markup

    @pytest.mark.parametrize(
        ["exc2r", "grp_len"],
        [
            (TimedOut, 2),
            # (BadRequest("Reply message not found"), 2),
            (BadRequest("Wrong file identifier/http url specified"), 2),
            (RuntimeError, 1),
        ],
    )
    async def test_send_retry(self, queue: MsgQueue, exc2r: Exception, grp_len: int):
        # Failing sends leave no bot.log entries; the number of recorded
        # exceptions per feed (grp_len) presumably reflects one retry for
        # retryable errors and none for unknown ones — see MsgQueue.
        queue.new_batch(0)
        with patch.object(FakeBot, "send_photo", side_effect=exc2r):
            f = fake_feed(1)
            f.media = [fake_media(build_html(100))]
            queue.add(0, f)
            await queue.send_all()
        assert queue.sending is None
        bot = cast(FakeBot, queue.bot)
        assert not bot.log
        assert len(queue.exc_groups[f]) == grp_len
| aioqzone/Qzone2TG | test/bot/test_queue.py | test_queue.py | py | 4,376 | python | en | code | 27 | github-code | 90 |
3363285227 | """Tests."""
from typing import List, Tuple
import pytest
from hash_chunker import HashChunker
@pytest.mark.parametrize(
    "chunk_size, all_items_count, expected",
    [
        (1000, 2000, [("", "8000000000"), ("8000000000", "ffffffffff")]),
        (
            500,
            1500,
            [
                ("", "5555600000"),
                ("5555600000", "aaaac00000"),
                ("aaaac00000", "ffffffffff"),
            ],
        ),
    ],
)
def test_default_usage(
    chunk_size: int,
    all_items_count: int,
    expected: List[Tuple[str, str]],
) -> None:
    """
    Check that get_chunks splits the hash space into the expected ranges.

    :param chunk_size: chunk elements limit
    :param all_items_count: count of all data elements
    :param expected: expected chunk boundary pairs
    """
    assert list(HashChunker().get_chunks(chunk_size, all_items_count)) == expected
@pytest.mark.parametrize(
    "chunk_size, all_items_count, chunk_hash_length, expected",
    [
        (1, 2, 5, [("", "80000"), ("80000", "fffff")]),
    ],
)
def test_chunk_hash_length(
    chunk_size: int,
    all_items_count: int,
    chunk_hash_length: int,
    expected: List[Tuple[str, str]],
) -> None:
    """
    Check that chunk_hash_length controls the length of the boundary hashes.

    :param chunk_size: chunk elements limit
    :param all_items_count: count of all data elements
    :param chunk_hash_length: chunks hash length
    :param expected: expected chunk boundary pairs
    """
    hash_chunker = HashChunker(chunk_hash_length=chunk_hash_length)
    assert list(hash_chunker.get_chunks(chunk_size, all_items_count)) == expected
@pytest.mark.parametrize(
    "chunks_count, expected",
    [
        (2, [("", "8000000000"), ("8000000000", "ffffffffff")]),
        (
            3,
            [
                ("", "5555600000"),
                ("5555600000", "aaaac00000"),
                ("aaaac00000", "ffffffffff"),
            ],
        ),
    ],
)
def test_get_fixed_chunks(
    chunks_count: int,
    expected: List[Tuple[str, str]],
) -> None:
    """
    Check that get_fixed_chunks splits the hash space into exactly
    *chunks_count* ranges.

    :param chunks_count: chunks limit
    :param expected: expected chunk boundary pairs
    """
    assert list(HashChunker().get_fixed_chunks(chunks_count)) == expected
| whysage/hash_chunker | tests/test_hash_chunker.py | test_hash_chunker.py | py | 2,159 | python | en | code | 2 | github-code | 90 |
25793914865 | import numpy as np
from scipy.constants import *
import cmath
import math
def func(a):
    """Return the tuple (2, 3) for strictly positive input, otherwise the string "no"."""
    return (2, 3) if a > 0 else "no"
if __name__ == '__main__':
    # Demo: exercise both branches of func and tuple-unpack a positive result.
    print(func(-2))
    print(func(6))
    val = func(6)
    a, b = val
    print(a)
    print(b)
# Admission gate: 'a' passes freely, 'b' passes only while foreign slots
# remain, anything else never passes; at most a + b people enter in total.
n, a, b = map(int, input().split())
s = input()
admitted = 0
b_admitted = 0
for ch in s:
    if admitted >= a + b:
        print('No')
    elif ch == 'a':
        print('Yes')
        admitted += 1
    elif ch == 'b':
        if b_admitted < b:
            print('Yes')
            b_admitted += 1
            admitted += 1
        else:
            print('No')
    else:
        print('No')
70762276138 | """
Hacer una funciรณn que reciba un nรบmero y que nos indique si es un nรบmero capicรบa o no.
El nรบmero capicรบa se lee igual de derecha a izquierda y viceversa.
Ejemplo:
nombreFuncion(3003)//Devuelve: Es capicรบa
nombreFuncion(2023)//Devuelve: No es capicรบa
"""
def verificaCapicua(numero):
    """Report whether an integer reads the same forwards and backwards.

    :param numero: non-negative integer to check
    :return: "Es capicúa." when ``numero`` is a palindrome,
             otherwise "No es capicúa."
    """
    # Idiom fix: reverse the decimal digits with slicing instead of the
    # manual character-accumulation loop; converting back to int drops
    # leading zeros, matching the original comparison exactly.
    if int(str(numero)[::-1]) == numero:
        return "Es capicúa."
    return "No es capicúa."
# Quick manual check: one palindrome and one non-palindrome.
numero = 2002
numero2 = 2023
print(verificaCapicua(numero))
print(verificaCapicua(numero2)) | AlexSR2590/logica_python | 3-Ejercicios-logica/ejercicio29.py | ejercicio29.py | py | 718 | python | es | code | 0 | github-code | 90 |
23241704653 | from sys import stdin, stdout
def solve():
    """Read the 28 submitted attendance numbers (1..30) from stdin and
    print, in ascending order, the two numbers that are missing."""
    submitted = {int(stdin.readline().rstrip()) for _ in range(28)}
    for number in range(1, 31):
        if number not in submitted:
            stdout.write("%d\n" % number)
# Run the solver when executed as a script.
if __name__ == "__main__":
    solve()
| anothel/CodeKata | ๋ฐฑ์ค/Bronze/5597.โ
๊ณผ์ โ
์โ
๋ด์ โ
๋ถ๏ผ๏ผ๏ผ/๊ณผ์ โ
์โ
๋ด์ โ
๋ถ๏ผ๏ผ๏ผ.py | ๊ณผ์ โ
์โ
๋ด์ โ
๋ถ๏ผ๏ผ๏ผ.py | py | 259 | python | en | code | 1 | github-code | 90 |
4737237694 | #!/usr/bin/kivy
import kivy
kivy.require('1.0.6')
from cg_graphics_audio import *
from kivy.app import App
class CuriosityApp(App):
    """Kivy application wrapper for the Curiosity game."""
    # Game logic object (CuriosityGame); created in build().
    cg = None
    # Layout that receives the per-item widgets; resolved in build().
    float_layout = None
    # Root widget tree; replaces App's auto-built root.
    root = None
    def build(self):
        """Construct the widget tree and attach every game item widget.

        Returns None, so Kivy falls back to the ``root`` attribute set here.
        """
        self.cg = CuriosityGame(self)
        self.root = CuriosityWidget()
        # NOTE(review): assumes the target layout is the first child of the
        # first child of the root widget -- confirm against the .kv layout.
        self.float_layout = self.root.children[0].children[0]
        for key, value in self.cg.items.items():
            self.float_layout.add_widget(value)
    def on_pause(self):
        """Allow the app to be paused (mobile) instead of being stopped."""
        return True
# Start the Kivy event loop when run as a script.
if __name__ == '__main__':
    CuriosityApp().run()
| curiosity-lab/curiosity_game_graphics_audio | main.py | main.py | py | 545 | python | en | code | 0 | github-code | 90 |
26392246038 | import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Secret key is read from the environment; the literal fallback is a
# placeholder and must never be used in production.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'DJANGO_SECRET_KEY')
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', False)
#SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '/' is not a valid host pattern and '*' disables host
# validation entirely -- confirm the wildcard is intentional.
ALLOWED_HOSTS = ['himprofgrodno.by', '/', '*']
# Application definition
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'blog',
'filebrowser',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'rest_framework',
'django_summernote',
]
#INSTALLED_APPS += ('django_summernote', )
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dja2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media'
],
},
},
]
WSGI_APPLICATION = 'dja2.wsgi.application'
# MySQL connection settings; NAME/USER/PASSWORD/PORT are placeholder
# literals -- presumably replaced by django_heroku.settings() further
# down, TODO confirm before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'OPTIONS': {
            'sql_mode': 'traditional',
        },
        'NAME': 'NAME',
        'USER': 'USER',
        'PASSWORD': 'PASSWORD',
        'HOST': 'localhost',
        'PORT': 'PORT',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
#ADMIN_MEDIA_PREFIX = STATIC_URL + "grappelli/"
X_FRAME_OPTIONS = 'SAMEORIGIN'
SUMMERNOTE_THEME = 'bs4' # Show summernote with Bootstrap4
MEDIA_ROOT = os.path.join(BASE_DIR, 'blog/media/')
MEDIA_URL = '/media/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
# Activate Django-Heroku.
django_heroku.settings(locals())
#del DATABASES['default']['OPTIONS']['sslmode']
SUMMERNOTE_CONFIG = {
# Using SummernoteWidget - iframe mode, default
# Or, you can set it as False to use SummernoteInplaceWidget by default - no iframe mode
# In this case, you have to load Bootstrap/jQuery stuff by manually.
# Use this when you're already using Bootstraip/jQuery based themes.
'iframe': True,
# You can put custom Summernote settings
'summernote': {
# As an example, using Summernote Air-mode
'airMode': False,
# Change editor size
'width': '90%',
'height': '580',
# Use proper language setting automatically (default)
'lang': None,
# Toolbar customization
# https://summernote.org/deep-dive/#custom-toolbar-popover
'toolbar': [
['style', ['style']],
['font', ['bold', 'underline', 'clear']],
['fontname', ['fontname']],
['color', ['color']],
['para', ['ul', 'ol', 'paragraph']],
['table', ['table']],
['insert', ['link', 'picture', 'video']],
['view', ['fullscreen', 'codeview', 'help']],
],
}
}
| AlexXG0152/django-landing | dja2/settings.py | settings.py | py | 5,120 | python | en | code | 0 | github-code | 90 |
15171672382 | import requests
from bs4 import BeautifulSoup
url = "https://www.cricbuzz.com/cricket-match/live-scores"
live_match = []
live_match_links = []
completed_match = []
completed_match_links = []
all_matches = []
ongoing_matches = ""
finished_matches = ""
live_match_len = 0
completed_match_len = 0
def match():
    """Scrape the Cricbuzz live-scores page into the module-level lists
    and return a formatted summary string of live and completed matches
    (or a "nothing on" message when both lists are empty).
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.text,'html.parser')
    print(soup)  # debug dump of the fetched page
    headers = soup.find_all(class_="cb-col cb-col-100 cb-lv-main")
    print(soup)  # debug dump (duplicate of the one above)
    # Drop the first headline link so it is not scraped as a match.
    first_links = soup.find(class_='text-hvr-underline text-bold')
    first_links.decompose()
    # The lists are only mutated in place, so the declarations are for
    # clarity; fix: "completed_match_links" was misspelled
    # "completed_match_inks" here.
    global live_match
    global completed_match
    global live_match_links
    global completed_match_links
    live_match.clear()
    completed_match.clear()
    live_match_links.clear()
    completed_match_links.clear()
    for head in headers:
        match_live = head.find_all(class_="cb-lv-scrs-well cb-lv-scrs-well-live")
        print(match_live)
        match_completed = head.find_all(class_="cb-lv-scrs-well cb-lv-scrs-well-complete")
        print()
        print()
        print(match_completed)
        # Loop variable renamed from "match", which shadowed this function.
        for item in match_live:
            live_match.append(item['title'])
            live_match_links.append(item['href'])
        for item in match_completed:
            completed_match.append(item['title'])
            completed_match_links.append(item['href'])
    # These counters are rebound, so the global declarations are required.
    global live_match_len
    global completed_match_len
    live_match_len = len(live_match)
    completed_match_len = len(completed_match)
    print(live_match_len)
    print(completed_match_len)
    all_matches_string = ""
    count = 1
    if live_match_len > 0:
        all_matches_string += "*************LIVE MATCHES*************\n"
        for i in range(0,live_match_len):
            print(live_match[i])
            print(url + live_match_links[i])
            all_matches_string += str(count) + " >>> " + live_match[i] + "\n\n"
            count+=1
        all_matches_string += "\n\n"
    if completed_match_len > 0:
        all_matches_string += "*********COMPLETED MATCHES*********\n"
        for i in range(0,completed_match_len):
            print(completed_match[i])
            print(url + completed_match_links[i])
            all_matches_string += str(count) + " >>> " + completed_match[i] + "\n\n"
            count+=1
    if live_match_len==0 and completed_match_len==0:
        all_matches_string = "Currently, There is neither no live matches nor recently ended matches."
    return all_matches_string
def extract_live_match(match_url):
    """Fetch a live-match page and build a short status string.

    :param match_url: path component of the match page on cricbuzz.com
    :return: multi-line string with previous/current session, run rate
             and the in-progress status text (also printed for debugging)
    """
    match_page = requests.get("https://www.cricbuzz.com/" + match_url)
    soup = BeautifulSoup(match_page.text,'html.parser')
    # CSS classes Cricbuzz uses for the possible in-progress states.
    match_status = ["cb-text-stump", "cb-text-inprogress", "cb-text-lunch", "cb-text-rain", "cb-text-tea"]
    match_details = ""
    main_content = soup.find(class_="cb-col cb-col-67 cb-nws-lft-col cb-comm-pg")
    match_header = main_content.find(class_="cb-min-bat-rw")
    prevSession = main_content.find_all(class_="cb-col cb-col-67 cb-scrs-wrp")
    prevSession = prevSession[0]
    info = []
    # Try each status class until one matches this page.
    for status in match_status:
        info = prevSession.find_all(class_=status)
        if info != []:
            break
        else:
            pass
    # NOTE(review): raises IndexError when none of the status classes
    # matched -- confirm every live page carries one of them.
    info = info[0].contents[0]
    prevSession = prevSession.find(class_="cb-text-gray cb-font-16")
    if prevSession is not None:
        # Strip the leading character (presumably whitespace) -- verify.
        prevSession = prevSession.contents[0][1:]
        match_details += "Previous session: " + prevSession + "\n\n"
    rr = main_content.find(class_="cb-font-12 cb-text-gray")
    rr = rr.find_all("span")
    rr = rr[1].text
    match_score = match_header.find(class_="cb-font-20 text-bold")
    #match_current_status = match_current_status[0]
    match_details += "Current session: " + match_score.contents[0][1:] + "\n"
    match_details += "Current RR: " + rr +"\n" + info
    print(match_details)
    print()
    return match_details
def extract_completed_match(match_url):
    """Fetch a finished-match page and build a result summary string.

    :param match_url: path component of the match page on cricbuzz.com
    :return: innings score lines plus the result text, or just the
             abandoned notice when the match was abandoned
    """
    match_page = requests.get("https://www.cricbuzz.com/" + match_url)
    soup = BeautifulSoup(match_page.text,'html.parser')
    abandon = 0
    match_details = ""
    main_content = soup.find(class_="cb-col cb-col-67 cb-nws-lft-col cb-comm-pg")
    # Result banner: try the "man of the match" variant first, then the
    # plain "complete" variant, finally the abandoned notice.
    result = main_content.find(class_="cb-col cb-col-100 cb-min-stts cb-text-mom")
    if result is None:
        result = main_content.find(class_="cb-col cb-col-100 cb-min-stts cb-text-complete")
    if result is None:
        result = soup.find(class_="cb-col cb-col-100 cb-font-18 cb-toss-sts cb-text-abandon")
        abandon = 1
    result = result.contents[0]
    if abandon == 0:
        # Innings score lines are only present when the match was played.
        match_header = main_content.find_all(class_="cb-col cb-col-100 cb-col-scores")
        match_header = match_header[0]
        Session = match_header.find(class_="cb-col cb-col-100 cb-min-tm cb-text-gray")
        match_details += Session.contents[0] + "\n"
        Session = match_header.find(class_="cb-col cb-col-100 cb-min-tm")
        match_details += Session.contents[0] + "\n\n"
    match_details += result
    print(match_details)
    return match_details
# Manual smoke test: fetch and print the current match list.
if __name__ == "__main__":
    match()
| sathvikrijo/Cricket-Score_TelegramBOT | cricket.py | cricket.py | py | 5,292 | python | en | code | 4 | github-code | 90 |
22139046682 | import numpy as np
import matplotlib.pyplot as plt
def sinusGenerator(amplitudo, frekuensi, tAkhir, theta):
    """Sample a sine wave on [0, tAkhir) with a 0.1 s step.

    amplitudo -- peak amplitude
    frekuensi -- frequency factor; the phase advances as 2*frekuensi*t
                 (note: no 2*pi factor, matching the original formula)
    tAkhir    -- end of the time axis (exclusive)
    theta     -- phase offset in degrees
    Returns the pair (t, y) of numpy arrays.
    """
    time_axis = np.arange(0, tAkhir, 0.1)
    phase = 2 * frekuensi * time_axis + np.deg2rad(theta)
    return time_axis, amplitudo * np.sin(phase)
# 1. Build the data: three sine waves with phase offsets of 0, 90 and 180 degrees.
t1,y1 = sinusGenerator(1,1,4,0)
t2,y2 = sinusGenerator(1,1,4,90)
t3,y3 = sinusGenerator(1,1,4,180)
# 2. Create one plot per curve.
dataPlot1 = plt.plot(t1,y1)
dataPlot2 = plt.plot(t2,y2)
dataPlot3 = plt.plot(t3,y3)
# Set line properties (colour, dash style, width) for each curve.
plt.setp(dataPlot1, color='r', linestyle='-', linewidth=0.75)
plt.setp(dataPlot2, color='b', linestyle='-.', linewidth=4)
plt.setp(dataPlot3, color='g', linestyle='--', linewidth=1.25)
# 3. Show the figure (blocks until the window is closed).
plt.show()
| tobialbertino/belajar-code | Belajar_Python/LearnMatplotlib/4_SetProperties.py | 4_SetProperties.py | py | 686 | python | en | code | 2 | github-code | 90 |
40239615528 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 11:42:47 2021
@author: SethHarden
"""
from collections import Counter
class Solution:
    """Maximum matching between two character lists whose positions may be
    permuted freely within each connected component of an index graph."""

    def solve(self, str1, str2, edges):
        """Return how many positions i can satisfy str1[i] == str2[i]
        after arbitrary swaps along the index pairs in ``edges``."""
        n = len(str1)
        # Adjacency list over index positions.
        adjacency = [[] for _ in range(n)]
        for a, b in edges:
            adjacency[a].append(b)
            adjacency[b].append(a)
        visited = [False] * n
        total = 0
        for start in range(n):
            if visited[start]:
                continue
            # Collect the connected component with an explicit DFS stack.
            component = []
            stack = [start]
            visited[start] = True
            while stack:
                node = stack.pop()
                component.append(node)
                for neighbour in adjacency[node]:
                    if not visited[neighbour]:
                        visited[neighbour] = True
                        stack.append(neighbour)
            # Greedily pair each str1 character with an available str2
            # character from the same component.
            available = Counter(str2[i] for i in component)
            for i in component:
                if available[str1[i]]:
                    available[str1[i]] -= 1
                    total += 1
        return total
# Driver: build the inputs and alias them so the final call resolves.
ob = Solution()
str1 = ["a", "b", "c", "d"]
str2 = ["b", "a", "c", "d"]
C = [[0, 1],[2, 3]]
# Fix: the print() call that follows invokes ob.solve(A, B, C), but A and
# B were never defined (NameError at runtime); alias them here.
A, B = str1, str2
print(ob.solve(A, B, C)) | sethmh82/SethDevelopment | Practice/Solve-Using-Graphing.py | Solve-Using-Graphing.py | py | 1,266 | python | en | code | 1 | github-code | 90 |
19299993857 | import logging
from django.http import JsonResponse, HttpResponseBadRequest
from django.views.generic.base import TemplateResponseMixin
from .bases import LoggedInView
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from common.functions import get_model_pk_name
from django.utils.safestring import mark_safe
logger = logging.getLogger("hris_integration.views.core")
class ListView(TemplateResponseMixin, LoggedInView):
    """A simple list view of all the objects in the model.

    Subclasses set ``form`` to a ModelForm; the model and the columns to
    display are derived from it in setup().
    """
    form = None
    template_name = "base/base_list.html"
    http_method_names = ["get", "head", "options", "trace"]
    def setup(self, request, *args, **kwargs):
        """Resolve the model and column list from the configured form."""
        super().setup(request, *args, **kwargs)
        if self.form == None:
            # NOTE(review): message says "FormView" but this is ListView.
            raise ValueError("FormView has no ModelForm class specified")
        # NOTE(review): hasattr checks an attribute, not a query-string
        # key, so this guard never fires for a "form" GET parameter.
        if hasattr(request.GET, "form"):
            request.GET.pop("form")
        self._model = self.form._meta.model
        self.fields = self.form.list_fields
        if self.fields == []:
            # Fall back to every form field when no explicit list is given.
            self.fields = list(self.form.base_fields.keys())
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        """Render the list template with column labels and table rows."""
        self.page_title = getattr(self.form, "name", None)
        self.page_description = getattr(self.form, "description", None)
        context = self.get_context(**kwargs)
        # theres no pk in the request so return the list view
        self.template_name = "base/base_list.html"
        labels = []
        for field in self.fields:
            labels.append(self.form.base_fields[field].label)
        context["form_fields"] = labels
        context["form_row"] = self.list_rows()
        logger.debug(f"context: {context}")
        return self.render_to_response(context)
    def list_rows(self):
        """Generate the HTML table rows for the list view.

        The primary-key column is rendered as a link to the detail view;
        the markup is returned marked safe for direct template insertion.
        """
        logger.debug("requested list_rows")
        output = []
        pk = get_model_pk_name(self._model)
        if pk not in self.fields:
            # PK column is hidden; link from the first visible column.
            pk = self.fields[0]
        logger.debug(f"using {pk} as primary key in view")
        for row in self._model.objects.all():
            output.append(f'<tr id="{row.pk}">')
            for field in self.fields:
                val = getattr(row, field)
                if field == pk:
                    val = f'<a href="{self.request.path}{row.pk}/"><strong>{val}</strong></a>'
                if (
                    getattr(self._model, field).__class__.__name__
                    == "ForwardManyToOneDescriptor"
                ):
                    # TODO: [#57] Lookup ForeignKey URL and update val with link to that view
                    pass
                output.append(f"<td>{val}</td>")
            output.append("</tr>")
        logger.debug(f"Output contains: {output}")
        return mark_safe("\n".join(output))
class FormView(TemplateResponseMixin, LoggedInView):
    """The create/edit view for a model object.

    Subclasses set ``form`` to a ModelForm. An ``id`` URL kwarg of 0 means
    "create a new instance" (deletion is disabled in that case).
    """
    form = None
    template_name = "base/base_edit.html"
    enable_delete = True
    edit_postfix = "_edit"
    def setup(self, request: "django.http.HttpRequest", *args, **kwargs):
        """Resolve the target model instance and bind the form to it."""
        super().setup(request, *args, **kwargs)
        # Idiom fix: identity comparison against None.
        if self.form is None:
            raise ValueError("FormView has no ModelForm class specified")
        # NOTE(review): hasattr checks an attribute, not a request key, so
        # these two guards never fire for a "form" parameter -- confirm
        # whether request.POST.get("form") was intended.
        if hasattr(request.POST, "form"):
            request.POST.pop("form")
        if hasattr(request.GET, "form"):
            request.GET.pop("form")
        try:
            pk = kwargs["id"]
        except KeyError:
            pk = 0
        self._model = self.form._meta.model
        self.fields = self.form.base_fields.keys()
        if pk > 0:
            model = self._model.objects.get(pk=pk)
        elif pk == 0:
            # id 0 means "create": start from a blank instance and do not
            # offer deletion of something that does not exist yet.
            model = self._model()
            self.enable_delete = False
        if request.method in ("POST", "PUT"):
            self._form = self.form(request.POST, request.FILES, instance=model)
        elif isinstance(pk, int):
            self._form = self.form(instance=model)
        else:
            self._form = None
    @method_decorator(csrf_protect)
    def dispatch(self, request: "django.http.HttpRequest", *args, **kwargs):
        logger.debug(f"dispatching {request.method}")
        return super().dispatch(request, *args, **kwargs)
    def get(self, request: "django.http.HttpRequest", *args, **kwargs):
        """Render the edit template with the bound form."""
        self.page_title = getattr(self.form, "name", None)
        self.page_description = getattr(self.form, "description", None)
        context = self.get_context(form_delete=self.enable_delete, **kwargs)
        context["form"] = self._form
        logger.debug(f"context: {context}")
        return self.render_to_response(context)
    def post(self, request: "django.http.HttpRequest", *args, **kwargs) -> JsonResponse:
        """
        Creates or updates a model instance based on the given form data.

        :param request: The request object
        :type request: django.http.HttpRequest
        :return: A JSON response containing status and errors or status and updated field
            values of the created/updated model
        :rtype: JsonResponse
        """
        logger.debug(f"post data is: {request.POST}")
        if int(kwargs["id"]) != 0 and int(request.POST.get("id", 0)) != int(
            kwargs.get("id", 0)
        ):
            logger.warn(f"ID in post data does not match ID in url")
            logger.debug(f"ID in post data: {request.POST.get('id', 0)}")
            return HttpResponseBadRequest("The ID may not be changed once set")
        if self._form.is_valid():
            logger.debug("Form is valid, saving()")
            save_data = self._form.save()
        else:
            # Consistency fix: use the module logger rather than the root
            # logger (this previously called logging.error directly).
            logger.error(
                f"encountered Exception while saving form {self.form.name}\n"
                f"Errors are: {self._form._errors.keys()}"
            )
            ers = []
            for e in self._form._errors.values():
                ers.append("<br>".join(e))
            response = {
                "status": "error",
                "fields": list(self._form._errors.keys()),
                "errors": ers,
            }
            response.update(dict((k, v[0]) for k, v in self._form._errors.items()))
            logger.debug(f"response: {response}")
            return JsonResponse(response)
        res = {"status": "success", "id": save_data.pk}
        for field in self.fields:
            res[field] = getattr(save_data, field)
            if not isinstance(res[field], (str, int, bool)):
                # Flatten related objects to their ids (FK -> id,
                # M2M -> list of ids); anything else falls back to str().
                if hasattr(res[field], "id"):
                    res[field] = res[field].id
                elif hasattr(res[field], "all"):
                    res[field] = [x.id for x in res[field].all()]
                else:
                    res[field] = str(res[field])
        response = JsonResponse(res)
        return response
    def delete(self, request, *args, **kwargs):
        """Delete the addressed instance; JSON success or 400 on failure."""
        try:
            pk = kwargs["id"]
        except KeyError:
            logger.warn("No ID in kwargs")
            return HttpResponseBadRequest()
        o = self._model.objects.get(pk=pk)
        try:
            o.delete()
            return JsonResponse({"status": "success"})
        except Exception as e:
            logger.exception(f"lazy catch of {e}")
            return HttpResponseBadRequest(str(e))
| jcarswell/hris-integration | hris_integration/hris_integration/views/core.py | core.py | py | 7,412 | python | en | code | 0 | github-code | 90 |
30946098767 | from django.urls import path
from . import views
# CRUD routes: list / detail / create / update / delete per resource.
urlpatterns = [
    # Crew members.
    path('crews/', views.list_crew_members, name='crew_list'),
    path('crews/<int:id>/', views.crew_detail, name='crew_detail'),
    path('crews/new/', views.create_crew_member, name='create_crew'),
    path('crews/<int:id>/edit/', views.update_crew_member, name='update_crew'),
    path('crews/<int:id>/delete/', views.delete_crew_member, name='delete_crew'),
    # Ships.
    path('ships/', views.list_ships, name='ship_list'),
    path('ships/<int:id>/', views.ship_detail, name='ship_detail'),
    path('ships/new/', views.create_ship, name='create_ship'),
    path('ships/<int:id>/edit/', views.update_ship, name='update_ship'),
    path('ships/<int:id>/delete/', views.delete_ship, name='delete_ship'),
    # Assignments.
    path('assignments/', views.list_assignments, name='assignment_list'),
    path('assignments/<int:id>/', views.assignment_detail, name='assignment_detail'),
    path('assignments/new/', views.create_assignment, name='create_assignment'),
    path('assignments/<int:id>/edit/', views.update_assignment, name='update_assignment'),
    path('assignments/<int:id>/delete/', views.delete_assignment, name='delete_assignment'),
    # Schedules.
    path('schedules/', views.list_schedules, name='schedule_list'),
    path('schedules/<int:id>/', views.schedule_detail, name='schedule_detail'),
    path('schedules/new/', views.create_schedule, name='create_schedule'),
    path('schedules/<int:id>/edit/', views.update_schedule, name='update_schedule'),
    path('schedules/<int:id>/delete/', views.delete_schedule, name='delete_schedule'),
] | Ashrayergso/Disposition-v4 | app_name/urls.py | urls.py | py | 1,588 | python | en | code | 0 | github-code | 90 |
14528291554 | """ Contains the source editor widget
"""
from __future__ import print_function
import logging, sys
from astviewer.qtpy import QtCore, QtGui, QtWidgets
logger = logging.getLogger(__name__)
# The widget inherits from a Qt class, therefore it has many
# ancestors public methods and attributes.
# pylint: disable=R0901, R0902, R0904, W0201, R0913
class SourceEditor(QtWidgets.QPlainTextEdit):
    """ Read-only source editor that emits sigTextClicked(line_nr, column_nr)
        on a left mouse press. (Line numbers are 1-based, columns 0-based.)
    """
    sigTextClicked = QtCore.Signal(int, int)
    def __init__(self, parent=None):
        """ Constructor: configure a fixed-pitch read-only editor with
            word wrap disabled and a yellow selection highlight.
        """
        super(SourceEditor, self).__init__(parent=parent)
        font = QtGui.QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        # It's hard to make a platform independent font size
        # http://stackoverflow.com/a/7381441/625350
        if sys.platform.startswith('linux'):
            font.setPointSize(12)
        self.setReadOnly(True)
        self.setFont(font)
        self.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.setCenterOnScroll(True)
        self.setStyleSheet("selection-color: black; selection-background-color: #FFE000;")
    def sizeHint(self):
        """ The recommended size for the widget (700x700).
        """
        size = QtCore.QSize()
        size.setWidth(700)
        size.setHeight(700)
        return size
    def mousePressEvent(self, mouseEvent):
        """ On left mouse press, sigTextClicked(line_nr, column_nr) is emitted
            for the character under the cursor.
        """
        if mouseEvent.button() == QtCore.Qt.LeftButton:
            cursor = self.cursorForPosition(mouseEvent.pos())
            # Since the word wrap is off, there is one block per line. Block numbers are zero-based
            # but code lines start at 1.
            self.sigTextClicked.emit(cursor.blockNumber() + 1, cursor.positionInBlock())
    def select_text(self, from_pos, to_line_pos):
        """ Selects a text in the range from_line:col ... to_line:col
            from_pos and to_line_pos should be a (line, column) tuple.
            If from_pos is None, the selection starts at the beginning of the document.
            If to_line_pos is None, the selection goes to the end of the document.
        """
        text_cursor = self.textCursor()
        # Select from back to front. This makes block better visible after scrolling.
        if to_line_pos is None:
            text_cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
        else:
            to_line, to_col = to_line_pos
            to_text_block = self.document().findBlockByLineNumber(to_line - 1)
            to_pos = to_text_block.position() + to_col
            text_cursor.setPosition(to_pos, QtGui.QTextCursor.MoveAnchor)
        if from_pos is None:
            text_cursor.movePosition(QtGui.QTextCursor.Start, QtGui.QTextCursor.KeepAnchor)
        else:
            from_line, from_col = from_pos
            # findBlockByLineNumber seems to be 0-based.
            from_text_block = self.document().findBlockByLineNumber(from_line - 1)
            from_pos = from_text_block.position() + from_col
            text_cursor.setPosition(from_pos, QtGui.QTextCursor.KeepAnchor)
        self.setTextCursor(text_cursor)
        self.ensureCursorVisible()
    def get_last_pos(self):
        """ Gets the (1-based line, 0-based column) of the last character.
        """
        text_cursor = self.textCursor()
        text_cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
        return (text_cursor.blockNumber() + 1, text_cursor.positionInBlock())
| titusjan/astviewer | astviewer/editor.py | editor.py | py | 3,595 | python | en | code | 103 | github-code | 90 |
72672277738 | """
้้ๆsocketๆๅ
ๆไธๅๆจกๅก๏ผ่ฒ ่ฒฌ่ESP32้่จ
"""
import socket
import convert as con
s = socket.socket()
def read_vol():
    """Accept one client on the module-level server socket and print each
    converted 32-byte chunk until the peer closes the connection.

    NOTE(review): the loop only exits when recv() returns b"", so the
    value returned is con.cvt() of that *empty* chunk -- confirm whether
    the last non-empty reading was intended instead. The client socket is
    also never closed.
    """
    client, addr = s.accept()
    while True:
        content = client.recv(32)
        if len(content) == 0:
            break
        else:
            print(con.cvt(content))
    return con.cvt(content)
def init(IP):
    """Bind the module-level server socket to (IP, 8090) and start
    listening (backlog 0)."""
    s.bind((IP, 8090))
    s.listen(0)
| harry123180/adaptive_control | Python/ESP32/WIFI.py | WIFI.py | py | 415 | python | en | code | 0 | github-code | 90 |
23893228038 | # Knapsack Problem with Dynamic Programming
def dynamic_0_1_knapsack(value, weight, n, total_weight):
    """Solve the 0/1 knapsack problem with bottom-up dynamic programming.

    Rewrite of a broken original which referenced an uninitialised ``keep``
    table, indexed the int loop variable (``w[i]``), added a table row to a
    scalar, and returned ``value[n][w]`` with out-of-range indices.

    :param value: value of each item, ``value[0..n-1]``
    :param weight: weight of each item, ``weight[0..n-1]``
    :param n: number of items
    :param total_weight: knapsack capacity
    :return: maximum total value achievable within ``total_weight``;
             also prints the 1-based indices of the chosen items
    """
    # table[i][w] = best value using the first i items with capacity w.
    table = [[0] * (total_weight + 1) for _ in range(n + 1)]
    # keep[i][w] = 1 when item i-1 is part of that best solution.
    keep = [[0] * (total_weight + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, total_weight + 1):
            table[i][w] = table[i - 1][w]
            if weight[i - 1] <= w:
                taken = value[i - 1] + table[i - 1][w - weight[i - 1]]
                if taken > table[i][w]:
                    table[i][w] = taken
                    keep[i][w] = 1
    # Backtrack from (n, total_weight) to report which items were chosen.
    w = total_weight
    for i in range(n, 0, -1):
        if keep[i][w]:
            print(i)
            w -= weight[i - 1]
    return table[n][total_weight]
| Subrata2402/Algorithms | Knapsack Problem/dynamic_programming.py | dynamic_programming.py | py | 556 | python | en | code | 0 | github-code | 90 |
33375706684 | import smbus2
from time import sleep
class AccelerometerSensor:
    """I2C driver for an LIS3DH-style accelerometer with an auxiliary
    temperature sensor. Register values below were chosen for normal mode
    at 100 Hz with a +/-4g range -- TODO confirm against the datasheet.
    """
    i2c_bus_num = 1 # bus 1 is connected to the breakout adapter
    address = 0x18 # Assumes SDO pad left unconnected
    def __init__(self): #init sensor
        """Open the I2C bus and write the control/interrupt configuration."""
        #Create a bus instance
        self.bus = smbus2.SMBus(self.i2c_bus_num)
        #Select data rate, normal mode at 100 Hz, enable z y x axes
        ctr_reg_1 = 0b01010111
        self.bus.write_byte_data(self.address, 0x20, ctr_reg_1)
        #Turn of filters (NOTE(review): verify this value actually disables them)
        ctr_reg_2 = 0b10100001
        self.bus.write_byte_data(self.address, 0x21, ctr_reg_2)
        #Send interrupt to INT1
        ctr_reg_3 = 0b01000000
        self.bus.write_byte_data(self.address, 0x22, ctr_reg_3)
        #Set measurements to non-continuous update (necessary to use thermometer), big endian notation, ±4g measurement range
        ctr_reg_4 = 0b00010000
        self.bus.write_byte_data(self.address, 0x23, ctr_reg_4)
        #Latch Interrupt 1 until it is read
        ctr_reg_5 = 0b00001000
        self.bus.write_byte_data(self.address, 0x24, ctr_reg_5)
        #INT1 threshold
        int1_ths = 0b00001000
        self.bus.write_byte_data(self.address, 0x32, int1_ths)
        #INT1 duration
        int1_duration = 0b00000000 #0.1s
        self.bus.write_byte_data(self.address, 0x33, int1_duration)
        #Enable temperature sensor and ADC
        temp_cfg_reg = 0b11000000
        self.bus.write_byte_data(self.address, 0x1f, temp_cfg_reg)
        # Give the sensor time to apply the configuration before enabling INT1.
        sleep(0.1)
        #Configure interrupt 1
        int1_cfg_reg = 0b00001010
        self.bus.write_byte_data(self.address, 0x30, int1_cfg_reg)
    def readTemperature(self):
        """Return the temperature reading in (relative) degrees Celsius."""
        #Returns a temperature value with a step size in celsius
        #Note temperature sensor is only good for measuring temperature changes
        #Get data from the two temperature registers
        raw_data = [self.bus.read_byte_data(self.address, 0x0c + i) for i in range(2)]
        #Convert raw bytes to signed ints
        temp = int.from_bytes(raw_data[0:2],byteorder='little', signed=True)
        #Convert to change in celsius
        temp /= 2**8
        return temp
    def readAccelerometer(self):
        """Return accelerations as the list [x, y, z] in units of g."""
        #Returns a list of accelerations in the order [x,y,z] (unit = g-force)
        #Get data from all acceleration registers
        raw_data = [self.bus.read_byte_data(self.address, 0x28 + i) for i in range(6)]
        #Convert raw bytes to signed ints
        x = int.from_bytes(raw_data[0:2],byteorder='little', signed=True)
        y = int.from_bytes(raw_data[2:4],byteorder='little', signed=True)
        z = int.from_bytes(raw_data[4:6],byteorder='little', signed=True)
        #Convert signed ints to g-force
        acceleration = [x,y,z]
        acceleration = [i/(2**15)*4 for i in acceleration] #Factor of 4 as we measure ±4g
        return acceleration
| tianyilim/Not-Lionel-s-CW1 | rpi_interface/accelerometer.py | accelerometer.py | py | 2,791 | python | en | code | 1 | github-code | 90 |
16041096515 | from collections.abc import Iterable
class Demo:
def __init__(self, x):
self.x = x
self.count = -1
def __iter__(self): # ๅช่ฆ้ๅไบไบ__iter__ๆนๆณๅฐฑๆฏไธไธชๅฏ่ฟญไปฃๅฏน่ฑก
return self
def __next__(self):
self.count += 1
if self.count < self.x:
# ๆฏไธๆฌกfor...in้ฝไผ่ฐ็จไธๆฌก__next__ๆนๆณ๏ผ่ทๅ่ฟๅๅผ
return self.count
else:
raise StopIteration # ่ฎฉ่ฟญไปฃๅจๅๆญข
d = Demo(100)
print(isinstance(d, Iterable))
# for ... in ๅพช็ฏ็ๆฌ่ดจๅฐฑๆฏ่ฐ็จๅฏ่ฟญไปฃๅฏน่ฑก็__iter__ๆนๆณ๏ผ่ทๅๅฐ่ฟไธชๆนๆณ็่ฟๅๅผ
# ่ฟไธช่ฟๅๅผๆฏไธไธช่ฟญไปฃๅจๅฏน่ฑก๏ผ็ถๅๅ่ฐ็จ่ฟไธชๅฏน่ฑก็__next__ๆนๆณ
# for i in d:
# print(i)
# ๅ
็ฝฎๅฝๆฐiterๅฏไปฅ่ทๅๅฐไธไธชๅฏ่ฟญไปฃๅฏน่ฑก็่ฟญไปฃๅจ
i = iter(d)
print(next(i))
print(next(i))
print(next(i))
| EricWord/PythonStudy | 18-iterator/iter_demo.py | iter_demo.py | py | 890 | python | zh | code | 0 | github-code | 90 |
37381337271 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
import math
import re
import traceback
import logger
from public.logger import simple_log
logger = simple_log()
def contains_chinese(contents):
    """Return True when *contents* holds at least one CJK character in the
    range U+4E00..U+9FA5, otherwise False."""
    han_pattern = re.compile(u'[\u4e00-\u9fa5]+')
    return han_pattern.search(contents) is not None
def to_radians_float(degress):
    """Convert an angle from degrees to radians.

    Idiom fix: use the standard-library math.radians instead of the
    hand-rolled ``degress * math.pi / 180.0``.
    """
    return math.radians(degress)
def get_poi_distance(f_poi_lng, f_poi_lat, f_search_data_lng, f_search_data_lat):
    """Great-circle (haversine) distance in metres between two lng/lat points.

    Bug fix: the latitude term previously used sin(dlat)**2 instead of the
    half-angle sin(dlat/2)**2 required by the haversine formula, which
    inflated distances (up to ~2x for latitude separations).

    :param f_poi_lng: POI longitude in degrees
    :param f_poi_lat: POI latitude in degrees
    :param f_search_data_lng: search-point longitude in degrees
    :param f_search_data_lat: search-point latitude in degrees
    :return: distance in metres
    """
    fdlng = math.radians(f_poi_lng - f_search_data_lng)
    fdlat = math.radians(f_poi_lat - f_search_data_lat)
    a = math.sin(fdlat / 2) ** 2 + math.cos(math.radians(f_poi_lat)) * math.cos(
        math.radians(f_search_data_lat)) * math.sin(fdlng / 2) ** 2
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))
    EARCH_RADIUS = 6378137.0  # WGS-84 equatorial radius in metres (name kept as-is)
    return EARCH_RADIUS * c
def if_need_filter_poi_item_data_by_lat_lng(f_poi_lng, f_poi_lat, f_search_data_lng, f_search_data_lat, str_poi_name,
                                            str_search_cn_name, str_type_key):
    """
    Decide whether a POI result duplicates the search item and should be
    filtered out. Rules:
      1. straight-line distance <= 500 m and the first 3/4 of the POI name
         occurs in the search name (the full name for names shorter than 4
         characters);
      2. straight-line distance <= 1400 m and the two names match exactly.

    Bug fixes: the '%' formatting was applied to logger.warn()'s return
    value (None -> TypeError, swallowed by the bare except, so the function
    returned None instead of True), and the 3/4 slice bound used float
    division, which is invalid as a slice index on Python 3.

    :return: True when the POI should be filtered out, False otherwise
    """
    try:
        f_distance = get_poi_distance(f_poi_lng, f_poi_lat, f_search_data_lng, f_search_data_lat)
        i_poi_name_len = len(str_poi_name)
        if i_poi_name_len < 4:
            b_name_match = str_search_cn_name.find(str_poi_name) != -1
        else:
            # // keeps the slice bound an int (the old "/" produced a float).
            b_name_match = str_search_cn_name.find(str_poi_name[:i_poi_name_len * 3 // 4]) != -1
        b_full_match = str_search_cn_name == str_poi_name
        if f_distance <= 500 and b_name_match:
            logger.warn(
                'radius distance less than 500m between poi item and search item! radius is: %d \tpoi info is:[%d,%d,%d,%d] \tpoi_name:%s,search_cn_name:%s,searchType:%s' %
                (f_distance, f_poi_lng, f_poi_lat, f_search_data_lng, f_search_data_lat, str_poi_name,
                 str_search_cn_name, str_type_key))
            return True
        elif f_distance <= 1400 and b_full_match:
            logger.warn(
                'radius distance less than 1400m between poi item and search item! radius is: %d \tpoi info is:[%d,%d,%d,%d] \tpoi_name:%s,search_cn_name:%s,searchType:%s' %
                (f_distance, f_poi_lng, f_poi_lat, f_search_data_lng, f_search_data_lat, str_poi_name,
                 str_search_cn_name, str_type_key))
            return True
        return False
    except Exception:
        # Preserve the original best-effort behaviour, but return an
        # explicit (still falsy) False rather than an implicit None.
        logger.error(traceback.format_exc())
        return False
| soldiers1989/poi_server | public/tools.py | tools.py | py | 3,083 | python | en | code | 0 | github-code | 90 |
70373915818 | from .airtable_api_base import AirtableAPIBase
import json
import urllib
import urllib.parse
from datetime import datetime
from pytz import timezone, utc
import pytz
from currency_converter import CurrencyConverter
import re
from pprint import pprint
class FinFit(AirtableAPIBase):
    def __init__(self, api_key, arg):
        """Initialise the Airtable client and the state->currency lookup."""
        AirtableAPIBase.__init__(self, api_key, arg)
        # NOTE(review): keys look like country codes (US/CA) rather than
        # states -- confirm the intended semantics of "state" here.
        self.state_currency = {'US':'USD','CA':'CAD'}
    def get_pst_time(self, date_format='%m/%d/%Y %H:%M:%S'):
        """Return the current time converted to US/Pacific as a string.

        :param date_format: strftime format for the returned string
        """
        date = datetime.now(tz=pytz.utc)
        date = date.astimezone(timezone('US/Pacific'))
        pstDateTime = date.strftime(date_format)
        return pstDateTime
    def getSVG(self,name='dot'):
        """Return the inline SVG markup registered under *name*.

        Only 'dot' is currently defined; any other name raises KeyError.
        """
        res = {}
        res['dot'] = '<svg width="14" height="14" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg" id="IconChangeColor"> <path d="M9.875 7.5C9.875 8.81168 8.81168 9.875 7.5 9.875C6.18832 9.875 5.125 8.81168 5.125 7.5C5.125 6.18832 6.18832 5.125 7.5 5.125C8.81168 5.125 9.875 6.18832 9.875 7.5Z" fill="currentColor" id="mainIconPathAttribute"></path> </svg>'
        return res[name]
def get_dynamic_data(self,rid=None):
get_agents = self.get_table_data()
orgs = []
agents = {}
for ag in get_agents:
agents[ag['id']] = {}
mby = agt = ''
agt = ag['id']
if 'Agent Name' in ag['fields'].keys():
agents[ag['id']]['Agent Name'] = ag['fields']['Agent Name']
if 'Agent Title' in ag['fields'].keys():
agents[ag['id']]['Agent Title'] = ag['fields']['Agent Title']
if 'Reports To' in ag['fields'].keys():
agents[ag['id']]['Reports To'] = mby = ag['fields']['Reports To'][0]
if 'Prospects Count' in ag['fields'].keys():
agents[ag['id']]['Prospects Count'] = ag['fields']['Prospects Count']
if 'Agent Status' in ag['fields'].keys():
agents[ag['id']]['Agent Status'] = ag['fields']['Agent Status']
if 'Forecast Sales' in ag['fields'].keys():
agents[ag['id']]['Forecast Sales'] = ag['fields']['Forecast Sales']
if 'Agent State' in ag['fields'].keys():
agents[ag['id']]['Agent State'] = ag['fields']['Agent State']
if 'Monthly Estimated Revenue' in ag['fields'].keys():
agents[ag['id']]['Monthly Estimated Revenue'] = ag['fields']['Monthly Estimated Revenue']
orgs.append({agt:mby})
start = rid #'recMAs4K7UIJxC4HK'
top_st = agents[start]['Agent Status'] if 'Agent Status' in agents[start].keys() else ''
top_status = f' <span style="color:{"green" if top_st=="Active" else "red"}">{self.getSVG()}</span>' if top_st != '' else ''
top_pc = agents[start]['Prospects Count'] if 'Prospects Count' in agents[start].keys() else 0
top_state = agents[start]['Agent State'] if 'Agent State' in agents[start].keys() else 'US'
top_forecast_sales = agents[start]['Forecast Sales'] if 'Forecast Sales' in agents[start].keys() else '$0.00'
top_CC_amt = self.cc_amt(top_forecast_sales,self.state_currency[top_st]) if top_st != '' and top_st != 'US' and top_st in self.state_currency.keys() else top_forecast_sales
pprint(agents)
pprint(orgs)
hierarchical_json = self.build_hierarchy(start, orgs)
html_output = self.render_hierarchy(hierarchical_json,agents)
#print(hierarchical_json)
prefix = f"<ul class='parent'><li><details><summary>{agents[start]['Agent Name']} {top_status}<br><small>{agents[start]['Agent Title']} - Prospects Count:{top_pc}, Forecast Sales: {top_CC_amt}</small></summary>"
suffix = "</details></li></ul>"
print(prefix + html_output + suffix)
response = {
"org_chart_html": prefix + html_output + suffix,
"agents":agents,
"hierarchical_json":hierarchical_json,
"tdate": self.get_pst_time('%Y-%m-%d')
}
return response
def cc_amt(self,amt = int, ct_to = 'CAD', ct_from = 'USD'):
#Substitute all non-digit characters (except the decimal point) with an empty string
amt_only = amt if isinstance(amt, int) else re.sub("[^\d.]", "", amt)
cc_amt = f'{amt}'
# Create a currency converter object
converter = CurrencyConverter()
# Convert ex USD to EUR
ct_amount = converter.convert(amt_only, ct_from, ct_to)
if ct_amount:
cc_amt = f'${round(ct_amount, 2)}'
return cc_amt
def build_hierarchy(self,category='', data=[]):
result = {}
for item in data:
for key, value in item.items():
if value == category:
result[key] = self.build_hierarchy(key,data)
return result
def render_hierarchy(self,hierarchical_json={},agents={}):
result = "<ul>\n"
for key, value in hierarchical_json.items():
st = agents[key]['Agent Status'] if 'Agent Status' in agents[key].keys() else ''
status = f' <span style="color:{"green" if st=="Active" else "red"}">{self.getSVG()}</span>' if st != '' else ''
pc = agents[key]['Prospects Count'] if 'Prospects Count' in agents[key].keys() else 0
sta = agents[key]['Agent State'] if 'Agent State' in agents[key].keys() else 'US'
fs = agents[key]['Forecast Sales'] if 'Forecast Sales' in agents[key].keys() else 0
CC_amt = self.cc_amt(fs,self.state_currency[sta]) if sta != '' and sta != 'US' and sta in self.state_currency.keys() else fs
result += "<li>\n"
#if not value:
result += f" <details>\n"
result += f" <summary>{agents[key]['Agent Name']} {status}<br><small>{agents[key]['Agent Title']} - Prospects Count:{pc}, Forecast Sales: {CC_amt}</small></summary>\n"
if value:
result += self.render_hierarchy(value,agents)
#if not value:
result += " </details>\n"
result += "</li>\n"
result += "</ul>\n"
return result
| sterlingsms/sterling_finfit | finfit/airtable_api.py | airtable_api.py | py | 6,147 | python | en | code | 0 | github-code | 90 |
14065517561 | import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
from improver.utilities.probability_manipulation import (
comparison_operator_dict,
invert_probabilities,
to_threshold_inequality,
)
def test_comparison_operator_keys():
    """Test that only the expected keys are contained within the comparison
    operator dictionary, and that each contains a namedtuple containing
    the expected elements."""
    # Every spelling (mnemonic, upper-case mnemonic, symbol) must be present.
    expected_keys = sorted(
        ["ge", "GE", ">=", "gt", "GT", ">", "le", "LE", "<=", "lt", "LT", "<"]
    )
    expected_items = ("function", "spp_string", "inverse")
    result = comparison_operator_dict()
    assert isinstance(result, dict)
    assert sorted(result.keys()) == expected_keys
    for k, v in result.items():
        # Each value is a namedtuple: comparison callable, string form,
        # and the name of the inverse comparison.
        assert v._fields == expected_items
        # The callable must come from the stdlib operator module.
        assert v.function.__module__ == "_operator"
        assert isinstance(v.spp_string, str)
        assert isinstance(v.inverse, str)
@pytest.fixture
def probability_cube(inequality):
    """Set up probability cube"""
    # `inequality` is supplied indirectly by each test's parametrize list.
    # 2 thresholds x 2 x 2 spatial grid; probabilities ascend 0.0 .. 0.7.
    data = np.linspace(0.0, 0.7, 8).reshape(2, 2, 2).astype(np.float32)
    cube = set_up_probability_cube(
        data,
        thresholds=[273.15, 278.15],
        spatial_grid="equalarea",
        spp__relative_to_threshold=inequality,
    )
    return cube
@pytest.fixture
def expected_inverted_probabilities():
    """The 1 - p complement of the probability_cube data, cast to float32."""
    inverted = np.linspace(1.0, 0.3, 8)
    return inverted.reshape(2, 2, 2).astype(np.float32)
@pytest.mark.parametrize("above", [True, False])
@pytest.mark.parametrize(
    "inequality, expected_above, inverted_attr",
    (
        ("greater_than_or_equal_to", True, "less_than"),
        ("greater_than", True, "less_than_or_equal_to"),
        ("less_than_or_equal_to", False, "greater_than"),
        ("less_than", False, "greater_than_or_equal_to"),
    ),
)
def test_to_threshold_inequality(
    probability_cube,
    expected_above,
    inverted_attr,
    above,
    expected_inverted_probabilities,
):
    """Test function returns probabilities with the target threshold inequality."""
    # NOTE(review): expected_inverted_probabilities is requested but never
    # referenced in this test body.
    def threshold_attr(cube):
        # Shorthand for the cube's threshold-relation attribute.
        return cube.coord(var_name="threshold").attributes["spp__relative_to_threshold"]
    ref_attr = threshold_attr(probability_cube)
    result = to_threshold_inequality(probability_cube, above=above)
    result_attr = threshold_attr(result)
    # When the cube already has the requested sense the attribute is
    # unchanged; otherwise it must flip to the inverse relation.
    if expected_above == above:
        assert result_attr == ref_attr
    else:
        assert result_attr == inverted_attr
@pytest.mark.parametrize(
    "inequality, expected_attr, expected_name",
    (
        ("greater_than_or_equal_to", "less_than", "below"),
        ("greater_than", "less_than_or_equal_to", "below"),
        ("less_than_or_equal_to", "greater_than", "above"),
        ("less_than", "greater_than_or_equal_to", "above"),
    ),
)
def test_invert_probabilities(
    probability_cube, expected_attr, expected_name, expected_inverted_probabilities
):
    """Test function inverts probabilities and updates cube metadata."""
    result = invert_probabilities(probability_cube)
    # The threshold-relation attribute must flip to the inverse comparison.
    assert (
        result.coord(var_name="threshold").attributes["spp__relative_to_threshold"]
        == expected_attr
    )
    # The cube name must reflect the new above/below sense.
    assert expected_name in result.name()
    # The data must be the 1 - p complement of the input probabilities.
    assert_almost_equal(result.data, expected_inverted_probabilities)
@pytest.mark.parametrize("inequality", ["greater_than"])
def test_no_threshold_coordinate(probability_cube):
    """Test an exception is raised if no threshold coordinate is found."""
    # Slice off the threshold dimension, then strip the coordinate itself.
    cube = probability_cube[0]
    threshold = cube.coord(var_name="threshold")
    cube.remove_coord(threshold)
    with pytest.raises(ValueError, match="Cube does not have a threshold coordinate"):
        invert_probabilities(cube)
| metoppv/improver | improver_tests/utilities/test_probability_manipulation.py | test_probability_manipulation.py | py | 3,753 | python | en | code | 95 | github-code | 90 |
38685971129 | import tweepy
from textblob import TextBlob
def get_most_talked_about_stocks():
    """Search recent tweets for a fixed set of cashtags and rank them by
    aggregate TextBlob sentiment polarity.

    Returns:
        list[str]: cashtags sorted by summed sentiment, most positive first.
    """
    # Replace these with your own Twitter API keys and tokens
    consumer_key = 'YOUR_CONSUMER_KEY'
    consumer_secret = 'YOUR_CONSUMER_SECRET'
    access_token = 'YOUR_ACCESS_TOKEN'
    access_token_secret = 'YOUR_ACCESS_TOKEN_SECRET'

    # Use tweepy to access the Twitter API
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    # Download the latest tweets that mention stock tickers.
    # BUG FIX: `Status.full_text` is only populated with tweet_mode='extended';
    # in the default (compat) mode the attribute does not exist and the loop
    # below raised AttributeError.
    query = '$AAPL OR $GOOGL OR $MSFT OR $AMZN OR $FB'
    tweets = api.search_tweets(q=query, lang='en', count=100, tweet_mode='extended')

    # Sum sentiment polarity per ticker across all tweets mentioning it.
    stock_sentiments = {}
    for tweet in tweets:
        text = tweet.full_text
        sentiment = TextBlob(text).sentiment.polarity
        for ticker in ['$AAPL', '$GOOGL', '$MSFT', '$AMZN', '$FB']:
            if ticker in text:
                if ticker not in stock_sentiments:
                    stock_sentiments[ticker] = 0
                stock_sentiments[ticker] += sentiment

    # Highest aggregate sentiment first.
    return sorted(stock_sentiments, key=stock_sentiments.get, reverse=True)
# Fetch tweets and rank tickers by aggregate sentiment.
top_tickers = get_most_talked_about_stocks()

# Report the ranking, one ticker per line.
print('Most talked about stocks:')
for ticker in top_tickers:
    print(ticker)
| kzhekov/Idea-Pod | twitter/most_mentioned_tickers.py | most_mentioned_tickers.py | py | 1,500 | python | en | code | 0 | github-code | 90 |
16448381270 | import torch
import os
import yaml
from typing import Dict
import numpy as np
import re
def compute_advantage(gamma, lmbda, td_delta):
    """Compute GAE-style advantages from TD residuals.

    Walks the residual tensor back-to-front, accumulating a geometrically
    discounted running sum with factor gamma * lmbda, and returns the
    advantages as a float tensor in the original (forward) order.
    """
    deltas = td_delta.detach().numpy()
    running = 0.0
    advantages = []
    for delta in deltas[::-1]:
        running = gamma * lmbda * running + delta
        advantages.append(running)
    advantages.reverse()
    return torch.tensor(np.array(advantages), dtype=torch.float)
def ensure_dir(dir_path):
    """Create the directory (including parents) when it does not yet exist."""
    if os.path.exists(dir_path):
        return
    os.makedirs(dir_path)
# Read a YAML configuration file.
def load_yaml(cfg_path) -> Dict:
    """Parse the YAML file at `cfg_path` and return its contents."""
    with open(cfg_path, "r") as handle:
        return yaml.safe_load(handle)
def to_snake_case(string, rest_to_lower: bool = False):
    """Convert camelCase / arbitrary text to underscore-separated form.

    Each zero-width camelCase boundary (lower -> UPPER) and every
    non-alphabetic character is replaced with an underscore. When
    `rest_to_lower` is truthy the result is additionally lower-cased.

    >>> to_snake_case("helloWorld")
    'hello_World'
    >>> to_snake_case("helloWorld", rest_to_lower=True)
    'hello_world'
    """
    string = re.sub(r"(?:(?<=[a-z])(?=[A-Z]))|[^a-zA-Z]", " ", string).replace(" ", "_")
    # FIX: the old version compared `rest_to_lower == True` and wrapped the
    # result in a pointless "".join(...); plain truthiness suffices.
    if rest_to_lower:
        return string.lower()
    return string
| noobHuKai/arl | arl/utils/utils.py | utils.py | py | 974 | python | en | code | 0 | github-code | 90 |
37894115828 | import nmrglue as ng
import numpy as np
import matplotlib.pyplot as plt
path = "C:/Users/s4548361/Desktop/Full/f2_proc.sec"
path2 = "C:/Users/s4548361/Desktop/Subsampled/f2_proc_sub.sec"
path3 = 'C:/Users/s4548361/Desktop/ML_test_data_easy_hsqc.tar/ML_test_data_easy_hsqc/Exp_Easy_HSQC/MaxEnt/maxent.sec'
path4 = 'C:/Users/s4548361/Desktop/ML_test_data_easy_hsqc.tar/ML_test_data_easy_hsqc/Exp_Easy_HSQC/FT_data/2DFT.sec'
path5 = 'C:/Users/s4548361/Desktop/ML_test_data_easy_hsqc.tar/ML_test_data_easy_hsqc/Exp_Easy_HSQC/FT_data/extrapolated/2DLPFT.sec'
path_test_full = 'C:/Users/s4548361/Desktop/MRS_new/test_data/full/f2_proc.sec'
path_test_sub = 'test_data/sub/f2_proc_sub.sec'
# Read the fully sampled reference spectrum.
# NOTE(review): dicfull/datafull are never used below -- the read is kept
# only because dropping it would change the script's I/O behaviour.
dicfull, datafull = ng.fileio.rnmrtk.read(
    path)
# Read in the .par/.sec files using NMR glue
dic, data = ng.fileio.rnmrtk.read(
    'result/global_LW5.sec')
udic = ng.fileio.rnmrtk.guess_udic(dic, data)
# Flip the data so that the last axis is the one we operate on
data = np.transpose(data)
# Unpack the STATES format into complex data in the last dimension
data = data[:, 0::2] + 1j * data[:, 1::2]
# FFT the time domain axis
data = np.fft.fftshift(np.fft.fft(data, axis=-1), -1)
# Repack the complex data back into STATES.
# FIX: removed the unused local `half = int(size[-1])`.
size = list(data.shape)
size[-1] = int(size[-1]) * 2  # real+imag interleaving doubles the last axis
d = np.empty(size, data.real.dtype)
d[..., ::2] = data.real
d[..., 1::2] = data.imag
data = d
# Flip the data back to the original format
data = np.transpose(data)
print(dic)
print(udic)
print(data.dtype, data.shape)
# Create a contour plot so that we can view the data
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
# Create some contour levels for the plot
# (a geometric ladder descending from the data maximum, lowest level first).
lvl = np.max(data)
scale = 1.4
num_contours = 12
pcl = [lvl * (1 / scale) ** x for x in range(num_contours)]
list.reverse(pcl)
colors = ['black'] * num_contours
linewidths = [0.5] * (num_contours)
# Use the udic to work out the axis scales
# (carrier/spectral-width in Hz divided by observe frequency gives ppm).
ref_ppm_0 = udic[0]['car'] / udic[0]['obs']
sw_ppm_0 = udic[0]['sw'] / udic[0]['obs']
ref_ppm_1 = udic[1]['car'] / udic[1]['obs']
sw_ppm_1 = udic[1]['sw'] / udic[1]['obs']
# Axis 0 is STATES-interleaved, hence the shape[0] / 2 point count.
y0 = ref_ppm_0 - sw_ppm_0 / 2
y1 = ref_ppm_0 + sw_ppm_0 / 2 - sw_ppm_0 / (data.shape[0] / 2)
x0 = ref_ppm_1
x1 = ref_ppm_1 + sw_ppm_1 - sw_ppm_1 / data.shape[1]
# Set the labels to display nice
ax.set_ylim(y1, y0)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_xlim(x1, x0)
extent = (x1, x0, y1, y0)
ax.set_ylabel("15N ppm", size=20)
ax.set_xlabel("1H ppm", size=20)
ax.grid()
ax.set_title('15N HSQC')
# NOTE: I am using data[::2, :] which takes every second element from the data
# matrix. This is gives me only the real data in the first dimension as it is STATES encoded
ax.contour(data[::2, :], pcl, extent=extent,
           colors=colors, linewidths=linewidths)
plt.show()
| Shawn-S-ong/MRS-Script | LW_MRS/show2d.py | show2d.py | py | 2,879 | python | en | code | 1 | github-code | 90 |
18569389939 | import sys
import math
from collections import defaultdict
from bisect import bisect_left, bisect_right
sys.setrecursionlimit(10**7)
def input():
    # Shadow the builtin on purpose: sys.stdin.readline is faster for bulk
    # reads, and [:-1] drops the trailing newline (assumes every line is
    # newline-terminated).
    return sys.stdin.readline()[:-1]
mod = 10**9 + 7  # standard competitive-programming modulus (unused below)
def I():
    """Read one stdin line and return it as an int."""
    return int(input())
def LI():
    """Read one stdin line of whitespace-separated ints into a list."""
    return [int(token) for token in input().split()]
def LIR(row,col):
    # Read `row` input lines, interpreted as `col` columns.
    # NOTE(review): the return type is inconsistent by design of the caller:
    # col == 1 yields a flat list, col > 1 yields a lazy `map` of column
    # lists (transposed rows) -- callers must handle both shapes.
    if row <= 0:
        return [[] for _ in range(col)]
    elif col == 1:
        return [I() for _ in range(row)]
    else:
        read_all = [LI() for _ in range(row)]
        return map(list, zip(*read_all))
#################
def check(i, s, s0):
    """Parity test for the seat search: even indices should report the same
    state as index 0, odd indices the opposite."""
    same_as_first = (s == s0)
    if i % 2 == 0:
        return same_as_first
    return not same_as_first
N = I()
print(0)
sys.stdout.flush()
s0 = str(input())
if s0 == 'Vacant':
exit()
print(N-1)
sys.stdout.flush()
s1 = str(input())
if s1 == 'Vacant':
exit()
ok = N-1
ng = 0
while True:
mid = (ok+ng)//2
print(mid)
sys.stdout.flush()
s = str(input())
if s == 'Vacant':
exit()
flag = check(mid,s,s0)
if flag:
ng = mid
else:
ok = mid | Aasthaengg/IBMdataset | Python_codes/p03439/s982427545.py | s982427545.py | py | 1,137 | python | en | code | 0 | github-code | 90 |
70069393576 | #---------------------------ๅๆณกๆๅบ--------------------------------
def Bubble_sort(li):
    """In-place bubble sort with early exit when a pass makes no swaps."""
    n = len(li)
    for done in range(n - 1):  # each pass floats one maximum to the end
        swapped = False
        for j in range(n - done - 1):
            if li[j] > li[j + 1]:
                li[j], li[j + 1] = li[j + 1], li[j]
                swapped = True
        if not swapped:
            return
# Demo run: bubble-sort a sample list in place and show the result.
sample = [2, 3, 2, 65, 75, 3, 2, 345, 64, 7877]
Bubble_sort(sample)
print(sample)
#---------------------------้ๆฉๆๅบ--------------------------------
def Select_sort_simple(li):
    """Selection sort that builds a new sorted list; empties the input list."""
    ordered = []
    while li:
        smallest = min(li)
        li.remove(smallest)
        ordered.append(smallest)
    return ordered
def Select_sort(li):
    """In-place selection sort: grow a sorted prefix one element per pass."""
    for boundary in range(len(li) - 1):
        # Index of the first minimum in the unsorted tail (starts at
        # boundary itself so the element may stay put).
        smallest = min(range(boundary, len(li)), key=li.__getitem__)
        li[boundary], li[smallest] = li[smallest], li[boundary]
# --------------------------- Insertion sort ---------------------------
def Insert_sort(li):
    """In-place insertion sort.

    For each index i, shift larger elements of the already-sorted prefix
    one slot right and drop li[i] into its place.
    """
    for i in range(1, len(li)):  # li[:i] is already sorted
        tmp = li[i]
        j = i - 1
        # BUG FIX: test `j >= 0` *before* indexing. The old condition order
        # evaluated li[-1] (Python wraps negative indices) once j hit -1 --
        # correct only by accident of `and` short-circuiting on the second
        # operand; this order never touches an out-of-range index.
        while j >= 0 and li[j] > tmp:
            li[j + 1] = li[j]
            j -= 1
        li[j + 1] = tmp
# Demo run: insertion-sort a sample list in place and show the result.
sample = [3, 4, 5, 2, 4, 6, 4, 7, 9]
Insert_sort(sample)
print(sample)
| czjdsg/Note | pyๆไปถ/็ฎๅๆๅบ.py | ็ฎๅๆๅบ.py | py | 1,378 | python | en | code | 0 | github-code | 90 |
5449622326 | import os
from zipfile import ZipFile
import appdirs
import requests
from .utils import get_members
class ReleaseDownloader:
    """Represents a basic release downloader."""
    # Subclasses must override `name`; the remaining attributes are optional.
    name = None            # software name, becomes part of the cache slug
    release = None         # release identifier; defaults to 'trunk' for VCS sources
    vcs = None             # 'git' for git checkouts, None for archive downloads
    url = None             # download/clone URL; may contain a {release} placeholder
    _download_type = None  # archive extension ('zip', ...); inferred from url if unset
    _slug_type = None      # cache-directory prefix, set by the Base* subclasses
    def __init__(self):
        # Resolve cache directories and infer the download type from the URL.
        if self.name is None:
            raise Exception('Class variable `name` must be overridden')
        appname = 'vagrirc'
        appauthor = 'danieloaks'
        self.base_cache_directory = appdirs.user_cache_dir(appname, appauthor)
        # server_* slug here to stop possible collisions with services/etc names
        slug = '{}_{}'.format(self._slug_type, self.name)
        self.cache_directory = os.path.join(self.base_cache_directory, slug)
        if self.release is None and self.vcs:
            self.release = 'trunk'
        self.source_folder = os.path.join(self.cache_directory, self.release)
        self.external_source_folder = self.name
        if not os.path.exists(self.cache_directory):
            os.makedirs(self.cache_directory)
        # fill out dl type ourselves if we can
        if self._download_type is None:
            if isinstance(self.url, str) and len(self.url.split('.')) > 1:
                # could also strip out # magic if necessary, later
                self._download_type = self.url.rsplit('.', 1)[-1]
    def download_release(self):
        """Download our expected release of the server, if not already cached."""
        if self.vcs == 'git':
            # Lazy import so non-git downloaders don't need GitPython installed.
            import git
            if os.path.exists(self.source_folder):
                # Existing checkout: just fetch and pull the latest changes.
                print('Updating', self.name)
                repo = git.Repo(self.source_folder)
                repo.remotes.origin.fetch()
                repo.remotes.origin.pull()
            else:
                # Fresh clone: init, add origin, track master, pull submodules.
                repo = git.Repo.init(self.source_folder)
                origin = repo.create_remote('origin', self.url)
                assert origin.exists()
                origin.fetch()
                # track remote branch
                repo.create_head('master', origin.refs.master).set_tracking_branch(
                    origin.refs.master)
                origin.pull()
                for submodule in repo.submodules:
                    submodule.update(init=True)
                repo.heads.master.checkout()
        else:
            # Archive download over HTTP(S).
            if self.url is None:
                return False
            # see if it already exists
            if os.path.exists(self.source_folder):
                return True
            url = self.url.format(release=self.release)
            # download file
            # NOTE(review): a failed download (`not r.ok`) returns False but
            # leaves an empty tmp file on disk -- confirm this is acceptable.
            tmp_with_ext = 'tmp_download.{}'.format(self._download_type)
            tmp_filename = os.path.join(self.cache_directory, tmp_with_ext)
            with open(tmp_filename, 'wb') as handle:
                r = requests.get(url, stream=True)
                if not r.ok:
                    return False
                ONE_MEGABYTE = 1024 * 1024
                for block in r.iter_content(ONE_MEGABYTE):
                    if not block:
                        break
                    handle.write(block)
            # extract into directory
            if self._download_type == 'zip':
                with ZipFile(tmp_filename, 'r') as source_zip:
                    source_zip.extractall(self.source_folder, get_members(source_zip))
            else:
                raise Exception('Cannot extract/parse given download type')
            # remove temp file
            os.remove(tmp_filename)
class BaseSoftware(ReleaseDownloader):
    """A downloadable piece of software that can also be configured, built
    and launched; the hook methods below are intentional no-ops for
    subclasses to override."""
    info = {}      # NOTE(review): populated by subclasses; semantics inferred from name
    requires = {}  # NOTE(review): presumably dependencies on other software; confirm
    def init_info(self, config_folder=None):
        """Initialise user/channel/etc info."""
        ...
    def init_users(self, info):
        """Return a list of 'users' to join to the network, along with commands.

        Used during network provisioning to register accounts with NickServ,
        register and set channel info such as topic, etc.
        """
        return []
    def write_config(self, folder, info):
        """Write config file to the given folder."""
        ...
    def write_build_files(self, folder, src_folder, bin_folder, build_folder, config_folder):
        """Write build files to the given folder."""
        ...
    def write_launch_files(self, folder, src_folder, bin_folder, build_folder, config_folder):
        """Write launch files to the given folder."""
        ...
class BaseServer(BaseSoftware):
    """Base class for IRC server (ircd) software; the slug prefixes its cache dir."""
    _slug_type = 'ircd'
class BaseServices(BaseSoftware):
    """Base class for IRC services packages; the slug prefixes its cache dir."""
    _slug_type = 'services'
class BaseServiceBot(BaseSoftware):
    """Base class for IRC service bots; the slug prefixes its cache dir."""
    _slug_type = 'servicebot'
| DanielOaks/vagrirc | virc/base.py | base.py | py | 4,596 | python | en | code | 2 | github-code | 90 |
42744024355 |
def get_input():
    """Read the puzzle input and return its integers sorted ascending."""
    # Hard-coded input path, relative to the working directory.
    with open('inputs/input1.txt', 'r') as f:
        data = sorted([int(line) for line in f.readlines()])
    return data
def part1(data, target):
    """Find two entries of sorted `data` that sum to `target`.

    Uses the classic two-pointer scan over a sorted list. Returns the
    product of the pair, or None when no pair exists.

    Robustness fix: the original evaluated data[start] * data[end] after a
    failed scan of an empty list (start=0, end=-1), raising IndexError;
    this version returns None for empty and single-element inputs.
    """
    start = 0
    end = len(data) - 1
    while start < end:
        csum = data[start] + data[end]
        if csum == target:
            return data[start] * data[end]
        if csum < target:
            start += 1
        else:  # csum > target
            end -= 1
    return None
def part2(data, target):
    """Find three entries summing to `target`; return their product, else None.

    For each entry, delegates the remaining two-sum search to part1.
    (As in the original, the current entry is not excluded from that
    search, which happens not to matter on this input set.)
    """
    for entry in data:
        rest = part1(data, target - entry)
        if rest:
            return entry * rest
    return None
if __name__ == '__main__':
    # Solve both parts against target 2020 and print the answers.
    data = get_input()
    result_p1 = part1(data, target=2020)
    print(result_p1)  # 731731
    result_p2 = part2(data, target=2020)
    print(result_p2)  # 115115990
| argfrot/aoc2020 | day01.py | day01.py | py | 907 | python | en | code | 0 | github-code | 90 |
12985313004 | from scrapy import Request
import re
from sumaq.spiders import BaseSpider
from sumaq.items import PostItem
class ForosPeruSpider(BaseSpider):
    """Crawl forosperu.net: front-page sections -> forum threads -> posts.

    Threads are read starting from their *last* page (newest posts) and
    walked backwards; crawl depth is bounded by the two max_*_pagination
    class attributes.
    """
    name = "forosperu"
    base_url = "https://www.forosperu.net"
    max_section_pagination = 1
    max_forum_pagination = 1
    paginated_url_pattern = "https://www.forosperu.net/{relative_url}/pagina-{page}"
    base_url_pattern = "https://www.forosperu.net/{relative_url}"

    def parse(self, response, *args, **kwargs):
        """Entry point: queue page 1 of every section on the front page."""
        sections = response.css("div.forumNodeInfo h3.nodeTitle a")
        for section in sections:
            relative_url = section.css("::attr(href)").get().strip("/")
            yield Request(
                self.paginated_url_pattern.format(relative_url=relative_url, page=1),
                callback=self.parse_section,
                meta={
                    "section_url": relative_url,
                    "page": 1,
                },
            )

    def parse_section(self, response, **kwargs):
        """Queue every forum thread on a section page, then the next page."""
        current_page = response.css("div.PageNav::attr(data-page)").get()
        # Bail out if the site served a different page than we requested
        # (e.g. a redirect past the last page).
        if current_page and int(current_page) != response.meta.get("page", 1):
            return
        for forum in response.css("li.discussionListItem h3.title a"):
            relative_url = forum.css("::attr(href)").get().strip("/")
            yield Request(
                self.base_url_pattern.format(relative_url=relative_url),
                callback=self.parse_forum,
                meta={
                    "forum_url": relative_url,
                },
            )
        # A missing PageNav means the section has a single page.
        if not current_page:
            return
        next_page = int(current_page) + 1
        if next_page > self.max_section_pagination:
            return
        section_url = response.meta["section_url"]
        yield Request(
            self.paginated_url_pattern.format(relative_url=section_url, page=next_page),
            callback=self.parse_section,
            meta={
                "section_url": section_url,
                "page": next_page,
            },
        )

    def parse_forum(self, response, **kwargs):
        """Jump straight to the thread's last page (newest posts first)."""
        last_page = response.meta.get(
            "page", response.css("div.PageNav::attr(data-last)").get()
        )
        if last_page:
            yield Request(
                self.paginated_url_pattern.format(
                    relative_url=response.meta["forum_url"], page=last_page
                ),
                callback=self.parse_forum_posts,
                meta={
                    "page": int(last_page),
                    "forum_url": response.meta["forum_url"],
                },
            )
        else:
            # Single-page thread: the base pattern has no {page} placeholder,
            # so the extra kwarg is ignored by str.format.
            yield Request(
                self.base_url_pattern.format(
                    relative_url=response.meta["forum_url"], page=last_page
                ),
                callback=self.parse_forum_posts,
                meta={
                    "forum_url": response.meta["forum_url"],
                },
            )

    def parse_forum_posts(self, response, *args, **kwargs):
        """Emit a PostItem per message, then walk backwards page by page."""
        depth = response.meta.get("depth", 1)
        page = response.meta.get("page", 1)
        for message in response.css("li.message"):
            item = PostItem()
            item["id"] = message.css("::attr(id)").get()
            item["post_body"] = " ".join(
                message.css("div.messageContent ::text").getall()
            )
            # BUG FIX: use a raw string for the regex -- "\s" inside a plain
            # string literal is an invalid escape sequence (DeprecationWarning,
            # a SyntaxError in future Python versions). Pattern is unchanged.
            item["post_body"] = " ".join(
                re.sub(r"[^A-Za-z0-9\s]+", "", item["post_body"]).split()
            )
            item["mention"] = message.css("a.AttributionLink::attr(href)").get()
            yield item
        depth += 1
        page -= 1
        if depth > self.max_forum_pagination or page < 1:
            return
        yield Request(
            self.paginated_url_pattern.format(
                relative_url=response.meta["forum_url"], page=page
            ),
            callback=self.parse_forum_posts,
            meta={
                "page": page,
                "depth": depth,
                "forum_url": response.meta["forum_url"],
            },
        )
| comjoueur/cloud-scraper-ucsp | sumaq/spiders/forosperu.py | forosperu.py | py | 4,081 | python | en | code | 0 | github-code | 90 |
13224370621 | from __future__ import annotations
import warnings
from typing import Any
import pandas as pd
from sklearn.impute import SimpleImputer as sk_SimpleImputer
from safeds.data.tabular.containers import Table
from safeds.data.tabular.transformation._table_transformer import TableTransformer
from safeds.data.tabular.typing import ImputerStrategy
from safeds.exceptions import NonNumericColumnError, TransformerNotFittedError, UnknownColumnNameError
class Imputer(TableTransformer):
    """
    Replace missing values with the given strategy.

    Parameters
    ----------
    strategy : ImputerStrategy
        The strategy used to impute missing values. Use the classes nested inside `Imputer.Strategy` to specify it.

    Examples
    --------
    >>> from safeds.data.tabular.containers import Column, Table
    >>> from safeds.data.tabular.transformation import Imputer
    >>>
    >>> table = Table.from_columns(
    ...     [
    ...         Column("a", [1, 3, None]),
    ...         Column("b", [None, 2, 3]),
    ...     ],
    ... )
    >>> transformer = Imputer(Imputer.Strategy.Constant(0))
    >>> transformed_table = transformer.fit_and_transform(table)
    """

    # Namespace grouping the available imputation strategies.
    class Strategy:
        class Constant(ImputerStrategy):
            """
            An imputation strategy for imputing missing data with given constant values.

            Parameters
            ----------
            value :
                The given value to impute missing values.
            """

            def __init__(self, value: Any):
                self._value = value

            def __str__(self) -> str:
                return f"Constant({self._value})"

            def _augment_imputer(self, imputer: sk_SimpleImputer) -> None:
                # Configure the wrapped sklearn SimpleImputer in place.
                imputer.strategy = "constant"
                imputer.fill_value = self._value

        class Mean(ImputerStrategy):
            """An imputation strategy for imputing missing data with mean values."""

            def __str__(self) -> str:
                return "Mean"

            def _augment_imputer(self, imputer: sk_SimpleImputer) -> None:
                imputer.strategy = "mean"

        class Median(ImputerStrategy):
            """An imputation strategy for imputing missing data with median values."""

            def __str__(self) -> str:
                return "Median"

            def _augment_imputer(self, imputer: sk_SimpleImputer) -> None:
                imputer.strategy = "median"

        class Mode(ImputerStrategy):
            """An imputation strategy for imputing missing data with mode values. The lowest value will be used if there are multiple values with the same highest count."""

            def __str__(self) -> str:
                return "Mode"

            def _augment_imputer(self, imputer: sk_SimpleImputer) -> None:
                imputer.strategy = "most_frequent"

    def __init__(self, strategy: ImputerStrategy):
        self._strategy = strategy
        # Both set by fit(); None means "not fitted yet".
        self._wrapped_transformer: sk_SimpleImputer | None = None
        self._column_names: list[str] | None = None

    # noinspection PyProtectedMember
    def fit(self, table: Table, column_names: list[str] | None) -> Imputer:
        """
        Learn a transformation for a set of columns in a table.

        This transformer is not modified.

        Parameters
        ----------
        table : Table
            The table used to fit the transformer.
        column_names : list[str] | None
            The list of columns from the table used to fit the transformer. If `None`, all columns are used.

        Returns
        -------
        fitted_transformer : TableTransformer
            The fitted transformer.

        Raises
        ------
        UnknownColumnNameError
            If column_names contain a column name that is missing in the table
        ValueError
            If the table contains 0 rows
        NonNumericColumnError
            If the strategy is set to either Mean or Median and the specified columns of the table contain non-numerical data.
        """
        if column_names is None:
            column_names = table.column_names
        else:
            missing_columns = sorted(set(column_names) - set(table.column_names))
            if len(missing_columns) > 0:
                raise UnknownColumnNameError(missing_columns)

        if table.number_of_rows == 0:
            raise ValueError("The Imputer cannot be fitted because the table contains 0 rows")

        # Mean/Median require purely numerical columns: if dropping
        # non-numerical columns shrinks the selection, report the offenders.
        if (isinstance(self._strategy, Imputer.Strategy.Mean | Imputer.Strategy.Median)) and table.keep_only_columns(
            column_names,
        ).remove_columns_with_non_numerical_values().number_of_columns < len(
            column_names,
        ):
            raise NonNumericColumnError(
                str(
                    sorted(
                        set(table.keep_only_columns(column_names).column_names)
                        - set(
                            table.keep_only_columns(column_names)
                            .remove_columns_with_non_numerical_values()
                            .column_names,
                        ),
                    ),
                ),
            )

        # Mode ties are resolved towards the lowest value by sklearn; warn the
        # user which columns are affected so the choice is not silent.
        if isinstance(self._strategy, Imputer.Strategy.Mode):
            multiple_most_frequent = {}
            for name in column_names:
                if len(table.get_column(name).mode()) > 1:
                    multiple_most_frequent[name] = table.get_column(name).mode()
            if len(multiple_most_frequent) > 0:
                warnings.warn(
                    "There are multiple most frequent values in a column given to the Imputer.\nThe lowest values"
                    " are being chosen in this cases. The following columns have multiple most frequent"
                    f" values:\n{multiple_most_frequent}",
                    UserWarning,
                    stacklevel=2,
                )

        wrapped_transformer = sk_SimpleImputer()
        self._strategy._augment_imputer(wrapped_transformer)
        wrapped_transformer.fit(table._data[column_names])

        # Return a fresh fitted instance; `self` stays unfitted.
        result = Imputer(self._strategy)
        result._wrapped_transformer = wrapped_transformer
        result._column_names = column_names

        return result

    # noinspection PyProtectedMember
    def transform(self, table: Table) -> Table:
        """
        Apply the learned transformation to a table.

        The table is not modified.

        Parameters
        ----------
        table : Table
            The table to which the learned transformation is applied.

        Returns
        -------
        transformed_table : Table
            The transformed table.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        UnknownColumnNameError
            If the input table does not contain all columns used to fit the transformer.
        ValueError
            If the table contains 0 rows.
        """
        # Transformer has not been fitted yet
        if self._wrapped_transformer is None or self._column_names is None:
            raise TransformerNotFittedError

        # Input table does not contain all columns used to fit the transformer
        missing_columns = sorted(set(self._column_names) - set(table.column_names))
        if len(missing_columns) > 0:
            raise UnknownColumnNameError(missing_columns)

        if table.number_of_rows == 0:
            raise ValueError("The Imputer cannot transform the table because it contains 0 rows")

        # Drop the original index so the imputed frame aligns by position.
        data = table._data.reset_index(drop=True)
        data[self._column_names] = pd.DataFrame(
            self._wrapped_transformer.transform(data[self._column_names]),
            columns=self._column_names,
        )
        return Table._from_pandas_dataframe(data, table.schema)

    def is_fitted(self) -> bool:
        """
        Check if the transformer is fitted.

        Returns
        -------
        is_fitted : bool
            Whether the transformer is fitted.
        """
        return self._wrapped_transformer is not None

    def get_names_of_added_columns(self) -> list[str]:
        """
        Get the names of all new columns that have been added by the Imputer.

        Returns
        -------
        added_columns : list[str]
            A list of names of the added columns, ordered as they will appear in the table.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if not self.is_fitted():
            raise TransformerNotFittedError
        # Imputation never adds columns; the list is always empty.
        return []

    # (Must implement abstract method, cannot instantiate class otherwise.)
    def get_names_of_changed_columns(self) -> list[str]:
        """
        Get the names of all columns that may have been changed by the Imputer.

        Returns
        -------
        changed_columns : list[str]
            The list of (potentially) changed column names, as passed to fit.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if self._column_names is None:
            raise TransformerNotFittedError
        return self._column_names

    def get_names_of_removed_columns(self) -> list[str]:
        """
        Get the names of all columns that have been removed by the Imputer.

        Returns
        -------
        removed_columns : list[str]
            A list of names of the removed columns, ordered as they appear in the table the Imputer was fitted on.

        Raises
        ------
        TransformerNotFittedError
            If the transformer has not been fitted yet.
        """
        if not self.is_fitted():
            raise TransformerNotFittedError
        # Imputation never removes columns; the list is always empty.
        return []
| Safe-DS/Library | src/safeds/data/tabular/transformation/_imputer.py | _imputer.py | py | 9,805 | python | en | code | 11 | github-code | 90 |
29737788345 | import json
from datetime import datetime
from io import BytesIO
from typing import List
from objectiv_backend.common.config import get_collector_config, SnowplowConfig
from objectiv_backend.common.types import EventDataList
from objectiv_backend.schema.validate_events import EventError
from objectiv_backend.snowplow.snowplow_helper import write_data_to_aws_pipeline, write_data_to_gcp_pubsub
if get_collector_config().output.aws:
import boto3
from botocore.exceptions import ClientError
def events_to_json(events: EventDataList) -> str:
    """
    Serialise the list of events as a single JSON array string.

    NOTE(review): the previous docstring promised newline-delimited JSON
    (one object per line, as raw AWS Athena input), but json.dumps on a
    list produces one JSON array -- callers relying on a JSON-lines format
    should verify this is what they want.
    """
    return json.dumps(events)
def write_data_to_fs_if_configured(data: str, prefix: str, moment: datetime) -> None:
    """
    Write data to disk, if file_system output is configured. If file_system output is not configured, then
    this function returns directly.
    :param data: data to write
    :param prefix: directory prefix, added to path after the configured path/ and before /filename.json
    :param moment: timestamp that the data arrived
    """
    fs_config = get_collector_config().output.file_system
    if not fs_config:
        return
    # The Unix timestamp doubles as the filename (unique per arrival moment).
    timestamp = moment.timestamp()
    path = f'{fs_config.path}/{prefix}/{timestamp}.json'
    # NOTE(review): assumes the {path}/{prefix} directory already exists;
    # open(..., 'w') does not create intermediate directories.
    with open(path, 'w') as of:
        of.write(data)
def write_data_to_s3_if_configured(data: str, prefix: str, moment: datetime) -> None:
    """
    Upload data to AWS S3 when S3 output is configured; otherwise do nothing.

    :param data: data to write
    :param prefix: key-name segment placed after the configured path and
        datestamp and before the timestamped filename
    :param moment: timestamp at which the data arrived
    """
    aws_config = get_collector_config().output.aws
    if not aws_config:
        return
    datestamp = moment.strftime('%Y/%m/%d')
    object_name = f'{aws_config.s3_prefix}/{datestamp}/{prefix}/{moment.timestamp()}.json'
    s3_client = boto3.client(
        service_name='s3',
        region_name=aws_config.region,
        aws_access_key_id=aws_config.access_key_id,
        aws_secret_access_key=aws_config.secret_access_key)
    try:
        s3_client.upload_fileobj(
            BytesIO(data.encode('utf-8')), aws_config.bucket, object_name)
    except ClientError as e:
        print(f'Error uploading to s3: {e} ')
def write_data_to_snowplow_if_configured(events: EventDataList,
                                         good: bool,
                                         event_errors: List[EventError] = None) -> None:
    """
    Forward events to the Snowplow pipeline(s) if AWS and/or GCP Snowplow
    output is configured; otherwise do nothing.

    DOC FIX: the original docstring documented a non-existent ``prefix``
    parameter; the second argument is actually ``good``.

    :param events: events to forward
    :param good: True when the events validated OK, False for the bad stream
    :param event_errors: list of validation errors, if any
    """
    config = get_collector_config().output.snowplow
    if not config:
        return
    if config.aws_enabled:
        write_data_to_aws_pipeline(events=events, config=config, good=good, event_errors=event_errors)
    if config.gcp_enabled:
        write_data_to_gcp_pubsub(events=events, config=config, good=good, event_errors=event_errors)
| massimo1220/objectiv-analytics-main | backend/objectiv_backend/end_points/extra_output.py | extra_output.py | py | 3,443 | python | en | code | 5 | github-code | 90 |
31052270375 | import numpy as np
import matplotlib.pyplot as plt
class OptimizationProblem:
    """Base 2-D optimization problem solved by gradient descent.

    Callers must assign ``costFunction(x, y) -> float`` and
    ``gradFunction(x, y) -> (dJ/dx, dJ/dy)`` before calling :meth:`solve`.

    Fixes over the original:
      * ``is None`` instead of ``== None`` comparisons;
      * :meth:`visualize` now returns early after printing an error, instead of
        falling through and crashing on empty paths / a missing cost function.
    """

    def __init__(self, max_iterations=50, learning_rate=0.002, tolerance=10 ** -4):
        self.max_iterations = max_iterations  # hard cap on descent iterations
        self.learning_rate = learning_rate    # step size along the negative gradient
        self.tolerance = tolerance            # stop once |change in cost| < tolerance
        self.costFunction = None
        self.gradFunction = None
        # Points visited by solve(), recorded for visualize().
        self.Xpath = []
        self.Ypath = []
        self.Zpath = []

    def evaluate_Jacobian(self, x, y):
        """Calculate the gradient at the given point.

        Keyword arguments:
            x (float): current x location
            y (float): current y location

        Returns:
            (dx, dy): partial derivatives evaluated at the input location,
            or -1 when no gradient function has been defined.

        Additional Information:
            When m=1 the Jacobian is the same as the Gradient,
            since it is a generalization of the Gradient.
        """
        if self.gradFunction is None:
            print("You must define a gradient function")
            return -1
        dx, dy = self.gradFunction(x, y)
        return dx, dy

    def solve(self, x, y):
        """Perform gradient descent starting at the input location, printing
        progress and the stopping condition.

        Keyword arguments:
            x (float): starting x value
            y (float): starting y value

        Returns:
            None on completion, or -1 if no cost/gradient function is defined.
        """
        if self.costFunction is None or self.gradFunction is None:
            print("You must define a cost function and gradient function")
            return -1
        converged = False
        for iteration in range(self.max_iterations):
            previousEval = self.costFunction(x, y)
            dx, dy = self.evaluate_Jacobian(x, y)
            x = x - self.learning_rate * dx
            y = y - self.learning_rate * dy
            newEval = self.costFunction(x, y)
            residual = abs(previousEval - newEval)
            print(f"Iteration {iteration}")
            print(f"The location now is x = {x}, y = {y}")
            print(f"The value of the cost function is {newEval}")
            print(f"The residual is: {residual}")
            self.Xpath.append(x)
            self.Ypath.append(y)
            self.Zpath.append(newEval)
            if residual < self.tolerance:
                converged = True
                break
        if converged:
            print("Tolerance Achieved")
        else:
            print("Maximum Number Iterations Hit Without Convergence")

    def visualize(self, x, y, size=10, resolution=100):
        """Plot the gradient-descent path over the surrounding cost surface.

        Keyword arguments:
            x (float): starting x value of the descent
            y (float): starting y value of the descent
            size (float, optional): half-width of the visualized region
            resolution (float, optional): grid granularity (higher = finer)

        Returns:
            None
        """
        if self.Xpath == []:
            print("You Must Solve The Gradient Descent Problem First")
            return  # BUG FIX: original fell through and indexed the empty paths
        if self.costFunction is None or self.gradFunction is None:
            print("You must define a cost function and gradient function")
            return  # BUG FIX: original fell through and called None
        d1 = np.linspace(x - size, x + size, resolution)
        d2 = np.linspace(y - size, y + size, resolution)
        costX, costY = np.meshgrid(d1, d2)
        costZ = self.costFunction(costX, costY)
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(projection='3d')
        ax.plot_surface(costX, costY, costZ, cmap='Oranges', edgecolor=None, alpha=0.5)
        path = ax.scatter3D(self.Xpath, self.Ypath, self.Zpath, c=(self.Zpath), cmap='viridis',
                            label="Gradient Descent Path")
        ax.scatter3D(self.Xpath[0], self.Ypath[0], self.Zpath[0], c="Black", label="Start")
        ax.scatter3D(self.Xpath[-1], self.Ypath[-1], self.Zpath[-1], c="Red", label="End")
        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Z")
        ax.set_title("Gradient Descent")
        plt.colorbar(path)
        plt.legend()
        plt.show()
class GDOptProblem(OptimizationProblem):
    """OptimizationProblem wired to one fixed two-dimensional cost function,
    minimised via gradient descent."""

    def __init__(self, max_iterations=50, learning_rate=0.01, tolerance=10 ** -4):
        super().__init__(max_iterations, learning_rate, tolerance)

        def cost(x, y):
            # J(x, y) = (x^2 + y - 3)^2 + (x + y^2 - 9)^2
            return (x ** 2 + y - 3) ** 2 + (x + y ** 2 - 9) ** 2

        def grad(x, y):
            # Analytic partial derivatives of the cost above.
            return (4 * x ** 3 + 4 * x * y - 10 * x + 2 * y ** 2 - 18,
                    4 * y ** 3 + 4 * x * y - 34 * y + 2 * x ** 2 - 6)

        self.costFunction = cost
        self.gradFunction = grad
| jacksonCurry6464/StandardGradientDescent | Final Package/gdtest/solvers.py | solvers.py | py | 5,022 | python | en | code | 0 | github-code | 90 |
import mysql.connector
from faker import Faker
import random

# Connect to the local `userboard` schema (XAMPP-style defaults: root user,
# empty password).
conn = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="userboard"
)
cursor = conn.cursor()

# Re-seed Faker randomly so repeated runs generate different fake data.
Faker.seed(random.randint(1, 1000))
fake = Faker()

# Word pool used to build IT-flavoured fake project names.
IT_list = ['Java', 'Python', 'Programming', 'React JS', 'Socket', 'Django',
           'C++', 'C#', 'Embedded Systems', 'Java Servlets', 'Java Applets',
           'AI Dev', 'Machine Learning', 'Functional Programming', 'Transfer Protocol',
           'Regex Editor', 'REST API', 'Login App', 'CRUD', 'GUI Swing', 'Bootstrap',
           'TailWindCss', 'JDBC', 'DB Manager', 'Flask Tech', 'Angular JS', 'JQUery',
           'Ajax', 'IDE', 'Game Engine', 'Chat Log', 'HTML Editor', 'Numpy', 'Faker Generator']

# Disabled generator: insert `n` fake teams into `echipa`.
"""
query_insert = "INSERT INTO echipa (denumire) VALUES (%(name)s)"
n = int(input())
for i in range(0, n):
    data_team = {
        'name': fake.company()
    }
    cursor.execute(query_insert, data_team)
print(str(n) + " teams inserted successfully!")
"""

# Disabled generator: insert `n` fake projects into `proiect`, rejecting teams
# that already have a project in the "WORKING" state.
"""
query_insert = "INSERT INTO proiect (tip, deadline, echipa_id, finalizat) " \
               "VALUES (%(tip)s, %(data)s, %(team)s, %(state)s)"
options = ["WORKING", "DONE"]
n = int(input())
for i in range(0, n):
    state = random.randint(0, 1)
    team = random.randint(1, 100)
    query_select = "SELECT * FROM echipa WHERE id=%(team_id)s LIMIT 1"
    if state == 0:
        query_select = "SELECT * FROM echipa WHERE id=%(team_id)s AND id NOT IN" \
                       "(SELECT echipa_id FROM proiect WHERE finalizat=\"WORKING\") LIMIT 1"
    data_team = {
        'team_id': team
    }
    cursor.execute(query_select, data_team)
    result = cursor.fetchone()
    while result is None:
        team = random.randint(1, 100)
        data_team = {
            'team_id': team
        }
        cursor.execute(query_select, data_team)
        result = cursor.fetchone()
    data_project = {
        'tip': fake.sentence(nb_words=2, ext_word_list=IT_list),
        'data': fake.date_this_decade(),
        'team': team,
        'state': options[state]
    }
    cursor.execute(query_insert, data_project)
    # print(data_project)
print(str(n) + " projects inserted successfully!")
"""

# Active section: for every project, trim the trailing character of its stored
# type and assign a fresh random deadline, keyed by id.
query_update = "UPDATE proiect SET tip=%(tip)s, deadline=%(data)s WHERE id=%(id)s"
query_select = "SELECT * FROM proiect"
cursor.execute(query_select)
result = cursor.fetchall()
for row in result:
    data_proiect = {
        # NOTE(review): assumes column 4 of `proiect` is `tip` and that its
        # stored value carries one extra trailing character — confirm schema.
        'tip': row[4][:-1],
        'data': fake.date_this_decade(),
        'id': row[0]
    }
    cursor.execute(query_update, data_proiect)

conn.commit()
cursor.close()
conn.close()
| WarriorsSami/CRUD-PHP | pythonGenerators/pythonScripts/projects&teams_names.py | projects&teams_names.py | py | 2,630 | python | en | code | 1 | github-code | 90 |
15640238099 | from dataclasses import dataclass
@dataclass
class Instruction:
    # Opcode: "nop", "acc" or "jmp".
    op: str
    # Signed operand for the opcode.
    arg: int
    # Execution marker used by run_program to detect the first repeated
    # instruction (i.e. an infinite loop).
    visited: bool
def parse_input(filename):
    """Parse `filename` ("op arg" per line) into a list of unvisited
    Instructions."""
    with open(filename) as handle:
        return [
            Instruction(parts[0], int(parts[1]), False)
            for parts in (line.strip().split(" ") for line in handle)
        ]
def problem_1():
    """Run the unmodified program; return the accumulator value at the moment
    the first instruction repeats."""
    _, accumulator = run_program(parse_input("input.txt"))
    return accumulator
def run_program(program):
    """Execute `program` until it runs off the end or revisits an instruction.

    Marks each executed instruction's `visited` flag in place.

    Returns:
        (terminates, accumulator): `terminates` is True when the program
        counter ran past the last instruction, False when a loop was detected.
    """
    acc = 0
    pc = 0
    size = len(program)
    while pc < size:
        inst = program[pc]
        if inst.visited:
            return False, acc
        inst.visited = True
        if inst.op == "acc":
            acc += inst.arg
        if inst.op == "jmp":
            pc += inst.arg
        else:
            # "nop" and "acc" both fall through to the next instruction.
            pc += 1
    return True, acc
def problem_2():
    """Find the single nop<->jmp swap that makes the program terminate and
    return the final accumulator, or None if no swap works.

    Fix: the original fell through with `new_op = None` for any opcode other
    than nop/acc/jmp and then overwrote the instruction's op with None; now
    unrecognised opcodes are simply skipped.
    """
    program = parse_input("input.txt")
    swap = {"nop": "jmp", "jmp": "nop"}
    for i, instruction in enumerate(program):
        old_op = instruction.op
        if old_op not in swap:
            continue
        program[i].op = swap[old_op]
        terminates, accumulator = run_program(program)
        if terminates:
            return accumulator
        # Undo the swap and clear the visited flags for the next attempt.
        program[i].op = old_op
        for inst in program:
            inst.visited = False
    return None
def main():
    """Print the answers to both puzzle parts."""
    print(problem_1())
    print(problem_2())
| swarthout/AdventOfCode20 | day8/main.py | main.py | py | 1,927 | python | en | code | 0 | github-code | 90 |
9666304927 | #! /usr/bin/python
import re
import os
import sys
import numpy as np
import pandas as pd
import scanpy as sc
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
def loadMetaData(fn):
    """Load the sample metadata TSV (index + 4 dose/treatment columns) and
    derive per-sample treatment labels.

    Adds a 'CB' prefix to each index value, a raw `treatment` code built from
    the FSH/hCG doses, and a human-readable `treatment1` label.
    """
    df = pd.read_csv(fn, sep='\t', header=None, index_col=0)
    df.index = [f'CB{ix}' for ix in df.index]
    df.columns = ['FSH_dose', 'FSH', 'hCG_dose', 'hCG']
    df['FSH_dose'] = df['FSH_dose'].astype(int)
    df['treatment'] = df.apply(
        lambda row: f"049:{row['FSH_dose']}_302:{row['hCG_dose']}", axis=1)
    labels = {'049:0_302:0.0':'Vehicle','049:0_302:0.0375':'hCG_m','049:0_302:1.2':'hCG_hi',
              '049:1_302:0.0':'FSH_1','049:3_302:0.0':'FSH_3',
              '049:3_302:0.0375':'FSH_3 + hCG_m','049:3_302:1.2':'FSH_3 + hCG_hi',
              '049:10_302:0.0':'FSH_10','049:10_302:0.0375':'FSH_10 + hCG_m',
              '049:10_302:1.2':'FSH_10 + hCG_hi'}
    df['treatment1'] = df['treatment'].map(labels)
    return df
def annotateH5AD(adata, meta):
    """Attach treatment and cell-type annotations to `adata` in place, merge in
    GC sub-cluster labels from 'scanpy.GC.recluster.h5ad', and write the result
    to 'scanpy.DIEM.annotated.h5ad'.
    """
    # Human-readable treatment label per sample (see loadMetaData).
    adata.obs['treatment'] = adata.obs['sample'].map(meta['treatment1'])
    # GC sub-cluster names keyed by leiden id of the GC-only re-clustering.
    dict1 = {'0':'Antral','1':'Proliferating','2':'Preantral','3':'Atretic'}
    # NOTE(review): dict2 is defined but never used below — presumably a
    # leftover Lhcgr annotation; confirm before removing.
    dict2 = {'0':'Native','1':'Lhcgr-'}
    adata1 = sc.read_h5ad('scanpy.GC.recluster.h5ad')
    # Map leiden ids to GC sub-cluster names, keeping unmapped ids as-is.
    adata1.obs['GC_cluster'] = adata1.obs.apply(lambda x: dict1[x['leiden']] if x['leiden'] in dict1 else x['leiden'],axis=1)
    # Coarse cell-type label for every leiden cluster of the full data set.
    new_cluster_names = {'0':'GC','1':'GC','2':'GC','3':'GC','4':'Theca','5':'Empty','6':'Empty','7':'Stroma','8':'Epithelial','9':'BEC','10':'Immune','11':'Oocyte','12':'LEC','13':'Luteinizing GC','14':'Doublets'}
    adata.obs['ann_cluster'] = adata.obs['leiden'].map(new_cluster_names)
    shown_groups = ['GC','Theca','Stroma','Epithelial','BEC','Oocyte','Immune','LEC']
    # Optional filter down to the displayed groups, intentionally disabled.
    #adata = adata[list(map(lambda x: x in shown_groups,adata.obs['ann_cluster'])),:]
    # Outer join so cells absent from the GC re-clustering keep NaN GC_cluster.
    adata.obs = pd.concat([adata.obs,adata1.obs['GC_cluster']],join='outer',axis=1)
    # Final label: GC cells get their sub-cluster suffix, others keep the
    # coarse annotation.
    adata.obs.loc[:,'new_cluster'] = adata.obs.apply(lambda x: 'GC-'+x['GC_cluster'] if x['ann_cluster']=='GC' else x['ann_cluster'],axis=1)
    print(adata)
    adata.write('scanpy.DIEM.annotated.h5ad')
def main():
    """Entry point: annotate the h5ad file named on the command line."""
    h5ad_path = sys.argv[1]
    sc.settings.verbosity = 3
    sc.logging.print_header()
    annotateH5AD(sc.read_h5ad(h5ad_path), loadMetaData('../metadata.tsv'))
if __name__=='__main__':
main()
| JialiUMassWengLab/Misc | 2022/addAnno2H5AD.py | addAnno2H5AD.py | py | 2,411 | python | en | code | 0 | github-code | 90 |
import sys
input = sys.stdin.readline

# Single-source shortest paths (Baekjoon 1753), O(V^2) Dijkstra without a heap.
v, e = map(int, input().split())
start = int(input())
INF = sys.maxsize

graph = [[] for _ in range(v + 1)]
visit = [True] + [False] * v
res = [INF] * (v + 1)

for _ in range(e):
    a, b, w = map(int, input().split())
    graph[a].append([b, w])

visit[start] = True
res[start] = 0
node = start

for _ in range(v):
    # Relax all edges out of the most recently fixed vertex.
    for nxt, weight in graph[node]:
        if not visit[nxt]:
            res[nxt] = min(res[nxt], res[node] + weight)
    # Pick the nearest still-unvisited vertex (index 0 acts as a sentinel).
    best, best_dist = 0, INF
    for i in range(1, v + 1):
        if not visit[i] and res[i] < best_dist:
            best_dist = res[i]
            best = i
    node = best
    visit[best] = True

for i in range(1, v + 1):
    print("INF" if res[i] == INF else res[i])
์ต๋จ๊ฒฝ๋ก/์ต๋จ๊ฒฝ๋ก.py | ์ต๋จ๊ฒฝ๋ก.py | py | 809 | python | en | code | 0 | github-code | 90 |
15094706093 | import psutil
import plugins
# Default plugin settings: widget title and the memory-usage percentage above
# which the status is treated as a problem.
MEM_DEFAULTS = {'title': 'RAM', 'problem': 85}
class PluginThread(plugins.PluginThreadCommon):
    """Memory plugin: reports available RAM and flags high usage as urgent."""

    def __init__(self, config):
        super(PluginThread, self).__init__(config, MEM_DEFAULTS)

    def main(self):
        mem = psutil.virtual_memory()
        # Urgent (and shown) only while usage exceeds the configured threshold.
        urgent = mem.percent > self.conf['problem']
        self.hide = not urgent
        status = '{:.2f}G'.format(mem.available / 2**30)
        self.format_status(status, urgent)
| vdrandom/vdstatus | plugins/mem.py | mem.py | py | 573 | python | en | code | 0 | github-code | 90 |
# Read a, b, k from stdin. map() unpacks directly into three names; the
# intermediate list() in the original was redundant.
a, b, k = map(int, input().split())
def divisor(n):
    """Return the set of all positive divisors of `n` by trial division up to
    sqrt(n), adding each divisor together with its cofactor."""
    found = set()
    d = 1
    while d * d <= n:
        if n % d == 0:
            found.add(d)
            found.add(n // d)
        d += 1
    return found
# Common divisors of a and b, largest first; print the k-th largest.
# Fix: sorted() accepts any iterable, so the redundant list() copy is dropped.
common = sorted(divisor(a) & divisor(b), reverse=True)
print(common[k - 1])
35855181990 | from __future__ import annotations
import json
import time
import textwrap
from pathlib import Path as P
from unittest.mock import patch
from typing import Any, Iterable, Optional
import click
import jwt
import pytest
import responses
from commodore.config import (
Config,
set_fact_value,
parse_dynamic_fact_value,
parse_dynamic_facts_from_cli,
)
from commodore.package import Package
from commodore.multi_dependency import dependency_key
def test_verify_component_aliases_no_instance(config):
    """An alias identical to the component name needs no instantiation support."""
    config.register_component_aliases({"bar": "bar"})
    config.verify_component_aliases({"bar": {"namespace": "syn-bar"}})
def test_verify_component_aliases_explicit_no_instance(config):
    """multi_instance=False verifies cleanly when the alias equals the name."""
    config.register_component_aliases({"bar": "bar"})
    params = {"bar": {"_metadata": {"multi_instance": False}, "namespace": "syn-bar"}}
    config.verify_component_aliases(params)
def test_verify_component_aliases_metadata(config):
    """A distinct alias verifies when the component opts into multi-instance,
    and no deprecation notice is produced."""
    config.register_component_aliases({"baz": "bar"})
    params = {"bar": {"_metadata": {"multi_instance": True}, "namespace": "syn-bar"}}
    config.verify_component_aliases(params)
    assert len(config._deprecation_notices) == 0
def test_verify_toplevel_component_aliases_exception(config):
    """A legacy top-level `multi_instance` key must not enable instantiation."""
    config.register_component_aliases({"baz": "bar"})
    params = {"bar": {"multi_instance": True, "namespace": "syn-bar"}}
    with pytest.raises(click.ClickException) as excinfo:
        config.verify_component_aliases(params)
    assert "Component bar with alias baz does not support instantiation." in str(
        excinfo.value
    )
def test_verify_component_aliases_error(config):
    """Aliasing a component that declares no multi-instance metadata raises."""
    config.register_component_aliases({"baz": "bar"})
    with pytest.raises(click.ClickException):
        config.verify_component_aliases({"bar": {"namespace": "syn-bar"}})
def test_verify_component_aliases_explicit_no_instance_error(config):
    """Aliasing a component that explicitly opts out of instantiation raises."""
    config.register_component_aliases({"baz": "bar"})
    params = {"bar": {"_metadata": {"multi_instance": False}, "namespace": "syn-bar"}}
    with pytest.raises(click.ClickException):
        config.verify_component_aliases(params)
@pytest.mark.parametrize(
    "params,expected",
    [
        # No _metadata at all -> no notices.
        (
            {
                "bar": {"namespace": "syn-bar"},
                "foo": {},
            },
            [],
        ),
        # deprecated: False -> replaced_by is irrelevant, no notice.
        (
            {
                "bar": {
                    "namespace": "syn-bar",
                    "_metadata": {"deprecated": False, "replaced_by": "irrelevant"},
                },
                "foo": {},
            },
            [],
        ),
        # Plain deprecation, no replacement suggested.
        (
            {
                "bar": {
                    "namespace": "syn-bar",
                    "_metadata": {"deprecated": True},
                },
                "foo": {},
            },
            ["Component bar is deprecated."],
        ),
        # Deprecation with a suggested replacement component.
        (
            {
                "bar": {
                    "namespace": "syn-bar",
                    "_metadata": {"deprecated": True, "replaced_by": "foo"},
                },
                "foo": {},
            },
            ["Component bar is deprecated. Use component foo instead."],
        ),
        # Deprecation with replacement and an extra free-form notice.
        (
            {
                "bar": {
                    "namespace": "syn-bar",
                    "_metadata": {
                        "deprecated": True,
                        "replaced_by": "foo",
                        "deprecation_notice": "See https://example.com/migrate-from-bar.html for a migration guide.",
                    },
                },
                "foo": {},
            },
            [
                "Component bar is deprecated. Use component foo instead. "
                + "See https://example.com/migrate-from-bar.html for a migration guide."
            ],
        ),
        # Multiple deprecated components each produce their own notice.
        (
            {
                "bar": {
                    "namespace": "syn-bar",
                    "_metadata": {
                        "deprecated": True,
                    },
                },
                "foo": {
                    "namespace": "syn-foo",
                    "_metadata": {
                        "deprecated": True,
                    },
                },
            },
            ["Component bar is deprecated.", "Component foo is deprecated."],
        ),
    ],
)
def test_register_component_deprecations(config, params, expected):
    """Deprecation notices are derived from each component's _metadata block."""
    alias_data = {"baz": "bar", "qux": "foo"}
    config.register_component_aliases(alias_data)
    config.register_component_deprecations(params)
    # Exactly the expected notices must be present, in any registration order.
    assert len(expected) == len(config._deprecation_notices)
    for en, an in zip(sorted(expected), sorted(config._deprecation_notices)):
        assert en == an
def _setup_deprecation_notices(config):
    """Seed the config with two known deprecation notices for the tests below."""
    for note in ("test 1", "test 2"):
        config.register_deprecation_notice(note)
def test_register_deprecation_notices(config):
    """Notices are stored in registration order."""
    _setup_deprecation_notices(config)
    assert config._deprecation_notices == ["test 1", "test 2"]
def test_print_deprecation_notices_no_notices(config, capsys):
    """With no notices registered, nothing at all is printed."""
    config.print_deprecation_notices()
    assert capsys.readouterr().out == ""
def test_print_deprecation_notices(config, capsys):
    """Registered notices are printed as an indented '>' bullet list under a
    'Commodore notices:' header."""
    _setup_deprecation_notices(config)
    config.print_deprecation_notices()
    captured = capsys.readouterr()
    assert (
        textwrap.dedent(
            """
            Commodore notices:
             > test 1
             > test 2
            """
        )
        == captured.out
    )
def mock_get_token(url: str) -> Optional[str]:
    """Stand-in for commodore.tokencache.get: a still-valid token for
    syn.example.com, an already-expired one for expired.example.com, and
    None for any other URL."""
    expiry_offsets = {
        "https://syn.example.com": 100,
        "https://expired.example.com": -100,
    }
    if url not in expiry_offsets:
        return None
    token = jwt.encode(
        {"exp": time.time() + expiry_offsets[url], "from_cache": True},
        "secret",
        algorithm="HS256",
    )
    return {"id_token": token}
@patch("commodore.tokencache.get")
def test_use_token_cache(test_patch):
    """A still-valid cached token is picked up as the API token."""
    test_patch.side_effect = mock_get_token
    conf = Config(P("."), api_url="https://syn.example.com")
    claims = jwt.decode(
        conf.api_token, algorithms=["RS256"], options={"verify_signature": False}
    )
    assert claims["from_cache"]
@patch("commodore.tokencache.get")
def test_expired_token_cache(test_patch):
    """An expired cached token is rejected, leaving api_token unset."""
    test_patch.side_effect = mock_get_token
    conf = Config(P("."), api_url="https://expired.example.com")
    assert conf.api_token is None
def test_register_get_package(config: Config, tmp_path: P, mockdep):
    """The package registry starts empty and returns registered packages by name."""
    assert config.get_packages() == {}
    pkg = Package("test", mockdep, tmp_path / "pkg")
    config.register_package("test", pkg)
    assert config.get_packages() == {"test": pkg}
def test_register_get_dependency(config: Config, tmp_path: P):
    """Registering a repo URL stores the dependency under its derived key."""
    repo_url = "https://git.example.com/repo.git"
    assert len(config._dependency_repos) == 0
    dep = config.register_dependency_repo(repo_url)
    key = dependency_key(repo_url)
    assert len(config._dependency_repos) == 1
    assert config._dependency_repos.get(key) == dep
def test_register_get_dependency_deduplicates(config: Config, tmp_path: P):
    """Registering the same URL twice returns the existing dependency object."""
    url_a = "https://git.example.com/repo1.git"
    url_b = "https://git.example.com/repo2.git"
    assert len(config._dependency_repos) == 0

    dep_a = config.register_dependency_repo(url_a)
    key_a = dependency_key(url_a)
    assert len(config._dependency_repos) == 1
    assert config._dependency_repos.get(key_a) == dep_a

    # Same URL again: no new entry, and the same object comes back.
    assert config.register_dependency_repo(url_a) == dep_a
    assert len(config._dependency_repos) == 1

    dep_b = config.register_dependency_repo(url_b)
    key_b = dependency_key(url_b)
    assert len(config._dependency_repos) == 2
    assert config._dependency_repos.get(key_b) == dep_b
    assert set(config._dependency_repos.keys()) == {key_a, key_b}
def test_register_dependency_prefer_ssh(config: Config, tmp_path: P):
    """Once an SSH URL is seen for a repo, it wins over the HTTPS variant."""
    https_url = "https://git.example.com/repo.git"
    ssh_url = "ssh://git@git.example.com/repo.git"

    dep = config.register_dependency_repo(https_url)
    assert dep.url == https_url

    # Registering the SSH variant upgrades the stored URL ...
    assert config.register_dependency_repo(ssh_url) == dep
    assert dep.url == ssh_url

    # ... and a later HTTPS registration does not downgrade it.
    assert config.register_dependency_repo(https_url) == dep
    assert dep.url == ssh_url
@pytest.mark.parametrize(
    "key,base_dict,expected_dict",
    [
        ("toplevel", {}, {"toplevel": "sentinel"}),
        ("path.to.key", {}, {"path": {"to": {"key": "sentinel"}}}),
        # An existing leaf on the path is never overwritten.
        ("path.to.key", {"path": {"to": "value"}}, {"path": {"to": "value"}}),
        (
            "path.to.key",
            {"path": {"to": {"other": "value"}}},
            {"path": {"to": {"other": "value", "key": "sentinel"}}},
        ),
        # Malformed keys (empty path segments) are ignored entirely.
        ("path.", {}, {}),
        ("path..foo", {}, {}),
        (".foo", {}, {}),
    ],
)
def test_set_fact_value(
    key: str,
    base_dict: dict[str, Any],
    expected_dict: dict[str, Any],
):
    """set_fact_value navigates dotted keys, creating nested dicts as needed."""
    set_fact_value(base_dict, key, "sentinel")
    assert base_dict == expected_dict
@pytest.mark.parametrize(
    "value,expected",
    [
        # Plain strings (even with colons) pass through unchanged.
        ("foo", "foo"),
        ("test:foo", "test:foo"),
        # A `json:` prefix triggers JSON decoding; invalid JSON yields None.
        ("json:foo", None),
        ('json:{"foo":"bar"', None),
        ('json:"foo"', "foo"),
        ("json:1", 1),
        ('json:["a"]', ["a"]),
        ('json:{"test":{"key":"value"}}', {"test": {"key": "value"}}),
    ],
)
def test_parse_dynamic_fact_value(value: str, expected: Any):
    """Dynamic fact values support both plain strings and json: payloads."""
    parsed = parse_dynamic_fact_value(value)
    assert parsed == expected
@pytest.mark.parametrize(
    "args,expected",
    [
        ([], {}),
        # Arguments without a value (or without a key) are dropped.
        (["key"], {}),
        (["key="], {}),
        (["="], {}),
        (["=value"], {}),
        (['key=json:""'], {"key": ""}),
        (["key=value"], {"key": "value"}),
        (["key=value", "foo=bar"], {"key": "value", "foo": "bar"}),
        # Only the first '=' splits key from value.
        (["key=value=x"], {"key": "value=x"}),
        # A repeated key: the later value wins.
        (["key=value", "key=another"], {"key": "another"}),
        # A dotted key never replaces an existing plain value and vice versa.
        (["key=value", "key.foo=bar"], {"key": "value"}),
        (["key.foo=bar", "key=value"], {"key": "value"}),
        (["key.foo=bar", "key.baz=qux"], {"key": {"foo": "bar", "baz": "qux"}}),
        (["key=json:[1,2,3]"], {"key": [1, 2, 3]}),
        # Invalid JSON payloads discard the whole argument.
        (["key=json:[1,2,3"], {}),
        (["path.to.key=json:foo"], {}),
    ],
)
def test_parse_dynamic_facts_from_cli(args: Iterable[str], expected: dict[str, Any]):
    """CLI dynamic facts: malformed arguments are dropped, later values win."""
    dynamic_facts = parse_dynamic_facts_from_cli(args)
    assert dynamic_facts == expected
@responses.activate
@pytest.mark.parametrize(
    "api_url,discovery_resp,expected_client,expected_url",
    [
        ("https://syn.example.com", {}, None, None),
        # Non-JSON is ignored
        ("https://syn.example.com", "oidc", None, None),
        # Broken JSON is ignored
        ("https://syn.example.com", '"oidc":{"tes: 1}', None, None),
        # Unexpected data format is ignored
        ("https://syn.example.com", {"oidc": {"client_id": "test"}}, None, None),
        # Partial responses are propagated into the config object
        (
            "https://syn.example.com",
            {"oidc": {"clientId": "test-client"}},
            "test-client",
            None,
        ),
        (
            "https://syn.example.com",
            {
                "oidc": {
                    "clientId": "test-client",
                    "discoveryUrl": "https://oidc.example.com",
                },
            },
            "test-client",
            "https://oidc.example.com",
        ),
    ],
)
def test_config_discover_oidc_config(
    tmp_path: P,
    api_url: str,
    discovery_resp: Any,
    expected_client: str,
    expected_url: str,
):
    """discover_oidc_config() reads the API root and copies any OIDC client /
    discovery URL info into the Config, silently ignoring malformed bodies."""
    # Serve the canned discovery document (JSON or plain text) from the API root.
    if isinstance(discovery_resp, dict):
        ct = "application/json"
        resp_body = json.dumps(discovery_resp)
    else:
        resp_body = f"{discovery_resp}"
        ct = "text/plain"
    responses.add(
        responses.GET, url=api_url, content_type=ct, body=resp_body, status=200
    )

    c = Config(tmp_path, api_url=api_url)
    c.discover_oidc_config()

    # Exactly one discovery request must have been made.
    assert len(responses.calls) == 1
    assert c.oidc_client == expected_client
    assert c.oidc_discovery_url == expected_url
| projectsyn/commodore | tests/test_config.py | test_config.py | py | 12,639 | python | en | code | 43 | github-code | 90 |
# AOJ "Areas on the Cross-Section Diagram": scan the terrain string, pairing
# each '\' with its matching '/' via a stack, and merge nested ponds.
diagram = input()

total = 0
slopes = []  # indices of so-far-unmatched '\'
ponds = []   # [start_index, area] per closed pond, left to right

for pos, ch in enumerate(diagram):
    if ch == "\\":
        slopes.append(pos)
    elif ch == "/" and slopes:
        left = slopes.pop()
        width = pos - left
        total += width
        # Absorb every pond that lies inside the newly closed one.
        while ponds and ponds[-1][0] > left:
            width += ponds[-1][1]
            ponds.pop()
        ponds.append([left, width])

print(total)
areas = [area for _, area in ponds]
# "0" when there are no ponds, otherwise "<count> <area> <area> ...".
print(len(areas), *areas)
| Aasthaengg/IBMdataset | Python_codes/p02266/s187798353.py | s187798353.py | py | 570 | python | en | code | 0 | github-code | 90 |
6173520082 | #Author: Brandon Gier
#Email: gier.b@husky.neu.edu
#Uses forward chaining to reason about percepts to demonstrate how knowledge-based agents work.
# Global knowledge base: a mix of rule token-lists (loaded by loadInitialKB)
# and bare fact strings (added by processPercepts / forwardChain).
kb = []
#Loads initial knowledge base. Path is the name of the file to be loaded.
#Assumes path is in the same directory
def loadInitialKB(path):
    """Load the initial knowledge base from `path` into the global `kb` list.

    Each line becomes one whitespace-split token list (facts and
    "<conclusions> IF <premises>" rules alike).

    Fix: use a context manager so the file handle is closed — the original
    opened the file and never closed it.
    """
    with open(path) as f:
        for line in f:
            kb.append(line.split())
    print("Knowledge Base loaded.")
#Processes a percepts file. Path is the file name
#Assumes path is a file in same directory as this.
def processPercepts(path):
f = open(path)
for line in f.readlines():
text = line.replace("\n","")
if line in kb:
print("Getting a percept: " + text + " ---- already contained in knowledge base")
else:
kb.append(line.replace("\n", ""))
print("Getting a percept: " + text + " ---- added to knowledge base")
forwardChain()
#Forward chains to find if more knowledge can be gained from the world.
def forwardChain():
for ex in kb:
if "IF" in ex and not(ex[0] in kb):
intx = ex.index('IF')
temp = ex[intx+1:]
allin = 1
for vals in temp:
valList = [vals]
if not vals in kb and not valList in kb:
allin = 0
adds = ex[:intx]
if allin:
for news in adds:
kb.append(news)
print("New conclusion from forward chaining is: " + "".join(adds))
print("\tjustified by " + ", ".join(temp) + " and " + "^".join(temp) + "-->" + "".join(adds))
forwardChain()
| GierB/Artificial-Intelligence-Samples | ForwardChainPL.py | ForwardChainPL.py | py | 1,658 | python | en | code | 2 | github-code | 90 |
n = int(input())
values = list(map(int, input().split()))

# Count how many times the whole list can be halved while every entry is even.
# Fix: integer floor division (//) keeps the values exact; the original used
# true division (/), which converts to float and can lose precision for
# integers at or above 2**53.
count = 0
while all(v % 2 == 0 for v in values):
    values = [v // 2 for v in values]
    count += 1
print(count)
# 1-D array: show the CPython object address (id) of every element.
arr_1d = [1, 2, 3, 4, 5]
print("1-D Array:")
for value in arr_1d:
    print(f"Element {value} has memory address: {id(value)}")

# 2-D array: same, including the (row, column) index of each element.
arr_2d = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print("\n2-D Array:")
for i, row in enumerate(arr_2d):
    for j, value in enumerate(row):
        print(f"Element {value} at index ({i}, {j}) has memory address: {id(value)}")
| asifurrahmanbubt/CSE-232-Data-Structure-Lab-Code | Lab Report 4/1-D and 2-D array element Address finding.py | 1-D and 2-D array element Address finding.py | py | 400 | python | en | code | 1 | github-code | 90 |
from collections import deque

# Moves: down, right, up, left.
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]

H, W = map(int, input().split())
S = [input() for _ in range(H)]

# dist[r][c]: BFS distance from the top-left corner, -1 while unreached.
dist = [[-1] * W for _ in range(H)]
dist[0][0] = 0
frontier = deque([(0, 0)])

# Breadth-first search over the '.' cells.
while frontier:
    x, y = frontier.popleft()
    for d in range(4):
        nx, ny = x + dx[d], y + dy[d]
        if 0 <= nx < H and 0 <= ny < W and S[nx][ny] != '#' and dist[nx][ny] == -1:
            dist[nx][ny] = dist[x][y] + 1
            frontier.append((nx, ny))

# Answer: white cells not on a shortest path to the goal, or -1 if unreachable.
white = sum(row.count('.') for row in S)
goal = dist[H - 1][W - 1]
print(white - goal - 1 if goal != -1 else -1)
| Aasthaengg/IBMdataset | Python_codes/p03436/s741585188.py | s741585188.py | py | 736 | python | en | code | 0 | github-code | 90 |
"""
- Author: Sharif Ehsani
- Date: June 2020
- https://github.com/sharifehsani
"""
# Gets the birth year (a string) from the user, converts it to int, and
# prints the age in 2020.
year = input("Enter your birth year and I tell how old you are! ")
age = int(year)
print("You'r ", 2020 - age, " years old!. ")

# The same pattern with the conversion done in one line: read a weight in
# pounds and print it in kilograms.
# Fix: the original reused the name `age` for the weight; the variable now
# says what it holds.
weight_lb = int(input("Enter your weight in lb and I tell you how much is it in kg! "))
print("You'r ", weight_lb * 0.453592, "kilograms.")
| sharifehsani/starting-out-with-python | chapter2/type_conversion.py | type_conversion.py | py | 481 | python | en | code | 0 | github-code | 90 |
# NOTE(review): `os` is imported but not used in this file — presumably a
# leftover; confirm before removing.
import os

from setuptools import setup

# Share a single dependency list between pip (requirements.txt) and setuptools.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setup(
    name="ssh2ec2",
    version="0.5",
    author="Mike Ryan",
    author_email="mike@awssystemadministration.com",
    description="SSH into EC2 instances via tags and metadata filters",
    license="MIT",
    url="https://github.com/mikery/ssh2ec2",
    keywords=["amazon", "aws", "ec2", "ssh", "cloud", "boto"],
    packages=['ssh2ec2'],
    install_requires=requirements,
    # Expose the package's main() as the `ssh2ec2` console command.
    entry_points={
        'console_scripts': ['ssh2ec2=ssh2ec2:main'],
    }
)
from re import sub

if __name__ == "__main__":
    # Read the line count, then the lines themselves, joined with newlines
    # (no trailing newline after the last line).
    repeat = int(input())
    lines = [input() for _ in range(repeat)]
    text = "\n".join(lines)
    # Replace standalone " && " / " || " tokens with " and " / " or ",
    # keeping the trailing whitespace via the lookahead.
    text = sub(r"\s{1}&&(?=[\s]{1})", " and", text)
    print(sub(r"\s{1}\|\|(?=[\s]{1})", " or", text))
13076794523 | from __future__ import division, print_function, absolute_import
import numpy as np
import cv2
import os
import glob
import re
import matplotlib.pyplot as plt
def group_converter(in_dir, in_format, key, mark, out_dir):
    """Convert matching images in a directory to raw single-channel .yuv files.

    Args:
        in_dir (str): input directory
        in_format (str): input file extension (without the dot)
        key (str): substring every input path must contain
        mark (str): side marker, 'L' -> left, 'R' -> right
        out_dir (str): output directory (created if missing)

    Raises:
        ValueError: if any globbed file does not contain `key`.

    NOTE(review): files are paired by a running index `i` into os.listdir()
    while iterating glob.glob(); the two listings are not guaranteed to have
    the same order or length, so names and contents can get mismatched —
    confirm this is safe for the expected directory layout.
    """
    i = 0
    file_names = os.listdir(in_dir)
    os.makedirs(out_dir, exist_ok=True)
    for file in glob.glob(os.path.join(in_dir, f"*.{in_format}")):
        if file.find(key) != -1:
            img_name = file_names[i]
            print(f"Original name is: {img_name}")
            img = cv2.imread(os.path.join(in_dir, img_name))
            # should be `2` but not `0` since opencv read img as `BGR`
            img = img[:, :, 2]
            # get image index
            # idx = [_.start() for _ in re.finditer('-', img_name)]
            idx = [_.start() for _ in re.finditer("_", img_name)]
            # Keep everything between the first '_' and the extension.
            # renam = img_name[idx[-2]+1: idx[-1]]
            renam = img_name[idx[0] + 1 : -4]
            # renam = img_name
            print(f"Convert name to: {renam}")
            # Dump the single channel as raw uint8 bytes.
            with open(os.path.join(out_dir, f"rect{mark}_{renam}.yuv"), "wb") as f:
                np.asarray(img, dtype=np.uint8).tofile(f)
            i += 1
        else:
            raise ValueError("You have been mixed up right/left position.")
def yuv2rgb(dim, key, dep_dir, out_dir, format, dtype=np.uint16):
    """Convert raw .yuv depth dumps (e.g. NVP dv4 chip output) to image files.

    Args:
        dim (list): [width, height] of each raw frame
        key (str): substring every input path must contain
        dep_dir (str): input directory
        out_dir (str): output directory (created if missing)
        format (str): target file extension, e.g. "png"
        dtype (numpy dtype, optional): raw storage format. Defaults to np.uint16.

    Raises:
        ValueError: if any globbed file does not contain `key`.

    NOTE(review): like group_converter, output names come from os.listdir()
    indexed in glob order — confirm the listings stay aligned.
    """
    width = dim[0]
    height = dim[1]
    i = 0
    file_names = os.listdir(dep_dir)
    # The chip writes a config file alongside the frames; skip it.
    if "out_config.txt" in file_names:
        file_names.remove("out_config.txt")
    os.makedirs(out_dir, exist_ok=True)
    for file in glob.glob(os.path.join(dep_dir, "*.yuv")):
        if file.find(key) != -1:
            img_name = file_names[i]
            name = img_name.replace("yuv", f"{format}")
            img = np.fromfile(os.path.join(dep_dir, img_name), dtype=dtype)
            img = np.reshape(img, [height, width])
            cv2.imwrite(os.path.join(out_dir, name), img)
            i += 1
        else:
            raise ValueError("You have been mixed up right/left position.")
def png2colormap(png_dir, color_dir):
    """Render every .png in `png_dir` with a 'jet' colormap into `color_dir`.

    Args:
        png_dir (str): directory containing input .png images
        color_dir (str): output directory (created if missing)
    """
    os.makedirs(color_dir, exist_ok=True)
    for file in glob.glob(os.path.join(png_dir, "*.png")):
        # BUGFIX: take the name from the globbed path; the previous
        # os.listdir()[i] indexing desynchronized from the glob results.
        img_name = os.path.basename(file)
        print(img_name)
        print(file)
        img = plt.imread(file)
        # BUGFIX: test the number of dimensions, not the number of rows
        # (len(img) is the row count; a 3-channel image has img.ndim == 3)
        if img.ndim == 3:
            img = img[:, :, 0]
        # one fresh figure per image so colorbars do not accumulate on a
        # single shared figure across iterations
        fig = plt.figure()
        sc = plt.imshow(img)
        sc.set_cmap("jet")
        plt.colorbar(sc)
        plt.savefig(os.path.join(color_dir, img_name))
        plt.close(fig)
def yuv2colormap(dep_dir, dim, out_dir):
    """Apply a JET colormap to raw .yuv dumps and save them as .png images.

    Shows each colorized frame in an interactive OpenCV window and waits for
    a key press before continuing.

    Args:
        dep_dir (str): input directory containing *.yuv dumps
        dim (list): [width, height] of each raw frame
        out_dir (str): output directory (created if missing)
    """
    width = dim[0]
    height = dim[1]
    os.makedirs(out_dir, exist_ok=True)
    for file in glob.glob(os.path.join(dep_dir, "*.yuv")):
        # BUGFIX: take the name from the globbed path; the previous
        # os.listdir()[i] indexing desynchronized from the glob results and
        # the unconditional remove("out_config.txt") raised ValueError when
        # that file was absent. The *.yuv glob already excludes it.
        img_name = os.path.basename(file)
        print(img_name)
        img = np.fromfile(file, dtype=np.uint16)
        img = np.reshape(img, [height, width])
        img = img.astype(np.uint8)
        img_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)
        cv2.imshow("colormap", img_color)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        name = img_name.replace("yuv", "png")
        cv2.imwrite(os.path.join(out_dir, name), img_color)
def conv2d(data, kernel_size, pooling):
    """Pool `data` with a sliding square window ("valid" padding, stride 1).

    Args:
        data (np.ndarray): 2-D input array.
        kernel_size (int): side length of the square pooling window.
        pooling (str): "avg" for rounded mean pooling, "max" for max pooling.

    Returns:
        np.ndarray: array of shape (H - kernel_size + 1, W - kernel_size + 1).
            For backward compatibility an unrecognized `pooling` value still
            returns an all-zero array of that shape.
    """
    # output dimensions for a "valid" sliding window
    h = data.shape[0] - kernel_size + 1
    w = data.shape[1] - kernel_size + 1
    out = np.zeros((h, w))
    # NOTE: the original multiplied each window by an all-ones kernel before
    # reducing; that is a no-op on the reduced value and has been removed.
    if pooling == "avg":
        for i in range(h):
            for j in range(w):
                # np.round uses banker's rounding, matching the original
                out[i, j] = np.round(
                    np.mean(data[i : i + kernel_size, j : j + kernel_size])
                )
    elif pooling == "max":
        for i in range(h):
            for j in range(w):
                out[i, j] = np.max(data[i : i + kernel_size, j : j + kernel_size])
    return out
def plot_grid(
    x,
    y,
    xrange,
    yrange,
    grid_size,
    suptitle,
    ylabel,
    xlabel,
    pooling,
    kernel,
    type,
    pos,
):
    """Draw one scatter subplot per column index in `yrange` and save the figure.

    The figure is written to ./kernel_<kernel>/<pos>_<pooling>_<type>.png
    (the directory is assumed to exist).
    """
    out_dir = os.path.join(os.getcwd(), f"kernel_{kernel}")
    plt.style.use("fivethirtyeight")
    plt.figure(figsize=(15, 12))
    plt.subplots_adjust(hspace=0.5)
    plt.suptitle(suptitle)
    lo, hi = xrange[0], xrange[1]
    # one subplot per requested column, laid out on the given grid
    for plot_idx, col in enumerate(yrange, start=1):
        axis = plt.subplot(grid_size[0], grid_size[1], plot_idx)
        axis.scatter(x[lo:hi, col], y[lo:hi, col])
        axis.set_ylabel(ylabel)
        axis.set_xlabel(xlabel)
    plt.savefig(os.path.join(out_dir, f"{pos}_{pooling}_{type}.png"))
def plot_color_combine(
    x, y, xrange, yrange, suptitle, ylabel, xlabel, pooling, kernel, type, pos
):
    """Overlay scatter plots for every column in `yrange` on one axis and save.

    All series share the single subplot, so their colors combine in one view.
    The figure is written to ./kernel_<kernel>/<pos>_<pooling>_<type>.png
    (the directory is assumed to exist).
    """
    out_dir = os.path.join(os.getcwd(), f"kernel_{kernel}")
    plt.style.use("fivethirtyeight")
    plt.figure(figsize=(9, 6))
    plt.subplots_adjust(hspace=0.5)
    plt.suptitle(suptitle)
    lo, hi = xrange[0], xrange[1]
    for col in yrange:
        # always the same 1x1 subplot: every series lands on one axis
        axis = plt.subplot(1, 1, 1)
        axis.scatter(x[lo:hi, col], y[lo:hi, col])
        axis.set_ylabel(ylabel)
        axis.set_xlabel(xlabel)
    plt.savefig(os.path.join(out_dir, f"{pos}_{pooling}_{type}.png"))
| WEIIEW97/imgprocess | src/misc.py | misc.py | py | 6,458 | python | en | code | 0 | github-code | 90 |
4921244458 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import numpy.linalg as linalg
from numpy import dot, zeros, eye
class FixedLagSmoother(object):
    """ Fixed Lag Kalman smoother.

    DO NOT USE: NOT DEBUGGED.

    Computes a smoothed sequence from a set of measurements by running a
    Kalman predict/update pass and propagating fixed-lag covariance columns.
    """
    def __init__(self, dim_x, dim_z):
        """ Create a Kalman filter. You are responsible for setting the
        various state variables to reasonable values; the defaults below will
        not give you a functional filter.

        Parameters
        ----------
        dim_x : int
            Number of state variables for the Kalman filter. For example, if
            you are tracking the position and velocity of an object in two
            dimensions, dim_x would be 4.
            This is used to set the default size of P, Q, and u
        dim_z : int
            Number of of measurement inputs. For example, if the sensor
            provides you with position in (x,y), dim_z would be 2.
        """
        self.dim_x = dim_x
        self.dim_z = dim_z
        self.x = zeros((dim_x,1)) # state estimate vector
        self.P = eye(dim_x)       # state uncertainty covariance
        self.Q = eye(dim_x)       # process noise covariance
        self.u = 0                # control input vector (scalar placeholder)
        self.B = 0                # control transition matrix (placeholder)
        self.F = 0                # state transition matrix (must be set by caller)
        self.H = 0                # measurement function (must be set by caller)
        self.R = eye(dim_z)       # measurement noise covariance
        self.K = 0                # kalman gain
        self.residual = zeros((dim_z, 1))
        # identity matrix. Do not alter this.
        self._I = np.eye(dim_x)
    def smooth(self, Ms, N):
        """Run the fixed-lag smoother over the measurement sequence `Ms`
        with lag `N` and return the list of (filtered) state estimates.

        NOTE(review): the smoothed state update lines are commented out, so
        this currently returns the *filtered* estimates only — consistent
        with the class-level "NOT DEBUGGED" warning.
        """
        # take advantage of the fact that np.array are assigned by reference.
        H = self.H
        R = self.R
        F = self.F
        B = self.B
        u = self.u
        P = self.P
        x = self.x
        Q = self.Q
        # covariance column / smoothed-covariance buffers, one slab per lag step
        PCol = zeros((self.dim_x, self.dim_x, N+2))
        PColOld = zeros((self.dim_x, self.dim_x, N+2))
        PSmooth = zeros((self.dim_x, self.dim_x, N+2))
        PSmoothOld = zeros((self.dim_x, self.dim_x, N+2))
        xhat = []
        for z in Ms:
            # predict step: propagate state, form innovation and its covariance
            x = dot(F, x) + dot(B, u)
            inn = z - dot(H, x)
            S = dot(H, dot(P, H.T)) + R
            SI = linalg.inv(S)
            K = dot(F, dot(P, dot(H.T, SI)))
            KSmooth = K.copy()
            # update step: fold the innovation into the state estimate
            x = x + dot(K, inn)
            #xSmooth = x.copy()
            xhat.append (x.copy())
            PColOld[:,:,0] = P.copy()
            PSmoothOld[:,:,0] = P.copy()
            # covariance time-update (Riccati recursion form)
            LHS = dot (F, dot(P, H.T))
            RHS = dot (H, dot (P, F.T))
            P = dot (F, dot(P, F.T)) - dot (LHS, dot (SI, RHS)) + Q
            # propagate the lagged covariance columns one step deeper
            for i in range (N+1):
                KSmooth = dot(PColOld[:,:,i], dot(H.T, SI))
                PSmooth[:,:,i+1] = PSmoothOld[:,:,i] - dot(PColOld[:,:,i], dot(H.T, dot(KSmooth.T, H.T)))
                PCol[:,:,i+1] = dot(PColOld[:,:,i], (F - dot(K,H)).T)
                #xSmooth = xSmooth + dot(KSmooth, inn)
            PSmoothOld = PSmooth.copy()
            PColOld = PCol.copy()
        return xhat
| ocastell/pyMovMind | filterpy/kalman/fixed_lag_smoother.py | fixed_lag_smoother.py | py | 3,301 | python | en | code | 0 | github-code | 90 |
33359510280 | """ Populate's Mongo DB from CSV """
from pymongo import MongoClient
import pandas as pd
import json
def populate_csv():
    """Load movie metadata from a CSV file and bulk-insert it into MongoDB.

    Side effects: connects to a MongoDB server on localhost:27017 and inserts
    every CSV row as a document into the movie_db.movie_stats collection.
    Prints the loaded records and the insert result for manual inspection.
    """
    # Connect to the local MongoDB instance and select database/collection
    cursor = MongoClient('localhost', 27017)
    db = cursor["movie_db"]
    collection = db["movie_stats"]
    # Load the CSV into a dataframe, then round-trip through JSON to get a
    # list of plain dicts that insert_many() accepts
    frame = pd.read_csv('support/movie_metadata.csv', encoding = 'ISO-8859-1')
    data = json.loads(frame.to_json(orient='records'))
    print(data)
    db_response = collection.insert_many(data)
    print(db_response)
if __name__ == "__main__":
    # entry point: run the one-shot CSV -> MongoDB import
    populate_csv()
| mfarazahmad/DataMovie | data/populate_db.py | populate_db.py | py | 591 | python | en | code | 0 | github-code | 90 |
69929071018 | """
Is an anonymous letter constructible: EPI 12.2
"""
import collections
def is_constructible(magazine, letter):
    """Return True if `letter` can be assembled from the characters of `magazine`.

    Each character of `magazine` may be used at most once (EPI 12.2,
    "is an anonymous letter constructible").

    Args:
        magazine: the pool of available characters.
        letter: the text that must be constructed.

    Returns:
        bool: True when every character of `letter` occurs at least as often
        in `magazine`.
    """
    # Count the magazine in one C-speed pass, then decrement per letter
    # character; a negative count means the magazine ran out of that char.
    char_count = collections.Counter(magazine)
    for char in letter:
        char_count[char] -= 1
        if char_count[char] < 0:
            return False
    return True
# quick demo: expected output is "True" then "False"
print(is_constructible('aaabbbccc', 'abc'))
print(is_constructible('abc', 'aaabbbccc'))
| annguyenict172/coding-exercises | exercises/hash_tables/is_letter_constructible.py | is_letter_constructible.py | py | 432 | python | en | code | 1 | github-code | 90 |
18561868699 | from itertools import combinations
# AtCoder-style problem: read n names, bucket them by first letter, then sum
# over all unordered triples of distinct initials from {M,A,R,C,H} the product
# of the three bucket counts (number of valid 3-person teams).
n=int(input())
cnt=[0]*26
for i in range(n):
    s=input()
    # bucket by first letter; assumes uppercase ASCII names (A-Z)
    cnt[ord(s[0])-ord('A')]+=1
ans=0
for c1,c2,c3 in combinations(["M","A","R","C","H"],3):
    # map each initial to its bucket index before multiplying the counts
    c1=ord(c1)-ord("A")
    c2=ord(c2)-ord("A")
    c3=ord(c3)-ord("A")
    ans+=cnt[c1]*cnt[c2]*cnt[c3]
print(ans)
23216367625 |
class TrieNode:
    """A single node of a trie.

    Attributes:
        prefix: number of stored words that pass *through* this node
            (excluding words that terminate here).
        word: number of stored words that terminate at this node.
        paths: mapping from next character to child TrieNode.
    """

    def __init__(self):
        self.paths = {}
        self.word = 0
        self.prefix = 0
class Trie:
    """A trie (prefix tree) with case-insensitive word and prefix counting."""

    def __init__(self):
        # sentinel root node; all words hang off root.paths
        self.head = TrieNode()

    def add(self, word):
        """Insert `word` (lower-cased) into the trie."""
        word = word.lower()
        self.addToTrie(word, self.head)

    def addToTrie(self, word, node):
        """Recursively insert `word` below `node`, maintaining the counters."""
        if len(word) <= 0:
            # word exhausted: one more word terminates at this node
            node.word += 1
        else:
            # one more word passes through this node on its way down
            node.prefix += 1
            firstChar = word[0]
            restWord = word[1:]
            if firstChar in node.paths:
                nextNode = node.paths[firstChar]
            else:
                nextNode = TrieNode()
                node.paths[firstChar] = nextNode
            self.addToTrie(restWord, nextNode)

    def isWord(self, word):
        """Return True if `word` (case-insensitive) was added to the trie."""
        # idiom fix: return the comparison directly instead of an
        # if/else that returns boolean literals
        return self.wordCount(word.lower(), self.head) > 0

    def wordCount(self, word, node):
        """Return how many times `word` terminates below `node`."""
        if len(word) <= 0:
            return node.word
        firstChar = word[0]
        restWord = word[1:]
        if firstChar not in node.paths:
            return 0
        return self.wordCount(restWord, node.paths[firstChar])

    def isPrefix(self, word):
        """Return True if `word` is a proper prefix of at least one stored word."""
        return self.prefixCount(word.lower(), self.head) > 0

    def prefixCount(self, word, node):
        """Return how many stored words pass through the node reached by `word`."""
        if len(word) <= 0:
            return node.prefix
        firstChar = word[0]
        restWord = word[1:]
        if firstChar not in node.paths:
            return 0
        return self.prefixCount(restWord, node.paths[firstChar])
| wudanny/BoggleSolver | python/trie.py | trie.py | py | 1,528 | python | en | code | 0 | github-code | 90 |
74933485096 | # weather_api.py
from flask import Flask, request
app = Flask(__name__)

# Hard-coded weather strings keyed by zip code — a stand-in for a real
# weather service backing this demo API
weather = {
    '10001': 'Description : Clear Sky, Temperature: 72, Humidity:30, Main:Clear',
    '60601': 'Description : Freezing Rain, Temperature: 50, Humidity:20, Main:Rainy',
    '90001': 'Description : Snow Showers, Temperature: 80, Humidity:25, Main:Snow',
    '94560': 'Description : Partly Cloud, Temperature: 40, Humidity:18, Main:Cloudy',
    '94550': 'Description : Mostly Sunny, Temperature: 90, Humidity:18, Main:Sunny'
}

@app.route('/weather/<string:zip_code>', methods=['GET'])
def weather_api(zip_code):
    """Return the canned weather string for `zip_code`, or a fallback message."""
    if zip_code in weather:
        return weather[zip_code]
    else:
        return 'Weather information not available for the given zip code'

if __name__ == '__main__':
    # serve on port 5001 so it can run next to other demo services
    app.run(port=5001)
18380568079 | import math
def solve(a, c, d):
    """Count integers in [0, a] divisible by neither c nor d.

    Uses inclusion-exclusion: total minus multiples of c, minus multiples
    of d, plus multiples of lcm(c, d) which were subtracted twice.
    """
    lcm = c * d // math.gcd(c, d)
    total = a + 1          # integers 0..a inclusive
    by_c = a // c + 1      # multiples of c in 0..a (0 counts)
    by_d = a // d + 1      # multiples of d in 0..a
    by_both = a // lcm + 1 # multiples of both
    return total - by_c - by_d + by_both
# read A B C D and count integers in [A, B] divisible by neither C nor D
# via the prefix-count difference solve(B) - solve(A-1)
A,B,C,D=map(int,input().split())
print(solve(B,C,D)-solve(A-1,C,D))
| Aasthaengg/IBMdataset | Python_codes/p02995/s995372059.py | s995372059.py | py | 184 | python | en | code | 0 | github-code | 90 |
34537173369 | import collections
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Return the smallest substring of `s` containing every character of
        `t` (with multiplicity), or '' if none exists. LeetCode 76, sliding
        window in O(len(s) + len(t)).
        """
        need = collections.defaultdict(int)
        # need[c] counts how many of character c are still required
        for c in t :
            need[c]+=1
        needCnt = len(t)
        start = 0
        res=(0,float('inf'))
        for end ,c in enumerate(s):
            # a still-needed character was found: one fewer outstanding
            if need[c]>0:
                needCnt-=1
            # every consumed character is decremented, possibly below zero
            # for characters not in t (or surplus ones)
            need[c]-=1
            # window [start, end] covers all of t: shrink from the left
            if needCnt==0:
                while True:
                    c = s[start]
                    # need[c] == 0 means s[start] is essential — stop shrinking
                    if need[c] ==0:
                        break
                    need[c]+=1
                    start+=1
                # record the window if it beats the best one so far
                if end-start<res[1]-res[0]:
                    res=(start,end)
                # drop the essential left character and slide forward so the
                # search for the next valid window can continue
                need[s[start]]+=1
                needCnt+=1
                start+=1
        # res[1] stays inf when no window ever covered t
        return '' if res[1]>len(s) else s[res[0]:res[1]+1]
if __name__ == '__main__':
    # ad-hoc check: the minimum window of "ADOBECODEBANC" covering "ABC" is "BANC"
    s="ADOBECODEBANC"
    t = "ABC"
    test = Solution()
    print(test.minWindow(s,t))
37845749710 | from .embedding import WordEmbedding
from .data import load_data
from .hash_str import hash_str, get_csci_salt
from .cosine_sim import cosine_similarity
# from .find_friends import (
# calculate_distance,
# print_distancefile,
# salted_hash,
# return_vector,
# )
import pandas as pd
import os
def main(args=None):
    """Find the 5 survey responses closest (by cosine distance of word
    embeddings) to the author's own response and print them.

    Reads data/project.parquet, caches the pairwise distances in
    data/distance_to_peers.parquet, then merges and prints the top 5.

    NOTE(review): several inner functions use *call expressions* as default
    argument values (e.g. ``calculated_embeddings=readin_data()``). These are
    evaluated once, when ``main`` runs and the defs execute — an intentional
    (if unusual) one-shot caching trick here, not a per-call default.
    """
    def readin_data():
        # load the survey data and embed every 'project' text into a vector
        cwd = os.getcwd()
        data_dir = os.path.abspath(os.path.join(os.getcwd(), ".", "data"))
        file_to_use = os.path.join(data_dir, "project.parquet")
        peer_distance_filename = os.path.join(data_dir, "distance_to_peers.parquet")
        data = load_data(file_to_use)
        # create the vector representation for each survey entry
        # Note: this result will be a Series object preserving the index with vectors inside
        embedding = WordEmbedding.from_files("data/words.txt", "data/vectors.npy.gz")
        embeddings = data["project"].apply(embedding.embed_document)
        return embeddings
    def get_dataindex():
        # return the index (hashed student ids) of the survey dataframe
        cwd = os.getcwd()
        data_dir = os.path.abspath(os.path.join(os.getcwd(), ".", "data"))
        file_to_use = os.path.join(data_dir, "project.parquet")
        data = load_data(file_to_use)
        return data.index.values
    def peerdistance_filename():
        # path of the cached distance file inside ./data
        cwd = os.getcwd()
        data_dir = os.path.abspath(os.path.join(os.getcwd(), ".", "data"))
        file_to_use = os.path.join(data_dir, "project.parquet")
        peer_distance_filename = os.path.join(data_dir, "distance_to_peers.parquet")
        return peer_distance_filename
    def print_distancefile(dataframe_to_write, path_to_file=peerdistance_filename()):
        """
        did not have time to implement atomicwriter - issues with _netrc file and install
        :param path_to_file: what file you want to check if is written, then write to
        :return: parquet file in data directory
        """
        if os.path.exists(path_to_file):
            # this would be logged if Logging
            print("File already exists! Moving on...")
            pass
        else:
            # TODO - implement atomic_write
            print("Printing file...")
            dataframe_to_write.to_parquet(path_to_file, compression=None)
    def salted_hash(word, salt=get_csci_salt()):
        """
        properly formats the salted hash to work with functions in this application

        :param word: str to hash
        :param salt: if you want to add a specific salt
        :return: str
        """
        # NOTE(review): both branches are identical; the if/else is redundant
        if salt:
            return hash_str(some_val=word, salt=salt).hex()[:8]
        else:
            return hash_str(some_val=word, salt=salt).hex()[:8]
    def return_vector(student_name, calculated_embeddings=readin_data()):
        """
        implementing a way to return corresponding vectors

        :param student_name: str to get vector of
        :param calculated_embeddings: prepared embeddings of words
        :return: vector
        """
        return calculated_embeddings.loc[student_name]
    def calculate_distance(myname="casey patch", students_input=get_dataindex()):
        """
        :param myname: base string to compare others' descriptions to
        :param students_input: list of inputs to compare to base string
        :return: dataframe containing distance from each student input to base input indexed on student input id
        """
        myself = salted_hash(myname)
        myself_vector = return_vector(myself)
        students = students_input
        list_of_student_ids = list(students)
        students_vector = []
        for x in students:
            students_vector.append(return_vector(x))
        # cosine similarity of my vector against every student's vector
        cos_sim_myselftostudents = list(
            map(lambda y: cosine_similarity(myself_vector, y), students_vector)
        )
        # distance = 1 - similarity
        distance_list = []
        for x in cos_sim_myselftostudents:
            distance_list.append(1 - x)
        distance = pd.DataFrame(
            distance_list, index=list_of_student_ids, columns=["distance"]
        )
        return distance
    # --- driver: compute, cache, reload, merge, and report the closest 5 ---
    cwd = os.getcwd()
    data_dir = os.path.abspath(os.path.join(os.getcwd(), ".", "data"))
    file_to_use = os.path.join(data_dir, "project.parquet")
    peer_distance_filename = os.path.join(data_dir, "distance_to_peers.parquet")
    data = load_data(file_to_use)
    distance = calculate_distance()
    print_distancefile(dataframe_to_write=distance)
    loaded_distance = load_data(peer_distance_filename)
    merged_df = pd.merge(loaded_distance, data, left_index=True, right_index=True)
    closest_5students = merged_df.nsmallest(5, ["distance"])
    # check to make sure I am in the list (my own distance to myself is 0)
    if salted_hash("casey patch") in closest_5students.index:
        pass
    else:
        # this might not be the correct error to raise?
        raise IndexError
    for friend, row in closest_5students.iterrows():
        print(
            "\nStudent (first entry is me!)\n",
            "distance:\n",
            row["distance"],
            "\nresponse: \n",
            row["project"],
        )
| patchcasey/2019fa-pset-3-patchcasey | pset_3/cli.py | cli.py | py | 5,041 | python | en | code | 0 | github-code | 90 |
1681126008 | #ฮตฮนฯฮฑฮณฯฮณฮฎ ฮฑฯฮฑฯฮฑฮฏฯฮทฯฯฮฝ ฮฒฮนฮฒฮปฮนฮฟฮธฮทฮบฯฮฝ
import numpy as np
import cv2
import tkinter as tk
from tkinter import filedialog
from PIL import ImageTk,Image
from tkinter import messagebox
import filetype
def object_det_MobileNetSSD():
    """Run MobileNetSSD object detection on the globally selected image.

    Reads the global `input_image` path, draws labeled boxes on detections
    with confidence > 0.2, and stores three global PIL images: `imageSSD`
    (full size), `imageSSD1` (single-view size) and `imageSSD2`
    (comparison-view size). Returns (imageSSD1, imageSSD2).
    """
    modelConfiguration = "MobileNetSSD_deploy.prototxt.txt"
    modelWeights = "MobileNetSSD_deploy.caffemodel"
    # class names the model can recognize, plus one random color per class
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
    # load the Caffe model
    print("[INFO] loading model...")
    model = cv2.dnn.readNetFromCaffe(modelConfiguration, modelWeights)
    # load the user's image and build an input blob for the network
    image = cv2.imread(input_image)
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)
    # forward the blob through the network to obtain the detections
    print("[INFO] computing object detections...")
    model.setInput(blob)
    detections = model.forward()
    for i in np.arange(0, detections.shape[2]):
        # confidence (probability) of this detection
        confidence = detections[0, 0, i, 2]
        # skip weak detections below the minimum confidence
        if confidence > 0.2:
            # class index into CLASSES, plus the box scaled to image size
            idx = int(detections[0, 0, i, 1])
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # draw the detection: box plus "label: confidence%" text
            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
            print("[INFO] {}".format(label))
            cv2.rectangle(image, (startX, startY), (endX, endY),
                COLORS[idx], 2)
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(image, label, (startX, y),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 1)
    global imageSSD1  # image shown when MobileNetSSD alone is selected
    global imageSSD2  # image shown in the side-by-side comparison view
    global imageSSD   # full-size image used when saving
    # BGR -> RGB and numpy -> PIL so tkinter can display/save it
    imageSSD = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    width, height = imageSSD.size
    # resize for display depending on portrait vs landscape orientation
    if int(width)<int(height): imageSSD1 = imageSSD.resize((400, 552), Image.ANTIALIAS)
    else: imageSSD1 = imageSSD.resize((900, 552), Image.ANTIALIAS)
    if int(width)<int(height): imageSSD2 = imageSSD.resize((350, 450), Image.ANTIALIAS)
    else: imageSSD2 = imageSSD.resize((485, 310), Image.ANTIALIAS)
    return imageSSD1,imageSSD2
def object_det_Yolo():
    """Run YOLOv3 object detection on the globally selected image.

    Reads the global `input_image` path, draws labeled boxes on detections
    with confidence > 0.5 (after NMS), and stores three global PIL images:
    `imageYolo` (full size), `imageYolo1` (single-view size) and `imageYolo2`
    (comparison-view size). Returns (imageYolo1, imageYolo2).
    """
    whT = 320
    confThreshold =0.5  # detections below 50% confidence are discarded
    nmsThreshold= 0.2
    classesFile = "coco.names"
    classNames = []
    # build the class-name list from coco.names (one class per line)
    with open(classesFile, 'rt') as f:
        classNames = f.read().rstrip('\n').split('\n')
    modelConfiguration = "yolov3.cfg"
    modelWeights = "yolov3.weights"
    net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    def findObjects(outputs,img):
        # collect candidate boxes above the confidence threshold, run NMS,
        # then draw the surviving boxes and labels onto img in place
        hT, wT, cT = img.shape
        bbox = []
        classIds = []
        confs = []
        for output in outputs:
            for det in output:
                scores = det[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                # skip weak detections below the minimum confidence
                if confidence > confThreshold:
                    # convert the normalized center/size to pixel box coords
                    w,h = int(det[2]*wT) , int(det[3]*hT)
                    x,y = int((det[0]*wT)-w/2) , int((det[1]*hT)-h/2)
                    bbox.append([x,y,w,h])
                    classIds.append(classId)
                    confs.append(float(confidence))
        indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
        for i in indices:
            i = i[0]
            box = bbox[i]
            x, y, w, h = box[0], box[1], box[2], box[3]
            # box around the object plus "NAME confidence%" text above it
            cv2.rectangle(img, (x, y), (x+w,y+h), (255, 0 , 255), 2)
            cv2.putText(img,f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%',
                        (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 255), 0)
    # load the user's image and build an input blob for the network
    img = cv2.imread(input_image)
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    # annotate img in place with the recognized objects
    findObjects(outputs,img)
    global imageYolo1  # image shown when YOLO alone is selected
    global imageYolo2  # image shown in the side-by-side comparison view
    global imageYolo   # full-size image used when saving
    # BGR -> RGB and numpy -> PIL so tkinter can display/save it
    imageYolo = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    width, height = imageYolo.size
    # resize for display depending on portrait vs landscape orientation
    if int(width)<int(height): imageYolo1 = imageYolo.resize((400, 552), Image.ANTIALIAS)
    else: imageYolo1 = imageYolo.resize((900, 552), Image.ANTIALIAS)
    if int(width)<int(height): imageYolo2 = imageYolo.resize((350, 450), Image.ANTIALIAS)
    else: imageYolo2 = imageYolo.resize((485, 310), Image.ANTIALIAS)
    return imageYolo1,imageYolo2
def model_compare():
    """Run both detectors on the selected image and show them side by side."""
    object_det_MobileNetSSD()
    object_det_Yolo()
    # left pane: YOLO result; right pane: MobileNetSSD result
    img1 = ImageTk.PhotoImage(imageYolo2)
    panel1.config(image=img1)
    panel1.image=img1
    img2=ImageTk.PhotoImage(imageSSD2)
    panel2.config(image=img2)
    panel2.image=img2
    # clear the single-model panel so a previously shown single-model
    # result does not linger behind the comparison view
    panel.config(image="")
    panel.image=""
def check_MobileNetSSD() :
    """Handle the MobileNetSSD checkbutton: enforce mutual exclusion and
    restore the button layout. Returns the checkbutton state (0 or 1)."""
    global val1
    # 0 when MobileNetSSD is unchecked, 1 when checked
    val1=is_checked1.get()
    if val1== 1:
        # MobileNetSSD selected: untick the other two options
        is_checked2.set(0)
        is_checked3.set(0)
        # image-selection button back to its default position
        button1.place(x=390,y=625)
        # save button moved off-window until there is something to save
        button2.place(x=590,y=900)
    return val1
def check_YOLO():
    """Handle the YOLO checkbutton: enforce mutual exclusion and restore
    the button layout. Returns the checkbutton state (0 or 1)."""
    global val2
    val2=is_checked2.get()
    if val2 == 1:
        # YOLO selected: untick the other two options
        is_checked1.set(0)
        is_checked3.set(0)
        # image-selection button back to its default position
        button1.place(x=390,y=625)
        # save button moved off-window until there is something to save
        button2.place(x=590,y=900)
    return val2
def check_compare():
    """Handle the comparison checkbutton: enforce mutual exclusion and
    restore the button layout. Returns the checkbutton state (0 or 1)."""
    global val3
    val3=is_checked3.get()
    if val3==1:
        # comparison selected: untick both single-model options
        is_checked1.set(0)
        is_checked2.set(0)
        # image-selection button back to its default position
        button1.place(x=390,y=625)
        # save button moved off-window until there is something to save
        button2.place(x=590,y=900)
    return val3
def model_choice():
    """Dispatch on the selected mode (SSD / YOLO / compare), run the
    detection on the chosen file and lay out the result panels.

    Shows an error dialog (Greek text) when the chosen file is not an image
    or when no model was selected.
    """
    try:
        if not filetype.is_image(input_image):
            # chosen file is not an image: show "file format not supported"
            messagebox.showerror("Error","ฮฮตฮฝ ฯฯฮฟฯฯฮทฯฮฏฮถฮตฯฮฑฮน ฮฑฯฯฯ ฯฮฟ format ฮฑฯฯฮตฮฏฮฟฯ.")
        # refresh val1/val2/val3 from the checkbuttons
        check_MobileNetSSD()
        check_YOLO()
        check_compare()
        if val1==1:
            object_det_MobileNetSSD()
            img1 = ImageTk.PhotoImage(imageSSD1)
            # show the detected image on the single-model panel, clear the
            # comparison panels/titles, and move them off the visible area
            panel.config(image=img1)
            panel.image=img1
            panel1.config(image="")
            panel1.image=""
            panel2.config(image="")
            panel2.image=""
            panel2.place(x=994,y=150)
            title1.config(text="")
            title1.place(x=15,y=100)
            title2.config(text="")
            title2.place(x=990,y=100)
            # selection button to the left, save button inside the window
            button1.place(x=290,y=625)
            button2.place(x=590,y=625)
        elif val2==1:
            object_det_Yolo()
            img2 = ImageTk.PhotoImage(imageYolo1)
            # same layout handling as the SSD branch, with the YOLO result
            panel.config(image=img2)
            panel.image=img2
            panel1.config(image="")
            panel1.image=""
            panel2.config(image="")
            panel2.image=""
            panel2.place(x=994,y=150)
            title1.config(text="")
            title1.place(x=15,y=100)
            title2.config(text="")
            title2.place(x=990,y=100)
            button1.place(x=290,y=625)
            button2.place(x=590,y=625)
        elif val3==1:
            model_compare()
            panel2.place(x=500,y=150)
            # label each side-by-side image with its model name
            title1.config(text="YOLO")
            title1.place(x=220,y=100)
            title2.config(text="MobileNetSSD")
            title2.place(x=650,y=100)
            button1.place(x=290,y=625)
            button2.place(x=490,y=625)
        elif filetype.is_image(input_image):
            # an image was chosen but no model is ticked
            messagebox.showerror("Error", "ฮฮตฮฝ ฮญฯฮตฮนฯ ฮตฯฮนฮปฮญฮพฮตฮน model.")
    # HACK: bare except silently swallows every failure (e.g. input_image
    # undefined when the file dialog was cancelled) — consider narrowing
    except:pass
def showinstructions():
    """Show the usage-instructions dialog (Greek text, reconstructed from a
    mojibake-damaged source)."""
    messagebox.showinfo(title="ฮคฯฯฯฮฟฮน ฯฯฮฎฯฮทฯ",message='''
    ======HOW TO USE THIS PROGRAM=======

    1)ฮฃฯฮฟ menu 'Detection Models' ฮตฯฮนฮปฮญฮณฮตฯฮต ฯฮฟ model ฯฮฟ ฮฟฯฮฟฮฏฮฟ ฮธฮฑ ฯฯฮฑฮณฮผฮฑฯฮฟฯฮฟฮฏฮทฯฮตฮน ฯฮทฮฝ ฮฑฮฝฮฏฯฮฝฮตฯฯฮท ฯฮทฯ ฮตฮนฮบฯฮฝฮฑฯ
    ฯฮฟฯ ฮธฮฑ ฮตฯฮนฮปฮญฮพฮตฯฮต.
    ฮฯฮตฮนฯฮฑ ฯฮฑฯฮฎฯฯฮต ฯฮฟ ฮบฮฟฯฮผฯฮฏ: 'ฮฯฮนฮปฮฟฮณฮฎ ฮฮนฮบฯฮฝฮฑฯ' ฮบฮฑฮน ฮตฯฮนฮปฮญฮพฯฮต
    ฯฮทฮฝ ฮตฮนฮบฯฮฝฮฑ ฯฮฟฯ ฮธฮญฮปฮตฯฮต ฮณฮนฮฑ ฮฑฮฝฮฑฮณฮฝฯฯฮนฯฮท.
    ฮฯฮฟฯ ฮตฮผฯฮฑฮฝฮนฯฯฮตฮฏ ฮท ฮฑฮฝฮฑฮณฮฝฯฯฮนฯฮผฮญฮฝฮท ฮตฮนฮบฯฮฝฮฑ ฮผฯฮฟฯฮตฮฏฯฮต ฮฝฮฑ ฯฮทฮฝ ฮฑฯฮฟฮธฮทฮบฮตฯฯฮตฯฮต ฯฮฑฯฯฮฝฯฮฑฯ ฯฮฟ ฮบฮฟฯฮผฯฮฏ 'ฮฯฮฟฮธฮฎฮบฮตฯฯฮท ฮฮนฮบฯฮฝฮฑฯ'.

    2)ฮฃฯฮฟ menu 'ฮฮตฮนฯฮฟฯฯฮณฮฏฮตฯ' ฯฯฮฌฯฯฮฟฯฮฝ 3 ฮตฮฝฯฮฟฮปฮญฯ.
    ฮฮต ฯฮทฮฝ ฮตฮฝฯฮฟฮปฮฎ 'ฮฃฯฮณฮบฯฮนฯฮท' ฯฮฑฯฮฟฯฯฮนฮฌฮถฮฟฮฝฯฮฑฮน ฯฯฮฟ ฯฮฑฯฮฌฮธฯฯฮฟ
    ฯฮฑ ฮฑฯฮฟฯฮตฮปฮญฯฮผฮฑฯฮฑ ฮบฮฑฮน ฯฯฮฝ 2 models ฮณฮนฮฑ ฯฮทฮฝ ฮฏฮดฮนฮฑ ฮตฮนฮบฯฮฝฮฑ.
    ฮฮต ฯฮทฮฝ ฮตฮฝฯฮฟฮปฮฎ 'Reset' ฮตฯฮฑฮฝฮญฯฯฮตฯฮฑฮน ฯฮฟ ฯฮฑฯฮฌฮธฯฯฮฟ
    ฯฯฮทฮฝ ฮฑฯฯฮนฮบฮฎ ฯฮฟฯ ฮบฮฑฯฮฌฯฯฮฑฯฮท.
    ฮฮต ฯฮทฮฝ ฮตฮฝฯฮฟฮปฮฎ 'Exit' ฯฯฮฑฮณฮผฮฑฯฮฟฯฮฟฮตฮฏฯฮฑฮน ฮญฮพฮฟฮดฮฟฯ ฮฑฯฯ ฯฮฟ
    ฯฯฯฮณฯฮฑฮผฮผฮฑ.
    ''')
def showhelp():
    """Show the model-information dialog (Greek text, reconstructed from a
    mojibake-damaged source)."""
    messagebox.showinfo(title="ฮ ฮปฮทฯฮฟฯฮฟฯฮฏฮตฯ",message='''
    ==========MODELS EXPLANATION==========

    1) YOLO:
    ฮคฮฟ ฮผฮฟฮฝฯฮญฮปฮฟ YOLO ฮฑฮฝฮฑฮณฮฝฯฯฮฏฮถฮตฮน ฯฮตฯฮนฯฯฯฯฮตฯฮฑ ฮฑฮฝฯฮนฮบฮตฮฏฮผฮตฮฝฮฑ
    ฮฏฮดฮนฮฑฯ ฮบฮปฮฌฯฮทฯ ฯฯฮทฮฝ ฮฏฮดฮนฮฑ ฮตฮนฮบฯฮฝฮฑ ฯฮต ฯฯฮญฯฮท ฮผฮต ฯฮฟ
    MobileNetSSD.

    2) MobileNetSSD:
    ฮ ฯฮฟฮฟฯฮฏฮถฮตฯฮฑฮน ฮณฮนฮฑ ฮบฮนฮฝฮทฯฮญฯ ฯฯฯฮบฮตฯฮญฯ ฮบฮน ฮญฯฯฮน ฯฯฯฮตฯฮตฮฏ
    ฯฯฮฝฮฎฮธฯฯ ฯฯฮทฮฝ ฮฑฮบฯฮฏฮฒฮตฮนฮฑ ฯฮต ฯฯฮญฯฮท ฮผฮต ฯฮฑ YOLO
    models.
    ''')
def reset():
    """Restore the window to its initial state: clear all image panels and
    titles and move the buttons back to their default positions."""
    panel.config(image="")
    panel.image=""
    panel1.config(image="")
    panel1.image=""
    panel2.config(image="")
    panel2.image=""
    title1.config(text="")
    title2.config(text="")
    # save button off-window, selection button back to center
    button2.place(x=590,y=900)
    button1.place(x=390,y=625)
def callback():
    """'Select image' button handler: open a file dialog, remember the
    chosen path in the global `input_image`, then run the selected model."""
    global input_image
    input_image= filedialog.askopenfilename(title="ฮฯฮนฮปฮญฮพฯฮต ฮตฮนฮบฯฮฝฮฑ ฯฮฑฯฮฑฮบฮฑฮปฯ")
    model_choice()
def saveimage():
    """'Save image' button handler: ask for a destination (.jpg by default)
    and save the currently displayed detection result there.

    In comparison mode the YOLO and MobileNetSSD images are pasted side by
    side onto one canvas before saving.
    """
    filename = filedialog.asksaveasfile(mode='w', defaultextension=".jpg")
    if not filename:
        # user cancelled the save dialog
        return
    if val1==1: imageSSD.save(filename)
    elif val2==1: imageYolo.save(filename)
    elif val3==1:
        # build a double-width canvas and paste YOLO (left) + SSD (right)
        image1=imageYolo2
        image1_size=image1.size
        new_image = Image.new('RGB',(2*image1_size[0], image1_size[1]), (250,250,250))
        new_image.paste(image1,(0,0))
        new_image.paste(imageSSD2,(image1_size[0],0))
        new_image.save(filename)
# ---- top-level GUI construction (runs at import time; this is a script) ----
root=tk.Tk()
root.title("Object Detection-ฮฮผฮฌฮดฮฑ 12")
root.resizable(False, False)
window_height = 670   # window height in pixels
window_width = 1000   # window width in pixels
# query the user's screen size so the window can be centered
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x_cordinate = int((screen_width/2) - (window_width/2))
y_cordinate = int((screen_height/2) - (window_height/2))
# place the window at (roughly) the center of the screen
root.geometry("{}x{}+{}+{}".format(window_width, window_height, x_cordinate-45, y_cordinate-45))
root.configure(background='grey')
# checkbutton state: MobileNetSSD / YOLO / comparison (mutually exclusive)
is_checked1 = tk.IntVar()
is_checked2 = tk.IntVar()
is_checked3 = tk.IntVar()
# menu bar with the model-selection and commands menus
menubar=tk.Menu(root)
selection=tk.Menu(menubar,tearoff=0)
commands=tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label="Detection Models",menu=selection)
selection.add_checkbutton(label="MobileNetSSD",onvalue = 1, offvalue = 0, variable = is_checked1,
                       command = check_MobileNetSSD)
selection.add_checkbutton(label="YOLO",onvalue = 1, offvalue = 0, variable = is_checked2,
                       command = check_YOLO)
selection.add_separator()
selection.add_command(label="ฮ ฮปฮทฯฮฟฯฮฟฯฮฏฮตฯ",command=showhelp)
selection.add_separator()
menubar.add_cascade(label="ฮฮตฮนฯฮฟฯฯฮณฮฏฮตฯ",menu=commands)
commands.add_checkbutton(label="ฮฃฯฮณฮบฯฮนฯฮท",onvalue = 1, offvalue = 0, variable = is_checked3, command = check_compare)
commands.add_command(label="ฮฯฮฑฮฝฮฑฯฮฟฯฮฌ",command=reset)
commands.add_separator()
commands.add_command(label="Exit",command=root.destroy)
commands.add_separator()
menubar.add_cascade(label="ฮฮดฮทฮณฮฏฮตฯ",command=showinstructions)
root.config(menu=menubar)
# header label across the top of the window
w1=tk.Label(root,text="ฮฮฝฮฑฮณฮฝฯฯฮนฯฮท ฮฮฝฯฮนฮบฮตฮนฮผฮญฮฝฯฮฝ",font = "Impact 36", bg ='lightgray', width = 900, borderwidth=4, relief="solid")
w1.pack(fill="x")
# panel: image shown when a single model is selected
panel = tk.Label(root,bg="grey")
panel.pack()
# panel1: left image (YOLO) in comparison mode
panel1=tk.Label(root,bg="grey")
panel1.pack()
panel1.place(x=10,y=150)
# panel2: right image (MobileNetSSD) in comparison mode
panel2=tk.Label(root,bg="grey")
panel2.pack()
panel2.place(x=500,y=150)
# model-name titles shown above the comparison images
title1=tk.Label(root,bg="grey",font = "Impact 25")
title1.pack()
title1.place(x=220,y=100)
title2=tk.Label(root,bg="grey",font="Impact 25")
title2.pack()
title2.place(x=650,y=100)
# buttons: image selection and image saving (save starts off-window)
button1=tk.Button(root,text="ฮฯฮนฮปฮฟฮณฮฎ ฮฮนฮบฯฮฝฮฑฯ",font = "Impact 15", fg = "lightgray",
                  highlightbackground="lightgray", bg ="black",command=callback)
button1.pack()
button1.place(x=390,y=625)
button2=tk.Button(root,text="ฮฯฮฟฮธฮฎฮบฮตฯฯฮท ฮฮนฮบฯฮฝฮฑฯ",font = "Impact 15", fg = "lightgray",
                  highlightbackground="lightgray", bg ="black",command=saveimage)
button2.pack()
button2.place(x=590,y=900)  # positioned outside the window until needed
root.mainloop()
##blob= Binary Large OBject.ฮคฮฟ blob ฮตฮฏฮฝฮฑฮน ฮญฮฝฮฑฯ ฯฯฯฮฟฯ ฮดฮตฮดฮฟฮผฮญฮฝฯฮฝ ฯฮฟฯ
ฮผฯฮฟฯฮตฮฏ ฮฝฮฑ ฮฑฯฮฟฮธฮทฮบฮตฯฯฮตฮน ฮดฮตฮดฮฟฮผฮญฮฝฮฑ ฯฮต ฮดฯ
ฮฑฮดฮนฮบฮฎ ฮผฮฟฯฯฮฎ.
#ฮฮฏฮฝฮฑฮน ฮดฮนฮฑฯฮฟฯฮตฯฮนฮบฯ ฯฮผฯฯ ฮฑฯฯ ฯฮฑ ฯฮตฯฮนฯฯฯฯฮตฯฮฟฯ
ฯ ฯฯฯฮฟฯ
ฯ ฮดฮตฮดฮฟฮผฮญฮฝฯฮฝ (ฯฯฯฯ integers, floating point numbers, characters, and strings),ฯฮฟฯ
ฮฑฯฮฟฮธฮทฮบฮตฯฮฟฯ
ฮฝ ฮณฯฮฌฮผฮผฮฑฯฮฑ ฮบฮฑฮน ฮฑฯฮนฮธฮผฮฟฯฯ.
#ฮฯ
ฯฮนฮฑฯฯฮนฮบฮฌ ฯฮฟ BLOB ฯฯฮทฯฮนฮผฮฟฯฮฟฮตฮฏฯฮฑฮน ฮณฮนฮฑ ฮฑฯฮฟฮธฮฎฮบฮตฯ
ฯฮท ฮฑฯฯฮตฮฏฯฮฝ ฯฯฯฯ ฮตฮนฮบฯฮฝฮตฯ,ฮฒฮฏฮฝฯฮตฮฟ ฮบฮฑฮน ฮฑฯฯฮตฮฏฮฑ ฮฎฯฮฟฯ
, ฮบฮฑฮธฯฯ ฯฮฑฯฮญฯฮตฮน ฮผฮตฮณฮฌฮปฮตฯ ฯฮฑฯฯฯฮทฯฮตฯ ฮผฮตฯฮฑฯฮฟฯฮญฯ ฮฑฯฯฮตฮฏฯฮฝ ฯฮฟฮปฯ
ฮผฮญฯฯฮฝ. | Manolisp52/Object_detection | 2model_objectdetection-v4.py | 2model_objectdetection-v4.py | py | 23,444 | python | el | code | 0 | github-code | 90 |
12243072148 | import datetime
from django.utils import timezone
from utils import *
RCD_PER_PAGE = 8
def user_own_events(request, userid):
    """JSON view: paginated list of the events whose topics *userid* created."""
    context = {}
    context['user_info'] = get_object_or_404(UserInfo, user__username=userid)
    event_list = Event.objects.filter(topic__user__exact=context['user_info'].user).order_by("-time_period__start")
    paginator = Paginator(event_list, RCD_PER_PAGE)
    context['pages'] = [None] * paginator.num_pages
    page = request.GET.get('page')
    if page is not None:  # idiom fix: compare with None via `is not`
        context['pages'][int(page) - 1] = 1  # flag the active page (1-based query param)
    context['own_events'] = getPageContent(paginator, page)
    return render(request, 'event/user_own_events.json', context, content_type="application/json")
@login_required
def user_event_applications(request):
    """JSON view: paginated applications to events owned by the logged-in user."""
    context = {}
    application_list = EventApplication.objects.filter(event__topic__user__exact=request.user).order_by("-created_on")
    paginator = Paginator(application_list, RCD_PER_PAGE)
    context['pages'] = [None] * paginator.num_pages
    page = request.GET.get('page')
    if page is not None:  # idiom fix: compare with None via `is not`
        context['pages'][int(page) - 1] = 1
    context['event_applications'] = getPageContent(paginator, page)
    return render(request, 'event/user_event_applications.json', context, content_type="application/json")
def user_participating_events(request, userid):
    """JSON view: paginated list of the events *userid* participates in."""
    context = {}
    context['user_info'] = get_object_or_404(UserInfo, user__username=userid)
    event_list = context['user_info'].participating_events.order_by("-time_period__start")
    paginator = Paginator(event_list, RCD_PER_PAGE)
    context['pages'] = [None] * paginator.num_pages
    page = request.GET.get('page')
    if page is not None:  # idiom fix: compare with None via `is not`
        context['pages'][int(page) - 1] = 1
    context['participanting_events'] = getPageContent(paginator, page)
    return render(request, 'event/user_participating_events.json', context, content_type="application/json")
@login_required
@transaction.atomic
def create_event(request, forum_name):
    """Create an event topic in *forum_name*: GET shows the form, POST saves it."""
    context = {}
    forum = get_object_or_404(Forum, name=forum_name)
    context['forum'] = forum
    if request.method == 'GET':
        # Pre-fill a one-hour window starting now.
        context['form'] = EventForm(initial={'start': timezone.now(), 'end': timezone.now() + datetime.timedelta(minutes=60)})
        return render(request, 'event/create_event.html', context)
    form = EventForm(request.POST, user=request.user, forum=forum)
    if not form.is_valid():
        context['form'] = form
        return render(request, 'event/create_event.html', context)
    new_event = form.save()
    # Attach ownership/forum after the form save, then persist again.
    new_event.topic.user = request.user
    new_event.topic.forum = forum
    new_event.save()
    return redirect(reverse('in_forum', kwargs={"forum_name": forum_name}) + "?filter_tag=Event")
@transaction.atomic
def load_events(request, forum_name):
    """JSON view: all not-yet-finished events of *forum_name*.

    Bug fix: the original passed the function object ``datetime.date.today``
    (uncalled) as the filter value; it is now called so the comparison uses
    today's actual date.
    """
    forum = get_object_or_404(Forum, name=forum_name)
    events = Event.objects.filter(topic__forum=forum).filter(time_period__end__gte=datetime.date.today())
    return render(request, 'event/events.json', {"events": events}, content_type="application/json")
@login_required
@transaction.atomic
def edit_event(request, topic_id):
    """Edit an event; only its creator may access.  GET shows forms, POST saves."""
    event = get_object_or_404(Event, topic_id__exact=topic_id)
    if event.topic.user.username != request.user.username:
        raise PermissionDenied()
    context = {}
    context['topic_id'] = topic_id
    context['participants'] = event.participants.all()
    if request.method == 'GET':
        context['form'] = EventForm(event=event)
        context['participant_form'] = ParticipantForm(event=event)
        return render(request, 'event/edit_event.html', context)
    form = EventForm(request.POST, event=event)
    if not form.is_valid():
        context['form'] = form
        return render(request, 'event/edit_event.html', context)
    form.save()
    return redirect(reverse('in_topic', kwargs={'topic_id': topic_id}))
@login_required
def apply_for_event(request, topic_id):
    """Apply to join a private event; notifies the owner on success.

    Fixes: call ``datetime.date.today()`` (the original passed the function
    object), count participants with ``.count()`` instead of fetching every
    row, drop an unused ``User`` lookup and a stray semicolon.
    """
    if request.method == 'POST':
        event = get_object_or_404(Event, topic__id__exact=topic_id)
        # Open for application: event not finished, seats left, no duplicate application.
        event_available = (Event.objects.filter(topic__exact=event.topic).filter(time_period__end__gte=datetime.date.today()).exists()) and (event.participants.count() < event.max_participants) and not EventApplication.objects.filter(applicant__exact=request.user).filter(event__exact=event).exists()
        if event.public or event.participants.filter(user__exact=request.user).exists() or not event_available:
            raise PermissionDenied()
        friend = event.topic.user
        context = {}
        apply_form = EventApplicationForm(request.POST)
        if not apply_form.is_valid():
            return HttpResponse()
        context['message'] = apply_form.cleaned_data['message']
        new_application = EventApplication(event=event, applicant=request.user, message=context['message'])
        new_application.save()
        context['application'] = new_application
        notification = Notification(user=friend, notification_type="event_app", key=event.topic.id)
        notification.save()
        return redirect(reverse('in_topic', kwargs={'topic_id': topic_id}))
    return HttpResponse()
@transaction.atomic
def add_event_participant(event, new_participant, notify=False):
    """Add *new_participant* (a User) to *event*; optionally notify them.

    Removed dead code: the original built a ``context`` dict and a formatted
    timestamp that were never used.
    """
    event.participants.add(new_participant.userinfo)
    event.save()
    if notify:
        notification = Notification(user=new_participant, notification_type="event_add", key=event.topic.id)
        notification.save()
    return True
@transaction.atomic
def delete_event_participant(event, participant, notify=False):
    """Remove *participant* from *event* if present; return whether removed.

    Removed dead code: the original built a ``context`` dict and a formatted
    timestamp that were never used.
    """
    if event.participants.filter(user__exact=participant).exists():
        event.participants.remove(participant.userinfo)
        event.save()
        if notify:
            notification = Notification(user=participant, notification_type="event_del", key=event.topic.id)
            notification.save()
        return True
    return False
@login_required
@transaction.atomic
def delete_participant(request, topic_id, userid):
    """Let the event owner remove *userid* from the event, then return to the edit page."""
    target = get_object_or_404(User, username=userid)
    event = get_object_or_404(Event, topic__id__exact=topic_id)
    if event.topic.user != request.user:
        raise PermissionDenied()
    delete_event_participant(event, target, True)
    return redirect(reverse('edit_event', kwargs={"topic_id": topic_id}))
@login_required
@transaction.atomic
def add_participant(request, topic_id):
    """Owner-only JSON endpoint that adds a participant by username."""
    event = get_object_or_404(Event, topic__id__exact=topic_id)
    if event.topic.user != request.user:
        raise PermissionDenied()
    context = {}
    if request.method == 'GET':
        # NOTE(review): this form is built but never placed in `context`;
        # the template probably expects it as 'participant_form' -- confirm.
        form = ParticipantForm(event=event)
        return render(request, 'event/add_participant.json', context, content_type="application/json")
    form = ParticipantForm(request.POST, event=event)
    if not form.is_valid():
        context['participant_form'] = form
        context['successful'] = False
        return render(request, 'event/add_participant.json', context, content_type="application/json")
    new_participant = get_object_or_404(User, username__exact=form.cleaned_data['username'])
    context['successful'] = add_event_participant(event, new_participant, True)
    return render(request, 'event/add_participant.json', context, content_type="application/json")
@login_required
@transaction.atomic
def rsvp_event(request, topic_id):
    """RSVP the logged-in user to a public event (JSON result).

    Perf fix: count participants with ``.count()`` instead of materialising
    the whole queryset just to take ``len()``.
    """
    event = get_object_or_404(Event, topic__id__exact=topic_id)
    if not event.public:
        raise PermissionDenied()
    if not event.participants.filter(user__exact=request.user).exists():
        if event.participants.count() < event.max_participants:
            add_event_participant(event, request.user, False)
            return JsonResponse({'rsvp_successful': True})
        else:
            return JsonResponse({'rsvp_successful': False})
    # NOTE(review): when the user already participates this falls through and
    # returns None (HTTP 500) -- original behaviour kept; confirm intent.
@login_required
@transaction.atomic
def undo_rsvp_event(request, topic_id):
    """Withdraw the logged-in user's RSVP; reports whether anything was removed."""
    event = get_object_or_404(Event, topic__id__exact=topic_id)
    removed = delete_event_participant(event, request.user, False)
    return JsonResponse({'undo_rsvp_successful': removed})
@login_required
@transaction.atomic
def accept_event_application(request, application_id):
    """Owner accepts an application: enrol the applicant and drop the record."""
    app = get_object_or_404(EventApplication, id__exact=application_id)
    if app.event.topic.user != request.user:
        raise PermissionDenied()
    add_event_participant(app.event, app.applicant, True)
    app.delete()
    return JsonResponse({'successful': True})
@login_required
@transaction.atomic
def decline_event_application(request, application_id):
    """Owner declines an application: simply delete the record."""
    app = get_object_or_404(EventApplication, id__exact=application_id)
    if app.event.topic.user != request.user:
        raise PermissionDenied()
    app.delete()
    return JsonResponse({'successful': True})
| zhubw91/biyemaijianbing | src/cmubbs/views/views_event.py | views_event.py | py | 8,481 | python | en | code | 0 | github-code | 90 |
# Minimum cost to buy packages so that the total amounts of A and B are in
# the exact ratio Ma : Mb (stdin/stdout contest solution).
N, Ma, Mb = map(int, input().split())
a, b, c = map(list, zip(*[list(map(int, input().split())) for i in range(N)]))
ma, mb = sum(a), sum(b)
MAX = 5000  # sentinel larger than any achievable total cost
# dp[i][j][k] = minimum cost to obtain exactly j units of A and k units of B
# using a subset of the first i packages.
dp = [[[MAX] * (mb + 1) for i in range(ma + 1)] for j in range(N + 1)]
dp[0][0][0] = 0
for i in range(N):
    # Bug fix: iterate the full table with range(ma + 1) / range(mb + 1);
    # the original used range(ma) / range(mb), so states at the j == ma or
    # k == mb boundary were never carried forward to later items.
    for j in range(ma + 1):
        for k in range(mb + 1):
            dp[i + 1][j][k] = min(dp[i][j][k], dp[i + 1][j][k])
            if j + a[i] <= ma and k + b[i] <= mb:
                dp[i + 1][j + a[i]][k + b[i]] = min(dp[i + 1][j + a[i]][k + b[i]], dp[i][j][k] + c[i])
ans = MAX
x = 1
# A valid mixture uses x*Ma of A and x*Mb of B for some multiplier x >= 1.
while x * Ma <= ma and x * Mb <= mb:
    ans = min(ans, dp[N][x * Ma][x * Mb])
    x += 1
print(ans if ans < MAX else -1)
print(ans if ans<MAX else -1) | Aasthaengg/IBMdataset | Python_codes/p03806/s093620850.py | s093620850.py | py | 580 | python | en | code | 0 | github-code | 90 |
684895195 |
open_list = ['(', '[', '{']
close_list = [')', ']', '}']


def is_balanced(string):
    """Return 'Balanced' if every bracket in *string* is properly matched,
    otherwise 'Unbalanced'."""
    pending = []
    for ch in string:
        if ch in open_list:
            pending.append(ch)
        elif ch in close_list:
            expected = open_list[close_list.index(ch)]
            if pending and pending[-1] == expected:
                pending.pop()
            else:
                # Closing bracket with no matching opener on top of the stack.
                return "Unbalanced"
    return "Balanced" if not pending else "Unbalanced"
if __name__ == '__main__':
    # Read one line from stdin and report whether its brackets are balanced.
    string = str(input().strip())
    print(is_balanced(string))
from collections import Counter

# Count substrings of S that are multiples of 2019 (contest solution).
# Reverse S so the residue of each prefix can be built incrementally from the
# low-order digits; recomputing the mod per substring was too slow (TLE).
S = input()[::-1]
# ex. 1817181712114 -> 4112171817181
# Treat the residue of the empty (0-digit) prefix as 0 so substrings that
# start at the first digit are counted when divisible by 2019.
X = [0]
# X[i+1] = (number formed by digits S[0..i]) mod 2019.
# pow(a, b, c) is modular exponentiation in C -- plain ** then % was TLE.
for i, s in enumerate(S):
    X.append((X[-1] + int(s) * pow(10, i, 2019)) % 2019)
C = Counter(X)
ans = 0
# Two prefixes with equal residue bound a substring divisible by 2019,
# so add v-choose-2 for every residue occurring v times.
for v in C.values():
    ans += v * (v - 1) // 2
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02702/s852815139.py | s852815139.py | py | 778 | python | ja | code | 0 | github-code | 90 |
import sys

stdin = sys.stdin
# Tiny stdin helpers: one int, one stripped line, one int list per line.
ns = lambda: stdin.readline().rstrip()
ni = lambda: int(ns())
na = lambda: [int(tok) for tok in stdin.readline().split()]

# code here
X = ni()
print('YES' if X in (3, 5, 7) else 'NO')
27566250962 | import sys
from os import listdir
from random import sample
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_score, recall_score, accuracy_score, roc_auc_score, fbeta_score, matthews_corrcoef, balanced_accuracy_score, cohen_kappa_score
from joblib import dump
from utils.feature import feature_est1, feature_est2
from mfa_wtmm import mfa_wtmm_2d
from wavelet_2d import select_wavelet
from functions.function_image import FunctionImage
def _extract_features(files, wavelet, tag):
    """Compute one WTMM multifractal feature vector per image file (prints progress)."""
    features = []
    for file in files:
        print(f'{tag}: {file}')
        func = FunctionImage(file)
        func.reduce_size_toHalf(2)
        holder, dh = mfa_wtmm_2d(0, func.columns - 1, 0, func.rows - 1, 0, func.eval_range,
                                 wavelet, 3, 5, 10, 0, 2, 5)
        features.append(feature_est2(holder, dh))
    return features


def train_model(imageLocation, saveLocation, wavelet, do_metrics=True):
    """Train and persist an SVM that separates Normal vs Nodule chest X-rays.

    imageLocation -- directory containing 'Normal' and 'Nodule' subfolders
    saveLocation  -- joblib dump path for the fitted pipeline
    wavelet       -- analysing wavelet handed to mfa_wtmm_2d
    do_metrics    -- when True, evaluate on the images NOT sampled for training

    Refactor: the duplicated feature-extraction loops were hoisted into
    _extract_features(), and the train/test exclusion now uses a set
    (the original list 'not in' scan was O(n^2)).
    """
    print(wavelet, do_metrics)
    normal_chestRx = [imageLocation + '/Normal/' + file for file in listdir(imageLocation + '/Normal')]
    nodule_chestRx = [imageLocation + '/Nodule/' + file for file in listdir(imageLocation + '/Nodule')]
    # Fixed-size random training samples, as in the original setup.
    sample_normal = sample(normal_chestRx, 47)
    sample_nodule = sample(nodule_chestRx, 77)
    Y = [0] * len(sample_normal) + [1] * len(sample_nodule)
    X = _extract_features(sample_normal + sample_nodule, wavelet, 'train')
    clf = make_pipeline(StandardScaler(), SVC(cache_size=1024))
    clf.fit(X, Y)
    dump(clf, saveLocation)
    if do_metrics:
        # Hold-out evaluation on everything that was not drawn for training.
        trained = set(sample_normal) | set(sample_nodule)
        metrics_sample_normal = [chestRx for chestRx in normal_chestRx if chestRx not in trained]
        metrics_sample_nodule = [chestRx for chestRx in nodule_chestRx if chestRx not in trained]
        Y_real = [0] * len(metrics_sample_normal) + [1] * len(metrics_sample_nodule)
        X = _extract_features(metrics_sample_normal + metrics_sample_nodule, wavelet, 'test')
        Y_predict = clf.predict(X)
        precision = precision_score(Y_real, Y_predict)
        print(f'precision: {precision}')
        recall = recall_score(Y_real, Y_predict)
        print(f'recall: {recall}')
        accuracy = accuracy_score(Y_real, Y_predict)
        print(f'accuracy: {accuracy}')
        roc_auc = roc_auc_score(Y_real, Y_predict)
        print(f'roc_auc: {roc_auc}')
        matthews = matthews_corrcoef(Y_real, Y_predict)
        print(f'matthews_corrcoef: {matthews}')
        youden = balanced_accuracy_score(Y_real, Y_predict)
        print(f'youden: {youden}')
        cohen_kappa = cohen_kappa_score(Y_real, Y_predict)
        print(f'cohen kappa: {cohen_kappa}')
        fbeta_2 = fbeta_score(Y_real, Y_predict, beta=2)
        print(f'fbeta: {fbeta_2}')
        fbeta_05 = fbeta_score(Y_real, Y_predict, beta=0.5)
        print(f'fbeta: {fbeta_05}')
        fbeta_1 = fbeta_score(Y_real, Y_predict, beta=1)
        print(f'fbeta: {fbeta_1}')
if __name__ == '__main__':
    usage = '''usage: python main_train.py <image_location> <save_location> <wavelet_index> [--metrics]'''
    # Bug fix: the original wrapped the argv check in try/except and built
    # Exception(usage) WITHOUT raising it, so bad invocations failed later
    # with a bare IndexError.  Validate explicitly instead.
    if len(sys.argv) < 4:
        raise SystemExit(usage)
    doMetrics = sys.argv[-1] == '--metrics'
    train_model(sys.argv[1], sys.argv[2], select_wavelet(int(sys.argv[3])), do_metrics=doMetrics)
6834544621 | from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponse
from datetime import date, timedelta
import os
import random
import folium
from uszipcode import SearchEngine
import pandas as pd
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import scipy as scipy
import datetime
from wtforms import StringField
from wtforms.validators import DataRequired
import pandas as pd
import matplotlib.pyplot as plt
import math
import json
import edisontracker.constants as constants
search = SearchEngine(simple_zipcode = True)
dat = pd.read_csv('edisontracker/static/edisontracker/csv/data.csv')
state_zip = {}
def home(request):
    """Landing page: render the index template with the category list."""
    context = {"categories": constants.categories}
    return render(request, 'edisontracker/index.html', context)
def getOptions(request):
    """Return checkbox HTML for every merchant except the chosen one.

    Bug fix: the original called ``constants.merchants.remove(choice)`` on the
    shared module-level list, permanently shrinking it on every request; we
    now mutate a copy.
    """
    choice = request.GET.get("choice")
    merchants = list(constants.merchants)  # work on a copy, not the shared constant
    merchants.remove(choice)
    display = ""
    for merchant in merchants:
        display += "<div class ='form-check form-check-inline' style= 'width: 500px'>"
        display += "<input class ='form-check-input' type='checkbox' name='merchants' value=\"" + merchant + "\" id='merchants'>"
        display += "<label class ='form-check-label' for ='merchants' >" + merchant + "</label> </div>"
    return HttpResponse(display)
# run with command: FLASK_APP=app.py; flask run
def salesHome(request):
    """Market-sales comparison page (renders the template only)."""
    return render(request, 'edisontracker/marketsales.html', {"categories": constants.categories})


def allSaleHome(request):
    """Per-company total-sales page (renders the template only)."""
    return render(request, 'edisontracker/salescompany.html', {"categories": constants.categories})
def mapGenerate(request):
    """Build the US choropleth for the requested merchant and write it to the
    static plot folder as map.html (the page loads it later via getMap)."""
    # NOTE(review): this local engine shadows the module-level `search`.
    search = SearchEngine(simple_zipcode=True)
    to_plot = request.GET.get("to_plot")
    compare = request.GET.getlist("compare[]")
    compare.append(to_plot)
    map_obj = plot_market_on_map(dat, compare, to_plot)
    # Recolour the leaflet controls before rendering to HTML.
    style_statement = '<style>.leaflet-control{color:#00FF00}</style>'
    map_obj.get_root().html.add_child(folium.Element(style_statement))
    map_html = map_obj.get_root().render()
    filename = "map"
    file = open("edisontracker/static/edisontracker/plot/" + filename + ".html", "w")
    file.write(map_html)
    file.close()
    html = HttpResponse("")
    return html
def getMap(request):
    """Serve the template that embeds the map HTML generated by mapGenerate."""
    html = render(request, 'edisontracker/map.html')
    return html
def plot_market_on_map(data, compare, to_plot):
    """Return a folium choropleth of *to_plot*'s market-share change per US state.

    *compare* is the list of merchants defining the market; weekly record
    counts per state feed market_share_change().
    """
    dat_state = data.loc[:, ['user_zip_code', 'merchant_name', 'email_day']]
    dat_state = dat_state.loc[dat_state['merchant_name'].isin(compare), :]
    # Add the state column
    dat_state['state'] = dat_state['user_zip_code'].apply(lambda x: find_state(x))
    # Convert each e-mail date ("YYYY-MM-DD") to an ISO "YYYY-WW" week label.
    # NOTE(review): day_to_week is never written to, so this cache never hits
    # and the else branch always runs -- harmless but dead code.
    year_week = []
    day_to_week = {}
    for date in np.array(dat_state["email_day"]):
        if date in day_to_week.keys():
            year_week.append(day_to_week[date])
        else:
            year = date[0:4]
            month = date[5:7]
            day = date[8:10]
            week = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
            text = str(year) + "-" + "{:02d}".format(week)
            year_week.append(text)
    dat_state["week"] = year_week
    grouped_state = dat_state.groupby('state')
    np.seterr(all="ignore")
    state_change = pd.DataFrame(columns=['State', 'Change'])
    row_to_add = 0
    for state in grouped_state:
        # Weekly sale counts per merchant within this state.
        sales = pd.DataFrame()
        all_weeks = dat_state["week"].unique()
        all_weeks.sort()
        for week in all_weeks:
            sale_count = state[1].loc[state[1]["week"] == week, "merchant_name"].value_counts()
            sales = sales.append(sale_count, ignore_index=True)
        compare = sales.fillna(0)  # NOTE(review): rebinds the `compare` parameter
        compare = compare.assign(x=np.array(range(compare.shape[0])))
        res = market_share_change(compare)
        if to_plot in list(res.keys()):
            state_change.loc[row_to_add] = [state[0], res[to_plot]]
        else:
            state_change.loc[row_to_add] = [state[0], 0]
        row_to_add += 1
    state_edges = os.path.join('edisontracker/static/edisontracker/csv/states.geojson')
    map_obj = folium.Map(
        location=[39.8283, -98.5795],
        zoom_start=4,
        tiles='OpenStreetMap'
    )
    # Build a colour scale symmetric around zero, `breaks` bins on each side.
    minimum = min(state_change["Change"])
    maximum = max(state_change["Change"])
    breaks = 5
    limit = max(abs(math.floor(minimum)), abs(math.ceil(maximum)))
    scale = list(np.histogram(np.arange(math.floor((-limit) / breaks) * breaks, 0 + 1), bins=breaks)[1])
    scale.extend(list(np.histogram(np.arange(0, math.ceil((limit) / breaks) * breaks + 1), bins=breaks)[1]))
    folium.Choropleth(
        geo_data=state_edges,
        data=state_change,
        columns=["State", "Change"],
        key_on='feature.properties.name',
        fill_color='RdYlBu',
        fill_opacity=0.8,
        line_opacity=0.6,
        threshold_scale=scale,
    ).add_to(map_obj)
    return map_obj
def market_share_change(dat):
    """Fit a linear trend to each company's market-share change and return
    {company: projected total change} (slope * last x, in percentage points).

    *dat* has one count column per company plus an ``x`` time column.
    Fixes: ``column_name is not "x"`` relied on CPython string interning
    (now ``!=``); the unused ``import sys`` was dropped.
    """
    company_names = [c for c in dat.columns if c != "x"]
    # Percentage-point change of each company's share relative to its share
    # in the first row; a zero total is treated as an equal split.
    probs = pd.DataFrame()
    for company in company_names:
        denom = dat.loc[0, dat.columns != 'x'].sum()
        if denom != 0:
            start = dat[company][0] / denom
        else:
            start = 1 / len(company_names)
        prob_row = []
        for row in range(len(dat[company])):
            denom = dat.loc[row, dat.columns != 'x'].sum()
            if denom != 0:
                prob = ((dat[company][row] / denom) - start) * 100
            else:
                prob = 0
            prob_row.append(prob)
        probs[company] = pd.Series(prob_row)
    max_x = probs.shape[0] - 1
    xrange = np.arange(0, probs.shape[0], 1)
    changes = {}
    for company in probs:
        slope, intercept, r, p, error = scipy.stats.linregress(xrange, probs[company])
        changes[company] = slope * max_x
    return changes
def find_state(zip):  # parameter name kept (shadows the builtin) for call compatibility
    """Return the full state name for *zip*, memoised in module-level ``state_zip``.

    Fixes: stop shadowing the local ``state`` name, make the implicit
    ``None`` return explicit, and only build the abbrev->name map on a cache
    miss (the original built it on every call).
    """
    if zip in state_zip:
        return state_zip[zip]
    state_abrv = search.by_zipcode(str(zip)).state
    if state_abrv is None:
        return None  # unresolvable ZIP: the original implicitly returned None
    get_full = {v: k for k, v in constants.state.items()}
    full_name = get_full[state_abrv]
    state_zip[zip] = full_name
    return full_name
def marketsale(request):
    """Plot weekly sales and market-share change for the merchants chosen in
    the request; returns JSON with the paths of the two generated PNGs."""
    merchantType = request.GET.get("merchantChoice")
    merchants = json.loads(merchantType)
    print(merchants)
    xlab = None   # NOTE(review): these four locals are never used below
    tick = 5
    trend = False
    rval = False
    # On_load
    # Label every record with its ISO "YYYY-WW" week.
    year_week = []
    for date in np.array(dat["email_day"]):
        year = date[0:4]
        month = date[5:7]
        day = date[8:10]
        week = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
        text = str(year) + "-" + "{:02d}".format(week)
        year_week.append(text)
    dat["week"] = year_week
    # One row per week, one column per merchant, with that week's sale counts.
    sales = pd.DataFrame()
    all_weeks = dat["week"].unique()
    all_weeks.sort()
    for week in all_weeks:
        sales = sales.append(dat.loc[dat["week"] == week, "merchant_name"].value_counts(), ignore_index=True)
    compare = sales[merchants]
    compare.plot(title="Sales Across a Time Range")
    plt.xticks(np.arange(len(all_weeks), step=5), all_weeks[0::5], rotation=-75)
    plt.xlabel("Time Range")
    plt.ylabel("Number of Sales Records")
    plt.tight_layout()
    plt.savefig('edisontracker/static/edisontracker/plot/plotTimeSale.png')
    plt.close()
    compare = compare.assign(x=np.array(range(compare.shape[0])))
    market_share_plot(compare, all_weeks, trend=True, rval=False)
    html = HttpResponse('{ "plotTimeSale" : "/static/edisontracker/plot/plotTimeSale.png", "plotMarketShare" : "/static/edisontracker/plot/plotMarketShare.png" }')
    return html
def market_share_plot(dat, xlab=None, tick=5, trend=False, rval=False):
    """Plot each company's market-share change over time and save it as a PNG.

    Fixes: identity comparisons ``dat.shape[0] is not len(xlab)`` and
    ``column_name is not "x"`` relied on CPython interning caches (now ``!=``);
    ``prob == None`` -> ``is None``; the trend-loop variable ``p`` was shadowed
    by the linregress p-value unpack (renamed to ``col``).
    Returns {company: r} when *trend* and *rval* are both True.
    """
    if xlab is not None:
        if dat.shape[0] != len(xlab):
            print("Error: xlab length does not match the data")
            print(str(dat.shape[0]) + " != " + str(len(xlab)))
    dat = pd.DataFrame(dat)
    num_companies = dat.shape[1] - 1  # kept for parity with the original (unused)
    company_names = []
    for column_name in dat.columns:
        if column_name != "x":
            company_names.append(column_name)
    # Percentage-point change of each company's share vs its first-row share.
    probs = pd.DataFrame()
    comp = 0
    for company in company_names:
        start = dat[company][0] / (dat.loc[0, dat.columns != 'x'].sum())
        prob_row = []
        for row in range(len(dat[company])):
            prob = ((dat[company][row] / (dat.loc[row, dat.columns != 'x'].sum())) - start) * 100
            if pd.isna(prob) or prob is None:
                if pd.isna(start):
                    prob = 0
                else:
                    prob = 0 - start
            prob_row.append(prob)
        probs = probs.assign(c=pd.Series(prob_row))
        probs = probs.rename(columns={'c': company_names[comp]})
        comp += 1
    probs.plot()
    plt.xlabel("Time Range")
    plt.ylabel("% Change")
    plt.title("Change in Market Share Over Time")
    plt.axhline(y=0, color="gray", linewidth=1, linestyle="--")
    if xlab is not None:
        plt.xticks(np.arange(len(xlab), step=tick), xlab[0::tick], rotation=-75)
    # Reset the colour cycle so each trend line matches its data line's colour.
    plt.gca().set_prop_cycle(None)
    if trend:
        r_values = {}
        xrange = np.arange(0, probs.shape[0], 1)
        i = 0
        for col in probs:
            slope, intercept, r, p, error = scipy.stats.linregress(xrange, probs[col])
            if rval:
                r_values[probs.columns[i]] = r
            line = slope * xrange + intercept
            plt.plot(xrange, line, linestyle="--", linewidth=1)
            i += 1
    plt.tight_layout()
    plt.savefig('edisontracker/static/edisontracker/plot/plotMarketShare.png')
    plt.close()
    if trend and rval:
        return r_values
def allSales(request):
    """Bar chart of total record counts per merchant; returns the PNG path as JSON."""
    global dat
    image_path = 'edisontracker/static/edisontracker/plot/plotAllSales.png'
    plt.figure(1)
    dat["merchant_name"].value_counts().plot(kind="bar", color="red")
    plt.title("Sales per Company")
    plt.show()  # presumably a no-op under the non-interactive 'PS' backend -- confirm
    plt.savefig(image_path, bbox_inches="tight")
    plt.close()
    html = HttpResponse('{"plotAllSales" : "/static/edisontracker/plot/plotAllSales.png"}')
    return html
def loadBarPlotNumSales(request):
    """Serve the per-merchant bar-plot page; lazily adds a 'month' column to
    the module-level DataFrame on first use."""
    global dat
    year_month = []
    try:
        dat["month"]
    except KeyError:
        # First call only: derive "YYYY-MM" labels and parse email_day to datetimes.
        for date in np.array(dat["email_day"]):
            year_month.append(date[0:7])
        dat["month"] = year_month
        # convert to datetime objects
        dat["email_day"] = pd.to_datetime(dat["email_day"])
    #html = render('initial.html', select=build_options())
    html = render(request, 'edisontracker/barplotNumSales.html', {"select": build_options()})
    return html
def build_options(feat=None):
    """Build an HTML <select> of all merchants, pre-selecting *feat* when given."""
    global dat
    names = sorted(list(dat.merchant_name.unique()))
    parts = ["<select id = 'merchant' class='custom-select my-1 mr-sm-2 mb-3' form=\"form\", name=\"feat\">"]
    for name in names:
        selected = " selected" if name == feat else ""
        parts.append("<option class='custom-select my-1 mr-sm-2 mb-3' value=\"" + name + "\"" + selected + ">" + name + "</option>")
    parts.append("<select>")
    return "".join(parts)
def getMerchants(request):
    """Return a multi-select listing the merchants of the requested category.

    Fix: the original did ``import html`` and then rebound the same name with
    ``html = HttpResponse(...)`` -- the module is now imported under an alias.
    """
    import html as html_lib
    merchantType = request.GET.get("category")
    merchants = [item for item in constants.categories[merchantType]]
    display = "<select id = 'choice' class='custom-select my-1 mr-sm-2 mb-3' multiple>"
    for merchant in merchants:
        name = html_lib.escape(merchant)  # escape before embedding in HTML
        display += "<option class ='form-check-input' type='checkbox' name='merchants' value='" + name + "'>" + name + "</option>"
    display += "</select>"
    return HttpResponse(display)
def getBarPlot(request):
    """Render a monthly sales bar chart for one merchant over a date range;
    returns the generated PNG path as JSON."""
    feat = request.GET.get("feat")
    start_date = request.GET.get("start_date")
    end_date = request.GET.get("end_date")
    title = "Sales per Month for %s from %s to %s" % (feat, start_date, end_date)
    # "first string is: %s, second one is: %s" % (str1, "geo.tif")
    print(start_date)
    print(end_date)
    # create the plot
    # NOTE(review): this reuses/overwrites plotMarketShare.png instead of a
    # dedicated file name -- confirm intent.
    image_path = 'edisontracker/static/edisontracker/plot/plotMarketShare.png'
    selected = (dat["merchant_name"] == feat) & (
        dat["email_day"] >= start_date) & (dat["email_day"] <= end_date)
    df = dat.loc[selected]
    plt.figure(1)
    df["month"].value_counts().sort_index().plot(kind="bar", color="red")
    plt.grid(color='gray', linestyle='-', linewidth=1)
    plt.title(title)
    plt.savefig(image_path)
    plt.close()
    html = HttpResponse('{"plot" : "/static/edisontracker/plot/plotMarketShare.png"}')
    return html
| RembertoNunez/EdisonTrendsCapstone | edisontracker/views.py | views.py | py | 13,583 | python | en | code | 0 | github-code | 90 |
4052183468 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 12:58:01 2017
@author: ricardo
"""
import pandas
import matplotlib.pyplot as plt
import numpy as np
# Leer el archivo consolidado
file = pandas.read_csv("consolidado.csv", skiprows = 0, sep = ';', header=0)
# Para obtener el promedio de exito, reemplazar un exito con 1, fracaso con 0
file['state'] = file['state'].replace('successful', 1)
file['state'] = file['state'].replace('failed', 0)
# Por categorรญa, obtener la cantidad de proyectos,el porcentaje de exitos
# el promedio de patrocinadores
frecuenciaCategoria = file.groupby('category').agg(
{'backers_count':'mean', 'state':'mean', 'category':'count',
'goal': 'mean'}
)
frecuenciaCategoria.columns = ['Patrocinadores', 'Exito', 'Cantidad', 'Objetivo']
frecuenciaCategoria.reset_index(inplace=True)
frecuenciaCategoria.columns = ['Categoria', 'Patrocinadores', 'Exito', 'Cantidad', 'Objetivo']
# reordenar segรบn la cantidad de patrocinadores
frecuenciaCategoria.sort_values(by = ['Patrocinadores'], inplace = True)
frecuenciaCategoria.reset_index(inplace=True, drop = True)
# graficar la cantidad de patrocinadores vs en รฉxito obtenido
plt.figure()
plt.plot( list(frecuenciaCategoria['Patrocinadores']),
list(frecuenciaCategoria['Exito']),
'ro')
plt.title('Promedio de patrocinadores vs Porcentaje de exito por categorรญa')
plt.xlabel('Promedio de patrocinadores')
plt.ylabel('Porcentaje de exito')
plt.show(block=False)
# graficar la cantidad de patrocinadores vs en รฉxito obtenido
plt.figure()
plt.plot( list(frecuenciaCategoria['Objetivo']),
list(frecuenciaCategoria['Exito']),
'ro')
plt.title('Promedio del objetivo vs Porcentaje de exito por categorรญa')
plt.xlabel('Promedio del objetivo')
plt.ylabel('Porcentaje de exito')
plt.show(block=False)
print('')
print('')
print("Correlaciรณn entre promedio de patrocinadores y porcentaje de exito entre categorias")
print(np.corrcoef(list(frecuenciaCategoria['Patrocinadores']),
list(frecuenciaCategoria['Exito']))[0][1]
)
print('')
print('')
print(frecuenciaCategoria)
print('')
print('')
frecuenciaCategoria.to_csv("frec.csv", sep = ";", na_rep = '', index = False)
# Re-sort ascending by mean backers, then print the 5 most/least backed categories
frecuenciaCategoria.sort_values(
    by=['Patrocinadores'],
    ascending=[1],
    inplace=True)
frecuenciaCategoria.reset_index(inplace=True, drop=True)
print("Categorías más apoyadas: ")
for i in range(0, 5):
    n = len(frecuenciaCategoria) - 1 - i  # walk from the bottom (largest values)
    print(frecuenciaCategoria.loc[n, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[n, 'Patrocinadores']))
          + ' patrocinadores por proyecto')
print('')
print('')
print("Categorías menos apoyadas: ")
for i in range(0, 5):
    print(frecuenciaCategoria.loc[i, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[i, 'Patrocinadores']))
          + ' patrocinadores por proyecto')
print('')
print('')
# Re-sort by mean success rate, then print the 5 most/least successful categories
frecuenciaCategoria.sort_values(
    by=['Exito'],
    ascending=[1],
    inplace=True)
frecuenciaCategoria.reset_index(inplace=True, drop=True)
print('')
print('')
print("Categorías mas exitosas: ")
for i in range(0, 5):
    n = len(frecuenciaCategoria) - 1 - i
    print(frecuenciaCategoria.loc[n, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[n, 'Exito'] * 100))
          + '%')
print('')
print('')
print("Categorías menos exitosas: ")
for i in range(0, 5):
    print(frecuenciaCategoria.loc[i, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[i, 'Exito'] * 100))
          + '%')
# Re-sort by mean goal, then print the 5 most/least ambitious categories
frecuenciaCategoria.sort_values(
    by=['Objetivo'],
    ascending=[1],
    inplace=True)
frecuenciaCategoria.reset_index(inplace=True, drop=True)
print('')
print('')
print("Categorías más ambiciosas: ")
for i in range(0, 5):
    n = len(frecuenciaCategoria) - 1 - i  # walk from the bottom (largest goals)
    print(frecuenciaCategoria.loc[n, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[n, 'Objetivo']))
          + '$ por proyecto')
print('')
print('')
print("Categorías menos ambiciosas: ")
for i in range(0, 5):
    print(frecuenciaCategoria.loc[i, 'Categoria'] + ': '
          + str(int(frecuenciaCategoria.loc[i, 'Objetivo']))
          + '$ por proyecto')
print('')
print("Por categoría, la correlación entre el número de patrocinadores y la probabilidad de exito")
# Now per category
categorias = file['category'].unique()
for cat in categorias:
    filec = file.loc[file['category'] == cat]
    #filec = file
    filec.reset_index(inplace=True, drop=True)
    # sort by goal
    filec.sort_values(by=['goal'], inplace=True)
    filec.reset_index(inplace=True, drop=True)
    objetivo = []
    exito = []
    # NOTE(review): `cantidades` holds unique *goal* values, but the loop body
    # filters on backers_count == i -- these look inconsistent; confirm intent.
    cantidades = filec['goal'].unique()
    for i in cantidades:
        print(i)
        #segmentof = file.loc[i*largo:(i+1)*largo]
        segmentof = filec.loc[filec['backers_count'] == i]
        # NOTE(review): int() raises ValueError if the segment is empty
        # (mean() is NaN) -- confirm the data guarantees at least one match.
        objetivo.append(int(segmentof['goal'].mean()))
        exito.append(int(segmentof['state'].mean() * 100))
    #print(patrocinadores)
    #print(exito)
    #plt.plot(patrocinadores, exito, 'ro')
    #print('Categoría ' + cat + ': ' + str(np.corrcoef(patrocinadores, exito)[1][0]))
    #print('Categoría ' + cat + ': ' + str(np.corrcoef(patrocinadores, exito)))
    #np.corrcoef( list(filec['backers_count']), list(filec['state']) )
    #plt.plot( filec.groupby('backers_count')['state'].mean() )
    #filec = file.loc[file['category'] == cat]
    #plt.plot( list(file['backers_count']),
    #    list(file['state']),
    #    'ro')
    #plt.show()
| hagarciag/Trabajo_Inteligencia_Colectiva | categorias.py | categorias.py | py | 5,940 | python | es | code | 0 | github-code | 90 |
import PCR_Vic3D_elem
import os
# Test driver: register Vic-3D optical measurements against an ANSYS mesh and
# compare deformations. Paths are relative to the repo's Test_res folder.
vic3d_data_path = [r".\Test_res\def_init_vic3d.csv", r".\Test_res\def_final_vic3d.csv"]
ansys_cdb_path = r".\Test_res\mesh.cdb"  # UNZIP THE MESH FILE IN "TEST_RES" FOLDER BEFORE RUNNING THE SCRIPT
ansys_deformations_path = [r".\Test_res\MaxPrincipalStrain_meshSurface.txt"]
# Each entry bundles one comparison case; only one case here, but the loop
# below supports several.
dirs = []
dirs.append({"vic3d": vic3d_data_path,
             "ansys_def": ansys_deformations_path,
             "cdb": ansys_cdb_path})
for paths in dirs:
    # Initial and final Vic-3D states; subtract the initial deformation so the
    # comparison uses the relative change.
    Vic3D_nodes_coord_init, Vic3D_nodes_deformations_init = PCR_Vic3D_elem.read_nodes_def_vic3d(paths["vic3d"][0])
    Vic3D_nodes_coord, Vic3D_nodes_deformations = PCR_Vic3D_elem.read_nodes_def_vic3d(paths["vic3d"][1])
    Vic3D_nodes_deformations[:, 1] -= Vic3D_nodes_deformations_init[:, 1]
    Ansys_nodes_coord = PCR_Vic3D_elem.read_nodes_coord_ansys(paths["cdb"])
    Ansys_elem_tab = PCR_Vic3D_elem.read_elem_tab_ansys(paths["cdb"])
    # NOTE(review): the loop variable shadows the module-level list
    # `ansys_deformations_path`; harmless here but confusing.
    for ansys_deformations_path in paths["ansys_def"]:
        try:
            ansys_deformations = PCR_Vic3D_elem.read_nodes_def_ansys(ansys_deformations_path)
            # Outputs are written next to the input deformation file.
            save_img_path = os.path.splitext(ansys_deformations_path)[0] + ".png"
            save_deformation_path = os.path.splitext(ansys_deformations_path)[0] + "_compareVic3D.xlsx"
            # zyx_rot_eul_angles_init = (np.pi, -np.pi/2, 0)
            zyx_rot_eul_angles_init = (0, 0, 0)
            radius_smooth = 0.0005
            PCR_Vic3D_elem.pcr_vic3d(Vic3D_nodes_coord, Ansys_nodes_coord, Vic3D_nodes_deformations, ansys_deformations,
                                     Ansys_elem_tab,
                                     zyx_rot_eul_angles_init=zyx_rot_eul_angles_init,
                                     radius_smooth=radius_smooth,
                                     show_plot=True,
                                     save_img_path=save_img_path,
                                     save_deformation_path=save_deformation_path)
        except Exception as e:
            # Best-effort batch processing: report and continue with the next file.
            print(e)
            print("ERROR with file : ", ansys_deformations_path)
| MarcG-LBMC-Lyos/PCR_Vic3D | test.py | test.py | py | 2,090 | python | en | code | 0 | github-code | 90 |
from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution2D, MaxPool2D, Flatten
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt

# Load MNIST through the TF1 tutorial helper (labels one-hot encoded).
mnist = input_data.read_data_sets("../../tensorflow/MNIST", one_hot=True)
x_train, y_train = mnist.train.images, mnist.train.labels
x_test, y_test = mnist.test.images, mnist.test.labels

# (N, 784) -> (N, 28, 28, 1) so the images fit the 2-D convolution layers.
x_train = x_train.reshape(-1, 28, 28, 1)
# BUG FIX: the original reshaped x_train into x_test, silently replacing the
# test images with training images.
x_test = x_test.reshape(-1, 28, 28, 1)

# Simple LeNet-style CNN: two conv/pool stages, one dense layer, softmax head.
model = Sequential()
# input_shape only needs to be set on the first convolution layer
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu',
    name='conv1'
))
model.add(MaxPool2D(
    pool_size=2,
    strides=2,
    padding='same',
    name='pool1'
))
model.add(Convolution2D(64, 5, strides=1, padding='same', activation='relu', name='conv2'))
model.add(MaxPool2D(2, 2, 'same', name='pool2'))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))

# Training/evaluation deliberately disabled — this example only draws the model.
# adam = Adam(lr=1e-4)
#
# model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
#
# model.fit(x_train, y_train, batch_size=100, epochs=10)
# loss, accuracy = model.evaluate(x_test, y_test)
#
# print('\ntest loss: ', loss)
# print('accuracy: ', accuracy)

# Render the architecture diagram to model.png and display it.
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names='False', rankdir='TB')
plt.figure(figsize=(10, 10))
img = plt.imread('model.png')
plt.imshow(img)
plt.axis('off')
plt.show()
# Read n, then the value list v and the cost list c (n integers each).
n = int(input())
v = list(map(int, input().split()))
c = list(map(int, input().split()))

# Take only gems whose value exceeds their cost; print the total profit.
# (Renamed from `sum`, which shadowed the builtin.)
total = sum(x - y for x, y in zip(v, c) if x > y)
print(total)
| Aasthaengg/IBMdataset | Python_codes/p03060/s816449061.py | s816449061.py | py | 214 | python | en | code | 0 | github-code | 90 |
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Return the least common multiple of a and b via lcm(a, b) = a*b / gcd(a, b)."""
    return a * b // gcd(a, b)
def solve(m, n, x, y):
    """Find the smallest k with (k-1) % m == x-1 and (k-1) % n == y-1.

    Searches k up to lcm(m, n), stepping by the larger cycle length and
    starting from that cycle's target position; returns -1 if no such k
    exists (BOJ 6064, Caesar calendar).
    """
    l = lcm(m, n)
    add, base = (m, x) if m > n else (n, y)
    # Fix: reuse the precomputed lcm instead of recomputing it in range().
    for k in range(base, l + 1, add):
        if (k - 1) % m == x - 1 and (k - 1) % n == y - 1:
            return k
    return -1
# One test case per line after the count: "M N x y" (1-indexed cycle positions).
T = int(input())
for _ in range(T):
    M, N, x, y = map(int, input().split())
    print(solve(M, N, x, y))
| yeonghoey/boj | 6064/main.py | main.py | py | 439 | python | en | code | 2 | github-code | 90 |
from rdkit import Chem
import numpy as np
import pandas as pd
def mol2bit(MOLS):
    """Compute RDKit fingerprint bitInfo dicts for a list of molecules.

    Returns one dict per molecule mapping set bit index -> atom environments,
    or np.nan for entries whose molecule failed to parse (None).
    """
    BIT = []
    for i, mol in enumerate(MOLS):
        if mol is not None:
            bit = {}
            # The call populates `bit` through the bitInfo out-parameter; the
            # returned fingerprint object itself is not needed (the original
            # also collected it in an FP list that was never used).
            Chem.RDKFingerprint(mol, bitInfo=bit)
            BIT.append(bit)
        else:
            BIT.append(np.nan)
        print(i)  # progress log
    return BIT
def bit2df(BIT):
    """Convert a list of bitInfo dicts into a (len(BIT), 2048) 0/1 DataFrame.

    Rows whose entry is a NaN placeholder (failed molecule) are filled with
    NaN; the column dtype is upcast to float as soon as one NaN row exists.
    """
    df = pd.DataFrame(np.zeros((len(BIT), 2048), dtype=int))
    for i, bits in enumerate(BIT):
        # isinstance also catches np.float64 NaN, which `type(...) == float`
        # in the original would miss.
        if isinstance(bits, float):
            df.loc[i, :] = np.nan
        else:
            df.loc[i, list(bits.keys())] = 1
    return df
if __name__ == '__main__':
    # Read SMILES strings, fingerprint them, and save the bit matrix.
    path = './data'
    # The input CSV must contain a 'SMILES' column.
    df = pd.read_csv(f"{path}/SMILES.csv")
    SMILES = df['SMILES'].tolist()
    # Unparseable SMILES yield None molecules; mol2bit maps them to NaN rows.
    MOLS = [Chem.MolFromSmiles(smi) for smi in SMILES]
    # calculate fingerprint
    BIT = mol2bit(MOLS)
    df_fp = bit2df(BIT)
    df_fp = pd.concat([df['SMILES'], df_fp], axis=1)
    df_fp.to_csv(f'{path}/smiles2fp.csv')
# -*- coding: utf-8 -*-
import sys, os
# -- General configuration -----------------------------------------------------
extensions = ['sphinx.ext.todo', 'sphinxcontrib.spelling', 'sphinx.ext.graphviz']
spelling_lang='en_AU'
spelling_word_list_filename='OK_wordlist.txt'
spelling_show_suggestions=True
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'SAR-Scenarios'
copyright = u'2013, CanberraUAV'
version = '0.1'
release = '0.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'SARScenarioDoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'SARScenarios.tex', u'Scenarios for employing UAVs in Search And Rescue situations',
u'CanberraUAV', 'manual'),
]
]
import requests, json, time
from transformers import AutoTokenizer
model_id = "daekeun-ml/Llama-2-ko-instruct-13B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
def chat(prompt:str) -> dict:
    """POST `prompt` to the local exllama server and return benchmark stats.

    Returns a dict with the framework name, generated token count (measured
    with the HF tokenizer), wall-clock inference time, tokens/sec, and the
    raw generated text.
    """
    payload = {"prompt": prompt}
    # Fixed generation settings used for benchmarking.
    settings = {"max_new_tokens": 400, "beams": 3, "beam_length": 3, "in_beam_search": True}
    payload["settings"] = settings
    headers = {'Content-Type': 'application/json'}
    start = time.perf_counter()
    response = requests.post("http://localhost:8004/infer_bench", headers=headers, data=json.dumps(payload))
    # response = requests.post("http://localhost:8004/infer_bench", data=payload)
    # print(response.text)
    generated_text = response.text
    print(generated_text)
    request_time = time.perf_counter() - start
    # NOTE(review): tok_count is recomputed below via a second encode() call;
    # harmless but redundant.
    tok_count = len(tokenizer.encode(generated_text))
    tokens_per_second = tok_count/request_time
    return {
        'Framework': 'exllama-gptq-4bit',
        'Generated_tokens': len(tokenizer.encode(generated_text)),
        'Inference_time': format(request_time, '.2f'),
        'Tokens/sec': format(tokens_per_second, '.2f'),
        'Question': prompt,
        'Answer': generated_text,
    }
if __name__ == '__main__':
    # Smoke test against the locally served model.
    prompt = "Who are you?"
    print(f"User: {prompt}\nLlama2: {chat(prompt)})")
from itertools import combinations as cmb
import copy

# Grid brute force: for every choice of rows and columns to erase, count the
# configurations that leave exactly K '#' cells. (The original file contained
# a comment whose multi-byte characters were split across lines, leaving a
# bare garbled token that would raise NameError; it has been removed.)
H, W, K = map(int, input().split())
C = []
ans = 0
for i in range(H):
    C.append(list(input()))
for i in range(H):  # how many rows to erase
    for j in range(W):  # how many columns to erase
        for hs in cmb(range(H), i):  # which rows
            for ws in cmb(range(W), j):  # which columns
                tmpC = copy.deepcopy(C)  # scratch copy of the grid
                for h in hs:
                    tmpC[h] = ["" for k in range(W)]  # blank this row
                for w in ws:
                    for k in range(H):
                        tmpC[k][w] = ""  # blank this column
                #print(tmpC,hs,ws,sum(tmpC,[]).count("#")==K)
                if sum(tmpC, []).count("#") == K:
                    ans += 1
print(ans)
# Validity check over the reported values, then count arrangements.
n = int(input())
a = list(map(int, input().split()))

flg = True
if n % 2 == 0:
    # even n: no zeros allowed and exactly n//2 distinct values
    if 0 in a or len(set(a)) != n // 2:
        flg = False
else:
    # odd n: exactly one zero and n//2 + 1 distinct values
    # (a.count(0) replaces the original throwaway list comprehension)
    if a.count(0) != 1 or len(set(a)) != n // 2 + 1:
        flg = False

if flg:
    print(2 ** (n // 2) % (10 ** 9 + 7))
else:
    print(0)
| Aasthaengg/IBMdataset | Python_codes/p03846/s434143232.py | s434143232.py | py | 253 | python | fa | code | 0 | github-code | 90 |
def summa(key, value):
    """Element-wise sum of two equal-length sequences, returned as a list."""
    assert len(key) == len(value)
    return [a + b for a, b in zip(key, value)]
if __name__ == '__main__':
    # Demo: element-wise sum of two 3-element lists -> [8, 10, 12].
    a = [1, 2, 3]
    b = [7, 8, 9]
    print(summa(a, b))
| SamimiesGames/itslearning | osa-4/4/summalista.py | summalista.py | py | 209 | python | en | code | 3 | github-code | 90 |
"""Tests for the string_reverser function in the copies module of the text package."""
import pytest
from speedsurprises.text import string_reverser
@pytest.mark.benchmark
def test_string_reverser_benchmark(benchmark):
    """Benchmark string_reverser.reverse via the pytest-benchmark fixture."""
    # benchmark() repeatedly times the call and returns its result.
    reversed_bnch_string = benchmark(
        string_reverser.reverse,
        s="hello",
    )
    assert len(reversed_bnch_string) == 5
    assert reversed_bnch_string == "olleh"
@pytest.mark.parametrize(
    "original_string, reversed_string",
    [
        ("tester", "retset"),
        ("hello", "olleh"),
    ],
)
def test_string_reverser_multiple(original_string, reversed_string):
    """Check reverse() against several input/expected pairs."""
    function_reversed_string = string_reverser.reverse(original_string)
    assert function_reversed_string == reversed_string
def test_string_reverser_single():
    """Check reverse() on a single hard-coded string."""
    function_reversed_string = string_reverser.reverse("welcome")
    assert function_reversed_string == "emoclew"
| Tada-Project/speed-surprises | tests/test_string_reverser.py | test_string_reverser.py | py | 1,025 | python | en | code | 3 | github-code | 90 |
"""
Insertion sort runs in quadratic time in the worst case.
It sorts in place, so it uses no extra space.
Insertion sort works well for small arrays.
The best case is an already-sorted array; the worst case is a reverse-sorted one.
"""
def insertion_sort_own(input_array):
    """
    Sort input_array in place and return it.

    For each index i, the element is compared with every element before it
    and swapped whenever the later element is smaller, which keeps the
    prefix input_array[0..i] sorted.

    :param input_array: list of comparable items, mutated in place
    :return: the same list, sorted ascending
    """
    for i in range(1, len(input_array)):
        for j in range(0, i):
            if input_array[i] < input_array[j]:
                input_array[i], input_array[j] = input_array[j], input_array[i]
    # The original also incremented i and j manually inside the for loops;
    # those statements were no-ops (the loop rebinds the variable each
    # iteration) and have been removed.
    return input_array
"""
Classic insertion sort on the sample below: for each index i, x = l[i] is
compared against the sorted prefix l[0..i-1]. Elements larger than x are
shifted one slot to the right (for i = 4, the value 10 is compared with 60,
40 and 20 in turn, each of which is shifted right). When the while loop
stops, x is written into slot j + 1, its correct position in the prefix.
"""
# Sample input used by the demo call at the bottom of the file.
sample_array = [20, 5, 40, 60, 10, 30]
def insertion_sort(l):
    """Sort the list l in place using insertion sort and return it."""
    for idx in range(1, len(l)):
        key = l[idx]
        pos = idx
        # Shift larger prefix elements one slot right until key's spot is found.
        while pos > 0 and key < l[pos - 1]:
            l[pos] = l[pos - 1]
            pos -= 1
        l[pos] = key
    return l
| GowravTata/DSA | 5.sorting/5.insertion_sort.py | 5.insertion_sort.py | py | 1,530 | python | en | code | 0 | github-code | 90 |
# Brute force over all honest/unreliable assignments: an assignment is
# consistent when every testimony (x, y) of every person assumed honest
# matches the assignment; maximize the number of honest people.
N = int(input())
A = []
XY = []
for _ in range(N):
    a = int(input())
    A.append(a)
    XY.append([list(map(int, input().split())) for _ in range(a)])

count_max = 0
for mask in range(2 ** N):
    # op[p] == 1 means person p is assumed honest.
    op = [0] * N
    for bit in range(N):
        if (mask >> bit) & 1:
            op[N - 1 - bit] = 1
    consistent = True
    for person, honest in enumerate(op):
        if honest == 1:
            for x, y in XY[person]:
                if y != op[x - 1]:
                    consistent = False
    if consistent and sum(op) > count_max:
        count_max = sum(op)
print(count_max)
import sys

# Crane/turtle check: can x animals with 2 or 4 legs each total y legs?
# Try every split of i two-legged and (x - i) four-legged animals.
x, y = map(int, input().split())
found = any(y == 2 * i + 4 * (x - i) for i in range(x + 1))
print("Yes" if found else "No")
import argparse
from os import listdir, path, makedirs
from random import shuffle
from shutil import rmtree
import extract_turns
def main(in_root_folder, in_result_folder, in_trainset_ratio):
    """Collect user turns from all bAbI task-6 dialog files in in_root_folder
    and write shuffled train/dev/test splits into in_result_folder."""
    # Skip the *candidates* files; only the task6 dialog transcripts are used.
    dstc_files = [filename
                  for filename in listdir(in_root_folder)
                  if 'task6' in filename and 'candidates' not in filename]
    result = []
    for filename in dstc_files:
        # extract_turns.main(file, speaker, ...) returns a list of utterances.
        result += extract_turns.main(path.join(in_root_folder, filename), 'user', True)
    save_dataset(result, in_result_folder, in_trainset_ratio)
def save_dataset(in_turns, in_result_folder, in_trainset_ratio):
    """Shuffle in_turns and write train/dev/test splits under in_result_folder.

    in_trainset_ratio of the turns go to train; the remainder is split evenly
    between dev and test. Each split gets identical encoder.txt and
    decoder.txt files, one turn per line. An existing result folder is
    deleted first. Mutates in_turns (in-place shuffle).
    """
    if path.exists(in_result_folder):
        rmtree(in_result_folder)
    makedirs(in_result_folder)
    shuffle(in_turns)

    trainset_size = int(in_trainset_ratio * len(in_turns))
    devset_size = int((len(in_turns) - trainset_size) / 2.0)
    trainset = in_turns[:trainset_size]
    devset = in_turns[trainset_size: trainset_size + devset_size]
    testset = in_turns[trainset_size + devset_size:]
    for dataset_name, dataset in zip(['train', 'dev', 'test'], [trainset, devset, testset]):
        makedirs(path.join(in_result_folder, dataset_name))
        # The original used Python-2-only "print >>fh, line" statements and
        # never closed the files; explicit writes inside `with` blocks behave
        # identically and also work on Python 3.
        for out_name in ('encoder.txt', 'decoder.txt'):
            with open(path.join(in_result_folder, dataset_name, out_name), 'w') as out:
                for line in dataset:
                    out.write('%s\n' % line)
def build_argument_parser():
    """Build the CLI parser: bAbI folder, output folder, optional train ratio."""
    parser = argparse.ArgumentParser()
    parser.add_argument('babi_folder', help='bAbI Dialog tasks folder')
    parser.add_argument('result_folder')
    parser.add_argument('--trainset_ratio', type=float, default=0.8)
    return parser
if __name__ == '__main__':
    # CLI entry point: parse arguments and build the echo dataset.
    parser = build_argument_parser()
    args = parser.parse_args()
    main(args.babi_folder, args.result_folder, args.trainset_ratio)
| ishalyminov/babi_tools | make_echo_dataset.py | make_echo_dataset.py | py | 1,959 | python | en | code | 14 | github-code | 90 |
# -*- coding: utf-8 -*-
# @Time : 2019/7/16
# @Author : Ryan
# @File : 5-nearest_neighbor.py
# 1-nearest-neighbor MNIST classifier with an L1 distance, TensorFlow 1.x API.
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000) # 5000 for training
Xte, Yte = mnist.test.next_batch(200) # 200 for testing
# tf graph input: the whole training batch vs. one test image at a time
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])
# nearest neighbor calculation using L1 distance
# calculate L1 distance: sum over pixels of |xtr - xte| per training image
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# prediction: get min distance index (nearest neighbor)
# NOTE(review): tf.arg_min is deprecated in favor of tf.argmin in later TF1.
pred = tf.arg_min(distance, 0)
accuracy = 0
# initialize the variables
init = tf.global_variables_initializer()
# start training
with tf.Session() as sess:
    sess.run(init)
    # loop over each test image
    for i in range(len(Xte)):
        # get the index of the current sample's nearest neighbor
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # compare the nearest neighbor's class label with the true label
        print("test", i, "prediction:", np.argmax(Ytr[nn_index]),
              "true class:", np.argmax(Yte[i]))
        # accumulate accuracy: each correct sample contributes 1/len(Xte)
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1./len(Xte)
print("done!")
| jayzhou2468/TensorFlow-basic-grammar | learn_TensorFlow/5-nearest_neighbor.py | 5-nearest_neighbor.py | py | 1,529 | python | en | code | 2 | github-code | 90 |
# -*- coding: utf-8 -*-
import scrapy
from design.items import DesignItem
# Shared payload template for every scraped item. The prize-name comment and
# string below were mojibake in the source dump and had been split across
# physical lines (which made the file a SyntaxError); they are rejoined here
# byte-for-byte — TODO confirm the intended CJK text against the upstream repo.
data = {
    'channel': 'jiagle',
    'name': '',
    'color_tags': '',
    'brand_tags': '',
    'material_tags': '',
    'style_tags': '',
    'technique_tags': '',
    'other_tags': '',
    'user_id': 0,
    'kind': 1,
    'brand_id': 0,
    'prize_id': 22,
    'prize': 'ไธญๅฝๅฎถๅท่ฎพ่ฎก้็นๅฅ',
    'evt': 3,
    'prize_level': '',
    'prize_time': '',
    'category_id': 0,
    'status': 1,   # item status (original comment was mojibake)
    'deleted': 0,  # soft-delete flag (original comment was mojibake)
    'info': '',
}
class DesignCaseSpider(scrapy.Spider):
    """Scrapy spider for gida.jiagle.com award pages (matches 2 through 7)."""
    name = 'jindian'
    allowed_domains = ['gida.jiagle.com']
    id = 2 # 2,3,4,5,6,7
    start_urls = ['http://gida.jiagle.com/match/' + str(id) + '.html']
    def parse(self, response):
        # Each listing page links to individual design detail pages.
        design_list = response.xpath('//li[contains(@class,"ft")]/a/@href').extract()
        tags = response.xpath('//li[@class="active"]/a/text()').extract()[0]
        for design in design_list:
            yield scrapy.Request(design, callback=self.parse_detail,
                                 meta={'tags': tags})
        # Crawl the next match page (2..7) by bumping the class-level counter.
        if self.id < 7:
            self.id += 1
            yield scrapy.Request('http://gida.jiagle.com/match/' + str(self.id) + '.html', callback=self.parse)
    def parse_detail(self, response):
        # Build a DesignItem from one detail page; PDF "images" are skipped.
        url = response.url
        item = DesignItem()
        tags = response.meta['tags']
        img_url = response.xpath('//div[@id="productBig"]/img/@src').extract()[0]
        if img_url.endswith('.pdf'):
            return
        # Detail header lists title, company, and designer in that order.
        message = response.xpath('//div[@class="detail-title"]//dd/text()').extract()
        title = message[0]
        company = message[1]
        designer = message[2]
        remark = response.xpath('//div[@class="detail-text-box"]/p/text()').extract()[0]
        remark = remark.replace('\n','').replace(' ','').replace('\r','').strip()
        print(remark)
        item['url'] = url
        item['img_url'] = img_url.strip()
        item['title'] = title.strip()
        item['company'] = company.strip()
        item['remark'] = remark
        item['tags'] = tags
        item['designer'] = designer.strip()
        # Copy the module-level template fields into every item.
        for key, value in data.items():
            item[key] = value
        yield item
| LIMr1209/Internet-worm | design/design/spiders/jindian.py | jindian.py | py | 2,331 | python | en | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django import forms
__all__ = ("AutocompleteWidget",)
class AutocompleteInputWidget(forms.TextInput):
    """
    Renders the autocomplete *inner* `<input>` field.

    `lookup_model` supplies the endpoint URL used by the client-side
    autocomplete JS; `allow_new` toggles whether free-text entries are
    accepted.
    """
    def __init__(self, lookup_model, *args, **kwargs):
        self.lookup_model = lookup_model
        # Pop our custom kwarg before delegating to TextInput.
        self.allow_new = kwargs.pop("allow_new", False)
        super(AutocompleteInputWidget, self).__init__(*args, **kwargs)
    def build_attrs(self, extra_attrs=None, **kwargs):
        # Expose the widget configuration to the JS layer via data-attributes
        # and disable the browser's native autocomplete.
        attrs = super(AutocompleteInputWidget, self).build_attrs(extra_attrs, **kwargs)
        attrs["autocomplete"] = "off"
        attrs["data-autocomplete-widget-url"] = self.lookup_model.url
        attrs["data-autocomplete-widget-type"] = "text"
        attrs["data-autocomplete-widget-allow-new"] = str(self.allow_new).lower()
        return attrs
class AutocompleteWidget(forms.MultiWidget):
    """
    Combo widget rendering a visible representation text input plus a hidden
    id field holding the selected object's primary key.
    """
    def __init__(self, lookup_model, *args, **kwargs):
        self.lookup_model = lookup_model
        self.allow_new = kwargs.pop("allow_new", False)
        widgets = [
            AutocompleteInputWidget(
                lookup_model=lookup_model,
                allow_new=self.allow_new,
                attrs=kwargs.get("attrs"),
            ),
            forms.HiddenInput(attrs={"data-autocomplete-widget-type": "hidden"}),
        ]
        super(AutocompleteWidget, self).__init__(widgets, *args, **kwargs)
    def decompress(self, value):
        # Split the stored pk into [display object, pk] for the sub-widgets.
        if value:
            repr = self.lookup_model.model_class.objects.get(pk=value)
            return [repr, value]
        return [None, None]
    def value_from_datadict(self, data, files, name):
        # MultiWidget returns [text, pk]; unless new values are allowed, only
        # the hidden pk is meaningful.
        value = super(AutocompleteWidget, self).value_from_datadict(data, files, name)
        if not self.allow_new:
            return value[1]
        return value
| digris/openbroadcast.org | website/apps/search/forms/widgets.py | widgets.py | py | 1,952 | python | en | code | 9 | github-code | 90 |
18020272169 | n,m = map(int,input().split())
a = []
b = []
for _ in range(n):
a.append(input())
for _ in range(m):
b.append(input())
for i in range(n-m+1):
if b[i] in a[i]:
for x in range(m-1):
if b[i+x] not in a[i+x]:
print("No")
exit()
print("Yes")
exit()
else:
print("No")
exit() | Aasthaengg/IBMdataset | Python_codes/p03804/s641451151.py | s641451151.py | py | 318 | python | en | code | 0 | github-code | 90 |
"""
Wrappers for controls used in widgets
"""
import logging
import sys
import warnings
import weakref
from collections.abc import Sequence
import math
import numpy as np
from AnyQt import QtWidgets, QtCore, QtGui
from AnyQt.QtCore import Qt, QSize, QItemSelection, QSortFilterProxyModel
from AnyQt.QtGui import QColor, QWheelEvent
from AnyQt.QtWidgets import QWidget, QListView, QComboBox
from orangewidget.utils.itemdelegates import (
BarItemDataDelegate as _BarItemDataDelegate
)
# re-export relevant objects
from orangewidget.gui import (
OWComponent, OrangeUserRole, TableView, resource_filename,
miscellanea, setLayout, separator, rubber, widgetBox, hBox, vBox,
comboBox as gui_comboBox,
indentedBox, widgetLabel, label, spin, doubleSpin, checkBox, lineEdit,
button, toolButton, radioButtons, radioButtonsInBox, appendRadioButton,
hSlider, labeledSlider, valueSlider, auto_commit, auto_send, auto_apply,
deferred,
# ItemDataRole's
BarRatioRole, BarBrushRole, SortOrderRole, LinkRole,
IndicatorItemDelegate, BarItemDelegate, LinkStyledItemDelegate,
ColoredBarItemDelegate, HorizontalGridDelegate, VerticalItemDelegate,
VerticalLabel, tabWidget, createTabPage, table, tableItem,
VisibleHeaderSectionContextEventFilter,
checkButtonOffsetHint, toolButtonSizeHint, FloatSlider,
CalendarWidgetWithTime, DateTimeEditWCalendarTime,
ControlGetter, VerticalScrollArea, ProgressBar,
ControlledCallback, ControlledCallFront, ValueCallback, connectControl,
is_macstyle
)
from orangewidget.utils.itemmodels import PyTableModel
try:
# Some Orange widgets might expect this here
# pylint: disable=unused-import
from Orange.widgets.utils.webview import WebviewWidget
except ImportError:
pass # Neither WebKit nor WebEngine are available
import Orange.data
from Orange.widgets.utils import getdeepattr, vartype
from Orange.data import \
ContinuousVariable, StringVariable, TimeVariable, DiscreteVariable, \
Variable, Value
__all__ = [
# Re-exported
"OWComponent", "OrangeUserRole", "TableView", "resource_filename",
"miscellanea", "setLayout", "separator", "rubber",
"widgetBox", "hBox", "vBox", "indentedBox",
"widgetLabel", "label", "spin", "doubleSpin",
"checkBox", "lineEdit", "button", "toolButton", "comboBox",
"radioButtons", "radioButtonsInBox", "appendRadioButton",
"hSlider", "labeledSlider", "valueSlider",
"auto_commit", "auto_send", "auto_apply", "ProgressBar",
"VerticalLabel", "tabWidget", "createTabPage", "table", "tableItem",
"VisibleHeaderSectionContextEventFilter", "checkButtonOffsetHint",
"toolButtonSizeHint", "FloatSlider", "ControlGetter", "VerticalScrollArea",
"CalendarWidgetWithTime", "DateTimeEditWCalendarTime",
"BarRatioRole", "BarBrushRole", "SortOrderRole", "LinkRole",
"BarItemDelegate", "IndicatorItemDelegate", "LinkStyledItemDelegate",
"ColoredBarItemDelegate", "HorizontalGridDelegate", "VerticalItemDelegate",
"ValueCallback", 'is_macstyle',
# Defined here
"createAttributePixmap", "attributeIconDict", "attributeItem",
"listView", "ListViewWithSizeHint", "listBox", "OrangeListBox",
"TableValueRole", "TableClassValueRole", "TableDistribution",
"TableVariable", "TableBarItem", "palette_combo_box", "BarRatioTableModel"
]
log = logging.getLogger(__name__)
def palette_combo_box(initial_palette):
    """Return a QComboBox listing continuous palettes, preselecting
    `initial_palette`."""
    # Imported locally to avoid a circular import at module load time.
    from Orange.widgets.utils import itemmodels
    cb = QComboBox()
    model = itemmodels.ContinuousPalettesModel()
    cb.setModel(model)
    cb.setCurrentIndex(model.indexOf(initial_palette))
    cb.setIconSize(QSize(64, 16))
    return cb
def createAttributePixmap(char, background=Qt.black, color=Qt.white):
    """
    Create a QIcon with a given character, rendered at several sizes
    (13 to 64 pixels) so Qt can pick the best match for the display.

    :param char: The character that is printed in the icon
    :type char: str
    :param background: the background color (default: black)
    :type background: QColor
    :param color: the character color (default: white)
    :type color: QColor
    :rtype: QIcon
    """
    icon = QtGui.QIcon()
    for size in (13, 16, 18, 20, 22, 24, 28, 32, 64):
        pixmap = QtGui.QPixmap(size, size)
        pixmap.fill(Qt.transparent)
        painter = QtGui.QPainter()
        painter.begin(pixmap)
        painter.setRenderHints(painter.Antialiasing | painter.TextAntialiasing |
                               painter.SmoothPixmapTransform)
        painter.setPen(background)
        painter.setBrush(background)
        # Margins scale with the icon size to keep proportions consistent.
        margin = 1 + size // 16
        text_margin = size // 20
        rect = QtCore.QRectF(margin, margin,
                             size - 2 * margin, size - 2 * margin)
        # Rounded square background, then the character centered on top.
        painter.drawRoundedRect(rect, 30.0, 30.0, Qt.RelativeSize)
        painter.setPen(color)
        font = painter.font()  # type: QtGui.QFont
        font.setPixelSize(size - 2 * margin - 2 * text_margin)
        painter.setFont(font)
        painter.drawText(rect, Qt.AlignCenter, char)
        painter.end()
        icon.addPixmap(pixmap)
    return icon
class __AttributeIconDict(dict):
    """Lazily-populated mapping from variable type codes to QIcons.

    Icons cannot be created before the QApplication exists, so the entries
    are built on the first lookup instead of at import time.
    """
    def __getitem__(self, key):
        if not self:
            # First access: build one icon per variable type plus a fallback.
            for tpe, char, col in ((vartype(ContinuousVariable("c")),
                                    "N", (202, 0, 32)),
                                   (vartype(DiscreteVariable("d")),
                                    "C", (26, 150, 65)),
                                   (vartype(StringVariable("s")),
                                    "S", (0, 0, 0)),
                                   (vartype(TimeVariable("t")),
                                    "T", (68, 170, 255)),
                                   (-1, "?", (128, 128, 128))):
                self[tpe] = createAttributePixmap(char, QtGui.QColor(*col))
        if key not in self:
            # Accept Variable instances directly; unknown keys map to "?".
            key = vartype(key) if isinstance(key, Variable) else -1
        return super().__getitem__(key)
#: A dict that returns icons for different attribute types. The dict is
#: constructed on first use since icons cannot be created before initializing
#: the application.
#:
#: Accepted keys are variable type codes and instances
#: of :obj:`Orange.data.Variable`: `attributeIconDict[var]` will give the
#: appropriate icon for variable `var` or a question mark if the type is not
#: recognized
attributeIconDict = __AttributeIconDict()
def attributeItem(var):
    """
    Return an (icon, name) pair for inserting a variable into a combo or
    list box.

    :param var: variable
    :type var: Orange.data.Variable
    :rtype: tuple with QIcon and str
    """
    icon = attributeIconDict[var]
    return icon, var.name
class ListViewWithSizeHint(QListView):
    """QListView whose preferred size can be fixed via the constructor."""
    def __init__(self, *args, preferred_size=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Accept a (width, height) tuple as a convenience.
        if isinstance(preferred_size, tuple):
            preferred_size = QSize(*preferred_size)
        self.preferred_size = preferred_size
    def sizeHint(self):
        # Fall back to Qt's default hint when no preferred size was given.
        return self.preferred_size if self.preferred_size is not None \
            else super().sizeHint()
def listView(widget, master, value=None, model=None, box=None, callback=None,
             sizeHint=None, *, viewType=ListViewWithSizeHint, **misc):
    """Insert a list view backed by `model`, optionally synchronized with the
    master's attribute named `value` (selection indices)."""
    if box:
        bg = vBox(widget, box, addToLayout=False)
    else:
        bg = widget
    view = viewType(preferred_size=sizeHint)
    # If the view already wraps a proxy model, plug the model underneath it.
    if isinstance(view.model(), QSortFilterProxyModel):
        view.model().setSourceModel(model)
    else:
        view.setModel(model)
    if value is not None:
        # Two-way binding between the view's selection and master.<value>.
        connectControl(master, value, callback,
                       view.selectionModel().selectionChanged,
                       CallFrontListView(view),
                       CallBackListView(model, view, master, value))
    misc.setdefault('uniformItemSizes', True)
    miscellanea(view, bg, widget, **misc)
    return view
def listBox(widget, master, value=None, labels=None, box=None, callback=None,
            selectionMode=QtWidgets.QListWidget.SingleSelection,
            enableDragDrop=False, dragDropCallback=None,
            dataValidityCallback=None, sizeHint=None, **misc):
    """
    Insert a list box.
    The value with which the box's value synchronizes (`master.<value>`)
    is a list of indices of selected items.
    :param widget: the widget into which the box is inserted
    :type widget: QWidget or None
    :param master: master widget
    :type master: OWWidget or OWComponent
    :param value: the name of the master's attribute with which the value is
        synchronized (list of ints - indices of selected items)
    :type value: str
    :param labels: the name of the master's attribute with the list of items
        (as strings or tuples with icon and string)
    :type labels: str
    :param box: tells whether the widget has a border, and its label
    :type box: int or str or None
    :param callback: a function that is called when the selection state is
        changed
    :type callback: function
    :param selectionMode: selection mode - single, multiple etc
    :type selectionMode: QAbstractItemView.SelectionMode
    :param enableDragDrop: flag telling whether drag and drop is available
    :type enableDragDrop: bool
    :param dragDropCallback: callback function on drop event
    :type dragDropCallback: function
    :param dataValidityCallback: function that check the validity on enter
        and move event; it should return either `ev.accept()` or `ev.ignore()`.
    :type dataValidityCallback: function
    :param sizeHint: size hint
    :type sizeHint: QSize
    :rtype: OrangeListBox
    """
    if box:
        bg = hBox(widget, box, addToLayout=False)
    else:
        bg = widget
    lb = OrangeListBox(master, enableDragDrop, dragDropCallback,
                       dataValidityCallback, sizeHint, bg)
    lb.setSelectionMode(selectionMode)
    # Remember which master attributes this widget mirrors.
    lb.ogValue = value
    lb.ogLabels = labels
    lb.ogMaster = master
    if labels is not None:
        setattr(master, labels, getdeepattr(master, labels))
        master.connect_control(labels, CallFrontListBoxLabels(lb))
    if value is not None:
        # Wrap the stored selection in a ControlledList so that changing it
        # programmatically also updates the list box selection.
        clist = getdeepattr(master, value)
        if not isinstance(clist, (int, ControlledList)):
            clist = ControlledList(clist, lb)
            master.__setattr__(value, clist)
        setattr(master, value, clist)
        connectControl(master, value, callback, lb.itemSelectionChanged,
                       CallFrontListBox(lb), CallBackListBox(lb, master))
    miscellanea(lb, bg, widget, **misc)
    return lb
class OrangeListBox(QtWidgets.QListWidget):
    """
    List box with drag and drop functionality. Function :obj:`listBox`
    constructs instances of this class; do not use the class directly.
    .. attribute:: master
        The widget into which the listbox is inserted.
    .. attribute:: ogLabels
        The name of the master's attribute that holds the strings with items
        in the list box.
    .. attribute:: ogValue
        The name of the master's attribute that holds the indices of selected
        items.
    .. attribute:: enableDragDrop
        A flag telling whether drag-and-drop is enabled.
    .. attribute:: dragDropCallback
        A callback that is called at the end of drop event.
    .. attribute:: dataValidityCallback
        A callback that is called on dragEnter and dragMove events and returns
        either `ev.accept()` or `ev.ignore()`.
    .. attribute:: defaultSizeHint
        The size returned by the `sizeHint` method.
    """
    def __init__(self, master, enableDragDrop=False, dragDropCallback=None,
                 dataValidityCallback=None, sizeHint=None, *args):
        """
        :param master: the master widget
        :type master: OWWidget or OWComponent
        :param enableDragDrop: flag telling whether drag and drop is enabled
        :type enableDragDrop: bool
        :param dragDropCallback: callback for the end of drop event
        :type dragDropCallback: function
        :param dataValidityCallback: callback that accepts or ignores dragEnter
            and dragMove events
        :type dataValidityCallback: function with one argument (event)
        :param sizeHint: size hint
        :type sizeHint: QSize
        :param args: optional arguments for the inherited constructor
        """
        self.master = master
        super().__init__(*args)
        self.drop_callback = dragDropCallback
        self.valid_data_callback = dataValidityCallback
        if not sizeHint:
            self.size_hint = QtCore.QSize(150, 100)
        else:
            self.size_hint = sizeHint
        if enableDragDrop:
            self.setDragEnabled(True)
            self.setAcceptDrops(True)
            self.setDropIndicatorShown(True)
    def sizeHint(self):
        return self.size_hint
    def minimumSizeHint(self):
        return self.size_hint
    def dragEnterEvent(self, event):
        super().dragEnterEvent(event)
        # Delegate acceptance to the validity callback when one is installed;
        # otherwise only accept drags originating from another OrangeListBox.
        if self.valid_data_callback:
            self.valid_data_callback(event)
        elif isinstance(event.source(), OrangeListBox):
            event.setDropAction(Qt.MoveAction)
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        event.setDropAction(Qt.MoveAction)
        super().dropEvent(event)
        # Sync this box's master; when items moved between boxes, sync the
        # source box too, excluding the items that just moved here.
        items = self.update_master()
        if event.source() is not self:
            event.source().update_master(exclude=items)
        if self.drop_callback:
            self.drop_callback()
    def update_master(self, exclude=()):
        # Push the box's current item data back into master.<ogLabels>.
        control_list = [self.item(i).data(Qt.UserRole)
                        for i in range(self.count())
                        if self.item(i).data(Qt.UserRole) not in exclude]
        if self.ogLabels:
            master_list = getattr(self.master, self.ogLabels)
            if master_list != control_list:
                setattr(self.master, self.ogLabels, control_list)
        return control_list
    def updateGeometries(self):
        # A workaround for a bug in Qt
        # (see: http://bugreports.qt.nokia.com/browse/QTBUG-14412)
        # Guard against re-entrant calls while the update is in progress.
        if getattr(self, "_updatingGeometriesNow", False):
            return
        self._updatingGeometriesNow = True
        try:
            return super().updateGeometries()
        finally:
            self._updatingGeometriesNow = False
class ControlledList(list):
    """
    A class derived from a list that is connected to a
    :obj:`QListBox`: the list contains indices of items that are
    selected in the list box. Changing the list content changes the
    selection in the list box.
    """
    def __init__(self, content, listBox=None):
        super().__init__(content if content is not None else [])
        # Controlled list is created behind the back by gui.listBox and
        # commonly used as a setting which gets synced into a GLOBAL
        # SettingsHandler and which keeps the OWWidget instance alive via a
        # reference in listBox (see gui.listBox)
        if listBox is not None:
            self.listBox = weakref.ref(listBox)
        else:
            self.listBox = lambda: None

    def __reduce__(self):
        # cannot pickle self.listBox, but can't discard it
        # (ControlledList may live on)
        import copyreg
        return copyreg._reconstructor, (list, list, ()), None, self.__iter__()

    def _set_selected(self, row, state):
        """(De)select the list box item at `row`.

        Safely no-ops when there is no (live) list box; on deselect of a
        vanished item the whole selection is cleared, mirroring the previous
        behavior of `__setitem__`.
        """
        box = self.listBox()
        if box is None:
            # Weakref died or the list was created without a list box.
            return
        try:
            item = box.item(row)
        except RuntimeError:  # Underlying C/C++ object has been deleted
            item = None
        if item is not None:
            item.setSelected(state)
        elif not state:
            # Labels changed before clearing the selection: clear everything
            box.selectionModel().clear()

    # TODO ControlledList.item2name is probably never used
    def item2name(self, item):
        item = self.listBox().labels[item]
        if isinstance(item, tuple):
            return item[1]
        else:
            return item

    def __setitem__(self, index, item):
        if isinstance(index, int):
            self._set_selected(self[index], 0)
            # Bug fix: `item` is a row index (an int), not a QListWidgetItem;
            # it must be resolved through the list box before selecting
            # (previously `item.setSelected(1)` raised AttributeError).
            self._set_selected(item, 1)
        else:
            for i in self[index]:
                self._set_selected(i, 0)
            for i in item:
                self._set_selected(i, 1)
        super().__setitem__(index, item)

    def __delitem__(self, index):
        if isinstance(index, int):
            self._set_selected(self[index], 0)
        else:
            for i in self[index]:
                self._set_selected(i, 0)
        super().__delitem__(index)

    def append(self, item):
        super().append(item)
        # Bug fix: select via the list box instead of `item.setSelected(1)`.
        self._set_selected(item, 1)

    def extend(self, items):
        super().extend(items)
        for i in items:
            self._set_selected(i, 1)

    def insert(self, index, item):
        self._set_selected(item, 1)
        super().insert(index, item)

    def pop(self, index=-1):
        i = super().pop(index)
        self._set_selected(i, 0)
        # Bug fix: list.pop must return the removed element (was None).
        return i

    def remove(self, item):
        self._set_selected(item, 0)
        super().remove(item)
def comboBox(widget, master, value, box=None, label=None, labelWidth=None,
             orientation=Qt.Vertical, items=(), callback=None,
             sendSelectedValue=None, emptyString=None, editable=False,
             contentsLength=None, searchable=False, *, model=None,
             tooltips=None, **misc):
    # Thin deprecation shim: strip the retired 'valueType' keyword (warning
    # the caller) and forward everything else to gui_comboBox unchanged.
    if "valueType" in misc:
        misc.pop("valueType")
        warnings.warn("Argument 'valueType' is deprecated and ignored",
                      DeprecationWarning)
    return gui_comboBox(
        widget, master, value, box, label, labelWidth, orientation, items,
        callback, sendSelectedValue, emptyString, editable,
        contentsLength, searchable, model=model, tooltips=tooltips, **misc)


comboBox.__doc__ = gui_comboBox.__doc__
class CallBackListView(ControlledCallback):
    """Pushes the list view's selection into the widget's attribute."""

    def __init__(self, model, view, widget, attribute):
        super().__init__(widget, attribute)
        self.model = model
        self.view = view

    # triggered by selectionModel().selectionChanged()
    def __call__(self, *_):
        # This must be imported locally to avoid circular imports
        from Orange.widgets.utils.itemmodels import PyListModel

        selection = self.view.selectionModel().selection()
        view_model = self.view.model()
        if isinstance(view_model, QSortFilterProxyModel):
            # Map proxy rows back to the underlying model's rows.
            selection = view_model.mapSelectionToSource(selection)
        rows = [index.row() for index in selection.indexes()]
        if isinstance(self.model, PyListModel):
            # Store the model's items rather than bare row indices.
            rows = [self.model[row] for row in rows]
        if self.view.selectionMode() == self.view.SingleSelection:
            assert len(rows) <= 1
            rows = rows[0] if rows else None
        self.acyclic_setattr(rows)
class CallBackListBox:
    """Pushes the list box's selection into the widget's attribute."""

    def __init__(self, control, widget):
        self.control = control
        self.widget = widget
        self.disabled = 0

    def __call__(self, *_):  # triggered by selectionChange()
        if self.disabled or self.control.ogValue is None:
            return
        box = self.control
        selected_rows = [row for row in range(box.count())
                         if box.item(row).isSelected()]
        current = getdeepattr(self.widget, box.ogValue)
        if isinstance(current, int):
            # Single-selection attribute: store a bare index (or None).
            setattr(self.widget, box.ogValue,
                    selected_rows[0] if selected_rows else None)
        else:
            # Replace the content in place through list.__setitem__ (bypassing
            # ControlledList's override), then reassign to fire callbacks.
            list.__setitem__(current, slice(0, len(current)), selected_rows)
            setattr(self.widget, box.ogValue, current)
##############################################################################
# call fronts (change of the attribute value changes the related control)
class CallFrontListView(ControlledCallFront):
    def action(self, values):
        """Reflect `values` (row indices, Variables or display strings) in
        the view's selection."""
        view = self.control
        model = view.model()
        proxy = None
        if isinstance(model, QSortFilterProxyModel):
            proxy = model
            model = model.sourceModel()
        sel_model = view.selectionModel()

        if not isinstance(values, Sequence):
            values = [values]

        selection = QItemSelection()
        for value in values:
            if isinstance(value, int):
                row = value
            else:
                # Look the value up in the model: Variables (and None) match
                # by the TableVariable role, anything else by display string.
                if value is None or isinstance(value, Variable):
                    search_role = TableVariable
                else:
                    search_role = Qt.DisplayRole
                    value = str(value)
                row = None
                for i in range(model.rowCount()):
                    if model.data(model.index(i), search_role) == value:
                        row = i
                        break
            if row is not None:
                selection.select(model.index(row), model.index(row))
        if proxy is not None:
            # Translate source-model selection back into proxy coordinates.
            selection = proxy.mapSelectionFromSource(selection)
        sel_model.select(selection, sel_model.ClearAndSelect)
class CallFrontListBox(ControlledCallFront):
    def action(self, value):
        """Mirror the attribute value (an index or a list of indices) in the
        list box selection; None leaves the selection untouched."""
        if value is None:
            return
        box = self.control
        if isinstance(value, int):
            # Single index: exactly that row is selected.
            for row in range(box.count()):
                box.item(row).setSelected(row == value)
            return
        if not isinstance(value, ControlledList):
            # Replace a plain list with a ControlledList tied to this box so
            # later mutations keep the selection in sync.
            setattr(box.ogMaster, box.ogValue, ControlledList(value, box))
        for row in range(box.count()):
            should_select = row in value
            if should_select != box.item(row).isSelected():
                box.item(row).setSelected(should_select)
class CallFrontListBoxLabels(ControlledCallFront):
    unknownType = None

    def action(self, values):
        """Repopulate the list box from `values`; each value may be a
        (text, icon) tuple, a Variable, or a plain string."""
        self.control.clear()
        if not values:
            return
        for value in values:
            if isinstance(value, tuple):
                text, icon = value
                # An int icon indexes the shared attribute icon dictionary.
                if isinstance(icon, int):
                    item = QtWidgets.QListWidgetItem(attributeIconDict[icon], text)
                else:
                    item = QtWidgets.QListWidgetItem(icon, text)
            elif isinstance(value, Variable):
                item = QtWidgets.QListWidgetItem(*attributeItem(value))
            else:
                item = QtWidgets.QListWidgetItem(value)
            # Keep the original value retrievable from the item.
            item.setData(Qt.UserRole, value)
            self.control.addItem(item)
#: Role to retrieve Orange.data.Value
TableValueRole = next(OrangeUserRole)
#: Role to retrieve class value for a row
TableClassValueRole = next(OrangeUserRole)
#: Role to retrieve distribution of a column
TableDistribution = next(OrangeUserRole)
#: Role to retrieve the column's variable
TableVariable = next(OrangeUserRole)
class TableBarItem(_BarItemDataDelegate):
    #: Role supplying the bar fill ratio for a cell
    BarRole = next(OrangeUserRole)
    #: Role supplying the bar color for a cell
    BarColorRole = next(OrangeUserRole)
    __slots__ = ("color_schema",)
    # NOTE: the role defaults below bind the class attributes at class
    # definition time.
    def __init__(
            self, parent=None, color=QColor(255, 170, 127), width=5,
            barFillRatioRole=BarRole, barColorRole=BarColorRole,
            color_schema=None,
            **kwargs
    ):
        """
        :param QObject parent: Parent object.
        :param QColor color: Default color of the distribution bar.
        :param width: Bar pen width, forwarded to the base delegate as
            ``penWidth``.
        :param color_schema:
            If not None it must be an instance of
            :class:`OWColorPalette.ColorPaletteGenerator` (note: this
            parameter, if set, overrides the ``color``)
        :type color_schema: :class:`OWColorPalette.ColorPaletteGenerator`
        """
        super().__init__(
            parent, color=color, penWidth=width,
            barFillRatioRole=barFillRatioRole, barColorRole=barColorRole,
            **kwargs
        )
        self.color_schema = color_schema

    def barColorData(self, index):
        # When a palette is given, color the bar by the row's discrete class
        # value; otherwise fall back to the per-cell BarColorRole data.
        class_ = self.cachedData(index, TableClassValueRole)
        if self.color_schema is not None \
                and isinstance(class_, Value) \
                and isinstance(class_.variable, DiscreteVariable) \
                and not math.isnan(class_):
            return self.color_schema[int(class_)]
        return self.cachedData(index, self.BarColorRole)
# Applied once, as a side effect of importing this module.
from Orange.widgets.utils.colorpalettes import patch_variable_colors
patch_variable_colors()
class HScrollStepMixin:
    """
    Overrides default TableView horizontal behavior (scrolls 1 page at a time)
    to a friendlier scroll speed akin to that of vertical scrolling.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.horizontalScrollBar().setSingleStep(20)

    def wheelEvent(self, event: QWheelEvent):
        # Re-dispatch Shift+wheel (macOS) / Alt+wheel (elsewhere) coming from
        # a real mouse as a synthesized event with the modifiers stripped, so
        # the base view scrolls at the single-step speed set above.
        if hasattr(event, "source") and \
                event.source() == Qt.MouseEventNotSynthesized and \
                (event.modifiers() & Qt.ShiftModifier and sys.platform == 'darwin' or
                 event.modifiers() & Qt.AltModifier and sys.platform != 'darwin'):
            new_event = QWheelEvent(
                event.pos(), event.globalPos(), event.pixelDelta(),
                event.angleDelta(), event.buttons(), Qt.NoModifier,
                event.phase(), event.inverted(), Qt.MouseEventSynthesizedByApplication
            )
            event.accept()
            super().wheelEvent(new_event)
        else:
            super().wheelEvent(event)
class BarRatioTableModel(PyTableModel):
    """A model for displaying python tables.

    Adds a BarRatioRole that returns Data, normalized between the extremes.
    NaNs are listed last when sorting."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-column (vmin, vmax) pairs used to normalize BarRatioRole values;
        # populated by setExtremesFrom().
        self._extremes = {}

    def data(self, index, role=Qt.DisplayRole):
        if role == BarRatioRole and index.isValid():
            value = super().data(index, Qt.EditRole)
            if not isinstance(value, float):
                return None
            vmin, vmax = self._extremes.get(index.column(), (-np.inf, np.inf))
            # `or 1` guards against a zero span (constant column).
            value = (value - vmin) / ((vmax - vmin) or 1)
            return value
        # Value columns display the raw (edit-role) data.
        if role == Qt.DisplayRole and index.column() != 0:
            role = Qt.EditRole
        value = super().data(index, role)
        # Display nothing for non-existent attr value counts in column 1
        if role == Qt.EditRole \
                and index.column() == 1 and np.isnan(value):
            return ''
        return value

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        # First column sorts ascending by default, value columns descending.
        if role == Qt.InitialSortOrderRole:
            return Qt.DescendingOrder if section > 0 else Qt.AscendingOrder
        return super().headerData(section, orientation, role)

    def setExtremesFrom(self, column, values):
        """Set extremes for column's ratio bars from values"""
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore", ".*All-NaN slice encountered.*", RuntimeWarning)
                vmin = np.nanmin(values)
            if np.isnan(vmin):
                raise TypeError
        except TypeError:
            # Empty / all-NaN / non-numeric column: disable normalization.
            vmin, vmax = -np.inf, np.inf
        else:
            vmax = np.nanmax(values)
        self._extremes[column] = (vmin, vmax)

    def resetSorting(self, yes_reset=False):
        # pylint: disable=arguments-differ
        """We don't want to invalidate our sort proxy model everytime we
        wrap a new list. Our proxymodel only invalidates explicitly
        (i.e. when new data is set)"""
        if yes_reset:
            super().resetSorting()

    def _argsortData(self, data, order):
        # Case-insensitive ordering for string columns; stable mergesort
        # keeps tied rows in their original order.
        if data.dtype not in (float, int):
            data = np.char.lower(data)
        indices = np.argsort(data, kind='mergesort')
        if order == Qt.DescendingOrder:
            indices = indices[::-1]
        if data.dtype == float:
            # Always sort NaNs last
            return np.roll(indices, -np.isnan(data).sum())
        return indices
| biolab/orange3 | Orange/widgets/gui.py | gui.py | py | 28,085 | python | en | code | 4,360 | github-code | 90 |
40615653470 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 25.05.2021
@author: Feliks Kiszkurno
"""
import matplotlib.pyplot as plt
import numpy as np
import slopestabilitytools
def plot_depth_true_estim(ml_results, *, batch_name=''):
    """Scatter-plot predicted vs. true interface depth for each classifier.

    :param ml_results: mapping of classifier name -> result dict; the 'com'
        (combined) entry is skipped.  Each classifier entry is expected to
        hold ``['prediction'][batch_name]['depth_estim']`` and
        ``['depth_true']`` whose elements are either lists of depths or
        one-element sequences -- TODO confirm against callers.
    :param batch_name: key of the prediction batch; also tags the saved plot.
    """
    fig, ax = plt.subplots()

    colors = ['red', 'green', 'orange', 'blue', 'yellow', 'cyan', 'black',
              'purple', 'khaki', 'orange', 'gold', 'turquoise', 'orangered']
    colors_count = 0

    def _flatten(entries):
        # Each entry is either a list of depths or a one-element sequence.
        # Bug fix: the original appended the stale loop variable `value`
        # instead of the current entry's first element.
        flat = []
        for entry in entries:
            if isinstance(entry, list):
                flat.extend(entry)
            else:
                flat.append(entry[0])
        return flat

    for classifier in ml_results.keys():
        # Bug fix: `is not 'com'` compared identity with a string literal;
        # compare by value instead.
        if classifier != 'com':
            prediction = ml_results[classifier]['prediction'][batch_name]
            depth_estim = _flatten(prediction['depth_estim'])
            depth_true = _flatten(prediction['depth_true'])
            # Modulo guards against running out of colors for many classifiers.
            ax.plot(depth_estim, depth_true, marker='o',
                    color=colors[colors_count % len(colors)],
                    label=classifier, linestyle='None')
            colors_count += 1

    # Make both axes span the same range so the reference line is a 45° diagonal.
    y_lim = ax.get_ylim()
    x_lim = ax.get_xlim()
    ax_max = max(y_lim[1], x_lim[1])
    # Bug fix: the original kept the *larger* of the two minima.
    ax_min = min(y_lim[0], x_lim[0])
    ax.set_xlim([ax_min, ax_max])
    ax.set_ylim([ax_min, ax_max])

    ref_x = np.arange(ax_min, ax_max, 1)
    ref_y = ref_x
    ax.plot(ref_x, ref_y, color='black', label='reference')

    plt.ylabel('True depth [m]')
    plt.xlabel('Predicted depth [m]')
    ax.legend()
    plt.title('Predicted vs True interface depth')

    slopestabilitytools.save_plot(fig, 'All', '_true_vs_pred', subfolder='ML/',
                                  batch_name=batch_name)
| felikskiszkurno/SlopeStability | slopestabilityML/plot_depth_true_estim.py | plot_depth_true_estim.py | py | 2,070 | python | en | code | 4 | github-code | 90 |
70172693736 | #Warnungen deaktivieren. Muss fรผr TF geladen werden
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Load the MNIST dataset (28x28 grayscale digit images)
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Scale pixel values from int [0, 255] to float [0, 1]
train_images, test_images = train_images / 255.0, test_images / 255.0

# Network model: flatten -> dense(128, relu) -> dropout(0.2) -> 10 raw logits
"""
Model des Netzwerkes
"""
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10)
])

# from_logits=True because the last layer has no softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=5)

# Append a softmax so predictions come out as probabilities, not logits.
probability_model = tf.keras.Sequential([model,
                                         tf.keras.layers.Softmax()])

predictions = probability_model.predict(test_images)
# Most probable class for the first test image.
print(np.argmax(predictions[0]))
def plot_image(i, predictions_array, true_label, img):
    """Show test image `i` with its predicted class, confidence and true
    label; the caption is blue when the prediction is correct, red otherwise.

    :param i: index into `true_label` and `img`
    :param predictions_array: per-class probabilities for sample `i`
    :param true_label: array of all true labels
    :param img: array of all images
    """
    predictions_array, true_label, img = predictions_array, true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])

    plt.imshow(img, cmap=plt.cm.binary)

    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    # Bug fix: format the labels directly instead of wrapping them in
    # one-element lists, which rendered captions like "[5] 98% ([5])".
    plt.xlabel("{} {:2.0f}% ({})".format(predicted_label,
                                         100*np.max(predictions_array),
                                         true_label),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-chart the ten class probabilities for sample `i`; the predicted
    class bar is red and the true class bar blue."""
    true_label = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])

    top_class = np.argmax(predictions_array)
    bars[top_class].set_color('red')
    bars[true_label].set_color('blue')
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 1
num_cols = 1
num_images = num_rows*num_cols
plt.figure(figsize=(20, 20))
for i in range(num_images):
    # Left panel: the image with its caption; right panel: probability bars.
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions[i], test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
print("Footage test")
import cv2

# Video source - can be camera index number given by 'ls /dev/video*
# or can be a video file, e.g. '~/Video.avi'
cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    # Draw the 70x70 px capture region in red; the digit should be held there.
    cv2.rectangle(frame,(270,200),(340,270),(0,0,255),2)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame',gray)

    # Crop the marked region, downscale to the 28x28 network input size,
    # normalize to [0, 1] and classify with the trained model.
    roiImg = frame[200:270,270:340]
    img = cv2.resize(roiImg,(28,28))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    np_img = img.astype(np.float32)
    predictions = probability_model.predict(tf.reshape(np_img / 255.0, [-1,28,28,1]))
    print(np.argmax(predictions[0]))
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| Starhammer/Tensorflow_Test_Live | Pythn_KI_GUI/Pythn_KI_GUI.py | Pythn_KI_GUI.py | py | 3,396 | python | en | code | 0 | github-code | 90 |
from heapq import heappush, heappop

# Backwards DP with a min-heap: from cell 0 reach cell N in jumps of at most
# M, where landing on a cell marked '1' in s is effectively forbidden; print
# each jump length, or -1 when unreachable.  (Inferred from the code --
# TODO confirm against the original problem statement.)
N, M = map(int, input().split())
s = input()

INF = 10**9
heap = [(0, N)]   # (cost of reaching N from this position, position)
next_pos = {}     # best position to jump to from each cell
for pos in range(N - 1, -1, -1):
    # Peek at the cheapest destination reachable from `pos`; entries more
    # than M ahead can never be used again and are discarded for good.
    cost, dest = heap[0]
    while dest - pos > M:
        heappop(heap)
        cost, dest = heap[0]
    heappush(heap, (cost, dest))
    cost += 1 if s[pos] != '1' else INF
    heappush(heap, (cost, pos))
    next_pos[pos] = dest

if cost >= INF:
    print(-1)
else:
    # Walk the recorded choices forward from cell 0, printing jump lengths.
    x = 0
    while x != N:
        y = next_pos[x]
        print(y - x)
        x = y
33457982954 | # Definition for a binary tree node.
from typing import List, Optional
class TreeNode:
    """A single node of a binary tree."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """Collect the roots of all duplicate subtrees (LeetCode 652).

    Two subtrees are duplicates when they have the same structure and node
    values; each duplicate class is reported exactly once.
    """

    def __init__(self):
        # serialization string -> number of times that subtree has been seen
        self.subtrees = {}
        # one representative root node per duplicated subtree
        self.duplicates = []

    def dfs(self, root: "Optional[TreeNode]") -> str:
        """Serialize the subtree rooted at `root` post-order and record it.

        Appends `root` to ``self.duplicates`` exactly on the second
        occurrence of an identical serialization.
        """
        if not root:
            return "#"
        # Commas keep the encoding unambiguous (e.g. (12,#) vs (1,2)).
        key = "{},{},{}".format(
            root.val, self.dfs(root.left), self.dfs(root.right))
        seen = self.subtrees.get(key, 0)
        if seen == 1:  # second occurrence -> report once
            self.duplicates.append(root)
        self.subtrees[key] = seen + 1
        return key

    def findDuplicateSubtrees(self, root: "Optional[TreeNode]") -> "List[Optional[TreeNode]]":
        """Return one root node for every subtree occurring more than once.

        Bug fixes vs. the original: the right child is now traversed (it
        re-visited ``root.left``), subtree identity is decided by a full
        serialization instead of comparing node values against a set of node
        objects, and the result list is returned instead of printed.
        """
        self.subtrees = {}
        self.duplicates = []
        self.dfs(root)
        return self.duplicates
# Build a small sample tree (all values distinct, so no duplicate subtrees
# are expected here):
#         1
#        / \
#       2   3
#      / \ / \
#     4  5 6  7
left_child = TreeNode(2, TreeNode(4), TreeNode(5))
right_child = TreeNode(3, TreeNode(6), TreeNode(7))
root = TreeNode(1, left_child, right_child)

solution = Solution()
print(solution.findDuplicateSubtrees(root))
43663369583 | # Code for creating figure 11 in the report.
# Makes a linear scatter plot with noise to illustrate concept
# of optimal iterations
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
# Global font settings for all figure text.
# NOTE(review): family 'normal' is not a real font family name; matplotlib
# warns and falls back to the default -- probably 'sans-serif' was intended.
font = {'family': 'normal',
        'weight': 'bold',
        'size': 22}
matplotlib.rc('font', **font)

plt.xlabel('iterations')
plt.ylabel('points')

# Ten noisy points along y = x (the RNG is consumed x-then-y per step, as
# in the original).
noisy_x = []
noisy_y = []
for step in range(10):
    noisy_x.append(np.random.rand() + step)
    noisy_y.append(np.random.rand() + step)
noisy_y = np.array(noisy_y)
noisy_x = np.array(noisy_x)

marker_size = 80
plt.scatter(noisy_x, noisy_y, s=marker_size,
            label='Calculated optimal values')
# Second cluster: the same points shifted by +10 on both axes.
plt.scatter(10 + noisy_x, 10 + noisy_y, s=marker_size,
            label='Verification optimal values')
# The y = x line represents perfect predictions.
plt.plot(range(20), range(20), label='Predicted optimal values')
plt.legend()
plt.grid()
plt.show()
17680296521 | #!/usr/bin/env python
import logging
import subprocess
import argparse
import sys
import tempfile
import string
import shutil
def run_command(command):
    """Run a shell command line and return its (stdout, stderr) as text.

    :param command: shell command line, executed with ``shell=True``
    :raises Exception: when the command exits with a non-zero status
    """
    # Bug fix: the signature was ``def run_command(command=str)`` -- the
    # builtin ``str`` type as a default value; an annotation was intended.
    logging.info("Running: %s" % (command))
    # universal_newlines=True makes communicate() return str on Python 3,
    # which callers such as get_readgroup rely on when splitting on "\n".
    run = subprocess.Popen(command, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, shell=True,
                           universal_newlines=True)
    (stdout, stderr) = run.communicate()
    if run.returncode != 0:
        raise Exception("Tool fail %s\n%s" % (stdout, stderr))
    return (stdout, stderr)
def timing(command_name):
    """Append the current Unix timestamp to ``<command_name>_timing.txt``."""
    run_command("date +%s" + " >> %s_timing.txt" % (command_name))
def get_readgroup(bamfile):
    """Return the single ``@RG`` header line of `bamfile` (via samtools).

    :raises Exception: when the header does not contain exactly one @RG line
    """
    out, err = run_command("samtools view -H %s" % (bamfile))
    rgs = [line.rstrip("\n\r")
           for line in out.split("\n")
           if line.startswith("@RG\t")]
    if len(rgs) != 1:
        raise Exception("Wrong number of readgroups in BAM file")
    return rgs[0]
def get_rgid(rgline):
    """Extract the value of the ID field from a tab-separated ``@RG`` header
    line; returns None when no ID field is present."""
    return next((field[3:] for field in rgline.split("\t")
                 if field.startswith("ID:")), None)
def main(args):
    """Re-align a single-readgroup BAM: convert to FASTQ, run bwa mem, and
    sort the result into ``args.outbam``.

    :param args: namespace with ``inbam``, ``outbam``, ``refseq`` and
        ``workdir`` attributes (see the argparse setup below).
    """
    # The original read group line is carried over onto the new alignments.
    rgline = get_readgroup(args.inbam)
    # (Removed: an unused `rgid = get_rgid(rgline)` local.)
    work_dir = tempfile.mkdtemp(dir=args.workdir, prefix="bwa_mem_")
    # bamtofastq -> read-name/count filter -> bwa mem -> bamsort pipeline;
    # the backslashes continue the Python string, the '|' are shell pipes.
    template = "bamtofastq T=${tmpdir}/bamtofastq_tmp S=${tmpdir}/single.fq O=${tmpdir}/unmatched_1.fq O2=${tmpdir}/unmatched_2.fq exclude=QCFAIL,SECONDARY,SUPPLEMENTARY collate=1 filename=${inbam} | \
    ${filter_cmd} 2> ${inbam}.count.txt | \
    bwa mem -t 8 -p -T 0 -R '${rgline}' ${refseq} - | \
    bamsort inputformat=sam level=1 inputthreads=2 outputthreads=2 calmdnm=1 calmdnmrecompindetonly=1 calmdnmreference=${refseq} tmpfile=${tmpdir}/out.sorttmp O=${tmpdir}/out.bam 2> ${inbam}.bamsort_info.txt"
    #used to fix read name issue in bamtofastq output AND to produce a read counts file
    rg_filter_command = 'perl -e \'while(<>){$i++; $_ =~ s|@[01](/[12])$|\\1| if($i % 4 == 1); print $_;} $c = $i/4; warn "$c\n";\''
    # (Removed: an unused "outbam" key; the template never references it.)
    cmd = string.Template(template).substitute({
        "inbam" : args.inbam,
        "filter_cmd" : rg_filter_command,
        "rgline" : rgline,
        "refseq" : args.refseq,
        "tmpdir" : work_dir
    })
    run_command(cmd)
    # Publish the sorted BAM and clean up the scratch directory.
    shutil.move("%s/out.bam" % (work_dir), args.outbam)
    shutil.rmtree(work_dir)
if __name__ == "__main__":
    # Command-line entry point.
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--refseq", required=True)
    #parser.add_argument("-it", "--input-threads", default="2" )
    #parser.add_argument("-ot", "--output-threads", default="2" )
    parser.add_argument("-i", "--inbam", required=True)
    parser.add_argument("-o", "--outbam", required=True)
    parser.add_argument("-w", "--workdir", default="./")
    args = parser.parse_args()
    # NOTE: main() returns None, so the process exit status is always 0 on
    # success; failures surface as uncaught exceptions from run_command.
    sys.exit(main(args))
| ucscCancer/icgc-pcap-tools | bwamem.py | bwamem.py | py | 2,553 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.