Document
stringlengths 395
24.5k
| Source
stringclasses 6
values |
|---|---|
// --- DOM references (prefixed with "$" by this file's convention) ---
// Player choice buttons.
const $rock = document.querySelector('.player-rock');
const $paper = document.querySelector('.player-paper');
const $scissors = document.querySelector('.player-scissors');
// "Start Game" / reset button.
const $startGame = document.querySelector('.start-game');
// End-of-match summary element (shown/hidden via the "hidden" class).
const $finalScore = document.querySelector("#final-score");
// Per-round result message element.
const $results = document.querySelector("#results");
// Game board wrapper; the "locked" class disables play via CSS.
const $container = document.querySelector(".container");
// Valid moves; also used to map the computer's random index to a move.
const options = ['rock', 'paper', 'scissors'];
// Number-of-rounds input (first <input> on the page).
let $numberOfRounds = document.querySelector('input');
// --- Mutable match state, reset by newGame() ---
let playerScore = 0;
let computerScore = 0;
let round = 0;
function computerPlay(){
// Pick the computer's move uniformly at random, flash its highlight,
// and return the chosen move name ('rock' | 'paper' | 'scissors').
const pick = options[Math.floor(Math.random() * options.length)];
selected(`computer-${pick}`);
return pick;
}
function selected(item){
// Briefly highlight the element with class `item` (e.g. "player-rock")
// by applying the "selected" class for one second.
const el = document.getElementsByClassName(item)[0];
// Guard: if no matching element exists there is nothing to highlight.
if (!el) return;
// Use add/remove instead of toggle/toggle: with toggle, a second call
// within the 1s window could invert the state and leave the highlight
// stuck on (or flicker off early). add/remove is idempotent.
el.classList.add("selected");
setTimeout(function(){
el.classList.remove("selected");
}, 1000);
}
function playerPlay(playerSelection){
// Flash the player's chosen button, then resolve one round.
selected(`player-${playerSelection}`);
game(playerSelection);
}
// Wire up the three choice buttons; each click plays one round.
$rock.addEventListener('click', () => playerPlay('rock'));
$paper.addEventListener('click', () => playerPlay('paper'));
$scissors.addEventListener('click', () => playerPlay('scissors'));
function playRound(playerSelection, computerSelection) {
// Resolve one round of rock-paper-scissors.
// Returns a message containing "Win", "Lose", or "Draw!" — game()
// relies on those substrings to update the scores.
// Returns undefined for unrecognized moves (same as the original chain).
//
// Bug fix: the original returned "You Win! Rock beats Paper" for
// rock-vs-paper, but paper beats rock — that outcome is a loss.
const beats = { rock: 'scissors', paper: 'rock', scissors: 'paper' };
if (!(playerSelection in beats) || !(computerSelection in beats)){
return undefined;
}
if (playerSelection === computerSelection){
return "Draw!";
}
// Capitalize a move name for the result message ("rock" -> "Rock").
const cap = (s) => s.charAt(0).toUpperCase() + s.slice(1);
if (beats[playerSelection] === computerSelection){
return `You Win! ${cap(playerSelection)} beats ${cap(computerSelection)}`;
}
return `You Lose! ${cap(computerSelection)} beats ${cap(playerSelection)}`;
}
function finalScore(playerScore, computerScore){
// Summarize the finished match from the player's point of view.
// The higher score is always reported first.
if (playerScore === computerScore){
return `You have drawn ${playerScore} to ${computerScore}`;
}
return playerScore > computerScore
? `You have won ${playerScore} to ${computerScore}`
: `You have lost ${computerScore} to ${playerScore}`;
}
// Disable further play; the "locked" class blocks input via CSS.
function lock(){
$container.classList.add("locked");
}
// Re-enable play by removing the CSS "locked" class.
function unlock(){
$container.classList.remove("locked");
}
function newGame(){
// Reset all match state and UI so a fresh game can begin.
playerScore = 0;
computerScore = 0;
round = 0;
$results.innerText = '';
$finalScore.classList.add("hidden");
unlock();
}
function game(playerSelection){
// Play one round against the computer, update the scores and the
// per-round message, and end the match (show final score + lock the
// board) once the configured number of rounds has been played.
const numberOfRounds = Number($numberOfRounds.value);
// Guard against empty, zero, negative, or non-numeric round counts.
// (Number.isFinite also rejects NaN, which the original `<= 0` check
// let through — causing a match that could never end.)
if (!Number.isFinite(numberOfRounds) || numberOfRounds <= 0){
lock();
return;
}
round++;
const computerSelection = computerPlay();
const result = playRound(playerSelection, computerSelection);
$results.innerText = result;
// A result is exactly one of Win / Lose / Draw, so else-if is safe.
if (result.includes('Win')){
playerScore++;
} else if (result.includes('Lose')){
computerScore++;
}
// ">=" (not "==="): the match still ends if the user lowers the round
// count mid-game or enters a non-integer value.
if (round >= numberOfRounds){
$finalScore.classList.remove("hidden");
$finalScore.innerText = finalScore(playerScore, computerScore);
lock();
}
}
// The "Start Game" button resets everything for a new match.
$startGame.onclick = newGame;
|
STACK_EDU
|
PL/Ruby is a loadable procedural language for the PostgreSQL database system that enables the Ruby language to create functions and trigger procedures.
- ruby 1.8.7 or later (maybe 1.8.6 too)
- postgresql >= 7.3
All PostgreSQL headers need to be installed. Command (see
INSTALL in the
ruby extconf.rb
make
make install
You may need to specify some of the following extconf.rb options:
--with-pg-config=<location of the pg_config command of PostgreSQL>
Specifies the location of pg_config. e.g. --with-pg-config=/usr/local/bin/pg_config
Builds plruby for Greenplum instead of PostgreSQL.
By default plruby tries to convert a postgres type to a ruby class. This option gives the possibility to disable all conversions.
--with-suffix=<suffix to add>
Specifies a suffix to add to the extension module file.
ruby extconf.rb --with-suffix=_geo will create
Lowers the safe level which the plruby functions are run under. (default: 12; meaning the maximum)
Sets the timeout for each function call. (default: none)
Lowers the safe level which the main thread waiting for timeouts is run under. (default: 3) This option is read only when --with-timeout is given.
Test (and examples)
WARNING: if plruby was compiled without --disable-conversion you
must FIRST run
make install before
This will run the following two commands:
( cd test/plt; ./runtest ) ( cd test/plp; ./runtest )
plruby_test is created and then destroyed. Don't use
it if you have such a database.
Now you are ready to create the PL/Ruby language in PostgreSQL.
pg_language system catalog is private to each database,
the new language can be created only for individual databases, or in
the template1 database. In the latter case, it is automatically
available in all newly created databases.
The commands to create the new language are:
create function plruby_call_handler () returns language_handler as 'path-to-plruby-shared-lib' language 'C'; create trusted language 'plruby' handler plruby_call_handler lancompiler 'PL/Ruby';
trusted keyword on
create language tells PostgreSQL,
that all users (not only those with superuser privilege) are
permitted to create functions with
LANGUAGE 'plruby'. This is
absolutely safe, because there is nothing a normal user can do
with PL/Ruby, to get around access restrictions he/she has.
New releases and sources can be obtained from http://github.com/knu/postgresql-plruby
This extension module is copyrighted free software by Guy Decoux.
You can redistribute it and/or modify it under the same term as Ruby.
|
OPCFW_CODE
|
M: Ask HN: Is webgl a good idea that will never quite make it? - forgotAgain
For a while it seemed that all webgl needed for widespread acceptance was mobile Safari support. That hasn't happened. I'd like to take advantage of the capabilities it offers but the risk of losing any investment made in the technology seems very high at this point.<p>Is webgl a good idea that will never quite make it?
R: viraptor
I kind of like where webgl is right now. I wouldn't say it didn't make it.
Every time I see a website using webgl these days it makes sense, but I rarely
if ever think - "this could use some webgl".
So it really depends on what you mean by "will never quite make it". What
exactly did you expect to happen after Safari? I'm quite happy webgl isn't
everywhere like flash was ("I need a menu with mouseover effect - I know,
flash!")
R: forgotAgain
"will never quite make it" expresses my concerns that native apps have made
high performance graphics in web apps a secondary concern. Basically if you
want high performance graphics then you should develop a native app.
R: MayorOfMonkeys
WebGL is being utilized in Google Maps which has massive adoption. There are
even whole browser based game engines like PlayCanvas - see
[https://playcanvas.com](https://playcanvas.com) \- that rely on WebGL. You
have to remember, WebGL only gained full cross-device support a year ago.
You're only going to see more and more uses of the API in the coming months
and years.
R: angersock
It made it--just a lot of sites don't use it (
[http://caniuse.com/#search=webgl](http://caniuse.com/#search=webgl) ).
It's not a good idea (for certain reasons isomorphic to what makes OpenGL not
a good idea), but it works just fine.
|
HACKER_NEWS
|
On April 17, OpenStack® issued its ninth release, code-named Icehouse™, which came synchronously with Ubuntu 14.04 LTS. Frankly speaking, it seems that the vast and monolithic Neutron plugins are now history. The Icehouse release is certain to foster the Modular Layer 2 (ML2) plugin usage, previously introduced in Havana release, which should eventually reduce the overall complexity of new L2 networking technologies, as well as significantly simplify their support.
ML2 Plugin Overview
Basically, the ML2 plugin is a Neutron plugin that provides an interface for extensible sets of specific drivers of two types:
- Layer 2 network types drivers (AKA TypeDriver);
- Networking mechanisms (AKA MechanismDrivers) for connecting to networks of the aforementioned types.
In contrast to the monolithic Neutron plugins, multiple MechanismDrivers can be used concurrently and utilize existing L2 agents and/or interact with external devices or controllers. A MechanismDriver is called upon creation, update, and deletion of networks, sub-networks, or ports. For every event, there are two methods that get called – one during the database transaction, and another right after it. The device vendors can freely implement their own MechanismDrivers to provide vendor-specific hardware support in OpenStack. There are currently MechanismDrivers implementations for Open vSwitch, Linux Bridge, and Hyper-V. Each MechanismDriver uses resources/information provided by the selected TypeDriver.
As of now, the ML2 framework includes TypeDrivers for the following networks:
- local – provides connectivity between VMs and other devices running on the same node. Does not provide any connectivity between nodes;
- flat – provides connectivity between VMs and other devices using any IEEE 802.1D conformant physical network without the use of VLANs, tunneling, or other segmentation approaches. Only one Flat network can exist on each physical network;
- vlan – provides connectivity between VMs and other devices using any IEEE 802.1Q conformant physical network. The physical network becomes segmented via VLAN headers. Consequently, up to 4094 segments can exist on each physical network;
- gre and vxlan – provide connectivity between VMs and other devices via using tunnel endpoints for organizing networking segments.
Although the ML2 plugin was included into Havana release, the official Installation Guide for Ubuntu 12.04 (LTS) provides info on installation of monolithic OVS plugin only. The thing is that when you decide to move on to the more advanced ML2 plugin, it becomes apparent that it uses different database structure, and the easiest way to proceed is deleting any neutron resources (routers, subnets, networks, etc.) and simply creating a ML2 database from scratch.
Since these steps result in an empty Neutron database, the Icehouse release provides a migration tool as a part of live migration. It transfers the data from Open vSwitch or LinuxBridge plugin schema to the ML2 schema, thus preserving the provisioned resources.
I’d like to note that the Havana Installation Guide contains some confusing configuration inaccuracies. Thus, to avoid typical mistakes and save loads of time and nerves during the OpenStack configuration process, it’s advisable to look through users’ comments as well.
OpenDaylight Controller Support
Icehouse has also introduced support of OpenDaylight SDN controller. It is implemented as a lightweight ML2 MechanismDriver, acting as a REST proxy that passes all Neutron API calls to OpenDaylight. Meanwhile, the major functionality resides inside OpenDaylight controller itself.
Instead of using specific L2 agents on each compute node, the single ODL controller directly configures Open vSwitches by means of OVSDB Plugin.
As I have mentioned in my OpenDaylight Clustering Made Easier with OVSDB blog post, the OVSDB protocol uses JSON-RPC calls through an active or passive client connection to manipulate a physical or virtual switch that has OVSDB attached to it. The connection mode must be set up manually using the ovs-vsctl tool on networking and compute nodes.
The Devstack tool – shell script intended for building a complete OpenStack development environment – provides support for running OpenDaylight as an OpenStack Neutron plugin to manage Open vSwitches over the active client connection. Though I suppose passive connection mode should work as well.
The OpenDaylight’s OVSDB sub-system creates per-tenant networks using GRE or VXLAN tunnels based on information provided by OpenStack Neutron.
The way I see it, the next step toward improving OpenStack networking reliability is ensuring Open vSwitch proper behavior in case of OpenDaylight controller connection failure or even controller crash. It seems that ODL Clustering fits perfectly for this purpose.
|
OPCFW_CODE
|
Query: Does SH need to open browser? (SH unable to load in edge)
Summary
Does SH need to open the browser windows itself in order to work? (i.e. if I just open stash from a regular shortcut, or by running stash-win.exe will SH integrations work?)
SH sounds great and I was looking forward to using several of the features, however SH was unable to load Edge 'normally' and so would auto-close after a few seconds and so I couldn't use it at all
Further Info
I had stash already open in Edge, in my main window that had other tabs open also
I configured SH to use Edge in the initial setup wizard
When SH ran, it didn't open a new tab in my existing windows as I thought it would (e.g. this is how stash-win.exe works) and instead opened stash in a new private window (while I appreciate the privacy consideration, I would've preferred to just have it run in my main window/profile)
I open SH settings and saw there was an option to run SH in the default profile so selected this and restarted SH
SH then opened Edge, but not the default profile instead it opened a secondary profile I had setup in Edge
I then got error messages about webdriver not working and SH auto-closes a few seconds later...
Impact
Whilst running in a private session was not ideal, as I didn't have access to any of my other tabs, bookmarks, extensions, etc, it did at least open stash... however now that SH auto-closes after a few seconds I am not sure how to revert back :-/
If I could just use the SH features in my regular Edge session (i.e. the one that is already running, where I have various tabs (incl. several stash tabs open) already open that would be great! Is this possible?
I have stash set to not open window on launch as I already stash open and pinned in my browser for easy access, i.e. I just double-click stash-win.exe and then alt+tab to my browser and use a tab I already have open, rather than stash opening it for me... Can SH work like this too?
Thanks!
Thank you for the detail info !
SH will try to detect if a Stash is already running or not. If it's not running, it will run the stash-win.exe. Otherwise it will just open a browser instance.
You can close that browser and let SH runs in the background. SH will open a new browser instance when you click on its menu.
The problem of Edge is probably due to the Web driver update again. I haven't updated the code for web driver for a while, so it probably stop working for now.
The program do need some improving. It should try to attach an existing browser first, only when it fails it should create a new instance. I will do some updates and see if this is possible.
No problem, I am naturally very detailed (it takes me ages to write anything! 😀)
Thanks for replying straight away and looking into this - hopefully Microsoft don't make this too hard for you to fix or make you spend ages to get this working again! (I agree M/S are annoying but I really like the vertical tabs feature, which is only in Edge I think)
Extra info
After reading your message I did some additional checking (hopefully this helps)...
I checked that stash-win.exe was NOT running, opened Edge with stash tabs open, and then ran SH (AutoIt3_x64.exe)
I did this again, but this time made sure stash-win.exe WAS running before running AutoIt3_x64.exe
Both times SH opened a new Edge window on an unexpected Edge profile, and both times it did not seem to open a page for http://localhost:9999
Interestingly, in the second test (stash-win.exe WAS running before I started SH), when SH closed/crashed it also caused stash-win.exe to stop running too!
How to reset?
I am not sure how to reset SH
I tried running it without the Stash_Helper.a3x parameter, but this just asked for a .a3x file
AFAIK the only way to get back to the "welcome wizard" is to now uninstall SH completely and install it again, but if there is an easier way please let me know
Extra: Local Webdriver version info...
I was copying some files in my %AppData% folder and noticed the new Webdriver folder (which I guess is the file / folder that SH uses?)
I was curious if I could fix this myself / force it to upgrade to the latest version, but it seems like maybe SH has a hard-coded value for Webdriver or something?...
I deleted msedgedriver.exe and downloaded the latest one from https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver
When I ran SH, I still got the error saying SH was forcing an update of Webdriver, but it seemed like SH actually downgraded webdriver? 😕
The new version 2.4.9 added a new function to let you choose the browser again.
The error dialog will give you "Yes" "No" "Cancel" choice. Use "No", and next time Stash_Helper will let you choose a new browser.
This time use Firefox. I am really fed up with Chrome and Edge.
|
GITHUB_ARCHIVE
|
|Summary:||games/gnome-mahjongg: 'stage' tries to install directly to LOCALBASE|
|Product:||Ports & Packages||Reporter:||John Hein <jcfyecrayz>|
|Component:||Individual Port(s)||Assignee:||freebsd-gnome (Nobody) <gnome>|
|Severity:||Affects Some People||CC:||jcfyecrayz, tcberner|
Description John Hein 2021-07-29 20:50:00 UTC
'make -C /usr/ports/games/gnome-mahjongg stage' fails when run as a user that does not have write permission in LOCALBASE. It tries to install a file directly to LOCALBASE instead of the 'work/stage' dir. This worked fine in Nov 2020 when this port was updated to 3.38.2, but now fails - probably(?) due to changes in meson. ===> Staging for gnome-mahjongg-3.38.3 ===> Generating temporary packing list . . Running custom install script '/usr/local/bin/meson --internal gettext install --subdir=po --localedir=share/locale --pkgname=gnome-mahjongg' Running custom install script '/usr/local/bin/glib-compile-schemas /usr/local/share/glib-2.0/schemas' --- stdout --- --- stderr --- Failed to create file “/usr/local/share/glib-2.0/schemas/gschemas.compiled.43UL70”: Permission denied FAILED: install script '/usr/local/bin/glib-compile-schemas /usr/local/share/glib-2.0/schemas' exit code 1, stopped FAILED: meson-install /usr/local/bin/meson install --no-rebuild ninja: build stopped: subcommand failed. *** Error code 1
Comment 1 John Hein 2021-07-29 21:16:41 UTC
It seems that it's not a meson change, but it IS the change from 3.38.2 to 3.38.3 after all that is triggering the problem. Using the latest meson in ports (0.58.1) and 3.38.2 works fine ('make stage' as a regular user does not fail). 3.38.2 does not even try to run glib-compile-schemas whereas 3.38.3 runs glib-compile-schemas but does not obey DESTDIR ('/usr/local/bin/glib-compile-schemas /usr/local/share/glib-2.0/schemas'). Committer CC'd.
Comment 2 John Hein 2021-07-29 21:19:14 UTC
Here's the difference between 3.38.2 and .3 that causes 3.38.3 to run glib-compile-schemas: --- gnome-mahjongg-3.38.2/meson_options.txt 2020-10-03 10:16:30.594575400 -0600 +++ gnome-mahjongg-3.38.3/meson_options.txt 2020-11-01 02:39:32.629650000 -0700 @@ -1,2 +1,2 @@ -option('compile-schemas', type: 'feature', value : 'disabled', description : 'Compile GSettings schemas on install') -option('update-icon-cache', type: 'feature', value : 'disabled', description : 'Update icon cache') +option('compile-schemas', type: 'feature', value : 'enabled', description : 'Compile GSettings schemas on install') +option('update-icon-cache', type: 'feature', value : 'enabled', description : 'Update icon cache') That is, it was disabled for 3.38.2
Comment 3 John Hein 2021-07-29 21:20:22 UTC
Now... how to get meson to invoke glib-compile-schemas so it writes to work/stage for 'make stage'.
Comment 4 John Hein 2021-07-29 21:40:41 UTC
It looks like the module in meson's modules/gnome.py writes to 'prefix'/'datadir' if 'glib_compile_schemas' is enabled. Without any 'DESTDIR' in front of it. Maybe it's just right to disable glib_compile_schemas for 'stage'. 'install' will run it via Keywords/glib-schemas.ucl
Comment 5 John Hein 2021-07-29 22:26:38 UTC
I think this might be the right fix: CONFIGURE_ARGS+= -Dcompile-schemas=disabled -Dupdate-icon-cache=disabled We may also have always been missing USES+=gtk-update-icon-cache for this port as well - it does store files in share/icons.
Comment 6 John Hein 2021-07-29 22:30:16 UTC
(In reply to John Hein from comment #5) Never mind on the comment about USES+=gtk-update-icon-cache. INSTALLS_ICONS=yes takes care of that.
Comment 7 John Hein 2021-07-30 21:58:46 UTC
Created attachment 226809 [details] [patch] games/gnome-mahjongg - fix 'stage' writing to LOCALBASE This patch disables the attempt to run glib-compile-schemas and gtk-update-icon-cache during 'make stage'. Both of these were trying to write to LOCALBASE instead of the staging dir (work/stage) which triggered a 'permission denied' issue if running 'make stage' as a regular user. And both are already done anyway by the gnome ports infrastructure at 'install' time (or if installing from pkg, at the time the of the 'pkg install'). There is also a change in ordering in the Makefile for the USES line (prodded by portlint's whine). There is no need to bump the PORTREVISION - there is no change in the package. I tested in poudriere (testport -i to get interactive mode) by creating a regular user and doing 'make clean stage' as the regular user. Without the patch it fails (as expected) - with the patch it passes 'make stage stage-qa check-plist' as the regular user and 'make install' as root. QA: - portlint: ok (1 less warning after patch) - testport: ok (11-stable/amd64)
Comment 8 John Hein 2021-08-16 15:05:29 UTC
Maintainer timeout. Has anyone had a chance to look at this?
|
OPCFW_CODE
|
One of the strengths that LINQ to SQL has over the upcoming Entity Framework is its support for POCO, or Plain Old Class Objects. With LINQ to SQL, the framework doesn't require any particular base classes, interfaces or even reliance on the 3.5 framework for the resulting objects. I demonstrated this in the talk I did at the Teched Tweener weekend. Download the demo project to see this in action.
In this sample, I created two separate projects. The first class library project, I created only targeting the 2.0 framework. As a result the project can not use any LINQ specific techniques. This will also allow us to consume the resulting objects in projects that do not have access to the newer framework, or to all of the namespaces. This is particularly important in cases like Silverlight. To call attention to the differences in the projects, I declared the 2.0 project in C# and the LINQ enabled project in VB.
The 2.0 class library project consists of a single class file. This represents the Subject entity from the Linq In Action database.
Notice here, there are no interfaces, base classes or custom attributes. Excluding the attributes is critical here because the standard <Table> and <Column> attributes reside in the System.Data.Linq.Mapping namespace which would not be supported in the 2.0 framework.
Admittedly, it consists of three auto-implemented properties. Auto-implemented properties are used for brevity here and are consumable by the .Net 2.0 Framework because it relies on compiler features rather than runtime features.
Because we can't allow the class structure to include the attributes, we can't use the LINQ to SQL designer classes or SQL Metal to generate our classes. We do need to have a way to indicate the mapping to our data store. Here is where the XML Mapping file comes in handy.
When instantiating the DataContext, we can either rely on the inline attributes, or an external mapping file. Luckily, the XML mapping file's structure is concise and very similar to the attributes that would have been applied to the class otherwise. The main difference we need to do is indicate the Type that is used for a given table since we are not directly annotating the class itself. The other difference you may notice is that I don't include the Storage attribute. While there is nothing to stop me from using that in a Mapping source, we can't identify the backing field when using auto-implemented properties.
Now, with that out of the way, we can get to the LINQ portion of the work. Actually, that is quite easy. In our 3.5 enabled project, we will create a XmlMappingSource, pass it into the constructor of the DataContext and then fetch the object from this context as we would any other LINQ enabled class.
|
OPCFW_CODE
|
Code contracts .net - alternatives
Recently, I started to use Code contracts for .net. The idea of code contracts itself is great in my opinion, but the implementation is very unpleasant.
The main reasons I don't like it are:
I can use only methods like Contract.Require() inside my procedures. ContractAbbreviators have so many restrictions (like, I cannot put them in a separate assembly and I cannot use parameters) that it makes them less usable. There are no attributes, and no extension methods, so my code becomes very verbose. For example, if I just want to check that my return value of type Dictionary<string, string>is not null, I need to add a monster such as Contract.Ensure(Contract.Result<Dictionary<string, string>> != null). It's not even readable.
The static analyzer makes so many false alarms that I spend more time shutting it up than fixing actual problems.
It is extremely slow. Even though it has some cache, it takes a few minutes to analyze even a small project, and that makes it useless for incremental fixing. It just takes too long.
There are bugs in ccrewriter—it cannot chew half of assemblies, and it cannot survive .net 4.0 assemblies in the .net 4.5 runtime.
There is runtime/static checker dualism. When I had just started I thought it was as simple as Debug.Assert—you just add them everywhere you feel you need. But it turns out that I need to prove everything to the static checker, which is advanced, for sure, but the checker is stupid sometimes and can't resolve many obvious code constructions (like while). And there's no instrument to just say to the analyzer "I know what I'm doing, just ignore this contract violation".
You cannot relate conditions. For example, there is no way to explain to the static analyzer that if I check string.NotNullOrEmpty this includes string != null. Or if I have my own big procedure checking a file path, I cannot explain to the analyzer that it's for sure not null and not empty. The ContractAbbreviator attributes help a little bit, but all this verbosity goes there and it still looks dirty and stupid.
Code contracts is being developed very slowly, and even though it is open source now, as far as I know the code base is in a bad state.
Is there any advanced alternative to Code Contracts which has fewer flaws?
I don't know of any alternatives, but I perhaps I can address a few of your issues. I work in a team that uses contracts in all our code (~1000 modules) and run static analysis on every checkin as well as in VS.
Code ugliness. We have separate interfaces for (almost) everything, along with abstract classes implementing those, joined by ContractClass and ContractClassFor attributes. The contracts are in the abstract ContractClassFor-classes which makes the actual implementation code almost free from code contracts.
Static analyzer slowness is often due to not enough contracts, which forces the analyzer to do more work to find out if contracts can be broken.
Static checker false Alarms. I have had a few, but not to an extent where it has become a problem. Again, if you have too few contracts, the static checker might not be able to complete the analysis in time.
Static analyzer slowness can be debugged with these MSBuild options, which will show which methods take the most time to analyze.
msbuild myproject.sln /p:CodeContractsExtraAnalysisOptions="-show progress -stats=!! -stats slowMethods"
If you're certain that some condition always holds, then you can use Contract.Assume(condition) to instruct the static checker to assume that this is the case. Eg.
Contract.Assume(mystring != null && mystring != "") or maybe just Contract.Assume(!string.IsNullOrEmpty(mystring))
Regarding the use of Debug.Assert, I think it's a huge advantage to have the static checking instead of just making my application crash at the customers site. This way, I can remove the risk of having the application crash before I release the product. Maybe I misunderstand you, but I really don't think your comparison makes sense.
|
STACK_EXCHANGE
|
busted test stdlib extension is not working
With luacheck 1.0.0 I get
luacheck --std min --config tests/lua/.luacheckrc --codes tests/lua/command_log_spec.lua
Checking tests/lua/command_log_spec.lua 31 warnings
tests/lua/command_log_spec.lua:3:1: (W113) accessing undefined variable describe
tests/lua/command_log_spec.lua:4:2: (W113) accessing undefined variable it
tests/lua/command_log_spec.lua:5:3: (W143) accessing undefined field has_no.error of global assert
tests/lua/command_log_spec.lua:6:3: (W143) accessing undefined field has_no.error of global assert
tests/lua/command_log_spec.lua:8:2: (W113) accessing undefined variable it
tests/lua/command_log_spec.lua:9:3: (W143) accessing undefined field has.error of global assert
tests/lua/command_log_spec.lua:10:3: (W143) accessing undefined field has.error of global assert
tests/lua/command_log_spec.lua:12:2: (W113) accessing undefined variable it
tests/lua/command_log_spec.lua:13:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:14:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:15:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:16:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:18:2: (W113) accessing undefined variable it
tests/lua/command_log_spec.lua:19:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:20:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:22:2: (W113) accessing undefined variable it
tests/lua/command_log_spec.lua:24:13: (W113) accessing undefined variable spy
tests/lua/command_log_spec.lua:25:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:26:3: (W143) accessing undefined field spy of global assert
tests/lua/command_log_spec.lua:26:33: (W113) accessing undefined variable match
tests/lua/command_log_spec.lua:28:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:29:3: (W143) accessing undefined field spy of global assert
tests/lua/command_log_spec.lua:29:33: (W113) accessing undefined variable match
tests/lua/command_log_spec.lua:31:3: (W143) accessing undefined field has.error of global assert
tests/lua/command_log_spec.lua:32:3: (W143) accessing undefined field spy of global assert
tests/lua/command_log_spec.lua:32:33: (W113) accessing undefined variable match
tests/lua/command_log_spec.lua:34:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:35:3: (W143) accessing undefined field spy of global assert
tests/lua/command_log_spec.lua:36:3: (W143) accessing undefined field equal of global assert
tests/lua/command_log_spec.lua:37:3: (W143) accessing undefined field spy of global assert
tests/lua/command_log_spec.lua:37:33: (W113) accessing undefined variable match
This is in conflict with what the documentation says:
files["/tests//*_spec.lua"].std = "+busted"
That is test files should recognize the busted test environment globals.
Did you try this without the --std min override?
If you explicitly specify a standard to use, it doesn't seem right that luacheck would keep around any special overrides. If you want to use the defaults then use the defaults, or put what you want in the config, but I would fully expect the CLI args to override whatever is in the config (whither my project's or the defaults).
Does it do the same thing clearing the defaults if you change the base std in a .luacheckrc file?
No, setting the standard in .luacheckrc works fine.
That however means that the std option behaves differently between the command line and the config file.
I for one would expect it to. I would expect the config file to integrate a bit more with defaults and the CLI options to clobber everything. I also wouldn't expect to have to specify all the possible options to get them all cleared on the CLI if I want to use a different std that the default.
I could see making a case for the CLI just changing the same value as the config, but where would the option be to not have any file matching rules playing around with anything?
That's because you are aware of implementation details of the tool and know it's difficult or even impossible to override the default path rules from the commandline, and that gives rise to a need to have an override.
The problem here is that the override is implicit and undocumented rather than explicit.
I cannot find any reference to this change of behavior of the std option between configuration file and commandline, neither in description of the std option nor in the description of the default path overrides.
Ideally there should be a separate option for disabling the default path overrides, that's clear, transparent, and no less powerful.
Nonetheless in the situation that the existing behavior is quirky and changing it could break stuff it would be nice to at least clearly document it.
BTW can users use this +something syntax for their own path overrides?
I can't find that documented anywhere either.
Never mind, there is a notice about leading + in https://luacheck.readthedocs.io/en/stable/cli.html#sets-of-standard-globals
|
GITHUB_ARCHIVE
|
“Far too many candidates are indistinguishable in that they don’t have original ideas at all; instead, they regurgitate what they’ve heard at conferences or in group meetings to be the next obvious steps.” I agree with this. But to be honest, I think the reason many people just regurgitate what they’ve heard at conferences and group meetings is because it’s really REALLY hard to come up with new, creative, and feasible ideas. So what about those of us–like me–who often find it extremely difficult, if not impossible, to come up with original ideas? Does that mean we’re just not meant to be in science?”
I can’t say whether someone has a future in science or not without knowing them well.
But I know that sometimes people have ideas, they just don’t know that those are viable ideas or they overestimate how much novelty is needed for something to be considered a viable idea.
Often the best ideas come from picking on a scab that was formed over a question that hadn’t been successfully settled in a research area. If you are honest about what you do or don’t understand, you will find that there are things that everyone glosses over but they are far from understood. If you pull on that thread of the unknown, you can uncover a whole set of interesting questions.
Are you curious in general? Are you a person who enjoys talking about science with colleagues, listening to talks, meeting visitors? In general, ideas can form almost subconsciously if you provide your brain with enough varied input to chew on.
Follow your interests. If you are really, truly into something and are in the position to pursue it, then do so. Follow your gut.
Follow your gut, but also learn from those with more experience. Learn how to estimate how long things will take, whether they are a good use of your time, how much manpower is needed, how to predict potential pitfalls and what to do when they happen.
A few years ago, one of my then senior graduate students read a proposal and said something like, “Oh, this looks totally doable. I thought you would have to propose something much more out there.” We started talking about what a proposal is, that it’s not a pie in the sky, but work that builds on what is known in a logical, well-justified way. That it’s not just the crazy idea, but that you need to convince people that spending money on you is a good idea.
So if you wonder whether you have sufficient ability to generate ideas, ask yourself:
When you work as a grad student or postdoc, are you systematic about your work? Do you try to clarify every detail in your understanding? When your advisor asks you if you checked something, do you often find that you already thought of the issue on your own and checked it? These are good signs.
Do you enjoy listening to your lab mates present their work? How much do you know about your lab mates’ work? Would you be able to give their talk at a conference? (That’s an excellent exercise that I’ve been meaning to implement as training in my group — have students present each other’s work.) Building a broad base to your expertise is good.
When you go to a conference and listen to talks, do you feel like you have questions? Do you notice things that are unclear, or missing, or suspicious, or perhaps unusually insightful? Do conferences make your mind catch fire in the best sense? I often find that I have a lot of ideas after a good meeting.
Can you identify a good project the size of a single manuscript in a society journal? How about in a prestigious journal? Now can you identify a project the size of a typical grant in your field? A project is like a novel, where each paper is like a chapter. The project has to have an overarching idea, a set of coherently sewn together smaller questions, each answered within a small number of papers.
The unit of scientific communication is a research paper. Understanding what makes a paper, how much is enough for a paper, how to weave a paper-worthy scientific story — these are all key ingredients in starting to believe in your ability to generate ideas.
And, of course, some people are happiest when doing technical work within the context of a large project outlined by someone else. Such people are usually very detail oriented. Others wouldn’t want to have anyone else tell them what is to be done, may prefer big-idea thinking (and selling those ideas) to in-the-trenches technical work; these folks, if they are creative and good at marketing, they can go far; otherwise, it can be tough.
Blogosphere, what do you say?
|
OPCFW_CODE
|
M: Casetext Survey Finds 'Shocking' Level of Missing Relevant Cases in US Courts - raleighm
https://www.artificiallawyer.com/2018/06/08/casetext-survey-finds-shocking-level-of-missing-relevant-cases-in-us-courts/
R: pseingatl
Interesting, but there are a few things you need to know:
1) in federal court there is no right to a hearing on a particular motion.
Because federal courts are, statistically at least, criminal drug courts and
criminal cases are heard first, civil cases go to the back of the line. But
there is no guarantee at all of a hearing on a criminal motion, either.
2) "Relevancy" is in the eye of the beholder. A lawyer may leave a case out
because he has a different view of the facts.
3) "Precedent" means that portions of cases get repeated in other cases. In
the old days, say, pre-1975, it was easy to miss cases because case law,
though computerized, was not widely available in electronic format.
4) Time and money are issues. Many lawyers are sole practitioners. Federal
judges have full-time clerks and free access to electronic caselaw. That is
certainly not the case for all lawyers. I personally know of situations where
a judge has ordered at 7:00 pm that a matter be briefed for 8:00 am the next
day. With these kind of time constraints, it's not surprising that some
caselaw is missed.
5) None of this is "shocking." On the other hand, if you work in a restaurant,
you know what goes on in the kitchen.
|
HACKER_NEWS
|
ESXi Embedded Host Client | VMware Flings
Minor bug fixes and cosmetic changes Version 1. This avoids re-generating a MAC address when changing portgroups. Default disk provisioning to Thick when adding new disks, this brings host client consistent with vCenter Server. Move VM name to beginning of page title in full console vjware and tabs. Slight adjustment to resolution change size when fitting VM remote console to window to avoid vspheer resolution change event on initial load.
Override firefox input:disabled background color to make text in disabled inputs more readable.Download VMware vSphere Hypervisor for Free
Let hostd determine the filename of newly created disks. Don't restrict the number of disks a VM can have. Prevent regex replace of undefined ova property description. Add support for adding group permissions. Address an issue where new VM disks are doqnload stored on the correct datastore.Jan 18, · vSphere – Download Free ESXi License Keys views / Posted Last updated Jan 1, at AM | Published on Oct 9, vSphere -Difference between vSphere , , and vSphere VMware Software Manager makes it easy to find, select, and download the content needed to install or upgrade a VMware product or suite with the push of a button. Download Now Customers who have purchased VMware vSphere can download their relevant installation package from the product download tab below. Admittedly, the post’s title can be a little bit misleading since the converter has other uses apart from converting Hyper-V VMs to run on VMware products such as vSphere. It can also be used to convert VMs and physical machines (P2V) running either Windows and Linux to VMware .
Assign permissions to a user on one or more specific VMs, allowing them to log in and interact with only those VMs. It is no longer necessary to assign host-level read-only permissions to allow limited users to log in. Address some issues with the PCI devices table showing data in decimal rather than hexadecimal.
VMware vSphere Hypervisor – Install & Configure
Latest localization translations included. Fix issue where host client settings were not saved server-side with ESXi 5. Update to Angular 1. Add warning to About dialog if running version differs from installed version i. Fix a couple of issues in the startup script that might cause an error when updating the VIB on a live system. Improvements to Autostart configuration - ability to configure per-VM settings, make the operations more clear.
Fix permissions dialog issue in Safari. Improved validation on scheduling affinity field in VM editor. Optimizations to Navigator when loading xlient counts.Nov 03, · With vCenter Server Update 2b and later, VMware has provided two new links in the VMware vSphere Web Client called Open with VMRC and Download VMRC.I n vCenter Server the links are called Launch Remote Console and Download Remote karenchristine.co an HTML in-browser basic virtual machine console, you can continue to use the Launch Console link in vCenter Server . Jun 22, · Download and Installation steps for the vSphere Web Client Mount the vCenter Server installation ISO file. To download vCenter Server , see VMware Downloads. The installation wizard should open automatically. In the left pane, under Custom Install, click vSphere Web Client and then click Install. Jan 01, · vSphere released and we are evaluating the features of our brand new hypervisor with our evaluation license. Evaluation period is 60 days but do you think 60 days is not enough to keep your hands dirty with ESXi It will be time consuming to re-install ESXi .
Allow selection of USB 3. Address navigator object name display issue in Firefox on Linux. Better error reporting when uploading an OVA fails due to a svphere upload failure. Fix datastore rename notification. Fix an issue when removing USB devices from a virtual machine.
vSphere - Download Free ESXi License Keys
Add ability to show virtual machine performance metrics on host monitoring charts. Catch accidental double clicks in context menus. Add support for virtual disk sharing configuration. Fix issue where VM list does not update when filters are applied and VM states change. Fix an issue where double clicking on a VM console in Firefox would create drag and drop handles on the canvas.
Add checkbox to disable automatic VM power on after deployment, allowing hardware configuration. Display discovered subnets on the physical NIC summary page if available. Update localization catalogs. Fix a couple of bugs in init script leading to incorrect behavior when saving Host Client settings. Add a Host Summary warning when the host's license has expired. Refresh the datastore metrics when the host summary loads. Change Network chart units to bits per second. Show an error when trying to upload a file larger than 4GB using Internet Cljent.
Ignore missing datastores when editing a VM, e. Disable application session timeout while support bundle is generating. Fix a crash on refresh in the Physical NIC summary page. Better hardware sensor data, including translations of discrete values. Include sensor last updated time. Sort the sensor list by health so red sensors are at the top of the table.
Reserved Space for Virtualization
Add a VM's datastores to the VM list columns. Add quickfilters to the VM list and the Advanced settings list. Fix width of checkbox column in VM list. Support for inflating thin virtual disks to thick provisioned. Access is from within the datastore browser by right-clicking on a disk file. Support NFS 4. Fix a rare crash in Firefox related to showing the VM screen shot. Fix issue when clicking Refresh on the datastore list which would result in stale data being presented.
Fix an issue validating some paths that contain unicode characters. Fix truncation of vswitch name in portgroup list when vswitch name contains a dash. This download center features technical documentation, installation demos and classes to make your use of vSphere Hypervisor a success. Looking for ESXi 4? Download it here. Learn basic tips and tricks for troubleshooting various components of VMware vSphere Hypervisor.
VMware vSphere Hypervisor enables single-server partitioning and forms the foundation for a virtualized datacenter. By upgrading to more advanced editions of VMware vSphere, you can build upon this base virtualization layer to obtain centralized management, continuous application availability, and maximum operational efficiency.
VMware vSphere is the most widely deployed enterprise virtualization suite that offers customers:.
vSphere – Download Free ESXi License Keys
Please login or create an account to access VMware vSphere Hypervisor license and downloads. View the top articles related to troubleshooting and support for this product. Add keywords to narrow your search.
|
OPCFW_CODE
|
enable cpu adam op on powerpc architectures
I am trying to build DeepSpeed on a PowerPC architecture (Power9). I run into two issues.
First, the -march option is not supported in gcc on this platform, though it seems like -mcpu could be a substitute for it. To support that, one change replaces that C++ flag.
Second, the cpuid.h and x86intrin.h headers do not exist on this system. To work around that, I've protected including those behind an x86 compile guard.
This allows the CPU_ADAM op to build, however, I don't have a good way to verify that the resulting build is actually valid.
The asynchronous I/O op similarly has an -march=native flag in op_builder/async_io.py and includes for cpuid.h and x86intrin.h in csrc/aio/py_lib/deepspeed_py_copy.h.
@adammoody, thanks so much for porting DeepSpeed to PowerPC, this is very important and greatly appreciated. In terms of validating the builds for both cpu_adam and aio, is it possible to run the unit tests? To get started you may want to initially focus on the specific unit tests for cpu_adam and aio.
@adammoody, this PR is failing CI now because of code formatting issues. Please see this. Thanks!
@adammoody, thanks so much for porting DeepSpeed to PowerPC, this is very important and greatly appreciated.
Thanks. I'm happy to help where I can.
In terms of validating the builds for both cpu_adam and aio, is it possible to run the unit tests? To get started you may want to initially focus on the specific unit tests for cpu_adam and aio.
Yes, I'll try to give this a shot.
Has this by chance changed something in the nvme extension build process, as our nvme test started failing to jit build after 0.4.3 release: https://github.com/huggingface/transformers/issues/12715
update: false alarm, the problem was solved by rm -rf ~/.cache/torch_extensions/ as suggested by
@tjruwase
@tjruwase , is this the proper way to run the cpu_adam test?
>>: pytest tests/unit/test_cpu_adam.py
============================================================ test session starts =============================================================
platform linux -- Python 3.7.10, pytest-6.2.4, py-1.10.0, pluggy-0.13.1
rootdir: /path/to/DeepSpeed
collected 6 items
tests/unit/test_cpu_adam.py ...... [100%]
============================================================= 6 passed in 11.89s =============================================================
Config: alpha=0.001000, betas=(0.900000, 0.999000), weight_decay=0.000000, adam_w=1
Yes, it is. And it looks like a 6 tests passed.
Also, you can run verbose mode by adding -sv flags: pytest -sv tests/unit/test_cpu_adam.py.
For the aio test, I get:
============================================================ test session starts =============================================================
platform linux -- Python 3.7.10, pytest-6.2.4, py-1.10.0, pluggy-0.13.1
rootdir: /path/to/DeepSpeed
collected 28 items
tests/unit/test_aio.py ............................ [100%]
======================================================= 28 passed in 129.68s (0:02:09) =======================================================
I'm guessing both of these might be slow compared to x86, since on PowerPC, both fall back to use a non-SIMD implementation. Can you judge performance from this?
To get aio to actually build on my Redhat system, I also had to make this change:
diff --git a/op_builder/async_io.py b/op_builder/async_io.py
index 78aa2fe..9c6dde6 100644
--- a/op_builder/async_io.py
+++ b/op_builder/async_io.py
@@ -50,7 +50,7 @@ class AsyncIOBuilder(OpBuilder):
return ['-laio']
def is_compatible(self):
- aio_libraries = ['libaio-dev']
+ aio_libraries = ['libaio-dev', 'libaio-devel']
aio_compatible = self.libraries_installed(aio_libraries)
if not aio_compatible:
self.warning(
diff --git a/op_builder/builder.py b/op_builder/builder.py
index 3eeb4e4..bd6bfa8 100644
--- a/op_builder/builder.py
+++ b/op_builder/builder.py
@@ -151,9 +151,8 @@ class OpBuilder(ABC):
def libraries_installed(self, libraries):
valid = False
- check_cmd = 'dpkg -l'
for lib in libraries:
- result = subprocess.Popen(f'dpkg -l {lib}',
+ result = subprocess.Popen(f'rpm -q -l {lib}',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
That's related to the issue @stas00 opened here: https://github.com/microsoft/DeepSpeed/issues/1126
I wrote most of the code to handle 3 flavors of linux here:
https://github.com/microsoft/DeepSpeed/issues/1126#issuecomment-854101593
@adammoody, if you'd like to integrate and PR it that would be super helpful!
Solution #2 sounds good to me, and such a PR would be greatly appreciated. Thanks @stas00 and @adammoody.
I'm guessing both of these might be slow compared to x86, since on PowerPC, both fall back to use a non-SIMD implementation. Can you judge performance from this?
SIMD perf is not important to aio as it is an NVMe library.
Sure, I'll work on solution #2 to get started.
However, I suspect my conda setup doesn't quite fit properly with those three package managers, since my build isn't actually using the libaio from the rpm. Solution #4 to test a compile/link might work. Maybe something with compiler.has_function like described on this page would work?
https://www.cac.cornell.edu/wiki/index.php?title=Python_Distutils_Tips#How_to_find_if_a_library_exists
compiler=distutils.ccompiler.new_compiler()
if compiler.has_function('timer_create',libraries=('rt',)):
user_macros.append(('HAVE_POSIX_TIMER','1'))
I can investigate that, as well. In my build of DeepSpeed, I'm externally setting CC, CXX, and CFLAGS to point to conda-specific items (like the include path to libaio.h) and that gets picked up in the build.
I think there should be an easy way to do this for a typical user, which is what approach 2 was trying to do and then allow anybody to do it in their own way by overriding the default checking.
The problem with solution 4 is that we can't tell the user what to do if it fails, so we still need to know what system they are on and what library to tell them to install - as you can see they are differently named on different flavors of linux.
Chances are very low that users will already have this library installed.
Also may I ask why the rpm library doesn't work for you? No sudo access to install it?
Thanks @stas00 , I'll stick with the plan to implement solution 2.
In my case, I don't have sudo access on this particular system. The system admins have installed the libaio-devel rpm on the host system.
Having said that, in this case, I'm trying to avoid using the existing system install. That's because I'm using a base install of IBM's OpenCE for PyTorch and other software, which uses conda. Under this conda environment, it's best practice to only build against software installed within the same conda environment rather than using "external" system-installed packages. I don't think this will be a common problem for others, though other OpenCE users might be interested.
Thank you for explaining your particular needs.
This is great, so as suggested earlier the solution should include the generic automatic package-manager-based approach which should cater to the majority, and the manual solution for when either the automatic version doesn't cover the yet to be supported platform and when one doesn't want to use the former and wants to provide their own (your case). So there needs to be a way to tell the builder - I'm taking over, here is all the details you need.
Hi,
I wanted to add that a minor issue still persists on PowerPC architectures, cause by the following:
https://github.com/microsoft/DeepSpeed/blob/98cc35b6a8e53e15829ce64fd8da835db0c61da9/deepspeed/ops/adam/cpu_adam.py#L78
Which results in KeyError: 'vendor_id_raw'.
I fixed it by simply replacing it with self.cpu_vendor="PowerPC". I guess the string doesn't really matter.
Should I create a new pull request?
Should I create a new pull request?
This is an excellent idea, @FarzanT
|
GITHUB_ARCHIVE
|
Permission issue preventing posting on sandbox dockers
when creating containers from Sandbox artifacts, a permission error prevents certain transactions from being posted.
You need to give me a little more than this.
There are 1000s of containers started every day on sandbox artifacts, where this doesn't happen, so you need to at least provide script and output if you want any assistance.
Hello Freddy
Thank you for your response and sorry for the delay in mine.
I have the feeling that the phenomenon occurs with all sandbox containers.
Script:
$credential = New-Object pscredential 'admin', (ConvertTo-SecureString -String @.' -AsPlainText -Force)
New-BcContainer -accept_eula -containerName BC250W1T -artifactUrl https://bcartifacts-exdbf9fwegejdqak.b02.azurefd.net/sandbox/25.0.23364.25359/w1 -assignPremiumPlan -auth NavUserPassword -Credential $credential -isolation hyperv -shortcuts None -updateHosts
last artifact BC is used.
@.
Thanks again,
Pedro
De : Freddy Kristiansen @.>
Envoyé : jeudi 10 octobre 2024 07:49
À : microsoft/navcontainerhelper @.>
Cc : PERERA AGUILA Pedro @.>; Author @.>
Objet : Re: [microsoft/navcontainerhelper] Permission issue preveting posting on sandbox dockers (Issue #3680)
Closed #3680https://github.com/microsoft/navcontainerhelper/issues/3680 as completed.
Reply to this email directly, view it on GitHubhttps://github.com/microsoft/navcontainerhelper/issues/3680#event-14584170172, or unsubscribehttps://github.com/notifications/unsubscribe-auth/BFZ7PEREYWQAC7DD7GHTDGDZ2YIMDAVCNFSM6AAAAABO2D25QGVHI2DSMVQWIX3LMV45UABCJFZXG5LFIV3GK3TUJZXXI2LGNFRWC5DJN5XDWMJUGU4DIMJXGAYTOMQ.
You are receiving this because you authored the thread.Message ID<EMAIL_ADDRESS>
Yeah, well the error message leaves a lot for interpretation/guessing - which wastes a lot of time for people looking at this.
If you could describe what you are doing to provoke this - whether this happens in 24 versions as well and a lot more information, which makes it possible for us to run the script and do the same things - or maybe even say that this is expected with the license file used.
Thanks a lot,
important to specify that this problem does not exist on on-prem artifacts
De : Freddy Kristiansen @.>
Envoyé : jeudi 10 octobre 2024 10:27
À : microsoft/navcontainerhelper @.>
Cc : PERERA AGUILA Pedro @.>; Author @.>
Objet : Re: [microsoft/navcontainerhelper] Permission issue preveting posting on sandbox dockers (Issue #3680)
Yeah, well the error message leaves a lot for interpretation/guessing - which wastes a lot of time for people looking at this.
If you could describe what you are doing to provoke this - whether this happens in 24 versions as well and a lot more information, which makes it possible for us to run the script and do the same things - or maybe even say that this is expected with the license file used.
Reply to this email directly, view it on GitHubhttps://github.com/microsoft/navcontainerhelper/issues/3680#issuecomment-2404436234, or unsubscribehttps://github.com/notifications/unsubscribe-auth/BFZ7PERRDUDINOKYSV5DURTZ2Y23XAVCNFSM6AAAAABO2D25QGVHI2DSMVQWIX3LMV43OSLTON2WKQ3PNVWWK3TUHMZDIMBUGQZTMMRTGQ.
You are receiving this because you authored the thread.Message ID<EMAIL_ADDRESS>
with BC24 there is the same problem.
We try to post a simple General Journal.
The license file used is that of demo, cronus by default.
Thanks Freddy for your answers.. (we get around the problem by using on-prem artifacts)
De : Freddy Kristiansen @.>
Envoyé : jeudi 10 octobre 2024 10:27
À : microsoft/navcontainerhelper @.>
Cc : PERERA AGUILA Pedro @.>; Author @.>
Objet : Re: [microsoft/navcontainerhelper] Permission issue preveting posting on sandbox dockers (Issue #3680)
Yeah, well the error message leaves a lot for interpretation/guessing - which wastes a lot of time for people looking at this.
If you could describe what you are doing to provoke this - whether this happens in 24 versions as well and a lot more information, which makes it possible for us to run the script and do the same things - or maybe even say that this is expected with the license file used.
Reply to this email directly, view it on GitHubhttps://github.com/microsoft/navcontainerhelper/issues/3680#issuecomment-2404436234, or unsubscribehttps://github.com/notifications/unsubscribe-auth/BFZ7PERRDUDINOKYSV5DURTZ2Y23XAVCNFSM6AAAAABO2D25QGVHI2DSMVQWIX3LMV43OSLTON2WKQ3PNVWWK3TUHMZDIMBUGQZTMMRTGQ.
You are receiving this because you authored the thread.Message ID<EMAIL_ADDRESS>
|
GITHUB_ARCHIVE
|
Deevyfiction Dragon King’s Son-In-Law webnovel – Chapter 661 – Catching A Dragon? tub bruise propose-p1
Mr. Punch at the Play
Novel–Dragon King’s Son-In-Law–Dragon King’s Son-In-Law
Empty Promises: The CEO Cunning Bride
Chapter 661 – Catching A Dragon? ladybug spoon
With a chuckle, Hao Zhonghua walked even closer to Yue Yang and started off to buy issues from your h2o to the basin. Every time they have been poor, their ancient house was constantly filled in hard storms, plus they obtained to have the standard water out basin by basin.
“Ignore her! Only lucky persons could see dragons!” Grandma explained angrily.
Xie Yujia curved decrease and experimented with her best to dried up a floor by using a cloth. It was actually hefty effort, but she just washed her perspiration along with her sleeves with out a word of issue.
At this thought, Hao Zhonghua checked dejected and claimed, “Maybe it is merely a fantasy. Neglect it all of you try taking some sleep.”
Your house was filled in standard water.
Hao Zhonghua started the top and also the rear entrances to permit out some water, and Zhao Yanzi and also the Lu sisters had been very busy scooping within the liquid with little basins before passing those to Hao Ren who dumped the water away from the home.
With enjoy, one would sacrifice the immortal life. It turned out wonderful that Hao Ren developed more powerful, but she would shell out a normal living with him even though he were with no farming strength
lady in waiting synonym
Ranking behind Hao Ren, Duan Yao curled her mouth area. She understood Hao Ren’s identity, but it really was not needed to show him.
Hum! A thought took place to Hao Zhonghua all of a sudden. “Dragon degree! It might be a dragon degree!”
With appreciate, one would throw in the towel the immortal lifestyle. It was actually good that Hao Ren became much stronger, but she would commit a typical everyday life with him even though he were actually with no farming strength
Observing Yue Yang emerging to support and her angry facial area, Hao Zhonghua experienced responsible for fighting together so intensely. He went over which has a smile and draped a coat around her shoulder area.
Xie Yujia was jealous within the love between Hao Zhonghua and Yue Yang once they looked over the other.
“Zhonghua, it’s your fortune to see true dragons! Cheer up!” Grandma patted Hao Zhonghua for the back again.
After the sea instantly calmed down, Grandmother got hurried into Hao Ren’s space to confirm high on him. When she observed the damaged window along with the bare space, she was nervous.
“Yue Yang! Yue Yang! I’d like to see your expression after i convince you that dragons do exist!” he imagined.
“So good…” Experiencing the girls working together with Hao Ren on the living room area, Hao Zhonghua was slightly touched.
Yue Yang didn’t practice the topic at his casual words and phrases, and she carried on to clean up other small products.
“Gongzi, return and alter your garments,” the Lu sisters claimed inside of a minimal speech while they stood by Hao Ren.
“Where’s Congming?” Grandma inquired.
The deluge possessed pushed wide open the cupboard, and plenty of products obtained decreased into the drinking water, but she didn’t recall they had this item in the home. Hence, she inquired Hao Zhonghua the best places to put it.
Regardless that she was spoiled and vulnerable, Zhao Yanzi proved helpful tricky, and she would become a considerate wife.
Listening to Grandma’s terms, Hao Ren sensed overwhelmed, thinking why his father and grandma could see the dragons whilst his mom couldn’t. “Is it genuine that merely the fortunate versions can easily see dragons as Granny obtained stated?” he considered.
3rd army commanders
Once the seas all of a sudden calmed lower, Grandma experienced hurried into Hao Ren’s area to confirm up on him. When she observed the destroyed windows and the vacant area, she was anxious.
As an alternative to staying frightened, she was as enthusiastic as a youngster. Comparatively, she didn’t treatment a tad concerning the flooded house and spoiled home appliances.
Xie Yujia froze for just a moment and trudged through the standard water into Zhen Congming’s room, discovering that the latter was audio asleep in bed!
If Grandmother were actually ideal, and just the fortunate kinds could see dragons, then his research endeavor would appear like a scam…
He was obviously a little frustrated. He experienced obviously noticed several dragons struggling with within the skies, but Yue Yang stated she didn’t see anything at all but the heavy clouds above the beach as well as enormous water waves. It had been fortunate the shaking didn’t result in a tsunami in her view.
Abruptly, Hao Zhonghua grasped Grandma’s sentiments.
Bucholz and the Detectives
“We did actually have seen dragons, however not very evidently,” the Lu sisters reported when they noticed Hao Zhonghua’s frustrated face.
Translator: Noodletown Translated Editor: Noodletown Translated
Yue Yang heard their talk and walked straight down, picking up a big basin.
The ice cold house was now active.
Soon after fishing for 30 minutes, Very little Whitened shook the water off its fur and trotted into Hao Ren’s bedroom with these.
“Here! On this page!” Lu Linlin and Lu Lili waved and clarified.
“Here! On this page!” Lu Linlin and Lu Lili waved and answered.
Although the liquid ended up being exhausted, the ability was still out, and they would have to question the maintenance employees to correct it. Regarding the stuff which had been soaked in normal water, they might be aired out the future.
Mr. Punch at the Play
“Ruff… ruff…” Using the swamped family area for your pool area, Tiny Bright swam cheerfully about the lounger as well as the your kitchen.
Instead of remaining frightened, she was as ecstatic as a kid. Somewhat, she didn’t treatment a tad concerning the bombarded residence and spoiled gadgets.
|
OPCFW_CODE
|
using custom ALC for module written in PowerShell (not c#)
Thank you for this repo, lots of information in there!
I'm trying to understand if there's a way to adapt those approaches in modules written in PowerShell.
Let me describe my scenario, what I understand, and what I think I don't know...
Scenario
I would like to fork & rename the Powershell-Yaml module to experiment with some things, while I still use the Powershell-Yaml module (i.e. for building that module).
I'd like my fork to be able to use a newer version of the YamlDotNet assembly
I don't want to wrap YamlDotNet in another assembly
I think I need the assembly resolution to work at module parse time
What I think I understand
As the YamlDotNet library is compatible with netstandard 2.0, I can use the same dll for PS 5.1 AND PS 7+.
The dependency of YamlDotNet is at the 'class-level' (well, parse time for my new PS module) so it's closer to scenario 2 described in your repo here.
As per your sample:
the registration of AssemblyResolve needs to happen in a separate assembly (resolver.dll in this sample), which needs to be loaded before the above assembly"
What I'd like clarification/guidance on
Could the resolver DLL be loaded before parse time using RequiredAssemblies or ScriptToProcess in the ModuleManifest?
If I list the assemblies in the module manifest, like so, should it work?
RequiredAssemblies = @('resolver.dll','conflict.dll')
Will the loading order be respected (so that registration of AssemblyResolve happen first)?
Why don't you use this approach in your sample and prefer a nested module? Is it for the OnImport() call?
Are nested module (resolver.dll) loaded before the RootModule psm1 is parsed?
Since the module (let's call it MyModule) isn't a binary module, in IsAssemblyMatching() the requestingAssembly will be null? Does that mean I can't select which library will it resolve to, so anything calling it in my PowerShell session will now go to that newer lib?
Thanks for the clarification!
Unfortunately there is no built in way to do this. The closest you can get is to manually create your own ALC, hold on to instances of the Assembly objects within, and only reference types via Assembly.GetType(string name). I wouldn't really recommend trying that, but it might work.
@daxian-dbw did some pretty extensive research into the possibility of adding support for script based ALC isolation, you can read about the blockers that were discovered in his RFC on the topic. The conclusion was that it is not feasible to support the scenario.
@gaelcolas I'm so sorry that I didn't notice this issue until Steve mentioned it within the team.
@SeeminglyScience is right, you cannot use ALC for a script module, unless some fundamental support is built into powershell, such as in type resolution, assembly loading, and caching/caching invalidation.
I wrote another RFC in 2021 trying to tackle this problem, which has lots of details about what needs to be done and what problems we will be facing. The RFC was closed in the end because of some usability problems and potentially breaking changes in behavior (see the summary).
@daxian-dbw, @SeeminglyScience,
I wonder if instead of a general-purpose load/unload for PowerShell script modules themselves it would be possible to devise a pattern for a dependency wrapper module and make that easier.
For example, instead of being able to define RequiredAssemblies = @('YamlDotNet.dll') we could define RequiredAssemblies = @('MyModuleDeps.dll') which pulls in the dependencies we need, loads them via ALC, and surfaces them (in a new namespace? I'm not savvy enough about C# to have an idea on methodology here) for use in MyModule. We'd still need to define a binary module to go with our script modules, but we could abstract the dependency load/unload problem to that binary module.
One of the biggest drawbacks to the ALC model right now is that the majority of community modules are written purely in PowerShell without any C# and the community of practice doesn't have particularly strong experience with the toolchain for testing and publishing C# code.
Moving first-party module dependencies into the ALC (in my case, YamlDotNet for PlatyPS and MarkDig for the markdown renderer) goes a long way towards helping avoid conflicts for community modules, but doesn't fully resolve the problem. That leaves people with the choice of migrating to a fully binary module (much higher friction for typical PowerShell authors), trying to reimplement the functionality they need in PowerShell itself, or giving up on that functionality.
If there was a functional pattern for being able to get at those classes and methods without having to write the entire module in C#, I can think that would make using external libraries much less prone to conflicts, even if still higher friction than merely vendoring the library directly for usage.
As it stands right now, using any external libraries in a script module risks dependency conflicts and there doesn't seem to be a way around that without switching to a binary module all-up.
For example, instead of being able to define RequiredAssemblies = @('YamlDotNet.dll') we could define RequiredAssemblies = @('MyModuleDeps.dll') which pulls in the dependencies we need, loads them via ALC, and surfaces them for use in MyModule.
We already have the sample code for this pattern -- you basically will need a bridge assembly to wrap the dependency and expose the dependent types/APIs in a different way from the bridge assembly. Then the script module then can just depend on the bridge assembly.
However, having a bridge assembly to forward the calls to real dependency types/APIs has limitations and also increase complexity to the module design. It's non-trivial work to powershell users who are not too familiar with C#.
The dependency for script module should ideally be mitigated by PowerShellGet. In an ideal world, script modules would not need to ship dependency assemblies along with the module, but just declare it, and then it's the PowerShellGet's job to sort out the dependency and always use the latest version of the dependency assembly.
We already have the sample code for this pattern -- you basically will need a bridge assembly to wrap the dependency and expose the dependent types/APIs in a different way from the bridge assembly. Then the script module then can just depend on the bridge assembly.
Dongbo laid it out excellently here but just in case you read this thinking it can be applied to the script portion of a module - you still can't directly reference the dependencies from script using this pattern. You would need to write a bunch of proxy APIs in order to use them in the script portion.
|
GITHUB_ARCHIVE
|
request to add option or mode to save local history metrics to remote write endpoint
Proposal
Use case. Why is this important?
In situations where there is already a huge amount of historical data on local storage, for better persistence that data should be migrated to a remote storage system. In our case we have 3 DCs, each DC has several Prometheus instances, and all of them are federated together. All of them had generated huge amounts of data (we changed the retention period to a long period to achieve this) before remote-write storage became robust.
we already have tens of TB data on the disk, want to export/migrate these data to a third remote storage system,currently our choices are m3db. and the whole export/migration process should contains 2 steps :
read metrics from disk
write them to remote storage system
From the prometheus itself function sets, it already implemented both features, but currently prometheus can only save new data to remote write endpoint.
we have two options here to serve this:
add option to the remote write configure, which can set how long the historical data can be written to remote write endpoint
we can also add a sub-command to prometheus (for e.g. prometheus migrate), this is only works when migrating local historical data to remote write endpoint ; or
“Nice to have” is not a good use case. :)
plus we can also make it possible for prometheus to read all data from a remote read endpoint.
This is reasonable ; could be implemented in the tsdb tool, or in a third party tool. However that would be a lot of efforts.
This does sound a bit niche, and sending them the block would be more efficient. I think a 3rd party tool would be the best approach here.
Setting Pmaybe but it would better fit in an 3rd party tool. Let's think about it a few days.
This would have to be a separate tool since remote write is based on the WAL, it couldn't be done with the current remote write system.
is there any documents that show some details about the WAL, tsdb, and how these metrics are being stored to remote endpoint ?
currently I have a primitive code that can get metrics from the local storage, but failed to store them to the remote endpoint; the remote storage failed to store these metrics to its database.
my process is
get the MinTime and MaxTime from a block
tsdb connection ---->tsdbConn.Blocks() -------> each blockreader ----->blockMinT, blockMaxT
use blockMinT, blockMaxT as parameter, to create a storage.Querier,
sdbConn.Querier(ctx, blockMinT, blockMaxT) ---> storage.Querier
create a labelSelectParams and labelMatcher
pass labelSelectParams and labelMatcher to storageQuerier.Select, will get the timeSeriesSet, then get the label and sample timestamp and value
storageQuerier.Select(labelSelectParams, labelMatcher) ----> timeSeriesSet -----> lables and sample
then construct []prompb.TimeSeries with labels and sample
create prompb.WriteRequest with []prompb.TimeSeries
proto.Marshal and snappy.encode the prompb.WriteRequest
use http client to send the metric to remote endpoint
and I can see all the labels and samples(timpstamp and value), when sending metrics to remote endpoint is successfully with 200 status code, but remote endpoint failed to write the metrics to its storage, each time I only sent 1 sample to the endpoint
I am stuck here at the final step. I tcpdumped the packet from prometheus to the remote write endpoint, and the prompb.WriteRequest has the same structure; the only difference is that prometheus sent multiple samples but mine only one.
here is an example of my prompb.WriteRequest
prompb.WriteRequest{Timeseries:[]prompb.TimeSeries{prompb.TimeSeries{Labels:[]prompb.Label{prompb.Label{Name:"__name__", Value:"alertmanager_alerts", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"app", Value:"alertmanager", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"env", Value:"prod", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"instance", Value:"<IP_ADDRESS>:9093", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"job", Value:"alertmanager", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"netdatatype", Value:"inb", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"state", Value:"suppressed", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}, prompb.Label{Name:"type", Value:"service", XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}}, Samples:[]prompb.Sample{prompb.Sample{Value:1, Timestamp:1573594387949, XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}}, XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}}, XXX_NoUnkeyedLiteral:struct {}{}, XXX_unrecognized:[]uint8(nil), XXX_sizecache:0}
I suggest that we close this issue here and move to the developers mailing list https://groups.google.com/forum/#!forum/prometheus-developers where more people can help.
I already create a mail there, but seems none intrested, so I ask help here if someone can give me some guidence.
https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!topic/prometheus-users/hfXB7qHVASU
We have decided in the bug scrub to close this issue.
Remote write is better at tailing WAL. We would expect the remote write imports to work with snapshots/tsdb instead of remote write.
|
GITHUB_ARCHIVE
|
It seems like one of the parts is corrupted. In this case, try to contact the person who uploaded the multipart RAR file and kindly ask for a re-upload. I certainly like my RAR extractor, especially the way it can extract multi part RAR files by just selecting the first part. Do you want to ‘join’ one single file that is inside the multiparts? Then place all files of the set in the same folder and double-click the first file with the extension “. There is no difference.
Is there a way to fix a corrupted RAR file? And maybe there is not enough free memory or disk space. Sometimes this still doesn’t help, because the RAR files on the server might be damaged already. All other parts will be found and extracted automatically! You need to download all parts of such split archives to be able to begin with the unraring process, so go and get all the RAR files with the extensions “. Depending on how many parts have been generated, there may be a few or a few hundred of these “.
Is there a way to fix a multipart RAR file? Do I need to download all of the parts, before I extract?
Shutterstock PDv31 Windowsfullrar
Then double-check that you have all parts of the multipart archive and that they are all in the same folder. A multipart RAR file can additionally be encrypted and protected by a password. All other parts will be found and extracted automatically! What does RAR stand for?
Part 1 Tamil dub lesbian | Free HD Porn Videos
Please check this also. Simply install one of the above freeware programs, put all parts of the multipart RAR archive in the same directory and double-click the first part. It is recommended to put all the RAR parts in the same directory to speed up the unpacking process.
Put all the files from the multipart RAR in it. I stumbled upon some files with the extensions. So go and get all the RAR files with the extensions.
I would like to uncompress some multipart files that were compressed using the RAR algorithm.
Results for : torrent movie tamil
Today, however, I tried to open a RAR file, but it was not unpacked. How can I merge the two RAR parts together? All other files, e. Well, I have a strange problem when it comes to multi rar unpacking.
Then the following message comes up: RAR files belong to a set of RAR files which form a multipart archive, which you can not ‘unrar’ without all of its parts. Depending on how many parts have been generated, there may be a few or a few hundred of these files. I want to download a large multipart RAR file and before that, I would like to test the extraction of a split archive “. In this case, try to contact the person who uploaded the multipart RAR file and kindly ask for a re-upload, then repeat the steps above.
If you have downloaded all files needed, e. In this case, try to contact the person who uploaded the multipart RAR file and kindly ask for a re-upload. Other users reported that other download helpers, e.
ISO when extracting, but it disappears when the extractor is finished, and it asks me to enter info for the next archive. If you have downloaded all 17 “. Sometimes a multipart RAR file can be damaged and corrupted.
All other parts, e. I’m trying to play a part2.
A large RAR file broken into many smaller files. I have used the unrar tool for some time and it has always worked well. Make sure that there is enough free disk space available on your hard drive to extract the RAR files.
I was wondering maybe I need to download all of the files I wanted, all 17, before I start to open them?
Hi there, I have a multipart rar, 3 parts name. Maybe one or more of the RAR parts are corrupted or there is not enough memory or disk space.
|
OPCFW_CODE
|
Composer installs appear to be cached incorrectly
Hooray for introducing my own issues! This leads on from https://github.com/silverstripe-labs/silverstripe-travis-support/pull/35.
When running a travis build using the new --prefer-source option, Composer attempts to download the source version of the package and sometimes fails - falling back to the dist version.
Travis build log:
- Installing silverstripe/framework (3.3.x-dev de61551)
Cloning de6155188552003c294f115588dcdab9a79cef44
Failed to download silverstripe/framework from source: Failed to execute git checkout 'de6155188552003c294f115588dcdab9a79cef44' -- && git reset --hard 'de6155188552003c294f115588dcdab9a79cef44' --
fatal: Not a git repository (or any of the parent directories): .git
Now trying to download from dist
- Installing silverstripe/framework (3.3.x-dev de61551)
Downloading
Extracting archive
I’ve got no idea why that would fail, the commit definitely exists and --verbose doesn’t give any more info than the above. You can see from the rest of the log that some packages work and some don’t (it doesn’t seem to be related to whether they’re using tags or commit hashes).
I’ve copied the generated composer.json and run it locally and it works absolutely fine. That, and the Not a git repository error message, makes me think some caching is going on that’s downloading the dist copy when Composer thinks it’s downloading the source copy.
I don’t know if this is an issue with this repo, Travis, Composer, or me 😆.
It normally happens when rate limited; You can get around it sometimes by setting up a github api key against your travis repo.
Try doing this on https://travis-ci.org/bigfork/silverstripe-oauth-login/settings
And create your token from github. In the top menu to the right, click the tools icon (Account Settings).
Click on “Applications” in the sidebar (to the left). In the box with the title “Personal Access Tokens”, press the “Create new token” button.
Nothing is cached because travis ci instances are thrown away, so no caches persist between builds. You can get around it, but we opt to not do so, as we end up with stale builds more often than not...
Anyway, ping me if you get stuck. :)
I’m still struggling 😢. I added the token, but it’s still loading some repos from dist instead of source. I’ve opened an issue on the Composer repo too https://github.com/composer/composer/issues/5546
Yeah there's still some problem going on here.
Failed to download silverstripe/framework from source: Failed to execute git checkout '96b061ffffd472f917273b43790f5aaa2f9f89b4' -- && git reset --hard '96b061ffffd472f917273b43790f5aaa2f9f89b4' --
Ah, maybe there is an issue with composer... regarding that ticket you raised. Hope it bears some fruit. :)
|
GITHUB_ARCHIVE
|
Data and scripts from: Pathogen spillover driven by rapid changes in bat ecology. Food shortage regression tree model
No Access Until
These files contain compiled input data, output data and model script for the Food Shortage Regression Tree Model, as reported in Eby et al. (2022), Pathogen spillover driven by rapid changes in bat ecology. In Eby et al., we found: During recent decades, pathogens that originated in bats have become an increasing public health concern. A major challenge is to identify how those pathogens spill over into human populations to generate a pandemic threat. Many correlational studies associate spillover with changes in land use or other anthropogenic stressors, although the mechanisms underlying the observed correlations have not been identified. One limitation is the lack of spatially and temporally explicit data on multiple spillovers, and on the connections among spillovers, reservoir host ecology and behavior, and viral dynamics. We present 25 years of data on land-use change, bat behavior, and spillover of Hendra virus from Pteropodid bats to horses in subtropical Australia. These data show that bats are responding to environmental change by persistently adopting behaviors that were previously transient responses to nutritional stress. Interactions between land-use change and climate now lead to persistent bat residency in agricultural areas, where periodic food shortages drive clusters of spillovers. Pulses of winter flowering of trees in remnant forests appeared to prevent spillover. We developed integrative Bayesian network models based on these phenomena that accurately predicted the presence or absence of clusters of spillovers in each of 25 years. Our long-term study identifies the mechanistic connections among habitat loss, climate, and increased spillover risk. It provides a framework for examining causes of bat virus spillover and for developing ecological countermeasures to prevent pandemics.
Journal / Series
Volume & Issue
This research was developed with funding from the National Science Foundation (DEB-1716698), U.S. Defense Advanced Research Projects Agency (DARPA PREEMPT D18AC00031), and U.S. National Institute of Food and Agriculture (1015891). AJP was supported by an Australian Research Council DECRA fellowship (DE190100710).
Emerging Infectious Diseases; Viral Zoonoses; Bat Viruses; Land Use Induced spillover
Number of Workers
Based on Related Item
Has Other Format(s)
Part of Related Item
Eby, Peggy, Alison Peel, Andrew Hoegh, Wyatt Madden, John Giles, Peter Hudson, and Raina Plowright (2022) Pathogen spillover driven by rapid changes in bat ecology. Nature. https://doi.org/10.1038/s41586-022-05506-2
Link(s) to Related Publication(s)
Link(s) to Reference(s)
Previously Published As
CC0 1.0 Universal
|
OPCFW_CODE
|
Increase scan performance in Apache Hbase
I am working on an use case and help me in improving the scan performance.
Customers visiting our website are generated as logs and we will be processing it which is usually done by Apache Pig and inserts the output from pig into hbase table(test) directly using HbaseStorage. This will be done every morning. Data consists of following columns
Customerid | Name | visitedurl | timestamp | location | companyname
I have only one column family (test_family)
As of now I have generated random no for each row and it is inserted as row key for that table. For ex I have following data to be inserted into table
1725|xxx|www.something.com|127987834 | india |zzzz
1726|yyy|www.some.com|128389478 | UK | yyyy
If so I will add 1 as row key for first row and 2 for second one and so on.
Note : Same id will be repeated for different days so I chose random no to be row-key
while querying data from table where I use scan 'test', {FILTER=>"SingleColumnValueFilter('test_family','Customerid',=,'binary:1002')"} it takes more than 2 minutes to return the results.`
Suggest me a way so that I have to bring down this process to 1 to 2 seconds since I am using it in real-time analytics
Thanks
HBase is not designed for this kind of queries. Probably you can use MySQL instead?
As per the query you have mentioned, I am assuming you need records based on Customer ID. If it is correct, then, to improve the performance, you should use Customer ID as Row Key.
However, multiple entries could be there for single Customer ID. So, better design Row key as CustomerID|unique number. This unique number could be the timestamp too. It depends upon your requirements.
To scan the data in this case, you need to use PrefixFilter on row key. This will give you better performance.
Hope this help..
Hi maddy is there any changes I need to do in hbase configurations for better performance
Yes you can do the changes in HBase configurations for better results. But for that, I need to know more about the scenarios. However, a simple performance optimization is to set scanner caching feature. It can improve your performance a lot.
I am implementing a web-service in when we click a customerid (which is a result of other queries) it have to retrieve the records related to that id which is stored in hbase table
In this scenario, just try to design the row key as suggested earlier and use scanner caching while scanning.. All the best.. :)
I have one query whether hbase returns the records within 2 or 3 seconds
Yes. it returns depending upon the scenario like amount of data fetched, amount of data in tables, schema design etc.
There ill be max 1000 records for each id and there will be 300k id present
|
STACK_EXCHANGE
|
Sketch to Art
You could be an artist with AI
[Live Demo] Note: Only frontend is available now
Or you can Run with Docker in minutes
- Run with Docker
- Manual Installation
This project can transform your casual sketch to beautiful painting/artwork using modern AI technology.
Run with Docker
With Docker, you can quickly build and run the entire application in minutes
# 1. First, clone the repo git clone https://github.com/mtobeiyf/sketch-to-art.git cd sketch-to-art # 2. Build Docker image docker build -t sketch-to-art:dev . # 3. Run! docker run -it --rm -p 8080:8080 -p 5001:5001 -p 5002:5002 sketch-to-art:dev
Then, go to localhost:8080 and play with the demo!
To achieve the goal, there are mainly two steps in the pipeline:
- Reconstruct and generate real image from the sketch
- Arbitrary style transfer to beautify the result with a given style
The principle behind this is called Conditional Adversarial Networks, known as pix2pix, which is able to generate image based on the given image.
It became known to us with the appearance of Prisma app. Typically, we generate an individual model for each pre-defined style. Here, we want to go further by using any new picture as the style. So, we adopted the method, Universal Style Transfer via Feature Transforms proposed in NIPS2017, which enables us to perform arbitary style transfer.
The server side is powered by Python and Flask. You can see this simpler example.
Navigate to the
server directory and all the files concerning the service and neural networks are there. The two main files:
app_pix.pyfor pix2pix translation
app_stylize.pyfor arbitrary style transfer
Make sure you have Python installed, and some packages are needed: tensorflow, keras, pillow, flask, gevent. You can use pip to install them:
pip install -r server/requirements.txt
# Simply run with python python app_xxx.py
And you could see the output indicating the port it's listening (5001 and 5002). Go to
http://localhost:5001 and you should see the returned information.
You should have installed:
# Clone the repo git clone https://github.com/mtobeiyf/sketch-to-art.git cd sketch-to-art # Install dependencies yarn # or npm install # Run yarn dev # or npm run dev
Open your favorite browser at
http://localhost:8080, the site is there.
This is the final project of Digital Image Processing instructed by Prof. Jia Yan.
Xin Fu, Shuaibin Zhang, Tangbo Liu, Haoran Su
Copyright © 2018, Fing
Released under the MIT License.
|
OPCFW_CODE
|
Should taxpayers be allowed to vote where their taxpayer dollars go?
I am pondering over the pros and cons of having taxpayers submit via popular vote which federal executive departments or agencies they want their tax dollars to be allocated towards. This would appear to be in accordance with democracy but I'm still debating it.
Is this not just a specific case of "Is direct democracy better than representative democracy"?
I'm sorry, but we generally don't answer questions about what politicians should do, because answers to such questions are just personal opinion. For more information on what kind of questions do and do not belong on this website, please check out the articles What topics can I ask about here? and What types of questions should I avoid asking? on the help center
Let's imagine that for a second: no taxpayer wants to fund the IRS, either because they're planning to cheat or simply because it's not sexy. A few years later, the IRS is severely under-funded so most people don't feel the need to pay their taxes since they can't be caught. Conclusion: the federal government is bankrupt because people stop paying taxes, game over.
Some taxpayers might want to fund the IRS to ensure that other people pay their taxes (people are spiteful), but probably not enough.
@user253751 “People are spiteful” Some, perhaps. But some people would vote for it to be funded because good governance is important?
No.
There are questions where a direct "yes or no" vote is appropriate. Do you want stem cell research or not? Do you want capital punishment or not? Those questions can be clearly answered.
There are other questions which are not "yes or no" but rather "how much" or "what percentage." Like how to split the budget.
"Do you want low taxes?" "Yes, of course."
"Do you want public debt?" "No, of course not."
"Do you want decent infrastructure?" "Yes, of course."
"How shall we reconcile that?" "Let the politicians figure that one out."
Consider the Brexit mess. There was a slim majority for Brexit, but no majority for any one type of Brexit. They might have tried to negotiate a deal first and then vote on it, but Article 50 didn't work that way.
Those earlier questions are more nuanced as well. You can have capital punishment in some cases but not in others, you can allow some methods of capital punishment but not others, etc. The same goes for stem cell research, how much money do you want to spend on it, what rules govern what researchers can do? All these questions yield to more nuanced questions.
@JJforTransparencyandMonica There is a clear yes/no question about capital punishment. No means no capital punishment of any kind for any crime. In most Western European countries this is the law and a large majority of the population shares this view, so this is not something hypothetical. The proponents of capital punishment don't agree with each other on the details but that is a followup question on the binary yes/no. The same principle applies to stem cell research, the no camp is just a universal no, no matter the details.
@quarague You might find (hypothetically) that a majority of people are for capital punishment, but a majority are against lethal injection, a majority are against beheading, a majority are against execution, and a majority are against electrocution.
@quarague by that logic the Brexit referendum is also an appropriate "yes or no" vote. After all, it's only difficult when people vote in favor of it. Realistically though, that makes the referendum moot. If you are sure that one option will win, then it's a waste of money to hold the referendum. If you're not sure, then you need to prepare for dealing with both outcomes and that's where it gets tricky.
|
STACK_EXCHANGE
|
import unittest
class TestAutoTS(unittest.TestCase):
def setUp(self):
    """Load the Sales_and_Marketing sample dataset and split it into the
    train/test partitions shared by the test methods.

    Relies on the DEV_AUTOTS environment variable pointing at a
    pre-release checkout of auto_ts, and on the example CSV being
    available relative to the current working directory.
    """
    # Pre Release: make the development copy of auto_ts importable.
    import sys
    import os
    sys.path.append(os.environ['DEV_AUTOTS'])
    import pandas as pd  # type: ignore

    # Build the CSV path portably instead of by string concatenation.
    datapath = 'example_datasets'
    filename1 = 'Sales_and_Marketing.csv'
    dft = pd.read_csv(os.path.join(datapath, filename1), index_col=None)

    # Column metadata consumed by Auto_Timeseries in the tests.
    self.ts_column = 'Time Period'
    self.sep = ','
    self.target = 'Sales'

    # First 40 rows train the models; the remainder validates forecasts.
    self.train = dft[:40]
    self.test = dft[40:]
def test_auto_ts(self):
"""
test to check functionality of the auto_ts function
"""
import numpy as np # type: ignore
import auto_ts as AT
ml_dict = AT.Auto_Timeseries(
self.train, self.ts_column,
self.target, self.sep, score_type='rmse', forecast_period=8,
time_interval='Month', non_seasonal_pdq=None, seasonality=False,
seasonal_period=12, seasonal_PDQ=None, model_type='best',
verbose=0
)
# https://stackoverflow.com/questions/25348532/can-python-pickle-lambda-functions
# import dill # the code below will fail without this line
# import pickle
# with open('ml_dict.pickle', 'wb') as handle:
# pickle.dump(ml_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# with open('ml_dict.pickle', 'rb') as handle:
# ml_dict_gold = pickle.load(handle)
# self.assertDictEqual(ml_dict, ml_dict_gold, "The generated ml_dict does not match the golden dictionary.")
print(ml_dict)
##################################
#### Checking Prophet Results ####
##################################
forecast_gold = np.array([
397.43339084, 394.26439651, 475.13957452, 552.65076563, 606.16644019, 593.80751381, 660.50017734, 660.71231806,
507.50617922, 428.91362082, 394.42162318, 460.58145002, 414.11761317, 411.79136617, 513.90686713, 548.44630982,
625.04519821, 601.93200453, 692.72711895, 713.80546701, 509.75238742, 452.27192698, 417.23842764, 489.43692325,
464.33630331, 463.7618856 , 554.96050385, 607.84174268, 680.80447392, 665.27454447, 751.95122103, 769.70733192,
583.80971329, 520.80174673, 487.2960147 , 558.92329098, 527.98407913, 528.04537126, 615.77231537, 682.98205328,
749.06124155, 751.07726213, 796.89236612, 783.20673348,689.69812976, 595.71342586, 569.48660003, 635.88437079
])
self.assertIsNone(
np.testing.assert_array_equal(np.round(ml_dict.get('FB_Prophet').get('forecast'),8), forecast_gold),
"Prophet Forecast does not match up with expected values."
)
rmse_gold = 27.01794672
self.assertEqual(round(ml_dict.get('FB_Prophet').get('rmse'),8), rmse_gold, "Prophet RMSE does not match up with expected values.")
################################
#### Checking ARIMA Results ####
################################
forecast_gold = np.array([
801.78660584, 743.16044526, 694.38764549, 684.72931967,
686.70229610, 692.13402266, 698.59426282, 705.36034762
])
# https://stackoverflow.com/questions/19387608/attributeerror-rint-when-using-numpy-round
self.assertIsNone(
np.testing.assert_array_equal(np.round(ml_dict.get('ARIMA').get('forecast')['mean'].values.astype(np.double), 8), forecast_gold),
"ARIMA Forecast does not match up with expected values."
)
rmse_gold = 169.00016628
self.assertEqual(round(ml_dict.get('ARIMA').get('rmse'),8), rmse_gold, "ARIMA RMSE does not match up with expected values.")
##################################
#### Checking SARIMAX Results ####
##################################
forecast_gold = np.array([
803.31673726, 762.46093997, 718.3581931, 711.42130506,
719.36254603, 732.70981867, 747.57645435, 762.47349398
])
self.assertIsNone(
np.testing.assert_array_equal(np.round(ml_dict.get('SARIMAX').get('forecast')['mean'].values.astype(np.double), 8), forecast_gold),
"SARIMAX Forecast does not match up with expected values."
)
rmse_gold = 193.49650578
self.assertEqual(round(ml_dict.get('SARIMAX').get('rmse'),8), rmse_gold, "SARIMAX RMSE does not match up with expected values.")
##############################
#### Checking VAR Results ####
##############################
forecast_gold = np.array([
741.37790864, 676.23341949, 615.53872102, 571.7977285,
546.95278336, 537.34223069, 537.4744872, 542.30739271
])
self.assertIsNone(
np.testing.assert_array_equal(np.round(ml_dict.get('VAR').get('forecast')['mean'].values.astype(np.double), 8), forecast_gold),
"VAR Forecast does not match up with expected values."
)
rmse_gold = 112.4770318
self.assertEqual(round(ml_dict.get('VAR').get('rmse'),8), rmse_gold, "VAR RMSE does not match up with expected values.")
#############################
#### Checking ML Results ####
#############################
forecast_gold = np.array([
475.24, 455.72, 446.58, 450.82,
453.76, 457.96, 475.04, 564.78
])
self.assertIsNone(
np.testing.assert_array_equal(np.round(ml_dict.get('ML').get('forecast').astype(np.double), 2), forecast_gold),
"ML Forecast does not match up with expected values."
)
rmse_gold = 94.94981174
self.assertEqual(round(ml_dict.get('ML').get('rmse'),8), rmse_gold, "VAR RMSE does not match up with expected values.")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
STACK_EDU
|
We're continuing the momentum. After a great first meetup we're inviting you to the second DataTalks JLM Meetup.
This time we will be focusing on computer vision. We are hosting two super interesting speakers who will be talking about advanced computer vision application in the fields of sports (and specifically soccer) and medical.
Come and get together with the amazing AI community of Jerusalem, this time in the beautiful atmosphere of Beit Hansen, one of the coolest places in the city.
As always, beers and food on us.
18:45 - 19:30 : Mingling, Beers, Pizzas
19:30 - 20:00: Dr. Jackie Assa - Co-Founder and VP Research @ ClearVuze
20:00 - 20:30: Dr. Yehiel Shilo - Algorithm Team Leader @ RSIP Vision
20:30 - 21:00: Mingling (Cont.)
Speaker: Dr. Jackie Assa
Title: "With all the deep learning and computer vision out there, we have nothing left to do.."
Clearvuze is a drone AI startup for generating compelling and useful video footage of events. The company product, teamvuze, focuses on automatic extraction of intelligent data from video footage of field sports games and training sessions, by applying deep-learning / computer vision algorithms.
In this talk we will describe some of the challenges we confronted in analyzing soccer footage, facing multiple problems that were claimed to be ‘already solved’. We will show how existing solutions were always proven to be incomplete, limited or not robust enough to handle real-life scenarios, and how we adapted them to solve our product problems.
Jackie is the co-founder and VP research at ClearVuze. Jackie is a senior software executive with over 25 years of experience. He was the founder of SpiralMiners, a data science company, and FFreview, focusing on tools for fast video reviewing. He has consulted and led ongoing research and data science teams in international companies, including CitiBank, Amdocs, eBay, Agfa, and Intel. Prior to FFreview, Jackie managed R&D and engineering teams at several companies. Jackie holds a PhD from Tel Aviv University (2010) in Computer Graphics and Computer Vision.
Speaker: Dr. Yehiel Shilo
Title: Title: 3D Segmentation of a Human Spine
Abstract: Various medical data, such as CT and MRI scans contains 3D data of different organs in the human body. A major challenge in the medical world is to perform an accurate 3D segmentation of a given image. In the talk, a brief introduction about medical data and handling 3D images will be given. This will be followed by the full segmentation cycle of a CT scan of a human spine. The segmentation process contains both classical image processing and computer vision methods, as well as modern neural networks methods. A discussion about these different methods and their pros and cons will be held.
Claim the event and start managing its content. I am the organizer
|
OPCFW_CODE
|
Selecting an artifact from the list generated by a search operation will bring you to a screen with all the artifact details. Depending on the permissions you have on this tracker, the detailed view is made of text fields and menus that you can update with new values. If you are an anonymous user or a registered user who does not belong to the project team, most of the fields will likely appear as immutable text. By default, non-project members cannot edit any of the artifact fields. They can only submit a follow-up comment.
The Artifact Update screen is divided in several parts: Header and Comments.
The header zone is where you’ll find all the fields associated with an artifact. Many of these fields are assigned a set of predefined values (Status, Category, Resolution) while some others have a number format (Effort) or a free text format (Summary). For more clarity, the fields are grouped in field sets. The set of fields used in a given tracker, as well as the related set of predefined values and the field sets can be configured by project members who have administration permissions on this tracker.
Some fields have a particular behaviour and some explanations seem necessary.
Automatically set fields: Tuleap includes fields that are automatically set: “Artifact ID”, “Last Update Date”, “Submitted By”, “Submitted On” and “Rank”. Users can’t edit those fields.
Cross-Reference: Tuleap offers another dynamic field which has the ability to cross-reference any artifact, or any other Tuleap object from within a follow-up comment.
When typing a follow-up comment, any text that follows the pattern “XXX #NNN” will be interpreted as a reference to the artifact XXX number NNN, where NNN is the unique artifact ID, and XXX is the tracker short name (e.g. “bug #123”, “task #321”, “req #12”, etc.). If you don’t know the tracker short name or don’t want to specify it, you may simply use “art #NNN”. Each time Tuleap displays a piece of text that follows this pattern it will auto-magically create an hyperlink to the web page showing all the details of the artifact.
Tuleap reference patterns may be used to reference artifacts, as well as source code commits, documents, files, etc. Please refer to Tuleap references for more details on References.
Furthermore, references concerning artifacts, svn revisions and cvs commits are stored in the database. They are displayed in the next section, ordered by type and initial reference direction.
You can attach files to an artifact by using file field.
Please note that it is possible to drag ‘n drop (or copy/paste) images
directly in the text or in the follow-up comment field as soon as they
Such images will be added in the first updatable file field in the artifact.
Drag ‘n drop or copy/paste are not possible if:
there isn’t any file field
the file field is not updatable (due to permissions) or is frozen (due to workflow)
Artifact link field provides a very powerful way to link an artifact to others. Any artifact from any accessible tracker can be linked to the current edited artifact. Artifact are displayed as a regular tracker report where each tab is a list of artifact of a given tracker according to the default renderer of the default selected report.
If you know the ID of the artifact you want to link, just add it in the input box. You can add several artifacts at once by separating them with a comma, e.g. ‘1,2’. If you don’t know the ID of the artifact to add, click on the search button; a modal will allow you to choose artifacts by tracker.
Creating an artifact
As well as linking other artifacts to the currently edited one, it is possible to create an artifact in a different tracker (e.g. sub-task addition). Any created artifact is set as “to be linked”, meaning that right after creation it is added to its tracker tab and to the input field text.
Managing linked artifacts
Linked or “to be linked” artifacts can be managed in the artifact link tabs view. Not-yet-linked artifacts are highlighted. Any artifact can be unchecked (by clicking the cross).
Using typed artifact links
Since Tuleap 9.14, trackers use by default the typed artifact links. This feature gives to the user the ability to add a type to a link between two artifacts.
Trackers and project administrators are able to deactivate the whole feature or some types for the project. This can be done in the new tracker global administration section
_is_child type, it cannot be disabled.
Some types cannot be disabled because they are used by an active plugin in the project.
A type cannot be used in new links
Old links using this type still exist
The type of these links will be cleared after the next artifact update (the link will continue to exist without any type)
Before Tuleap 12.5, parent/child relationship was driven by the hierarchy.
It meant that the artifact link type
_is_child was automatically set between two artifacts if a hierarchy was defined between the two trackers.
Since Tuleap 12.5, parent/child relationship is only defined by the artifact link type
that must be added manually by the user while linking an artifact to another.
With this feature, any artifact can be defined as a child of another, without any constraints.
The children added this way will be displayed everywhere (artifact view, planning view, taskboard, cardwall, etc).
The hierarchy is now only an helper.
It means that the artifact link type
_is_child is no more added automatically based on hierarchy but it’s still used in:
Agiledashboard to have quick access to create new items and new parents
Agiledashboard for scrum plannings definition
Tracker triggers that only deals with children based on the hierarchy
As many follow-up comments as needed can be attached to any given artifact. Follow-up comments are free text fields where virtually any kind of information or comment can be typed in.
Follow-up comments have several of interesting capabilities and extensions:
Changeset: Each follow-up comment has a part dedicated to the artifact history. This keeps track of all changes that occurred on all artifact fields since the creation of the artifact. The artifact history shows what fields changed, what the old value was before the change took place, who changed it and when.
Canned Responses: it is not infrequent to see the project members in charge of the artifact classification and dispatch process to post the same follow-up comments again and again. Typical examples of repeatedly posted comments are: a thank you message to the originator, a request for the originator to provide commonly missing information like version numbers or type of machine used, etc. Rather than typing the same comments all the time, Tuleap allows project members to create a predefined set of responses. Each canned response is defined by a name and by the body of the response.
Posting a canned response is just a matter of selecting the appropriate response from the pull down menu in the artifact update screen and submitting the changes.
|
OPCFW_CODE
|
Pester 5: Automate It
For my hobby project ConvertTo-Expression, I am rebuilding my test (Pester 5) script. I would like to automate the It (and possibly the Context part) as there are a large number of syntax formats to test for, and the function actually roundtrips with &([ScriptBlock]::Create("$Expression")). For a Minimal, Reproducible Example, I am using ConvertTo-Json which roundtrips with ConvertFrom-Json.
My goal for this question is basically to create an easy test syntax whether the concerned function correctly roundtrips, e.g.:
Test -Compress '{"a":1}'
I would like to do something like this:
Function Test ([String]$Json, [Switch]$Compress, [String]$It = $Json) {
$Context = if ($Compress) { 'Compress' } Else { 'Default' }
$Object = ConvertFrom-Json $Json
Context $Context {
# BeforeEach {
# $Compress = $True
# $Json = '{"a":1}'
# $Object = ConvertFrom-Json $Json
# }
It $It { ConvertTo-Json -Compress:$Compress -Depth 9 $Object | Should -Be $Json }
}
}
Describe 'Syntax check' {
Test -Compress '{"a":1}'
}
but this results in errors like:
Starting discovery in 1 files.
Discovery finished in 35ms.
[-] Syntax check.Compress.{"a":1} 15ms (13ms|2ms)
RuntimeException: The variable '$Compress' cannot be retrieved because it has not been set.
at <ScriptBlock>, C:\Test.ps1:6
Tests completed in 140ms
Tests Passed: 0, Failed: 1, Skipped: 0 NotRun: 0
Enabling the commented-out hardcoded BeforeEach returns the expected results:
Starting discovery in 1 files.
Discovery finished in 32ms.
[+] C:\Test.ps1 197ms (80ms|88ms)
Tests completed in 202ms
Tests Passed: 1, Failed: 0, Skipped: 0 NotRun: 0
I would like to put It (and Context) in a function and control them with arguments so that I can do a simple test like Test -Compress '{"a":1}'. Unfortunately, I got completely lost in the new Pester discovery scopes and start wondering whether this is actually possible at all with Pester 5.
I think the problem is that the variables of interest are only referenced inside the nested script block (the one passed to It), not the outer one (the one passed to Context).
A simple solution is to call the .GetNewClosure() method on the outer script block, which forms a closure around the calling scope's local variables:
Function Test ([String]$Json, [Switch]$Compress, [String]$It = $Json) {
$Context = if ($Compress) { 'Compress' } Else { 'Default' }
$Object = ConvertFrom-Json $Json
Context $Context {
It $It { ConvertTo-Json -Compress:$Compress -Depth 9 $Object | Should -Be $Json }
}.GetNewClosure() # This captures $Json, $Compress, $It, and $Object
}
Describe 'Syntax check' {
Test -Compress '{"a":1}'
}
Note that the docs are very terse at the moment, but the conversation in GitHub issue #9077 suggests that the script block returned by .GetNewClosure() runs in a newly created dynamic (in-memory) module.
I found a way to do it using the -TestCases parameter:
Function Test ([String]$Json, [Switch]$Compress, [String]$It = $Json) {
$Context = if ($Compress) { 'Compress' } Else { 'Default' }
$Object = ConvertFrom-Json $Json
Context $Context {
$TestCase = @{ Params = @{ Compress = $Compress; Object = $Object; Json = $Json } }
It $It -TestCases $TestCase {
param($Params)
ConvertTo-Json -Compress:$Params.Compress -Depth 9 $Params.Object | Should -Be $Params.Json
}
}
}
Describe 'Syntax check' {
Test -Compress '{"a":1}'
}
(But I am still open to other suggestions, especially if it appears that I overlooked something obvious)
|
STACK_EXCHANGE
|
How Do I Configure IIS To Handle Really Large File Uploads?
Platform: IIS 6, ASP.Net 2.0 (.Net 3.5), Server 2003.
I'm building an application that accepts files from a user, processes them, and returns a result. The file is uploaded using HTTP POST to an ASP.Net web form. The application is expecting some large files (hundreds of MB).
I'm using SWFUpload to accomplish the upload with a nice progress bar, but that's not contributing to the issue, because when I bypass it using a standard HTML form pointing at my upload accepter page, I get the exact same error. When using the progress bar, the upload continues to 100%, then fails. With a standard form, the behavior appears to be the same.
I'm having a problem right now uploading a file that's about 150MB. I've changed every settings I can find, but still no luck.
Here's a summary of what I've changed so far:
In Web.config: Added this inside system.web:
<httpRuntime executionTimeout="3600" maxRequestLength="1536000"/>
In machine.config: Inside system.web, changed:
<processModel autoConfig="true" />
<processModel autoConfig="true" responseDeadlockInterval="00:30:00" responseRestartDeadlockInterval="00:30:00" />
and in MetaBase.xml: Changed:
When the upload fails, I get a 404 error from IIS. My web form does not begin processing, or at least, it doesn't make it to the Page_Load event. I threw an exception at the beginning of that handler, and it doesn't execute at all on large files.
Everything works fine with smaller files (I've tested up to about 5.5MB). I'm not exactly sure what file size is the limit, but I know that my limit needs to be higher than 150MB, since this is not the largest file that the client will need to upload.
Can anyone help?
Urlscan was active on all websites, and has its own request entity length limit. I wasn't aware that Urlscan was running on our server because it was a global ISAPI filter, not running on my individual website.
Note: to locate global ISAPI filters, right click on the Web Sites folder in IIS Admin and click Properties, then on the ISAPI Filters tab.
- → Function Undefined in Axios promise
- → React formatting dates with momentjs server side
- → AngularJS directive: "templateUrl" doesn't work while "template" works
- → how to add cannonical tag for ASPX .NET page
- → URL routing requires /Home/Page?page=1 instead of /Home/Page/1
- → ASP.NET routing w/ changing article name
- → NumericTextBox Loses Value using Html.TextBoxFor, not HTML
- → How to do Bundling of css & js files in Drupal
- → Does the fact that every page is reachable through www.example.com and www.example.com/index have any SEO impact?
- → How to add rich snippet in a website?
- → UrlRewriting does not work on server, but on localhost it works fine on asp.net?
- → Bootstrap Nav Collapse via Data Attributes Not Working
|
OPCFW_CODE
|
using Xunit;
using CSharpUnitTesting.assert.Sdk;
// 05 Ranges
//
// Use comparer for non traditional types
namespace CSharpUnitTesting.assert
{
    /// <summary>
    /// Demonstrates Assert.InRange / Assert.NotInRange on built-in and
    /// custom types, exercising both the inferred and the explicitly
    /// generic overloads, with and without a custom comparer.
    /// </summary>
    public class Ranges
    {
        [Theory]
        [InlineData(42, 42, 42)]
        [InlineData(42, int.MinValue, 42)]
        [InlineData(42, 42, int.MaxValue)]
        [InlineData(42, int.MinValue, int.MaxValue)]
        public void InRange_Int(int actual, int low, int high)
        {
            // Inferred and explicit generic overloads behave identically.
            Assert.InRange(actual, low, high);
            Assert.InRange<int>(actual, low, high);
        }

        [Theory]
        [InlineData('e', 'e', 'e')]
        [InlineData('e', 'a', 'e')]
        [InlineData('e', 'e', 'z')]
        [InlineData('e', 'a', 'z')]
        public void InRange_Char(char actual, char low, char high)
        {
            Assert.InRange(actual, low, high);
            Assert.InRange<char>(actual, low, high);
        }

        [Theory]
        [InlineData("eee", "eee", "eee")]
        [InlineData("eee", "aaa", "eee")]
        [InlineData("eee", "eee", "zzz")]
        [InlineData("eee", "aaa", "zzz")]
        public void InRange_String(string actual, string low, string high)
        {
            Assert.InRange(actual, low, high);
            Assert.InRange<string>(actual, low, high);
        }

        [Fact]
        public void InRange_Type_Custom()
        {
            // Type supplies its own comparison, so no comparer argument is needed.
            AClassWithComparer lowBound = new AClassWithComparer(1);
            AClassWithComparer middle = new AClassWithComparer(2);
            AClassWithComparer highBound = new AClassWithComparer(3);
            Assert.InRange(middle, lowBound, highBound);
            Assert.InRange<AClassWithComparer>(middle, lowBound, highBound);
        }

        [Fact]
        public void InRange_Type_Custom_WithComparer()
        {
            // Type has no natural ordering; an external comparer defines the range.
            AClass lowBound = new AClass(1);
            AClass middle = new AClass(2);
            AClass highBound = new AClass(3);
            Assert.InRange(middle, lowBound, highBound, new AClassComparer());
            Assert.InRange<AClass>(middle, lowBound, highBound, new AClassComparer());
        }

        [Fact]
        public void NotInRange_Type_Base()
        {
            // Values strictly below the range for each built-in type.
            Assert.NotInRange(42, 43, 45);
            Assert.NotInRange<int>(42, 43, 45);
            Assert.NotInRange('a', 'e', 'z');
            Assert.NotInRange<char>('a', 'e', 'z');
            Assert.NotInRange("aaa", "eee", "zzz");
            Assert.NotInRange<string>("aaa", "eee", "zzz");
        }

        [Fact]
        public void NotInRange_Type_Custom()
        {
            AClassWithComparer outside = new AClassWithComparer(1);
            AClassWithComparer lowBound = new AClassWithComparer(2);
            AClassWithComparer highBound = new AClassWithComparer(3);
            Assert.NotInRange(outside, lowBound, highBound);
            Assert.NotInRange<AClassWithComparer>(outside, lowBound, highBound);
        }

        [Fact]
        public void NotInRange_Type_Custom_WithComparer()
        {
            AClass outside = new AClass(1);
            AClass lowBound = new AClass(2);
            AClass highBound = new AClass(3);
            Assert.NotInRange(outside, lowBound, highBound, new AClassComparer());
            Assert.NotInRange<AClass>(outside, lowBound, highBound, new AClassComparer());
        }
    }
}
|
STACK_EDU
|
"""More efficient implementation of infiniteroad_simulation which only partially simulates vehicles."""
import havsim.simulation as hs
import havsim.plotting as hp
import time
import numpy as np
#%% set up parameters
IDM_parameters = [30, 1.5, 4, 1.3, 2]  # in order: max speed, time headway, jam spacing, comfortable acceleration,
# comfortable deceleration. Units are in meters (speeds presumably m/s, times in s -- TODO confirm against havsim docs).
eql_speed = 5  # the equilibrium speed you want to perturb around
nveh = 1000  # number of vehicles
dt = .25  # timestep in seconds
acc_tolerance = 1e-3  # acceleration tolerance: a new vehicle joins the full simulation when exceeded
speed_tolerance = 1e-1  # speed tolerance: a vehicle back near equilibrium is removed from the full simulation
# define speed profile of initial vehicle
def downstream(timeind, *args):
    """Downstream boundary speed profile for the lead vehicle.

    For the first 10 timesteps the imposed speed is 3 below the
    equilibrium speed (alternative perturbation lengths, e.g. 200
    timesteps, or no perturbation at all, can be substituted here);
    afterwards it is the equilibrium speed.
    """
    return eql_speed - 3 if timeind < 10 else eql_speed
# initial headway of the first following vehicle: the equilibrium headway at eql_speed
init_hd = hs.Vehicle(-1, None, IDM_parameters, None, length=5).get_eql(eql_speed)
# to add noise, uncomment code marked as acceleration noise
#%%
mainroad_len= 1e10  # effectively infinite road length
mainroad = hs.Road(1,mainroad_len, 'main road')
mainroad.connect('exit', is_exit=True)
# downstream boundary condition: impose the speed profile defined above
mainroad.set_downstream({'method':'speed', 'time_series':downstream})
def newveh(vehid, *args):
    """Construct a Vehicle on the main road using the module-level IDM parameters."""
    car_following = IDM_parameters
    # Only the lane-changing parameters come from the model defaults; the
    # car-following parameters are the module-level list above.
    _, lane_changing = hs.models.IDM_parameters()
    return hs.Vehicle(vehid, mainroad[0], car_following, lane_changing,
                      route=['exit'], maxspeed=car_following[0], length=5)
def make_waves():
    """Simulates the evolution of a traffic wave initiated by downstream over nveh vehicles.

    We assume that all vehicles start in the equilibrium defined by eql_speed. The first vehicle follows
    the speed profile defined by the downstream function. This simulates how this initial speed profile
    evolves as it propagates through the vehicles. This implements an efficient algorithm that scales to large
    numbers of vehicles/timesteps. The infiniteroad_simulation.py script has code which does the full
    simulation; the full simulation was used to validate this algorithm.

    At every timestep, we first evaluate whether to add a new vehicle to the simulation. We can calculate
    in closed form the trajectory of the next following vehicle, assuming it stays in equilibrium. This
    is used to evaluate the acceleration of this potential new vehicle.
    If the acceleration is greater than the acc_tolerance threshold, the new vehicle is added and will begin
    to be fully simulated. Otherwise, we continue to approximate its trajectory by the equilibrium solution.

    To evaluate whether to remove the most downstream vehicles from the simulation, we calculate
    the difference between its current speed and the equilibrium speed. If the difference is less than
    speed_tolerance, and the acceleration is less than the acc_tolerance, then this most downstream vehicle
    is no longer fully simulated, and we approximate its future trajectory by the equilibrium solution for the
    remaining upstream vehicles.

    Returns a tuple (all_vehicles, cur_vehicles, curtime): vehicles whose full
    simulation has ended, vehicles still being fully simulated, and the final
    timestep index.
    """
    # initialization
    next_initpos = 1e5  # initial position for the next vehicle to be added (equilibrium spacing applied below)
    testveh = newveh(-1)  # used to evaluate whether to add a new vehicle
    testveh.speed = eql_speed
    leadveh = newveh(-2)  # used to approximate the downstream-most vehicle when it ceases to be fully simulated
    leadveh.speed = eql_speed
    leadveh.pos = 0
    eql_hd = testveh.get_eql(eql_speed)  # equilibrium headway at eql_speed
    cur_vehicles = []  # vehicles currently being fully simulated
    all_vehicles = []  # vehicles which were previously fully simulated
    prev_veh = None  # current upstream-most vehicle
    counter = 0  # cumulative number of fully simulated vehicles
    curtime = 0  # current simulation timestep
    while counter < nveh and (len(cur_vehicles) > 0 or counter==0):
        # check if we need to add a new vehicle: advance the hypothetical
        # equilibrium follower and evaluate its car-following acceleration
        testveh.pos = next_initpos + curtime*dt*eql_speed
        testhd = hs.get_headway(testveh, prev_veh) if prev_veh is not None else None
        acc = testveh.get_cf(testhd, eql_speed, prev_veh, mainroad[0], curtime, dt, False)
        if abs(acc*dt) > acc_tolerance or counter==0:
            # the wave has reached this follower (or it is the very first
            # vehicle) -> start fully simulating it
            veh = newveh(counter)
            veh.lead = prev_veh
            if counter > 0:
                hd = eql_hd
            else:
                hd = init_hd  # first vehicle uses the specially defined initial headway
            veh.initialize(testveh.pos, eql_speed, testhd, curtime)
            # position the next candidate one headway plus one vehicle length upstream
            next_initpos += -hd - veh.len
            cur_vehicles.append(veh)
            prev_veh = veh
            counter += 1
        # update simulation: first compute accelerations for all vehicles...
        for veh in cur_vehicles:
            veh.set_cf(curtime, dt)
            ##### acceleration noise #####
            # NOTE(review): this noise block is currently active for the first
            # 400 timesteps, despite the module-level comment suggesting noise
            # is opt-in via uncommenting -- confirm which behavior is intended.
            if curtime < 400:
                veh.acc += (np.random.rand()-.5)/2
            #####
        # ...then apply the updates so all vehicles move simultaneously
        for veh in cur_vehicles:
            veh.update(curtime, dt)
        # the virtual lead vehicle continues at equilibrium speed
        leadveh.pos += leadveh.speed*dt
        leadveh.speed = eql_speed
        for veh in cur_vehicles:
            veh.hd = hs.get_headway(veh, veh.lead) if veh.lead is not None else None
        curtime += 1
        # check if we need to remove vehicles: the downstream-most vehicle is
        # dropped from full simulation once it has relaxed back to equilibrium
        # if len(cur_vehicles)>0:
        veh = cur_vehicles[0]
        if abs(veh.acc*dt) < acc_tolerance*10 and abs(veh.speed-eql_speed) < speed_tolerance:
            all_vehicles.append(veh)
            veh.end = curtime
            # the removed vehicle becomes the (equilibrium-approximated) leader
            leadveh.pos = veh.pos
            leadveh.speed = veh.speed
            cur_vehicles.pop(0)
            if len(cur_vehicles) > 0:
                cur_vehicles[0].lead = leadveh
    return all_vehicles, cur_vehicles, curtime
#%% simulation and plotting
start = time.time()
all_vehicles, cur_vehicles, timesteps = make_waves()
end = time.time()
# Report wall-clock time and the total number of fully simulated vehicle-timesteps
# (vehicles without an `end` are counted as active until the final timestep).
print('simulation time is '+str(end-start)+' over '+str(sum([timesteps - veh.start+1 if veh.end is None else veh.end - veh.start+1
for veh in all_vehicles]))+' timesteps')
all_vehicles.extend(cur_vehicles)
laneinds = {mainroad[0]:0}  # map the single lane to plot index 0
sim, siminfo = hp.plot_format(all_vehicles, laneinds)
hp.platoonplot(sim,None, siminfo, lane=0, opacity=0)
|
STACK_EDU
|
Magento only displaying CSS updates on secure URL
I'm having a rough time with Magento. I spent a good deal of time going through the user guide on the Responsive theme, setting up Compass and editing SCSS files and whatnot, but was having the hardest time getting the changes to actually appear on the frontend.
I refreshed/flushed the cache, deleted contents of var/session and var/cache folders, I made sure the site was picking up the correct .css file under 'page source' in chrome.
It seemed like I could get the changes to apply if I changed 'merge css files' to yes, but even when the changes displayed if I made updates they wouldn't be pushed to the front end!
I changed the base URLs, however, and noticed that the secure URL for our site will load the correct .css and actually display the changes, while the unsecured URL says its loading the same .css file but NOT showing the changes.
I'm sure this is simple, but I'm going insane trying to figure out exactly what the problem is. Please let me know if you need more specifics from me!
Thanks a bunch,
Jesse
do you have full page caching on your magento store? Also are you using Apache or Nginx?
Thanks for the response. I believe Apache through cPanel, and I don't believe we have full page caching, seems like thats only available through the enterprise edition or extensions.
Also I realized upon further inspection that the two css files being loaded are the same file, just with different edits that I made over a period of time. The secure URL is just displaying the more recent configuration of the file...What's weird though is that if I make edits and re-compile and upload them to the server, clear all caches, and hard refresh, the changes aren't applying to the secure URL either. It's like they are both not loading the proper file, and instead are loading the file from different points in its revision history. Which I guess would make sense if its caching
sounds like you have browser caching configured on the server - typically in your .htaccess file. confirm by entering the full url of your stylesheet here;
http://www.webconfs.com/http-header-check.php
HTTP/1.1 200 OK =>
Server => Sucuri/Cloudproxy
Date => Sun, 18 Oct 2015 13:45:34 GMT
Content-Type => text/css
Content-Length => 229610
Connection => close
Vary => Accept-Encoding
Last-Modified => Fri, 16 Oct 2015 20:01:19 GMT
Cache-Control => max-age=315360000
Expires => Thu, 31 Dec 2037 23:55:55 GMT
X-XSS-Protection => 1; mode=block
X-Frame-Options => SAMEORIGIN
X-Content-Type-Options => nosniff
Accept-Ranges => bytes
bingo: Expires => Thu, 31 Dec 2037 23:55:55. Take a look in your .htaccesss file (root directory of magento) and modify the expires config
I'm not seeing that date anywhere in our .htaccess file...the only place I'm finding the word 'expires' is:
############################################
Add default Expires header
http://developer.yahoo.com/performance/rules.html#expires
ExpiresDefault "access plus 1 year"
header details are usually configured on the server that serves the page. without knowing your full setup its difficult to be sure though
Hey I figured it out, we actually did have full page caching enabled through the outside security software! Thanks so much for the help!
|
STACK_EXCHANGE
|
KAFKA-15608: Assign lastet leader eopch and offset checkpoint to future log when replacing current log
Recently, I encountered an issue where one partition always had only 1 ISR (no produce traffic on this topic). The bug is related to altering log dir. When replacing the current log with the future log, the broker doesn't copy the leader epoch checkpoint cache, which records the current leader epoch and log start offset. The cache for each partition is updated only when appending new messages or becoming leader. If there is no traffic and the replica is already the leader, the cache will not be updated any more. However, the partition leader will fetch its leader epoch from the cache and compare it with the leader epoch sent by the follower when handling a fetch request. If the former one is missing or less than the latter one, the leader will interrupt the process and return an OffsetOutOfRangeException to the follower. The follower might be out of sync over time.
Take the following case as an example, all the key points are listed in chronological order:
Reassigner submitted a partition reassignment for partition foo-1.
{
"topic": "foo",
"partition": 1,
"replicas": [
5002,
3003,
4001
],
"logDirs": [
"\\data\\kafka-logs-1",
"any",
"any"
]
}
Reassignment completed immediately because there was no traffic on this topic.
Controller sent LeaderAndISR requests to all the replicas.
Newly added replica 5002 became the new leader and the current log updated the leader epoch offset cache. Replica 5002 successfully handled the LeaderAndISR request.
Altering log dir completed and the newly updated current log didn't have leader epoch offset information.
Replica 5002 handled fetch requests (include fetch offset and current leader epoch) from followers and returned OffsetOutOfRangeException due to leader epoch offset cache hadn't been updated. So, the replica 5002 couldn't update the fetch state for each follower and reported ISRShrink later. The followers 3003 and 4001 would repeatedly print the following log:
WARN [ReplicaFetcher replicaId=4001, leaderId=5002, fetcherId=2] Reset fetch offset for partition foo-1 from 231196 to current leader's start offset 231196 (kafka.server.ReplicaFetcherThread)
INFO [ReplicaFetcher replicaId=4001, leaderId=5002, fetcherId=2] Current offset 231196 for partition foo-1 is out of range, which typically implies a leader change. Reset fetch offset to 231196 (kafka.server.ReplicaFetcherThread)
This issue arises only when all the three conditions are met:
No produce traffic on the partition.
Newly added replica become new leader.
LeaderAndISR request is handled successfully before altering log dir completed.
Kindly ping @hudeqi
I tested this process on the latest trunk branch and it did not reproduce. My process is: create a topic with 1 partition, then write traffic and stop writing. For example, the current isr is [2,0,1], I will submit a reassign, and the isr is [2,0,1] , 2 of which logdir changes. Finally execute reassign. Please confirm whether my testing process is different from yours? Thanks @drawxy
I tested this process on the latest trunk branch and it did not reproduce. My process is: create a topic with 1 partition, then write traffic and stop writing. For example, the current isr is [2,0,1], I will submit a reassign, and the isr is [2,0,1] , 2 of which logdir changes. Finally execute reassign. Please confirm whether my testing process is different from yours? Thanks @drawxy
I tested this process on the latest trunk branch and it did not reproduce. My process is: create a topic with 1 partition, then write traffic and stop writing. For example, the current isr is [2,0,1], I will submit a reassign, and the isr is [2,0,1] , 2 of which logdir changes. Finally execute reassign. Please confirm whether my testing process is different from yours? Thanks @drawxy
Hi @hudeqi, in your test, replica 2 was already the leader and it already kept the fetch state of the other followers. And the ISR set wouldn't shrink without produce traffic because there was no lag between leader and follower (the leader replica 2 was aware of the log end offset of the other followers). For example: 1. create a topic with 1 partition and the assignment list [0,1] (making replica 0 the leader); 2. produce some messages to the partition and stop; 3. submit a partition reassignment [2,1]; 4. submit a log dir alter for replica 2. After that, you can see the log file is fully filled.
Btw, I closed this PR by mistake — could you help to reactivate it? Thanks!
I tested this process on the latest trunk branch and it did not reproduce. My process is: create a topic with 1 partition, then write traffic and stop writing. For example, the current isr is [2,0,1], I will submit a reassign, and the isr is [2,0,1] , 2 of which logdir changes. Finally execute reassign. Please confirm whether my testing process is different from yours? Thanks @drawxy
Hi @hudeqi, in your test, the replica 2 already was the leader and it aleady kept the fetch state of the other followers. And the ISR set wouldn't shrink without produce traffic due to there was no lag from leader and follower (the leader replica 2 was aware of the log end offset of other followers). For example, 1. you can create a topic with 1 partition and the assignment list is [0,1] (make replica 0 to be the leader); 2. produce some messages to the partition and stop; 3. submit a partition reassignment [2,1]; 4. submit a log dir alter for replica 2. After that, you can see the log file is fullly filled.
Btw, I close this PR by mistakes, could you help to reactive it, thanks!
Can you pull the latest trunk branch and reproduce it? The relevant logs you mentioned have been modified on the latest trunk. And I tested it multiple times based on the steps you gave based on the trunk branch, and everything seems to be normal.
I tested this process on the latest trunk branch and it did not reproduce. My process is: create a topic with 1 partition, then write traffic and stop writing. For example, the current isr is [2,0,1], I will submit a reassign, and the isr is [2,0,1] , 2 of which logdir changes. Finally execute reassign. Please confirm whether my testing process is different from yours? Thanks @drawxy
Hi @hudeqi, in your test, the replica 2 already was the leader and it aleady kept the fetch state of the other followers. And the ISR set wouldn't shrink without produce traffic due to there was no lag from leader and follower (the leader replica 2 was aware of the log end offset of other followers). For example, 1. you can create a topic with 1 partition and the assignment list is [0,1] (make replica 0 to be the leader); 2. produce some messages to the partition and stop; 3. submit a partition reassignment [2,1]; 4. submit a log dir alter for replica 2. After that, you can see the log file is fullly filled.
Btw, I close this PR by mistakes, could you help to reactive it, thanks!
Can you pull the latest trunk branch and reproduce it? The relevant logs you mentioned have been modified on the latest trunk. And I tested it multiple times based on the steps you gave based on the trunk branch, and everything seems to be normal.
Thanks for your verification, @hudeqi ! I found this issue on Kafka version 3.1 and didn't find the solution from Internet. I will try to reproduce it with latest trunk branch.
|
GITHUB_ARCHIVE
|
File Name: difference between client os and server os .zip
The main difference between client and server operating system is that client operating system works in the devices used by the end users such as desktops and other portable devices while server operating system runs on a special device called a server. An operating system operates as the interface between the user and hardware. It performs multiple important tasks such as process handling, memory management, controlling hardware devices, files and many more. Furthermore, it provides security to the system resources and data. There are various types of operating systems. Two of them are client and server operating systems. Client operating system is a system that works within computer desktops and other portable devices such as laptops and smartphones.
By Priya Pedamkar. Linux is basically an open-source software operating system that builds around the Linux kernel. It is a combination in a form, called a Linux distribution for both desktop and server use. The main function of Linux OS is to manage software resources and its artifacts. Windows Server is basically a Microsoft product and is a brand name for a group of server operating systems. It is a set of operating systems developed by Microsoft and the basic architecture is layered into user mode and kernel mode. Linux is mainly centered around the Linux kernel.
It can serve multiple client at a time.
A server is a computer or system that provides resources, data, services, or programs to other computers, known as clients, over a network. In theory, whenever computers share resources with client machines they are considered servers. There are many types of servers, including web servers, mail servers, and virtual servers. An individual system can provide resources and use them from another system at the same time. This means that a device could be both a server and a client at the same time.
Client Server Architecture is a computing model in which the server hosts, delivers and manages most of the resources and services to be consumed by the client. This type of architecture has one or more client computers connected to a central server over a network or internet connection. This system shares computing resources. Client Server Architecture Figure 1.
This is itself not only a question, but also an answer to another question. If you read any of our guide where we have installed an OS on a Server, like Installing OpenPanel on Rackspace Cloud Server , you might think why we are not using some easy graphical user interface, but a complex command line interface to install the softwares? Where the usual desktop of a computer gone?
A client is a small computer that accesses a server through a network. For example, in an organization, an employee logs in to the client machine to access the files and applications running on a server machine.
Unlike operating systems, such as Windows, that are designed for single users to control one computer, network operating systems NOS coordinate the activities of multiple computers across a network. The network operating system acts as a director to keep the network running smoothly. Nearly all modern networks are a combination of both. The networking design can be considered independent of the servers and workstations that will share it. Peer-to-peer network operating systems allow users to share resources and files located on their computers and to access shared resources found on other computers. However, they do not have a file server or a centralized management source See fig. In a peer-to-peer network, all computers are considered equal; they all have the same abilities to use the resources available on the network.
A computer network consists of two or more computers intended to share resources:. Each client computer must use an operating system that allows it to be identified to participate in the network. Besides the computers, other types of devices can be part of the network:. Other computers can also access the resources stored in a computer, as in a peer-to-peer scenario. This means that a computer, the server, can hold them and other computers can access them.
Он хотел крикнуть, но в легких не было воздуха, с губ срывалось лишь невнятное мычание. - Нет! - закашлявшись, исторгнул он из груди. Но звук так и не сорвался с его губ. Беккер понимал, что, как только дверь за Меган закроется, она исчезнет навсегда. Он снова попробовал ее позвать, но язык отказывался ему подчиняться. Девушка почти уже добралась до двери.
Вы хотите приделать к Цифровой крепости черный ход.
|
OPCFW_CODE
|
Data lineage tracking is one of the critical requirements that organizations in highly regulated industries face. As a regulatory requirement, these organizations need to have lineage details of how data flows through their systems. To process data, organizations need fast, big data technologies. Spark is one of the popular tools. It is a unified analytics engine for big data processing, with built-in modules for streaming, SQL, machine learning and graph processing. While there are several products that cater to building various aspects of governance, Apache Atlas is a scalable and extensible set of core foundational governance services — enabling enterprises to effectively and efficiently meet their compliance requirements within Hadoop, and it allows integration with the whole enterprise data ecosystem. A connector is required to track Spark SQL/DataFrame transformations and push metadata changes to Apache Atlas. Spark Atlas Connector provides basic job information. If we need to capture attribute-level transformation information within the jobs, then Spline is the other option. Spline is a data lineage tracking and visualization tool for Apache Spark. Spline captures and stores lineage information from internal Spark execution plans in a lightweight, unobtrusive and easy-to-use manner.
Users can manage metadata in Atlas using two methods via a REST API or Messaging. For Atlas integration with Spline, in this post we have shortlisted a messaging interface that is based on Kafka. The messaging interface is particularly useful if one wishes to use a more loosely coupled integration with Atlas that could allow for better scalability, reliability etc. Atlas uses Kafka based messaging services as a notification server for communication between hooks and downstream consumers of metadata notification events. Events are written by the hooks and Atlas to different Kafka topics. Azure Event Hubs provides a Kafka endpoint that can be used by your existing Kafka based applications as an alternative to running your own Kafka cluster. Event Hubs supports Apache Kafka protocol 1.0 and later, and works with your existing Kafka applications, including MirrorMaker.
This post is using Spline from within Azure Databricks, persisting the lineage information to Apache atlas using the Azure Kafka enabled Event. To implement this, below are the required steps:
1. Create Kafka enabled Eventhub
2. Configure Apache Atlas to use Event Hub
3. Upload Spline Typedefs
4. Install Spline libraries within Azure Databricks
5. Spark Code Changes
6. Using Eventhub to check message flow
7. Using Atlas UI to check Lineage
Create Kafka enabled Eventhub
As a first step, create Kafka enabled eventhub name space using article. For Atlas Spline integration, only eventhub namespace to be created not event hub. Atlas Kafka plugin reads messages from ATLAS_HOOK topic and it will be created by Spline API during run time. We would need connection string during step 2(Configure Apache Atlas to use Event Hub) and step 5(Spark Code Changes). Once eventhub name space is created, open evenhub namespace. Goto setting -> Shared access policies -> RootManageSharedAccessKey and copy “Connection string–primary key”. As shown below:
Configure Apache Atlas to use Event Hub
Apache Atlas configuration are saved in java properties style configuration. The main configuration file is atlas-application.properties which is in the conf dir at the deployed location. To add event hub configuration into Apache Atlas, we need to modify below sections of atlas-application.properties file
· Notification Configs
· JAAS Configuration
Azure kafka enabled eventhub is outside Atlas, so modify atlas.notification.embedded to false. To pull messages from Eventhub, Atlas needs eventhub kafka bootstrap server name, so modify atlas.kafka.bootstrap.servers to <<eventhub namespace name>>.servicebus.windows.net:9093.
Notification Configs section will look like below after all modifications:
######### Notification Configs #########atlas.notification.embedded=falseatlas.kafka.bootstrap.servers=<<eventhub namespace name>>.servicebus.windows.net:9093
The Atlas hook uses a JAAS configuration section named "KafkaClient" to authenticate with the Kafka broker. In a typical Kafka-enabled Event Hub deployment this configuration section is set to use a username and password, where the username is set to $ConnectionString and the password is the connection string copied from step 1. Event Hub Kafka uses SASL_SSL as the protocol and PLAIN as the mechanism. These values also need to be set in the JAAS configuration section.
As a solution need to add/update below in atlas-application.properties to enabled in secure mode communication between Eventhub and Atlas.
######### JAAS Configuration ########atlas.jaas.KafkaClient.loginModuleName=org.apache.kafka.common.security.plain.PlainLoginModuleatlas.jaas.KafkaClient.loginModuleControlFlag=requiredatlas.jaas.KafkaClient.option.username=$ConnectionStringatlas.jaas.KafkaClient.option.password=<<Eventhub Namespace connection string copied from step 1>>atlas.jaas.KafkaClient.option.mechanism=PLAINatlas.jaas.KafkaClient.option.protocol=SASL_SSL
Upload Spline TypeDefs
Before start harvesting spark lineage information into Atlas, Spline meta model must be uploaded into Atlas environment using the Rest API v2. To interact with the Atlas REST V2 endpoint, either use curl or tools like Postman. First download spline-meta-model.json from github . Below is a sample interaction which is used to POST the Spline type definitions in Atlas:
ATLAS_BASE_URL=https://atlas-servername:port/api/atlas/v2curl -negotiate -u reenu -X POST -H ‘Content-Type: application/json’ -H ‘Accept: application/json’ “$ATLAS_BASE_URL/types/typedefs” -d “@./spline-meta-model.json”
Note: My Atlas instance was Kerberos protected and therefore the negotiate flag was used. Request JSON is stored into spline-meta-model.json.
On successful, you can see the definition in the response as shown below:
Install Spline libraries within Azure Databricks
To make Spline libraries code available to databricks notebooks and jobs running on your clusters, install spline-core libraries. We need to use the Maven coordinates and install these into Azure Databricks as Maven libraries. With assumption of using Spark 2.4, as part of Spline Atlas integration only below two libraries are required
To add just these libraries, you need to specify “exclusions” when adding these libraries in the Databricks UI. The exclusions that we have to add are:
Spark Code Changes
Now its time to setup the Spark session configuration items in order to connect to Kafka enabled Event Hub endpoint.
System.setProperty("spline.mode", "REQUIRED")System.setProperty("spline.persistence.factory", "za.co.absa.spline.persistence.atlas.AtlasPersistenceFactory")System.setProperty("atlas.kafka.bootstrap.servers", "<<Eventhub NameSpace Name>>.servicebus.windows.net:9093")System.setProperty("atlas.kafka.hook.group.id", "atlas")System.setProperty("atlas.kafka.sasl.jaas.config", "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$ConnectionString\" password=\"<<Eventhub Namespace connection string copied from step 1>>\";")System.setProperty("atlas.kafka.sasl.mechanism", "PLAIN")System.setProperty("atlas.kafka.security.protocol", "SASL_SSL")
Next step is enable lineage tracking for that Spark session:
Then we run a sample query which will read csv file into dataframe and later write 2 csv files from same dataframe.
val emp = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load("/FileStore/tables/emp13.csv")display(emp)emp.write.format("com.databricks.spark.csv").save("/FileStore/tables/emp100.csv")emp.write.format("com.databricks.spark.csv").save("/FileStore/tables/emp101.csv")
Note: For above code snippet, I had uploaded one csv file. And emp13.csv is an output of other databricks spark job.
After successful execution of the Jupyter notebook, you can find 2 new csv files (emp100.csv and emp101.csv) in the Data section of Azure Databricks
Using Event Hub to check message flow
Open Azure portal and open Event Hub Namespace which was created as part of step1. Goto Entities-> Event Hubs section. There you can find a new event hub with name atlas_hook. This is created by Spline apis while processing databricks job. Atlas_hook is used as Kafka topic by Atlas kafka hook to pull data from Event Hub . In overview tab of atlas_hook you can see 2 incoming messages and 2 outgoing messages, as shown below. This indicate successful retrieval of messages by Atlas hook.
Using Atlas UI to check lineage
Now its time to check lineage information of data we processed in spark shell. Every time a spark job runs in Databricks shell, it creates a new lineage information of type spark_job. Open Apache Atlas UI. Go to search and find “spark_job” in “Search By Type” drop down. In search result, there is a new entry with name “Databricks Shell”. Open “Databricks Shell” job and you can see properties as shown below.
To get details about lineage information, go to Lineage tab. It will provide details as shown below. You can see multiple Databricks Shell in this diagram, because emp13.csv was an output of multiple spark jobs.
If you want to get more details about emp101.csv file, click on dbfs:/FileStore/tables/emp101.csv in above lineage information graph or search for hdfs_path in “Search By Type” drop down.
For a multi-step spark shell job, output will be slightly different as shown below.
In more complex scenario, where one file is being processed by multiple Databricks shell, output will consist of details of all spark jobs, hdfs and related operations.
To capture attribute level lineage information of a spark job, we used Spline plugin to push lineage information from Databricks shell to Apache Atlas. We leveraged Kafka enabled Event Hub to work as messaging platform between Spline and Atlas. Later we used azure portal to track flow of messages from Event hub and Atlas UI for insight of lineage information.Data lineage tracking using Atlas + Spline via Event Hub
|
OPCFW_CODE
|
import { EntitySystem, World, WorldBuilder, Component, ComponentSetBuilder } from '../src';
test('integration test', () => {
const entitySystemA = new TestEntitySystemA();
const entitySystemB = new TestEntitySystemB();
const entitySystemC = new TestEntitySystemC();
const world: World = new WorldBuilder().with(entitySystemA, entitySystemB, entitySystemC).build();
const componentAMapper = world.getComponentMapper<TestComponentA>(TestComponentA);
const componentBMapper = world.getComponentMapper<TestComponentB>(TestComponentB);
const componentCMapper = world.getComponentMapper<TestComponentC>(TestComponentC);
const componentDMapper = world.getComponentMapper<TestComponentD>(TestComponentD);
let entity1 = world.createEntityId();
componentAMapper.addComponent(entity1, new TestComponentA());
componentBMapper.addComponent(entity1, new TestComponentB());
componentCMapper.addComponent(entity1, new TestComponentC());
componentDMapper.addComponent(entity1, new TestComponentD());
let entity2 = world.createEntityId();
componentAMapper.addComponent(entity2, new TestComponentA());
componentBMapper.addComponent(entity2, new TestComponentB());
let entity3 = world.createEntityId();
componentBMapper.addComponent(entity3, new TestComponentB());
componentCMapper.addComponent(entity3, new TestComponentC());
let entity4 = world.createEntityId();
componentAMapper.addComponent(entity4, new TestComponentA());
componentDMapper.addComponent(entity4, new TestComponentD());
world.update(0);
expect(entitySystemA.getEntityIds()).toStrictEqual([entity1, entity2, entity4]);
expect(entitySystemB.getEntityIds()).toStrictEqual([entity1, entity4]);
expect(entitySystemC.getEntityIds()).toStrictEqual([entity3]);
componentAMapper.removeComponent(entity1);
componentAMapper.removeComponent(entity2);
world.update(0);
expect(entitySystemA.getEntityIds()).toStrictEqual([entity1, entity4]);
expect(entitySystemB.getEntityIds()).toStrictEqual([entity4]);
expect(entitySystemC.getEntityIds()).toStrictEqual([entity1, entity2, entity3]);
world.deleteEntityById(entity1);
world.update(0);
expect(entitySystemA.getEntityIds()).toStrictEqual([entity4]);
expect(entitySystemB.getEntityIds()).toStrictEqual([entity4]);
expect(entitySystemC.getEntityIds()).toStrictEqual([entity2, entity3]);
//gets recycled
expect(world.createEntityId()).toEqual(entity1);
});
// Empty marker components — they carry no data and exist solely to drive
// the component-set matching rules exercised by the integration test.
class TestComponentA extends Component {}
class TestComponentB extends Component {}
class TestComponentC extends Component {}
class TestComponentD extends Component {}
/**
 * Test system whose aspect matches entities holding at least one of
 * TestComponentA or TestComponentD.
 */
class TestEntitySystemA extends EntitySystem {
  onInit(): void {}

  onUpdate(_dt: number): void {}

  initComponentSet(builder: ComponentSetBuilder): ComponentSetBuilder {
    return builder.containingAny(TestComponentA, TestComponentD);
  }
}
/**
 * Test system whose aspect matches only entities holding both
 * TestComponentA and TestComponentD.
 */
class TestEntitySystemB extends EntitySystem {
  onInit(): void {}

  onUpdate(_dt: number): void {}

  initComponentSet(builder: ComponentSetBuilder): ComponentSetBuilder {
    return builder.containingAll(TestComponentA, TestComponentD);
  }
}
/**
 * Test system whose aspect matches every entity that does NOT hold
 * a TestComponentA component.
 */
class TestEntitySystemC extends EntitySystem {
  onInit(): void {}

  onUpdate(_dt: number): void {}

  initComponentSet(builder: ComponentSetBuilder): ComponentSetBuilder {
    return builder.containingNone(TestComponentA);
  }
}
|
STACK_EDU
|
Code signing creates a digital "shrink-wrap" for secure distribution of code and content over the Internet. Symantec Code Signing adds a level of trust by providing third-party authentication of the code signer, recognized worldwide. Learn more in the Information Center
or get .
Sign code more securely to protect your business and identity, provide a more frictionless download experience for users, and immediately establish reputation in Windows 8 and IE 9.
Digitally sign 32-bit or 64-bit user-mode (.exe, .cab, .dll, .ocx, .msi, .xpi, and .xap files) and kernel-mode software. Provider for Microsoft Windows Logo programs.
Digitally sign and optimize .apk files for the Android platform. Automatically keep track of certificate keys and access full reporting of signing activity within the Symantec Code Signing Portal.
For Java applications for desktop and mobile devices, digitally sign .jar files and Netscape Object Signing. Recognized by Java Runtime Environment (JRE).
Digitally sign VBA objects and macros. For Microsoft Office and third-party applications using VBA.
Digitally sign .air or .airi files. Required for all AIR-based applications.
Digitally sign code for Windows Phone and Xbox 360 applications. Required for Microsoft App Hub service.
Digitally sign code for Windows Phone Private Enterprise applications. Required for organizations using Microsoft’s Windows Phone Dev Center.
Become a commercial BREW developer. Digitally notarize BREW applications.
Symantec Code Signing helps you securely deliver your apps and code to more customers, on more platforms than any other provider.
Maximize Distribution and Revenue on More Platforms
Symantec root certificate ubiquity is second to none. Our root certificates come preinstalled on most devices and are embedded in most applications, helping you minimize security warnings while maximizing distribution and revenue. Symantec is the one trusted provider of code signing for Windows Phone, AT&T Developer Program, Java Verified and Symbian Signed applications.
Rely on the Trusted Certificate Authority (CA)
More developers and publishers rely on Symantec, the most recognized and trusted Certificate Authority (CA) worldwide, than any other CA. In fact, 7 out of 10 code signing users choose Symantec for code signing. (Symantec's online interactive survey, September 2011).
- Symantec’s rigorous authentication practices, established by Symantec and audited annually by KPMG, lead the industry in reputation qualification measures.
- Symantec’s robust PKI infrastructure includes military-grade data centers and disaster recovery sites for unsurpassed customer data protection, availability, and peace of mind.
- Symantec’s web-based portal and API integrations make it easy to integrate code signing into your development process.
Symantec Code Signing helps ensure a safe, secure experience for you and your customers.
Manage Existing Code Signing Certificates
Renew, Revoke or Replace Code Signing Certificates
To get started, search by common name, order number or serial number.Click the Search button, and then enter a common name, order number or serial number.
Check Order Status
Click the Search button, and then enter the Order Number from your confirmation email.
Please visit the for additional support.
|
OPCFW_CODE
|
In a word, no.
There are several reasons, some quite independent of each other, for
Django having its own ORM and trusting that people who don't like it
or want/need something else will take advantage of the fact that
Django's just Python, and will use whatever ORM solution they prefer.
First, and dearest to my heart as release manager, is simply that a
move toward SQLAlchemy would be large enough and disruptive enough
that we couldn't in good faith do it during the Django 1.x release
series; if it were to happen at all, it'd have to happen at a bump of
the major version number (e.g., in a move from a Django 1.x -> Django
You'll notice, for example, that even something as tiny as switching
``AdminSite.root`` to an include of ``AdminSite.urls`` is going to
need multiple full release cycles to complete, because we have a
strong commitment to API compatibilty and stability, and so follow the
same general process as Python itself for deprecating and removing
pieces of API. Given what's involved even in a tiny change like the
admin URLs, it should be fairly clear that we simply could not switch
ORMs midstream in a release series even if we wanted to.
Second, I don't particularly think there's a major need to try to make
everyone converge on one single ORM, or even as large a community
impetus as you may suspect; there are, right now, either four or five
(depending on how you want to count) major Python ORM projects with
decent community support, and none of the others seem to be
particularly interested in abandoning their efforts (or even feeling
any particular pressure to do so) in favor of a merge with SQLAlchemy.
Third, and somewhat related to the above, I don't think it would be a
good thing for Django to do this; SQLAlchemy is a very good ORM,
certainly, but it also has a certain approach to and assumptions about
ORM which are very deeply and very rigidly embedded into it. It is,
fundamentally, a Data Mapper-style ORM, and that will practically
always be visible and have effects on end user code even with
extensions designed to emphasize a different approach. Django's ORM,
meanwhile, has its own approach and assumptions which are, again,
rather deeply and solidly built in; it is, fundamentally, an Active
Record-style ORM, and that will practically always be visible and have
effects on end-user code.
And this is not a bad thing! Both of these are valid, respectable
approaches to the ORM problem, and personally I think the Python
ecosystem would be poorer, not richer, if one of these approaches were
to be eliminated. Right now Django and SA represent the "best of
breed" implementations of these approaches, and in order to keep good
implementations available I think the Django ORM needs to continue to
be developed and supported.
Fourth, it's worth noting that we have certain stated goals for what
we'd like our ORM to do, and those goals are irreconcilably
incompatible with stated goals of SQLAlchemy. For example, it is a
goal of Django's DB layer to eventually support backends which are not
SQL databases (e.g., the BigTable implementation in Google App Engine,
CouchDB, etc.). So far as I know, however, SQLAlchemy has stated
unequivocally that they will remain SQL-only.
I could probably go on for a while here, but hopefully it's clear by
now that there are some fairly solid arguments for Django continuing
to maintain and use its own ORM solution instead of switching to
SQLAlchemy (or any other project which might come along and gain
similar traction -- it's useful to remember that, in the beginning,
emails such as yours asked why we didn't just switch to SQLObject,
since that was at the time the standard default ORM in other major
frameworks; had we done so we'd have been in a bit of a pickle when
everybody abandoned it).
"Bureaucrat Conrad, you are technically correct -- the best kind of correct."
To put in one more voice of authority - speaking as a core developer,
I'm strongly opposed to modifying Django to use SQLAlchemy (or any
other external project for that matter) as the ORM.
On top of the many valid reasons that James mentioned, there is one
more that I consider to be very important - one that stems from the
task that _any_ ORM is trying to perform.
The goal of any SQL-backed ORM is to provide a syntax that makes it
easy to express SQL queries. The problem is, we already have a very
powerful, 100% feature complete syntax for expressing SQL queries -
it's called SQL. By very definition, _every_ SQL-backed ORM is
reinventing the wheel.
ORMs have an important role to play in making the simple cases very
simple - and this is a sweet spot that Django's ORM, SQLAlchemy, and
any number of other ORM projects manage quite well. It is much easier
to write "Author.objects.all()" than to write "SELECT id, firstname,
lastname, birthdate, address1, address2, .... FROM author".
However, this argument isn't about the simple cases - it is about the
complex cases. I will certainly grant that SQLAlchemy is certainly
able to cover more of the world of possible SQL queries than Django's
ORM. However, there are queries that even SQLAlchemy can't express (or
can't express elegantly). At this point, you can either continue
making modifications to your non-SQL language in an attempt to provide
100% coverage of the SQL spec, or you can step back and say "No - we
already have a language that does this", and embrace it.
Django has decided to take the latter approach. The recent proposals
for a raw() operator to make it easier to return Django model objects
as the result of a raw SQL query is an expression of this underlying philosophy.
So, no - for this reason, and many many others, we're not going to
adopt SQLAlchemy as Django's ORM.
That said, we are committed to making it easier to use non-Django
components inside a Django project. If there is anything that we can
do to make it easier to use SQLAlchemy (or any other non-Django ORM)
inside a Django project, we're open to those suggestions.
Russ Magee %-)
I'd agree and elaborate on #1, that ORMs are good for simple
object retrieval, and also for eliminating some of the most
grievous cross-platform issues.
However, I must disagree on #2 and #3: I'd never consider the
*addition* of a generalization-layer (an ORM in this case, but
the idea is far more broadly applicable) a step towards
optimization. Likewise, for my more complex queries, there's no
good way to express them efficiently in *any* ORM I've used --
many end up either inexpressible, or they involve bringing back
gobs of data client-side and post-processing it there instead of
at the server.
I use an ORM for the same reason I use Python: they make common
tasks easy. Using Python/ORM, I can bang out most of what I
need. But when I hit a wall (performance or complexity), both
need to allow me to go *down* a layer (to C or SQL).
So I'd say Django has done a fine job of making the ORM what it
should be -- an approachable, cross-platform, cross-database
data-access layer that covers most use-cases and gets out of your
way when you need it to.
|
OPCFW_CODE
|
- Ruan Bekker
When dealing with a lot of servers where you need to ssh to different servers and especially if they require different authentication from different private ssh keys, it kinda gets annoying specifying the private key you need, when you want to SSH to them.
~/.ssh/config is powerful!
In this config file, you can specify the remote host, the key, user and the alias, so that when you want to SSH to it, you dont have to use the fully qualified domain name or IP address.
Let's take for example our server-a with the following details:
- FQDN: host1.eu.compute.domain.coom
- User: james
- PrivateKeyFile: /path/to/key.pem
- Disable Strict Host Checking
So to access that host, you would use the following command (without ssh config):
$ ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -i /path/to/key.pem firstname.lastname@example.org
Now with SSH Config, open up the config file:
$ vim ~/.ssh/config
and declare the host details:
Now, if we need to SSH to it, we can do it as simply as:
$ ssh host1
as it will pull in the settings described in the config file for the host alias that you are calling as the argument of the ssh binary.
Appending to our SSH Config, we can configure either our client or server to prevent SSH Timeouts due to inactivity.
- SSH Timeout on our Client:
$ vim ~/.ssh/config
Here we can set how often a NULL Packet is sent to the SSH Connections to keep the connection alive, in this case every 120 seconds:
- SSH Timeout on the Servers:
$ vim /etc/ssh/sshd_config
Below we have 2 properties: the interval of how often to instruct the connected client to send a NULL packet to keep the connection alive, and the max number of intervals. So for an idle connection to time out in 24 hours, we take 86400 seconds (which is 24 hours) and divide it into 120-second intervals, which gives us 720 intervals.
So the config will look like this:
Then restart the sshd service:
$ /etc/init.d/sshd restart
Another handy tool is
ssh-agent, if you have password encryption on your key, everytime you need to ssh, a password will be prompted. A way to get around this is to use the ssh-agent.
We also want to set a TTL to the ssh-agent, as we don't want it to run forever (unless you want it to). In this case I will let the ssh-agent exit after 2 hours. It will also only run in the shell session from where you execute it. Lets start up our ssh-agent:
$ eval $(ssh-agent -t 7200)
Agent pid 88760
Now add the private key to the ssh-agent. If your private key is password protected, it will prompt you for the password and after successful verification the key will be added:
$ ssh-add /path/to/key.pem
Identity added: /path/to/key.pem (/path/to/key.pem)
Multiple Github Accounts:
Here is a great post on how to work with different GitHub Accounts:
|
OPCFW_CODE
|
We have this idea that the Solar System is a vast place, and when we hear that Pluto is about 3 billion miles away or that the probe currently making its way to Pluto will arrive in 2015, our suspicions are confirmed. Even proposed manned missions to Mars talk about a 7 month transit time.
But that's more a reflection of how we choose to go. Currently, almost all(*1) of our probe propulsion is chemical... we typically accelerate in one big WOOSH (technical rocketry term) and then coast the rest of the way. You often see terms like 'Hohmann transfer orbit' - which sounds cool, but it's really just a minimum energy trajectory. Think 'slow'. Yes, the probe to Pluto has been coasting for years and will coast for many more years(*2).
There are two performance numbers of interest for a rocket, roughly sortakinda analogous to gas mileage and top speed for a car.(*3).
Here's the big 'a-ha': If we got better 'gas mileage' with our rockets, we wouldn't *have* to use minimum-energy trajectories! And the trips wouldn't take nearly so long.
In the future we'll travel using continuous-thrust propulsion. And we don't even need much thrust. If we could thrust at a continuous .03g (that is 3% of the gravitational pull you feel right now), Pluto is just over 2 months away! Talk about shrinking the size of the Solar System!
But .03g would really be quite a futuristic feat. I'd love to see a manned vehicle capable of even .0001g, which would be interesting just as a proof-of-concept; it's too wimpy to actually get us anywhere.
Things start to get fun around .001g. If we had a manned vehicle capable of .001g, we could start thinking about a 7-month trip to Europa rather than Mars. A while back I wrote a sci-fi story(*4) about the first manned mission to Ceres, and in that story, the crew ship was capable of .001g acceleration.
A century from now 'Hohmann transfer orbit' will be a quaint olde term. All manned trips will be continuous-thrust, we'll use spiral trajectories, etc. The Solar System will effectively shrink in size just like the Earth has shrunk over the last century due to telecommunications.
(*1) We have used Ion engines on one or two probe missions, everything else is chemical.
(*2) With the exception of minor course corrections. We may have (I don't know) done a burn while passing Jupiter, it is an efficient way to add speed; accelerating during a gravitational slingshot.
(*3) The analogous numbers are Isp and maximum sustained acceleration. Isp is the Specific Impulse of the rocket; measured in how long (in seconds) a pound of fuel can produce a pound of thrust. Today's chemical rockets are in the 300s - 450s range. Ion engines are over 20,000s - but today, we can't take off from Earth with Ion engines. Even if we could we probably wouldn't choose to since the exhaust is dangerous.
(*4) Since I wrote it I have come across some writings on the web that make me painfully aware I can't write worth a hoot!
|
OPCFW_CODE
|
Nov 14 2016 09:01 AM
Nov 14 2016 09:01 AM
Outlook Customer Manager, a new Office 365 service, helps small businesses track and grow their customer relationships right within Outlook. It gives a complete view of interactions with each business contact, helps track tasks and deals in progress, and surfaces timely reminders—all next to your inbox. Outlook Customer Manager will be available at no extra cost to Office 365 Business Premium subscribers – beginning with those in First Release program and continuing worldwide in the coming months. It is turned on by default allowing users to access it in one-click from the home tab in Outlook 2016 for Windows, and includes a mobile app. Learn more at Outlook Customer Manager relationships made easy for small businesses
Nov 15 2016 03:04 AM
There is only mention of IOS and windows options for the Outlook Customer Manager, were is the version for Mac OS version ?
Nov 16 2016 08:27 AM
We would REALLY like this feature in our Office 365 E3 tenant. Please help to make it available sooner rather than later. :)
Dec 09 2016 03:23 AM
@Welly Lee Some questions. All contacts are stored in the "regular" Outlook contacts? Can we use a central/managed contacts "database". And is it possible to use Customer Manager data? Like use an API or something like that. For example, when I create a new deal, I want to pull of some action to create a supporting SharePoint (online) site.
And is it possible to customize Customer Manager? For example can I change "Deals" in "Licences"?
Dec 11 2016 01:43 PM
Hi! I'm a little confused. I have an Office 365 Business Premium license, "first release" option is activated and assigned to my users but OCM is not showing up in Outlook tabs. I really like this option and I really need it ASAP. Could someone help me?
Jan 08 2017 11:37 AM
Almost 2 months since this was announced! Any word on the progress of this rollout? Seems really slow
Jan 23 2017 08:14 AM
We were using BCM and then had to switch over to CRM which does not really work for us. We are using Enterprise E3 and my understanding is that the Outlook Customer Manager only works on the Office 365 Business Premium right now but will for Enterprise E3 in the near future. From the looks of it the Outlook Customer Manager would be a better fit for us. I have done beta testing for Microsoft and previously had ben a MSDN developer. Also I developed major CRM systems for several banks in the past. Is there a way to connect with the beta development team for E3 and become part of the beta program? David A Billings, President CEO GVP Global Corp
Jan 23 2017 01:56 PM
Jan 23 2017 03:33 PM
Jan 30 2017 05:59 AM
Where are the instructions on how to use Outlook Customer Manager. I have installed it, but don't know how to use it.
Jan 30 2017 06:21 PM
You can refer to the following blog post to get started: https://techcommunity.microsoft.com/t5/Outlook-Blog/bg-p/Outlook
|
OPCFW_CODE
|
Advantage of ADO.Net?2 8712
How would u connect to database using .NET?5 6285
Difference between DataReader and DataAdapter / DataSet and DataAdapter?3 29185
What are the differences between RAW, AUTO and Explicit modes in retrieving data from SQL Server in XML format?1 16728
Differences between dataset.clone and dataset.copy?2 5294
What are the two fundamental objects in ADO.NET?8 17909
Call a stored procedure from ado.net and pass parameter to it ?7 19282
Where does ADO.NET and XML web services come in the architecture ?1 9693
What does ADO.NET consists of ?7 9311
What is the key feature of ADO.NET compared to ADO ?3 5861
What is ODP.NET ?1 7326
What provider ADO.net use by default?10 17135
What is the default view of Data Grid?5 10266
How To Update A Column In A DataGrid Using C#.NET? I am getting InvalidCastException as (Specified cast is not valid) while updating 2nd column in a datagrid? Id,firstname,lastname are the three columns of my datagrid respectively. I wanted to edit the second column(lastname) and update it. I did the following code in DataGrid's updatecommand(),but failed to update ! Int varid=(int)DataGrid1.DataKeys[e.Item.ItemIndex]; TextBox lnm=(TextBox)e.Item.Cells.Controls; string str=lnm.Text ; SqlCommand cmd=new SqlCommand("update customer set lastname='" + str + "' where id=" + varid + "",con); cmd.ExecuteNonQuery(); DataGrid1.EditItemIndex=- 1; DataGrid1.DataBind();2 13441
Can we use Polystyrene as a Bitumen modifier !! if so is it a good MS research topic ? and what type of laboratory tests will be conducted during Research work ??
How work of capacitor in dc supply...???
Hi Friends..... I am interested to write Groups.Can anyone guide me?Till now i didn't gave any attempt.How many attempts will be there for Group-1/Group-2/Civils?How much amount to spent totally to complete a particular one?And how much time to spend?I am thinking to give my first attempt in the year 2009.
what are the values of compressieve strenght?
How to Access a FIFO?
hai! i am a BE(computer science) graduate with 54% of aggregate looking for bank clerk job? in interview they are asking why are you looking for a bank job and what guareente that once you are selected ? plz guide me?
In our project, we use Withdrawal Indent, we are confused to use heading as "Withdrawal Indent" or "Withdraw Indent". Please suggest me which one to use ASAP, Thanks in advance
IS there any effect of vortex in low pressure side of the transmiter after the orific in feed water line? if yes then what is the calculation?
What are Visual Debugger?
Hi..Guys, (1). Iam intrested in Webdesinging, can u pls tell me openings for the post of webdesigners in any company.. (2). Pls let me know the new openings in any MNC around Hyderabad.
Topic- looping,function overloading,nesting ,polymorphism. Aim - to write a function with a name buzz-buds,that will check whether the given numbers are buddies or not on the basis of no. of parameters passed during function calling.
how to check and keep control on the wastage of material at site
why the name trunking bus PT
Why the diesel generator suddenly increases the output voltage and avr get shorted ... And also increases the load the generator decreases the voltage why ..and also the output power in amps generated in 30 kv diesel generator ..
list three things you can do with the files you create by recording your voice on your computer?
|
OPCFW_CODE
|
The Pros and Cons of Free vs Paid Software
A well-formed XHTML document adheres to all the syntax requirements of XML. A valid document adheres to the content specification for XHTML, which describes the document structure. Those are the main changes necessary to translate a document from XHTML 1.0 to HTML 4.01. To translate from HTML to XHTML would also require the addition of any omitted opening or closing tags.
The Knappens have taken more than 50 loads with their pickups and also have used the couple's Buick Riviera for the 9-mile round trip from their farm to Galesville and back. Live Science is part of Future US Inc, an international media group and leading digital publisher. Stay up to date on the latest science news by signing up for our Essentials newsletter. The paper says "the open-ended generation of novelty does not fit cleanly in the paradigmatic frameworks of either biology or physics," which doesn't seem to make much sense. In recent years, web accessibility has gained prominence, emphasizing the importance of creating inclusive web experiences for all users.
The social shaping of technology
The first version of HTML was called HTML 1.0 Strict and was released in 1993. In this tutorial, you will learn about the history of HTML, how it was created, who created it, how many versions of HTML there are, how it was used in the past, etc. After HTML 4.01 and XHTML 1.0, the guys who were in control of HTML’s direction html5 application development got sidetracked working on a new proposal for XHTML 2. At the same time, clever web developers were innovating constantly, hacking new functionality into websites and browsers. The path that XHTML 2 was taking started to look both boring and unrealistic, and it became pretty clear that a new approach was needed.
- And W3C was also trying to make the HTML more potent than previous versions.
- CSS3 introduced advanced features like animations, transitions, and media queries, further enhancing the visual and interactive capabilities of HTML.
- HTML (Hypertext Markup Language) has experienced a major transformation during the last decade prior to releasing its latest version known as HTML5 (Hypertext Markup Language 5).
- The path that XHTML 2 was taking started to look both boring and unrealistic, and it became pretty clear that a new approach was needed.
- With each iteration, HTML has become more versatile, powerful, and aligned with the demands of the ever-changing web landscape.
Along with this, many other advanced features were not in previous versions of it. Hence, because of these additional features, it was a more trouble-free version of HTML. The concept of HTML came to the mind of Sir Tim Berners-Lee when he saw researchers at CERN need to share documents many times. And then, he proposed that he develop a markup language that would help connect computers worldwide.
Brief History of HTML
A document sent with the XHTML MIME type is expected to be well-formed XML; syntax errors may cause the browser to fail to render it. The same document sent with the HTML MIME type might be displayed successfully since some browsers are more lenient with HTML. Another important component is the HTML document type declaration, which triggers standards mode rendering. It was the second version of HTML and became the standard language until HTML 3.2 came. Using this language, the user can change the background color, text color, and other formatting.
HTML is a markup language that web browsers use to interpret and compose text, images, and other material into visible or audible web pages. Default characteristics for every item of HTML markup are defined in the browser, and these characteristics can be altered or enhanced by the web page designer's additional use of CSS. These formatting commands were derived from the commands used by typesetters to manually format documents.
And W3C was also trying to make the HTML more potent than previous versions. And with these additional attributes and features, HTML 3.2 was endorsed by W3C. By 1997, HTML 3.2 became the standard markup language for web-based applications. But the most popular version of it was HTML4.01 which became the standard markup language in 1999.
In 1995, the IETF organizes a working group of HTML and manages to publish, on September 22 of that same year, the HTML 2.0 standard. The first official proposal to convert HTML into a standard was made in 1993 by the IETF (Internet Engineering Task Force). After completing the development of its "hypertext" system, Tim Berners-Lee presented it to a call organized to develop a "hypertext" system for the Internet. Since then, the WHATWG has been working on this specification (amongst others), and the W3C has been copying fixes made by the WHATWG into their fork of the document (which also has other changes).
|
OPCFW_CODE
|
Is Microsoft really claiming pre-boot authentication (PBA) for Full Disk Encryption (FDE) is not necessary? One could certainly get that impression from recent articles (HERE and HERE) posted by the organization. The first article on “Types of attacks for volume encryption keys” lists a few known historical attacks that “could be used to compromise a volume encryption key, whether for BitLocker or a non-Microsoft encryption solution”, and the second makes statements like “For many years, Microsoft has recommended using pre-boot authentication to protect against DMA and memory remanence attacks. Today, Microsoft only recommends using pre-boot authentication on PCs where the mitigations described in this document cannot be implemented.”
I will let you read the entire Microsoft articles yourself, if you like, but here is why they have missed the big picture. Encryption of any form doesn’t provide confidentiality without authentication. For example, a computer with a SED (Self Encrypting Drive), or software FDE that boots right into a user’s account without authenticating, leaves the data completely exposed. The need for authentication is obvious, but when should it be done? The two basic choices are:
- Authenticate the user before the drive is unlocked and the OS is booted up.
- Authenticate the user after the drive is unlocked. Unlock the drive automatically, then load the OS or an application and prompt the user to authenticate.
NB: You could also do both if you are a strong believer in defense in depth.
The first choice above is where PBA comes in. PBA provides an environment external to the operating system (OS) as a trusted authentication layer. The PBA prevents any data from being read from the drive, including the operating system, until the user has confirmed he/she has the correct password or other credentials.
The second choice would normally be to have the OS prompt the user for authentication credentials after unlocking. (BTW this isn’t PCI DSS compliant: Read more HERE.) I have never heard anyone argue that it is more secure to unlock or decrypt data BEFORE authenticating, but the article does seem to be saying that if you configure your OS just right and have the correct type of hardware, PBA is not necessary. The argument goes that PBA is only necessary to protect against RAM attacks where the attacker gets direct access to RAM, either physically or through a DMA port, and extracts the data encryption key (DEK). With proper configuration and the right hardware, this is not possible.
The argument that the security of PBA is not needed is incorrect for two reasons:
1) The attacker may deploy considerable resources to get valuable data on the machine, so you cannot rule that out without an analysis of the hardware on a model by model basis. Do you know the make, model, and firmware revision of all the hardware components (including TPM and memory) in your enterprise in order to decide if you need PBA or not on a machine by machine basis? And as even Microsoft states in this older ARTICLE, “TPM-only authentication method offers the lowest level of data protection. This authentication method protects against attacks that modify early boot components, but the level of protection can be affected by potential weaknesses in hardware or in the early boot components.”
2) More importantly, memory attacks are not the only possible attacks. Once booted into Windows and the drive is “unlocked”, full disk encryption is no longer providing any cryptographic protection, which is orders of magnitude stronger than the “programmed” security an OS can provide. If you let your computer boot automatically up to the OS prompt, you are putting all your faith in OS security and that of the machine hardware. You have to trust that even though the DEK is sitting in plain text in memory, and the data is all readable, that the OS login screen is going to keep attackers out, and that OS is not going to let data leak out of the device via LAN, WAN, or other ports or connections. Modern operating systems consist of millions of lines of code and are very complex. The computer hardware also evolves rapidly. Together, they constitute an enormous attack surface with countless potential unknown vulnerabilities. It is very difficult, if not impossible, to get a reasonable degree of assurance that authentication before decryption is not required.
On the other hand PBA’s main reason for existing is to cryptographically protect the drive keys. With PBA the attack surface is much smaller and manageable. For example, the PBA application can be submitted to a Common Criteria lab for certification against the collaborative Protection Profile (cPP) for FDE Authorization Acquisition (cPP AA). Read more HERE.
The argument against PBA is really based more on usability and high total cost of ownership (TCO). For example, in the Microsoft article on “Configuring BitLocker for Tablets” they write, “deploying pre-boot authentication within their organizations which results in a diminished user experience and increases support costs (for example, a forgotten PIN).” The answer to this is NOT to lower security and compliance standards to avoid the high TCO of PBA, but rather deploy products which have full-fledged PBA that addresses these issues. For example, WinMagic’s SecureDoc offers user-based authentication, self-recovery and network connectivity at pre-boot, providing a level of usability to match security requirements.
To summarize, Microsoft has got this one wrong. The fault in their logic is thinking that PBA is limited to protection against memory attacks AFTER automatically unlocking the drive. They missed the whole point of PBA, which is to prevent anything being read from the drive, such as the operating system BEFORE the user has confirmed they have the correct password or other credentials. PBA is a necessary component of a FDE solution in order to fully achieve the confidentiality (and compliance) that full disk encryption is capable of providing.
|
OPCFW_CODE
|
Binary file not executable
danny at ricin.com
Tue Apr 10 20:02:21 UTC 2007
Op Tuesday 10 April 2007 19:13:04 schreef Christian Walther:
> On 10/04/07, h t <th_freebsd at yahoo.com.cn> wrote:
> > I'm freebsd beginner
> > I Download RealPlayer10GOLD.bin from http://www.real.com/linux/
> > then chmod +x RealPlayer10GOLD.bin
> > run
> > ./RealPlayer10GOLD.bin
> > but broken
> > the message is
> > ELF binary type "0" not known.
> > ./RealPlayer10GOLD.bin: Exec format error. Binary file not executable.
> As it says in the URL where you downloaded the Realplayer from, it's
> for Linux. But FreeBSD is not Linux. There is a Linux Emulation Layer
> available that allows you to execute Linux binaries. You can do a
> # kldload linux
> to enable it. But you'll need to install some additional stuff,
> because Linux application will need Linux libraries...
> > How can I do this?
> > Not install in port
> Realplayer is in Ports:
> # cd /usr/ports
> # make search name=realplayer
> Port: linux-realplayer-10.0.8.805.20060718
> Path: /usr/ports/multimedia/linux-realplayer
> Info: Linux RealPlayer 10 from RealNetworks
> Maint: multimedia at FreeBSD.org
> R-deps: linux-atk-1.9.1 linux-expat-1.95.8 linux-fontconfig-2.2.3_6
> linux-glib2-2.6.6 linux-gtk2-2.6.10 linux-jpeg-6b.34 linux-pango-1.8.1
> linux-png-1.2.8_2 linux-tiff-3.7.1 linux-xorg-libs-6.8.2_5
> WWW: https://player.helixcommunity.org/
> AFAIK Realplayer GOLD is not freely available, you'll have to pay for it.
No, it's the same thing. Same release also. It's just branding. From past
experience, I think when Real starts naming a major version "Gold" it more or
less means it's slated for obsoleteness (or declared final to phrase it more
friendly). They're probably brewing a new release based on helix2. I wonder
if it will only support ALSA (like flash9). I don't follow helix development
though (its redundant IMHO).
More information about the freebsd-questions
|
OPCFW_CODE
|
View Full Version : Weird XP Theme Glitch...
09-29-07, 01:40 PM
I use a windows 2000"ish" theme on XP, but lately the window frame style has been reverting "on it's own" to an XP style. When I check the theme it is still "set" to my personal one but it isn't using it. I have to change it and change it back.
Other than annoyance this is a very minor problem so far, I don't however see an obvious resolution off the top of my head. I did a reg repair with no problems found.
Have any of you seen this behavior before and found a solution?
Edit: after 37 views.. no response... Themes? Come on guys...
10-02-07, 05:50 PM
Anyone? "Bump" This is annoying the crap out of me. It's such an inane little problem that it's not worth using my system image from over a month ago... yet... :)
Is this a built in theme or a 3rd party one. If it is 3rd party, are you using a utility to apply it or just a patched dll? Vanilla XP or MCE?
10-02-07, 07:02 PM
It's just the regular "windows classic" theme that I've modified. It reverts to XP even if I use the unmodified one. Thanks for the reply, btw.
10-20-07, 03:31 PM
One last bump in case the person that can solve this problem just hasn't seen the this thread yet...
10-21-07, 12:35 AM
I've seen this happen a lot on machines with various pieces of spyware.
Maybe give Ad-Aware, Spybot and Windows Defender a run.
If you are just running the windows classic look, which isn't really a theme, just adjust XP for best performance or disable themes altogether in services.
Unless I missed your meaning on what you are using, then disregard. :p
10-21-07, 01:29 PM
Go to services and check and make sure themes service has started, check in performance to see what you have checked, leave everything blank execpt for the bottom 3 choices, and if that don't work, you have a hyjack.
Run spybot first than ad-aware. Those should fix it and you could try hyjackthis for the hyjack.
If all else fails you need a virus scanner. CA online is the best.
vBulletin® v3.7.1, Copyright ©2000-2015, Jelsoft Enterprises Ltd.
|
OPCFW_CODE
|
How to omit manipulated variables in stargazer?
I want to omit manipulated variables in stargazer. Here manipulated variable mean that I do not have those variables in my data set, such as I introduced a squared term.
Below is my code. How can I omit variables like log(fall_t) and log(fall_t)^2. I do have fall_t in my data set.
# Fixed-effects ("within") panel regression of log yield on seasonal
# temperature and precipitation terms, each entered in levels and squares,
# plus a linear (ltt) and quadratic (qtt) time trend.
# NOTE(review): log(fall_t), I(log(fall_t)^2), etc. are transformations
# constructed inside the formula; only the raw variables (fall_t, ...) are
# assumed to exist in `dfto` -- confirm against the data set.
runmtw2_cp_oz11 <- plm(formula = lnyield ~ log(fall_t) + I(log(fall_t)^2) +
# Winter temperature enters untransformed (winter_t, wintertsq) because,
# per the table note below, winter values can be negative and cannot be logged.
winter_t + wintertsq +
log(spring_t) + I(log(spring_t)^2) +
log(fall_p) + I(log(fall_p)^2) +
log(winter_p) + I(log(winter_p)^2) +
log(spring_p) + I(log(spring_p)^2) +
ltt + qtt,
# effect = "individual" gives unit (not time) fixed effects;
# model = "within" is the standard fixed-effects estimator.
data = dfto, effect = c("individual"),
model = c("within"))
# Typeset four fitted models side by side as a LaTeX table.
# NOTE(review): runmtw2_cp_oz6/oz8/oz10 are defined elsewhere and are
# presumed to be plm fits with the same structure.
stargazer(runmtw2_cp_oz11, runmtw2_cp_oz6, runmtw2_cp_oz8, runmtw2_cp_oz10,
title = "Panel data with time trend",
model.names = F, model.numbers = F,
dep.var.caption = "Yield", dep.var.labels.include = F,
notes = "*Average and square temperature of Winter are not in log form
due to negative values.",
# omit.stat = "f" drops the F-statistic row; the question asks how to
# additionally omit coefficient rows -- see stargazer's `omit` argument.
align = T, no.space = T, column.sep.width = "1pt", omit.stat = "f")
If your goal is correct inference, theny ou should consider using poly rather than using I(.)^2 constructions. Failing to do this is a trap for statistically inexperienced analysts. It's also a strong argument for employing adequately trained statisticians or liberally using consulting contracts with same, since they would be able to have seen this before the preparation of results.
Totally makes sense. This also solved one of my problems. However, back to the original question, any idea for omitting coefficients in stargazer?
@YabinDa are you referring to removing explanatory variables from the stargazer table? one could include 'omit' in the stargazer command, which takes a vector of regular expressions (or numeric vector of which elements to omit). for example, omit="log" would exclude any coefficients that included "log" by regex
@Ben I don't know how to write regular expressions, so I tried using a numeric vector and it works now. Another question is, say I omitted three explanatory variables, but I want to use only one omit.label in the generated table. How can I achieve that?
@YabinDa omit and omit.label need to be same length. would this help? https://stackoverflow.com/questions/46621199/label-only-part-of-the-omitted-variables-with-stargazer
@Ben Yeah, the lengths should be the same. I am trying to figure out a way to let them have different lengths. Say, I have three precipitation variables for fall, winter and spring in the model, and I want to omit them in the generated table with only one label "Precipitation included: Yes or no". Any idea? I really appreciate your comments.
@YabinDa was this link helpful (using keep): https://stackoverflow.com/questions/46621199/label-only-part-of-the-omitted-variables-with-stargazer (or if you want, post your data with dput, and show the desired table output, and i can try to see if i can help with a solution...)
|
STACK_EXCHANGE
|
Post by Son Anheuser on Aug 19, 2011 5:29:08 GMT -8
Well according to something called math if witches vote out one more human the game is a lock for them.
So far Amer and Annie have avoided any real suspicion as witches. mainly because they voted one out the first week. But over the last couple weeks, if my memory is correct, part of the plan was to let a confirmed human decide who to vote for.
Tulley said Brad was his choice but then the votes went to Dan, vampire, but also, a non-witch.
This week I was asked to put forth a couple names. Erik and DJ were my choices. But then that was ignored in favour of Chris, again, non-witch.
Annie and Amer are both good players and it would not surprise me if they sacrificed Mike in week one to win the confidence of the humans. The last two weeks votes make me wonder.
Post by Son Anheuser on Aug 19, 2011 8:34:57 GMT -8
True, but you did end with this:
"If I my hypothesis is at all correct, I don't really see how Dan could be a witch but not Erok & vice versa. Since I like the idea of voting for a pilgrim, that leaves Brad, Kyle, & Steve. Of those 3, Brad has been super suspicious in his lack of participation, so I would go with him."
I was convinced by Annie and Brad that Chris was a better candidate than DJ. However, it didn't take much convincing. Chris was already near the top of my list. I already gave my reasons why I didn't think Erock was a good candidate.
At this point, you just pull a name out of a hat and you're almost as likely to get a witch as a human.
It doesn't matter, though. The game is over. The witches might not have a mathematical majority, but there's no fucking way you're getting ALL of the remaining humans to vote for the same player. Just look at the last vote.
Fine. Let Tulley and Son do it -- if they can. I don't need to hear the reason; Or the discussion. In fact, I'd prefer to not hear it. Come up with one person that we should vote for. Use Chris as a tiebreaker if you have to. As far as I'm concerned, game's already over anyway, so I'll vote for anyone.
Brad, DJ and Erock were proposed over the last two weeks. I think this week's vote should be one of them to see how it turns out.
See, this is what I meant when I said you two needed to quietly discuss this and come back with one name. Throwing out suggestions is half the reason why we can't get everyone to vote for the same person.
|
OPCFW_CODE
|
Failed to find backlight device
running wluma:
ERROR: Failed to find backlight device file in base dir: /sys/class/backlight
ls /sys/class/backlight
lrwxrwxrwx root root 0 B Thu Sep 24 08:15:56 2020 intel_backlight ⇒ ../../devices/pci0000:00/0000:00:02.0/drm/card0/card0-eDP-1/intel_backlight
any chance wluma could automatically detect the type of graphics card?
or some command line switches to point to the correct folder?
Running archlinux 5.8.10-arch1-1
Interesting because I have the same path as you.
Could you check if these files exist for you?
$ ls /sys/class/backlight/intel_backlight/max_brightness
$ ls /sys/class/backlight/intel_backlight/brightness
yes, these files exist and I can print the values inside these files
.r--r--r-- root root 4 KB Thu Sep 24 21:56:09 2020 actual_brightness .rw-r--r-- root root 4 KB Thu Sep 24 08:15:56 2020 bl_power .rw-r--r-- root root 4 KB Thu Sep 24 08:15:56 2020 brightness lrwxrwxrwx root root 0 B Thu Sep 24 21:56:09 2020 device ⇒ ../../card0-eDP-1 .r--r--r-- root root 4 KB Thu Sep 24 08:15:56 2020 max_brightness drwxr-xr-x root root 0 B Thu Sep 24 21:56:09 2020 power .r--r--r-- root root 4 KB Thu Sep 24 21:56:09 2020 scale lrwxrwxrwx root root 0 B Thu Sep 24 08:15:56 2020 subsystem ⇒ ../../../../../../../class/backlight .r--r--r-- root root 4 KB Thu Sep 24 08:15:56 2020 type .rw-r--r-- root root 4 KB Thu Sep 24 08:15:56 2020 uevent
tried to run with sudo
ERROR: Failed to open light sensor base dir: /sys/bus/iio/devices
maybe this will help
Ah I think I know, wluma process must be able to write values into the brightness file, but in your case it is only writeable by root, while in my case it is also writeable by video group, and my user is part of this group:
$ la /sys/class/backlight/intel_backlight/brightness
.rw-rw-r-- 4.1k root video 25 Sep 11:58 /sys/class/backlight/intel_backlight/brightness
I think this video group is something standard, I definitely have since the time I was using light tool.
The fact that you get further with sudo confirms this 🙂
As for the light sensor error, it seems your laptop doesn't have light sensor (so wluma cannot tell how bright it is around you right now), have a look here on how to simulate this device by various strategies:
https://github.com/maximbaz/wluma#strategies-other-than-ambient-light-sensor
I'm actually re-considering the algorithm, so if you have any ideas or feedback as you will use the tool, they are very very appreciated.
I'm not super happy with using light sensor directly, because (1) it is not present in all laptops, and (2) on my laptop it is quite imprecise, it cannot tell dim light from complete darkness.
So I'm considering to change the algorithm, maybe make wluma use time by default, and only if sensor is present, then additionally slightly correct the brightness...
Fixed in main, new major breaking release will be soon, but for now you can apply https://github.com/maximbaz/wluma/commit/827629d618ac38ddd8fcbb2baab535c93859d11c manually or use AUR/wluma-git
|
GITHUB_ARCHIVE
|
# Simple console tool for maintaining user records in data.txt.
# Each record is stored as one line in the form "name-dob-postcode".
# NOTE(review): a date of birth that itself contains "-" (e.g. 01-02-1990)
# will break the split("-") parsing below -- consider a different delimiter.

# Main menu loop: runs until the user chooses to quit
while True:
    # Ask the user which task to perform
    print("What task would you like to perform")
    task = input("0 to quit, 1 for entering data, 2 to retrieve all data, 3 for searching for specific users: ")
    # If the user wants to quit, quit
    if task == "0":
        break
    # Else if the user wants to add data
    elif task == "1":
        # Inner loop so the user can re-enter the data if it was typed incorrectly
        while True:
            # Ask for the relevant data
            name = input("Name: ")
            dob = input("Date of birth: ")
            postcode = input("Postcode: ")
            # Ask for confirmation before saving
            # (fixed prompt typo: "reneter" -> "re-enter")
            confirm = input("To confirm press 1, to re-enter press 2:\nYour name is: {}\nYour date of birth is: {}\nAnd your postcode is: {}\n".format(name, dob, postcode))
            # If the data is confirmed, append it to the txt file and leave the loop
            if confirm == "1":
                # "with" guarantees the file is closed even if the write fails
                with open("data.txt", "a") as file:
                    file.write("{}-{}-{}\n".format(name, dob, postcode))
                break
    # Both task 2 and task 3 need the stored data parsed
    elif task == "2" or task == "3":
        # Open the file and retrieve the data; "with" guarantees it is closed
        with open("data.txt", "r") as file:
            fileData = file.readlines()
        # Strip trailing newlines and split each line into [name, dob, postcode]
        fileData = [line.strip().split("-") for line in fileData]
        # Task 2: print every record
        if task == "2":
            for lst in fileData:
                # Print the relevant data
                print("\nName: {}\nDate Of birth: {}\nPostcode: {}\n".format(lst[0], lst[1], lst[2]))
        # Task 3: search for a specific value
        elif task == "3":
            # Ask what type of search is needed (only affects the prompt text)
            searchType = input("1 to search for names, 2 to search for date of birth, 3 to search for postcode: ")
            # Get the search term from the user
            if searchType == "1":
                search = input("What name are you looking for: ")
            elif searchType == "2":
                search = input("What date of birth are you looking for: ")
            elif searchType == "3":
                search = input("What postcode are you looking for: ")
            else:
                # Bug fix: an invalid choice previously left `search` undefined,
                # crashing with a NameError on the membership test below.
                print("Invalid search type")
                continue
            # Iterate through the parsed records
            for lst in fileData:
                # Check if the search term matches any field of the record
                if search in lst:
                    # If it does, print the entire record
                    print("\nName: {}\nDate Of birth: {}\nPostcode: {}\n".format(lst[0], lst[1], lst[2]))
|
STACK_EDU
|
There are some misconceptions in the IT world that Microsoft Continuous Integration (CI) development tools may not operate smoothly on AWS. Some assume that because Visual Studio Team Services (VSTS) and .NET are Microsoft products that they must run better on a Microsoft infrastructure. We’ll show you how that’s not the case with our demo, “.NET/VSTS Demo – Continuous Integration on AWS.”
Using Visual Studio
Take, for example, using Visual Studio on AWS. According to Visual Studio Magazine, AWS “provides a streamlined option for Visual Studio developers who want to move deployments onto Amazon Web Services…The beauty of Amazon’s solution comes in the form of the AWS Toolkit for Visual Studio, a simple and elegant solution.”
A second misconception is that if a customer is embedded within the Microsoft product set such as .NET and Visual Studio, the business will face too much disruption and too many changes to the CI process. This is not the case either. Again, using Visual Studio as an example, “Working in an AWS environment does not necessarily mean that developers have to give up the tools that they use on a day-to-day basis,” Visual Studio Magazine says. “Visual Studio can be linked to AWS with relative ease. In fact, the entire process can be completed in a matter of a few minutes.”
Simplify and Automate CI for .NET on AWS
AWS has multiple tools to simplify and automate CI for .NET environments in Visual Studio:
- The AWS Toolkit for Visual Studio is an extension for Microsoft Visual Studio that makes it easy for developers to develop, debug, and deploy .NET applications using AWS.
- Visual Studio Team Services (VSTS) are additional extensions for Microsoft VSTS and on-premises Microsoft Team Foundation Server (TFS) that make it easy to deploy .NET applications using AWS.
- The AWS Tools for Windows PowerShell lets developers manage their AWS services from the Windows PowerShell scripting environment. You can manage your AWS resources with the same Windows PowerShell tools used to manage your Windows environment.
- AWS SDK for .NET provides .NET APIs for AWS services including Amazon S3, Amazon EC2, Amazon DynamoDB and more.
Easy Development on AWS
How easy is .NET development in Visual Studio on AWS? In this demo, we’ll walk through the following workflow scenario for developers that are working in Visual Studio on AWS. We will:
- Check in code changes
- Push those changes to a TFS or VSTS server
- Launch a TFS build
- Push that build up into Amazon S3
- Use AWS Lambda to signal to AWS CodeDeploy that deployment is ready
- Push the deployment to an EC2 infrastructure on an Auto Scaling group fronted by a load balancer
Check out a demonstration that showcases how easy it is to work in .NET using VSTS on AWS!
CI for .NET applications in Visual Studio on AWS is simple, fast, and easy. There are multiple tools available that operate just as they would in a Windows environment. There is nothing new that needs to be learned or changed. The tools work smoothly on AWS and there is little or no interruption of development best practices.
Onica is among the top 1 percent of AWS Premier Consulting Partners worldwide, and has migrated more than 10,000 Microsoft Windows instances to AWS. If you’d like to discuss your Windows environment and how it might work on AWS, please contact us.
|
OPCFW_CODE
|
Add the function of generating correlated graph pairs and test.
Add a document to generate the correlated graph pairs and write tests for it.
Codecov Report
Merging #284 into master will increase coverage by 0.05%.
The diff coverage is 100%.
@@ Coverage Diff @@
## master #284 +/- ##
==========================================
+ Coverage 87.51% 87.56% +0.05%
==========================================
Files 33 34 +1
Lines 1954 1962 +8
==========================================
+ Hits 1710 1718 +8
Misses 244 244
Impacted Files
Coverage Δ
graspy/simulations/simulations.py
96.37% <ø> (ø)
:arrow_up:
graspy/simulations/simulations_corr.py
100% <100%> (ø)
Continue to review full report at Codecov.
Legend - Click here to learn more
Δ = absolute <relative> (impact), ø = not affected, ? = missing data
Powered by Codecov. Last update 844af99...45e8a2a. Read the comment docs.
@bdpedigo Could you please review this PR and leave some comments for me?
Here is a notebook showing the properties of sample_edges_corr, er_corr and sbm_corr functions.
https://nbviewer.jupyter.org/github/NeuroDataDesign/team-networkclass/blob/master/Shan/notebook_corr.ipynb
Hi @SHAAAAN, nice progress. I still need to take a closer look but a few things at first glance
all of the most recent commits to graspy are shown as new commits here, how did you go about keeping things up to date with master? Did you do a rebase or merge? We don't want those changes reflected as commits that you made as part of this PR (since they are already in graspy) so we need to revert those changes in your branch, ideally
hard for me to tell if the SBM is working, graph 1 and graph 2 look a bit off. In terms of probability and maybe rho. have you tested against Bear's code at all? Do you agree that they look slightly shifted on your plots? How long do these sims take, perhaps just do more simulations if these don't take very long?
jovo said he wanted to see that the prob/rho values still looked right for a few other different values of rho and p.
have you tried any edge cases? Like what happens when rho is 1 or 0, or p is 1 or 0?
Hi Pedigo, I found a dimension mistake in the previous notebook and I revise it earlier today. Now I can see that the function works well. I also test the properties of the function when p ranging from 0.001 to 0.999 and rho ranging from 0.00 to 1.00. It seems still working well. (This jupyter notebook is the same link above.)
As for the Bear's codes, I have tested it for several pairs. But like we have discussed before, we have to define edge probability for both graphs. As the result, I haven't use his code so far.
Hi @SHAAAAN
Hi @bdpedigo , I found a dimension mistake in the previous notebook and I revised it earlier today. Now we can see that the function works well. I also test the properties of the function when p ranging from 0.001 to 0.999 and rho ranging from 0.00 to 1.00. It seems still working well. (This jupyter notebook is the same link above.) In addition, I restore the recent four commits back to the previous version.
Thanks for updating the notebook. A few things:
Somehow you are not calculating the correlation of the sampled graphs correctly. I know this from your plots because correlation should be defined on [-1, 1] and your correlation looks like it 1) cannot be negative and 2) can be bigger than 1. I also don't see why you think the experiment with rho=0 worked; to me it looks like the mean and expected are very different?
As for the Bear's codes, I have tested it for several pairs. But like we have discussed before, we have to define edge probability for both graphs. As the result, I haven't use his code so far.
I don't know what you mean about "define edge probability for both graphs." I can't quite remember the discussion that we had before. Does this mean that the two graphs can have different P matrices? If so, why can we not just set them the same and test against that?
Hi @bdpedigo , I have updated the above notebook and add the limitation of the relation between p and rho for er_corr and sbm_corr functions. And Bear's codes are also tested at the end of the above notebook.
So far I have two "sklearn" errors, which I think is similar to Ali's. But other things seem to be correct.
Hi Shan:
corr.ipynb
I think the paragraph at the beginning needs to be changed. I wouldn't talk about the implementation at all. I would basically just say that the correlated model means that we sample two graphs, but we can modify the probability of observing the same edge in both graphs by adjusting the correlation value.
for the titles of the first two heatmaps, just say Correlated ER Graph 1/2
In the demo, I would only use the er_corr and sbm functions
Can you remove the changes you made to erdos_reyni.ipynb and sbm.ipynb
Have you tried to see if you can just do an import as from graspy.simulations import er_corr? That is what we want.
Main code
You have a lot of redundant code where you check inputs to er_corr and sbm_corr. Any common inputs (or parts of inputs that they share) you are welcome to make private functions to deal with them in both
Anywhere you have math or notation in the docstrings, you should get it to render as math in sphinx. See examples in the rdpg code or RDPG estimator; it looks like :math: before some piece of text
where is the most up to date proof notebook? Can you edit the PR description at the very top to include that, I would like to take another look and also have it linked to the PR itself.
The code has come a really long way, nice work Shan!
@SHAAAAN this looks pretty good. I want to play with the code a bit before merging but that's on me, not you. So for the purposes of class, no need to worry, I will try to merge it tomorrow but if I don't that's not your fault.
Nice work!
|
GITHUB_ARCHIVE
|
Best practice for leaving main site to go to Intranet
I work on a college website and I need to link to a faculty intranet from the navigation. This is a change from the current setup, so I'm just wondering if I need to warn the faculty that they are leaving the site. Another option would be to have the intranet open in a new tab. Is there a best practice for this?
Is there a separate faculty login on this site before access to the intranet is granted? Or is the login on the intranet itself?
I believe if they are signed into their faculty account for the main site, it will not prompt them when going to the intranet.
Universities (as with very large corporations) have many departments and I rarely have come across a use-case where informing users that they are going to a new section was necessary.
Assuming your users are logged in then I would recommend a seamless transition to the faculty intranet. I assume that, by design or otherwise, the intranet will have a different look and feel than where they were before.
If the faculty member is not logged in then he would have to log in to access the intranet.
I would strongly recommend against opening a new tab. It is horrible usability. Imagine if the library and each department opened a new tab when navigating there. The back button and all sense of continuity is lost.
I think this quote by Jakob Nielsen is still relevant today:
Opening up new browser windows is like a vacuum cleaner sales person who starts a visit by emptying an ash tray on the customer's carpet. Don't pollute my screen with any more windows...If I want a new window, I will open it myself!
Designers open new browser windows on the theory that it keeps users on their site. But even disregarding the user-hostile message implied in taking over the user's machine, the strategy is self-defeating since it disables the Back button which is the normal way users return to previous sites. Users often don't notice that a new window has opened, especially if they are using a small monitor where the windows are maximized to fill up the screen. So a user who tries to return to the origin will be confused by a grayed out Back button.
tl;dr I don't think there is any reason to tell the faculty member that they are entering the intranet. If they clicked the link by mistake they can easily walk back out.
Thanks for your input. Yes, the intranet has a different look and feel so that is probably a sufficient indicator. I would say there are times when it is appropriate to open in a new tab. I agree with the quote -- but opening a new window is far more annoying than opening a new tab. I guess I am of that thought that if I am opening an intranet, I am starting a new task. So to have to click the back button several times to get back to where I came from feels annoying. However that may be my personal preference getting in the way.
Reanna - the quote applies to windows and tabs equally. It's less the physical appearance of a new window than the lost navigation that comes from it. Not to mention the annoyance that this action was done without your permission.
|
STACK_EXCHANGE
|
Change to Seed for legacy users (Level 2/3) - Issue 1 of 2
Step 1:- The user should be able to change the backup from Level 2 and Level 3 to Seed word-based backup. (Old Hexa User)
Step 2:- The user is given information about moving to seed words and the precondition for it. (Pre-Condition) This will only be allowed if the Savings Account is empty and archived. If the Savings Account is either not empty or not archived, the app would give a corresponding error.
(Note to Yusuf: Please create 2 error message screens. One that says the Savings Account is not empty with corresponding CTA, and the other that says the Savings Account is not archived with corresponding CTA.
Note to Pawan: The wallet will have to identify what state the Savings Account is in and then give the corresponding error message.)
(Note to Yusuf: Please create 2 error message screens. One that says the Savings Account is not empty with corresponding CTA, and the other that says the Savings Account is not archived with corresponding CTA.
Note to Pawan: The wallet will have to identify what state the Savings Account is in and then give the corresponding error message.)
Step 4:- Also, note that the backup is now in settings, not at the Home Screen tab, so the layout will be different.
Step 5:- The backup will have a suggested/ major CTA to " Move to Seed Words Backup". Also, the settings will have a red dot as if something is suggested, along with a red dot in Wallet Backup in settings. (The red dot goes away when the user has moved to seed word backup).
Step 6:- Once the user notes the seed words, they cannot return to Level 2 or 3. We can reuse the screen here that allows users to back up in the first place. The seed words should be 12 words if possible, else 24 words.
User is having levels based backup.
We now want them to stop using levels based backup and start using Seed Words based backup.
When such a user logs in - The notification on top should say - “Please switch to Seed Words Backup”. There is a red icon there.
On clicking, “Please switch to Seed Words Backup” the user is taken to the Settings tab > Wallet backup. There is a Red dot here too. The subtext there should say, “Backup wallet using Seed Words.”
User clicks on the Wallet Backup button.
He clicks on “Write Down Seed Words”.
Case 1: In case there are no Sats in the savings account, a modal pops up telling the user to archive the Savings Account with the major CTA being “Archive Now”. Clicking that button would archive the Savings Account and take the user to the next screen where the seed words are shown to him.
Case 2: In case there are Sats in the savings account, a modal pops up telling the user to empty the Savings Account and archive it. Major CTA is “Proceed”. The user is taken to the Savings Account. With an intent for him to empty it. The user empties the Savings Account. The wallet recognizes if the user has come from a Seed Words backup flow. If he has, in the Sent Successfully toast message itself, we give “Archive Account” as major CTA. After account is archived, the user is taken to the screen with the seed words for him to write it down.
Rest of the procedure is as per normal backup for new users.
After the backup process is complete, the levels based backup should be completely erased from the wallet and the user’s cloud.
https://xd.adobe.com/view/a14b2273-19ad-4c4c-976f-09bac56e5fa6-f418/
@Deveshshankar Scenario 3 is pending (Sweep fund)
Scenario 3: In case there are Sats in the savings account, a modal pops up telling the user to empty the Savings Account and archive it. Major CTA is “Sweep Now”. The user is taken to the Savings Account. With an intent for him to empty it. The user empties the Savings Account. The wallet recognizes if the user has come from a Seed Words backup flow. If he has, in the Sent Successfully toast message itself, we give “Archive Account” as major CTA. After account is archived, the user is taken to the screen with the seed words for him to write it down.
|
GITHUB_ARCHIVE
|
Calculating the percentage of overlapping areas from different layers in QGIS
I work with QGIS 3.16 Hannover and I have three polygon layers 'A', 'B', 'C' with multiple geometries in each.
In the image, layer 'A' has the bold polygons and, for each of them (meaning within each of them), I need to calculate the percentage of the area of all the blue polygons (layer 'B') that is covered by the squared polygons (layer 'C')
Following the guidelines given by @MrXsquared in this post, I built the following code in the field calculator for layer 'A', but it returns null or wrong values
area(
intersection(
(buffer(collect_geometries(overlay_intersects('B', $geometry)),0)),
(buffer(collect_geometries(overlay_intersects('C', $geometry)),0))))
*100/area(buffer(collect_geometries(overlay_intersects('B', $geometry)),0))
Any idea of what's wrong?
Could you provide a screenshot how the layers look like and indicate which kind of overlapping you want to calculate? Or do you have some sample data you could share? What do you mean by "doesn't work properly" - what does not work?
You want to calculate the area where all three layer overlap, but as percentage related to the area of each feature of layer A?
I have edited the question. Hope it is clearer now.
Thanks for providing more details, hope my solution provides what you're looking for.
The problem with the expression you provided was that the intersection() part only calculated the overlap of C and B, regardless of whether they are inside A or not. You have to add another overlap, pseudocode: overlap of A with (overlap of B and C) - you had only the part inside the parentheses.
To create the overlap you're interested in, the expression looks like (for the area and percentage see below):
intersection (
$geometry ,
intersection(
buffer(
collect_geometries(
overlay_intersects(
'B',
$geometry
)
),
0
),
buffer(
collect_geometries(
overlay_intersects(
'C',
$geometry
)
),
0
)
)
)
Screenshot: A= black lines; B: blue; C: green; red: overlap calculated with the expression above:
So based on this, calculating the percentage of ovelapping for each polygon of A is easy - pseudocode: area of the overlap created above / area of each polygon of A * 100. In fact, buffering by 0 is not necessary, you can leave this part away. Thus the expression to calculate the percentage looks like this:
area (
intersection (
$geometry ,
intersection(
collect_geometries(
overlay_intersects(
'B',
$geometry
)
),
collect_geometries(
overlay_intersects(
'C',
$geometry
)
)
)
)
)
/area($geometry)*100
Screenshot: the label is created dynamically with the expression above (+ a round() to round to one decimal). Polygon A on the right (black-outlined) contains four overlapping areas (red), the polygon at the bottom left two, and the other two just one red area each. The percentage is calculated as the sum of all red areas inside a polygon, whereas the area of the black-outlined polygon A features is 100% for each polygon:
If however the percentage of ovelapping you need is not for each polygon of A, but for the area of the blue polygons that are within each polygon of A, than replace the last line /area($geometry)*100 with this expression:
/area (
intersection (
$geometry ,
collect_geometries(
overlay_intersects(
'B',
$geometry
)
)
)
)*100
After some tries, the final expression that works for the data of the OP includes the buffers of 0:
area (
buffer(
intersection (
$geometry ,
buffer (
intersection(
buffer(
collect_geometries(
overlay_intersects(
'B',
$geometry
)
),
0
),
buffer(
collect_geometries(
overlay_intersects(
'C',
$geometry
)
),0
)
),0
)
),0
)
)/area(
buffer(
collect_geometries(
overlay_intersects(
'B',
$geometry
)
),0
)
)*100
Many thanks indeed for such a detailed explanation. However, the percentage of overlapping that I need is not for each polygon of A, but for the area of the blue polygons that are within each polygon of A. The pseudocode: area of the overlap in red / overlap of A and B (* 100). So, if I'm not mistaken, the code for the overlap of A and B would be intersection(collect_geometries(overlay_intersects('B', $geometry). BTW, the "* 100", shouldn't it be in the numerator?
Updated that part. if *100 is in the numerator or not does not play a role: test the preview of the expression editor for something like 10*5/2 and 10/2*5 - the result is the same.
By the way: are all the layers in the same CRS?
Yes, they are in the same CRS
OK, I pasted your solution into my answer. In the case of my test polygons, applying the buffer does not play a role - I'm not sure why in your case this is crucial. This shows that a solution might depend on the kind of data you have - so adding some sample data to a question can be helpful, as creating your own test data will not always reflect the problem in its whole complexity. Glad you resolved your problem.
|
STACK_EXCHANGE
|
[wp-edu] Open source release of BU-developed WP plugins
ryeany at bu.edu
Mon Mar 18 16:06:58 UTC 2013
I'm happy to announce the open source availability of three WordPress plugins developed at Boston University. The features offered by these plugins have allowed Boston University to effectively use WordPress as a CMS on a very large scale, with robust tools for managing hierarchical page content, granular control over who can edit what content, and workflow to stage edits and revisions to existing content.
The plugins are free and available now on wordpress.org<http://wordpress.org/>, and are also available on Github if you want to customize them or further develop them in order to submit new features and/or enhancements.
BU Navigation Plugin<http://developer.bu.edu/bu-navigation/>: We always found the native WordPress tools for managing hierarchical page content to be lacking. Re-ordering pages was a chore, and managing your navigation menus separately was not efficient. With BU Navigation, you can tame even the most unruly site structures. With two lines of code any theme can benefit from this feature-rich custom menu alternative.
BU Section Editing Plugin<http://developer.bu.edu/bu-section-editing/>: We needed a way to allow individual site administrators to manage their staff's editing permissions without exposing the complexities of WordPress roles and capabilities. With BU Section Editing, you have crowd control for WordPress. Create section editing groups, assign editors to a group, and granularly define what content members of that group can edit.
BU Versions Plugin<http://developer.bu.edu/bu-versions/>: We needed a mechanism so editors could save edits to published content without it immediately replacing the live page, so the edits could be reviewed and approved. With BU Versions, you can clone, edit, and replace. Seamlessly. Now you can implement a workflow that's easy to use and "behind the scenes" without disruptive or unreviewed changes to your live content.
Boston University first started using WordPress in Fall 2008 to replace a home-grown CMS used by our medical campus to host 137 sites. In March 2013, we will launch our 800th WordPress-based website. We've developed our own WordPress theme framework that supports a curated collection of standard University designs for hundreds of sites as well as dozens of custom themes for our highest-profile and most-trafficked sites. With WordPress, and through a close collaboration between development and design teams at BU, we offer web publishing services to hundreds of University departments that are low-cost, easy to use, and deliver a consistent user experience to visitors on the BU Web.
We invite you to download our plugins, and perhaps your college or university (or even you personally!) can benefit from these new features we have developed for WordPress. And finally, stay tuned -- we plan to share more of our WordPress work in the future.
Web Applications Manager
Information Services & Technology
-------------- next part --------------
An HTML attachment was scrubbed...
More information about the wp-edu
|
OPCFW_CODE
|
Azure:Cloud Service stood up for every VM?
I'm running PerfkitBenchmarker against Microsoft Azure, and I'm noticing the system creates a cloud service for every VM it stands up. This is not a realistic use of the cloud service. In Azure, Cloud Service is similar to a GCE "Project." In fact, I would suggest that the behavior against Azure be modified such that if the user specifies a --project parameter at runtime, that you use it just like you would for GCP - to put all the VMs for the benchmarks into a cloud service with that name. Otherwise, I would recommend that a cloud service only be stood up against the run-id, and not for every VM.
+Eric
This is a very good suggestion. I'll put this higher on the priority
list. We have a couple of tweaks to make in general to networking.
On Mon, Mar 2, 2015 at 6:09 PM, Tim Harris <EMAIL_ADDRESS> wrote:
I'm running PerfkitBenchmarker against Microsoft Azure, and I'm noticing
the system creates a cloud service for every VM it stands up. This is
not a realistic use of the cloud service. In Azure, Cloud Service is
similar to a GCE "Project." In fact, I would suggest that the behavior
against Azure be modified such that if the user specifies a --project
parameter at runtime, that you use it just like you would for GCP - to put
all the VMs for the benchmarks into a cloud service with that name.
Otherwise, I would recommend that a cloud service only be stood up against
the run-id, and not for every VM.
--
Reply to this email directly or view it on GitHub
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker/issues/139.
The same concept holds true for affinity groups. You should really only have to stand one up and tear it down once, rather than doing it in between each benchmark. Creating an affinity group for the run-id and using it through all the benchmarks seems like that would be better than what's happening now.
You can't easily add another VM to the same service (if you want to ssh into both) - the endpoints in the service have to be unique. If you try to create another VM with the same ssh port open you get this error:
error: BadRequest : Port 22 is already in use by one of the endpoints in this deployment. Ensure that the port numbers are unique across endpoints within a deployment.
info: Error information has been recorded to azure.err
error: vm create command failed
We did initially try to do what you were suggesting, but we ran into this. If there's an easy way around this problem please let me know.
As for the affinity groups, that's a different ask - it should definitely be possible to share pkb "networks" between benchmarks with the same run uri. It will take some amount of work though, because the way pkb is currently structured, benchmarks don't share anything.
I should also probably note that, by default, the "azure vm create" command will automatically create a cloud service per VM for you, so I'd argue that this is fairly realistic compared to someone creating VMs from the linux CLI.
Seems like changing the --ssh option works for remapping the ssh port - using one cloud service will definitely be possible in this case, but will also mean changing the firewall class as well.
The nomenclature within Azure is a bit different, I think. Creating a VM inside a Web Service is "adding a role". So using the --connect parameter to create the VM gives you the ability to connect it to an existing Web Service. The cli --help is not really all that helpful in this case :-(
Here's a stackoverflow article on the issue: http://stackoverflow.com/questions/23959017/cant-add-new-vm-to-existing-cloud-service-in-azure
Yep - I tried using the -c option and that was what was causing the call to fail like I mentioned above until I changed the --ssh parameter to a port other than 22 - after that it worked.
That makes sense since they're both on the same external IP address. For benchmarks dealing only with the "internal" resources of the cloud platform, storage, and network, I think this is a good approach (it's more like simulating a multi-tier application). If, however, benchmarks ever get added that require external connectivity, then it's better to have each stood up as a separate entity (that said, trying to "benchmark" when the external connection comes in to play is a bit of a non starter).
For all three (and future) platforms, it seems like PKB could have a "global prepare" that happens at the front end to stand up projects, affinity groups, networks, etc rather than standing all these up and tearing them all down in between each benchmark. Cleaning these up could happen in a "global cleanup" section once all the benchmarks have run.
I could see, in the future, wanting to stand up multiple "global containers" -- say in Azure AND GCE -- and run some of the multi-machine benchmarks with member-nodes in each :-)
Moving to one shared service has the downside of slowing down vm/disk creation and deletion. If you try to do more than one operation at a time you get the error:
Windows Azure is currently performing an operation with x-ms-requestid ***** on this deployment that requires exclusive access.
This gets retried and eventually succeeds, but it does slow things down.
The cluster_boot benchmark with 5 vms took 545 seconds using the old approach and 831 seconds with this approach. Does having one shared service seem worth it given that down side? If so I can put the change out for review.
I have pinged some engineering folks to get their guidance on what they see as the "right way" to do this.
@timothyeharris - Did you ever get any follow-up on this bug? Still an issue?
@ehankland - What is the state of this?
We still use one service per VM. I was waiting to hear back to see if there was a better way of doing things.
I have not gotten any "preferred" advice from engineering (and due to organizational changes, am no longer working on this project). Sorry :(
Sent from Outlook Mobilehttps://aka.ms/qtex0l
On Thu, Dec 17, 2015 at 9:41 AM -0800, "ehankland" <EMAIL_ADDRESS> wrote:
We still use one service per VM. I was waiting to hear back to see if there was a better way of doing things.
Reply to this email directly or view it on GitHubhttps://github.com/GoogleCloudPlatform/PerfKitBenchmarker/issues/139#issuecomment-165524514.
|
GITHUB_ARCHIVE
|
Can I pay for Simulink MATLAB project assistance for tasks related to automatic code generation for embedded systems? My goal is to get the MATLAB project assist fromSimulink. Some users said they will pay for Simulink MATLAB project assistance without any money, so I’m like, “Can I start a Simulink MATLAB for my Matlab task?” How? How would that work? Let’s get going! Let’s Start! 1 – Start up the project 2 – First build the MATLAB project 3 – Build the Matlab shell script 4 – Open all the files on the project 5 – Insert the MATLAB files into the shell script 6 – Replace all the Matlab code (exported and installed) 7 – Install the MATLAB project on the MCP-16 8 – Install the MATLAB project onto the MCP-16 9 – Make an XML file 10 – Rename the project 11 – Save the MATLAB Project 12 – Reboot. Make the project and switch to VSTC Hence, in the VSTC command, you add it to the list of nodes for the MATLAB shell script. Now the user can just put the MATLAB project into the current directory. User’s Guide to MATLAB for Free Sample Open the MATLAB project click here to find out more the folder MATLABPath. That’s all! VSTC command is only like a tutorial! Next, open the latest MCP-16 VSTC command and run it: Open the MATLAB project directory and find the version number (in every MCP-16 editor). If you look in the Git Documentation & Installer, you will find the version number. Next, you must use it. In the source files: User ID number (in the MATLAB source file). Next, in Git: Git/GCan I pay for Simulink MATLAB project assistance for tasks related to automatic code generation for embedded systems? [lady](https://github.com/andylsbreyer/Simulink_MATLAB_PSTAX_STMBA_1_1.md) [troy](https://github.com/troyjs/Simulink_MATLAB_PSTAX_STMBA_1_2.md) I would like to know if Simulink MATLAB project assistance is available for Automatic CodeGeneration Projects where a complete description of the project task/task-dependency requirements can be obtained. 
I know that there are some similar projects where an automated code generation is required for embedded or simulink MATLAB (eximer, arganis etc.). Thanks To meet the user requirements, an answer should be posted in the project comments by: [Mark Cramer](https://github.com/andylsbreyer/Simulink_MATLAB_PSTAX_STMBA_2_1_3.md) and [Mike Di Prato](https://github.com/boris/Simulink_MATLAB_PSTAX_STMBA_2_1_3.
Pay Someone To Do My Homework Online
md). While they are listed in one of the github versions of this issue, these have two issues that need to be resolved – the implementation and how to handle error. These are three similar and related to it, the more recently stated methodology, below. Please go on playing around with these notes and agree on a clear idea as to what to do to achieve our goal. How to Fix for Automation Issues — The other issue is to make good all versions look similar, so for quick example as to what to do with the method, please go to the details of the source code and describe how to get to it. It’s time-consumingCan I pay for Simulink MATLAB project assistance for tasks related to automatic code generation for embedded systems? I know this thread is extremely abridged and I don’t know what to do. I would like to look at MATLAB and see what some people already have gone through. Thanks for your time! A: I give you a working script that will provide a few options depending on your application. It should be able to find out what any local program (without code) is doing but also use some information from the program to use with it. (In some cases external variables may be in some elements of the script. See “Where can you find helpful external variable properties” in Chapter 5.) What you need to do is have a single application where you run MATLAB at least once. One case where you can get some idea is to have a single program running both in your local machine and the main machine (or both of them) which includes both MATLAB and Visual for you (if you feel the need). MATLAB and Visual is typically imported by clicking on File –> program source –> /home/cswv/NPC-Test-Project. Once that is done, you are a bit of a pickle when you want something done only when the package type is installed. In a case like that, if you run MATLAB at not-too-late date (or the moment you write the main program for your installation) you will not notice what’s happening at the moment. 
If you do want Visual in the main program, then you will want to have Visual installed in the /home/cswv/NPC-Test-Project/env/bin/visual \ –output option which will be converted to something like /home/cswv/NPC/Test-Project/tool-setupout3_installdir\n The code for a Visual application may look like this: /home/cswv/NPC-Test-
|
OPCFW_CODE
|
I have a small radio transmitter and I’m successfully generating tones for it to send data via PWM. The issue is that it is scattering the transmission on multiple channels and this is mentioned as a result of an impedance mismatch between the driver for the transmitter and the transmitter. The manual includes a sample circuit for matching up the impedance if the output impedance of the driving device is 2K - does anyone happen to know the output impedance for a Panda from the PWM pin? Does it differ for a Panda 2? I haven’t seen any info on this in any of the support downloads for the Panda.
The pins on panda are connected directly to the processor so maybe take a look at the processor datasheet? LPC2387
What kind of modulation are you using ? I.E. USB or FM ?
The problem might as well come because you are injecting square waves into a transmitter, which generates a lot of harmonics. You should consider interfacing the panda and the transmitter with an audio low pass filter, even a passive one
Your input level might be too strong as well, saturating the first pre-amplifier stage of the modulator. Are you injecting your PWM into the high impedance microphone jack, or in a dedicated input jack with low impedance?
Here’s the transmitter and user manual: https://www.argentdata.com/catalog/product_info.php?cPath=25&products_id=134&osCsid=f3c83927968ef7fd849480427ce549ea
It does also suggest a low pass filter, I was going to do that in addition to the impedance matching but figured it would be good to know the output impedance before I started in on that. The input is a dedicated modulation line, not a microphone line.
I looked at the LPC manual and didn’t see anything giving actual impedance values.
The impedance should match your dedicated modulation line.
Just add a passive low pass filter (serial res, cap to ground, serial res, cap to ground, serial caps high value for decoupling, then a variable res to adjust your level).
…C…C…Pot----- Input TX Line
It’s important to adjust the level at the minimum acceptable value. Start from 0 (no modulation) and then increase slowly until you get an acceptable modulation, but don’t exceed that value.
Good luck !
Ok, I’ll try that out and if it works, measure the resistance in the variable and replace it with a fixed resistor. I’ll also try out what the manual recommends in the off chance the output impedance is comparable to other devices that use this radio.
Just calculate the R/C filter values for a low pass of 2500Hz and you should be fine
For the last serial C before the Pot just use a higher value like 1uF
So I looked into the low pass filter stuff a bit and tried out just modulation in to single 100 Ohm resistor to modulation out to single 0.47uf capacitor to ground and that seems to confine the transmission a bit more. What would be the benefit of using your more complicated example and how could I measure if it had a benefit and by how much? The way I am “debugging” this issue is looking at the output in fldigi and seeing how close the transmitted signal is to the two tones in the waterfall display. Also not sure what RC values to use in your example since some are in series and some are in parallel. With the one I did I calculated it as 1/(2piR*C) which is around 3K Hz.
|
OPCFW_CODE
|
Due to the coronavirus, the ICMS workshop and the research incubator have been postponed to 2021. The LMS-Bath Symposium will go ahead online.
Mathematics of Machine Learning
The last decade has seen a huge uptake of the applications of machine learning methods, yet the mechanisms behind the success of such methods are far from well understood. We will be running three events on the Mathematics of Machine Learning in the summer of 2020: an ICMS workshop at the University of Bath in the week 27-31 July, followed by an LMS Symposium (3-7 August 2020), which in turn is followed by a Research Incubator in the week starting 10 August. The origin of this activity is that the LMS symposia, run at Durham since 1974, are moving to Bath in 2020.
ICMS Workshop on Analytic and Geometric Approaches to Machine Learning (27-31 July 2020)
This is the first ever ICMS workshop outside Scotland. The aim of the workshop is to bring together researchers that apply mathematical methodology to machine learning. The emphasis will be on how mathematical theory can inform applications and vice versa. On the theoretical side the event focuses on large data limits, approximation theory and probabilistic modelling. The theory will be connected to applications that either make use of these results or have the potential to do so. Topics include the geometry of data, total variation functionals, Gaussian process regression and neural networks. This is largely an event by invitation only, but the organisers would like to actively encourage locals to attend. Participation of students is particularly welcome.
Organisers are Patricia Vitoria Carrera, Bamdad Hosseini and Matthew Thorpe.
For more information, see link.
LMS-Bath Symposium on the Mathematics of Machine Learning (3-7 August 2020)
The aim of the Symposium is to bring together world leading mathematicians, theoretical physicists and statisticians who are working on the mathematics of machine learning, to promote dialogue between different groups and to identify new directions of research.
Throughout the week, there will be talks by 30 invited established and early career researchers who are currently at the forefront of theoretical and applications-based research in this field. They will deliver talks on a wide range of machine learning related topics. We also plan to organise a public lecture (speaker to be confirmed) and expert panel sessions. There is also an opportunity to contribute to the symposium by presenting a poster. A selection of these posters will be accompanied by spotlight talks. A few SAMBa and SAMBa-aligned students have already expressed interest in presenting a poster.
There are a number of ways how students and other members of the department can get involved. First, as mentioned before, everyone can apply for a poster presentation (deadline 27 March). This is a great way of starting a detailed discussion with experts in the field. Moreover, there will be space for local participants to attend the invited talks and ask questions. There will be plenty of opportunity to discuss and meet these researchers during the coffee breaks/lunch breaks/conference dinner.
Organisers are Philip Aston, Matthias Ehrhardt, Catherine Higham and Clarice Poon.
For more information, see link.
EPSRC research incubator (10-14 August 2020)
This event is similar in style to a SAMBa ITT but with the focus being on the Mathematics of Machine Learning. This mix of a summer school and brainstorming event will bring excellent early career and established researchers in the field to Bath to share their expertise with upcoming talent. Two days of lectures are followed by three days where the participants work on problems set by researchers and users of machine learning. There will be an open call (to be announced March/April) for participation, which is open to everyone, including SAMBa students.
Organisers are Neill Campbell and Matthias Ehrhardt.
|
OPCFW_CODE
|
Students take 60 ECTS credits (one year of full-time dedication) plus up to 50 credits of complementary credits from advanced undergraduate courses (extending the program to two years). Of the 60 ECTS credits of the master, 40 correspond to courses and 20 to the thesis project.
Check the class schedule for the current year.
- Core Courses (25 ECTS)
- Optional Courses (15 ECTS)
- Thesis Project (20 ECTS)
- Complementary Courses (extra courses for up to 50 additional ECTS)
Core Courses (5 ECTS each)
- Audio Signal Processing for Music Applications (term 1): Covers signal processing methodologies and technologies specific for audio and music applications. Special emphasis is given to the use of spectral processing techniques for the description and transformation of sound and music signals. [syllabus]
- Music Perception and Cognition (term 1): Goes over the principles, structures, and functions that make it possible for humans to perceive and understand sound and music, presented from empirical and computational points of view. The psychophysics of the transduction, the neural encoding of the acoustic input, the perceptual organization of audio streams, musical memory, melodic, rhythmic and tonal cognition, emotion and music, and the development and learning of musical capabilities. [syllabus]
- Research Methods (term 1): Covers the major considerations and tasks involved in conducting scientific research, with special emphasis on those aspects related to the context of Information and Communication Technologies. [syllabus]
- Music Information Retrieval (term 2): Survey of the field of Music Information Retrieval with a special emphasis on the well-established techniques for the automatic description of audio content in terms of different facets (e.g. melody, harmony, rhythm, timbre), temporal scopes, and abstraction levels (from low-level features to semantic descriptions such as genre or mood). [syllabus]
- Computational Music Creativity (term 2): Covers the creation and performance of music using computers and algorithms, with a focus on real-time interaction and control. [syllabus]
Optional Courses (5 ECTS each)
- Advanced Topics in SMC (terms 1 & 2): Seminar covering core methodological and application topics of relevance to Sound and Music Computing with special emphasis on machine learning. [syllabus]
- Audio and Music Processing Lab (term 2): State-of-the-art methods and tools for the automatic processing (focusing on its generation and transformation) of music content. [syllabus]
- Music Recording and Mixing (term 3): Theoretical and practical examination of the most usual microphonic and multitrack recording techniques, the audio processing techniques that are used in a musical recording, and the technical and practical problems that need to be addressed in mixing. [syllabus]
- Sound Communication (term 3): Methods, concepts and practice of artificial intelligence, machine learning, music, sound and sonic therapies with particular emphasis on practical applications. [syllabus]
- Advanced Interface Design (term 1): Paradigms, methods, and tools used in the construction of complex multimodal interfaces between people and artefacts. [syllabus]
- Systems Design, Integration and Control (term 2): Paradigms within design, integration, and control of truly feasible complex systems, with special stress on neuromorphic principles underlying biological, interactive, cognitive and emotive systems. [syllabus]
- Web Intelligence (term 2): Study how to gather, process, search and mine data in the Web and its applications to search engines. Understand the basic concepts behind information retrieval and data mining. [syllabus]
- Natural Language Interaction (term 2): The subject covers central themes related to interaction with intelligent agents through natural language. The approach will be built on models of written dialogue, analysis and generation of natural language, and implementations. [syllabus]
- Cognitive Science and Psychology: Mind, Brain, and Behaviour (term 1): The seven central disciplines that form traditional cognitive science, showing how the concepts and paradigms of these disciplines bring complementary visions of mind, brain and behaviour. [syllabus]
- Machine Learning (term 1): The course covers the theory, definition, and implementation of various machine learning methods and algorithms. These are algorithms that generalize from labelled or unlabelled examples. [syllabus]
- Research Project (20 ECTS): Carry out a research project and write a thesis report under the supervision of a teacher. Includes a weekly class to present and discuss relevant topics to help decide, develop and present the individual thesis work [more info].
- Courses from the undergraduate programs in engineering of the Engineering School covering topics such as: Audio Signal Processing, Software Engineering, Data Structures, Software Programming, Artificial Intelligence, Music Technology, Mathematics, and Interactive Systems.
|
OPCFW_CODE
|
UseBlackBox AI is an innovative artificial intelligence coding assistant designed to make developers more efficient and productive. As software development becomes more complex, tools like UseBlackBox provide smart support to help programmers keep up with the rapid pace of technology.
In this comprehensive guide, we’ll explore what UseBlackBox AI is, its key features and capabilities, best practices, pricing options, and tips to get the most value from this AI-powered coding tool. Whether you’re a seasoned developer or just starting out, UseBlackBox AI aims to improve your coding experience.
What is UseBlackBox AI?
UseBlackBox AI is an AI-based coding assistant created by Cours Connecte, Inc. It was launched in 2020 as a tool to help developers write code faster by providing intelligent suggestions, autocompletion, and more.
Here are some important facts about UseBlackBox AI:
- Editor integration: Available as a plugin for VS Code, IntelliJ, PyCharm and other code editors. Also works directly in the browser.
- Core features: Provides code autocompletion, search, chat, debugging, and text extraction from images/PDFs.
- Pricing: Free trial plus paid plans starting at $0.99 per week.
The goal of UseBlackBox AI is to save developers time and mental energy by providing an AI copilot that can understand what they want to build and generate relevant code snippets on the fly. It aims to change programming from a tedious manual task to a more creative collaborative process between developer and AI assistant.
Key features and capabilities
UseBlackBox AI is packed with advanced features to improve productivity. Here are some of the most notable capabilities:
UseBlackBox AI provides intelligent code autocompletion in more than 20 programming languages. As you type, relevant functions, variables, parameters, and more are suggested, helping you write code faster. The suggestions improve as you continue coding.
You can search over 100 million open source GitHub repositories using natural language queries directly in your editor. This makes it easy to find code examples and solutions without leaving your workflow.
The code chat feature allows you to convert any question or statement into executable code by simply typing // and your question. For example, you can type // how to connect to MongoDB database in Node.js and UseBlackBox will provide relevant code.
UseBlackBox AI can extract text and code from images, videos, PDFs and other documents. This automates the tedious process of manually transcribing code snippets.
Get AI-powered help to debug your code. UseBlackBox analyzes errors and proposes solutions to resolve problems quickly.
Install the UseBlackBox plugin for seamless integration with VS Code, IntelliJ, PyCharm, and other coding environments you use.
The browser extension brings autofill, search, and other features right to your browser, so you can always access them quickly.
With these robust capabilities, UseBlackBox AI aims to provide an AI co-pilot for developers who can understand context and intent to provide the most relevant suggestions and time-saving automation.
Real-world use cases
UseBlackBox AI can benefit developers in many different roles and use cases:
- Software Engineers – Autocomplete standard code, find code samples for new libraries, and get suggestions to fix bugs faster.
- Data Scientists – Generate code for cleaning, analyzing, and visualizing data in Python and R.
- Students – Learn faster by getting code examples and explanations for school projects and assignments.
- Hobbyists – Build side projects and hacks by quickly turning ideas into code with an AI assistant.
- Teams – Maintain consistent style and standards by letting an AI suggest improvements to the code.
The time savings and productivity improvements that UseBlackBox AI provides can accelerate developers in virtually any use case that involves writing code. Both beginners and experts can benefit from the AI-powered support.
Use BlackBox AI pricing and plans
UseBlackBox AI offers flexible pricing plans to meet the needs of individual developers and teams.
Here you will find an overview of the pricing options:
|Plan||Price||Most important features|
|Free trial period||$0/month||All functions for 14 days|
|Good developer||$0.99/week||1,000 monthly code searches
|Great developer||$1.99/week||Unlimited code search
|Team||Custom quote||Dedicated AI agency
The free trial provides full access to test out the features for 14 days before committing to a paid subscription.
The Good Developer plan is ideal for individuals who want basic access with 1000 code generations per month. For power users, the Awesome Developer plan offers unlimited code completions and searching.
Custom pricing is available for teams by contacting the UseBlackBox sales team. Volume discounts can be applied based on team size and usage.
UseBlackBox also offers a discount on annual pricing if you pay annually in advance instead of monthly. Overall, the pricing aims to be affordable for developers of all levels, while providing significant time savings.
Tips to get the most out of UseBlackBox AI
Here are some tips to maximize productivity with UseBlackBox AI:
- Be specific – Ask clear and detailed questions for best results during code chat and search.
- Use multiple languages – Benefit from the broad language support.
- Find examples – Search public code to discover patterns and examples.
- Fix bugs faster – Get AI-suggested debugging solutions.
- Integrate into Workflow – Install plugins for your editor and browser.
- Train the AI – Generate helpful suggestions and refine searches to improve relevance over time.
- Use text extraction – Automate transcribing code from videos, PDFs and more.
- Stay organized – Use code bookmarks and version history to stay on track.
Limitations and risks
Although Blackbox AI aims to improve productivity and software quality, it also comes with some limitations to consider:
- The quality of the code generated is only as good as the AI training data
- Complex original code still requires human skill and creativity
- It is essential that you understand the generated code before using it
- Excessive use can lead to dependence on the aid that hinders learning
- Security issues can arise without proper code auditing
- Biases in data/algorithms can lead to the exclusion of certain groups
- Potential for exploitation by automatically generating malicious programs
Responsibly designing and monitoring systems like Blackbox will be important to maximize benefits and minimize risks as AI capabilities grow.
Blackbox AI has the potential to fundamentally change coding practices through the innovative application of AI techniques. It lowers the barriers for programmers at all skill levels to benefit from AI support.
For new users, take the time to learn the quirks of communicating coding needs in natural language. No AI system today will be 100% perfect. But when used properly, Blackbox can increase your productivity and help you hone your skills as a developer.
The future of AI-assisted programming looks promising as research in this area accelerates. In the coming years, we’ll likely see even smarter integrations between code editors and AI. Exciting times lie ahead at the intersection of artificial and human intelligence!
🌟 Do you have burning questions about UseBlackBox AI? Do you need some extra help with AI tools or something else?
💡 Feel free to send an email to Govind, our expert at OpenAIMaster. Send your questions to email@example.com and Govind will be happy to help you!
|
OPCFW_CODE
|
In this article, we will control our Raspberry Pi using Cayenne. Cayenne uses MQTT API for interaction with Raspberry Pi.
We shall control PiRelay with Cayenne. PiRelay is a Raspberry Pi Shield with 4 Relays(5 Amp/240VAC or 10Amp/30VDC), with customizable GPIOs, LED indicators that can be used to operate almost all the domestic appliances.
What is Cayenne?
Cayenne is a drag-and-drop IoT project builder, developed by myDevices. Cayenne is available for smartphones and computers, which helps users connect to their devices from anywhere in the world.
There are certain advantages of Cayenne for IoT integration, like-
- Remotely control sensors, motors, actuators, and GPIO Pins
- Customizable dashboards with drag-and-drop widgets
- Create triggers and threshold alerts for devices, events, and actions
- Schedule one-time or multi-device events for easy automation
- Connect your Pi quickly and easily
Things you need
Setup Cayenne for Raspberry Pi
Sign Up to Cayenne
Cayenne from Apple Store
Keep a Raspberry Pi ready with Raspbian; you can download Raspbian from raspberrypi.org
Download and install Cayenne
After the Sign up you will be redirected to the device selection menu,
Select Raspberry Pi,
Step 2 will appear
Click on the ‘Next’ button
You will be directed to step 3, where you will be asked to download the application or install Cayenne on Raspberry Pi.
Open Terminal or ssh to your Pi.
From option 2 Copy the text and paste to the terminal,
And execute the downloaded file using
sudo bash filename.sh -v
A message asking you to be patient while the installation completes (about 10 minutes) will be displayed on the Cayenne web portal. After the installation, reboot the Raspberry Pi.
After the successful reboot, the Cayenne web or app interface will open the Overview tab of Raspberry Pi. In the overview section, you can see raspberry Pi’s CPU usage, RAM, Storage, temperature, Network Speed on your screen. You can also see a Reset and Shutdown button on the screen Reset button will restart your pi.
You can configure your Raspberry pi using the settings button on the right side.
Use the Add new option to Add new Devices, Events, Triggers, and Projects.
Using your smartphone download the application and sign in with the registered Email Id. After a successful login, you will be directed to the overview page.
Controlling PiRelay with Cayenne GPIO
Stack up PiRelay over the Raspberry Pi
A green led will glow if PiRelay is stacked properly and Pi is powered up.
Go to GPIO option on Cayenne on your Smartphone or web interface. Both the interfaces will look alike-
If the yellow jumpers on PiRelay are connected, the GPIO connection of PiRelay with Raspberry Pi is -Relay 1: Pin 15
Relay 2: Pin 13
Relay 3: Pin 11
Relay 4: Pin 7
Note: These are Board Pins
However, you can use Female to Female jumper wires to connect Pirelay with a GPIO Pin of your choice.
On the Cayenne interface, you can see the GPIO interface of Raspberry Pi with the pin description. You can use the GPIO pins, change the mode of pins(IN/OUT), and set the value high or low accordingly.
For controlling the PiRelay, set the pin mode to ‘OUT’ and change the value to high or low to switch relays on your PiRelay.
You can use other components and operate them from anywhere in the world. Cayenne uses MQTT API for controlling Raspberry Pi.
Enjoy controlling your home appliances and other devices with your Smart Phone.
Hope you enjoyed this article...
|
OPCFW_CODE
|
SQL error: 'column reference "id" is ambiguous'
I'll look into this tomorrow but since the update to 2.2.0 I've noticed this issue on sentry;
ProgrammingError at /app/model/1/
column reference "id" is ambiguous
LINE 1: SELECT (CASE WHEN id='1' THEN 0 END) AS "ordering", "...
It complains about validate_values(), as I said I'll update this ticket later, but it occurs when saving inlines that have autocomplete fields
Yeah I'm seeing the same thing:
Traceback:
File "/usr/local/lib/python3.4/site-packages/django/core/handlers/base.py" in get_response
132. response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/local/lib/python3.4/site-packages/django/views/generic/base.py" in view
71. return self.dispatch(request, *args, **kwargs)
File "/usr/local/lib/python3.4/site-packages/braces/views.py" in dispatch
140. request, *args, **kwargs)
File "/usr/local/lib/python3.4/site-packages/django/views/generic/base.py" in dispatch
89. return handler(request, *args, **kwargs)
File "/usr/local/lib/python3.4/site-packages/django/views/generic/edit.py" in post
272. return super(BaseUpdateView, self).post(request, *args, **kwargs)
File "/usr/local/lib/python3.4/site-packages/django/views/generic/edit.py" in post
214. if form.is_valid():
File "/usr/local/lib/python3.4/site-packages/django/forms/forms.py" in is_valid
184. return self.is_bound and not self.errors
File "/usr/local/lib/python3.4/site-packages/django/forms/forms.py" in errors
176. self.full_clean()
File "/usr/local/lib/python3.4/site-packages/django/forms/forms.py" in full_clean
392. self._clean_fields()
File "/usr/local/lib/python3.4/site-packages/django/forms/forms.py" in _clean_fields
407. value = field.clean(value)
File "/usr/local/lib/python3.4/site-packages/django/forms/fields.py" in clean
163. self.validate(value)
File "/usr/local/lib/python3.4/site-packages/autocomplete_light/fields.py" in validate
61. if value and not self.autocomplete(values=values).validate_values():
File "/usr/local/lib/python3.4/site-packages/autocomplete_light/autocomplete/model.py" in validate_values
172. return len(self.choices_for_values()) == len(self.values)
File "/usr/local/lib/python3.4/site-packages/django/db/models/query.py" in __len__
144. self._fetch_all()
File "/usr/local/lib/python3.4/site-packages/django/db/models/query.py" in _fetch_all
965. self._result_cache = list(self.iterator())
File "/usr/local/lib/python3.4/site-packages/django/db/models/query.py" in iterator
238. results = compiler.execute_sql()
File "/usr/local/lib/python3.4/site-packages/django/db/models/sql/compiler.py" in execute_sql
840. cursor.execute(sql, params)
File "/usr/local/lib/python3.4/site-packages/debug_toolbar/panels/sql/tracking.py" in execute
159. return self._record(self.cursor.execute, sql, params)
File "/usr/local/lib/python3.4/site-packages/debug_toolbar/panels/sql/tracking.py" in _record
101. return method(sql, params)
File "/usr/local/lib/python3.4/site-packages/django/db/backends/utils.py" in execute
79. return super(CursorDebugWrapper, self).execute(sql, params)
File "/usr/local/lib/python3.4/site-packages/django/db/backends/utils.py" in execute
64. return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.4/site-packages/django/db/utils.py" in __exit__
97. six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/usr/local/lib/python3.4/site-packages/django/utils/six.py" in reraise
658. raise value.with_traceback(tb)
File "/usr/local/lib/python3.4/site-packages/django/db/backends/utils.py" in execute
64. return self.cursor.execute(sql, params)
Exception Type: ProgrammingError at /tasks/rounds/472/update/
Exception Value: column reference "id" is ambiguous
LINE 1: SELECT (CASE WHEN id='877' THEN 0 END) AS "ordering", "autht...
Bug is here: https://github.com/yourlabs/django-autocomplete-light/blob/f2d30773519d00cc3a17b135a7c1b13c0fc70137/autocomplete_light/autocomplete/model.py#L80
Db version please B-)
Le 7 juin 2015 05:24, "Aidan Lister"<EMAIL_ADDRESS>a écrit :
Bug is here:
https://github.com/yourlabs/django-autocomplete-light/blob/f2d30773519d00cc3a17b135a7c1b13c0fc70137/autocomplete_light/autocomplete/model.py#L80
—
Reply to this email directly or view it on GitHub
https://github.com/yourlabs/django-autocomplete-light/issues/442#issuecomment-109689386
.
postgres (PostgreSQL) 9.4.1 for me, haven't had time to look into it thoroughly yet
Thanks, me neither but will tonight, we should just prefix the PR column
name with the table name, I suspect this would help when using select
related and this sort of things.
Le 7 juin 2015 20:24, "Hedde van der Heide"<EMAIL_ADDRESS>a
écrit :
postgres (PostgreSQL) 9.4.1 for me, haven't had time to look into it
thoroughly yet
—
Reply to this email directly or view it on GitHub
https://github.com/yourlabs/django-autocomplete-light/issues/442#issuecomment-109789964
.
I've tried to overrule the behaviour using super and adding the column prefix, but it also required distinct statement to remove duplicate results
Are you sure that your initial queryset did not require distinct by itself ?
We might want to go back on this feature and rely on the choices queryset,
if it's too complicated and doesn't work in all cases, we could stick it in
a .contrib module.
Yes you're right distinct was required on the initial set, here's a way to reproduce the issue:
No issue:
class FooAutocomplete(..):
choices = Model.objects.all()
Ambiguous "id":
class FooAutocomplete(..):
choices = Model.objects.filter(<anyReverseFkAttr>__is_null=False).distinct()
Thanks Hedde, I think we have all the information we need here to make a
patch with a test case and a fix and a minor release.
Hi, the bug 'column reference "id" is ambiguous' is resolved. But now we have a new problem when pk name is different from default (id) and the autocomplete field is a foreign key.
The problem is in "SELECT (CASE WHEN the_pk_name='877' THEN 0 END) AS "ordering", "autht...".
So I changed the code in models.py FROM:
pk_name = ('id' if not getattr(choices.model._meta, 'pk', None)
else choices.model._meta.pk.name)
pk_name = '%s.%s' % (choices.model._meta.db_table, pk_name)
TO:
pk_name = ('id' if not getattr(choices.model._meta, 'pk', None)
else choices.model._meta.pk.attname)
pk_name = '%s.%s' % (choices.model._meta.db_table, pk_name)
Then change pk.name to pk.attname it works to me.
I think the solution is get the field name in table directly.
Could you please open another issue and provide more information so that at least we could reproduce.
The use case you're talking about seems tested in 111b0e4dd27d5dcf61cfbfdb0ebbf8da53907adb.
I opened a new issue. The problem is get the correct pk_name on subclasses. Take I look #447.
|
GITHUB_ARCHIVE
|
Part of the series Cambre
In my day-to-day work, I don’t get to work too much with user-facing code. Such is the world of embedded programming. So I decided to start a new project to explore a different aspect of software. I also want to seriously consider the architecture of this project, so not all of the posts will be directly related to the implementation of the project. Lastly, I want to have fun making this project, because otherwise there’s no point.
An important reason I write for this site is to share the knowledge that I gained while doing this project. At the time of writing this, I’m still deciding upon a system of feedback and updating the content.
I would like to make a Minecraft-inspired clone. It will be a 3D voxel game where I can explore an expansive world. I have some ideas that I would like to try that would distinguish this from Minecraft itself, and I’ll describe those at the time I develop them.
I did a project in college working with OpenGL. I know that the OpenGL standard is being deprecated for the newer Vulkan standard (or if you’re Apple, the Metal standard), but none of those have a good knowledgebase yet and I don’t want to deal with that. So I’m going to use OpenGL for the graphics context.
A Sidetrack Into OpenGL
OpenGL is a API for communications between a Central Processing Unit (CPU) and a Graphics Processing Unit (GPU). The OpenGL specification does not describe an implementation; this is because the API is implemented at various levels in hardware and software. A graphics vendor (Nvidia, Intel, AMD) will create a GPU and provide a driver to access it. Each graphics vendor provides their own driver specific calls that don’t necessarily represent the OpenGL API (often they include proprietary extensions) and application developers don’t want to provide many different builds of their application for each individual driver. The role of translating between OpenGL calls and driver-specific calls is performed by an OpenGL framework.
OpenGL frameworks are available on different systems:
- Linux: Provided by the Mesa 3D Graphics Library.
- MacOS: Provided by Apple’s OpenGL Framework.
- Windows: Provided by Microsoft in the Windows SDK.
Libraries and Technologies
Using OpenGL is a large design decision, but it also represents only a single facet of this project: Graphics Rendering. I’ll also need to handle user input, provide audio support, and use a networking library to make it multiplayer. For now, I’ll decide upon a user-input library (which also handles window creation), and I’ll decide on the others later.
In college, we used
GLUT to handle window creation and capturing user Input.
However, in the time since I’ve learned about
GLFW. Both provide the same
functionality: creating a window and allowing me to respond to input; however, I
prefer to use
GLFW because it provides more control over the application
structure than using
GLUT. I can write my own game loop this way.
I also need an extension-management library so I can make the most out of my
GPU. For this, I’ll use
GLEW. And I want to use a math library for performing
linear algebra operations easily. For that, I’ll use
This project uses the following libraries:
- GLFW (This library automatically includes Apple’s OpenGL framework).
Grab a Copy
If you would like to see the code as it has evolved, check it out on GitHub.
|
OPCFW_CODE
|
Crashtest Security is a cyber security startup located in the heart of Munich. We are a team of tech lovers, that makes the IT Security space interesting, even for non-techies. We are pioneering in the multi billion dollar market of automated vulnerability assessments (penetration testing), which is growing at over 25% annually. We are offering an innovative SaaS solution, which allows modern development teams to perform a full security assessment of the web application with a click of a button. With a strong client base, including Ottonova, Flixbus and Datev, we are now looking at the global expansion of our product and want you in our team.
From day one you will be thrown at different tasks and problems across the Company (for example, identifying new business opportunities, hiring talented people, creating and measuring KPIs for different teams within Crashtest etc) and directly report to the founders. It is an incomparable opportunity, learning how to build a business from scratch, how to solve absolutely any problem, how to achieve any possible goal directly from a founder.
You will be assigned high priority projects to ensure we achieve our goals
- This role comes with great autonomy allowing you to identify problems, carry out analysis, design solutions and execute them
- You will collaborate with different teams within the company to identify, review and solve problems
- You will work on multiple projects in parallel that will require you to be adaptable across multiple functions
- Some previous projects have involved identifying new business opportunities, hiring talented people, creating and measuring KPIs for different teams, creating documents for funding rounds.
- This role provides an insight on how to build a business and the ability to solve any problem thrown at you
- Experience in a fast-paced business environment, ideally a start-up, working in a business or cross-functional role
- Ability and willingness to excel in a dynamic start-up environment
- Your hands-on mentality helps you to quickly achieve set goals and you are willing to go the extra mile
- You can break complex problems into smaller ones and enjoy working with data to facilitate your work
- You approach tasks in a structured and careful manner and meet your deadlines - without exception!
- Excellent written and oral communications skills in German and English
- Technical interest in the area of Cyber Security
Why join Crashtest Security?
- Freedom to build the marketing team at Crashtest Security and expand the business
- Attractive bonus program based on your performance
- Young, dynamic and driven team shaping the web application testing market on a global scale
- Freedom to take responsibility for your area of expertise and quickly grow into a management role
- Potential participation in company success via employee stock option program
- Office in the heart of Munich and opportunity for flexible home office schedule
- Regular team events and personal development days to boost your area of expertise or learn a completely new one
- Mac-book, fully equipped workstations, and other tech gadgets needed for your work
- Personal development budget of 1.000€ per year and it is up to you to use it for development purposes
- Attractive employee benefit programs, ranging from gym passes to contributions to your lunch
|
OPCFW_CODE
|
The thing about an intranet is that it can be built on several platforms. Office 365 intranet or SharePoint intranet, call it however you like. The question is, is Office 365 with SharePoint a perfect platform for your company?
Every year Nielsen Normal Group releases an Intranet Design Annual. This contains a list of ten best intranets.
Now, it appears that at least half of the winning intranets of 2018 were built on SharePoint or Office 365 with SharePoint.
Microsoft itself boasts that its SharePoint has over 190 million users across 200,000 organizations.
Office 365 with Sharepoint is praised
Now, as a member of the Office 365 family, small and mid-sized business owners, as well as several large businesses praise SharePoint. Why?
Just one simple example. One of the biggest hospital systems in Illinois, US, is called Advocate Health Care. They claim that they save millions of dollars due to increased productivity, thanks to their intranet built on Office 365 with SharePoint.
We moved our intranet onto SharePoint Online and save $400,000 in infrastructure every four years, plus annual maintenance cost savings.
Director of Unified Communications for Advocate Health Care
Discover in more detail how Advocate Health Care benefits from its investment in Office 365 with SharePoint.
and what about Microsoft …
Microsoft itself uses SharePoint as its primary collaboration platform for its intranet and extranet solutions.
What is key for a SharePoint intranet
SharePoint is Microsoft’s leading document management and collaboration tool. It is a modern platform on which an intranet can be built.
As a result, it can be used by an organization for internal purposes and to simplify everyday business activities.
Because of its simple user experience, it helps employees connect easily with information and expertise.
It can accelerate shared business processes. It can also help manage and re-purpose content.
SharePoint team sites connect a team to the information and content they rely on every day.
Office 365 a perfect platform for your intranet
Office 365 allows employees to work from anywhere while efficiently collaborating with team members. (Just in case you forget – SharePoint is available on the Office 365 package.)
Office 365 with SharePoint provides a collaborative and flexible framework
Who uses SharePoint?
According to enlyft, computer software companies and information technology and services companies use SharePoint the most.
Following this are hospitals and health care, staffing and recruiting, financial services, construction, management consulting, higher education, government administration, and non-profit organizations. The list is endless.
Intranet hosted on Office 365 with SharePoint – does it work?
So, does an intranet built on Office 365 with SharePoint work? Yes, it does – and here’s why:
- SharePoint modern sites are flexible, easy to use, and attractive. Jeff Teper, The Godfather of SharePoint, announces increased capabilities of intranets based on SharePoint 2019.
- Intranets based on SharePoint can be customized deeply to exude a company’s brand image.
- SharePoint spaces provide immersive mixed reality experiences that are great for the recruiting and onboarding processes. Recruits or new hires can learn about the company in a 360-degree virtual orientation program.
- Thanks to Office 365 with SharePoint, an intranet built on this platform can help business owners and managers ensure that their employees and teams are connected at all times. They have access to the latest and most relevant organizational information.
- An intranet built on SharePoint can be integrated with CRM, ERP, BI, ITSM, and other major enterprise systems.
Microsoft continuously improves the SharePoint experience – its accessibility, performance, and usability have been refined consistently since its inception 17 years ago.
With SharePoint’s move to the cloud:
- All tools and sites are available as one service from a single vendor.
- Both business and personal sites are within the same functional and administrative scope.
- You can use a single toolset to design, develop, and manage an environment.
SharePoint Online is not new – it has been around since 2012. But it continues to evolve constantly. Every version is more agile, more secure, and more reliable.
Microsoft’s modernization consists of key elements such as audience targeting, cross-suite integration, advanced site designs and a shift to modern sites.
This serves as the default site types for great accessibility, better responsiveness, more security, less dependence on a developer and more power to create a useful, attractive, and responsive site.
The idea is, minimum help from a developer – this is time and cost-saving.
According to Statista, in 2019, 74% of Microsoft SharePoint users utilized cloud-based SharePoint and still prefer it to the on-premise server. This is an increase since the previous year.
An intranet on Office 365 with SharePoint creates a sustainable cloud solution – a recommendation made by Microsoft. But that does not mean that an on-premise solution is not ideal.
While 74% of Microsoft SharePoint users utilize cloud-based SharePoint, SharePoint on-premise has its own set of ardent followers who are more than satisfied with this solution.
The on-premise version has undergone several upgrades. Each upgrade better than the previous one.
Intranet on Sharepoint on-premise tops the list of best intranet software with regard to ease of use, meeting requirements, ease of administration, and quality and support.
The on-premise version is known to be one of the most adaptable intranets even 20 years later.
It has extensive customization properties that make it extremely flexible and covers multiple functional areas.
Organizations can deploy the on-premise version using their own infrastructure. They have full responsibility for their intranet customization, its management, and support.
The latest SharePoint on-premise intranet uses SharePoint 2016 and 2019. Perpetual licensing is available as well as the server/Client Access License model.
The license holder – the organization that buys the licenses – has full ownership rights over the on-premise version.
This works out great for organizations that do not want to be bogged down by paying various types of subscriptions.
The future of the market
- SharePoint will continue to be a critical part of Office 365. House a business’ intranet, as well as pages and files and other key information that you will need access to.
- Microsoft begins to position SharePoint as the content layer of Office 365 and will continue to do so in the near future.
- Many of the SharePoint Online features are added to the on-premise version of SharePoint 2019. Hence, this makes it a viable option for several businesses that prefer the on-premise version.
- More companies will make a shift to Office 365 and SharePoint Online.
- SharePoint 2019 will find its market in the government, healthcare, and financial services sectors that are governed by strict regulations. The strict regulations will prevent them from fully migrating to the cloud.
- Companies that use older SharePoint versions like 2010, 2013 and 2016 will migrate to SharePoint 2019.
There is no doubt that Microsoft does a tremendous job with SharePoint and Office365 platforms. There is no doubt that we will hear about the progress these platforms will make on a regular basis in the years to come.
If you are looking to better your organization’s performance with intranet or looking to making a switch from your current intranet to an intranet that makes use of Office 365 with SharePoint, get in touch with us today.
If you have any questions don’t hesitate to contact us.
|
OPCFW_CODE
|
What extra information is stored in a photograph taken by a digital camera?
When I take pictures with my camera the file sizes seem exceedingly large. In this example the original is 5186kb. I wrote a java program to just read the image and then write it again, removing any information except the pixel values.
The first time it is rewritten, the file size goes down to 1005kb, a more than fivefold reduction! To make sure I wasn't losing data to compression I iterated the program 100 times on the resulting images and the file size stayed at exactly 1005kb with no loss in image quality.
My question is, what is the camera storing in the other 4181kb? Some sort of metadata I know, but it seems like a lot. I would like to know what I am losing by resizing my images like this.
Are you sure you're not re-compressing the file? My older Nikon 5100 saves jpegs at about that size (RAW files are even bigger) and it's definitely not 4megs of non-image data. Try using something like ImageMagick Compare function to get the actual pixel level difference between the before and after picture.
I did the comparison and there is a lot of red, so it is compressing an uncompressed jpg? Visually the two files look identical to me, and why does it only resize it the first time and not subsequent iterations? The code I am using is just this:
BufferedImage image = ImageIO.read(source);
image.getGraphics().drawImage(image, 0, 0, null);
ImageIO.write(image, "jpg", target);
Here are the image files I am using.
Original:https://drive.google.com/file/d/12gJMGVcaegEqLR4Cn0tvHdTODPvPpVal/view?usp=sharing
Result:https://drive.google.com/file/d/1HrdZ8Zu9GjQnK-w2I5pQUxMhwoI_K5On/view?usp=sharing
Diff:https://drive.google.com/file/d/1bZTMUCM2IqEdW_c8y8IutvPRdQ5lvT2G/view?usp=sharing
Jpegs are, by definition compressed images. Additionally, such compression is "Lossy", meaning each time you re-compress the image, you are losing more and more image data (which leads to the needs more jpeg meme). See this Photo.StackExchange question for details. The tl;dr is that your code is damaging your images. It's up to you to decide if the trade off for small files it worth it.
Ok thanks, I knew I was losing something I was just interested in what I was losing so I could make that decision. The weird thing to me is that if I run it through once, or if I run it through 1000 times, the output is the same.
Assuming the file format you are using is .jpg, the original file was saved in a higher value of jpg compression say 95%, while when you resave the file, you probably were using say 85% jpg compression value.
The size doesn't change in consequent saves as the compression value stays the same
|
STACK_EXCHANGE
|
To make it really Star Wars-y they should be named “Deflectors”,
but that’s probably a bit too long for the screen.
To make it really Star Wars-y they should be named “Deflectors”,
Just tried this on the emulator, it’s pretty good
Out of interest, is there a reason you decided to change from MIT to GPL?
I might not have chosen the right one since I’m new to the license business, but I only wanted to, if anyone made any modifications or improvements, that they also released the source for it. That’s about it, I don’t know if I got the license terms right.
- I tried to make it less complicated by having just 3 hits to be destroyed. Maybe I’ll rename hull to Shields.
Well, in reality (hahahah) you have both shields and hull. Though the hull never seems to take much in star wars. So maybe shields is more accurate.
About the controls, the crosshair scroll 2 pixels per frame, so yes it’s not very precise, but If reduce to just 1 frame, it scrolls too slow.
So you need a different frame rate… or different polling rate (hard) or an algorithm that varies the speed… OR you can also move it just 1.5 pixels per frame too though… no reason it’s gotta be a whole number.
Ah ok, that makes sense.
Though technically they only have to publish the source if they also publish their modified/improved version of the game.
If they don’t publish the game then they don’t have to publish the source.
I.e. they could share their modified version between a group of friends as long as nobody publicly published the game. If the source code was hosted on a private server or unlisted public server then technically it would still be following the licence terms.
The GPL and MIT licences are both popular licences for Arduboy games.
However personally I dislike the GPL for two main reasons:
- Modifications to the source must have a relevant date next to the notice stating the code has been modified.
- Mainly this is just annoying - people don’t tend to think about the date when they modify code
- If someone wants to use a small piece of code from a GPL program, they must publish their own program under the GPL. If they don’t want to release their own code under the GPL, they simply can’t use the code. This is technically called ‘copyleft’, but often derogatorily called a ‘viral’ licence.
- I dislike this because it seems like an unnecessary restriction
If either of those things bother you I’ve found a licence that still requires people to publish the source of their derived versions, but doesn’t require them to use the same licence (the LaTeX project public licence). It’s not a particularly common licence, but it’s still a completely valid open source licence.
If you aren’t worried about either issue then feel free disregard this comment.
If you have any questions, feel free to ask.
I’ll just leave this here…
I really didn’t know, I’ll revert back to MIT if it’s better to the community
I tried not using Floating Points knowing there is a performance problem, though I haven't benchmarked the game using it. I also knew about Fixed Point, but as your post describes, I didn't know how to use it.
I’ll probably make a V2 with some of the suggestions on the thread. Also there are some optimizations that I can do to reduce the Sketch size
Unfortunately licences can be quite complicated.
Some people don’t bother with licences at all, but that’s not a good idea either because it means that technically nobody can modify your code.
It’s better to use a licence that’s not quite right than to not have one.
MIT is probably the simplest and most permissive licence.
(Unless you count licences like CC0 that actually try to get rid of copyright altogether.)
Using MIT would mean that technically people could publish closed source modified versions if they wanted, which isn’t what you wanted.
Ultimately it’s up to you what you choose, I’m just trying to make sure you’re aware of what each licence allows and that you’re happy with the implications.
(I should really write a simplified tutorial about this stuff someday…)
If you’re worried about what’s best for the ‘community’,
I think a ‘permissive’ licence is better than a ‘copyleft’ licence because ‘permissive’ licences give other users more freedom and they tend to be less complicated.
The library comes with:
- An example program that demonstrates many of the available functions.
- A document explaining how fixed points work:
- And a bit of extra info in the readme.
Overall it’s pretty simple.
All the normal arithmetic operators work as expected, and floating point literals are converted at compile time:
UQ8x8 a = 1.5; UQ8x8 b = 2.25; // c becomes 3.375 UQ8x8 c = a * b;
Though sometimes you have to remember to explicitly convert:
int a = 5; // 2.25 would be a double in this case // UQ8x8 b = a * 2.25; // So you must convert: UQ8x8 b = a * UQ8x8(2.25); // Equivalent alternative: // UQ8x8 b = a * static_cast<UQ8x8>(2.25);
If you decide to use it and need some help then feel free to ask.
If you don’t want to use it that’s fine too.
Floats might be a bit easier if you can afford them.
@Pharap thanks so much for taking the time to write this explanation. I’ll probably try using Floats but if it ends up slowing the frames, I’ll give the lib a try.
For sure I’ll use it when I try to do the FPS one.
Sooo… what are the chances we can change the title of this game to something that is less likely to get the site issued a DMCA?
I’m going to have to hide this thread until it gets a new name and title screen, sorry! I think everything else can stay the same, but “Star Wars” especially in its font is going to be covered by their intellectual property and I don’t want to get an email from Disney lawyers, please.
Can you modify that image to be named Space Battle?
I’m so sorry, I knew I shouldn’t have used it, i’ll patch it up right away!
I sure can
@bateske All references have been removed!
The game itself still has the title, you’ll have to change the in game title as well. Sorry to be a pest!
Ooh! Looks like my browser had the old one cached.
Thank you for the quick edits, page is being re-enabled!
Is this alright for the flashcart menu screen?
I encountered a glitch… it would seem the game did a value loop on the health percentage after I was hit by 2 fighters at the same time. Perhaps you need to program a mechanic that prevents a second fighter from doing damage for maybe a second after you’ve already been attacked to prevent this (and also make the game feel more finished). Otherwise nice job, perhaps just make it a bit more challenging, thanks for sharing!
|
OPCFW_CODE
|
Lalita Jat is a Premier Field Engineer from India who works on Exchange Server technologies. In this post she talks about Site Mailboxes, an exciting new collaboration feature.
This is the first post in a two part series in which we will discuss an exciting new feature in Exchange 2013 – Site Mailboxes. Using Site Mailboxes, we can integrate Exchange and SharePoint to enable users to collaborate better by leveraging the traditional strength of Exchange as an email application and SharePoint as a document repository.
In this post, I will introduce Site Mailboxes and explain how to setup SharePoint for Site Mailboxes. I will follow this up with another post detailing configuration and usage.
A site mailbox is a shared mailbox that supports SharePoint integration. This enables Outlook and Exchange to provide multiple collaboration types, where a central project oriented location aggregates documents and emails. More details about Site Mailboxes can be found at this site.
Why use Site Mailboxes? Well there are many reasons, the most common benefits are listed below:
For the integration between Exchange and SharePoint to work correctly, there are some specific things which need to be in place. We will cover many of the details in part 2 of our series, but for now keep in mind that:
There are certain assumptions or pre-requisites which need to be stated prior to actually starting the installation:
With these pre-requisites in place, we are ready to roll and start the installation!
Firstly, when installing SharePoint, ensure you select the ‘Complete’ Option:
Install Enterprise SQL 2012 SP1 (X64) or SQL 2008 R2 SP2 (X64). While we can deploy SQL on the same server, it is recommended to deploy SQL on a dedicated server to ensure sufficient resource availability. See this blog as well to check why we recommend using SQL 2012 SP1.
Thirdly, in the SharePoint Products Configuration Wizard, select Create a new server farm.
Next, we specify configuration database settings or create a new Configuration database:
Configure Central Administration Web Application with a port number between 1 and 65535:
Expect the Wizard to take some time to complete! When all is done, you should get the screen below:
While that’s it for now, in the next part of this series, we will discuss the configuration needed on SharePoint and Exchange to be able to create Site Mailboxes. We will then explore how to use Site Mailboxes as well. So stay tuned!
Contributed by Lalita Jat; posted by Arvind Shyamsundar.
Maybe this for custom integration?
Great article ...Thanks for your great information, the contents are quiet interesting. I will be waiting for your next post.
http://staygreenacademy.com">SharePoint 2013 Training
|
OPCFW_CODE
|
Linux is going quick! You might not accept the statement if you are a hard Windows or Macintosh fan but the fact is, you have to accept. In previous years, Linux has acquired a dramatic growth in popularity and usage with the help of various distributions including Mint Linux and Ubuntu. We can see some people around the globe, who are currently sticking on Microsoft Windows (We will have to exclude Mac users) despite the fact that they want to use Linux based operating systems and enjoy exclusive features of that open source community’s product. It also seems that Ubuntu and Linux Mint are the best choice to start with if you are new to the wide world of Linux.
Apparently, the problem is one of the myths in technology field, which states that installing and using Linux is a herculean task for users from other OSs. Even though we may have to agree with the case of using, we strongly defend the first myth about installing. If you are still not able to believe us, we can suggest you some cool ways for installing (or without installing) Linux based Operating Systems in your pc and using it never like before. All these ways are purely risk-free and you will face no problems or complexities during the procedure. Now, let us take you to those superb ways. As of our experience, these ways seem perfect in the case of Ubuntu Linux and Mint Linux, which are quite popular Linux distributions.
This is the most preferred and safe way for testing Linux based operating systems. Unlike Windows or Mac, Linux distributions come with an in-built feature to use the operating system even without installing the OS. In most cases, you can use all of available features in the OS and can move on with the installation procedure if you feel satisfied.
For using this method, you have to download the OS’s .iso file from the corresponding website. Once you have finished download, in Windows 7/ Windows 8, you can right click on it and select option ‘Burn to Disc’. You can insert the CD and select the bootable device. In the option use OS, you can test the whole OS in your desktop. Unless you have made your wise or unwise contributions in the partition section, the process cannot make even a single change in your pc nor your data. Obviously, Live CD is one of the best methods you can choose.
Wubi – Windows Installer
This option is available only if your desired distribution is Linux, running on Unity. This is for pc users, who love to shift to Linux but do not want to mess up (in their case) partitioning and other stuff. The manufacturer, Canonical, has launched a software called Wubi for Windows for installing Ubuntu Linux in your windows powered pc without any hassles. Sadly, this software is not available for Windows 8 computers due to some issues. Instead, Canonical advises users to create a special partition and install Linux in your pc.
You can download Wubi from website and install Ubuntu in seconds. You just have to add some details like the drive where you want to install, disk space you want to give etc. Once you have done it correctly, you can hit the finish button and the software can reboot your device with bootable mode. In the mode, you can either install Ubuntu along with Windows or wipe the whole drive and use it for Linux. Unless you face a power failure or some technical issues, Wubi is the safest way for installing Ubuntu in windows PC.
As we said before, Ubuntu is best for basic users and you can get so many ways for testing the OS as well. Online demo is a popular way to understand how Ubuntu Linux works with basic features. You can do everything without leaving your Browser Window. You have to lead to the official website of Ubuntu and go to option for Online Demo. Currently, the feature is offline but canonical also says that the service will be available soon. However, this is an odd method when compared to other, as there is no word about ‘installing’.
This is the most used and safe way to run Linux in your Windows PC. As you might know, there is much virtualization software like VMware, VirtualBox etc. Virtualization is all about bringing an entirely different operating system to your windows based PC. This option is quite useful when you want Ubuntu or Linux Mint pc for some hours. In that situation, it is not a wise decision to give some GBs for installing a complete OS. Of course, this software does not affect your current operating system installation or your data.
For doing this, you need a virtualization software accompanied by the operating system files. You can use either .iso file or the CD if you have a CD drive. You can select the OS version and virtual configuration of your virtual machine. Once you have given adequate details, the software will set up a virtual machine for you. You will have to follow standard installation procedure in the machine. Once you have finished the process, you can start using your Linux powered PC from the next second. As said before, it is the safest way, as it does not touch your current operating system or your data.
This option is useful if you are using a pc without cd drive. In this method, you are actually turning your pen drive into a purely portable OS installation drive. There are several tools like LiLi available for such a conversion. You have to select the Linux distribution and the .iso file of that operating System. In seconds, the software will do the conversion without any hassles. Once you have the USB Device, you can reboot your pc and select the USB as bootable media. Then, follow the same installation method. When compared to CDs, USBs are faster.
In our experience, all of the mentioned ways are 99.99% safe. Remaining 0.01% has to be reduced because a power failure or a hard disk error can affect your installation. When the mentioned issue is out, all these ways are fine. We would choose Virtual Box and Online Demo if you do not want a reboot and installation in your PC. Do you have any simpler way for installing Linux distributions in windows powered PCs? Do let us know!
|
OPCFW_CODE
|
DomainOffensive (do.de) - please add existing certbot plugin to NPM
What provider would you like to see added to NPM?
DomainOffensive (do.de)
Have you checked if a certbot plugin exists?
https://pypi.org/project/certbot-dns-do/0.31.0/
Thanks in advance,
David
old version (before edit):
What provider would you like to see added to NPM?
DomainOffensive (do.de)
Have you checked if a certbot plugin exists?
Not sure if that helps, but DomainOffensive offers a LetsEncrypt API:
first method: acme.sh:
export DO_LETOKEN="02pAPacMv1yNnUzSDR75"
acme.sh --issue --dns dns_doapi -d meineDomain.de -d *.meineDomain.de
second method: GET-Request:
example
https://my.do.de/api/letsencrypt?token=1md6xRcUCTrB58kbpwAH&domain=_acme-challenge.testdomain.de&value=OVxwaDm7MgN1IRG0eSivJMlepO9CL4X8vKo6Tcns
parameters:
token= - auth token
domain= - full domain name, including _acme-challenge as well as subdomains
value= - value for TXT record
answer:
{
"kdn": "1234",
"domain": "testdomain.de",
"success": true
}
Please also see:
https://blog.ichalsroot.de/letsencrypt-wildcard-ssl-zertifikate-mit-domain-offensive/
Thanks in advance,
David
I think I probably found a certbot plugin for DomainOffensive (do) - please see here:
https://github.com/georgeto/certbot-dns-do
Is it possible to integrate this one into NPM?
Thanks,
David
The GitHub package from
https://github.com/georgeto/certbot-dns-do
can also be found on PyPI:
https://pypi.org/project/certbot-dns-do/0.31.0/
I edited the original post.
Maybe @jc21 can include the certbot plugin for Domain-Offensive now? :-)
All credits go to the original author georgeto!
Thanks in advance,
David
oh yes please. this would be great
push
would apreciate this to be added
would appreciate this to be added
Would also appreciate this registrar to be added. Thanks in advance.
I was able to successfully create a build that includes the DomainOffensive plugin from georgeto.
(Thanks to Knoxell for showing me how to do this!)
I created a pull request (#2327 ) and hope that @jc21 will include the changes into the development branch.
I also created a testing version on Docker Hub.
You can modify your docker-compose.yml like this in order to check it out:
version: "3"
services:
app:
image: 'dfs90/nginx-proxy-manager-do-de'
restart: unless-stopped
ports:
- '80:80'
- '81:81'
- '443:443'
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
Please be aware that this version is based on v2.9.18 and will not be updated!
Pull request has been approved - see here.
|
GITHUB_ARCHIVE
|
random procedure, its use in simulating simple games, and its use in making “unpredictable” texts.
a. Make sure that you have the latest version of the
b. Copy the code from the reading into your definitions pane. You should
random-elt, and the various
procedures for generating sentences.
Write a recursive procedure, count-odd-rolls,
that counts the number of odd numbers that come up when rolling a die n times.
> (count-odd-rolls 10) 7 > (count-odd-rolls 10) 2 > (count-odd-rolls 10) 6 > (count-odd-rolls 10) 5
Please use direct recursion to implement
count-odd-rolls. Your procedure
should look something like
(define count-odd-rolls (lambda (n) (cond [(zero? n) 0] [(odd? (roll-a-die)) ...] [else ...])))
Consider the problem of rolling a pair of dice
n times and counting
the number of times that either a seven (7) or an eleven (11) comes up.
a. What is wrong with the following pair of procedures that are intended to accomplish this task?
;;; Procedure: ;;; pair-a-dice ;;; Parameters: ;;; [None] ;;; Purpose: ;;; Roll two six-sided dice and find their sum. ;;; Produces: ;;; roll, an integer ;;; Preconditions: ;;; [No additional] ;;; Postconditions: ;;; 2 <= roll <= 12 ;;; Across multiple calls, the various rolls have the same probabilities ;;; as we would get from rolling two dice. (define pair-a-dice (lambda () (+ (roll-a-die) (roll-a-die)))) (define tally-seven-eleven (lambda (n) (cond [(<= n 0) 0] [(or (= (pair-a-dice) 7) (= (pair-a-dice) 11)) (+ 1 (tally-seven-eleven (- n 1)))] [else (tally-seven-eleven (- n 1))])))
Hint: How many times should we roll a pair of dice to find out how
many sevens or elevens come up in
n rolls? Add an expression using
display to the
pair-a-dice procedure so that you can count how many
times it is called.
(define pair-a-dice (lambda () (display "Rolling ...") (newline) (+ (roll-a-die) (roll-a-die))))
If that isn’t enough of a hint, read the notes on this problem.
b. Write a correct version of
a. Write a zero-parameter procedure,
(heads?) that simulates the
flipping of a coin. The
heads? procedure should return #t
(which represents “the coin came up heads”) half the time and #f
(which represents “the coin came up tails”) about half the time.
> (heads?) #f > (heads?) #f > (heads?) #t > (heads?) #f > (heads?) #t
b. Write a procedure,
(count-heads n), that simulates the flipping of
n coins (using
heads? to simulate the flipping of each coin) and
returns the number of times the coin is heads. You will likely find
the following form useful.
(define count-heads (lambda (n) (let kernel ([count 0] [remaining n]) (cond [(zero? remaining) count] ...))))
c. Use count-heads to explore the distribution
heads? gives by
counting the number of heads you get in 100 flips, 1,000 flips, and
10,000 flips.
Suppose we have a list of names,
students, that represents
all of the students in a class.
(define students (list "Andi" "Brook" "Casey" "Devin" "Drew" "Dylan" "Emerson" "Frances" "Gray" "Harper" "Jamie" "Kennedy" "Morgan" "Phoenix" "Quinn" "Ryan"))
a. Write a procedure,
(random-student), that randomly selects the name
of a student from the class.
b. Write a procedure,
(random-pair), that randomly picks one student
students, then another, and then puts them together into a list.
c. What are potential problems with using
(random-pair) to select
partners from the class?
a. Using the
sentence procedure, generate about five different sentences.
b. Add a few names, verbs, adjectives, and nouns to expand the range of sentences, then generate five new sentences.
All of the procedures we’ve written so far assume that we have a uniform distribution (of dice faces, verb occurrences, or whatever). But many distributions are not uniform. Consider, for example, a bag of M&M Candies. According to Josh Madison,
I checked out M&M’s web site. According to it, each package of Milk Chocolate M&M’s should contain 24% blue, 14% brown, 16% green, 20% orange, 13% red, and 14% yellow M&M’s.
We might represent that distribution in Scheme as a list of “color + frequency” lists.
(define m&m-colors (list (list "blue" 24) (list "brown" 14) (list "green" 16) (list "orange" 20) (list "red" 13) (list "yellow" 14)))
a. You may have noted that these numbers don’t add up to 100.
Write a procedure,
(total-frequency tallies), that adds up the
second numbers in a list of the form above.
> (total-frequency m&m-colors) 101 > (total-frequency '((a 3) (b 5) (c 2) (d 8))) 18
b. Write a procedure,
(one-m&m), that “randomly” chooses an
M&M color according to the distribution above. You should
use (random 101) and then pick which color depending on the result.
> (one-m&m) "blue" > (one-m&m) "orange"
c. Write a procedure,
(lots-of-m&ms n), that makes a list of
n randomly chosen M&M colors.
> (lots-of-m&ms 20) '("brown" "green" "red" "orange" "blue" "red" "blue" "blue" "yellow" "yellow" "brown" "brown" "red" "orange" "blue" "green" "blue" "red" "blue" "orange")
d. Explore the distribution you actually get using
lots-of-m&ms and a large
n. For example,
> (tally-all (lots-of-m&ms 1000))
Consider the following procedure.
(define select-from-distribution (lambda (tallies) (let ([choice (random (total-frequency tallies))]) (let kernel ([selector choice] [remaining tallies]) ; (display (list 'kernel selector remaining)) (newline) (let* ([entry (car remaining)] [entry-frequency (cadr entry)]) (if (< selector entry-frequency) (car entry) (kernel (- selector entry-frequency) (cdr remaining))))))))
a. What does this procedure seem to do?
b. Uncomment the
display line and call (select-from-distribution
m&m-colors) a few times.
c. Explain, in your own words, how
d. There’s no check to ensure that
remaining is nonempty. Why
doesn’t the code need such a check?
The select-from-distribution procedure selects “randomly” from
an unequally distributed set of values. One unequally distributed
set of values we’ve encountered recently is the set of tallies of
words in a book.
> (define bronte-words (file->words "/home/rebelsky/Desktop/pg1260.txt")) > (length bronte-words) 192630 > (define bronte-tallies (tally-all bronte-words)) ; Warning! This is *slow*. > (length bronte-tallies) 13828
We could poorly simulate an author by selecting words randomly according to that distribution.
> (select-from-distribution bronte-tallies) "Anybody" > (select-from-distribution bronte-tallies) "they"
Write a procedure,
(new-sentence n tallies), that generates a new
sentence of the appropriate length using the distribution given in
> (new-sentence 20 bronte-tallies) "thoughts I snoring with pleasure past no quiescence brought laugh be sake he her man in I at had to " > (new-sentence 20 bronte-tallies) "had rode want record and a raised sparkling patience introduced no been might jetty very or four that was that "
Suggest a few techniques that could be used to improve the generation of text.
If there are
n rolls that we want to count, we should only roll the dice
n times. However, you will find that the number of calls falls between
n and 2n. Why? Because the “is it seven or
eleven” test potentially rolls the dice twice, once to see if the roll
is seven and, if not, one more time to see if the roll is eleven.
|
OPCFW_CODE
|
(Note: this is similar to is Cryomancy scientifically possible?, but not exactly the same.)
In my story, I have "magic" that amounts to telekinesis powered by the user's metabolism.
It seems that heating objects using this magic should be pretty plausible (although the amount of heat that can be generated is somewhat limited); the user metabolizes some energy, and that energy, instead of doing Work inside the user's body, causes a target object to heat up by that amount of energy. More specifically, the way I envision this working is something like the user's body produces energy via metabolism as normal ("using magic" can be though of as an additional way to trigger metabolic energy production), but the energy produced is magically teleported to a location of the user's choice and acts in a direction of the user's choice. (I think this is sufficient to blur the lines between "heat" energy and other, more useful forms of kinetic energy, thus explaining why a magic user can also do things like levitate small objects. This is playing pretty fast-and-loose with entropy, but it is magic, although the difference in entropy may factor in as a loss in efficiency.) There is also a range limit, with "lost" energy dissipating as heat between the magic user and the target. (Basically, at a distance $D$ from the target, the magic user must spend $2x$ energy to apply $x$ to the target, with the other $x$ getting lost somewhere en route, possibly as infrared radiation.)
To phrase this a little differently... magic is a lot like a combination of Newton's Cradle — a moving ball strikes a stationary mass in an inelastic collision, and the energy is transferred through the mass (without the stationary mass moving itself) to another ball — and a "super ball" — an object which, upon striking an "immovable" object, rebounds in a perfectly elastic collision such that its speed remains constant but the direction of its motion changes. All (my story's) magic brings to the party is that these effects are combined, i.e. it is like a Newton's Cradle where the stationary mass is immaterial (and can transfer energy/force through other matter occupying the same space as the immaterial "mass" with minimal effect on said matter), and can redirect the force in a different direction. (Propagation is presumably still subject to the speed of light, but for my purposes this is effectively negligible; magic only works over short distances.)
Now... it also seems like the reverse of heating things should be possible. After all, cooling is just applying an acceleration to atoms in opposition to those atoms' present velocity, and we certainly have technological means of cooling things (note the referenced question and answers thereto).
The problem is that this appears to violate thermodynamics; we can't just decrease the entropy of the universe. In particular, the "naïve" solution would be to postulate that the metabolic energy produced by the magic user perfectly counteracts the existing atomic motion of the target to be cooled, but this implies that the user's metabolism produces $k$ energy which doesn't just not affect the user directly, but effectively vanishes from the target. IOW, I just removed $2k$ energy from the universe (and incarnated Maxwell's Demon in the process).
How can I avoid violating thermodynamics in this manner, but still allow magic to cool things? I'm looking particularly for answers that can quantify how much heat needs to be dumped elsewhere (presumably into the magic user's body and/or surroundings).
|
OPCFW_CODE
|
ELEMENTARY ORGANIC CHEMISTRY
T, W, Th, F 11:15 - 12:05 PM
325 Science Classroom Building
Spring Quarter 1998
Instructor: Prof. Christopher J. Cramer, 215 Smith Hall, 624-0859
Office Hours: Tuesdays 10:00 - 11:00 AM and Fridays 2:30 - 3:30 PM in Room 215 Smith Hall. Please feel free to make an appointment for a different time if you are unable to attend normal office hours.
Web Site: http://pollux.chem.umn.edu/~cramer/3302
Texts: L. G. Wade, Jr. Organic Chemistry, Third Edition, Prentice-Hall, Inc. J. W. Simek, Solutions Manual. Students will benefit from having a molecular models set--The Fleximodel Set is recommended and is available at the bookstore.
Exams: All exams will be conducted in 325 Science Classroom Building.
Exam I Friday, April 24 (week 4)
Exam II Friday, May 15 (week 7)
Exam III Friday, June 5 (week 10)
Final Exam: Tuesday, June 9, 4 PM to 6 PM in 325 Science Classroom Building. I will not give any early final exams under any circumstances. The only grades available to individuals missing the final exam are I or F/N.
Organic Tutorial Schedule: A tutor will be available in 101G Smith Hall, the hours will be announced during the first week of classes.
Final Grade: The final grade will be calculated as follows: 50%
of the lowest hour exam score + 75% of the middle score + 100% of the highest
score for a total possible score of 225 points from the hour exams. This
weighting is designed to minimize the impact of poor performance on a single
hourly exam while maximizing the impact of a stellar performance. The final
will be worth 200 points and 2 group learning projects will be worth 50
points each. The grand total will be 525 points. The distribution of the
final grades will be approximately 20% A's, 30% B's, 40% C's, and 10% less
Regrades: Regrades will be considered only up until the day of the next exam and only if the exam was taken in pen.
Exam Absences: No make-up exams will be given. The first missed exam with a valid excuse (i.e. note from a doctor, police officer, or judge) will be replaced by a pro rata share of the final exam. The second absence will be counted as a zero and averaged as such.
Policy for "I" Grades: An "I" grade will be assigned only when a prior arrangement has been made with the instructor. It will only be considered when the final exam cannot be taken and when work to that point has been deemed satisfactory. The "I" can be made up only be taking a regularly scheduled 3302 final exam in the following quarter. If the final is not taken and work in the course completed prior to that date is not satisfactory, an F or N grade will be assigned.
Policy on Scholastic Dishonesty: I rigorously adhere to the IT policy on scholastic conduct (IT Bulletin, p. 20). I sincerely regret that many of you will labor under the unfair suspicion generated by a very small percentage of the student community which regards cheating as an acceptable method for improving one's grade. In an attempt to minimize the effects of such conduct, all exams must be taken in pen, the student will sign the exam, test proctors will be authorized to move people during the exam without explanation, and we will randomly photocopy exams for use in subsequent regrades and to verify signatures.
Exam Files: The Chemistry Department maintains a file of exam copies for many of its undergraduate courses. Copies of exams for previous 3302 courses should be found in this file. This file is located in Smith Hall Room 101D. Old exams and answer keys can also be found on the web site.
Group Learning Projects: The first group learning project will be a practice exam in order to familiarize you with my problem writing style. It will be due on Friday, April 17th. The second group learning project will concentrate on some more difficult problems and will be due on Tuesday, May 26th.
How to Succeed in This Class: 0. Do problems from the book. 1. Briefly (10 minutes, max) scan the material before coming to class. I will assign reading weekly. 2. Do more problems from the book. 3. Participate in class. There will be many opportunities in which to do this; take advantage of them! 4. Reread the chapter carefully concentrating on the material discussed in lecture. 5. Do still more problems from the book. This is the only way to really learn the material and do well on the exams. I suggest problems from the text below and note those that could be skipped. Try to avoid using the answer key until you have tried your best to answer the question. 6. Use your group partners for studying. It frequently helps to have to explain to someone else how you got your answer and vice versa. 7. Do those last few problems from the book. 8. My office hours and the tutor hours are meant for you, use them as much as you need to.
Approximate Course Schedule
Connect with 3301
Ethers and Epoxides
IR and MS, Alkynes
Alkynes, Conjugated Systems
Group Learning Project Due (4/17)
Exam I (4/24)
Reactions of Aromatics
Ketones and Aldehydes
Exam II (5/15)
Carboxylic Acid Derivatives
Enols and Enolates
Exam III (6/5)
Recommended Problems: I recommend that you do as many problems as you possibly can. It is especially helpful to do the "in-chapter" problems as you are reading in order to reinforce the material. I have assigned "end-of-chapter" problems as it is sometimes difficult to figure out which problems are relevant to the material covered in class.Chapter 10: 39, 43, 44, 45, 48, 49, 53
Chapter 13: 29(f, g, h, i, j, k, l, m, n); 32, 33, 34,
Chapter 11: 16, 19, 21, 24, 12-45, 12-46, 12-47, 13-35,
Chapter 14: 32, 34 (c, d, i, j, l, m), 35(a, i, h, j),
37 (a, b, e, f, g), 43, 45
Chapter 15: 26, 29, 30, 31 (a, b), 32 (a, b, c, d, f, i),
Chapter 16: 25 (b, c, d, e, g), 28, 29, 33, 34, 36, 39,
Chapter 17: 43, 45, 46, 48, 49, 50, 54, 56, 59, 62, 63
Chapter 18: 36, 37, 40, 41, 42, 45, 46, 48, 51, 55, 58,
Chapter 20: 28, 31, 32, 33, 34, 36, 38, 39, 40, 42
Chapter 21: 44, 47, 48, 49, 52, 53, 56, 60, 65, 66, 67,
Chapter 19: 40, 43, 44, 45, 46, 47, 48, 49, 50, 53, 55,
57, 58, 59, 60
Chapter 22: To be assigned if/when this chapter is reached.
Published by the Department of Chemistry.
Updated March 13, 1998, cjc
© 1998 by the Regents of University of Minnesota, Department of Chemistry. All rights reserved.
The University of Minnesota is an equal opportunity educator and employer.
|
OPCFW_CODE
|
Windows internals, sixth edition, part 2 available separately. Users enjoy search toolbars and pdf readers that become part of internet explorer. Contents at a glance windows internals, sixth edition, part 1 available separately chapter 1 concepts and tools chapter 2 system architecture chapter 3 system mechanisms chapter 4 management mechanisms chapter 5 processes, threads, and jobs chapter 6 security chapter 7 networking windows internals, sixth edition, part 2. To solve this problem, i recommend you use a free little application called gwx control panel to disable the update permanently. Download and install the software for your operating system on the computer or device you are connecting from and on the windowslinuxmac computer you want to connect to. I cant think of many technical books where ive gone out and bought multiple editions and there certainly isnt another where id consider buying 4 editions but for this. The nxlog community edition is used by thousands worldwide from small startup companies to large security enterprises and has over 70,000 downloads to date. You can easily share a remote desktop or control a server over any network. Windows internals, part 1, 7th edition pdf free download. Nx will not work correctly on an old fat file system.
Simply click the download button for the product you wish to evaluate. View and download ge networx nx4 installation and startup online. Windows operating system internals frequent speaker at technical conferences microsoft teched, it forum, pdcs, microsoft most valuable professional 1993, 2005 books windows internals, 4th edition pdf version ships with server 2003 resource kit inside windows 2000, 3rd edition inside windows nt, 2nd edition windows nt for openvms. Page 1 ul approved for ansisia cp012000 networx series nx4 control panel installation and startup page 2 this document shall not be reproduced in whole or in part nor shall its contents be disclosed to any third party without the written approval of ge security. What operating system do you use siemens nx unigraphics cad. Maybe its just me but when i glance at the cover it looks like the book is about windows 7 internals only. As of today we have 110,518,197 ebooks for you to download for free. Enterprise products evaluation all products of the nomachine for the enterprise range are available as evaluation software without registration for 30 days, and install with all corresponding features available. Find out how your company can benefit from plm built on salesforce.
Windows internals6 sixth editionpart 2mark russinovich david a. Sample chapters from windows internals, sixth edition, part 1. Nx is a cross platform, script driven engine for easy network sockets programming. Sysinternals suite windows sysinternals microsoft docs. Networx nx4 manuals manuals and user guides for networx nx4. In this white paper, learn how you can configure freely without requiring any custom development, and quickly update configurations as your needs evolve. The book provides numerous code examples included on diskette, as well as the source for a complete, usable filter driver. It saw mark russinovich move on to a fulltime job at microsoft where he is now the azure cto and the addition of a new coauthor, alex ionescu. System architecture, processes, threads, memory management, and more, 7th. It looks like the same layout was used for the 6th edition, i want to suggest the use of a different layout for the next editions. How to manage and set up windows server log with nxlog check the log file of nxlog c. He is a regular speaker at microsoft conferences, including technet and pdc.
The testlimit utility which you can download from the windows internals book webpage can. Published by microsoft press a division of microsoft corporation one microsoft way redmond, washington 980526399 copyright 2005 by david solomon, mark russinovich. Alex ionescu is a chief software architect and consultant expert in lowlevel system software, reverse. Windows sysinternals windows sysinternals microsoft docs. Choose from enterprise desktop for hassle free remote control of any computer, cloud server for secure. To install windows 10 on your pc, the processor cpu must support the following features. Though there already exist excellent work in describing its internals 34,39,38, there still is a lack of information about some lower level mechanisms. Windows internals book windows sysinternals microsoft docs. Windows internals and software driver development overview this seminar is designed for software engineers and architects who need to understand the details of the major windows architectural components, as well as how to create software only kernelmode drivers that serve as extensions to the windows operating system. The sysinternals web site was created in 1996 by mark russinovich to host his advanced system utilities and technical information. No annoying ads, no download limits, enjoy it and dont forget to bookmark and share the love. System architecture, processes, threads, memory management, and more.
Doubleclick on the disk image to open it and doubleclick on the nomachine program icon. The suite is a bundling of the following selected sysinternals utilities. Sysinternals freeware microsoft windows internals, 4th. Please consult your windows documentation or help for questions on file systems and disk partitions. Board installation the metal enclosure should be installed with the door opening from top to bottom. What operating system do you use siemens nx unigraphics. Windows sysinternals administrators mark russinovich and aaron margosis windows internals fifth. The sysinternals troubleshooting utilities have been rolled up into a single suite of tools. I have run nx on linux, mac os and windows both 32 bit and 64 bit. The messages that the client and the server exchange are defined by the x protocol. The nxlog community edition is open source and can be downloaded free of charge with no license costs or limitations. This report will show expander trouble device zero 0. Its flexibility allows it to be utilized in various setups and.
Troubleshooting with the windows sysinternals tools. Hence, in the following we will investigate the memory management of windows. How to manage and set up windows server log with nxlog. As you know, if you hide the update, it will unhide itself automatically within a week. April 28, 2020 download coreinfo 367 kb introduction. While adding windows server 2008 device on the nreporter, please choose log audit for facility. Windows nt file system internals presents the details of the nt io manager, the cache manager, and the memory manager from the perspective of a software developer writing a file system driver or implementing a kernelmode filter driver. Delve inside windows architecture and internals and see how core components work behind the scenes. This file contains the individual troubleshooting tools and help files.
Chapter 1 getting started with the sysinternals utilities. Turn nx on or off using the button at the top right of the nx window. Windows internals book series and has taught his windows internals class to thousands of developers and it professionals worldwide, including microsoft staff. Windows internals, sixth edition, part 2 available fall 2012. Design parametric and direct solidsurface modelling. Whether youre an it pro or a developer, youll find sysinternals utilities to help you manage, troubleshoot and diagnose your windows systems and applications. The nxlog community edition is an open source log collection tool available at no cost. Sysinternals freeware microsoft windows internals, 4th edition. Try nonedu versions or importing iges, inventor or solidworks files.
It is available for various platforms including windows and gnulinux. It provides unconstrained rapid development, and its ideal for writing,testing,experimenting with protocols,apps,servers or misc utils that communicate over a network. Nx allows usage of xconnections over real slow links like a modem or isdn. The tool was developed because there really are no alternative ways to demonstrate the inner workings of this rather complicated. May 12, 2017 maybe its just me but when i glance at the cover it looks like the book is about windows 7 internals only. Architecture, security challenges and solutions working paper pdf available march 2016 with 29,493 reads. Check file system window ntfs file system is required for nx and all related products. Freenx is a free server implementation of oss nx components. Please refer to the current ge security product catalog for detailed warranty information.
An x client is the local or remote process that requests from an x server graphics handling at the display that the server controls. Sophisticated software allowing up to 99 users to interface with up to 48 zones and eight partitions. It does not contain nontroubleshooting tools like the bsod screen saver. It is strongly suggested that prior versions of nx first be removeduninstalled before nx10. If your computer does support these extensions, you could still. Microsoftdriver developers are free to ignore them if they choose to do so, and. The nx4 control panel will send an expander trouble once each hour if it senses that no devices have been enrolled. Feb 02, 2015 i have run nx on linux, mac os and windows both 32 bit and 64 bit. Most cpus have support for these features, so if you receive this error, it is likely because the nx feature is not enabled on your system. Fix for audioissue, support for root access on windows 10 mobile build 10. No matter where you are, nomachine makes it safe and easy to quickly access a remote desktop. As for the installation, the setup wizard will take you through all steps necessary for updating nomachine. The x window system follows the clientserver paradigm.
Windows operating system, which obviously is not an opensource system. In book 1, youll plumb windows fundamentals, independent of platform server, desktop. For quick onoff access while the nx window is hidden, rightclick the nx tray icon windows or menu bar os x. In addition to the source code, binary installer packages are available for the various platforms below. Windows internals, sixth edition, part 1 ebook zenk security. Check with coreinfo on what your cpu doesnt support, or, you can say lol, what the heck and simply apply both patches to bypass nxpaesse2 altogether. Contents at a glance windows internals, sixth edition, part 1 available separately chapter 1 concepts and tools chapter 2 system architecture chapter 3 system mechanisms. The vast majority of users are going to find the windows program the most familiar and robust as most other users are running it on windows. Windows internals, sixth edition, part 2 ebook index of es. Keypad security system extender card reader control unit.1449 1249 1398 540 1326 688 420 1337 1129 540 153 855 704 355 666 1139 812 1433 261 655 794 534 199 927 965 21 76 935 897 459 1019 1332 1396 605 387 479 600 1323 454 403 953 412 1049 615 839 1285
|
OPCFW_CODE
|
[Glibc] Configure modulemap for target, not host
What's in this pull request?
The current Glibc CMakeLists.txt uses the host machine to determine which modulemap to use. The same modulemap can't be used for all platforms because headers are available in different locations on
different platforms.
Using the host machine to determine which modulemap to configure and place at a specific path in the resource dir is fine, so long as:
Only one Glibc is being compiled in a single CMake invocation.
The target machine needs the same modulemap as the host.
https://github.com/apple/swift/pull/1442 violates both of these assumptions: the Glibc module for both Linux and Android is compiled at the same time, and the Android target can't use the Linux modulemap.
This commit instead uses the target(s) to determine which modulemap to use. The modulemap is configured and placed in an OS- and architecture-specific directory in the resource dir. The path to that modulemap is referenced by the ClangImporter (since it is no longer at a path that is automatically discovered as an implicit modulemap).
lldb-import-test does not appear to pass a valid target triple to ClangImporter, so we disable tests that use it outside of OS X.
#1442 will build upon the work here by adding an Android module map:
diff --git a/stdlib/public/Glibc/CMakeLists.txt b/stdlib/public/Glibc/CMakeLists.txt
index c152ab5..f898636 100644
--- a/stdlib/public/Glibc/CMakeLists.txt
+++ b/stdlib/public/Glibc/CMakeLists.txt
@@ -18,6 +18,8 @@ foreach(SDK ${SWIFT_SDKS})
else()
set(GLIBC_ARCH_INCLUDE_PATH "${GLIBC_INCLUDE_PATH}")
endif()
+ elseif("${SDK}" STREQUAL "ANDROID")
+ set(GLIBC_INCLUDE_PATH "${SWIFT_ANDROID_SDK_PATH}/usr/include")
endif()
# Verify that the location is valid.
@@ -30,6 +32,8 @@ foreach(SDK ${SWIFT_SDKS})
set(modulemap_path "${CMAKE_CURRENT_BINARY_DIR}/${sdk}/${arch}/module.map")
if("${SDK}" STREQUAL "FREEBSD")
configure_file(module.freebsd.map.in "${modulemap_path}" @ONLY)
+ elseif("${SDK}" STREQUAL "ANDROID")
+ configure_file(module.android.map.in "${modulemap_path}" @ONLY)
else()
configure_file(module.map.in "${modulemap_path}" @ONLY)
endif()
diff --git a/lib/ClangImporter/ClangImporter.cpp b/lib/ClangImporter/ClangImporter.cpp
index 135f575..1098d51 100644
--- a/lib/ClangImporter/ClangImporter.cpp
+++ b/lib/ClangImporter/ClangImporter.cpp
@@ -351,12 +351,15 @@ getNormalInvocationArguments(std::vector<std::string> &invocationArgStrs,
});
}
} else {
// The module map used for Glibc depends on the target we're compiling for,
// and is not included in the resource directory with the other implicit
- // module maps. It's at {freebsd|linux}/{arch}/glibc/module.map.
+ // module maps. It's at {android|freebsd|linux}/{arch}/glibc/module.map.
auto GlibcModuleMapPath =
llvm::SmallString<128>(searchPathOpts.RuntimeResourcePath);
- if (triple.isOSFreeBSD()) {
+ if (triple.isAndroid()) {
+ llvm::sys::path::append(GlibcModuleMapPath, "android");
+ } else if (triple.isOSFreeBSD()) {
llvm::sys::path::append(GlibcModuleMapPath, "freebsd");
} else {
llvm::sys::path::append(GlibcModuleMapPath, "linux");
Resolved bug number: None
Before merging this pull request to apple/swift repository:
[ ] Test pull request on Swift continuous integration.
Triggering Swift CI
The swift-ci is triggered by writing a comment on this PR addressed to the GitHub user @swift-ci. Different tests will run depending on the specific comment that you use. The currently available comments are:
Smoke Testing
Platform | Comment
------------ | -------------
All supported platforms | @swift-ci Please smoke test
OS X platform | @swift-ci Please smoke test OS X platform
Linux platform | @swift-ci Please smoke test Linux platform
Validation Testing
Platform | Comment
------------ | -------------
All supported platforms | @swift-ci Please test
OS X platform | @swift-ci Please test OS X platform
Linux platform | @swift-ci Please test Linux platform
Note: Only members of the Apple organization can trigger swift-ci.
@swift-ci Please smoke test
Hmm, looks like a legitimate failure when running the swift-package-manager tests. I'll check it out, thanks!
Any comments on the overall approach would be very much appreciated!! :bow:
I haven't reviewed CMake code line-by-line, but this looks like the right direction to me.
Excellent, thanks @gribozavr! That's reassuring.
@tkremenek @gribozavr Updated! The tests should now pass, if one of you could do me the favor of asking @swift-ci to please test.
@swift-ci Please test
Hmm, looks like some OS X errors this time. Strange, since this shouldn't affect OS X at all. I'll look into it more--thanks!
Updated once again. I added a check such that -fmodule-map-file= is only specified if the Glibc module map actually exists. This fixes the test failures on OS X, which were caused because swiftc -target x86_64-unknown-linux-gnu -emit-ir was being invoked, even though the compiler was not built to support that target (and so the module map did not exist at the specified path).
This got all my tests passing on both OS X and Linux locally--fingers crossed it also works on CI!
@gribozavr, is it alright that I left a FIXME in there? Also, an alternative approach would have been to modify the tests such that swiftc -target x86_64-unknown-linux-gnu -emit-ir is only tested on Linux hosts. Would this be preferable?
It would be best to keep the current extended coverage.
Excellent. Well, this pull request has all the same test coverage, but can also handle the Android changes in #1442 -- whaddya think? Also, could you kick off a test?
@swift-ci Please test
CC @jrose-apple for Clang importer changes.
CMake changes LGTM modulo comments.
@jrose-apple Thanks for the review!! I hope I've addressed all of your comments--I'm very new to C++, let alone llvm/ADT, so I can't be sure I did. :sweat_smile:
@gribozavr All tests pass locally for me. Could you ask @swift-ci to test? Also, any additional feedback here?
@swift-ci Please test
I wonder if amending the commit to use Twine caused CI to abort the test run...?
It's probably still running, but I guess GitHub can't report status for it. I'll trigger it again.
@swift-ci Please test
@jrose-apple @gribozavr Hooray, everything passed! Does this look good to merge?
Importer changes look good to me. :+1:
CMake LGTM!
Well, if everything looks fine to everyone and the CI says it's alright, I guess I've got no choice but to merge this! :grin:
Thanks again for the reviews! This will be a big help for #1442.
|
GITHUB_ARCHIVE
|
This is an article written by Kyle Weicht, Software Engineer with the Visual Computing Engineering team at Intel Corporation. Sample code to accompany the article can be downloaded from the GitHub repository
This article outlines how you can efficiently implement the following rendering techniques on mobile devices using OpenGL* ES:
Forward rendering is conceptually the simplest rendering model. Lights are defined first and then objects in the scene are rendered in some order with light calculations done on each object as it is drawn. Although simple to implement, this technique slows down as more lights are introduced to the scene, and most realistic scenes require several lights.
Deferred lighting and deferred shading are techniques that efficiently render a scene with many more lights than traditional forward rendering. Both deferred techniques defer the lighting calculations until every object has been drawn, then applies all lighting in one pass. There are various trade-offs for each technique; however, both deferred rendering techniques can be used to achieve the same goal.
Note This article assumes familiarity with deferred rendering techniques. For a more in-depth description of deferred rendering, see Deferred Rendering in Killzone 2*.
Multiple render targets for full deferred shading are supported in OpenGL ES version 3.0 with Android* 4.3 or iOS* 7. Prior to version 3.0, OpenGL ES did not support multiple render targets, one of the requirements for deferred shading. This lack of support forced game developers to use either forward rendering or deferred lighting.
The following sections describe the implementation processes for:
The scene used in this sample is designed to show one of the largest advantages of deferred rendering – many lights. The scene consists of a lighthouse and beach with various colored lights floating around, lighting the scene.
The following process outlines how the scene is rendered using forward rendering. The forward rendering technique is implemented with an RGBA8 back buffer and a 24-bit depth buffer.
The following process outlines how the scene is rendered using deferred lighting. The deferred lighting technique is implemented with two 32-bit color buffers and a 24-bit depth buffer.
This three-pass approach uses only one color buffer at a time, meaning it will work on an OpenGL ES 2.0 device. The normals are encoded in pass 1, allowing the X and Y components to be stored with 16-bit precision in the 32-bit buffer. The final pass does the actual lighting by reading the lighting value from the lighting buffer and using the value as the light color.
Note The lights are rendered as world-space cubes. In order to perform the lighting, the pixel’s world position is calculated from its screen position and depth at that pixel.
The following process outlines how the scene is rendered using deferred shading.
This two-phase rendering approach uses multiple render targets to make up the G-buffer. The first RGBA8, color buffer, is used for storing the albedo color. The second RG16, color buffer, is used for storing the encoded normals. The depth buffer is a 24-bit buffer.
The deferred shading approach is slightly simpler than the deferred lighting technique; however, it requires multiple render targets, which is only supported in OpenGL ES 3.0 and beyond. The lights are rendered the same way as they are in the deferred lighting approach, as world-space bounding boxes. As well, the normal encoding and position reconstruction are the same as deferred lighting.
Intel's compilers may or may not optimize to the same degree for non-Intel microprocessors for optimizations that are not unique to Intel microprocessors. These optimizations include SSE2, SSE3, and SSSE3 instruction sets and other optimizations. Intel does not guarantee the availability, functionality, or effectiveness of any optimization on microprocessors not manufactured by Intel. Microprocessor-dependent optimizations in this product are intended for use with Intel microprocessors. Certain optimizations not specific to Intel microarchitecture are reserved for Intel microprocessors. Please refer to the applicable product User and Reference Guides for more information regarding the specific instruction sets covered by this notice.
Notice revision #20110804
|
OPCFW_CODE
|
Public doc "Update the OS image for your scale set"
Hello Team, Please assist on this matter.
Customer's main request is to change the image of an Existing VMSS for a custom image from a VM.
I was following Modify an Azure virtual machine scale set - Azure Virtual Machine Scale Sets | Microsoft Docs, however this is not working. I attempted this on Linux and Windows VMSS and it did not work in any of those cases. I was obtaining these errors on both VMSS:
_PS /home/ariel> az vmss update --resource-group VMSStestRG --name vmss2 --set virtualMachineProfile.storageProfile.imageReference.id=/subscriptions/{subscriptionid)/resourceGroups/Sundays/providers/Microsoft.Compute/images/TEST2-image-20220311163650
(OperationNotAllowed) The Image type for a Virtual Machine Scale Set may not be changed.
Code: OperationNotAllowed
Message: The Image type for a Virtual Machine Scale Set may not be changed._
_PS /home/ariel> Update-AzVmss -ResourceGroupName VMSStestRG -VMScaleSetName vmss2 -ImageReferenceVersion 16.04.201801090
Update-AzVmss: The platform image 'canonical:0001-com-ubuntu-server-focal:20_04-lts-gen2:16.04.201801090' is not available. Verify that all fields in the storage profile are correct. For more details about storage profile information, please refer to https://aka.ms/storageprofile
ErrorCode: PlatformImageNotFound
ErrorMessage: The platform image 'canonical:0001-com-ubuntu-server-focal:20_04-lts-gen2:16.04.201801090' is not available. Verify that all fields in the storage profile are correct. For more details about storage profile information, please refer to https://aka.ms/storageprofile
ErrorTarget: imageReference
StatusCode: 404
ReasonPhrase: Not Found
OperationID : 2a3f9044-ee16-4fc0-9450-3b211806d48f
PS /home/ariel>_
Asked for assistance from TA Cesar Solis; we proceeded to change the Image Reference directly in the Resource Explorer. It did not allow us to proceed.
Took the error and searched on WIKI(TSG). Found this Create VMSS from SIG Image Fails_Deploy - Overview (visualstudio.com); which explains that If VMSS is not created from a SIG image, it is not possible to update it to be a shared image one.
Still, proceeded to keep testing:
Created an image from a windows VM Gen2
Made it on azure compute gallery
Once completed went to VMSS >Operating System> Migrate to Azure Compute Gallery
Got this error(Same result regardless of whether the image definition is Generalized or Specialized.)
Finally, went to a new VM, created an image from it as an Image Definition.
Created a VMSS from it(from Azure Compute Gallery, previously called SIG).
Once VMSS was created, went to Operating System> Change Image reference
Selected an image from Azure Compute Gallery
Succeeded on the operation.
From this I learned the following:
Public document Modify an Azure virtual machine scale set - Azure Virtual Machine Scale Sets | Microsoft Docs makes absolutely no reference to this kind of obstacles or specifications, I believe this needs to be updated.
It is not possible to update image between OS that are different. They have to be:
- Same GEN
- Same OS
- Same OS state between VMSS and VM from where the Image is taken.
- Eligible images for this process can only be Image Definitions on Azure Compute Gallery.
Thanks and I hope this collaboration helps somehow on future related topics.
Document Details
⚠ Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.
ID: 706ceb71-6add-c678-bdb5-ee0d9386488b
Version Independent ID: bf4ab279-85fa-6a7a-8756-24af74fe1444
Content: Modify an Azure virtual machine scale set - Azure Virtual Machine Scale Sets
Content Source: articles/virtual-machine-scale-sets/virtual-machine-scale-sets-upgrade-scale-set.md
Service: virtual-machine-scale-sets
GitHub Login: @ju-shim
Microsoft Alias: jushiman
Thanks for the feedback! I have assigned the issue to the content author to investigate further and update the document as appropriate.
Any update on this? We ran into the same issue.
az vmss update --resource-group xxx--name yyy --set virtualMachineProfile.storageProfile.imageReference.id=/subscriptions/zzz/resourceGroups/weu-co-rsg-mds-cqf/providers/Microsoft.Compute/images/ubuntu2004-32086
returns
(OperationNotAllowed) The Image type for a Virtual Machine Scale Set may not be changed.
C:\Users\Alex>az version { "azure-cli": "2.30.0", "azure-cli-core": "2.30.0", "azure-cli-telemetry": "1.0.6", "extensions": { "azure-devops": "0.18.0" } }
Hello,
Thank you for reaching out - we are taking a look at the article in question and the lessons learned by the original reporter. I'll hopefully have an update soon to this thread and edits to the article.
@alex-sameli - Although I am still looking into if this info is true, it seems that Ariel found the following, which may be helpful to you:
It is not possible to update image between OS that are different. They have to be:
- Same GEN
- Same OS
- Same OS state between VMSS and VM from where the Image is taken.
- Eligible images for this process can only be Image Definitions on Azure Compute Gallery.
Thank you for your patience.
Closing this issue out. Instructions were provided above. The team will revisit the documentation at a future date. Thank you.
#please-close
|
GITHUB_ARCHIVE
|
Problem Description: We have a production database and need to configure a GoldenGate uni-directional replication in real time without downtime. Is there a best practice, white paper, or knowledge document that would help us in designing the transition? We can start with a pair of databases which is Data Guard Physical replication, or start with a brand new target database.
Generic steps provided by Metalink:
Implementing Oracle GoldenGate 0) Install OGG on the source and target systems 1) Establish what tables are to be replicated from the source and what tables they will be mapped to on the target. 2) Run defgen against the source tables and ftp the created defs file (call it defs.def for example) to the dirdef subdirectory on the target install. 3) Create an ongoing capture extract to capture changes to the archive / redo logs. 4) Add the extract via ggsci 5) add the exttrail via ggsci 6) Create a datapump to read the local trail and write a remote trail to the target machine. 7) Start the local capture and datapump 8) Create an ongoing replicat. Do not start it. 9) Initial Load the target database This can be done with native or local utilities or with GoldenGate as follows Begin Initial Load: Much of what you will do depends on having enough resources and space. Divide up your tables so that you can create several (say 10) extracts and their datapumps. Create a sourceisfile extract for each grouping of tables. They can be run in parallel if you have the horsepower. Create an initial load replicat for each extract/datapump thread. There are 3 ways I recommend to do the initial load with OGG First, do the normal prescribed manner writing to the target side over TCP or Second, write to local disks and ftp the written trails to the target side. Run the initial load replicats against the trail files. The ftp method is faster. or, if you have disks connected to both source and target databases, there is a faster method. This method is especially useful for very large databases. Begin initial load method Have your sourceisfile extracts write their trails to shared disks. You can start the initial load replicats while their trails are still being written. This reduces overall time. You are loading the target at the same time you are unloading the source. When all initial load replicats are finished, they can be stopped. 
End Initial Load Start the ongoing replicat(s) with HANDLECOLLISIONS turned on. When the replicats catch up, stop them and remove HANDLECOLLISIONS, and restart them. You should be migrated. You can divide up your tables into as many extracts as you have memory and CPU power to do so. If you use the ftp method, you will need at least 1.3 times the amount of data in your database on the source and on the target for intermediate trails. If you use the shared disk method, and start the initial load replicats while extraction is going on, you can get by with a lot less space. N.B. Creating initial load trails for this size of data requires a special technique. Use the options megabytes 2000, maxtrails 900 for the extfile. Make the trail file name two characters. This will cause the initial load trails to look just like normal trails. Your initial load replicats will look just like regular replicats because they will be reading a trail of files, not a single file.
|
OPCFW_CODE
|
I am having a few issues with EF Core at the moment. I have some data that I need to delete, and I am struggeling to see how the fluent API works, exactly in regards to the
Considering the classic blog/post scenario from microsofts own websites, I wonder what entity, exactly the
OnDelete() is 'targeting' (for the lack of a better word). In some instances it seems to be the blog, in others, the post. Can the cascade delete be defined from both sides (so that the posts are deleted when the parent Blog is)? If so, I imagine the code should look like this:
model.Entity<Post>().HasOne(p => p.Blog).WithMany(b => b.Posts).HasForeignKey(p => p.BlogId).OnDelete(DeleteBehavior.Cascade)
As I understand it, this is saying "When a Blog is deleted, first delete all posts referencing this blog", meaning the
OnDelete(DeleteBehavior.Cascade)applies to blog, not to post.
But is this the same then?
model.Entity<Blog>().HasMany(b => b.Posts).WithOne(p => p.Blog).OnDelete(DeleteBehavior.Cascade)
Does OnDelete(DeleteBehavior.Cascade) apply to Post rather than Blog?
Cascade delete always works in one direction - from principal entity to dependent entity, i.e. deleting the principal entity deletes the dependent entities. And for one-to-many relationships the one side is always the principal and the many side is the dependent.
Looks like you are confused by the fluent configuration. Note that each relationship consists of two ends. The fluent configuration allows you to start with one of the ends and relate it to the other end, or vice versa, but still you are configuring (defining) a single relationship. So
Entity<A>().HasOne(a => a.B).WithMany(b => b.As)
is the same as
Entity<B>().HasMany(b => b.As).WithOne(a => a.B);
and they both define one and the same relationship. Which one you choose doesn't matter, just use single configuration per relationship in order to avoid discrepancies.
With that being said,
model.Entity<Post>().HasOne(p => p.Blog).WithMany(b => b.Posts) .HasForeignKey(p => p.BlogId) .OnDelete(DeleteBehavior.Cascade);
model.Entity<Blog>().HasMany(b => b.Posts).WithOne(p => p.Blog) .HasForeignKey(p => p.BlogId) .OnDelete(DeleteBehavior.Cascade);
are one and the same and define a single one-to-many relationship in which
Blog is the one side and
Post is the many side, the
Blog is the principal entity and the
Post is the dependent entity, hence deleting a
Blog will delete the related
|
OPCFW_CODE
|
Presentation on theme: "Encryption is a way to transform a message so that only the sender and recipient can read, see or understand it. The mechanism is based on the use of."— Presentation transcript:
Encryption is a way to transform a message so that only the sender and recipient can read, see or understand it. The mechanism is based on the use of mathematical procedures to scramble data so that it is extremely difficult for anyone other than authorized recipients to recover the original message
The formula or algorithm converts the intended data (Credit card number, Social security number etc.) into an encoded message using a key to decode or decipher the message. Plaintext:- The message that is being protected. Key:- A series of electronic signals stored on a PC’s hard disk or transmitted as blips of data over transmission lines.
PKI:- Public Key Infrastructure creates the ability to authenticate users, maintain privacy, ensure data integrity, and process transactions without the risk of repudiation. It satisfies four e-security needs. 1. Authentication 2. Integrity 3. No repudiation- procedure that prevents sender and vendor from credibly denying that they sent or received a specific message, file etc. 4. Privacy
Cryptographic techniques are a means of securely transferring data over Internet applications. It is the science of applying complex mathematics to increase the security of electronic transactions. Basic encryption relies on two components: an algorithm and a key. Encrypting information is simple: A computer program is used that has an encryption algorithm
For encryption to work, both sender and receiver have to know the rules used to transform the original message or transaction into its coded form. A set of rules for encoding and decoding messages is called a cipher. The encoded message is called a ciphertext. A message can be decrypted only if the decryption key matches the encryption key.
3 cryptographic algorithms: Message-digest algorithms Map variable-length plaintext to fixed-length ciphertext. Secret-key algorithms Use one single key to encrypt and decrypt. Public-key algorithms Use 2 different keys – public key and private key.
It is a variable value that is used by cryptographic algorithms to produce encrypted text, or decrypt encrypted text. The length of the key reflects the difficulty to decrypt from the encrypted message. EncryptionDecryption Plaintext Ciphertext Key
It is the number of bits in the key. A 2-bit key has four values (00, 01, 10, 11) in its key space. A key of length "n" has a key space of 2^n distinct values. E.g., if the key is 128 bits long (101010101010…10010101111111), there are 2^128 combinations: 340 282 366 920 938 463 463 374 607 431 768 211 456
CSC1720 – Introduction to Internet 10 Encrypted Text Original Text + Secret key = Encrypted Text Original TextSecret key + = Encryption Decryption
Use a secret key to encrypt a message into ciphertext. Use the same key to decrypt the ciphertext to the original message. Also called “ Symmetric cryptography ”. 11 EncryptionDecryption Plaintext Ciphertext Secret Key
All keys need to be replaced, if one key is compromised. Not practical for the Internet environment. On the other hand, the encryption speed is fast. Suitable to encrypt your personal data. CSC1720 – Introduction to Internet 12
Involves 2 distinct keys – public, private. The private key is kept secret and never be divulged, and it is password protected (Passphase). The public key is not secret and can be freely distributed, shared with anyone. It is also called “ asymmetric cryptography ”. Two keys are mathematically related, it is infeasible to derive the private key from the public key. 100 to 1000 times slower than secret-key algorithms. EncryptionDecryption Plaintext Ciphertext Public KeyPrivate Key
First, create public and private key Public key Private key Private key stored in your personal computer Public Key Directory Public Key Public key stored in the directory
15 Public Key Directory Text User A User B ’ s Public Key Encryption Encrypted Text
User A Encrypted Text Encrypted Text Insecure Channel User B
Encrypted Text User B ’ s Private key Private key stored in your personal computer Decryption Original Text User B
It maps a variable-length input message to a fixed-length output digest. It is not feasible to determine the original message based on its digest. It is impossible to find an arbitrary message that has a desired digest. It is infeasible to find two messages that have the same digest.
A hash function is a math equation that create a message digest from message. A message digest is used to create a unique digital signature from a particular document. Hash Function Original Message (Document, E-mail) Digest
1. RSA Algorithm:- RSA is the most commonly used public-key algorithm, although it is vulnerable to attack. Named after its inventors, Ron Rivest, Adi Shamir and Len Adleman of the Massachusetts Institute of Technology (MIT). RSA was first published in 1978. It is used for encryption as well as for electronic signatures
2. Data Encryption Standard (DES):- DES was developed by IBM in 1974 in response to a public solicitation from the U.S. Department of Commerce. It was adopted as a U.S. federal standard in 1977 and as a financial industry standard in 1981. DES is the first symmetric system to be widely adopted commercially. Any change to a message encrypted with DES turns the message into a mess of unintelligible characters. As a block cipher with a 64-bit block size, DES uses a 56-bit key to encrypt a 64-bit plaintext block into a 64-bit ciphertext.
3. 3DES:- A stronger version of DES, called Triple DES(3DES), uses three 56-bit keys to encrypt each block. The first key encrypts the data block, the second key decrypts the data block, and the third key encrypts the same data block again. The 3DES version requires a 168-bit key that makes the process quite secure and much safer than plain DES.
4. RC4 :- RC4 was designed by Ron Rivest of RSA Data Security Inc. This variable length cipher is widely used on the Internet as the bulk encryption cipher in the Secure Sockets Layer (SSL) protocol, with key length ranging from 40 to 128 bits. RC4 has a reputation of being fast, although its security is unknown. 5. International Data Encryption Algorithm (IDEA):- IDEA was created in Switzerland in 1991. It offers strong encryption using a 128-bit key to encrypt 64-bit blocks.
Cryptanalysis:- It is the science of deciphering encrypted messages without knowing the right key. 1. Chosen-plaintext attack:- The attacker can have any text or document encrypted under the unknown key. The challenge is to deduce that key. 2. Known-plaintext attack:- The attacker knows the plaintext for part(s) of the ciphertext. He or she uses this information to decrypt the rest of the ciphertext.
3. Ciphertext-only attack:- The attacker has no idea what the message contains and works primarily from ciphertext, making guesses about the plaintext. Some ciphertext data might contain a common word as a starter. Certain documents begin in a predictable way that often gives away the contents.
4. Third-party attack:- An adversary breaks into the communication line between two parties (e.g. buyer and vendor). He or she uses a separate key with each party. Each party uses a different key that is easily known to the adversary. The adversary, in turn, decrypts the transmitted documents with the right key and encrypts it with the other key before it is sent to the recipient. Neither party has any idea that their communication system has been intercepted.
|
OPCFW_CODE
|
In October 2018, the first ML-generated painting “Edmond de Belamy” sold for $432,500 at an auction at Christie’s. In order to create the painting, the art collective Obvious used an open source implementation of a generative adversarial network and trained it on 15,000 portrait images from various periods. Naturally, the question arises if generative neural networks can be successful not only in creating art, but in creating new designs for media, architecture and engineering.
It is pretty obvious that creating “Edmond de Belamy” is much simpler than designing a building or an engineering part. However, the basic approach sounds feasible even for more complex tasks: Train a neural network with many old design variants along with the associated requirements, then let the network generate a design from new requirements. In order to tolerate unsuitable designs by the algorithm, an AI-assisted Design system could create many variants and let the engineer select the best one.
Because this approach seems pretty straight forward and because AI-based methods received a lot of attention, the term AI-assisted Design quickly caught on in the engineering community. However, there is currently no clear definition of the term. In particular, it is often used for processes and algorithms that do not use machine learning at all. In this blog post, we try to sort out the relevant methods in order to attempt a definition for AI-assisted design.
The beginnings of algorithmic design: Topology optimization
Engineers have used computer-based methods to generate designs from scratch well before the advent of deep learning. Starting in the late 1990s, companies like FE-Design (now part of Dassault Systèmes) and Altair pioneered an optimization technique that was able to automatically create a new shape from a given design space along with functional variables (e.g. loads, constraints). In order to achieve this, the behavior of the part is simulated repeatedly and its topology is changed with a suitable optimization technique such as gradient-based methods or bionic algorithms. While the approach is most commonly used in conjunction with a FEA-based stress analysis, it can also be applied to CFD simulations as well as modal or thermal analyses.
The result of the topology optimization is typically a bionical lattice structure with a discrete checkerboard-like appearance. In this form, such an optimized part is neither editable in a CAD system nor manufacturable. This is why topology optimization was initially only used to inspire engineers to come up with new designs. In the subsequent design process, the engineer would adopt some features of the optimized geometry while also taking into account other design objectives such as aesthetics or manufacturability. In this stage, topology optimization was thus a tool used in the conceptual phase of the design process.
From topology optimization to generative design
Designs that were inspired by topology optimization quickly became popular especially in lightweight applications. However, the process of deriving the final design involved a lot of manual effort by the design engineer. That’s why current software tools use topology optimization as part of a larger generative design process. Within this process, engineers first establish a definition of their design intent in terms of goals and constraints. New design variants are then generated using topology optimization along with some post-processing such as smoothing. The engineer can compare these suggestions and can select the most promising designs. In a final step, these variants are converted into manufacturable and editable designs and the best one is chosen.
While the concrete steps may vary, there are many different software tools available that support similar types of generative design workflows. The crucial steps that set these workflows apart from vanilla topology optimization are functionalities that ensure the manufacturability of the design. For additive manufacturing, this step can be as simple as applying a smoothing filter in a post-processing step. However, typically this involves much more complex design changes. It is quite difficult to establish manufacturability criteria or even enforce manufacturability of a general design. This is still an active area of research and a promising application for machine learning.
In practice, it is often not apparent which algorithms are used behind the scenes in the commercially available generative design software. In particular, it is typically unclear if any data-driven or machine learning based methods are used at all. Nonetheless, these solutions are often touted as AI-assisted design workflows. In reality, this always means that the approach is based on topology optimization and it sometimes means that the workflow is supported by machine learning.
It is a rather straightforward routine work for experienced engineers to assess the manufacturability of a design. However, it is difficult to encode the necessary geometric intuition into rule-based software. In contrast, machine learning can capture this implicit knowledge. Many considerations in the design process rely on this kind of geometric intuition: Is the design aesthetically pleasing? Does the placement of ribs or beads make sense? Is the part easy to grip for a robot? Will the tooling for the part be complicated? These questions arise not only for parts that are created within generative design workflows, but also within classical design processes.
Machine learning tools that are able to automatically assess such criteria can both speed up the design process and boost quality. They can in particular provide a safety net that checks the design and highlights potential errors for the engineer. Along with the warning, the ML-based system can also display similar examples from past designs. Based on this information, the engineer can then choose to ignore the findings or to correct the design. Through the feedback from the engineer, the system can continuously learn to identify not only general design patterns, but also application-specific design guidelines.
The role of generative AI
We have seen that current generative design workflows in engineering work very differently from the process that was used to create “Edmond de Belamy”. In engineering applications, new designs are discovered by relying on physics-based models and optimization techniques. Machine learning is not used to create new concepts, but rather to infuse general knowledge about manufacturability or other design objectives. In contrast, “Edmond de Belamy” was created in a purely data-driven way. This raises the question of whether this kind of generative AI will play a role in engineering in the near future.
Although generative ML methods such as Generative Adversarial Networks already achieve impressive results in image, audio and video applications, they do not yet work well for 3D engineering designs. However, given the tremendous progress over the last years it is highly likely that this will change in the near future. This means, that machine learning will not only be able to detect possible errors in a design, it might also generate proposals for a correct design. Such kinds of algorithms can probably be trained on the very same data that is collected when using ML as an error checking tool.
Even under the assumption that the current generation of generative ML methods will continue to improve very quickly, purely data-driven approaches will probably be limited by two factors. First, such a process will only be able to essentially generate sophisticated variants of known designs. Second, these algorithms work by learning distributions and correlations within historic data; they are not directly capturing design intent and causal relationships. This means that while these algorithms will be able to fix small design errors by comparing local patterns over a comparable set of historic designs, they will not be able to globally adapt a design to fundamental changes to the requirements.
In order to overcome these two limitations, the continuous evolution of currently available ML methods will most likely not be enough. Instead, general breakthroughs in AI technology will be necessary. In the meantime (and probably for a very long time), the current approach makes absolute sense: Human engineers use their contextual knowledge and imagination to create a basic design layout. Physics-based methods are used to generate optimized part shapes within this layout. ML-based methods provide manufacturing constraints and quality control for automated workflows.
Can AI inspire engineers to create better designs?
We have seen in our previous blog post that quality checking and small task automation are great use cases for ML-based methods. In this post, we established that the creative nature of generative design workflows is based on topology optimization and not on generative ML algorithms. This raises the question of whether AI technology is at all useful as a creativity tool for engineers.
I would answer this with a resounding “yes”. While it is true that the technology can only detect similarities and correlations and not causal relationships, these functionalities can be very powerful when combined with human knowledge, intuition and imagination. In a future blog post, we will detail how ML-based similarity measures enable engineers to generate new ideas based on historic design as well as experimental data.
|
OPCFW_CODE
|
- A Rich Mix of PHP Features
- What Is PHP? What Does It Do?
- A Tour of the Main PHP Features in Dreamweaver CS5
- What You Have Learned
What Is PHP? What Does It Do?
PHP is a server-side technology that builds web pages dynamically. Let’s say you have a product catalog. Instead of updating your web pages each time a product is added or removed, you can store the details in a database and use PHP to query the database and build the page automatically. Nor do you need to create a separate page for each product. Just build one page, and PHP fills in the details. Other uses of PHP include creating login systems, uploading files, and sending emails. Just about every online store, news website, blog, or social networking site uses PHP or a similar server-side technology.
<p>The time in London is <?php $now = new DateTime(); $now->setTimezone(new DateTimeZone('Europe/London')); echo $now->format('g.i a'); ?></p> <p>In Los Angeles, it's <?php $now->setTimezone(new DateTimeZone('America/Los_Angeles')); echo $now->format('g.i a'); ?></p>
The PHP code is embedded in a couple of HTML paragraphs between <?php and ?> tags. Even if you don’t understand how it works, you can probably guess that this code has something to do with dates and time zones. In fact, it displays the current time in London and Los Angeles. It doesn’t matter where you are or when you load the page, as long as the server clock is set correctly, you will always get the correct time in those two cities. If you right-click to view the source code in a browser, you see only the HTML output. All the processing is done on the web server.
In this example, the PHP code does all the work itself. But in many cases, PHP acts as an intermediary to a database. The following diagram outlines the basic process.
This is what happens when a browser requests a web page that uses PHP:
- The web server sends the page to the PHP engine—which resides on the server— for processing.
- If the PHP script doesn’t need to communicate with the database, the PHP engine generates the HTML output, and the web server sends it back to the browser.
- If the script needs to interact with the database—getting the results of a search, or inserting or updating data—the PHP engine communicates with the database server. When the results come back, the PHP engine puts everything together, and the web server sends the resulting web page back to the browser.
A lot goes on, but most requests take only a few microseconds, and then the web page is on its way to the browser with no perceptible delay from the user’s point of view.
Why choose PHP?
PHP isn’t the only server-side technology available. In fact, Dreamweaver has varying degrees of support for seven. Most have similar capabilities, and choosing which is the most suitable for your circumstances isn’t always easy. However, PHP has the following advantages:
- PHP runs on Windows, Mac OS X, and Linux. With only a few minor exceptions, code written on one operating system works on any of the others.
- It’s open source and free.
- It’s widely available.
- It’s relatively easy to learn.
- There’s a large community of active users, so help is rarely far away.
- It’s simple enough to incorporate into a small website, yet powerful enough to drive some of the busiest websites, including Facebook, Wikipedia, and Yahoo!
- In a survey of media executives by the Society of Digital Agencies (SoDA), nearly 50 percent said they regarded PHP as an important tool for their company in 2010. The figure for ASP.NET was 32 percent. Fewer than 10 percent said they regarded Ruby on Rails as important for their company.
- In the same survey, more than 50 percent said they would be hiring people with PHP skills in 2010. The only web-related skills in greater demand were Flash and ActionScript.
So are there any disadvantages in choosing PHP?
Comparing server-side technologies is difficult, but the main disadvantage of PHP is, paradoxically, that it’s easy to learn. Many people copy scripts from online tutorials without understanding the code, often leaving gaping security holes in their websites. PHP is as secure as any other server-side technology, and security-related bugs are usually dealt with very quickly. But just like the electricity in your house, it’s safe only insofar as it’s used and maintained correctly. Fortunately, it’s not difficult to write secure code, and there is emphasis on security throughout this book.
Which database should I choose?
More often than not, PHP is used in conjunction with MySQL, the most popular open-source database, which is fast, powerful, and well suited for use in websites. It’s the database that runs WordPress, Drupal, and Joomla! and is also used by high-traffic websites like Flickr, Facebook, and YouTube.
MySQL is currently owned by Oracle Corporation, one of the leading database software companies, but the Community Edition of MySQL is free. The functionality of the Community Edition is identical to the for-purchase Enterprise version. The only difference is that no support is offered with the free version. But that’s rarely a problem because of the active community willing to offer help online.
Like PHP, MySQL works on all the main operating systems, so you can develop on one system and later transfer your database to another. Also, most hosting companies offer PHP in combination with MySQL. For all these reasons, the combination of PHP and MySQL is used in this book.
PHP and MySQL have become so closely connected that many people think you can’t have one without the other. Unlike love and marriage in Frank Sinatra’s 1950s hit, you can have PHP without MySQL. PHP works with all the main database systems, including Microsoft SQL Server, Oracle, and PostgreSQL. You can easily adapt much of the code in later lessons to work with the database of your choice.
|
OPCFW_CODE
|
Kyverno admission hooks failed while following quick start guide.
Checks
[X] I have searched the existing issues.
[X] This issue is NOT security related. (Please disclose responsibly)
[X] This issue is NOT specific to the CLI. (Please open a CLI issue)
deployKF Version
0.1.5
Kubernetes Distribution
v1.26.6+k3s1
Kubernetes Version
Client Version: version.Info{Major:"1", Minor:"27", GitVersion:"v1.27.2", GitCommit:"7f6f68fdabc4df88cfea2dcf9a19b2b830f1e647", GitTreeState:"clean", BuildDate:"2023-05-17T14:20:07Z", Go
Version:"go1.20.4", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v5.0.1
Server Version: version.Info{Major:"1", Minor:"26", GitVersion:"v1.26.6+k3s1", GitCommit:"3b1919b0d55811707bd1168f0abf11cccc656c26", GitTreeState:"clean", BuildDate:"2023-06-26T17:51:14Z
", GoVersion:"go1.19.10", Compiler:"gc", Platform:"linux/amd64"}
Description
I am following the quick start guide with the Manifests Repo Mode. I installed the default Kyverno version (helm chart version 3.0.1 and Kyverno version 1.10.0). I have changed nothing in comparison with the quick start guide, but I encountered multiple Kyverno admission webhook failures with different policies; detailed logs are attached below. I reckon that adding permissions to the Kyverno role/cluster role should fix my issues, but since I am following the guides and I do not have much experience with Kyverno, I do not want to do so.
By the way, the installation procedure before the Kyverno issues was easy and simple :)) I think deployKF really makes the cumbersome Kubeflow easier to install. Although I am new to deployKF and deployKF is also a young project itself, I think I would use it to integrate Kubeflow for my company. I would really appreciate any support from you guys.
Relevant Logs
# deploykf-mysql--restart-on-secret-updates
Sync operation to c26f3c011ba4429045df6ecaf26f34f7988d9217 failed: one or more objects failed to apply, reason: error when replacing "/dev/shm/130393359": admission webhook "validate-policy.kyverno.svc" denied the request: path: spec.rules[0].generate.clone..: failed to get the Group Version Resource for kind Secret,error when replacing "/dev/shm/1998112630": admission webhook "validate-policy.kyverno.svc" denied the request: path: spec.rules[0].generate.clone..: failed to get the Group Version Resource for kind Secret
# deploykf-auth--clone-minio-openid-secret
error when replacing "/dev/shm/1998112630": admission webhook "validate-policy.kyverno.svc" denied the request: path: spec.rules[0].generate.clone..: failed to get the Group Version Resource for kind Secret
# deploykf-auth--clone-argo-server-openid-secret
error when replacing "/dev/shm/130393359": admission webhook "validate-policy.kyverno.svc" denied the request: path: spec.rules[0].generate.clone..: failed to get the Group Version Resource for kind Secret
# deploykf-minio--restart-on-secret-updates
admission webhook "validate-policy.kyverno.svc" denied the request: path: spec.rules[0].mutate.targets.: auth check fails, require additional privileges, update the ClusterRole 'kyverno:background-controller:additional':failed to get the Group Version Resource for kind Deployment; failed to get the Group Version Resource for kind Deployment
deployKF Values (Optional)
No response
I also started receiving something that looks like https://github.com/kyverno/kyverno/issues/6287 in a cluster that's been operating for over a year.
Upgrading kyverno's helm chart version from 3.0.1 (default) to 3.0.9 fixed my issue.
@haiminh2001 be very careful about updating Kyverno, because I can confirm that all versions of Kyverno other than 1.10.0 will not work properly (secret will not be cloned, and automatic restarts will not happen when secrets are updated).
We are also talking about Kyverno in https://github.com/deployKF/deployKF/issues/180#issuecomment-2235258937.
My plan is to remove the dependency on Kyverno for cloning secrets, which should allow us to update to the latest version, because most of its problems are related to its generate type rules.
@thesuperzapper thank you for your reminder; I hope that changing only the minor version should not cause any issues. From my experience, kyverno and cert-manager integration is complicated when there is a pre-installed version in the cluster. They should be removed, or should expose some config to work with the pre-installed dependencies.
@haiminh2001 it actually does matter, you must use Kyverno 1.10.0 with deployKF 0.1.5.
Please see the dependency version matrix for more information about why every other version does not work.
@thesuperzapper yes I closed this Issue too soon. I rolled back Kyverno to 1.10.0, and the original issue came back. I found this log in the kyverno admission controller pod:
webhooks/policy/validate "msg"="policy validation errors" "error"="path: spec.rules[0].generate.clone..: failed to get the Group Version Resource for kind Secret" "gvk"={"group":"kyverno.io","version":"v1","kind":"ClusterPolicy"} "gvr"={"group":"kyverno.io","version":"v1","resource":"clusterpolicies"} "name"="clone-profile-bucket-secret-d81b8cd1-team-1-prod" "namespace"="" "operation"="UPDATE" "uid"="3720e3c8-dc2c-4be3-84b5-9f77cc8a6254" "user"={"username":"system:serviceaccount:argo-cd:argocd-application-controller","uid":"6262ad31-dada-4a7c-9740-df93f1c558df","groups":["system:serviceaccounts","system:serviceaccounts:argo-cd","system:authenticated"],"extra":{"authentication.kubernetes.io/pod-name":["argo-cd-argocd-application-controller-0"],"authentication.kubernetes.io/pod-uid":["68c7c668-3a26-47e2-a7c3-92db784b2ec6"]}}
Upgrading kyverno's helm chart version from 3.0.1 (default) to 3.0.9 fixed my issue.
This fixed the issue for me, altho I have to manually edit in the argoCD UI. Thanks @haiminh2001
Upgrading kyverno's helm chart version from 3.0.1 (default) to 3.0.9 fixed my issue.
This fixed the issue for me, altho I have to manually edit in the argoCD UI. Thanks @haiminh2001
Be careful, as @thesuperzapper said that you must use Kyverno 1.10.0 with deployKF 0.1.5. I ended up using that version now. The issue still exists, but I can simply downscale kyverno's controllers to zero to bypass it. This workaround is OK for me since it is only needed the first time I deploy.
|
GITHUB_ARCHIVE
|
Course pages 2011–12
No. of lectures: 16
Prerequisite: (the last lecture of) Regular Languages and Finite Automata (Part IA)
This course is a prerequisite for Optimising Compilers (Part II).
This course aims to cover the main technologies associated with implementing programming languages, viz. lexical analysis, syntax analysis, type checking, run-time data organisation and code-generation.
- Survey of execution mechanisms. The spectrum of interpreters and compilers; compile-time and run-time. Structure of a simple compiler. Java virtual machine (JVM), JIT. Simple run-time structures (stacks). Structure of interpreters for result of each stage of compilation (tokens, tree, bytecode). [3 lectures]
- Lexical analysis and syntax analysis. Recall regular expressions and finite state machine acceptors. Lexical analysis: hand-written and machine-generated. Recall context-free grammars. Ambiguity, left- and right-associativity and operator precedence. Parsing algorithms: recursive descent and machine-generated. Abstract syntax tree; expressions, declarations and commands. [2 lectures]
- Simple type-checking. Type of an expression determined by type of subexpressions; inserting coercions. [1 lecture]
- Translation phase. Translation of expressions, commands and declarations. [1 lecture]
- Code generation. Typical machine codes. Code generation from intermediate code. Simple peephole optimisation. [1 lecture]
- Object modules, linkers and run-time system. Resolving external references. Static and dynamic linking. Malloc and system calls. [1 lecture]
- Non-local variable references. Lambda-calculus as prototype, Landin’s principle of correspondence. Problems with rec and class variables. Environments, function values are closures. Static and dynamic binding (scoping). [1 lecture]
- Machine implementation of a selection of interesting things. Free variable treatment, static and dynamic chains, ML free variables. Compilation as source-to-source simplification, e.g. closure conversion. Argument passing mechanisms. Objects and inheritance; implementation of methods. Labels, goto and exceptions. Dynamic and static typing, polymorphism. Storage allocation, garbage collection. [3 lectures]
- Parser Generators. A user-level view of Lex and Yacc. [1 lecture]
- Parsing theory and practice. Phrase Structured Grammars. Chomsky classification. LL(k) and LR(k) parsing. How tools like Yacc generate parsers, and their error messages. [2 lectures]
At the end of the course students should understand the overall structure of a compiler, and will know significant details of a number of important techniques commonly used. They will be aware of the way in which language features raise challenges for compiler builders.
* Appel, A. (1997). Modern compiler implementation in Java/C/ML (3 editions). Cambridge University Press.
Aho, A.V., Sethi, R. & Ullman, J.D. (2007). Compilers: principles, techniques and tools. Addison-Wesley (2nd ed.).
Bennett, J.P. (1990). Introduction to compiling techniques: a first course using ANSI C, LEX and YACC. McGraw-Hill.
Bornat, R. (1979). Understanding and writing compilers. Macmillan.
Fischer, C.N. & LeBlanc, J. Jr (1988). Crafting a compiler. Benjamin/Cummings.
Watson, D. (1989). High-level languages and their compilers. Addison-Wesley.
|
OPCFW_CODE
|
How to append cell values of a particular column in one excel sheet as new unique columns into another excel sheet
I have two excel sheets, Player_Records and Match_Records, with one common column named Match_ID in both.
The Player_Records sheet contains career records of multiple players. The Player_ID column identifies the player but not uniquely, since there are multiple records (each record has details regarding the results of a particular match) for each player. The Match_ID column identifies the match, but also not uniquely, since many players have an entry for the same match.
Player_Records sheet
The Match_Records sheet contains records of every match played, and the Match_ID column uniquely identifies each match.
Match_Records sheet
What I want to achieve:
Append each (unique) player from the Player_Records file as a column into the Match_Records file so that a record in the Match_Records file has a column for each player (regardless of whether they played in that match).
When this is done, the Match_Records file will have a column for each player and the cell value of those columns can be boolean to indicate whether that player was part of that particular match.
How can I do this?
Create an extra column with Match IDs and Player IDs together in the Player_Records sheet. Create the extra columns in the Match_Records sheet with the Player IDs. For each cell of those columns, search whether the combination of the Match ID of that row and the Player ID of that column appears in the Match ID–Player ID column of the Player_Records sheet.
Thanks, I've been attempting this but I don't know how to do the last step, how do I perform a search like that?
Step 1: create a colum with Match IDs and Player IDs
Assuming that in sheet Player_Records the column G is available, put in cell G1 this formula:
=F1 & " | " & D1
Then drag it to the end of the list. If you did it correctly, the cell G86 should return the value 914567 | ODI # 4072. Clearly it contains the player ID, the " | " marker and the match ID. Attention: if the marker is likely to appear in a player or match ID, use a different marker.
Step 2: create the players' columns
You could manually write all the player IDs, but with many players that would be tedious. It's better to use some formulas. Assuming that in sheet Player_Records the column H is available, put in cell H1 this formula:
=IF(F2=F1,"",F2)
Then drag it to the end of the list. It will give you a list with many blank-looking cells and the player IDs reported once each. If all the player IDs are just numbers, and assuming that in sheet Match_Records column G and the columns to its right are available, put in cell G1 this formula:
=SMALL(Player_Records!$H:$H,CELL("col",G1)-6)
Then drag it horizontally until you reach a column where the result given is a #NUM! error. Clear any #NUM! error and copy-paste the values of the rest of the cells you've just filled with the formula. At this point those cells contain the player IDs as values, so you can clear the column H in the Player_Records sheet because it's of no use now. If the player IDs are not all numbers, this method won't work (but there are others).
Step 3: search for Match ID-Player ID combination
In the sheet Match_Records in the cell G2 put this formula:
=IF(COUNTIF(Player_Records!$G:$G,G$1&" | "&$E2)>0,TRUE,FALSE)
Then drag it vertically and horizontally to the ends of the list. This formula basically checks whether, in the G column of the Player_Records sheet (the column with player and match IDs combined), there is any occurrence of the combination of the player ID of the given column and the match ID of the given row, with the marker " | " in between them (if you have used a different marker, change the formula accordingly). If any occurrence is present, the formula returns TRUE; otherwise it returns FALSE. You can change the outcome (for example with 1 and 0 or "played" and "") by editing the formula.
|
STACK_EXCHANGE
|
- published: 13 Nov 2017
- views: 4053758
What is a blockchain and how do they work? I'll explain why blockchains are so special in simple and plain English! 💰 Want to buy Bitcoin or Ethereum? Buy for $100 and get $10 free (through my affiliate link): https://www.coinbase.com/join/59284524822a3d0b19e11134 📚 Sources can be found on my website: https://www.savjee.be/videos/simply-explained/how-does-a-blockchain-work/ 🌍 Social Twitter: https://twitter.com/Savjee Facebook: https://facebook.com/Savjee Blog: https://savjee.be ❤️ Become a Simply Explained member: https://www.youtube.com/channel/UCnxrdFPXJMeHru_b4Q_vTPQ/join
As the Founder of Proof of Talent, I spend every day focused on the job market within the crypto industry. So today, let's discuss crypto jobs, where the majority of jobs are based, what type of jobs are out there and how you can find them. It's Crypto Jobs 101. If you're looking for a job in blockchain, you can reach out to me at anytime? Get started with Proof of Talent and apply for a crypto job with the link here - proofoftalent.co/careers #Crypto #CryptoJobs #cryptocurrency
#blockchain #ProofOfwork #PoW
An explanation of cryptographic proof-of-work protocols, which are used in various cryptographic applications and in bitcoin mining. More free lessons at: http://www.khanacademy.org/video?v=9V1bipPkCTU Video by Zulfikar Ramzan. Zulfikar Ramzan is a world-leading expert in computer security and cryptography and is currently the Chief Scientist at Sourcefire. He received his Ph.D. in computer science from MIT.
The math behind cryptocurrencies. Home page: https://www.3blue1brown.com/ Brought to you by you: http://3b1b.co/btc-thanks And by Protocol Labs: https://protocol.ai/join/ Some people have asked if this channel accepts contributions in cryptocurrency form. As a matter of fact, it does: http://3b1b.co/crypto 2^256 video: https://youtu.be/S9JGmA5_unY Music by Vincent Rubinetti: https://soundcloud.com/vincerubinetti/heartbeat Here are a few other resources I'd recommend: Original Bitcoin paper: https://bitcoin.org/bitcoin.pdf Block explorer: https://blockexplorer.com/ Blog post by Michael Nielsen: https://goo.gl/BW1RV3 (This is particularly good for understanding the details of what transactions look like, which is something this video did not cover) Video by CuriousInventor: https://...
Blockchain technology is moving into the recruiting and hiring space. The Japanese employment research services company Recruit Co through their weight behind Beam Development Limited. With Layah Heilpern __ Thank You For Watching This Video! 🎞 SUBSCRIBE to our channel for daily Breaking News in Crypto here -- https://www.youtube.com/channel/UCuDg... __ 🔹 Welcome to BloxLive.TV, the world's first crypto and blockchain news network streaming around the clock. We feature breaking news, market analysis and event coverage in this dynamic and revolutionary finance and technology marketplace. __ 🔹 BloxLive.TV begins where mainstream media ends. We broadcast this Revolution Livestream! __ 💻FOLLOW US HERE: ►Twitter: https://twitter.com/bloxlivetv ►Instagram: https://www.instagram.com...
Watch live: https://ivanontech.com/live
Cryptocurrencies use a ton of electricity because of mining. In recent years people started working on a different technique called Proof-of-Stake. Not only does it use less energy, it can also be more secure. (This video is a re-upload to fix a mistake I made in the previous one. Sorry about that!) 📚 Sources for this video: https://www.savjee.be/videos/simply-explained/proof-of-stake/ 🌍 Social Twitter: https://twitter.com/savjee Facebook: https://www.facebook.com/savjee ✏️ Check out my blog https://www.savjee.be
Register to the FREE mini-course "become a blockchain developer" on eattheblocks.com to learn how to get a remote blockchain job making 100k. (Like I did myself). If you are like me, you HATE sending cvs. Wouldn't it be great if you could get AWESOME job opportunities in the Blockchain industry WITHOUT applying to any job? Well, in this video I am gonna teach you just that. You are going to learn how to make Blockchain jobs come to you! So if you are looking for a job as a Blockchain developer but hate sending CVs, make sure to watch this video to learn all my secrets :) Other Links: - Forum: https://forum.eattheblocks.com - Blog: https://eattheblocks.com - Author Website: https://www.julienklepatch.com
We live in uncertain times, and for many this may be an opportunity to learn new skills in pursuit of a new career. The blockchain is where it's at for hot job opportunities, and big companies are actively seeking programmers to fill these positions. Robert Koenig shares tips on how to ready yourself, and how to find work in blockchain technology. The Most In-Demand Hard and Soft Skills of 2020 (LinkedIn Talent Blog) https://business.linkedin.com/talent-solutions/blog/trends-and-research/2020/most-in-demand-hard-and-soft-skills Check Your Skills https://www.hackerrank.com Learn to Code Blockchain - Choose a step-by-step programming course on: https://www.bitdegree.org https://www.udemy.com https://www.dappuniversity.com https://academy.ivanontech.com Job Search Resources https://crypto...
Blockchain - Transactions Watch more videos at https://www.tutorialspoint.com/videotutorials/index.htm Lecture By: Mr. Parth Joshi, Tutorials Point India Private Limited
How transactions are verified in Bitcoin Blockchain - Longest chain rule explained Watch our earlier Blockchain videos Blockchain Simplified: https://www.youtube.com/watch?v=LWAYveDotb0&t=1s Blockchain Technology explained: https://www.youtube.com/watch?v=gVLIentRlIk&t=1s Have you wondered how transaction get approved in a Bitcoin block chain? Why you need to wait for 3 to 6 confirmations for the transactions to appear in your wallet. This video attempts to explain how a Bitcoin transaction is approved and the process behind it. Let’s assume Mr. Gobish transferred Bitcoin worth 100$ from one crypto exchange to another. Once he does the transfer, the transaction message is sent to the network and passed around all the network participants which are also called nodes. This is added to the t...
Radio Free Europe | 03 Jul 2020
WorldNews.com | 03 Jul 2020
Yahoo Daily News | 04 Jul 2020
International Business Times | 04 Jul 2020
Manila Standard Today | 04 Jul 2020
CNN | 04 Jul 2020
|
OPCFW_CODE
|
A sportsbook is a type of gambling establishment that allows you to place wagers on a variety of sporting events. These establishments are typically licensed by the state in which they operate and can also be accessed online. Many of these establishments also offer a wide range of payment options, such as credit cards, E-wallets, and checks.
The Best Line
One of the first things you should do when choosing a sportsbook is research the odds and lines of various games. This will help you determine which bets to make and how much money to wager. It’s also important to check out the customer support and deposit/withdrawal options at different sportsbooks.
The odds of a sporting event can be determined using several different formulas, but the most common is known as the best line. This figure will be determined by taking the best price that a bookie has on either side of a matchup, and then comparing it against the payouts at the sportsbook.
Another method to find the best line is to compare the payouts at the sportsbook with those of other sportsbooks that offer similar odds. This will allow you to pick the best bet that has a high chance of winning, while keeping your bankroll within your budget.
Some sportsbooks also offer a number of special bonuses and promotions that you can take advantage of to increase your winnings. These can include free bets, cashback deals, and more. The best sportsbooks offer these incentives to attract new customers and keep their existing ones coming back.
In addition, some sportsbooks also offer bonuses for accumulator bets and parlays. These bonuses can range from a fixed amount to a percentage of your winnings, and can be used to boost your earnings.
A good sportsbook will have a friendly, knowledgeable staff that can answer your questions and help you navigate their site. They should be available around the clock to respond to your needs and concerns. They should also be able to answer your queries quickly and accurately.
The Props Odds Screen
A sportsbook offers a wide variety of props on every game. These are bets that aren’t part of the traditional betting line and can provide a significant advantage over other bettors. However, some sportsbooks don’t price these props properly and can result in a loss for a bettor.
There are hundreds of prop bets offered by U.S. sportsbooks, including bets on player performance, injuries, and even special teams. These can be a great way to make a profit, but they can also result in a large loss for a bettor who doesn’t know how to handle them.
The best way to bet on props is to create a statistical model for each prop and compare it against the betting lines at different sportsbooks. You can also use a simulation to generate a median result for each prop. This will allow you to easily compare the average and median against the betting lines at various sportsbooks.
|
OPCFW_CODE
|
Methods marked with 'ForwardRef' are now always replaced by the matching methods
Otherwise, it would depend on the order of the assemblies passed to the repacker.
using System;
using System.Runtime.CompilerServices;
public static class Program
{
[MethodImpl(MethodImplOptions.ForwardRef)]
public static extern int GetNumber();
static void Main(string[] args)
{
Console.WriteLine("The number is " + GetNumber() + ".");
}
}
module Program
open System
let GetNumber() = 2
These two programs' merged assembly would depend on the order, GetNumber would either be marked as ForwardRef or would return 2, but ForwardRef is usually used for merging (eg. with the ILSupport extension).
The ForwardRef one will now always be replaced by the other one (hence the title).
Also, I've split up the 'ILRepack.cs' file, but everything is still in the same class (partial classes).
Is there any plan to continue with this PR? - It would be useful for the work I'm doing and I'd rather not fork and reimplement if necessary
@mondieu I haven't touched this in a while, because it worked. il-repack developement obviously continued, and it'll be a bit hard to update this branch with every change that happened during these two years.
According to GitHub's UI, it now has merge conflicts, but it doesn't let me see what the conflicts actually are.
@mondieu what would be your use case, where do you get your uses of ForwardRef from ?
If I try to rebase this PR, I'd like to be able to test it :)
@gluck - Hi again, and sorry for the radio silence, went on holiday and have been a little distracted otherwise:) - So here's my use case - I have an assembly that I'd like to write snippets of in IL to access features like generic ptr's and the like, which I'd then like to merge into the main assembly. Currently, because of the order of operations, I must instead merge the main assembly into the asm version so that methods are correctly overwritten (note that I'm willing to be wrong and be missing a flag or otherwise to make this happen) but here's the general idea..
class SomeAssembly
{
unsafe public T this[int index]
{
// This method will be written in optimized IL / use features not available to the C# compiler - not sure how well this will work on an indexer, but it's where I need to use it currently ..
[MethodImplAttribute(MethodImplOptions.ForwardRef)]
get {return new T();}
}
}
SomeAssembly.il then contains the implementation, which I'd like to be able to merge into the main assembly rather than the other way round. I'm quite happy to accept a custom attribute for this, but the general idea is that the main assembly should have its methods overwritten by the others.
I get it, alternatives like ILSupport or around roslyn would probably be a better fit than ILRepack for that.
But the change (once cleaned up a bit) sounds fairly small, so I'll try to embed it at some point.
The problem with those is:
For ILSupport:
It is no longer in active development (last commit happened one year ago).
Requires a few hacks to work with a newer .NET SDK (or more than a few when using mono).
The syntax highlighting thing doesn't cover everything (nor does a vim script that does the same on Unices).
For Roslyn:
The official proposal isn't implemented yet (let alone shipped).
Only works on Windows as far as I know.
Then again, not many people really know IL, as can be seen in:
ILSupport and the vim script (idem, but as it is quite old, it wouldn't be that surprising). Aside from being seemingly dead, they miss some instructions and other keywords (such as .hash algorithm and the special syntax for versions).
The CIL backend of the Idris compiler (and the language-cil library it uses) are missing some instructions (eg. those dealing with TypedReferences).
Even the Prism Code Injector I wrote a year ago is wrong about the stack usage of some instructions. (I'm planning to update this today, because I might reuse the code later.)
This ought to be a bit better... (Sorry for all the complaining here.)
I apologize, the changes in this PR are too invasive (it extracts most of the file into another file, destroying diffs with forks). If this is still relevant, feel free to send a new PR that is minimal.
There are a lot of forks of ILRepack, and I'm trying to keep the changes minimal to make it easy for maintainers to diff and update. This is why I'm not doing refactorings, whitespace cleanup, formatting and other changes.
|
GITHUB_ARCHIVE
|
This tutorial will demonstrate how to configure Suricata in Network IPS on your network. Therefore, we want to use Suricata to protect other machines or servers within the network.
There are several ways to use Suricata to provide protection within the network. In this case, we will use a rule that prevents unwanted access to other TCP ports that have not been released in Suricata.
The rules used in this tutorial were the rules used in : Part 2: Install Suricata on Linux
This tutorial is part of the Suricata Linux Course.
Link to the Suricata course on YouTube:
In this scenario, we are going to use Suricata between two networks. We have the external network containing a WEB server and the internal network with a computer acting as a client. Thus, using this scenario, we assume that we want to create specific rules for accessing the WEB server. In this case, we will use Suricata to only allow HTTP traffic to the WEB server.
Consequently, Suricata will discard and log all non-HTTP traffic destined for the WEB server.
- Proposed scenario
- Change our suricata rule
- Configuring the suricata machine as a router
- Testing the lock
- Checking logs on suricata.
- Topology in VirtualBox
- See more:
Change our suricata rule
We need to change the rule we created earlier to ensure that Suricata blocks any TCP traffic that is not destined for port 80(HTTP). To do this, we will use the command below to create and edit the file “redes.rules ”.
sudo nano /etc/suricata/rules/redes.rules
Next, we will modify the existing rule by removing the “$HOME_NET “ and inserting the “any”. This way, we will discard TCP packets that are not to port 80 on any network.
drop tcp any any -> any !80 (msg:"TCP Scan ?"; flow:from_client;flags:S; sid:992002087;rev:1;)
After that we will save.
Next we will reset the suricata rules with the command below.
sudo kill -USR2 $(pidof suricata)
Configuring the suricata machine as a router
This tutorial will configure the Suricata machine to act as a router between the internal network “192.168.11.0/24” and the external network “203.0.113.0/24”. In addition to acting as a router, we will create a NAT rule that translates the IPs of the internal network “192.168.11.0/24” into an IP of the external network.
Thus, we create a scenario similar to inserting the Suricata between the Internet and your internal network. Additionally, this type of scenario could be used between two networks of your company.
Creating the forwarding permission
Now we need to enable forwarding on the Suricata machine. For this, we will insert a line in the “/etc/sysctl.conf” file. So, let’s use the command below to insert the line by command:
sudo nano /etc/sysctl.conf
Next we will add the line at the end of the file:
net.ipv4.ip_forward = 1
You can tell me: Juliana this line is already in the file. The answer is yes. However, on most systems this line will be commented out, especially if you have never configured the server to forward packets. If you wish, you can uncomment the line instead of inserting the line “net.ipv4.ip_forward = 1”.
Now let’s reload the file using the command :
sudo sysctl -p
Checking the interface connecting the external network
Before configuring NAT, we need to identify which is our network interface that connects our external network. That is because the client’s connections within the internal network will be routed through this interface that connects to our external network. So, let us use the command below to identify the interfaces and networks.
We can see in the figure above that our external network is “enp0s3”. That’s because this interface “enp0s3” is associated with the network that we defined as external “203.0.113.0/24”.
Creating a NAT for the internal network
Let’s edit the file that pre-reads the firewall rules. In this file, we will perform two procedures. The first will be to perform a NAT to the internal network, and the second will be to forward the traffic to the NFQUEUE. For this, we will edit the file “/etc/ufw/before.rules”. So, let’s use the command below:
sudo nano /etc/ufw/before.rules
And let’s insert the lines below.
*nat :POSTROUTING ACCEPT [0:0] -A POSTROUTING -s 192.168.11.0/24 -o enp0s3 -j MASQUERADE COMMIT
Thus, these lines will allow the translation from the internal network to the external network to be done. However, it is worth noting that in configuring these lines, we must introduce our network interface used in the external network. Thus, in our case, the interface for the external network is “enp0s3” and the network used in our internal network is “192.168.11.0/24”.
The figure above maps the internal network “192.168.11.0/24” to go out through the external interface “enp0s3”.
Sending routing traffic to NFQUEUE
Unlike what we did in the previous post, now we will configure the Suricata to work as a network IPS. Thus, we need to create rules in the firewall to send the traffic that will be routed to the Suricata NFQUEUE. Let’s change the UFW’s “before.rules” file to do this change.
-I FORWARD -j NFQUEUE
In the figure above, we commented out the previous rules “#-I INPUT -j NFQUEUE” and “#-I OUTPUT -j NFQUEUE”. We commented out these lines because we are no longer interested in filtering the traffic that comes to the Suricata machine, but rather the traffic that the Suricata machine forwards. However, if we want to filter traffic directed to the Suricata machine itself, we can uncomment the INPUT and OUTPUT lines.
Below is the file “before.rules”.
# # rules.before # # Rules that should be run before the ufw command line added rules. custom # rules should be added to one of these chains: # ufw-before-input # ufw-before-output # ufw-before-forward *nat :POSTROUTING ACCEPT [0:0] -A POSTROUTING -s 192.168.11.0/24 -o enp0s3 -j MASQUERADE COMMIT # Don't delete these required lines, otherwise there will be errors *filter :ufw-before-input - [0:0] :ufw-before-output - [0:0] :ufw-before-forward - [0:0] :ufw-not-local - [0:0] # End required lines ###suricata NFQUEUE #-I INPUT -j NFQUEUE #-I OUTPUT -j NFQUEUE -I FORWARD -j NFQUEUE ### End Suricata NFQUEUE rules # don't delete the 'COMMIT' line or these rules won't be processed COMMIT
NOTE. If you choose to use another firewall, remember to redirect forwarding traffic to the Suricata NFQUEUE
Now let’s restart the UFW firewall using the commands below.
sudo ufw disable
sudo ufw enable
Testing the lock
Now let us open two ports on the external network machine with an apache WEB server. For this, we are going to use the APACHE2 server and the SSH server. Port 80 is used on WEB servers for the HTTP protocol, and port 22 is used for the SSH protocol.
In this case, we want Suricata to block access to the SSH server as the only traffic that will not be dropped will be traffic to the WEB server on port 80.
We can use the commands below to check if the WEB server and SSH protocol ports are open.
sudo netstat -anp | grep :80
sudo netstat -anp | grep :22
Now, let’s access the WEB server using a browser and typing the IP of the WEB server “203.0.113.10” on the client machine.
Next, let us try to access the SSH server installed on the machine with the WEB server. For this, we can use the command below:
In the figure below, we can see that we did not have access to the SSH server. Therefore, Suricata has blocked access to all TCP ports other than port 80.
Checking logs on suricata.
Now let’s go to the suricata machine and open the log file. For this, we will use the command below:
We can see the result of the lock log in the figure below. In this case, we can verify that the IP of the client “192.168.11.22” tried to access port 22 of the IP of the WEB server “203.0.113.10”.
Topology in VirtualBox
In the figures below, we will demonstrate our scenario used to create the Suricata experiment using VirtualBox. That way, we can create the same scenario for studies and proof of concept.
The figure below demonstrates the network configuration of the virtual machine hosting Suricata. Thus, we use the Adapter 1 interface to belong to the external network “External_Network”.
The figure below demonstrates the other network configuration of the virtual machine hosting Suricata. Thus, we use the Adapter 2 interface to belong to the internal network “Internal_Network”.
The figure below demonstrates the network configuration of the virtual machine hosting the internal network client. Thus, we use the Adapter 1 interface to belong to the internal network “Internal_Network”.
The figure below demonstrates the network configuration of the virtual machine that hosts the WEB server. Thus, we use the Adapter 1 interface to belong to the external network “External_Network”.
Are you enjoying the Suricata course? Comment on the YouTube channel, and feel free to make suggestions.
This tutorial is part of the Suricata Linux Course.
Part 1: Install Suricata on Linux
Part 2: Suricata Host IPS
|
OPCFW_CODE
|
The total combined usage of Insights features is billed in a virtual currency called "Insights Credits". Insights Credits are derived from conversion rates for usage of each Insights service.
The Standard and Premium pricing plans include a number of free monthly Insights credits. If usage exceeds the number of free credits, any additional credits are billed at the rate specified on your plan. The Pay-as-you-go pricing plan does not include free credits, so each credit is billed at the rate specified in the plan. For more information on the number of free credits and additional credit rates included in each plan, see PlayFab pricing.
Each title is set to a chosen performance level that is billed to the minute (Insights Credits Per Hour / 60). The cost in credits for each performance level is located at the bottom of the table on the Insights Management page:
All MAU-based pricing plans will be converted to a usage-based pricing plan on 11/01/2020. As part of the transition, a small number of titles will be placed on a paid Insights Performance level to maintain their current functionality. However, if a title would like to reduce the performance level (and thus cost) while also accepting a decrease in performance, the performance level can be adjusted on the Insights Management page in Game Manager using the above table.
Data retention is billed in Terabyte months. Every day the end of day storage size is recorded and averaged over the month. Each terabyte is billed at 50 credits per terabyte month. For example, if a studio stored 1 terabyte of data every day for a month, the total would be 1 TB/Month * 50 Credits = 50 Credits.
Below is an example scenario illustrating how one month of Insights billing is calculated for a studio:
In April, a studio used performance level 4 from 9AM to 5PM each weekday and scaled down to performance level 2 at nights and on weekends. April has 21 weekdays and 9 weekend days, which works out to:
- 21 days at performance level 4, for 8 hours
- 21 days at performance level 2, for 16 hours
- 9 days at performance level 2, for 24 hours
The credit rate per hour for each of these levels is:
- performance level 2 = 1 credits/hour
- performance level 4 = 4 credits/hour
The total monthly charge for Insights Performance is calculated as:
21 (days) * 8 (hours) * 4 (credits per hour at performance level 4) + 21 (days) * 16 (hours) * 1 (credits per hour at performance level 2) + 9 (days) * 24 (hours) * 1 (credits per hour at performance level 2) = 1,224 credits for the month.
Starting the month with no stored data, the studio added 1 TB of data every other day for 30 days. At the end of the month they averaged 15 TBs of data over the month. The total monthly charge for data retention is calculated as:
15 TB * 50 Credits per TB/month = 750 Credits for the month.
Thus, the studio's total monthly cost for Insights performance and data retention is 1,974 credits.
Row Write Overages
Each Insights Performance level has an allotted number of writes per second to the data cluster. For customers not in development mode, going beyond the number of row writes for your performance level triggers row write overages instead of event throttling. Row write overages are charged at 1 credit per 2.7 million (2,700,000) rows written. This pricing is designed to allow for specific ingestion scenarios where it may be more cost effective to have row write overages instead of raising the Insights performance level.
|
OPCFW_CODE
|
Separate names with a comma.
Discussion in 'Tech & Support' started by Malice, May 30, 2006.
Who is going to buy this once it comes out?
It's always better to buy the Windows operating system a couple of years after it comes out, so the problems can be rectified.
Once it comes out? No.... im going to leave it for a year or so, so they can work out all those kinks they WILL have
Edit : Dam you Jonty!
I will probably buy it a few months after it comes out, if not shortly after it comes out.
So I can learn it, tweak it, ect..
That is what Service Packs are for, to fix issues....
Yeah those wont help with a blue screen of death.
Remember this moment and do not challenge me again.
So Mr Malice what features are you most looking forward to from Vista? I would say easy access to my files and what not (even though i kind of have that now) plus the whole media centre side of it
I'll wait and see how well it takes off first. I may download it though and install it on my POS computer and see how it works on it.
Heh, that is why you build custom install CD's
I might buy it. IF it actually comes out.
I already have an operating system that does all the things Vista claims it's going to be able to do and more, and I don't have to buy new hardware to run it, either. Why bother with Vista?
"If you say Linux, you lose a testicle"
I ain't sayin' LINUX.
Os10? bloody macs
hah...well I am a windows admin, so I will need to know the OS...
GOtta get familiar with it...My laptop should suffice...its not even a year old yet.
You make a living off of it, so I can understand your need to learn Vista. Anyone else is just a masochist.
No way could I ever justify the price for something that really isn't going to change my computer experience that much.
I remeber stepping from 3.1 to 95, that was mind blowing
As some of you know, I'm in the process of building my computer. I'm going to load it up with the latest windows XP and in about 3 years, then Vista.
How much will vista be? because it would be aload on your cost anyway if you didn`t wait 3 years (or more!) Xp pro is only just coming down in price
I want Aero Glass, dammit!
There are going to be anywhere from 3 to 6 variations of Vista. The really cheap version will not have the neat Vista options, but will only cost a couple of hundred dollars. The top business version will probably be similarly priced to the current top business version of XP.
|
OPCFW_CODE
|
Getting to Know Microsoft StaffHub
What It Is:
Having worked in the project and portfolio management space for the first decade of my career, I sometimes coordinated with field personnel on different projects. For example, back during my time with Pacific Bell, I worked with an internal marketing team on a project to expand our GIS data for service teams trying to repair phone lines, and coordinated the time and activities of those line workers participating in the testing of the new data and related tools. I was responsible for schedules, and it was a nightmare without any centralized tracking tools. It was almost entirely a manual effort to track and manage the project. When Microsoft announced StaffHub, that’s where my mind went — to those people out in the field, most of the time without connectivity much less the tools to report back in and collaborate with the folks at headquarters.
When you think about collaboration, do you take into consideration all of the roles within your organization, and not just those with 24×7 connectivity and the ability to attend a meeting in the conference room down the hall if requested? Thankfully, Microsoft is thinking about these extended scenarios — and how the broader team benefits when all team members can actively participate in collaboration.
If you’ve not yet explored what is included in StaffHub, here is the information you need to get grounded:
There’s a short description on Staffhub.office.com:
Microsoft StaffHub is a cloud-based platform that works across all your devices. It enables firstline workers and their managers to manage time, communicate with their teams, and share content. [Read More]
But a more in-depth description on the announcement on Blogs.office.com:
There are an estimated 500-million frontline staff workers around the world in retail stores, hotels, restaurants, manufacturing and other service-related industries. These employees typically don’t have their own office, desk or computer—making it hard to access and share information important for the workday. Far too often, these workers rely on manual processes and outdated tools—cumbersome creation and printing of paper schedules, bulletin boards crammed with notes and a flurry of phone calls and text messages to cover shifts. Microsoft StaffHub is here to help.
We’re pleased to announce the worldwide general availability of Microsoft StaffHub—a new application for Office 365 designed to help staff workers manage their workday—with schedule management, information sharing and the ability to connect to other work-related apps and resources. [Read More]
The solution also comes with optional access to Microsoft StaffHub, an easy employee shift-scheduling app. StaffHub was announced as part of F1, one of Microsoft 365’s other versions for first-line workers. SMBs can also connect a couple other existing apps to their Microsoft Business Center dashboard: a simple Microsoft Bookings tool for managing online appointments, the MileIQ mileage tracking app that monitors a user’s travel for tax reimbursements, and Outlook Customer Manager, a basic CRM tool built right into Microsoft’s email client. [Read More]
Microsoft today unveiled the newest addition to its Office 365 suite with the debut of an application for shift workers and management, called StaffHub. The program is aimed at those who don’t tend to work from desktop computers and have different schedules from week to week, such as in retail, hospitality, restaurants and other industries.
The program was originally introduced in “preview” last fall… [Read More]
Microsoft StaffHub is one of the latest solutions available from Microsoft which can completely transform how you manage firstline workers. If you answer yes to any of the below questions then this blog is for you:
- Do you want to understand more about Microsoft StaffHub?
- Do you manage teams of frontline workers?
- Are you a frontline worker who is fed up of the lack of communication in your company?
- Do you work shifts in a factory or warehouse?
- Does you manager work in a different location to you?… [Read More]
Managers use the StaffHub web app to create schedules, manage requests for shift swaps or time off, and share information with the team. Employees use the mobile app to view their shifts, submit shift and time-off requests, and communicate with the team. [Read More]
On mobile or web, it’s the one-stop app for scheduling, sharing, and communicating. Microsoft StaffHub gives everyone the freedom to manage work. And get on with life. [Read More]
Microsoft StaffHub is a purpose-built app for the Firstline Workforce in Office 365. It is specifically designed to deliver the capabilities, tools and information that Firstline Workers need to work effectively and perform at their peak. This service combines scheduling, task management, documents, people and tools in one secure place – with the ability to connect to other work-related apps and resources. [Read More]
Identity and access management: Quickly create and assign a digital identity with just a phone number. Easily add and remove team members in the application and maintain the enterprise security and IT management control that you expect with Office 365.
“It has changed completely the relationship we have between management and the staff, and it helps us concentrate on what is important.” Dominique Grandjonc, General Manager, Novotel, Paris [Read More]
- Sign in to StaffHub at https://staffhub.office.com/with your Office 365 work account, such as email@example.com.
The first time you sign in, the setup wizard will start. If you have already created a team and want to create a new one, go to https://staffhub.office.com/app/setup.
- Follow the steps in wizard to add the names of everyone you want to schedule… [Read More]
StaffHub Flow Templates: Automated workflows between your favorite apps and services to get notifications, synchronized files, collected data and more. [Read More]
Introduction: StaffHub is a remarkable new tool from Microsoft that enables organizations to effectively manage their firstline workers and enable them to have a collaborative experience to achieve more in their jobs. A firstline worker can be a nurse, barista, retail worker, manufacturing worker, hospitality worker, service technician, etc. The purpose of this blog post is to walk you through initial setup and configuration of StaffHub in your Office 365 tenant to get started using the service. For a great overview of StaffHub, I recommend watching the below video: [Read More]
When a new team member is added, StaffHub (if configured to do so) will automatically create a new user account and add them to a Security Group called Deskless Workers in Azure Active Directory. However, the user will still need to be assigned a license in order to start using StaffHub. [Read More]
Microsoft Tech Community Discussions:
How many Firstline Workers did you interact with today? For me, I count around a dozen or so that range from the barista at the coffee shop, gas station attendant, waitress at the restaurant I ate lunch at, the gate agent when I boarded my plane to fly home, when the cable repair technician came… [Read More]
Community Info Center for StaffHub. The space to share experiences, engage and learn from experts. [Read More]
|
OPCFW_CODE
|
[This message is converted from WPS-PLUS to ASCII]
I see that the subject of handling multiple occurrences of the
same IP network numbers has come up again. We should make this
part of the firewall FAQ, I guess...
The most common cases where this occurs is an organization that
has umpteen network numbers (which are not InterNIC-registered)
that now needs to connect to the Internet in some way. What should be
put in the middle of the connection that will support the fact that
some IP addresses exist on BOTH sides of the configuration?
More importantly (everybody, hope that IPV6 comes in fast enough
to minimize this), how will we handle cases where N > 2 IP internetworks
have conflicting network numbers and need to be interconnected?
It is true that some companies have come out with specific system/
software combinations to solve this problem. Some products have
already been mentioned, others will be, I have no doubts about that.
Building such a product practically REQUIRES messing around in the depths
of a TCP/IP software stack in order to achieve the desired functionality.
Some people may fear the potential security impact of such software
modifications. I certainly would not feel confident if I was personally
asked to modify an IP stack to do this...
It should be remembered that IT IS POSSIBLE to achieve the desired
functionality with much more mundane technology IF:
a) the types of communications you need can all be proxied
b) you have access to "classical" proxy software that supports
auto-forwarding (a very simple functionality to implement,
many available proxies, commercial or free, do this)
c) you have two systems on which this proxy software can run
(the two systems can become a "firewall" if you want)
What needs to be done then is as simple as:
1. Configure each system to live in the IP environment of one side
of your "firewall configuration"
2. Configure an interconnection IP network (often a short Ethernet
cable between the two proxy machines). The IP network number
used on the interconnection network ONLY NEEDS TO BE KNOWN TO
THE TWO PROXY MACHINES.
3. Set up the proxy applications for the appropriate auto-forwarding
I have written a document about proxies that (among other things)
describes this setup. You may wish to take a look at it:
Like all internet drafts, it is also available by FTP and on several
ds.internic.net (US East Coast)
ftp.isi.edu (US West Coast)
munnari.oz.au (Pacific Rim)
Of course, nobody should consider this document to be "The Truth" (beware
of what you read). It is just a set of opinions from one guy in a corner,
and exactly fits the "Request For Comments" concept...
E-mail: Marc .
Disclaimer: On this forum, I only speak for myself, nobody else.
|
OPCFW_CODE
|
"""
Helper objects for plots with several stacks (e.g. data and and simulation)
WARNING: work in progress, some things are not implemented yet
"""
import numpy as np
from itertools import chain, izip
import collections
import histo_utils as h1u
class THistogramStack(collections.Sequence):
    """
    Python equivalent of THStack

    For the simplest cases, calling `matplotlib.axes.hist` with list arguments works,
    but for automatic calculation of statistical/systematic/combined uncertainties
    on the total, and ratios between stacks, a container object is helpful
    """
    class Entry(object):
        """ THistogramStack helper class: everything related to one histogram in the stack """
        def __init__(self, hist, label=None, systVars=None, drawOpts=None):
            # hist     : histogram wrapper; the underlying ROOT histogram is hist.obj
            # label    : optional legend label for this contribution
            # drawOpts : per-entry draw options (e.g. "fill_color"), read by the plot helpers
            self.hist = hist
            self.label = label
            self.systVars = systVars if systVars else dict() ## NOTE this can be an actual dictionary, or a small object that knows how to retrieve the variations from the file, as long as systVars[systName].up and systVars[systName].down do what is expected
            self.drawOpts = drawOpts if drawOpts else dict()
    def __init__(self):
        self._entries = []
        self._stack = None ## sum histograms (lazy, constructed when accessed and cached)
    def add(self, hist, **kwargs):
        """ Main method: add a histogram on top of the stack """
        self._entries.append(THistogramStack.Entry(hist, **kwargs))
    @property
    def entries(self):
        """ list of histograms (entries) used to build the stack"""
        return self._entries
    @property
    def stacked(self):
        """ list of "stack" histograms (per-bin cumulative sums) """
        # Rebuild lazily: the cache is stale when entries were added since the last build.
        if not ( self._stack is not None and len(self._stack) == len(self._entries) ):
            self._buildStack()
        return self._stack
    @property
    def stackTotal(self):
        """ upper stack histogram """
        # NOTE(review): raises IndexError when the stack is empty -- confirm callers guard this.
        return self.stacked[-1]
    def _buildStack(self):
        # Build the cumulative sums: element k of self._stack is the sum of entries 0..k.
        self._stack = []
        if len(self._entries) > 0:
            h = h1u.cloneHist(self._entries[0].hist.obj)
            self._stack.append(h)
            for nh in self._entries[1:]:
                # clone the running sum so earlier stack levels are not modified
                h = h1u.cloneHist(h)
                h.Add(nh.hist.obj)
                self._stack.append(h)
    ## sequence methods -> stacked list
    def __getitem__(self, i):
        return self.stacked[i]
    def __len__(self):
        return len(self._entries)
    def _defaultSystVarNames(self):
        """ Names of all systematic variations present in any entry (union over entries) """
        return set(chain.from_iterable(contrib.systVars.iterkeys() for contrib in self._entries))
    def getTotalSystematics(self, systVarNames=None):
        """ Get the combined systematics

        systVarNames: systematic variations to consider (if None, all that are present are used for each histogram)

        Per variation, entry contributions are summed linearly per bin; the
        different variations are then combined in quadrature per bin.

        NOTE(review): systInteg is assigned inside the innermost loop, so the
        returned value reflects only the last (variation, entry) pair processed,
        and the name is unbound when there are no entries or the only variation
        is "lumi". Looks like it should accumulate -- confirm intent.
        NOTE(review): contrib.systVars[systN] raises KeyError for an entry that
        lacks variation systN; with the default systVarNames (union over
        entries) this assumes all entries carry the same variations -- confirm.
        """
        nBins = self.stackTotal.GetNbinsX()
        binRange = xrange(1,nBins+1) ## no overflow or underflow
        if systVarNames is None:
            systVarNames = self._defaultSystVarNames()
        systPerBin = dict((vn, np.zeros((nBins,))) for vn in systVarNames) ## including overflows
        for systN, systInBins in systPerBin.iteritems():
            for contrib in self._entries:
                if systN == "lumi": ## TODO like this ?
                    pass
                else:
                    syst = contrib.systVars[systN]
                    # largest of the up/down deviations from nominal, per bin
                    maxVarPerBin = np.array([ max(abs(syst.up(i)-syst.nom(i)), abs(syst.down(i)-syst.nom(i))) for i in iter(binRange) ])
                    systInBins += maxVarPerBin
                    systInteg = np.sum(maxVarPerBin)
        # combine the different variations in quadrature, per bin
        totalSystInBins = np.sqrt(sum( binSysts**2 for binSysts in systPerBin.itervalues() ))
        return systInteg, totalSystInBins
    def getSystematicHisto(self, systVarNames=None):
        """ construct a histogram of the stack total, with only systematic uncertainties """
        systInteg, totalSystInBins = self.getTotalSystematics(systVarNames=systVarNames)
        return h1u.histoWithErrors(self.stackTotal, totalSystInBins)
    def getStatSystHisto(self, systVarNames=None):
        """ construct a histogram of the stack total, with statistical+systematic uncertainties """
        systInteg, totalSystInBins = self.getTotalSystematics(systVarNames=systVarNames)
        return h1u.histoWithErrorsQuadAdded(self.stackTotal, totalSystInBins)
    def getRelSystematicHisto(self, systVarNames=None):
        """ construct a histogram of the relative systematic uncertainties for the stack total """
        return h1u.histoDivByValues(self.getSystematicHisto(systVarNames))
import mplbplot.plot ## axes decorators for TH1F
class THistogramRatioPlot(object):
    """
    Helper class for the common use case of a pad with two histogram stacks (MC and data or, more generally, expected and observed) and their ratio in a smaller pad below
    """
    def __init__(self, expected=None, observed=None, other=None): ## FIXME more (for placement of the axes)
        ## TODO put placement in some kind of helper method (e.g. a staticmethod that takes the fig)
        import matplotlib.pyplot as plt
        import matplotlib.ticker
        from mplbplot.plothelpers import formatAxes, minorTicksOn
        # Two vertically stacked axes sharing the x axis: the main distribution
        # pad gets 4/5 of the height, the ratio pad below it 1/5.
        self.fig, axes = plt.subplots(2, 1, sharex=True, gridspec_kw={"height_ratios":(4,1)}, figsize=(7.875, 7.63875)) ## ...
        self.ax, self.rax = tuple(axes)
        self.rax.set_ylim(.5, 1.5)
        self.rax.set_ylabel("Data / MC")
        self.rax.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(.2))
        formatAxes(self.ax)
        formatAxes(self.rax, axis="x")
        minorTicksOn(self.rax.yaxis)
        #self.ax = fig.add_axes((.17, .30, .8, .65), adjustable="box-forced", xlabel="", xticklabels=[]) ## left, bottom, width, height
        #self.rax = fig.add_axes((.17, .13, .8, .15), adjustable="box-forced")
        self.expected = expected if expected is not None else THistogramStack()
        self.observed = observed if observed is not None else THistogramStack()
        self.other = other if other is not None else dict() ## third category: stacks that are just overlaid but don't take part in the ratio
    def __getitem__(self, ky):
        # dictionary-style access to the overlay-only stacks
        return self.other[ky]
    def draw(self):### TODO add opts
        """ Draw both pads: distributions on the main axes, ratio below """
        self.drawDistribs(self.ax)
        self.drawRatio(self.rax)
    def drawDistribs(self, ax=None):
        """ Draw distributions on an axes object (by default the main axes associated to this plot) """
        if ax is None:
            ax = self.ax
        ## expected
        # NOTE(review): izip(*...) raises TypeError when self.expected has no entries -- confirm callers always fill it.
        exp_hists, exp_colors = izip(*((eh.hist.obj, eh.drawOpts.get("fill_color", "white")) for eh in self.expected.entries))
        ax.rhist(exp_hists, histtype="stepfilled", color=exp_colors, stacked=True)
        # hatched box: stat+syst uncertainty band on the expected total
        exp_statsyst = self.expected.getStatSystHisto()
        ax.rerrorbar(exp_statsyst, kind="box", hatch=8*"/", ec="none", fc="none")
        ## observed
        ax.rerrorbar(self.observed.stackTotal, kind="bar", fmt="ko")
    def drawRatio(self, ax=None):
        """ Draw the observed/expected ratio (by default on the ratio axes of this plot) """
        if ax is None:
            ax = self.rax
        ax.axhline(1., color="k") ## should be made optional, and take options for style (or use the grid settings)
        rx,ry,ryerr = h1u.divide(self.observed.stackTotal, self.expected.stackTotal)
        ax.errorbar(rx, ry, yerr=ryerr, fmt="ko")
        ## then systematics...
        # relative systematic band around unity
        exp_syst_rel = self.expected.getRelSystematicHisto()
        ax.rerrorbar(exp_syst_rel, kind="box", hatch=8*"/", ec="none", fc="none")
|
STACK_EDU
|
Cases came in and we’ve started shipping them Monday. Orders are flying out, and customers should start getting email notifications with tracking.
Kodi cases are shipping to us this Friday, and will go out as soon as they arrive. Because both cases are so different, their manufacturing processes are also distinctly different. One had to be done before the other.
Orders were supposed to start shipping out about a week ago. However, as soon as I got the first units back, I did a lot of thermal testing.
This new pi runs hot. I started testing with various benchmark scripts, python, etc. But results were generally inconsistent across tests. Much depended on the initial starting conditions. Should the pi have started cooler, I’d see much different results than if I had it running idle. If I just finished a test, results were different as well.
I tried other tests and settled on cpuburn-arm.
Results were consistent. I felt like I saw the most consistent and highest power draw, which means we could theoretically raise the temperature as quickly as possible.
Without any case, in 26-27C ambient, I got a naked raspberry pi to nearly 90C in 5-10 minutes. Okay great. Now with the Flirc case, I get it to 80C in 25 minutes. I saw the temp hang around 80C +/- 2C for 30 minutes. Seems like we hit steady state, but I think we can do better.
So we started studying the design, the gap, and the thermal material, and simulated. I ordered some new thermal pads and did some more tests that looked great. I got some arctic cool pads off amazon with a much higher thermal conductivity, and started testing.
In my lab/warehouse 26-27C which is really hot, it took 90 minutes of cpu burn before it got to 80C. Ambient matters. I ran another test on Monday, where it was 24-25C, and in 3 hours, it never went above 77C.
But let’s talk about something important. cpuburn is meant to burn as much power as possible, it’s not realistic of any situation. Yes, cpu utilization could get up to 100%, but that doesn’t mean the processor is going to burn the same power. Cpuburn is meant to try and draw the most power. Depending on the software that’s running, power will not only be different, but it will be dynamic. Even running cpuburn, on stopping, I saw a 1C drop every second. So in actuality, a real load alleviates the stress on the cpu, allowing it breathing time to cool.
I’m confident in the case and the new pad. Confident in saying that any strenuous situation will be fine without the need for active cooling or worrying about overheating.
I got in touch with arctic directly, and were shipping with their pads.
|
OPCFW_CODE
|
Metrics are the values that are calculated and presented in different views within Ignity. More information about all the different type of metrics you can use can be found here.
The first step is to add a source. A source is the database table where the results are stored. Data from multiple sources can be combined in one dashboard.
Add a source
There are 2 options to add a source:
Either Ignity creates a backend table in your dedicated cloud database >> to do this use the button ‘Add New Source’
You can connect to an already existing database table >>To do this use the button ‘Add Existing Source’.
When creating a backend table in your cloud database (option 1) you should provide a source name and check which defined attributes are available with the results.
When connecting to an already existing database (option 2) you should enter:
the database type (mySQL or Microsoft SQL server)
the host address of the database (and port if it is not the default port)
the database name
User and password to connect to the database
Default the connection to the database is made securely using the SSL/TLS protocol with a self signed certificate. You can also upload a certificate.
Once you have filled out the values you can click the button ‘Test connection’. If the info is right you should select a table from the database via the drop down list. This enables the ‘characteristics’ area. Here you assign the columns that hold the characteristic values. Click the button ‘Save changes’ to finalize the definition of a data source.
Once you added a source you can define metrics. Click on the button ‘Add metric’ (under the right source). Now you can select one of the following metrics:
With the metric ‘One value (under composite)’ you can define values after you defined (and selected) a ‘Composite value’.
If you select one of the given metrics you can enter the following information:
Column in database (only for an ‘existing source’)
Metric label >> the full name of the label
Short code >> unique shortcode for internal purposes (not visible to end users)
Source table column name (only for a ‘new source’ to be created in the cloud database)
Depending on the calculation type you can enter a selection of the following settings:
Target value (*optional)
NA (not applicable value)
Threshold value (*optional)
If you check the box ‘Explanation’, in the overview tab an information icon will appear next to the metric. If you click on it, you will get a (graphical) explanation of how the metric is calculated. Also the text you enter here is shown.
You can define that one or multiple free text comment is part of the metric. Here you can store text strings like comments. Just check the box ‘include free text comment for this metric’ and specify the title of the comment and the name of the column that should be created (or assign the right column if you choose an existing database)
Just click the plus and minus signs to add or delete comments
Press the button ‘save changes’ for every metric you define.
All defined metrics are shown in an overview
By double clicking a metric from the overview, the metric can be edited.
|
OPCFW_CODE
|
Generally, you found that this article is a feature on OSI models. Pay attention above the to the blog’s featured image; you shall see the network word labelled. So the whole thing is based on computer networks. When network word comes in our mind, it strikes the internet and all, connect one place to another. Yes, you are right. Now let us come to examples to make you understand faster.
Suppose you are using a computer and you want to send your movie’s file collection to your other drive. So you have movies in your C:/ drive, and you want to send your movie’s file to D:/ drive. In this scenario, C:/ became sender and D:/ became the receiver.
Another such example, suppose the sender or the receiver is not in the same place, but they should work together as both of them are in a single host. Like your workstation co-exists with hardware and software the same way it works between sender and receiver.
We can place hardware or software or both of them, to implement a connection between them as a single host. After implementing the connection between them, lot of problems may arise like, a sender can send unlimited data to the receiver, and we also don’t need over-limit, or at a time you have many systems to access, so you also need access control.
To control the error in the system, you also need error control. So there are almost 70 functionalities, which are essential and which may not be necessary in some cases. So those functionalities are implemented in hardware and software.
Suppose you need 70 responsibilities. You cannot implement 70 functionalities in the same place because there are other problems. Like among them, they can be dependent. In that case, groups are to perform and share the responsibilities. To perform the particular function divided into the given responsibility. Suppose in a real-world example, you build seven groups, and each group have 10 responsibilities. It creates an independent system and helps to sort the issues smoothly.
To make stable systems through national and worldwide systems must be generated which are compatible to interact with each other, there is an organization named ISO (International organization of Standardization) has built Open System Interconnection (OSI), and it is generally known as OSI model
The ISO-OSI model is a seven-layer architecture. It describes seven layers or levels in a perfect communication system.
ISO-OSI 7 layer network architecture:
|Layer No.||Layer’s Name|
|2.||Data Link Layer|
1. Physical Layer:
As its name indicates, it contains things which we can touch like your broadband cable. You can call it the source of Internet. The layer which has the medium to connect to the Internet like your router, dial-up modem, broadband cables, etc.
2. Data Link Layer:
This layer is the home of MAC Addresses (Media Access Control) and Logical Link Control (LLC). Logical Link Control identifies the protocols and helps in the transmission of data as the rules of the protocol in use. It plays a significant role in the LAN.
3. Network Layer:
Network Layer is where an IP Address reside. Everything which happens with TCP/UDP protocols occurs in this layer. So basically Network Layer moves packets from source to destination.
4. Transport Layer:
I call it Rulebook Of Packets, and this is what it means. As we have previously discussed in How Data Travels Over Network? That when we have to upload/download a file, it is first broken down into smaller parts called Data Packets. So this layer decides what should be the size of packets, how much should we wait before sending another packet, etc.
Deals with the “Transport Of Data.”
5. Session Layer:
When two computers are connected, they form a “Session.” The session may represent continuity like when you are talking on the phone its a session. It’s the reason you and the other person are interacting. This layer manages sessions; it starts sessions and terminates sessions. When you go to a website then you open a session with their server, you send and receive data, and all this happens in Session Layer.
6. Presentation Layer:
This layer represents tasks of Operating System. Network drivers reside here; this layer deals with the encryption/decryption and compression of the data as well as character encoding.
If you are using a VPN, the data gets encrypted/decrypted here.
7. Application Layer:
Application Layer is the closest layer to the user. This layer contains the program which is using the Internet. For example, your internet browser comes into layer seven, but it doesn’t connect to the Internet itself. Other six layers help it to send and receive data over the Internet.
When I was a beginner in this field I used to memorise those layers seven like.
Please do not throw sausage pizza away.
Means : P – 1, D – 2, N – 3, T – 4, S – 5, P – 6, A – 7.
For further reading, read this Wikipedia article
|
OPCFW_CODE
|
package attributeValueRemove;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
 * Rough-set style attribute-value reduction over a decision table.
 * For each row it searches for maximal sets of non-decision columns whose
 * values can be blanked out (set to 0) without making the row's decision
 * ambiguous against any other row in the table, then emits the reduced rows.
 */
public class AttributeValueRemove {
    // The decision table: each int[] is one row of attribute values.
    public List<int[]> table;
    // Indicator array: C[i] != 0 marks column i as a condition attribute
    // (only such columns are compared in isLineConditionEqual).
    public int[] C;
    // Indicator array: D[i] != 0 marks column i as a decision attribute
    // (compared in isLineDecisionEqual; never removed).
    public int[] D;

    /**
     * Builds and returns a new table in which, for every row, each surviving
     * maximal conflict-free set of removable columns has been zeroed out.
     * A single input row may expand into several reduced rows (one per set);
     * a row with no removable value is copied through unchanged.
     * Duplicates/subsumed rows are pruned via removeRepeat before returning.
     */
    public List<int[]> startRemoveValue(){
        List<int[]> newTable = new ArrayList<int[]>();
        for(int[] line:table){
            // Seed the work list with every single column that can be ignored
            // on its own without creating a decision conflict.
            List<Set<Integer>> stack =new ArrayList<Set<Integer>>();
            for(int i=0;i<line.length;i++){
                if(D[i]==1) continue; // never remove a decision column
                HashSet<Integer> tmp = new HashSet<Integer>();
                tmp.add(i);
                if(!isConflict(line,tmp)) stack.add(tmp);
            }
            // Greedily merge pairs of sets while their union stays conflict-free;
            // repeat until a fixed point (no merge succeeded in a full pass).
            boolean switcher = true;
            while(switcher){
                switcher = false;
                Set<Set<Integer>> adder = new HashSet<Set<Integer>>();
                List<Set<Integer>> remover = new ArrayList<Set<Integer>>();
                for(int i=0;i<stack.size()-1;i++){
                    for(int j=i+1;j<stack.size();j++){
                        Set<Integer> set1 = stack.get(i);
                        Set<Integer> set2 = stack.get(j);
                        Set<Integer> setU = union(set1, set2) ;
                        if(!isConflict(line,setU)){
                            adder.add(setU);
                            remover.add(set1);
                            remover.add(set2);
                            switcher = true;
                        }
                    }
                }
                // Mutate the work list only after the scan, so the indices
                // used above stay valid for the whole double loop.
                stack.removeAll(remover);
                stack.addAll(adder);
            }
            // Emit the reduced row(s): zero out each surviving index set, or
            // copy the row as-is when no value could be removed.
            if(stack.size()==0){
                int[] tmp = Arrays.copyOf(line, line.length);
                newTable.add(tmp);
            }else{
                for(Set<Integer> s:stack){
                    int[] tmp = Arrays.copyOf(line, line.length);
                    for(int i:s){
                        tmp[i] = 0;
                    }
                    newTable.add(tmp);
                }
            }
        }
        removeRepeat(newTable);
        return newTable;
    }

    /**
     * Installs a defensive deep copy of the given table (rows are copied,
     * so later mutations here cannot affect the caller's arrays), then
     * drops duplicate/subsumed rows.
     */
    public void setTable(List<int[]> table){
        this.table = new ArrayList<int[]>();
        for(int[] line: table){
            this.table.add(Arrays.copyOf(line, line.length));
        }
        removeRepeat(this.table);
    }

    /**
     * Returns true when ignoring the columns in {@code ignores} makes
     * {@code line} condition-indistinguishable from some table row that has
     * a DIFFERENT decision — i.e. removing those values would be unsafe.
     * Note: line is compared against every row, including itself, but a row
     * always has an equal decision to itself, so that comparison is harmless.
     */
    private boolean isConflict(int[] line,Set<Integer> ignores){
        for(int[] line2:table){
            if(!isLineConditionEqual(line,line2,ignores)) continue;
            if(!isLineDecisionEqual(line,line2)) return true;
        }
        return false;
    }

    // True when the two rows agree on every condition column (C[i] != 0)
    // that is not in the ignore set.
    private boolean isLineConditionEqual(int[] line1,int[] line2,Set<Integer> ignores){
        for(int i=0;i<line1.length;i++){
            if(!ignores.contains(i) && line1[i]!=line2[i] && C[i]!=0 ) return false;
        }
        return true;
    }

    // True when the two rows agree on every decision column (D[i] != 0).
    private boolean isLineDecisionEqual(int[] line1, int[] line2) {
        for(int i=0;i<line1.length;i++){
            if(line1[i]!=line2[i] && D[i]!=0 ) return false;
        }
        return true;
    }

    /**
     * Removes rows that are duplicates of (or, per arrayEquals, subsumed by)
     * another row. After each removal it jumps back to the labelled while and
     * restarts both iterators, avoiding ConcurrentModificationException.
     */
    public static void removeRepeat(List<int[]> newTable){
        l1:while(true){
            for(int[] line1:newTable){
                for(int[] line2:newTable){
                    if(line1!=line2 && arrayEquals(line1, line2)){
                        newTable.remove(line2);
                        continue l1;
                    }
                }
            }
            break;
        }
    }

    /**
     * Wildcard-aware row comparison: positions where line1 holds 0 match
     * anything in line2.
     * NOTE(review): this is deliberately asymmetric — a 0 in line1 is a
     * wildcard but a 0 in line2 is not; presumably so that a more general
     * (reduced) row subsumes a more specific one in removeRepeat. Confirm
     * against the intended reduction semantics.
     */
    private static boolean arrayEquals(int[] line1,int[] line2){
        if(line1.length!=line2.length) return false;
        for(int i=0;i<line1.length;i++){
            if(line1[i]!=line2[i] && line1[i]!= 0) return false;
        }
        return true;
    }

    /** Returns a fresh set containing the union of s1 and s2. */
    public static Set<Integer> union(Set<Integer> s1, Set<Integer> s2){
        Set<Integer> tmp = new HashSet<Integer>();
        tmp.addAll(s1);
        tmp.addAll(s2);
        return tmp;
    }
}
|
STACK_EDU
|
#Michael Schrandt 00616486
#12/6/2011
#Maps are read and drawn
import pygame
#Tile attributes
# RGB color constants used when rendering tiles.
BLACK = (0, 0, 0)
WHITE = (255,255,255)
RED = (200, 75, 0)    # NOTE(review): renders as a brownish orange despite the name
ORANGE = (255, 75, 0)
GREEN= (0, 175, 0)
BLUE = (0, 175, 250)
PURPLE = (175, 0, 175)
YELLOW = (220, 220, 0)
# Maps a tile id from the map file to its draw color (looked up in drawMap).
colors = {0:BLACK, 1:WHITE, 2:RED, 3:GREEN, 4:BLUE}
# Tile ids that block movement — presumably consumed by collision code elsewhere; TODO confirm.
collidable = (1,2,3)
#store the map in a 2-D array of integers
def readMap(inputFile):
    """Read a map file into a 2-D grid of ints.

    File layout: line 1 is the start position ("row,col"), line 2 is the end
    position, and every following line is one map row of space-separated
    tile ids.

    Returns (grid, start, end) where grid is a list of lists of ints and
    start/end are the raw comma-split string lists (not converted to int,
    matching the original contract — callers convert as needed).
    Returns (None, None, None) when the file cannot be opened.
    Raises ValueError if a tile token is not an integer.
    """
    try:
        # Only the open is guarded: an unreadable/missing file is the one
        # expected failure.  A bare except here would also have swallowed
        # KeyboardInterrupt and real bugs.
        f = open(inputFile)
    except OSError:
        return None, None, None
    # `with` guarantees the handle is closed even if parsing raises.
    with f:
        start = f.readline().split(',')
        end = f.readline().split(',')
        rows = f.readlines()
    grid = [[int(token) for token in row.split()] for row in rows]
    return grid, start, end
def printMap(myMap):
    """Print the map grid for debugging, one row per line.

    The original used the Python-2 trailing-comma idiom `print (x,)`, which
    under Python 3 prints every cell on its own line (and under Python 2
    printed tuple reprs).  This restores the intended grid layout:
    space-separated cells, newline per row.
    """
    for row in myMap:
        print(' '.join(str(cell) for cell in row))
#draw a 2-D array to the screen
def drawMap(screen, map, viewRect, end):
    """Draw the visible portion of the tile grid onto `screen`.

    Only tiles intersecting `viewRect` (plus a one-tile border) are drawn,
    each as a 31x31 rect colored by the `colors` table; the `end` tile
    (given as (row, col) pixel offsets) is highlighted in yellow.
    """
    # Cull to the visible tile range, clamped to the grid bounds so the
    # one-tile border never indexes past the array.
    firstRow = int(viewRect.top / 32)
    lastRow = min(int((viewRect.top + viewRect.height) / 32) + 1, len(map))
    firstCol = int(viewRect.left / 32)
    lastCol = min(int((viewRect.left + viewRect.width) / 32) + 1, len(map[0]))
    for row in range(firstRow, lastRow):
        for col in range(firstCol, lastCol):
            tileRect = [col * 32 - viewRect.left, row * 32 - viewRect.top, 31, 31]
            pygame.draw.rect(screen, colors.get(map[row][col]), tileRect, 0)
    # Mark the goal tile on top of whatever was drawn there.
    pygame.draw.rect(screen, YELLOW,
                     [end[1] - viewRect.left, end[0] - viewRect.top, 31, 31], 0)
def drawPath(screen, path, view):
    """Draw `path` (a sequence of nodes with .x/.y) as connected orange
    line segments, translated into screen space by the `view` rect."""
    for here, there in zip(path, path[1:]):
        startPoint = (here.x - view.left, here.y - view.top)
        endPoint = (there.x - view.left, there.y - view.top)
        pygame.draw.line(screen, ORANGE, startPoint, endPoint, 2)
|
STACK_EDU
|