content
stringlengths
7
2.61M
<filename>authentication/urls.py from django.urls import path from authentication.views import ( SignInView, SignUpView, SignOutView, PRView, PRDone, PRConfirm, PRComplete, PWDChangeView, PWDChangeDoneView, ) from django.urls import reverse_lazy from django.contrib.auth.views import ( PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView, PasswordChangeView, PasswordChangeDoneView, ) urlpatterns = [ path('', SignInView.as_view(), name='signin_view'), path('signup/', SignUpView.as_view(), name='signup_view'), path('signout/', SignOutView.as_view(), name='signout_view'), # path('password/reset/', PRView.as_view(), name='password_reset'), # path('password/reset/done/', PRDone.as_view() ,name='password_reset_done'), # path('password/reset/confirm/<uidb64>/<token>', PRConfirm.as_view() , name='password_reset_confirm'), # path('password/reset/complete/', PRComplete.as_view() , name='password_reset_complete'), path('password/reset/', PasswordResetView.as_view( email_template_name = 'authentication/password_reset_email.html', template_name = 'authentication/password_reset.html', ), name='password_reset'), path('password/reset/done/', PasswordResetDoneView.as_view( template_name = 'authentication/password_reset_done.html' ) , name='password_reset_done'), path('password/reset/confirm/<uidb64>/<token>', PasswordResetConfirmView.as_view( template_name = 'authentication/password_reset_confirm.html' ) , name='password_reset_confirm'), path('password/reset/complete/', PasswordResetCompleteView.as_view( template_name = 'authentication/password_reset_complete.html' ) , name='password_reset_complete'), # path( # 'password/change/', # PWDChangeView.as_view(), # name='password_change_view' # ), # path( # 'password/change/done/', # PWDChangeDoneView.as_view(), # name='password_change_done_view' # ), path( 'password/change/', PasswordChangeView.as_view( template_name = 'authentication/password_change.html', success_url = 
reverse_lazy('password_change_done_view') ), name='password_change_view' ), path( 'password/change/done/', PasswordResetDoneView.as_view( template_name = 'authentication/password_change_done.html' ), name='password_change_done_view' ) ] # PasswordResetView ->> Ask for Email # PasswordResetDoneView ->> Show him success email message # PasswordResetConfirmView ->> Ask to set a new password # PasswordResetCompleteView ->> Successfully set your password login
Differential NDR/LATS Interactions with the Human MOB Family Reveal a Negative Role for Human MOB2 in the Regulation of Human NDR Kinases ABSTRACT MOB proteins are integral components of signaling pathways controlling important cellular processes, such as mitotic exit, centrosome duplication, apoptosis, and cell proliferation in eukaryotes. The human MOB protein family consists of six distinct members (human MOB1A , -1B, -2, -3A, -3B, and -3C), with hMOB1A/B the best studied due to their putative tumor-suppressive functions through the regulation of NDR/LATS kinases. The roles of the other MOB proteins are less well defined. Accordingly, we characterized all six human MOB proteins in the context of NDR/LATS binding and their abilities to activate NDR/LATS kinases. hMOB3A/B/C proteins neither bind nor activate any of the four human NDR/LATS kinases. We found that both hMOB2 and hMOB1A bound to the N-terminal region of NDR1. However, our data suggest that the binding modes differ significantly. Our work revealed that hMOB2 competes with hMOB1A for NDR binding. hMOB2, in contrast to hMOB1A/B, is bound to unphosphorylated NDR. Moreover, RNA interference (RNAi) depletion of hMOB2 resulted in increased NDR kinase activity. Consistent with these findings, hMOB2 overexpression interfered with the functional roles of NDR in death receptor signaling and centrosome overduplication. In summary, our data indicate that hMOB2 is a negative regulator of human NDR kinases in biochemical and biological settings.
package com.deco2800.potatoes.util; import org.junit.Test; import java.awt.*; import java.util.Set; import static org.junit.Assert.*; public class GridUtilTest { @Test public void testFloodFill() { Integer[][] grid = { { 1, 2, 1, 1, 0 }, { 0, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0 }, { 1, 1, 0, 1, 0 }, { 0, 2, 0, 0, 0 } }; Set<Point> result = GridUtil.floodFill(grid, 1, 1); for (int x = 0; x < grid.length; x++) { for (int y = 0; y < grid.length; y++) { // All 0s except for corner if (grid[x][y] == 0 && !(x == 4 && y == 0)) { assertTrue("Expected point x:" + x + " y:" + y + " wasn't in result", result.contains(new Point(x, y))); } } } } @Test public void testFloodEdges() { Integer[][] grid = { { 1, 2, 2, 1, 0 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { 1, 0, 0, 0, 0 }, { 0, 2, 0, 0, 0 } }; int[][] expected = { { 0, 4 }, { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 }, { 1, 4 }, { 2, 0 }, { 2, 4 }, { 3, 1 }, { 3, 4 }, { 4, 2 }, { 4, 3 }, { 4, 4 } }; Set<Point> result = GridUtil.floodEdges(grid, 1, 1); assertEquals("Incorrect amount of edge points", expected.length, result.size()); for (int[] point : expected) { assertTrue("Point not in result x:" + point[0] + " y:" + point[1], result.contains(new Point(point[0], point[1]))); } } @Test public void testBlend() { float[][] grid1 = { { 1, 1, 1 }, { 2, 2, 2 }, { 3, 3, 3 } }; float[][] grid2 = { { 0.5f, 0.1f, 1 }, { 0.5f, 0.1f, 1 }, { 0.5f, 0.1f, 1 } }; float[][] blended = GridUtil.blend(grid1, grid2); assertEquals("Size was not correct", grid1.length, blended.length); assertEquals("Size was not correct", grid1[0].length, blended[0].length); } @Test public void testNormalize() { float[][] grid = { { 1, 2, 1 }, { 0, 0, 0 }, { 2, 2, 1 } }; float[][] result = { { 0.5f, 1, 0.5f }, { 0, 0, 0 }, { 1, 1, 0.5f } }; GridUtil.normalize(grid); for (int x = 0; x < result.length; x++) { assertArrayEquals("Row " + x + " was not normalized", result[x], grid[x], 0.00001f); } } @Test public void testSmoothDiamondSquareAlgorithm() { // Really just testing it 
terminates without errors float[][] result = GridUtil.smoothDiamondSquareAlgorithm(GridUtil.seedGrid(10), 0.6f, 3); assertEquals("X size was not correct", 10, result.length); assertEquals("Y size was not correct", 10, result[0].length); } }
EXCLUSIVE: Graham King and Tim Headington have made an exclusive first-look one year deal at Warner Bros, which puts GK Films back at the studio where he made Blood Diamond, the Oscar-winning The Departed, The Town and Dark Shadows. It’s also the studio that will release the Ben Affleck-directed Argo, which was one of the standout films that premiered at the Toronto Film Festival. GK co-financed that film and King is executive producer. King’s move is a surprise but an even bigger one is that he’s bringing Jersey Boys with him. King had a four-picture put deal at Sony Pictures, which released the Johnny Depp-Angelina Jolie-starrer The Tourist. Sony is where he originally set Jersey Boys, the movie based on the story of Frankie Valli and the Four Seasons. King is moving that film–which will be directed by Jon Favreau–over to Warner Bros to be first film under this new deal. I’m told that though it is a plum project–the stage musical is a giant hit–GK and Sony didn’t see eye to eye over the budget and that is why the project is moving.
package smallweb import ( "log" "net/http" ) func webRoute(w http.ResponseWriter, r *http.Request) { log.Println(r.URL) switch { case r.URL.Path == "/": index(w, r) case r.URL.Path == "/api" || r.URL.Path == "/api/": api(w, r) default: http.Error(w, "Page Not Fount", 404) return } }
def ApiSettings() -> _ApiSettings: return _ApiSettings()
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.datanode; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotSame; import java.io.IOException; import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService; import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestDataNodeMultipleRegistrations { private static final Log LOG = LogFactory.getLog(TestDataNodeMultipleRegistrations.class); Configuration conf; @Before public void setUp() throws Exception { conf = new HdfsConfiguration(); } /** * start multiple NNs and single DN and verifies per BP registrations and * handshakes. 
* * @throws IOException */ @Test public void test2NNRegistration() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2) .nameNodePort(9928).build(); try { cluster.waitActive(); NameNode nn1 = cluster.getNameNode(0); NameNode nn2 = cluster.getNameNode(1); assertNotNull("cannot create nn1", nn1); assertNotNull("cannot create nn2", nn2); String bpid1 = nn1.getFSImage().getBlockPoolID(); String bpid2 = nn2.getFSImage().getBlockPoolID(); String cid1 = nn1.getFSImage().getClusterID(); String cid2 = nn2.getFSImage().getClusterID(); int lv1 = nn1.getFSImage().getLayoutVersion(); int lv2 = nn2.getFSImage().getLayoutVersion(); int ns1 = nn1.getFSImage().getNamespaceID(); int ns2 = nn2.getFSImage().getNamespaceID(); assertNotSame("namespace ids should be different", ns1, ns2); LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress()); LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri=" + nn2.getNameNodeAddress()); // check number of volumes in fsdataset DataNode dn = cluster.getDataNodes().get(0); Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo(); assertNotNull("No volumes in the fsdataset", volInfos); int i = 0; for (VolumeInfo vi : volInfos) { LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace); } // number of volumes should be 2 - [data1, data2] assertEquals("number of volumes is wrong", 2, volInfos.size()); for (BPOfferService bpos : dn.getAllBpOs()) { LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name + "; sid=" + bpos.bpRegistration.storageID + "; nna=" + bpos.nnAddr); } BPOfferService bpos1 = dn.getAllBpOs()[0]; BPOfferService bpos2 = dn.getAllBpOs()[1]; // The order of bpos is not guaranteed, so fix the order if (bpos1.nnAddr.equals(nn2.getNameNodeAddress())) { BPOfferService tmp = bpos1; bpos1 = bpos2; bpos2 = tmp; } assertEquals("wrong nn address", bpos1.nnAddr, nn1.getNameNodeAddress()); assertEquals("wrong nn 
address", bpos2.nnAddr, nn2.getNameNodeAddress()); assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1); assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2); assertEquals("wrong cid", dn.getClusterId(), cid1); assertEquals("cid should be same", cid2, cid1); assertEquals("namespace should be same", bpos1.bpNSInfo.namespaceID, ns1); assertEquals("namespace should be same", bpos2.bpNSInfo.namespaceID, ns2); } finally { cluster.shutdown(); } } /** * starts single nn and single dn and verifies registration and handshake * * @throws IOException */ @Test public void testFedSingleNN() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nameNodePort(9927).build(); try { NameNode nn1 = cluster.getNameNode(); assertNotNull("cannot create nn1", nn1); String bpid1 = nn1.getFSImage().getBlockPoolID(); String cid1 = nn1.getFSImage().getClusterID(); int lv1 = nn1.getFSImage().getLayoutVersion(); LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress()); // check number of vlumes in fsdataset DataNode dn = cluster.getDataNodes().get(0); Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo(); assertNotNull("No volumes in the fsdataset", volInfos); int i = 0; for (VolumeInfo vi : volInfos) { LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace); } // number of volumes should be 2 - [data1, data2] assertEquals("number of volumes is wrong", 2, volInfos.size()); for (BPOfferService bpos : dn.getAllBpOs()) { LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name + "; sid=" + bpos.bpRegistration.storageID + "; nna=" + bpos.nnAddr); } // try block report BPOfferService bpos1 = dn.getAllBpOs()[0]; bpos1.lastBlockReport = 0; bpos1.blockReport(); assertEquals("wrong nn address", bpos1.nnAddr, nn1.getNameNodeAddress()); assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1); assertEquals("wrong cid", dn.getClusterId(), cid1); cluster.shutdown(); // Ensure all the 
BPOfferService threads are shutdown assertEquals(0, dn.getAllBpOs().length); cluster = null; } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testClusterIdMismatch() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2). nameNodePort(9928).build(); try { cluster.waitActive(); DataNode dn = cluster.getDataNodes().get(0); BPOfferService [] bposs = dn.getAllBpOs(); LOG.info("dn bpos len (should be 2):" + bposs.length); Assert.assertEquals("should've registered with two namenodes", bposs.length,2); // add another namenode cluster.addNameNode(conf, 9938); bposs = dn.getAllBpOs(); LOG.info("dn bpos len (should be 3):" + bposs.length); Assert.assertEquals("should've registered with three namenodes", bposs.length,3); // change cluster id and another Namenode StartupOption.FORMAT.setClusterId("DifferentCID"); cluster.addNameNode(conf, 9948); NameNode nn4 = cluster.getNameNode(3); assertNotNull("cannot create nn4", nn4); bposs = dn.getAllBpOs(); LOG.info("dn bpos len (still should be 3):" + bposs.length); Assert.assertEquals("should've registered with three namenodes", 3, bposs.length); } finally { if(cluster != null) cluster.shutdown(); } } @Test public void testMiniDFSClusterWithMultipleNN() throws IOException { Configuration conf = new HdfsConfiguration(); // start Federated cluster and add a node. MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numNameNodes(2). nameNodePort(9928).build(); Assert.assertNotNull(cluster); Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes()); // add a node cluster.addNameNode(conf, 9929); Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes()); cluster.shutdown(); // 2. start with Federation flag set conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).federation(true). 
nameNodePort(9928).build(); Assert.assertNotNull(cluster); Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes()); // add a node cluster.addNameNode(conf, 9929); Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes()); cluster.shutdown(); // 3. start non-federated conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).build(); Assert.assertNotNull(cluster); Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes()); // add a node try { cluster.addNameNode(conf, 9929); Assert.fail("shouldn't be able to add another NN to non federated cluster"); } catch (IOException e) { // correct Assert.assertTrue(e.getMessage().startsWith("cannot add namenode")); Assert.assertEquals("(3)Should be 1 namenodes", 1, cluster.getNumNameNodes()); } finally { cluster.shutdown(); } } }
Support for spatial planning and e-government The presented work investigates process models for participatory spatial decision processes, especially focusing on IT-supported procedures. Results of this work are process patterns according to different classes of problem complexity, accompanied by operational instructions for conducting the procedure, and requirements for the supporting software tools. The aim of this paper is to explain the need, development and application of an integrated discourse system, which can be applied to participative spatial decision processes in e-government. It describes two experiments in the field of e-participation, from which schemes of IT-support for participative spatial decision processes are derived.
<filename>setup.py """ raven-aiohttp ============= A transport for `raven-python <https://github.com/getsentry/raven-python>`_ which supports Python 3's asyncio interface. :copyright: (c) 2015 Functional Software, Inc :license: BSD, see LICENSE for more details. """ import io import os import re import sys from setuptools import setup from setuptools.command.test import test as TestCommand def get_version(): regex = r"__version__\s=\s\'(?P<version>.+?)\'" return re.search(regex, read('raven_aiohttp.py')).group('version') def read(*parts): filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts) with io.open(filename, encoding='utf-8', mode='rt') as fp: return fp.read() tests_require = [ 'flake8', 'isort', 'pytest', 'pytest-asyncio<0.6.0', # to support Python 3.5- 'pytest-cov', 'pytest-mock' ] install_requires = [ 'aiohttp>=2.0', 'raven>=5.4.0', ] class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='raven-aiohttp', version=get_version(), author='<NAME>', author_email='<EMAIL>', url='https://github.com/getsentry/raven-aiohttp', description='An asyncio transport for raven-python', long_description=read('README.rst'), py_modules=['raven_aiohttp'], zip_safe=False, install_requires=install_requires, extras_require={ 'test': tests_require, }, cmdclass={ 'test': PyTest, }, license='BSD', classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 
3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3 :: Only', 'Topic :: Software Development', ], )
<gh_stars>0 from keras_segmentation.models.all_models import model_from_name from imgaug import augmenters as iaa from keras_segmentation.predict import model_from_checkpoint_path ################# Configure Here ################ import os os.environ['CUDA_VISIBLE_DEVICES'] = "1" checkpoints_saving_path = "checkpoints/cow_iris_3/set" dataset_abbr = "set" path_base = "/home/heemoon/Desktop/0_DATABASE/3_IRIS/cow/" inp_images_path = path_base+ "rgb/test" inp_annotations_path = path_base + "iii_format" model_list = [ "fcn_16_vgg", "vgg_unet", "vgg_segnet", "fcn_32_vgg", "fcn_8_vgg", "fcn_16_resnet50", "resnet50_unet", "resnet50_segnet", "fcn_32_resnet50", "fcn_8_resnet50", "fcn_8_mobilenet", "fcn_16_mobilenet", "mobilenet_unet", "mobilenet_segnet", "fcn_32_mobilenet", # "pspnet", # core dump error # "vgg_pspnet", # core dump error # "resnet50_pspnet", # core dump error # "pspnet_50", # big size over 11GB # "pspnet_101", # "unet_mini", # "unet", # "segnet", ] DO_Augment = True def custom_augmentation(): return iaa.Sequential( [ # apply the following augmenters to most images # https://imgaug.readthedocs.io/en/latest/source/overview/arithmetic.html iaa.AddToBrightness((-10, 10)), #iaa.CropAndPad(percent=(-0.25, 0.25)), #iaa.ContrastNormalization(0.5), #iaa.AllChannelsHistogramEqualization(), iaa.Affine(rotate=(-40, 40)) ]) ################################################ f = open(dataset_abbr+"_test_result_.txt", "w") for model_name in model_list: for i in range(1,6): #get model file name model_file_name = model_name+"_"+dataset_abbr+str(i) # model define print("------------ Define Model:"+model_file_name+" ------------") try: # evaluating the model model = model_from_checkpoint_path(checkpoints_saving_path+str(i)+"/"+model_file_name) result = model.evaluate_segmentation( inp_images_dir=inp_images_path, annotations_dir=inp_annotations_path ) f.write(model_file_name+"==========="+str(result)+"\n") print("==============="+model_file_name+"==============="+"\n"+str(result)) 
except Exception as e: print("Error: "+model_file_name+"\n",e) f.write("Error: "+model_file_name+"==========="+"\n") f.write("\n") f.close() print("end of evaluation.py")
Myocardial perfusion and left ventricular performance during exercise-induced ST-segment depression in apparently healthy subjects. Ischemic-like ST-segment depression seen during exercise in apparently healthy subjects has previously been noted, but the cause of this change is unknown. The aim of this study was to investigate the pathophysiology of this electrocardiographic change. Ten healthy subjects who developed an electrocardiographic "ischemic" pattern of ST change during treadmill exercise testing were studied. All subjects underwent both thallium-201 myocardial perfusion imaging and radionuclide angiocardiography at rest and during exercise at a time when abnormal ST changes appeared, and demonstrated a normal homogeneous pattern of thallium-201 distribution on both rest and exercise images. Overall, left ventricular ejection fraction rose from 0.60 +/- 0.06 (mean +/- SD) at rest to 0.65 +/- 0.07 with exercise. None of the subjects had regional wall motion abnormalities at rest or during exercise. These results are different from the findings observed in patients with coronary heart disease and angina pectoris in whom regional abnormalities in both perfusion and left ventricular performance have been noted during exercise. Therefore it would seem that myocardial ischemia is not likely to be a tenable explanation for the electrocardiographic "ischemic" changes in these apparently healthy subjects.
The Role of Public Administration and Bureaucracy in Thailand To restore the Thai economy within a reasonable period, the Thai government needs a continuous improvement or innovation of both public administration and bureaucracy. Since 1958 Thai public administration has been accepted a big role for the nation's modernization and has executed the goals of social and economic development. This study identifies the current status of not the only roles and functions of Thai public administration and bureaucracy, but also the Thai governmental administrative system both as part of its decisionmaking apparatus and in order to carry out those decisions. Additionally, this study examines the necessity of Thai governmental reform to improve productivity in public organizations.
<gh_stars>1000+ package multierror import ( "io" ) // ErrorFormat error print format definition type ErrorFormat func([]error, io.Writer) var ( multilinePrefix = []byte("the following errors occurred:") multilineSeparator = []byte("\n -- ") ) // MultilineFormat error print format func MultilineFormat(errs []error, w io.Writer) { w.Write(multilinePrefix) for _, err := range errs { w.Write(multilineSeparator) io.WriteString(w, err.Error()) } }
export * from './auth-client-ext.repository'; export * from './role-ext.repository'; export * from './tenant-ext.repository'; export * from './user-credentials-ext.repository'; export * from './user-level-permission-ext.repository'; export * from './user-tenant-ext.repository'; export * from './user-ext.repository'; export * from './user-level-resource.repository'; export * from './to-do.repository';
<gh_stars>0 import React, { CSSProperties, ReactNode } from "react"; import ReactDOM from "react-dom"; import ClickAwayLayer from "../../src/ClickAwayLayer"; import ClickAwayListener from "../../src/ClickAwayListener"; import StopPropagation from "../../src/StopPropagation"; import { useActions } from "../components/Actions"; import Block from "../components/Block"; import { blue, blueTransparent, orange, pink, purple, } from "../components/colors"; export default { title: "Stories/Propagation", decorators: [ (Story) => ( <ClickAwayLayer root> <div> <Story /> </div> </ClickAwayLayer> ), ], }; const Portal: React.FC<{ children: ReactNode }> = ({ children }) => { return ReactDOM.createPortal(children, document.body); }; const buttonStyle1 = { marginTop: 12, marginBottom: 24, marginLeft: 12, }; export const Scenario1 = () => { const { action } = useActions(); return ( <> <Block label="Outside" color={blue}> <ClickAwayListener onClickAway={action(`Clicked outside of ${orange} block`)} > <Block label="Inside" color={orange} animateClicks> <button onClick={(event) => { event.stopPropagation(); event.nativeEvent.stopImmediatePropagation(); }} style={buttonStyle1} > Button </button> </Block> </ClickAwayListener> <button onClick={(event) => { event.stopPropagation(); event.nativeEvent.stopImmediatePropagation(); }} style={buttonStyle1} > Button </button> </Block> </> ); }; const outsideStyle: CSSProperties = { height: 140, width: 450, }; const purpleBlockStyle: CSSProperties = { position: "absolute", left: 180, top: 40, height: (outsideStyle.height as number) - 50, width: 190, }; const pinkBlockStyle: CSSProperties = { position: "absolute", left: 320, top: 60, height: (outsideStyle.height as number) - 90, zIndex: 2, }; export const Scenario2 = () => { const { action } = useActions(); return ( <> <Block label="Outside" color={blue} style={outsideStyle}> <Portal> <ClickAwayListener onClickAway={action(`Clicked outside of ${purple} block`)} > <Block label="Block" 
color={purple} animateClicks style={purpleBlockStyle} > <Portal> <StopPropagation all> <ClickAwayListener onClickAway={action(`Clicked outside of ${pink} block`)} > <Block label="Portaled block" color={pink} animateClicks style={pinkBlockStyle} /> </ClickAwayListener> </StopPropagation> </Portal> </Block> </ClickAwayListener> </Portal> </Block> </> ); }; Scenario2.parameters = { docs: { inlineStories: false, iframeHeight: 230 }, }; export const Scenario3 = () => { const { action } = useActions(); return ( <> <ClickAwayListener onClickAway={action(`Clicked outside of ${orange} block`)} > <Block label="Underneath" color={orange} animateClicks invertArrowColor /> </ClickAwayListener> <ClickAwayLayer> <StopPropagation all> <Block label="Overlay" color={blueTransparent} style={{ left: -80 }}> <ClickAwayListener onClickAway={action(`Clicked outside of ${purple} block`)} > <Block label="Inside" color={purple} animateClicks style={{ marginLeft: 120 }} invertArrowColor /> </ClickAwayListener> </Block> </StopPropagation> </ClickAwayLayer> </> ); }; const buttonStyle4 = { marginTop: 12, marginRight: 24, }; export const Scenario4 = () => { const { action } = useActions(); return ( <> <div style={{ marginBottom: 24 }}> <button onClick={(event) => { event.stopPropagation(); }} style={buttonStyle4} > Button calling <code>stopPropagation</code> </button> <button onMouseUp={(event) => { event.stopPropagation(); event.nativeEvent.stopImmediatePropagation(); }} style={buttonStyle4} > Button calling <code>stopImmediatePropagation</code> </button> </div> <Block label="Outside" color={blue}> <ClickAwayListener onClickAway={action(`Clicked outside of ${orange} block`)} > <Block label="Block" color={orange} animateClicks /> </ClickAwayListener> </Block> </> ); };
/**
 * Flow rule manager for network statistics of a VM.
 *
 * Installs flow rules that collect per-flow statistics on OpenStack
 * integration bridges, gathers overlay/underlay flow and port statistics,
 * and periodically publishes them through the telemetry service.
 */
@Component(immediate = true)
@Service
public class StatsFlowRuleManager implements StatsFlowRuleAdminService {

    private final Logger log = LoggerFactory.getLogger(getClass());

    private static final byte FLOW_TYPE_SONA = 1; // VLAN

    // scheduling parameters for the periodic telemetry publisher
    private static final long MILLISECONDS = 1000L;
    private static final long INITIAL_DELAY = 5L;
    private static final long REFRESH_INTERVAL = 5L;
    private static final TimeUnit TIME_UNIT_SECOND = TimeUnit.SECONDS;

    // component configuration property keys
    private static final String REVERSE_PATH_STATS = "reversePathStats";
    private static final String EGRESS_STATS = "egressStats";
    private static final String PORT_STATS = "portStats";
    private static final String MONITOR_OVERLAY = "monitorOverlay";
    private static final String MONITOR_UNDERLAY = "monitorUnderlay";
    private static final String OVS_DRIVER_NAME = "ovs";

    // defaults for the configuration properties above
    private static final boolean DEFAULT_REVERSE_PATH_STATS = false;
    private static final boolean DEFAULT_EGRESS_STATS = false;
    private static final boolean DEFAULT_PORT_STATS = true;
    private static final boolean DEFAULT_MONITOR_OVERLAY = true;
    private static final boolean DEFAULT_MONITOR_UNDERLAY = true;

    // placeholder endpoint values used when one side of a flow is unknown
    private static final String ARBITRARY_IP = "0.0.0.0/32";
    private static final int ARBITRARY_LENGTH = 32;
    private static final String ARBITRARY_MAC = "00:00:00:00:00:00";
    private static final IpAddress NO_HOST_IP = IpAddress.valueOf("255.255.255.255");
    private static final MacAddress NO_HOST_MAC = MacAddress.valueOf(ARBITRARY_MAC);
    private static final int ARBITRARY_IN_INTF = 0;
    private static final int ARBITRARY_OUT_INTF = 0;

    // re-schedule the collector task even after it throws
    private static final boolean RECOVER_FROM_FAILURE = true;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected CoreService coreService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected FlowRuleService flowRuleService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected HostService hostService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected DeviceService deviceService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected DriverService driverService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected ComponentConfigService componentConfigService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected MastershipService mastershipService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected OpenstackNetworkService osNetworkService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected InstancePortService instPortService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected OpenstackNodeService osNodeService;

    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
    protected OpenstackTelemetryService telemetryService;

    @Property(name = REVERSE_PATH_STATS, boolValue = DEFAULT_REVERSE_PATH_STATS,
            label = "A flag which indicates whether to install the rules for " +
                    "collecting the flow-based stats for reversed path.")
    private boolean reversePathStats = DEFAULT_REVERSE_PATH_STATS;

    @Property(name = EGRESS_STATS, boolValue = DEFAULT_EGRESS_STATS,
            label = "A flag which indicates whether to install the rules for " +
                    "collecting the flow-based stats for egress port.")
    private boolean egressStats = DEFAULT_EGRESS_STATS;

    @Property(name = PORT_STATS, boolValue = DEFAULT_PORT_STATS,
            label = "A flag which indicates whether to collect port TX & RX stats.")
    private boolean portStats = DEFAULT_PORT_STATS;

    @Property(name = MONITOR_OVERLAY, boolValue = DEFAULT_MONITOR_OVERLAY,
            label = "A flag which indicates whether to monitor overlay network port stats.")
    private boolean monitorOverlay = DEFAULT_MONITOR_OVERLAY;

    @Property(name = MONITOR_UNDERLAY, boolValue = DEFAULT_MONITOR_UNDERLAY,
            label = "A flag which indicates whether to monitor underlay network port stats.")
    private boolean monitorUnderlay = DEFAULT_MONITOR_UNDERLAY;

    private ApplicationId telemetryAppId;
    private TelemetryCollector collector;
    private ScheduledFuture result;

    // previously seen flows, used by mergeFlowInfo() to carry stats deltas forward
    private final Set<FlowInfo> gFlowInfoSet = Sets.newHashSet();
    // per-flow bounded history of published FlowInfo samples, keyed by flow key
    private final Map<String, Queue<FlowInfo>> flowInfoMap = Maps.newConcurrentMap();

    // priority bands used so source- and target-side stats rules never collide
    private static final int SOURCE_ID = 1;
    private static final int TARGET_ID = 2;
    private static final int PRIORITY_BASE = 10000;
    private static final int METRIC_PRIORITY_SOURCE = SOURCE_ID * PRIORITY_BASE;
    private static final int METRIC_PRIORITY_TARGET = TARGET_ID * PRIORITY_BASE;

    @Activate
    protected void activate() {
        telemetryAppId = coreService.registerApplication(OPENSTACK_TELEMETRY_APP_ID);
        componentConfigService.registerProperties(getClass());
        start();
        log.info("Started");
    }

    @Deactivate
    protected void deactivate() {
        componentConfigService.unregisterProperties(getClass(), false);
        // remove every stats rule this application installed
        flowRuleService.removeFlowRulesById(telemetryAppId);
        stop();
        log.info("Stopped");
    }

    @Modified
    protected void modified(ComponentContext context) {
        readComponentConfiguration(context);
        log.info("Modified");
    }

    @Override
    public void start() {
        log.info("Start publishing thread");
        collector = new TelemetryCollector();
        result = SharedScheduledExecutors.getSingleThreadExecutor()
                .scheduleAtFixedRate(collector, INITIAL_DELAY, REFRESH_INTERVAL,
                        TIME_UNIT_SECOND, RECOVER_FROM_FAILURE);
    }

    @Override
    public void stop() {
        log.info("Stop data publishing thread");
        result.cancel(true);
        collector = null;
    }

    @Override
    public void createStatFlowRule(StatsFlowRule statsFlowRule) {
        setStatFlowRule(statsFlowRule, true);
    }

    @Override
    public void deleteStatFlowRule(StatsFlowRule statsFlowRule) {
        setStatFlowRule(statsFlowRule, false);
    }

    @Override
    public Map<String, Queue<FlowInfo>> getFlowInfoMap() {
        return flowInfoMap;
    }

    @Override
    public Set<FlowInfo> getUnderlayFlowInfos() {
        Set<FlowInfo> flowInfos = Sets.newConcurrentHashSet();
        for (Device device : getUnderlayDevices()) {
            // only edge switches (those with attached hosts) are inspected
            if (!isEdgeSwitch(device.id())) {
                continue;
            }
            for (FlowEntry entry : flowRuleService.getFlowEntries(device.id())) {
                FlowInfo.Builder fBuilder = new DefaultFlowInfo.DefaultBuilder();
                TrafficSelector selector = entry.selector();
                Criterion inPort = selector.getCriterion(Criterion.Type.IN_PORT);
                Criterion dstIpCriterion = selector.getCriterion(Criterion.Type.IPV4_DST);
                if (inPort != null && dstIpCriterion != null) {
                    // derive the source endpoint from the host attached to the IN port
                    IpAddress srcIp = getIpAddress(device, (PortCriterion) inPort);
                    IpAddress dstIp = ((IPCriterion) dstIpCriterion).ip().address();
                    if (srcIp == null) {
                        continue;
                    }
                    fBuilder.withFlowType(FLOW_TYPE_SONA)
                            .withSrcIp(IpPrefix.valueOf(srcIp, ARBITRARY_LENGTH))
                            .withDstIp(IpPrefix.valueOf(dstIp, ARBITRARY_LENGTH))
                            .withSrcMac(getMacAddress(srcIp))
                            .withDstMac(getMacAddress(dstIp))
                            .withInputInterfaceId(getInterfaceId(srcIp))
                            .withOutputInterfaceId(getInterfaceId(dstIp))
                            .withDeviceId(entry.deviceId());
                    StatsInfo.Builder sBuilder = new DefaultStatsInfo.DefaultBuilder();
                    // NOTE(review): casting packet counts to int may truncate on
                    // very long-lived flows -- confirm acceptable upstream.
                    sBuilder.withStartupTime(System.currentTimeMillis())
                            .withFstPktArrTime(System.currentTimeMillis())
                            .withLstPktOffset((int) (REFRESH_INTERVAL * MILLISECONDS))
                            .withCurrAccPkts((int) entry.packets())
                            .withCurrAccBytes(entry.bytes())
                            .withErrorPkts((short) 0)
                            .withDropPkts((short) 0);
                    fBuilder.withStatsInfo(sBuilder.build());
                    FlowInfo flowInfo = mergeFlowInfo(fBuilder.build(), fBuilder, sBuilder);
                    flowInfos.add(flowInfo);
                }
            }
        }
        return flowInfos;
    }

    @Override
    public Set<FlowInfo> getOverlayFlowInfos() {
        Set<FlowInfo> flowInfos = Sets.newConcurrentHashSet();
        // obtain all flow rule entries installed by telemetry app
        for (FlowEntry entry : flowRuleService.getFlowEntriesById(telemetryAppId)) {
            FlowInfo.Builder fBuilder = new DefaultFlowInfo.DefaultBuilder();
            TrafficSelector selector = entry.selector();
            IPCriterion srcIp = (IPCriterion) selector.getCriterion(IPV4_SRC);
            IPCriterion dstIp = (IPCriterion) selector.getCriterion(IPV4_DST);
            IPProtocolCriterion ipProtocol =
                    (IPProtocolCriterion) selector.getCriterion(IP_PROTO);
            fBuilder.withFlowType(FLOW_TYPE_SONA)
                    .withSrcIp(srcIp.ip())
                    .withDstIp(dstIp.ip());
            if (ipProtocol != null) {
                fBuilder.withProtocol((byte) ipProtocol.protocol());
                // TCP/UDP rules also carry L4 port match criteria
                if (ipProtocol.protocol() == PROTOCOL_TCP) {
                    TcpPortCriterion tcpSrc =
                            (TcpPortCriterion) selector.getCriterion(TCP_SRC);
                    TcpPortCriterion tcpDst =
                            (TcpPortCriterion) selector.getCriterion(TCP_DST);
                    fBuilder.withSrcPort(tcpSrc.tcpPort());
                    fBuilder.withDstPort(tcpDst.tcpPort());
                } else if (ipProtocol.protocol() == PROTOCOL_UDP) {
                    UdpPortCriterion udpSrc =
                            (UdpPortCriterion) selector.getCriterion(UDP_SRC);
                    UdpPortCriterion udpDst =
                            (UdpPortCriterion) selector.getCriterion(UDP_DST);
                    fBuilder.withSrcPort(udpSrc.udpPort());
                    fBuilder.withDstPort(udpDst.udpPort());
                } else {
                    log.debug("Other protocol: {}", ipProtocol.protocol());
                }
            }
            fBuilder.withSrcMac(getMacAddress(srcIp.ip().address()))
                    .withDstMac(getMacAddress(dstIp.ip().address()))
                    .withInputInterfaceId(getInterfaceId(srcIp.ip().address()))
                    .withOutputInterfaceId(getInterfaceId(dstIp.ip().address()))
                    .withVlanId(getVlanId(srcIp.ip().address()))
                    .withDeviceId(entry.deviceId());
            StatsInfo.Builder sBuilder = new DefaultStatsInfo.DefaultBuilder();
            sBuilder.withStartupTime(System.currentTimeMillis())
                    .withFstPktArrTime(System.currentTimeMillis())
                    .withLstPktOffset((int) (REFRESH_INTERVAL * MILLISECONDS))
                    .withCurrAccPkts((int) entry.packets())
                    .withCurrAccBytes(entry.bytes())
                    .withErrorPkts((short) 0)
                    .withDropPkts((short) 0);
            fBuilder.withStatsInfo(sBuilder.build());
            FlowInfo flowInfo = mergeFlowInfo(fBuilder.build(), fBuilder, sBuilder);
            flowInfos.add(flowInfo);
            log.debug("FlowInfo: \n{}", flowInfo.toString());
        }
        return flowInfos;
    }

    /**
     * Gets a set of flow infos by referring to overlay destination VM port.
* @return flow infos
     */
    private Set<FlowInfo> getOverlayDstPortBasedFlowInfos() {
        Set<FlowInfo> flowInfos = Sets.newConcurrentHashSet();
        // all known instance (VM) port numbers
        Set<PortNumber> instPortNums = instPortService.instancePorts()
                .stream()
                .map(InstancePort::portNumber)
                .collect(Collectors.toSet());
        // integration bridges of every fully-bootstrapped compute node
        Set<DeviceId> deviceIds = osNodeService.completeNodes(COMPUTE)
                .stream()
                .map(OpenstackNode::intgBridge)
                .collect(Collectors.toSet());
        deviceIds.forEach(d -> {
            // keep only the port stats that belong to VM ports
            List<PortStatistics> stats = deviceService.getPortStatistics(d)
                    .stream()
                    .filter(s -> instPortNums.contains(s.portNumber()))
                    .collect(Collectors.toList());
            stats.forEach(s -> {
                InstancePort instPort = getInstancePort(d, s.portNumber());
                flowInfos.add(buildTxFlowInfoFromInstancePort(instPort, s));
                flowInfos.add(buildRxFlowInfoFromInstancePort(instPort, s));
            });
        });
        return flowInfos;
    }

    /**
     * Gets a set of flow infos by referring to underlay destination port.
     *
     * @return flow infos
     */
    private Set<FlowInfo> getUnderlayDstPortBasedFlowInfos() {
        Set<FlowInfo> flowInfos = Sets.newConcurrentHashSet();
        for (Device d : getUnderlayDevices()) {
            List<PortStatistics> stats =
                    new ArrayList<>(deviceService.getPortStatistics(d.id()));
            stats.forEach(s -> {
                // only ports with a directly attached host are reported
                Host host = hostService.getConnectedHosts(new ConnectPoint(d.id(), s.portNumber()))
                        .stream().findFirst().orElse(null);
                if (host != null) {
                    flowInfos.add(buildTxFlowInfoFromHost(host, s));
                    flowInfos.add(buildRxFlowInfoFromHost(host, s));
                }
            });
        }
        return flowInfos;
    }

    /**
     * Obtains a set of device instances which construct underlay network.
* @return a set of device instances
     */
    private Set<Device> getUnderlayDevices() {
        Set<Device> underlayDevices = Sets.newConcurrentHashSet();
        // overlay switches are the integration bridges of non-controller nodes;
        // everything else among the available switches is treated as underlay
        Set<DeviceId> overlayDeviceIds = osNodeService.completeNodes()
                .stream()
                .filter(n -> n.type() != CONTROLLER)
                .map(OpenstackNode::intgBridge)
                .collect(Collectors.toSet());
        for (Device d : deviceService.getAvailableDevices(SWITCH)) {
            if (overlayDeviceIds.contains(d.id())) {
                continue;
            }
            underlayDevices.add(d);
        }
        return underlayDevices;
    }

    /**
     * Checks whether the given drivers contains OVS driver.
     * NOTE(review): appears unused within this class -- kept for API/debug use.
     *
     * @param drivers a set of drivers
     * @return true if the given drivers contain any OVS driver, false otherwise
     */
    private boolean hasOvsDriver(List<Driver> drivers) {
        for (Driver driver : drivers) {
            if (OVS_DRIVER_NAME.equals(driver.name())) {
                return true;
            }
        }
        return false;
    }

    /**
     * Obtains the flow info generated by TX port from instance port.
     *
     * @param instPort instance port
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildTxFlowInfoFromInstancePort(InstancePort instPort,
                                                     PortStatistics stat) {
        return buildTxFlowInfo(instPort.ipAddress(), instPort.macAddress(),
                instPort.deviceId(), stat);
    }

    /**
     * Obtains the flow info generated from RX port from instance port.
     *
     * @param instPort instance port
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildRxFlowInfoFromInstancePort(InstancePort instPort,
                                                     PortStatistics stat) {
        return buildRxFlowInfo(instPort.ipAddress(), instPort.macAddress(),
                instPort.deviceId(), stat);
    }

    /**
     * Obtains the flow info generated by TX port from host.
     *
     * @param host host
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildTxFlowInfoFromHost(Host host, PortStatistics stat) {
        IpAddress ip = host.ipAddresses().stream().findFirst().orElse(null);
        if (ip != null) {
            return buildTxFlowInfo(ip, host.mac(), host.location().deviceId(), stat);
        }
        // host without any IP address yields no flow info
        return null;
    }

    /**
     * Obtains the flow info generated by RX port from host.
* @param host host
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildRxFlowInfoFromHost(Host host, PortStatistics stat) {
        IpAddress ip = host.ipAddresses().stream().findFirst().orElse(null);
        if (ip != null) {
            return buildRxFlowInfo(ip, host.mac(), host.location().deviceId(), stat);
        }
        // host without any IP address yields no flow info
        return null;
    }

    /**
     * Obtains the flow info generated from TX port.
     *
     * @param ipAddress IP address
     * @param macAddress MAC address
     * @param deviceId device identifier
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildTxFlowInfo(IpAddress ipAddress, MacAddress macAddress,
                                     DeviceId deviceId, PortStatistics stat) {
        FlowInfo.Builder fBuilder = new DefaultFlowInfo.DefaultBuilder();
        // the remote endpoint is unknown for port-based stats, so placeholder
        // destination values are used
        fBuilder.withFlowType(FLOW_TYPE_SONA)
                .withSrcIp(IpPrefix.valueOf(ipAddress, ARBITRARY_LENGTH))
                .withDstIp(IpPrefix.valueOf(ARBITRARY_IP))
                .withSrcMac(macAddress)
                .withDstMac(NO_HOST_MAC)
                .withDeviceId(deviceId)
                .withInputInterfaceId(ARBITRARY_IN_INTF)
                .withOutputInterfaceId(ARBITRARY_OUT_INTF)
                .withVlanId(VlanId.vlanId());
        StatsInfo.Builder sBuilder = new DefaultStatsInfo.DefaultBuilder();
        // NOTE(review): error/drop counters are narrowed to short -- confirm
        // truncation is acceptable for long-running ports.
        sBuilder.withStartupTime(System.currentTimeMillis())
                .withFstPktArrTime(System.currentTimeMillis())
                .withLstPktOffset((int) (REFRESH_INTERVAL * MILLISECONDS))
                .withCurrAccPkts((int) stat.packetsSent())
                .withCurrAccBytes(stat.bytesSent())
                .withErrorPkts((short) stat.packetsTxErrors())
                .withDropPkts((short) stat.packetsTxDropped());
        fBuilder.withStatsInfo(sBuilder.build());
        return mergeFlowInfo(fBuilder.build(), fBuilder, sBuilder);
    }

    /**
     * Obtains the flow info generated from RX port.
* @param ipAddress IP address
     * @param macAddress MAC address
     * @param deviceId Device identifier
     * @param stat port statistics
     * @return flow info
     */
    private FlowInfo buildRxFlowInfo(IpAddress ipAddress, MacAddress macAddress,
                                     DeviceId deviceId, PortStatistics stat) {
        FlowInfo.Builder fBuilder = new DefaultFlowInfo.DefaultBuilder();
        // mirror image of buildTxFlowInfo: the given endpoint is the destination
        fBuilder.withFlowType(FLOW_TYPE_SONA)
                .withSrcIp(IpPrefix.valueOf(ARBITRARY_IP))
                .withDstIp(IpPrefix.valueOf(ipAddress, ARBITRARY_LENGTH))
                .withSrcMac(NO_HOST_MAC)
                .withDstMac(macAddress)
                .withDeviceId(deviceId)
                .withInputInterfaceId(ARBITRARY_IN_INTF)
                .withOutputInterfaceId(ARBITRARY_OUT_INTF)
                .withVlanId(VlanId.vlanId());
        StatsInfo.Builder sBuilder = new DefaultStatsInfo.DefaultBuilder();
        sBuilder.withStartupTime(System.currentTimeMillis())
                .withFstPktArrTime(System.currentTimeMillis())
                .withLstPktOffset((int) (REFRESH_INTERVAL * MILLISECONDS))
                .withCurrAccPkts((int) stat.packetsReceived())
                .withCurrAccBytes(stat.bytesReceived())
                .withErrorPkts((short) stat.packetsRxErrors())
                .withDropPkts((short) stat.packetsRxDropped());
        fBuilder.withStatsInfo(sBuilder.build());
        return mergeFlowInfo(fBuilder.build(), fBuilder, sBuilder);
    }

    /**
     * Obtains instance port which associated with the given device identifier
     * and port number.
     *
     * @param deviceId device identifier
     * @param portNumber port number
     * @return instance port
     */
    private InstancePort getInstancePort(DeviceId deviceId, PortNumber portNumber) {
        return instPortService.instancePorts().stream()
                .filter(p -> p.deviceId().equals(deviceId))
                .filter(p -> p.portNumber().equals(portNumber))
                .findFirst().orElse(null);
    }

    /**
     * Installs a flow rule where the source table is fromTable, while destination
     * table is toTable.
* @param deviceId device identifier
     * @param fromTable source table
     * @param toTable destination table
     * @param statsFlowRule stats flow rule
     * @param rulePriority rule priority
     * @param install installation flag
     */
    private void connectTables(DeviceId deviceId, int fromTable, int toTable,
                               StatsFlowRule statsFlowRule, int rulePriority,
                               boolean install) {
        int srcPrefixLength = statsFlowRule.srcIpPrefix().prefixLength();
        int dstPrefixLength = statsFlowRule.dstIpPrefix().prefixLength();
        // NOTE(review): despite the name, this is the effective rule PRIORITY:
        // base priority plus both prefix lengths, so more specific matches win.
        int prefixLength = rulePriority + srcPrefixLength + dstPrefixLength;
        byte protocol = statsFlowRule.ipProtocol();
        TrafficSelector.Builder selectorBuilder =
                DefaultTrafficSelector.builder()
                        .matchEthType(TYPE_IPV4)
                        .matchIPSrc(statsFlowRule.srcIpPrefix())
                        .matchIPDst(statsFlowRule.dstIpPrefix());
        // L4 ports are only matched for TCP/UDP; other protocols match L3 only
        if (protocol == PROTOCOL_TCP) {
            selectorBuilder = selectorBuilder
                    .matchIPProtocol(statsFlowRule.ipProtocol())
                    .matchTcpSrc(statsFlowRule.srcTpPort())
                    .matchTcpDst(statsFlowRule.dstTpPort());
        } else if (protocol == PROTOCOL_UDP) {
            selectorBuilder = selectorBuilder
                    .matchIPProtocol(statsFlowRule.ipProtocol())
                    .matchUdpSrc(statsFlowRule.srcTpPort())
                    .matchUdpDst(statsFlowRule.dstTpPort());
        } else {
            log.warn("Unsupported protocol {}", statsFlowRule.ipProtocol());
        }
        TrafficTreatment.Builder treatmentBuilder = DefaultTrafficTreatment.builder();
        // the stats rule only counts traffic, then hands it to the next table
        treatmentBuilder.transition(toTable);
        FlowRule flowRule = DefaultFlowRule.builder()
                .forDevice(deviceId)
                .withSelector(selectorBuilder.build())
                .withTreatment(treatmentBuilder.build())
                .withPriority(prefixLength)
                .fromApp(telemetryAppId)
                .makePermanent()
                .forTable(fromTable)
                .build();
        applyRule(flowRule, install);
    }

    /**
     * Installs stats related flow rule to switch.
     *
     * @param flowRule flow rule
     * @param install flag to install or not
     */
    private void applyRule(FlowRule flowRule, boolean install) {
        FlowRuleOperations.Builder flowOpsBuilder = FlowRuleOperations.builder();
        flowOpsBuilder = install ?
                flowOpsBuilder.add(flowRule) : flowOpsBuilder.remove(flowRule);
        flowRuleService.apply(flowOpsBuilder.build(new FlowRuleOperationsContext() {
            @Override
            public void onSuccess(FlowRuleOperations ops) {
                log.debug("Install rules for telemetry stats: \n {}", ops.toString());
            }

            @Override
            public void onError(FlowRuleOperations ops) {
                log.debug("Failed to install rules for telemetry stats: \n {}",
                        ops.toString());
            }
        }));
    }

    /**
     * Merges old FlowInfo.StatsInfo and current FlowInfo.StatsInfo.
     *
     * @param flowInfo current FlowInfo object
     * @param fBuilder Builder for FlowInfo
     * @param sBuilder Builder for StatsInfo
     * @return Merged FlowInfo object
     */
    private FlowInfo mergeFlowInfo(FlowInfo flowInfo,
                                   FlowInfo.Builder fBuilder,
                                   StatsInfo.Builder sBuilder) {
        for (FlowInfo gFlowInfo : gFlowInfoSet) {
            log.debug("Old FlowInfo:\n{}", gFlowInfo.toString());
            if (gFlowInfo.roughEquals(flowInfo)) {
                // Get old StatsInfo object and merge the value to current object.
                StatsInfo oldStatsInfo = gFlowInfo.statsInfo();
                sBuilder.withPrevAccPkts(oldStatsInfo.currAccPkts());
                sBuilder.withPrevAccBytes(oldStatsInfo.currAccBytes());
                FlowInfo newFlowInfo = fBuilder.withStatsInfo(sBuilder.build())
                        .build();
                // replace the stale record with the merged one
                gFlowInfoSet.remove(gFlowInfo);
                gFlowInfoSet.add(newFlowInfo);
                log.debug("Old FlowInfo found, Merge this {}", newFlowInfo.toString());
                return newFlowInfo;
            }
        }
        // No such record, then build the FlowInfo object and return this object.
        log.debug("No FlowInfo found, add new FlowInfo {}", flowInfo.toString());
        FlowInfo newFlowInfo = fBuilder.withStatsInfo(sBuilder.build()).build();
        gFlowInfoSet.add(newFlowInfo);
        return newFlowInfo;
    }

    /**
     * Installs flow rules for collecting both normal and reverse path flow stats.
* @param statsFlowRule flow rule used for collecting stats
     * @param install flow rule installation flag
     */
    private void setStatFlowRule(StatsFlowRule statsFlowRule, boolean install) {
        setStatFlowRuleBase(statsFlowRule, install);
        // if reverse path stats is enabled, we will install flow rules for
        // collecting reverse path vFlow stats
        if (reversePathStats) {
            // swap src/dst endpoints to build the reverse-direction rule
            StatsFlowRule reverseFlowRule = DefaultStatsFlowRule.builder()
                    .srcIpPrefix(statsFlowRule.dstIpPrefix())
                    .dstIpPrefix(statsFlowRule.srcIpPrefix())
                    .ipProtocol(statsFlowRule.ipProtocol())
                    .srcTpPort(statsFlowRule.dstTpPort())
                    .dstTpPort(statsFlowRule.srcTpPort())
                    .build();
            setStatFlowRuleBase(reverseFlowRule, install);
        }
    }

    /**
     * A base method which is for installing flow rules for collecting stats.
     *
     * @param statsFlowRule flow rule used for collecting stats
     * @param install flow rule installation flag
     */
    private void setStatFlowRuleBase(StatsFlowRule statsFlowRule, boolean install) {
        IpPrefix srcIp = statsFlowRule.srcIpPrefix();
        IpPrefix dstIp = statsFlowRule.dstIpPrefix();
        DeviceId srcDeviceId = getDeviceId(srcIp.address());
        DeviceId dstDeviceId = getDeviceId(dstIp.address());
        // nothing to do if neither endpoint is attached to a known device
        if (srcDeviceId == null && dstDeviceId == null) {
            return;
        }
        if (srcDeviceId != null) {
            connectTables(srcDeviceId, STAT_INBOUND_TABLE, VTAP_INBOUND_TABLE,
                    statsFlowRule, METRIC_PRIORITY_SOURCE, install);
            if (install) {
                log.info("Install ingress stat flow rule for SrcIp:{} DstIp:{}",
                        srcIp.toString(), dstIp.toString());
            } else {
                log.info("Remove ingress stat flow rule for SrcIp:{} DstIp:{}",
                        srcIp.toString(), dstIp.toString());
            }
        }
        Set<IpPrefix> vxlanIps = osNetworkService.getFixedIpsByNetworkType(VXLAN);
        Set<IpPrefix> vlanIps = osNetworkService.getFixedIpsByNetworkType(VLAN);
        Set<IpPrefix> flatIps = osNetworkService.getFixedIpsByNetworkType(FLAT);
        int fromTable, toTable;
        if (dstDeviceId != null && egressStats) {
            IpPrefix dstIpPrefix = statsFlowRule.dstIpPrefix();
            // the egress pipeline tables differ between tunneled/VLAN networks
            // and FLAT networks
            if (vxlanIps.contains(dstIpPrefix) || vlanIps.contains(dstIpPrefix)) {
                fromTable = STAT_OUTBOUND_TABLE;
                toTable = VTAP_OUTBOUND_TABLE;
            } else if (flatIps.contains(dstIpPrefix)) {
                fromTable = STAT_FLAT_OUTBOUND_TABLE;
                toTable = VTAP_FLAT_OUTBOUND_TABLE;
            } else {
                return;
            }
            connectTables(dstDeviceId, fromTable, toTable,
                    statsFlowRule, METRIC_PRIORITY_TARGET, install);
            if (install) {
                log.info("Install egress stat flow rule for SrcIp:{} DstIp:{}",
                        srcIp.toString(), dstIp.toString());
            } else {
                log.info("Remove egress stat flow rule for SrcIp:{} DstIp:{}",
                        srcIp.toString(), dstIp.toString());
            }
        }
    }

    /**
     * Gets Device ID which the VM is located.
     *
     * @param ipAddress IP Address of host
     * @return Device ID
     */
    private DeviceId getDeviceId(IpAddress ipAddress) {
        if (!hostService.getHostsByIp(ipAddress).isEmpty()) {
            Optional<Host> host = hostService.getHostsByIp(ipAddress).stream().findAny();
            return host.map(host1 -> host1.location().deviceId()).orElse(null);
        } else {
            log.debug("No DeviceID is associated to {}", ipAddress.toString());
            return null;
        }
    }

    /**
     * Gets VLAN ID with respect to IP Address.
     *
     * @param ipAddress IP Address of host
     * @return VLAN ID
     */
    private VlanId getVlanId(IpAddress ipAddress) {
        if (!hostService.getHostsByIp(ipAddress).isEmpty()) {
            Host host = hostService.getHostsByIp(ipAddress).stream().findAny().get();
            return host.vlan();
        }
        return VlanId.vlanId();
    }

    /**
     * Gets Interface ID of Switch which is connected to a host.
     *
     * @param ipAddress IP Address of host
     * @return Interface ID of Switch
     */
    private int getInterfaceId(IpAddress ipAddress) {
        if (!hostService.getHostsByIp(ipAddress).isEmpty()) {
            Host host = hostService.getHostsByIp(ipAddress).stream().findAny().get();
            return (int) host.location().port().toLong();
        }
        // -1 signals "no host found for this IP"
        return -1;
    }

    /**
     * Gets MAC Address of host.
* @param ipAddress IP Address of host
     * @return MAC Address of host
     */
    private MacAddress getMacAddress(IpAddress ipAddress) {
        if (!hostService.getHostsByIp(ipAddress).isEmpty()) {
            Host host = hostService.getHostsByIp(ipAddress).stream().findAny().get();
            return host.mac();
        }
        // placeholder MAC when no host is known for this IP
        return NO_HOST_MAC;
    }

    /**
     * Gets IP address of the host which is attached to the given device and port.
     *
     * @param device device
     * @param inPort IN port number
     * @return IP address
     */
    private IpAddress getIpAddress(Device device, PortCriterion inPort) {
        Host host = hostService.getConnectedHosts(device.id()).stream()
                .filter(h -> h.location().port().equals(inPort.port()))
                .findAny().orElse(null);
        if (host != null) {
            return host.ipAddresses().stream().findAny().get();
        }
        return NO_HOST_IP;
    }

    // Appends the flow info to the bounded per-flow history queue, creating
    // the queue on first sight of the flow key.
    private void enqFlowInfo(FlowInfo flowInfo) {
        String key = flowInfo.uniqueFlowInfoKey();
        Queue<FlowInfo> queue = flowInfoMap.get(key);
        if (queue == null) {
            Queue<FlowInfo> newQueue = new LinkedList<FlowInfo>();
            newQueue.offer(flowInfo);
            flowInfoMap.put(key, newQueue);
            return;
        }
        queue.offer(flowInfo);
        while (queue.size() > DEFAULT_DATA_POINT_SIZE) {
            queue.remove(); // Removes a garbage data in the queue.
        }
    }

    /**
     * Checks whether the given device is edge switch or not.
     *
     * @param id device identifier
     * @return true if the given device is edge switch, false otherwise
     */
    private boolean isEdgeSwitch(DeviceId id) {
        // a switch with at least one directly connected host is an edge switch
        return !hostService.getConnectedHosts(id).isEmpty();
    }

    /**
     * Extracts properties from the component configuration context.
* @param context the component context
     */
    private void readComponentConfiguration(ComponentContext context) {
        Dictionary<?, ?> properties = context.getProperties();
        // each flag falls back to its compile-time default when unset
        Boolean reversePathStatsConfigured =
                getBooleanProperty(properties, REVERSE_PATH_STATS);
        if (reversePathStatsConfigured == null) {
            reversePathStats = DEFAULT_REVERSE_PATH_STATS;
            log.info("Reversed path stats flag is NOT " +
                    "configured, default value is {}", reversePathStats);
        } else {
            reversePathStats = reversePathStatsConfigured;
            log.info("Configured. Reversed path stats flag is {}", reversePathStats);
        }
        Boolean egressStatsConfigured = getBooleanProperty(properties, EGRESS_STATS);
        if (egressStatsConfigured == null) {
            egressStats = DEFAULT_EGRESS_STATS;
            log.info("Egress stats flag is NOT " +
                    "configured, default value is {}", egressStats);
        } else {
            egressStats = egressStatsConfigured;
            log.info("Configured. Egress stats flag is {}", egressStats);
        }
        Boolean portStatsConfigured = getBooleanProperty(properties, PORT_STATS);
        if (portStatsConfigured == null) {
            portStats = DEFAULT_PORT_STATS;
            log.info("Port stats flag is NOT " +
                    "configured, default value is {}", portStats);
        } else {
            portStats = portStatsConfigured;
            log.info("Configured. Port stats flag is {}", portStats);
        }
        Boolean monitorOverlayConfigured =
                getBooleanProperty(properties, MONITOR_OVERLAY);
        if (monitorOverlayConfigured == null) {
            monitorOverlay = DEFAULT_MONITOR_OVERLAY;
            log.info("Monitor overlay flag is NOT " +
                    "configured, default value is {}", monitorOverlay);
        } else {
            monitorOverlay = monitorOverlayConfigured;
            log.info("Configured. Monitor overlay flag is {}", monitorOverlay);
        }
        Boolean monitorUnderlayConfigured =
                getBooleanProperty(properties, MONITOR_UNDERLAY);
        if (monitorUnderlayConfigured == null) {
            monitorUnderlay = DEFAULT_MONITOR_UNDERLAY;
            log.info("Monitor underlay flag is NOT " +
                    "configured, default value is {}", monitorUnderlay);
        } else {
            monitorUnderlay = monitorUnderlayConfigured;
            log.info("Configured. Monitor underlay flag is {}", monitorUnderlay);
        }
    }

    /**
     * Periodic task that gathers overlay/underlay flow and port statistics
     * and publishes them through the telemetry service.
     */
    private class TelemetryCollector implements Runnable {

        @Override
        public void run() {
            Set<FlowInfo> filteredOverlayFlowInfos = Sets.newConcurrentHashSet();
            Set<FlowInfo> filteredUnderlayFlowInfos = Sets.newConcurrentHashSet();
            // we only let the master controller of the device where the
            // stats flow rules are installed send stats message
            if (monitorOverlay) {
                getOverlayFlowInfos().forEach(f -> {
                    if (checkSrcDstLocalMaster(f)) {
                        filteredOverlayFlowInfos.add(f);
                    }
                });
            }
            if (monitorUnderlay) {
                getUnderlayFlowInfos().forEach(f -> {
                    if (checkSrcDstLocalMaster(f)) {
                        filteredUnderlayFlowInfos.add(f);
                    }
                });
            }
            // we only let the master controller of the device where the port
            // is located to send stats message
            if (portStats) {
                if (monitorOverlay) {
                    getOverlayDstPortBasedFlowInfos().forEach(f -> {
                        if (checkSrcDstLocalMaster(f)) {
                            filteredOverlayFlowInfos.add(f);
                        }
                    });
                }
                if (monitorUnderlay) {
                    getUnderlayDstPortBasedFlowInfos().forEach(f -> {
                        if (checkSrcDstLocalMaster(f)) {
                            filteredUnderlayFlowInfos.add(f);
                        }
                    });
                }
            }
            if (monitorOverlay) {
                telemetryService.publish(filteredOverlayFlowInfos);
                // TODO: Refactor the following code to "TelemetryService" style.
                filteredOverlayFlowInfos.forEach(StatsFlowRuleManager.this::enqFlowInfo);
            }
            if (monitorUnderlay) {
                telemetryService.publish(filteredUnderlayFlowInfos);
            }
        }

        // True when this controller is master of the device hosting either
        // the source or the destination endpoint of the flow.
        private boolean checkSrcDstLocalMaster(FlowInfo info) {
            DeviceId srcDeviceId = getDeviceId(info.srcIp().address());
            DeviceId dstDeviceId = getDeviceId(info.dstIp().address());
            boolean isSrcLocalMaster = srcDeviceId != null &&
                    mastershipService.isLocalMaster(srcDeviceId);
            boolean isDstLocalMaster = dstDeviceId != null &&
                    mastershipService.isLocalMaster(dstDeviceId);
            return isSrcLocalMaster || isDstLocalMaster;
        }
    }
}
/**
 * Created by Semyon.Atamas on 2/27/2015.
 *
 * Small helper around a shared Jackson {@link ObjectMapper} instance.
 */
public class JsonUtils {
    private static ObjectMapper objectMapper = new ObjectMapper();

    /**
     * Serializes the given object to its JSON string representation.
     *
     * @param object the object to serialize
     * @return the JSON string, or an empty string if serialization fails
     */
    public static String toJson(Object object) {
        String serialized = "";
        try {
            serialized = objectMapper.writeValueAsString(object);
        } catch (IOException e) {
            /*Unreachable*/
            ErrorWriter.getInstance().writeExceptionToExceptionAnalyzer(e, "Json serialization", "", "");
        }
        return serialized;
    }

    /**
     * @return the shared mapper instance used for all serialization
     */
    public static ObjectMapper getObjectMapper() {
        return objectMapper;
    }
}
# from django.http import HttpResponse,httpResponseRedirect
from django.shortcuts import render, redirect
from .models import Image, Profile, Rates
from django.contrib.auth.decorators import login_required
from .forms import Form, NewImageForm, UpdateProForm, RateForm
from django.db.models import Avg
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProfileSerializer, ImageSerializer

# Create your views here.


def index(request):
    """Landing page: show every image and handle the signup form on POST."""
    image_pic = Image.objects.all()
    if request.method == 'POST':
        form = Form(request.POST)
        if form.is_valid():
            name = form.cleaned_data['your_name']
            email = form.cleaned_data['email']
            # NOTE(review): Recipients and send_welcome_email are not defined or
            # imported in this module -- confirm where they live and import them,
            # otherwise this branch raises NameError at runtime.
            recipient = Recipients(name=name, email=email)
            recipient.save()
            send_welcome_email(name, email)
            # Bug fix: was HttpResponseRedirect('index'), a name that is not
            # imported, and the response was never returned.
            return redirect('index')
    else:
        form = Form()
    return render(request, 'index.html', {"NewImageForm": form, "image_pic": image_pic})


@login_required(login_url='/accounts/login/')
def new_pic(request):
    """Let the logged-in user upload a new picture linked to their profile."""
    current_user = request.user
    profile = Profile.objects.filter(user_name=current_user).first()
    if request.method == 'POST':
        form = NewImageForm(request.POST, request.FILES)
        if form.is_valid():
            picture = form.save(commit=False)
            picture.user_name = current_user
            picture.profiles = profile
            picture.save()
            return redirect('addPic')
    else:
        form = NewImageForm()
    return render(request, 'everything/add-pic.html', {"form": form})


@login_required(login_url='/accounts/login/')
def getProfile(request, users=None):
    """Render the logged-in user's profile page with their uploaded images."""
    user = request.user
    image_pic = Image.objects.filter(user_name=user)
    profile = Profile.objects.filter(user_name=user).first()
    # Bug fix: render() was called as render(request, template, locals(), {...});
    # the fourth positional argument of render() is content_type, so the extra
    # dict was silently misused. Pass a single explicit context dict instead.
    context = {
        "user": user,
        "user_name": user,
        "image_pic": image_pic,
        "profile": profile,
    }
    return render(request, 'everything/profile.html', context)


@login_required(login_url='/accounts/login/')
def editProfile(request):
    """Create or update the logged-in user's profile details."""
    current_user = request.user
    if request.method == 'POST':
        form = UpdateProForm(request.POST, request.FILES)
        if form.is_valid():
            pics = form.save(commit=False)
            pics.user_name = current_user
            pics.save()
            return redirect('profile')
    else:
        form = UpdateProForm()
    return render(request, 'everything/pro_edit.html', {"test": form})


def search(request):
    """Search images by the term supplied via the ``picture`` GET parameter."""
    if 'picture' in request.GET and request.GET["picture"]:
        search_term = request.GET.get("picture")
        pictures = Image.search_by_ciro(search_term)
        message = f"{search_term}"
        return render(request, 'everything/search.html',
                      {"message": message, "pictures": pictures})
    else:
        message = "You haven't searched for any term"
        # Bug fix: 'pictures' was referenced here without ever being assigned,
        # raising UnboundLocalError; return an empty result set instead.
        return render(request, 'everything/search.html',
                      {"message": message, "pictures": []})


# rating code from nyota245 github
@login_required
def rating(request):
    '''
    Function to display single Project and rate it
    '''
    projects = Image.objects.filter().first()
    title = "Rating"
    project_rating = Rates.objects.filter(pic=projects).order_by("pk")
    current_user = request.user.id
    project_rated = Rates.objects.filter(user_name=current_user)

    def _average_and_percent(scores):
        # Mean of the collected scores plus that mean scaled to a percentage
        # (scores are out of 10). Mirrors the original fallback: the string
        # "0" and 0 when no ratings exist yet.
        try:
            avg = sum(scores) / len(scores)
            return avg, avg * 10
        except ZeroDivisionError:
            return "0", 0

    # Bug fix: the percentage computations referenced undefined names
    # (design_average, usability_average, content_average); they now use the
    # averages actually computed above.
    designav, designper = _average_and_percent([r.design for r in project_rating])
    usabilityav, usabilityper = _average_and_percent([r.usability for r in project_rating])
    contentav, contentper = _average_and_percent([r.content for r in project_rating])

    form = RateForm()
    context = {
        "projects": projects,
        "form": form,
        "project_rating": project_rating,
        "designav": designav,
        "contentav": contentav,
        "usabilityav": usabilityav,
        "usabilityper": usabilityper,
        "contentper": contentper,
        "designper": designper,
    }
    return render(request, "rates.html", context)


class ImageList(APIView):
    """Read-only API endpoint returning every image, serialized."""

    def get(self, request, format=None):
        projects = Image.objects.all()
        serializers = ImageSerializer(projects, many=True)
        return Response(serializers.data)


class ProfileList(APIView):
    """Read-only API endpoint returning every profile, serialized."""

    def get(self, request, format=None):
        profiles = Profile.objects.all()
        serializer = ProfileSerializer(profiles, many=True)
        return Response(serializer.data)
Lectin from Sambucus sieboldiana abrogates the anoikis resistance of colon cancer cells conferred by N-acetylglucosaminyltransferase V during hematogenous metastasis Anoikis is a form of anchorage-dependent apoptosis, and cancer cells adopt anoikis-resistance molecular machinery to conduct metastasis. Here, we report that N-acetylglucosaminyltransferase V gene expression confers anoikis resistance during cancer progression. Overexpression of N-acetylglucosaminyltransferase V protected detached cancer cells from apoptotic death, and suppression or knockout of the gene sensitized cancer cells to the apoptotic death. The gene expression also stimulated anchorage-dependent as well as anchorage-independent colony formation of cancer cells following anoikis stress treatments. Importantly, treatment with the lectin from Sambucus sieboldiana significantly sensitized anoikis-induced cancer cell deaths in vitro as well as in vivo. We propose that the lectin alone or an engineered form could offer a new therapeutic treatment option for cancer patients with advanced tumors.
This corrosion: A systematic review of the association between alternative subcultures and the risk of self-harm and suicide BACKGROUND Rates of self-harm and suicide are increasing in young people. The literature suggests that individuals who identify with alternative subcultures (e.g., Goth) may be at a greater risk. OBJECTIVE To explore the prevalence of self-harm and suicide in alternative subcultures and the factors that might contribute to this increased risk. METHOD Using a systematic strategy, the databases PsycINFO, Scopus, MEDLINE and Web of Science, and the E-Thesis online service (ETHOS) were searched for English language only papers, with no restrictions in terms of date of publication. Papers were selected that included data on the relationship between either alternative subculture identity (e.g., Goth) or preference for alternative music (e.g., Heavy Metal) and self-harm or suicide. Ten quantitative papers were included: seven cross-sectional, two longitudinal and one cross-sectional state-level comparison study. Two qualitative papers were also included. Studies were assessed by two reviewers for risk of bias. RESULTS The findings indicated that individuals who associated with alternative subcultures were at a greater risk of self-harm and suicide. Whilst qualitative papers identified potential mechanisms (e.g., exposure to self-harm and the way self-harm is presented or normalized), there remains limited support for these mechanisms. CONCLUSIONS More research is required to understand the association between self-harm, suicide and alternative subculture affiliation, and the factors underlying it. Longitudinal studies and studies focusing on mechanism are particularly important. PRACTITIONER POINTS The review supports the suggestion that those who identify as belonging to an alternative subculture may be at a higher risk of self-harm and suicidal behaviour.
It also presents preliminary evidence that alternative affiliation predicts self-harm over time, and that this effect holds whilst adjusting for a number of likely confounders. The findings highlight the importance of increasing the awareness of the victimization and potential risk that these groups hold and suggests areas for intervention in health, educational, and social services. The review does not, however, indicate specifically what it is about alternative subculture affiliation (or alternative music preference) that could contribute to the risk of self-harm. Consequently, studies with a greater focus on mechanisms are needed. Methodological limitations (e.g., cross-sectional studies, small sample of 'alternative' participants, westernized samples) restricted the reliability and validity of the results which impacted on the extent to which the findings could be generalized more widely.
// MIT License
// Copyright (C) August 2016 Hotride

#include "MainScreen.h"
#include "BaseScreen.h"
#include "../Config.h"
#include "../Point.h"
#include "../Definitions.h"
#include "../OrionUO.h"
#include "../QuestArrow.h"
#include "../OrionWindow.h"
#include "../Managers/FontsManager.h"
#include "../Managers/ScreenEffectManager.h"
#include "../Managers/AnimationManager.h"
#include "../GUI/GUITextEntry.h"
#include "../TextEngine/EntryText.h"

// Global login-screen instance used by the rest of the client.
CMainScreen g_MainScreen;

CMainScreen::CMainScreen()
    : CBaseScreen(m_MainGump)
    , m_Account(nullptr)
    , m_Password(nullptr)
    , m_SavePassword(nullptr)
    , m_AutoLogin(nullptr)
{
    DEBUG_TRACE_FUNCTION;
    // Real password text lives here; the on-screen entry (m_PasswordFake)
    // only ever holds '*' mask characters.
    m_Password = new CEntryText(32, 0, 300);
}

CMainScreen::~CMainScreen()
{
    DEBUG_TRACE_FUNCTION;
    // Only m_Password is owned by this class; the other entry/checkbox
    // pointers appear to be owned by the gump — TODO confirm.
    delete m_Password;
}

// Prepare the login screen: load saved credentials/options, size the
// window, and reset per-session global state.
void CMainScreen::Init()
{
    DEBUG_TRACE_FUNCTION;
    g_ConfigLoaded = false;
    g_GlobalScale = 1.0;
    Load();
#if USE_WISP
    g_OrionWindow.SetSize(CSize(640, 480));
    g_OrionWindow.NoResize = true;
#else
    Reset();
#endif
    g_OrionWindow.SetTitle("Ultima Online");
    g_GL.UpdateRect();
    // Focus starts on the (masked) password field.
    g_EntryPointer = m_MainGump.m_PasswordFake;
    g_AnimationManager.ClearUnusedTextures(g_Ticks + 100000);
    g_QuestArrow.Enabled = false;
    g_TotalSendSize = 0;
    g_TotalRecvSize = 0;
    g_LightLevel = 0;
    g_PersonalLightLevel = 0;
    g_ScreenEffectManager.UseSunrise();
    SmoothScreenAction = 0;
    m_Gump.PrepareTextures();
}

// Execute the action queued behind the screen-fade effect.
// 0xFF means "use whatever action was stored in SmoothScreenAction".
void CMainScreen::ProcessSmoothAction(uint8_t action)
{
    DEBUG_TRACE_FUNCTION;
    if (action == 0xFF)
    {
        action = SmoothScreenAction;
    }
    if (action == ID_SMOOTH_MS_CONNECT)
    {
        g_Orion.Connect();
    }
    else if (action == ID_SMOOTH_MS_QUIT)
    {
        g_OrionWindow.Destroy();
    }
}

// Set the account/password fields programmatically, masking the
// password display with one '*' per character.
void CMainScreen::SetAccounting(const string &account, const string &password)
{
    DEBUG_TRACE_FUNCTION;
    m_Account->SetTextA(account);
    m_Password->SetTextA(password);
    const auto len = (int)password.length();
    m_MainGump.m_PasswordFake->Clear();
    for (int i = 0; i < len; i++)
    {
        m_MainGump.m_PasswordFake->Insert(L'*');
    }
}

// Clipboard paste: when pasting into the password field, paste into the
// hidden real entry and rebuild the visible field as '*' masks.
void CMainScreen::Paste()
{
    DEBUG_TRACE_FUNCTION;
    if (g_EntryPointer == m_MainGump.m_PasswordFake)
    {
        m_Password->Paste();
        const auto len = (int)m_Password->Length();
        g_EntryPointer->Clear();
        for (int i = 0; i < len; i++)
        {
            g_EntryPointer->Insert(L'*');
        }
    }
    else
    {
        g_EntryPointer->Paste();
    }
}

// Character input: only printable ASCII is accepted; password input is
// mirrored as '*' in the visible field and stored in m_Password.
void CMainScreen::OnTextInput(const TextEvent &ev)
{
    DEBUG_TRACE_FUNCTION;
    const auto ch = EvChar(ev);
    if (ch >= 0x0100 || !g_FontManager.IsPrintASCII((uint8_t)ch))
    {
        return;
    }
    if (g_EntryPointer == nullptr)
    {
        g_EntryPointer = m_MainGump.m_PasswordFake;
    }
    if (g_EntryPointer->Length() < 16) //add char to text field
    {
        if (g_EntryPointer == m_MainGump.m_PasswordFake)
        {
            // Insert the mask first; only record the real character if
            // the visible entry accepted it (keeps the two in sync).
            if (g_EntryPointer->Insert(L'*'))
            {
                m_Password->Insert(ch);
            }
        }
        else
        {
            g_EntryPointer->Insert(ch);
        }
    }
    m_Gump.WantRedraw = true;
}

// Key handling: TAB toggles focus between account and password,
// ENTER starts the connect sequence, everything else is forwarded to
// the focused entry (and to the hidden password entry when masked).
void CMainScreen::OnKeyDown(const KeyEvent &ev)
{
    DEBUG_TRACE_FUNCTION;
    if (g_EntryPointer == nullptr)
    {
        g_EntryPointer = m_MainGump.m_PasswordFake;
    }
    const auto key = EvKey(ev);
    switch (key)
    {
        case KEY_TAB:
        {
            if (g_EntryPointer == m_Account)
            {
                g_EntryPointer = m_MainGump.m_PasswordFake;
            }
            else
            {
                g_EntryPointer = m_Account;
            }
            break;
        }
        case KEY_RETURN:
        case KEY_RETURN2:
        {
            CreateSmoothAction(ID_SMOOTH_MS_CONNECT);
            break;
        }
        default:
        {
            // Keep the real password entry's cursor/selection in step
            // with the visible masked entry.
            if (g_EntryPointer == m_MainGump.m_PasswordFake)
            {
                m_Password->OnKey(nullptr, key);
            }
            g_EntryPointer->OnKey(nullptr, key);
            break;
        }
    }
    m_Gump.WantRedraw = true;
}

// Populate the UI from the persisted configuration (g_Config).
void CMainScreen::Load()
{
    m_AutoLogin->Checked = g_Config.AutoLogin;
    m_Account->SetTextA(g_Config.Login);
    m_Account->SetPos(checked_cast<int>(g_Config.Login.length()));
    m_MainGump.m_PasswordFake->SetTextA("");
    m_MainGump.m_PasswordFake->SetPos(0);
    const size_t len = g_Config.Password.length();
    if (len != 0)
    {
        m_Password->SetTextA(g_Config.Password);
        for (int zv = 0; zv < len; zv++)
        {
            m_MainGump.m_PasswordFake->Insert(L'*');
        }
        m_Password->SetPos(checked_cast<int>(len));
    }
    else
    {
        m_Password->SetTextA("");
        m_Password->SetPos(0);
    }
    m_SavePassword->Checked = g_Config.SavePassword;
    // If the user opted out of saving the password, wipe both the real
    // and the masked entries.
    if (!m_SavePassword->Checked)
    {
        m_Password->SetTextW({});
        m_MainGump.m_PasswordFake->SetTextW({});
    }
}

// Persist the current UI state back into the configuration.
void CMainScreen::Save()
{
    g_Config.AutoLogin = m_AutoLogin->Checked;
    g_Config.SavePassword = m_SavePassword->Checked;
    g_Config.Password = m_Password->GetTextA();
    g_Config.Login = m_Account->GetTextA();
}

// Restore the window to the fixed 640x480 login-screen size.
void CMainScreen::Reset() const
{
    g_OrionWindow.RestoreWindow();
    g_OrionWindow.SetSize(CSize(640, 480));
    g_OrionWindow.SetWindowResizable(false);
}
Economic Preparation for Retirement We define and estimate measures of economic preparation for retirement based on a complete inventory of economic resources while taking into account the risk of living to advanced old age and the risk of high out-of-pocket spending for health care services. We ask whether, in a sample of 66-69 year-olds, observed economic resources could support with high probability a life-cycle consumption path anchored at the initial level of consumption until the end of life. We account for taxes, widowing, differential mortality and out-of-pocket health spending risk. We find that 71% of persons in our target age group are adequately prepared according to our definitions, but there is substantial variation by observable characteristics: 80% of married persons are adequately prepared compared with just 55% of single persons. We estimate that a reduction in Social Security benefits of 30 percent would reduce the fraction adequately prepared by 7.8 percentage points among married persons and by as much as 10.7 percentage points among single persons.
package org.wickedsource.budgeteer.web.pages.person.details.component; import org.apache.wicket.util.tester.WicketTester; import org.junit.jupiter.api.Test; import org.wickedsource.budgeteer.web.AbstractWebTestTemplate; import org.wickedsource.budgeteer.web.pages.person.details.highlights.PersonHighlightsModel; import org.wickedsource.budgeteer.web.pages.person.details.highlights.PersonHighlightsPanel; public class PersonHighlightsPanelTest extends AbstractWebTestTemplate { @Test void render() { WicketTester tester = getTester(); PersonHighlightsModel model = new PersonHighlightsModel(1L); PersonHighlightsPanel panel = new PersonHighlightsPanel("panel", model); tester.startComponentInPage(panel); tester.assertContains("<NAME>"); } @Override protected void setupTest() { } }
Searching for Accountability: The Draft UN International Convention on the Regulation, Oversight, and Monitoring of Private Military and Security Companies Summary The proliferation of private military and security companies has attracted significant public and scholarly attention during the last decade. This comment examines the United Nations Draft International Convention on the Regulation, Oversight and Monitoring of Private Military and Security Companies (Draft Convention). It discusses the significance of the Draft Convention and then describes the approach taken to the regulation of this controversial topic. Several problematic elements of the Draft Convention are identified including the definition of prohibited activities, State responsibility for the conduct of private military and security companies and the proposed International Criminal Court referral mechanism. Finally, specific policy recommendations are made for the government of Canada as a home state and contracting state of private military and security services, irrespective of the progress of negotiations on the Draft Convention.
Impact of Interest Rate on Household Consumption in Tanzania This study seeks to determine the exact impact of the interest rate on household consumption in Tanzania and to identify the direction of causality between the variables. Although there have been a few studies exploring the issue of interest rates and consumption, their methods, time scope and geographical locations have differed. This study aims to examine the relationship between the interest rate on deposits and household expenditure in Tanzania using annual time series data for the period 1990–2017, employing Dynamic Ordinary Least Squares (DOLS) and the Granger causality test for testing the causal relationship between the variables. The results revealed that there is a negative relationship between the interest rate on deposits and consumption in Tanzania. Additionally, it is observed from the estimation results that the interest rate had an insignificant effect on consumption. Furthermore, the Granger causality test results show that there is a bidirectional causal relationship between the interest rate and consumption. The results also show that income and consumption are positively related and statistically significant at the 5% level. In addition, the findings support Keynes's Absolute Income Hypothesis, which emphasizes consumption being a positive function of disposable income. The study recommends that the government take urgent steps to implement policies such as poverty reduction strategies, agricultural policy and the Five Year Development Plans in order to improve the income base of most households.
<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import proxy2
from openstack.ecs.v1 import server as _server
from openstack.ecs.v1 import server_ext as _server_ext


class Proxy(proxy2.BaseProxy):
    """Proxy for the ECS v1 service: create, resize, batch-action and
    batch-delete servers via the resource classes in ``server`` and
    ``server_ext``.
    """

    def create_server(self, **data):
        """POST request to create an ECS server.

        :param data: attributes describing the server to create;
            see the service documentation for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.Servers`
        """
        return self._create(_server.Servers, **data)

    def resize_server(self, server_id, **data):
        """POST request to change the flavor (size) of an ECS server.

        :param server_id: id of the ECS server to resize
        :param data: resize attributes; see the service documentation
            for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.ResizeServer`
        """
        return self._create(_server.ResizeServer, server_id=server_id, **data)

    def create_server_ext(self, **data):
        """POST request to create an ECS server (extended API).

        :param data: attributes describing the server to create;
            see the service documentation for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server_ext.Servers`
        """
        return self._create(_server_ext.Servers, **data)

    def resize_server_ext(self, server_id, **data):
        """POST request to change the flavor (size) of an ECS server
        (extended API).

        :param server_id: id of the ECS server to resize
        :param data: resize attributes; see the service documentation
            for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server_ext.ResizeServer`
        """
        return self._create(_server_ext.ResizeServer, server_id=server_id, **data)

    # NOTE: start_server, stop_server and reboot_server all post the same
    # ServerAction resource; the caller distinguishes the action (start /
    # stop / reboot) through the payload in ``data`` — TODO confirm against
    # the ServerAction resource definition.
    def start_server(self, **data):
        """POST a batch server action (start).

        :param data: action payload; see the service documentation
            for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.ServerAction`
        """
        return self._create(_server.ServerAction, **data)

    def stop_server(self, **data):
        """POST a batch server action (stop).

        :param data: action payload; see the service documentation
            for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.ServerAction`
        """
        return self._create(_server.ServerAction, **data)

    def reboot_server(self, **data):
        """POST a batch server action (reboot).

        :param data: action payload; see the service documentation
            for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.ServerAction`
        """
        return self._create(_server.ServerAction, **data)

    def delete_server(self, **data):
        """POST request to batch-delete servers.

        :param data: delete payload, e.g. the list of server ids;
            see the service documentation for the accepted fields.
        :return: :class:`~openstack.ecs.v1.server.DeleteServer`
        """
        return self._create(_server.DeleteServer, **data)
Evaluation of QRS morphological classifiers in the presence of noise. This paper analyzes the performance of four similarity measures (distances: d1, d2, and dinfinity, as well as correlation coefficient), when they are employed for morphological classification of QRS complexes by means of linear cluster formation. An important characteristic that any morphological classification method for QRS complexes should possess is the ability to perform waveform recognition despite the wide variety in which these could appear, as well as the diverse types of noise that could contaminate the signal. Evaluation of these classifiers constitutes an important problem for their selection. Evaluation was performed using electrocardiographic signals selected from the MIT-BIH database. These signals were contaminated with several noise types that are found in the environment where electrocardiograms are usually registered and processed, and the different noise waveforms were combined in an appropriate way to simulate practical situations, including some with severe noise contamination. Results are expressed in terms of probabilities of correct classification for different signal to noise ratios, allowing a comparison between the different distance measures in terms of their effectiveness.
Connection between the disease resistance of sour cherry genotypes and the carbohydrate content of the leaf and phloem tissues The objective of the present study was to establish a possible connection between disease resistance and the carbohydrate content of plant tissues by examining sour cherry genotypes with different tolerance levels in homeostasis. Research on the sour cherry–Monilinia laxa interaction involved the comparison of two Hungarian cultivars (Érdi bőtermő and Csengődi) and their offspring by measuring the quantity of homeostatic carbohydrate fractions in their leaves and phloem tissues. The results demonstrated that the glucose quantity and the ratio of glucose and fructose to sucrose were correlated with the disease resistance of sour cherry cultivars and their hybrids. The glucose content was higher in susceptible genotypes and lower in tolerant genotypes. The hexose:sucrose ratios of susceptible genotypes were significantly higher than those of tolerant genotypes.
Carl Edwards Jr. admitted he was willing to allow one run if he could turn a double play after inheriting a scary situation Tuesday night. But Edwards turned in one of his most impressive performances of the season by striking out three consecutive batters and stranding the tying run at third base in the seventh inning of the Cubs’ 2-1 loss to the Reds. “I was going to take that,” Edwards said of the possibility of giving up a run for two outs. After experiencing control problems less than two weeks ago, Edwards has rebounded at a key time for the Cubs, who are without Koji Uehara (neck). Published in Chicago Tribune on May 23, 2016 — Print headline: "Out of sync - Offense still off as Cubs drop another series"
<filename>cypress/integration/app.ts describe("App", () => { beforeEach(() => { cy.visit("/"); }); it("has the correct title", () => { cy.title().should("equal", "Virginia-Traffic School"); }); it("should navigate to products page when first run", () => { cy.url().should("include", "/products"); }); it("should navigate to theme page when click button", function () { cy.get("a[href='/smart-edu2'].btn-success").click({ force: true }); cy.url().should("include", "/smart-edu2/home"); }); it("navigate to login page when click dashboard btn ", () => { cy.get('[data-testid="router-dashboard"]').click(); cy.url().should("include", "/login"); }); });
// Kaepora entry point: parses the sub-command, constructs the backend,
// and either runs a one-off command or serves the bot + web frontend.
package main

import (
	"flag"
	"fmt"
	"kaepora/internal/back"
	"kaepora/internal/bot"
	"kaepora/internal/global"
	"kaepora/internal/web"
	"log"
	"os"
	"os/signal"
	"sync"
	"syscall"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	log.SetFlags(0) // we syslog in prod so we don't care about time here
	flag.Parse()

	switch flag.Arg(0) { // commands not requiring a back
	case "version":
		fmt.Fprintf(os.Stdout, "Kaepora %s\n", global.Version)
		return
	case "help":
		fmt.Fprint(os.Stdout, help())
		return
	}

	log.Printf("info: Starting Kaepora %s", global.Version)

	// NOTE(review): the local variable deliberately(?) shadows the `back`
	// package from here on — confirm this is intended before renaming.
	back, err := back.New(
		"sqlite3",
		"./kaepora.db",
		os.Getenv("KAEPORA_OOTR_API_KEY"),
	)
	if err != nil {
		log.Fatal(err)
	}

	switch flag.Arg(0) {
	case "fixtures":
		if err := back.LoadFixtures(); err != nil {
			log.Fatal(err)
		}
	case "serve":
		if err := serve(back); err != nil {
			log.Fatal(err)
		}
	case "rerank":
		if err := back.Rerank(flag.Arg(1)); err != nil {
			log.Fatal(err)
		}
	default:
		// Unknown sub-command: print usage to stderr and exit non-zero.
		fmt.Fprint(os.Stderr, help())
		os.Exit(1)
	}
}

// help returns the usage text, with the binary name substituted in.
func help() string {
	return fmt.Sprintf(`
Kaepora is a tool to manage the "Ocarina of Time: Randomizer"
competitive ladder.

Usage: %[1]s COMMAND [ARGS…]

COMMANDS
    fixtures            create default data for quick testing during development
    help                display this help
    serve               start the Discord bot
    version             display the current version
    rerank SHORTCODE    recompute all rankings in a league
`,
		os.Args[0],
	)
}

// serve runs the backend loop, the Discord bot, and the web server
// concurrently, then shuts them all down cleanly on SIGINT/SIGTERM by
// closing the shared `done` channel and waiting on the WaitGroup.
func serve(b *back.Back) error {
	done := make(chan struct{})
	signaled := make(chan os.Signal, 1)
	signal.Notify(signaled, syscall.SIGINT, syscall.SIGTERM)

	// NOTE(review): this local shadows the `bot` package; confirm intended.
	bot, err := bot.New(b, os.Getenv("KAEPORA_DISCORD_TOKEN"))
	if err != nil {
		return err
	}

	server, err := web.NewServer(b, os.Getenv("KAEPORA_WEB_TOKEN_KEY"))
	if err != nil {
		return err
	}

	// Each goroutine is expected to register itself on the WaitGroup and
	// exit when `done` is closed — TODO confirm Run/Serve call wg.Add.
	var wg sync.WaitGroup
	go b.Run(&wg, done)
	go bot.Serve(&wg, done)
	go server.Serve(&wg, done)

	sig := <-signaled
	log.Printf("warning: received signal %d", sig)
	close(done)

	log.Print("info: waiting for complete shutdown")
	wg.Wait()
	log.Print("info: shutdown complete")

	return nil
}
A progressive extended protocol for the basic laparoscopic training using the pelvitrainer. UNLABELLED Abstract Purpose: We describe an extended training program using the pelvitrainer to improve the basic laparoscopic skills of the junior urologists. MATERIALS AND METHODS Ten junior residents were involved in our program that consisted of an hour of training every other day; every 3 hours represented one training session. This curriculum started with 4 inanimate tasks that included peg transfer, disc cutout, extracorporeal, and intracorporeal knot tying. Each task was practiced for one training session with an objective evaluation at the initial attempt and at the end of its session. Thereafter, the participants began to perform an anastomosis using a latex glove model of the laparoscopic urethrovesical anastmosis (L-UVA) (5 experiments). This was followed by 10 experiments of the sheep intestine model of the L-UVA. The performance in these models was evaluated by both the amount of leakage of the injected saline and the time required for completing it. Lastly, another sheep intestine model was performed 3 weeks after the end of the training program. RESULTS The continuous evaluation of these trainees showed that there was a significant decrease in the time required to perform each of the first 4 tasks at the end of their corresponding sessions compared to the base line values (p=0.000). We also detected a significant decrease in the time and the amount of leakage in sheep intestine models in the 10th attempt compared to the first one (p=0.000). However, there was no significant difference between the results at the 10th model and those of the 3 weeks retest regarding both the time (p=0.198) and the amount of leakage (p=0.076). 
CONCLUSIONS The use of the two described models of the L-UVA after the inanimate tasks in the pelvitrainer distributed course of training could help in the improvement and in the retention of the basic laparoscopic skills of the junior urologists.
<reponame>rzr/iotivity-1
/* *****************************************************************
 *
 * Copyright 2015 Samsung Electronics All Rights Reserved.
 *
 *
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * *****************************************************************/

// Unit tests for the OCSecure / OCSecureResource C++ provisioning API:
// initialization, device discovery, ownership-transfer callback
// registration, and parameter validation of the provisioning calls.

#include <ocstack.h>
#include <oic_malloc.h>
#include <OCApi.h>
#include <OCPlatform_impl.h>
#include <oxmjustworks.h>
#include <oxmrandompin.h>
#include <OCProvisioningManager.h>
#include <gtest/gtest.h>

#define TIMEOUT 5

namespace OCProvisioningTest
{
    using namespace OC;

    // No-op provisioning-result callback used where only parameter
    // validation (not the result) is under test.
    void resultCallback(PMResultList_t *result, int hasError)
    {
        (void)result;
        (void)hasError;
    }

    // provisionInit accepts an empty database path.
    TEST(ProvisionInitTest, TestWithEmptyPath)
    {
        std::string dbPath("");
        EXPECT_EQ(OC_STACK_OK, OCSecure::provisionInit(dbPath));
    }

    // provisionInit accepts a relative database path.
    TEST(ProvisionInitTest, TestValidPath)
    {
        std::string dbPath("./dbPath");
        EXPECT_EQ(OC_STACK_OK, OCSecure::provisionInit(dbPath));
    }

    // Unowned-device discovery succeeds with a positive timeout.
    TEST(DiscoveryTest, UnownedDevices)
    {
        DeviceList_t list;
        EXPECT_EQ(OC_STACK_OK, OCSecure::discoverUnownedDevices(TIMEOUT, list));
    }

    // ...and also with a zero timeout.
    TEST(DiscoveryTest, UnownedDevicesZeroTimeout)
    {
        DeviceList_t list;
        EXPECT_EQ(OC_STACK_OK, OCSecure::discoverUnownedDevices(0, list));
    }

    // Owned-device discovery succeeds with a positive timeout.
    TEST(DiscoveryTest, OwnedDevices)
    {
        DeviceList_t list;
        EXPECT_EQ(OC_STACK_OK, OCSecure::discoverOwnedDevices(TIMEOUT, list));
    }

    // ...and also with a zero timeout.
    TEST(DiscoveryTest, OwnedDevicesZeroTimeout)
    {
        DeviceList_t list;
        EXPECT_EQ(OC_STACK_OK, OCSecure::discoverOwnedDevices(0, list));
    }

    // A NULL callback-data pointer must be rejected.
    TEST(OwnershipTest, SetOwnershipTransferCBDataNull)
    {
        EXPECT_EQ(OC_STACK_INVALID_PARAM,
                OCSecure::setOwnerTransferCallbackData(OIC_JUST_WORKS, NULL, NULL));
    }

    // Fully-populated "just works" callback data is accepted.
    TEST(OwnershipTest, SetOwnershipTransferCBData)
    {
        OTMCallbackData_t justWorksCBData;
        justWorksCBData.loadSecretCB = LoadSecretJustWorksCallback;
        justWorksCBData.createSecureSessionCB = CreateSecureSessionJustWorksCallback;
        justWorksCBData.createSelectOxmPayloadCB = CreateJustWorksSelectOxmPayload;
        justWorksCBData.createOwnerTransferPayloadCB = CreateJustWorksOwnerTransferPayload;
        EXPECT_EQ(OC_STACK_OK, OCSecure::setOwnerTransferCallbackData(OIC_JUST_WORKS,
                    &justWorksCBData, NULL));
    }

    // OIC_OXM_COUNT is not a valid transfer method and must be rejected.
    TEST(OwnershipTest, SetOwnershipTransferCBDataInvalidType)
    {
        OTMCallbackData_t justWorksCBData;
        justWorksCBData.loadSecretCB = LoadSecretJustWorksCallback;
        justWorksCBData.createSecureSessionCB = CreateSecureSessionJustWorksCallback;
        justWorksCBData.createSelectOxmPayloadCB = CreateJustWorksSelectOxmPayload;
        justWorksCBData.createOwnerTransferPayloadCB = CreateJustWorksOwnerTransferPayload;
        EXPECT_EQ(OC_STACK_INVALID_PARAM, OCSecure::setOwnerTransferCallbackData(OIC_OXM_COUNT,
                    &justWorksCBData, NULL));
    }

    // Random-PIN method without an input-PIN callback must be rejected.
    TEST(OwnershipTest, SetOwnershipTransferCBDataNullInputPin)
    {
        OTMCallbackData_t pinBasedCBData;
        pinBasedCBData.loadSecretCB = InputPinCodeCallback;
        pinBasedCBData.createSecureSessionCB = CreateSecureSessionRandomPinCallbak;
        pinBasedCBData.createSelectOxmPayloadCB = CreatePinBasedSelectOxmPayload;
        pinBasedCBData.createOwnerTransferPayloadCB = CreatePinBasedOwnerTransferPayload;
        OTMSetOwnershipTransferCallbackData(OIC_RANDOM_DEVICE_PIN, &pinBasedCBData);
        EXPECT_EQ(OC_STACK_INVALID_PARAM, OCSecure::setOwnerTransferCallbackData(
                    OIC_RANDOM_DEVICE_PIN, &pinBasedCBData, NULL));
    }

    // doOwnershipTransfer rejects a null result callback.
    TEST(OwnershipTest, OwnershipTransferNullCallback)
    {
        OCSecureResource device;
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.doOwnershipTransfer(nullptr));
    }

    // getDevInfoFromNetwork populates both owned and unowned lists.
    TEST(DeviceInfoTest, DevInfoFromNetwork)
    {
        DeviceList_t owned, unowned;
        EXPECT_EQ(OC_STACK_OK, OCSecure::getDevInfoFromNetwork(TIMEOUT,
                    owned, unowned));
    }

    // provisionACL rejects a null ACL.
    TEST(ProvisionAclTest, ProvisionAclTestNullAcl)
    {
        OCSecureResource device;
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.provisionACL(nullptr, resultCallback));
    }

    // provisionACL rejects a null callback (ACL is heap-allocated and
    // freed so the test itself does not leak).
    TEST(ProvisionAclTest, ProvisionAclTestNullCallback)
    {
        OCSecureResource device;
        OicSecAcl_t *acl = (OicSecAcl_t *)OICCalloc(1,sizeof(OicSecAcl_t));
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.provisionACL(acl, nullptr));
        OICFree(acl);
    }

    // provisionACL rejects both arguments being null.
    TEST(ProvisionAclTest, ProvisionAclTestNullCallbackNUllAcl)
    {
        OCSecureResource device;
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.provisionACL(nullptr, nullptr));
    }

    // provisionCredentials rejects a null result callback.
    TEST(ProvisionCredTest, ProvisionCredTestNullCallback)
    {
        OCSecureResource device, dev2;
        Credential cred;
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.provisionCredentials(cred,
                    dev2, nullptr));
    }

    // provisionPairwiseDevices rejects a null result callback.
    TEST(ProvisionPairwiseTest, ProvisionPairwiseTestNullCallback)
    {
        OCSecureResource device, dev2;
        Credential cred;
        OicSecAcl_t *acl1 = (OicSecAcl_t *)OICCalloc(1,sizeof(OicSecAcl_t));
        OicSecAcl_t *acl2 = (OicSecAcl_t *)OICCalloc(1,sizeof(OicSecAcl_t));
        EXPECT_EQ(OC_STACK_INVALID_PARAM, device.provisionPairwiseDevices(cred,
                    acl1, dev2, acl2, nullptr));
        OICFree(acl1);
        OICFree(acl2);
    }
}
/**
 * Builds the fixed set of database names used by the tests.
 *
 * @return A freshly allocated list of the three test database names
 *         ("Test0", "Test1", "Test2").
 */
private static List<String> generateTestDatabaseNames() {
    List<String> names = new ArrayList<>();
    int index = 0;
    while (index < 3) {
        names.add(String.format("Test%1$s", index));
        index++;
    }
    return names;
}
The use of infrared spectroscopy to provide an estimation of the gross biochemistry associated with colorectal pathologies Introduction The gold standard assessment of tissue biopsies from colonoscopy is histopathology. Infrared spectroscopy has potential to map biochemical changes across a tissue section distinguishing between different disease states.1 This may aid the pathologist or could lead to automated histopathological processing. This study attempted to determine biochemical changes in colorectal disease using infrared spectroscopy. Methods Tissue biopsies were snap frozen at colonoscopy. 2-D spectral datasets were obtained from 10 micron thick sections of specimens, thawed to room temperature, using a Perkin Elmer infrared imaging system in transmission mode. Contiguous tissue sections stained with H&E were reviewed by a senior consultant pathologist for comparison. Reference spectra from pure biochemicals (Sigma-Aldrich) were measured. Dot products2 of these reference spectra with the mapped spectral datasets were calculated to provide a correlation estimate represented as a pseudocolour image. Ordinary least squares analysis3 was also used to estimate the relative proportions of biochemical constituents from regions of interest. Results Biochemical dot product correlation maps were obtained from normal and inflammatory tissue, hyperplastic polyps, adenomatous polyps and cancer. An example is shown in the accompanying figure. Distributions and proportions of DNA, glycogen, lipids and proteins were compared between areas of pathological interest (see figure 1). Figure 1 OC-106 Conclusion Infrared spectroscopy provides valuable biochemical information within colorectal pathologies. This information may aid the diagnosing pathologist and help develop automated histopathological processing.
Simulation of Thermal and Flow Characteristics for Optimum Design of an Automotive Catalytic Converter In the present work, the effect of a flow maldistribution on the thermal and conversion response of a monolithic catalytic converter is investigated. To achieve this goal, a combined chemical reaction and multidimensional fluid dynamic mathematical model has been developed. The present results show that flow uniformity within the monolith brick has a significant impact on light-off performance of the catalytic converter. In the case of lower flow uniformity, large portions of the monolith remain cold due to locally concentrated high velocities, and CO and HC are unconverted during the warm-up period, which leads to retardation of light-off. It has also been found that the heat-up pattern of the monolith is similar to the flow distribution profile in the early stage of the reaction. It may be concluded that flow maldistribution can cause a significant retardation of the light-off and, hence, can eventually worsen the conversion efficiency of an automotive catalytic converter.
Proper wound dressings are an essential medical supply for treating injuries. Without dressings, the wound exudate accumulates and creates breeding grounds for harmful microorganisms. Each year, sterile wound dressings are applied to millions of wounds in order to absorb wound exudate while promoting sterility. Traditional wound dressings have included various cloth and fiber materials as exudates absorbents, such as cotton pads. Unfortunately, these traditional dressings provide relatively limited absorbency and must be changed frequently. Their ability to preserve sterility is also limited, and scabs that form as the wounds heal tend to stick to the dressings. After the dressings are removed, these scabs are also removed, which can be painful and interfere with healing. Efforts have been made to improve upon these traditional wound dressings by applying a non-stick perforated film to the wound-facing side of the dressing. These non-stick films are designed to allow wound exudate to penetrate to the absorbent, while restricting the physical contact between the absorbent and wound in an effort to reduce undesirable adherence between the two. However, these perforated films do nothing to improve on the absorbency of traditional absorbent materials used in dressings. In order to improve upon absorbency, various alternative absorbent materials have been developed. For example, hydrophilic hydrocolloids and hydrogels have been created that provide a translucent or transparent absorbent layer. Such dressings can allow for general inspection of the healing wound. Unfortunately, some such dressings have the problem that the absorbent deforms and partially disintegrates upon swelling. Specifically, as the absorbent takes in fluid, it often bends and buckles such that some of the absorbent material breaks away from the dressing and enters the wound. 
This absorbent material can be left in the wound upon removal of the dressing, which is undesirable for cosmetic and therapeutic reasons. In addition, such materials often have relatively high moisture content prior to application, which can limit their ability to absorb additional water after they are applied. Therefore, a need exists for a wound dressing that improves on existing dressing materials and technology.
import React from 'react'; import { Route, Switch } from 'react-router'; import { ConnectedRouter } from 'connected-react-router'; import { history } from '../core/store/configureStore'; import Home from '../components/home/home'; import Character from '../components/character/character'; import Species from '../components/species/species'; import Planet from '../components/planet/planet'; import Starship from '../components/starship/starship'; import Vehicles from '../components/vehicles/vehicles'; import Films from '../components/films/films'; import Details from '../components/detail/detail'; import './app.scss'; function App(props) { return ( <> <ConnectedRouter history={history}> <> <Switch> <Route exact path="/start-wars" render={props => <Home match={props.match} {...props}/>} /> <Route path="/start-wars/character" render={props => <Character match={props.match} {...props}/>} /> <Route path="/start-wars/species" render={props => <Species match={props.match} {...props}/>} /> <Route path="/start-wars/planets" render={props => <Planet match={props.match} {...props}/>} /> <Route path="/start-wars/starships" render={props => <Starship match={props.match} {...props}/>} /> <Route path="/start-wars/vehicles" render={props => <Vehicles match={props.match} {...props}/>} /> <Route path="/start-wars/films" render={props => <Films match={props.match} {...props}/>} /> <Route path="/start-wars/details/:type/:id" render={props => <Details match={props.match} {...props}/>} /> <Route render={props => (<div>404</div>)} /> </Switch> </> </ConnectedRouter> </> ); } export default App;
Food and Drug Administration regulation of orthotic cranioplasty. OBJECTIVE To present information regarding the current federal regulation of cranial orthotics used for the treatment of deformational plagiocephaly as well as to discuss concerns raised by the Food and Drug Administration regarding the safety and effectiveness of these devices. BACKGROUND Although first introduced in 1979, the use of orthotic helmets for the treatment of deformational plagiocephaly was slow to gain acceptance. However, with the recent increase in infants presenting with this condition, numerous orthotic treatment programs have been established throughout the country. Until recently, federal regulation of this "industry" was largely ignored. REGULATION In 1995 our office was served notice that our orthosis would require clearance from the FDA. Since the FDA had never approved a medical device of this kind, clearance presented a significant challenge. However, after 3 years of providing clinical data, clearance was finally granted, and a new device category known generically as "cranial orthosis" was created. A cranial orthosis is considered to be a Class II neurology device and requires both general and special controls in order to ensure its safety and effectiveness. SUMMARY Orthotics used for the treatment of deformational plagiocephaly are regulated by the FDA and are considered Class II neurology devices. Submission of a premarket notification (510) is required prior to placing these devices on the market.
<gh_stars>0 // // Sqrat: Squirrel C++ Binding Utility // // // Copyright (c) 2009 <NAME> // // This software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. If you use this software // in a product, an acknowledgment in the product documentation would be // appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, and must not be // misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source // distribution. // /*! \mainpage Sqrat Main Page * * \section intro_sec Introduction * * Sqrat is a C++ library for Squirrel that facilitates exposing classes and other native functionality to Squirrel scripts. It models the underlying Squirrel API closely to give access to a wider range of functionality than other binding libraries. In addition to the binding library, Sqrat features a threading library and a module import library. * * \section install_sec Installation * * Sqrat only contains C++ headers so for installation you just need to copy the files in the include directory to some common header path. * * \section sec_faq Frequently Asked Questions * * Q: My application is crashing when I call sq_close. Why is this happening?<br> * A: All Sqrat::Object instances and derived type instances must be destroyed before calling sq_close. 
* * \section discuss_sec Discussion and User Support * * Discussion about Sqrat happens at the Squirrel language forum, the Bindings section * http://squirrel-lang.org/forums/default.aspx?g=topics&f=4 * * \section bug_sec Bug Reporting * * Bug reports or feature enhancement requests and patches can be submitted at the SourceForge Sqrat site * https://sourceforge.net/p/scrat/sqrat/ * * You're invited to make documentation suggestions for Sqrat. Together, we can make Sqrat as easy to understand as possible! */ #if !defined(_SCRAT_MAIN_H_) #define _SCRAT_MAIN_H_ #ifdef SQMOD_PLUGIN_API #include <SqAPI.h> #else #include <squirrelex.h> #endif // SQMOD_PLUGIN_API #include "sqrat/sqratTable.h" #include "sqrat/sqratClass.h" #include "sqrat/sqratFunction.h" #include "sqrat/sqratConst.h" #include "sqrat/sqratUtil.h" #include "sqrat/sqratScript.h" #include "sqrat/sqratArray.h" #endif
Book Review: Tjitske Akkerman, Sarah L de Lange and Matthijs Rooduijn (eds), Radical Right-Wing Populist Parties in Western Europe: Into the Mainstream? The cultural history of parliamentary procedure in the United Kingdom has found its voice in Ryan Vieiras Time and Politics. The study supplies both a social and political context for the confrontation in the Commons arising from executive claims to manage debate, the latter effort taken as a means to shut down Parnellite obstructionists. Vieira begins with the railway building craze after the Reform Bill of 1832 and takes the reader through time-management developments in the Commons, culminating in Balfours coup de main of 1902, aimed at transforming the House of Commons into an executive-driven legislative engine (p. 121). Vieiras thesis centres on the misfit between the time of modernity and the time of Parliament (p. 176). Once it was clear that each member having his say in the Commons was the common enemy of rational management of the Commons calendar, the coup was inevitable. It only remained for either the Liberals or the Conservatives to seize control and deliver the clockwork to the majoritys frontbenchers. Vieiras research includes a close reading of Erskine Mays Private Journals and Gladstones Diaries. May found himself a warm friend of the Liberals, supplying proposals for reform from his fund of in-house expert knowledge. One can blame the Irish obstructionists of 1882 for the mid-way crisis (Gladstone took full advantage) but it was the advent of railway time in the 1830s and 1840s that spawned the national willingness to accept ministerial control of the calendar. Contemporary scholars took note of this development. Sir Sidney Low remarked that, as to public finance, the function of the House of Commons is no longer active (Governance of England, 1904: 90). 
Not only was the opposition enslaved to so many government days or budget days and so forth, but the governments backbenchers were similarly chained. Deliberation, we are informed through a recent Australian study, ranks very low when members of a legislative assembly are polled (p. 179). The trick is to undo the random wasting of that supremely valuable corporate resource time available. The solution may well begin here: how did modern legislative assemblies put themselves out of the business of deliberating to bring about better outcomes in the first place?
/**
 * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
 * methods.
 *
 * Dispatches on the <code>action</code> query parameter to the matching
 * <code>CustomDb</code> operation and writes the accumulated
 * {@code DataResponse} back to the client as JSON. Unknown actions are
 * reported via {@code DataResponse#unsupportedAction}; actions listed but
 * not yet implemented (CreateType, ListTypes, UpdateObject, DeleteObject,
 * GetObject, QueryObject) fall through and return the default response.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
protected void processRequest(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    // Attach CORS headers before anything is written to the response.
    response = EnableCors(response);
    DataResponse jr = new DataResponse();
    // Echo the raw query string back so the client can correlate responses.
    jr.parameters = request.getQueryString();
    try {
        String action = request.getParameter("action");
        if (action == null) {
            action = "";
        }
        switch (action) {
            case "CreateApplication":
                jr.mergeResponse(CustomDb.createApplication(request));
                break;
            case "DeleteApplication":
                jr.mergeResponse(CustomDb.deleteApplication(request));
                break;
            case "ListApplications":
                jr.mergeResponse(CustomDb.listApplications(request));
                break;
            case "CreateType":
                break;
            case "ListTypes":
                break;
            case "AddObject":
                jr.mergeResponse(CustomDb.addObject(request));
                break;
            case "UpdateObject":
                break;
            case "DeleteObject":
                break;
            case "GetObject":
                break;
            case "QueryObject":
                break;
            default:
                jr.unsupportedAction(action);
        }
    } catch (Exception e) {
        // Previously this catch block was empty, silently swallowing any
        // failure and returning a partially-populated response with no
        // diagnostic. At minimum record the failure server-side (matching
        // the printStackTrace style used elsewhere in this codebase).
        e.printStackTrace();
    }
    // try-with-resources ensures the writer is flushed and closed.
    try (PrintWriter out = response.getWriter()) {
        out.println(jr.asJson());
    }
}
By 1961, The Everly Brothers started active service for the 8th Battalion Marine Corps Reserves, working as artillerymen. 1969, John Lennon returned his MBE to The Queen on the grounds of the UK’s involvement in the Nigeria Biafra war, America in Vietnam, and against his latest single ‘Cold Turkey’ slipping down the charts. 1974, UK singer, songwriter Nick Drake died in his sleep aged 26 of an overdose of tryptasol an anti-depressant drug. Drake signed to Island Records when he was twenty years old, recorded the classic 1972 album Pink Moon. In 2000, Volkswagen featured the title track from Pink Moon in a television advertisement, and within a month Drake had sold more records than he had in the previous thirty years. 1976, The Band made their final performance; ‘The Last Waltz’ the show also featured Joni Mitchell, Dr John, Neil Young, Van Morrison, Neil Diamond, Eric Clapton and others. Martin Scorsese filmed the event. 1984, the cream of the British pop world gathered at S.A.R.M. Studios, London to record the historic ‘Do They Know It’s Christmas?’ The single, which was written by Bob Geldof and Midge Ure, featured Paul Young, Bono, Boy George, Sting and George Michael. It went on to sell over three million copies in the UK, becoming the bestselling record ever, and raised over £8 million ($13.6 million) worldwide. 1995, Radiohead singer Thom Yorke blacked out halfway through a show in Munich, Germany, suffering from exhaustion. 2000, a burglar broke into Alice Cooper‘s home and made off with over $6000 worth of clothes, shoes and cameras belonging to the singers daughter. The good’s were all lifted from Cooper’s house in Paradise Valley, along with four of the star’s gold discs. 2003, Michael Jackson launched a website to defend himself following allegations of sexual abuse of a 12-year old boy. The singer posted a message saying the charges were based on ‘a big lie’ and he wanted to end ‘this horrible time’ by proving they were false in court. 
2003, Glen Campbell was arrested in Phoenix Arizona with a blood alcohol level of .20 after his BMW struck a Toyota Camry. He was charged with ‘extreme’ drunk driving, hit and run, and assaulting a police officer. A police officer reported that while in custody, Campbell hummed his hit ‘Rhinestone Cowboy’ repeatedly. 2003, Meat Loaf underwent heart surgery in a London hospital after being diagnosed with a condition that causes an irregular heartbeat. The 52-year-old singer had collapsed on November 17th as he performed at London’s Wembley Arena. 2007, Kevin Dubrow, the frontman with metal band Quiet Riot, was found dead in his Las Vegas home at the age of 52. Their 1983 release Metal Health was the first metal album to top the US charts. The band’s biggest hit was ‘Cum on Feel the Noize’, a cover of the Slade song which they are said to have grudgingly recorded in just one take. 2008, the legal dispute over a music contract between Michael Jackson and an Arab sheik, ended with an “amicable settlement.” Jackson had been due to fly in to the UK to give evidence at the High Court before an agreement in principle was reached. The King of Bahrain’s son, Sheikh Abdulla Bin Hamad Bin Isa Al-Khalifa, was suing Jackson for £4.7m, claiming he reneged on a music contract. Born on this day 1940, Percy Sledge, soul singer 1950, Jocelyn Brown, singer 1959, Steve Rothery, guitar, Marillion 1968, Tunde, singer, Lighthouse Family
Hypoxemia in the setting of right to left shunting through patent foramen ovale without pulmonary hypertension. Patent foramen ovale is often seen in the population but rarely observed with right to left shunting in the absence of pulmonary hypertension. Our report describes such a case where a patient with progressive shortness of breath had resolution of symptoms upon percutaneous closure. A discussion of the case and relation to similar cases is presented. A literature review along with explanation of possible contributing mechanisms in our patient's situation is explained. We also discuss several implications for practice and suggest that percutaneous closure is effective in our case and in similar situations.
The invention relates to rotational pumps and methods for controlling rotational pumps. Although research in the field of physiological control of rotary pumps dates back to the early 1990s rotary blood pumps (RBP) used as left ventricular assist devices (LVADs) were initially operated at a constant rotational speed which was adjusted individually according to the patient's need. Early clinical experience clearly showed that ventricular collapse and excessive suction are serious hazards related with the operation of these pumps. These rotational blood pumps are implanted into a human body. The inlet of the blood pump is normally connectable to the left ventricle of the human, the outlet of a pump is connectable to the aorta, downstream of the aortic valve (AoV). RBPs used as LVADs are often required to deliver the maximum possible flow rate. This may be the case in the early post-op period or when seriously impaired end-organ function requires optimum perfusion. Several approaches are known that attempt to meet this requirement by operating the pump near the collapse point of the LV, where the flow rate is as high as possible. On the other hand, it is known that excessive unloading of the LV may impair the pumping performance of the right ventricle because of the septum shift. Furthermore, it is hypothesized that the alteration of the natural flow path of the LV in combination with the greatly reduced LV wall movement due to full unloading causes recirculation and stasis inside the LV cavity. To date, there is only anecdotal evidence of thrombus formation in the LV, but atrial fibrillation can be considered to be a comparable situation in which thrombo-embolic complications are a well-known problem. Additionally, full unloading is contra-indicated for patients whose hearts may recover under assist and who are potential candidates for weaning. 
These facts strongly indicate that it may be better not always to operate the RBP at the point of maximum flow rate but also at a point where unloading is only partial, LV volume and LV wall movement are not minimal and at the optimum achievable washout of the LV cavity and where the aortic valve opens at least occasionally. It is the object of the invention to provide a rotational blood pump and a control method which finds and adjusts the optimum operating point under all conceivable physiological situations without requiring the attention of a physician. An operating point may be optimal with regard to the therapeutic objectives mentioned above and which can be classified into two cases: Full Assist (FA)—maximum support with closed AoV but sufficient safety margin to avoid suction, and Partial Assist (PA)—moderate support at the transition region between the opening of the AoV and a permanently-closed AoV with near-physiological LV volume, better LV washout and moderate LV loading.
Association between serum albumin and 60-day mortality in Chinese Hakka patients with non-APL acute myeloid leukemia: a retrospective cohort study Background Acute myeloid leukemia (AML) is the main type of adult leukemia, and 60-day mortality is a vital clinical problem that doctors have to face at the begin with treatment. Studies on the association between serum albumin and 60-day mortality from AML (non-APL) are limited. Methods In this retrospective cohort study, ALB was measured after admission in all patients diagnosed with primary AML from Affiliated Ganzhou Hospital of Nanchang University between January 2013 and May 2021. The outcome was all-cause, 60-day mortality. Multivariable Cox regression analyses were performed to calculate the adjusted hazard ratio (HR) and its corresponding 95% confidence interval (CI). Results This study included 394 primary AML patients. The overall 60-day mortality was 28.9% (114/394); it was 43.1% (56/130), 27.5% (36/131), and 16.5% (22/133) for ALB quantile1 (Q, <34.5g/L), quantile 2 (Q2, 34.538.5g/L), and quantile 3 (Q3, ≥ 38.6g/L), respectively (P =0.001). After adjusting for potential confounders, we found an association between a 6% decrease in 60-day mortality rate and a 1g/L increase in ALB level (HR=0.94, 95% CI: 0.890.99, P =0.015), which was associated with 38 and 70% decreases in 60-day mortality rates in Q2 (HR=0.50, 95% CI: 0.300.86, P =0.012) and Q3 (HR=0.47, 95% CI: 0.2 50.90, P =0.022), respectively, compared with that in Q1. Similar results were obtained after subgrouping based on an ALB level of 35g/L (HR=0.55, 95% CI: 0.340.88, P =0.013). Conclusions Serum albumin was significantly associated with 60-day mortality of primary AML, which has important clinical significance. Further investigation is warranted. Supplementary Information The online version contains supplementary material available at 10.1186/s12885-022-10231-0. 
Introduction Acute myeloid leukemia (AML) is a heterogeneous clonal myeloid neoplasm characterized by maturation arrest of hematopoietic progenitor cells, leading to uncontrolled blast proliferation. The abnormal differentiation of myeloid cells results in a high level of immature malignant cells and fewer differentiated red blood cells and platelets. AML is classified as acute promyelocytic leukemia (APL) and non-APL based upon treatment regimens. 60-day mortality, commonly known as early mortality, defined as death from any cause within 60 days of hospitalization, is a vital clinical problem, which hematologists are managing to avoid it ;Previous studies paid close attention to early mortality in APL, but much less to non-APL, especially in Hakka population. Ganzhou, situated in the southern part of Jiangxi Province, is home to an important Hakka population in China, with a population of nearly 10 million, or 10% of the world's Hakka population. Serum albumin, a routine laboratory item,is often used clinically to judge the nutritional status or physical condition of patients. This study aimed to assess the association between serum ALB and 60-day mortality among the Chinese Hakka patients with primary AML (non-APL). Study design and participants The present retrospective single-center analysis included data of consecutive patients who were primary diagnosed with AML at Affiliated Ganzhou Hospital of Nanchang University (Jiangxi Province, China) between January 1, 2013, and May 31, 2021. All participants in this study underwent bone marrow (BM) aspiration, and AML diagnosis was confirmed based on two or more ways of morphology, immunology, cytogenetic and molecular (MICM) analysis, according to the World Health Organization (WHO) classification system (version 2016). This study followed the principles of the Declaration of Helsinki and was approved by the Ethics Review Board of Affiliated Ganzhou Hospital of Nanchang University. 
Given the retrospective nature of the study and the use of anonymous patient data, the requirement for obtaining informed consent was waived. Patients diagnosed APL were excluded because their management and treatment were quite different from the patients with the other subtype of AML. Patients with a history of other hematological malignancies, such as myelodysplastic syndrome (MDS) or myeloproliferative neoplasms (MPN) e.g., were excluded due to secondary AML,because the level of serum ALB may be affected by the primary disease. Individuals with mixed phenotype acute leukemia (MPAL) and AML-M 6, who did not meet the WHO classification criteria (version 2016), were also excluded as they were categorized as non-AML patients from a strict sense. Patients with liver failure or nephrotic syndrome that causes hypoalbuminemia were excluded from the study. In addition, patients who were aged < 15 years or non-Hakka, did not undergo a serum ALB test in 48 hours of admission, or were lost to follow-up were also excluded; The remaining patients with or without chemotherapy were included in the study. AML patients were divided into four subtypes: AML-M 2, AML-M 4, AML-M 5, and other subgroups in this cohort study. The flowchart of the patient selection process is presented in Fig. 1. Source of data Data, including survival status, were collected from the electronic medical record system or via follow-up telephone calls. The baseline examinations included blood and BM parameters. The biomarkers included ALB, glucose (Glu), direct bilirubin (DBIL), creatine kinase isoenzyme MB, myoglobin (Myo), serum ferritin (SF), fibrinogen (Fib), BM blast e.g.. All laboratory data included measurements performed within the first 48 hours of admission to reduce the probability that serum biomarker levels were affected by anti-leukemia therapy. These parameters comprise routine testing, commonly used to evaluate the patient's physical condition. 
Chemotherapy was administered within 60 days of hospitalization, and none of the patients received bone marrow transplantation. Serum ALB Serum ALB levels were measured using a biochemical analyzer (AU5800. Beckman Coulter, Inc.) with an albumin determination reagent kit (Bromocresol Green method) (Zhejiang Elekon Biotechnology Co., Ltd), read our previous report, in brief. The reference interval for ALB was 35-55 g/L, followed the national standard. Every day, we did internal quality control (IQC) (Bio-Rad Laboratories, Inc. Hercules, CA, USA) with commercially available control materials before testing blood samples. Serum ALB was included in the activities of external quality control (EQA), which was hosted by the National Center for Clinical Laboratories (NCCL) thrice a year, and during this study all criteria for feedback reports were fulfilled. All data from this study, including ALB and other items or parameters, were the preliminary test results of patients after admission due to AML. ALB levels were divided into three groups based on ALB quantiles or two based on its level of 35 g/L. Outcome The primary endpoint and outcome of interest were death within 60 days of admission. Collectors of patients' clinical information at the first diagnosis were blinded to the survival data. Statistical analysis This study aimed to observe the impact of ALB on 60-day mortality in patients with AML. The patients were divided into three groups based on ALB quantiles. A descriptive analysis was asked to all participants. Continuous data were expressed as mean and standard deviation or median and interquartile range (, quartile 1-quartile 3), as appropriate. The categorical variables were expressed as proportions (%). The variables were compared using of chi-square test (categorical variables), one-way analysis of variance (normal distribution), and Kruskal-Wallis (skewed distribution) tests. 
Multivariate Cox regression analysis was used to assess the independent association between serum ALB levels and 60-day mortality. An extended Cox model approach was used for models that were adjusted for various covariates. Covariables were chosen on the basis of previous findings and clinical constraints. Or we adjusted for variables, of which the p-values were less than 0.005 for univariate analysis. Survival curves were plotted using of Kaplan-Meier method and were evaluated for statistical significance using log-rank tests. Subgroup analyses were stratified based on the relevant effect covariates. Dummy variables were used to indicate the missing covariancevalues if the missing data variables were greater than 10%. Analyses were stratified according to the results of the univariate analysis (p < 0.005), including sex, age, Glu, Myo level, and chemotherapy, to examine the effect of these factors on the above associations. The likelihood ratio test was used to assess the effect modification according to the respective subgroups using interaction terms between subgroup indicators and ALB. Interactions across subgroups were tested using the likelihood ratio test. All analyses were performed using R 3.3.2 (http:// www.R-proje ct. org. The R Foundation) and Free Statistics (version 1.4). Differences with a two-sided P-value of < 0.05 were considered significant. Baseline characteristics of the study participants by categories of serum ALB levels The final cohort included 394 patients (Fig. 1). None of them had the history of liver failure or nephrotic syndrome. In this study population, 174 patients (44.2%) with AML-M 2 had the highest proportion, followed by 127 patients (32.2%) with AML-M 5. The other subgroups were AML-M 0 (n = 4), AML-M 1 (n = 25), AML-M 6 (n = 2), and AML-M 7 (n = 8). The patient's baseline characteristics are presented in Table 1. The age of the patient was 55.1 ± 17.3 years (range, 15-94 years), 206 (52.3%) were men. 
A total of 114 patients died within 60 days after admission, including 22 cases of organ failure (respiratory failure 9 cases, heart failure 9 cases, acute renal failure 4 cases), 18 cases of hemorrhagic disease (cerebral hemorrhage 9 cases and gastrointestinal tract 2 cases, DIC 5 cases and others 2 cases), 67 cases of infectious diseases (lung 55 cases, sepsis 6 cases, others 6 cases), There were 7 cases of other causes. Association between serum ALB and 60-day mortality The Kaplan-Meier curve showed that patients in the ALB Q1 group showed the highest 60-day mortality rate, while those in the ALB Q3 group showed the lowest 60-day mortality rate (log-rank test: p < 0.0001, Fig. 2). In the extended multivariable Cox models (Table 2), the hazard ratios (HRs) of serum ALB (per 1 g/L increase) were consistently significant in all three models (HR range: 0.91-0.94). The covariates were selected with univariate analysis of risk factor (attachment Table S1). There were two dummy variables as covariance values, including Genomic risk category (101(25.63%) data missing) and SF (57(14.47%) data missing). After adjustment for all covariance, patients with ALB Q3 demonstrated a 53% decrease in 60-day mortality rate (HR = 0.47, 95% CI: 0.25-0.90, p = 0.022, model III), compared with ALB Q1. Similar results were observed when the ALB was divided into two groups based on an ALB level of 35 g/L (HR = 0.55, 95% CI: 0.34-0.88, p = 0.013, model III). Subgroup analyses To detect whether the association between serum ALB levels and 60-day mortality of AML was present in different subgroups, analyses and interactive analyses were stratified according to the confounders, including age, sex, Glu level, MYO level, and chemotherapy (Fig. 3). No significant interactions were observed in the subgroups (All p-value for interaction more than 0.05). 
Discussion Sixty-day mortality in AML is still a vital clinical problem of concern to hematologists; our study explored the association between serum ALB and 60-day mortality only in patients with primary AML.
Furthermore, it is an indicator of chronic inflammation [6,7,. Low serum albumin levels resulting from inflammation-induced capillary leakage or disease related anorexia during acute illness are associated with poor outcomes. Additionally, Dylan et al. reports that albumin is a major antiapoptotic signaling component and is involved in the transport and metabolism of chemotherapeutic drugs for leukemia. These previous studies may help proving the association between low serum albumin and high 60-day mortality. Hematologists usually evaluate the risk of early mortality based on the clinical performance status and laboratory data. However, the definition of early mortality in AML remains controversial; it was defined as death within 60 days from the final diagnosis or the start of chemotherapy. Either 60-day mortality or early mortality remains a major clinical problem, which is the first stage toward successful treatment of AML patients. Its causes remain complicated and unclear, even though hematologists have been striving to reduce its risk. The early mortality was 21.0-37.5% reported in previous studies, and 60-day mortality was 28.9% in our study, therefore, our result was similar to previous studies. However, our study differs slightly from theirs. Most previous studies only included patients who received chemotherapy, while others without chemotherapy were excluded. Among these excluded patients, most of them, were extremely poor making them unsuitable to receive anti-leukemia treatment, due to the existing comorbidities. For example, some patients developed secondary diffuse intravascular coagulation (DIC) with severe intracerebral or pulmonary hemorrhage upon admission, and were not in a position to receive chemotherapy as they died quickly; and they were excluded subjectively. 
Thus, early mortality in these studies might be reduced due to patient selection bias, and our study might reflect a real association between serum ALB and 60-day mortality in the real world. This study has several noteworthy limitations. First, although it included several key covariance, unmeasured factors may have contributed to the increased risk of adverse events in patients with a low serum albumin. Second, regardless of the fact these findings raised questions regarding the potential risk for 60-day mortality, interpretation of the results is limited by the observational nature of the study; therefore, the study might not provide direct evidence for predicting 60-day mortality in AML patients. Third, this was a retrospective study; data were collected from 2013 to 2021 (over an 8-year period), and some data on the date of death were obtained by telephone follow-up and may be biased. To reduce bias, interviews with at least two or three family members were conducted to determine the exact survival time of the patients. Meanwhile, different batches of ALB reagents affected the test results to some degree. To make the value dependable, internal quality control (IQC), which becomes the integral part of daily work, is required to ensure the result is controlled before testing the clinical specimens. We have participated an external quality assessment (EQA) organized by the NCCL thrice a year to ensure the accuracy of the testing results since the 1990s. Fortunately, all EQA results were satisfied duration of this study. Meanwhile, the instrument must be calibrated twice a year as part of regular maintenance. Therefore, all the testing results were dependable. Conclusion Serum ALB may be associated with 60-day mortality in patients with AML. These results are of great significance. AML patient with serum albumin < 35 g/L, should be closely managed by the hematologist. Further research is needed to confirm and validate these associations.
Late and very late mortality in 5-year survivors of childhood cancer: Changing pattern over four decades — Experience from the Nordic countries Long-term survivors of childhood cancer suffer from a higher mortality than the general population. Here we evaluate late and very late mortality, and patterns of causes of death, in 5-year survivors after childhood and adolescent cancer in cases diagnosed during four decades in the five Nordic countries. The study is population-based and uses data of the nationwide cancer registries and the cause of death registers. There were in all 37,515 incident cases, diagnosed with cancer before the age of 20 years, between 1960 and 1999. The 5-year survivor cohort used in the mortality analyses consisted of 21,984 patients who were followed up for vital status until December 31, 2005 (Norway, Sweden) or 2006 (Denmark, Finland, Iceland). At the latest follow-up, 2,324 patients were dead. The overall standardized mortality ratio was 8.3 and the absolute excess risk was 6.2 per 1,000 person-years. The pattern of causes of death varied markedly between different groups of primary cancer diagnosis, and was highly dependent on time passed since diagnosis. With shorter follow-up the mortality was mainly due to primary cancer, while with longer follow-up, mortality due to second cancer and non-cancer causes became more prominent. Mortality between 5 and 10 years after diagnosis continued to decrease in patients treated during the most recent period of time, 1990-1999, compared to previous periods, while mortality after 10 years changed very little with time period. We conclude that improvement of definite survival demands not only reducing early but also late and very late mortality.
/**
 * Logs one line in a log file with the current time as a timestamp.
 *
 * @param line the line to log.
 * @param keepSame if true writes to the same file as the previous commit even
 *                 though the hour has changed since then.
 * @return this object, to allow call chaining
 * @see #commit(java.util.Date, java.lang.String, java.lang.String, boolean) commit()
 */
public ZLog log (String line, boolean keepSame) {
    try {
        // BUG FIX: the original hard-coded `false` here, so the documented
        // keepSame behavior was silently ignored. Forward the parameter.
        commit (new Date(), "", line, keepSame);
    } catch (Exception ex) {
        // Best-effort logging: never let a logging failure propagate to the
        // caller; just report it on stderr.
        ex.printStackTrace();
    }
    return this;
}
Care of people dying with malignant and cardiorespiratory disease in general practice. BACKGROUND Provision of palliative care for people dying with malignant disease is a well-characterised aspect of general practice workload. The nature of end-of-life care of people with non-malignant disease is less well described. AIM To compare the general practice care provided in the last year of life to people who died with malignant and with cardiorespiratory disease. DESIGN Case record review. SETTING Two Leicestershire general practices: one inner-city, one semi-rural; total practice population 26,000 people. METHOD General practice review of the records of all people registered with the practices who died with malignant or cardiorespiratory disease between 1 August 2000 and 31 July 2002 to determine: cause and place of death, recorded comorbidity, palliative medication prescribed, number of consultations and continuity of care, receipt and duration of palliative care. RESULTS When compared with people who died with cardiorespiratory disease, those who died with malignant disease were more likely to have had a terminal phase of their illness identified and to have been prescribed more palliative drugs. Both groups consulted a similar number of times, experienced similar continuity of care, had similar comorbidity, and were equally likely to die at home. CONCLUSION People who died with cardiorespiratory disease were less likely to be in receipt of formally identified terminal care and were likely to have had fewer drugs prescribed for palliation than people with malignant disease, yet they make similar demands of practices. They are likely to have unmet needs with respect to palliation of symptoms.
Climate Change and the Stability of Water Allocation Agreements We analyse agreements on river water allocation between riparian countries. Besides being efficient, water allocation agreements need to be stable in order to be effective in increasing the efficiency of water use. In this paper, we assess the stability of water allocation agreements, using a game theoretic model. We consider the effects of climate change and the choice of a sharing rule on stability. Our results show that both a decrease in mean river flow and an increase in the variance of river flow decrease the stability of an agreement. An agreement where the downstream country is allocated a fixed amount of water has the lowest stability compared to other sharing rules. Introduction When multiple countries share a river, they compete over available water resources. The upstream country has the first option to use water, which may obstruct the overall efficiency of water use. Cooperation between upstream and downstream countries-in the form of a water allocation agreement-may increase the efficiency of water use. Whether cooperation is stable, however, depends on the design of the water allocation agreement. The stability of water allocation agreements is the subject of this paper. In the twentieth century, 145 international agreements on water use in transboundary rivers were signed; almost 50% of these agreements cover water allocation issues. The majority of these water allocation agreements does not take into account the hydrologic variability of river flow. This is a shortcoming because variability is an important characteristic of river flow. This variability will even increase in many river basins when the effects of climate change on temperature and precipitation proceed as projected by climate simulation models. These effects are expected to increase the variability of the annual and seasonal flow patterns as well as the frequency of extreme events in many river basins. 
Recognition of flow variability in the design of water allocation agreements can increase the efficiency of these agreements. Several studies have addressed this issue for two common sharing rules for water allocation: proportional allocation and fixed flow allocation . Fixed flow allocations are most common but tend to be less efficient when flow variability increases. Bennett et al. compared the efficiency of fixed flow allocations with proportional allocations and found that, in many situations, proportional allocations are more efficient. Kilgour and Dinar developed a sharing rule 3 that ensures a Pareto-efficient allocation for every possible flow volume, where the level of compensation paid by receivers of water is subject to annual bargaining. Obviously, compared with a proportional or fixed flow allocation, this flexible allocation is more efficient, but it requires accurate predictions of annual river flow. In a case study of the Colorado river, Mendelsohn and Bennett found that the loss of efficiency related to a change in mean river flow (e.g. because of climate change) is higher for a proportional allocation than for a fixed allocation, the main reason being that the initial proportions used were inefficient. Another result was that the largest impact of climate change on efficiency comes from changes in the mean of river flow, not from changes in its variance. Furthermore, in an analysis of U.S. interstate water allocation compacts, Bennett and Howe found that agreement compliance is higher for proportional than for fixed flow allocations. Apart from being efficient, water allocation agreements need to be stable in order to be effective instruments to increase the efficiency of water use. Efficiency and stability of agreements are not necessarily linked. 
Climate change, for instance, may increase the benefits of cooperation to one country while decreasing those of the other, leaving overall efficiency equal, but possibly giving the country with decreased benefits an incentive to leave the agreement. Because agreements are signed between sovereign nations, there is usually no higher level authority that can enforce compliance. The stability of agreements therefore depends on the distribution of the benefits of cooperation to the countries involved, which can be analysed using game theory. Recent studies showed that water allocation agreements can improve the efficiency of water use and that-when benefits of cooperation are distributed properly-they can be attractive to all coun-4 tries involved. This game theoretic literature, however, does not explicitly consider the effects of climate change on river flow and agreement stability. The objective of this paper is to assess the stability of water allocation agreements when climate change affects river flow. This is done by constructing a game theoretic model of water allocation that analyses stability of three sharing rules for water allocation. Results show that both a decrease in mean river flow and an increase in variance of river flow decrease stability, and that an agreement where the downstream country is allocated a fixed amount of water has the lowest stability. The remainder of this paper is organized as follows. In sections two and three we present our model and assess stability of cooperation. In section four we illustrate the effects of climate change on the stability of cooperation for different sharing rules, using a numerical example. In section five we assess stability effects of alternative punishment strategies and asymmetric countries. In section six we discuss the results using agreements in the Nile river basin, the Orange river basin, and the South Saskatchewan river basin as illustrations, and we conclude in section seven. 
A model of cooperation A river is shared by two countries i ∈ {u, d}, having its source in the upstream country u and subsequently flowing through the downstream country d. Q t denotes the volume of river flow in year t that is available for use; it excludes the river flow necessary to sustain the environmental functioning of the river system and other vital services such as navigation. In year t, country i uses q i,t units of water. Because of the unidirectional flow of water, u has the first option to use water, which may limit water use by d. All water that was not used by u, is available for use by d: Benefits B i,t (q i,t ) from water use are concave with a maximum atq i,t. Clearly, if u maximizes benefits of water use, it does not have an incentive to pass water to d that has a positive marginal value to him. Yet, if the benefit to d of using more water outweighs the decrease in benefits to u, there is scope for cooperation, with u passing on water to d. There are many sharing rules to allocate water between countries. We analyse three common sharing rules: Proportional allocation (PA): u receives Q t and d receives (1 − )Q t, with 0 < < 1; Fixed upstream allocation (FU): u receives min{, Q t } and d receives max{Q t −, 0}, with 0 < < E(Q t ); Fixed downstream allocation (FD): u receives max{Q t −, 0} and d receives For cooperation to be attractive to u, we need to include non-water transfers m t paid by d to u. These non-water transfers may be monetary or in-kind transfers. There are ample examples of such non-water transfers related to river basin agreements. We assume that non-water transfers are equal to the expected value of compensation of u for benefits foregone 6 and a share of the additional benefits from cooperation. The non-water transfers, paid by d to u, are constant: where superscript c denotes cooperation, n denotes non-cooperation, and water use-and therefore benefits-depends on the sharing rule agreed upon. 
This method to calculate non-water transfers is related to the Nash bargaining solution; a common solution concept from non-cooperative game theory. The Nash bargaining solution of a game maximizes ( subject to x u, x d ∈ F, where F is the feasible set of payoff vectors and z = (z u, z d ) are non-cooperative payoffs . Here, the calculated nonwater transfers equal the asymmetric Nash bargaining solution. 1 We analyse the stability of cooperation using an infinitely repeated game-a common approach in the analysis of international environmental agreements -because water allocation agreements typically do not have a specified termination date. The stage game in year t is played as follows. First, a value of Q t is realized from its probability distribution. Second, the countries observe Q t and simultaneously choose their action: u chooses q u,t and d chooses m t. If complying with the agreement, u plays q u,t = q c u,t according to the selected sharing rule, and earns B c u,t = B u,t (q c u,t ). If deviating, u plays q u,t = q n u,t = min{q u,t, Q t }, and earns B n u,t = B u,t (q n u,t ). If complying with the agreement, d plays m t = m c. If deviating, d plays m t = m n = 0. Third, countries observe the strategy played by the other country and receive payoffs. 2 1 Two alternative methods to calculate non-water transfers are the Shapley value and Nucleolus, solution concepts from cooperative game theory. 2 Alternatively, one could assume a Stackelberg game where u is the leader and d is the 7 The decision to cooperate or deviate in year t is based on the expected payoff stream: We assume that both countries use trigger strategies: when a country deviates, it is punished by the other country in the form of p periods noncooperative play of the stage game, after which countries return to cooperative play (i.e. agreement strategies). Hence, the expected payoff streams to u and d for compliance in year t equal: where is the discount factor. 
The expected payoff streams to u and d for deviating in year t equal: The differences, D u and D d, equal the net present value (NPV) of deviating to u and d: follower. This would, however, not change the general results. From equation it follows that D u is determined by the difference between benefits of non-cooperative and cooperative play in year t, plus a "punishment" term that has a constant (negative) expected value. From equation it follows that D d is independent from the level of Q t, hence constant, for a given probability distribution of Q. Because D d is negative at Q t = E(Q t )-an agreement would not be signed if D d ≥ 0 at the expected value of river flow-it is negative for any Q t. Therefore, in the remainder of this paper, we will focus only on u's incentive to deviate. The type of punishment used here differs from Bennett and Howe, who used monetary penalties in their analysis of cooperation between US states. We assume here that there is no authority that can issue this type of penalties when a dispute occurs between nations, a characteristic of many international agreements. In an overview of existing agreements on transboundary freshwater, Beach et al. show that in half of the agreements, disputes are handled by advisory councils, governments' conflictaddressing bodies, the United Nations or other third parties. The other half of the agreements does not refer to any form of dispute resolution. The absence of a higher level authority that can issue penalties is clear; hence a reasonable punishment is non-cooperative behaviour by the other country. Analysing stability The folk theorem tells us that cooperation can be sustained in equilibrium as long as punishments are severe enough. When discounted payoffs of cooperation outweigh the sum of discounted payoffs of deviation in one year and Nash-payoffs during the subsequent punishment phase, an agreement is said to be stable. 
Because of the uncertainty of payoffs in this model, through the stochastic variable Q, it is not possible to assess whether cooperation is stable or not. It is, however, possible to assess the probability of stability. To do this, we need to determine a threshold value of Q t, for which the agreement is In the remainder of this paper we will use this expression as our stability indicator and refer to it simply as "stability". Both for a mean-preserving spread and for a decrease in mean river flow this area decreases in size, which negatively affects stability. Result 1 Stability of a water allocation agreement depends on the probability density function of river flow. It decreases if this density function changes by a mean-preserving spread or a decrease in mean river flow. We expect the stability of cooperation to be different for different sharing rules. To verify this expectation, we compareQ for the three sharing rules. In the comparison, we set E the water allocation is similar for each sharing rule. In calculatingQ from equation we can ignore the punishment term, because it is equal for all three sharing rules. We can also ignore B n u,t, because it is equal for all three sharing rules. Hence, we only have to compare cooperative benefits B c u,t. There are two situations when B c u,t is not equal for all three sharing rules: if Q t < E(Q t ) and if Q t > E(Q t ). Because we assume thatQ < E(Q t ), we only look at the situation where Q t < E(Q t ). If Q t < E(Q t ), we have Q t − < Q t < and using equation Result 2 Stability of a water allocation agreement depends on the sharing rule. It is higher for fixed upstream allocation than for proportional allocation and lowest for fixed downstream allocation. Taking a closer look at FU, we find that D u is maximized at Q t ≥q u,t. To see this, using equation, note that we can ignore the punishment term because it is constant. Hence, we consider the maximization problem: for FU. There are three possibilities: 3. 
ifq u,t ≤ Q t then q n u,t =q u,t and q c u,t =. Clearly, in the last situation, equation is maximized. We argue that the last situation includes Q t = E(Q t ), because we assume thatq u,t ≤ E(Q t ). This assumption is based on the idea that in the short term, u's economy and infrastructure are not designed to abstract and use (much) more water than is expected in a given year. 3 Because we may assume that D u < 0 for Q t = E(Q t ), we know that D u < 0 for any level of Q t. It follows thatQ does not exist for FU. Hence 1 − F(Q) equals one; FU is stable. Result 3 Water allocation agreements with fixed upstream allocation are stable for any level of river flow. Because FU is stable for any level of river flow, we will focus on PA and FD only in the next section. Numerical example To illustrate the results of the model, we use the following numerical example: The values for,, and are chosen such that at Q t = E(Q t ) the water allocation is similar for each sharing rule. Because the countries have symmetric benefit functions, the allocation is optimal when Q t = E(Q t ). 4 Further- 3 Ifq u,t E(Q t ), FU is unstable for Q t large enough. 4 Because the countries have symmetric benefit functions in this example, PA will provide a more efficient allocation than FU or FD when climate change effects occur: the total benefits of water use are maximized. This property of the model is similar to results from efficiency studies that were surveyed in the introductory section of this paper . 13 more, for each sharing rule, cooperation is attractive to both countries for Q t = E(Q t ), because countries would never agree to cooperate if there was no expected gain from cooperation. Two interesting aspects can be observed in figure 2. First, looking at the FU curve, we can observe that indeed D u < 0 for any level of Q t and that D u is maximized at Q t ≥q u,t. 
Second, we observe that the point where the FD curve crosses the horizontal axis (Q FD = 32.9) lies to the right of the point where the PA curve crosses the horizontal axis (Q PA = 25.6), hence, PA is more stable than FD. The decrease of D u for Q t less than ±20 is caused by the decreasing gain of deviation relative to the punishment. The stability of cooperation depends on the probability distribution of Q. In this example we use the gamma distribution to describe f (Q) and f (Q), which is an appropriate and commonly applied distribution in the literature on probabilistic hydrological forecasting. 14 The effect of a change in the mean or variance of river flow on the stability of cooperation is shown in figure 3, for both PA and FD. The mean river flow refers to the mean of f (Q), the probability density function of Q t when climate change effects occur. 5 Two interesting aspects can be observed in figure 3. First, the figure illustrates for selected levels of mean and variance that FD is less stable than PA. Second, when the mean flow is higher than Q t -which seems realistic given that E(Q) = 40-both a decrease in mean river flow and an increase in variance of river flow decreases stability., the probability density function of Q t when climate change effects occur. 5 The calculation of expected benefits is still based on E(Q) = 40-the mean of the original probability density function f (Q)-because the agreement will not be immediately adapted at the first signs of climate change effects on river flow. Governments need reliable information before they are willing to change conditions of this type of agreements; long-term observations are needed before a change in the probability distribution of river flow can be assessed. Punishment and asymmetry In this section, we assess the effects on stability of two interesting factors: alternative punishment strategies, and asymmetry in benefit functions and political power. 
For both factors we assess how they affect stability. Alternative punishment strategies We have argued that the only possible punishment for deviation by the other country is a trigger strategy of non-cooperative play for p periods. Variations on this type of punishment are possible. A first example is titfor-tat, where the period of punishment depends on the behaviour of the other country. If u deviates p consecutive years, the punishment period is also p years. A second example is a grim trigger strategy where the period of punishment is infinite. Both strategies and other variations, however, are similar to the strategy described above, with p = 1 and p = ∞ respectively. More interesting punishment strategies may arise when the issue of water allocation is linked to an other transboundary issue between the two countries. In the game on water allocation, d is the country that benefits most from cooperation. For issue linking to be most effective, this game should be linked to a game where u can benefit more than d, a good example of which is the facilitation of river transport by d to u. It is clear that the punishment term may increase when the two games are linked, as long as the benefits of river navigation to u are sufficiently large. From these examples it becomes clear that alternative punishment strategies change the size of the punishment term (denoted by ). To assess the effect of alternative punishment strategies, we take the derivative of equa-tion with respect to this term: An increase of leads to a similar increase of D u, decreasing the stability for each level of river flow. 6 This result holds for each sharing rule. The implication of this result is that for any agreement, the higher the absolute value of the punishment term, the higher the stability of cooperation. Asymmetry We consider both asymmetry in political power and asymmetry in benefit functions. 
Asymmetry in political power As exemplified by the Nile basin and described by LeMarquand, the distribution of political power has implications for the incentives for cooperation. In this model, we can incorporate this aspect through the level of, which we define here to be a measure of political power for the upstream country. When benefit functions are symmetric, Kilgour and Dinar have shown that in an efficient situation, the surplus benefit is equally shared between the two countries; in our model this implies that = 0.5. When < 0.5, d has more political power than u and therefore a stronger bargaining position. As a result, the non-water transfer from d to u is lower than in a situation with equally distributed political power. To assess the effect of political power on stability, we take the derivative of equation 6 Note that is negative, so an increase of is a lower punishment. with respect to : Equation This result holds for each sharing rule. The implication of this result is that for any agreement, the larger the political power of u relative to the political power of d, the higher the stability of cooperation. The intuition behind this result is that when is high, the non-water transfer is high, and therefore cooperation is attractive to u. Changes in the distribution of political power after an agreement has been signed have no effect on stability because the effect of on D u works via m c, which has been fixed. Asymmetry in benefit functions Asymmetry in benefit functions between countries is assessed using the same functional form of the benefit function as the one introduced in section 4. The effect of asymmetric benefit functions is simulated by scaling u's benefit function by a factor. Hence, B u,t = aq u,t − bq 2 u,t and B d,t = aq d,t − bq 2 d,t. To assess the effect on stability, we analyse how affects D u by taking the derivative of equation with 18 respect to : Equation Changes in after an agreement has been signed can also be calculated. 
Such a change may occur because of demographic or economic developments. This effect does not influence m c, because m c has been fixed in the agreement. Therefore, to assess the effect on stability, we analyse how affects D u by taking the derivative of equation with respect to, similar to equation, but assuming that m c is fixed: Combining equations and gives: Equation also yields a positive value. An increase of after an agreement has been signed leads to an increase of D u, decreasing the stability for each level of river flow. This result holds for each sharing rule. The implication of this result is that for any agreement, if benefits to u increase after the agreement has been signed, the stability of cooperation decreases. Discussion The analysis presented here shows that climate change affects the stability sharing rule that mixes fixed and proportional allocations. Based on acquired rights, 48 000 MCM/yr is allocated to Egypt and 4 000 MCM/yr to Sudan. Of the remaining flow, 34% is allocated to Egypt and 66% to Sudan. In an average year this gives Egypt 55 500 MCM/yr. Because almost 90% of this expected allocation is fixed, we can safely consider this a FD sharing rule. Egypt, being the downstream country, is not paying a non-water transfer to Sudan. In 1959, Egypt paid Sudan a one-time transfer of 15 million Egyptian Pounds compensation for increased storage in the Sudd el Aali reservoir that was required in the agreement. Until 1977, however, Sudan could not fully use its entitlement, so it decided to make "water loans" to Egypt of up to 1 500 MCM/yr until 1977. This is the first of two factors that might explain why Egypt is not paying Sudan anything for passing through the majority of the Nile water; non-water transfers equal zero. The second factor is the distribution of political power in the Nile basin. It is evident that Egypt is the strongest country in the Nile basin, in political, economic and military terms. 
In fact, the military threat that Egypt poses to Sudan can be viewed as an equivalent to a non-water transfer . Agriculture is the main water using sector in both Egypt and in Sudan. Because developments in irrigation techniques are nearly complete in Egypt, while Sudan still lacks the resources to expand its irrigated area, average yields are much higher in Egypt. Hence, benefits of water use are higher in Egypt than in Sudan. Studies of climate change effects on the hydrology of the Nile river basin find different results. Some models predict decreases while others predict increases in river flow. Arnell and Voss et al. predict water claims made by Ethiopia are two factors that are likely to decrease stability. Population growth will increase benefits of water use to Sudan, increasing its incentive to deviate. When, somewhere in the future, Ethiopia is also allocated a share of the Nile water, increased scarcity in Sudan and Egypt will increase Sudan's incentive to deviate even further. In general, predictions of climate change effects in Southern Africa indicate reduced precipitation and an increase of evaporation. There is, however, some uncertainty for the Orange river basin. Although Arnell finds that there is a great reduction in runoff by the year 2050 in Southern Africa, predictions for the Orange river basin do not clearly indicate whether and how mean and variance of river flow will change. Nevertheless, current river flow in the Orange river basin knows already large variability. Orange river basin Putting these observations into the perspective of the model developed in this paper, we can conclude that the stability of cooperation in the Orange river basin between Lesotho and RSA could be negatively affected by its Saskatchewan is not paying a non-water transfer to Alberta. There is no need for such a transfer because up to now, water use in the South Saskatchewan river basin has not been limited by water availability. 
Alberta, therefore, has always met its obligation to pass on 50% of river flow. In recent years, however, water use is getting close to 50% of river flow in Alberta, partly due to Alberta's fast growing economy. Water use in Saskatchewan is much lower and increasing at a lower rate. Two distinct trends affect water availability in the basin. On the one hand, climate change effects are projected to decrease mean river flow by 4-10% and to decrease low flow levels by 14-22% by 2046. On the other hand, the combined effects of population growth, economic growth, and increasing irrigation efficiency are projected to increase water use. With lower water availability and increasing water use, Alberta is expected to face water shortage in the coming decades. In theory, the use of punishment strategies enhances cooperation in a repeated game. In our model, however, punishment of u by d also decreases benefits to d, because the non-cooperative outcome gives d lower benefits than the cooperative outcome. Shortening the period of punishment is therefore always beneficial to d, which undermines its credibility of actually going to punish in case of deviation by u. It is this lack of credibility of punishment strategies that might obstruct the effective use of punishment strategies in international agreements on water allocation. Ideally, punishment is implemented in a linked game, which does not affect the benefits of the punishing country. Again, the facilitation of river transport by d to u is a good example. Mendelsohn and Bennett find that the impact of climate change on the mean of river flow is a far more important determinant for efficiency than its impact on the variance of river flow. For both the Nile and Orange river basin discussed above, where model predictions on the mean river flow are not distinct, this implies that the expected efficiency of the agreement is not expected to change because of climate change. 
Our model suggests that, although this conclusion may hold for efficiency, it does not hold for the stability of cooperation. Stability is affected by changes in both mean and variance of river flow. Hence, both the mean and variance of river flow have to be taken into account when negotiating agreements on water allocation. Conclusions The objective of this paper is to assess the stability of water allocation agreements when climate change affects river flow. A game theoretic model is constructed that analyses the stability of cooperation in water allocation 27 between two countries for three sharing rules. The stability of cooperation is expressed in terms of the probability that one of the two countries deviates from the specified agreement actions, given that the countries maximize their expected payoff stream (consisting of benefits of water use and nonwater transfers). Deviation from agreement actions is found unattractive to the downstream country (d) for each sharing rule. Therefore, stability only depends on the probability of deviation by the upstream country (u). Of the three sharing rules that were analysed, the fixed upstream allocation was found stable for any level of river flow (Q t ). For low levels of Q t, however, both with fixed downstream allocation and proportional allocation, u may have an incentive to deviate. The stability of agreements with these sharing rules depends on the probability distribution of Q. Results showed that both a decrease in mean river flow and an increase in variance of river flow decrease the stability of cooperation. Agreements with PA are in general more stable than agreements with FD, because with FD, u bears a larger part of the risk connected to low flows. In addition to the probability distribution of Q and the sharing rule, three other factors are identified to affect stability of cooperation. 
The stability of cooperation is higher (i) if the absolute value of the punishment term is higher, (ii) if u's political power is large relative to d's political power, and (iii) if u's benefits of water use are low relative to d's benefits. This paper shows that the stability of water allocation agreements can be affected by climate change. This paper adds to the analysis of water allocation agreements by focusing on stability aspects, where others have focused on efficiency aspects. Where Bennett et al. found that proportional allocations are more efficient in many situations, we find that proportional 28 allocations are less stable than fixed upstream allocations. Where Mendelsohn and Bennett found that the largest impact of climate change on efficiency comes from changes in the mean of river flow, we find that both changes in mean and variance affect stability. Because water allocation agreements need to be stable in order to increase the efficiency of water use, the results of this paper are important for the design of water allocation agreements and especially the selection of a sharing rule.
# Source: Grokking-the-coding-interview — "top k elements / sum of elements.py"
#
# Given an array, find the sum of all numbers between the K1'th and K2'th
# smallest elements of that array.
#
# Example:
# Input: [1, 3, 12, 5, 15, 11], K1=3, K2=6
# Output: 23
# Explanation: The 3rd smallest number is 5 and the 6th smallest is 15.
# The sum of the numbers between 5 and 15 is 23 (11 + 12).

from heapq import heapify, heappop, heappush


def sum_of_elements(nums, k1, k2):
    """Return the sum of the elements strictly between the k1'th and the
    k2'th smallest elements of nums.

    Time:  O(N + k2*logN) — heapify is O(N), each pop O(logN)
           (the original pushed N times, costing O(N*logN) just to build).
    Space: O(N)
    """
    minheap = list(nums)  # copy so the caller's list is not mutated
    heapify(minheap)      # O(N), cheaper than N individual pushes

    # Discard the k1 smallest elements.
    for _ in range(k1):
        heappop(minheap)

    # Sum the next k2 - k1 - 1 smallest elements: exactly those strictly
    # between the k1'th and k2'th smallest.
    return sum(heappop(minheap) for _ in range(k2 - k1 - 1))


def sum_of_elements_2(nums, k1, k2):
    """Same result as sum_of_elements, but keeps only the k2 - 1 smallest
    elements in a max-heap, reducing space for large inputs.

    Time:  O(N*logK2)
    Space: O(K2)
    """
    # Python's heapq is a min-heap, so values are stored negated to
    # simulate a max-heap.
    maxheap = []
    for i, num in enumerate(nums):
        if i < k2 - 1:
            heappush(maxheap, -num)
        elif num < -maxheap[0]:
            # num is smaller than the largest kept value: replace it so the
            # heap always holds the k2 - 1 smallest elements seen so far.
            heappop(maxheap)
            heappush(maxheap, -num)

    # Popping yields the largest of the k2 - 1 smallest first, i.e. the
    # k2 - k1 - 1 elements strictly between the k1'th and k2'th smallest.
    return sum(-heappop(maxheap) for _ in range(k2 - k1 - 1))


print(sum_of_elements([1, 3, 12, 5, 15, 11], 3, 6))
print(sum_of_elements([3, 5, 8, 7], 1, 4))
print(sum_of_elements_2([1, 3, 12, 5, 15, 11], 3, 6))
print(sum_of_elements_2([3, 5, 8, 7], 1, 4))
Not since the days of Mao have we seen the sort of suppression of religion in China like we're seeing right now. Two recent stories from Xi Jinping's China suggest it's open season on religious believers there. Reports out of Xinjiang Province on China's western frontier say that up to one million Muslim Uighurs, the indigenous ethnic group of that region, are being held in detention camps. One U.N. official expressed concern about reports that Beijing had "turned the Uighur autonomous region into something that resembles a massive internment camp." Of course, Beijing denies the allegations and insists that it's merely cracking down on Islamic extremism. As part of this so-called "crackdown on Islamic extremism," China has banned the wearing of veils, prohibited giving children certain Muslim names, put limits on the length of beards, and made it a crime not to watch state television. Given China's horrendous human rights record, and the lack of evidence for a Uighur separatist movement, Islamist or not, it's difficult to believe China's denials. The more likely explanation is that Beijing is waging war on Islam as part of a campaign to subjugate the people of the region. A similar war is being waged on Christianity in Henan Province in central China. It's part of what's being called "the most severe systematic suppression of Christianity in the country since religious freedom was written into the Chinese constitution in 1982." The brunt of this suppression is directed at unregistered "house" churches. Hundreds have been closed down. There have been raids, "interrogations, and surveillance, and one pastor said hundreds of his congregants were questioned individually about their faith." Even registered churches haven't been spared. Reporters noted that these churches bore notices stating that "minors and party members were not allowed inside." Another church had a banner exhorting members to "implement the basic direction of the party's religious work." 
Make no mistake. What is meant by "the party's religious work" is the active elimination of any rivals to the Communist Party, and in particular Xi Jinping, in the shaping of the worldview of the Chinese people. As one expert told the Monitor, Xi "definitely does not want people to be faithful members of the church, because then people would profess their allegiance to the church rather than to the party, or more exactly, to Xi himself." Local officials aren't even coy about this. They openly speak about "thought reform." They're not content with mere obedience to the laws or the lack of any real challenge to the Communist Party's authority. Their goal is to eliminate any distinction between the Party and society. But history, including Chinese history, illustrates the absurdity of efforts like these. Within the last sixty years, two attempts at this kind of "reform," "The Great Leap Forward" and Mao's "Cultural Revolution," attempted to remake Chinese society along explicitly ideological lines. What was left in their wake was tens of millions dead, and not much else. This desperate need for control is why "Chinese leaders have always been suspicious of the political challenge...that Christianity poses to the Communist regime." Not because Christians threaten or even desire to replace the regime, but because their ultimate allegiance lies elsewhere. The same thing can be said about Muslims or observant Jews. Their worldview derives from something other than "Xi Jinping Thought on Socialism with Chinese Characteristics for a New Era." And that reality, and not anything believers do, is what's behind the heightened persecution. And it's why religious freedom for everyone everywhere must be a priority for both American Christians and our government.
package tui;

import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;

/**
 * Demo class that stores a small string-to-string map, keeps it in a list,
 * and prints every key/value pair to standard output on construction.
 *
 * @author (your name)
 * @version (a version number or a date)
 */
public class TempUI
{
    // Example key/value data printed by the constructor.
    private Map<String, String> example = new HashMap<>();
    // Holds the example map; typed generically instead of the raw ArrayList.
    private List<Map<String, String>> list = new ArrayList<>();

    /**
     * Builds the example map, stores it in the list, and prints each entry
     * as "key value" on its own line.
     */
    public TempUI()
    {
        example.put("start", "Jens");
        example.put("text", "no");
        list.add(example);

        // Iterate the entry set directly: avoids a second map lookup per key
        // and the redundant toString() calls on values that are already Strings.
        for (Map.Entry<String, String> entry : example.entrySet()) {
            System.out.println(entry.getKey() + " " + entry.getValue());
        }
    }
}
Action Principle and Dynamic Ensemble Theory for Non-equilibrium Markov Chains An overarching action principle, the principle of minimal free action, exists for ergodic Markov chain dynamics. Using this principle and the Detailed Fluctuation Theorem, we construct a dynamic ensemble theory for non-equilibrium steady states (NESS) of Markov chains, which is in full analogy with equilibrium canonical ensemble theory. Concepts such as energy, free energy, Boltzmann macro-sates, entropy, and thermodynamic limit all have their dynamic counterparts. For reversible Markov chains, minimization of Boltzmann free action yields thermal equilibrium states, and hence provide a dynamic justification of the principle of minimal free energy. For irreversible Markov chains, minimization of Boltzmann free action selects the stable NESS, and determines its macroscopic properties, including entropy production. A quadratic approximation of free action leads to linear-response theory with reciprocal relations built-in. Hence, in so much as non-equilibrium phenomena can be modeled as Markov processes, minimal free action serves as a basic principle for both equilibrium and non-equilibrium statistical physics. Introduction. It is remarkable that variational principles underly many basic laws of physics, including those in Hamiltonian mechanics, geometric optics, quantum mechanics and equilibrium statistical mechanics. For example, the entire theory of equilibrium statistical mechanics can be formulated on the base of the principle of maximal entropy. Any equilibrium phenomenon can be claimed understood as soon as we can derive it from this principle. Search for a similar unifying action principle of nonequilibrium phenomena has been a long and inconclusive quest. The earliest versions of "minimum dissipation theorem" were stated by Helmholtz in 1869, and by Rayleigh in 1873. It was generalized substantially by Onsager, such that it serves a starting point for linear response theory. 
Another candidate is "the principle of minimal entropy production", studied by Prigogine and many others, and caused major confusions and debates. For a more recent exposition see reference. There are also many other proposals, including the principle of "maximal entropy production", which is in apparent contradiction with the the previous two principles. The relations between these proposed principles have been are addressed repeatedly, but not yet completely understood. We shall not elaborate on these issues as our approach will be substantially different. Recent progresses in study of stochastic thermodynamics have supplied much fresh understanding of entropy production and dissipation in non-equilibrium processes. It has become clear that entropy production can be defined at the level of individual dynamic path, and that it changes sign under time-reversal. Given a nonequilibrium boundary conditions or driving forces, the entropy production is positive for some dynamic paths and negative for their time reversal counterparts. Fluctuation Theorems [22, tell us that those paths with positive dissipation are exponentially more proba-ble than their time-reversal, which have negative dissipation. A principle of "minimal entropy production" or of "least dissipation", if taken literally, would select those paths with negative entropy production, and therefore constitutes an outright contradiction to the second law of thermodynamics. Most, if not all, non-equilibrium problems can be modeled as Markov processes. Inspired by the conceptual progresses in stochastic thermodynamics, we feel that the theory of ergodic Markov processes can be formulated as a dynamic ensemble theory. This would allow us to use all powerful tools in equilibrium theory to study nonequilibrium physics, and hence greatly facilitate theoretical understanding of non-equilibrium statistical physics. This is indeed the case, as we will demonstrate below. 
As enumerated in Table I, many important concepts of equilibrium statistical mechanics, including micro-state, macro-state, energy, entropy, free energy all have their dynamic counterparts in Markov processes. The only exception is temperature, whose dynamic counterpart is unity. Most importantly, we will find that there is also an overarching action principle, i.e. the principle of minimal free action, which serves as a foundation of dynamic ensemble theory for Markov chains. This principle governs all macroscopic properties of non-equilibrium steady states, including the equations of state and conditions of stability. At the quadratic level, the principle yields linear response theory with reciprocal symmetry naturally built-in. For systems with time-reversal symmetry, the principle of minimal free action selects the thermal equilibrium state with minimal free energy, and hence constitutes a dynamic justification of the principle of maximal entropy. Consequently, under the reasonable assumption that non-equilibrium phenomena can be modeled as Markov processes, minimal free action serves as a basic principle for both equilibrium and non-equilibrium statistical physics. We will develop the dynamic ensemble theory in parallel to equilibrium ensemble theory. To set the stage for comparison between equilibrium and non-equilibrium theories, we first summarize the key elements of equilibrium canonical ensemble theory and the principle of minimal free energy, using Ising model for illustration. The detailed comparison of equilibrium and non-equilibrium ensemble theories are shown in Table I. Sketch of equilibrium ensemble theory. The microstates of system are designated by a set of microscopic variables called spins, s = {s i }. The Hamiltonian H(s) is given by where H 0 (s), called the intrinsic energy, is interaction between neighboring spins, and H 1 (s) = −B s i is due to external field. 
H 0 (s) and H 1 (s) are respectively even and odd under flipping of spins s → −s. The canonical ensemble theory of equilibrium statistical mechanics can be constructed from the principle of minimal free energy, which is known to be equivalent to the principle of maximal entropy. Let (s) be an arbitrary probability distribution in the space of micro-states. The non-equilibrium free energy F as a functional of (s) is defined as where Tr s means summation over all spin configurations, and T = −1 is the temperature. Here we use the convention k B = 1 throughout. Minimization of F with respect to (s) leads to the equilibrium Gibbs-Boltzmann distribution: where Z = e −FEQ = Tr s e −H is the canonical partition function, and F EQ = −T log Z is the equilibrium canonical free energy. For fixed values of intrinsic energy H 0 (s) = E 0 (excluding magnetic field energy), and magnetization s = M, the totality of all micro-states is defined as a Boltzmann macro-state (E 0, M ). Its entropy is called the Boltzmann entropy S B (E 0, M ). Mathematically we have where || is the number of micro-states in set. Since for every micro-state s, there is alway one spin-reversed state −s, (E 0, M ) = (E 0, −M ) must be an even function of M. Note that in equilibrium, all micro-states inside (E 0, M ) have the same energy E 0 − BM, and hence the same probability e FEQ−(E0−BM). In fact, (E 0, M ) is a refining of the micro-canonical ensemble. In thermal equilibrium, the total probability of Boltzmann state (E 0, M ) is then where F B (E 0, M ) shall be called the Boltzmann free energy. We note that F B, E 0, M are all extensive quantities. In the thermodynamic limit (number of spins N → ∞), the distribution Eq. becomes more and more concentrated near the minimum of F B (E 0, M ). Except at a critical point, fluctuations of all intensive quantities, such as energy and Boltzmann entropy per spin, scale as N −1/2, i.e., they become non-stochastic in the thermodynamic limit. 
This is in fact how thermodynamics emerges from the stochastic description of statistical mechanics. Minimization of Boltzmann free energy F B (E 0, M ) determines all thermodynamic properties. In particular, the stationarity condition reads which are in fact the equations of state. The stability condition of the thermodynamic state reads unfolding of which gives positivity of specific heat and magnetic susceptibility. It is important to note that minimization of Boltzmann free energy can be understood as an application of the principle of minimal free energy in the subspace of all Boltzmann macro-states. Indeed using the equilibrium-probability restricted inside the Boltzmann state (E 0, M ) in Eq., we obtain the Boltzmann free energy Eq.. In the thermodynamic limit, the Boltzmann free energy and the canonical free energy must be asymptotically equivalent: otherwise the probability distribution Eq. would not be normalizable as N → ∞. Similarly Boltzmann entropy also become equivalent to the Gibbs entropy. The underlying physics is the equivalence of different statistical ensembles in the thermodynamic limit. The concepts of Boltzmann macro-states and Boltzmann entropy are very flexible. If we wish to study another macroscopic quantity A(s), we can refine the Boltzmann state Eq. by fixing E 0, M and A. Then Eq. becomes a joint pdf for E 0, M and A, which can be deemed as a straightforward generalization of Einstein's theory for thermodynamic fluctuations. Principle of minimal free action. We shall now construct an ensemble theory for discrete time Markov chain dynamics with discrete-valued state variables. Generalization of our theory to continuous time Markov processes and non-Markov processes has technical but not conceptual difficulty. We will use N to denote the number of time steps of dynamic paths. We will take the limit N → ∞, which is the dynamic analogue of thermodynamic limit. 
We will not assume that the system is large, hence our theory will be applicable both for large and small systems. Let X k be the discrete-valued state variables of the system at time step k. A dynamic path is described by an ordered sequence N = X N... X 1 X 0, where time propagates from right to left. Let p 0 (X 0 ) be the initial probability distribution, and P (X|Y ) the transition probability. The probability of a path N assigned by the Markov chain dynamics is We define the action of a dynamic path N as To formulate an action principle for Markov chain dynamics, it is valuable to study a generic path probability distribution, denoted by q( N ), that is different from Eq.. We define the dynamic entropy and the free action of a path pdf q( N ) as: where D(q||p) ≡ N q( N ) log q( N )/p( N ) is the relative entropy, which is known to be nonnegative and vanishes only for q( N ) = p( N ). Just as the free energy is minimized by the equilibrium Gibbs-Boltzmann distribution, the free action Eq. is minimized by q = p as defined in Eq.. The proofs are exactly the same in the equilibrium and non-equilibrium cases. This is the Principle of Minimal Free Action, which is mathematically equivalent to the definition of Markov chain dynamics. E( N ) and H are the dynamic analogues of Hamiltonian and Gibbs entropy, whereas is the analogue of free energy. The dynamic analogue of temperature is unity. Dynamic ensemble theory for NESS. Our main interest in this work is non-equilibrium steady states (NESS) of time-homogeneous ergodic Markov chains. For this purpose, it is convenient to study cyclic dynamic paths such that X N = X 0. We shall call such a path a loop ℓ N = ℓ(X N X 1 ), and define its action E(ℓ N ) as Using the famous Perron-Fronenius theorem, we can prove the following limit: where p SS (X 1 ) is the steady state pdf. Further summing Eq. over X 1, we obtain unity. More specifically we have the following identity: If we use Eq. 
for the action in Eq., and restrict the summation in Eq. to all loops, we see that the principle of minimal free action still holds: the free action Eq. is minimized by the pdf p(ℓ N ) = e −E(ℓN ). Restriction to loops is very convenient for the study of NESS, as we no longer have to worry about initial conditions. The time-reversal of a loop ℓ N = ℓ(X N X 1 ) is defined as ℓ * = (X * 1 X * N ), where X * is the time-reversal of state X. Let us define the symmetric action (ℓ) and antisymmetric action (ℓ) of a loop ℓ as: (ℓ) and (ℓ) are, respectively, even and odd under time reversal. Using the Detailed Fluctuation Theorem, it can be easily shown that (ℓ) is the total entropy production of the loop ℓ. In the absence of driving force, p(ℓ) must be reversible, and hence (ℓ) vanishes identically. The resulting Markov chain is then reversible. We shall define a dynamic Boltzmann entropy: Here || is understood as the number of paths in the set. The pdf of (, ) is then given by Here B (, ) is the dynamic analogue of Boltzmann free energy, and will be called Boltzmann free action: which completely determines the probability distribution Eq.. Because H B (, ) is even in, we easily see that P (, ) = P (, −)e. Summing this relation over, we obtain the famous Steady-State Fluctuation Theorem: P () = P (−) e. Note that H B,, are all extensive in number of time steps N. As N becomes large, P (, ) becomes more and more concentrated near the minimum of B (, ), with a width scaling sub-extensively as √ N. The N → ∞ limit is clearly the dynamic counterpart of thermodynamic limit. Mathematicians call this large deviation limit, and would demand a mathematical proof for its existence. We shall argue that its existence is intuitive. The most probable values (,) are determined by minimization of the Boltzmann free action B : These results are the dynamic analogues of Eqs., and are valid only in the sense of extensive variables in N. Specifically Eq. 
(20a) determines the equations of state for NESS, (20b) is the condition of stability, and also determines the small fluctuations of,. Finally Eq. (20c) is demanded by normalization of probability, and signifies the equivalence of different dynamic ensembles. All these identities have analogues in equilibrium ensemble theory. A simple example. We discuss a simple example to illustrate the dynamic ensemble theory. Consider a particle hopping randomly on a circle. An external force is applied so that the particle hops asymmetrically. The probabilities that the particle hop clockwise or counter-clockwise, or idle, are respectively z() −1 e −0+, z() −1 e −0− and z() −1, where is the driving force, and z() = 1 + 2e −0 cosh is a normalization constant. Let N ± (ℓ), N 0 (ℓ) be the numbers of CW, CCW, and idle steps in a loop ℓ, and p ± (ℓ) = N ± (ℓ)/N, p 0 (ℓ) = N 0 (ℓ)/N are the empirical frequencies. p ±, p 0 are the macroscopic variables we use to characterize each loop, which satisfy p + + p − + p 0 = 1. The action E(ℓ) is Comparing this with Eq. we see that the entropy production of the loop ℓ is (ℓ) = 2N (p + (ℓ) − p − (ℓ)). The dynamic Boltzmann entropy as a function of macroscopic variables p ±, p 0 can also be easily calculated: Substituting these back into Eq. and minimizing, we find the mean entropy production: Stability of the NESS can be easily verified by computing the second order derivative of B (p +, p − ). We note that this example can be solved using elementary method. Here we use it only to illustrate the structure of dynamic ensemble theory. In future publications, we will use the dynamic ensemble theory to study more realistic manybody non-equilibrium problems. Quadratic Approximation. Consider a system driven by two weak external forces 1, 2. We can expand the antisymmetric action (ℓ) in terms of 1,2 : where Q 1 (ℓ), Q 2 (ℓ) are called dissipative currents, and are odd under time-reversal, Q 1,2 (ℓ * ) = −Q 1,2 (ℓ). 
The symmetric action (ℓ) also depends on, via the normalization condition ℓ exp(−(ℓ) + (ℓ)/2) = 1. A simple calculation shows that (with Einstein's summation convention used) where L = Q Q 0 is the correlation function of dissipative currents in the absence of 1,2. In the last step, we have expanded in terms of, and have used the fact that Q 0 ≡ 0, i.e., time-reversal symmetry in the equilibrium case. It is most convenient to treat the dynamic Boltzmann entropy as a function of 0 and Q. As such, H B ( 0, Q) is independent of. Furthermore, because of timereversal symmetry, H B ( 0, Q) must be even in Q. We expand H B and B up to Q 2 : As such H B ( 0, Q) becomes identical to the dissipation functional defined by Onsager, if we identify with generalized forces. The average currents are determined by minimization of B ( 0, Q), which yield result Q = L. The reciprocal relations are already encoded by the symmetry current correlations L = L. It is however important to note that B ( 0, Q) does not describe dissipation of energy, or production of entropy. Also it would be completely wrong to call Eq. the principle of least dissipation of energy, or the principle of minimal entropy production. As we have shown through out this work, the entropy production changes sign under time-reversal, has no upper bound or lower bound, and certainly can not achieve minimum or maximum at the steady state. In fact, as we have demonstrated, it is the principle of minimal free action that governs the physics of non-equilibrium steady states. From minimal free action to minimal free energy. Similar to the equilibrium ensemble theory, the dynamic ensemble theory can be used to study arbitrary macrovariables. All we need to do is to refine the definition of dynamic Boltzmann entropy Eq. 
by specifying the additional macro-variable A(ℓ) that we aim to study: As an example, we may study the empirical pair distribution of a loop ℓ: which gives the frequency of transition from state x to state y during the dynamic path ℓ N. It is normalized as x,y f (x, y; ℓ) = 1, and hence is an intensive quantity. The symmetric and antisymmetric actions of the loop can be calculated in terms of f (x, y; ℓ): x,y f (x, y; ℓ) log (P (y|x)P (x|y)), (28a) (ℓ) = x,y f (x, y; ℓ) log P (y|x) P (x|y). The empirical state distribution, which gives the frequency of state x appearing in the path ℓ, can also be calculated in terms of f (x, y; ℓ): We can similarly define the dynamic Boltzmann entropy and free action as functional of f (x, y; ℓ), and construct the pdf of f (x, y): In fact, B can be calculated using results of large deviation theory : where f 1 (x) = y f (x, y) = y f (y, x) is the marginal distribution of f (x, y). Again H B is extensive in N, and distribution of f (x, y) becomes concentrated near the saddle pointf (x, y), which must satisfyf (x, y) = P (y|x)f 1 (x). But this precisely means thatf (x, y) is the stationary pair distribution, andf 1 (x) = p SS (x) is the stationary state distribution. Note that (ℓ), (ℓ), f (x, y; ℓ), f 1 (x, ℓ) are all properties of individual dynamic path. Mathematically Eqs. and (30c) say that these macro-variables converge to their means in probability. A dynamic path is called typical if it minimizes the Boltzmann free action. Typical paths are selected by the principle of minimal free action. Our results then say that a typical dynamic path exhibits average entropy production, average symmetric action, and average distribution of transition frequencyf (X, Y ), as well as average distribution of states p SS (X). For time-reversal Markov chains, we know that p SS (X) is the Gibbs-Boltzmann distribution, which minimize the free energy. 
This means that if we pick a long, typical dynamic path, and construct its empirical state distribution, we will precisely find the Gibbs-Boltzmann distribution-This is ergodicity at work in a Markov chain model! In another word, the principle of minimal free action gives a dynamic justification of the principle of minimal free energy. This work is supported by NSFC via grant #11674217, as well as Shanghai Municipal Education Commission and Shanghai Education Development Foundation via "Shu Guang" project. * Electronic address: xxing@sjtu.edu.cn
POS1461-HPRTHE DEVELOPMENT OF A NOVEL EPRO DELIVERY SYSTEM TO MEASURE PATIENT QUALITY OF LIFE IN ROUTINE CLINICAL CARE: AN ANALYSIS OF 5 YEARS OF EXPERIENCE Registry studies and clinical trials are increasingly incorporating patient reported outcomes (PROs) to measure the full burden of disease and better measure the efficacy and value of medicines; however, the burden of paper-based surveys, time constraints, and privacy concerns impede the widespread use of PROs in routine clinical care.To develop a simple and secure technological solution to incorporate validated PROs into routine clinical care for patients with rheumatic diseases, and to assess the patient response to functional assessment of chronic illness therapy fatigue (FACIT-F), patient health questionnaire-2 (PHQ-2), and healthcare resource utilization (HCRU) questionnaires delivered using this ePRO method.A novel ePRO questionnaire delivery system was developed by Software4Specialists in partnership with OPAL Rheumatology. Validated PRO questionnaires were sent from the patients electronic medical record (Audit4, Software4Specialists) and delivered to the patients email address at time intervals specified by the rheumatologist (defaults to quarterly) or completed in the clinic waiting room prior to the consultation using a tablet or the patients smart phone (in-practice). Completed questionnaires were encrypted and returned directly to the patients Audit4 electronic medical record held on the clinicians server for review at the next clinical consultation. The link to the PRO questionnaire expired within 28 days if the questionnaire was not completed, and the questionnaires were automatically cancelled if 2 consecutive links expired. This technology was made available to up to 111 rheumatologists located in 42 clinics in 6 states/territories in Australia, and the use of this technology to furnish the clinical consultation was voluntary for clinicians and patients. 
Deidentified clinical data was extracted from the servers of participating rheumatologists and aggregated across all sites.1 Data collected between April 2016-Dec 2020 was analysed descriptively.Between April 2016-Dec 2020, 99,505 FACIT-F, PHQ-2 and HCRU questionnaires have been delivered to 5,784 patients from 39 of 42 contributing clinics (93%). 85% of questionnaires were delivered via email and 15% in-practice. Overall, 85% of patients completed at least one questionnaire, and of all questionnaires sent, 73% were completed. These rates have remained consistent over time. The completion rates were higher when questionnaires were delivered to patients in-practice compared to email (96% vs 69%). Females were more likely to engage with the questionnaires than males (87% vs 81%), and older patients were slightly more likely to complete all questionnaires delivered. 69% of questionnaires sent via email were completed on the day they were delivered and 94% were completed within 7 days. The median (IQR) number of questionnaires completed per patient was 3 and the median (IQR) time since the first questionnaire was completed was 13 months.The novel Audit4 ePRO delivery system is an effective tool for incorporating PROs into routine clinical care to capture data directly from the patient on the impact of their condition on their quality of life. The data generated provides a unique opportunity to understand the full burden of disease for patients in the real-world setting and the impact of interventions.Littlejohn GO, Tymms KE, Smith T, Griffiths HT. Using big data from real-world Australian rheumatology encounters to enhance clinical care and research. 
Clin Exp Rheum 2020:38: 874 -880.The authors acknowledge the members of OPAL Rheumatology Ltd and their patients for providing clinical data for this study, and Software4Specialists Pty Ltd for providing the Audit4 platform.Kathleen Tymms: None declared, Tegan Smith: None declared, Claire Deakin: None declared, Tim Freeman: None declared, David Hoffman: None declared, Dana Segelov: None declared, Hedley Griffiths Consultant of: AbbVie, Gilead, Novartis and Lilly., Sabina Ciciriello: None declared, Peter Youssef: None declared, David Mathers: None declared, Catherine OSullivan: None declared, Geoff Littlejohn Consultant of: Over the last 5 years Geoffrey Littlejohn has received educational grants and consulting fees from AbbVie, Bristol Myers Squibb, Eli Lilly, Gilead, Novartis, Pfizer, Janssen, Sandoz, Sanofi and Seqirus
def testMessageChallstr():
    """A |challstr| message exposes everything after '|challstr|' verbatim."""
    expected = "4|314159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196442881097566593344612847564823"
    message = psclient.Message("|challstr|" + expected, DummyConnection())
    assert message.challstr == expected
Late malignant transformation of giant cell tumor of bone 41 years after primary surgery. Giant cell tumor of bone is an uncommon benign tumor that frequently recurs locally. Spontaneous malignant transformation of conventional giant cell tumor of bone is rare and usually occurs with irradiation.This article describes a case of malignant transformation of a giant cell tumor 41 years after initial curettage and subsequent resection. A 68-year-old man presented with a 6-month history of left hip pain. He had been diagnosed 41 years previously with giant cell tumor in the left femoral neck treated by simple curettage and bone grafting, followed by resection of the femoral head 1 year later for local recurrence. On presentation, radiographs revealed a destructive lesion in the left proximal femur. Incisional biopsy revealed recurrence of giant cell tumor with suspected malignant transformation. The patient underwent en bloc resection of the proximal femur with adequately wide margins and reconstruction of the hip joint with a prosthesis. Pathological findings showed malignant transformation of a giant cell tumor to osteosarcoma and leiomyosarcoma. No recurrence or metastasis developed during 2-year follow-up. Benign local recurrences usually arise in the first 3 postoperative years, whereas malignant transformation tends to take longer than 3 years. To the authors' knowledge, the 41-year interval from primary surgery to diagnosis of malignancy for the current patient is the longest interval reported among cases in which patients received no radiation therapy.
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h> /* BUG FIX: was missing; required for all pthread_* names below */

/* Shared bank state. Intended invariant: bank_balance == ivan_account + petar_account.
 * Every update goes through `mutex` so the two sums stay consistent across threads. */
double bank_balance = 0.0;
double ivan_account = 0.0;
double petar_account = 0.0;
pthread_mutex_t mutex;

/* Atomically add `amount` to account *to and to the total bank balance. */
void deposit(double amount, double *to) {
    pthread_mutex_lock(&mutex);
    *to += amount;
    bank_balance += amount;
    pthread_mutex_unlock(&mutex);
}

/* Atomically remove `amount` from account *from and from the total bank balance. */
void withdraw(double amount, double *from) {
    pthread_mutex_lock(&mutex);
    *from -= amount;
    bank_balance -= amount;
    pthread_mutex_unlock(&mutex);
}

/* Thread body: 1,000,000 random operations on Ivan's account.
 * BUG FIX: the original condition `rand() % 2 <= 1` was always true, so the
 * withdraw branch was unreachable; `== 0` gives the intended 50/50 split. */
void *ivan(void *arg) {
    (void)arg; /* unused */
    int i;
    for (i = 0; i < 1000000; i++) {
        if (rand() % 2 == 0) {
            deposit(rand() % 10000, &ivan_account);
        } else {
            withdraw(rand() % 10000, &ivan_account);
        }
    }
    return NULL; /* BUG FIX: a void* start routine must return a value */
}

/* Thread body: 1,000,000 random operations on Petar's account (~50/50 split). */
void *petar(void *arg) {
    (void)arg; /* unused */
    int i;
    for (i = 0; i < 1000000; i++) {
        if (rand() % 1000 <= 500) {
            deposit(rand() % 10000, &petar_account);
        } else {
            withdraw(rand() % 10000, &petar_account);
        }
    }
    return NULL; /* BUG FIX: missing return */
}

int main(void) {
    srand(time(NULL));
    pthread_mutex_init(&mutex, NULL);
    pthread_t thread1;
    pthread_t thread2;
    pthread_create(&thread1, NULL, ivan, NULL);
    pthread_create(&thread2, NULL, petar, NULL);
    pthread_join(thread1, NULL);
    pthread_join(thread2, NULL);
    /* BUG FIX: pthread_mutex_destroy takes exactly one argument; the extra
     * NULL in the original did not match the POSIX prototype. */
    pthread_mutex_destroy(&mutex);
    /* With correct locking the two totals must print the same value. */
    printf("%f %f\n", (ivan_account + petar_account), bank_balance);
    return 0;
}
<reponame>ProjectET/Mantle
package slimeknights.mantle.recipe.helper;

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import com.google.gson.JsonSyntaxException;
import lombok.RequiredArgsConstructor;
import net.minecraft.core.Registry;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.network.FriendlyByteBuf;
import net.minecraft.resources.ResourceLocation;
import net.minecraft.tags.SerializationTags;
import net.minecraft.tags.Tag;
import net.minecraft.util.GsonHelper;
import net.minecraft.world.item.Item;
import net.minecraft.world.item.ItemStack;
import net.minecraft.world.level.ItemLike;
import io.github.fabricators_of_create.porting_lib.crafting.CraftingHelper;

import java.util.Objects;
import java.util.function.Supplier;

/**
 * Class representing an item stack output. Supports both direct stacks and tag output, behaving like an ingredient used for output.
 * Instances are created through the static {@code from*} factories and can round-trip through JSON ({@link #serialize()} /
 * {@link #fromJson(JsonElement)}) and the network ({@link #write(FriendlyByteBuf)} / {@link #read(FriendlyByteBuf)}).
 */
public abstract class ItemOutput implements Supplier<ItemStack> {
  /**
   * Gets the item output of this recipe
   * @return Item output
   */
  @Override
  public abstract ItemStack get();

  /**
   * Writes this output to JSON
   * @return Json element
   */
  public abstract JsonElement serialize();

  /**
   * Creates a new output for the given stack
   * @param stack Stack
   * @return Output
   */
  public static ItemOutput fromStack(ItemStack stack) {
    return new OfStack(stack);
  }

  /**
   * Creates a new output for the given item
   * @param item Item
   * @param count Stack count
   * @return Output
   */
  public static ItemOutput fromItem(ItemLike item, int count) {
    return new OfItem(item.asItem(), count);
  }

  /**
   * Creates a new output for the given item
   * @param item Item
   * @return Output with a stack count of 1
   */
  public static ItemOutput fromItem(ItemLike item) {
    return fromItem(item, 1);
  }

  /**
   * Creates a new output for the given tag
   * @param tag Tag
   * @param count Stack count
   * @return Output resolved lazily through the tag preference
   */
  public static ItemOutput fromTag(Tag<Item> tag, int count) {
    return new OfTagPreference(tag, count);
  }

  /**
   * Reads an item output from JSON
   * @param element Json element
   * @return Read output
   * @throws JsonSyntaxException if the element is neither a string nor an object, or if a named tag does not exist
   */
  public static ItemOutput fromJson(JsonElement element) {
    // bare string form: just an item ID, count defaults to 1
    if (element.isJsonPrimitive()) {
      return fromItem(GsonHelper.convertToItem(element, "item"));
    }
    if (!element.isJsonObject()) {
      throw new JsonSyntaxException("Invalid item output, must be a string or an object");
    }
    // if it has a tag, parse as tag
    JsonObject json = element.getAsJsonObject();
    if (json.has("tag")) {
      String name = GsonHelper.getAsString(json, "tag");
      Tag<Item> tag = SerializationTags.getInstance().getTagOrThrow(Registry.ITEM_REGISTRY, new ResourceLocation(name), n -> new JsonSyntaxException("Unknown tag " + n + " for item output"));
      int count = GsonHelper.getAsInt(json, "count", 1);
      return fromTag(tag, count);
    }
    // default: parse as item stack using Forge
    return fromStack(CraftingHelper.getItemStack(json, true));
  }

  /**
   * Writes this output to the packet buffer
   * @param buffer Packet buffer instance
   */
  public void write(FriendlyByteBuf buffer) {
    // note this resolves the output (including tag preferences) before sending
    buffer.writeItem(get());
  }

  /**
   * Reads an item output from the packet buffer
   * @param buffer Buffer instance
   * @return Item output
   */
  public static ItemOutput read(FriendlyByteBuf buffer) {
    return fromStack(buffer.readItem());
  }

  /** Class for an output that is just an item, simplifies NBT for serializing as vanilla forces NBT to be set for tools and forge goes through extra steps when NBT is set */
  @RequiredArgsConstructor
  private static class OfItem extends ItemOutput {
    private final Item item;
    private final int count;
    // lazily-built stack, created on first call to get()
    private ItemStack cachedStack;

    @Override
    public ItemStack get() {
      if (cachedStack == null) {
        cachedStack = new ItemStack(item, count);
      }
      return cachedStack;
    }

    @Override
    public JsonElement serialize() {
      String itemName = Objects.requireNonNull(Registry.ITEM.getKey(item)).toString();
      // counts above 1 need the object form; a plain ID string suffices otherwise
      if (count > 1) {
        JsonObject json = new JsonObject();
        json.addProperty("item", itemName);
        json.addProperty("count", count);
        return json;
      } else {
        return new JsonPrimitive(itemName);
      }
    }
  }

  /** Class for an output that is just a stack */
  @RequiredArgsConstructor
  private static class OfStack extends ItemOutput {
    private final ItemStack stack;

    @Override
    public ItemStack get() {
      return stack;
    }

    @Override
    public JsonElement serialize() {
      String itemName = Objects.requireNonNull(Registry.ITEM.getKey(stack.getItem())).toString();
      int count = stack.getCount();
      // if the item has NBT or a count, write as object
      if (stack.hasTag() || count > 1) {
        JsonObject jsonResult = new JsonObject();
        jsonResult.addProperty("item", itemName);
        if (count > 1) {
          jsonResult.addProperty("count", count);
        }
        CompoundTag nbt = stack.getTag();
        if (nbt != null) {
          jsonResult.addProperty("nbt", nbt.toString());
        }
        return jsonResult;
      } else {
        return new JsonPrimitive(itemName);
      }
    }
  }

  /** Class for an output from a tag preference */
  @RequiredArgsConstructor
  private static class OfTagPreference extends ItemOutput {
    private final Tag<Item> tag;
    private final int count;
    // resolved stack; EMPTY when the tag preference yields no item
    private ItemStack cachedResult = null;

    @Override
    public ItemStack get() {
      // cache the result from the tag preference to save effort, especially helpful if the tag becomes invalid
      // this object should only exist in recipes so no need to invalidate the cache
      if (cachedResult == null) {
        cachedResult = TagPreference.getItems().getPreference(tag)
          .map(item -> new ItemStack(item, count))
          .orElse(ItemStack.EMPTY);
      }
      return cachedResult;
    }

    @Override
    public JsonElement serialize() {
      JsonObject json = new JsonObject();
      json.addProperty("tag", SerializationTags.getInstance().getIdOrThrow(Registry.ITEM_REGISTRY, tag, () -> new IllegalStateException("Unregistered tag " + tag)).toString());
      if (count != 1) {
        json.addProperty("count", count);
      }
      return json;
    }
  }
}
Vigilant: out-of-band detection of failures in virtual machines What do our computer systems do all day? How do we make sure they continue doing it when failures occur? Traditional approaches to answering these questions often involve in-band monitoring agents. However in-band agents suffer from several drawbacks: they need to be written or customized for every workload (operating system and possibly also application), they comprise potential security liabilities, and are themselves affected by adverse conditions in the monitored systems. Virtualization technology makes it possible to encapsulate an entire operating system or application instance within a virtual object that can then be easily monitored and manipulated without any knowledge of the contents or behavior of that object. This can be done out-of-band, using general purpose agents that do not reside inside the object, and hence are not affected by the behavior of the object. This paper describes Vigilant, a novel way of monitoring virtual machines for problems. Vigilant requires no specialized agents inside a virtual object it is monitoring. Instead, it uses the hypervisor to directly monitor the resource requests and utilization of an object. Machine learning methods are then used to analyze the readings. Our experimental results show that problems can be detected out-of-band with high accuracy. Using Vigilant we demonstrate that out-of-band monitoring using virtualization and machine learning can accurately identify faults in the guest OS, while avoiding the many pitfalls associated with in-band monitoring.
<reponame>pwr-pbrwio/PBR20M2
////////////////////////////////////////////////////////////////////////////////
// checkstyle: Checks Java source code for adherence to a set of rules.
// Copyright (C) 2001-2018 the original author or authors.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
////////////////////////////////////////////////////////////////////////////////

package com.puppycrawl.tools.checkstyle.checks.metrics;

import static com.puppycrawl.tools.checkstyle.checks.metrics.ClassFanOutComplexityCheck.MSG_KEY;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.junit.Assert;
import org.junit.Test;

import com.puppycrawl.tools.checkstyle.AbstractModuleTestSupport;
import com.puppycrawl.tools.checkstyle.DefaultConfiguration;
import com.puppycrawl.tools.checkstyle.api.CheckstyleException;
import com.puppycrawl.tools.checkstyle.api.TokenTypes;
import com.puppycrawl.tools.checkstyle.utils.CommonUtil;

/**
 * Unit tests for {@code ClassFanOutComplexityCheck}. Most tests set {@code max}
 * to 0 so that any class coupling at all is reported; expected messages list
 * "line:col" locations with the observed fan-out versus that limit.
 */
public class ClassFanOutComplexityCheckTest extends AbstractModuleTestSupport {

    @Override
    protected String getPackageLocation() {
        return "com/puppycrawl/tools/checkstyle/checks/metrics/classfanoutcomplexity";
    }

    @Test
    public void test() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        final String[] expected = {
            "6:1: " + getCheckMessage(MSG_KEY, 3, 0),
            "38:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexity.java"), expected);
    }

    @Test
    public void testExcludedPackagesDirectPackages() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        // classes from the two excluded packages no longer count toward fan-out
        checkConfig.addAttribute("excludedPackages",
            "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity.inputs.c,"
                + "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity.inputs.b");
        final String[] expected = {
            "8:1: " + getCheckMessage(MSG_KEY, 2, 0),
        };
        verify(checkConfig,
            getPath("InputClassFanOutComplexityExcludedPackagesDirectPackages.java"), expected);
    }

    @Test
    public void testExcludedPackagesCommonPackages() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        checkConfig.addAttribute("excludedPackages",
            "com.puppycrawl.tools.checkstyle.checks.metrics.inputs.a");
        final String[] expected = {
            "8:1: " + getCheckMessage(MSG_KEY, 2, 0),
            "12:5: " + getCheckMessage(MSG_KEY, 2, 0),
            "18:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig,
            getPath("InputClassFanOutComplexityExcludedPackagesCommonPackage.java"), expected);
    }

    @Test
    public void testExcludedPackagesCommonPackagesWithEndingDot() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        // a trailing dot is an invalid package name and must fail module creation
        checkConfig.addAttribute("excludedPackages",
            "com.puppycrawl.tools.checkstyle.checks.metrics.inputs.a.");
        try {
            createChecker(checkConfig);
            fail("exception expected");
        }
        catch (CheckstyleException ex) {
            final String messageStart =
                "cannot initialize module com.puppycrawl.tools.checkstyle.TreeWalker - "
                    + "Cannot set property 'excludedPackages' to "
                    + "'com.puppycrawl.tools.checkstyle.checks.metrics.inputs.a.' in module "
                    + "com.puppycrawl.tools.checkstyle.checks.metrics."
                    + "ClassFanOutComplexityCheck";
            assertTrue("Invalid exception message, should start with: " + messageStart,
                ex.getMessage().startsWith(messageStart));
        }
    }

    @Test
    public void testExcludedPackagesAllIgnored() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        // excluding every referenced package leaves no violations at all
        checkConfig.addAttribute("excludedPackages",
            "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity.inputs.a.aa,"
                + "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity."
                + "inputs.a.ab,"
                + "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity.inputs.b,"
                + "com.puppycrawl.tools.checkstyle.checks.metrics.classfanoutcomplexity.inputs.c");
        final String[] expected = CommonUtil.EMPTY_STRING_ARRAY;
        verify(checkConfig,
            getPath("InputClassFanOutComplexityExcludedPackagesAllIgnored.java"), expected);
    }

    @Test
    public void test15() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        final String[] expected = {
            "9:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexity15Extensions.java"), expected);
    }

    @Test
    public void testDefaultConfiguration() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        createChecker(checkConfig);
        // with the default max, the input file produces no violations
        final String[] expected = CommonUtil.EMPTY_STRING_ARRAY;
        verify(checkConfig, getPath("InputClassFanOutComplexity.java"), expected);
    }

    @Test
    public void testGetAcceptableTokens() {
        final ClassFanOutComplexityCheck classFanOutComplexityCheckObj =
            new ClassFanOutComplexityCheck();
        final int[] actual = classFanOutComplexityCheckObj.getAcceptableTokens();
        final int[] expected = {
            TokenTypes.PACKAGE_DEF,
            TokenTypes.IMPORT,
            TokenTypes.CLASS_DEF,
            TokenTypes.EXTENDS_CLAUSE,
            TokenTypes.IMPLEMENTS_CLAUSE,
            TokenTypes.ANNOTATION,
            TokenTypes.INTERFACE_DEF,
            TokenTypes.ENUM_DEF,
            TokenTypes.TYPE,
            TokenTypes.LITERAL_NEW,
            TokenTypes.LITERAL_THROWS,
            TokenTypes.ANNOTATION_DEF,
        };
        Assert.assertNotNull("Acceptable tokens should not be null", actual);
        Assert.assertArrayEquals("Invalid acceptable tokens", expected, actual);
    }

    @Test
    public void testRegularExpression() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        // classes matching the regexp are excluded from the count (3 drops to 2 on line 6)
        checkConfig.addAttribute("excludeClassesRegexps", "^Inner.*");
        final String[] expected = {
            "6:1: " + getCheckMessage(MSG_KEY, 2, 0),
            "38:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexity.java"), expected);
    }

    @Test
    public void testEmptyRegularExpression() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        // an empty regexp excludes nothing, matching the plain test() expectations
        checkConfig.addAttribute("excludeClassesRegexps", "");
        final String[] expected = {
            "6:1: " + getCheckMessage(MSG_KEY, 3, 0),
            "38:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexity.java"), expected);
    }

    @Test
    public void testWithMultiDimensionalArray() throws Exception {
        final DefaultConfiguration moduleConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        moduleConfig.addAttribute("max", "0");
        final String[] expected = CommonUtil.EMPTY_STRING_ARRAY;
        verify(moduleConfig,
            getPath("InputClassFanOutComplexityMultiDimensionalArray.java"), expected);
    }

    @Test
    public void testPackageName() throws Exception {
        final DefaultConfiguration moduleConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        moduleConfig.addAttribute("max", "0");
        final String[] expected = CommonUtil.EMPTY_STRING_ARRAY;
        verify(moduleConfig, getPath("InputClassFanOutComplexityPackageName.java"), expected);
    }

    @Test
    public void testExtends() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        final String[] expected = {
            "3:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexityExtends.java"), expected);
    }

    @Test
    public void testImplements() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        final String[] expected = {
            "3:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexityImplements.java"), expected);
    }

    @Test
    public void testAnnotation() throws Exception {
        final DefaultConfiguration checkConfig =
            createModuleConfig(ClassFanOutComplexityCheck.class);
        checkConfig.addAttribute("max", "0");
        final String[] expected = {
            "9:1: " + getCheckMessage(MSG_KEY, 2, 0),
            "25:5: " + getCheckMessage(MSG_KEY, 2, 0),
            "34:5: " + getCheckMessage(MSG_KEY, 3, 0),
            "44:5: " + getCheckMessage(MSG_KEY, 2, 0),
            "59:1: " + getCheckMessage(MSG_KEY, 1, 0),
            "79:1: " + getCheckMessage(MSG_KEY, 1, 0),
            "82:1: " + getCheckMessage(MSG_KEY, 1, 0),
        };
        verify(checkConfig, getPath("InputClassFanOutComplexityAnnotations.java"), expected);
    }
}
For those who were there, DMX's recent concert at New York hip-hop institution S.O.B.'s was nothing short of legendary. Looking no worse for wear after two recent prison stints, DMX prowled the stage, communicating in screams and barks, putting the thrill of life in the fully-packed room. It's tempting to say that, for the spiritually-inclined Yonkers MC, the evening was something of a salvation, even if only for a moment. However, the question remained: was DMX's new music any good? The Weigh In, DMX's recently-released mixtape, looks to clear up this question. We're dealing with a DMX who has indeed lost a step on the mic -- while prime X was never the nicest with his lyrics, he trended towards frenetic instrumentals that required less technique, and more plain tenacity to keep up with them. On The Weigh In, he mostly sticks to slower beats, growling over mid-tempo melodicisms. This isn't only weird, it feels below X -- he's out of place, warbling hooks on "That's My Baby," which features a Tyrese guest spot (why doesn't that dude sing more?). These aren't bad songs per se, but when juxtaposed with the truly iconic, era-defining music that DMX was responsible for, these songs do not hold up. “There’s a new sheriff in town, one that’s been re-elected,” DMX raps on “Where My Dogs At,” a Western-inflected tune that recalls the soundtrack work of Ennio Morricone. The beat stomps, and X rises to the occasion with his vintage intensity. “I’ll make you smile ear-to-ear like the Joker,” he continues, forcing the line into rap through sheer force of personality. It’s terrifying. It’s one of two songs that can run with the big dogs in DMX’s classic-filled catalog, including “Ruff Ryders’ Anthem,” “Party Up,” “Where Da Hood At?” and “X Gon’ Give It To Ya,” perhaps his most enduring hit. 
“Wright Or Wrong,” the mixtape’s closing track, recalls the insular, hookily paranoid world of his earlier work, a chorus of DMX’s chanting him on as he tries to talk himself back up to the heights he once ascended to. He opens the song with the couplet, “I’ve been doing this for a minute/So with or without you, I’mma get it,” expressing the same rugged self-reliance that made him a superstar. The Weigh In shouldn’t be taken as a full entry into DMX’s catalog; instead, consider it a document of a once-vital artist rediscovering his voice, finding what it means to be himself again. Sure, The Weigh In isn’t a good mixtape by any stretch of the imagination, but it contains one great song. For DMX, at this point, that’s good enough.
Case report: Optical coherence tomography for monitoring biologic therapy in psoriasis and atopic dermatitis Biologic therapies are increasingly used to treat chronic inflammatory skin diseases such as psoriasis and atopic dermatitis. In clinical practice, scores based on evaluation of objective and subjective symptoms are used to assess disease severity, leading to evaluation of treatment goals with clinical decisions on treatment initiation, switch to another treatment modality or to discontinue current treatment. However, this visual-based scoring is relatively subjective and inaccurate due to inter- and intraobserver reliability. Optical coherence tomography (OCT) is a fast, high-resolution, in vivo imaging modality that enables the visualization of skin structure and vasculature. We evaluated the use of OCT for quantification and monitoring of skin inflammation to improve objective assessment of disease activity in patients with psoriasis and atopic dermatitis. We assessed the following imaging parameters including epidermal thickness, vascular density, plexus depth, vessel diameter, and vessel count. A total of four patients with psoriasis or atopic dermatitis were treated with biologic agents according to current treatment guidelines. OCT was used to monitor their individual treatment response in a target lesion representing disease activity for 52 weeks. Psoriatic and eczema lesions exhibited higher epidermal thickness, increased vascular density, and higher vessel count compared to uninvolved skin. An upward shift of the superficial vascular plexus accompanied by smaller vessel diameters was seen in psoriasis in contrast to atopic dermatitis, where larger vessels were observed. A response to biologic therapy was characterized by normalization of the imaging parameters in the target lesions in comparison to uninvolved skin during the observation period of 52 weeks. 
Optical coherence tomography potentially serves as an instrument to monitor biologic therapy in inflammatory skin diseases. Imaging parameters may enable objective quantification of inflammation in psoriasis or atopic dermatitis in selected representative skin areas. OCT may reveal persistent subclinical inflammation in atopic dermatitis beyond clinical remission. Biologic therapies are increasingly used to treat chronic inflammatory skin diseases such as psoriasis and atopic dermatitis. In clinical practice, scores based on evaluation of objective and subjective symptoms are used to assess disease severity, leading to evaluation of treatment goals with clinical decisions on treatment initiation, switch to another treatment modality or to discontinue current treatment. However, this visual-based scoring is relatively subjective and inaccurate due to inter-and intraobserver reliability. Optical coherence tomography (OCT) is a fast, high-resolution, in vivo imaging modality that enables the visualization of skin structure and vasculature. We evaluated the use of OCT for quantification and monitoring of skin inflammation to improve objective assessment of disease activity in patients with psoriasis and atopic dermatitis. We assessed the following imaging parameters including epidermal thickness, vascular density, plexus depth, vessel diameter, and vessel count. A total of four patients with psoriasis or atopic dermatitis were treated with biologic agents according to current treatment guidelines. OCT was used to monitor their individual treatment response in a target lesion representing disease activity for 52 weeks. Psoriatic and eczema lesions exhibited higher epidermal thickness, increased vascular density, and higher vessel count compared to uninvolved skin. An upward shift of the superficial vascular plexus accompanied by smaller vessel diameters was seen in psoriasis in contrast to atopic dermatitis, where larger vessels were observed. 
A response to biologic therapy was characterized by normalization of the imaging parameters in the target lesions in comparison to uninvolved skin during the observation period of 52 weeks. Optical coherence tomography potentially serves as an instrument to monitor biologic therapy in inflammatory skin diseases. Imaging parameters may enable objective quantification of inflammation.

Introduction

Psoriasis and atopic dermatitis are both common inflammatory skin diseases. About 30% of the patients with psoriasis and about 10% with atopic dermatitis require a systemic therapy due to their disease severity. Due to their safety profile and efficacy, biologic agents and small molecules are being increasingly used. A main drawback of these innovative treatments is that they may lose efficacy over time and that not all patients respond to a selected therapy. Also, in some cases the current dosing is not needed and potentially exposes the individuals to adverse events that may be avoided by adjusted dosing that fits their individual needs. Currently, however, indication and continuation of treatment are mainly dependent on clinical scores. These clinical scores are based on visual examination that might be biased by the experience of the clinician resulting in inter- and intraobserver variabilities. Facing these limitations, we aimed to develop an objective and more reliable evaluation method using image-based scoring to enable objective guidance of clinical decision making to avoid delayed and inconsistent therapy decisions. Optical coherence tomography (OCT) is a fast, high-resolution, in vivo imaging method with growing influence in the dermatological practice, especially in non-melanoma skin cancer. The imaging technique of OCT is based on Michelson interferometry. In the assessment of inflammatory skin diseases, additional information on vascular network is of special interest. OCT angiography based on speckle variance detection allows the visualization of cutaneous vasculature.
It was reported that different vascular patterns and shapes (dots, blobs, coils, lines, curves, and serpiginous vessels) could distinguish healthy skin from lesional inflammatory skin. In psoriatic skin, structural changes and alterations in vessel density and size were shown. Previous studies demonstrated the correlation between histopathological findings and structural features of psoriasis and chronic inflammation apparent in OCT. In atopic dermatitis, epidermal hypertrophy and vascular depth were discussed as important parameters for disease severity. In the past 20 years, OCT was used for single timepoint observations and short-term monitoring of psoriasis and atopic dermatitis. Based on these observations, here, we performed an interim analysis of a large ongoing prospective, long-term, observational study using a clinically approved OCT system for monitoring patients with psoriasis and atopic dermatitis undergoing biologic treatment over 52 weeks. Subjects and biologic therapies The study was conducted according to the guidelines of the Declaration of Helsinki and ethical approval was obtained from the Ethics Committee of the University of Lübeck. Adult subjects eligible for inclusion were diagnosed with moderate to severe plaque psoriasis or atopic dermatitis and received treatment according to current treatment guidelines. Written informed consent was obtained from all patients. OCT data of three patients with psoriasis and one patient with atopic dermatitis was analyzed. They underwent biologic treatment with anti-IL-17 ixekizumab (n = 1), anti-IL-23 risankizumab (n = 1), anti-TNF-α certolizumab (n = 1) or anti-IL-4Rα dupilumab (n = 1). At baseline, week 2, 4, 16, 28, 40, and 52 of treatment, clinical scores were determined and OCT scans were performed at one lesional site and one perilesional control site for each patient (Figure 1).
Clinical scoring Visual-based scores such as psoriasis area and severity index (PASI), eczema area and severity index (EASI), and body surface area (BSA) were determined by an experienced physician. Additional digital dermoscopy images were captured with DermoGenius ultra polarized (DermoScan GmbH, Regensburg, Germany). To date, whole body OCT scanners do not exist and large-area scanning cannot be performed within a reasonable time. Thus, we must select target lesions for small-area imaging. The total sign score (TSS) was used to evaluate selected skin areas. For psoriasis, three clinical signs including erythema, induration, and desquamation were assessed using a 5-point scale (0 absent, 1 mild, 2 moderate, 3 severe, and 4 very severe) Selection of target lesions and control sites for optical coherence tomography (OCT). Patient 3 was a 45-year-old male patient with plaque psoriasis showing PASI = 10.8 and BSA = 10% at baseline. A representative target lesion (blue) on his trunk was selected for 52 weeks monitoring. A perilesional location was selected as the control site (gray). The scans were performed repetitively at the same lesional and non-lesional location. Optical coherence tomography We utilized a clinically approved VivoSight Dx OCT scanner (Michelson Diagnostics, Maidstone, Kent, UK). The emission wavelength is 1,305 nm. The lateral and axial resolution is <7.5 µm and <5 µm, respectively. The scan area is 6 mm × 6 mm. The penetration depth is about 1.2 mm. A three-dimensional image stack with 6 mm width records a sequence of 180 B-scans with an interslice spacing of 33.3 µm. Images were acquired as vertical B-scans and en-face scans. The acquisition time of an image stack is about 30 s generating both structural and vascular images. The en-face scans are reconstructed images to show skin layers at a constant depth from the skin surface. For an image stack with 1.2 mm depth, a sequence of 120 en-face scans with an interslice spacing of 10 µm can be obtained.
Patients were at rest prior to scanning. Measurement conditions (location, patient position, and room temperature) were kept constant. Three regions of the same target lesion and three regions of its perilesional, clinically uninvolved skin area as reference were repetitively imaged. The target lesion was located either in upper extremities or trunk. Comparable to conventional computer-assisted dermoscopy, we used system-integrated overview images of the body and skin areas to make sure that measurements are done in consistent areas. The laser scanning handpiece uses a plastic tip as a distance holder between optical component and skin surface. The distance holder was gently placed onto the skin to avoid pressure- or shear-induced effects on the vessel diameter. Terminal hair was carefully trimmed. Scales were not removed. According to the patients' verbal feedback, the OCT scans do not lead to any patient discomfort. Imaging parameters Clinical signs such as erythema and skin thickening can result from microvascular changes and epidermal hypertrophy, respectively. Thus, we defined epidermal thickness, vascular density, depth, diameter, and vessel count as imaging parameters for quantification of skin inflammation (Figure 2). The calculation of vascular density, depth, and diameter was assisted by the proprietary vessel analysis software (Michelson Diagnostics, Maidstone, Kent, UK). All measures were performed by the same operator. Epidermal thickness ET We used the integrated on-screen ruler tool to measure the maximum epidermal thickness (ET) per image stack, then a mean ET of three image stacks was calculated with standard deviation. ET_L is the epidermal thickness of the target lesion and ET_C of the control at baseline. The same indices were used analogously for the other imaging parameters. Vascular density The vascular density was calculated at the depth of the superficial plexus.
It is the density of the top of the superficial plexus calculated over the depth range, which is the plexus depth ± 60 µm. Plexus depth The plexus depth is the depth of the top of the superficial plexus, where the density reaches 50% of the maximum vessel density. Vessel diameter The vessel diameter is a modal value calculated at the depth of the superficial plexus. It is the diameter of the majority of the vessels. Vessel count N The vessel count N was calculated using ImageJ 1.53k (U.S. National Institutes of Health, Bethesda, MD, USA). We anticipated that the cross sections of the elongated capillaries could be most accurately measured at about 200 µm depth, as the last few en-face scans capture undesired projection artifacts from the superficial vessels. The en-face images were thresholded and converted into binary images. Adjacent or overlapping vessels were divided by watershed segmentation. The vessels were counted by automatic particle analyzer using Sobel edge detection. Response rate We obtained fit coefficients using MATLAB R2021b (The MathWorks, Natick, MA, USA) to fit ET to an exponential equation $ET(t) = A\,e^{-\kappa t} + ET_R$ as a function of time t based on least squares algorithm, where $ET_R$ is the estimated minimum ET during 52 weeks of therapy and A is an adjusting parameter. The number of weeks of therapy $t_R$ required for ET reduction achieving $1.25\,ET_R$ is then given by $t_R = \frac{1}{\kappa}\ln\left(\frac{4A}{ET_R}\right)$. If physiologically achievable, instead of $k\,ET_R$ (here k = 1.25), $t_R$ can also be calculated for $0.5\,ET_L$ (50% reduction of $ET_L$ from baseline). Statistical analysis We used Pearson correlation coefficient r to show the correlation between ET or N and the clinical scores. Results Description of patients at baseline Patient 1 was a 52-year-old male with psoriasis (PASI = 19.5, BSA = 37%) who was treated with ixekizumab. He had no previous systemic therapy. Patient 4 was a 50-year-old female with atopic dermatitis (EASI = 17, BSA = 25%) who was treated with dupilumab. She had no previous systemic therapy. 
In patient 1 and 2, the target lesions were located on their right arms. In patient 3 and 4, the target lesions were located on their trunk. The control area was their perilesional skin. Structural imaging parameters Epidermal thickness In line with clinical and histopathological findings, at baseline we measured a thicker ET L in psoriasis due to hyperkeratosis (Figure 3, top). A similar observation was made in atopic dermatitis where the increased ET L was due to spongiosis and/or lichenification (Figure 3, bottom). At baseline, the mean ET L was 405.8 m (±20.8 m) in psoriatic skin and 270 m (±25.5 m) in atopic dermatitis. ET C ranged from 90 to 110 m and was similar in uninvolved skin of patients with psoriasis and atopic dermatitis. The thickened ET L decreased during the observation period under therapy (Figure 4, row 1). According to t R, 0.5 ET L was achieved as follows: ixekizumab (after 3.95 weeks), certolizumab (after 4.23 weeks), risankizumab (after 6.08 weeks), and dupilumab (after 22.13 weeks). 1.25 ET R was achieved as follows: ixekizumab (after 4.31 weeks), certolizumab (after 6.52 weeks), risankizumab (after 11.23 weeks), and dupilumab (after 33.89 weeks). In good agreement with the OCT measurements, the clinical severity was reduced in the target lesion (TSS) and in the global assessment (PASI or EASI) (Supplementary Vascular imaging parameters Vascular patterns and shapes We used the terminology as proposed by Ulrich et al. Figure 2). Psoriatic skin exhibited "dotted" or "pinpoint-like" vessels resulting from vessel elongation within extended dermal papillae. Lichenification is often observed in chronic eczema. Coarsening and wrinkling of the lichenified skin may change the direction of the capillary loops causing a striped pattern. Thus, "comma-like" vessels were observed in atopic dermatitis. Uninvolved skin typically has "linear" vessels generating a reticular pattern. 
Vascular density, plexus depth, and vessel diameter Vascular parameters decreased or normalized during therapy course (Figure 4, rows 2-4). Before treatment the mean Optical coherence tomography (OCT) imaging parameters for objective quantification of skin inflammation. Epidermal thickness (ET), vascular density, plexus depth, vessel diameter, and vessel count N were calculated in unaffected skin (control) and in target lesions (psoriasis and atopic dermatitis) at baseline. Lesional inflammatory skin exhibited increased epidermal thickness denoted by green lines and alterations in vasculature in comparison to control sites. The vascular density is exemplarily shown at 400 m depth and the vessel diameter is shown at 600 m depth. The plexus depth is indicated by white arrows. The vessel count was performed at 200 m depth. L in psoriatic skin was 4.6% (±1.9%), which was higher than C with 2.1% (±1.23%). L in atopic dermatitis was 24.5% (±8.4%), while C was 6.3% (±1.24%). The higher L from baseline decreased after therapy start as a result of reduction in vessel diameter and/or vessel count (Supplementary Figure 3). We defined the location of the origin in the upper left of the image according to the general convention in image processing. This means that plexus depth and optical axis z have the same direction. For plots, the origin is located in the lower left. We observed an upward shift of the plexus = L - C by −83.4 m (± 42.3 m) in psoriatic skin L when compared to C. At baseline, the mean L in psoriatic skin was 35.2 m (± 6.9 m), which was smaller than C with 58.6 m (±26.8 m). We did not observe an upward shift in the patient with atopic dermatitis. In contrast to the psoriatic lesions, in atopic dermatitis L was 81 m (±3.1 m), which was larger than C with 57.7 m (±6.9 m). Vessel count The mean N L in psoriasis and atopic dermatitis was 531.4 (±285.9) and 987 (±37.5), respectively. The mean N C was 35.08 (±14.27). 
After 52 weeks, we observed a full normalization of the vessel count (Figure 4, row 5). Clinical response The results of this case series indicated a good correlation with the clinical scores. A 75% reduction from baseline in the TSS (TSS-75) was achieved under ixekizumab after 3 weeks and under certolizumab after 3.5 weeks. In this observation period, TSS-75 under risankizumab was not achieved. After 3 weeks, TSS-75 was achieved under dupilumab. Discussion Optical coherence tomography is a suitable imaging tool for the investigation and monitoring of inflammatory skin diseases. The VivoSight OCT offers user-friendly handling and fast scanning, which is important for its application as Monitoring therapy response using optical coherence tomography (OCT). In psoriasis (P1-P3) and atopic dermatitis (A4), the epidermal thickness (ET) was fitted to calculate the exponential decay rate or response rate. The vessel count N was calculated using ImageJ (U.S. National Institutes of Health, Bethesda, MD, USA). After 52 weeks of treatment, we observed a full normalization of the increased epidermal thickness and the vessel count. The vascular parameters such as vascular density, vessel diameter, and plexus depth were calculated by using the proprietary VivoSight vessel analysis software (Michelson Diagnostics, Maidstone, Kent, UK). Prior to treatment, the vascular density in inflamed skin was higher than in the control areas. Interestingly, we observed an upward shift of the plexus depth and smaller vessel diameters in psoriatic skin in comparison to the control at baseline. Eczematous skin showed larger vessel diameters. As a potential sign of therapy response, the vascular parameters normalized during therapy course. For correlation, the total sign score (TSS) was used to clinically assess the target lesions and their control sites. For psoriasis, erythema E, induration I, and desquamation D were assessed using a 5-point scale. 
For atopic dermatitis, six clinical signs including erythema E, papulation I, excoriation Ex, lichenification Li, crusting C, and dryness Dr were graded using a 4-point scale. in inflammatory skin diseases. Furthermore, OCT with dynamic contrast allows color-coding of different epithelial cell layers based on micromotions of cellular structures. So far, these technical upgrades have not yet been incorporated into a clinically approved OCT system for dermatological applications. The future goal is to obtain contactless, whole body scans via rapid scanning of large body sites as commercial OCT systems only provide small fields of view. To date, we are dependent on the selection of target lesions for imaging. While psoriatic lesions tend to recur at the same skin sites that were affected previously, in atopic dermatitis the eczema lesions tend to shift location. Under this aspect, monitoring of target lesions could potentially miss an eczema flare or even psoriasis that exacerbates on a different body part. Psoriasis and atopic dermatitis are associated with lower skin hydration. Consequently, effects of laser-tissue interactions such as scattering (e.g., scaling, hyperkeratosis, and lichenification) and shielding (e.g., shadow artifacts caused by hair or crusts) could lower optical penetration and might masquerade as a loss of perfusion. In an attempt to define imaging biomarkers for inflammatory skin diseases, we demonstrated the application of OCT for monitoring biologic therapy in psoriasis and atopic dermatitis based on four case studies. Biomarkers are objective, quantifiable, and reproducible. We should differentiate between robust and weak imaging biomarkers. Robust biomarkers might be ET and N, as these parameters are less influenced by internal and external factors. In this study, manual measurements of the maximum ET still remain vulnerable to intraobserver variability, which was minimized by repeated measurements. 
Hence, further studies should incorporate computer-assisted analysis of the ET. Weaker biomarkers such as and might be highly influenced by internal stress factors of the subject (comparable to the "white coat effect") and outdoor temperatures (hot temperatures lead to vasodilation and cold temperatures lead to vasoconstriction) as we observed intraindividual variations in control sites. In this work, we provided a detailed description on vascular alterations in psoriasis and atopic dermatitis compared to non-lesional skin. Our findings were consistent with previous observations reported on higher vascularization and characteristic vascular pattern in psoriasis. While "dotted" vessels were previously described in psoriasis, we firstly described the appearance of "comma-like" vessels in atopic dermatitis with lichenification. We detected an upward shift of the plexus in psoriasis. We anticipated that a plexus shift results from an elongation of the capillary loops, which is more distinctive in severe psoriasis. At baseline, no shift of the plexus depth was observed in our patient with atopic dermatitis. Byers et al. described deeper vascular layers in atopic dermatitis so that we need to make further investigations before coming to a conclusion. We observed large vessels and high vascular densities in atopic dermatitis that were consistent with former studies. Manfredini et al. reported on a reduction of dermal edema and vascularization under dupilumab. In our analysis, we also observed a normalization of the vasculature under therapy. Further, we observed that vessel elongation in psoriasis appeared with smaller capillary loop diameters. Evidently, a larger case number is required to confirm our preliminary observations. The aim of biologic treatment especially in atopic dermatitis is to improve the skin barrier. We observed a good correlation between ET and TSS. 
Our results on the decrease of ET under therapy were in good agreement with previous studies using topical and non-biologic systemic therapies. In contrast to conventional therapies of psoriasis, we observed an accelerated decrease of ET. Similar results on ET in atopic dermatitis were reported under dupilumab. We also anticipated that ET may be a robust imaging biomarker as already stated by Byers et al. The rapid decrease in ET can be regarded as an exponential decay. We interpreted the decay rate κ as a response rate. In theory, κ = 0 means steady state. Values of κ > 0 implicate therapy response, meaning the larger κ, the higher the response. κ < 0 refers to therapy failure. Patient 2 was bio-experienced with a long record of pretreatments, therefore, a slower therapy response in TSS was seen under risankizumab. Interestingly, in patient 4 with atopic dermatitis higher vessel density and diameter were observed in the control area at baseline indicating subclinical inflammation in clinically healthy-appearing skin. In addition, although a rapid improvement of patient 4 was clinically observed shown as TSS-75 after 3 weeks, using OCT we were able to detect a prolonged epidermal thickening for $t_R(1.25\,ET_R)$ = 33.89 weeks as a sign of persistent disease activity. Comparable observations were reported by Byers et al. The understanding of subclinical inflammation highlights the importance of therapy continuation to avoid the risk of relapse. Conclusion Preliminary observations of this work showed that OCT may be suitable for objective quantification of structural (epidermal thickness) and vascular parameters (vascular density, depth, diameter, and count). These parameters may serve as objective imaging biomarkers for monitoring therapeutic effects in psoriasis and atopic dermatitis. The relatively short acquisition time of OCT is an important demand for medical imaging to minimize time burden for patients. 
Therapy response may be characterized by reduction of epidermal thickness and normalization of vascular network. Potential imaging biomarkers such as epidermal thickness and vessel count exhibited rapid changes. The calculated response rate may serve as a useful parameter in the assessment of therapeutic effects. Additional diagnostic value of OCT angiography could be seen in the detection of subclinical inflammation that implicates the need for therapy continuation beyond clinical remission. In this preliminary evaluation, all patients have responded well to their systemic treatment. In the next step, we will also evaluate insufficient therapy responses or therapy failures in a large prospective, long-term, observational study. Additional imaging biomarkers revealing desquamation, excoriation, and lichenification should be considered in upcoming studies. The vision of the future is to provide "OCT-guided therapy" enhancing the current dermatological assessment and contributing to personalized medicine. Data availability statement The original contributions presented in this study are included in the article/Supplementary material, further inquiries can be directed to the corresponding author. Ethics statement The studies involving human participants were reviewed and approved by Ethics Committee of the University of Lbeck, Germany. The patients/participants provided their written informed consent to participate in this study. Author contributions LH-W and HY contributed to data acquisition. LH-W performed the data analysis and wrote the main manuscript text. All authors contributed to manuscript revision, read, and approved the submitted version. Funding This project was supported by the Clinician-Scientist-Program (LH-W) and by structured funding of the Cluster of Excellence EXC2167 "Precision Medicine in Chronic Inflammation." We acknowledge financial support by Land Schleswig-Holstein within the funding program "Open Access Publikationsfonds."
The invention is particularly suitable for use in the retreading of a radial truck tire having a belt structure of metal reinforcement cords, wherein the belt structure is damaged and requires removal of a number of belt plies sufficient to adversely affect the ability of the belt structure to restrict expansion of the tire, when inflated. In such cases, it is necessary to prevent over-expansion of the tire casing when the restraint of the belt structure is removed. The invention is directed to solving this problem. Briefly stated, the invention is in the provision of a narrow, inextensible band centrally around the outer peripheral surface of the tire casing after the worn tread and damaged belt plies have been removed. The band is narrow enough to restrict expansion of the tire casing, when inflated, but not wide enough to radically alter the reinforcement characteristics of the belt structure being rebuilt. U.S. Pat. No. 1,678,211 discloses the use of a single metal cord, or wide metal band, to restrict expansion of the tire at its centerline for the purpose of ballooning out the sidewalls of the tire to increase the width of the tire tread and hopefully improve traction of the tire. U.S. Pat. No. 3,464,873 is concerned with the conversion of a bias tire to a bias-belted tire by removing the tread of the bias tire and then interposing a breaker structure between the buffed tire casing and a newly formed tread on the casing. The patent discloses an inextensible breaker structure for placement around the outer periphery of the bias tire after it is distorted and reduced to the rolling radius of the tire. Such a step would be impractical and detrimental to the retreading of a radial tire, since the rolling radius of a radial tire varies substantially from the normal radius of the tire, when inflated. 
Thus, both patents are concerned with totally different technology and techniques which are simply not suitable in the retreading of a radial truck tire having a belt structure which is severely damaged and needs repair or replacement.
This invention relates to cartons, and more particularly, to a beverage container carton having a carrying handle. In the marketing of soft drinks, beer and other beverages, it is well known to sell those retail consumer products in containers, such as cans, glass bottles, PET bottles or other containers which are grouped together in packs of four, six, eight, ten, twelve, twenty-four or any number of containers. Particularly in the case of twelve packs, it is common to package the containers in cartons so as to make it easier to handle the product for the wholesaler and the retailer, as well as for the retail consumer. A wide variety of different types of container cartons are known. One particular type that has found significant commercial success over the years is a so-called wraparound or sleeve-style carton. In a wraparound carton, a number of containers, e.g., twelve, are wrapped in a paperboard box or carton having a top and bottom wall panels, side wall panels, and end flaps on each end. The end flaps at each end of the top, bottom and side panels are sealed one to the other, thereby providing a closed end and sealed package or carton for the containers. With this and many types of container carton packages, a carrying handle is often provided on the carton so that both the retailer and retail consumer can more easily carry the carton. A number of different carrying handles are known in the wraparound carton art. The overall purpose of such carton handles is to provide an easy to use handle that is structurally sound so the users can pick up and carry the wraparound carton simply through use of the handle structure without concern that the carton or handle will rip or fail. Wraparound cartons of this type are commonly punched or die-cut from paperboard material. Different paperboard characteristics such as the composition and thickness offer differing amounts of strength, particularly tear strength, to the material and, as such, the resulting carton. 
Naturally, thicker, denser and stronger paperboard stock is typically more expensive and carton manufacturers who produce great quantities of paperboard cartons are interested in providing the most economical carton without sacrificing functionality, including carton strength. Paperboard stock that is thinner and made from non-virgin pulp is often less expensive than thicker stock made from virgin pulp, but the strength characteristics of such paperboard stock are also often lower. As such, carton designers endeavor to utilize the most economical paperboard stock while providing the requisite functionality and strength to the carton design. Moreover, the use of different types of paperboard has a significantly different environmental impact. For example, two common types of paperboard utilized in beverage container cartons are coated recycled board (CRB) and coated unbleached kraftboard (CUK). As the name implies, the CRB is made from 100% recycled components, while the CUK is made from only 20% recycled materials. Environmental impact analysis has shown that the use of CRB is drastically more environmentally beneficial than the use of CUK with significant reductions in wood use, net energy consumption, overall energy for production, sulfur dioxide and nitrogen oxide generation (SO2 and NOx), greenhouse gas emissions, hazardous air pollutants (HAP), volatile organic compounds (VOCs), total reduced sulfur (TRS), wastewater generation, biochemical and chemical oxygen demand (BOD and COD), and solid waste. In some prior art carton designs, the top panel portions of the carton are adhesively bonded together so as to overlap and for closing the wraparound carton and reinforcing a handle area between two handle openings in the top panel portions. One particular design disclosed in U.S. Pat. No. 
6,170,741 includes a separate sheet or insert of material for reinforcement of the inner top panel portions which is bonded to it, presumably to allow for the use of cheaper paperboard stock without sacrificing carton strength in the top panel and handle areas. In particular, the carton disclosed in the '741 patent includes a first blank forming the various panels, including the handle containing panel, when folded into the wraparound configuration. An added separate sheet of paperboard is an insert that is glued to the interior surface of the top panel to reinforce the top panel handle area. While this arrangement may allow for the use of thinner and/or weaker, less expensive carton materials, it greatly reduces the production and assembly rates and manufacturing efficiency for the carton. The need to produce the insert in a separate manufacturing operation, the need to match, align and join the insert with the primary blank, and the need to acquire, utilize and maintain specialized equipment for the process results in increased cost and increased process complexity. The need to match, align and join the insert with the primary blank requires slower line speeds, results in more quality control problems, greater cost and complexity and greater paperboard consumption. Taking this into consideration, it is one object of this invention to provide a beverage carton which has the necessary tear strength and rigidity in the area surrounding the handle, but is more cost-effective, utilizing thinner and/or environmentally friendly paperboard and which can be produced at or near top line speeds and production rates. Accordingly, it has been another objective of this invention to provide a novel carrying handle for a carton and, particularly, for a wraparound type carton, where the handle's structural components are formed directly from the carton blank. 
And with this type of handle, it is another objective of this invention to provide an improved carrying handle structure which maintains the structural integrity of the wraparound carton through the distribution chain until it is chosen by a retail consumer, which is very easy to render usable, and to use, by the retail consumer once the carton has been so chosen, and which does not adversely impact on the structural integrity of the carton when the handle is punched out of the carton blank.
<reponame>skypies/flightdb2<filename>analysis/straightlinedisplacement.go package analysis import ( "fmt" "github.com/skypies/geo/sfo" "github.com/skypies/util/histogram" fdb "github.com/skypies/flightdb" "github.com/skypies/flightdb/report" ) func init() { report.HandleReport("straightlinedisplacement", StraightLineDisplacementReporter, "Lateral displacement from the line {refpoint} to {refpoint2}") } func StraightLineDisplacementReporter(r *report.Report, f *fdb.Flight, tis []fdb.TrackIntersection) (report.FlightReportOutcome, error){ if r.ReferencePoint.IsNil() { return report.RejectedByReport, fmt.Errorf("report option {refpoint} not defined") } else if r.ReferencePoint2.IsNil() { return report.RejectedByReport, fmt.Errorf("report option {refpoint2} not defined") } wp1,wp2 := r.ReferencePoint.Name, r.ReferencePoint2.Name line := sfo.KFixes[wp1].LineTo(sfo.KFixes[wp2]) for _,wp := range []string{wp1,wp2} { if !f.HasWaypoint(wp) { r.I[fmt.Sprintf("[C] Flights without %s", wp)]++ return report.RejectedByReport, nil } } typePicked,track := f.PreferredTrack([]string{"ADSB", "MLAT", "FOIA"}) if typePicked == "" { r.I["[D] Skipped, no ADSB or FOIA track avail"]++ return report.RejectedByReport,nil } r.I[fmt.Sprintf("[D] <b>Accepted for displacement analysis %s-%s</b>", wp1, wp2)]++ r.I[fmt.Sprintf("[Y] <b>ALL VALUES IN METRES</b>")]++ clipped := fdb.Track(track.ClipTo(f.Waypoints[wp1], f.Waypoints[wp2])) sampled := clipped.SampleEveryDist(1.0, false) // Uses integers; so displacement in metres hist := histogram.Histogram{ValMin:0, ValMax:1000, NumBuckets:20} for _,tp := range sampled { distKM := line.ClosestDistance(tp.Latlong) distM := int(distKM * 1000.0) hist.Add(histogram.ScalarVal(distM)) r.H.Add(histogram.ScalarVal(distM)) } row := []string{ r.Links(f), "<code>" + f.IdentString() + "</code>", "<pre>" + hist.String() + "</pre>", } r.AddRow(&row, &row) return report.Accepted, nil }
#include "auto_f2c.h"
#include "auto_c.h"
#include "auto_types.h"

#ifdef PTHREADS
/* The parallel version of make_fa is only tested on Pthreads.
   This will probably work on the MPI version, but I want to keep it
   here until I get a chance to test it. */
#define PTHREADS_PARALLEL_FA
/* These are not needed anymore, but I am going to keep them around for a
   bit until I am sure that everything works without the mutexes.  The
   homecont code does NOT currently work multithreaded, since it has
   several global variables that need to be gotten rid of. */
/*
#define PTHREADS_USE_FUNI_MUTEX
#define PTHREADS_USE_BCNI_MUTEX
#define PTHREADS_USE_ICNI_MUTEX
*/
/* Serializes calls into the user funi/bcni/icni wrappers when the
   PTHREADS_USE_*_MUTEX macros above are enabled. */
pthread_mutex_t mutex_for_funi = PTHREAD_MUTEX_INITIALIZER;
#endif

/* Worker routine: fills the collocation Jacobian blocks AA, BB and CC for the
   mesh intervals [loop_start, loop_end).  Runs either directly (serial), as a
   pthread body, or on an MPI worker.  With PTHREADS_PARALLEL_FA defined it
   also computes the right-hand side FA for its intervals before returning.
   `arg` is a setubv_parallel_arglist*; always returns NULL. */
void *setubv_make_aa_bb_cc(void * arg)
{
  /* System generated locals */
  integer dbc_dim1, dicd_dim1, dfdu_dim1, dfdp_dim1;

  /* Local variables */
  integer i, j, k, l, m;
  integer k1, l1;
  integer i1,j1;
  integer ib, ic, jj;
  doublereal dt;
  integer ib1, ic1;
  integer jp1;
  doublereal ddt;
#ifdef MANIFOLD
  integer udotps_off;
#endif

  setubv_parallel_arglist *larg = (setubv_parallel_arglist *)arg;
  /* Scratch buffers for the user-function Jacobians and solution samples. */
  doublereal *dicd, *ficd, *dfdp, *dfdu, *uold;
  doublereal *f;
  doublereal *u, **wploc;
  doublereal *dbc, *fbc, *uic, *uio, *prm, *uid, *uip, *ubc0, *ubc1;
  /* Local aliases into the shared argument list (read/write arrays). */
  doublereal **ups = larg->ups;
  doublereal **upoldp = larg->upoldp;
  doublereal **udotps = larg->udotps;
  doublereal **uoldps = larg->uoldps;
  doublereal ***aa = larg->aa;
  doublereal ***bb = larg->bb;
  doublereal ***cc = larg->cc;
  doublereal **wp = larg->wp;
  doublereal **wt = larg->wt;

#ifdef USAGE
  struct rusage *setubv_make_aa_bb_cc_usage,*fa_usage;
  usage_start(&setubv_make_aa_bb_cc_usage);
#endif

  /* Integral-constraint scratch is only needed when constraints exist. */
  if (larg->nint > 0) {
    dicd = (doublereal *)MALLOC(sizeof(doublereal)*(larg->nint)*(larg->ndim + NPARX));
    ficd = (doublereal *)MALLOC(sizeof(doublereal)*(larg->nint));
  } else
    ficd = dicd = NULL;
  dfdp = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim)*NPARX);
  dfdu = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim)*(larg->ndim));
  uold = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  f = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  u = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  wploc= DMATRIX(larg->ncol+1, larg->ncol);
  dbc = (doublereal *)MALLOC(sizeof(doublereal)*(larg->nbc)*(2*larg->ndim + NPARX));
  fbc = (doublereal *)MALLOC(sizeof(doublereal)*(larg->nbc));
  uic = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  uio = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  prm = (doublereal *)MALLOC(sizeof(doublereal)*NPARX);
  uid = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  uip = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  ubc0 = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));
  ubc1 = (doublereal *)MALLOC(sizeof(doublereal)*(larg->ndim));

  /* Leading dimensions used by the ARRAY2D macro for the scratch Jacobians. */
  dicd_dim1 = larg->nint;
  dbc_dim1 = larg->nbc;
  dfdu_dim1 = larg->ndim;
  dfdp_dim1 = larg->ndim;

  /* Generate AA and BB: */
  /* Partition the mesh intervals */
  /* jj iterates over this worker's share of intervals, [loop_start, loop_end). */
  for (jj = larg->loop_start; jj < larg->loop_end; ++jj) {
    j = jj;
    jp1 = j + 1;
    dt = larg->dtm[j];
    ddt = 1. / dt;
    /* Scale the derivative weights by 1/dt for this interval. */
    for (ic = 0; ic < larg->ncol; ++ic) {
      for (ib = 0; ib < larg->ncol + 1; ++ib) {
        wploc[ib][ic] = ddt * wp[ib][ic];
      }
    }
    /* This loop uses the loop_offset variable since ups and uoldps are sent
       by the MPI version in their entirety, but loop_start and loop_end have
       been shifted.  loop_offset contains the original value of loop_start
       and removes the shift. */
    for (ic = 0; ic < larg->ncol; ++ic) {
      /* Interpolate u and uold at collocation point ic of interval j. */
      for (k = 0; k < larg->ndim; ++k) {
        u[k] = wt[larg->ncol][ic] * ups[jp1 + larg->loop_offset][k];
        uold[k] = wt[larg->ncol][ic] * uoldps[jp1 + larg->loop_offset][k];
        for (l = 0; l < larg->ncol; ++l) {
          l1 = l * larg->ndim + k;
          u[k] += wt[l][ic] * ups[j + larg->loop_offset][l1];
          uold[k] += wt[l][ic] * uoldps[j + larg->loop_offset][l1];
        }
      }
      for (i = 0; i < NPARX; ++i) {
        prm[i] = larg->par[i];
      }
      /* OK, this is a little weird, so hold tight.  This function is
         actually a pointer to a wrapper function, which eventually calls the
         user-defined func_.  Which wrapper is used depends on what kind of
         problem it is.  The need for the mutex is because some of these
         wrappers use a common block for temporary storage.
         NOTE!!!:  The icni and bcni wrappers do the same thing, so if they
         ever get parallelized they need to be checked as well. */
#ifdef PTHREADS_USE_FUNI_MUTEX
#ifdef PTHREADS
      pthread_mutex_lock(&mutex_for_funi);
#endif
#endif
      /* Evaluate f(u) and the Jacobians df/du, df/dp at this point. */
      (*(larg->funi))(larg->iap, larg->rap, larg->ndim, u, uold, larg->icp, prm, 2, f, dfdu, dfdp);
#ifdef PTHREADS_USE_FUNI_MUTEX
#ifdef PTHREADS
      pthread_mutex_unlock(&mutex_for_funi);
#endif
#endif
      ic1 = ic * (larg->ndim);
      /* AA block: derivative weights on the diagonal minus wt * df/du. */
      for (ib = 0; ib < larg->ncol + 1; ++ib) {
        double wt_tmp=wt[ib][ic];
        double wploc_tmp=wploc[ib][ic];
        ib1 = ib * larg->ndim;
        for (i = 0; i < larg->ndim; ++i) {
          aa[jj][ic1 + i][ib1 + i] = wploc_tmp;
          for (k = 0; k < larg->ndim; ++k) {
            aa[jj][ic1 + i][ib1 + k] -= wt_tmp * ARRAY2D(dfdu, i, k);
          }
        }
      }
      /* BB block: minus the parameter derivatives for the free parameters. */
      for (i = 0; i < larg->ndim; ++i) {
        for (k = 0; k < larg->ncb; ++k) {
          bb[jj][ic1 + i][k] = -ARRAY2D(dfdp, i, larg->icp[k]);
        }
      }
    }
  }

  /* Generate CC : */
  /* Boundary conditions : */
  if (larg->nbc > 0) {
    for (i = 0; i < larg->ndim; ++i) {
      ubc0[i] = ups[0][i];
      ubc1[i] = ups[larg->na][i];
    }
#ifdef PTHREADS_USE_BCNI_MUTEX
#ifdef PTHREADS
    pthread_mutex_lock(&mutex_for_funi);
#endif
#endif
    (*(larg->bcni))(larg->iap, larg->rap, larg->ndim, larg->par, larg->icp, larg->nbc, ubc0, ubc1, fbc, 2, dbc);
#ifdef PTHREADS_USE_BCNI_MUTEX
#ifdef PTHREADS
    pthread_mutex_unlock(&mutex_for_funi);
#endif
#endif
    for (i = 0; i < larg->nbc; ++i) {
      for (k = 0; k < larg->ndim; ++k) {
        /* NOTE!! This needs to be split up.  Only the first processor does
           the first part, and only the last processor does the last part. */
        if(larg->loop_offset + larg->loop_start == 0) {
          cc[0][i][k] = ARRAY2D(dbc, i, k);
        }
        if(larg->loop_offset + larg->loop_end == larg->na) {
          cc[larg->na-1 - larg->loop_offset][i][larg->nra + k] = ARRAY2D(dbc ,i , larg->ndim + k);
        }
      }
    }
  }

  /* Integral constraints : */
  if (larg->nint > 0) {
    for (jj = larg->loop_start; jj < larg->loop_end; ++jj) {
      j = jj;
      jp1 = j + 1;
      for (k = 0; k < (larg->ncol + 1); ++k) {
        for (i = 0; i < larg->ndim; ++i) {
          i1 = k * larg->ndim + i;
          j1 = j;
          /* The last basis point of interval j is the first point of j+1. */
          if (k+1 == (larg->ncol + 1)) {
            i1 = i;
          }
          if (k+1 == (larg->ncol + 1)) {
            j1 = jp1;
          }
          uic[i] = ups[j1 + larg->loop_offset][i1];
          uio[i] = uoldps[j1 + larg->loop_offset][i1];
          uid[i] = udotps[j1 + larg->loop_offset][i1];
          uip[i] = upoldp[j1 + larg->loop_offset][i1];
        }
#ifdef PTHREADS_USE_ICNI_MUTEX
#ifdef PTHREADS
        pthread_mutex_lock(&mutex_for_funi);
#endif
#endif
        (*(larg->icni))(larg->iap, larg->rap, larg->ndim, larg->par, larg->icp, larg->nint, uic, uio, uid, uip, ficd, 2, dicd);
#ifdef PTHREADS_USE_ICNI_MUTEX
#ifdef PTHREADS
        pthread_mutex_unlock(&mutex_for_funi);
#endif
#endif
        /* Quadrature contribution of this basis point to the CC rows. */
        for (m = 0; m < larg->nint; ++m) {
          for (i = 0; i < larg->ndim; ++i) {
            k1 = k * larg->ndim + i;
            cc[jj][larg->nbc + m][k1] = larg->dtm[j] * larg->wi[k
              ] * ARRAY2D(dicd, m, i);
          }
        }
      }
    }
  }

  /* Pseudo-arclength equation : */
#ifdef MANIFOLD
  /* Row stride between successive arclength direction vectors in udotps. */
  udotps_off=larg->iap->ntst + 1;
#endif
  for (jj = larg->loop_start; jj < larg->loop_end; ++jj) {
#ifdef MANIFOLD
    for (m = 0; m < larg->nalc; ++m) {
#endif
      for (i = 0; i < larg->ndim; ++i) {
        for (k = 0; k < larg->ncol; ++k) {
          k1 = k * larg->ndim + i;
#ifndef MANIFOLD
          cc[jj][larg->nrc - 1][k1] = larg->dtm[jj] * larg->thu[i] * larg->wi[k] * udotps[jj + larg->loop_offset][k1];
#else
          cc[jj][larg->nrc - 1][k1] = larg->dtm[jj] * larg->thu[i] * larg->wi[k] * udotps[jj + larg->loop_offset + m * udotps_off][k1];
#endif
        }
        /* Contribution of the interval's right endpoint (weight wi[ncol]). */
#ifndef MANIFOLD
        cc[jj][larg->nrc -1][larg->nra + i] = larg->dtm[jj] * larg->thu[i] * larg->wi[larg->ncol] * udotps[jj + 1 + larg->loop_offset][i];
#else
        cc[jj][larg->nrc -1][larg->nra + i] = larg->dtm[jj] * larg->thu[i] * larg->wi[larg->ncol] * udotps[jj + 1 + larg->loop_offset + m*udotps_off][i];
      }
#endif
    }
  }

#ifdef PTHREADS_PARALLEL_FA
#ifdef USAGE
  usage_start(&fa_usage);
#endif
  /* Also build the right-hand side FA for this worker's intervals. */
  setubv_make_fa(*larg);
#ifdef USAGE
  usage_end(fa_usage,"setubv make fa");
#endif
#endif

  FREE(dicd );
  FREE(ficd );
  FREE(dfdp );
  FREE(dfdu );
  FREE(uold );
  FREE(f );
  FREE(u );
  FREE_DMATRIX(wploc);
  FREE(dbc );
  FREE(fbc );
  FREE(uic );
  FREE(uio );
  FREE(prm );
  FREE(uid );
  FREE(uip );
  FREE(ubc0 );
  FREE(ubc1 );

#ifdef USAGE
  usage_end(setubv_make_aa_bb_cc_usage,"in setubv worker");
#endif
  return NULL;
}

#ifdef PTHREADS
/* Splits the na mesh intervals evenly across global_num_procs threads, runs
   setubv_make_aa_bb_cc in each, and joins them all.  Always returns 0. */
int setubv_threads_wrapper(setubv_parallel_arglist data)
{
  setubv_parallel_arglist *send_data;
  int i;
  pthread_t *th;
  void * retval;
  pthread_attr_t attr;
  int retcode;
#ifdef USAGE
  struct timeval *pthreads_create,*pthreads_join,*pthreads_all;
  time_start(&pthreads_create);
  time_start(&pthreads_all);
#endif
  th = (pthread_t *)MALLOC(sizeof(pthread_t)*global_num_procs);
  /* Each thread gets its own copy of the argument list with a private
     [loop_start, loop_end) slice; the array pointers inside are shared. */
  send_data = (setubv_parallel_arglist *)MALLOC(sizeof(setubv_parallel_arglist)*global_num_procs);
  pthread_attr_init(&attr);
  pthread_attr_setscope(&attr,PTHREAD_SCOPE_SYSTEM);
  for(i=0;i<global_num_procs;i++) {
    setubv_parallel_arglist_copy(&send_data[i],data);
    send_data[i].loop_start = (i*(data.na))/global_num_procs;
    send_data[i].loop_end = ((i+1)*(data.na))/global_num_procs;
    send_data[i].loop_offset = 0;
    retcode = pthread_create(&th[i], &attr, setubv_make_aa_bb_cc, (void *) &send_data[i]);
    if (retcode != 0)
      fprintf(stderr, "create %d failed %d\n", i, retcode);
  }
#ifdef USAGE
  time_end(pthreads_create,"setubv pthreads create",fp9);
  time_start(&pthreads_join);
#endif
  for(i=0;i<global_num_procs;i++) {
    retcode = pthread_join(th[i], &retval);
    if (retcode != 0)
      fprintf(stderr, "join %d failed %d\n", i, retcode);
  }
  FREE(send_data);
  FREE(th);
#ifdef USAGE
  time_end(pthreads_join,"setubv pthreads join",fp9);
  time_end(pthreads_all,"setubv pthreads all",fp9);
#endif
  return 0;
}
#endif

#ifdef MPI
/* Master-side MPI driver: wakes the workers, sends each its slice bounds,
   broadcasts the problem parameters and packed state arrays, and scatters the
   per-interval mesh sizes.  The workers then run setubv_make_aa_bb_cc.
   Always returns 0.
   NOTE(review): the *_counts / *_displacements arrays and the pack buffer are
   never freed here — this leaks on every call; confirm against upstream. */
int setubv_mpi_wrapper(setubv_parallel_arglist data)
{
  integer loop_start,loop_end;
  integer loop_start_tmp,loop_end_tmp;
  integer loop_offset;
  int i,comm_size;
  int *aa_counts,*aa_displacements;
  int *bb_counts,*bb_displacements;
  int *cc_counts,*cc_displacements;
  int *dtm_counts,*dtm_displacements;
  MPI_Comm_size(MPI_COMM_WORLD,&comm_size);
  aa_counts=(int *)MALLOC(sizeof(int)*comm_size);
  aa_displacements=(int *)MALLOC(sizeof(int)*comm_size);
  bb_counts=(int *)MALLOC(sizeof(int)*comm_size);
  bb_displacements=(int *)MALLOC(sizeof(int)*comm_size);
  cc_counts=(int *)MALLOC(sizeof(int)*comm_size);
  cc_displacements=(int *)MALLOC(sizeof(int)*comm_size);
  dtm_counts=(int *)MALLOC(sizeof(int)*comm_size);
  dtm_displacements=(int *)MALLOC(sizeof(int)*comm_size);

  /* Rank 0 (the master) does no interval work itself. */
  aa_counts[0] = 0;
  aa_displacements[0] = 0;
  bb_counts[0] = 0;
  bb_displacements[0] = 0;
  cc_counts[0] = 0;
  cc_displacements[0] = 0;
  dtm_counts[0] = 0;
  dtm_displacements[0] = 0;

  for(i=1;i<comm_size;i++){
    /* Send message to get worker into setubv mode */
    {
      int message=AUTO_MPI_SETUBV_MESSAGE;
      MPI_Send(&message,1,MPI_INT,i,0,MPI_COMM_WORLD);
    }
    /* Worker i handles intervals [loop_start, loop_end) of the global mesh. */
    loop_start = ((i-1)*(data.na))/(comm_size - 1);
    loop_end = ((i)*(data.na))/(comm_size - 1);
    aa_counts[i] = (data.nca)*(data.nra)*(loop_end-loop_start);
    aa_displacements[i] = (data.nca)*(data.nra)*loop_start;
    bb_counts[i] = (data.ncb)*(data.nra)*(loop_end-loop_start);
    bb_displacements[i] = (data.ncb)*(data.nra)*loop_start;
    cc_counts[i] = (data.nca)*(data.nrc)*(loop_end-loop_start);
    cc_displacements[i] = (data.nca)*(data.nrc)*loop_start;
    dtm_counts[i] = (loop_end-loop_start);
    dtm_displacements[i] = (loop_start);
    /* Workers see a zero-based slice; loop_offset restores global indexing. */
    loop_start_tmp = 0;
    loop_end_tmp = loop_end-loop_start;
    MPI_Send(&loop_start_tmp ,1,MPI_LONG,i,0,MPI_COMM_WORLD);
    MPI_Send(&loop_end_tmp ,1,MPI_LONG,i,0,MPI_COMM_WORLD);
    loop_offset = loop_start;
    MPI_Send(&loop_offset ,1,MPI_LONG,i,0,MPI_COMM_WORLD);
  }
  {
    /* Broadcast the scalar problem dimensions in one array. */
    integer params[11];
    params[0]=data.na;
    params[1]=data.ndim;
    params[2]=data.ips;
    params[3]=data.ncol;
    params[4]=data.nbc;
    params[5]=data.nint;
    params[6]=data.ncb;
    params[7]=data.nrc;
    params[8]=data.nra;
    params[9]=data.nca;
    params[10]=data.ndxloc;
    MPI_Bcast(params ,11,MPI_LONG,0,MPI_COMM_WORLD);
  }
  {
    int position=0;
    void *buffer;
    int bufsize;
    int size_int,size_double;
    int niap,nrap;
    /* Here we compute the number of elements in the iap and rap structures.
       Since each of the structures is homogeneous we just divide the total
       size by the size of the individual elements. */
    niap = sizeof(iap_type)/sizeof(integer);
    nrap = sizeof(rap_type)/sizeof(doublereal);
    /* Size the pack buffer for everything broadcast below, in order. */
    MPI_Pack_size(niap+NPARX2,MPI_LONG,MPI_COMM_WORLD,&size_int);
    MPI_Pack_size(nrap+NPARX2+
		  (data.ndxloc)*(data.ndim)*(data.ncol)+
		  (data.ndxloc)*(data.ndim)*(data.ncol)+
		  (data.ncol + 1)*(data.ncol)+
		  (data.ncol + 1)*(data.ncol)+
		  (data.ncol + 1)+
		  (data.ndxloc)*(data.ndim)*(data.ncol)+
		  (data.ndxloc)*(data.ndim)*(data.ncol)+
		  (data.ndim)*8+
		  NPARX+
		  NPARX,
		  MPI_DOUBLE,MPI_COMM_WORLD,&size_double);
    bufsize = size_int + size_double;
    buffer=MALLOC((unsigned)bufsize);

    MPI_Pack(data.iap    ,niap,MPI_LONG,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.rap    ,nrap,MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    /**********************************************/
    MPI_Pack(data.par    ,NPARX2,MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.icp    ,NPARX2,MPI_LONG,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.ups    ,(data.ndxloc)*(data.ndim)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.uoldps ,(data.ndxloc)*(data.ndim)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.wp     ,(data.ncol + 1)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.wt     ,(data.ncol + 1)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.wi     ,(data.ncol + 1),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.udotps ,(data.ndxloc)*(data.ndim)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.upoldp ,(data.ndxloc)*(data.ndim)*(data.ncol),MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.thu    ,(data.ndim)*8,MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.thl    ,NPARX,MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);
    MPI_Pack(data.rldot  ,NPARX,MPI_DOUBLE,buffer,bufsize,&position,MPI_COMM_WORLD);

    MPI_Bcast(buffer     ,position,MPI_PACKED,0,MPI_COMM_WORLD);
  }
  /* Each worker receives only its own slice of the mesh sizes. */
  MPI_Scatterv(data.dtm        ,dtm_counts,dtm_displacements,MPI_DOUBLE,
	       NULL,0,MPI_DOUBLE,
	       0,MPI_COMM_WORLD);
  /* Worker runs here */
  return 0;
}
#endif

/* Serial fallback: run the worker routine directly over all intervals. */
int setubv_default_wrapper(setubv_parallel_arglist data)
{
  setubv_make_aa_bb_cc((void *)&data);
  return 0;
}

/* Top-level entry point: sets up the collocation system (AA, BB, CC, DD, FA,
   FC) for one Newton step of the extended boundary-value problem.
   Initializes the quadrature weights and zeroes the blocks, dispatches the
   AA/BB/CC (and possibly FA) construction to the serial, pthreads or MPI
   worker according to global_setubv_type, then computes FC and DD on this
   process.  The MANIFOLD build generalizes the single pseudo-arclength
   equation to nalc of them (rds becomes a vector).  Always returns 0. */
#ifndef MANIFOLD
int setubv(integer ndim, integer ips, integer na, integer ncol,
	   integer nbc, integer nint, integer ncb, integer nrc,
	   integer nra, integer nca, FUNI_TYPE((*funi)),
	   BCNI_TYPE((*bcni)), ICNI_TYPE((*icni)), integer ndxloc,
	   iap_type *iap, rap_type *rap, doublereal *par, integer *icp,
	   doublereal rds, doublereal ***aa, doublereal ***bb,
	   doublereal ***cc, doublereal **dd, doublereal **fa,
	   doublereal *fc, doublereal *rlcur, doublereal *rlold,
	   doublereal *rldot, doublereal **ups, doublereal **uoldps,
	   doublereal **udotps, doublereal **upoldp, doublereal **dups,
	   doublereal *dtm, doublereal *thl, doublereal *thu,
	   doublereal **p0, doublereal **p1)
#else
int setubv(integer ndim, integer ips, integer na, integer ncol,
	   integer nbc, integer nint, integer nalc, integer ncb, integer nrc,
	   integer nra, integer nca, FUNI_TYPE((*funi)),
	   BCNI_TYPE((*bcni)), ICNI_TYPE((*icni)), integer ndxloc,
	   iap_type *iap, rap_type *rap, doublereal *par, integer *icp,
	   doublereal *rds, doublereal ***aa, doublereal ***bb,
	   doublereal ***cc, doublereal **dd, doublereal **fa,
	   doublereal *fc, doublereal *rlcur, doublereal *rlold,
	   doublereal *rldot, doublereal **ups, doublereal **uoldps,
	   doublereal **udotps, doublereal **upoldp, doublereal **dups,
	   doublereal *dtm, doublereal *thl, doublereal *thu,
	   doublereal **p0, doublereal **p1)
#endif
{
  /* Local variables */
  integer i, j, k;
  doublereal *wi, **wp, **wt;

#ifdef USAGE
  /* NOTE(review): fa_usage is used below under !PTHREADS_PARALLEL_FA but is
     not declared here (only in the worker) — a non-PTHREADS USAGE build
     looks like it would not compile; confirm against upstream.
     parallel_overhead_usage appears unused. */
  struct rusage *initialization_usage,*fc_usage,*parallel_overhead_usage;
  usage_start(&initialization_usage);
#endif

  wi = (doublereal *)MALLOC(sizeof(doublereal)*(ncol+1) );
  wp = DMATRIX(ncol+1, ncol);
  wt = DMATRIX(ncol+1, ncol);

  /* Collocation quadrature and basis weights. */
  wint(ncol + 1, wi);
  genwts(ncol, ncol + 1, wt, wp);

  /* Initialize to zero. */
  for (i = 0; i < nrc; ++i) {
    fc[i] = 0.;
    for (k = 0; k < ncb; ++k) {
      dd[i][k] = 0.;
    }
  }

  /* Set constants. */
  for (i = 0; i < ncb; ++i) {
    par[icp[i]] = rlcur[i];
  }

  /* NA is the local node's mesh interval number. */
  for (i = 0; i < na; ++i) {
    for (j = 0; j < nra; ++j) {
      for (k = 0; k < nca; ++k) {
	aa[i][j][k] = 0.;
      }
    }
    for (j = 0; j < nra; ++j) {
      for (k = 0; k < ncb; ++k) {
	bb[i][j][k] = 0.;
      }
    }
    for (j = 0; j < nrc; ++j) {
      for (k = 0; k < nca; ++k) {
	cc[i][j][k] = 0.;
      }
    }
  }

  /* ** Time evolution computations (parabolic systems) */
  if (ips == 14 || ips == 16) {
    rap->tivp = rlold[0];
  }
#ifdef USAGE
  usage_end(initialization_usage,"setubv initialization");
#endif
  {
    setubv_parallel_arglist arglist;
#ifndef MANIFOLD
    setubv_parallel_arglist_constructor(ndim, ips, na, ncol,
					nbc, nint, ncb, nrc, nra, nca, funi, icni, ndxloc,
					iap, rap, par, icp, aa, bb, cc, dd, fa, fc, ups, uoldps,
					udotps, upoldp, dtm, wp, wt, wi, thu, thl, rldot,
					bcni, &arglist);
#else
    setubv_parallel_arglist_constructor(ndim, ips, na, ncol,
					nbc, nint, nalc, ncb, nrc, nra, nca, funi, icni, ndxloc,
					iap, rap, par, icp, aa, bb, cc, dd, fa, fc, ups, uoldps,
					udotps, upoldp, dtm, wp, wt, wi, thu, thl, rldot,
					bcni, &arglist);
#endif
    /* Dispatch AA/BB/CC construction to the configured backend. */
    switch(global_setubv_type) {
#ifdef PTHREADS
    case SETUBV_PTHREADS:
      setubv_threads_wrapper(arglist);
      break;
#endif
#ifdef MPI
    case SETUBV_MPI:
      if(global_verbose_flag)
	printf("Setubv MPI start\n");
      setubv_mpi_wrapper(arglist);
      if(global_verbose_flag)
	printf("Setubv MPI end\n");
      break;
#endif
    default:
      setubv_default_wrapper(arglist);
      break;
    }
    /* Without PTHREADS_PARALLEL_FA, FA was not built by the worker above. */
#ifndef PTHREADS_PARALLEL_FA
#ifdef USAGE
    usage_start(&fa_usage);
#endif
    setubv_make_fa(arglist);
#ifdef USAGE
    usage_end(fa_usage,"setubv make fa");
#endif
#endif
#ifdef USAGE
    usage_start(&fc_usage);
#endif
    setubv_make_fc_dd(arglist,dups,rlcur,rlold,rds);
#ifdef USAGE
    usage_end(fc_usage,"setubv make fc");
#endif
  }
  FREE(wi );
  FREE_DMATRIX(wp);
  FREE_DMATRIX(wt);
  return 0;
}

/* Builds the right-hand side FA (collocation residuals f(u) - u') for the
   intervals [loop_start, loop_end) of larg.  Mirrors the interpolation loop
   of setubv_make_aa_bb_cc. */
void setubv_make_fa(setubv_parallel_arglist larg) {
  integer i,j,k,l;
  integer ic,k1,ib;
  integer jj,jp1,l1,ic1;
  doublereal dt,ddt;

  doublereal **ups = larg.ups;
  doublereal **uoldps = larg.uoldps;
  doublereal **wp = larg.wp;
  doublereal **wt = larg.wt;
  doublereal **fa = larg.fa;

  doublereal **wploc= DMATRIX(larg.ncol+1, larg.ncol);
  doublereal *dfdp = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim)*NPARX);
  doublereal *dfdu = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim)*(larg.ndim));
  doublereal *u = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *uold = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *f = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *prm = (doublereal *)MALLOC(sizeof(doublereal)*NPARX);

  for (jj = larg.loop_start; jj < larg.loop_end; ++jj) {
    j = jj;
    jp1 = j + 1;
    dt = larg.dtm[j];
    ddt = 1. / dt;
    for (ic = 0; ic < larg.ncol; ++ic) {
      for (ib = 0; ib < larg.ncol + 1; ++ib) {
	wploc[ib][ic] = ddt * wp[ib][ic];
      }
    }
    for (ic = 0; ic < larg.ncol; ++ic) {
      for (k = 0; k < larg.ndim; ++k) {
	/* NOTE(review): unlike setubv_make_aa_bb_cc, these two endpoint reads
	   index ups/uoldps at [jp1] without + larg.loop_offset (the inner
	   l-loop below does add it) — looks inconsistent for MPI slices;
	   confirm against upstream. */
	u[k] = wt[larg.ncol][ic] * ups[jp1][k];
	uold[k] = wt[larg.ncol][ic] * uoldps[jp1][k];
	for (l = 0; l < larg.ncol; ++l) {
	  l1 = l * larg.ndim + k;
	  u[k] += wt[l][ic] * ups[j + larg.loop_offset][l1];
	  uold[k] += wt[l][ic] * uoldps[j + larg.loop_offset][l1];
	}
      }
      for (i = 0; i < NPARX; ++i) {
	prm[i] = larg.par[i];
      }
#ifdef PTHREADS_USE_FUNI_MUTEX
#ifdef PTHREADS
      pthread_mutex_lock(&mutex_for_funi);
#endif
#endif
      (*(larg.funi))(larg.iap, larg.rap, larg.ndim, u, uold, larg.icp, prm, 2, f, dfdu, dfdp);
#ifdef PTHREADS_USE_FUNI_MUTEX
#ifdef PTHREADS
      pthread_mutex_unlock(&mutex_for_funi);
#endif
#endif
      ic1 = ic * (larg.ndim);
      /* Residual: f(u) minus the weighted derivative of the interpolant. */
      for (i = 0; i < larg.ndim; ++i) {
	fa[ic1 + i][jj] = f[i] - wploc[larg.ncol][ic] * ups[jp1 + larg.loop_offset][i];
	for (k = 0; k < larg.ncol; ++k) {
	  k1 = k * larg.ndim + i;
	  fa[ic1 + i][jj] -= wploc[k][ic] * ups[j + larg.loop_offset][k1];
	}
      }
    }
  }
  FREE_DMATRIX(wploc);
  FREE(dfdp);
  FREE(dfdu);
  FREE(u);
  FREE(uold);
  FREE(f);
  FREE(prm);
}

/* Builds the condensed right-hand side FC (boundary conditions, integral
   constraints, pseudo-arclength equation) and the parameter-derivative block
   DD.  Runs on the master process only; in the MANIFOLD build rds is a
   vector of nalc arclength steps instead of a scalar. */
#ifndef MANIFOLD
void setubv_make_fc_dd(setubv_parallel_arglist larg, doublereal **dups,
		       doublereal *rlcur, doublereal *rlold, doublereal rds) {
#else
void setubv_make_fc_dd(setubv_parallel_arglist larg, doublereal **dups,
		       doublereal *rlcur, doublereal *rlold, doublereal *rds) {
#endif
  integer i,j,jj,jp1,k,i1,m,j1;
  doublereal rlsum;
  doublereal **dd = larg.dd;
  doublereal **ups = larg.ups;
  doublereal **uoldps = larg.uoldps;
  doublereal **udotps = larg.udotps;
  doublereal **upoldp = larg.upoldp;
  /* Leading dimensions for the ARRAY2D macro. */
  integer dbc_dim1 = larg.nbc;
  doublereal *dbc = (doublereal *)MALLOC(sizeof(doublereal)*(larg.nbc)*(2*larg.ndim + NPARX));
  doublereal *fbc = (doublereal *)MALLOC(sizeof(doublereal)*(larg.nbc));
  doublereal *ubc0 = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *ubc1 = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  integer dicd_dim1 = larg.nint;
  doublereal *dicd = NULL;
  doublereal *ficd = NULL;
  doublereal *uic = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *uio = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *uid = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
  doublereal *uip = (doublereal *)MALLOC(sizeof(doublereal)*(larg.ndim));
#ifdef MANIFOLD
  integer udotps_off;
#endif

  if (larg.nint > 0) {
    dicd = (doublereal *)MALLOC(sizeof(doublereal)*(larg.nint)*(larg.ndim + NPARX));
    ficd = (doublereal *)MALLOC(sizeof(doublereal)*(larg.nint));
  }

  /* Boundary condition part of FC */
  if (larg.nbc > 0) {
    for (i = 0; i < larg.ndim; ++i) {
      ubc0[i] = ups[0][i];
      ubc1[i] = ups[larg.na][i];
    }
    (*(larg.bcni))(larg.iap, larg.rap, larg.ndim, larg.par, larg.icp, larg.nbc, ubc0, ubc1, fbc, 2, dbc);
    for (i = 0; i < larg.nbc; ++i) {
      larg.fc[i] = -fbc[i];
      /* DD rows for the boundary conditions: d(bc)/d(free parameters). */
      for (k = 0; k < larg.ncb; ++k) {
	dd[i][k] = ARRAY2D(dbc, i, (larg.ndim *2) + larg.icp[k]);
      }
    }
    /* Save difference : */
    for (j = 0; j < larg.na + 1; ++j) {
      for (i = 0; i < larg.nra; ++i) {
	dups[j][i] = ups[j][i] - uoldps[j][i];
      }
    }
  }

  /* Integral constraint part of FC */
  if (larg.nint > 0) {
    for (jj = larg.loop_start; jj < larg.loop_end; ++jj) {
      j = jj;
      jp1 = j + 1;
      for (k = 0; k < (larg.ncol + 1); ++k) {
	for (i = 0; i < larg.ndim; ++i) {
	  i1 = k * larg.ndim + i;
	  j1 = j;
	  /* The last basis point of interval j is the first point of j+1. */
	  if (k+1 == (larg.ncol + 1)) {
	    i1 = i;
	  }
	  if (k+1 == (larg.ncol + 1)) {
	    j1 = jp1;
	  }
	  uic[i] = ups[j1][i1];
	  uio[i] = uoldps[j1][i1];
	  uid[i] = udotps[j1][i1];
	  uip[i] = upoldp[j1][i1];
	}
	(*(larg.icni))(larg.iap, larg.rap, larg.ndim, larg.par, larg.icp, larg.nint, uic, uio, uid, uip, ficd, 2, dicd);
	for (m = 0; m < larg.nint; ++m) {
	  larg.fc[larg.nbc + m] -= larg.dtm[j] * larg.wi[k] * ficd[m];
	  for (i = 0; i < larg.ncb; ++i) {
	    dd[larg.nbc + m][i] += larg.dtm[j] * larg.wi[k] * ARRAY2D(dicd, m, larg.ndim + larg.icp[i]);
	  }
	}
      }
    }
  }

  /* Pseudo-arclength equation part of FC and DD. */
#ifndef MANIFOLD
  for (i = 0; i < larg.ncb; ++i) {
    dd[larg.nrc-1][i] = larg.thl[larg.icp[i]] * larg.rldot[i];
  }
  rlsum = 0.;
  for (i = 0; i < larg.ncb; ++i) {
    rlsum += larg.thl[larg.icp[i]] * (rlcur[i] - rlold[i]) * larg.rldot[i];
  }
  larg.fc[larg.nrc-1] = rds - rinpr(larg.iap, &(larg.ndim), &(larg.ndxloc), larg.udotps, dups, larg.dtm, larg.thu) - rlsum;
#else
  /* One arclength equation per continuation direction. */
  udotps_off=(larg.iap->ntst + 1)*(larg.iap->ndim * larg.iap->ncol);
  for (m = 0; m < larg.nalc; ++m) {
    for (i = 0; i < larg.ncb; ++i) {
      dd[larg.nbc+larg.nint+m][i] = larg.thl[larg.icp[i]] * larg.rldot[i+m*NPARX];
    }
    rlsum = 0.;
    for (i = 0; i < larg.ncb; ++i) {
      rlsum += larg.thl[larg.icp[i]] * (rlcur[i] - rlold[i]) * larg.rldot[i+m*NPARX];
    }
    larg.fc[larg.nrc-1+m] = rds[m] - rinpr(larg.iap, &(larg.ndim), &(larg.ndxloc), larg.udotps, dups, larg.dtm, larg.thu) - rlsum;
  }
#endif

  FREE(dbc);
  FREE(fbc);
  FREE(ubc0);
  FREE(ubc1);
  FREE(dicd);
  FREE(ficd);
  FREE(uic);
  FREE(uio);
  FREE(uid);
  FREE(uip);
}

/* Copy a setubv_parallel_arglist (shallow: the array pointers are shared). */
void setubv_parallel_arglist_copy(setubv_parallel_arglist *output, const setubv_parallel_arglist input)
{
  memcpy(output,&input,sizeof(setubv_parallel_arglist));
}

/* Fill in a setubv_parallel_arglist from the individual variables.  The loop
   bounds default to the full mesh [0, na) with zero offset; callers that
   partition the work overwrite them afterwards. */
#ifndef MANIFOLD
void setubv_parallel_arglist_constructor(integer ndim, integer ips, integer na, integer ncol,
					 integer nbc, integer nint, integer ncb,
					 integer nrc, integer nra, integer nca, FUNI_TYPE((*funi)),
					 ICNI_TYPE((*icni)), integer ndxloc,
					 iap_type *iap, rap_type *rap, doublereal *par, integer *icp,
					 doublereal ***aa, doublereal ***bb, doublereal ***cc,
					 doublereal **dd, doublereal **fa, doublereal *fc,
					 doublereal **ups, doublereal **uoldps, doublereal **udotps,
					 doublereal **upoldp, doublereal *dtm,
					 doublereal **wp, doublereal **wt, doublereal *wi,
					 doublereal *thu, doublereal *thl, doublereal *rldot,
					 BCNI_TYPE((*bcni)), setubv_parallel_arglist *data) {
#else
void setubv_parallel_arglist_constructor(integer ndim, integer ips, integer na, integer ncol,
					 integer nbc, integer nint, integer nalc, integer ncb,
					 integer nrc, integer nra, integer nca, FUNI_TYPE((*funi)),
					 ICNI_TYPE((*icni)), integer ndxloc,
					 iap_type *iap, rap_type *rap, doublereal *par, integer *icp,
					 doublereal ***aa, doublereal ***bb, doublereal ***cc,
					 doublereal **dd, doublereal **fa, doublereal *fc,
					 doublereal **ups, doublereal **uoldps, doublereal **udotps,
					 doublereal **upoldp, doublereal *dtm,
					 doublereal **wp, doublereal **wt, doublereal *wi,
					 doublereal *thu, doublereal *thl, doublereal *rldot,
					 BCNI_TYPE((*bcni)), setubv_parallel_arglist *data) {
#endif
  data->ndim = ndim;
  data->ips = ips;
  data->ncol = ncol;
  data->nbc = nbc;
  data->nint = nint;
#ifdef MANIFOLD
  data->nalc = nalc;
#endif
  data->ncb = ncb;
  data->nrc = nrc;
  data->nra = nra;
  data->nca = nca;
  data->na = na;
  data->funi = funi;
  data->icni = icni;
  data->ndxloc = ndxloc;
  data->iap = iap;
  data->rap = rap;
  data->par = par;
  data->icp = icp;
  data->aa = aa;
  data->bb = bb;
  data->cc = cc;
  data->dd = dd;
  data->fa = fa;
  data->fc = fc;
  data->ups = ups;
  data->uoldps = uoldps;
  data->udotps = udotps;
  data->upoldp = upoldp;
  data->dtm = dtm;
  data->loop_start = 0;
  data->loop_end = na;
  data->loop_offset = 0;
  data->wp = wp;
  data->wt = wt;
  data->wi = wi;
  data->thu = thu;
  data->thl = thl;
  data->rldot = rldot;
  data->bcni = bcni;
}
<gh_stars>0 package io.katharsis.resource.mock.repository; import io.katharsis.resource.mock.repository.util.Relation; import java.util.Iterator; import java.util.concurrent.ConcurrentMap; public abstract class AbstractRelationShipRepository<T> { abstract ConcurrentMap<Relation<T>, Integer> getRepo(); public void setRelation(T source, Long targetId, String fieldName) { removeRelations(fieldName); if (targetId != null) { getRepo().put(new Relation<>(source, targetId, fieldName), 0); } } public void setRelations(T source, Iterable<Long> targetIds, String fieldName) { removeRelations(fieldName); if (targetIds != null) { for (Long targetId : targetIds) { getRepo().put(new Relation<>(source, targetId, fieldName), 0); } } } public void addRelations(T source, Iterable<Long> targetIds, String fieldName) { for (Long targetId : targetIds) { getRepo().put(new Relation<>(source, targetId, fieldName), 0); } } public void removeRelations(T source, Iterable<Long> targetIds, String fieldName) { for (Long targetId : targetIds) { Iterator<Relation<T>> iterator = getRepo().keySet().iterator(); while (iterator.hasNext()) { Relation<T> next = iterator.next(); if (next.getFieldName().equals(fieldName) && next.getTargetId().equals(targetId)) { iterator.remove(); } } } } public void removeRelations(String fieldName) { Iterator<Relation<T>> iterator = getRepo().keySet().iterator(); while (iterator.hasNext()) { Relation<T> next = iterator.next(); if (next.getFieldName().equals(fieldName)) { iterator.remove(); } } } }
Prime Minister Stephen Harper has renewed his condemnation of Russian President Vladimir Putin, even as the G7 summit seemed to balk at Ukraine's plea for weapons in its struggle with Russian-backed separatists. Harper told U.S. television channel CNBC in an interview that Putin does not share Western values and has "no place" at the summit, which chose not to invite the Russian leader for the second year in a row. "We are having a discussion on the shared interests of the Western democratic world," Harper said Sunday. "Mr. Putin, who is in no way part of that, has no place at the table, and I don't believe there's any leader who would defend Mr. Putin having a place." German Chancellor Angela Merkel said G7 members agreed Monday that sanctions on Russia must remain in place as long as Russia fails to respect a ceasefire agreed to in Minsk in February. Harper said the Russian economy has little in common with Western ones. "Mr. Putin runs an entirely different system … he runs an economy that is dominated by oligarchs and criminal syndicates. It is not at all like our economy, it doesn't share our interests, it doesn't share our values, and so I think we need to have discussions where we can really rally the shared interests of the Western democratic world." Harper added that Putin's presence in what was previously the G8 was not productive. "His presence in the past, quite frankly, was undermining the coherence and effectiveness of this organization, and I don't think there is much appetite to have him back. Certainly Canada, and I know others, would strongly oppose him ever returning."
1. Field of the Invention This invention relates to iron, nickel, cobalt and/or chromium rich amorphous alloys that contain refractory metals and low boron content. 2. Description of the Prior Art Chen et al. in U.S. Pat. No. 3,856,513, issued Dec. 24, 1974, have disclosed glassy alloys consisting essentially of about 60 to 90 atom percent of at least one element of iron, nickel, cobalt, vanadium and chromium, about 10 to 30 atom percent of at least one element of phosphorus, boron and carbon and about 0.1 to 15 atom percent of at least one element of aluminum, silicon, tin, germanium, indium, antimony and beryllium. Up to about one-fourth of the metal may be replaced by elements which commonly alloy with iron and nickel, such as molybdenum, titanium, manganese, tungsten, zirconium, hafnium and copper. Chen et al. also discloses wires of glassy alloys having the general formula T.sub.i X.sub.j, where T is a transition metal and X is an element selected from the group consisting of phosphorus, boron, carbon, aluminum, silicon, tin, germanium, indium, beryllium and antimony, and where "i" ranges from about 70 to 87 atom percent and "j" ranges from about 13 to 30 atom percent. More recently, Masumoto et al. have disclosed iron-chromium glassy alloys consisting essentially of about 1 to 40 atom percent chromium, 7 to 35 atom percent of at least one of carbon, boron and phosphorus and the balance iron. Up to about 40 atom percent of at least one of nickel and cobalt, up to 20 atom percent of at least one of molybdenum, zirconium, titanium and manganese and up to about 10 atom percent of at least one of vanadium, niobium, tungsten, tantalum and copper may also be employed. Elements useful for improving mechanical properties include molybdenum, zirconium, titanium, vanadium, niobium, tantalum, tungsten, copper and manganese, while elements effective for improving the heat resistance include molybdenum, zirconium, titanium, vanadium, niobium, tantalum and tungsten. 
Efforts to develop new compositions which are easily formed in the glassy state with superior mechanical properties and which at the same time retain high thermal stability are continuing. Substantial amounts of metalloid elements (typically 15 to 25 atom percent) are usually found most suitable for producing the glassy state under reasonable quenching conditions of at least about 10.sup.5 .degree. C./sec, consistent with forming a ductile product. However, such high metalloid content combined with a high refractory metal content also may result in increasing brittleness of the glassy alloy in the as-quenched state.
/*
 * Interprets a ctype=cvalue criterion pair and adds it to the given match
 * specification.
 *
 * Regex-valued criteria (class, instance, window_role, con_mark, title,
 * workspace) free any previously-set pattern and compile the new one.
 * con_id/id parse an integer (con_id also accepts the special value
 * "__focused__"); window_type maps a keyword onto an _NET_WM_WINDOW_TYPE
 * atom; urgent and tiling/floating set enum fields.  Parse failures are
 * logged and recorded in match->error; an unknown ctype is only logged.
 */
void match_parse_property(Match *match, const char *ctype, const char *cvalue) {
    assert(match != NULL);
    DLOG("ctype=*%s*, cvalue=*%s*\n", ctype, cvalue);

    if (strcmp(ctype, "class") == 0) {
        /* Replace (not accumulate): drop any previously compiled pattern. */
        regex_free(match->class);
        match->class = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "instance") == 0) {
        regex_free(match->instance);
        match->instance = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "window_role") == 0) {
        regex_free(match->window_role);
        match->window_role = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "con_id") == 0) {
        /* "__focused__" resolves to the currently focused container. */
        if (strcmp(cvalue, "__focused__") == 0) {
            match->con_id = focused;
            return;
        }

        long parsed;
        if (!parse_long(cvalue, &parsed, 0)) {
            ELOG("Could not parse con id \"%s\"\n", cvalue);
            match->error = sstrdup("invalid con_id");
        } else {
            /* The parsed number is the container's pointer value. */
            match->con_id = (Con *)parsed;
            DLOG("id as int = %p\n", match->con_id);
        }
        return;
    }

    if (strcmp(ctype, "id") == 0) {
        long parsed;
        if (!parse_long(cvalue, &parsed, 0)) {
            ELOG("Could not parse window id \"%s\"\n", cvalue);
            match->error = sstrdup("invalid id");
        } else {
            match->id = parsed;
            DLOG("window id as int = %d\n", match->id);
        }
        return;
    }

    if (strcmp(ctype, "window_type") == 0) {
        /* Keyword -> EWMH _NET_WM_WINDOW_TYPE_* atom (case-insensitive). */
        if (strcasecmp(cvalue, "normal") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_NORMAL;
        } else if (strcasecmp(cvalue, "dialog") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_DIALOG;
        } else if (strcasecmp(cvalue, "utility") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_UTILITY;
        } else if (strcasecmp(cvalue, "toolbar") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_TOOLBAR;
        } else if (strcasecmp(cvalue, "splash") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_SPLASH;
        } else if (strcasecmp(cvalue, "menu") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_MENU;
        } else if (strcasecmp(cvalue, "dropdown_menu") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_DROPDOWN_MENU;
        } else if (strcasecmp(cvalue, "popup_menu") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_POPUP_MENU;
        } else if (strcasecmp(cvalue, "tooltip") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_TOOLTIP;
        } else if (strcasecmp(cvalue, "notification") == 0) {
            match->window_type = A__NET_WM_WINDOW_TYPE_NOTIFICATION;
        } else {
            ELOG("unknown window_type value \"%s\"\n", cvalue);
            match->error = sstrdup("unknown window_type value");
        }

        return;
    }

    if (strcmp(ctype, "con_mark") == 0) {
        regex_free(match->mark);
        match->mark = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "title") == 0) {
        regex_free(match->title);
        match->title = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "urgent") == 0) {
        /* Several synonyms per direction; unknown values are silently
         * ignored here (no match->error), unlike window_type above. */
        if (strcasecmp(cvalue, "latest") == 0 ||
            strcasecmp(cvalue, "newest") == 0 ||
            strcasecmp(cvalue, "recent") == 0 ||
            strcasecmp(cvalue, "last") == 0) {
            match->urgent = U_LATEST;
        } else if (strcasecmp(cvalue, "oldest") == 0 ||
                   strcasecmp(cvalue, "first") == 0) {
            match->urgent = U_OLDEST;
        }
        return;
    }

    if (strcmp(ctype, "workspace") == 0) {
        regex_free(match->workspace);
        match->workspace = regex_new(cvalue);
        return;
    }

    if (strcmp(ctype, "tiling") == 0) {
        match->window_mode = WM_TILING;
        return;
    }

    if (strcmp(ctype, "floating") == 0) {
        match->window_mode = WM_FLOATING;
        return;
    }

    ELOG("Unknown criterion: %s\n", ctype);
}
import os from os import path from eiffel_loop.scons.c_library import LIBRARY_INFO from eiffel_loop.package import TAR_GZ_SOFTWARE_PACKAGE info = LIBRARY_INFO ('source/taglib.getlib') pkg = TAR_GZ_SOFTWARE_PACKAGE (info.url, info.c_dev, info.extracted) # create links to `include' and `test_dir' links = { info.Var_include : info.include, info.Var_test_data : info.test_data } for link_dir in links.keys (): if links [link_dir]: print link_dir, ':', links [link_dir] if not path.exists (link_dir): print path.join (pkg.unpacked_dir, links [link_dir]) os.symlink (path.join (pkg.unpacked_dir, links [link_dir]), link_dir)
Diversity of denizens of the atherosclerotic plaque: not all monocytes are created equal. The atherosclerotic plaque typically harbors cells of several lineages whose conversations, mediated by extracellular or cell surface–associated messengers, decisively influence the biology and clinical consequences of the lesion. Early vascular biology studies defined the resting state of the endothelium, characterized by the elaboration of antithrombotic and vasodilatory mediators. The activated endothelium recruits inflammatory leukocytes, favors clot accumulation, participates in angiogenesis, and can influence the behavior of subjacent smooth muscle cells in ways that favor atherogenesis and vasoconstriction (Figure). More recently, we have come to appreciate that the endothelial cell not only can exhibit a spectrum of functions, but that some may arise postnatally from bone marrow–derived precursors.1 Thus, the heterogeneity of endothelial cells depends not only on the mutability of their function but also on their origin. The diversity of endothelium depends not only on lineage but also location, with increasingly well-understood differences between arterial, microvascular, and venous endothelial cells. Figure. Heterogeneity of major cell types in atherosclerotic plaques. Vascular biologists have long recognized heterogeneity of endothelial cells and smooth muscle cells, now understood to result from the local mediator milieu, biomechanical stimuli, and different embryological origins. Indeed, recent data suggest that both of these intrinsic vascular cell types can arise in postnatal life from bone marrow–derived precursors. Immunological dogma recognizes several T-cell populations, exemplified here by the Th1 and Th2 subsets, which on balance exert opposing influences on atherogenesis. New data now establish the relevance to atherosclerosis and hyperlipidemia of monocyte heterogeneity.
Monocytes that bear high levels of the markers Ly6c/Gr-1 and P-selectin glycoprotein ligand exhibit more proinflammatory functions than their
/** * Created by willianzhao on 6/8/14. */ public class MapFileClient { Configuration config; public MapFileClient(Configuration config) { this.config = config; } public void createEventMapFile(String endDateStr) throws Exception { EventsMapFileFactory eventMapFile = null; eventMapFile = new EventsMapFileFactory(config); eventMapFile.loadMapFile(); } public void createTicketMapFile(String startDateStr, String endDateStr) throws Exception { TicketsMapFileFactory ticketMapFile = new TicketsMapFileFactory(config, startDateStr, endDateStr); ticketMapFile.loadMapFile(); } public void createUserMapFIle(String endDateStr) throws Exception { UserMapFileFactory userMapFile = new UserMapFileFactory(config); userMapFile.loadMapFile(); } public void createUserContactsMapFIle(String endDateStr) throws Exception { UserContactsMapFileFactory userContactsMapFile = new UserContactsMapFileFactory(config); userContactsMapFile.loadMapFile(); } public void createZipcodeLookupMapFIle() throws Exception { GeonamesZipcodeMapFileFactory geonamesMapfile = new GeonamesZipcodeMapFileFactory(config); geonamesMapfile.loadMapFile(); } }
import {ErrorWithMessage} from "./ErrorWithMessage"; /** * Connection error */ export class UnknownError extends ErrorWithMessage { /** * Unknown error * * @return this */ public static createUnknownError() { return new this("Unknown error."); } }
#python2 import math def abs(x): return x if x > 0 else -x def main(): n = int(raw_input().strip()) a = map(int, raw_input().strip().split()) m = max(a) b = [[0] * (m + 1) for _ in xrange(n + 1)] for i in xrange(1, n + 1): mn = b[i - 1][0] for j in xrange(m + 1): mn = min(mn, b[i - 1][j]) b[i][j] = mn + abs(a[i - 1] - j) print min(b[n]) if __name__ == '__main__': main()
package rtypes

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"math/big"
	"strings"
	"time"

	cptypes "github.com/lianxiangcloud/linkchain/libs/cryptonote/types"

	"github.com/lianxiangcloud/linkchain/libs/common"
	cmn "github.com/lianxiangcloud/linkchain/libs/common"
	"github.com/lianxiangcloud/linkchain/libs/crypto"
	"github.com/lianxiangcloud/linkchain/libs/hexutil"
	"github.com/lianxiangcloud/linkchain/libs/p2p"
	"github.com/lianxiangcloud/linkchain/libs/ser"
	"github.com/lianxiangcloud/linkchain/types"
)

// ResultValidators is the RPC response for a validator-set query.
type ResultValidators struct {
	BlockHeight       uint64             `json:"block_height"`
	LastHeightChanged uint64             `json:"last_changed_height"`
	Validators        []*types.Validator `json:"validators"`
}

// MarshalJSON encodes via a local alias type so ser.MarshalJSON does not
// re-enter this method (the alias type has no MarshalJSON of its own).
func (r ResultValidators) MarshalJSON() ([]byte, error) {
	type data ResultValidators
	enc := data(r)
	return ser.MarshalJSON(enc)
}

// UnmarshalJSON is the inverse of MarshalJSON; r is only overwritten on success.
func (r *ResultValidators) UnmarshalJSON(input []byte) error {
	type data ResultValidators
	enc := data(*r)
	err := ser.UnmarshalJSON(input, &enc)
	if err == nil {
		*r = ResultValidators(enc)
	}
	return err
}

// PeerStateInfo pairs a peer address with its raw consensus-state dump.
type PeerStateInfo struct {
	NodeAddress string          `json:"node_address"`
	PeerState   json.RawMessage `json:"peer_state"`
}

// UNSTABLE
type ResultConsensusState struct {
	RoundState json.RawMessage `json:"round_state"`
}

// ResultDumpConsensusState carries the local round state plus every peer's state.
type ResultDumpConsensusState struct {
	RoundState json.RawMessage `json:"round_state"`
	Peers      []PeerStateInfo `json:"peers"`
}

// ResultBlockHeader wraps a header so it serializes as the bare header object.
type ResultBlockHeader struct {
	*types.Header
}

func (b ResultBlockHeader) MarshalJSON() ([]byte, error) {
	return ser.MarshalJSON(b.Header)
}

// Single block (with meta)
type ResultBlock struct {
	BlockMeta *types.BlockMeta `json:"block_meta"`
	Block     *types.Block     `json:"block"`
}

func (r ResultBlock) MarshalJSON() ([]byte, error) {
	type data ResultBlock
	enc := data(r)
	return ser.MarshalJSON(enc)
}

func (r *ResultBlock) UnmarshalJSON(input []byte) error {
	type data ResultBlock
	enc := data(*r)
	err := ser.UnmarshalJSON(input, &enc)
	if err == nil {
		*r = ResultBlock(enc)
	}
	return err
}

// Info about the node's syncing state
type SyncInfo struct {
	LatestBlockHash   cmn.HexBytes `json:"latest_block_hash"`
	LatestAppHash     cmn.HexBytes `json:"latest_app_hash"`
	LatestBlockHeight uint64       `json:"latest_block_height"`
	LatestBlockTime   time.Time    `json:"latest_block_time"`
	CatchingUp        bool         `json:"catching_up"`
}

// Info about the node's validator
type ValidatorInfo struct {
	Address     cmn.HexBytes  `json:"address"`
	PubKey      crypto.PubKey `json:"pub_key"`
	VotingPower int64         `json:"voting_power"`
}

// Node Status
type ResultStatus struct {
	NodeInfo      p2p.NodeInfo  `json:"node_info"`
	SyncInfo      SyncInfo      `json:"sync_info"`
	ValidatorInfo ValidatorInfo `json:"validator_info"`
}

// Is TxIndexing enabled.  Looks for a "tx_index=on" entry in NodeInfo.Other;
// safe to call on a nil receiver.
func (s *ResultStatus) TxIndexEnabled() bool {
	if s == nil {
		return false
	}
	for _, s := range s.NodeInfo.Other {
		info := strings.Split(s, "=")
		if len(info) == 2 && info[0] == "tx_index" {
			return info[1] == "on"
		}
	}
	return false
}

func (s ResultStatus) MarshalJSON() ([]byte, error) {
	type data ResultStatus
	enc := data(s)
	return ser.MarshalJSON(enc)
}

// Peer describes one connected peer.
type Peer struct {
	p2p.NodeInfo     `json:"node_info"`
	IsOutbound       bool                 `json:"is_outbound"`
	ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
}

// Info about peer connections
type ResultNetInfo struct {
	Listening bool     `json:"listening"`
	Listeners []string `json:"listeners"`
	NPeers    int      `json:"n_peers"`
	Peers     []Peer   `json:"peers"`
}

func (r ResultNetInfo) MarshalJSON() ([]byte, error) {
	type data ResultNetInfo
	enc := data(r)
	return ser.MarshalJSON(enc)
}

// WholeBlock bundles a fully rendered RPC block with its receipts.
type WholeBlock struct {
	Block    *RPCBlock      `json:"block"`
	Receipts types.Receipts `json:"receipts"`
}

// NewWholeBlock renders the block with transactions included and expanded.
func NewWholeBlock(block *types.Block, receipts types.Receipts) *WholeBlock {
	return &WholeBlock{
		Block:    NewRPCBlock(block, true, true),
		Receipts: receipts,
	}
}

type ReceiptsWithBlockHeight struct {
	BlockHeight uint64         `json:"height"`
	Receipts    types.Receipts `json:"receipts"`
}

func NewReceiptsWithBlockHeight(blockHeight uint64, receipts types.Receipts) *ReceiptsWithBlockHeight {
	return &ReceiptsWithBlockHeight{
		BlockHeight: blockHeight,
		Receipts:    receipts,
	}
}

type BalanceRecordsWithBlockHeight struct {
	BlockHeight         uint64                     `json:"height"`
	BlockBalanceRecords *types.BlockBalanceRecords `json:"block_balance_records"`
}

func NewBalanceRecordsWithBlockMsg(blockHeight uint64, bbr *types.BlockBalanceRecords) *BalanceRecordsWithBlockHeight {
	return &BalanceRecordsWithBlockHeight{
		BlockHeight:         blockHeight,
		BlockBalanceRecords: bbr,
	}
}

// ITX is any transaction representation (full object or bare hash).
type ITX interface{}

type txsAlias Txs

// Txs is a mixed list of transaction representations.
type Txs []ITX

func (t Txs) MarshalJSON() ([]byte, error) {
	ec := txsAlias(t)
	return ser.MarshalJSON(ec)
}

func (t *Txs) UnmarshalJSON(input []byte) error {
	dec := &txsAlias{}
	err := ser.UnmarshalJSON(input, dec)
	if err == nil {
		*t = Txs(*dec)
	}
	return err
}

type rpcBlockAlias RPCBlock

// RPCBlock is the JSON-RPC wire form of a block (Ethereum-style field names).
type RPCBlock struct {
	Height          *hexutil.Big     `json:"number"`
	Hash            *common.Hash     `json:"hash"`
	Coinbase        *common.Address  `json:"miner"`
	Time            *hexutil.Big     `json:"timestamp"`
	ParentHash      common.Hash      `json:"parentHash"`
	DataHash        common.Hash      `json:"transactionsRoot"`
	StateHash       common.Hash      `json:"stateRoot"`
	ReceiptHash     common.Hash      `json:"receiptsRoot"`
	GasLimit        hexutil.Uint64   `json:"gasLimit"`
	GasUsed         hexutil.Uint64   `json:"gasUsed"`
	Bloom           types.Bloom      `json:"logsBloom"`
	Txs             Txs              `json:"transactions"`
	TokenOutputSeqs map[string]int64 `json:"token_output_seqs"`
}

// NewRPCBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
func NewRPCBlock(b *types.Block, inclTx bool, fullTx bool) *RPCBlock { if b == nil || b.Header == nil { return nil } head := b.Header // copies the header once hash := b.Hash() block := &RPCBlock{ Height: (*hexutil.Big)(big.NewInt(int64(head.Height))), Hash: &hash, Coinbase: &head.Coinbase, Time: (*hexutil.Big)(big.NewInt(int64(head.Time))), ParentHash: head.ParentHash, DataHash: head.DataHash, StateHash: b.StateHash, ReceiptHash: head.ReceiptHash, GasLimit: hexutil.Uint64(head.GasLimit), GasUsed: hexutil.Uint64(head.GasUsed), Bloom: head.Bloom(), } if !inclTx { return block } formatTx := func(tx types.Tx, index uint64) interface{} { return tx.Hash() } if fullTx { formatTx = func(tx types.Tx, index uint64) interface{} { return NewRPCTx(tx, nil) } } txs := b.Txs transactions := make(Txs, 0, len(txs)) for i, tx := range txs { if v := formatTx(tx, uint64(i)); v != nil { transactions = append(transactions, v) } } block.Txs = transactions return block } // NewRPCBlockUTXO converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. // only return utxo txs. 
func NewRPCBlockUTXO(b *types.Block, inclTx bool, fullTx bool, tokenOutputSeqs map[string]int64) *RPCBlock { if b == nil || b.Header == nil { return nil } head := b.Header // copies the header once hash := b.Hash() block := &RPCBlock{ Height: (*hexutil.Big)(big.NewInt(int64(head.Height))), Hash: &hash, Coinbase: &head.Coinbase, Time: (*hexutil.Big)(big.NewInt(int64(head.Time))), ParentHash: head.ParentHash, DataHash: head.DataHash, StateHash: b.StateHash, ReceiptHash: head.ReceiptHash, GasLimit: hexutil.Uint64(head.GasLimit), GasUsed: hexutil.Uint64(head.GasUsed), Bloom: head.Bloom(), TokenOutputSeqs: tokenOutputSeqs, } if !inclTx { return block } formatTx := func(tx types.Tx, index uint64) interface{} { return tx.Hash() } if fullTx { formatTx = func(tx types.Tx, index uint64) interface{} { return NewRPCTx(tx, nil) } } txs := b.Txs transactions := make(Txs, 0, len(txs)) for i, tx := range txs { switch t := tx.(type) { case *types.UTXOTransaction: if v := formatTx(t, uint64(i)); v != nil { transactions = append(transactions, v) } default: } } block.Txs = transactions return block } type RPCBalanceRecord struct { From common.Address `json:"from"` To common.Address `json:"to"` FromAddressType hexutil.Uint `json:"from_address_type"` ToAddressType hexutil.Uint `json:"to_address_type"` Type string `json:"type"` TokenID common.Address `json:"token_id"` Amount *hexutil.Big `json:"amount"` Hash common.Hash `json:"hash"` } type RPCTxBalanceRecords struct { Hash common.Hash `json:"hash"` Type string `json:"type"` Records []RPCBalanceRecord `json:"records"` Payloads []*hexutil.Bytes `json:"payloads"` Nonce hexutil.Uint64 `json:"nonce"` GasLimit hexutil.Uint64 `json:"gas_limit"` GasPrice *hexutil.Big `json:"gas_price"` From common.Address `json:"from"` To common.Address `json:"to"` TokenId common.Address `json:"token_id"` } type RPCBlockBalanceRecords struct { BlockTime hexutil.Uint64 `json:"block_time"` BlockHash common.Hash `json:"block_hash"` TxRecords []*RPCTxBalanceRecords 
`json:"tx_records"` } func NewRPCBlockBalanceRecord(bbr *types.BlockBalanceRecords) *RPCBlockBalanceRecords { txRecords := make([]*RPCTxBalanceRecords, 0) for _, tx := range bbr.TxRecords { records := make([]RPCBalanceRecord, 0) for _, br := range tx.Records { record := RPCBalanceRecord{ From: br.From, To: br.To, FromAddressType: hexutil.Uint(br.FromAddressType), ToAddressType: hexutil.Uint(br.ToAddressType), Type: br.Type, TokenID: br.TokenID, Amount: (*hexutil.Big)(br.Amount), Hash: br.Hash, } records = append(records, record) } payloads := make([]*hexutil.Bytes, 0) for _, payload := range tx.Payloads { payloads = append(payloads, (*hexutil.Bytes)(&payload)) } txRecord := &RPCTxBalanceRecords{ Hash: tx.Hash, Type: tx.Type, Payloads: payloads, Nonce: hexutil.Uint64(tx.Nonce), GasLimit: hexutil.Uint64(tx.GasLimit), GasPrice: (*hexutil.Big)(tx.GasPrice), Records: records, From: tx.From, To: tx.To, TokenId: tx.TokenId, } txRecords = append(txRecords, txRecord) } rbbr := &RPCBlockBalanceRecords{ BlockTime: hexutil.Uint64(bbr.BlockTime), BlockHash: bbr.BlockHash, TxRecords: txRecords, } return rbbr } type iRPCTx interface { TypeName() string Hash() common.Hash From() (common.Address, error) } //RPCTx represents a RPCTx that will serialize to the RPC representation of a tx. 
type rpcTxAlias RPCTx type RPCTx struct { TxType string `json:"txType"` TxHash common.Hash `json:"txHash"` SignHash *common.Hash `json:"signHash,omitempty"` From *common.Address `json:"from,omitempty"` Tx types.Tx `json:"tx"` TxEntry *types.TxEntry `json:"txEntry,omitempty"` } func (t RPCTx) MarshalJSON() ([]byte, error) { ec := rpcTxAlias(t) return ser.MarshalJSON(ec) } func (t *RPCTx) UnmarshalJSON(input []byte) error { dec := &rpcTxAlias{} err := ser.UnmarshalJSON(input, dec) if err == nil { *t = RPCTx(*dec) } return err } type signHasher interface { SignHash() common.Hash } // NewRPCTx returns a tx that will serialize to the RPC // representation, with the given location metadata set (if available). func NewRPCTx(tx types.Tx, entry *types.TxEntry) *RPCTx { if tx == nil { return nil } itx, ok := tx.(iRPCTx) if !ok { return nil } rpcTx := &RPCTx{ TxEntry: entry, TxType: itx.TypeName(), TxHash: itx.Hash(), Tx: tx, } if rpcTx.TxType == types.TxNormal || rpcTx.TxType == types.TxToken { if sh, ok := tx.(signHasher); ok { signHash := sh.SignHash() rpcTx.SignHash = &signHash } } if from, _ := itx.From(); from != common.EmptyAddress { rpcTx.From = &from } return rpcTx } type TxRecordReq struct { TxHash string `json:"tx_hash"` Type string `json:"type"` } // SignTransactionResult represents a RLP encoded signed transaction. type SignTransactionResult struct { Raw hexutil.Bytes `json:"raw"` Tx types.Tx `json:"tx"` } type RPCKey cptypes.Key func (k RPCKey) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%x"`, k[:])), nil } func (k *RPCKey) UnmarshalJSON(input []byte) error { bytes, err := hex.DecodeString(string(input[1 : len(input)-1])) if err != nil { return err } copy(k[:], bytes) return nil } type RPCOutput struct { Out RPCKey `json:"out"` //UnlockTime uint64 `json:"unlock_time"` Height uint64 `json:"height"` Commit RPCKey `json:"commit"` TokenID common.Address `json:"token"` }
n = int(raw_input()) A = map(int, raw_input().split()) q = int(raw_input()) Q = [0] * q for i in xrange(q): Q[i] = int(raw_input()) Q = [(x,i) for i,x in enumerate(Q,1)] A.sort() Q.sort() c = 0 A.append(10**12) Res = {} for x,i in Q: while x>= A[c]: c+=1 Res[i] = c for i in xrange(1,q+1): print Res[i]
VANCOUVER – Tae Hyun Bang doubled up to win $100,000 in bonus money, and Kajan Johnson and Kiichi Kunimoto each earned $50,000 bonuses for their performances at Saturday’s UFC 174 event. Bang and Johnson earned the “Fight of the Night,” while Bang and Kunimoto each won “Performance of the Night” honors. UFC officials announced the winners at the night’s post-event news conference, which MMAjunkie attended. Bang (17-8 MMA, 1-1 UFC) was on his way to a split decision with Johnson (19-11-1 MMA, 0-1 UFC), the scorecards revealed after the fight. So he just made sure those cards wouldn’t matter. With one massive right hand, right on the button, Bang knocked Johnson out to end the fight in the third round. Kunimoto (17-5 MMA, 2-0 UFC) pulled off the night’s biggest upset, tapping out Daniel Sarafian (8-5 MMA, 1-3 UFC) in the first round in the featured bout on the prelims. Sarafian was more than a 5-to-1 favorite in the fight. UFC 174 took place at Rogers Arena in Vancouver. The main card aired on pay-per-view following prelims on FX and UFC Fight Pass. For complete coverage of UFC 174, stay tuned to the UFC Events section of the site. (Pictured: Tae Hyun Bang) * * * * MMAjunkie’s John Morgan recaps the UFC 174 card, including the biggest surprises and disappointments:
MCMC particle filter-based vehicle tracking method using multiple hypotheses and appearance model In this study, we propose a multiple-vehicle tracking method using multiple hypotheses and an appearance model. The multiple hypotheses are associated with multiple tracks using a track-to-multiple-hypotheses association method. A target state is estimated using the maximum a posteriori probability estimation method. The posterior probability is proportional to the product of the a priori probability and the likelihood, which is calculated using the similarities of the multiple hypotheses and the appearance model. The posterior probability density function is estimated using the Markov chain Monte Carlo particle filter. An optimal posterior target state is determined using the sample with the maximum a posteriori probability. Our experimental results show that the proposed method can improve multiple object tracking precision as well as multiple object tracking accuracy.
#include<bits/stdc++.h> using namespace std; typedef long long int ll; int main(){ ll t; cin>> t; for(ll y=1; y<=t; y++){ ll n, m; cin>> n>> m; ll a[m][2], e=1000000; vector<ll> v, w; for(ll i=0; i<m; i++){ cin>> a[i][0]>> a[i][1]; v.push_back(a[i][0]*e+ i+i); v.push_back(a[i][1]*e+ i+i+1); w.push_back(a[i][0]); } sort(w.begin(),w.end()); w.push_back(0); for(ll i=m-1; i>=0; i--) w[i]+=w[i+1]; sort(v.begin(),v.end()); ll s=0, c=0, ans=0; for(ll i=m+m-1; i>=0; i--){ //cout<< v[i]<< "."; if(v[i]%2==1){ s=w[m-c]; ll k=v[i]%e; k/=2; if(a[k][0]>a[k][1]){ s-=a[k][0]; c--; } if(c<n){ ll d=(n-c-1)*(a[k][1])+a[k][0]+s; ans=max(ans,d); } else{ ans=max(ans,w[m-n]); } if(a[k][0]>a[k][1]){ s+=a[k][0]; c++; } } else c++; }//cout<< endl; cout<< ans<< endl; } }
<gh_stars>0 # -*- coding: utf-8 -*- from nose.tools import ok_, with_setup from fcsite.models import set_user, bbs, users from tests.models import getdb def setup_module(module): getdb().execute("DELETE FROM User") getdb().execute("DELETE FROM BBS") def _setup_testdata(count): def impl(): users.insert("foo", "<PASSWORD>", 1, 0x0) set_user(users.find_by_id(1)) for i in range(count): bbs.post(u"あいうえお" + str(i)) return impl def teardown_testdata(): users.delete_by_id(1) getdb().execute("DELETE FROM BBS") @with_setup(_setup_testdata(5), teardown_testdata) def test_count_posts(): assert bbs.count_posts() == 5 @with_setup(_setup_testdata(0), teardown_testdata) def test_count_posts0(): assert bbs.count_posts() == 0
The Trusted Listener: The Influence of Anthropomorphic Eye Design of Social Robots on User's Perception of Trustworthiness Nowadays, social robots have become humans' important companions. The anthropomorphic features of robots, which are important in building a natural user experience and a trustable human-robot partnership, have attracted increasing attention. Among these features, the eyes attract most of an audience's attention and are particularly important. This study aims to investigate the influence of robot eye design on users' perception of trustworthiness. Specifically, a simulated robot model was developed. Three sets of experiments involving sixty-six participants were conducted to investigate the effects of (i) visual complexity of eye design, (ii) blink rate, and (iii) gaze aversion of social robots on users' perceived trustworthiness. Results indicate that high visual complexity and gaze aversion lead to higher perceived trustworthiness and reveal a positive correlation between the perceived anthropomorphic effect of eye design and users' perceived trust, while no significant effect of blink rate has been found. Preliminary suggestions are provided for the design of social robots in future works.
<commit_before>import codecs from setuptools import find_packages, setup import digestive setup( name='digestive', version=digestive.__version__, url='https://github.com/akaIDIOT/Digestive', packages=find_packages(), description='Run several digest algorithms on the same data efficiently', author='Mattijs Ugen', author_email=codecs.encode('nxnvqvbg@hfref.abercyl.tvguho.pbz', 'rot_13'), license='ISC', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: OSI Approved :: ISC License (ISCL)', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], install_requires=['decorator'], tests_require=['pytest', 'mock'], entry_points={ 'console_scripts': { 'digestive = digestive.main:main' } } ) <commit_msg>Include decorator requirement for tests as well One would think setup.py would include runtime deps with test deps, but no... References #6 <commit_after>import codecs from setuptools import find_packages, setup import digestive requires = ['decorator'] setup( name='digestive', version=digestive.__version__, url='https://github.com/akaIDIOT/Digestive', packages=find_packages(), description='Run several digest algorithms on the same data efficiently', author='Mattijs Ugen', author_email=codecs.encode('nxnvqvbg@hfref.abercyl.tvguho.pbz', 'rot_13'), license='ISC', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: OSI Approved :: ISC License (ISCL)', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], install_requires=requires, tests_require=requires + ['pytest', 'mock', 'decorator'], entry_points={ 'console_scripts': { 'digestive = digestive.main:main' } } )
// Gets the base::FilePath for a named file in the test folder. base::FilePath TestFile(const std::string& ascii_name) { base::FilePath path; EXPECT_TRUE(base::PathService::Get(chrome::DIR_TEST_DATA, &path)); path = path.Append(kTestFilesFolderInTestData); path = path.AppendASCII(ascii_name); base::ScopedAllowBlockingForTesting allow_blocking; EXPECT_TRUE(base::PathExists(path)); return path; }
/** * Open the connection to the GridShibCA server in this.conn. * @throws java.io.IOException */ protected void openConnection() throws IOException { GridShibCAClientLogger.debugMessage("Establishing connection to " + this.url); this.conn = (HttpURLConnection) this.url.openConnection(); this.conn.setRequestProperty("accept", "text/plain"); this.conn.setRequestProperty("User-Agent", "GridShibCA-JWS/" + GridShibCAProperties.getProperty("Version")); if (this.url.getProtocol().equals("https") && (this.mySSLSocketFactory != null)) { HttpsURLConnection sconn = (HttpsURLConnection) this.conn; GridShibCAClientLogger.debugMessage("Using my trustStore with my CAs"); sconn.setSSLSocketFactory(this.mySSLSocketFactory); } this.conn.setDoOutput(true); }
/** * * @param reg Number of registers to clear * @param ... Variable argument list containing registers to clear */ void zero_reg(int reg, ...) { va_list valist; va_start(valist, reg); for (ARM_U_WORD i = 0; i < reg; i++) { gpr.registers[va_arg(valist, int)].data = 0; } va_end(valist); }
/// Returns an error if the input string is not a valid integer. /// /// Used by CLAP. pub fn validate_int(s: String) -> Result<(), String> { macro_rules! abort { () => { return Err(format!("expected integer, found `{}`", s)); }; } if s != "0" { for (idx, char) in s.chars().enumerate() { if idx == 0 { if !char.is_numeric() || char == '0' { abort!() } } else { if !char.is_numeric() { abort!() } } } } Ok(()) }
package model import ( "testing" "os" ) //------------------------------------------------------------------------------ // TestRelationshipConfiguration01 tests the basic functions of the relationshipConfiguration package. func TestRelationshipConfiguration01(t *testing.T) { filename := "test.yaml" // cleanup routine defer func() {os.Remove(filename)}() relationshipConfiguration, _ := NewRelationshipConfiguration("tenant", "tenant", "context", "tenant", "V1.0.0", "") relationshipConfiguration.Save(filename) relationshipConfiguration.Load(filename) relationshipConfiguration.Show() } //------------------------------------------------------------------------------
Power management integrated circuits (PMICs) are employed for managing the power requirements of devices. For example, PMICs are employed to perform power conversion and power control functions. Some PMIC chips incorporate discrete magnetic devices for high frequency power conversion applications. The magnetic devices often occupy about 2 to 3 times the area of a PMIC itself. In order to reduce the whole package size, integrated thin-film magnetics are developed using standard CMOS manufacturing processes. However, the present processes for fabricating integrated thin-film magnetics yield devices with geometric constraints and limited cross section aspect ratio. For instance, the metal thickness and magnetic core thickness are limited to a few um (e.g., 2 to 5 um). The limited thickness usually results in large DC resistance (e.g., more than 1Ω), and low quality factor (e.g., less than 10). From the foregoing discussion, it is desirable to provide a process for forming integrated magnetic devices which solves the above-mentioned problems.
Twin explosions on Monday hit the regional headquarters of Turkey's main pro-Kurdish party in two cities amid escalating tensions in the run up to June 7 legislative elections, a party official said. Six people were injured in the blast at the office of the People's Democratic Party (HDP) in the southern city of Adana, three of them seriously, the official told AFP. The official said the frontal facade of the building sustained damage. Another blast occurred in nearby Mersin in southern Turkey when a flower sent to the party office exploded, the party official said. There were no casualties. The motives and causes of the blasts were not immediately clear. The HDP's co-chairman Selahattin Demirtas was due to address a rally in Mersin later in the day. In April, unidentified assailants opened fire on the HDP headquarters in the capital Ankara, with no casualties. The government condemned that attack as a blow to Turkey's democracy and stability. Tensions are mounting ahead of Turkey's key elections next month in which the HDP is seeking to clear the 10 percent quota to take seats in the parliament. The HDP's success could dent the ruling AKP party's plans to reach a thumping majority in the 550-seat parliament in order to change the constitution and create a presidential system. President Recep Tayyip Erdogan, who steered Turkey as prime minister for 11 years, has appealed to his supporters to help elect 400 AKP lawmakers in June's vote, giving him the backing to rewrite the constitution -- and assume full executive powers himself.
<filename>app/src/main/java/com/aishang/app/ui/about/AboutActivity.java package com.aishang.app.ui.about; import android.content.Intent; import android.os.Bundle; import android.support.v7.widget.Toolbar; import android.view.View; import android.widget.RelativeLayout; import butterknife.Bind; import butterknife.ButterKnife; import butterknife.OnClick; import com.aishang.app.R; import com.aishang.app.ui.base.BaseActivity; public class AboutActivity extends BaseActivity { @Bind(R.id.toolbar) Toolbar toolbar; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_about); ButterKnife.bind(this); initToolbar(); } private void initToolbar() { toolbar.setTitle(""); this.setSupportActionBar(toolbar); toolbar.setNavigationIcon(R.mipmap.iconfont_livesvg); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { onBackPressed(); } }); } @OnClick(R.id.know_we) void onKnowWeClick() { Intent intent = new Intent(this, KnowWeActivity.class); this.startActivity(intent); } @OnClick(R.id.play_travel) void OnTravalCkick() { Intent intent = new Intent(this, TravelActivity.class); this.startActivity(intent); } @OnClick(R.id.borker) void onBorkerClick() { Intent intent = new Intent(this, BrokerActivity.class); this.startActivity(intent); } @OnClick(R.id.join_we) void onJointWeClick() { Intent intent = new Intent(this, HongBaoActivity.class); this.startActivity(intent); } @OnClick(R.id.issues) void onIssuesClick() { Intent intent = new Intent(this, IssuesActivity.class); this.startActivity(intent); } @OnClick(R.id.member) void onMemberClick() { Intent intent = new Intent(this, MemberRuleActivity.class); this.startActivity(intent); } }
Q: Macbook Pro 2018, trackpad issue because of swollen battery or faulty trackpad? I have had a weird trackpad issue on my 15" 2018 Macbook Pro for a few days now. I suspect it's caused by a swollen battery, but I'm not totally sure. Here is the issue: Force Click almost never works; if it does, it's only for a few minutes after booting in the morning. Right click (two-finger click) works fine for a little while after start up, then stops working on part of the trackpad after a while. The longer I use the computer, the larger the area where I can't right-click gets, starting from the upper middle and extending downwards. After a few hours I can only right-click on the sides and bottom third of the trackpad, and sometimes even a single click (one-finger click) doesn't work on the upper middle of the trackpad. Given that the issue worsens depending on how long I use the computer, I think it's a hardware and not a software issue. SMC and NVRAM + PRAM resets didn't help. I have then noticed that my battery might be swollen — at least the bottom of the device is, as pressing lightly on the lower right side makes the upper left rubber foot lift by a few millimeters, and the same for the other side. To be clear, the trackpad itself seems perfectly flat, but the underside of the computer seems swollen. However, I'm not totally convinced it is the battery, as System Report seems to consider it healthy. Furthermore, I'm not sure a swollen battery could affect the trackpad, as, if I understand it correctly, on newer trackpads the clicks aren't mechanical but generated by pressure sensors. I can't get my computer to an Apple Authorized Service Provider (no official Apple Store in my area) before a few days to a week, and the device also went out of warranty about two months ago. So I'm turning to Stack Exchange to get an opinion on the issue and see if the device is still safe to use for a little while, as apparently swollen batteries are potentially dangerous.
A: We can only speculate whether the issue is caused by the swollen battery. HOWEVER, a swollen battery can be a hazard and should be swapped either way. Apple offers extra warranty for batteries that are swollen (2-3 years from purchase depending on the device). An AASP (Apple Authorized Service Provider) should be able to swap the Top Case (which will include the battery as well as the trackpad) under the extended warranty free of charge.
//
// Source code recreated from a .class file by IntelliJ IDEA
// (powered by FernFlower decompiler)
//

package org.apache.flink.connector.clickhouse.internal;

import java.util.Arrays;
import java.util.stream.Collectors;

/**
 * Builds parameterized ClickHouse SQL text: INSERT statements, mutation-style
 * ALTER TABLE ... UPDATE / DELETE statements, and a row-existence probe.
 * All identifiers are backtick-quoted; all values are bound via '?' markers.
 */
public class ClickHouseStatementFactory {

    /** Static utility holder; never instantiated. */
    private ClickHouseStatementFactory() {}

    /**
     * Builds {@code INSERT INTO `table`(`c1`, `c2`) VALUES (?, ?)} with one
     * placeholder per field.
     */
    public static String getInsertIntoStatement(String tableName, String[] fieldNames) {
        StringBuilder columnList = new StringBuilder();
        StringBuilder valueMarkers = new StringBuilder();
        for (int i = 0; i < fieldNames.length; i++) {
            if (i > 0) {
                columnList.append(", ");
                valueMarkers.append(", ");
            }
            columnList.append(quoteIdentifier(fieldNames[i]));
            valueMarkers.append('?');
        }
        return "INSERT INTO "
                + quoteIdentifier(tableName)
                + "("
                + columnList
                + ") VALUES ("
                + valueMarkers
                + ")";
    }

    /**
     * Builds a mutation-style update:
     * {@code ALTER TABLE `t` [ON CLUSTER `c`] UPDATE `f`=? WHERE `k`=? AND ...}.
     * The ON CLUSTER clause is emitted only when {@code clusterName} is non-null.
     */
    public static String getUpdateStatement(
            String tableName, String[] fieldNames, String[] conditionFields, String clusterName) {
        String onCluster =
                clusterName == null ? "" : " ON CLUSTER " + quoteIdentifier(clusterName);
        return "ALTER TABLE "
                + quoteIdentifier(tableName)
                + onCluster
                + " UPDATE "
                + joinEqualsPlaceholder(fieldNames, ", ")
                + " WHERE "
                + joinEqualsPlaceholder(conditionFields, " AND ");
    }

    /**
     * Builds a mutation-style delete:
     * {@code ALTER TABLE `t` [ON CLUSTER `c`] DELETE WHERE `k`=? AND ...}.
     */
    public static String getDeleteStatement(
            String tableName, String[] conditionFields, String clusterName) {
        String onCluster =
                clusterName == null ? "" : " ON CLUSTER " + quoteIdentifier(clusterName);
        return "ALTER TABLE "
                + quoteIdentifier(tableName)
                + onCluster
                + " DELETE WHERE "
                + joinEqualsPlaceholder(conditionFields, " AND ");
    }

    /** Builds {@code SELECT 1 FROM `t` WHERE `k`=? AND ...} for existence checks. */
    public static String getRowExistsStatement(String tableName, String[] conditionFields) {
        return "SELECT 1 FROM "
                + quoteIdentifier(tableName)
                + " WHERE "
                + joinEqualsPlaceholder(conditionFields, " AND ");
    }

    /** Wraps the identifier in backticks (no escaping of embedded backticks). */
    public static String quoteIdentifier(String identifier) {
        return "`" + identifier + "`";
    }

    /** Joins each field as {@code `field`=?} using the given separator. */
    private static String joinEqualsPlaceholder(String[] fields, String separator) {
        StringBuilder clause = new StringBuilder();
        for (int i = 0; i < fields.length; i++) {
            if (i > 0) {
                clause.append(separator);
            }
            clause.append(quoteIdentifier(fields[i])).append("=?");
        }
        return clause.toString();
    }
}
Inter Pipeline Ltd. says it will proceed with construction of the largest project in its history, a $3.5-billion petrochemical facility that will benefit from financial backing of the Province of Alberta. The Heartland Petrochemical Complex, an integrated propane dehydrogenation (PDH) and polypropylene (PP) plant, will be designed to convert approximately 22,000 bbls/d of propane into 525,000 tonnes per year of polymer grade propylene. Propane feedstock for the PDH plant will be sourced from Inter Pipeline’s Redwater Olefinic Fractionator as well as several other third party fractionators in the region. The project will receive $200 million in royalty credits awarded through the province's Petrochemical Diversification Program in December 2016. Stay current on industry headlines, upcoming events and gain access to specialty reports by subscribing to our free daily oil and gas e-news alert. Sign me up The program also awarded up to $300 million to Pembina Pipeline for its proposed petrochemical plant, which has yet to receive a final investment decision. Alberta’s petrochemical industry is currently entirely ethane-based. Both the Pembina and Inter Pipeline projects are aimed at creating value from western Canada’s stranded and abundant propane. Detailed engineering for the PDH facility was awarded to Fluor Corporation in 2013 and is now approximately 85 percent complete, the company said on Monday. Inter Pipeline has also completed early civil work at the site in preparation for facility construction activities in early 2018. Linde Engineering was awarded the front end engineering design contract for the integrated PP facility in 2017, and work is currently approximately 70 percent complete. Construction of this component of the complex is scheduled to begin in the second half of 2018. Inter Pipeline expects to earn an average of $400 million to $500 million per year from the Heartland Petrochemical Complex once operational in late 2021. 
Image: Rendering of the Heartland Petrochemical Complex. Source: Inter Pipeline Ltd.
package ca.uhn.hl7v2.hoh.relay.listener;

import ca.uhn.hl7v2.model.Message;
import org.springframework.beans.factory.NamedBean;
import ca.uhn.hl7v2.protocol.ApplicationRouter.AppRoutingData;
import ca.uhn.hl7v2.protocol.ReceivingApplication;

/**
 * Contract for a relay listener to which receiving applications can be
 * registered. Extends {@link NamedBean}, so every listener implementation
 * exposes a bean name (presumably used to identify the listener within the
 * relay's Spring configuration — confirm against the relay wiring).
 */
public interface IRelayListener extends NamedBean {

	/**
	 * Registers an application to receive messages that match the given
	 * routing data.
	 *
	 * @param theAppRouting routing criteria used to select matching inbound messages
	 * @param theReceivingApplication the application invoked for messages that match
	 */
	void registerApplication(AppRoutingData theAppRouting, ReceivingApplication<? extends Message> theReceivingApplication);

}
import { bindable } from "aurelia-framework"; import { AdaptiveEnsembleConfig } from "../core/models/AdaptiveEnsembleConfig"; import { activationStrategy } from 'aurelia-router'; export class AdaptiveEnsembleConfigControl { @bindable config: AdaptiveEnsembleConfig; ageFunctions: string[] = ["NumberOfLabelsBased", "NumberTrainingInstancesBased"]; bind() { this.setDefaultConfig(); } private setDefaultConfig() { this.config = { epsilon: 0.000001, minTraingInstances: 2, retainmentFraction: 0.1, a: 3, //min val 2 c: 100, //min val 2 ageFunction: "NumberOfLabelsBased", individualPLTProperties: { hd: 32768 } }; } }
Internet of Concrete for Optimal Curing Improper curing is one of the causes that has reduced the lifespan of several concrete structures and necessitated extensive costly repairs. The manual water curing is still a popular method, especially in developing countries with hot climates. However, using this method, it is extremely difficult to achieve effective curing and ensure long-term strength and durability. It also has an adverse impact on the aesthetics of the concrete surface and is both uneconomical and labor intensive. This work presents a conceptual design of an Internet of Concrete (IoC) based automatic water curing system for concrete. It consists of three layers: cyber-physical, edge and cloud. The cyber-physical layer includes multiple distributed embeddable wireless temperature and relative humidity sensors, a smart water valve, water sprinklers, and the concrete element. At the edge layer, sensor data will be stored, analyzed, and processed in real time closer to the data source for faster response. Long-term data for future concrete durability studies will be stored in the cloud layer. The proposed system replenishes the concrete with the exact amount of water needed to properly cure the concrete without the use of manpower, ensuring economical curing, optimal strength, and long-term durability. The system also has several advantages beyond providing optimal curing.
def generate_oemol(smiles):
    """Build a charged OpenEye molecule from a SMILES string.

    Pipeline: parse the SMILES into an ``OEMol``, add explicit hydrogens,
    generate conformers with the module-level ``omega`` instance, assign
    Tripos atom names, apply the neutral-pH protonation model, and assign
    AM1-BCC partial charges.

    Parameters
    ----------
    smiles : str
        SMILES representation of the molecule.

    Returns
    -------
    oechem.OEMol
        The prepared (named, protonated, charged) molecule.
    """
    mol = oechem.OEMol()
    chargeEngine = oequacpac.OEAM1BCCCharges()
    oechem.OEParseSmiles(mol, smiles)
    oechem.OEAddExplicitHydrogens(mol)
    status = omega(mol)
    if not status:
        # Conformer generation failed; warn but continue best-effort, as the
        # remaining steps may still partially succeed.
        # (Fixed: the message previously had an unmatched backtick.)
        print("Something went wrong in `generate_oemol({})`!".format(smiles))
    oechem.OETriposAtomNames(mol)
    oequacpac.OESetNeutralpHModel(mol)
    oequacpac.OEAssignCharges(mol, chargeEngine)
    # NOTE(review): the result is discarded — presumably this call only
    # verifies that a topology can be generated for `mol`; confirm intent.
    _ = generateTopologyFromOEMol(mol)
    return mol
Having a podcast is beneficial to your real estate business because it’s easy networking, it’s inexpensive, and you’ll become more articulate and knowledgeable about your market and the industry. We are on the cusp of the audio revolution, which is going to blow video marketing out of the water. Why? Because audio content such as podcasts and audiobooks allow us to multitask in a world where we seem to never have enough time. Video marketing took off because it was easier to consume than the written word. However, many videos require you to stop what you are doing and watch them. Audio only requires you to listen. Time is the only currency, and creating a podcast allows you to trade off of that. Many people are turned off at the idea of attending networking events and meetings because they take up valuable time and don’t always offer desired results. However, that doesn’t mean that networking can’t help you grow your business. Podcasting is a more modern and powerful way for agents and brokers to network and expand their reach. Brokers who are trying to attract and train top talent for their team should interview various industry leaders and coaches to gain authority and credibility. Agents should interview local business owners, real estate vendors and other area figures to make their name more synonymous with the city in which they sell real estate. Any time that you interview someone for an episode of your real estate podcast, you should ask that they share the episode with all of their followers too, which will help you capture the attention of new people for free. Using your podcast to network this way helps to make each interviewee a more loyal referral partner because they will be grateful for the exposure you provide them. One of the greatest benefits of starting a real estate podcast is that it allows you to create a great deal of content both cheaply and easily. 
Even if you don’t have a website to host your podcast, you can launch it on the SoundCloud platform. It’s free to get started. If you want to invest in a microphone you can, but you can get started without one and use your iPhone to record each episode instead. Post-recording production can easily be done by a member of your team, or you could outsource it very inexpensively through platforms like Fiverr. Another benefit of podcasting is that you can record and produce episodes in your pajamas in the comfort of your home, which makes them even easier than videos. Whether you are an agent or a broker, you are constantly speaking with people. Many of those people are strangers you are soliciting. If you are going to be successful as an agent or a broker, you need to be a talented and savvy speaker. One of the best ways to improve your skills as a speaker is through practice — lots and lots of practice. Creating a real estate podcast will give you a lot of valuable practice that will also allow you to engage with prospects and develop more valuable connections. The sooner you launch your real estate podcast, the easier it will be for you to dominate the local market share on audio. It only takes a few minutes to get started, so there is no reason you can’t launch your real estate podcast today.