import json

from django.core.urlresolvers import reverse

from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
from seahub.share.models import FileShare, UploadLinkShare


class AdminLibrariesTest(BaseTestCase):

    def setUp(self):
        self.libraries_url = reverse('api-v2.1-admin-libraries')

    def tearDown(self):
        self.remove_repo()

    def test_can_get(self):
        self.login_as(self.admin)

        resp = self.client.get(self.libraries_url)
        json_resp = json.loads(resp.content)
        assert len(json_resp['repos']) > 0

    def test_can_search_by_name(self):
        self.login_as(self.admin)

        repo_name = self.repo.repo_name
        searched_args = repo_name[0:1]
        url = self.libraries_url + '?name=%s' % searched_args

        resp = self.client.get(url)
        json_resp = json.loads(resp.content)
        assert json_resp['name'] == searched_args
        assert searched_args in json_resp['repos'][0]['name']

    def test_get_with_invalid_user_permission(self):
        self.login_as(self.user)

        resp = self.client.get(self.libraries_url)
        self.assertEqual(403, resp.status_code)

    def test_can_create(self):
        self.login_as(self.admin)

        repo_name = randstring(6)
        repo_owner = self.user.username
        data = {
            'name': repo_name,
            'owner': repo_owner,
        }

        resp = self.client.post(self.libraries_url, data)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['name'] == repo_name
        assert json_resp['owner'] == repo_owner

        self.remove_repo(json_resp['id'])

    def test_can_create_without_owner_parameter(self):
        self.login_as(self.admin)

        repo_name = randstring(6)
        data = {
            'name': repo_name,
        }

        resp = self.client.post(self.libraries_url, data)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['name'] == repo_name
        assert json_resp['owner'] == self.admin.username

        self.remove_repo(json_resp['id'])

    def test_create_with_invalid_user_permission(self):
        self.login_as(self.user)

        repo_name = randstring(6)
        repo_owner = self.user.username
        data = {
            'name': repo_name,
            'owner': repo_owner,
        }

        resp = self.client.post(self.libraries_url, data)
        self.assertEqual(403, resp.status_code)

    def test_create_with_invalid_name_parameter(self):
        self.login_as(self.admin)

        repo_name = randstring(6)
        repo_owner = self.user.username
        data = {
            'invalid_name': repo_name,
            'owner': repo_owner,
        }

        resp = self.client.post(self.libraries_url, data)
        self.assertEqual(400, resp.status_code)

    def test_create_with_unexisted_user(self):
        self.login_as(self.admin)

        repo_name = randstring(6)
        repo_owner = '<EMAIL>' % randstring(6)
        data = {
            'name': repo_name,
            'owner': repo_owner,
        }

        resp = self.client.post(self.libraries_url, data)
        self.assertEqual(404, resp.status_code)


class AdminLibraryTest(BaseTestCase):

    def setUp(self):
        self.user_name = self.user.username
        self.admin_name = self.admin.username
        self.repo_id = self.repo.repo_id
        self.library_url = reverse('api-v2.1-admin-library', args=[self.repo_id])
        self.fs_share = FileShare.objects.create_dir_link(
            self.user.username, self.repo_id, self.folder, None, None)
        self.fs_upload = UploadLinkShare.objects.create_upload_link_share(
            self.user.username, self.repo_id, self.folder, None, None)

    def test_can_update_status_to_read_only(self):
        self.login_as(self.admin)

        data = 'status=%s' % 'read-only'
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['status'] == 'read-only'

        data = 'status=%s' % 'normal'
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['status'] == 'normal'

    def test_update_status_with_invalid_args(self):
        self.login_as(self.admin)

        data = 'status=%s' % 'invalid_args'
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(400, resp.status_code)

    def test_can_get(self):
        self.login_as(self.admin)

        resp = self.client.get(self.library_url)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['owner'] == self.user_name
        assert json_resp['name'] == self.repo.repo_name
        assert json_resp['status'] == 'normal'

    def test_get_with_invalid_user_permission(self):
        self.login_as(self.user)

        resp = self.client.get(self.library_url)
        self.assertEqual(403, resp.status_code)

    def test_can_not_transfer_library_to_owner(self):
        self.login_as(self.admin)

        data = 'owner=%s' % self.user_name
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(400, resp.status_code)

    def test_can_transfer(self):
        self.login_as(self.admin)

        data = 'owner=%s' % self.admin_name
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['owner'] == self.admin_name

    def test_transfer_group_invalid_user_permission(self):
        self.login_as(self.user)

        data = 'owner=%s' % self.admin_name
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)

    def test_transfer_group_invalid_args(self):
        self.login_as(self.admin)

        # new owner does not exist
        data = 'owner=<EMAIL>'
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(404, resp.status_code)

    def test_can_delete(self):
        self.login_as(self.admin)

        resp = self.client.delete(self.library_url)
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['success'] is True

    def test_delete_with_invalid_user_permission(self):
        self.login_as(self.user)

        resp = self.client.delete(self.library_url)
        self.assertEqual(403, resp.status_code)

    def test_reshare_to_share_links_after_transfer_repo(self):
        self.login_as(self.admin)

        assert len(FileShare.objects.all()) == 1

        data = 'owner=%s' % self.admin_name
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['owner'] == self.admin_name

    def test_reshare_to_upload_links_after_transfer_repo(self):
        self.login_as(self.admin)

        assert len(UploadLinkShare.objects.all()) == 1

        data = 'owner=%s' % self.admin_name
        resp = self.client.put(self.library_url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        json_resp = json.loads(resp.content)
        assert json_resp['owner'] == self.admin_name
// moe/moe-core/moe.apple/moe.platform.ios/src/main/java/apple/avfoundation/AVAudioNode.java
/*
Copyright 2014-2016 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apple.avfoundation;

import apple.NSObject;
import apple.audiotoolbox.AUAudioUnit;
import apple.foundation.NSArray;
import apple.foundation.NSMethodSignature;
import apple.foundation.NSSet;
import org.moe.natj.c.ann.FunctionPtr;
import org.moe.natj.general.NatJ;
import org.moe.natj.general.Pointer;
import org.moe.natj.general.ann.Generated;
import org.moe.natj.general.ann.Library;
import org.moe.natj.general.ann.Mapped;
import org.moe.natj.general.ann.MappedReturn;
import org.moe.natj.general.ann.NInt;
import org.moe.natj.general.ann.NUInt;
import org.moe.natj.general.ann.Owned;
import org.moe.natj.general.ann.Runtime;
import org.moe.natj.general.ptr.VoidPtr;
import org.moe.natj.objc.Class;
import org.moe.natj.objc.ObjCRuntime;
import org.moe.natj.objc.SEL;
import org.moe.natj.objc.ann.ObjCBlock;
import org.moe.natj.objc.ann.ObjCClassBinding;
import org.moe.natj.objc.ann.Selector;
import org.moe.natj.objc.map.ObjCObjectMapper;

@Generated
@Library("AVFoundation")
@Runtime(ObjCRuntime.class)
@ObjCClassBinding
public class AVAudioNode extends NSObject {
    static {
        NatJ.register();
    }

    @Generated
    protected AVAudioNode(Pointer peer) {
        super(peer);
    }

    @Generated
    @Selector("accessInstanceVariablesDirectly")
    public static native boolean accessInstanceVariablesDirectly();

    @Generated
    @Owned
    @Selector("alloc")
    public static native AVAudioNode alloc();

    @Generated
    @Selector("allocWithZone:")
    @MappedReturn(ObjCObjectMapper.class)
    public static native Object allocWithZone(VoidPtr zone);

    @Generated
    @Selector("automaticallyNotifiesObserversForKey:")
    public static native boolean automaticallyNotifiesObserversForKey(String key);

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:")
    public static native void cancelPreviousPerformRequestsWithTarget(@Mapped(ObjCObjectMapper.class) Object aTarget);

    @Generated
    @Selector("cancelPreviousPerformRequestsWithTarget:selector:object:")
    public static native void cancelPreviousPerformRequestsWithTargetSelectorObject(
            @Mapped(ObjCObjectMapper.class) Object aTarget, SEL aSelector,
            @Mapped(ObjCObjectMapper.class) Object anArgument);

    @Generated
    @Selector("classFallbacksForKeyedArchiver")
    public static native NSArray<String> classFallbacksForKeyedArchiver();

    @Generated
    @Selector("classForKeyedUnarchiver")
    public static native Class classForKeyedUnarchiver();

    @Generated
    @Selector("debugDescription")
    public static native String debugDescription_static();

    @Generated
    @Selector("description")
    public static native String description_static();

    @Generated
    @Selector("hash")
    @NUInt
    public static native long hash_static();

    @Generated
    @Selector("instanceMethodForSelector:")
    @FunctionPtr(name = "call_instanceMethodForSelector_ret")
    public static native NSObject.Function_instanceMethodForSelector_ret instanceMethodForSelector(SEL aSelector);

    @Generated
    @Selector("instanceMethodSignatureForSelector:")
    public static native NSMethodSignature instanceMethodSignatureForSelector(SEL aSelector);

    @Generated
    @Selector("instancesRespondToSelector:")
    public static native boolean instancesRespondToSelector(SEL aSelector);

    @Generated
    @Selector("isSubclassOfClass:")
    public static native boolean isSubclassOfClass(Class aClass);

    @Generated
    @Selector("keyPathsForValuesAffectingValueForKey:")
    public static native NSSet<String> keyPathsForValuesAffectingValueForKey(String key);

    @Generated
    @Owned
    @Selector("new")
    @MappedReturn(ObjCObjectMapper.class)
    public static native Object new_objc();

    @Generated
    @Selector("resolveClassMethod:")
    public static native boolean resolveClassMethod(SEL sel);

    @Generated
    @Selector("resolveInstanceMethod:")
    public static native boolean resolveInstanceMethod(SEL sel);

    @Generated
    @Selector("setVersion:")
    public static native void setVersion_static(@NInt long aVersion);

    @Generated
    @Selector("superclass")
    public static native Class superclass_static();

    @Generated
    @Selector("version")
    @NInt
    public static native long version_static();

    @Generated
    @Selector("engine")
    public native AVAudioEngine engine();

    @Generated
    @Selector("init")
    public native AVAudioNode init();

    @Generated
    @Selector("inputFormatForBus:")
    public native AVAudioFormat inputFormatForBus(@NUInt long bus);

    @Generated
    @Selector("installTapOnBus:bufferSize:format:block:")
    public native void installTapOnBusBufferSizeFormatBlock(@NUInt long bus, int bufferSize, AVAudioFormat format,
            @ObjCBlock(name = "call_installTapOnBusBufferSizeFormatBlock") Block_installTapOnBusBufferSizeFormatBlock tapBlock);

    @Generated
    @Selector("lastRenderTime")
    public native AVAudioTime lastRenderTime();

    @Generated
    @Selector("nameForInputBus:")
    public native String nameForInputBus(@NUInt long bus);

    @Generated
    @Selector("nameForOutputBus:")
    public native String nameForOutputBus(@NUInt long bus);

    @Generated
    @Selector("numberOfInputs")
    @NUInt
    public native long numberOfInputs();

    @Generated
    @Selector("numberOfOutputs")
    @NUInt
    public native long numberOfOutputs();

    @Generated
    @Selector("outputFormatForBus:")
    public native AVAudioFormat outputFormatForBus(@NUInt long bus);

    @Generated
    @Selector("removeTapOnBus:")
    public native void removeTapOnBus(@NUInt long bus);

    @Generated
    @Selector("reset")
    public native void reset();

    @Runtime(ObjCRuntime.class)
    @Generated
    public interface Block_installTapOnBusBufferSizeFormatBlock {
        @Generated
        void call_installTapOnBusBufferSizeFormatBlock(AVAudioPCMBuffer arg0, AVAudioTime arg1);
    }

    @Generated
    @Selector("AUAudioUnit")
    public native AUAudioUnit AUAudioUnit();

    @Generated
    @Selector("latency")
    public native double latency();

    @Generated
    @Selector("outputPresentationLatency")
    public native double outputPresentationLatency();
}
#include <stdio.h>

volatile int x;

extern void abort ();

/* Weak definition; a strong foobar() defined elsewhere (one that sets
   x to -1) is expected to override this at link time. */
__attribute__((weak))
void foobar (void) { x++; }

int main (void)
{
  foobar ();
  if (x != -1)
    abort ();
  printf ("PASS\n");
  return 0;
}
The Conservatives in London have reportedly held secret discussions about a potential breakaway from the national party, amid fears of an electoral wipeout in the local elections next month.
Senior Tories have hosted a series of meetings over the past year to draw up plans for a separate party, which would boast its own brand, policies and figurehead, distinct from Theresa May.
The disclosure is deeply embarrassing for Mrs May, who is braced for the party’s worst performance in the capital in its 184-year history when the ballots are cast next month.
Supply chain management: theory, practice and future challenges

Purpose - The purpose of this paper is to critically assess current developments in the theory and practice of supply management and, through such an assessment, to identify barriers, possibilities and key trends.

Design/methodology/approach - The paper is based on a three-year detailed study of six supply chains which encompassed 72 companies in Europe. The focal firms in each instance were sophisticated, blue-chip corporations operating on an international scale. Managers across at least four echelons of the supply chain were interviewed and the supply chains were traced and observed.

Findings - The paper reveals that supply management is, at best, still emergent in terms of both theory and practice. Few practitioners were able, or even seriously aspired, to extend their reach across the supply chain in the manner prescribed in much modern theory. The paper identifies the range of key barriers and enablers to supply management and it concludes with an assessment of the main trends.

Research limitations/imp...
A Block Krylov Subspace Implementation of the Time-Parallel Paraexp Method and its Extension for Nonlinear Partial Differential Equations

A parallel time integration method for nonlinear partial differential equations is proposed. It is based on a new implementation of the Paraexp method for linear partial differential equations (PDEs) employing a block Krylov subspace method. For nonlinear PDEs the algorithm is based on our Paraexp implementation within a waveform relaxation. The initial value problem is solved iteratively on a complete time interval. Nonlinear terms are treated as a source term, provided by the solution from the previous iteration. At each iteration, the problem is decoupled into independent subproblems by the principle of superposition. The decoupled subproblems are solved fast by exponential integration, based on a block Krylov method. The new time integration is demonstrated for the one-dimensional advection-diffusion equation and the viscous Burgers equation. Numerical experiments confirm excellent parallel scaling for the linear advection-diffusion problem, and good scaling when the nonlinear Burgers equation is simulated.

1. Introduction. Recent developments in hardware architecture, bringing to practice computers with hundreds of thousands of cores, urge the creation of new numerical algorithms, as well as a major revision of existing ones. To be efficient on such massively parallel platforms, the algorithms need to employ all possible means to parallelize the computations. When solving partial differential equations (PDEs) with a time-dependent solution, an important way to parallelize the computations is, next to parallelization across space, parallelization across time. This adds a new dimension of parallelism with which the simulations can be implemented. In this paper we present a new time-parallel integration method extending the Paraexp method to nonlinear partial differential equations using Krylov methods and waveform relaxation.

Several approaches to parallelizing the simulation of time-dependent solutions in time can be distinguished. The first important class of methods are the waveform relaxation methods, including the space-time multigrid methods for parabolic PDEs. The key idea is to start with an approximation to the numerical solution for the whole time interval of interest and update the solution by solving an easier-to-solve approximate system in time. The Parareal method, which has attracted significant attention recently, is a prime example related to the class of waveform relaxation methods. Parallel Runge-Kutta methods and general linear methods, where the parallelism is determined and restricted by the number of stages or steps, form another class of time-parallel methods. Time-parallel schemes can also be obtained by treating time as an additional space dimension and solving for the values at all time steps at once. This approach requires significantly more memory and is used, e.g., in multidimensional tensor computations and (discontinuous) Galerkin finite element methods. Recently a "parallel full approximation scheme in space and time" (PFASST) was introduced, which is based on multigrid methods. PFASST was observed to have a generally improved parallel efficiency compared to Parareal. Also, we mention parallel methods that facilitate parallelism by replacing the exact solves in implicit schemes by approximate ones, and parallel methods based on operator splitting.
Finally, there is the Paraexp method for parallel integration of linear initial-value problems. This algorithm is based on the fact that linear initial-value problems can be decomposed into homogeneous and nonhomogeneous subproblems. The homogeneous subproblems can be solved fast by an exponential integrator, while the nonhomogeneous subproblems can be solved by a traditional time-stepping method.

In this paper we propose a time-parallel method which is based on the matrix exponential time stepping technique and waveform relaxation. Our method adopts the Paraexp method within a waveform relaxation framework, which enables the extension of parallel time integration to nonlinear PDEs. The method is inspired by and relies on recent techniques in Krylov subspace matrix exponential methods such as shift-and-invert acceleration, restarting and the use of block Krylov subspaces to handle nonautonomous PDEs efficiently. The method also relies on a singular value decomposition (SVD) of source terms in the PDE, which is used to construct the block Krylov subspace. To improve the efficiency of the method, the SVD is truncated by retaining only the relatively large singular values. We show in theory and in practice that for a source term that can be approximated by a smooth function, the singular values decay algebraically with the time interval size. In that case, a truncated SVD approximation of the source term is adequate.

The contribution of this paper is more specifically as follows. First, to solve systems of linear ODEs, we propose an implementation of the Paraexp method based on the exponential block Krylov (EBK) method introduced in earlier work. The EBK method is a Krylov method that approximates the exact solution of a system of nonhomogeneous linear ordinary differential equations by a projection onto a block Krylov subspace. Our Paraexp implementation does not involve a splitting into homogeneous and nonhomogeneous subproblems, which leads to a better parallel efficiency. Second, we extend our EBK-based implementation of the Paraexp method to nonlinear initial-value problems. We solve the problem iteratively on a chosen time interval. In our case, the nonlinear term of the PDE is incorporated as a source term, which is updated after every iteration with the latest solution. Third, we show that our Paraexp-EBK (PEBK) implementation has promising properties for time-parallelization. The PEBK method can be seen as a special continuous-time waveform relaxation method, which is typically more efficient than standard waveform relaxation methods. The PEBK method is tested for the one-dimensional advection-diffusion equation and the viscous Burgers equation in an extensive series of numerical experiments. Since we are interested in time-parallel solvers for large-scale applications in computational fluid dynamics (CFD), such as turbulent flow simulations, we also test our method with respect to the diffusion/advection ratio and the grid resolution, reflecting high Reynolds-number conditions. For example, the parallel efficiency of the Parareal algorithm was found to decrease considerably with an increasing number of processors for the advection equation. In contrast, the EBK method was found to have excellent weak scaling for linear problems.

The paper is organized as follows. In Section 2 we describe the exponential time integration scheme and its parallelization. In Section 3, we present numerical experiments with the advection-diffusion equation, and with the viscous Burgers equation in Section 4.
Finally, a discussion and conclusions are outlined in Section 5.

2. Exponential time integration. Our time integration method for linear and nonlinear PDEs is based on the exponential block Krylov (EBK) method. In this section, we first provide a brief description of the EBK method. Then, we extend the EBK method to integrate nonlinear PDEs in an iterative way. Finally, the parallelization of the time integrator is discussed.

2.1. Exponential block Krylov method. The EBK method is a time integrator for linear systems of nonhomogeneous ordinary differential equations (ODEs); details of the method are given elsewhere. We follow the method of lines approach, i.e., the PDE is discretized in space first. We start with linear PDEs. After applying a spatial discretization to the PDE, we obtain the initial value problem

    u'(t) = A u(t) + g(t),   u(0) = u_0,   (2.1)

where u(t) : ℝ → ℝ^n is a vector function, A ∈ ℝ^{n×n} is a square matrix, and g(t) ∈ ℝ^n is a time-dependent source term. The vector u can readily be identified with the solution to the PDE in terms of its values taken on a computational grid in physical space. In Section 2.3, we will introduce a more general term g(t, u(t)), which contains the nonlinear terms of the PDE. The matrix A is typically large and sparse, depending on the spatial discretization method. The dimension of the system, n, corresponds to the number of degrees of freedom used in the spatial discretization. Exponential integrators approximate the exact solution of the semi-discrete system (2.1), formulated in terms of the matrix exponential.

The first step in the EBK method is to approximate g(t) with a function that can be treated more easily. Here, g(t) is approximated based on a truncated singular value decomposition (SVD) as follows. The source term is sampled at s points in time, 0 = t_1 < t_2 < ... < t_{s−1} < t_s = ∆T, in the integration interval [0, ∆T], and the source samples form the matrix G = [g(t_1), ..., g(t_s)] ∈ ℝ^{n×s}. We make the natural assumption that the typical number of time samples s necessary for a good approximation of g(t) is much lower than n: s ≪ n. Since we assume s ≪ n, it is more economical to calculate the so-called thin SVD of G, instead of the full SVD, without loss of accuracy. In this case, the thin SVD of G is

    G = Û Σ V̂^T,

where Σ ∈ ℝ^{s×s} is a diagonal matrix containing the singular values σ_1 ≥ σ_2 ≥ ... ≥ σ_s, and Û ∈ ℝ^{n×s}, V̂ ∈ ℝ^{s×s} are matrices with orthonormal columns. The thin SVD can be approximated by a truncation in which the m < s largest singular values are retained. As seen from this truncated SVD, the samples of g(t) can be approximated by linear combinations of the first m columns of Û, i.e.,

    g(t) ≈ U p(t),   (2.4)

where U ∈ ℝ^{n×m} is formed by the first m columns of Û, and p(t) ∈ ℝ^m is obtained by an interpolation of the coefficients in these linear combinations. There are several possible choices for p(t), among which are cubic piecewise polynomials. The approximation error in the source term, g(t) − U p(t), can then easily be controlled within a desired tolerance, depending on the number of samples in [0, ∆T] and the number of singular values truncated. The number of retained singular values, m, is equal to the block width of the block Krylov subspace. The efficiency of the EBK method therefore depends on how many singular values are required for an accurate approximation of the source term (2.4). In applications of the method this parameter m can be varied and practical convergence can be assessed.
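The low-rank source approximation is easy to prototype. The following sketch (ours, not the authors' code; all names are illustrative) samples g(t) on [0, ∆T], truncates the thin SVD, and returns U together with a piecewise-cubic p(t), so that g(t) ≈ U p(t) as in (2.4):

```python
import numpy as np
from scipy.interpolate import CubicSpline

def low_rank_source(g, dT, n_samples=50, tol=1e-8):
    # Chebyshev-type sample points on [0, dT], as used in the paper's experiments
    t = 0.5 * dT * (1 - np.cos(np.pi * np.arange(n_samples) / (n_samples - 1)))
    G = np.column_stack([g(ti) for ti in t])           # n x s sample matrix
    U, sv, Vt = np.linalg.svd(G, full_matrices=False)  # thin SVD
    m = max(1, int(np.sum(sv > tol * sv[0])))          # number of retained singular values
    U_m = U[:, :m]                                     # block of width m
    P = U_m.T @ G                                      # coefficients p(t_i), an m x s array
    p = CubicSpline(t, P, axis=1)                      # piecewise-cubic interpolant p(t)
    return U_m, p                                      # g(t) ~= U_m @ p(t), cf. (2.4)
```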
If the subinterval ∆T is small and the source function g(t) can be well approximated by a smooth function, then the singular values of the samples of g decrease rapidly, thus allowing for an efficient low-rank approximation in (2.4). Furthermore, let the subinterval length ∆T be small in the sense that (∆T)² ≪ ∆T (in dimensionless time units). Of course, the assumption that ∆T is small is not always realistic, and we comment on how it can be partially relaxed below in Remark 1. Denote by g^(j) the jth derivative of g and assume that the constants M_j bounding ‖g^(j)(t)‖ on [0, ∆T] are bounded. We can then show that the singular values of the samples decrease, starting from a thin QR factorization of G. Consider the thin QR factorization of G, G = QR, where Q ∈ ℝ^{n×s} has orthonormal columns and R ∈ ℝ^{s×s} is an upper triangular matrix with nonnegative diagonal entries. The entries r_{jk} of R then satisfy

    |r_{jk}| = O((∆T)^{j−1}),

and r_{jk} = 0 for k < j because R is upper triangular. In this estimate the constants C_j hidden in the O symbol depend only on the points t_1, ..., t_s at which the samples are computed and do not depend on g and ∆T. Since G and R have the same singular values, we now consider the singular values of R. We have

    σ_{j+1} ≤ ‖R_j‖_2,   (2.5)

where R_j is a matrix obtained from R by skipping j rows or columns, chosen arbitrarily. Since the entries in R decrease rowwise as |r_{jk}| = O((∆T)^{j−1}), to get the sharpest estimate in (2.5) we choose R_j to be the matrix R with the first j rows skipped. To bound the 2-norm of R_j we use ‖R_j‖_2 ≤ ‖R_j‖_F and note that for j = 1, ..., s − 1 the remaining rows have entries of order O((∆T)^j) and higher. Thus, we obtain ‖R_j‖_2 = √(s−j) · O((∆T)^j), which, together with (2.5), yields the following result.

Theorem 2.1. Let g(t) : ℝ → ℝ^n be a smooth function such that the constants M_j are bounded. Furthermore, let the subinterval length ∆T be small in the sense that (∆T)² ≪ ∆T. Then the singular values σ_j, j = 1, ..., s, of the sample matrix G satisfy

    σ_j = O((∆T)^{j−1}).   (2.6)

Remark 1. We may extend the result of Theorem 2.1 to the case of a large ∆T if the constants M_j are bounded more strongly or even decay with j. Indeed, if ∆T is large, we can consider the function g̃(τ) := g(τq), with q chosen such that δ := ∆T/q is small. As the function g̃ takes the same values for τ ∈ [0, δ] as the function g for t ∈ [0, ∆T], Theorem 2.1 formally holds for g̃ with ∆T replaced by δ and M_j multiplied by q^j. The coefficients in the O symbol in (2.6) may then grow with powers of ∆T, thus rendering the result meaningless unless a stricter assumption on M_j is made.

The truncated SVD approximation of the source term (2.4) facilitates the solution of the initial value problem (IVP) (2.1) by the block Krylov subspace method. Here, the block Krylov subspace at iteration l is defined as

    K_l(A, U) = span{U, AU, ..., A^{l−1}U}.   (2.7)

Furthermore, the block Krylov subspace method can be accelerated by the shift-and-invert technique and, to keep the Krylov subspace dimension bounded, implemented with restarting.
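Theorem 2.1 is easy to probe numerically. The small check below (illustrative only; the smooth source g is made up) prints the leading normalized singular values of the sample matrix for decreasing ∆T; they shrink roughly like (∆T)^{j−1}:

```python
import numpy as np

n, s = 200, 12
x = np.linspace(0, 1, n)
g = lambda t: np.exp(-t) * np.sin(2 * np.pi * (x - t))  # a smooth, made-up source

for dT in (1.0, 0.1, 0.01):
    t = np.linspace(0, dT, s)
    G = np.column_stack([g(ti) for ti in t])      # sample matrix on [0, dT]
    sv = np.linalg.svd(G, compute_uv=False)
    print(dT, sv[:4] / sv[0])  # trailing singular values shrink as dT decreases
```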
2.2. Parallelization of linear problems. In this section, the parallelization of the linear ODE solution is discussed. As we will see, the method is equivalent to the Paraexp method, but there are differences in implementation leading to a better parallel efficiency (see Section 3.2). Linear IVPs can be solved parallel in time by using the principle of superposition. First, the time interval [0, T] is divided into P nonoverlapping subintervals [T_{j−1}, T_j], j = 1, ..., P, where P is also the number of processors that can be used in parallel. We introduce the shift of u(t) with respect to the initial condition, u(t) = u_0 + ũ(t). The shifted variable ũ(t) solves the IVP with homogeneous initial conditions,

    ũ'(t) = A ũ(t) + g̃(t),   ũ(0) = 0,   g̃(t) := g(t) + A u_0.   (2.8)

The source term is approximated using s time samples per subinterval, as illustrated in Fig. 2.1. The shifted IVP (2.8) can be decoupled into independent subproblems by the principle of superposition. To each subinterval we associate a part of the source term g̃(t), defined for j = 1, ..., P as

    g_j(t) = g̃(t) for t ∈ [T_{j−1}, T_j),   g_j(t) = 0 otherwise.

The expected parallel speedup is based on the observation that the solution to (2.8) is given by the variation-of-constants formula

    ũ(t) = ∫_0^t e^{(t−s)A} g̃(s) ds,

where the integrals over the subintervals [T_{j−1}, T_j] can be evaluated independently and in parallel. More precisely, by exploiting the linearity of the problem we decompose the IVP (2.8) into the subproblems

    v_j'(t) = A v_j(t) + g_j(t),   v_j(T_{j−1}) = 0,   t ∈ [T_{j−1}, T],   (2.12)

where v_j(t) is referred to as a subsolution of the total problem. The subproblems are independent and can be solved in parallel. We integrate each subproblem individually with the EBK method. Observe that the source is only nonzero on the subinterval [T_{j−1}, T_j). We follow the ideas of the Paraexp method and note that the nonhomogeneous part of the ODE requires most of the computational work in the EBK method: an accurate SVD approximation of the source term (2.4) generally requires more singular values to be retained, increasing the dimensions of the block Krylov subspace (2.7). According to the principle of superposition, the solution of the original problem is then given as

    u(t) = u_0 + Σ_{j=1}^{P} v_j(t).   (2.13)

The summation of the subsolutions v_j(t) is the only communication required between the parallel processes. The parallel algorithm is summarized in Fig. 2.2; in principle, it is identical to the Paraexp method:

    for j = 1, ..., P (in parallel) do
        solve subproblem (2.12) with the EBK method for t ∈ [T_{j−1}, T]
    end for
    construct the solution u(t) by superposition, see (2.13)

The only practical difference is that in our implementation both the nonhomogeneous and the homogeneous parts of the subproblems are solved by the EBK method. The original version of the Paraexp method assumes a conventional time integration method to solve the "difficult" nonhomogeneous part, and a Krylov subspace method for exponential integration of the homogeneous part. Our implementation of the Paraexp method is compared numerically with the original one in Section 3.2. Finally, additional parallelism could be exploited within the block Krylov subspace method itself. If the block size is m, then the m matrix-vector products can be executed entirely in parallel. This approach could be applied in combination with the parallelization described in this section.
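As a concrete illustration of this splitting, the sketch below (ours; a serial loop emulates the P processes and a generic ODE solver stands in for EBK) solves u' = Au + g(t) by summing the P subsolutions. Starting each subproblem at t = 0 with a source that vanishes outside [T_{j−1}, T_j) is equivalent to starting it at T_{j−1} with zero initial data, since the subsolution stays zero until the source switches on:

```python
import numpy as np
from scipy.integrate import solve_ivp

def solve_by_superposition(A, g, u0, T, P, t_eval):
    edges = np.linspace(0.0, T, P + 1)
    g_shift = lambda t: g(t) + A @ u0             # source of the shifted IVP (2.8)
    v_sum = np.zeros((len(u0), len(t_eval)))
    for j in range(P):                            # each loop iteration = one process
        def rhs(t, v, a=edges[j], b=edges[j + 1]):
            src = g_shift(t) if a <= t < b else np.zeros_like(v)
            return A @ v + src                    # subproblem (2.12)
        sol = solve_ivp(rhs, (0.0, T), np.zeros_like(u0), t_eval=t_eval,
                        rtol=1e-10, atol=1e-12)
        v_sum += sol.y                            # only communication: the sum
    return u0[:, None] + v_sum                    # superposition (2.13)
```

The result can be checked against a direct solve of u' = Au + g(t) with the same solver; the two agree to the integration tolerance.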
2.3. Treatment of nonlinearities. The EBK method, designed to solve a linear system of nonhomogeneous ODEs (2.1), can be extended to handle nonlinear systems of ODEs by including the nonlinearities in the source term. The system is then solved iteratively, in such a way that the iterand u_k(t) is updated on the entire interval [0, T]:

    u_{k+1}'(t) = A u_{k+1}(t) + g(t, u_k(t)),   u_{k+1}(0) = u_0,   (2.14)

where k denotes the iteration index. We proceed in a few steps. First, the problem is reduced to the form of Eq. (2.1): the nonlinear IVP is approximated by evaluating the source term with the current iterand u_k(t). Because the current iterand u_k(t) is a known function, we can write the source term as an explicit function of time, g_k(t) := g(t, u_k(t)). The resulting initial value problem is solved by the EBK method to achieve one iteration, and this process is continued until the solution is sufficiently converged. This approach is similar to applying Picard or fixed-point iterations to the nonlinear term.

The treatment of the nonlinearity can be further improved by using the Jacobian matrix, similar to a Newton-Raphson method. In this case, we introduce the average Jacobian matrix J̄_k, averaged over the interval of integration [0, T]. Generally, the solution varies in time and so does the Jacobian matrix; the nonlinear remainder with respect to the time-averaged state is contained in the source term. Using this correction, we find the recursive relation

    u_{k+1}'(t) = (A + J̄_k) u_{k+1}(t) + g(t, u_k(t)) − J̄_k u_k(t).   (2.18)

When converged, u_{k+1}(t) = u_k(t), the terms containing J̄_k cancel.

Remark 2. The iteration (2.18) is well known in the literature on waveform relaxation methods, where its convergence has been analyzed, also in the case of an inexact iteration. These results, in particular, show that the iteration (2.18) converges superlinearly on finite time intervals if the norm of the matrix exponential of t(A + J̄_k) decays exponentially with t, i.e., if the eigenvalues of A + J̄_k lie in the left half-plane.

2.4. Parallelization of nonlinear problems. Nonlinear IVPs are solved in an iterative way with the PEBK method. We follow the waveform relaxation approach, that is, the problem is solved iteratively on the entire time interval of interest, [0, T]. The original problem is decomposed into a number of independent, "easier" subproblems that can be solved iteratively, and in parallel. At each iteration the nonlinear term is treated as a source term, which gives us a linear system of ODEs that can be integrated in parallel, see Section 2.2. In the case of nonlinear problems, we take the average of the Jacobian matrix on each subinterval, which gives us a piecewise constant function: J̄(t) = J̄_j for t ∈ [T_{j−1}, T_j), j = 1, ..., P. The IVP is then shifted with respect to the initial condition, as in (2.8), giving the shifted problem (2.20), which can then be solved in parallel. We follow an approach similar to the Paraexp method, as explained in Section 2.2: the problem is decomposed into P independent subproblems, and the subsolutions v_j result from the convergence of iterands v_{j,k} obeying subproblems analogous to (2.12), with the Jacobian correction of (2.18); these can be integrated with the EBK method. Here, the second subindex k indicates the jth subsolution at the kth iteration. The total solution can then be updated according to the principle of superposition,

    u_{k+1}(t) = u_0 + Σ_{j=1}^{P} v_{j,k+1}(t).

We consider the parallel efficiency in an idealized setting and estimate a theoretical upper bound here, assuming that communication among the processors can be carried out very efficiently. The computational cost can then be simply estimated by the number of iterations required for the numerical solution to converge. Suppose the computation time of a single EBK iteration is τ_0. The maximum parallel speedup is then

    S = (K_1 τ_0) / (K_P τ_0 / P) = P K_1 / K_P,

where K_1 is the number of iterations for serial time integration and K_P for parallel time integration. The theoretical upper bound of the parallel efficiency is then

    E = S / P = K_1 / K_P.   (2.26)

High parallel efficiency can be achieved if the parallelization does not slow down the convergence of the numerical solution. As will be demonstrated in Section 4, we typically observe that K_P is not significantly larger than K_1, and a near-optimal efficiency is achieved in various relevant cases. For comparison, the parallel efficiency of the Parareal algorithm is formally bounded by 1/K_P; in our case, this upper bound is improved by a factor K_1.
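The waveform-relaxation loop with the averaged-Jacobian correction (2.18) can be sketched compactly. The code below is ours and purely illustrative (a generic solver stands in for EBK, and all names are made up): the nonlinearity is frozen at the previous iterate, corrected with a time-averaged Jacobian, and the resulting linear nonhomogeneous IVP is re-solved on the whole interval:

```python
import numpy as np
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d

def waveform_relaxation(A, g, jac, u0, T, n_t=200, n_iter=10):
    t = np.linspace(0.0, T, n_t)
    Uk = np.tile(u0[:, None], (1, n_t))                  # initial guess: constant in time
    for k in range(n_iter):
        uk = interp1d(t, Uk, axis=1, fill_value="extrapolate")
        Jk = np.mean([jac(uk(ti)) for ti in t], axis=0)  # time-averaged Jacobian
        def rhs(ti, u):
            # linear IVP of iteration k, cf. (2.18): nonlinear remainder as a source
            return (A + Jk) @ u + g(ti, uk(ti)) - Jk @ uk(ti)
        Uk = solve_ivp(rhs, (0.0, T), u0, t_eval=t, rtol=1e-10).y
    return t, Uk
```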
Parareal is an iterative method for the parallelization of sequential time integration methods, whereas the EBK method, for nonlinear problems, is an iterative method to start with, and its parallelization does not necessarily increase the total number of iterations. Note that the PFASST method has a parallel efficiency estimate similar to that of the PEBK method.

3. Advection-diffusion equation. In this section, we present results of numerical tests where the space-discretized advection-diffusion equation is solved with the EBK method. We demonstrate the consistency and stability of the EBK method for the linear advection-diffusion equation, a PDE describing a large variety of transport phenomena. The spatial discretization of the PDE, using central finite differencing for the sake of illustration, yields a linear system of ODEs. The time integration can be parallelized as described in Section 2.2. We illustrate the principle of parallelization for different physical parameters of the advection-diffusion equation, before we move on to nonlinear PDEs in Section 4. Convergence of the numerical solution is observed for different values of the physical parameters.

In our implementation of the EBK method, the IVP projected onto the block Krylov subspace is solved with the function ode15s in MATLAB (2013b). The relative tolerance of the PEBK solver is denoted by tol. In our tests, the block Krylov subspace is restarted every 20 iterations. For the truncated SVD approximation (2.4), p(t) is chosen as piecewise cubic polynomials, although other types of approximation are possible as well and are not crucial for the performance of the PEBK method. The sample points per subinterval are Chebyshev nodes. This is not crucial either, but gives a slightly better approximation than uniform sample points.

3.1. Homogeneous PDE. We consider the advection-diffusion equation with a short-pulse initial condition, to clearly distinguish the separate effects of advection and diffusion. The PDE, with periodic boundary conditions, reads

    ∂u/∂t + a ∂u/∂x = ν ∂²u/∂x²,   (3.1)

where a ∈ ℝ is the advection velocity and ν ∈ ℝ is the diffusivity coefficient. Both parameters are constant in space and time. The PDE is first discretized in space with a second-order central finite difference scheme. The corresponding semi-discrete system of ODEs is then

    v'(t) = A v(t) + A u_0,   v(0) = 0,   (3.2)

where the matrix A results from the discretization of the spatial derivatives and represents both the diffusive and the advective terms. In (3.2), the substitution u = v + u_0 has been applied, with u(t) the vector function containing the values of the numerical solution on the mesh at time t, and u_0 = u(0). The substitution leads to homogeneous initial conditions. In this case, the source term is constant in time, and its SVD polynomial approximation (2.4) is exact with m = 1. This allows us to focus on the two remaining parameters: the grid resolution and the tolerance of the EBK solver. The linear IVP (3.2) is decoupled into independent subproblems by partitioning of the source term (A u_0) on the time interval. The superposition of the subsolutions is illustrated in Fig. 3.1, in which the time interval of interest has been partitioned into four equal subintervals, to be integrated on four processors. The sum of the initial condition and the subsolutions gives the final solution on the entire time interval, see Section 2.2. In the following numerical experiments, P = 8 subintervals are used.
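A minimal sketch of this semi-discretization (ours, not the paper's MATLAB code): second-order central differences with periodic boundary conditions for (3.1), returning the matrix A of (3.2):

```python
import numpy as np

def advection_diffusion_matrix(n, a, nu, L=1.0):
    dx = L / n
    e = np.eye(n)
    shift_p = np.roll(e, 1, axis=1)    # picks out u_{i+1}, with periodic wrap
    shift_m = np.roll(e, -1, axis=1)   # picks out u_{i-1}, with periodic wrap
    advection = -a * (shift_p - shift_m) / (2 * dx)       # -a du/dx, central
    diffusion = nu * (shift_p - 2 * e + shift_m) / dx**2  # nu d2u/dx2, central
    return advection + diffusion
```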
The advection-diffusion equation is solved for three different combinations of a and ν. The error is measured in the relative ℓ₂-norm with respect to the exact solution,

    error = ‖u_h − u_exact‖₂ / ‖u_exact‖₂,   (3.3)

where the coefficients of the exact solution are calculated using numerical quadrature with high precision. The error (3.3) shows second-order convergence with ∆x, as expected from the truncation error of the finite difference scheme. The convergence plots show that we are able to control the error of the parallel time integration method. In this case, the final error can be made to depend only on the spatial resolution and the tolerance of the EBK method. Also, the EBK method has no principal restrictions on the time step size, as it directly approximates the exact solution of Eq. (3.2) by Krylov subspace projections.

3.2. Parallel efficiency. In the previous examples, we solved homogeneous IVPs. Nonhomogeneous problems are generally more expensive to solve, because an accurate SVD approximation of the source term (2.4) requires more singular values, which increases the block width of the block Krylov subspace (2.7). Parallel speedup can then be expected by splitting the nonhomogeneous problem into subproblems (2.12), which require less individual effort to solve with the PEBK method. To measure the parallel efficiency of our algorithm for nonhomogeneous IVPs, we introduce a source term f(x, t) in the advection-diffusion equation with ν = 10⁻² and a = 1. The source term is chosen such that the solution is a series of five travelling pulses. The mesh width of the spatial discretization is ∆x = 10⁻³, such that the error due to the spatial discretization is small compared to the time integration error. This mesh width gives a semi-discrete system with a 1000 × 1000 matrix, which is a suitable problem size for testing the EBK method. The tolerance of the EBK method is set to 10⁻⁴. The SVD polynomial approximation is constructed from 100 time samples per subinterval. The singular values are plotted in Fig. 3.4, which reveals that the first two singular values are several orders of magnitude larger than the rest. Therefore, we retain only the first two singular values in the truncated SVD. The decay of the singular values is guaranteed by the upper bound from Theorem 2.1. We have verified that the SVD approximation is sufficiently accurate, i.e., the approximation error, measured in the L²-norm, is less than the tolerance of the EBK method.

We compare the parallel efficiency of the Paraexp-EBK implementation with a conventional implementation of the Paraexp algorithm, where the nonhomogeneous problem is integrated with the Crank-Nicolson (CN) scheme. The linear system is solved directly using MATLAB. The matrix exponential propagator, for the homogeneous problem in the Paraexp method, is realized with an Arnoldi method using the shift-and-invert technique. In order to have a Courant number of one, we take ∆t = 10⁻³ (for ∆x = 10⁻³ and a = 1). According to the Paraexp method, the time step size needs to be decreased in parallel computation by a factor P^{1/(2q)}, where q is the order of the time integration method, in order to control the error. In the case of the Crank-Nicolson scheme, we have q = 2. As discussed in Section 2.2, there is no communication required between processors, except for the superposition of the solutions to the subproblems at the end. Therefore, we are able to test the parallel algorithm on a serial computer by measuring the computation time of each independent subproblem. The computation time of the serial time integration is denoted by τ_0.
For the parallel time integration, we measure the computation times of the nonhomogeneous and the homogeneous parts of the subproblems separately, denoted by τ_1 and τ_2 respectively. The parallel speedup can then be estimated as

    S = τ_0 / (max τ_1 + max τ_2),

where we take the maximum value of τ_1 and τ_2 over all parallel processes. The timings of the EBK method and Paraexp are listed in Table 3.1. The parallel efficiency of both methods is illustrated in Figure 3.3. Note that the parallel efficiency of the standard Paraexp method steadily decreases, whereas the PEBK method maintains a constant efficiency level of around 90%. The decrease in efficiency of the standard Paraexp method is due to the reduced time step size in the Crank-Nicolson scheme with respect to its serial implementation: the nonhomogeneous part of the subproblems requires more computation time as the number of processors increases. The numerical test confirms the initial assumption that parallel speedup with the EBK method can be achieved by decomposing the original problem into simpler independent subproblems (2.12). Furthermore, the Paraexp implementation using the EBK method appears to be more efficient than one using a traditional time-stepping method.

Table 3.1: Parallel efficiency of the Paraexp-EBK method and the Paraexp/Crank-Nicolson method for the advection-diffusion equation, with number of processors P. Timing τ_0 corresponds to the serial algorithm. For the parallel algorithm, τ_1 and τ_2 are the timings of the nonhomogeneous and homogeneous problems respectively.

4. Burgers equation. In the previous section, the PEBK method was applied to a linear PDE. In this section, the performance of the PEBK method is tested on a nonlinear PDE, the viscous Burgers equation.

4.1. Travelling wave. Consider the viscous Burgers equation,

    ∂u/∂t + u ∂u/∂x = ν ∂²u/∂x² + f(x, t),   (4.1)

where ν ∈ ℝ denotes the diffusivity (or viscosity) coefficient. In the following experiments, we take ν = 10⁻², such that the problem is dominated by the nonlinear convective term. As in the previous example, the boundary conditions are periodic. Exact solutions to the Burgers equation can be found by the Cole-Hopf transformation; in this test case, however, we construct a desired solution by introducing an appropriate source term, as shown for the advection-diffusion equation in Section 3.2. The source term balances the dissipation of energy due to diffusion and prevents the solution from vanishing in the limit t → ∞.

The Burgers equation is an important equation in fluid dynamics. It can be seen as a simplification of the Navier-Stokes equation which retains the nonlinear convective term, u u_x. In the limit ν → 0, the nonlinearity may produce discontinuous solutions, or shocks, so that a typical solution may resemble a sawtooth wave. A sawtooth wave can be represented by an infinite Fourier series; a smooth version of the sawtooth wave can be obtained from the modified Fourier series (4.2), in which k is the wavenumber, k_max a cutoff wavenumber, and φ(k, δ) a smoothing function which suppresses the amplitudes associated with high wavenumbers, δ being the smoothing parameter. The smoothing function φ(k, δ) is motivated by the viscosity-dependent inertial spectrum of the Burgers equation found by Chorin & Hald. The smoothing function for δ = 0.1 is shown in Figure 4.1a. As the smoothing function rapidly decreases with wavenumber, we choose a cutoff wavenumber of k_max = 100. The value δ = 0.1 is found to produce a smooth version of a sawtooth wave, as shown in Figure 4.1b.
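For illustration, a smoothed sawtooth can be built from a damped Fourier series as follows. This sketch is ours: the paper's exact smoothing function φ(k, δ) is not recoverable from the text, so an exponential damping exp(−δk) is assumed here purely as a stand-in:

```python
import numpy as np

def smooth_sawtooth(x, k_max=100, delta=0.1):
    u = np.zeros_like(x)
    for k in range(1, k_max + 1):
        phi = np.exp(-delta * k)                  # assumed smoothing function phi(k, delta)
        u += phi * np.sin(2 * np.pi * k * x) / k  # sawtooth Fourier coefficients ~ 1/k
    return u
```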
We consider a wave travelling in the positive x-direction by introducing the parametrization ξ = x − t/2 + 1/2 in (4.1). The source term is then readily found by substituting the chosen solution (4.2) into the Burgers equation (4.1). The space derivatives in the Burgers equation are discretized with a second-order central finite difference scheme, using a uniform mesh width ∆x > 0. The error of the numerical solution is again measured in the relative ℓ₂-norm, cf. (3.3). The tolerance of the PEBK method is set to 10⁻⁴. The SVD approximation of the source term, which includes the nonlinear term, is constructed from s = 50 samples per subinterval, and reveals that m = 12 singular values are sufficient in the truncated SVD, so that the error of the SVD approximation is less than the tolerance of the PEBK method. The nonlinear system of ODEs is solved iteratively, as outlined in Section 2.3. Figure 4.2 shows the error history at different grid resolutions. Here, the error converges to a value that depends on the mesh width; in other words, the final error of the time integration method is much smaller than the error due to the spatial discretization.

The PEBK method is parallelized as discussed in Section 2.4. The time interval can be partitioned into P subintervals with a uniform subinterval size ∆T. In the following experiment, the complete time interval is extended with each subinterval added, T = P ∆T. The mesh width is ∆x = 2.5 × 10⁻³. Figure 4.3a shows that increasing the number of processors, and hence the simulated time, generally increases the number of iterations required to achieve the same level of accuracy. The increased number of required iterations implies a decrease in the theoretical parallel efficiency, which can be estimated by the ratio K_1/K_P, see (2.26). Next, the final time is kept constant, so that the subinterval size reduces with an increase in the number of processors, ∆T = T/P. Figure 4.3b shows that the number of iterations is then roughly independent of the number of processors, i.e., in this case the parallel efficiency K_1/K_P does not decrease with P. Parallel speedup might also be improved by the fact that the SVD approximation converges faster on smaller subintervals, see Theorem 2.1: fewer singular values have to be retained in the truncated SVD in order to achieve a certain accuracy. Also, the number of samples of the source term could be decreased on smaller subintervals.

4.2. Parallel efficiency. In this section, we test the parallel efficiency of the PEBK method for the viscous Burgers equation. In the following experiments, the source term is chosen such that the solution of the spatially discretized system is equal to the exact solution described in Section 4.1. In other words, the source term accounts for the error of the spatial discretization, and we measure only the error due to the time integration. The spatial discretization is here performed with a mesh width of ∆x = 2 × 10⁻³. If the subinterval size ∆T is fixed, the number of iterations generally increases as the number of processors P increases, see Fig. 4.3. Increasing the number of iterations would reduce the parallel efficiency, according to (2.26). We therefore fix the final time T, such that the subinterval size decreases with increasing P. Also, the total number of samples over [0, T] is fixed, such that the local number of samples decreases with increasing P.
To keep the global distribution of samples constant in the parallel computations, the local sample points are uniformly distributed, instead of the Chebyshev points used in previous experiments. In our experiments, we use ∆T = 0.2/P and s = 128/P. Furthermore, the number of retained singular values is m = 12, and the tolerance of the EBK method is set to 10⁻⁴. The error history for different P is shown in Fig. 4.4. The results show that the convergence rate of the PEBK method can improve by increasing P: the nonlinear corrections, see (2.18), appear to be more effective on smaller subintervals.

As opposed to the linear problem in Section 3.2, communication between the parallel processes is here required because of the waveform relaxation method. The parallel communication overhead is assumed to be very small, so that the parallel computations can be emulated on a serial computer. Per iteration of the PEBK method, the computation time of each individual process is measured, after which the maximum is stored, i.e., the computation time of the slowest process. The total computation time of the PEBK method is then taken as the sum, over the total number of iterations performed, of these maxima. In this experiment, we have measured a total of ten PEBK iterations. The computation times for ν = 10⁻¹ and ν = 10⁻² are shown in Fig. 4.5a, and clearly illustrate a parallel speedup. The slightly higher timings for ν = 10⁻¹ could be attributed to the increased stiffness of the problem. Figure 4.5b shows that the parallel efficiency of the PEBK method steadily decreases with the number of (virtual) processors, P. There is no significant difference between the performance of the method at these two values of the viscosity coefficient. The parallel efficiency might be further tuned in practice, based on the previous observation that the required number of iterations can decrease with higher P. Also, the number of singular values, m, could possibly be reduced on smaller subintervals, based on Theorem 2.1, which would further enhance the potential parallel speedup. The PFASST algorithm shows good parallel efficiency for the viscous Burgers equation as well; it is unknown whether changing the viscosity coefficient has an impact on the performance of PFASST. Our observations point in the direction of a good parallel efficiency of the PEBK method for simulations of the Navier-Stokes equation at high Reynolds numbers, for which there is a high demand for highly efficient parallel solvers. The Parareal algorithm, for example, was reported to perform poorly on advection-dominated problems.

The manufactured solution (4.5) features a large-scale mode with wavenumber one and a smaller-scale mode with wavenumber k_0 > 1. This combination allows the construction of an arbitrarily wide dynamic range. The factor 1/k_0 is included in compliance with an assumed energy distribution |û(k)|² ∝ k⁻², where û(k) is the Fourier transform

    û(k) = ∫∫ u(x, t) exp(−2πik(x + t)) dx dt,   (4.6)

in which we have used the fact that the solution is periodic in space and time. The previous experiment, see Fig. 4.3a, is repeated for the manufactured solution (4.5) with different values of k_0. Figure 4.6 illustrates that widening the spectrum does not significantly affect the convergence of the PEBK iterations. Remarkably, the curves appear to form pairs based on P.

5. Conclusions. We propose an implementation of the Paraexp method with enhanced parallelism based on the exponential block Krylov (EBK) method.
Furthermore, the method, Paraexp-EBK (PEBK), is extended to solve nonlinear PDEs iteratively by a waveform relaxation method. The nonlinear terms are represented by a source term in a nonhomogeneous linear system of ODEs; at each iteration, the source term is updated with the latest solution. The convergence of the iterative process can be accelerated by adding a correction term based on the Jacobian matrix of the nonlinear term. At each iteration, the initial value problem can then be decoupled into independent subproblems, which can be solved parallel in time. Essentially, we implement the Paraexp method within a waveform relaxation approach in order to integrate nonlinear PDEs. Also, the PEBK method is used to integrate both the homogeneous and the nonhomogeneous parts of the subproblems. This is in contrast to the original Paraexp method, which assumes a conventional time integration method for the nonhomogeneous parts.

The PEBK method is tested on the advection-diffusion equation, for which we demonstrate the parallelization concept for linear PDEs. The parallelization also works in cases without diffusion, in which the PDE is purely hyperbolic. The parallel efficiency is compared with a Crank-Nicolson (CN) scheme parallelized with the Paraexp algorithm. The parallel efficiency of the PEBK method remains roughly constant around 90%; the parallel efficiency of the CN/Paraexp combination, on the other hand, steadily decreases with the number of processors.

As a model nonlinear PDE, the viscous Burgers equation is solved. The number of waveform relaxation iterations required for a certain error tolerance increases when the relative importance of the nonlinearity grows by decreasing the viscosity coefficient. Good parallel efficiency of the EBK method was observed for different values of the viscosity coefficient. Since the nonlinear convective term in the Burgers equation is shared by the Navier-Stokes equation, the presented results give a hint of the potential of the PEBK method as an efficient parallel solver in turbulent fluid dynamics, where nonlinearity plays a key role. The question remains how to treat the pressure in the incompressible Navier-Stokes equation, which enforces the incompressibility constraint on the velocity field; possible approaches exist in the literature, and this will be explored in future work.
/* ***********************************************************************
> File Name: ./Tree/UniqueBinarySearchTreesII_95.cpp
> Author: zzy
> Mail: <EMAIL>
> Created Time: Tue 13 Aug 2019 04:48:05 PM CST
********************************************************************** */
#include <stdio.h>
#include <vector>
#include <string>
#include <climits>
#include <gtest/gtest.h>

using std::vector;
using std::string;

struct TreeNode {
    int val;
    TreeNode *left;
    TreeNode *right;
    TreeNode(int x) : val(x), left(NULL), right(NULL) {}
};

class Solution {
public:
    // Build every structurally unique BST whose keys are begin..end.
    vector<TreeNode*> generate_trees(int begin, int end) {
        vector<TreeNode*> all_tree;
        if (begin > end) {
            // Empty range: the only "tree" is the null child.
            all_tree.push_back(nullptr);
            return all_tree;
        }
        for (int i = begin; i <= end; ++i) {
            // i is the root; smaller keys go left, larger keys go right.
            vector<TreeNode*> left_tree = generate_trees(begin, i - 1);
            vector<TreeNode*> right_tree = generate_trees(i + 1, end);
            for (auto l : left_tree) {
                for (auto r : right_tree) {
                    TreeNode* root = new TreeNode(i);
                    root->left = l;
                    root->right = r;
                    all_tree.push_back(root);
                }
            }
        }
        return all_tree;
    }

    vector<TreeNode*> generateTrees(int n) {
        vector<TreeNode*> all_tree;
        if (n == 0) {
            return all_tree;
        }
        return generate_trees(1, n);
    }
};

TEST(testCase, test0) {
}

int main(int argc, char* argv[]) {
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
Optimal linearization trajectories for tangent linear models We examine differential equations where nonlinearity is a result of the advection part of the total derivative or the use of quadratic algebraic constraints between state variables (such as the ideal gas law). We show that these types of nonlinearity can be accounted for in the tangent linear model by a suitable choice of the linearization trajectory. Using this optimal linearization trajectory, we show that the tangent linear model can be used to reproduce the exact nonlinear error growth of perturbations for more than 200 days in a quasigeostrophic model and more than (the equivalent of) 150 days in the Lorenz 96 model. We introduce an iterative method, purely based on tangent linear integrations, that converges to this optimal linearization trajectory.
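A minimal sketch of the tangent linear idea on the Lorenz 96 model mentioned in the abstract (ours, purely illustrative; the paper's optimal linearization trajectory and its iterative method are not reproduced): propagate a small perturbation once through the nonlinear model and once through the tangent linear model linearized about the unperturbed trajectory, then compare the two error-growth estimates.

```python
import numpy as np
from scipy.integrate import solve_ivp

F, n = 8.0, 40  # standard Lorenz 96 forcing and dimension

def l96(t, x):
    # dx_i/dt = (x_{i+1} - x_{i-2}) x_{i-1} - x_i + F, indices periodic
    return (np.roll(x, -1) - np.roll(x, 2)) * np.roll(x, 1) - x + F

def l96_tlm(t, z, x_of_t):
    # Jacobian-vector product of the Lorenz 96 RHS along the trajectory x(t)
    x = x_of_t(t)
    return ((np.roll(z, -1) - np.roll(z, 2)) * np.roll(x, 1)
            + (np.roll(x, -1) - np.roll(x, 2)) * np.roll(z, 1) - z)

x0 = F + 0.01 * np.random.default_rng(0).standard_normal(n)
dx0 = 1e-6 * np.random.default_rng(1).standard_normal(n)
T = 2.0

base = solve_ivp(l96, (0, T), x0, dense_output=True, rtol=1e-10)
pert = solve_ivp(l96, (0, T), x0 + dx0, rtol=1e-10)
tlm = solve_ivp(l96_tlm, (0, T), dx0, args=(base.sol,), rtol=1e-10)

print(np.linalg.norm(pert.y[:, -1] - base.sol(T)),  # nonlinear error growth
      np.linalg.norm(tlm.y[:, -1]))                 # tangent linear prediction
```

For short times and small perturbations the two norms agree closely; the paper's contribution is choosing the linearization trajectory so that this agreement persists far longer than with the unperturbed trajectory used here.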
Identification and Validation of a Novel Immune-Related lncRNA Signature for Bladder Cancer

Purpose
We aimed to construct an immune-related long noncoding RNA (irlncRNA) signature to evaluate the prognosis of bladder cancer patients without requiring the specific expression levels of the individual irlncRNAs.

Methods
The raw transcriptome data were downloaded from The Cancer Genome Atlas (TCGA); irlncRNAs were filtered out using an online immune-related gene database and coexpression analysis, and differentially expressed irlncRNA (DEirlncRNA) pairs were identified by univariate analysis. The areas under the curve (AUC) were compared and the Akaike information criterion (AIC) values of the receiver operating characteristic (ROC) curve were calculated; the optimal model was then constructed to divide bladder cancer patients into high- and low-risk groups using the cut-off point of the ROC curve. We evaluated the model from multiple perspectives, such as survival time, clinicopathological characteristics, immune cell infiltration, chemotherapeutic efficacy and immune checkpoint inhibitors.

Results
Fourteen DEirlncRNA pairs were included in this signature. Patients in the high-risk group demonstrated apparently shorter survival times, more aggressive clinicopathological characteristics, a different immune cell infiltration status, and lower chemotherapeutic efficacy.

Conclusion
The irlncRNA signature demonstrated promising predictive value for bladder cancer patients and is important in guiding clinical treatment.

INTRODUCTION

Bladder cancer is a common malignant neoplasm, with 81,190 new cases and 17,240 deaths in the USA in 2018; worldwide there are more than 700,000 people living with the disease, and it leads to approximately 150,000 deaths per year. The main risk factor for bladder urothelial carcinoma (UC), which accounts for approximately 90% of all bladder cancers, is tobacco smoking; patients with a history of smoking have a 2.5-fold elevated risk compared to non-smokers. Because the impact of tobacco on the development of this disease lags by decades, regions with high incidence rates generally had a high smoking prevalence 20-30 years ago, including the US, Spain and other developed countries. Immune checkpoint inhibitors (ICIs), which can prevent the evasion of the immune system and the proliferation of cancer cells, have revolutionized the treatment strategy for UC. Recent studies showed that the response to ICIs can be influenced by differences in immune cell infiltration, which affects the extent to which cancers escape or evade the immune system. Cisplatin has been approved by the Food and Drug Administration (FDA) for the systemic treatment of bladder cancer for over fifty years, and most clinical guidelines recommend that patients with muscle-invasive bladder cancer receive neoadjuvant cisplatin-based chemotherapy. Docetaxel has been proven to possess good antitumor activity in UC, whether used as a single agent or in combination with other chemotherapeutic agents, and it provides a salvage therapy when immunotherapy or checkpoint inhibitors are unsuitable for patients. Long noncoding RNAs (lncRNAs), RNAs longer than 200 nt, account for approximately 80% of the human transcriptome. They are mostly located in the nucleus and regulate gene expression via epigenetic, transcriptional and posttranscriptional regulation.
Thus, some lncRNAs have the potential to act as biomarkers and therapeutic targets for many types of cancers, including UC. In recent years, lncRNAs have been proven to contribute to cancers through genomic or transcriptomic alterations, and they are able to affect the immune microenvironment because lncRNAs can drive tumor immune cell infiltration by regulating the expression of genes related to immune cell activation or cell lineage development. Recent studies have shown that immune infiltration signatures are promising tools to diagnose, evaluate, and treat a variety of cancers, and lncRNAs play a significant role in the construction of these signatures. A recent study proved that an 11-lncRNA signature was a novel and significant prognostic factor for breast cancer. A 7-lncRNA signature associated with tumor immune infiltration has been identified via computational immune and lncRNA profiling analysis, and this signature was considered a predictive biomarker of ICI responses among non-small-cell lung cancer patients. Additionally, Zhang et al. established a 10 immune-related lncRNA signature associated with hepatocellular carcinoma (HCC) progression and prognosis for the prediction of survival in HCC. A recent study provided a new immune gene-related lncRNA signature for distinguishing glioma groups, as well as for the diagnosis and treatment of glioma.

Retrieve Transcriptome Data and Identify Immune-Related Differentially Expressed lncRNAs

The raw transcriptome data of transitional cell papilloma and carcinoma types of bladder cancer were downloaded from TCGA (https://tcga-data.nci.nih.gov/tcga/). Afterwards, the raw data were annotated, and the lncRNAs were separated from the mRNAs using the human gene transfer format (GTF) files obtained from Ensembl (http://asia.ensembl.org). The ImmPort online database offers a list of immune-related genes; screening criteria of correlation coefficients > 0.6 and p < 0.001 were set to identify irlncRNAs via coexpression analysis in R Studio. Afterwards, the differentially expressed irlncRNAs between bladder cancer and normal paracarcinoma tissues were identified with the limma R package using the filters |logFC| > 1 and FDR < 0.05.

Construct DEirlncRNA Pairs

We compared the expression levels within each pair of the DEirlncRNAs obtained previously and constructed a 0-or-1 matrix with the criterion that if the expression level of lncRNA A is higher than that of lncRNA B, the pair is assigned a value of 1; otherwise, the value is 0. The resulting 0-or-1 matrix was then further screened using the survival R package with p < 0.01 as the filter (this code is available on request). If an lncRNA pair contained any lncRNA with no expression quantity (meaning the pair had no value for predicting survival outcomes), the pair was filtered out. For a pair to be informative, the samples in which it took the value 0 (or 1) had to account for at least 20% of all samples; otherwise the pair could not be used to construct the risk model.

Retrieve Related Clinical Information of Patients

We downloaded the clinical information of the bladder cancer patients in TCGA, and cases without complete clinical data, such as survival time, were removed.
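The pairwise 0-or-1 encoding described above is straightforward to implement. The sketch below shows one way to build the pair matrix and apply the 20% informativeness filter; note that the paper's analysis used R, while this is a Python illustration with a hypothetical input matrix and made-up names.

import itertools
import numpy as np
import pandas as pd

# Hypothetical input: rows = irlncRNAs, columns = tumor samples.
expr = pd.DataFrame(
    np.random.lognormal(size=(5, 100)),
    index=[f"lncRNA_{i}" for i in range(5)],
)

pairs = {}
for a, b in itertools.combinations(expr.index, 2):
    # Pair value is 1 where lncRNA A is expressed above lncRNA B, else 0.
    v = (expr.loc[a] > expr.loc[b]).astype(int)
    frac_ones = v.mean()
    # Keep only informative pairs: both the 0s and the 1s must each cover
    # at least 20% of samples, otherwise the pair cannot stratify patients.
    if 0.2 <= frac_ones <= 0.8:
        pairs[f"{a}|{b}"] = v

pair_matrix = pd.DataFrame(pairs)  # samples x informative pairs
print(pair_matrix.shape)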
Calculate the Patients' RiskScore With a Novel Risk Model

First, after the univariate analysis, a LASSO regression with 10-fold cross-validation was performed 1,000 times (with p = 0.05 and 1,000 random resamplings in each run). We counted the number of occurrences of each irlncRNA pair in the LASSO regressions; pairs with a frequency greater than 100 were further subjected to univariate and multivariate Cox hazard analyses, and the fourteen lncRNA pairs with p < 0.05 in the multivariate Cox hazard analysis were used to construct the irlncRNA model. An area under the curve (AUC) value was calculated for each model, and each curve was drawn. We obtained the maximum, ideal AUC value when the curve reached its peak; the program was then terminated, and the corresponding model was considered the most ideal. We plotted 1-, 3- and 5-year receiver operating characteristic (ROC) curves and calculated their AUC values. The RiskScore was calculated with the following formula: $\mathrm{RiskScore} = \sum_{i=1}^{k} \beta_i S_i$, where $\beta_i$ is the coefficient of pair $i$ from the multivariate Cox analysis and $S_i$ is the 0-or-1 value of that pair. Patients whose RiskScore in the one-year ROC curve was higher than the turning point (identified by calculating the Akaike information criterion values) were regarded as having a higher risk of dying within 5 years and were classified into the high-risk group; otherwise, patients were classified into the low-risk group and were more likely to live longer than 5 years.

Verify the irlncRNA Model in Clinical Conditions

A Kaplan-Meier analysis was used to compare the survival times of patients between the two groups, in order to validate the cut-off point. We used R tools to visualize the survival curves and risk scores of every patient. For further clinical use of this model, we used band diagrams to show the results of the chi-square tests, which explored the underlying connection between the clinicopathological characteristics and the model that we constructed. The risk score differences among the different clinicopathological characteristics were calculated via the Wilcoxon signed-rank test, and we showed the results graphically via box diagrams. Univariate and multivariate Cox regression analyses were performed to prove that the irlncRNA model was a valuable model to independently predict the prognosis of bladder cancer patients, and the results were illustrated via forest maps. R packages, including survival, ggplot2 and pheatmap, were used to complete these analyses.

Explore the Relevance Between Immune Cells and RiskScore

The immune infiltration status of all of the samples retrieved from TCGA was calculated by using 7 methods, including XCELL, TIMER, QUANTISEQ, CIBERSORT, CIBERSORT-ABS, EPIC and MCPcounter, to explore the relationship between immune-cell characteristics and risk. A series of Wilcoxon signed-rank tests were performed to compare the infiltrating immune cell content between the low- and high-risk groups, and box charts were used to show the results. We also explored whether the risk scores of the patients were closely related to the infiltrating immune cells by calculating the Spearman rank coefficient, and we used lollipop diagrams to visualize the results. The threshold was set at p < 0.05.
Identify the Relationship Between Chemosensitivity and the Risk Model

We used the half-maximal inhibitory concentration (IC50) of chemotherapeutic drugs, including gemcitabine, gefitinib, cisplatin and docetaxel, in the clinical cases from TCGA as a reference value to assess the constructed model for predicting the chemotherapeutic prognosis of clinical bladder cancer. Wilcoxon signed-rank tests were performed to investigate the difference in IC50 between the low- and high-risk groups, and the results are illustrated via box diagrams.

Analyses of the Expressed Immunosuppressive Molecules Related to ICIs

To explore whether our risk model was significantly related to the expression levels of ICI-related biomarkers, we visualized the results in violin plots using the ggstatsplot R package.

Retrieve Transcriptome Data and Identify Immune-Related Differentially Expressed lncRNAs

We downloaded the transcriptome profiling data of bladder cancer from TCGA, including 19 normal paracarcinoma samples and 411 tumor samples. We also downloaded GTF files from Ensembl to convert Ensembl IDs to gene symbol IDs, and we performed a coexpression analysis between lncRNAs and immune-related genes (irgenes). In total, 315 irlncRNAs were identified, 116 of which were considered differentially expressed irlncRNAs (DEirlncRNAs); 23 of these were downregulated and 93 were upregulated (Table S1, Figures 1A, B).

Construct DEirlncRNA Pairs

From the 116 DEirlncRNAs identified with the limma R package in R Studio, 4,561 pairs were constructed. A total of 467 pairs were regarded as valid, and a univariate Cox analysis was performed. Fourteen pairs (p < 0.05) were used to construct a multivariate Cox model using stepwise regression (Figures 2A, B). Afterwards, we constructed the ROC curve of the 14 irlncRNA pairs; its AUC of 0.780 was used to identify the most valuable lncRNA pairs for constructing the most satisfactory risk model. We calculated that the maximum cut-off point (the maximum inflection point) on the 5-year ROC curve was 2.373 (Figure 3A). In addition, the AUC values for the one-, three- and five-year ROC curves were 0.780, 0.828 and 0.856, respectively (Figure 3B). Additionally, ROC curves including the 5-year curve and other clinical characteristics were drawn to compare the optimality of our model, and the AUC value of the risk score was much higher than that of the others (Figure 3C). The clinical data of 409 bladder cancer cases were retrieved from TCGA, and 400 cases were usable after removing the samples with incomplete data. The cut-off point (2.373) of the risk score divided all of the samples into two groups: a high-risk and a low-risk group.

Verify the irlncRNA Model in the Clinic

After distinguishing the groups, 89 cases were considered high-risk and 311 cases low-risk. Two diagrams illustrate the risk scores and survival times of all of the samples (Figure 4A). More patients died, and the survival time apparently decreased, as the risk score increased. The Kaplan-Meier analysis and the corresponding survival curves showed that patients with low RiskScores lived much longer than patients with high RiskScores (p < 0.001) (Figure 4B). Additionally, the analysis and curves showed that almost all of the patients with high RiskScores lived fewer than 5 years, whereas approximately 50% of the patients in the low-risk group remained alive at that point.
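The scoring and group comparison above amount to a weighted sum followed by a Kaplan-Meier/log-rank analysis. The paper used R's survival package; a minimal Python equivalent with the lifelines library is sketched below. The pair matrix, coefficients and survival times are synthetic stand-ins, and only the cut-off 2.373 is taken from the text.

import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test

rng = np.random.default_rng(0)
# Hypothetical 0-or-1 pair matrix (400 patients x 14 pairs) and Cox coefficients.
S = pd.DataFrame(rng.integers(0, 2, size=(400, 14)),
                 columns=[f"pair_{i}" for i in range(14)])
beta = pd.Series(rng.normal(0.8, 0.4, size=14), index=S.columns)

risk_score = S.mul(beta, axis=1).sum(axis=1)          # RiskScore = sum_i beta_i * S_i
group = np.where(risk_score > 2.373, "high", "low")   # cut-off from the text

# Synthetic survival data: shorter survival for the high-risk group.
time = np.where(group == "high",
                rng.exponential(20, 400), rng.exponential(60, 400))
event = rng.integers(0, 2, 400)

kmf = KaplanMeierFitter()
for name in ["high", "low"]:
    m = group == name
    kmf.fit(time[m], event[m], label=name)
    print(name, "median survival:", kmf.median_survival_time_)

hi, lo = group == "high", group == "low"
print("log-rank p:", logrank_test(time[hi], time[lo], event[hi], event[lo]).p_value)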
To explore the relationship between the clinicopathological characteristics and the risk of bladder cancer, we performed a set of chi-square tests covering age, sex, grade, stage and risk scores. The strip charts show the overall results; age, tumor grade and tumor stage exhibit extremely close relationships with risk (Figure 5A). Additionally, age (Figure 5B), tumor grade (Figure 5C) and tumor stage (except stage I) (Figure 5D) exhibit extremely close relationships with risk, whereas males and females had the same risk (Figures 5E-G).

Estimating Tumor-Infiltrating Immune Cells and Immunosuppressive Molecules With a Risk Assessment Model

Because the irlncRNA model was built from immune-related genes, we then explored the relevance of this model to the immune microenvironment. A series of Wilcoxon signed-rank tests indicated that more immune cells, including fibroblasts, endothelial cells, monocytes, macrophages and neutrophils, infiltrated the tumor microenvironment in patients with a higher RiskScore, whereas the low-risk group was negatively associated with activated myeloid dendritic cells, CD4+ T cells and eosinophils (Figure S1). Some cell types whose sample sizes were too small or that showed opposite results in different databases were removed. The diagram illustrating the result of the Spearman correlation analysis is summarized in Figure 6A. Additionally, we explored whether our model was relevant to ICI-related gene expression and found that the high-risk group had a positive correlation with a high expression level of TNFRSF9 (p < 0.05) (Figure 6B), whereas CTLA4 did not exhibit a significant difference (Figure 6C). One major reason for this may be the small sample size.

Identify the Relationship Between Chemosensitivity and the Risk Model

To investigate whether the efficacy of some frequently used chemotherapeutics was associated with risk, we compared common drug sensitivities (represented by IC50) between patients with low or high risk scores. The results showed that high-risk patients had a higher IC50 for gefitinib (p = 0.0091) (Figure 6D), and a lower IC50 for cisplatin (p = 0.00052) (Figure 6E) and for docetaxel (p < 0.0001) (Figure 6F), whereas there was no significant difference for gemcitabine (p = 0.21) (Figure 6G). This suggests that the irlncRNA model could be used as a predictor of chemosensitivity.

DISCUSSION

Most recent studies have focused on establishing signatures of noncoding RNAs (rather than protein-coding RNAs) to predict the prognosis of patients with malignancies. Motivated by the significance of irlncRNAs, we attempted to establish an applicable signature of DEirlncRNA combinations to investigate potential functions regarding the immunotherapy response in this study. First, we identified immune-related genes by processing raw data retrieved from TCGA and subsequently performed a coexpression analysis, as well as a differential expression analysis, to classify the irlncRNAs. Second, after screening with an iteration loop and a 0-or-1 matrix, a univariate Cox regression analysis (combined with a multivariate Cox regression analysis) was used to determine the DEirlncRNA signature. We also used an independent prognostic analysis to incorporate the risk scores and other clinical parameters to validate whether these indicators were capable of independently distinguishing the outcome.
After discriminating the high- and low-risk groups by the cut-off value of the risk scores, we calculated the AUC values of the ROC curves at 1, 3 and 5 years to validate the candidate signature, as well as to evaluate the survival outcome. Consequently, we investigated the correlation between differences in the RiskScore under this novel signature and several common clinicopathologic features. Variances in intratumoral immune infiltrating cells have a profound impact on the treatment responses to immune checkpoint inhibitors. To investigate the relationship between the RiskScore and immune infiltrating cells, we used several commonly known approaches to calculate the status of the immune infiltrating cells. Based on the immune microenvironment analysis, it is reasonable to infer the personalized therapeutic benefit from chemotherapy and immunotherapy for each patient through the use of immune scores. In addition, previous studies have indicated that modulating the immune microenvironment may be essential for improving the radiotherapy-induced antitumor response. Luo et al. reported that tumor mutation burden (TMB) was associated with the infiltration of activated CD4(+) memory T cells in the immune microenvironment. Our signature also indicated that a high-risk score was associated with a higher sensitivity to chemotherapeutic drugs such as cisplatin and docetaxel, with no significant difference for gemcitabine, but with a lower sensitivity to gefitinib. The signature was significantly associated only with ICI-related biomarkers such as TNFRSF9, and not with CTLA4, LAG3, HAVCR2 or PDCD1, suggesting that the role of immunotherapy in the treatment of bladder cancer remains limited. We believe the specific mechanisms and biomarkers should nevertheless be identified and validated, given the different subtypes of immune infiltrating cells and the immune-related functional phenotypes in bladder cancer. Our algorithm helped us identify DEirlncRNAs and construct the most significant irlncRNA pairs. We could detect the pairs with higher or lower expression rather than examining the exact expression level of every irlncRNA. In addition, the lncRNAs we identified were associated with immune genes, which remodel the immune microenvironment and activate immune cells. Thus, our signature has the advantage of clinical practicability in distinguishing high- from low-risk bladder cancer patients. To obtain a more accurate risk prediction for patients, a modified method, LASSO-penalized modeling, was performed. In addition, every AUC value used to identify the best model was compared with other commonly used clinical parameters, and AIC values were used to obtain the best cut-off point for model fitting rather than simply splitting at the median value. We also re-evaluated the survival outcome and analyzed the efficacy of some chemotherapeutics, tumor infiltration and ICIs to prove that our signature worked well. However, there were some limitations to our study. In particular, the data in the TCGA project were relatively insufficient for identifying the initial irlncRNAs, and datasets from another independent database would be required. The constructed signature therefore requires validation with an external database, because expression levels vary from case to case.
Unfortunately, we could not validate the entire signature and the survival outcomes because we failed to retrieve suitable GEO datasets. Consequently, we plan to collect clinical samples for RNA-seq to verify our risk model in future experiments, as well as to establish more reliable clinical connections for this novel signature. Moreover, patients could be stratified by combining the RiskScore with other characteristics to achieve more accurate and personalized prognostic judgements. A variety of methods were used to confirm the novel algorithm with which we established our signature, so we believe our signature is meaningful in spite of the lack of further validation; nevertheless, external validation would be beneficial.

CONCLUSION

LncRNAs have been proven to possess good prognostic value and can be potential therapeutic targets for many malignant tumors. In this study, we constructed a novel 14-irlncRNA-pair signature to evaluate the prognosis of bladder cancer patients. Patients could be divided into high- and low-risk groups by our signature, and patients in the high-risk group had a much shorter survival time. This signature showed obviously better prognostic value than other clinicopathological characteristics, such as age, tumor stage and tumor grade. Additionally, the patients' sensitivities to chemotherapeutic drugs and the infiltration of immune cells in the tumor microenvironment showed significant differences between the high- and low-risk groups. In summary, our novel signature may be a valuable predictor of the prognosis of bladder cancer patients and could guide chemotherapy drug selection in clinical settings in the future. However, verification in further experiments or other datasets would also be valuable for this irlncRNA signature.

DATA AVAILABILITY STATEMENT

The original contributions presented in the study are included in the article/Supplementary Material. Further inquiries can be directed to the corresponding author. |
/**
* Classes to manage database dialects, used internally.
*/
package org.javalite.activejdbc.dialects; |
Jets as precision probes in electron-nucleus collisions at the Electron-Ion Collider

We discuss the prospects of using jets as precision probes in electron-nucleus collisions at the Electron-Ion Collider. Jets produced in deep-inelastic scattering can be calibrated by a measurement of the scattered electron. Such electron-jet "tag and probe" measurements call for an approach that is orthogonal to most HERA jet measurements as well as previous studies of jets at the EIC. We present observables such as the electron-jet momentum balance, azimuthal correlations and jet substructure, which can help constrain the parton transport coefficient in nuclei. We compare simulations and analytical calculations and provide estimates of the expected medium effects. Implications for detector design at the EIC are discussed.

I. INTRODUCTION

The Electron-Ion Collider (EIC) will be the first electron-nucleus (e-A) collider and will produce the first jets in nuclear deep-inelastic scattering (DIS). Jet measurements can extend traditional semi-inclusive DIS (SIDIS) to elucidate parton-nucleus interactions, the 3D structure of nuclei, and the parton-to-hadron transition, which are among the physics goals of the EIC. Most studies discussed in the EIC white paper are based on single-hadron measurements. But since 2011, a wide range of jet observables have been developed for the study of the quark-gluon plasma (QGP) at RHIC and the LHC. Jet measurements yield a better proxy to parton kinematics than hadrons and are easier to interpret because they avoid the need for fragmentation functions. Moreover, modern jet substructure techniques offer new methods to explore QCD dynamics and control nonperturbative effects. Jet studies at the EIC have been proposed to measure unpolarized and polarized parton distribution functions (PDFs) of the proton and photon, along with the gluon and quark polarization, spin-orbit dynamics, nucleon transverse-momentum-dependent (TMD) PDFs, generalized parton distributions, gluon saturation and fragmentation in nuclei. We focus on tagged jets as precision probes of the nucleus via electron-jet correlations, as recently described in Ref. Despite the success of QCD in describing the strong interaction, the physics of parton interactions with QCD matter is not fully understood, as not everything can be calculated perturbatively. This is true both for the "hot" QCD matter produced in high-energy nucleus-nucleus collisions and for the "cold" QCD matter probed via jet production in pp, p-A, γ-p and e-A collisions. Consequently, much of the theoretical work over the last two decades on the QGP provides a basis to build upon at the EIC, which will unleash the precision era of QCD in nuclei. Naturally, the experiments at HERA, the first and only electron-proton collider, stand as a reference for EIC jet measurements. We propose an approach different from that used for most jet measurements at HERA. Focusing on electroproduction in DIS, this work also differs from recent work by Aschenauer et al. that focuses on jet photoproduction and gluon-initiated processes in e-p collisions. We study DIS jet production, e+A → e + jet + X, for event-by-event control of the kinematics (x, Q²) that constrain the struck-quark momentum. We refer to this approach as electron-jet "tag and probe" studies. We identify several physics goals and approaches to realize them.
The paper is organized as follows: in Section II we describe the requirements and some experimental implications of the "tag and probe" measurements with electron-jet correlations; in Section III we describe the Pythia8 simulation and the basic kinematic distributions of jet production; in Section IV we describe key observables with projected rates; in Section V we discuss implications for EIC detectors; and we conclude in Section VI.

II. REQUIREMENTS FOR TAG AND PROBE STUDIES

In heavy-ion collisions, jets serve as "auto-generated" probes because they are produced in initial partonic hard scatterings prior to the formation of the QGP. As with any probe, its power relies on its calibration. In hadronic collisions, nature provides "auto-calibrated" processes such as γ-jet and Z-jet production. The mean free path of electroweak bosons in QCD matter is large whereas the jet interacts strongly, so coincidence measurements are a powerful way to constrain kinematics and systematically explore jet quenching in the QGP. Analogously, the virtual photon and the struck quark balance in DIS at leading order (eq → eq). We propose to use this process as a "tag and probe" to study quark-nucleus interactions, as illustrated in Figure 1 for a proton target. This approach differs from inclusive DIS, where the electron is considered the probe; our probe is the struck quark instead. Its color charge makes it suitable for studying QCD in nuclei. Unlike in hadronic collisions, the electron is a fundamental particle and carries no color charge, which simplifies the theory and provides a cleaner experimental environment suitable for accurate jet measurements. DIS offers a nearly pure quark-jet sample with little background from the underlying event. The nucleus has a high density of gluons at low temperature, and it does not become highly excited in the collision. Consequently, the most challenging aspects of studying parton-QCD matter interactions in heavy-ion or proton-nucleus collisions do not apply in this case. The basic requirements for "tag and probe" studies include:

1. Kinematics such that the leading-order DIS process dominates.
2. Event kinematics constrained by the electron measurement only.
3. The jet must be matched to the struck quark by separating it from the beam remnant.

We explore the implications of each of these requirements in turn. Initially, satisfying requirement 1 may appear straightforward. After all, the leading-order (LO) DIS diagram (γ*q → q) is a pure electroweak process, whereas higher-order DIS processes such as photon-gluon fusion (γ*g → qq̄) or gluon bremsstrahlung (γ*q → qg) are suppressed by α_s. However, almost all jet studies at HERA suppressed the LO process by using the Breit frame, in which the γ* points toward the positive z-direction with 3-momentum magnitude Q. In LO DIS, the struck quark flips its momentum from an incoming −Q/2 to an outgoing +Q/2 in the z-direction, which is why the Breit frame is known as the "brick-wall frame". The LO DIS process produces a jet with zero transverse momentum, p_T^jet, in the Breit frame, modulo the intrinsic transverse momentum of the quarks and gluon radiation. Due to higher-order emissions, jets can pass that selection because multiple jets can balance each other's p_T^jet with respect to the γ* direction. The typical requirement of p_T^jet > 4 GeV/c used at HERA effectively suppresses the LO DIS contribution, which was called "quark-parton model background", and provides sensitivity to the gluon PDF and the strong coupling constant α_s.
The choice of reference frame is not trivial; one cannot simply transform jet cross-section results presented in the Breit frame to another frame because of the minimum p_T^jet cut typically imposed. This cut ensures that theoretical calculations, which require a scale related to the jet itself in addition to the Q² of the event, are in a regime where perturbative calculations converge. In this work we show that jets with low p_T^jet in the Breit frame are not only measurable and calculable, but offer a crucial tool at the EIC. Instead of the Breit frame, we present results in the laboratory frame. Recent work by Liu et al. showed that the use of the e-A center-of-mass (CM) reference frame, which is related to the lab frame by a simple rapidity boost in the beam direction, provides a clear way to connect e-A results to hadron colliders. See also Ref. We address requirement 1 by analyzing jets in the laboratory frame, which is dominated by the LO DIS process. Higher-order DIS processes are still present, but they can be taken into account by using e-p collisions as a baseline when studying e-A collisions. Moreover, NNLO calculations show that the contributions from photon- and gluon-initiated processes are at the level of a few percent for Q² > 25 GeV². Considering requirement 2, we note that the measurement of the scattered electron defines inclusive DIS and thus will likely drive the design of the EIC detectors. However, the energy and angular resolution of the scattered electron translates to a relative resolution of x with a prefactor of 1/y; this follows from the relation x = Q²/(ys). Consequently, the resolution of x diverges as y → 0. The limitation of the "electron method" to constrain x and Q² was bypassed at HERA by using methods that rely on the hadronic final state, such as the Jacquet-Blondel method. Using the Jacquet-Blondel method would not work for "tag and probe" studies, as it would amount to calibrating the jet probes with themselves. Consequently, the need to determine the kinematics purely from the scattered electron limits the ability to use low-y events. Given detector response projections such as those presented in Ref., we note that even for electron measurements with a combination of a tracker and a crystal calorimeter (with zero constant term and a 2% stochastic term for η < −2), the resulting resolution in x deteriorates rapidly for values of y < 0.1. We therefore conclude that the tag and probe method requires events with y > 0.1. The exact value of the y cut can be optimized based upon the actual detector performance. We identify the kinematic selection criteria needed to meet requirement 3 and present the results in Section III D, after we introduce our simulations and show the kinematic distributions of jets expected at the EIC in the next section.

III. SIMULATIONS

We use Pythia8 to generate neutral-current DIS events in e-p collisions with energies of 20 GeV for the initial-state electron and 100 GeV for the proton, resulting in a center-of-mass energy of √s = 89 GeV. While proton beam energies of up to 250 GeV are considered in the EIC designs, the per-nucleon energy of the nuclear beams is reduced by a factor of Z/A, which is ≈ 0.4 for heavy nuclei. We select particles with p_T > 250 MeV/c and |η| < 4.5 in the lab frame, excluding neutrinos and the scattered electron (which we identify as the highest-p_T^e electron in the event). We follow the HERA convention for the coordinate system throughout this paper: the z direction is defined along the beam axis, the electron beam goes toward negative z, and the pseudorapidity is defined as η = −ln tan(θ/2), where the polar angle θ is defined with respect to the proton (ion) direction.
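A generator setup of this kind maps onto a handful of Pythia8 settings. The sketch below uses the Pythia8 Python bindings (assumed to be installed) with the string settings of the standard Pythia8 DIS example; it is an illustration of the configuration described above, not the authors' actual steering code.

import pythia8  # assumes the Pythia8 Python bindings are available

pythia = pythia8.Pythia()
# Beam setup: 100 GeV proton on 20 GeV electron (frameType 2 = unequal energies).
pythia.readString("Beams:frameType = 2")
pythia.readString("Beams:idA = 2212")
pythia.readString("Beams:idB = 11")
pythia.readString("Beams:eA = 100.")
pythia.readString("Beams:eB = 20.")
# Neutral-current DIS via photon/Z exchange, with a minimal-Q^2 phase-space cut.
pythia.readString("WeakBosonExchange:ff2ff(t:gmZ) = on")
pythia.readString("PhaseSpace:Q2min = 1.")
# Treat the electron as an unresolved beam (no lepton PDF).
pythia.readString("PDF:lepton = off")
pythia.init()

n_generated = 0
for _ in range(1000):
    if pythia.next():
        n_generated += 1
print("generated events:", n_generated)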
The asymmetry of the beam energies creates a boost of the e-A center-of-mass frame relative to the laboratory frame given by η_lab = η_CM + 0.5 ln(E_p/E_e) = η_CM + 0.80 for the kinematics considered here. We use the FastJet 3.3 package to reconstruct jets with the anti-k_T algorithm and R = 1.0. For most studies we use the standard recombination scheme ("E-scheme"), where the jet clustering just combines 4-vectors, but we also present some results with the "winner-take-all" (WTA) scheme, where the jet axis is aligned with the more energetic branch in each clustering step. Our choice of the distance parameter R = 1.0 follows the HERA experiments, where it was found that this large value reduces hadronization corrections for inclusive jet spectra to the percent level. At the EIC, smaller R values might help to tame power corrections for jet substructure observables, which we leave for future work; see also Ref. Pythia8 uses leading-order matrix elements matched with the showering algorithm and the subsequent hadronization. For DIS, Pythia8 relies on the DIRE dipole shower to generate higher-order emissions. Our simulations do not include QED radiative corrections or detector response. Initial- and final-state QED radiative corrections "smear" the extracted x or Q² from the measured electron angle and momentum with respect to the Born-level values. We select observables that minimize the sensitivity to radiative corrections, and we further reduce radiative effects in three ways: we require inelasticity y < 0.85, which removes the most sensitive phase space; we construct ratios of cross sections (semi-inclusive DIS jet cross sections over the inclusive DIS cross section); and we bin in p_T^e. The p_T^e variable is insensitive to initial-state QED radiation and has reduced sensitivity to collinear final-state radiation. Moreover, ratios between measurements in e-A and e-p data will further suppress the impact of radiative corrections. We use the EPPS16 nuclear PDFs for the Pb nucleus to approximate hard scatterings in e-A collisions with our e-p sample. Of course, the underlying event in e-A is not simulated in this approach. However, due to the absence of multi-parton interactions in DIS, the underlying event is expected to be small compared to p-A collisions. We do not include the impact of Fermi motion in our simulations, which is only relevant for the very high-x region. We require Q² > 1 GeV², an invariant mass of the hadronic final state W² > 10 GeV², and an event inelasticity of 0.1 < y < 0.85. The lower inelasticity limit avoids the region where one cannot constrain the event kinematics with the electron (as discussed in Section II), whereas the upper limit avoids the phase space in which QED radiative corrections are large. We do not simulate photoproduction processes, which are defined by Q² ≈ 0. The photoproduction process is similar to jet production in hadron collisions, which includes all the complications we aim to avoid, as well as sensitivity to the relatively poorly known photon PDFs. Therefore, photoproduction of jets is a background for this study, and it can be reduced to a negligible level by requiring large values of Q².
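The jet reconstruction settings described above map directly onto the FastJet interface. A minimal sketch using the scikit-hep Python bindings follows (assumed available; the C++ API is essentially identical, and the exposure of the WTA_pt_scheme enum in the Python bindings is an assumption). The input four-vectors are toy values.

import fastjet  # scikit-hep Python bindings for FastJet (assumed)

# Toy final-state particles as (px, py, pz, E) four-vectors.
particles = [
    fastjet.PseudoJet(1.2, 0.3, 10.0, 10.1),
    fastjet.PseudoJet(0.9, -0.2, 8.0, 8.1),
    fastjet.PseudoJet(-0.3, 0.1, 2.0, 2.1),
]

# anti-kT, R = 1.0, default E-scheme recombination (sums four-vectors).
jet_def_e = fastjet.JetDefinition(fastjet.antikt_algorithm, 1.0)
# Same algorithm with the winner-take-all pT recombination scheme.
jet_def_wta = fastjet.JetDefinition(fastjet.antikt_algorithm, 1.0,
                                    fastjet.WTA_pt_scheme)

for name, jd in [("E-scheme", jet_def_e), ("WTA", jet_def_wta)]:
    cs = fastjet.ClusterSequence(particles, jd)
    jets = fastjet.sorted_by_pt(cs.inclusive_jets(4.0))  # p_T^jet >= 4 GeV/c
    for j in jets:
        print(f"{name}: pT = {j.pt():.2f} GeV/c, eta = {j.eta():.2f}")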
We simulate 10⁷ events to ensure the statistical precision of the Monte Carlo simulation. The projected rates correspond to an integrated luminosity of 10 fb⁻¹, which can be collected in a few months of e-p running. While the cross sections for hard processes in e-A are higher by a factor of A, the luminosity expected for ions is smaller by approximately a factor of A, leading to similar rates for e-A and e-p collisions at the EIC. Figure 2 shows the expected yield of electrons and jets for 10 fb⁻¹ of integrated luminosity as a function of p_T in the lab frame. The p_T in the lab frame is equivalent to the p_T in the electron-nucleon center-of-mass frame, as it is invariant under boosts in the longitudinal direction. In addition, we apply a cut on the azimuthal angle between the electron and the jet, |Δφ| < 0.4, where Δφ = |φ^jet − φ^e| − π, which suppresses jets arising from the fragmentation of the beam remnant, as we will show in Section III D.

A. Differential cross section and event kinematics

The transverse momentum spectra reach up to p_T ≈ 35 GeV/c. The electron and jet distributions generally agree well, since only a single jet is produced in DIS. This is not the case at low p_T, where α_s is larger and parton branching processes/out-of-jet emissions generate low-p_T^jet jets that do not pass the selection criteria. In addition, hadronization effects become more important at low p_T^jet. Collecting 10 fb⁻¹ of data would yield statistical uncertainties at the sub-percent level. Of course, this depends on detector acceptance, efficiencies, and triggering. The high luminosity of the EIC will allow for a comparison of several different nuclei, along with the detailed studies required to constrain systematic uncertainties. The electron transverse momentum and rapidity are not variables commonly used to characterize the event kinematics in DIS, but they are closely related to Q² and x; the precise relations involve η_e, the pseudorapidity of the electron in the electron-nucleon CM frame, and the Mandelstam variables t̂ and ŝ. Figure 3 shows the p_T^e and x distributions for events passing the cuts listed above. The observed "strip" is the result of the inelasticity selection. In particular, events with low Q²/high x yield low inelasticity (y = Q²/(sx)), which is removed by our requirement y > 0.1. Nevertheless, we obtain a wide coverage in x with jets, spanning the shadowing, anti-shadowing and EMC regions in e-A collisions. While these regions have been studied before in inclusive DIS and SIDIS in fixed-target experiments, the EIC energies will allow the measurement of jets over a wide range of Q². Figure 4 shows the jet pseudorapidity and energy in the lab frame. The exact shape of the distribution is due to the inelasticity selection, the asymmetric nature of the collision, and the rapidity boost of Δη ≈ 0.8 due to the different beam energies. The jet energy at mid-rapidity (η_jet ≈ 0) is limited to ≈ 30 GeV, whereas in the backward direction it reaches only about ≈ 20 GeV, as it is limited by the electron beam energy. On the other hand, jets with energies in the range 50-100 GeV are produced in the forward direction (η_jet > 1.0). Figure 5 shows the number of particles in the jets as a function of p_T^jet for charged particles, photons from the decay of neutral mesons, and neutral hadrons. There is a gradual increase with p_T^jet. We checked that there is no significant change with the pseudorapidity of the jet within the range |η_jet| < 3.0. Therefore, the particle multiplicity does not depend on the jet energy, but only on its p_T^jet.
We also find no Q² dependence within 1-1000 GeV².

C. Number of jet constituents

While jet algorithms can in principle "find" jets with low transverse momentum, which may contain only very few particles, the question is whether useful information can be extracted from these "mini-jets". The answer depends on the observable under consideration and requires a comparison to perturbative QCD calculations, including QCD scale uncertainty estimates, which increase at low p_T^jet. While a generic cut on particle multiplicity or transverse momentum is somewhat arbitrary, we follow the precedents set by experiments at HERA and RHIC and require p_T^jet ≥ 4 GeV/c.

D. Separation of struck-quark and beam-remnant fragmentation

As noted in requirement 3 in Section II, using the struck quark as a tagged probe requires kinematic cuts to select jets arising from that quark. One of the benefits of collider mode is that beam remnants continue to move in the beam direction, while the particles produced by the fragmentation of the struck quark might be separated. This picture is complicated by the process of hadronization. As noted by Aschenauer et al., hadrons from beam-remnant and struck-quark fragmentation largely overlap in rapidity for all Q² accessible at the EIC. The separation of struck-quark and beam-remnant fragmentation is central for theoretical studies to interpret the data, as the relevant factorization theorems apply to the struck-quark fragmentation only. Recent theoretical studies have focused on this issue. In this work, we explore the beam-remnant separation in an empirical way by using the hadronization model in Pythia8 and comparing results using jets and hadrons. As an aid in identifying the struck-quark fragments, we construct polar plots tracking the scattered electron and struck quark as well as jets and hadrons. Examples are shown in Figure 6. The top half of each circle shows the pseudorapidity and 3-momentum of the scattered electron in the angular and radial directions, respectively. The bottom half shows the rapidity and momentum of the hadronic partners. Polar plots of the scattered electron and struck quark are shown on the left, jets in the middle, and hadrons on the right. The top panel shows where the reaction products go when the struck-quark x is low, from 0.008 to 0.01. As expected for DIS off quarks at low x, the struck quark travels to negative rapidity, i.e., in the electron-going direction, as seen in the top left panel. The top middle panel shows two clear sources of jets: one corresponding to the struck quark and the other to the beam remnant. The two jet sources are quite well separated in rapidity, making a selection of the struck-quark jet straightforward in this case. We found that a minimum of Q² > 25 GeV² is needed to achieve this clean separation for this kinematic interval; decreasing Q² leads to a worsening of the separation. The right-hand plot shows the distribution of single hadrons. While a correlation with the rapidity of the parent quarks is present, it is significantly smeared for lower-p_T hadrons, making the experimental separation of struck-quark and beam-remnant products more difficult than with jets. The |Δφ| < 0.4 cut in the middle and right plots requires the electron and jet to be back-to-back in azimuthal angle, as explained below. This clear identification of the struck quark at low x guarantees access to the dense gluon-dominated matter at small x, which requires selecting DIS off a parton that is itself at small x.
This parton then transits the dense matter on its way to the detector. Comparing jets from such partons in scattering from different nuclei will allow us to quantify the transport properties of the dense matter. The bottom panels show a similar set of polar plots selecting 10 < p_T^e < 30 GeV/c and Q² > 100 GeV². The left-hand side shows that in this case the scattered quarks start to go in the hadron-beam-going direction, but they are still dominantly at pseudorapidities less than 2. The middle panel shows that the separation of the struck-quark and beam-remnant jets is also clearly feasible for these kinematics, even though the rapidity separation is smaller. The smearing for single hadrons, however, is much larger, as visible on the right side. For this electron p_T range, Q² > 100 GeV² is required to obtain the separation with jets; significantly lower Q² values lead to a much larger overlap. We conclude that the prospects for separating the struck quark and beam remnants look very promising with jets.

IV. OBSERVABLES

We now turn to jet observables of interest for probing the properties of gluon-dominated matter in nucleons and nuclei. Sections IV A and IV B show the transverse momentum and azimuthal balance of the electron and jets; Section IV C shows the azimuthal balance for jets defined with the WTA recombination scheme; Section IV D describes the groomed jet radius.

A. Transverse momentum balance

A key measurement sensitive to the mechanism of quark energy loss in the nucleus is the ratio of the electron to jet transverse momentum, since the electron tags the struck-quark p_T. Figure 7 shows the transverse momentum balance between the scattered electron and the jet for 10 < p_T^e < 15 GeV/c and p_T^jet > 4 GeV/c. The distribution peaks around unity, as expected for DIS. The width of the distribution arises from initial-state radiation, out-of-jet emissions and hadronization. Applying a cut on the azimuthal difference between the scattered electron and the jet, |Δφ| < 0.4, suppresses low-p_T^jet jets not associated with the scattered electron, i.e., jets from beam-remnant fragmentation. For this kinematic selection, the average x is 0.11 and the average ν is 1.1 TeV, where ν is the virtual photon energy, i.e., the struck-quark energy, in the rest frame of the nucleon (ν = Q²/(2mx), with m the nucleon mass). The same x region is accessible in fixed-target experiments, for example those ongoing at the Jefferson Laboratory CEBAF, but with ν values of only a few GeV (or equivalently, low Q²). This illustrates that EIC experiments will explore kinematics that represent terra incognita even in "known" x regions. In particular, we will be able to answer "how does the nucleus react to a fast-moving quark" at the TeV scale, whereas all previous fixed-target experiments reached ν values of O(10 GeV). Given the large number of events expected at the EIC, it will be possible to bin finely in either x or ν, once radiative corrections are applied. At the EIC we will be able to explore in detail the kinematic dependence of the jet transport coefficient q̂, where q̂L describes the typical transverse momentum squared acquired by a parton traversing a medium of length L. The kinematic dependence of q̂ in cold nuclear matter is under active investigation; see, for example, recent work in Refs.
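As a quick cross-check of the scales quoted above, ν follows directly from ν = Q²/(2mx); the Q² values below are chosen for illustration only:

# nu = Q^2 / (2 m x) in the nucleon rest frame
m = 0.938  # nucleon mass in GeV
x = 0.11   # average x of the selection quoted above
for Q2 in [25.0, 100.0, 225.0]:
    nu = Q2 / (2 * m * x)
    print(f"Q^2 = {Q2:6.1f} GeV^2  ->  nu = {nu:7.1f} GeV")

Already at Q² of a few hundred GeV², ν reaches the TeV scale, far beyond the O(10 GeV) reach of fixed-target experiments.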
The kinematic coverage of EIC SIDIS data (hadrons and jets) will be several orders of magnitude larger than that of existing SIDIS data, and the data will be much more precise; therefore, they will allow for definitive conclusions on the properties of the jet transport coefficient q̂. In general, these results may also illuminate studies of the QGP in heavy-ion collisions. Energy-loss studies at the EIC will provide a more accurate measurement of q̂ in nuclei than is likely to be achieved in p-A collisions. There are several reasons: DIS in e-A has much less background than the underlying event in p-A collisions; DIS provides an almost pure quark probe instead of quark-gluon fractions that depend on kinematics; in DIS a virtual photon interacts with the quark, experiencing no initial-state scattering and leaving a medium that is static and not affected by QCD multiparton interactions; event-by-event tagging of the struck quark in DIS improves the precision of the measurement and of theoretical calculations; and the EIC luminosity will offer superb statistics.

B. Azimuthal correlation

Figure 8 shows Pythia8 results for the azimuthal difference Δφ between the scattered electron and the jet. The azimuthal angle here is related to the transverse momentum imbalance q_⊥ = |p⃗_T^jet + p⃗_T^e| in the plane transverse to the beam direction. The distribution peaks at zero, as expected from LO DIS, where the electron and jet are produced back-to-back. The finite width of the distribution is driven by the intrinsic k_T of the partons and by gluon radiation. As shown by Liu et al., in the limit that the transverse momentum imbalance q_⊥ is much smaller than the electron transverse momentum, this observable in e-p collisions provides clean access to the quark TMD PDF and to the Sivers effect in transversely polarized scattering. In particular, this observable is insensitive to final-state TMD effects, which provides a way to overcome the daunting task of a simultaneous extraction of TMD parton densities and fragmentation functions. We show the theoretical calculation of Ref. in Figure 8 (solid black), which agrees well with the Pythia8 simulation. A comparison of this cross section in e-p and e-A collisions is sensitive to p_T^jet broadening effects due to multiple scatterings in the medium. Such measurements are needed to quantify q̂ in nuclei, as shown by Liu et al. Following Refs., the final-state multiple scatterings of the struck quark/jet can be combined with the TMD distribution. Effectively, this leads to a modification of the resummed Sudakov exponent, which can be expressed in terms of q̂L. As we have shown in Section III, electron-jet correlations at the EIC will sample 0.008 < x < 0.7, which covers the shadowing, anti-shadowing and EMC regions. Electron-jet correlations in different kinematic bins will map these nuclear effects in 3D, potentially including a parton-flavor separation. Azimuthal correlations provide a clean channel to explore nuclear tomography, extending traditional measurements based on hadrons. A different definition of the transverse momentum imbalance between the electron and jet in SIDIS was considered by Gutierrez-Reyes et al. It is sensitive to TMD PDFs and involves TMD evolution equations also for the final-state jet. This observable can provide important complementary information for nucleon and nuclear tomography and is particularly useful when the WTA axis is used, which we describe in the next section. The projected statistical uncertainties are negligible and not shown.
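For concreteness, the imbalance variables used above are simple functions of the electron and jet momenta; a small helper in plain NumPy (the example four-momenta are made up) might read:

import numpy as np

def imbalance(pt_e, phi_e, pt_jet, phi_jet):
    # Return (|delta_phi|, q_perp) for an electron-jet pair.
    # delta_phi is the deviation from back-to-back (peaks at 0 for LO DIS);
    # q_perp = |vec{p}_T^jet + vec{p}_T^e| is the transverse momentum imbalance.
    dphi = np.abs(phi_jet - phi_e)
    dphi = np.where(dphi > np.pi, 2 * np.pi - dphi, dphi) - np.pi
    qx = pt_jet * np.cos(phi_jet) + pt_e * np.cos(phi_e)
    qy = pt_jet * np.sin(phi_jet) + pt_e * np.sin(phi_e)
    return np.abs(dphi), np.hypot(qx, qy)

# Example: a nearly back-to-back pair with a small imbalance.
dphi, qperp = imbalance(12.0, 0.10, 11.3, 0.10 + np.pi - 0.05)
print(f"|delta_phi| = {dphi:.3f} rad, q_perp = {qperp:.2f} GeV/c")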
C. Winner-take-all jets

The standard recombination scheme of jet reconstruction algorithms is the E-scheme, where at each step in the clustering the jet axis is defined by summing 4-vectors. The resulting jet axis is sensitive to recoil effects due to soft radiation in the jet. In contrast, the jet axis obtained with the WTA scheme is by construction insensitive to soft radiation: at each step of the clustering, the jet axis is defined to be aligned with the more energetic particle. Therefore, this jet axis tracks collinear radiation. Recently, various observables involving the WTA axis have been proposed. Potential applications include studies of the QGP, hadronization, and studies of the intrinsic parton k_T using jets in SIDIS. In particular, comparisons between jets reconstructed with the standard E-scheme and the WTA scheme in e-p and e-A collisions will shed light on the modification of collinear and/or soft fragmentation in nuclei and allow for quantitative studies of the jet broadening mechanism. We consider the same observable as discussed in the previous Section IV B and investigate differences in the azimuthal angular correlation Δφ between the electron and the jet when the standard or the WTA jet axis is used. We note that, as expected, no significant difference between the p_T^jet spectra is observed, since the clustering metric is the same for both recombination schemes. Figure 9 shows the electron-jet azimuthal correlation for three intervals of p_T^e for E-scheme and WTA jets. In both cases the distribution gets narrower with increasing p_T^e. However, the WTA jets show a significantly broader distribution for all p_T^e intervals. We expect these observables to be relevant for studies in e-A collisions and a promising target for EIC detector considerations. Our results also motivate further theoretical efforts in this direction.

D. Groomed jets

Driven by the LHC experiments, the field of jet substructure has grown rapidly in the last few years; see Ref. for recent reviews. An example is the soft drop momentum sharing fraction z_g, which is related to the Altarelli-Parisi splitting function and is modified in heavy-ion collisions. These studies rely on jet grooming algorithms such as "soft drop". Soft drop declustering isolates soft and wide-angle radiation inside the jet. Nonperturbative effects such as hadronization corrections can be suppressed or enhanced depending on the observable under consideration (see, for example, recent work in Refs.), or the sensitivity to TMD PDFs can be improved. We anticipate that jet substructure and jet grooming will have an important role at the EIC, just as at the LHC and RHIC, for precision tests of QCD and studies of medium properties. For example, Ringer et al. showed that the so-called groomed jet radius has a sensitivity to the jet transport coefficient similar to that of electron-jet correlations. Probing the same physics with independent observables offers an important cross-check to ensure the consistency and predictive power of theoretical calculations and can be used in global extractions of q̂. We expect that other observables will allow for similar studies, where groomed jets can be used as well-calibrated probes of nuclear effects in e-A collisions. Here we study soft drop groomed jets at the EIC, focusing on the experimental feasibility of grooming low-p_T^jet jets with modest constituent numbers. We use the SoftDrop algorithm as implemented in the FastJet package.
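The soft drop condition itself is compact: the jet is declustered step by step, and a branching with momentum fraction z and opening angle ΔR is kept once z > z_cut (ΔR/R)^β. A minimal stand-alone sketch of this logic follows; it operates on a precomputed declustering sequence rather than the full FastJet tree, and the input list is hypothetical.

def soft_drop(declusterings, zcut=0.1, beta=0.0, R=1.0):
    # Walk through Cambridge/Aachen declustering steps (z, delta_R), ordered
    # from wide to narrow angles, and return (z_g, R_g) for the first branching
    # that satisfies z > zcut * (delta_R / R)**beta. Branchings failing the
    # condition are groomed away; return None if nothing survives.
    for z, delta_r in declusterings:
        if z > zcut * (delta_r / R) ** beta:
            return z, delta_r  # (z_g, R_g)
    return None

# Hypothetical declustering sequence of one jet: two soft wide-angle branches
# first, then a hard collinear core.
steps = [(0.02, 0.9), (0.05, 0.6), (0.25, 0.3)]
print("beta = 0:", soft_drop(steps, beta=0.0))   # grooms both soft branches
print("beta = 2:", soft_drop(steps, beta=2.0))   # less aggressive grooming

With β = 0 the condition reduces to z > z_cut, removing both soft branches, while β = 2 already keeps the second branching, illustrating why β = 0 is the more aggressive choice.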
The typical p_T^jet used in jet grooming studies at the LHC is O(100 GeV/c), but at the EIC the range will be ≈ 10-35 GeV/c, which is similar to the range explored at RHIC in p-p collisions (20 < p_T^jet < 30 GeV/c for anti-k_T jets with R = 0.4). The particle multiplicities in e-p collisions are smaller than in p-p. Consequently, we investigate how many particles are groomed away and how large the transverse momentum difference before and after grooming is at the EIC. We choose the grooming parameters z_cut = 0.1 and β = 0, 2, which are often used in experimental studies at the LHC. Varying β offers a way to explore different QCD dynamics and to gauge the sensitivity to soft radiation: the choice β = 0 (β = 2) corresponds to more (less) aggressive grooming. Figure 10 shows the number of particles in jets as a function of the ungroomed p_T^jet with and without grooming. The difference grows with p_T^jet, reaching on average ≈ 2 particles for β = 0 and ≈ 0.5 particles for β = 2. Figure 11 shows the p_T^jet that is removed from the jet by the grooming procedure for the two grooming parameters β = 0, 2. We observe that the average value grows roughly linearly with the ungroomed p_T^jet; at 30 GeV/c it reaches ≈ 2.0 GeV/c for β = 0 and ≈ 0.2 GeV/c for β = 2. We note that the standard deviation is large with respect to the average value, which indicates large fluctuations when using groomed jets. From Figures 10 and 11 we conclude that the prospects for performing grooming at the EIC, even with β = 0, look promising. Depending on the observable under consideration, it can be advantageous to choose a larger value of z_cut in order to extend the regime where perturbative calculations are applicable. Detailed detector simulations to quantify measurement effects on groomed variables are an important next step, as are detailed comparisons to theoretical calculations.

E. The groomed jet radius and jet transport

Recent work by Ringer et al. showed that the groomed jet radius R_g, or equivalently the angle between the two branches that pass the soft drop requirement, provides a new opportunity to investigate jet broadening effects. It is orthogonal to other observables that use more traditional jet variables such as the azimuthal angle and p_T^jet. Figure 12 shows the groomed radius for jets recoiling against the scattered electron for two different p_T^e intervals. Here we consider the cases β = 0 and 2 as well as β = ∞. The limiting case β = ∞ corresponds to no grooming, and R_g is then the opening angle of the last two branches of the jet that were clustered together. The R_g distribution for β = ∞ is broad and peaks toward large values, with little dependence on p_T^e. This distribution is dominated by power corrections and nonperturbative physics. Removing low-momentum, wide-angle branches shifts the R_g distribution toward smaller values. As expected, β = 0 yields a larger shift than β = 2. We also observe that the shifts due to grooming are more significant for higher p_T^e, which might be interpreted as a result of the increased phase space for soft radiation. While the approach used in Ref. to study p_T^jet broadening effects was developed for studies of the QGP, the same framework is applicable to studies of medium effects in the nucleus. In fact, the theory simplifies tremendously in e-p or e-A collisions because of the initial-state electron and the large quark-jet fraction. Here we work with the assumption of a pure quark-jet sample; in the future this can be improved using the results of Ref.
While the next-to-leading logarithmic corrections for this observable are known, we limit ourselves to a leading-logarithmic calculation, as we are mostly interested here in the modification in e-A collisions. Nonperturbative hadronization effects are included through a convolution with a model shape function that depends on a single parameter. The size of the hadronization corrections can be determined in e-p collisions by comparing to data or simulations; see Ref. for more details. Figure 13 shows Pythia8 results (green histogram) for β = 0 and 20 < p_T^e < 35 GeV/c, which were also shown in the right panel of Figure 12. The perturbative leading-logarithmic calculation of the groomed jet radius including hadronization effects (solid black) has a shape similar to the Pythia8 results, though the Pythia8 distribution is slightly shifted to the right. The other curves show the result when medium effects due to incoherent multiple scatterings of the two branches inside the nucleus are included. We parametrize the cold nuclear matter effects analogously to the electron-jet azimuthal correlation considered in section IV B above and accordingly choose the same values q̂L = 0.2 GeV² and 0.8 GeV² (dashed). The broadening effects are clearly visible and of a magnitude similar to that for the electron-jet azimuthal correlation observable. These results demonstrate that jet substructure observables offer novel and independent probes of nuclear effects at the EIC.

V. EXPERIMENTAL ASPECTS

The modification of jet observables in e-A collisions compared to e-p is predicted to be at the few-percent level. This places strict limits on the systematic uncertainties of the measurements and should inform detector designs for the EIC. A disadvantage of jet measurements compared to single hadrons is that precise energy measurements are much more challenging. One of the most accurate jet energy measurements was performed by the ZEUS collaboration at HERA with its high-resolution uranium-scintillator calorimeter, yielding a jet energy scale (JES) uncertainty of ±1% for jets with a transverse energy in the lab frame larger than 10 GeV, and ±3% for lower-energy jets. As jets have a rapidly falling spectrum, this energy-scale uncertainty translates into an uncertainty of 5-10% on the p_T^jet spectra. Experiments at the LHC are close to achieving the goal of ±1% JES as well. It seems unlikely that EIC detectors will improve on this, so the JES uncertainty will likely be a limiting factor for jet measurements at the EIC. Even for observables that do not require energy information per se, such as azimuthal differences between electrons and jets, the JES uncertainty enters as a second-order effect: if a given observable depends on p_T^jet, an unfolding procedure in more than one dimension will be needed. In particular, the azimuthal difference between jets and electrons has a rather strong p_T^jet dependence, as seen in Figure 9. Unlike fixed-target experiments, which can use dual-target techniques, data from e-A and e-p will be taken at different times and in different runs at a collider. Consequently, time-dependent changes in the detector response will limit the cancellation in the e-A/e-p ratio and therefore drive the systematic uncertainties. Moreover, one of the most powerful calibration tools used by the HERA experiments was the momentum balance between the scattered electron and jets in neutral-current DIS, which effectively anchors the JES to the electromagnetic energy-scale uncertainty, which is known much more precisely.
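The translation of a ±1% jet energy scale into a 5-10% spectrum uncertainty quoted above follows from simple error propagation, assuming a power-law spectrum:

    dN/dp_T^jet ∝ (p_T^jet)^(−n)    ⇒    δN/N ≈ n · δp_T^jet / p_T^jet,

so a spectral index n ≈ 5-10 turns a 1% scale shift into a 5-10% yield uncertainty. This is also why anchoring the JES to the much more precisely known electromagnetic scale via electron-jet momentum balance is so powerful.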
That method is not available for our tag-and-probe studies because it would use the same physics we want to study (at HERA, electron-jet correlations were primarily a calibration tool). This will increase the systematic uncertainty on the JES. Measuring ratios of cross sections in e-A and e-p collisions allows some of the JES uncertainty to be canceled. In order to achieve an accuracy of 1% in p_T^jet spectra measurements, one would need to reach a residual systematic uncertainty of 0.2% on the e-A/e-p ratio. We have shown that key observables such as the electron-jet azimuthal correlation are rather sensitive to p_T^jet and therefore to p_T^jet smearing effects. Detailed detector simulations will be needed to see how residual JES uncertainties translate into systematic uncertainties. We have shown that theoretical calculations predict nuclear effects of order O(10%) or less, for both azimuthal correlations and the groomed jet radius. Given that the EIC jet measurements will likely be dominated by systematic uncertainties and the accuracy goal is at the percent level, uncertainties due to luminosity and trigger efficiency will play an important role. We note that these are typically suppressed to the sub-percent level in fixed-target DIS experiments with the use of dual targets, but in collider mode they will be non-negligible. We again anticipate that the leading systematic uncertainty in the e-A/e-p ratios will be related to time-dependent effects in the trigger and luminosity calibrations.

VI. SUMMARY AND CONCLUSIONS

We have explored the potential of jets at the EIC as a precision tool for studies of the nucleus. We discussed requirements for semi-inclusive deep inelastic scattering "tag and probe" studies in which the scattered electron fixes the jet kinematics, leading to an approach orthogonal to the HERA jet measurements, as well as to all previous projections of jet measurements at the EIC. The kinematic reach for jet measurements at the EIC is found to be roughly 0.008 < x < 0.7 and Q² > 25 GeV² for √s = 89 GeV. While inclusive DIS measurements will have an extended kinematic reach, jet measurements will be indispensable for the study of quark-nucleus interactions and the quark structure of nuclei in 3D, for tagging the parton flavor, and for separating current and target fragmentation. We identified several key observables for electron-jet studies, including the transverse momentum balance and the azimuthal angular correlation. We demonstrated the feasibility of groomed jets at the EIC, which provide new tools for controlling hadronization effects. We presented comparisons to theoretical calculations in which medium effects are included for both electron-jet correlations and jet substructure. Using information from different observables will be crucial for determining the jet transport coefficient q̂. We also presented a study of the winner-take-all scheme for jet reconstruction, which will help to gauge the modification of soft and collinear fragmentation in the nucleus. Important future work includes studies with detector response simulations and more detailed comparisons to theoretical calculations.
Gut microbiota display alternative profiles in patients with early-onset colorectal cancer

Background: The incidence of early-onset colorectal cancer (EOCRC) is increasing worldwide. This study aimed to explore whether there is an alternative gut microbiota profile in patients with early-onset colorectal cancer. Methods: A total of 24 patients with EOCRC, 43 patients with late-onset colorectal cancer (LOCRC) and 31 young volunteers were included in this study. The diversity of their fecal bacteria was explored using 16S ribosomal RNA gene sequencing. Cluster of ortholog genes (COG) functional annotation and the Kyoto encyclopedia of genes and genomes (KEGG) were used to detect enriched pathways among the three groups. Results: Community separation was observed among the three groups. The Shannon index of the EOCRC group was significantly lower than that of the LOCRC group (P=0.007) and the NC group (P=0.008). Both PCoA (principal co-ordinates analysis, P=0.001) and NMDS (non-metric multidimensional scaling, stress=0.167, P=0.001) analyses indicated significant differences in beta diversity among the three groups. Fusobacteria, Bacteroidetes, and Clostridia were the most abundant bacteria in the EOCRC group, LOCRC group, and NC group, respectively. The COG results showed that transcription (P=0.01398), defense mechanisms (P=0.04304), inorganic ion transport and metabolism (P=0.00225) and cell wall/membrane/envelope biogenesis (P=0.02534) were differentially represented among the three groups. The KEGG modules involved in membrane transport (P=0.00856) and porphyrin and chlorophyll metabolism (P=0.04909) were differentially represented among the three groups. Conclusion: Early-onset colorectal cancer patients have a different gastrointestinal microbiota derangement compared to late-onset colorectal cancer patients. This dysbiosis is reflected in the species diversity of the microbiota, the abundance of bacteria, and the abnormal functional predictions.

Introduction

Colorectal cancer is the third most common cancer in terms of incidence and the second in terms of cancer-related mortality worldwide (). Approximately ten percent of all patients initially diagnosed with colorectal cancer are younger than 50 years of age (). Early-onset colorectal cancer (EOCRC) is generally defined as colorectal cancer diagnosed before the age of 50 years (). The incidence of late-onset colorectal cancer has declined due to preventive screening recommendations over the past 10 years (;;Sinicrope, 2022). However, the incidence and cancer-related mortality of EOCRC have increased significantly and are expected to continue to increase over the next 10 years (;;). EOCRC typically displays adverse clinical and histopathological features, yet the causes are unclear (;;). In addition to inherent genetic factors such as family history and germline gene mutations, poor dietary habits, smoking, alcohol, and antibiotics are considered risk factors for EOCRC (;). These risk factors can interact with the gut microbiota (Song and Chan, 2019), and their effects on the host can be directly reflected in changes in the structure and abundance of the gut microbiota. The gut microbiota, as an ecosystem in direct contact with the gut mucosa, is a potential contributor to colorectal cancer. Alterations in the structure of the intestinal microbiota can contribute to the development and progression of intestinal diseases.
Increased abundance of certain specific microorganisms (Fusobacterium nucleatum, Prevotella intermedia, Bacteroides fragilis, Porphyromonas asaccharolytica, etc.) can increase the risk of colorectal carcinogenesis through inflammatory responses, evasion of tumor immune responses, and activation of pro-tumor signaling pathways (e.g., β-catenin) (;Wong and Yu, 2019). In contrast, probiotics such as Lactobacillus and Streptococcus thermophilus were significantly less abundant in the gut of colorectal cancer patients (). Most of the current data used to explore the microbiota structure of patients with colorectal cancer are derived from late-onset colorectal cancer (), with few studies characterizing the gut microbiota in early-onset colorectal cancer. In this study, we used high-throughput DNA sequencing technology to analyze the gut microbiota of early-onset colorectal cancer patients from our center and to conduct a preliminary characterization.

Sample collection

The fecal specimens of all patients in this study were obtained from the Department of Colorectal Surgery, Second Affiliated Hospital of Harbin Medical University from July 2018 to June 2020. The inclusion criteria were: 1) patients with colorectal cancer diagnosed by histopathology, and healthy young volunteers without tumors on gastroscopy; 2) consent for us to collect their feces. The exclusion criteria were: 1) having taken antibiotics, probiotics or corticosteroids, or having received fecal microbiota transplantation, within 3 months prior to sample collection; 2) a familial history of colorectal cancer; 3) use of evacuants or colonoscopy within 1 week prior to sample collection; 4) abdominal surgery or other invasive treatment within 3 months prior to sample collection; 5) a diagnosis of multiple primary cancers; 6) a history of other cancer or inflammatory bowel disease; 7) contamination of specimens as a result of failure to collect according to the prescribed protocols (Di ); 8) incomplete clinical information. The recruited sporadic CRC patients were divided into two groups based on age: the EOCRC group, aged < 50 years, and the LOCRC group, aged ≥ 55 years. All recruited young healthy volunteers were less than 50 years of age and were included in the NC group. Clinical and pathological characteristics of the CRC patients, including age, gender, body mass index (BMI), history of drinking, tumor location, histological classification, and TNM stage, were collected. The collected information for the healthy volunteers included age, gender, BMI, and history of drinking. Stools were snap-frozen in liquid nitrogen for 30 seconds after acquisition and stored at -80°C until DNA extraction.

16S ribosomal RNA gene sequencing

Microbial DNA was extracted from fecal samples using the E.Z.N.A.® Soil DNA Kit (Omega Bio-tek, Norcross, GA, USA) according to the manufacturer's protocol. The final DNA concentration and purity were determined with a NanoDrop 2000 UV-Vis spectrophotometer (Thermo Scientific, Wilmington, USA), and DNA quality was checked by 1% agarose gel electrophoresis. The extracted DNA was stored at -80°C. The V3-V4 hypervariable regions (the 338F-806R region) of the bacterial 16S rRNA gene were amplified on a thermal cycler PCR system (GeneAmp 9700, ABI, USA) for high-throughput sequencing, with the primer sequences 338F: 5'-ACTCCTACGGGAGGCAGCAG-3' and 806R: 5'-GGACTACHVGGGTWTCTAAT-3'.
The amplified DNA was further purified using the AxyPrep DNA Gel Extraction Kit (Axygen Biosciences, Union City, CA, USA) and quantified using QuantiFluor™-ST (Promega, USA) according to the manufacturer's guidelines. The normalized, equimolar concentrations of each amplicon were then pooled and sequenced on the Illumina MiSeq platform (Illumina, San Diego, USA) using 2 × 300 bp chemistry according to the standard protocol of Majorbio Bio-Pharm Technology Co. (Shanghai, China).

Processing of sequencing data

The raw fastq files were filtered and trimmed using Trimmomatic and merged by FLASH with the following criteria: (i) reads were truncated at any site receiving an average quality score < 20 over a 50 bp sliding window; (ii) sequences whose overlap was longer than 10 bp were merged according to their overlap, allowing no more than 2 bp of mismatch; (iii) the sequences of each sample were separated according to barcodes (exact matching) and primers (allowing 2 nucleotide mismatches), and reads containing ambiguous bases were removed. Operational taxonomic units (OTUs) were clustered by the average neighbor principle at 97% genetic similarity using UPARSE (version 7.1, http://drive5.com/uparse/). Chimeric sequences were identified and deleted after comparison of the identified taxa. The classification of each 16S rRNA gene sequence was analyzed against the Silva (SSU123) 16S rRNA database using the RDP classifier algorithm (http://rdp.cme.msu.edu/) with a 70% confidence threshold.

Analysis of processed sequencing data

Alpha diversity between the three groups was compared using the Shannon index and the Simpson index. Beta diversity was compared between the three groups by PCoA (principal co-ordinates analysis), NMDS (non-metric multidimensional scaling) analysis and PLS-DA (partial least squares discriminant analysis). PCoA and NMDS were performed using the unweighted and weighted UniFrac distance algorithms, and adonis analysis (permutational MANOVA) was used for significance testing. Then, based on the obtained community abundance data, hypothesis tests were performed using rigorous statistical methods to assess the significance of differences in species abundance between the microbial communities of the three groups of samples and to identify significantly different species between groups. LEfSe (linear discriminant analysis coupled with effect size analysis) performed linear discriminant analysis (LDA) on the samples according to the grouping conditions, based on taxonomic composition, to find the groups or species that had a significantly differential impact on the sample delineation. The OTU abundance table was normalized by PICRUSt1 to remove the effect of the number of copies of the 16S marker gene in each species' genome; the COG and KEGG Ortholog (KO) annotations corresponding to each OTU were then obtained from the Greengenes information for each OTU, and the abundance of each COG and each KO was calculated.
According to the information in the COG database, the descriptive and functional information of each COG can be parsed from the eggNOG database to obtain the potential functional abundance spectrum; according to the information in the KEGG database, the KO pathways can be obtained, and the abundance of each potential functional category can be calculated from the OTU abundances.

Statistical analysis

The software mothur (version 1.30.2) was used for alpha diversity analysis. Principal component analysis and principal co-ordinates analysis were statistically analyzed and plotted using R (version 3.3.1). In the NMDS analysis, Quantitative Insights Into Microbial Ecology (QIIME, version 1.9.1) was applied to calculate the beta diversity distance matrix, and the R packages "vegan" and "mixOmics" were then used for analysis and plotting. LEfSe (http://huttenhower.sph.harvard.edu/galaxy/root?tool_id=lefse_upload) was used for multilevel discriminant analysis of species differences, and PICRUSt (version 1.1.0) was used for functional prediction. All statistical calculations were performed in R 3.3.1. The Kruskal-Wallis H test was used to compare differences in the measurement data between the three groups, and the Mann-Whitney U test was used for pairwise comparisons. A P-value < 0.05 was considered statistically significant, and P-values were corrected for the false discovery rate (FDR).

Results

Basic clinical characteristics of patients and raw data management

A total of 24 EOCRC patients, 43 LOCRC patients and 31 healthy volunteers were recruited in this study. Their demographic characteristics are shown in Table 1. We collected 98 samples and obtained a total of 5,362,431 sequence fragments with a total length of 2,261,064,976 bp. Read lengths ranged from 204 to 528 bp, with an average of 422 bp.

Species assessment and species composition analysis

We performed OTU clustering on all valid sequences and selected OTUs with at least 5 sequences in at least three samples and a total sequence count of at least 20, finally obtaining 714 OTUs; the rank abundance curves are shown in Figure S1A. The Shannon curves of all samples rapidly reached a plateau, indicating that the sequencing depth was sufficient (Figure S1B). We performed alpha diversity analysis on the three groups and found that the Shannon diversity index of the EOCRC group was significantly lower than that of the LOCRC group (P=0.007) and that of the NC group (P=0.008) (Figure 1A), and the Simpson index of the EOCRC group was significantly lower than that of the LOCRC group (P=0.013) and that of the NC group (P=0.011) (Figure 1B). The Venn diagram showed that, at the genus level, the number of bacterial genera was higher in the LOCRC group than in the EOCRC and NC groups; the three groups shared 247 bacterial genera, with only 16 unique genera in the EOCRC group (Figure 1C).

Beta-diversity analysis of gut microbiota

We analyzed the differences in beta diversity among the three groups by PCoA, NMDS and PLS-DA. PCoA based on unweighted UniFrac distance showed significant differences at the OTU level among the three groups (R=0.0695, P=0.001), and adonis analysis showed significant differences between the EOCRC and LOCRC groups (P=0.0003) and between the EOCRC and NC groups (P=0.0002) (Figure 2A).
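The UniFrac distance matrix itself requires the phylogenetic tree and is computed by tools such as QIIME; given any sample-by-sample distance matrix, PCoA reduces to classical multidimensional scaling. A minimal numpy sketch:

import numpy as np

def pcoa(dist, k=2):
    """Classical PCoA: double-center the squared distance matrix and
    eigendecompose; rows of the result are sample coordinates."""
    d2 = np.asarray(dist, dtype=float) ** 2
    n = d2.shape[0]
    j = np.eye(n) - np.ones((n, n)) / n        # centering matrix
    b = -0.5 * j @ d2 @ j                      # Gower's centered matrix
    w, v = np.linalg.eigh(b)
    idx = np.argsort(w)[::-1][:k]              # largest eigenvalues first
    w = np.clip(w[idx], 0, None)
    return v[:, idx] * np.sqrt(w)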
PCoA based on weighted UniFrac distances also showed significant differences among the three groups at the OTU level (R=0.0726, P=0.001) (Figure S2A). The NMDS analysis at the OTU level, based on unweighted UniFrac distance, gave a stress value of 0.167 (P=0.001, Figure 2B). The corresponding values based on weighted UniFrac distance were: OTU level (stress=0.136, P=0.001), genus level (stress=0.140, P=0.001) and phylum level (stress=0.073, P=0.001), as shown in Figures S2B-D. PLS-DA showed a clear separation of the three groups at the OTU level (Figure 2C). These data indicate that EOCRC harbors a peculiar microbiota.

Gut microbiota dysbiosis in EOCRC

We performed LEfSe to investigate the composition of the fecal microbiota in the three groups and to identify taxa that were differentially abundant in EOCRC (linear discriminant analysis (LDA) score > 3.5, P-value < 0.05). There were 48 bacterial taxa whose relative abundances differed significantly among the three groups, with 14, 12 and 23 taxa increased in the EOCRC, LOCRC and NC groups, respectively (Figure 3A). As shown in Figure 3B, the listed taxa were considered the most significant markers. We performed the Kruskal-Wallis test on the abundance of bacteria in the three groups at different levels to verify the results of the LEfSe analysis (Table 2). As shown in Table 2, in the EOCRC group Fusobacteria was more abundant at the phylum (P<0.001), class (P<0.001), order (P<0.001), family (P<0.001) and genus (P<0.001) levels, and Porphyromonas was more abundant at the genus level (P<0.001), although its overall proportion was low. In the LOCRC group, the proportion of Bacteroidetes was significantly higher at the phylum (P=0.001113), class (P=0.001113), and order (P=0.001113) levels, and Prevotellaceae was more abundant at the family level (P<0.001). In the NC group, Clostridia was more abundant at the class level (P=0.002217) and the order level (P=0.002217), and Firmicutes was enriched at the phylum level (P=0.002079). Another abundant taxon in the NC group was Actinobacteria, which was more abundant at the phylum and class levels (all P-values=0.002197). These results were consistent with the LEfSe analysis. Therefore, we concluded that specific bacteria in the gut microbiota differ among the three groups.

Figure 1. Alpha diversity analysis of gut microbiota in the three groups. (A) Comparison of the Shannon index among the three groups: the Shannon index of the EOCRC group was significantly lower than the LOCRC group (P=0.007) and the NC group (P=0.008). (B) Comparison of the Simpson index among the three groups: the Simpson index of the EOCRC group was significantly higher than the LOCRC group (P=0.013) and the NC group (P=0.011). (C) Venn diagram analysis of the three groups at the genus level: the three groups had 247 genera in common, with 16 unique genera in the EOCRC group, 30 unique genera in the LOCRC group and 29 unique genera in the NC group.

Figure 3. LEfSe algorithms performed on the three groups. (A) Cladogram measured from the LEfSe analysis. (B) LDA scores obtained by linear discriminant analysis; the threshold for the LDA score was set at 3.5. The larger the LDA score, the greater the difference between the three groups.
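For concreteness, the alpha diversity index and the group tests used above can be sketched as follows (toy values, not the study data; the actual analysis used mothur and R):

import numpy as np
from scipy.stats import kruskal, mannwhitneyu
from statsmodels.stats.multitest import multipletests

def shannon(counts):
    """Shannon diversity H' = -sum(p_i * ln p_i) over nonzero OTUs."""
    p = np.asarray(counts, dtype=float)
    p = p[p > 0] / p.sum()
    return -(p * np.log(p)).sum()

# toy per-sample Shannon indices for the three groups (illustrative only)
eocrc = [2.1, 2.4, 1.9, 2.2, 2.0]
locrc = [2.9, 3.1, 2.7, 3.0, 2.8]
nc = [3.0, 2.8, 3.2, 2.9, 3.1]

_, p_overall = kruskal(eocrc, locrc, nc)               # three-group test
p_pair = [mannwhitneyu(a, b).pvalue
          for a, b in [(eocrc, locrc), (eocrc, nc), (locrc, nc)]]
_, p_fdr, _, _ = multipletests(p_pair, method="fdr_bh")  # FDR correction
print(p_overall, p_fdr)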
Functional analysis of fecal microbiota

To study the functional and metabolic changes of the fecal microbial communities, we compared the measured sequences against the reference databases for the COG and KEGG module abundances of the bacterial species. The COG functional annotation results showed that both the EOCRC group and the LOCRC group were deficient in the following functions: transcription (P=0.01398) and defense mechanisms (P=0.04304) (Figure 4A, Figure S3A). Meanwhile, the three groups showed significant differences in functions such as inorganic ion transport and metabolism (P=0.00225) and cell wall/membrane/envelope biogenesis (P=0.02534) (Figure 4A). Moreover, the KEGG modules involved in membrane transport (ko02010, P=0.00856) and porphyrin and chlorophyll metabolism (ko00860, P=0.04909) were overrepresented in the NC group compared with the EOCRC and LOCRC groups (Figure 4B; Figure S3B).

Figure 4. Function prediction for the three groups. (A) Differences in Cluster of Ortholog Genes (COG) functions. (B) Abundance differences in Kyoto Encyclopedia of Genes and Genomes (KEGG) pathways. 0.01 < corrected P-value ≤ 0.05 is marked as *; 0.001 < corrected P-value ≤ 0.01 is marked as **.

Discussion

The structure of the colorectal cancer population is gradually changing, and the rapidly increasing incidence of early-onset colorectal cancer requires vigilance (;Sinicrope, 2022). The clinical and molecular features of early-onset colorectal cancer are quite distinct, which suggests that it may be independent of traditional colorectal cancer (;). As research progresses, the characteristics of the intestinal flora have become a major consideration in the etiology of many cancers (). Various studies have shown significant differences in the characteristics of the gut microbiome across ages, while the gut microbiome is considered a definite risk factor for colorectal cancer (O'Toole and Jeffery, 2015; Garrett, 2019; Wong and Yu, 2019). Therefore, we were particularly interested in clarifying the characteristics of the gut microbiome in EOCRC. We selected patients with sporadic early-onset colorectal cancer from our center, recruited young healthy volunteers and late-onset colorectal cancer patients with matched demographic characteristics, and initially delineated the gut flora of patients with sporadic colorectal cancer. Prior studies have shown that imbalanced gut flora in CRC usually manifests as a decrease in alpha diversity; however, studies derived from Chinese populations suggest that the species diversity of the gut microbiota of CRC patients does not differ from that of healthy populations (;;). A metagenomic sequencing based study suggested that the faecal alpha diversity estimates of EOCRC patients were significantly lower than those of LOCRC patients and healthy young volunteers (). In this study, we found that EOCRC patients had significantly lower alpha diversity than LOCRC patients and healthy young volunteers. The abundance of the gut microbiota in the EOCRC group was significantly lower than that in the LOCRC and NC groups, and the number of bacterial genera in the EOCRC group was the lowest of the three groups. The alpha diversity and richness of the gut microbiota are generally considered to be independent of age (). However, according to our findings, among colorectal cancer patients the species diversity and abundance were significantly lower in young patients.
Meanwhile, significant differences were found in the beta diversity of the gut microbiota among the three groups, both for the overall comparison and for the pairwise comparisons. Combined with the alpha diversity analysis and the microbiota variability analysis, it is reasonable to assume that there are some specificities in the gut microbiota of early-onset patients. We compared the differences in abundant gut microbiota among the three groups. The proportion of Bacteroides in CRC patients, both EOCRC and LOCRC, was higher than in NC subjects (16.89 ± 17.17 vs. 16.18 ± 15.83 vs. 9.409 ± 10.83), but the difference among the three groups was not statistically significant. Members of the genus Bacteroides account for a major fraction of the gut microbiome and colonize different parts of the colon (). Bacteroides fragilis toxin can induce tumorigenesis through various pathways, including IL17, signal transducer and activator of transcription 3, and nuclear factor-κB signaling in colonic epithelial cells (). Bacteroidetes were significantly enriched in the LOCRC group, and further analysis revealed that this difference might derive from a higher proportion of Prevotellaceae. A previous study has shown that Prevotellaceae is more abundant in CRC patients (). However, only a small number of studies have focused on the association between Prevotellaceae and colorectal cancer, and exploring the role of Prevotellaceae in colorectal carcinogenesis may be a topic for future research. Fusobacterium is one of the definitive causative agents of CRC, and numerous studies have suggested that it can lead to colorectal carcinogenesis and progression (;;;). In addition, Fusobacterium can promote chemoresistance in colorectal cancer by modulating autophagy, which can lead to a poor prognosis in colorectal cancer patients (). A previous study based on 16S rRNA gene sequencing suggested that Fusobacterium could serve as a differentially abundant genus marker for EOCRC, which supports the results of the present study (). Another study, based on integrated metagenomic sequencing, suggested that Bacteroides vulgatus and Flavonifractor plautii are unique taxon signatures for EOCRC, while Fusobacterium is a unique taxon signature for the LOCRC group (). We suggest that these differences in results are more likely to arise from differences in sequencing methods and sample sources. Based on our study, Fusobacterium may play an important role in the gut microbiota of EOCRC patients, although it is present in low proportions. Another genus enriched in the EOCRC group is Porphyromonas, different species of which can promote colorectal carcinogenesis through butyrate-induced senescence or the hematopoietic NLRP3 inflammasome (;). In addition, we found a decrease in Clostridia in both the EOCRC group and the LOCRC group. Clostridia contains a variety of butyrate-producing bacteria that can inhibit colorectal cancer development by modulating various signaling pathways and the gut microbiota (Montalban-Arques et al.;). Through functional prediction, we found changes in certain COG functions and KEGG pathways in each group. Compared with healthy volunteers, the EOCRC and LOCRC groups showed a significant decrease in some functions (such as transcription and defense mechanisms) and some KEGG pathways (such as membrane transport and porphyrin and chlorophyll metabolism).
However, we speculate that these distinctions derive more from the differences between CRC patients and healthy individuals. Although no clear mechanism has been established linking the gut microbiota to these cellular functions, we speculate that the gut microbiota and the host can interact with and regulate each other through certain specific signaling pathways (). The functional changes in the different groups necessarily produce tumorigenic or protective effects and may serve as targets for future treatment of colorectal cancer. Although our work has several novel findings, several limitations remain. The sample size of the control groups (the LOCRC and NC groups) was adequate, but the sample size of the target population of our study needs to be larger. In addition, the male-to-female ratio of CRC patients in this study was slightly skewed, which may make the findings unrepresentative of the entire colorectal cancer population. Furthermore, metagenomic sequencing of the corresponding populations may give more convincing results. In conclusion, our study suggests that patients with early-onset colorectal cancer have a unique gut microbial profile. Gut microbes could be another characteristic of early-onset colorectal cancer. We hope that this study will provide some insight into the use of gut microbes as biomarkers for predicting the risk of early-onset colorectal cancer and contribute to its prevention and treatment.

Data availability statement

The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found below: NCBI, PRJNA883949.
Estimating the density of the people and counting the number of people in a crowd environment for human safety

Video-surveillance systems are widely used in crowd monitoring: the density of people in a crowd is estimated both for the safety of humans in public places and for managing resources. In India, a number of accidents have occurred in crowded public areas such as shopping malls, airports, pilgrimage sites, temples, and political meetings. To help avoid such accidents, we need to estimate the density of people in a crowd environment. Our proposed method is twofold: first, we estimate the density of the crowd; second, we count the number of people in the crowd. As crowd density increases, the occlusion between people also increases. To cope with this, we use an improved adaptive K-GMM background subtraction method to extract an accurate foreground in real time and avoid estimation problems. By applying a boundary detection algorithm, we estimate the size of the crowd. The number of people in the crowd is counted using the Canny edge detector, connected-component labeling, and a bounding box with centroid method. This paper proposes a real-time video surveillance system, and the proposed method is compared with an existing method on datasets such as IBM, KTH, CAVIAR, PETS2009 and CROWD, which are used for both the training and testing phases. The aim of this work is to analyze the performance of density estimation and people counting on different datasets.
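A minimal OpenCV sketch of the described pipeline follows; MOG2 stands in for the paper's improved adaptive K-GMM subtractor, and the file name and area threshold are illustrative assumptions:

import cv2
import numpy as np

subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
cap = cv2.VideoCapture("crowd.mp4")  # hypothetical input video

while True:
    ok, frame = cap.read()
    if not ok:
        break
    fg = subtractor.apply(frame)                             # foreground mask
    _, fg = cv2.threshold(fg, 200, 255, cv2.THRESH_BINARY)   # drop shadow pixels
    fg = cv2.morphologyEx(fg, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
    edges = cv2.Canny(fg, 100, 200)     # boundaries for crowd-size estimation
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(fg)
    count = 0
    for i in range(1, n):                                    # label 0 = background
        x, y, w, h, area = [int(v) for v in stats[i]]
        if area > 200:                                       # illustrative threshold
            count += 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cx, cy = centroids[i]
            cv2.circle(frame, (int(cx), int(cy)), 2, (0, 0, 255), -1)
    # 'count' approximates the number of people; density follows from
    # dividing by the monitored region's area.
cap.release()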
/**
* @author Hassan Mushtaq
* @since 6/6/18
*/
@RunWith(SpringRunner.class)
@WebMvcTest(value = InvitationController.class)
@AutoConfigureMockMvc(secure = false)
public class InvitationControllerTest {
@MockBean
private InvitationService invitationService;
@MockBean
private Migration migration;
@Autowired
private MockMvc mockMvc;
@Test
public void sendInviteShouldSaveInviteAndSendEmail() throws Exception {
MockHttpServletRequestBuilder request = post("/invitations")
.contentType(MediaType.APPLICATION_JSON_UTF8_VALUE)
.content("{\"email\" : \"something@something.com\", \"role\" : \"REGULAR\"}");
mockMvc.perform(request)
.andExpect(status().isCreated())
.andExpect(jsonPath("$._links.self.href", startsWith("http://localhost/invitations")));
Mockito.verify(invitationService).sendInvite(Mockito.any(Invitation.class));
}
@Test
public void getAllInvitesShouldReturnInvitations() throws Exception {
Invitation invitation1 = new Invitation("anc@abc.com", Role.REGULAR);
List<Invitation> invitations = Arrays.asList(invitation1);
when(invitationService.findAll()).thenReturn(invitations);
MockHttpServletRequestBuilder request = get("/invitations");
MvcResult result = mockMvc.perform(request)
.andExpect(status().isOk())
.andExpect(content().contentType(MediaTypes.HAL_JSON_UTF8_VALUE))
.andReturn();
assertTrue(result.getResponse().getContentAsString().contains("\"email\":\"anc@abc.com\""));
assertTrue(result.getResponse().getContentAsString().contains("\"role\":\"REGULAR\""));
assertTrue(result.getResponse().getContentAsString().contains("\"createDate\""));
}
@Test
public void getInviteByIdShouldReturnInvitation() throws Exception {
String id = "someid";
Invitation invitation = new Invitation("anc@abc.com", Role.REGULAR);
when(invitationService.findById(id)).thenReturn(invitation);
MockHttpServletRequestBuilder request = get("/invitations/" + id);
MvcResult result = mockMvc.perform(request)
.andExpect(status().isOk())
.andExpect(content().contentType(MediaTypes.HAL_JSON_UTF8_VALUE))
.andReturn();
assertTrue(result.getResponse().getContentAsString().contains("\"email\":\"anc@abc.com\""));
assertTrue(result.getResponse().getContentAsString().contains("\"role\":\"REGULAR\""));
assertTrue(result.getResponse().getContentAsString().contains("\"createDate\""));
}
@Test
public void getInviteByIdShouldReturnNotFoundIfMissingInvitation() throws Exception {
String id = "someid";
when(invitationService.findById(id)).thenReturn(null);
MockHttpServletRequestBuilder request = get("/invitations/" + id);
mockMvc.perform(request)
.andExpect(status().isNotFound());
}
} |
From peritoneal diffusion curves of several substances characteristic of uraemia, and assuming a creatinine clearance of 0.1 ml × s⁻¹, it is possible to calculate the quantity of dialysate required, taking the residual renal function into account for dialysis cycles of different durations. In this way the clearances of the other investigated substances can also be calculated. Besides sufficient detoxification, the advantages of the method are an optimized duration of dialysis and an optimized consumption of dialysate.
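As an illustration of this kind of calculation (with purely illustrative numbers and an assumed dialysate-to-plasma equilibration ratio D/P), peritoneal clearance K = (D/P) × V/t implies

    V = K · t / (D/P) = (0.1 ml/s × 86,400 s) / 0.7 ≈ 12.3 l of dialysate per 24 h cycle,

so shorter cycles with better equilibration reduce the dialysate volume needed for the same clearance.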
// GetSingle returns the value pointed to by the specified field of the source WatchProjectInvitationsRequest
func (fp *WatchProjectInvitationsRequest_FieldTerminalPath) GetSingle(source *WatchProjectInvitationsRequest) (interface{}, bool) {
switch fp.selector {
case WatchProjectInvitationsRequest_FieldPathSelectorType:
return source.GetType(), source != nil
case WatchProjectInvitationsRequest_FieldPathSelectorParent:
res := source.GetParent()
return res, res != nil
case WatchProjectInvitationsRequest_FieldPathSelectorPageSize:
return source.GetPageSize(), source != nil
case WatchProjectInvitationsRequest_FieldPathSelectorPageToken:
res := source.GetPageToken()
return res, res != nil
case WatchProjectInvitationsRequest_FieldPathSelectorOrderBy:
res := source.GetOrderBy()
return res, res != nil
case WatchProjectInvitationsRequest_FieldPathSelectorResumeToken:
return source.GetResumeToken(), source != nil
case WatchProjectInvitationsRequest_FieldPathSelectorFilter:
res := source.GetFilter()
return res, res != nil
case WatchProjectInvitationsRequest_FieldPathSelectorFieldMask:
res := source.GetFieldMask()
return res, res != nil
case WatchProjectInvitationsRequest_FieldPathSelectorView:
return source.GetView(), source != nil
case WatchProjectInvitationsRequest_FieldPathSelectorMaxChunkSize:
return source.GetMaxChunkSize(), source != nil
default:
panic(fmt.Sprintf("Invalid selector for WatchProjectInvitationsRequest: %d", fp.selector))
}
} |
A MAN WHO shared an audio recording of a hate preacher’s speech justifying the 2015 terror attacks in Paris has today been jailed for four and a half years.
Taha Hussain shared a link to the speech on WhatsApp and also used the app Telegram to share an article which offered advice to fighters who could not travel abroad to “kill the kuffar [unbeliever] in their own country”.
Hussain, from Slough near London, was found guilty on 31 July at the Old Bailey of seven terrorism offences and sentenced at Kingston Crown Court on 21 September.
All the counts relate to links or files he sent to others, using WhatsApp and Telegram, which glorified acts of terrorism. He was found not guilty of three other counts.
Deborah Walsh from the Crown Prosecution Service said: “The CPS case against Hussain was that he sent the material knowing it would be understood by its recipients as a direct or indirect encouragement to the commission, instigation or preparation of acts of terrorism.
He was recklessly providing a service to others that enabled them have access to publications that promoted violence and even gave advice on ways to kill people.
# Generated by Django 4.0 on 2022-01-03 21:17
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0013_alter_post_title_tag'),
('notification', '0008_alter_notification_message_and_more'),
]
operations = [
migrations.CreateModel(
name='Click',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('entity', models.CharField(blank=True, max_length=255)),
('clicked_datetime', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.post')),
],
),
]
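# The migration above creates a simple click-tracking model. Hypothetical
# usage once applied (assumes an existing Post instance `post`):
#
#   from notification.models import Click
#   Click.objects.create(post=post, entity="post_title")
#
# `clicked_datetime` defaults to the datetime.datetime.now callable, so it
# can be omitted at creation time.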
|
package tda
import (
"math"
"gonum.org/v1/gonum/floats"
)
// ConvexPeel supports calculation of a sequence of convex hulls for a
// point set.
type ConvexPeel struct {
// The points we are working with
x []float64
y []float64
// The angles of all points with respect to the reference
// point
ang []float64
// The points that have been masked because they were already
// peeled
skip []bool
// The points that have been masked because they were already
// peeled, or because they have collinearity with points that
// are further from the reference point.
skip2 []bool
// The index positions of the current hull points
hullPtsPos []int
// The centroid of the current set of points
centroid [2]float64
}
// NewConvexPeel calculates a sequence of peeled convex hulls for the
// given points.
func NewConvexPeel(x, y []float64) *ConvexPeel {
if len(x) != len(y) {
panic("Incompatible lengths")
}
// These are modified internally, so make copies.
u := make([]float64, len(x))
copy(u, x)
x = u
u = make([]float64, len(y))
copy(u, y)
y = u
cp := &ConvexPeel{
x: x,
y: y,
ang: make([]float64, len(x)),
skip: make([]bool, len(x)),
skip2: make([]bool, len(x)),
}
cp.run()
return cp
}
func (cp *ConvexPeel) run() {
cp.sort()
cp.getCentroid()
cp.setSkip()
cp.findHull()
}
func (cp *ConvexPeel) getCentroid() {
cp.centroid = [2]float64{0, 0}
n := 0
for i := range cp.skip {
if cp.skip[i] {
continue
}
n++
cp.centroid[0] += cp.x[i]
cp.centroid[1] += cp.y[i]
}
cp.centroid[0] /= float64(n)
cp.centroid[1] /= float64(n)
}
// Centroid returns the centroid of the current point set, i.e. the
// points that have not been peeled.
func (cp *ConvexPeel) Centroid() [2]float64 {
return cp.centroid
}
// NumPoints returns the number of active points (i.e. the number of
// points that have not been peeled).
func (cp *ConvexPeel) NumPoints() int {
n := 0
for i := range cp.skip {
if !cp.skip[i] {
n++
}
}
return n
}
// sort finds a reference point, and sorts the points by angle
// relative to this reference point.
func (cp *ConvexPeel) sort() {
// Find a reference point with the least y coordinate. If
// there are ties at the least y coordinate, choose the one
// with least x coordinate.
jj := -1
var ymin float64
for i := range cp.y {
if !cp.skip[i] {
if jj == -1 || cp.y[i] < ymin || (cp.y[i] == ymin && cp.x[i] < cp.x[jj]) {
ymin = cp.y[i]
jj = i
}
}
}
// Angles with respect to the reference point.
for i := range cp.x {
cp.ang[i] = math.Atan2(cp.y[i]-cp.y[jj], cp.x[i]-cp.x[jj])
}
ii := make([]int, len(cp.x))
floats.Argsort(cp.ang, ii)
// In case of ties, make sure the reference point is first
for k := range ii {
if ii[k] == jj {
if k != 0 {
ii[0], ii[k] = ii[k], ii[0]
cp.ang[0], cp.ang[k] = cp.ang[k], cp.ang[0]
}
break
}
}
u := make([]float64, len(cp.x))
for j, i := range ii {
u[j] = cp.x[i]
}
u, cp.x = cp.x, u
for j, i := range ii {
u[j] = cp.y[i]
}
cp.y = u
v := make([]bool, len(cp.x))
for j, i := range ii {
v[j] = cp.skip[i]
}
cp.skip = v
}
// Peel removes the current hull points and recomputes the hull.
func (cp *ConvexPeel) Peel() {
for _, i := range cp.hullPtsPos {
cp.skip[i] = true
}
cp.run()
}
// Reset returns to the original state, with no points having been peeled.
func (cp *ConvexPeel) Reset() {
for i := range cp.skip {
cp.skip[i] = false
}
cp.run()
}
// Stats obtains the area, perimeter, and centroid for a series of convex
// peel profiles of a point set. One profile is computed per value of the
// depth argument, whose values must be strictly decreasing.
func (cp *ConvexPeel) Stats(depth []float64) []Stat {
cp.Reset()
for j := 1; j < len(depth); j++ {
if depth[j] >= depth[j-1] {
panic("depth values must be decreasing")
}
}
var stats []Stat
for _, f := range depth {
cp.PeelTo(f)
stat := Stat{
Depth: f,
Area: cp.Area(),
Perimeter: cp.Perimeter(),
Centroid: cp.Centroid(),
}
stats = append(stats, stat)
}
return stats
}
// PeelTo peels until fewer than the given fraction of the original
// points remain.
func (cp *ConvexPeel) PeelTo(frac float64) {
if frac <= 0 || frac >= 1 {
panic("frac must be in (0, 1)\n")
}
for {
n := 0
for i := range cp.skip {
if !cp.skip[i] {
n++
}
}
if float64(n) < frac*float64(len(cp.x)) {
break
}
cp.Peel()
}
}
// cross computes the cross product among three points. The sign of
// the result indicates whether there is a left turn or a right turn
// when traversing the three points.
func (cp *ConvexPeel) cross(i0, i1, i2 int) float64 {
f := (cp.x[i1] - cp.x[i0]) * (cp.y[i2] - cp.y[i0])
g := (cp.y[i1] - cp.y[i0]) * (cp.x[i2] - cp.x[i0])
return f - g
}
// setSkip identifies points that need to be skipped either because
// they have been previously peeled off, or because they are not the
// longest point along a ray beginning at the reference point.
func (cp *ConvexPeel) setSkip() {
tol := 1e-12
n := len(cp.skip)
copy(cp.skip2, cp.skip)
var di []float64
var i, j int
for i < n {
if cp.skip2[i] {
i++
continue
}
// Find a run of points with equal angle
di = di[0:0]
for j = i; j < n && math.Abs(cp.ang[j]-cp.ang[i]) < tol; j++ {
if cp.skip2[j] {
di = append(di, 0)
continue
}
dx := cp.x[j] - cp.x[0]
dy := cp.y[j] - cp.y[0]
di = append(di, dx*dx+dy*dy)
}
mx := floats.Max(di)
for j = i; j < n && math.Abs(cp.ang[j]-cp.ang[i]) < tol; j++ {
if cp.skip2[j] {
continue
}
if di[j-i] < mx {
cp.skip2[j] = true
}
}
i = j
}
cp.skip2[0] = false
}
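// findHull performs a Graham scan over the angle-sorted, non-skipped
// points: candidates that would force a clockwise (or collinear) turn are
// popped, leaving the counterclockwise hull vertices in hullPtsPos.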
func (cp *ConvexPeel) findHull() {
pts := cp.hullPtsPos[0:0]
for i := range cp.skip2 {
if cp.skip2[i] {
continue
}
for len(pts) > 1 && cp.cross(pts[len(pts)-2], pts[len(pts)-1], i) <= 0 {
pts = pts[0 : len(pts)-1]
}
pts = append(pts, i)
}
cp.hullPtsPos = pts
}
// Perimeter returns the perimeter of the current convex hull.
func (cp *ConvexPeel) Perimeter() float64 {
var per float64
pts := cp.hullPtsPos
for i := range pts {
j := (len(pts) + i - 1) % len(pts)
dx := cp.x[pts[i]] - cp.x[pts[j]]
dy := cp.y[pts[i]] - cp.y[pts[j]]
per += math.Sqrt(dx*dx + dy*dy)
}
return per
}
// HullPoints returns the points that are on the current convex hull.
func (cp *ConvexPeel) HullPoints(buf [][2]float64) [][2]float64 {
buf = buf[0:0]
pts := cp.hullPtsPos
for _, i := range pts {
buf = append(buf, [2]float64{cp.x[i], cp.y[i]})
}
return buf
}
// Area returns the area of the current convex hull.
func (cp *ConvexPeel) Area() float64 {
pts := cp.hullPtsPos
// Calculate the centroid of the hull points
var center [2]float64
for i := range pts {
center[0] += cp.x[pts[i]]
center[1] += cp.y[pts[i]]
}
center[0] /= float64(len(pts))
center[1] /= float64(len(pts))
j := len(pts) - 1
dx := cp.x[pts[j]] - center[0]
dy := cp.y[pts[j]] - center[1]
a := math.Sqrt(dx*dx + dy*dy)
var area float64
for i := range pts {
j := (len(pts) + i - 1) % len(pts)
dx = cp.x[pts[i]] - center[0]
dy = cp.y[pts[i]] - center[1]
b := math.Sqrt(dx*dx + dy*dy)
dx = cp.x[pts[i]] - cp.x[pts[j]]
dy = cp.y[pts[i]] - cp.y[pts[j]]
c := math.Sqrt(dx*dx + dy*dy)
s := (a + b + c) / 2
area += math.Sqrt(s * (s - a) * (s - b) * (s - c))
a = b
}
return area
}
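// Example usage (illustrative):
//
//	cp := NewConvexPeel(xs, ys)
//	cp.PeelTo(0.5)                   // keep roughly the deepest half of the points
//	a, p := cp.Area(), cp.Perimeter()
//	hull := cp.HullPoints(nil)       // vertices of the remaining hull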
|
import {
useToast,
Modal,
ModalOverlay,
ModalContent,
ModalHeader,
ModalFooter,
ModalBody,
ModalCloseButton,
Button,
useDisclosure,
} from '@chakra-ui/react'
import { useSession } from 'next-auth/client'
import React, { useState } from 'react'
import { FaEdit } from 'react-icons/fa'
import { RiDeleteBin5Line } from 'react-icons/ri'
import { ImCancelCircle } from 'react-icons/im'
import { GiGiftOfKnowledge } from 'react-icons/gi'
import NewReply from '../../components/forum/NewReply'
import LikeButton from '../common/LikeButton'
import TextContainer from '../common/TextContainer'
import { getCurrentWeek, renderMdToHtml, timeSince } from '../common/Util'
import { deleteReply, Reply, updateReplyLikes } from './ForumAPI'
import NewPost from './NewPost'
import { nanoid } from 'nanoid'
import DisplayName from '../profile/DisplayName'
import { useUserId } from '../store/user'
import { useUser } from '../profile/UserAPI'
import { useCurrentModule } from '../store/module'
const ReplyListItem = ({ reply }: { reply: Reply }): JSX.Element => {
const currentReply = reply
const lastEdited = timeSince(currentReply.edited_date)
const [editing, setEditing] = useState(false)
const [session] = useSession()
const toast = useToast()
const { isOpen, onOpen, onClose } = useDisclosure()
const userId = useUserId()
const { user, isLoading: userLoading } = useUser()
const role = userLoading ? 'student' : user.role
const {
state: { moduleId },
} = useCurrentModule()
return (
<TextContainer>
<a className="flex items-center border-b border-grey-200 flex-grow py-2 dark:bg-gray-800">
<div className="flex justify-between px-6 flex-grow">
<div className="text-sm title-font font-medium text-gray-400 dark:text-gray-100">
<DisplayName author_id={currentReply.author_id} />
</div>
<div className="text-sm title-font font-medium text-gray-400 dark:text-gray-100">
{lastEdited} ago {currentReply.is_edited ? '(edited)' : ''}
</div>
</div>
</a>
<div className="px-6 py-4">
{editing ? (
<NewReply
postId={currentReply.post_id}
content={currentReply.content}
id={currentReply.id}
label="Edit comment"
setEditing={setEditing}
/>
) : (
<p className="leading-relaxed mb-6">
{
<span
className="prose-sm lg:prose dark:text-white"
dangerouslySetInnerHTML={{ __html: renderMdToHtml(currentReply.content) }}
/>
}
</p>
)}
<div className="flex justify-items-end">
{(session && userId === currentReply.author_id) || role === 'admin' ? (
<>
<button
onClick={() => setEditing(!editing)}
className="text-gray-400 inline-flex items-center text-sm">
{editing ? (
<>
<ImCancelCircle className="w-4 h-4 mr-1" />
<span>cancel</span>
</>
) : (
<>
<span>edit</span>
<FaEdit className="w-4 h-4 ml-1" />
</>
)}
</button>
<button
onClick={onOpen}
className="text-gray-400 inline-flex items-center text-sm mx-2">
{editing ? (
<>
<GiGiftOfKnowledge className="w-4 h-4 ml-1" />
<span>make wiki</span>
</>
) : (
<>
<span>make wiki</span>
<GiGiftOfKnowledge className="w-4 h-4 ml-1" />
</>
)}
</button>
<button
onClick={() => {
setEditing(!editing)
toast({
title: 'Deleted',
status: 'warning',
duration: 5000,
isClosable: true,
position: 'top-right',
})
deleteReply(moduleId, currentReply.id, currentReply.post_id)
}}
className="text-gray-400 mr-2 inline-flex items-center text-sm">
{editing ? (
<>
<RiDeleteBin5Line className="w-4 h-4" />
<span>delete</span>
</>
) : (
''
)}
</button>
<Modal isOpen={isOpen} onClose={onClose} size="xl">
<ModalOverlay />
<ModalContent>
<ModalHeader></ModalHeader>
<ModalCloseButton />
<ModalBody>
<NewPost
label="Make into wiki"
currentPost={{
id: nanoid(),
author_id: currentReply.author_id,
title: '',
content: currentReply.content,
created_date: 0,
edited_date: 0,
tags: ['Wiki'],
week: getCurrentWeek(),
reply_count: 0,
up_votes: 0,
is_edited: false,
}}
/>
</ModalBody>
<ModalFooter>
<Button colorScheme="blue" mr={3} onClick={onClose}>
Cancel
</Button>
</ModalFooter>
</ModalContent>
</Modal>
</>
) : (
<></>
)}
<LikeButton
key={currentReply.id}
likeCount={currentReply.up_votes}
handleUpdate={() => {
updateReplyLikes(
moduleId,
currentReply.up_votes + 1,
currentReply.id,
currentReply.post_id
)
}}
/>
</div>
</div>
</TextContainer>
)
}
export default ReplyListItem
|
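/**
 * A Toggl time entry mapped onto a Jira work log entry. `id` is optional
 * because it is only known once the work log exists in Jira; `started` is
 * expected to be an ISO-8601 timestamp string accepted by Jira's REST API.
 */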
export default interface JiraWorkLog {
id?: number;
issueKey: string;
timeSpentSeconds: number;
comment: string;
started: string;
}
|
import { setUrlQuery, variableTypeDetection, generateUUID } from '@zyf2e/monitor-utils'
import { STORAGE_KEY } from '../constant'
export function noop() {}
// wx mini-program environment helpers
export function getDeviceId(): string {
let deviceId: string = wx.getStorageSync(STORAGE_KEY.deviceId)
if (!deviceId) {
deviceId = generateUUID() // assign to the outer variable; re-declaring it with const here would leave the function returning '' on first call
wx.setStorageSync(STORAGE_KEY.deviceId, deviceId)
}
return deviceId
}
export function getPageUrl(setQuery = true) {
if (!variableTypeDetection.isFunction(getCurrentPages)) {
return ''
}
const pages = getCurrentPages() // when called inside App, no page has been created yet, so the stack length is 0
if (!pages.length) {
return 'App'
}
const page = pages[pages.length - 1]
return setQuery ? setUrlQuery(page.route, page.options) : page.route
}
|
At first, Keira Knightley thought everything was going to be fine.
Her pregnancy had been delightful, so she’d give birth to her first child and then continue working at her normal pace. A Broadway show and two films in a year? Try her.
But after Knightley had her daughter, Edie, things didn’t go according to plan. She was hormonal, for one. And tired. Because Edie never seemed to sleep.
Still, she intended to keep her obligations. She performed eight times a week in a stage production of Therese Raquin and then filmed a supporting role in the drama Collateral Beauty.
But in the summer of 2016, staring down the lead role in the period drama Colette, Knightley decided she needed a break.
Director Wash Westmoreland wasn’t exactly thrilled to push the start date on Colette — “no one welcomes that news,” he said — but that delay ended up being “the best thing that ever happened”.
The filmmaker was able to spend the year finessing the script about the renowned French novelist, who initially wrote under her husband’s name until her work became so successful in the early 1900s that she fought for recognition.
Alas, Edie — then age two — still wasn’t sleeping regularly when production began in Budapest in 2017. But by then, Knightley had moved out of what she describes as the “Oh, my God, how am I a mother?” stage and into the “I am the mother” one.
And it still does, Knightley said. On her way from London to the Toronto International Film Festival, where Colette screened after premiering earlier this year at Sundance, she was seated next to the actress Rosamund Pike on the plane.
Sitting in a hotel conference room, her Chanel flats looking almost too nice against the brashly patterned carpet, Knightley has delved into discussing motherhood even though it’s a topic she thinks the media generally mishandles.
She’s bothered by the societal norm that men serve predominantly as providers while women are expected to juggle both maternal and career obligations.
Knightley spends a lot of time thinking about gender roles. Growing up, when she began to think about an acting career, it was the male parts she dreamt of having. At age 12, she spent one summer obsessively watching The Godfather, dreaming of playing Michael Corleone. She liked that he was a morally ambiguous hero.
Colette, she felt, was a hero. Knightley wanted a bit of her courage and felt like she was “standing tall” when she embodied her.
In the past year, in particular, Knightley said she’s been grappling with how much to use her voice. She’s happy to be asked about “more than lip gloss” — she’s been a face of Chanel for a decade now — but it’s new to her.
And the #MeToo movement. Knightley said she attended two Time’s Up meetings in the UK, which she found interesting. But she felt slightly out of place.
She continues to doubt herself as the discussion moves to social media, which she doesn’t use. She says her non-work days are “too [expletive] boring” to document on Instagram, since they’re mostly composed of dropping off and picking up Edie from preschool. And the idea of sharing “This is what I think!” on Twitter without being asked makes her squeamish.
Meanwhile, Knightley has earned a reputation for playing the leading lady in period dramas such as Pride & Prejudice, which brought her an Oscar nomination in 2006, and Atonement — something she returns to with Colette.
But she tends to find that period films feature more interesting female characters who aren’t just window dressing or exploitation fodder.
Westmoreland and his late husband and filmmaking partner, Richard Glatzer, had talked about Knightley playing Colette at various points over the 17 years they spent trying to get the movie produced. After making the film ‘Still Alice’ together, the two men watched Julianne Moore accept her lead actress Oscar while Glatzer was in the ICU at Cedars Sinai.
There, as his body deteriorated from fighting ALS, he used his toe on a speaking device to spell out what he wanted Westmoreland to make next: Colette.
Colette releases in the UAE on December 6. |
/**
* Copyright (c) 2015 The JobX Project
* <p>
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.jobxhub.server.service;
import com.google.common.collect.Lists;
import com.jobxhub.common.util.CommonUtils;
import com.jobxhub.common.util.collection.HashMap;
import com.jobxhub.server.domain.AgentGroupBean;
import com.jobxhub.server.domain.GroupBean;
import com.jobxhub.server.dao.GroupDao;
import com.jobxhub.server.dto.Agent;
import com.jobxhub.server.dto.Group;
import com.jobxhub.server.tag.PageBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@Service
public class GroupService {
@Autowired
private GroupDao groupDao;
@Autowired
private AgentService agentService;
public void getByPageBean(PageBean pageBean) {
List<GroupBean> groupBeans = groupDao.getByPageBean(pageBean);
if (CommonUtils.notEmpty(groupBeans)) {
int count = groupDao.getCount(pageBean.getFilter());
List<Group> groups = new ArrayList<Group>(0);
for (GroupBean groupBean:groupBeans) {
Group group = Group.transfer.apply(groupBean);
int agentCount = groupDao.getAgentCount(group.getGroupId());
group.setAgentCount(agentCount);
groups.add(group);
}
pageBean.setResult(groups);
pageBean.setTotalCount(count);
}
}
public List<Group> getAll() {
return Lists.transform(groupDao.getAll(),Group.transfer);
}
public List<Group> getForAgent() {
List<AgentGroupBean> agentGroups = groupDao.getForAgent();
Group noGroup = new Group();
noGroup.setGroupName("未分组"); // "未分组" means "Ungrouped"
noGroup.setGroupId(0L);
Map<Long, Group> groupMap = new HashMap<Long, Group>(0);
if (CommonUtils.notEmpty(agentGroups)) {
for (AgentGroupBean agentGroup : agentGroups) {
Agent agent = new Agent();
agent.setAgentId(agentGroup.getAgentId());
agent.setName(agentGroup.getAgentName());
agent.setHost(agentGroup.getAgentHost());
if (agentGroup.getGroupId() == null) {
noGroup.getAgentList().add(agent);
} else {
if (groupMap.get(agentGroup.getGroupId()) == null) {
Group group = new Group();
group.setGroupId(agentGroup.getGroupId());
group.setGroupName(agentGroup.getGroupName());
group.getAgentList().add(agent);
groupMap.put(agentGroup.getGroupId(), group);
} else {
groupMap.get(agentGroup.getGroupId()).getAgentList().add(agent);
}
}
}
}
List<Group> groups = new ArrayList<Group>(0);
groups.add(noGroup);
for (Map.Entry<Long, Group> entry : groupMap.entrySet()) {
groups.add(entry.getValue());
}
return groups;
}
public void merge(Group group) {
GroupBean groupBean = GroupBean.transfer.apply(group);
if (groupBean.getGroupId() == null) {
groupDao.save(groupBean);
group.setGroupId(groupBean.getGroupId());
// save the new group-agent associations
groupDao.saveGroup(group.getGroupId(),group.getAgentIds());
}else {
groupDao.update(groupBean);
// remove the old group-agent associations
groupDao.deleteGroup(groupBean.getGroupId());
// save the current group-agent associations
groupDao.saveGroup(groupBean.getGroupId(),group.getAgentIds());
}
}
public boolean existsName(Long groupId, String groupName) {
Map<String,Object> filter = new HashMap<String, Object>(0);
filter.put("groupId",groupId);
filter.put("groupName",groupName);
return groupDao.existsCount(filter) > 0;
}
public Group getById(Long groupId) {
GroupBean groupBean = groupDao.getById(groupId);
if (groupBean!=null) {
Group group = Group.transfer.apply(groupBean);
List<Agent> agentList = agentService.getByGroup(group.getGroupId());
group.setAgentList(agentList);
return group;
}
return null;
}
}
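// Usage sketch (hypothetical caller; not part of the original file):
//
//   Group group = new Group();
//   group.setGroupName("web-servers");
//   group.setAgentIds(Arrays.asList(1L, 2L)); // setter assumed; getAgentIds() above implies it
//   groupService.merge(group);                // groupId is null, so the group is inserted
//                                             // and agents 1 and 2 are linked to it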
|
Detection of Mycobacterium tuberculosis in clinical specimens by polymerase chain reaction method. An insertion sequence repeated multiple times in the chromosome of Mycobacterium tuberculosis was used as a target for amplification using the polymerase chain reaction (PCR) assay for detecting Mycobacterium tuberculosis in clinical specimens. The sequences of the primers were 5'-CCTGCGAGCGTAGGCGTCGG-3' (primer 1) and 5'-CTCGTCCAGCGCCGCTTCGG-3' (primer 2). One cycle of amplification consisted of denaturing at 94 degrees C for 2 min, primer annealing at 68 degrees C for 2 min, and extension at 72 degrees C for 2 min. DNA (5 fg) extracted from M. tuberculosis was detected by gel electrophoresis and Southern blot hybridization after 40 cycles of amplification. Amplification products were not obtained from DNA extracted from M. kansasii, M. intracellulare, M. avium, M. fortuitum, Escherichia coli, Klebsiella pneumoniae, Pseudomonas aeruginosa, Legionella pneumophila or Staphylococcus aureus; they were obtained only from the M. tuberculosis complex. PCR results were compared with conventional cultural, pathological and microscopic findings in the detection of M. tuberculosis in 112 clinical specimens. There were 25 specimens that were positive for M. tuberculosis by cultural or pathological examination, of which 20 (80%) were positive by PCR. PCR detected the organism in 5 (83%) of 6 smear-positive specimens and 15 (79%) of 19 smear-negative specimens in which culture or pathology revealed M. tuberculosis. In addition, 2 smear-negative specimens and 8 smear-negative and culture-negative specimens were positive by PCR. These 10 samples were collected from patients suspected of having tuberculosis on the basis of clinical history, characteristic radiographs, a positive PPD skin test and the effectiveness of anti-tuberculous drugs. (ABSTRACT TRUNCATED AT 250 WORDS)
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import argparse
import llnl.util.tty as tty
from llnl.util.tty.colify import colify
from llnl.util.filesystem import working_dir
import spack.paths
import spack.repo
from spack.util.executable import which
from spack.cmd import spack_is_git_repo
description = "query packages associated with particular git revisions"
section = "developer"
level = "long"
def setup_parser(subparser):
sp = subparser.add_subparsers(
metavar='SUBCOMMAND', dest='pkg_command')
add_parser = sp.add_parser('add', help=pkg_add.__doc__)
add_parser.add_argument('packages', nargs=argparse.REMAINDER,
help="names of packages to add to git repo")
list_parser = sp.add_parser('list', help=pkg_list.__doc__)
list_parser.add_argument('rev', default='HEAD', nargs='?',
help="revision to list packages for")
diff_parser = sp.add_parser('diff', help=pkg_diff.__doc__)
diff_parser.add_argument(
'rev1', nargs='?', default='HEAD^',
help="revision to compare against")
diff_parser.add_argument(
'rev2', nargs='?', default='HEAD',
help="revision to compare to rev1 (default is HEAD)")
add_parser = sp.add_parser('added', help=pkg_added.__doc__)
add_parser.add_argument(
'rev1', nargs='?', default='HEAD^',
help="revision to compare against")
add_parser.add_argument(
'rev2', nargs='?', default='HEAD',
help="revision to compare to rev1 (default is HEAD)")
rm_parser = sp.add_parser('removed', help=pkg_removed.__doc__)
rm_parser.add_argument(
'rev1', nargs='?', default='HEAD^',
help="revision to compare against")
rm_parser.add_argument(
'rev2', nargs='?', default='HEAD',
help="revision to compare to rev1 (default is HEAD)")
def list_packages(rev):
pkgpath = os.path.join(spack.paths.packages_path, 'packages')
relpath = pkgpath[len(spack.paths.prefix + os.path.sep):] + os.path.sep
git = which('git', required=True)
with working_dir(spack.paths.prefix):
output = git('ls-tree', '--full-tree', '--name-only', rev, relpath,
output=str)
return sorted(line[len(relpath):] for line in output.split('\n') if line)
def pkg_add(args):
"""Add a package to the git stage."""
for pkg_name in args.packages:
filename = spack.repo.path.filename_for_package_name(pkg_name)
if not os.path.isfile(filename):
tty.die("No such package: %s. Path does not exist:" %
pkg_name, filename)
git = which('git', required=True)
with working_dir(spack.paths.prefix):
git('-C', spack.paths.packages_path, 'add', filename)
def pkg_list(args):
"""List packages associated with a particular spack git revision."""
colify(list_packages(args.rev))
def diff_packages(rev1, rev2):
p1 = set(list_packages(rev1))
p2 = set(list_packages(rev2))
return p1.difference(p2), p2.difference(p1)
def pkg_diff(args):
"""Compare packages available in two different git revisions."""
u1, u2 = diff_packages(args.rev1, args.rev2)
if u1:
print("%s:" % args.rev1)
colify(sorted(u1), indent=4)
        if u2:
            # blank line separates the rev1 and rev2 listings
            print()
if u2:
print("%s:" % args.rev2)
colify(sorted(u2), indent=4)
def pkg_removed(args):
"""Show packages removed since a commit."""
u1, u2 = diff_packages(args.rev1, args.rev2)
if u1:
colify(sorted(u1))
def pkg_added(args):
"""Show packages added since a commit."""
u1, u2 = diff_packages(args.rev1, args.rev2)
if u2:
colify(sorted(u2))
def pkg(parser, args):
if not spack_is_git_repo():
tty.die("This spack is not a git clone. Can't use 'spack pkg'")
action = {'add': pkg_add,
'diff': pkg_diff,
'list': pkg_list,
'removed': pkg_removed,
'added': pkg_added}
action[args.pkg_command](args)
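# Example invocations, matching the subcommands registered above
# (run from a git clone of spack; not part of the original file):
#
#   spack pkg list HEAD           # packages present at HEAD
#   spack pkg diff HEAD~2 HEAD    # packages unique to each revision
#   spack pkg added v0.11.0 HEAD  # packages added since the v0.11.0 tag
#   spack pkg removed HEAD^ HEAD  # packages removed by the last commit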
|
#include "StdAfx.h"
#include "ComponentsStatus.h"
ComponentsStatus::ComponentsStatus()
: m_all_required(true)
, m_all_optional(true)
, m_all_checked(true)
{
}
ComponentsStatus::ComponentsStatus(const ComponentsStatus& rhs)
{
operator=(rhs);
}
ComponentsStatus& ComponentsStatus::operator=(const ComponentsStatus& s)
{
m_all_required = s.all_required();
m_all_optional = s.all_optional();
m_all_checked = s.all_checked();
return *this;
}
void ComponentsStatus::add_required(bool value)
{
m_all_required &= value;
}
void ComponentsStatus::add_optional(bool value)
{
m_all_optional &= value;
}
void ComponentsStatus::add_checked(bool value)
{
m_all_checked &= value;
}
|
Bonds of DirectBuy Holdings Inc. plunged for a second day after a judge in Connecticut rejected a nationwide settlement of a class-action lawsuit over the consumer club's sales practices.
DirectBuy's $335 million of 12 percent notes due in February 2017 dropped as much as 7.5 cents to a mid-price of 35.5 cents on the dollar, according to Brownstone Investment Group LLC. The bonds are trading at around the lowest level since they were issued in January.
The debt, which was raised by the Merrillville, Ind.-based company through a private offering led by JPMorgan Chase & Co. in January, has plunged from 99.2 cents in February, according to Trace, the bond-price reporting system of the Financial Industry Regulatory Authority.
U.S. District Judge Janet Hall said in Bridgeport on Monday that the proposed class-action settlement would have allowed DirectBuy to settle claims the court estimates may be worth more than $2 billion for as little as $15 million. DirectBuy didn't admit any wrongdoing in the agreement.
def unique_class_ids(self):
    # Pool the support and query class ids (each aggregated across
    # replicas) and reduce them to the distinct ids they contain.
    return compute_unique_class_ids(
tf.concat((distribute_utils.aggregate(self.support_class_ids),
distribute_utils.aggregate(self.query_class_ids)), -1))
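For orientation, the same computation in plain TensorFlow, with tf.unique standing in for the project's compute_unique_class_ids helper and the cross-replica aggregation omitted (a minimal sketch, not the original code):

import tensorflow as tf

support_class_ids = tf.constant([3, 1, 3, 7])
query_class_ids = tf.constant([1, 7, 9])

# Concatenate along the last axis, then keep each class id once.
merged = tf.concat((support_class_ids, query_class_ids), -1)
unique_ids, _ = tf.unique(merged)  # tensor [3, 1, 7, 9]
|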
#pragma once
// Requires C++20
#include "../Functional.hpp"
namespace hsd
{
struct default_t {};
constexpr default_t default_v{};
template <typename S, typename F>
struct defaultcall_t;
template <typename F, typename Result, typename... Args>
struct defaultcall_t<Result(Args...), F>;
template <usize Id, typename F, typename Result, typename... Args, typename T>
constexpr auto default_cast(defaultcall_t<Result(Args...), F> const&, T&& x);
template <usize Id, typename F, typename Result, typename... Args>
constexpr auto default_cast(defaultcall_t<Result(Args...), F> const& c, default_t);
template <typename F, typename Result, typename... Args>
struct defaultcall_t<Result(Args...), F>
{
tuple<Args...> default_args;
F func;
constexpr defaultcall_t(F&& func, Args&&... args)
: default_args(forward<Args>(args)...), func(forward<F>(func))
{}
template <typename... U>
constexpr Result operator()(U&&... args) const
{
// Because GCC is stupid and doesn't support proper capturing
return [this]<usize... Seq>(index_sequence<Seq...>, auto&&... args)
{
#define default_cast_func default_cast<Seq, F, Result, Args...>
if constexpr (UnwrapInvocable<
F, decltype(default_cast_func(*this, forward<U>(args)))...
>)
{
return func(default_cast_func(*this, forward<U>(args))...).unwrap();
}
else
{
return func(default_cast_func(*this, forward<U>(args))...);
}
#undef default_cast_func
}(index_sequence_for<Args...>{}, forward<U>(args)...);
}
};
template <typename S, typename F, typename... Args>
defaultcall_t<S, F> make_defaultcall(F&& func, Args&&... args)
{
return defaultcall_t<S, F>(forward<F>(func), forward<Args>(args)...);
}
template <usize Id, typename F, typename Result, typename... Args, typename T>
requires (!IsSame<T, default_t>)
constexpr auto default_cast(defaultcall_t<Result(Args...), F> const&, T&& x)
{
return forward<T>(x);
}
template <usize Id, typename F, typename Result, typename... Args>
constexpr auto default_cast(defaultcall_t<Result(Args...), F> const& c, default_t)
{
return c.default_args.template get<Id>();
}
template <typename Res, typename... Args>
defaultcall_t(Res(*)(Args...), Args&&...)
-> defaultcall_t<Res(Args...), Res(*)(Args...)>;
template <typename Res, typename... Args>
defaultcall_t(Res(&)(Args...), Args&&...)
-> defaultcall_t<Res(Args...), Res(&)(Args...)>;
template <typename Res, typename... Args>
defaultcall_t(function<Res(Args...)>&&, Args&&...)
-> defaultcall_t<Res(Args...), function<Res(Args...)>>;
template < typename Func, typename... Args,
typename Op = decltype(&remove_reference_t<Func>::operator()) >
defaultcall_t(Func&&, Args&&...)
-> defaultcall_t<typename functional_helper::as_function<Op>::type, Func>;
}
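// Usage sketch (hypothetical; illustrates the declarations above):
//
//     auto sub = hsd::make_defaultcall<int(int, int)>(
//         [](int a, int b) { return a - b; }, 10, 4);
//
//     sub(hsd::default_v, hsd::default_v); // both defaults substituted -> 6
//     sub(20, hsd::default_v);             // only the second default   -> 16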
|
Can Tense Be Subject to Grammatical Illusion? Part 1: A Design of an ERP Study on the Processing of Tense and Aspect Mismatches in Compound Future Constructions in Polish This two-part paper is concerned with the processing of two types of compound future in Polish, with infinitival and participial complements. In the first part we present the design and predictions of an ERP study whose goal was to monitor the EEG correlates of two types of temporal mismatches: i) tense mismatches between the future auxiliary and the past tense modifier wczoraj (yesterday) relative to the jutro (tomorrow) baseline and ii) aspect mismatches between the future auxiliary and the perfective aspect of the lexical complement relative to the imperfective baseline. In addition, we wanted to assess whether matching tense specifications in different words of a sentence can cause grammatical illusions. To this end, we tested whether the presence of the adverb wczoraj (yesterday), specified for past tense, could give rise to an illusion of grammaticality for perfectives as l-participles (allegedly marked for tense), but not as infinitives (not having any tense specification). The study and its results, as well as a general discussion of the findings, will be presented in Part II of the paper.
''' submodule with the plotting helpers of hsp '''
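# Note (inferred from the functions below; not part of the original module):
# every helper takes a dict with at least these keys:
#   'wn'        1D array of wavenumbers (cm-1)
#   'r'         2D array of spectra, one row per selected pixel
#   'sel'       boolean mask of the valid pixels over the dx*dy grid
#   'dx', 'dy'  image dimensions in pixels
#   'filename'  source file name, used in the plot titles
# emsc() additionally expects an 'EMSC_coeff' coefficient matrix.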
## build an image from the intensity at the first wavenumber above b
def intt(datta,b):
import matplotlib.pyplot as plt
import numpy as np
xx = 2
yy = 2
sel = datta ['wn'] > b
ver = datta['r'][:,sel]
ver = ver[:,0]
dplot = np.zeros(datta['dx']*datta['dy']);
dplot[datta['sel']] = ver
dplot =dplot.reshape(datta['dx'],datta['dy'])
plt.figure()
plt.pcolor(dplot, vmin=np.min(ver), vmax=np.max(ver))
plt.clim(np.min(ver),np.max(ver))
plt.colorbar()
l = 'imagem da intensidade ' +str(b)+ ' cm-1 \n ' + str(datta['filename'])[:-4]
plt.title(l)
plt.show()
## build an image from the integrated area of the band between a and b
def area(data,a,b):
import numpy as np
import matplotlib.pyplot as plt
sel = (data['wn'] > (a)) & (data['wn'] < (b) )
r = data['r'][:,sel]
area = np.trapz(r)
    print(area.min())
dplot = np.zeros(data['dx']*data['dy']);
dplot[data['sel']] = area
dplot =dplot.reshape(data['dx'],data['dy'])
plt.figure()
plt.pcolor(dplot, vmin=np.min(area), vmax=np.max(area))
plt.clim(np.min(area),np.max(area))
plt.colorbar()
l = 'imagem da area da banda entre ' +str(a)+ ' cm-1'+str(b)+ ' cm-1 \n ' + str(data['filename'])[:-4]
plt.title(l)
plt.show()
## build an image from the alpha coefficient of the region between ini1 and fim1; alpha is defined by the equation spc_i = alpha * meanspc
def mean(data,ini1,fim1):
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
sel = np.logical_and(data['wn'] > ini1,data['wn'] < fim1)
r1 = data['r'][:,sel]
media = np.mean(r1,axis=0).reshape(-1,1)
meansvalue = np.zeros((data['r'].shape[0]))
y = r1[:,:].T
xx= np.vstack((r1.mean(axis=0),np.ones_like(r1[1,:]))).T
alpha= np.linalg.lstsq(xx, y,rcond =-1)[0][0].T
meansvalue = (alpha)
dplot = np.zeros(data['dx']*data['dy']);
dplot[data['sel']] = meansvalue
dplot =dplot.reshape(data['dx'],data['dy'])
plt.pcolor(dplot,vmin=np.min(meansvalue), vmax=np.max(meansvalue))
plt.clim(np.min(meansvalue),np.max(meansvalue))
plt.colorbar()
l = 'imagem da meanspc entre ' +str(ini1)+ ' cm-1'+str(fim1)+ ' cm-1 \n ' + str(data['filename'])[:-4]
plt.title(l)
return dplot
# plot nspc randomly chosen spectra
def pplot(data,nspc):
import numpy as np
import matplotlib.pyplot as plt
r = data['r']
k = np.random.randint(0,r.shape[0],(nspc),dtype='uint32')
plt.figure()
for i in k:
plt.plot(data['wn'],r[i][:])
plt.xlabel(' Número de onda')
plt.show()
# plot an image in which each pixel intensity is one EMSC coefficient
def emsc(datta,b):
import matplotlib.pyplot as plt
import numpy as np
ver = datta['EMSC_coeff'][datta['sel'],b]
dplot = np.zeros(datta['dx']*datta['dy']);
dplot[datta['sel']] = ver
dplot =dplot.reshape(datta['dx'],datta['dy'])
plt.figure()
plt.pcolor(dplot)
plt.clim(np.min(ver),np.max(ver))
plt.colorbar()
l = 'histograma do coeficente EMSC' + str(datta['filename'])[:-4]
plt.title(l)
plt.show()
## build an intensity image and let the user click pixels to inspect their spectra
def int_plt(dados,wnsel):
import numpy as np
import matplotlib.pyplot as plt
r = dados['r'].copy()
dx = dados['dx']
dy = dados['dy']
sel = dados['sel']
wn = dados['wn']
rr = np.zeros((sel.shape[0],r.shape[1]))
rr[sel,:] = r
sel = wn>wnsel
r = 0
rr = rr.reshape(dx,dy,-1)
plt.close('all')
plt.figure(1)
z = np.arange(0,rr.shape[2])
z = z[sel][0]
plt.pcolor(rr[:,:,z])
x = 15
y = 15
while (x > 14 or y > 14 ):
ver = plt.ginput(1)
x = int(ver[0][0])
y = int(ver[0][1])
print(x,' ',y)
plt.close(2)
plt.figure(2)
plt.plot(wn,rr[y,x,:])
plt.title('espectro do pixel x='+str(x)+' y= '+ str(y))
plt.figure(1)
plt.close(1)
plt.close(2)
## build a band-area image between a and b and let the user click pixels to inspect their spectra
def area_plt(dados,a,b):
import numpy as np
import matplotlib.pyplot as plt
sel = (dados['wn'] > (a)) & (dados['wn'] < (b) )
r = dados['r'][:,sel]
area = np.trapz(r)
    print(area.min())
dplot = np.zeros(dados['dx']*dados['dy']);
dplot[dados['sel']] = area
dplot =dplot.reshape(dados['dx'],dados['dy'])
plt.figure(1)
plt.pcolor(dplot, vmin=np.min(area), vmax=np.max(area))
plt.clim(np.min(area),np.max(area))
plt.colorbar()
l = 'imagem da area da banda entre ' +str(a)+ ' cm-1'+str(b)+ ' cm-1 \n ' + str(dados['filename'])[:-4]
plt.title(l)
r = dados['r'].copy()
dx = dados['dx']
dy = dados['dy']
sel = dados['sel']
wn = dados['wn']
rr = np.zeros((sel.shape[0],r.shape[1]))
rr[sel,:] = r
rr = rr.reshape(dx,dy,-1)
x = 15
y = 15
while (x > 14 or y > 14 ):
ver = plt.ginput(1)
x = int(ver[0][0])
y = int(ver[0][1])
print(x,' ',y)
plt.close(2)
plt.figure(2)
plt.plot(wn,rr[y,x,:])
plt.title('espectro do pixel x='+str(x)+' y= '+ str(y))
plt.figure(1)
plt.close(1)
plt.close(2) |
Spectrum of cervical lesions observed in 500 cases: Carcinoma cervix the leading cause of death in females. INTRODUCTION The present study was done to document the various histopathological variants of cervical lesions, to determine the age incidence, and to highlight preventive measures and early detection of cervical cancer. MATERIALS AND METHODS This is a retrospective and prospective study done on all the hysterectomy specimens and on punch biopsy samples sent for histopathology. We studied a total of 500 cases over a period of 3 years. RESULTS Out of the 500 cases, 395 were non-neoplastic and 105 were neoplastic lesions. Chronic cervicitis was the most common non-neoplastic lesion and large cell non-keratinizing squamous cell carcinoma was the most common malignant lesion. CONCLUSION The main aim of presenting this study is to highlight the various histopathological variants and age incidence, and to promote education about preventive measures and early detection of cervical cancer.
// NewParty creates a Party from a party creation token.
func NewParty(creationToken PartyCreationToken, stringifyUUID string) (*Party, error) {
party := new(Party)
var err error
party.PartyName = creationToken.PartyName
party.PartyUUID, err = uuid.Parse(stringifyUUID)
if err != nil {
return nil, err
}
party.Players = make(map[string]*Player)
party.CircuitConfig = creationToken.CircuitConfig
return party, nil
} |
package main
import (
"time"
"github.com/hybridgroup/gopherbot"
)
func main() {
antenna := gopherbot.Antenna()
	// blink the antenna LED: on and off every half second
	for {
antenna.On()
time.Sleep(500 * time.Millisecond)
antenna.Off()
time.Sleep(500 * time.Millisecond)
}
}
|
/*!
 * Output documentation for the protocol as a markdown file
* \param isBigEndian should be true for big endian documentation, else the documentation is little endian.
* \param inlinecss is the css to use for the markdown output, if blank use default.
*/
void ProtocolParser::outputMarkdown(bool isBigEndian, std::string inlinecss)
{
std::string basepath = support.outputpath;
if (!docsDir.empty())
basepath = docsDir;
std::string filename = basepath + name + ".markdown";
std::string filecontents = "\n\n";
ProtocolFile file(filename, support, false);
std::vector<std::string> packetids;
for(std::size_t i = 0; i < packets.size(); i++)
packets.at(i)->appendIds(packetids);
removeDuplicates(packetids);
if (hasAboutSection())
{
if(title.empty())
filecontents += "# " + name + " Protocol\n\n";
else
filecontents += "# " + title + "\n\n";
if(!comment.empty())
filecontents += outputLongComment("", comment) + "\n\n";
if(title.empty())
{
if(!support.version.empty())
filecontents += name + " protocol version is " + support.version + ".\n\n";
if(!support.api.empty())
filecontents += name + " protocol API is " + support.api + ".\n\n";
}
else
{
if(!support.version.empty())
filecontents += title + " version is " + support.version + ".\n\n";
if(!support.api.empty())
filecontents += title + " API is " + support.api + ".\n\n";
}
}
for(std::size_t i = 0; i < alldocumentsinorder.size(); i++)
{
if(alldocumentsinorder.at(i) == NULL)
continue;
if(alldocumentsinorder.at(i)->isHidden() && !support.showAllItems)
continue;
filecontents += alldocumentsinorder.at(i)->getTopLevelMarkdown(true, packetids);
filecontents += "\n";
}
if (hasAboutSection())
filecontents += getAboutSection(isBigEndian);
if(!titlePage.empty())
file.write("Title:" + title + "\n\n");
if (latexEnabled)
{
file.write("Base Header Level: 1 \n");
file.write("LaTeX Header Level: " + std::to_string(latexHeader) + " \n");
file.write("\n");
}
if (!nocss)
{
file.write("<style>\n");
if(inlinecss.empty())
file.write(getDefaultInlinCSS());
else
file.write(inlinecss);
file.write("</style>\n");
file.write("\n");
}
if(tableOfContents)
{
std::string temp = getTableOfContents(filecontents);
temp += "----------------------------\n\n";
temp += filecontents;
filecontents = temp;
}
if(!titlePage.empty())
{
std::string temp = titlePage;
temp += "\n----------------------------\n\n";
temp += filecontents;
filecontents = temp;
}
replaceinplace(filecontents, "\n---", "<div class=\"page-break\"></div>\n\n\n---");
replaceinplace(filecontents, "°", "°");
file.write(filecontents);
file.flush();
std::string htmlfile = basepath + name + ".html";
std::cout << "Writing HTML documentation to " << htmlfile << std::endl;
#if defined(__APPLE__) && defined(__MACH__)
std::system(("/usr/local/bin/MultiMarkdown " + filename + " > " + htmlfile).c_str());
#else
std::system(("multimarkdown " + filename + " > " + htmlfile).c_str());
#endif
if (latexEnabled)
{
std::string latexfile = basepath + name + ".tex";
std::cout << "Writing LaTeX documentation to " << latexfile << "\n";
#if defined(__APPLE__) && defined(__MACH__)
std::system(("/usr/local/bin/MultiMarkdown " + filename + " > " + latexfile + " --to=latex").c_str());
#else
std::system(("multimarkdown " + filename + " > " + latexfile + " --to=latex").c_str());
#endif
}
} |
The Organization of Services for Children with Diabetes in the United Kingdom: Report of the British Paediatric Association Working Party A British Paediatric Association Working Party was set up in 1987 to examine the organization of services for children with diabetes in the United Kingdom. A questionnaire survey identified 360 consultant paediatricians providing care for children with diabetes in 205 Districts or Health Boards. Sixty-three per cent of paediatricians saw children in a designated paediatric diabetic clinic, 61% reported that a diabetes nurse specialist regularly attended the clinic, and 70% that a dietitian did so. Haemoglobin A1 or other glycosylated proteins were regularly measured by 91% of paediatricians, 76% regularly tested for urinary protein, and 79% and 86% checked blood pressure and eyes, respectively. However, 27% of the Districts or Health Boards in the survey had no designated paediatric diabetic clinic. When the data were analysed by assigning paediatricians to categories according to their degree of specialization in diabetes, only 33% of paediatricians could be described as having a specialist interest in diabetes. There were significant differences in the services provided by the specialist paediatricians when compared with the nonspecialists, particularly with respect to the professional staff regularly seeing children in clinics and the services offered to adolescents. The Working Party recommends that services for children with diabetes may be improved by encouraging at least one paediatrician in each District to develop a special expertise in diabetes. Designated children's diabetic clinics with appropriate supporting staff and services should be available in all Districts.
/**
 * Package with tests for the sorting class.
*
* @author <NAME> (mailto:<EMAIL>)
* @version $1$
* @since 14.08.2017
*/
package ru.job4j.additionaltask; |
package info.phosco.forms.viewer.tabbed.model.browser;
import info.phosco.forms.translate.element.ElementList;
import info.phosco.forms.translate.element.application.FormModule;
import info.phosco.forms.translate.element.application.ModuleAttributes;
import info.phosco.forms.translate.element.parameter.FormParameter;
import info.phosco.forms.viewer.resource.Resource;
import info.phosco.forms.viewer.tabbed.browser.BrowserTreeNode;
import info.phosco.forms.viewer.tabbed.model.NodeType;
public class ParameterTreeFactory {
private ParameterTreeFactory() {
}
@SuppressWarnings("unchecked")
public static BrowserTreeNode build(FormModule module) {
BrowserTreeNode parameter = new BrowserTreeNode(0, NodeType.FOLDER, Resource.getString("node.name.forms.parameters"));
for (FormParameter p : (ElementList<FormParameter>) module.getProperty(ModuleAttributes.PARAMETER_LIST)) {
BrowserTreeNode pnode = new BrowserTreeNode(p.getOffset(), NodeType.ATTRIBUTES, p.getName(), null, ThumbnailFactory.get(p.getType()));
parameter.add(pnode);
}
return parameter;
}
}
|
Optimizing wireless networks for heterogeneous spatial loads Large-scale wireless networks must be designed such that there are no gaps in coverage. It is also desirable to minimize the number of access points used in order to minimize the cost of equipment, installation and maintenance. In this paper, we propose a grid-based approximation algorithm to compute the placement of access points. The algorithm minimizes the number of access points required while ensuring that the received SNR at each location is sufficient to meet the offered load at that location. The algorithm can be used when the offered load is spatially non-homogeneous or when access points can be operated in different modes. The proposed algorithm is guaranteed to find a solution with at most O(c_opt log c_opt) access points if there exists a solution to the access point placement problem using c_opt access points.
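The guarantee quoted above is characteristic of greedy set-cover approximations, so the flavor of a grid-based placement can be sketched as follows (a minimal illustration under that assumption, not the paper's algorithm; the SNR/load feasibility test is abstracted into the covers() predicate, and all names are invented for the example):

from itertools import product

def place_access_points(grid_size, demand_points, covers):
    """Greedily place candidate APs, each time choosing the grid point
    whose coverage set gains the most uncovered demand points."""
    candidates = list(product(range(grid_size), repeat=2))
    uncovered = set(demand_points)
    placed = []
    while uncovered:
        best = max(candidates, key=lambda ap: len(uncovered & covers(ap)))
        gained = uncovered & covers(best)
        if not gained:
            raise ValueError("some demand points cannot be covered")
        placed.append(best)
        uncovered -= gained
    return placed

# Toy usage: an AP serves demand points within Chebyshev distance 1.
demands = [(0, 0), (0, 1), (3, 3), (4, 3)]
covers = lambda ap: {d for d in demands
                     if max(abs(ap[0] - d[0]), abs(ap[1] - d[1])) <= 1}
print(place_access_points(5, demands, covers))  # -> [(0, 0), (3, 2)]
|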
OBJECTIVE To identify the prevalence of the nursing diagnosis of fluid volume excess and its defining characteristics in hemodialysis patients, and the association between them. METHOD Cross-sectional study conducted in two steps. We interviewed 100 patients between the months of December 2012 and April 2013 in a teaching hospital and one hemodialysis clinic. The inference was performed by diagnostician nurses between July and September 2013. RESULTS The diagnosis studied was identified in 82% of patients. The defining characteristics that were statistically associated were: bounding pulses, pulmonary congestion, jugular vein distention, edema, change in electrolytes, weight gain, intake greater than output and abnormal breath sounds. Among these, edema and weight gain conferred the highest odds of the diagnosis being present. CONCLUSION The diagnosis analyzed is prevalent in this population and eight characteristics presented significant association.
import * as assert from 'assert';
import { build } from '../../../api';
import { AppRunner } from '../AppRunner';
describe('cspnonce', function() {
this.timeout(10000);
let r: AppRunner;
// hooks
before('build app', () => build({ cwd: __dirname }));
before('start runner', async () => {
r = await new AppRunner().start(__dirname);
});
after(() => r && r.end());
it('sapper.cspnonce replaced with CSP nonce \'rAnd0m123\' injected via \'res.locals.nonce\'', async () => {
await r.load('/');
assert.equal(
await r.page.$eval('#hasNonce', node => node.getAttribute('nonce')), 'rAnd0m123'
);
assert.equal(
await r.page.$eval('#hasNonceAgain', node => node.getAttribute('nonce')), 'rAnd0m123'
);
});
});
|
Less than a month after Melanie Schmuck died, the Haines Shoe House she co-owned opened to visitors for a new season on the first day of spring.
Hours are 11 a.m. until 5 p.m. Wednesday. It will also be open the same hours Fridays, Saturdays and Sundays until Memorial Day, with additional days after that.
Jeff Schmuck, Melanie's husband, said he will handle behind-the-scenes aspects of the business. The pair bought the iconic landmark, which can be seen from Route 30, in 2015.
"We have a great team of helpers and tour guides that helped Mel out in the past, as well as some fresh faces helping out," Schmuck said by email.
"It was not an easy decision (to reopen), as the shoe became synonymous with Mel. The famous roadside attraction means so much to us, we simply must try. Mel would have wanted to keep it going for future generations, and so do I."
Melanie Schmuck died Feb. 28. She was 38. Her death was announced on the Shoe House's Facebook page, which said she died of "health complications."
On March 9, the Southern Pennsylvania Volkswagen Club and members of the community drove by the shoe house, 197 Shoe House Road in Hellam Township, in honor of Melanie Schmuck.
According to her obituary, Schmuck loved German cars, as well as "enjoyed running ... collecting Pez dispensers, antiquing, yard sales, and flea markets."
Jeff Schmuck said visitors can buy some Beck's Ice Cream when they visit, though Melanie's treats, which she baked for her business "Mellie's Makery," will no longer be available.
"Mel took great pride in running the shoe house and it shows in the legacy she left," Schmuck said. "I truly believe she is now, and will forever be part of the Haines Shoe House fascinating history."
Glenda Colon, of Lancaster County, brought her mom, Nolka Maldonado, and stepdad, Carmelo Pandora, who are visiting from Orlando, to see the shoe house. They didn’t know it was opening day until they arrived, and they ended up being the first visitors of the season.
Colon said she first saw the shoe house about four years ago when she drove by on Route 30. She’s been telling her mom ever since about the house shaped like a shoe. Her mom didn’t believe it was true until she saw it with her own eyes.
The trio thought the tour was inexpensive, just $5 each for adults, so they decided to go inside.
"It is awesome in there," Colon said after their tour finished. "The history, the way it was built ..."
Steve and Vickie Dixon, of York, knew Melanie and Jeff Schmuck because the couples played cards together.
"We wanted to support Mel's passion," Vickie Dixon said of why they stopped out on opening day.
Steve Dixon, a member of the York Motorcycle Club, said the club often used the shoe house as a stop in its rides. Before the Schmucks bought the house, it had been neglected, he said. But Melanie put so much of herself into the house when she fixed it up. Now, he said, being inside the house is such an experience.
"It's like you step back in time," Steve Dixon said.
At Melanie Schmuck's funeral, one of her friends encouraged everyone to wave to the shoe house when they drive past, since Melanie will always be part of the place. That gave Vickie Dixon pause.
"I always now look at the shoe house and I see Mel," she said. "I go by once a week and wave."
Virginia Druck, of Lower Windsor Township, stopped by with her mother, Brenda Coulter, of Shrewsbury Township.
The pair had visited a couple times last year.
"We knew the owner, and she was a sweetheart," Druck said.
Coulter said she had shared interests with Melanie Schmuck, in that they both liked Volkswagens. And, Coulter said, Schmuck was the nicest person. Coulter was at the shoe house once when a running race was being held there. Even though she didn't run, Schmuck gave her a race medal, Coulter said, smiling.
Druck and Coulter stopped by on Wednesday, but they didn't take a tour. That wasn't why they came this time, they said. They came to honor Melanie Schmuck.
"We're just here to feel her spirit," Druck said. "Because this is where she loved to be." |
Application of Electroencephalography to the Study of Cognitive and Brain Functions in Schizophrenia

The electroencephalogram (EEG) recorded from the human scalp is widely used to study cognitive and brain functions in schizophrenia. Current research efforts are primarily devoted to the assessment of event-related potentials (ERPs) and event-related oscillations (EROs), extracted from the ongoing EEG, in patients with schizophrenia and in clinically unaffected individuals who, due to their family history and current mental status, are at high risk for developing schizophrenia. In this article, we discuss the potential usefulness of ERPs and EROs as genetic vulnerability markers, as pathophysiological markers, and as markers of possible ongoing progressive cognitive and cortical deterioration in schizophrenia. Our main purpose is to illustrate that these neurophysiological measures can offer valuable quantitative biological markers of basic pathophysiological mechanisms and cognitive dysfunctions in schizophrenia, yet they may not be specific to current psychiatry's diagnosis and classification. These biological markers can provide unique information on the nature and extent of cognitive and brain dysfunction in schizophrenia. Moreover, they can be utilized to gain deeper theoretical insights into illness etiology and pathophysiology and may lead to improvements in early detection and more effective and targeted treatment of schizophrenia. We conclude by addressing several key methodological, conceptual, and interpretative issues involved in this research field and by suggesting future research directions.

Introduction

The electroencephalogram (EEG) recorded from the human scalp provides a powerful noninvasive tool for studying the brain mechanisms of attention and information processing in health and disease. In contrast to blood flow neuroimaging techniques, such as magnetic resonance imaging (MRI), the EEG provides a direct and ''real-time'' index of neuronal activities at a millisecond scale of resolution that is relatively easy and inexpensive to implement. Due to its high temporal resolution, the EEG is ideally suited to examine the rapidly changing patterns of brain activities that underlie human cognitive function and dysfunction. The scalp EEG is believed to reflect mainly the summated postsynaptic potentials from large synchronously activated populations of pyramidal cells in the cerebral cortex. The recorded EEG activities show changes over time, which are often rhythmic or oscillatory in the sense that they alternate regularly. The rhythmic activities in the resting or ''spontaneous'' EEG are usually divided into several frequency bands (delta: <4 Hz; theta: 4-8 Hz; alpha: 8-12 Hz; beta: 12-30 Hz; and gamma: 30-70 Hz or higher, centered at 40 Hz), which are associated with different behavioral states, ranging from sleep and drowsiness to relaxation and heightened alertness and mental concentration, 1,2,7,8 yet there exists little consensus on the precise frequency limits of each band. The EEG has a well-established value and role in the clinical assessment, diagnosis, and management of patients with certain neurological disorders, such as sleep disorders and epilepsy. 7 The EEG also shows systematic changes when a person processes a specific external or internal stimulus event, such as a light flash or a sound or an internal thought. In general, 2 types of changes temporally related to a sensory, cognitive, or motor event may occur in the EEG (figure 1). 9
One type of change involves the more traditional, well-examined event-related potentials (ERPs). The other type of event-related EEG change is the focus of more recent research efforts and variously referred to as event-related synchronization and event-related desynchronization, 9 event-related spectral perturbation, 10 or event-related oscillations (EROs). ERPs are a series of scalp-positive and -negative voltage deflections, waves, or components that are strictly time and phase locked to the onset of a particular stimulus event. ERPs can be extracted from the EEG by time domain analysis and averaging the EEG activity following multiple stimulus repetitions. The obtained ERPs may be broadly subdivided into early or sensory-evoked components (eg, auditory-evoked brainstem potentials, P50, N100), which emerge within the first 50-100 ms or so after stimulus onset and basically reflect stimulus detection, and later or cognitive-related components (eg, mismatch negativity [MMN], P300, N400), which primarily index stimulus context. While some of the early sensory-evoked components are well established as a clinical test to assess the integrity of the afferent sensory pathways (eg, hearing, vision) of individual patients, 7,14 the cognitive-related components reflect more complex brain functions and have, to date, mainly been utilized as a research tool instead of a clinical tool. EROs are changes in the frequency power spectrum of the ongoing EEG, which can be either strictly time and phase locked (evoked) or more loosely time-related and non-phase locked (induced) to the eliciting event. 15 EROs can be extracted by means of time-frequency domain analysis and averaging the obtained EEG power spectrograms following the same stimuli. An important issue awaiting clarification is whether ERPs reflect a transient neural-evoked response that is added, but unrelated, to the ongoing EEG oscillations, result from a reorganization or phase resetting of ongoing oscillations by a given stimulus event, or are generated by a combination of both or other mechanisms. 16 In general, it seems that ERPs and EROs can give valuable complementary insights into the basic mechanisms of cognitive and higher brain functions, such as perception, attention, memory, motor control, and language. In this article, we discuss the application of ERPs and EROs to the study of cognitive and brain functions in schizophrenia. We focus on the results of more recent studies of patients with schizophrenia as well as of clinically unaffected persons who, by reason of their family history and current mental status, are at high risk for developing schizophrenia (for excellent reviews of older EEG/ERP work on schizophrenia, see Zubin et al, 17 Pritchard, 18 and Friedman 19). We discuss selected examples of the potential usefulness of ERPs and EROs as genetic vulnerability markers, as pathophysiological markers, and as markers of possible ongoing progressive cognitive and cortical deterioration in schizophrenia. Our main purpose is to illustrate that these neurophysiological measures can offer valuable biological markers of basic pathophysiological mechanisms and cognitive dysfunctions in schizophrenia, yet they may not be specific to current psychiatry's diagnosis and classification.
These quantitative biological markers can be utilized to gain deeper theoretical insights into the etiology and pathophysiology of complex and heterogeneous psychiatric disorders, such as schizophrenia, and may lead to more accurate early detection and more effective and targeted treatments. We conclude by addressing briefly several important methodological, conceptual, and interpretative issues involved in this research field and by suggesting future research directions.

[Figure 1 caption] Five-level multiresolution decomposition of a human averaged auditory event-related potential (ERP) based on the discrete wavelet transform (DWT), using the Daubechies D5 wavelet as a mother wavelet. The application of a 5-level, dyadic DWT decomposes the scalp-recorded event-related electroencephalogram (EEG) signal (upper panel) in the time-frequency plane at different resolution levels, or scales, into a set of basic orthogonal individual signal components (lower panels) that approximately correspond to the main rhythmic activities traditionally used to classify the ongoing EEG. The result of the DWT yields 5 detail functions and corresponding sets of wavelet detail coefficients (D1-D5), representing the energy of the EEG signal as a function of time in the 63- to 125-Hz (D1, gamma), 31- to 63-Hz (D2, gamma), 16- to 31-Hz (D3, beta), 8- to 16-Hz (D4, alpha), and 4- to 8-Hz (D5, theta) frequency bands, and one final set of wavelet approximation coefficients (A5), representing the activity of the remaining part of the signal in the 0- to 4-Hz (delta) frequency band. Note that the frequency band limits are rounded and that the y-axis is scaled differently for each frequency band. By applying the inverse DWT, the detail (D1-D5) and approximation (A5) signal components, as well as the original signal (ERP = A5 + D5 + D4 + D3 + D2 + D1), can be reconstructed from the wavelet coefficients for each scale. 184,189 The ERP was recorded at the midline central scalp electrode from a healthy adult subject in response to task-irrelevant frequent standard 1000-Hz tones (100-ms duration, 10-ms rise/fall, using a variable interstimulus interval of 1300-1700 ms), while the subject performed a visual oddball task. The data indicate that the auditory ERP (upper panel) coincides with auditory transient-evoked (phase-locked) oscillations in multiple frequency bands (lower panels), including early evoked gamma (31-63 Hz)-band responses that peak around 20 and 60 ms and an early evoked beta (16-31 Hz)-band response that peaks around 30 ms after tone onset.

ERPs and EROs in Schizophrenia

A variety of ERPs and EROs have been examined in schizophrenia, including the auditory-evoked P50 ''sensory gating'' response. 6,25,27,28 Here, we consider the P300, MMN, and EROs in the gamma-band frequency range, referred to as the gamma-band response (GBR), as biological markers of potentially distinct pathophysiological mechanisms and cognitive dysfunctions in schizophrenia.

P300 as a Vulnerability Marker

A vulnerability marker, or endophenotype, 24 may be defined as ''a heritable trait, associated with a causative pathophysiological factor in an inherited disease.'' 20 A vulnerability marker may be distinguished from a risk factor, which refers to any characteristic that has predictive validity, but not etiological significance, for developing a psychiatric disorder. 29
Thus, in theory, while both can be used to predict psychiatric disease, only a vulnerability marker can offer basic insights into illness etiology and pathophysiology and may contribute to the rational development of psychiatric diagnostic systems and therapeutic treatments that are based on the underlying illness causes and biology rather than on the final overt complex clinical phenotypes.

Basic Studies. The P300 (P3 or P3b) refers to a late scalp-positive ERP component that is usually recorded in an auditory or visual ''oddball'' experimental paradigm in which a subject detects an infrequent deviant or task-relevant ''target'' stimulus (eg, a 1000-Hz tone) randomly presented within a series of frequent nontarget or ''standard'' stimuli (eg, 1500-Hz tones). The size or amplitude of the P300 elicited by task-relevant target stimuli is typically largest over the medial central and parietal scalp locations, and its peak latency, depending on stimulus, task, and subject factors, may occur between about 300 and 1000 ms after stimulus onset. The P300 is a relatively slow, low-frequency neuroelectrical event and is linked to stimulus-evoked delta and theta oscillations. 13,34 The P300 is believed to index stimulus significance and the amount of attention allocated to the eliciting stimulus event, being maximal to task-relevant or attended stimuli and being absent or small to task-irrelevant or unattended stimuli. 35 While the P300 reflects primarily cognitive factors, this component can also be sensitive to constitutional factors (eg, age, sex) and to natural (eg, circadian rhythm, menstrual cycle) and environmentally induced (eg, exercise, caffeine, nicotine, psychotropic medications) changes in the subject's arousal state. 33,35,36 Similarly, the P300 can be sensitive to individual differences in major personality trait dimensions, such as sensation seeking or novelty seeking. 37,38 The intracerebral origin of the P300 is poorly understood but most likely involves the complex summation of activity from multiple brain regions, particularly the various association areas of the cerebral cortex and the limbic system. 32 Similarly, the neurochemical substrates of the P300 are unclear but presumably involve various neurotransmitter systems in the brain. 32,42 The P300 most likely reflects the summation of multiple, simultaneously occurring cognitive and brain processes 39 that are engaged during the active processing of behaviorally significant stimulus events and functionally linked to attentional resource allocation and memory updating operations in the brain. 31,33,36,40,43 Genetically, the P300 is considered multifactorial and a complex quantitative trait. Family and twin studies have indicated that P300 characteristics are heritable, 26 but reported heritability estimates vary greatly (ranging between about 0.3 and 0.7) as a function of experimental task paradigm, P300 measure (eg, amplitude vs latency), scalp location (eg, frontal vs parietal), stimulus type (eg, target vs nontarget), stimulus modality (eg, visual vs auditory), age, and sex. Information about the molecular genetic basis of P300 is thus far limited and comes mostly from psychiatric genetic research, which will be discussed later.

Clinical Studies. Numerous studies have shown that schizophrenia patients display a smaller than normal auditory P300 over the midline central and parietal scalp electrode locations, 17,18,44,47 as well as a distinct left-smaller-than-right voltage asymmetry at temporal scalp sites.
Auditory P300 amplitude abnormalities over the posterior scalp have been detected in schizophrenia patients at the initial and advanced stages of illness 60,64,65 and remain detectable even in patients free of clinical symptoms and in relative remission. 55 Additionally, schizophrenia patients often show a prolonged auditory P300 latency relative to healthy control subjects. 50,56,57 Schizophrenia patients have also been found to generate a significantly smaller and/or delayed visual P300 over the posterior scalp than healthy control subjects, 56 but this finding seems less robust 55 and the size of patient-control group differences is usually smaller. 56,63 As compared with the auditory P300, the visual P300 in schizophrenia seems more sensitive to clinical state variables, such as current antipsychotic medication status and clinical symptom severity. 53,55 Accordingly, it has been suggested that the visual P300 could serve primarily as a clinical state marker, whereas the auditory P300 could offer a trait or vulnerability marker of schizophrenia. 18,52,53,55,63 Additional supporting evidence for the idea that the auditory P300 indexes a genetic and biological vulnerability to schizophrenia has come from several, though not all, 19,61,66 family-based ''high-risk'' studies showing that this ERP component is also impaired in clinically unaffected family members who, by reason of their family history, are at high risk for developing schizophrenia. 44,47,50,67 Similarly, persons with either psychometrically 68 or clinically 69,70 defined schizotypal personality traits, who are assumed to carry the same latent vulnerability as schizophrenia patients, display P300 amplitude abnormalities. Likewise, we recently observed that auditory P300 amplitude abnormalities over the posterior scalp, similar to those detected under identical experimental conditions in recently ill and chronically ill schizophrenia patients, are present in putatively ''prodromal'' patients who, on the basis of both their family history and current mental status, 74 are at ultrahigh risk for developing a first psychotic episode (figure 2). 64 Longitudinal follow-up data and large patient samples, however, are required to determine whether the auditory P300 has indeed predictive validity for later schizophrenia or other psychiatric disorders in these ultrahigh-risk populations. An earlier family-based longitudinal prospective study reported that both the auditory and visual P300 recorded from adolescent subjects, irrespective of their family history risk status, have predictive validity, not for a diagnostically specific psychiatric outcome but for later global behavioral maladjustment in general. 75 The auditory and visual P300 seem to be impaired not only in schizophrenia but in a variety of psychiatric and neurological disorders, including bipolar affective disorder, attention-deficit hyperactivity disorder, and substance use disorders. 19,26,32,33,52 In particular, impaired generation of the visual P300 has been found to characterize individuals with alcoholism as well as clinically unaffected family members, such as children of alcoholics, who are at high genetic risk for developing alcoholism. 26,76,77
Longitudinal follow-up data from this and related research fields indicate that the visual P300 recorded from children and adolescents, regardless of their family history risk status, has predictive validity, not specifically for alcoholism but more generally for a spectrum of ''disinhibitory'' behavioral syndromes, including childhood externalizing disorders and adult antisocial personality disorder and substance abuse. 81,85,86 Relatively little is yet known about the molecular genetic bases of the P300 and of its disruption in schizophrenia. Blackwood et al 67 reported that within a large Scottish family, a translocation breakpoint region on chromosome 1q42 shows evidence for linkage to schizophrenia as well as to unipolar and bipolar affective disorders and that translocation carriers, either clinically affected or not, exhibit auditory P300 abnormalities. This chromosomal site harbors 2 recently described genes labeled ''Disrupted In Schizophrenia 1 and 2'' (DISC1 and DISC2), which seem to be involved in both early brain development and adult neurogenesis and neural plasticity. 87,88 Additionally, a significant association has been observed in schizophrenia patient and healthy subject populations between the auditory P300 and catechol-O-methyltransferase (COMT) genotype, 89,90 a gene that also has been implicated in schizophrenia vulnerability, 87,88 but another study failed to observe a significant association between the auditory P300 and COMT polymorphism. 91 Genetic linkage analyses of data from the Consortium on the Genetics of Alcoholism (COGA), a large, multicenter family-based study, have yielded several chromosomal regions, in particular on chromosomes 2, 4, 5, 6, 13, and 17, that appear to be linked to the visual P300 amplitude. 26,48,49

[Figure 2 caption; its opening is truncated in the source] ... subjects with recent-onset schizophrenia (n = 10; mean age = 21.3 years, SD = 3.2; mean illness duration = 0.6 years, SD = 0.3), subjects with chronic schizophrenia (n = 14; mean age = 37.5 years, SD = 7.5; mean illness duration = 12.2 years, SD = 6.8), young healthy control subjects (n = 14; mean age = 22.5 years, SD = 2.0) who are age matched to the high-risk and recent-onset groups, and older healthy control subjects (n = 14; mean age = 34.1 years, SD = 10.9) who are age matched to the chronic patient group. The ERPs were collected while subjects performed an auditory oddball task. The auditory stimuli were pure sinusoidal tones (100-ms duration, 10-ms rise/fall), consisting of frequent standard stimuli (1000-Hz tones, P = 91.5%) and infrequent deviant or target stimuli (1064-Hz tones, P = 8.5%), presented binaurally through inserted earphones in a random order using a constant interstimulus interval of 1500 ms. Study participants were instructed to pay attention to the auditory stimuli and to make a button-press response only to the infrequent target stimuli, emphasizing both speed and accuracy. P300 refers to the late scalp-positive ERP component associated with target detection. High-risk, recent-onset, and chronic schizophrenia patient groups all showed significantly smaller P300 amplitudes at Pz and/or Cz relative to healthy age-matched control subjects. Older healthy control subjects displayed significantly smaller P300 amplitudes at Pz compared with younger healthy control subjects. Reprinted with permission from Schizophr Res., 64 Copyright 2005, Elsevier.
The chromosomal regions linked to the visual P300 each contain many genes, including those encoding glutamate and acetylcholine receptors, and usually show also linkage, though in a wider region of linkage, to clinical diagnoses of alcoholism and related disorders. 92 The reported linkage of the visual P300 amplitude to an area on chromosome 5 is noteworthy because this region has also been implicated in schizophrenia 93 and contains the Engrailed-1 gene (En1), which codes for a protein that is brain expressed and implicated in neuronal differentiation. 94 Furthermore, COGA has reported both linkage and association between visual-evoked theta and delta oscillations accompanying the visual P300 generation and a muscarinic cholinergic receptor M2 gene (CHRM2) on chromosome 7. 95 While variants within or close to the CHRM2 locus have also been found to influence risk for alcoholism and major depressive disorder, 96 these findings suggest that central muscarinic acetylcholine receptor systems influence the generation of event-related theta and delta neuronal oscillations associated with the P300 and with basic cognitive and memory functions in healthy subjects. 13,97 These findings may have relevance to the visual P300 abnormalities observed in schizophrenia because central muscarinic acetylcholine receptor abnormalities have also been implicated in the pathophysiology and cognitive dysfunction of schizophrenia. 98,99

Conclusion. The visual and auditory P300 seem to be impaired not only in schizophrenia but in a variety of psychiatric disorders characterized by cognitive and brain dysfunction. Initial data from psychiatric genetic studies similarly suggest that the putative genes (eg, DISC1, COMT, CHRM2) that underlie these neuroelectrical measures are not diagnostically specific and are involved, to a lesser or greater extent, in multiple psychiatric clinical phenotypes. These findings suggest that P300 abnormalities detected over the posterior scalp in schizophrenia reflect a more general biological and cognitive vulnerability or risk factor that cuts across current psychiatric diagnostic categories. Conceivably, the actual behavioral outcome in any particular individual carrying this vulnerability depends on other genetic and environmental factors specific to the individual. 37,72 This interpretation does not preclude the possibility that the P300 also reflects, in its scalp topography or subcomponents, other or more diagnostically specific illness pathology, 100 such as the left-smaller-than-right temporal scalp voltage asymmetry often observed in schizophrenia patients. In addition, evidence is emerging that P300 can be utilized successfully as an intermediate phenotype in identifying clinically unaffected vulnerability-gene carriers 26,67 and, hence, increasing the power of genetic linkage and association studies of complex psychiatric disorders. 92 From a clinical perspective, the P300, while unlikely to provide a diagnostically specific marker of risk, could become useful for the early detection and prediction of psychopathology in general. It remains to be determined, however, whether P300 represents a risk factor or truly a vulnerability marker that is causally related to later manifestation of psychopathology. Furthermore, several other key conceptual and methodological issues remain to be addressed, which will be considered later.
MMN as a Marker of Possible Progressive Pathology
Current theories about the origin of schizophrenia postulate that not only early neurodevelopmental processes but also later progressive, perhaps neurodegenerative, processes are involved in the etiology and pathogenesis of the illness. The possibility has recently been raised that the MMN component of the ERP could offer a biological marker of postonset progressive cognitive and cortical deterioration in schizophrenia. 107,108 If empirically validated, the MMN could have profound theoretical and clinical implications for understanding and treating schizophrenia. Basic Studies. The MMN refers to a scalp-negative ERP component that is usually recorded in a ''passive'' auditory oddball paradigm. 3 In this paradigm, subjects passively listen to a series of frequent standard and infrequent deviant auditory stimuli while they are resting or are involved in the attentive processing of visual information, such as reading a book or performing a visual discrimination task. The MMN typically occurs between about 100 and 250 ms following the infrequent, physically deviant auditory stimuli and reaches maximal voltages over frontal and central scalp locations. Although this ERP component may be affected by the subject's attention in certain situations, 113,114 the MMN is essentially an automatic, preattentive brain response because its generation does not depend on the subject's attention toward the eliciting deviant auditory stimuli. 3 Human and animal studies suggest that the MMN is generated primarily within the supratemporal plane in or near the primary auditory cortex, with possible additional generating sources in the frontal lobe. 3 The MMN is believed to reflect a ''mismatch'' or comparison process between the current deviant acoustic input and a neuronal sensory memory trace representing the physical features of the preceding standard stimuli. 3,109,110,112 The MMN represents the initial processing step in a biologically important series of cognitive and brain events involved in alerting and redirecting the organism's attention toward novel or deviant, potentially significant, auditory stimulus events in the environment. Because the MMN provides a unique objective measure of auditory discrimination and sensory memory, this component offers a powerful research tool for basic cognitive neuroscience as well as for clinical and other applications. 3 Clinical Studies. Shelley et al 121 were the first to report that the MMN elicited by a deviation in tone duration is substantially reduced in chronically ill, medicated schizophrenia patients. Since then, many studies have confirmed that MMN responses elicited by a change in tone duration as well as in tone frequency are often markedly reduced in chronic schizophrenia patients as compared with healthy control subjects. 54,65,107,108,116 The MMN amplitude deficits seen in chronic schizophrenia patients seem to parallel deficits in tone-matching performance 122 and do not seem to be affected by antipsychotic medications. 108 Moreover, MMN deficits appear to be relatively specific to schizophrenia in that they are not prominent features of unipolar and bipolar affective disorders. 126 Abnormalities of MMN, however, have also been observed in dyslexia, 129 as well as in normal aging and in various neurological disorders, 130 although the observed patterns of MMN deficits in these conditions appear to be different from the pattern seen in chronic schizophrenia.
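As a sketch of the computation implied by this definition (hypothetical data; the window bounds and sampling rate are illustrative assumptions), the MMN is typically quantified on the deviant-minus-standard difference waveform as the mean amplitude within roughly the 100- to 250-ms window at a frontocentral site such as Fz:

import numpy as np

def mmn_mean_amplitude(std_epochs, dev_epochs, t, lo=0.10, hi=0.25):
    """Mean amplitude of the deviant-minus-standard difference wave
    in the [lo, hi] second window; epochs are trials x samples arrays."""
    diff = dev_epochs.mean(axis=0) - std_epochs.mean(axis=0)
    win = (t >= lo) & (t <= hi)
    return diff[win].mean()

# Hypothetical use with synthetic Fz epochs sampled at 250 Hz:
fs = 250
t = np.arange(-0.1, 0.5, 1.0 / fs)
rng = np.random.default_rng(1)
std_epochs = rng.normal(0.0, 4e-6, (200, t.size))
mmn_wave = -3e-6 * np.exp(-((t - 0.17) ** 2) / (2 * 0.03 ** 2))
dev_epochs = rng.normal(0.0, 4e-6, (30, t.size)) + mmn_wave
print("MMN mean amplitude: %.2f uV" % (mmn_mean_amplitude(std_epochs, dev_epochs, t) * 1e6))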
Additionally, impaired MMN generation in chronically ill schizophrenia patients is associated with higher order cognitive deficits 54 and global impairments in social and everyday functioning 127,128 but generally does not show a consistent relationship to clinical symptoms. 108 Moreover, schizophrenia-like deficits in MMN generation can be experimentally induced in nonhuman primates following either systemic administration of N-methyl-D-aspartate (NMDA) antagonists or their local infusion directly into the auditory cortex, 116 as well as in healthy humans following administration of the NMDA receptor antagonist ketamine, 131 supporting recent glutamate/NMDA pathophysiological theories of schizophrenia. 87,88,116,132,133 The genetics of MMN are as yet poorly understood. It has been reported that the tone duration-deviant MMN is smaller than normal not only in schizophrenia patients but also in their clinically unaffected biological relatives, 123 but another family study was unable to confirm the latter finding. 134 MMN abnormalities similar to those seen in schizophrenia have been found in adolescents and young adults with 22q11 deletion or velocardiofacial syndrome, which is associated with markedly elevated rates of major psychiatric disorders, including schizophrenia, in early adulthood, with COMT polymorphism modifying the severity of MMN abnormalities in this syndrome. 135 These findings suggest that genetically mediated alterations of catecholaminergic, especially dopaminergic, neurotransmission contribute to the MMN abnormalities and elevated neuropsychiatric risk in this syndrome. However, a study of twin pairs discordant for schizophrenia reported that MMN amplitudes are smaller than normal in schizophrenia patients but are normal in their unaffected co-twins, 136 suggesting that MMN abnormalities reflect state rather than trait characteristics of schizophrenia. Cross-sectional studies of schizophrenia patients at the initial and advanced stages of illness suggest that MMN abnormalities are present selectively or predominantly in chronically ill patients, whereas recently ill and/or first-episode patients display MMN potentials that do not differ markedly from those recorded in healthy age-matched control subjects. 63,65,107 Similarly, in accordance with the findings of a recent study, 137 we observed that putatively prodromal patients at high imminent risk for developing a first psychotic episode exhibit, as a group, tone frequency-deviant MMN responses that do not differ significantly from those seen in healthy age-matched control subjects (figure 3), while marked differences between these 2 groups existed in the auditory P300 (figure 2). Moreover, a recent meta-analysis of MMN in schizophrenia 108 reports that the effect size of patient-control group differences in the MMN elicited by a change in tone frequency as well as in tone duration shows significant positive correlations with illness duration (both r > 0.67, P = 0.05). Although all the studies included were limited by their cross-sectional, rather than longitudinal, designs, these observations raise the possibility that MMN impairments in schizophrenia do not reflect trait and premorbid deficits but develop over time and index postonset progressive cognitive and cortical deterioration in the illness. 107,108
Indeed, preliminary longitudinal follow-up data suggest that the MMN, at least as elicited by tone frequency-deviant stimuli and analyzed at the overall group level, is not impaired at first hospitalization for schizophrenia but declines significantly during the early course of the illness, paralleling MRI-based measures of perionset progressive gray matter loss within the left auditory cortex of these patients. 138 Alternatively, incorporating the possibility that the MMN also partially reflects trait and premorbid features of schizophrenia, 65,123 the MMN may be impaired at illness onset and index progressive pathology, but only in a subgroup of recently ill schizophrenia patients who have preexisting neurocognitive deficits and/or go on to have a less favorable outcome and a chronic course of illness. 65 Conclusion. Chronically ill schizophrenia patients seem to exhibit not only high-level attention-dependent cognitive processing deficits, as indexed by P300 abnormalities, but also preattentive deficits in auditory discrimination at the initial level of the auditory cortex, as manifested by MMN abnormalities. Because the MMN abnormalities observed in schizophrenia appear to be sensitive to premorbid cognitive status 65 and family history risk status, 123 and are much more pronounced in chronically ill patients than in recently ill or first-episode patients, 63,65,107,108 these abnormalities have been hypothesized to reflect both premorbid or trait characteristics of schizophrenia and postonset progressive illness pathology in brain regions mediating auditory perception and language processing. In this view, the MMN could offer a unique clinical tool that helps to develop and monitor therapeutic interventions aimed at halting, delaying, or even preventing putative progressive cortical and cognitive deterioration in schizophrenia. Several critical issues, however, remain to be addressed before any conclusions about the MMN in schizophrenia can be drawn with confidence. As already indicated above, longitudinal prospective studies are required to determine whether the severity of MMN abnormalities truly increases across the preonset, recent-onset, and chronic stages of schizophrenia. Longitudinal follow-up MRI data suggest that temporal lobe cortical structures involved in MMN generation, as well as frontal and other brain regions, do show ongoing progressive volume reductions during the early stages of schizophrenia, beginning prior to or around the initial manifestation of full-blown psychotic symptoms. Additionally, several studies have reported that MMN potentials recorded at frontal, but not at temporal, scalp electrode locations are impaired in chronic schizophrenia patients. 124,125 These observations have been interpreted as indicating that the hypothesized frontal generating sources of the MMN involved in behavioral orienting, rather than the preattentive sensory mismatch detectors in auditory cortex, are compromised in schizophrenia. Alternatively, the dissociation of the MMN at frontal vs temporal sites could reflect impaired coactivation or functional ''disconnection'' of the temporal and frontal cortical regions involved in auditory change detection and orienting. 125
To disentangle possible temporal and frontal contributions to MMN abnormalities in schizophrenia, detailed topographic studies are required that record the MMN at many scalp electrodes and utilize advanced spatial signal-enhancement methods, together with anatomical MRI-based information from each study participant, in an effort to correct for the distortion of MMN signals caused by volume conduction through the skull and scalp. 9,10,142

GBR as a Pathophysiological Marker
Event-related neuronal oscillations in the gamma-band frequency range (GBRs) are hypothesized to be fundamental to normal brain function and cognition 12 and to their disruption in schizophrenia. Accordingly, the study of GBRs holds promise of unraveling a basic pathophysiological mechanism mediating widespread cognitive dysfunction in schizophrenia. Moreover, research into their underlying neuronal generating mechanisms could lead to pathophysiologically based treatment interventions in schizophrenia, 152,153 even if GBRs merely index a ''final common pathway'' 154 or a set of molecular and cellular alterations that are genetically and etiologically heterogeneous and downstream consequences of the primary pathogenic process in the illness. 133,155 Basic Studies. Recent basic cognitive neuroscience studies suggest that gamma oscillations and their enhancement and phase synchronization during information processing play a key role in a wide variety of cognitive and brain functions in animals and humans. 12,13,15,153 GBRs have been observed during various types of information processing across sensory modalities and across species and at multiple levels of spatial analysis, from microscopic (eg, single unit) to macroscopic (eg, scalp EEG) measurements.

Fig. 3. Group mean event-related potential (ERP) difference waveforms, formed by subtracting the ERPs to auditory standard stimuli from the ERPs to auditory deviant stimuli, recorded at midline frontal (Fz) and central (Cz) scalp locations, superimposed for subjects at high risk for schizophrenia (n = 9; mean age = 21.9 years, SD = 4.5) and healthy age-matched control subjects (n = 10; mean age = 21.9 years, SD = 3.5). The ERPs were collected using a passive auditory oddball paradigm while subjects performed a visual oddball task. The auditory stimuli were pure sinusoidal tones (100-ms duration, 10-ms rise/fall), consisting of frequent standard stimuli (1000-Hz tones, P = 97.1%) and infrequent deviant stimuli (1064-Hz tones, P = 2.9%), presented binaurally through inserted earphones in a random order using a variable interstimulus interval (ISI) of 1300-1700 ms. The visual stimuli were pictures, consisting of frequent standard stimuli (squares), infrequent novel stimuli (unique familiar objects), and infrequent deviant or target stimuli (circles), and were presented on a computer screen in a random order using a constant ISI of 1500 ms. Study participants were instructed to pay attention to the visual stimuli, while ignoring the auditory stimuli, and to make a button-press response only to the infrequent visual target stimuli, emphasizing both speed and accuracy. Mismatch negativity (MMN) refers to the ERP component associated with auditory change detection. MMN peak latency, peak amplitude, and mean amplitude across the 100- to 250-ms poststimulus latency range did not differ significantly between groups (eg, MMN at Fz, high-risk vs control: peak latency 172 …
According to Galambos, 156 3 types of GBRs may be distinguished: first, the steady-state-evoked GBR to repetitive stimulation at different frequencies; second, the transient-evoked GBR that is phase locked to the onset of a transient stimulus; and third, the induced GBR that is not phase locked to stimulus onset. It is generally assumed that evoked GBRs primarily index sensory processing and reflect cortical responses due to changes in afferent activity, whereas induced GBRs are cognitive in nature and generated by changes in functional connectivity within neuronal networks. 9,15,157 While stimulus-evoked GBRs are linked to sensory processing, they can be modulated by top-down attentional influences. 158,159 Little is as yet known about the genetics of GBRs. A recent study reports significant associations of human auditory-evoked and induced GBRs with genetic polymorphisms of the dopamine receptor D4 (DRD4) and dopamine transporter (DAT1) genes but not with the COMT polymorphism. 160 Currently dominant theories hypothesize that stimulus-induced GBRs reflect the dynamic integration or temporal ''binding'' of spatially distinct neuronal activities within and between brain regions to enable the emergence of coherent perception, thinking, and action. 12,15 According to this view, induced GBRs reflect the operation of a fundamental brain integrative mechanism that counterbalances the distributed anatomical and functional organization of brain activity and are involved in a wide range of cognitive functions, such as sensory discrimination, perception, selective attention, working memory, sensory-motor integration, and motor control. It has been suggested that, while the fast gamma oscillations seem to be most clearly involved in neural synchrony, they ultimately have to be understood in the context of the slower, lower frequencies (eg, beta, alpha, theta), as different frequencies could dynamically interact with each other (eg, gamma-to-beta transitions, multifrequency synchrony) and could reveal different dimensions or aspects of the hypothesized brain integration processes. 13,149,161 The neuroanatomy and neurophysiology of GBRs are complex and not yet fully understood. Animal and human studies indicate that GBRs are locally generated in many areas of the brain, 12,148,149,153 including the hippocampus and cerebral cortex, suggesting that these neuronal responses are not unitary brain events associated with a specific sensory or cognitive process, such as temporal binding, but comprise a wide variety of anatomically and functionally distinct types of GBRs. 13 The cellular mechanisms of GBRs are believed to involve networks of gamma-aminobutyric acid (GABA)ergic interneurons that are driven both by phasic synaptic excitation and inhibition and by electrical coupling between interneuron dendrites and between pyramidal cell axons. 153,162,163 Synchronization of gamma activity within such networks of inhibitory interneurons is thought to propagate downstream in cortical microcircuits, synchronizing pyramidal cell firing and enabling coherent cortical information processing. The combination of GABAergic synaptic and electrical coupling could represent a general mechanism for the synchronization of neuronal population activity in a variety of brain regions and structures.
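The evoked/induced distinction and the notion of intertrial phase locking used throughout this section can be made concrete with a wavelet-based sketch (one common convention only; the 40-Hz center frequency, wavelet width, and input data are illustrative assumptions): evoked power is the power of the trial-averaged complex response, induced power is the remaining single-trial power, and phase locking is the magnitude of the mean unit phasor across trials.

import numpy as np

def gamma_measures(epochs, fs, f0=40.0, n_cycles=7):
    """Evoked power, induced power, and intertrial phase locking at f0 Hz
    using a Morlet wavelet; epochs is a trials x samples array."""
    n = epochs.shape[1]
    t = (np.arange(n) - n // 2) / fs
    sigma = n_cycles / (2.0 * np.pi * f0)
    wavelet = np.exp(2j * np.pi * f0 * t) * np.exp(-t ** 2 / (2 * sigma ** 2))
    wavelet /= np.abs(wavelet).sum()
    # Complex time course at f0 for each trial
    tfr = np.array([np.convolve(tr, wavelet, mode="same") for tr in epochs])
    evoked = np.abs(tfr.mean(axis=0)) ** 2                 # phase-locked power
    induced = (np.abs(tfr) ** 2).mean(axis=0) - evoked     # non-phase-locked part
    plf = np.abs((tfr / np.abs(tfr)).mean(axis=0))         # phase locking, 0 to 1
    return evoked, induced, plf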
Clinical Studies. Several studies have shown that schizophrenia patients display reduced power and phase synchronization of steady-state auditory-evoked GBRs to clicks, tone pips, or amplitude-modulated tones presented at 40 Hz but not at lower rates of stimulation. A slower buildup of steady-state visual-evoked potential amplitudes following stimulus onset has also been reported in schizophrenia, 168 as well as a significant relationship between the latency of the steady-state visual-evoked potential and auditory hallucinations in schizophrenia patients. 169 Another study found no significant overall patient-control group differences but reported that steady-state auditory-evoked GBRs are markedly enhanced in schizophrenia patients taking new generation or atypical antipsychotic medications as compared with patients on conventional or typical antipsychotics. 170 Results have been mixed as to whether the deficits observed in schizophrenia patients are also present in persons with schizotypal personality traits. 165,170 Additionally, early auditory transient-evoked GBRs, which typically are maximal over the centrofrontal scalp and emerge within the first 50 ms following stimulus onset, have been reported to be reduced in power in schizophrenia patients relative to healthy control subjects, 171 but other studies detected no significant deficits in early auditory-evoked GBRs in schizophrenia patients. 172,173 Also, we recently observed little or no significant abnormality in the latency, power, and degree of intertrial phase locking of early auditory transient-evoked GBRs to task-irrelevant pure tones in high-risk, recent-onset, and chronic schizophrenia patients, nor were abnormalities observed in the simultaneously occurring auditory-evoked P50, N100, and P200 potentials of the broadband ERPs (O. van der Stelt and A. Belger, unpublished data). Several studies have found that the integrity of stimulus-evoked GBRs in schizophrenia patients varies according to the nature and severity of their current clinical symptoms. Reduced auditory-evoked GBRs have been noted in nonparanoid, but not in paranoid, schizophrenia subtypes. 174 Similarly, increased levels of clinical negative symptoms in schizophrenia patients seem to be associated with diminished auditory-evoked GBRs, whereas increased positive symptoms are associated with enlarged auditory-evoked GBRs. 159,175 Moreover, exceptionally large gamma-band rhythms have been measured during the occurrence of somatic hallucinations in a schizophrenia patient. 176 Furthermore, it has been demonstrated that patients with schizophrenia exhibit impaired phase locking and phase coherence, along with normal power, of early visual transient-evoked GBRs over the occipital scalp during the perception of visual Gestalt patterns, 177 suggesting that the temporal synchronization of stimulus-evoked gamma oscillations within the visual cortex is disrupted in schizophrenia. A subsequent study confirmed these findings and also demonstrated that the degree of phase locking of the occipital GBR that is phase locked to reaction time is positively related to the severity of clinical symptoms, in particular visual hallucinations, thought disorder, and disorganization. 178 A recent study observed that visual-induced GBRs, but not evoked GBRs, are disrupted in schizophrenia patients. 179 Another study, however, found that visual-induced GBRs during a Gestalt perception task are not abnormal in schizophrenia patients. 180
Instead, this study found that the synchronization of oscillations in the lower, beta frequency range is reduced and accompanies performance deficits in schizophrenia patients. While the basis for the discrepancy between these results remains to be elucidated, the findings underline the importance of assessing both evoked and induced variants of oscillatory neuronal responses in multiple frequency bands in schizophrenia. Finally, abnormalities of evoked and induced GBRs do not appear to be diagnostically specific to schizophrenia but can be found in several other psychiatric and neurological disorders, including attention-deficit hyperactivity disorder, autism, epilepsy, and Alzheimer's disease. 152,159 Conclusion. Several studies have reported abnormalities in the enhancement and phase synchronization of GBRs during various types of sensory and cognitive information processing in schizophrenia patients. These results provide supporting evidence for the view that neural synchrony is disrupted in schizophrenia. GBR alterations in schizophrenia, however, are not invariant across studies but seem to vary as a function of stimulus- and task-specific factors and patient sample characteristics, 152 including the type of antipsychotic medication being used by patients 170 and the nature and severity of their current clinical symptoms. 159,169,178 Accordingly, although the study of GBRs in schizophrenia is relatively recent, the currently available data seem to suggest that GBR abnormalities represent a clinical state marker, rather than a trait marker, of schizophrenia. An intriguing finding is the observed relationship in schizophrenia patients between sensory-evoked GBRs and the severity of clinical positive symptoms, particularly hallucinations. 159,169,178 These data suggest that GBRs could provide important theoretical insights into the generative brain mechanisms that give rise to perceptual disturbances and hallucinations in schizophrenia. Correspondingly, it has been hypothesized that the correlations between enlarged GBRs and hallucinations in schizophrenia reflect cortical hyperexcitability and abnormally increased neural synchrony of thalamocortical networks, leading to incoherent or ''underconstrained'' perception and disturbed conscious experience. 147,176,181 It remains to be determined whether the observed GBR abnormalities in schizophrenia reflect disrupted local, within-area temporal synchronization or disrupted large-scale, between-area synchronization, because the underlying bioelectrical generating sources of these abnormalities cannot be directly inferred and visualized, but only indirectly estimated, on the basis of scalp EEG data alone. The observations that early sensory-evoked, presumably locally generated, GBRs are often preserved in schizophrenia, 179 as well as the finding of reduced synchronization of beta rather than gamma oscillations in schizophrenia patients, 180 seem to favor the interpretation that impaired long-range functional coordination of neuronal activation is basic to the pathophysiology and cognitive dysfunction of schizophrenia. Important goals for future EEG studies are the characterization of large-scale integration of brain activity and the assessment of dynamic relationships between neuronal oscillations in different frequency bands during information processing in schizophrenia. Additionally, the pathophysiological significance of GBR abnormalities in schizophrenia has not yet been empirically established.
Basic neuroscience studies indicate that networks of GABAergic interneurons and glutamatergic pyramidal cells are critically involved in synchronizing cortical gamma activities. 153,162,163 Accordingly, it may be hypothesized that scalp GBR abnormalities in schizophrenia reflect altered chemical transmission and/or electrical coupling within such oscillating interneuronal networks, in line with current theories that implicate disrupted GABA as well as glutamate/NMDA neurotransmission in the pathophysiology and cognitive dysfunction of schizophrenia. 87,88,132,133,182 Yet, to substantiate the putative role of GABAergic mechanisms in the GBR abnormalities seen in schizophrenia, clinical studies are required that demonstrate that therapeutic interventions targeting these underlying cellular mechanisms have systematic effects on the production of GBRs in schizophrenia patients.

Unresolved Issues and Future Directions
The main thrust of this article is that ERPs and EROs can offer valuable biological markers of basic pathophysiological mechanisms and cognitive dysfunctions in schizophrenia, even though they may not be specific to current psychiatry's diagnosis and classification. These quantitative biological markers can provide unique information on the nature and extent of cognitive and brain dysfunction in schizophrenia. Moreover, they could play a critically helpful role in the development of effective therapeutic interventions that are focused on specific pathophysiological mechanisms and cognitive dysfunctions rather than on the clinical symptoms of schizophrenia. Also, evidence is emerging that they can be useful as an intermediate phenotype 24 in identifying clinically unaffected and affected vulnerability-gene carriers 26,67 and, hence, in facilitating the genetic dissection of complex psychiatric disorders, 92 including schizophrenia and alcoholism and related disorders. Notwithstanding, several important methodological, conceptual, and interpretative issues remain to be addressed if further progress is to be made in this research field. Initially, a researcher interested in examining ERPs and EROs in schizophrenia is confronted by numerous, partially arbitrary, choices related to experimental design and paradigm (eg, passive vs active paradigm, stimulus characteristics), data acquisition (eg, filter characteristics, reference location), data processing (eg, artifact control), data analysis (eg, Fourier-based vs wavelet analysis), data quantification, and statistical analysis (eg, univariate vs multivariate). Different choices may produce different or, worse, conflicting study findings and conclusions. Fortunately, at least for the recording of cognitive ERPs 183 and the multiresolution wavelet decomposition of ERPs, 184 recording guidelines and analysis protocols have recently been published that may help to resolve this problem. An additional methodological source of variance is that electrophysiological abnormalities in schizophrenia are usually not invariant across studies and patient samples but can be moderated by stimulus- and task-specific variables (eg, stimulus properties, task difficulty, or novelty) and subject sample characteristics (eg, clinical symptom severity, medication status, smoking, and drug use), including such basic personal variables as age and sex. 19,152
While the variance associated with such ''moderator'' variables 185 should be carefully controlled in patient-control group comparisons, assessing and understanding their moderating effects is also conceptually important because these variables may specify the appropriate external and internal conditions under which, or the subpopulations in which, the cognitive dysfunction and pathophysiology of schizophrenia are most reliably expressed in a certain event-related EEG signal. Furthermore, the anatomical substrates and functional role in brain information processing of some event-related EEG signals (eg, MMN) are fairly well known, but many scalp EEG signals (eg, P300) are complex and, thus far, not so well understood, which limits the interpretation of observed alterations in these signals in schizophrenia. Detailed topographic studies, utilizing theoretically and empirically well-founded experimental paradigms, advanced signal decomposition, spatial enhancement, and signal-modeling techniques, and MRI-based anatomical information, are required if more specific anatomical, functional, and cognitive interpretations of scalp EEG abnormalities are to be made in schizophrenia research. 9,10,142 One promising approach is to utilize functional neuroimaging or magnetoencephalography data collected from the same study participants using identical or similar experimental paradigms 186 in an attempt to delineate the precise brain processes and structures that underlie scalp-recorded EEG abnormalities in schizophrenia. Additionally, it is commonly assumed that ERPs and EROs, as potential endophenotypes, are simpler and more amenable to genetic dissection than the complex overt phenotype associated with the clinical psychiatric disease status itself; however, the database on the genetic, environmental, and epigenetic factors that mediate human event-related EEG signals, although growing, is as yet relatively small. Also, it seems that different event-related EEG signals, such as the P300, MMN, and GBR, reflect biologically and cognitively distinct brain mechanisms and, hence, could reflect distinct pathophysiological mechanisms and cognitive dysfunctions in schizophrenia, but evidence that each of these markers is also mediated by a partially distinct set of genes has only recently been reported. 187 Moreover, a number of EEG abnormalities have been observed cross-sectionally in the initial and later stages of schizophrenia, but whether these abnormalities are primary and causal or merely a correlate or secondary phenomenon accompanying the clinical illness remains to be determined. For example, if the P300 truly reflects a vulnerability marker, then psychological or pharmacological therapeutic interventions that succeed in modifying the P300 in high-risk populations should have a modifying effect on the incidence of later psychopathology in these populations. By contrast, if the P300 merely reflects a correlate or risk factor, then such interventions should not make a difference in outcome. Longitudinal prospective studies are urgently needed to assess the timing, severity, and etiological validity of electrophysiological abnormalities across the subject's lifespan and illness.
Furthermore, assuming that schizophrenia is a clinically and etiologically heterogeneous disorder, 37,155,188 and given the imperfection of current psychiatric diagnostic systems in capturing this heterogeneity, family-based high-risk studies should incorporate large subject sample sizes and effective research strategies in which both schizophrenia patients and their unaffected biological relatives are characterized in terms of both the putative vulnerability marker and the clinical disorder, because the relatives of patients without the marker may not be at high risk for developing the subtype of the disorder that is associated with the marker under investigation. 37 Finally, further studies are needed that evaluate the empirical relationships among different EEG abnormalities in schizophrenia 54,63 and the relationships of the individual EEG abnormalities to neuroimaging, neurocognitive, biochemical, and molecular genetic data obtained from the same subjects. 59,67 Because each research method and technique has its own strengths and limitations, as well as offering a different but complementary level of description and analysis of schizophrenia pathology, studies utilizing the EEG in conjunction with other research tools will ultimately lead to a more comprehensive description and better understanding of the cognitive and brain functions that are altered in schizophrenia.
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

// Imports and enclosing class reconstructed for completeness; JUnit 4 and the
// class name below are assumptions, as the original file header is missing.
public class JSFunctionBodyTest
{
    /**
     * Test object creation with factory.
     */
    @Test(timeout = 1000)
    public void testFactory()
    {
        JSFunctionBody functionBody = JSFunctionBody.factory.create();
        assertNotNull("JSFunctionBody object must not be null.", functionBody);
        assertEquals("Source code must be empty.", "", functionBody.toString());
        functionBody = JSFunctionBody.factory.create("foobar");
        assertNotNull("JSFunctionBody object must not be null.", functionBody);
        assertEquals("Source code must match initial value.", "foobar", functionBody.toString());
    }
}
Influenza epidemiology in patients admitted to sentinel Australian hospitals in 2016: the Influenza Complications Alert Network (FluCAN). During the period 1 April to 30 October 2016 (the 2016 influenza season), 1,952 patients were admitted with confirmed influenza to one of 17 FluCAN sentinel hospitals. Of these, 46% were elderly (≥65 years), 18% were children (<16 years), 5% were Aboriginal and Torres Strait Islander peoples, 3% were pregnant and 76% had chronic co-morbidities.
from .encoder_bert import BERTEncoder
from .encoder_gpt import GPTEncoder
from .encoder_gpt2 import GPT2Encoder
from .encoder_transfoxl import TransfoXLEncoder
from .encoder_electra import ElectraEncoder
Phase coexistence in the charge ordering transition in CaMn7O12. The structural phase transition in CaMn7O12 has been investigated using high-resolution synchrotron and neutron powder diffraction. Both measurements show a phase-coexistence phenomenon: between 409 and 448 K, two different crystallographic phases coexist in the material. The first is trigonal and has a charge ordering (CO) of the Mn3+ and Mn4+ ions, while the second is cubic and charge delocalized (CD). The volume fraction of the CD phase increases with temperature, from zero at 400 K up to 100% at about 460 K. Both phases have domains of at least 150 nm at each temperature in the phase-coexistence region. A percolation scenario assuming growth of the volume of the highly conducting CD regions at the expense of the volume of the insulating CO matrix is discussed and found to be in agreement with literature data on the CaMn7O12 resistivity.
// packages/react/src/typography/components/Paragraph/index.ts
export { default as Paragraph } from './Paragraph';
export * from './Paragraph';
Many computer systems have been designed with a bus architecture. These computer systems typically have a processing component coupled to a bus. Other system components are also typically coupled to the bus. Such other components include display components like VGA and video memory, input/output (I/O) components, system memory and storage devices, and other devices accessible to the processing component via the bus. One such computer system is the IBM Personal Computer (PC) manufactured by the IBM Corporation of Armonk, N.Y.
In order to display information to a user of a computer system, the computer system must generate a video image and display the image on a display device. A typical means for generating a displayable image is to create a bitmap of the image in video memory. A bitmap is an array of one-bit memory elements that correspond to the array of picture elements (pixels) on a typical video screen. Manipulating bitmaps to create video images is a technique well known in the art.
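As an illustration of this pixel-to-memory mapping (a sketch only; the class and layout below are hypothetical and not taken from any particular system), a 1-bit-per-pixel bitmap can be held in a byte array, with pixel (x, y) stored MSB-first in bit x mod 8 of byte y * stride + x / 8:

class Bitmap:
    """Minimal 1-bit-per-pixel bitmap backed by a byte array (illustrative)."""

    def __init__(self, width, height):
        self.width, self.height = width, height
        self.stride = (width + 7) // 8            # bytes per scanline
        self.bits = bytearray(self.stride * height)

    def set_pixel(self, x, y, on=True):
        byte = y * self.stride + (x // 8)
        mask = 0x80 >> (x % 8)                    # MSB-first pixel order
        if on:
            self.bits[byte] |= mask
        else:
            self.bits[byte] &= ~mask & 0xFF

    def get_pixel(self, x, y):
        return bool(self.bits[y * self.stride + x // 8] & (0x80 >> (x % 8)))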
In prior art computer systems with a processing component and a display component coupled to a bus, the processing component typically carries most of the burden of creating the bitmaps to be displayed by the display component. In addition to creating bitmaps, the processing component must transfer them over a relatively slow bus to the video memory and back. Bus speed therefore becomes a critical factor in determining the performance of a particular application running on the computer system. Other factors that influence performance in these computer systems include (1) processing component speed, (2) data bus width, and (3) the time to transfer data from the display component (VGA) to video memory and back.
Several prior art methods exist for improving the performance of video graphics applications in bus-oriented computer systems. These methods include (1) creating special hardware to offload from the processing component the task of updating video memory and creating and transferring bitmaps, (2) using dual-ported video dynamic random access memory (DRAM) instead of single-ported DRAM to implement the video memory, and (3) using single-ported DRAM to implement video memory, but using video data buffers inside the display component and more complicated arbitration schemes to provide more frequent video memory access to the processing component. Each of these prior art methods is described below.
Regarding the first method of using special video hardware, most graphics coprocessors fall into this category. In these systems, the processing component merely writes a command word into the graphics coprocessor, which, in turn, manipulates the video memory to generate the video image as instructed by the processing component. These systems, however, are more expensive to implement, since additional components (i.e., a graphics coprocessor and support hardware) are required. Another disadvantage of this method is that extensive software drivers have to be written in order for existing software to run on machines equipped with such coprocessors. Moreover, use of a coprocessor may render the computer system incompatible with existing components.
The second prior art method of using dual-ported DRAM is also a more expensive option. In these systems, the display component is coupled to one port of the video memory while the processing component is coupled to the second memory port. The number of processing component accesses to video memory is thereby increased. Beyond its higher cost, this method suffers from the need for additional logic to reformat the output data.
The third prior art method for improving the performance of video graphics applications is to use video data buffers inside the display component and more complicated arbitration schemes to provide more frequent video memory access to the processing component. In this manner, less expensive single-ported DRAM may be used for video memory. The overall performance of this method is still limited by the bus timing and bandwidth of a particular computer system. No significant performance increase may therefore be achieved by this method.
Thus, a better means for communication between a processing component, a display component, and video memory is needed to shorten video memory and I/O access times and thus diminish or eliminate bus contention.
I’m a user. It’s hard to admit, but I’ve been using it almost my entire life. I hurt myself every time. There are songs about it, there are articles about it and sometimes they even talk about it on T.V. It has taken me many years, but I think it might be just about time to rid it from my life.
The N-word. Let’s be honest, you knew this was coming. No columnist who writes about race can avoid this topic and still respect their work at the end of the day. I decided when I first became a columnist that I would stop using the word before I wrote anything about it. However, it has been quite a while and I cannot seem to give it up. Something about it is enticing. It feels great to be able to use a word that others cannot unless you authorize them to do so. The word seems to make songs sound better, jokes funnier and it can make an insult stand out further.
I recently read that the NFL is attempting to adopt a rule penalizing the N-word. This is obviously going to be a very controversial rule change, as any athlete knows that it is hard to control your language when things don’t go your way. These days, it’s even hard for fans to refrain from saying the word. Quite recently, Marcus Smart, a basketball player for Oklahoma State, was involved in an altercation with a fan over the alleged use of the word. In January, Madonna caused quite a bit of controversy after using the word to refer to her son in an Instagram post. She defended this, stating that the use of the word is all about intention, and that it did not indicate that she is racist. Nonetheless, the takeaway from all of these issues is that the word creates a discussion whenever it is used no matter the race of the user.
The problem with the N-word is not in its use. I devoutly believe that the problem lies in its current state of limbo.
It is used as a term of endearment, yet can also be an insult. The word can be said, but only by African-Americans. It was originally used to let black people know that they were less than human. However, today it is used with an “a” at the end, which somehow elicits a completely different meaning. I don’t know about you, but when I pronounce a word differently than it was intended I am not foolish enough to actually think it means something else. Changing the ending of the word is not nearly enough to distance it from its racist origins.
In my opinion, there are two ways in which the problem of the N-word can be solved. The first is the obvious one: if everybody stops saying it, it will eventually fade from our vocabulary. However, the reason this method hasn’t been working is that the word is so deeply engraved in African-American vocabulary and culture; it would be nearly impossible to rid the world of the word. That leads to our second option, which is to embrace the word and start allowing everybody to say it. It becomes nearly impossible to argue that the word is no longer racist and is a term of endearment if non-black people are not allowed to use it. How is the word not racist if only one race is associated with it?
The N-word is out there, and the truth is that nearly everyone is saying it, whether in public or in private. If well-known celebrities are using the word on social media, its reach is impossible to deny. Currently, the word presents a problem that causes fights, proliferates racism and, quite bluntly, just pisses people off. Whether we decide to keep it and change its meaning, attempt to rid our vernacular of it forever, or conclude that the current state of the word is acceptable, just remember that it’s not me, it’s you.
Earlier this month, the Congressional Budget Office, a nonpartisan federal agency, released a report titled, “The Effects of a Minimum-Wage Increase on Employment and Family Income.” First, the good news: a 39 percent increase in the federal minimum wage from $7.25 to $10.10 would lift 900,000 people out of poverty while simultaneously increasing the incomes of 16.5 million low-wage workers. The CBO also determined that an increase in the minimum wage would lead to a reduction of roughly 500,000 jobs.
The Red competed in its last Barton Hall meet Saturday at the Marc Deneault Memorial Invitational — an event named in memory of Marc Deneault ’01, a former Red sprinter who died tragically in a car crash. Cornell hosted teams from more than 20 schools and multiple club challengers, leaving the competition with almost 35 ECAC/IC4A qualifications and 19 event wins.
/**
 * Creates the set of pixels in the target view that are common between the two scenes.
 *
 * @param dbSimilar Storage with feature pixel coordinates
 * @param cameraPrior Prior camera model; its principal point (cx, cy) is used to recenter the pixels
 * @param numCommon Number of features that are common between the two scenes in this view
 * @param viewID The ID of the view
 */
@SuppressWarnings("IntegerDivisionInFloatingPointContext")
private void loadViewZeroCommonObservations( LookUpSimilarImages dbSimilar,
CameraPinholeBrown cameraPrior,
int numCommon,
String viewID ) {
dbSimilar.lookupPixelFeats(viewID, dbPixels);
zeroViewPixels.resetResize(numCommon);
for (int featureIdx = 0; featureIdx < zeroFeatureToCommonIndex.size; featureIdx++) {
int commonIdx = zeroFeatureToCommonIndex.get(featureIdx);
if (commonIdx == -1) {
continue;
}
Point2D_F64 p = zeroViewPixels.get(commonIdx);
p.setTo(dbPixels.get(featureIdx));
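// Recenter the pixel around the principal point (cx, cy) of the prior camera model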
p.x -= cameraPrior.cx;
p.y -= cameraPrior.cy;
}
}
import seaborn
import matplotlib.pyplot as plt

def velocityDistribution(n_collisions, data_folder):
    """Plot the stationary-state velocity distribution as a KDE plus histogram.

    getStationaryState is assumed to be defined elsewhere in this project.
    """
    vel = getStationaryState(n_collisions, data_folder)
    seaborn.set_style('whitegrid')
    seaborn.kdeplot(vel[:, 0], bw=0.5)  # kernel density estimate of the x-velocities
    histPlot = plt.hist(vel, density=True, bins='auto')
    return histPlot
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package lab3_jefryhernandez_delmerespinal;
import java.util.Date;
/**
*
* @author Owner
*/
public class Maiar extends Integrante {
private boolean sombrero;
private boolean baston;
public Maiar() {
super();
}
public Maiar(boolean sombrero, boolean baston, String nombre, String apellido, double altura, Date fecha_nacimiento) {
super(nombre, apellido, altura, fecha_nacimiento);
this.sombrero = sombrero;
this.baston = baston;
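// Preset stats depend on equipment: with a hat (sombrero) healing is 200;
// otherwise, with a staff (baston), healing is 250. Attack and defense match.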
if (sombrero) {
ataque = 200;
defensa = 0;
curacion = 200;
} else if (baston) {
ataque = 200;
defensa = 0;
curacion = 250;
}
}
public boolean isSombrero() {
return sombrero;
}
public void setSombrero(boolean sombrero) {
this.sombrero = sombrero;
}
public boolean isBaston() {
return baston;
}
public void setBaston(boolean baston) {
this.baston = baston;
}
@Override
public String toString() {
return getNombre();
}
}
/**
* The implementation of input global parameters.
*
* @author JavaSaBr
*/
public class InputGlobalShaderNodeElement extends GlobalShaderNodeElement {
public InputGlobalShaderNodeElement(@NotNull final ShaderNodesContainer container, @NotNull final ShaderGenerationInfo object) {
super(container, object);
}
@Override
@FXThread
protected @NotNull String getTitleText() {
return PluginMessages.NODE_ELEMENT_GLOBAL_INPUT;
}
@Override
@FXThread
public @Nullable ShaderNodeParameter parameterFor(final @NotNull ShaderNodeVariable variable,
final boolean fromOutputMapping, final boolean input) {
if (fromOutputMapping) return null;
return super.parameterFor(variable, fromOutputMapping, input);
}
@Override
@FXThread
protected void fillParameters(@NotNull final VBox container) {
super.fillParameters(container);
final ShaderGenerationInfo info = getObject();
final ShaderNodeVariable vertexGlobal = info.getVertexGlobal();
final List<ShaderNodeVariable> fragmentGlobals = info.getFragmentGlobals();
FXUtils.addToPane(new OutputShaderNodeParameter(this, vertexGlobal), container);
for (final ShaderNodeVariable fragmentGlobal : fragmentGlobals) {
FXUtils.addToPane(new OutputShaderNodeParameter(this, fragmentGlobal), container);
}
}
}
Iowa’s Governor says he met New Jersey’s outgoing Governor at twice-yearly meetings, and Governor Tom Vilsack says his heart goes out to everyone involved, including the citizens of New Jersey. Garden State Governor Jim McGreevey, who is married and has two kids, announced yesterday that he’s gay and had an affair with a man. Vilsack says his “heart goes out to a friend who for whatever reason is struggling and has personal issues.” Vilsack says he felt the same way when Connecticut’s Governor resigned recently after a bribery scandal. Vilsack says “anytime a governor is impacted negatively by press or by publicity or by a personal situation or mistakes in judgment or whatever it might be, you feel for ’em.” Vilsack, who vacations in New Jersey with his family, helped McGreevey campaign for governor three years ago.
"""
utilities - Several utilities useful when using pyrwt
=====================================================
.. codeauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division
import numpy as np
def hardThreshold(y, thld):
    """
    Hard thresholds the input signal y with the threshold value
    thld.

    Parameters
    ----------
    y : array-like, shape = Arbitrary dimension
        Finite length signal (implicitly periodized)
    thld : float
        Value by which to threshold the input signal

    Returns
    -------
    x : array-like, shape = Same dimension as y
        Hard thresholded output ``x = (abs(y)>thld)*y``

    Examples
    --------
    >>> from rwt.utilities import makeSignal, hardThreshold
    >>> y = makeSignal('WernerSorrows', 8)
    >>> print hardThreshold(y, thld=1)
    [1.5545, 5.3175, 0, 1.6956, -1.2678, 0, 1.7332, 0]

    See Also
    --------
    softThreshold
    """
    x = np.zeros_like(y)
    ind = np.abs(y) > thld
    x[ind] = y[ind]
    return x
def softThreshold(y, thld):
    """
    Soft thresholds the input signal y with the threshold value
    thld.

    Parameters
    ----------
    y : array-like, shape = Arbitrary dimension
        Finite length signal (implicitly periodized)
    thld : float
        Value by which to threshold the input signal

    Returns
    -------
    x : array-like, shape = Same dimension as y
        Soft thresholded output x = ``sign(y)(abs(y)-thld)_+``

    Examples
    --------
    >>> from rwt.utilities import makeSignal, softThreshold
    >>> y = makeSignal('Doppler', 8)
    >>> print softThreshold(y, thld=0.2)
    [0, 0, 0, -0.0703, 0, 0.2001, 0.0483, 0]

    See Also
    --------
    hardThreshold
    """
    x = np.abs(y) - thld
    x[x < 0] = 0
    x[y < 0] = -x[y < 0]
    return x
def makeSignal(signal_name='AllSig', N=512):
    """
    Creates artificial test signal identical to the
    standard test signals proposed and used by D.L. Donoho and I.M. Johnstone
    in WaveLab (a matlab toolbox developed by Donoho et al. at the statistics
    department at Stanford University).

    Parameters
    ----------
    signal_name : string, optional (default='AllSig')
        Name of the desired signal. Supported values:

        * 'AllSig' (Returns a list with all the signals)
        * 'HeaviSine'
        * 'Bumps'
        * 'Blocks'
        * 'Doppler'
        * 'Ramp'
        * 'Cusp'
        * 'Sing'
        * 'HiSine'
        * 'LoSine'
        * 'LinChirp'
        * 'TwoChirp'
        * 'QuadChirp'
        * 'MishMash'
        * 'WernerSorrows' (Heisenberg)
        * 'Leopold' (Kronecker)
    N : integer, optional (default=512)
        Length in samples of the desired signal

    Returns
    -------
    x : array/list of arrays, shape = [N]

    References
    ----------
    WaveLab can be accessed at
    www_url: http://playfair.stanford.edu/~wavelab/
    Also see various articles by D.L. Donoho et al. at
    web_url: http://playfair.stanford.edu/
    """
    t = np.linspace(1, N, N)/N
    signals = []
    if signal_name in ('HeaviSine', 'AllSig'):
        y = 4 * np.sin(4*np.pi*t) - np.sign(t - 0.3) - np.sign(0.72 - t)
        signals.append(y)
    if signal_name in ('Bumps', 'AllSig'):
        pos = np.array([ .1, .13, .15, .23, .25, .40, .44, .65, .76, .78, .81])
        hgt = np.array([ 4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 5.1, 4.2])
        wth = np.array([.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005])
        y = np.zeros_like(t)
        for p, h, w in zip(pos, hgt, wth):
            y += h/(1 + np.abs((t - p)/w))**4
        signals.append(y)
    if signal_name in ('Blocks', 'AllSig'):
        pos = np.array([ .1, .13, .15, .23, .25, .40, .44, .65, .76, .78, .81])
        hgt = np.array([ 4, -5, 3, -4, 5, -4.2, 2.1, 4.3, -3.1, 2.1, -4.2])
        y = np.zeros_like(t)
        for p, h in zip(pos, hgt):
            y += (1 + np.sign(t - p))*h/2
        signals.append(y)
    if signal_name in ('Doppler', 'AllSig'):
        y = np.sqrt(t * (1-t)) * np.sin((2*np.pi*1.05) / (t+.05))
        signals.append(y)
    if signal_name in ('Ramp', 'AllSig'):
        y = t.copy()
        y[t >= .37] -= 1
        signals.append(y)
    if signal_name in ('Cusp', 'AllSig'):
        y = np.sqrt(np.abs(t - 0.37))
        signals.append(y)
    if signal_name in ('Sing', 'AllSig'):
        k = np.floor(N * .37)
        y = 1 / np.abs(t - (k+.5)/N)
        signals.append(y)
    if signal_name in ('HiSine', 'AllSig'):
        y = np.sin(N*0.6902*np.pi*t)
        signals.append(y)
    if signal_name in ('LoSine', 'AllSig'):
        y = np.sin(N*0.3333*np.pi*t)
        signals.append(y)
    if signal_name in ('LinChirp', 'AllSig'):
        y = np.sin(N*0.125*np.pi*t*t)
        signals.append(y)
    if signal_name in ('TwoChirp', 'AllSig'):
        y = np.sin(N*np.pi*t*t) + np.sin(N*np.pi/3*t*t)
        signals.append(y)
    if signal_name in ('QuadChirp', 'AllSig'):
        y = np.sin(N*np.pi/3*t*t*t)
        signals.append(y)
    if signal_name in ('MishMash', 'AllSig'):
        # QuadChirp + LinChirp + HiSine
        y = np.sin(N*np.pi/3*t*t*t) + np.sin(N*0.125*np.pi*t*t) + np.sin(N*0.6902*np.pi*t)
        signals.append(y)
    if signal_name in ('WernerSorrows', 'AllSig'):
        y = np.sin(N/2*np.pi*t*t*t)
        y += np.sin(N*0.6902*np.pi*t)
        y += np.sin(N*np.pi*t*t)
        pos = np.array([.1, .13, .15, .23, .25, .40, .44, .65, .76, .78, .81])
        hgt = np.array([4, 5, 3, 4, 5, 4.2, 2.1, 4.3, 3.1, 5.1, 4.2])
        wth = np.array([.005, .005, .006, .01, .01, .03, .01, .01, .005, .008, .005])
        for p, h, w in zip(pos, hgt, wth):
            y += h/(1 + np.abs((t - p)/w))**4
        signals.append(y)
    if signal_name in ('Leopold', 'AllSig'):
        y = (t == np.floor(.37 * N)/N).astype(float)
        signals.append(y)
    if len(signals) == 1:
        return signals[0]
    return signals
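# Illustrative usage of the helpers above (a sketch; the noise level and
# threshold value are arbitrary): shrink small-amplitude samples of a noisy
# 'Doppler' test signal toward zero by soft thresholding.
if __name__ == '__main__':
    noisy = makeSignal('Doppler', 512) + np.random.normal(0.0, 0.05, 512)
    denoised = softThreshold(noisy, thld=0.1)
    print(np.abs(noisy - denoised).max())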
// Copyright 2020 <NAME>. All Rights Reserved.
#pragma once
#include "PrFirebaseModule.h"
#include "PrFirebaseDefines.h"
#include "Misc/CoreDelegates.h"
#include "PrFirebasePerformanceModule.generated.h"
class UPrFirebasePerformanceModule;
USTRUCT(Blueprintable, BlueprintType)
struct FPrFirebasePerformanceTrace
{
GENERATED_BODY()
public:
FPrFirebasePerformanceTrace();
FPrFirebasePerformanceTrace(TWeakObjectPtr<UPrFirebasePerformanceModule> PerformanceModule, const FString& Identifier);
FPrFirebasePerformanceTrace(TWeakObjectPtr<UPrFirebasePerformanceModule> PerformanceModule, int32 Index);
void Stop();
void IncrementMetric(const FString& Name, int32 Value);
void SetMetric(const FString& Name, int32 Value);
void SetAttribute(const FString& Name, const FString& Value);
void RemoveAttribute(const FString& Name);
int32 GetTraceIndex() const;
private:
UPROPERTY()
TWeakObjectPtr<UPrFirebasePerformanceModule> Module;
UPROPERTY()
int32 TraceIndex;
};
USTRUCT()
struct FPrFirebasePerformanceScopeTimeTrace
{
GENERATED_BODY()
public:
FPrFirebasePerformanceScopeTimeTrace();
FPrFirebasePerformanceScopeTimeTrace(TWeakObjectPtr<UPrFirebasePerformanceModule> PerformanceModule, const FString& Identifier);
~FPrFirebasePerformanceScopeTimeTrace();
private:
TOptional<FPrFirebasePerformanceTrace> Trace;
};
UCLASS()
class PRFIREBASE_API UPrFirebasePerformanceModule : public UPrFirebaseModule
{
GENERATED_BODY()
public:
UPrFirebasePerformanceModule();
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void SetAttributeForAllTraces(const FString& Name, const FString& Value);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void RemoveAttributeForAllTraces(const FString& Name);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void SetMetricForAllTraces(const FString& Name, int32 Value);
int32 StartTraceWithoutWrapper(const FString& Identifier);
void StopTraceWithoutWrapper(int32 TraceIndex);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
FPrFirebasePerformanceTrace StartTrace(const FString& Identifier);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void StopTrace(const FPrFirebasePerformanceTrace& Trace);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void IncrementMetric(const FPrFirebasePerformanceTrace& Trace, const FString& Name, int32 Value);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void SetMetric(const FPrFirebasePerformanceTrace& Trace, const FString& Name, int32 Value);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void SetAttribute(const FPrFirebasePerformanceTrace& Trace, const FString& Name, const FString& Value);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Trace")
void RemoveAttribute(const FPrFirebasePerformanceTrace& Trace, const FString& Name);
UFUNCTION(BlueprintCallable, Category = "Firebase|Performance|Utils")
void AppliactionLaunched();
virtual bool IsAvailable() const final { return WITH_FIREBASE_PERFORMANCE; }
protected:
virtual void InternalStartTrace(int32 TraceIndex, const FString& Identifier) { Firebase_NotImplemented(); }
virtual void InternalStopTrace(int32 TraceIndex) { Firebase_NotImplemented(); }
virtual void InternalIncrementMetric(int32 TraceIndex, const FString& Name, int32 Value) { Firebase_NotImplemented(); }
virtual void InternalSetMetric(int32 TraceIndex, const FString& Name, int32 Value) { Firebase_NotImplemented(); }
virtual void InternalSetAttribute(int32 TraceIndex, const FString& Name, const FString& Value) { Firebase_NotImplemented(); }
virtual void InternalRemoveAttribute(int32 TraceIndex, const FString& Name) { Firebase_NotImplemented(); }
void InternalLaunch_AnyThread();
void SetTemperature(FCoreDelegates::ETemperatureSeverity Temp);
void SetPowerMode(bool bLowPowerMode);
void StartWatch();
private:
int32 LastTraceIndex;
bool bAppliactionLaunched;
bool bAppliactionFirstFrame;
TMap<FString, FString> GlobalAttributes;
TMap<FString, int32> GlobalMetrics;
TSet<int32> ExistingTraceIndices;
TOptional<FPrFirebasePerformanceTrace> AppLaunchTrace;
TOptional<FPrFirebasePerformanceTrace> AvFrameTrace;
TOptional<FDateTime> AvFrameTime;
int64 AvFrameCounter;
void OnAppliactionLaunched();
void OnEndFrame();
};
#define PRF_SCOPE_TIME(_Identifier_) \
FPrFirebasePerformanceScopeTimeTrace _PrFirebasePerformanceScopeTimeTrace_##_TraceName_##__LINE__(GPrFirebasePerformanceModule, #_Identifier_);
extern PRFIREBASE_API TWeakObjectPtr<UPrFirebasePerformanceModule> GPrFirebasePerformanceModule;
import os
def processFiles(dirPath, dstFilePath):
    """Concatenate the contents of every file in dirPath into dstFilePath."""
    with open(dstFilePath, 'w') as new_f:
        for file_name in os.listdir(dirPath):
            file_path = os.path.join(dirPath, file_name)
            with open(file_path, 'r') as f:
                for line in f:
                    new_f.write(line)

if __name__ == '__main__':
    processFiles('C:\\Users\\shankai\\Desktop\\result', 'data/sendBiaoqingWithPrefetchNoOrigin.txt')
/*
* QR Code generator demo (C++)
*
* Run this command-line program with no arguments. The program computes a bunch of demonstration
* QR Codes and prints them to the console. Also, the SVG code for one QR Code is printed as a sample.
*
* Copyright (c) Project Nayuki. (MIT License)
* https://www.nayuki.io/page/qr-code-generator-library
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* - The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* - The Software is provided "as is", without warranty of any kind, express or
* implied, including but not limited to the warranties of merchantability,
* fitness for a particular purpose and noninfringement. In no event shall the
* authors or copyright holders be liable for any claim, damages or other
* liability, whether in an action of contract, tort or otherwise, arising from,
* out of or in connection with the Software or the use or other dealings in the
* Software.
*/
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>
#include "QrCode.hpp"
using std::uint8_t;
using qrcodegen::QrCode;
using qrcodegen::QrSegment;
// Function prototypes
static void doBasicDemo();
static void doVarietyDemo();
static void doSegmentDemo();
static void doMaskDemo();
static void printQr(const QrCode &qr);
// The main application program.
int main() {
doBasicDemo();
doVarietyDemo();
doSegmentDemo();
doMaskDemo();
return EXIT_SUCCESS;
}
/*---- Demo suite ----*/
// Creates a single QR Code, then prints it to the console.
static void doBasicDemo() {
const char *text = "Hello, world!"; // User-supplied text
const QrCode::Ecc errCorLvl = QrCode::Ecc::LOW; // Error correction level
// Make and print the QR Code symbol
const QrCode qr = QrCode::encodeText(text, errCorLvl);
printQr(qr);
std::cout << qr.toSvgString(4) << std::endl;
}
// Creates a variety of QR Codes that exercise different features of the library, and prints each one to the console.
static void doVarietyDemo() {
// Numeric mode encoding (3.33 bits per digit)
const QrCode qr0 = QrCode::encodeText("314159265358979323846264338327950288419716939937510", QrCode::Ecc::MEDIUM);
printQr(qr0);
// Alphanumeric mode encoding (5.5 bits per character)
const QrCode qr1 = QrCode::encodeText("DOLLAR-AMOUNT:$39.87 PERCENTAGE:100.00% OPERATIONS:+-*/", QrCode::Ecc::HIGH);
printQr(qr1);
// Unicode text as UTF-8
const QrCode qr2 = QrCode::encodeText("\xE3\x81\x93\xE3\x82\x93\xE3\x81\xAB\xE3\x81\xA1wa\xE3\x80\x81"
"\xE4\xB8\x96\xE7\x95\x8C\xEF\xBC\x81\x20\xCE\xB1\xCE\xB2\xCE\xB3\xCE\xB4", QrCode::Ecc::QUARTILE);
printQr(qr2);
// Moderately large QR Code using longer text (from Lewis Carroll's Alice in Wonderland)
const QrCode qr3 = QrCode::encodeText(
"Alice was beginning to get very tired of sitting by her sister on the bank, "
"and of having nothing to do: once or twice she had peeped into the book her sister was reading, "
"but it had no pictures or conversations in it, 'and what is the use of a book,' thought Alice "
"'without pictures or conversations?' So she was considering in her own mind (as well as she could, "
"for the hot day made her feel very sleepy and stupid), whether the pleasure of making a "
"daisy-chain would be worth the trouble of getting up and picking the daisies, when suddenly "
"a White Rabbit with pink eyes ran close by her.", QrCode::Ecc::HIGH);
printQr(qr3);
}
// Creates QR Codes with manually specified segments for better compactness.
static void doSegmentDemo() {
// Illustration "silver"
const char *silver0 = "THE SQUARE ROOT OF 2 IS 1.";
const char *silver1 = "41421356237309504880168872420969807856967187537694807317667973799";
const QrCode qr0 = QrCode::encodeText(
(std::string(silver0) + silver1).c_str(),
QrCode::Ecc::LOW);
printQr(qr0);
const QrCode qr1 = QrCode::encodeSegments(
{QrSegment::makeAlphanumeric(silver0), QrSegment::makeNumeric(silver1)},
QrCode::Ecc::LOW);
printQr(qr1);
// Illustration "golden"
const char *golden0 = "Golden ratio \xCF\x86 = 1.";
const char *golden1 = "6180339887498948482045868343656381177203091798057628621354486227052604628189024497072072041893911374";
const char *golden2 = "......";
const QrCode qr2 = QrCode::encodeText(
(std::string(golden0) + golden1 + golden2).c_str(),
QrCode::Ecc::LOW);
printQr(qr2);
std::vector<uint8_t> bytes(golden0, golden0 + std::strlen(golden0));
const QrCode qr3 = QrCode::encodeSegments(
{QrSegment::makeBytes(bytes), QrSegment::makeNumeric(golden1), QrSegment::makeAlphanumeric(golden2)},
QrCode::Ecc::LOW);
printQr(qr3);
// Illustration "Madoka": kanji, kana, Cyrillic, full-width Latin, Greek characters
const char *madoka = // Encoded in UTF-8
"\xE3\x80\x8C\xE9\xAD\x94\xE6\xB3\x95\xE5"
"\xB0\x91\xE5\xA5\xB3\xE3\x81\xBE\xE3\x81"
"\xA9\xE3\x81\x8B\xE2\x98\x86\xE3\x83\x9E"
"\xE3\x82\xAE\xE3\x82\xAB\xE3\x80\x8D\xE3"
"\x81\xA3\xE3\x81\xA6\xE3\x80\x81\xE3\x80"
"\x80\xD0\x98\xD0\x90\xD0\x98\xE3\x80\x80"
"\xEF\xBD\x84\xEF\xBD\x85\xEF\xBD\x93\xEF"
"\xBD\x95\xE3\x80\x80\xCE\xBA\xCE\xB1\xEF"
"\xBC\x9F";
const QrCode qr4 = QrCode::encodeText(madoka, QrCode::Ecc::LOW);
printQr(qr4);
const std::vector<int> kanjiChars{ // Kanji mode encoding (13 bits per character)
0x0035, 0x1002, 0x0FC0, 0x0AED, 0x0AD7,
0x015C, 0x0147, 0x0129, 0x0059, 0x01BD,
0x018D, 0x018A, 0x0036, 0x0141, 0x0144,
0x0001, 0x0000, 0x0249, 0x0240, 0x0249,
0x0000, 0x0104, 0x0105, 0x0113, 0x0115,
0x0000, 0x0208, 0x01FF, 0x0008,
};
qrcodegen::BitBuffer bb;
for (int c : kanjiChars)
bb.appendBits(static_cast<std::uint32_t>(c), 13);
const QrCode qr5 = QrCode::encodeSegments(
{QrSegment(QrSegment::Mode::KANJI, static_cast<int>(kanjiChars.size()), bb)},
QrCode::Ecc::LOW);
printQr(qr5);
}
// Creates QR Codes with the same size and contents but different mask patterns.
static void doMaskDemo() {
// Project Nayuki URL
std::vector<QrSegment> segs0 = QrSegment::makeSegments("https://www.nayuki.io/");
printQr(QrCode::encodeSegments(segs0, QrCode::Ecc::HIGH, QrCode::MIN_VERSION, QrCode::MAX_VERSION, -1, true)); // Automatic mask
printQr(QrCode::encodeSegments(segs0, QrCode::Ecc::HIGH, QrCode::MIN_VERSION, QrCode::MAX_VERSION, 3, true)); // Force mask 3
// Chinese text as UTF-8
std::vector<QrSegment> segs1 = QrSegment::makeSegments(
"\xE7\xB6\xAD\xE5\x9F\xBA\xE7\x99\xBE\xE7\xA7\x91\xEF\xBC\x88\x57\x69\x6B\x69\x70"
"\x65\x64\x69\x61\xEF\xBC\x8C\xE8\x81\x86\xE8\x81\xBD\x69\x2F\xCB\x8C\x77\xC9\xAA"
"\x6B\xE1\xB5\xBB\xCB\x88\x70\x69\xCB\x90\x64\x69\x2E\xC9\x99\x2F\xEF\xBC\x89\xE6"
"\x98\xAF\xE4\xB8\x80\xE5\x80\x8B\xE8\x87\xAA\xE7\x94\xB1\xE5\x85\xA7\xE5\xAE\xB9"
"\xE3\x80\x81\xE5\x85\xAC\xE9\x96\x8B\xE7\xB7\xA8\xE8\xBC\xAF\xE4\xB8\x94\xE5\xA4"
"\x9A\xE8\xAA\x9E\xE8\xA8\x80\xE7\x9A\x84\xE7\xB6\xB2\xE8\xB7\xAF\xE7\x99\xBE\xE7"
"\xA7\x91\xE5\x85\xA8\xE6\x9B\xB8\xE5\x8D\x94\xE4\xBD\x9C\xE8\xA8\x88\xE7\x95\xAB");
printQr(QrCode::encodeSegments(segs1, QrCode::Ecc::MEDIUM, QrCode::MIN_VERSION, QrCode::MAX_VERSION, 0, true)); // Force mask 0
printQr(QrCode::encodeSegments(segs1, QrCode::Ecc::MEDIUM, QrCode::MIN_VERSION, QrCode::MAX_VERSION, 1, true)); // Force mask 1
printQr(QrCode::encodeSegments(segs1, QrCode::Ecc::MEDIUM, QrCode::MIN_VERSION, QrCode::MAX_VERSION, 5, true)); // Force mask 5
printQr(QrCode::encodeSegments(segs1, QrCode::Ecc::MEDIUM, QrCode::MIN_VERSION, QrCode::MAX_VERSION, 7, true)); // Force mask 7
}
/*---- Utilities ----*/
// Prints the given QrCode object to the console.
static void printQr(const QrCode &qr) {
int border = 4;
for (int y = -border; y < qr.getSize() + border; y++) {
for (int x = -border; x < qr.getSize() + border; x++) {
std::cout << (qr.getModule(x, y) ? "##" : " ");
}
std::cout << std::endl;
}
std::cout << std::endl;
}
|
/* tslint:disable:no-console */
export * from "./service";
|
a, b, c = map(int, input().split())

# Accumulate the total cost a + 2a + ... + ca (equivalently a * c * (c + 1) / 2).
total = 0
count = 1
while count <= c:
    total += a * count
    count += 1

# Print how much is still owed after paying b (0 if b already covers it).
if b >= total:
    print(0)
else:
    print(total - b)
|
// ReadAllBody reads the entire request body into a []byte,
// closing the body when done. It returns nil if reading fails.
func (r *Request) ReadAllBody() []byte {
	defer r.Body.Close()
	b, err := ioutil.ReadAll(r.Body)
	if D.IsErr(err) {
		return nil
	}
	return b
}
|
import {TEST_NET} from "lib/constants"
import publicConfig from "lib/publicConfig"
import Head from "next/head"
export const BASE_HTML_TITLE = `Flow ${
publicConfig.network === TEST_NET ? "Testnet" : "Canarynet"
} Faucet`
export default function PageTitle({children}: {children?: string}) {
  const title = [children, BASE_HTML_TITLE].filter(Boolean).join(" - ")
  return (
    <Head>
      <title>{title}</title>
      <meta property="og:title" content={title} key="title" />
    </Head>
  )
}
|
Factors Affecting Isolation and Propagation of Bovine Coronavirus in Human Rectal Tumor-18 Cell Line

Bovine coronavirus (BCV) is an important cause of calf enterocolitis and respiratory disease. It is the second major cause of viral diarrhea in calves, with rotavirus being the first. At the Wisconsin Animal Health Laboratory during 1993-1994, BCV was detected in 93 cases of calf scours out of 1,058 bovine fecal samples examined by direct electron microscopy. Electron microscopy is used commonly for the diagnosis of enteric viruses, including BCV. The advantages of electron microscopy are that diagnosis can be made rapidly and multiple pathogens, a common feature in enteritis, can be detected simultaneously. Using electron microscopy, more than a dozen novel enteric viruses have been described in the last 2 decades. However, electron microscopy has some limitations; approximately 1 million viral particles should be present to detect a virus by electron microscopy. Thus, it lacks sensitivity and can lead to false-negative results. In addition, some viruses, especially coronaviruses, can be confused morphologically with nonviral particles such as intestinal brush border epithelium and with other morphologically similar viruses, leading to false-positive results. Virus isolation is not commonly used for the diagnosis of BCV. However, one advantage is that the virus propagated in cell culture can be used for further antigenic and genomic characterization. To improve BCV isolation from clinical samples, factors affecting its isolation and propagation in the human rectal tumor-18 (HRT-18) cell line were investigated. In a previous study [10], HRT-18 was found to be a suitable cell line for BCV isolation. In this study, BCV propagated in vitro showed a change in hemagglutination pattern from that of the BCV from the original clinical samples. It is not known if this change correlates with changes in antigenicity and immunogenicity of the virus. HRT-18 and human colon tumor-8 (HCT-8) cells are derived from adenocarcinomas of human rectum and colon, respectively [16]. The 51 samples included in this study were provided by the Wisconsin Animal Health Laboratory (WAHL), Madison (n = 27), the California Veterinary Diagnostic Laboratory (CVDL), San Bernardino (n = 6), and another diagnostic laboratory (referred to as VDL for confidentiality) (n = 18). Samples were obtained as 20% fecal suspensions in phosphate-buffered saline (PBS) (pH 7.2), as
|
Novel Insights Into Rheumatoid Arthritis Through Characterization of Concordant Changes in DNA Methylation and Gene Expression in Synovial Biopsies of Patients With Differing Numbers of Swollen Joints

In this study, we sought to characterize synovial tissue obtained from individuals with arthralgia and disease-specific auto-antibodies and patients with established rheumatoid arthritis (RA), by applying an integrative multi-omics approach where we investigated differences at the level of DNA methylation and gene expression in relation to disease pathogenesis. We performed concurrent whole-genome bisulphite sequencing and RNA-sequencing on synovial tissue obtained from the knee and ankle from 4 auto-antibody positive arthralgia patients and 13 RA patients. Through multi-omics factor analysis we observed that the latent factor explaining the variance in gene expression and DNA methylation was associated with Swollen Joint Count 66 (SJC66), with patients with SJC66 of 9 or more displaying separation from the rest. Interrogating these observed differences revealed activation of the immune response as well as dysregulation of cell adhesion pathways at the level of both DNA methylation and gene expression. We observed differences for 59 genes in particular at the level of both transcript expression and DNA methylation. Our results highlight the utility of genome-wide multi-omics profiling of synovial samples for improved understanding of changes associated with disease spread in arthralgia and RA patients, and point to novel candidate targets for the treatment of the disease.

INTRODUCTION

Rheumatoid arthritis (RA) is a complex, multifactorial, and chronic autoimmune disease that primarily affects the synovial tissue in joints. It affects about 1% of the population and manifests with significant unmet medical need. Investigating pathogenesis during different stages of disease, or across a spectrum of disease severity, is critical to optimize appropriate therapeutic interventions that affect disease progression. Diagnosis of established RA coincides with the development of painful and swollen joints, although circulating autoantibodies can be detected up to 10 years before diagnosis. Clinically manifested joint swelling reflects synovial tissue inflammation (synovitis), which is characterized by infiltration into the synovium of multiple immune cell types, with up to 18 distinct infiltrating cell populations being reported on in a recent single-cell transcriptomics analysis. As disease progresses, synovial fibroblasts adopt an increasingly aggressive and invasive phenotype, promoting further inflammation and joint damage together with other processes induced by the inflammatory environment, such as the differentiation of bone-resorbing osteoclasts. Disease progression in early RA is often associated with the involvement of an increasing number of inflamed joints, but the mechanisms responsible for this spread of disease are poorly understood. Moreover, differences in the rate of disease manifestation and variability of response to therapy indicate that different pathophysiological mechanisms are implicated in disease development and progression compared to disease etiology. An increasing body of evidence indicates that epigenetic modifications play an important role in the regulation of RA pathogenesis.
Several array-based studies have reported widespread differences in DNA methylation among peripheral blood cells from RA patients and controls, suggesting that epigenetic modifications in circulating cells associate with disease. However, wider conclusions may be limited by the unknown correlation of these effects to synovial cells directly at the site of inflammation. Epigenetic modifications have also been implicated in modulating the function of synovial fibroblasts in RA, through comparisons of DNA methylation patterns of cultured cells isolated from RA and osteoarthritis patients. Such studies have identified DNA methylation patterns that distinguish RA from other forms of arthritis, along with regulatory elements and biomarkers related to the pathological phenotype of RA. However, to identify novel candidate genes for therapeutic interventions that affect disease progression, it is important to study samples from patients across different stages of disease. Two such studies using cultured fibroblast-like synoviocytes found small but statistically significant patterns of hypomethylation in patients with longstanding RA compared to those with early RA, suggesting that the DNA methylome could be associated with the transformation of synovial fibroblasts into invasive cells capable of joint destruction and the resulting disease progression. Here, we gain insights into RA heterogeneity by combining whole-genome DNA methylome and transcriptome analyses from the same synovial biopsies and stratifying patients according to the number of swollen joints, a clinical parameter reflective of disease evolution. We find that swollen joint count based on 66 joints (SJC66), which reflects the amount and spread of inflamed synovial tissue, is associated with major changes in gene transcription and DNA methylation at promoters. By cross-interrogating differentially methylated genomic regions and their associated genes, we reveal novel candidate loci associated with the spread of the disease across joints.

Sample Description

Synovial tissue was collected via a mini-arthroscopic procedure as described previously from patients at the Amsterdam University Medical Center, University of Amsterdam. A total of 17 samples were obtained from three cohorts. The first cohort contained individuals that had either arthralgia and/or a positive family history for RA, but without arthritis (as determined by a clinician), and that were positive for IgM Rheumatoid Factor (IgM-RF) and/or Anti-Citrullinated Protein Antibodies (ACPA) (Pre-synoviomics; n = 4). The second cohort consisted of individuals that at inclusion were Disease Modifying Anti-Rheumatic Drug (DMARD)-naïve with early arthritis, as defined by a disease duration of less than 1 year (Synoviomics; n = 9). The third cohort contained samples from patients with established RA on active treatment with a disease duration of more than one year who had at least one swollen joint suitable for synovial tissue sampling (Synoviomics II; n = 4). For all analyses, samples were prepared simultaneously to mitigate batch effects. All subjects provided written informed consent and the collection and use of the samples received Institutional Review Board review and approval. Characteristics of patients included in this study are listed in Table S1.
RNA-Sequencing and Whole Genome Bisulphite Sequencing

Flash-frozen synovial tissue biopsies were utilized to simultaneously isolate RNA and DNA using an AllPrep DNA/RNA Mini kit (Qiagen), with QIAshredder spin columns (Qiagen) used to disrupt the tissue. RNA samples were quantified and their integrity assessed using a Qubit RNA Broad Range Assay Kit (Thermo Fisher Scientific) and an Agilent 2100 Bioanalyzer RNA 6000 Nano Kit (Agilent Technologies), respectively. Depending on sample yield, DNA samples were quantified using Qubit DNA BR or Qubit DNA HS kits (Thermo Fisher Scientific). RNA-Seq libraries were generated from 150 ng of total RNA. The TruSeq® Stranded Total RNA LT was used with a Ribo-Zero™ Human/Mouse/Rat kit (Illumina), following the 'Low Sample' protocol except for two modifications. Firstly, the time of the 'Elution 2 - Frag - Prime' program was reduced from 8 to 6 min to increase the length of the RNA fragments. Secondly, 11 instead of 15 cycles were used to enrich the DNA fragments. Libraries were quantified with a KAPA Library Quantification Kit (KAPA Biosystems) on a QuantStudio 12K Flex Real-Time PCR System (Thermo Fisher Scientific). Five samples were multiplexed per lane and libraries were clustered and sequenced using HiSeq® PE Cluster Kit v3 - cBot™ and HiSeq® SBS Kit v3 kits (Illumina). Paired-end sequencing (2 x 76 bp) was performed using a HiSeq 1500 (Illumina) to a depth of ~40M reads per sample. Whole Genome Bisulphite Sequencing (WGBS) libraries were generated using an EpiGnome Methyl-Seq Library Preparation Kit (Epicentre, now Illumina) from 100 ng of sample DNA. Bisulphite conversion was performed using an EZ DNA Methylation-Lightning Kit (Zymo Research). Each bisulphite conversion reaction contained 500 pg of unmethylated lambda DNA (Promega), which was used as a control to verify that bisulphite conversion efficiencies were at least 98%. Libraries were quantified using a KAPA Library Quantification Kit (KAPA Biosystems) on a 7900HT Real-Time PCR System (Thermo Fisher Scientific). Six samples were multiplexed per lane and libraries were clustered and sequenced using HiSeq® PE Cluster Kit v4 - cBot™ and HiSeq® SBS Kit v4 kits (Illumina). Paired-end sequencing (2 x 125 bp) was performed using a HiSeq 1500 (Illumina) to a depth of ~600M reads per sample. To provide sufficient coverage each batch was sequenced over 2 high-output runs.

Exploratory Analyses of DNA Methylation and Gene Expression

To concurrently explore the DNA methylation and gene expression data, Multi-Omics Factor Analysis (MOFA) was applied (v1.0.0). In short, MOFA performs unsupervised dimensionality reduction simultaneously across multiple data modalities from the same sample through a small number of inferred latent factors, enabling the detection of co-ordinated changes between the different data modalities. Here, we used the 5,000 most variable CpGs and genes as input, with the number of latent factors set to 9, the tolerance to 0.1 and the factor threshold to 0.02. All subsequent analyses were performed in R (v3.5.0). Gene-level counts were generated from the transcript abundances using tximport (v1.12.0). Allosome-associated genes were removed to mitigate obvious sex effects.
Differential Gene Expression (DGE) analyses were conducted using DESeq2 (v1.22.2), where SJC66 high was compared with SJC66 low whilst correcting for sex, age and DMARD usage using the following design formula:

Gene Expression ∼ sex + age + DMARD usage + SJC66 (dichotomized)

DMARD usage was a binary variable defined by the usage of any medication: conventional DMARDs (cDMARD, including methotrexate) or biological DMARDs (bDMARD). As Kallisto provided abundance levels for individual transcripts, Gene Differential Expression (GDE) analyses were also conducted to identify genes where particular transcripts were differentially expressed. In short, differential expression analyses were performed using DESeq2 and the resulting p-values were combined using the Lancaster aggregation method found in aggregation (v1.0.1), where observations were weighted by the base expression. DGEs and GDEs were defined as genes with a false discovery rate (FDR)-adjusted p-value less than 0.05.

DNA Methylation Data Analysis

Quality assessment of the raw reads was performed using FastQC (v0.11.2). Adapter and quality trimming was performed using Skewer (v0.1.123) and a quality filter of 20. To assess bisulphite conversion rates, Bismark (v0.14.1) was used to align the reads to the genome of the phage lambda, and again for alignment to the GRCh38 build of the human genome. Post-alignment filtering of unmapped reads, reads aligning at multiple locations and reads with a mapping score lower than 10 was carried out using SAMtools. All subsequent analyses were performed in R (v3.5.0). CpG loci located on the allosomes were removed to mitigate the sex effect. The differential methylation analyses were performed using dmrseq (v1.2.5), where we contrasted SJC66 high with SJC66 low whilst correcting for sex, age and DMARD usage using an analogous design formula:

DNA Methylation ∼ sex + age + DMARD usage + SJC66 (dichotomized)

Differentially Methylated Regions (DMRs) were annotated using ChipPeakAnno (v3.16.1) to genes if the DMR was located within 5,000 bp upstream or 1,000 bp downstream of the gene as obtained from Gencode (v29).

Integrated DNA Methylation and Gene Expression Analysis

Integrated analyses were based on the DMRs and GDEs found through the separate DNA methylation and gene expression analyses. The overlap between DMRs and GDEs was called Genes displaying both Differential Expression and Methylation (GDEMs). For each GDEM the median percentage methylation was calculated for all constituent CpGs per sample and correlated with the log2-transformed expression counts to obtain the Pearson correlation coefficient. Confidence intervals (95%) were calculated through 1,000 bootstraps for each GDEM. In short, 17 samples were randomly drawn from the original samples with replacement, whereupon the Pearson correlation coefficient was calculated. This process was repeated 1,000 times to generate the empirical distribution function, which was then used to estimate the confidence intervals. The aforementioned bootstrapping approach was performed using the boot (v1.3) package. For inferential purposes, p-values were calculated by means of a permutation approach. In short, per GDEM, 1,000 sets of consecutive CpGs equal to the length of the observed DMR were sampled and correlated with the gene expression signal as described above, after which the proportion of correlation coefficients higher than the observed correlation coefficient was calculated, yielding the p-value.
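The bootstrap and permutation procedures just described are straightforward to sketch. The outline below is illustrative Python rather than the authors' implementation (which was in R, using the boot package); the methylation matrix, DMR window and expression vector are synthetic stand-ins.

import numpy as np

rng = np.random.default_rng(42)

def bootstrap_ci(meth, expr, n_boot=1000, alpha=0.05):
    """95% CI for the Pearson correlation via case resampling."""
    n = len(meth)
    stats = []
    for _ in range(n_boot):
        idx = rng.integers(0, n, size=n)  # draw n samples with replacement
        stats.append(np.corrcoef(meth[idx], expr[idx])[0, 1])
    return tuple(np.quantile(stats, [alpha / 2, 1 - alpha / 2]))

def permutation_pvalue(cpg_meth, dmr, expr, n_perm=1000):
    """Proportion of random windows of consecutive CpGs (same width as the
    DMR) whose median-methylation/expression correlation exceeds the observed one."""
    width = dmr.stop - dmr.start
    observed = np.corrcoef(np.median(cpg_meth[:, dmr], axis=1), expr)[0, 1]
    starts = rng.integers(0, cpg_meth.shape[1] - width, size=n_perm)
    null = [np.corrcoef(np.median(cpg_meth[:, s:s + width], axis=1), expr)[0, 1]
            for s in starts]
    return float(np.mean(np.array(null) >= observed))

# Synthetic stand-ins: 17 samples, 500 consecutive CpGs, a DMR spanning CpGs 40-60.
meth_matrix = rng.random((17, 500))
expression = rng.standard_normal(17)
print(bootstrap_ci(np.median(meth_matrix[:, 40:60], axis=1), expression))
print(permutation_pvalue(meth_matrix, slice(40, 60), expression))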
Functional Enrichment, Cell Type Enrichment and Protein-Protein Interaction Network Analyses

Gene set enrichment analyses were performed using fgsea (v1.8.0) with the Metabase pathway terms as reference. Metabase pathway terms with an FDR-adjusted p-value less than 0.05 were considered significant. Cell proportions were imputed using xCell (v1.1.0), where transcripts per million were used to estimate the proportion of each of the 64 immune and stromal cell types. Subsequent linear regressions were performed to calculate the p-values to assess statistical significance. Again, we compared SJC66 high with SJC66 low, also correcting for age, sex and DMARD usage. Protein-protein interaction (PPI) network analyses were performed using the STRING (v11) database, in order to identify whether a set of genes was over-represented for interactions. In short, the PPI analysis returned networks of genes where the encoded proteins interacted, co-expressed or co-evolved with one another, based on text mining, curated databases, and experimental data.

Data Availability

The datasets generated and analyzed for this study can be found in the ArrayExpress repository under accession numbers E-MTAB-6638 and E-MTAB-6684 for WGBS and RNA-Seq, respectively. All code is hosted on GitHub at https://github.com/enricoferrero/BTCURE.

Swollen Joint Count Is Associated With the Latent Factor Explaining Variance in Gene Expression and DNA Methylation

We profiled the DNA methylome and transcriptome of 17 synovial tissue samples (Table S1) using whole genome bisulphite sequencing (WGBS) and RNA-sequencing (RNA-Seq). We initially attempted to link gene expression changes to disease duration as well as to cross-patient variations in the Disease Activity Score of 28 joints (DAS28) variables, but these analyses resulted in weak and non-biologically relevant signals, and were ultimately deemed inconclusive for this set of samples (data not shown). Principal Component Analysis (PCA) indicated that variation in both DNA methylation and gene expression was independently correlated with the swollen joint count in 66 joints (SJC66; Figures 1A, B). Comparison of the first principal component of both the DNA methylation and gene expression data suggested agreement, with samples 33, 19, 3, 12A and 25 broadly separating from the other samples for both modalities (Figure 1C). While sample 33 appeared to be an outlier based on the DNA methylation data, the removal thereof did not alter the correlation substantially (Figure S1). To further explore DNA methylation and gene expression at a genome-wide level in an integrative fashion, we performed variance decomposition using multi-omics factor analysis (MOFA). MOFA infers a set of latent factors that capture sources of variability across different measured -omic modalities of the same samples. We found that most variance (approx. 70%) was better explained by gene expression as compared to DNA methylation (Figure 1D). Further decomposition of the variance identified 8 latent factors, with LF1 explaining 40% of the variance in gene expression, whereas variance in methylation was more evenly distributed amongst all eight latent factors. Focusing on LF1, we observed a marked separation between samples with SJC66 of 9 and above compared to those with SJC66 of 8 or less (Figures 1E and S2A, B). While most samples were obtained from the knee, two were obtained from the ankle. Interrogation of the first latent factor did not indicate any correlation with the source of the sample (Figure S2C).
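Mechanically, the per-modality first-principal-component comparison described above amounts to the following sketch; the matrices here are random placeholders rather than the study's data.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
meth = rng.random((17, 5000))   # placeholder: 17 samples x 5,000 most variable CpGs
expr = rng.random((17, 5000))   # placeholder: 17 samples x 5,000 most variable genes

# First principal component of each modality, then their cross-modality agreement.
pc1_meth = PCA(n_components=1).fit_transform(meth)[:, 0]
pc1_expr = PCA(n_components=1).fit_transform(expr)[:, 0]
print(np.corrcoef(pc1_meth, pc1_expr)[0, 1])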
Importantly, sex was also strongly associated with LF1 (p-value = 8.8E-03; Figure S2D), where samples with SJC66 of 9 and above were mostly males and samples with SJC66 equal to or fewer than 8 were mostly females. Therefore, subsequent comparative analyses accounted for the imbalance in sex by removal of allosome-associated genes as well as by including sex as a covariate in linear models.

Differences in DNA Methylation and Gene Expression in Patients With High and Lower Numbers of Swollen Joints Are Associated With Immune Response and Cell Adhesion Pathways

Having observed genome-wide differences through exploratory analyses in both DNA methylation and gene expression data that associated with SJC66, we next investigated which regions and genes were differentially methylated and expressed. At this point we investigated DNA methylation and gene expression separately. While samples with SJC66 = 0 were included for the aforementioned exploratory analyses, they were excluded from subsequent comparative analyses as they were medically not a homogeneous group, consisting of very early arthritis as well as late arthritis with no swelling following medication. The swollen samples were stratified according to the separation observed in the exploratory analyses (Figure 1E) and we compared SJC66 high (SJC66 ≥ 9) with SJC66 low (SJC66 ≤ 8), correcting for age, sex and DMARD usage. Comparative methylation analysis identified 3,536 DMRs (Figure 2A and Table S2), where 2,140 were hypomethylated and 1,396 were hypermethylated. The majority of DMRs were located within 1 Kb of a TSS or in distal regions (Figure 2B). Notably, the most statistically significant DMRs spanned regions larger than 10 Kb (Table S2). The two most significant DMRs were associated with MIRLET7BHG and MIR10B (Figure 2D) and exceeded 15 Kb in length. While the MIRLET7BHG-associated DMR was hypermethylated, the MIR10B-associated DMR was hypomethylated when comparing SJC66 high with SJC66 low. Functional analysis of the DMRs identified several pathways with evidence of differential methylation. Among the top 10, we observed that the NGF/TrkA MAPK pathway, immune response pathways including antigen presentation by MHC class II, macrophage and dendritic cell phenotype shifts, as well as CCR3 signalling in eosinophils, were hypermethylated (Figure 2E and Table S3). Comparing SJC66 high with SJC66 low at the gene expression level identified 142 DGEs, of which 106 were up-regulated and 36 down-regulated (Figure 3A and Table S4). The top 2 DGEs, chemokine ligand 13 (CCL13) (Figure 3B) and C-Type Lectin Domain Containing 10A (CLEC10A) (Figure 3C), were both found to be more highly expressed in the SJC66 high samples. Functional analyses revealed a striking similarity with the differential methylation results, where genes associated with antigen presentation by MHC class II as well as the NGF/TrkA MAPK pathways were overexpressed (Figure 3D and Table S5).

Transcripts Associated With Differentially Methylated and Expressed Genes Display Concordant Expression and Identify Nodal Points for Key Interactions

Having observed pathway-concordant differences in DNA methylation and gene expression, we were interested in identifying specific genes that were both differentially methylated and expressed.
Although DNA methylation has traditionally been associated with gene-level expression, emerging evidence shows that it also regulates alternative splicing. Therefore, we complemented our Differential Gene Expression (DGE) analysis, which masks transcript-level dynamics, with Gene Differential Expression (GDE) analysis, which identifies genes that display transcript-level differences, by combining the p-values of individual transcripts associated with a single gene. We identified 290 genes that displayed perturbations in the expression of their transcripts (GDEs; Table S6). Combining the GDEs with the DMRs yielded 97 unique DMRs associated with 59 unique genes, which we termed Genes Differentially Expressed and Methylated (GDEMs) (Figures 4A, B). We observed that most transcripts, those […] (Figure 4B).

[Figure 3 | SJC66 high versus SJC66 low differential expression analysis. (A) Volcano plots depicting the -log10(p-value) on the y-axis relative to the difference in log2 fold change when comparing the SJC66 high with SJC66 low samples on the x-axis. Colours represent the non-significant genes (orange), the significant genes (blue) and the significant genes with a log2 fold change larger than 1 (green; "sig. …").]

Investigation of the methylation status of the gene overlaid onto the transcript location indicated that the observed DMRs for KIAA1191, KLC1, CDC16, FASTK and GPR132 were located in the promoter shared by most transcripts (Figure 4C). The mechanisms that could cause opposite direction of expression in these genes are currently unclear, although studies in Arabidopsis have identified a methylation reader complex that can enhance rather than suppress gene transcription in the presence of methylation. Our work therefore supports the rationale for further validation and mechanistic studies. Functionally, we observed that the 59 GDEMs were primarily over-represented for immune response-associated pathways, specifically T-lymphocyte-associated pathways (Figure 5A). We interrogated which of the GDEMs encoded known interaction partners by querying the STRING database for documented interactions. Almost half (46%) of the GDEMs encoded for interacting proteins, with the most connected GDEMs being ITGB2 (11 interactions) and LCP2 (9 interactions) (Figure 5C). Both ITGB2 and LCP2 were differentially methylated at multiple locations, with the largest visible differences occurring within the TSS and downstream thereof (Figures 5D, E). Transcript-wise, ITGB2 transcripts ENST00000498666 and ENST00000397852 as well as LCP2 transcript ENST00000046794 were most differentially expressed. Expression Quantitative Trait Methylation (eQTM) analyses confirmed strong inverse correlations between the differences in methylation in the promoter regions of ITGB2 (21:44918461-44921815) and LCP2 (5:170295513-170298924) with transcripts ENST00000498666 (r = -0.9; p-value = 1E-04) and ENST00000046794 (r = -0.9; p-value = 1E-04), respectively (Table 1). While the association between the ITGB2 promoter DMR and ENST00000397852 expression was non-significant (p-value = 0.2353), the correlation coefficient remained high (r = -0.87) (Table S7). Nonetheless, the centrality of ITGB2 and LCP2 among the GDEMs makes them interesting candidates for future targeted studies.

Estimated Cellular Composition Suggests Lower Proportion of Neuronal Cells in SJC66 high

Systematic differences in DNA methylation and gene expression could reflect changes in the cellular composition.
To this end, we estimated the cellular composition using the transcriptome data as input for xCell, which is capable of estimating enrichment scores for 64 immune and stromal cell types. By comparing the estimated proportions from SJC66 high with SJC66 low we identified significant differences for 6 cell types: neurons, dendritic cells (DCs: all, conventional and immature), megakaryocytes and platelets (Figure 6 and Table S8). Expectedly, higher enrichment scores for DCs (all subtypes) and platelets were estimated for the SJC66 high samples. By contrast, lower proportions of neuron and megakaryocyte signatures were observed for the SJC66 high samples. Notably, the difference in neuronal enrichment was found to be the most statistically significant, with an almost fourfold difference when comparing SJC66 high with SJC66 low.

DISCUSSION

In this study, we highlight insights into RA progression by combining the outputs of parallel whole-genome DNA methylome and transcriptome analyses on extracted preparations of synovial biopsies, from auto-antibody positive individuals, early arthritis patients and patients with established RA, stratified by the number of swollen joints. We observed that synovia from patients with a higher number of swollen joints (SJC66 ≥ 9) were different at the level of DNA methylation and gene expression from synovia from patients with a lower number of swollen joints. Specifically, comparing SJC66 high with SJC66 low revealed 3,536 DMRs and 142 DGEs, with both datasets primarily enriched for pathways associated with immune responses. The most significant difference in methylation was found spanning the promoter regions of MIRLET7B and MIR10B. Interestingly, mouse miR-let-7b has been shown to provoke arthritic joint inflammation by remodeling naïve myeloid cells into M1 macrophages via TLR-7 ligation and can augment disease severity. MIR10B has been shown to regulate Th17 cells in patients with ankylosing spondylitis but no studies have specifically associated it with RA. At the level of transcription, CCL13 and CLEC10A were found to be the most differentially expressed. CCL13 (MCP-4) is an extensively studied chemokine that is thought to be involved with RA pathogenesis and disease progression. By contrast, not much is known about the role of CLEC10A in RA besides it being highly expressed on immature dendritic cells (DCs), monocyte-derived DCs and alternatively activated macrophages, as well as having been observed in the inflamed synovium of patients with active RA. Chemokines CCL13, CCL8, CXCL11, CXCL10, and CXCL9 regulate the recruitment of leukocytes into tissue and have therefore been implicated in the pathogenesis of RA. Differential methylation was observed in the vicinity of the promoter for CCL13, CXCL11, and CXCL9. Such results support a role for epigenetic/transcriptional processes in the spread of pathology to additional joints. While definitive mechanisms of joint spreading remain elusive, possible roles for immune cell migration due to chemokine expression could be further evaluated based on our data. Altogether, we observed that 3% of DMRs associate with 20% of the differentially expressed GDEs. It is not surprising that not all DMRs could be linked to genes, as a large number are found in distal intergenic (18.6%) or intronic (28.3%) regions, making any functional inference challenging.

[Table 1 | Expression quantitative trait methylation (eQTM) analysis of the GDEMs, representing the correlation between methylation and transcript expression. Key: Gene = HGNC gene symbol; DMR = co-ordinates of the DMR (GRCh38); DMR p-value = p-value from the differential methylation analysis; EnsT = Ensembl transcript ID; DTE p-value = p-value from the differential transcript expression analysis; eQTM r = Pearson correlation coefficient for the methylation-expression correlation and its 95% confidence intervals; eQTM p-value = p-value for the methylation-expression correlation. An extended parsable table including the full eQTM analysis for all GDEMs can be found in Table S6.]

Of the genes that presented both differential expression and methylation, protein-protein interaction networks indicated that half encoded for interacting proteins, suggesting that the observed GDEMs function together. The most interconnected GDEMs appeared to be ITGB2 and LCP2, with multiple regions of differential methylation observed surrounding both genes. While we observed transcript differential expression, most transcripts belonging to ITGB2 and LCP2 behaved similarly and all displayed reasonably strong inverse correlations with the DMRs located in the promoter area. ITGB2 encodes an integrin, which would typically be involved in cell-surface mediated signalling. We observed that the gene encoding ITGB2's interaction partner integrin alpha L (ITGAL) was also differentially methylated and expressed. ITGB2 and ITGAL together form lymphocyte function-associated antigen (LFA-1), which interacts with intercellular adhesion molecule 1 (ICAM-1 or CD54), resulting in an enhanced immune cell influx into the synovial tissue. Inhibiting LFA-1 has been reported to reduce inflammation and joint destruction in murine models of arthritis. Functionally, the 59 GDEMs were primarily over-represented for immune response-associated pathways, specifically T-lymphocyte-associated ones. It would be fascinating to understand why the DNA methylome and transcriptome of patients with a SJC66 of 9 and above, relative to patients with a SJC66 of 8 or below, present with such a sudden split instead of a gradual difference. It is clear that inflammation is likely an important factor contributing to the observed differences, as patients with a high SJC66 also generally present higher levels of inflammation as expressed through erythrocyte sedimentation rate (ESR) or C-reactive protein (CRP) concentration. Our transcriptomic data indeed suggested an increased proportion of immune cells among SJC66 high samples, as would be expected while pathology develops and cells migrate into the affected joints. Previous work has shown that clinically manifest arthritis in established RA is associated with increased infiltration of leukocytes. In synovial tissue samples from clinically involved joints, scores for infiltration by DCs are consistently higher than in clinically uninvolved joints obtained simultaneously from the same RA patients. Importantly, when comparing different clinically inflamed joints from the same RA patient simultaneously, leukocyte infiltration in one inflamed joint was shown to be representative of that in other inflamed joints, supporting the notion that leukocytes migrate from one joint to another. Indeed, there is continuous influx of leukocytes into the joints in established RA. We postulate that if synovial leukocytes exhibit properties that would facilitate cell migration, arthritis might spread from one inflamed joint to another.
The results presented here support a disease mechanism in which, after development of clinically established RA, inflammatory and cell adhesion-associated processes play a key role in the progression of RA to greater joint involvement. Interestingly, we also observed differences in neuronal signatures, suggesting a lower relative enrichment of neurons among SJC66 high samples. In addition, enrichment analyses on the DMRs suggested hypermethylation of genes encoding nociception receptors, which are typically associated with peripheral sensory neurons. A similar decrease in neuronal signature has previously been associated with RA severity, where the authors noted a potential role in the maladaptive response towards damage. This is consistent with a more general loss of anti-inflammatory control by the nervous system in RA. Important to note is that our observations are based on estimates made by xCell, which can only calculate enrichment scores based on signatures rather than absolute values of cells. We are therefore unable to discern whether a population increased in size or a different population had decreased. Ideally, a similar estimate would have been generated based on the DNA methylation data, but the currently available reference datasets do not include the cell types available in xCell. There are two limitations of this study, namely the fact that sex confounds the separation between SJC66 high and SJC66 low, and the limited sample size. While we have sought to mitigate the confounding effect of sex by removing genes and CpGs located on the allosomes as well as by including sex as a covariate in our analyses, we acknowledge that we cannot fully eliminate the possibility that a sex effect is present. Accordingly, validation studies would be necessary where the GDEMs are verified using a larger, independent cohort while controlling for an interaction effect between sex and SJC66. The observed differences in transcript expression could be validated using a quantitative PCR approach with primers designed specifically against particular transcripts. Similarly, for validating the DMRs, targeted bisulphite sequencing using primers for the regions of interest would be a cost-effective approach. In conclusion, our study constitutes an exploratory analysis of whole genome DNA methylation and gene expression data performed on primary synovial tissue material from auto-antibody positive arthralgia patients without arthritis as well as patients with early and established RA. Where previous studies investigated cells from patients with RA versus disease controls and were potentially limited by their use of cultured cells, we focus on an integrative analysis of epigenetic marks and alternative splicing associated with swelling spread, providing novel insights into the mechanisms of disease progression towards more severe phenotypes. Nonetheless, further validation is necessary if the identified target genes are to be used for monitoring or treatment of the swelling and associated inflammatory processes in the joints of RA patients.

DATA AVAILABILITY STATEMENT

The datasets presented in this study can be found in online repositories. The names of the repository/repositories and accession number(s) can be found below: https://www.ebi.ac.uk/arrayexpress/, E-MTAB-6638; https://www.ebi.ac.uk/arrayexpress/, E-MTAB-6684.
ETHICS STATEMENT

The studies involving human participants were reviewed and approved by the AMC medisch ethisch toetsingscommissie under MEC 02/152, MEC 05/107, and MEC 07/253 for the Synoviomics, Pre-synoviomics, and Synoviomics II cohorts, respectively. Written informed consent was obtained from the individual(s) for the publication of any potentially identifiable images or data included in this article.

AUTHOR CONTRIBUTIONS

EF, HL, DT, CL, MS, DG, PT, and RP conceived the study. KM and GR carried out the laboratory experimental work. EF and CL conceived the analytical design. EF and AL performed the data analysis. HL, DT, CL, MM, PH, and WJ helped supervise the secondary data analyses. EF, AL, KM, HL, and DT led the writing of the manuscript. All authors contributed to the article and approved the submitted version.

ACKNOWLEDGMENTS

We would like to thank the BTCure research consortium for providing access to the samples, Victor Neduva (Target and Pathway Validation, Target Sciences, GSK) for transferring the raw data files, Erika Cule (Research Statistics, GSK) for input into statistical methodology, and Yee Ying Chang (Clinical Genetics, Genome Diagnostics Laboratory, Amsterdam UMC, University of Amsterdam) for implementing the eQTM function in R. This manuscript has been released as a pre-print at https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3576744.
|
Washington (CNN) The 320 U.S. troops at Al Asad air base in Iraq are now coming under "regular" mortar and rocket fire from nearby ISIS fighters, according to Pentagon spokesman Col. Steven Warren. While the attacks have been "completely ineffective," they are raising continuing concern about whether U.S. forces in Iraq can be kept safe and at least technically out of a combat role, a separate defense official said. The Pentagon would not say whether security measures had changed at the base.
An American soldier stands guard at the Taji base complex, which hosts Iraqi and US troops and is located thirty kilometres north of the capital Baghdad, on December 29, 2014.
No U.S. troops have been injured, but it's not clear if any of the hundreds of Iraqi forces at the base have suffered casualties.
Most of the rounds are impacting near the perimeter of the sprawling base, according to the official. The troops at Al Asad are mainly Marines, and are part of the "advise and assist" effort. Since December 20, they have been helping Iraqi units there learn how to conduct air support, mission planning and intelligence gathering.
In another effort at the air base at Taji, about 170 troops from the 1st Infantry Division on December 27 began training four battalions of Iraqi forces in a new six-week program being run by the U.S. military.
All of the training being developed by the U.S. is in part aimed at getting Iraqi forces ready to be able to fight to retake Mosul from ISIS. While some initial military movements near Mosul by the Iraqis have taken place, it could still be weeks before a full military campaign begins, the official said. |
You’ve read about all the jostling behind the scenes in Washington’s quest to reform health care: Big Pharma cutting an $80 billion backdoor deal with the White House, health insurers fighting tooth and nail against a public option, all affected parties and industries positioning themselves to reap the benefits of an overhaul of our $2.5 trillion health-care system. But there’s another industry, one you’ve likely heard less about in the debate, that also stands to win or lose from reform: medical device makers.
The companies bringing you artificial hips, stents, defibrillators, and much more, medical device makers have not cut a deal with the White House or Democratic lawmakers, and face new taxes costing $20 billion or more if the legislation now circulating in Congress becomes law. But as writer Peter Stone points out in his story “Take Two Kickbacks” in Mother Jones’ November/December issue, a lot more than tougher taxes is in order to reform the fraud-ridden, flawed medical device industry.
Stone’s story highlights the prevalence of doctors receiving lucrative kickbacks in exchange for using and promoting a company’s medical products. This kind of illegal plying is so widespread, Stone reports, that between June 2006 and July 2009, device makers paid $535 million to the federal government for illegal marketing activities. One example: In 2006, Stone writes, device maker Medtronic “agreed to pay the feds $40 million to settle allegations that from 1998 through 2003 it had set up sham consulting and royalty agreements, trips to strip clubs in Tennessee, and other incentives to entice surgeons to use its spinal products.” Though the consequences of these kinds of deals can be fatal, they’re hardly novel in an industry plagued by graft and fraud.
If Stone’s story shows us anything, it’s that, like health insurers and drug makers, the medical device industry is long, long overdue for reform, too. |
Inception Network for Baybayin Handwriting Recognition

When the Philippine government legislated that Baybayin should become the Philippines' national writing system, educational institutions and other cultural organizations started planning to incorporate this writing method into the existing educational curriculum. One way to implement this writing method is through a smartphone app. As of October 2018, an eLearning app named Learn Baybayin has been available on the Android Play Store. This eLearning app teaches anyone how to read and write the Baybayin script. The mobile app is powered by a convolutional neural network (CNN) that was trained to 94% accuracy on the validation data. This study aims to replace the existing CNN model with an inception network, because this type of neural network can successfully classify patterns or details regardless of their size in an image. After testing an inception model five times, the average validation accuracy was 96.2%, which is higher than that of the current CNN model. The inception network achieved a higher validation accuracy than the previous LSTM (92.9%) and CNN (94%) models. Because of this, the inception network will replace the current model used by the eLearning app Learn Baybayin to recognize handwritten characters.
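The abstract does not specify the network topology, so the following is only a generic sketch of the kind of inception block it refers to, written with the Keras API. The input size, filter counts and the 17-class output (one per base Baybayin character) are assumptions rather than details from the study; the point is the parallel convolution branches with different receptive fields, which let the block respond to stroke patterns regardless of their scale.

import tensorflow as tf
from tensorflow.keras import layers

def inception_block(x, filters=32):
    # Parallel branches with 1x1, 3x3 and 5x5 receptive fields plus pooling,
    # concatenated along the channel axis.
    b1 = layers.Conv2D(filters, 1, padding="same", activation="relu")(x)
    b3 = layers.Conv2D(filters, 1, padding="same", activation="relu")(x)
    b3 = layers.Conv2D(filters, 3, padding="same", activation="relu")(b3)
    b5 = layers.Conv2D(filters, 1, padding="same", activation="relu")(x)
    b5 = layers.Conv2D(filters, 5, padding="same", activation="relu")(b5)
    bp = layers.MaxPooling2D(3, strides=1, padding="same")(x)
    bp = layers.Conv2D(filters, 1, padding="same", activation="relu")(bp)
    return layers.Concatenate()([b1, b3, b5, bp])

num_classes = 17  # assumption: one class per base Baybayin character
inputs = layers.Input(shape=(64, 64, 1))  # assumed grayscale handwriting patch
x = inception_block(inputs)
x = layers.MaxPooling2D(2)(x)
x = inception_block(x, filters=64)
x = layers.GlobalAveragePooling2D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
|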
A tomographic algorithm to determine tip-tilt information from laser guide stars

Laser Guide Stars (LGS) have greatly increased the sky-coverage of Adaptive Optics (AO) systems. Due to the up-link turbulence experienced by LGSs, a Natural Guide Star (NGS) is still required, preventing full sky-coverage. We present a method of obtaining partial tip-tilt information from LGSs alone in multi-LGS tomographic LGS AO systems. The method of LGS up-link tip-tilt determination is derived using a geometric approach, then an alteration to the Learn and Apply algorithm for tomographic AO is made to accommodate up-link tip-tilt. Simulation results are presented, verifying that the technique shows good performance in correcting high altitude tip-tilt, but not that from low altitudes. We suggest that the method is combined with multiple far off-axis tip-tilt NGSs to provide gains in performance and sky-coverage over current tomographic AO systems.

INTRODUCTION

The use of Laser Guide Stars (LGSs) in Adaptive Optics (AO) has greatly increased the area of the sky available for correction, from ≈ 10% up to ≈ 85% (?). In turn this has led to a vast increase in the number of astronomical science targets which can be observed using AO. The laser experiences turbulence whilst traveling upwards to form an artificial guide star, so its position will move in the sky. It is thought that this effect renders all 'tip-tilt' information gained from LGS Wave-Front Sensors (WFS) useless, as it is a function of LGS 'up-link' movement and the desired 'down-link' tip-tilt, which have previously been considered to be entangled irretrievably. It has even been suggested that the tip-tilt the laser acquires on the up-link is the reciprocal of the global tip-tilt on the down-link path, hence little tip-tilt will be observed on the WFS at all (?). To correctly obtain the science path tip-tilt, a Natural Guide Star (NGS) must still be used (?). As a tip-tilt WFS requires relatively few photons and the anisoplanatic patch size is large for tip-tilt modes, the requirements on a NGS are much lessened (?). Nonetheless, requiring a tip-tilt NGS still limits the sky-coverage of an LGS AO system. Tomographic LGS configurations, such as Laser Tomographic AO (LTAO) (?), Multi-Object AO (MOAO) (??) and Multi-Conjugate AO (MCAO) (??), are coming online. These AO configurations use information from a number of LGSs, off-axis from the science target, to estimate the science path turbulence. Such systems overcome the so-called cone-effect, where the LGS samples a cone of turbulence in the science path rather than the full cylinder of turbulence seen by light from the science target (?). They can also achieve a large corrected field of view in the case of MCAO, or a large 'field of regard' in the case of MOAO. Tomographic LGS systems still require a NGS to estimate tip-tilt modes, limiting their potential sky coverage. Suitable NGS are notoriously absent from much of the sky around the galactic poles, where many scientifically interesting targets exist (?). Methods to obtain all correction information from LGSs alone have been proposed. Some have not yet been implemented due to the requirement of complex laser schemes and/or auxiliary beam viewing telescopes (??????). ? discusses the use of the temporal delay between the launch time of a laser pulse and the time it is received by the telescope to estimate up-link tip-tilt.
This technique requires the use of the full telescope aperture for laser launch, which implies some problems with scattered light and fluorescence. More recently, ? has proposed an LGS-assisted lucky imaging system, which could provide full sky AO coverage but entails discarding some science flux and would not be suitable for spectroscopy. ? explored the potential usage of LGS AO with no tip-tilt signal, allowing 100% sky-coverage. It was found that for some applications a dedicated NGS tip-tilt star was not required and a telescope's fast guiding system was adequate. In this paper we propose a method to retrieve partial tip-tilt information from a number of LGSs in existing or currently proposed tomographic AO systems, from only the system's wavefront sensor measurements. This is similar to that proposed by ?, though we do not require knowledge of the exact sky position of one of the LGSs. We aim to improve AO performance across the whole sky over AO performed with no tip-tilt NGS, potentially relaxing the requirement for, or for some applications eliminating the need for, a NGS. If the tip-tilt modes measured across the full aperture are the same as those across the beam launch aperture, it is clear that the tip-tilt signal would indeed be irretrievable from LGS WFSs, as they would be reciprocal and little atmospheric tip-tilt would be observed from a LGS WFS at all. In § 2, we show that the tip-tilt modes across the beam launch telescope are uncorrelated with those over the whole aperture, opening the possibility of LGS up-link tip-tilt determination. The algorithm for up-link tip-tilt determination is derived in § 3 and an adaptation to the Learn and Apply (LA) algorithm proposed by ? for MOAO is suggested as a practical method for its use. Results from simulation verifying the technique are presented in § 5. Finally, in § 6 we discuss the practical uses of an LGS up-link retrieving AO system, and how it may provide increases in LTAO sky-coverage and performance by combination with ground layer tip-tilt correction.

CORRELATION OF TIP-TILT BETWEEN TELESCOPE AND BEAM-LAUNCH APERTURES

If the global tip-tilt across the telescope aperture is identical to that over the beam launch telescope, any tip-tilt encountered by the LGS up-link path will have an equal but opposite effect on the return path (though with a slight change due to temporal delay (?)). Consequently little tip-tilt will be observed on the LGS WFS and the tip-tilt component of the science path cannot be determined by that WFS. This is referred to as tip-tilt 'reciprocity' and is the case if the laser is launched from the full aperture of the telescope. All current facility LGS AO systems use a separate Laser beam Launch Telescope (LLT). On these telescopes and those planned for the future, D_LLT << D, where D_LLT denotes the diameter of the LLT and D is the size of the telescope aperture. Determination of LGS up-link tip-tilt can only be possible if the up-link and down-link tip-tilt components are uncorrelated, or it will not be fully observed by the WFS. The covariance between two concentric Zernike modes of different radii in Kolmogorov turbulence is shown in equation (?), where the fractional size parameter describes the size relationship between the two apertures, n and p are the radial orders of the two Zernike polynomials, R is the radius of the telescope, J_{n+1} and J_{p+1} are Bessel functions of the first kind, k is the wave number of the light and r_0 is the atmospheric Fried parameter (?).
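As an informal numerical aside, this claim can be spot-checked with simulated phase screens. The sketch below is an assumption-laden illustration, not the simulation used for the paper's Figure 1: it builds FFT-based Kolmogorov screens and correlates the mean x phase gradient (a proxy for the tilt mode) over the full aperture with that over a small concentric launch-sized aperture. The telescope diameter, r_0 and D_LLT/D values are arbitrary choices, and FFT screens under-represent the lowest spatial frequencies unless subharmonics are added, so the result only roughly approximates the plotted curves.

import numpy as np

def kolmogorov_screen(n, r0, dx, rng):
    """One FFT-based Kolmogorov phase screen, n x n samples at spacing dx [m]."""
    df = 1.0 / (n * dx)                              # frequency spacing [1/m]
    fx = np.fft.fftfreq(n, d=dx)
    f = np.hypot(*np.meshgrid(fx, fx))
    f[0, 0] = 1.0                                    # dummy value; piston zeroed below
    psd = 0.023 * r0 ** (-5.0 / 3.0) * f ** (-11.0 / 3.0)
    psd[0, 0] = 0.0                                  # remove piston
    cn = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
    return np.fft.ifft2(cn * np.sqrt(psd) * df * n * n).real

rng = np.random.default_rng(1)
n, D, r0 = 128, 8.0, 0.15                            # 8 m telescope, r0 = 15 cm (assumed)
dx = D / n
y, x = np.indices((n, n)) - n / 2
full = x ** 2 + y ** 2 < (n / 2) ** 2                # full telescope aperture
small = x ** 2 + y ** 2 < (0.05 * n / 2) ** 2        # launch aperture, D_LLT / D = 0.05

tilt_full, tilt_small = [], []
for _ in range(1000):
    gx = np.gradient(kolmogorov_screen(n, r0, dx, rng), dx, axis=1)  # x gradient
    tilt_full.append(gx[full].mean())                # mean tilt over full aperture
    tilt_small.append(gx[small].mean())              # mean tilt over launch aperture
print(np.corrcoef(tilt_full, tilt_small)[0, 1])      # expected to be small (< ~0.1)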
The covariance between concentric tip-tilt modes of different radii is plotted in Figure 1 (where $n, p = 1$). A plot of the correlation of tip-tilt modes in ten thousand simulated random Kolmogorov phase screens is also shown. It is evident that the correlation of tip-tilt modes between small and large apertures in the regime where $D_{LLT}/D < 0.1$ is less than 0.1. This is true for both the theoretical expression and the simulated phase screens, which match closely in this regime. This result means that tip-tilt modes will not be reciprocal and will be visible on an LGS WFS. The observed tip-tilt will be some function of the turbulence encountered by the laser as it propagates up to form an artificial guide star and of the global tip-tilt across the telescope aperture as the light propagates back. It should be noted that the tip-tilt observed by the laser will be larger than that seen by the full aperture, due to the greater spatial averaging of the larger aperture. Despite being uncorrelated, it is possible that by chance a similar component of tip-tilt is present across both the launch and telescope apertures, in which case it will not be observed on an LGS WFS. If multiple LGSs are used, the chance of the same component of tip-tilt being present across all launch paths is significantly reduced. Retrieving down-link turbulence induced slopes As demonstrated in the previous section, the measurement from an LGS WFS is a function of the atmospheric turbulence the laser propagates through on the way up to form a guide star and the turbulence the return light propagates through as it travels back down to the telescope. For AO correction of an astronomical science target the two components must be separated, as only the latter is required. If using a Shack-Hartmann or pyramid WFS, measurements will be in the form of slopes representing the gradients of the measured phase within any given sub-aperture. The use of such a gradient measuring WFS is assumed in the following derivations. We can express the slopes measured by an LGS WFS as the sum of the laser up-link induced slopes and the down-link turbulence induced slopes, $s = s_l + s_t$, where $s$ is a vector representing the slopes measured on a WFS, $s_l$ is a vector representing the slopes caused by LGS up-link turbulence and $s_t$ is a vector representing the slopes caused by down-link turbulence. For AO correction of a natural astronomical science target we must obtain $s_t$. Note that LGS up-link turbulence results exclusively in tip-tilt modes being observed on the WFS and no higher order modes, so $s_l$ is homogeneous in the x and y directions. For an AO system with a single LGS and no external reference, determining $s_t$ is not possible as there is not enough information to determine $s_l$. In a tomographic system there is more information about the turbulence sampled by the LGSs on the up-link, and $s_t$ can be computed. For the remainder of this section we consider a trivial 2-dimensional, tomographic, two-LGS AO geometry, where both LGSs are centre-launched. The following approach can be scaled to many centre-launched LGSs, though the mathematics quickly becomes cumbersome with more than three. The LGSs are labelled LGS1 and LGS2, the observing WFSs WFS1 and WFS2, and the slopes measured on these WFSs are denoted $s^1$ and $s^2$ respectively. This geometry is illustrated in Figure 2.
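The aperture-size decorrelation invoked above can be checked numerically in the same spirit as the phase screen test of § 2. The Python sketch below is a minimal illustration rather than the code behind Figure 1: it estimates tilt as the mean x-gradient of the phase over each aperture, and the phase screen generator screen_gen is assumed to be supplied by the caller (for example an FFT-based Kolmogorov generator such as those available in the aotools package).

import numpy as np

def tilt(phase, mask):
    # mean x-gradient of the phase over an aperture mask,
    # proportional to the Zernike tilt coefficient on that aperture
    gx = np.gradient(phase, axis=1)
    return gx[mask].mean()

def tilt_correlation(screen_gen, n_screens=10000, eps=0.1, size=128):
    # screen_gen(size) -> (size, size) Kolmogorov phase screen (assumed helper)
    yy, xx = np.indices((size, size)) - size / 2.0
    r = np.hypot(xx, yy)
    full = r < size / 2.0           # full telescope pupil
    small = r < eps * size / 2.0    # launch aperture, diameter ratio eps
    t_full, t_small = [], []
    for _ in range(n_screens):
        phs = screen_gen(size)
        t_full.append(tilt(phs, full))
        t_small.append(tilt(phs, small))
    # correlation coefficient between launch-aperture and full-pupil tilt
    return np.corrcoef(t_full, t_small)[0, 1]

For eps below about 0.1 the returned correlation should fall below about 0.1, consistent with the regime described in § 2.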
WFS2 observes the area of turbulence which causes the up-link tilt on WFS1, hence we postulate that there is a transform $T^2$ which relates the down-link turbulence induced slopes on WFS2, $s_t^2$, to the up-link induced tip-tilt measured on WFS1, $s_l^1$: $s_l^1 = T^2 s_t^2$. We initially consider the simple situation illustrated in Figure 2, where a single turbulent layer at a height $h$ features only a tilt in the section where LGS1 overlaps with the field of view (FOV) of sub-aperture $n$ on the WFS observing LGS2. In this case it is clear that such a transform, $T^2$, exists and can be trivially computed: WFS2 is unaffected by up-link turbulence, so $s_l^2 = 0$, hence $s_t^2 = s^2$ and $s_l^1 = T^2 s^2$. In general, however, $s_t^2$ will not be known, as LGS2 will also experience up-link tip-tilt. For this general case, $s_l^1 = T^2 s_t^2$ and $T^2 s^2 = s_l^1 + T^2 s_l^2$. We now have two equations (together with their counterparts with the indices exchanged) with which to solve for the two unknowns, $s_l^1$ and $s_l^2$. Re-arranging the second equation, substituting into the first, and finally re-arranging for $s_l^1$ (and similarly for $s_l^2$) yields a closed-form solution in terms of the measurements alone; this algebra is written out at the end of this subsection. [Figure 2: The geometry of the LGS system under consideration. One turbulent layer at height h is shown, which features only a tilt at the point where LGS1 overlaps with the field of view of sub-aperture n on WFS2; the actual position of the LGS is displaced due to up-link turbulence.] $s^1$ and $s^2$ are the WFS measurements, and the $T$ transforms can be obtained by considering the geometry of the system, i.e. where sub-apertures from a WFS observe the up-link path of the other laser(s). It is now possible to calculate the turbulence induced slopes, as $s_t = s - s_l$. These are the slopes which would have been measured from a guide star with no up-link tip-tilt effects, and can be used to perform the AO reconstruction without the requirement of an NGS for tip-tilt measurement. The above analysis can be performed for more complex LGS AO systems with many LGSs in other geometries. In general there will be more than one discrete turbulent layer in the atmosphere, hence the measurement of a particular element in $s_t^2$ which overlaps with LGS1 will not just represent the turbulence at height $h$, but will be the sum of measurements from all turbulent layers. This represents some noise in the measurement of $s_l^1$. The noise is mitigated by increasing the number of LGSs, such that contributions from non-overlapping heights average to zero, leaving only the common measurement of the slope at the point where LGS1 overlaps with the layer at altitude $h$. For the centre-launched case, the slopes due to down-link turbulence, $s_t$, cannot be determined for a turbulent layer at the ground. For a layer at this height $s_t^1 = s_t^2$, $s_l^1 = s_l^2$ and $s^1 = s^2$, and there is no longer more than one independent equation from which to determine $s_l^1$ and $s_l^2$. Further to this issue, if the lasers are centre-launched then the beam paths are likely to be obscured by the "shadow" of the secondary obscuration, so the turbulence that the laser passes through near the ground is not measured. An AO system that launches LGSs from different points within the telescope aperture could potentially overcome these limitations. In this case $s_l^1 \neq s_l^2$ at the ground layer, so both can be determined. Depending on the laser launch scheme, it is also possible that the low layer turbulence in the beam path could be observed. Such a scheme does however entail other difficulties, such as scattering from launching the laser directly off the primary mirror. A system with LGSs launched from outside the telescope aperture (side-launched) is unlikely to be suitable for this method of LGS up-link tip-tilt correction, as an LGS's launch path is not observed by other LGS WFSs.
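For reference, the elimination just described can be written out explicitly from the definitions above, with $I$ the identity operator on the tip-tilt measurement space:

$$s^1 = s_t^1 + s_l^1, \qquad s^2 = s_t^2 + s_l^2, \qquad s_l^1 = T^2 s_t^2, \qquad s_l^2 = T^1 s_t^1.$$

Substituting $s_t^2 = s^2 - s_l^2$ and $s_l^2 = T^1 (s^1 - s_l^1)$ into the expression for $s_l^1$ gives

$$(I - T^2 T^1)\, s_l^1 = T^2 s^2 - T^2 T^1 s^1, \qquad s_l^1 = (I - T^2 T^1)^{-1} \left( T^2 s^2 - T^2 T^1 s^1 \right),$$

and similarly for $s_l^2$ with the indices exchanged. The required down-link slopes then follow as $s_t = s - s_l$.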
It is possible that outer WFS sub-apertures could be used, as they may correlate strongly with the launch path turbulence, though this is outside the scope of this work. Obtaining LGS up-link transforms The LGS up-link matrices describing the response of LGS motion to WFS measurements were defined above; they relate down-link turbulence measurements of a WFS to the predicted up-link path of another LGS. They can be calculated by considering the geometry illustrated in Figure 2 and the effect of a layer at a height $h$ with a small region of turbulence where the FOV of sub-aperture $n$ overlaps with the LGS1 beam, the LGS itself being at height $H$. For a given sub-aperture, $s_l^1$ is the slope measured due to up-link tip-tilt on WFS1. It is related to the slope measured on the corresponding sub-aperture of WFS2 which views the LGS1 beam at height $h$, $s_t^2$; the geometric relationship between the two is derived in Appendix A. The system has only a finite vertical resolution (defined by the number of sub-apertures and the LGS asterism separation) with which to sense turbulent layers. Sub-aperture $n$ will observe the integrated turbulence between where the LGS enters its FOV and where it exits its FOV. Different groups of sub-apertures on a WFS will correspond to different turbulent layer heights for which they can predict up-link tip-tilt on other LGS WFSs. Recalling that $s_l$ is homogeneous, the transform can be written in terms of $h_n$, where $h_n$ denotes the centre of the vertical height 'bin' resolvable by sub-aperture $n$. By considering the system geometry, including the launch angles of LGS1 and LGS2, $\theta^1$ and $\theta^2$ respectively, we show in Appendix B how $h_n$ can be expressed in terms of these angles and the sub-aperture position. Again, this relationship can be extended to any number of LGS WFSs, where one WFS observes the path of another LGS. The final step in creating an LGS up-link transform is to tailor the matrix to the known vertical profile of atmospheric turbulence. Each column in the transform matrix represents a vertical height bin resolvable by the tomographic LGS AO system. If a turbulence profile is known, then columns that represent heights where there is negligible turbulence can be set to zero. This step reduces the noise contributed by 'false layers' which could otherwise be detected, where random perturbations from real turbulent layers, or noise, could mimic turbulence at a height where none is present. Such a profile can be obtained either from the tomographic AO system itself or from an external profiling instrument. A LEARN AND APPLY APPROACH The geometric approach described in the previous sections to estimate and recover LGS tip-tilt modes is clearly highly idealised. It requires knowledge of the vertical $C_n^2$ turbulence profile, and that the calibration of the LGS WFSs and the pointing of the LGSs is perfect. It also does not take into account our understanding of atmospheric turbulence statistics to improve correction. As the correlation between adjacent sub-apertures can be significant (?), information from sub-apertures around those which view the up-link path of another LGS can be used to improve estimation of its up-link tip-tilt. This also allows an estimate to be made of ground layer tip-tilt, even for centre-launched LGS AO systems, from sub-apertures surrounding the central obscuration. Learn and Apply (LA) is a method used in tomographic AO systems, such as MOAO and LTAO, for open-loop tomographic reconstruction which accounts for atmospheric turbulence statistics and the calibration of an AO system (?). Instead of using a purely geometrical approach for LGS up-link tip-tilt determination, LA can be modified to implicitly account for up-link tip-tilt.
LA is briefly described below. If there is a linear relationship between off-axis WFS measurements, $s_{off}$, and WFS measurements on-axis to the direction of a science target, $s_{on}$, one can write $s_{on} = W s_{off}$, where $W$ is the tomographic reconstructor. If $W$ can be obtained, it can be used to calculate pseudo WFS measurements in the direction of a potential science target, which can then be used to calculate DM commands to provide correction in that direction. To estimate it, a large number of uncorrected WFS measurements of both on- and off-axis slopes may be taken. In this case, the set of on-axis measurements is denoted $M_{on}$ and the set of off-axis measurements $M_{off}$. Vidal et al. show that a tomographic reconstructor for these specific measurements, $W'$, can be expressed as $W' = M_{on} M_{off}^t (M_{off} M_{off}^t)^{-1}$. If the number of measurements taken to form $M_{on}$ and $M_{off}$ approaches infinity, the tomographic reconstructor approaches $W$, a general reconstructor which can reconstruct any set of slopes for the given guide star geometry and atmospheric turbulence profile. In this limit, the expressions $M_{on} M_{off}^t$ and $M_{off} M_{off}^t$ approach $C_{on,off}$ and $C_{off,off}$, the covariance matrices between on-axis and off-axis slopes, and between off-axis and off-axis slopes, respectively. The generalised tomographic reconstructor may now be expressed as $W = C_{on,off}\, C_{off,off}^{-1}$. If the profile of the atmosphere and the system calibration are well known, both covariance matrices can be calculated purely analytically from statistical descriptions of turbulence. As this is not often the case, an alternative is to record some data from the system in open loop to create a 'raw' covariance matrix which contains information regarding the atmospheric profile and AO system calibration. The 'raw' covariance matrix cannot be used alone to create the tomographic reconstructor, as it is not generalised and would also contain errors due to noise. It can, though, be used as a reference to which an analytically generated covariance matrix, which is generalised and not prone to noise effects, is fitted. In this way LA creates a generalised tomographic reconstructor which accounts for AO system calibration and for our statistical description of atmospheric turbulence. This process is termed the 'learn' stage of the reconstructor. Once both covariance matrices have been computed, the reconstructor, $W$, can be formed and 'applied' to off-axis slopes to give an estimate of on-axis slopes. The LA algorithm has been tested successfully both in the laboratory and on-sky by the CANARY MOAO demonstrator (?). We propose that the LA algorithm is also applicable to LGS tip-tilt determination, as it was demonstrated in § 3 that the required science direction slopes are a linear function of the off-axis LGS measurements. The advantages of using LA are manifold. LGS tip-tilt determination can account for system alignment and LGS pointing. The mathematics shown in § 3, which quickly becomes cumbersome, does not have to be repeated for higher numbers of LGSs. The turbulence profile does not have to be externally measured to a very high vertical resolution. Finally, and perhaps most importantly, the use of covariance matrices implicitly includes information about LGS up-link from sub-apertures near to those identified as geometrically observing an LGS beam. To use LA, it must be altered to account for the fact that the tip-tilt signal from LGS WFSs is no longer removed. The analytical form of the slope covariance matrices in this case must be derived.
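To make the learn stage above concrete, the following minimal Python sketch forms the finite-data reconstructor $W'$ from recorded on- and off-axis slope sets and applies it to new off-axis measurements. It is an illustration of the formalism above, not the CANARY or Soapy implementation; the regularisation term lam is an assumption added for numerical stability and is not part of the expressions quoted in the text.

import numpy as np

def learn(M_on, M_off, lam=1e-6):
    # M_on: (n_on_slopes, n_frames), M_off: (n_off_slopes, n_frames);
    # columns are simultaneous open-loop WFS frames
    C_onoff = M_on.dot(M_off.T)     # approaches C_on,off for many frames
    C_offoff = M_off.dot(M_off.T)   # approaches C_off,off for many frames
    n = C_offoff.shape[0]
    # W' = M_on M_off^t (M_off M_off^t)^-1, lightly regularised
    return C_onoff.dot(np.linalg.inv(C_offoff + lam * np.eye(n)))

def apply_reconstructor(W, s_off):
    # pseudo science-direction slopes from off-axis LGS measurements
    return W.dot(s_off)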
We consider the covariance between the slope measurements of two separated WFSs, which with the definitions above becomes

$$\langle s^1 s^{2t} \rangle = \langle (s_t^1 + s_l^1)(s_t^2 + s_l^2)^t \rangle = \langle s_t^1 s_t^{2t} \rangle + \langle s_t^1 s_l^{2t} \rangle + \langle s_l^1 s_t^{2t} \rangle + \langle s_l^1 s_l^{2t} \rangle.$$

Of these terms, the first results only from down-link turbulence and is the same covariance matrix as would be required in conventional Learn and Apply. This term can be calculated in a form similar to that which ? use to obtain the covariance matrices between separated NGS WFS measurements, with some modification to account for the cone effect associated with LGSs. The second and third terms describe the relationship between the observed down-link turbulence and the tip and tilt observed by the other WFS due to the patch of turbulence that the lasers pass through on their up-link paths. They can again be calculated by considering the separation of each sub-aperture on the down-link from the launch path of each laser. As they are formed by a large tip or tilt from one WFS correlating with measurements from a single, or small number of, sub-aperture(s) of another WFS, it is expected that they will appear as a matrix of vertical and horizontal stripes. The final term is the covariance between the up-link induced tip-tilt measurements. This value depends upon the separation between the two laser paths at a given altitude and, as it results from tip and tilt only, it is constant for each pair of WFSs. Assuming a centre-launched case, this term will be large for low altitude layers, where the up-link laser paths overlap, and small at high layers, where the laser paths are separated. As it is constant, this term reduces the contrast of the covariance matrices and so effectively makes them less useful. Hence it is again expected that this approach will work well for higher layers, where this term is small, but less well for low layers, where it will dominate. The simulations performed in § 5 use an LA approach to predict LGS up-link tip-tilt. We do not yet attempt to derive the analytical form of the required covariance matrices. We instead rely on the fact that we can simulate a very large number of uncorrelated phase screens to create a large set of 'learn' slopes, from which covariance matrices between off-axis and on-axis slopes are computed. Deriving the analytical covariance matrices could improve on the performance we show and would be essential for use on-sky. Simulation set-up In the following simulations we use the modified, tip-tilt retrieving LA based algorithm to perform LGS AO correction with no NGS tip-tilt WFS, and compare these results to the currently used LTAO configuration, where tip-tilt information is removed from the LGS slopes and a low-order NGS is used to obtain tip-tilt information. We show a "best-case" scenario for LTAO, where the tip-tilt NGS is on-axis. To show that the tip-tilt determination is working correctly, we also simulate an LGS tomographic system where LGS tip-tilt information is removed but no NGS is used for tip-tilt correction. In all simulated AO modes the lasers are launched from the centre of the telescope aperture, behind the secondary obscuration. The code used to perform these simulations is written solely in the Python programming language and incorporates full physical optical propagation of the LGS as it passes up through the atmosphere to form a guide star. This is necessary to accurately model the LGS up-link path, as the Fresnel number for a typical LGS beam is ≈ 1, meaning diffraction cannot be ignored (?).
A geometric ray tracing method is used to calculate the wavefront measured on the WFS from the turbulence encountered as light passes down through the atmosphere. The PSF formed by the LGS up-link is then convolved with each sub-aperture PSF to give a realistic simulation of LGS up-link turbulence. Focal anisoplanatism (the cone effect) is included when propagating light down from an LGS. These simulations include neither read noise nor photon shot noise. Simulation parameters are shown in Table 5.1. The code, "Soapy", is publicly available and free to use. The phase screens used in these simulations have been made significantly larger than the simulated telescope aperture. This mitigates the periodic nature of some phase screen generation algorithms and ensures that there is sufficient power in low order modes at the spatial scale of the telescope aperture (?). Simulated covariance matrices The creation of the tomographic LGS tip-tilt reconstructor depends upon the covariance matrices between the various WFSs, the form of which was derived above. For the results presented in this paper we simulate the covariance matrices by recording many open loop AO system frames on uncorrelated turbulence phase screens, until the covariance matrices converge to the theoretical case. This process must be repeated for every atmospheric and AO configuration simulated. An example simulated covariance matrix between two LGS WFSs with up-link included, with a single turbulence layer at an altitude of 14 km, is shown in Figure 3. The covariance matrix has been deconstructed into its constituent terms by simultaneous simulation of LGSs, with and without the effects of LGS up-link tip-tilt, observing in the same direction. The final covariance matrix used to create the tip-tilt LGS tomographic reconstructor is the sum of these terms. From Figure 3 it is possible to see that the terms match our qualitative predictions. The first term, $\langle s_t^1 s_t^{2t} \rangle$ (top-left), is identical to that caused by down-link turbulence and hence looks similar to those used for conventional LA LTAO, though with down-link tip-tilt included. The final term, $\langle s_l^1 s_l^{2t} \rangle$ (top-right), is the result of covariance between the paths of the two lasers and is hence constant for each x-y pair. The middle terms, $\langle s_t^1 s_l^{2t} \rangle$ and $\langle s_l^1 s_t^{2t} \rangle$ (bottom left and right), are dominated by the strong covariance between up-link tip-tilt on one WFS and the small number of sub-apertures on the other WFS which observe its up-link path. Hence they are seen as horizontal and vertical "stripes" of high slope covariance. Performance versus turbulence altitude In § 3 and § 4 it was predicted that the method of including LGS up-link tip-tilt would be more effective for turbulence at higher altitudes. To investigate this hypothesis, AO correction performance versus the altitude of a single turbulence layer is simulated, with results shown in Figure 4. In line with our predictions, the results show low performance of LGS up-link tip-tilt determination when low layer turbulence is present. More promisingly, they also show that the method works well in correcting high layer turbulence, where the correction may even approach that of LTAO using an on-axis NGS for tip-tilt correction. Performance with a multi-layer turbulence profile To give an impression of how the algorithm may perform with no NGS under more realistic atmospheric turbulence conditions, we perform simulations with the profile shown in Figure 5.
This contains a strong ground layer, which is unlikely to be well corrected by the up-link tip-tilt determination, as well as significant higher layers, for which the benefit of including the tip-tilt correction may become apparent. Figure 6 shows the performance of LTAO with an on-axis tip-tilt NGS, LTAO with tip-tilt retrieval, and LTAO with no tip-tilt correction, versus increasing seeing strength. With a strong ground layer present the method is clearly not as effective as when there is only high layer turbulence. It does still provide a small improvement over correction with no tip-tilt information. To further investigate how this small improvement could aid spectrographic instruments, the ensquared energy into a potential spectrograph spaxel is plotted against the size of the spaxel in Figure 7. For these results the Fried parameter, $r_0$, is 14 cm. As previously discussed, ? have explored how LGS AO with no tip-tilt correction may be useful for some science cases due to the increased throughput. Our method can provide higher throughput, still without the use of any NGSs. For instance, a spectrograph with a spaxel size of 100 mas will receive around 5% more light per spaxel under our simulated conditions. [Figure 5: The atmospheric turbulence profile used in the simulations (turbulence strength versus altitude in km). It contains high layers of significant strength, which will require a tomographic system to correct. The profile also includes a strong ground layer, which we do not expect to be well corrected by the LGS tip-tilt determination.] [Figure 6: Performance of LTAO with an on-axis tip-tilt NGS (dashed), with no NGS and the tip-tilt retrieval (solid), and with no tip-tilt correction (dotted). The tip-tilt retrieval method performs slightly better than with no tip-tilt correction, but the presence of ground layer turbulence degrades performance significantly.] [Figure 7: Ensquared energy versus spaxel size for LTAO with an on-axis tip-tilt NGS (dashed), with no NGS and the tip-tilt retrieval (solid), and with no tip-tilt correction (dotted). Though never approaching that of LTAO with an on-axis NGS, the tip-tilt retrieval improves throughput over the case when no tip-tilt correction is performed.] Improvement of LGS-only AO The method presented in this paper has been shown to correct well for high layer turbulence above a tomographic LGS AO system. As was predicted in the derivation of the algorithm, it does not correct well for low layer turbulence. At most major observing sites the ground layer of turbulence is a significant contributor to overall seeing strength (???), especially when combined with other effects such as seeing caused by the telescope structure itself (??). It is also possible that a turbulence profile containing more than five layers may reduce performance further. A profile with only five layers was chosen due to the computational load of performing physical light propagation between each layer. It is intended to optimise the code to allow simulations with a higher number of turbulence layers in future work. With this in mind, the up-link tip-tilt method alone is unlikely to provide AO correction approaching that of LTAO with NGS tip-tilt guide stars. However, for some science cases which require low-order correction in a section of the sky sparsely populated by suitable tip-tilt NGSs, accounting for up-link turbulence may still be of some use. We have shown that it may provide a modest improvement in system throughput.
It can be implemented without great hardware modification, requiring only a change of tomographic reconstructor in the real-time controller. Combination with ground layer NGS tip-tilt correction The greatest potential impact of this method of accounting for LGS up-link turbulence is in combination with a number of far off-axis tip-tilt NGSs. With the LGS tip-tilt retrieval, high layer turbulence can be corrected, and the LGSs can still correct high order ground layer turbulence. This leaves only turbulence near the ground for which the AO system requires tip-tilt information. The ground layer is common to all directions, so it can consequently be corrected using a number of very far off-axis NGSs. This can greatly increase the sky-coverage of LTAO systems. Performance could also be improved for existing LTAO configurations when using the current furthest off-axis tip-tilt NGSs. In this case much of the wavefront error is from high altitude tip-tilt modes, which are not well sampled by the far off-axis tip-tilt NGS. If the LGS up-link tip-tilt determination is implemented, performance may be significantly increased. In future work we will continue to investigate the implementation of an AO configuration where ground layer tip-tilt is corrected using far off-axis NGS tip-tilt references. We will also examine the potential performance and sky coverage gains of existing LTAO with far off-axis tip-tilt. CONCLUSIONS We have demonstrated theoretically and in simulation the viability of a tomographic LGS reconstructor which determines tip-tilt information by accounting for the up-link path of each LGS through atmospheric turbulence. This is proven possible geometrically, and implemented using an LA based technique to utilise information based on correlations of adjacent sub-apertures. The algorithm shows good performance when correcting for high layer turbulence, close to that of LTAO with an on-axis tip-tilt NGS. The performance when low altitude turbulence is present is much degraded, though still an improvement over having no tip-tilt correction. Though the method may be of some use as an LGS-only AO reconstructor for low spatial order science cases, we mainly envisage it augmenting LTAO and MOAO by allowing further off-axis tip-tilt NGSs and hence improving AO corrected sky coverage. It also allows for greater LTAO performance with existing tip-tilt NGS separations. In future works we will expand on these themes and quantify the available improvements. One major conclusion from this work is that LGS up-link turbulence does not have to be simply ignored and discarded, as is currently the case. It is not irretrievably entangled with the down-link turbulence, and as such can provide useful information about the atmosphere above the telescope. The authors acknowledge financial support from STFC (grant code: ST/L00075X/1). The data used for the results in this publication are available on request to the author. APPENDIX A: FORMULATING THE RELATIONSHIP BETWEEN A SUB-APERTURE MEASUREMENT AND UP-LINK LGS JITTER To find the relationship between a sub-aperture measurement on WFS2, $s_t^2$, and the measured up-link tilt on LGS1, $s_l^1$, we consider Figure A1, which shows the effect of a small tilt at height $h$ on the LGS up-link; the resulting relation depends on the launch angle $\theta$ of the LGS and on the lateral displacement $x$, which is itself dependent on the tip-tilt the LGS passes through at height $h$. Only a finite number of LGS up-link offsets at a turbulent layer can be predicted by the above method. This is a result of the finite resolution of the WFS being used.
For a centre-launched tomographic LGS, only half of the WFS sub-apertures view the path of another LGS, hence only half can be used to predict up-link LGS motion. Turbulence which affects the path of an LGS can only be measured in vertical 'bins', where the measurement is the sum of the turbulence in the sub-aperture FOV within the vertical bin. The heights of these 'bins' can be calculated by considering the geometry of the system, again illustrated in Figure 2. Here $\theta^1$ and $\theta^2$ are the launch angles for LGS1 and LGS2 respectively, $H$ is the height of the LGS constellation, $D_s$ is the displacement between the centre of sub-aperture $n$ and the LGS launch position, which is assumed to be the centre of the telescope pupil, and $D$ is the diameter of the telescope pupil (B1). The angle $\theta_s$ of the sub-aperture's line of sight can be obtained by considering the displacement on the ground between the LGS position and the centre of the sub-aperture, $D_s + H\theta$:

$$\theta_s = \frac{D_s + H\theta}{H}. \qquad \text{(B2)}$$

$D_s$ depends on the sub-aperture of interest, $n$, where the position of the centre of a sub-aperture is $(n + 0.5)d$, and $d$ is the diameter of a sub-aperture. Finally, an expression for the height of the centre of the resolved height bin, $h_n$, can be obtained. This paper has been typeset from a TeX/LaTeX file prepared by the author.
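A hedged sketch of that closing expression, under the small-angle geometry just described and with a sign convention that is an assumption of this sketch: the line of sight of sub-aperture $n$ rises at angle $\theta_s$ from ground position $D_s$, while the other laser's up-link path rises from the pupil centre at its launch angle $\theta'$; equating their transverse positions at height $h_n$ gives

$$D_s + h_n \theta_s = h_n \theta' \;\Rightarrow\; h_n = \frac{D_s}{\theta' - \theta_s},$$

with $\theta_s$ given by (B2) and $D_s$ fixed by the sub-aperture index as above. The magnitude of $h_n$ is the quantity of interest; the overall sign depends on the adopted convention.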
How Large is Vietnam's Informal Economy? The purpose of this article is to estimate the size of the informal economy in Vietnam, describe its development from 1995 to 2015, and assess the country's potential tax loss from this activity. The MIMIC model indicates that the informal economy accounts for between 15 per cent and 27 per cent of GDP. The informal economy has grown sharply in Vietnam since 2007, while its size has decreased in other comparable nations. Potential tax revenue lost annually amounts to between 3.3 per cent and 5 per cent of gross domestic product. |
Ann Furedi, chief executive of the British Pregnancy Advisory Service which carries out 55,000 terminations a year, said: “I don’t see that there is a problem in law that needs to be resolved.”
The comments come as Britain’s most senior prosecutor prepares to release a paper justifying his decision not to prosecute two doctors over an abortion scandal exposed by The Daily Telegraph last year.
The decision last month by Keir Starmer, the Director of Public Prosecutions, prompted calls from MPs for the 1967 Abortion Act to be changed to make so-called “gender selection” abortion explicitly illegal.
But in an interview on the BBC’s Newsnight programme, Miss Furedi said: “I have run the British Pregnancy Advisory Service for 10 years and I have never heard of a pregnant woman walking into a clinic and I want an abortion because I don’t want a girl or I don’t want a boy.
“Women who come into the clinics have a whole complex set of reasons why they don’t want a pregnancy and the circumstances of the pregnancy may be part of that.
“The law at the moment works reasonably well – the law allows a doctor to recommend, to make a decision in good faith that a woman can have an abortion if he or she believes that it would be damaging to a woman’s mental health to continue her pregnancy.”
Two doctors were exposed by an undercover Daily Telegraph investigation offering to abort babies because of their gender.
But Miss Furedi suggested that the only women asking for abortions on sex grounds were journalists trying to catch out doctors for undercover investigations.
“In the 60,000 abortions we do every year my staff tells me that the only people who walk into a clinic asking for an abortion because the foetus is the wrong sex are journalists.”
Miss Furedi continued: “I don’t see that there is a problem in law that needs to be resolved except perhaps that there is a problem with newspaper journalists being set up to entrap doctors and create a climate where you have to wonder how doctors are expected to operate when they are genuinely concerned about whether the person sitting in front of them is trying to set them up.”
The comments were dismissed by Tory MP Sarah Wollaston, a practising GP, who said: “The wording of the Act is ambiguous and it would be a sensible thing to look at that wording and put it beyond all doubt that gender selection abortion is illegal.”
She said this needed “to be made absolutely crystal clear to doctors working in clinics.”
She added: “I am in favour of women having a choice about abortion but that choice does have limits and anything that condones this kind of misogynist practices should not be allowed and that should be explicit.”
David Burrowes, a Conservative MP and member of the Pro-Life all-party Parliamentary group, said: "On this issue the Abortion Act is not fit for purpose, with the safeguards in place being effectively meaningless because there is no enforcement when they are breached."
The Telegraph’s probe prompted a 19-month investigation. However, prosecutors at the Crown Prosecution Service told the doctors that they would not be charged, even though there was enough evidence, because the service did not consider a prosecution to be in the public interest.
Mr Starmer has not commented on his reasons not to prosecute and is due to issue his reasons in the coming days, before he stands down as DPP at the end of this month.
The CPS said the key reason for the decision was that the doctors would still be investigated by the General Medical Council.
Mr Starmer was criticised by his predecessor as DPP Lord Macdonald of River Glaven, who said the decision had been “very dubious”.
Lord Macdonald said this amounted to letting them “avoid criminal action” because of their professional status — undermining the basic principle that “everyone is equal under the law”. |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 22:39:49 2020
@author: <NAME>
"""
import numpy as np
from scipy import interpolate
import math
import matplotlib.pyplot as plt
#------------------------------ README ---------------------------------------#
# The objective of the code is to generate Spline trajectories for a given set
# of waypoints and waypoint poses
#
# Input Parameters
# 1) Waypoints (2D numpy array of (x_val, y_val) pairs)
# 2) angles (in radians)
# 3) pose_length (a hyperparameter for tuning the trajectory based on the pose angle)
# 4) degree (the degree of the spline function (range : 1 to 5))
# 5) sf (smoothness factor of the spline trajectory (enter a +ve integer))
# 6) graph_enable (1 - plot graph ; 0 - do not plot graph)
#
# Outputs
# array of 2 Arrays : out[0] - array of the x values
# out[1] - array of the y values
#
# function call : bspline (waypoints, angles, pose_length, degree, sf, graph_enable)
#-----------------------------------------------------------------------------#
# ----------------- defining the function for b spline ----------------#
def bspline(waypoints, angles, pose_length, degree, sf, graph_enable=0):
    # ---------- extracting the x and y values into different arrays ----#
    x = waypoints[:, 0]
    y = waypoints[:, 1]
    x_t = []
    y_t = []
    # ---------- generating the extra waypoints for pose ---------------#
    for i in range(len(x)):
        x_t.append(x[i] + pose_length * math.cos(angles[i] - math.pi))
        x_t.append(x[i])
        y_t.append(y[i] + pose_length * math.sin(angles[i] - math.pi))
        y_t.append(y[i])
    # ----------- generating spline trajectory -------------------------#
    # use the documented 'degree' and 'sf' parameters instead of the
    # previously hard-coded k=2, s=0, so the README options take effect
    tck, u = interpolate.splprep([x_t, y_t], k=degree, s=sf)
    u = np.linspace(0, 1, num=100, endpoint=True)
    out = interpolate.splev(u, tck)
    # ----------- plot graph if graph_enable = 1 -----------------------#
    if graph_enable == 1:
        plt.figure()
        plt.plot(x_t, y_t, 'ro', out[0], out[1], 'b')
        plt.plot(waypoints[:, 0], waypoints[:, 1], 'yo', markersize=7)
        plt.legend(['Appended Points', 'Interpolated B-spline',
                    'Waypoints'], loc='best')
        plt.axis([min(x) - 1, max(x) + 1, min(y) - 1, max(y) + 1])
        plt.title('B-Spline interpolation')
        plt.show()
    return out
def tuple_dist(p1, p2):
    # fixed: the closing parenthesis previously squared only p2[1],
    # not the whole (p1[1] - p2[1]) difference
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
#def get_waypoint_indices(trajectory,points,tol = 0.1):
# traj_list = zip(list(trajectory[0]),list(trajectory[1]))
# print("trajectory list:{}".format(traj_list))
# result = [0]
# point_ind = 1
# for i,v in enumerate(traj_list):
# if (i != 0):
# val = (points[point_ind][0],points[point_ind][1])
# print("current point:{}".format(traj_list[i]))
# print("next waypoint:{}".format(point_ind
# if ( tuple_dist(traj_list[i],val) < tol):
# result.append(i)
# return result
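# A working sketch of the helper commented out above; this is an assumption
# about its intent (find, for each waypoint, the index of the closest sample
# on the interpolated trajectory) rather than the original implementation.
def get_waypoint_indices(trajectory, points, tol=0.1):
    traj_list = list(zip(list(trajectory[0]), list(trajectory[1])))
    result = []
    for px, py in points:
        # index of the trajectory sample closest to this waypoint
        dists = [tuple_dist((px, py), p) for p in traj_list]
        best = min(range(len(dists)), key=dists.__getitem__)
        if dists[best] < tol:
            result.append(best)
    return result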
def main():
    # ---------------- waypoint values for testing purposes ONLY --------------#
    waypoints = np.array([(3, 1), (2.5, 4), (0, 5), (-2, 0),
                          (-3, 0), (-2.5, -4), (0, -1), (2.5, -4), (3, -1)])
    # ------------------- angle values for testing purposes ONLY --------------#
    angles = np.array([math.pi/4, math.pi/2, math.pi/4, math.pi/2, math.pi/4,
                       math.pi/4, math.pi/2, math.pi/4, math.pi/2, math.pi/4])
    # -------------------------------------------------------------------------#
    pose_length = 0.5  # pose_length value for testing purposes ONLY
    degree = 3         # degree value for testing purposes ONLY
    sf = 0             # smoothness factor value for testing purposes ONLY
    spline_vals = bspline(waypoints, angles, pose_length, degree, sf, 1)
    print (spline_vals)


if __name__ == '__main__':
    main()
|
def from_clu2elm_dict(self, clu2elm_dict):
    self.clu2elm_dict = {c: set(el) for c, el in clu2elm_dict.items()}
    self.clusters = sorted(list(self.clu2elm_dict.keys()))
    self.n_clusters = len(self.clusters)
    self.elm2clu_dict = self.to_elm2clu_dict()
    self.elements = sorted(list(self.elm2clu_dict.keys()))
    self.n_elements = len(self.elements)
    self.validate_clustering()
    self.clu_size_seq = self.find_clu_size_seq()
    self.is_disjoint = self.find_num_overlap() == 0
    return self
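# Hypothetical usage sketch --- 'Clustering' is an assumed name for the
# enclosing class, and the helper methods referenced above
# (to_elm2clu_dict, validate_clustering, find_clu_size_seq,
# find_num_overlap) are assumed to exist on it:
#
#   clu2elm = {0: [1, 2, 3], 1: [4, 5]}
#   clustering = Clustering().from_clu2elm_dict(clu2elm)
#   clustering.n_clusters   # 2
#   clustering.n_elements   # 5
#   clustering.is_disjoint  # True: no element appears in two clusters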
package pgxp.pto.entity;
import java.io.Serializable;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;
import javax.persistence.Basic;
import javax.persistence.Cacheable;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Size;
import javax.xml.bind.annotation.XmlRootElement;
import org.hibernate.annotations.GenericGenerator;
import org.hibernate.annotations.DynamicInsert;
import org.hibernate.annotations.DynamicUpdate;
import org.hibernate.search.annotations.Analyze;
import org.hibernate.search.annotations.Analyzer;
import org.hibernate.search.annotations.Field;
import org.hibernate.search.annotations.Index;
import org.hibernate.search.annotations.Indexed;
import org.hibernate.search.annotations.Store;
@Entity
@DynamicInsert
@DynamicUpdate
@Indexed
@Cacheable
@XmlRootElement
@Analyzer
@Table(name = "pagina")
@NamedQueries({
@NamedQuery(name = "Pagina.findAll", query = "SELECT m FROM Pagina m WHERE m.validado is false")})
public class Pagina implements Serializable {
@Id
@GeneratedValue(generator = "uuid")
@GenericGenerator(name = "uuid", strategy = "uuid2")
@Column(unique = true)
private UUID id;
@ManyToOne
@JoinColumn
private Arquivo arquivo;
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String persons = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String organizations = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String groups = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String places = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String events = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String artprods = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String abstracts = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String things = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String times = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String numerics = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String unknowns = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String consumerGoods = "";
@Column(length = 2560)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String other = "";
@Basic(optional = false)
@NotNull
@Size(min = 1, max = 128)
@Column(nullable = false, length = 128)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String description;
@Column(length = 204800)
@Field(index = Index.YES, analyze = Analyze.YES, store = Store.NO)
private String texto;
private Boolean validado;
public UUID getId() {
return id;
}
public void setId(UUID id) {
this.id = id;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Arquivo getArquivo() {
return arquivo;
}
public void setArquivo(Arquivo arquivo) {
this.arquivo = arquivo;
}
public String getTexto() {
return texto;
}
public void setTexto(String texto) {
this.texto = texto;
}
public String getPersons() {
return persons;
}
public void setPersons(String persons) {
this.persons = persons;
}
public String getOrganizations() {
return organizations;
}
public void setOrganizations(String organizations) {
this.organizations = organizations;
}
public String getGroups() {
return groups;
}
public void setGroups(String groups) {
this.groups = groups;
}
public String getPlaces() {
return places;
}
public void setPlaces(String places) {
this.places = places;
}
public String getEvents() {
return events;
}
public void setEvents(String events) {
this.events = events;
}
public String getArtprods() {
return artprods;
}
public void setArtprods(String artprods) {
this.artprods = artprods;
}
public String getAbstracts() {
return abstracts;
}
public void setAbstracts(String abstracts) {
this.abstracts = abstracts;
}
public String getThings() {
return things;
}
public void setThings(String things) {
this.things = things;
}
public String getTimes() {
return times;
}
public void setTimes(String times) {
this.times = times;
}
public String getNumerics() {
return numerics;
}
public void setNumerics(String numerics) {
this.numerics = numerics;
}
public String getUnknowns() {
return unknowns;
}
public void setUnknowns(String unknowns) {
this.unknowns = unknowns;
}
public String getConsumerGoods() {
return consumerGoods;
}
public void setConsumerGoods(String consumerGoods) {
this.consumerGoods = consumerGoods;
}
public String getOther() {
return other;
}
public void setOther(String other) {
this.other = other;
}
public Boolean getValidado() {
return validado;
}
public void setValidado(Boolean validado) {
this.validado = validado;
}
@Override
public int hashCode() {
int hash = 7;
hash = 17 * hash + Objects.hashCode(this.id);
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
final Pagina other = (Pagina) obj;
return Objects.equals(this.id, other.id);
}
@Override
public String toString() {
return "Pagina{" + "id=" + id + ", arquivo=" + arquivo + ", description=" + description + ", texto=" + texto + '}';
}
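/**
 * Populates the entity-type fields from a map of recognised token to
 * entity-type label (for example, the output of a named-entity
 * recogniser). Each token is prepended, semicolon-separated, to every
 * field whose type name the label contains.
 */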
public void carregar(Map<String, String> mapa) {
mapa.entrySet().forEach((entry) -> {
String key = entry.getKey();
String value = entry.getValue();
if (value.toLowerCase().contains("person")) {
persons = key + ";" + persons;
}
if (value.toLowerCase().contains("organization")) {
organizations = key + ";" + organizations;
}
if (value.toLowerCase().contains("group")) {
groups = key + ";" + groups;
}
if (value.toLowerCase().contains("event")) {
events = key + ";" + events;
}
if (value.toLowerCase().contains("place") || value.toLowerCase().contains("location")) {
places = key + ";" + places;
}
if (value.toLowerCase().contains("artprod") || value.toLowerCase().contains("work_of_art")) {
artprods = key + ";" + artprods;
}
if (value.toLowerCase().contains("abstract")) {
abstracts = key + ";" + abstracts;
}
if (value.toLowerCase().contains("thing") || value.toLowerCase().contains("other")) {
things = key + ";" + things;
}
if (value.toLowerCase().contains("time")) {
times = key + ";" + times;
}
if (value.toLowerCase().contains("numeric")) {
numerics = key + ";" + numerics;
}
if (value.toLowerCase().contains("consumer_good")) {
consumerGoods = key + ";" + consumerGoods;
}
if (value.toLowerCase().contains("unknown")) {
unknowns = key + ";" + unknowns;
}
});
}
}
|
To avoid fees tied to 'contamination' of loads, residents will have to drop off and sort materials at a Waterville site.
WINSLOW — Changes to the town’s recycling system are afoot. Starting Dec. 1, Winslow residents and businesses will no longer bring their recycling to the library, but to I Recycle in Waterville.
The Town Council unanimously approved the decision at its meeting last week.
Town Manager Mike Heavener said that mixed-use loads had been contaminated with nonrecyclable materials “very frequently” and that the system was no longer cost-effective.
“We’re paying more in an effort to try to recycle, but we can’t recycle because of what’s being thrown into the bins,” Heavener said.
The town’s recycling is ultimately transported to ecomaine in Portland for processing. Ecomaine charges extra fees if deliveries contain over 5 percent of nonrecyclable material or contamination. Global buyers of ecomaine’s recyclables – mostly China – once accepted contamination rates of up to 40 percent, but raised the market standards this year, which necessitated the fees.
Central Maine Disposal maintains and empties the recycling containers at the town’s library. Heavener said that if trash is spotted in the recycling bin, the entire bin gets hauled to the landfill instead of to ecomaine.
The town pays $4,040 a month to offer its current zero-sort recycling program. Using the I Recycle facilities will cost $1,200 a month, saving Winslow an estimated $34,080 annually.
At I Recycle, individuals must sort their recyclable materials by type and an attendant will be present to help advise and prevent trash from getting mixed in. I Recycle is located on Armory Road. Winslow residents will not need a sticker to use the facility.
“I had my wife do some undercover work. She took some stuff over just to see if it was user-friendly, and she liked it,” Heavener told a small audience at the council meeting.
I Recycle is open from Tuesday through Saturday from 8 a.m. to 3 p.m.
Councilor Jerry Quirion voiced concern that individuals with traditional work hours might have difficulty making it to the facility during operational hours.
“I’m not saying we shouldn’t use it. I’m just saying that’s one thing that’s going to be hard,” he said.
The dumpsters will be removed from the library lot by Nov. 30. |
T.J. Dillashaw and Urijah Faber are two of the best bantamweight fighters on the planet who just happen to be training partners with Team Alpha Male in Sacramento, Calif.
No problem, according to UFC president Dana White.
White said Wednesday on UFC Tonight that Dillashaw and Faber would be down to fight each other. Dillashaw is the current UFC bantamweight champion and Faber is one of the very best in the division, undefeated in non-title fights throughout his illustrious career.
"That's possible," White said. "Those guys have made it very clear that they will fight each other. We'll see what happens."
Dillashaw-Faber might happen one day, but it won't be what is next for either one of them. White said last week that former champion Renan Barao will get the next title shot against Dillashaw following injuries to top contenders Dominick Cruz and Raphael Assuncao. As for Faber, White said the UFC has someone else in mind for him.
"That's not the plan right now here today, but it's definitely a fight I'd like to see," White said of Faber-Dillashaw.
Both men have not been as unequivocal about an intra-Team Alpha Male fight as White is. Dillashaw considers Faber a mentor and would prefer not to fight him. Faber said Monday on The MMA Hour with Ariel Helwani that it is not his desire to compete against Dillashaw at this juncture, but left the door open for a potential bout.
Cruz was set to get the next shot at Dillashaw before getting injured and needing a third ACL surgery. That would have been far and away the most interesting fight in the 135-pound division. Assuncao has also earned his spot -- he beat Dillashaw in 2013 by split decision.
Faber could end up facing a rematch with Francisco Rivera. The two met at UFC 178 in September and Faber accidentally poked Rivera in the eye, directly leading to Faber's neck crank submission finish. Rivera is challenging the result with the Nevada Athletic Commission (NAC) and will have his case heard Monday. |
package statuscheck
import (
"context"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
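// BasicCheck converts a typed client.Object into its unstructured map
// representation and delegates the actual status evaluation to
// UnstructuredCheck from this package.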
func BasicCheck(ctx context.Context, object client.Object) (bool, error) {
data, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
if err != nil {
return false, errors.Wrap(err, "cannot transform to unstructured")
}
resource := &unstructured.Unstructured{}
resource.SetUnstructuredContent(data)
return UnstructuredCheck(ctx, resource)
}
|
//
// TopicDetailModel.h
// PS_OC_cnodejs
//
// Created by <NAME> on 2017/12/6.
// Copyright © 2017年 <NAME>. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "AuthorModel.h"
@interface ReplyModel : NSObject
@property (nonatomic, copy) NSString *ID;
@property (nonatomic, strong) AuthorModel *author;
@property (nonatomic, copy) NSString *content;
@property (nonatomic, strong) NSArray *ups;
@property (nonatomic, copy) NSString *create_at;
@property (nonatomic, copy) NSString *reply_id;
@property (nonatomic, assign) BOOL is_uped;
@end
@interface TopicDetailModel : NSObject
@property (nonatomic, copy) NSString *ID;
@property (nonatomic, copy) NSString *author_id;
@property (nonatomic, copy) NSString *tab;
@property (nonatomic, copy) NSString *content;
@property (nonatomic, copy) NSString *title;
@property (nonatomic, copy) NSString *last_reply_at;
@property (nonatomic, copy) NSString *good;
@property (nonatomic, copy) NSString *top;
@property (nonatomic, assign) NSInteger reply_count;
@property (nonatomic, assign) NSInteger visit_count;
@property (nonatomic, copy) NSString *create_at;
@property (nonatomic, strong) AuthorModel *author;
@property (nonatomic, strong) NSArray<ReplyModel *> *replies;
@end
|
package asp4j.lang;
import java.util.Arrays;
import java.util.Objects;
/**
*
* @author hbeck May 30, 2013
*/
public class TermImpl implements Term {
private final String symbol;
private final Term[] args;
public TermImpl(String functionSymbol, Term... args) {
this.symbol = functionSymbol;
if (args == null || args.length==0) {
this.args = null;
} else {
this.args = args;
}
}
@Override
public int arity() {
if (args == null) {
return 0;
}
return args.length;
}
@Override
public Term getArg(int idx) {
if (args == null) {
return null;
}
return args[idx];
}
@Override
public String symbol() {
return symbol;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(symbol);
if (args!=null && args.length>0) {
sb.append("(").append(args[0].toString());
for (int i=1;i<args.length;i++) {
sb.append(",").append(args[i].toString());
}
sb.append(")");
}
return sb.toString();
}
@Override
public int hashCode() {
int hash = 7;
hash = 53 * hash + Objects.hashCode(this.symbol);
hash = 53 * hash + Arrays.deepHashCode(this.args);
return hash;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (!(obj instanceof Term)) {
return false;
}
final Term other = (Term) obj;
if (!Objects.equals(this.symbol(), other.symbol())) {
return false;
}
if (this.arity()!=other.arity()) {
return false;
}
// use arity() rather than args.length: args may be null for 0-ary terms
for (int i = 0; i < this.arity(); i++) {
if (!this.getArg(i).equals(other.getArg(i))) {
return false;
}
}
return true;
}
}
|
package model
import (
pbMs "microservice"
)
type SnapshotGenGoroutinesGroup struct {
Conversations []pbMs.Conversation
UserId2ConvIds map[string][]string
GroupIds []string
ConversationIds []string
}
type GroupsAndUsers struct {
Groups []*pbMs.Group
MemberUsers []*pbMs.MemberUser
UserId2GroupIds map[string][]string
}
|
The power of modularity: the financial consequences of computer and code architecture In the 1970s, 1980s and 1990s, computer designers created a series of option-rich modular design architectures in both hardware and software. But a pure design is, strictly speaking, only an idea. Unless the design is reified (made real, brought into reality) it cannot affect the physical world and cannot be used or consumed. In order to affect the world and be valued, a design idea must first be completed and then made into something. Those actions in turn require human effort and human organization. Designs need the economy for several purposes: to implement design processes so that the designs can be completed; to carry out design instructions so that the designs can be realized; to transfer artifacts to users who value them; and to get designers and producers paid for their efforts. Designs influence the economy by creating perceptions of financial value. These perceptions in turn motivate investment and the creation of new economic institutions. Option-rich and modular architectures are extremely effective conduits of value, but their evolution may be difficult to control. In this talk, I will adopt the design's point of view in order to understand the economic institutions and mechanisms by which new designs and new artifacts come into existence.
Enhancement of visual cortex plasticity by dark exposure Dark rearing is known to delay the time course of the critical period for ocular dominance plasticity in the visual cortex. Recent evidence suggests that a period of dark exposure (DE) may enhance or reinstate plasticity even after closure of the critical period, mediated through modification of the excitatory-inhibitory balance and/or removal of structural brakes on plasticity. Here, we investigated the effects of a week of DE on the recovery from a month of monocular deprivation (MD) in the primary visual cortex (V1) of juvenile mice. Optical imaging of intrinsic signals revealed that ocular dominance in V1 of mice that had received DE recovered slightly more quickly than of mice that had not, but the level of recovery after three weeks was similar in both groups. Two-photon calcium imaging showed no significant difference in the recovery of orientation selectivity of excitatory neurons between the two groups. Parvalbumin-positive (PV+) interneurons exhibited a smaller ocular dominance shift during MD but again no differences in subsequent recovery. The percentage of PV+ cells surrounded by perineuronal nets, a structural brake on plasticity, was lower in mice with than those without DE. Overall, DE causes a modest enhancement of mouse visual cortex plasticity. This article is part of the themed issue 'Integrating Hebbian and homeostatic plasticity'.
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Cici,Li<<EMAIL>>
# <NAME> <<EMAIL>>
import os
import sys
import commands
import shutil
import glob
import fnmatch
import re
import json
import stat
from os.path import join, getsize
reload(sys)
sys.setdefaultencoding("utf-8")
script_path = os.path.realpath(__file__)
const_path = os.path.dirname(script_path)
tool_path = const_path + "/../tools/"
plugin_tool = const_path + "/../tools/cordova-plugin-crosswalk-webview/"
testapp_path = "/tmp/cordova-sampleapp/"
def setUp():
global ARCH, MODE, device, CROSSWALK_VERSION, CROSSWALK_BRANCH, PACK_TYPE
device = os.environ.get('DEVICE_ID')
if not device:
print (" get env error\n")
sys.exit(1)
f_arch = open(const_path + "/../arch.txt", 'r')
arch_tmp = f_arch.read()
if arch_tmp.strip("\n\t") == "arm":
ARCH = "arm"
elif arch_tmp.strip("\n\t") == "x86":
ARCH = "x86"
elif arch_tmp.strip("\n\t") == "arm64":
ARCH = "arm64"
elif arch_tmp.strip("\n\t") == "x86_64":
ARCH = "x86_64"
else:
        print (
            " get arch error, the content of arch.txt should be 'arm', 'x86', 'arm64' or 'x86_64'\n")
sys.exit(1)
f_arch.close()
f_mode = open(const_path + "/../mode.txt", 'r')
mode_tmp = f_mode.read()
if mode_tmp.strip("\n\t") == "shared":
MODE = "shared"
elif mode_tmp.strip("\n\t") == "embedded":
MODE = "embedded"
elif mode_tmp.strip("\n\t") == "lite":
MODE = "lite"
else:
print (
" get mode error, the content of mode.txt should be 'shared' or 'embedded' or 'lite'\n")
sys.exit(1)
f_mode.close()
f_pack_type = open(const_path + "/../pack-type", 'r')
pack_type_tmp = f_pack_type.read()
if pack_type_tmp.strip("\n\t") == "local":
PACK_TYPE = "local"
elif pack_type_tmp.strip("\n\t") == "npm":
PACK_TYPE = "npm"
else:
print (
" get pack type error, the content of pack-type should be 'local' or 'npm'\n")
sys.exit(1)
f_pack_type.close()
with open(const_path + "/../VERSION", "rt") as pkg_version_file:
pkg_version_raw = pkg_version_file.read()
pkg_version_file.close()
pkg_version_json = json.loads(pkg_version_raw)
CROSSWALK_VERSION = pkg_version_json["main-version"]
CROSSWALK_BRANCH = pkg_version_json["crosswalk-branch"]
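    # Illustrative layout of the VERSION file parsed above; the two key
    # names come from the code, the values here are examples only:
    #   {"main-version": "23.53.589.4", "crosswalk-branch": "beta"}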
def checkFileSize(file_path, min_size, max_size, self):
print "Check file size from %s --------------> START" % file_path
size = getsize(file_path)/1024/1024
print "this file is %s MB" % size
self.assertTrue(size > min_size)
self.assertTrue(size < max_size)
print "Check file size from %s --------------> OK" % file_path
def installWebviewPlugin(pkg_mode, self, multiple_apks = None):
print "Install Crosswalk WebView Plugin --------------> START"
pkg_mode_tmp = "core"
if pkg_mode == "shared":
pkg_mode_tmp = "shared"
xwalk_version = "%s" % CROSSWALK_VERSION
if CROSSWALK_BRANCH == "beta":
xwalk_version = "org.xwalk:xwalk_%s_library_beta:%s" % (pkg_mode_tmp, CROSSWALK_VERSION)
plugin_crosswalk_source = plugin_tool
if PACK_TYPE == "npm":
plugin_crosswalk_source = "cordova-plugin-crosswalk-webview"
plugin_install_cmd = "cordova plugin add %s --variable XWALK_MODE=\"%s\"" \
" --variable XWALK_VERSION=\"%s\"" % (plugin_crosswalk_source, pkg_mode, xwalk_version)
if multiple_apks is not None:
plugin_install_cmd = plugin_install_cmd + " --variable XWALKMULTIPLEAPK=\"%s\"" % multiple_apks
print plugin_install_cmd
pluginstatus = commands.getstatusoutput(plugin_install_cmd)
self.assertEquals(0, pluginstatus[0])
def create(appname, pkgname, mode, sourcecodepath, replace_index_list, self, extra_plugin = None, multiple_apks = None):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Create project %s ----------------> START" % appname
cmd = "cordova create %s %s %s" % (appname, pkgname, appname)
createstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, createstatus[0])
print "\nGenerate project %s ----------------> OK\n" % appname
result = commands.getstatusoutput("ls")
self.assertIn(appname, result[1])
project_root = os.path.join(tool_path, appname)
os.chdir(project_root)
if not replace_key(os.path.join(project_root, 'config.xml'),
'<widget android-activityName="%s"' % appname, '<widget'):
print "replace key '<widget' failed."
return False
if not replace_key(os.path.join(project_root, 'config.xml'),
' <allow-navigation href="*" />\n</widget>', '</widget>'):
print "replace key '</widget>' failed."
return False
print "Add android platforms to this project --------------> START"
cordova_platform_cmd = "cordova platform add android"
platformstatus = commands.getstatusoutput(cordova_platform_cmd)
self.assertEquals(0, platformstatus[0])
installWebviewPlugin(mode, self, multiple_apks)
if replace_index_list is not None and len(replace_index_list) >= 2:
index_file_path = os.path.join(project_root, "www", "index.html")
key = replace_index_list[0]
content = replace_index_list[1]
if not replace_key(index_file_path, content, key):
print "replace key: " + key + " failed."
return False
if sourcecodepath is not None:
do_remove(glob.glob(os.path.join(project_root, "www")))
do_copy(sourcecodepath, os.path.join(tool_path, appname, "www"))
def buildGoogleApp(appname, sourcecodepath, self):
os.chdir(tool_path)
if os.path.exists(os.path.join(tool_path, appname)):
print "Existing %s project, try to clean up..." % appname
do_remove(glob.glob(os.path.join(tool_path, appname)))
print "Build project %s ----------------> START" % appname
if sourcecodepath is None:
print "sourcecodepath can't be none"
return False
    if checkContains(appname, "CIRC"):
        cordova_app = os.path.join(tool_path, "circ")
        create_cmd = "cca create " + appname + " --link-to circ/package"
    elif checkContains(appname, "EH"):
        cordova_app = os.path.join(tool_path, "workshop-cca-eh")
        create_cmd = "cca create " + appname + " --link-to workshop-cca-eh/workshop/step4"
    else:
        print "Error: app name: %s should contain 'CIRC' or 'EH'" % appname
        return False
if os.path.exists(cordova_app):
do_remove(glob.glob(cordova_app))
if not do_copy(sourcecodepath, cordova_app):
return False
print create_cmd
buildstatus = commands.getstatusoutput(create_cmd)
self.assertEquals(0, buildstatus[0])
os.chdir(os.path.join(tool_path, appname))
print "Add android platforms to this project --------------> START"
add_android_cmd = "cca platform add android"
addstatus = commands.getstatusoutput(add_android_cmd)
self.assertEquals(0, addstatus[0])
print "uninstall webview default plugin from this project --------------> START"
plugin_uninstall_webview = "cordova plugin remove cordova-plugin-crosswalk-webview"
uninstallStatus = commands.getstatusoutput(plugin_uninstall_webview)
self.assertEquals(0, uninstallStatus[0])
installWebviewPlugin(MODE, self)
build_cmd = "cca build android"
if ARCH == "x86_64" or ARCH == "arm64":
build_cmd = "cca build android --xwalk64bit"
buildstatus = commands.getstatusoutput(build_cmd)
self.assertEquals(0, buildstatus[0])
checkApkExist(appname, self)
def build(appname, isDebug, self, isCopy=False, isMultipleApk=True):
os.chdir(os.path.join(tool_path, appname))
print "Build project %s ----------------> START" % appname
pack_arch_tmp = ARCH
if ARCH == "x86_64":
pack_arch_tmp = "x86 --xwalk64bit"
elif ARCH == "arm64":
pack_arch_tmp = "arm --xwalk64bit"
cmd_mode = ""
apk_name_mode = "debug"
if isDebug == 1:
print "build debug app"
cmd_mode = "--debug"
elif isDebug == -1:
print "build release app"
cmd_mode = "--release"
apk_name_mode = "release-unsigned"
cmd = "cordova build android %s -- --gradleArg=-PcdvBuildArch=%s" % (cmd_mode, pack_arch_tmp)
print cmd
buildstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, buildstatus[0])
print "\nBuild project %s ----------------> OK\n" % appname
checkApkExist(appname, self, isCopy, isMultipleApk, apk_name_mode)
def checkApkExist(appname, self, isCopy=False, isMultipleApk=True, apk_name_mode="debug"):
print "Check %s Apk Exist ----------------> START" % appname
outputs_dir = os.path.join(
tool_path,
appname,
"platforms",
"android",
"build",
"outputs",
"apk")
apk_name = "android-%s.apk" % apk_name_mode
if isMultipleApk == True and MODE == "embedded":
apk_name_arch = "armv7"
if ARCH != "arm":
apk_name_arch = ARCH
apk_name = "android-%s-%s.apk" % (apk_name_arch, apk_name_mode)
if not os.path.exists(os.path.join(outputs_dir, apk_name)):
apk_name = "%s-%s-%s.apk" % (appname, apk_name_arch, apk_name_mode)
else:
if not os.path.exists(os.path.join(outputs_dir, apk_name)):
apk_name = "%s-%s.apk" % (appname, apk_name_mode)
self.assertTrue(os.path.exists(os.path.join(outputs_dir, apk_name)))
if isCopy == True:
self.assertTrue(do_copy(os.path.join(outputs_dir, apk_name), os.path.join(testapp_path, "%s.apk" % appname)))
print "Check %s Apk Exist ----------------> OK" % appname
def run(appname, self):
os.chdir(os.path.join(tool_path, appname))
print "Run project %s ----------------> START" % appname
cmd = "cordova run android"
print cmd
runstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, runstatus[0])
self.assertIn("LAUNCH SUCCESS", runstatus[1])
print "\nRun project %s ----------------> OK\n" % appname
def app_install(appname, pkgname, self):
print "Install APK ----------------> START"
os.chdir(testapp_path)
apk_file = commands.getstatusoutput("ls | grep %s" % appname)[1]
    if apk_file == "":
        print "Error: No app: %s found in directory: %s" % (appname, testapp_path)
        self.fail("No apk found for app: %s" % appname)
cmd_inst = "adb -s " + device + " install -r " + apk_file
print cmd_inst
inststatus = commands.getstatusoutput(cmd_inst)
self.assertEquals(0, inststatus[0])
print "Install APK ----------------> OK"
self.assertTrue(check_app_installed(pkgname, self))
def checkContains(origin_str=None, key_str=None):
if origin_str.upper().find(key_str.upper()) >= 0:
return True
return False
def check_app_installed(pkgname, self):
print "Check if app is installed ----------------> START"
cmd_find = "adb -s " + device + \
" shell pm list packages |grep %s" % pkgname
pmstatus = commands.getstatusoutput(cmd_find)
if pmstatus[0] == 0:
print "App is installed."
return True
else:
print "App is uninstalled."
return False
def app_launch(appname, pkgname, self):
print "Launch APK ----------------> START"
cmd = "adb -s " + device + " shell am start -n %s/.%s" % (pkgname, appname)
launchstatus = commands.getstatusoutput(cmd)
self.assertNotIn("error", launchstatus[1].lower())
print "Launch APK ----------------> OK"
# Find whether the app has launched
def check_app_launched(pkgname, self):
cmd_acti = "adb -s " + device + " shell ps | grep %s" % pkgname
launched = commands.getstatusoutput(cmd_acti)
    if launched[0] != 0:
        print "App hasn't launched."
        return False
    else:
        print "App has launched."
        return True
def app_stop(pkgname, self):
print "Stop APK ----------------> START"
cmd = "adb -s " + device + " shell am force-stop %s" % pkgname
stopstatus = commands.getstatusoutput(cmd)
self.assertEquals(0, stopstatus[0])
print "Stop APK ----------------> OK"
def app_uninstall(pkgname, self):
print "Uninstall APK ----------------> START"
cmd_uninst = "adb -s " + device + " uninstall %s" % (pkgname)
unistatus = commands.getstatusoutput(cmd_uninst)
self.assertEquals(0, unistatus[0])
print "Uninstall APK ----------------> OK"
def replace_key(file_path, content, key):
print "Replace value ----------------> START"
f = open(file_path, "r")
f_content = f.read()
f.close()
pos = f_content.find(key)
if pos != -1:
f_content = f_content.replace(key, content)
f = open(file_path, "w")
f.write(f_content)
f.close()
else:
print "Fail to replace: %s with: %s in file: %s" % (content, key, file_path)
return False
print "Replace value ----------------> OK"
return True
def do_remove(target_file_list=None):
for i_file in target_file_list:
print "Removing %s" % i_file
try:
if os.path.isdir(i_file):
shutil.rmtree(i_file)
else:
os.remove(i_file)
except Exception as e:
print "Fail to remove file %s: %s" % (i_file, e)
return False
return True
def do_copy(src_item=None, dest_item=None):
print "Copying %s to %s" % (src_item, dest_item)
try:
if os.path.isdir(src_item):
overwriteCopy(src_item, dest_item, symlinks=True)
else:
if not os.path.exists(os.path.dirname(dest_item)):
print "Create non-existent dir: %s" % os.path.dirname(dest_item)
os.makedirs(os.path.dirname(dest_item))
shutil.copy2(src_item, dest_item)
except Exception as e:
print "Fail to copy file %s: %s" % (src_item, e)
return False
return True
def overwriteCopy(src, dest, symlinks=False, ignore=None):
if not os.path.exists(dest):
os.makedirs(dest)
shutil.copystat(src, dest)
sub_list = os.listdir(src)
if ignore:
excl = ignore(src, sub_list)
sub_list = [x for x in sub_list if x not in excl]
for i_sub in sub_list:
s_path = os.path.join(src, i_sub)
d_path = os.path.join(dest, i_sub)
if symlinks and os.path.islink(s_path):
if os.path.lexists(d_path):
os.remove(d_path)
os.symlink(os.readlink(s_path), d_path)
try:
s_path_s = os.lstat(s_path)
s_path_mode = stat.S_IMODE(s_path_s.st_mode)
os.lchmod(d_path, s_path_mode)
except Exception:
pass
elif os.path.isdir(s_path):
overwriteCopy(s_path, d_path, symlinks, ignore)
else:
shutil.copy2(s_path, d_path)
|
package datacite.oai.provider.util;
/*******************************************************************************
* Copyright (c) 2011 DataCite
*
* All rights reserved. This program and the accompanying
* materials are made available under the terms of the
* Apache License, Version 2.0 which accompanies
* this distribution, and is available at
* http://www.apache.org/licenses/LICENSE-2.0
*
*******************************************************************************/
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import org.apache.log4j.Logger;
import org.oclc.oai.server.verb.BadResumptionTokenException;
public class ResumptionToken {
private static final Logger logger = Logger.getLogger(ResumptionToken.class);
private final String delimiter = ",";
private final int tokenCount = 6; //we store 6 elements in the token
private String resumeId;
private String from;
private String until;
private String set;
private String prefix;
private int count;
public ResumptionToken(){
resumeId = "";
from = "";
until = "";
set = "";
prefix = "";
count = -1;
}
public ResumptionToken(String token) throws BadResumptionTokenException{
this();
this.parseToken(token);
}
//*************************
// Public methods
//*************************
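  // Example of the serialized form produced by toString() below, fields in
  // the order resumeId,from,until,count,set,prefix (illustrative values):
  //   "abc123,2010-01-01,2010-12-31,200,refSet,oai_dc"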
public String toString(){
return resumeId+delimiter+from+delimiter+until+delimiter+count+delimiter+set+delimiter+prefix;
}
public String getId(){
return resumeId;
}
public void setId(String resumeId){
this.resumeId = resumeId;
}
public String getFromDate(){
return from;
}
public void setFromDate(String from){
this.from = from;
}
public String getUntilDate(){
return until;
}
public void setUntilDate(String until){
this.until = until;
}
public int getRecordCount(){
return count;
}
public void setRecordCount(int count){
this.count = count;
}
public String getSet(){
return set;
}
public void setSet(String set){
this.set = set;
}
public String getPrefix(){
return prefix;
}
public void setPrefix(String prefix){
this.prefix = prefix;
}
//***************************
// Private Methods
//***************************
private void parseToken(String token) throws BadResumptionTokenException{
try {
StringTokenizer tokenizer = new StringTokenizer(token,this.delimiter);
if (tokenizer.countTokens()!=this.tokenCount){
logger.error("Found "+tokenizer.countTokens()+" tokens. Expecting "+this.tokenCount+" token="+token);
throw new BadResumptionTokenException();
}
resumeId = tokenizer.nextToken();
from = tokenizer.nextToken();
until = tokenizer.nextToken();
count = Integer.parseInt(tokenizer.nextToken());
set = tokenizer.nextToken();
prefix = tokenizer.nextToken();
}
catch (NoSuchElementException e) {
logger.error("Element missing from token."+e.getMessage()+" token="+token);
throw new BadResumptionTokenException();
}
catch (Exception e){
logger.error("Exception! "+e.getMessage()+ " token="+token);
throw new BadResumptionTokenException();
}
}
}
|
// SPDX-License-Identifier: GPL-2.0
/*
* KVM dirty page logging test
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#define _GNU_SOURCE /* for program_invocation_name */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#define VCPU_ID 1
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP 1024
/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N 32UL
/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL 10UL
/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le test_bit
# define set_bit_le set_bit
# define clear_bit_le clear_bit
# define test_and_set_bit_le test_and_set_bit
# define test_and_clear_bit_le test_and_clear_bit
#endif
#define TEST_DIRTY_RING_COUNT 65536
#define SIG_IPI SIGUSR1
/*
* Guest/Host shared variables. Ensure addr_gva2hva() and/or
* sync_global_to/from_guest() are used when accessing from
* the host. READ/WRITE_ONCE() should also be used with anything
* that may change.
*/
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;
/*
* Guest physical memory offset of the testing memory slot.
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
static uint64_t guest_test_phys_mem;
/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/*
 * Continuously write to the first 8 bytes of random pages within
* the testing memory region.
*/
static void guest_code(void)
{
uint64_t addr;
int i;
/*
* On s390x, all pages of a 1M segment are initially marked as dirty
* when a page of the segment is written to for the very first time.
	 * To compensate for this peculiarity in this test, we need to touch all
* pages during the first iteration.
*/
for (i = 0; i < guest_num_pages; i++) {
addr = guest_test_virt_mem + i * guest_page_size;
*(uint64_t *)addr = READ_ONCE(iteration);
}
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr = align_down(addr, host_page_size);
*(uint64_t *)addr = READ_ONCE(iteration);
}
/* Tell the host that we need more random numbers */
GUEST_SYNC(1);
}
}
/* Host variables */
static bool host_quit;
/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;
/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;
/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
* This is only set by main thread, and only cleared by vcpu thread. It is
* used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
* is the only place that we'll guarantee both "dirty bit" and "dirty data"
* will match. E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
* after setting dirty bit but before the data is written.
*/
static atomic_t vcpu_sync_stop_requested;
/*
* This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event. It should only be read after a sem_wait() on
 * sem_vcpu_stop and before the vcpu continues to run.
*/
static bool dirty_ring_vcpu_ring_full;
/*
* This is only used for verifying the dirty pages. Dirty ring has a very
* tricky case when the ring just got full, kvm will do userspace exit due to
* ring full. When that happens, the very last PFN is set but actually the
* data is not changed (the guest WRITE is not really applied yet), because
* we found that the dirty ring is full, refused to continue the vcpu, and
* recorded the dirty gfn with the old contents.
*
* For this specific case, it's safe to skip checking this pfn for this
* bit, because it's a redundant bit, and when the write happens later the bit
* will be set again. We use this variable to always keep track of the latest
* dirty gfn we've collected, so that if a mismatch of data found later in the
* verifying process, we let it pass.
*/
static uint64_t dirty_ring_last_page;
enum log_mode_t {
/* Only use KVM_GET_DIRTY_LOG for logging */
LOG_MODE_DIRTY_LOG = 0,
/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
LOG_MODE_CLEAR_LOG = 1,
/* Use dirty ring for logging */
LOG_MODE_DIRTY_RING = 2,
LOG_MODE_NUM,
/* Run all supported modes */
LOG_MODE_ALL = LOG_MODE_NUM,
};
/* Mode of logging to test. Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
static void vcpu_kick(void)
{
pthread_kill(vcpu_thread, SIG_IPI);
}
/*
* In our test we do signal tricks, let's use a better version of
* sem_wait to avoid signal interrupts
*/
static void sem_wait_until(sem_t *sem)
{
int ret;
do
ret = sem_wait(sem);
while (ret == -1 && errno == EINTR);
}
static bool clear_log_supported(void)
{
return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
struct kvm_enable_cap cap = {};
u64 manual_caps;
manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
cap.args[0] = manual_caps;
vm_enable_cap(vm, &cap);
}
static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages)
{
kvm_vm_get_dirty_log(vm, slot, bitmap);
}
static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages)
{
kvm_vm_get_dirty_log(vm, slot, bitmap);
kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}
/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
if (atomic_read(&vcpu_sync_stop_requested)) {
		/* It means the main thread is sleeping and waiting */
atomic_set(&vcpu_sync_stop_requested, false);
sem_post(&sem_vcpu_stop);
sem_wait_until(&sem_vcpu_cont);
}
}
static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
"vcpu run failed: errno=%d", err);
TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
vcpu_handle_sync_stop();
}
static bool dirty_ring_supported(void)
{
return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
}
static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
/*
* Switch to dirty ring mode after VM creation but before any
* of the vcpu creation.
*/
vm_enable_dirty_ring(vm, test_dirty_ring_count *
sizeof(struct kvm_dirty_gfn));
}
static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}
static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
gfn->flags = KVM_DIRTY_GFN_F_RESET;
}
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
int slot, void *bitmap,
uint32_t num_pages, uint32_t *fetch_index)
{
struct kvm_dirty_gfn *cur;
uint32_t count = 0;
while (true) {
cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
if (!dirty_gfn_is_dirtied(cur))
break;
TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
"%u != %u", cur->slot, slot);
TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
"0x%llx >= 0x%x", cur->offset, num_pages);
//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
set_bit_le(cur->offset, bitmap);
dirty_ring_last_page = cur->offset;
dirty_gfn_set_collected(cur);
(*fetch_index)++;
count++;
}
return count;
}
static void dirty_ring_wait_vcpu(void)
{
	/* This makes sure that the hardware PML cache is flushed */
vcpu_kick();
sem_wait_until(&sem_vcpu_stop);
}
static void dirty_ring_continue_vcpu(void)
{
pr_info("Notifying vcpu to continue\n");
sem_post(&sem_vcpu_cont);
}
static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages)
{
/* We only have one vcpu */
static uint32_t fetch_index = 0;
uint32_t count = 0, cleared;
bool continued_vcpu = false;
dirty_ring_wait_vcpu();
if (!dirty_ring_vcpu_ring_full) {
/*
* This is not a ring-full event, it's safe to allow
* vcpu to continue
*/
dirty_ring_continue_vcpu();
continued_vcpu = true;
}
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
slot, bitmap, num_pages, &fetch_index);
cleared = kvm_vm_reset_dirty_ring(vm);
/* Cleared pages should be the same as collected */
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);
if (!continued_vcpu) {
TEST_ASSERT(dirty_ring_vcpu_ring_full,
"Didn't continue vcpu even without ring full");
dirty_ring_continue_vcpu();
}
pr_info("Iteration %ld collected %u pages\n", iteration, count);
}
static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
/* A ucall-sync or ring-full event is allowed */
if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
/* We should allow this to continue */
;
} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
(ret == -1 && err == EINTR)) {
/* Update the flag first before pause */
WRITE_ONCE(dirty_ring_vcpu_ring_full,
run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
sem_post(&sem_vcpu_stop);
pr_info("vcpu stops because %s...\n",
dirty_ring_vcpu_ring_full ?
"dirty ring is full" : "vcpu is kicked out");
sem_wait_until(&sem_vcpu_cont);
pr_info("vcpu continues now.\n");
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
"exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
}
static void dirty_ring_before_vcpu_join(void)
{
/* Kick another round of vcpu just to make sure it will quit */
sem_post(&sem_vcpu_cont);
}
struct log_mode {
const char *name;
/* Return true if this mode is supported, otherwise false */
bool (*supported)(void);
/* Hook when the vm creation is done (before vcpu creation) */
void (*create_vm_done)(struct kvm_vm *vm);
/* Hook to collect the dirty pages into the bitmap provided */
void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages);
/* Hook to call when after each vcpu run */
void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
.collect_dirty_pages = dirty_log_collect_dirty_pages,
.after_vcpu_run = default_after_vcpu_run,
},
{
.name = "clear-log",
.supported = clear_log_supported,
.create_vm_done = clear_log_create_vm_done,
.collect_dirty_pages = clear_log_collect_dirty_pages,
.after_vcpu_run = default_after_vcpu_run,
},
{
.name = "dirty-ring",
.supported = dirty_ring_supported,
.create_vm_done = dirty_ring_create_vm_done,
.collect_dirty_pages = dirty_ring_collect_dirty_pages,
.before_vcpu_join = dirty_ring_before_vcpu_join,
.after_vcpu_run = dirty_ring_after_vcpu_run,
},
};
/*
 * We use this bitmap to track pages that should have their dirty
 * bit set in the _next_ iteration. For example, if we detect that a
 * page's value changed to the current iteration but at the same time
 * the page's bit is cleared in the latest bitmap, then the system
 * must report that write in the next get-dirty-log call.
*/
static unsigned long *host_bmap_track;
static void log_modes_dump(void)
{
int i;
printf("all");
for (i = 0; i < LOG_MODE_NUM; i++)
printf(", %s", log_modes[i].name);
printf("\n");
}
static bool log_mode_supported(void)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->supported)
return mode->supported();
return true;
}
static void log_mode_create_vm_done(struct kvm_vm *vm)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->create_vm_done)
mode->create_vm_done(vm);
}
static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
void *bitmap, uint32_t num_pages)
{
struct log_mode *mode = &log_modes[host_log_mode];
TEST_ASSERT(mode->collect_dirty_pages != NULL,
"collect_dirty_pages() is required for any log mode!");
mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
}
static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->after_vcpu_run)
mode->after_vcpu_run(vm, ret, err);
}
static void log_mode_before_vcpu_join(void)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->before_vcpu_join)
mode->before_vcpu_join();
}
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
for (i = 0; i < size; i++)
guest_array[i] = random();
}
static void *vcpu_worker(void *data)
{
int ret, vcpu_fd;
struct kvm_vm *vm = data;
uint64_t *guest_array;
uint64_t pages_count = 0;
struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
+ sizeof(sigset_t));
sigset_t *sigset = (sigset_t *) &sigmask->sigset;
vcpu_fd = vcpu_get_fd(vm, VCPU_ID);
/*
* SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
* ioctl to return with -EINTR, but it is still pending and we need
* to accept it with the sigwait.
*/
sigmask->len = 8;
pthread_sigmask(0, NULL, sigset);
sigdelset(sigset, SIG_IPI);
vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
sigemptyset(sigset);
sigaddset(sigset, SIG_IPI);
guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
while (!READ_ONCE(host_quit)) {
/* Clear any existing kick signals */
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
ret = ioctl(vcpu_fd, KVM_RUN, NULL);
if (ret == -1 && errno == EINTR) {
int sig = -1;
sigwait(sigset, &sig);
assert(sig == SIG_IPI);
}
log_mode_after_vcpu_run(vm, ret, errno);
}
pr_info("Dirtied %"PRIu64" pages\n", pages_count);
return NULL;
}
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
uint64_t step = vm_num_host_pages(mode, 1);
uint64_t page;
uint64_t *value_ptr;
uint64_t min_iter = 0;
for (page = 0; page < host_num_pages; page += step) {
value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
if (test_and_clear_bit_le(page, host_bmap_track)) {
host_track_next_count++;
TEST_ASSERT(test_bit_le(page, bmap),
"Page %"PRIu64" should have its dirty bit "
"set in this iteration but it is missing",
page);
}
if (test_and_clear_bit_le(page, bmap)) {
bool matched;
host_dirty_count++;
/*
* If the bit is set, the value written onto
* the corresponding page should be either the
* previous iteration number or the current one.
*/
matched = (*value_ptr == iteration ||
*value_ptr == iteration - 1);
if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
/*
* Short answer: this case is special
* only for dirty ring test where the
* page is the last page before a kvm
* dirty ring full in iteration N-2.
*
* Long answer: Assuming ring size R,
* one possible condition is:
*
* main thr vcpu thr
* -------- --------
* iter=1
* write 1 to page 0~(R-1)
* full, vmexit
* collect 0~(R-1)
* kick vcpu
* write 1 to (R-1)~(2R-2)
* full, vmexit
* iter=2
* collect (R-1)~(2R-2)
* kick vcpu
* write 1 to (2R-2)
* (NOTE!!! "1" cached in cpu reg)
* write 2 to (2R-1)~(3R-3)
* full, vmexit
* iter=3
* collect (2R-2)~(3R-3)
* (here if we read value on page
* "2R-2" is 1, while iter=3!!!)
*
* This however can only happen once per iteration.
*/
min_iter = iteration - 1;
continue;
} else if (page == dirty_ring_last_page) {
/*
* Please refer to comments in
* dirty_ring_last_page.
*/
continue;
}
}
TEST_ASSERT(matched,
"Set page %"PRIu64" value %"PRIu64
" incorrect (iteration=%"PRIu64")",
page, *value_ptr, iteration);
} else {
host_clear_count++;
/*
* If cleared, the value written can be any
			 * value smaller than or equal to the iteration
* number. Note that the value can be exactly
* (iteration-1) if that write can happen
* like this:
*
* (1) increase loop count to "iteration-1"
* (2) write to page P happens (with value
* "iteration-1")
* (3) get dirty log for "iteration-1"; we'll
* see that page P bit is set (dirtied),
* and not set the bit in host_bmap_track
* (4) increase loop count to "iteration"
* (which is current iteration)
* (5) get dirty log for current iteration,
* we'll see that page P is cleared, with
* value "iteration-1".
*/
TEST_ASSERT(*value_ptr <= iteration,
"Clear page %"PRIu64" value %"PRIu64
" incorrect (iteration=%"PRIu64")",
page, *value_ptr, iteration);
if (*value_ptr == iteration) {
/*
* This page is _just_ modified; it
				 * should report its dirtiness in the
* next run
*/
set_bit_le(page, host_bmap_track);
}
}
}
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
uint64_t extra_mem_pages, void *guest_code)
{
struct kvm_vm *vm;
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
vm_create_irqchip(vm);
#endif
log_mode_create_vm_done(vm);
vm_vcpu_add_default(vm, vcpuid, guest_code);
return vm;
}
#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K 12
struct test_params {
unsigned long iterations;
unsigned long interval;
uint64_t phys_offset;
};
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
struct kvm_vm *vm;
unsigned long *bmap;
if (!log_mode_supported()) {
print_skip("Log mode '%s' not supported",
log_modes[host_log_mode].name);
return;
}
/*
	 * We reserve page table memory for twice the extra dirty memory,
	 * which will definitely cover the original (1G+) test range. Here
* we do the calculation with 4K page size which is the
* smallest so the page number will be enough for all archs
* (e.g., 64K page size guest will need even less memory for
* page tables).
*/
vm = create_vm(mode, VCPU_ID,
2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
guest_code);
guest_page_size = vm_get_page_size(vm);
/*
* A little more than 1G of guest page sized pages. Cover the
* case where the size is not aligned to 64 pages.
*/
guest_num_pages = (1ul << (DIRTY_MEM_BITS -
vm_get_page_shift(vm))) + 3;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_page_size = getpagesize();
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
if (!p->phys_offset) {
guest_test_phys_mem = (vm_get_max_gfn(vm) -
guest_num_pages) * guest_page_size;
guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
} else {
guest_test_phys_mem = p->phys_offset;
}
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_zalloc(host_num_pages);
host_bmap_track = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
guest_test_phys_mem,
TEST_MEM_SLOT_INDEX,
guest_num_pages,
KVM_MEM_LOG_DIRTY_PAGES);
/* Do mapping for the dirty track memory slot */
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);
/* Cache the HVA pointer of the region */
host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
ucall_init(vm, NULL);
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
sync_global_to_guest(vm, guest_page_size);
sync_global_to_guest(vm, guest_test_virt_mem);
sync_global_to_guest(vm, guest_num_pages);
/* Start the iterations */
iteration = 1;
sync_global_to_guest(vm, iteration);
host_quit = false;
host_dirty_count = 0;
host_clear_count = 0;
host_track_next_count = 0;
pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
while (iteration < p->iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(p->interval * 1000);
log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
bmap, host_num_pages);
/*
* See vcpu_sync_stop_requested definition for details on why
* we need to stop vcpu when verify data.
*/
atomic_set(&vcpu_sync_stop_requested, true);
sem_wait_until(&sem_vcpu_stop);
/*
* NOTE: for dirty ring, it's possible that we didn't stop at
* GUEST_SYNC but instead we stopped because ring is full;
* that's okay too because ring full means we're only missing
* the flush of the last page, and since we handle the last
* page specially verification will succeed anyway.
*/
assert(host_log_mode == LOG_MODE_DIRTY_RING ||
atomic_read(&vcpu_sync_stop_requested) == false);
vm_dirty_log_verify(mode, bmap);
sem_post(&sem_vcpu_cont);
iteration++;
sync_global_to_guest(vm, iteration);
}
/* Tell the vcpu thread to quit */
host_quit = true;
log_mode_before_vcpu_join();
pthread_join(vcpu_thread, NULL);
pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
host_track_next_count);
free(bmap);
free(host_bmap_track);
ucall_uninit(vm);
kvm_vm_free(vm);
}
static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-i iterations] [-I interval] "
"[-p offset] [-m mode]\n", name);
puts("");
printf(" -c: specify dirty ring size, in number of entries\n");
printf(" (only useful for dirty-ring test; default: %"PRIu32")\n",
TEST_DIRTY_RING_COUNT);
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
TEST_HOST_LOOP_INTERVAL);
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
printf(" -M: specify the host logging mode "
"(default: run all log modes). Supported modes: \n\t");
log_modes_dump();
guest_modes_help();
puts("");
exit(0);
}
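/*
 * Illustrative invocations (option letters as parsed in main() below; the
 * binary name and values here are examples only):
 *
 *   ./dirty_log_test                              run all modes, defaults
 *   ./dirty_log_test -M dirty-ring -c 65536 -i 32 -I 10
 */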
int main(int argc, char *argv[])
{
struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.interval = TEST_HOST_LOOP_INTERVAL,
};
int opt, i;
sigset_t sigset;
sem_init(&sem_vcpu_stop, 0, 0);
sem_init(&sem_vcpu_cont, 0, 0);
guest_modes_append_default();
while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
switch (opt) {
case 'c':
test_dirty_ring_count = strtol(optarg, NULL, 10);
break;
case 'i':
p.iterations = strtol(optarg, NULL, 10);
break;
case 'I':
p.interval = strtol(optarg, NULL, 10);
break;
case 'p':
p.phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
guest_modes_cmdline(optarg);
break;
case 'M':
if (!strcmp(optarg, "all")) {
host_log_mode_option = LOG_MODE_ALL;
break;
}
for (i = 0; i < LOG_MODE_NUM; i++) {
if (!strcmp(optarg, log_modes[i].name)) {
pr_info("Setting log mode to: '%s'\n",
optarg);
host_log_mode_option = i;
break;
}
}
if (i == LOG_MODE_NUM) {
printf("Log mode '%s' invalid. Please choose "
"from: ", optarg);
log_modes_dump();
exit(1);
}
break;
case 'h':
default:
help(argv[0]);
break;
}
}
TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
p.iterations, p.interval);
srandom(time(0));
/* Ensure that vCPU threads start with SIG_IPI blocked. */
sigemptyset(&sigset);
sigaddset(&sigset, SIG_IPI);
pthread_sigmask(SIG_BLOCK, &sigset, NULL);
if (host_log_mode_option == LOG_MODE_ALL) {
/* Run each log mode */
for (i = 0; i < LOG_MODE_NUM; i++) {
pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
host_log_mode = i;
for_each_guest_mode(run_test, &p);
}
} else {
host_log_mode = host_log_mode_option;
for_each_guest_mode(run_test, &p);
}
return 0;
}
|
import {
APIButtonComponent,
APIButtonComponentWithCustomId,
APIButtonComponentWithURL,
} from "discord-api-types";
import { Client } from "../..";
import IInteractionRes from "../../interfaces/IInteractionRes";
import BaseComponent from "./BaseComponent";
import Interaction from "./Interaction";
export default class ButtonComponentBuilder extends BaseComponent {
public style: number;
public label: string;
public custom_id: string;
public type: number;
public url: string;
public client: Client;
public disabled: boolean;
constructor(data?: APIButtonComponent & APIButtonComponentWithCustomId) {
super(2);
if (data) {
this.type = 2;
this.style = data.style || 2;
this.label = data.label || null;
this.custom_id = data.custom_id || null;
this.disabled = data.disabled || false;
}
}
setStyle(style: number) {
this.style = style;
return this;
}
setLabel(label: string) {
this.label = label;
return this;
}
setCustomId(id: string) {
this.custom_id = id;
return this;
}
/**
* @deprecated Use interactionCreate event instead
* @todo Create a better version
*/
	onClick(cb: (interaction: Interaction) => any, disposeTime: number, client: Client) {
		// Keep one handler reference: removeListener() only detaches the exact
		// function instance that was registered, and the filter must invoke the
		// callback when the custom ids match, not when they differ.
		const handler = (i: Interaction) => {
			if (i.data.custom_id !== this.custom_id) {
				return;
			}
			cb(i);
		};
		client.on("interactionCreate", handler);
		setTimeout(() => {
			client.removeListener("interactionCreate", handler);
		}, disposeTime);
		return this;
	}
}
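// Minimal usage sketch (hypothetical values; assumes a Client instance is
// constructed elsewhere in the application):
//
//   const confirm = new ButtonComponentBuilder()
//     .setStyle(1)
//     .setLabel("Confirm")
//     .setCustomId("confirm-button");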
|
A Comparison of Health Risk and Costs Across Private Insurance Markets Supplemental Digital Content is available in the text. Background: The Patient Protection and Affordable Care Act (PPACA) established new parameters for the individual and small group health insurance markets starting in 2014. We study these 2 reformed markets by comparing health risk and costs to the more mature large employer market. Study Data: For 2017, claims data for all enrollees in PPACA-compliant individual and small group market plans as well as claims data from a sample of large employer market enrollees. Variables and Methodology: Risk scores and total (unadjusted and risk-adjusted) per-member-per-month (PMPM) allowed charges. Differences across markets in enrollment duration, age, and geographic distribution are addressed. The analysis is descriptive. Results: Compared with large employer market enrollees, health risk was 3% lower among PPACA small group market enrollees and 20% higher among PPACA individual market enrollees. After adjusting for differences in health risk, enrollees in the PPACA individual market had 27% lower PMPM allowed charges than enrollees in the large employer market and enrollees in the PPACA small group market had 12% lower PMPM allowed charges than enrollees in the large employer market. Conclusions: On average, the PPACA individual market enrolls sicker individuals than the 2 group markets. But this does not translate to higher health costs; in fact, enrollees in the PPACA individual market accumulate lower allowed charges than enrollees in the large employer market. Lower-income enrollees particularly accumulate lower allowed charges. Narrower networks and increased enrollee cost-sharing among individual market plans, though they may reduce the value of coverage, likely significantly reduce allowed charges. |
/*
* #!
* Ontopia Engine
* #-
* Copyright (C) 2001 - 2013 The Ontopia Project
* #-
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* !#
*/
package net.ontopia.topicmaps.xml;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Reader;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import net.ontopia.infoset.core.LocatorIF;
import net.ontopia.topicmaps.core.AssociationIF;
import net.ontopia.topicmaps.core.AssociationRoleIF;
import net.ontopia.topicmaps.core.ConstraintViolationException;
import net.ontopia.topicmaps.core.DataTypes;
import net.ontopia.topicmaps.core.OccurrenceIF;
import net.ontopia.topicmaps.core.ReifiableIF;
import net.ontopia.topicmaps.core.ScopedIF;
import net.ontopia.topicmaps.core.TopicIF;
import net.ontopia.topicmaps.core.TopicMapIF;
import net.ontopia.topicmaps.core.TopicMapWriterIF;
import net.ontopia.topicmaps.core.TopicNameIF;
import net.ontopia.topicmaps.core.TypedIF;
import net.ontopia.topicmaps.core.VariantNameIF;
import net.ontopia.topicmaps.core.index.ClassInstanceIndexIF;
import net.ontopia.topicmaps.utils.DuplicateSuppressionUtils;
import net.ontopia.topicmaps.utils.PSI;
import net.ontopia.utils.CompactHashSet;
import net.ontopia.utils.IteratorComparator;
import net.ontopia.utils.OntopiaRuntimeException;
import net.ontopia.xml.CanonicalPrinter;
import org.xml.sax.Attributes;
import org.xml.sax.helpers.AttributesImpl;
/**
* PUBLIC: A topic map writer that writes topic maps out to the format
* defined in ISO 13250-4: Topic Maps -- Canonicalization. The format
* is also known as Canonical XTM, but should not be confused with
* that defined by Ontopia. The current implementation conforms to the
* final standard (ISO 13250-4:2009).
*
* @since 2.0.3
*/
public class CanonicalXTMWriter implements TopicMapWriterIF {
private static final String EL_SUBJECTLOCATORS = "subjectLocators";
private static final String EL_SUBJECTIDENTIFIERS = "subjectIdentifiers";
private static final String EL_ITEMIDENTIFIERS = "itemIdentifiers";
private static final String EL_SCOPE = "scope";
private static final String EL_ROLE = "role";
private static final String EL_ASSOCIATION = "association";
private static final String EL_OCCURRENCE = "occurrence";
private static final String EL_VARIANT = "variant";
private static final String EL_NAME = "name";
private static final String EL_TOPIC = "topic";
private static final String EL_TOPICMAP = "topicMap";
private static final String AT_NUMBER = "number";
private CanonicalPrinter out;
private AttributesImpl EMPTY;
private Map tmIndex; // Maps TMObjectIFs to corresponding index within parent
private Map extraRoles; // TopicIF -> List<AssocRoleIFs for type-instance>
private String base;
private String strippedBase;
private TopicIF typeInstance; // possibly fake
private TopicIF instance; // possibly fake
private TopicIF type; // possibly fake
private static TopicMapIF tmForFake;
// should only be used by fake nested classes.
private final AssociationComparator associationComparator =
new AssociationComparator();
private final AssociationRoleComparator associationRoleComparator =
new AssociationRoleComparator();
private final NameComparator nameComparator =
new NameComparator();
private final OccurrenceComparator occurrenceComparator =
new OccurrenceComparator();
private final LocatorComparator locatorComparator =
new LocatorComparator();
private final TopicComparator topicComparator = new TopicComparator();
private final VariantComparator variantComparator =
new VariantComparator();
private Comparator indexComparator;
private Set startNewlineElem;
private static final char[] LINEBREAK = { (char) 0x0A };
public CanonicalXTMWriter(File file) throws IOException {
this.out = new CanonicalPrinter(new FileOutputStream(file), true);
init();
}
public CanonicalXTMWriter(OutputStream out) {
this.out = new CanonicalPrinter(out, false);
init();
}
/**
* PUBLIC: Creates a canonicalizer that writes to the given Writer
* in whatever encoding that Writer uses. <b>Warning:</b> Canonical
* XTM requires the output encoding to be UTF-8, so for correct
* results the given Writer <i>must</i> produce UTF-8. Using this
* method is <b>not</b> recommended.
*/
public CanonicalXTMWriter(Writer out) {
this.out = new CanonicalPrinter(out, false);
init();
}
private void init() {
this.EMPTY = new AttributesImpl();
this.startNewlineElem = new CompactHashSet(12);
this.extraRoles = new HashMap();
startNewlineElem.add(EL_TOPICMAP);
startNewlineElem.add(EL_TOPIC);
startNewlineElem.add(EL_NAME);
startNewlineElem.add(EL_VARIANT);
startNewlineElem.add(EL_OCCURRENCE);
startNewlineElem.add(EL_ASSOCIATION);
startNewlineElem.add(EL_ROLE);
startNewlineElem.add(EL_SCOPE);
startNewlineElem.add(EL_ITEMIDENTIFIERS);
startNewlineElem.add(EL_SUBJECTIDENTIFIERS);
startNewlineElem.add(EL_SUBJECTLOCATORS);
}
@Override
public void write(TopicMapIF topicmap) {
DuplicateSuppressionUtils.removeDuplicates(topicmap);
tmForFake = topicmap;
base = topicmap.getStore().getBaseAddress().getAddress();
strippedBase = stripLocator(base);
Object[] topics = getTopics(topicmap);
Object[] associations = getAssociations(topicmap);
recordIndexes(topics, associations);
out.startDocument();
startElement(EL_TOPICMAP, reifier(topicmap));
writeLocators(topicmap.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
for (int ix = 0; ix < topics.length; ix++)
write((TopicIF) topics[ix]);
for (int ix = 0; ix < associations.length; ix++)
write((AssociationIF) associations[ix], ix + 1);
endElement(EL_TOPICMAP);
out.endDocument();
}
/**
* Maps topics, topic names, variant names, occurrences, associations and
* association roles to an index value (given as a string).
   * The index value is the canonically ordered position within the parent object.
   * @param topics The topics (with names and occurrences) to make indexes for.
   * @param associations The associations (with roles) to make indexes for.
   * Postcondition: the parameters 'topics' and 'associations' are in canonical order.
*/
private void recordIndexes(Object[] topics, Object[] associations) {
// Create necessary objects
tmIndex = new HashMap();
indexComparator = new IndexComparator(tmIndex);
// Sort the topics in canonical order.
Arrays.sort(topics, topicComparator);
// Map each topic to its canonical position within the topic map.
for (int i = 0; i < topics.length; i++)
tmIndex.put(topics[i], new Integer(i + 1));
// Sort associations in canonical order
Arrays.sort(associations, associationComparator);
// For each association (in canonical order) of the topic map
for (int i = 0; i < associations.length; i++) {
AssociationIF assoc = (AssociationIF) associations[i];
      // Map the association to its position within the topic map.
tmIndex.put(assoc, new Integer(i + 1));
Object roles[] = assoc.getRoles().toArray();
Arrays.sort(roles, associationRoleComparator);
// For each association role (in canonical order) of the association
for (int j = 0; j < roles.length; j++)
        // Map the role to its position within the association.
tmIndex.put(roles[j], new Integer(j + 1));
}
}
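  // Writes a single topic. Roles played by the topic are emitted as
  // references of the form "association.<n>.role.<m>", using the canonical
  // positions recorded by recordIndexes() above (e.g. "association.3.role.1",
  // illustrative values).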
private void write(TopicIF topic) {
AttributesImpl attributes = new AttributesImpl();
attributes.addAttribute("", "", AT_NUMBER, null, "" + tmIndex.get(topic));
startElement(EL_TOPIC, attributes);
attributes.clear();
writeLocators(topic.getSubjectIdentifiers(), EL_SUBJECTIDENTIFIERS);
writeLocators(topic.getSubjectLocators(), EL_SUBJECTLOCATORS);
writeLocators(topic.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
Object[] names = topic.getTopicNames().toArray();
Arrays.sort(names, nameComparator);
for (int ix = 0; ix < names.length; ix++)
write((TopicNameIF) names[ix], ix + 1);
Object[] occurrences = makeFakes(topic.getOccurrences().toArray());
Arrays.sort(occurrences, occurrenceComparator);
for (int ix = 0; ix < occurrences.length; ix++)
write((OccurrenceIF) occurrences[ix], ix + 1);
Collection r = new ArrayList(topic.getRoles());
Collection extras = (Collection) extraRoles.get(topic);
if (extras != null)
r.addAll(extras);
Object[] roles = r.toArray();
Arrays.sort(roles, associationRoleComparator);
for (int ix = 0; ix < roles.length; ix++) {
AssociationRoleIF currentRole = (AssociationRoleIF)roles[ix];
AssociationIF currentAssociation = currentRole.getAssociation();
AttributesImpl roleAttributes = new AttributesImpl();
String refValue = "association."
+ tmIndex.get(currentAssociation)
+ ".role."
+ tmIndex.get(currentRole);
roleAttributes.addAttribute("", "", "ref", null, refValue);
startElement("rolePlayed", roleAttributes);
endElement("rolePlayed");
}
endElement(EL_TOPIC);
}
private void write(TopicNameIF basename, int number) {
AttributesImpl attributes = reifier(basename);
attributes.addAttribute("", "", AT_NUMBER, null, "" + number);
startElement(EL_NAME, attributes);
attributes.clear();
write(basename.getValue());
writeType(basename);
write(basename.getScope());
Object[] variants = basename.getVariants().toArray();
Arrays.sort(variants, variantComparator);
for (int ix = 0; ix < variants.length; ix++)
write((VariantNameIF) variants[ix], ix + 1);
writeLocators(basename.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
endElement(EL_NAME);
}
private void write(VariantNameIF variant, int number) {
AttributesImpl attributes = reifier(variant);
attributes.addAttribute("", "", AT_NUMBER, null, "" + number);
startElement(EL_VARIANT, attributes);
attributes.clear();
if (Objects.equals(variant.getDataType(), DataTypes.TYPE_URI)) {
LocatorIF locator = variant.getLocator();
if (locator != null)
write(normaliseLocatorReference(locator.getAddress()));
} else {
String value = variant.getValue();
if (value != null)
write(value);
}
write(variant.getDataType(), "datatype");
write(variant.getScope());
writeLocators(variant.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
endElement(EL_VARIANT);
}
private Object[] makeFakes(Object[] occs) {
for (int ix = 0; ix < occs.length; ix++) {
OccurrenceIF original = (OccurrenceIF) occs[ix];
occs[ix] = new FakeOccurrence(original);
}
return occs;
}
private void write(OccurrenceIF occurrence, int number) {
AttributesImpl attributes = reifier(occurrence);
attributes.addAttribute("", "", AT_NUMBER, null, "" + number);
startElement(EL_OCCURRENCE, attributes);
attributes.clear();
write(occurrence.getValue()); // normalized in FakeOccurrence below
write(occurrence.getDataType(), "datatype");
writeType(occurrence);
write(occurrence.getScope());
writeLocators(occurrence.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
endElement(EL_OCCURRENCE);
}
private void write(AssociationIF association, int number) {
AttributesImpl attributes = reifier(association);
attributes.addAttribute("", "", AT_NUMBER, null, "" + number);
startElement(EL_ASSOCIATION, attributes);
attributes.clear();
writeType(association);
Object[] roles = association.getRoles().toArray();
Arrays.sort(roles, associationRoleComparator);
for (int ix = 0; ix < roles.length; ix++)
write((AssociationRoleIF) roles[ix], ix + 1);
write(association.getScope());
writeLocators(association.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
endElement(EL_ASSOCIATION);
}
private void write(AssociationRoleIF role, int number) {
AttributesImpl attributes = reifier(role);
attributes.addAttribute("", "", AT_NUMBER, null, "" + number);
startElement(EL_ROLE, attributes);
attributes.clear();
startElement("player", topicRef(role.getPlayer()));
endElement("player");
writeType(role);
writeLocators(role.getItemIdentifiers(), EL_ITEMIDENTIFIERS);
endElement(EL_ROLE);
}
private void write(Collection scope) {
if (scope.isEmpty())
return;
startElement(EL_SCOPE, EMPTY);
Object[] topics = scope.toArray();
Arrays.sort(topics, indexComparator);
for (int ix = 0; ix < topics.length; ix++) {
startElement("scopingTopic", topicRef((TopicIF) topics[ix]));
endElement("scopingTopic");
}
endElement(EL_SCOPE);
}
private void writeType(TypedIF object) {
TopicIF topic = object.getType();
if (topic == null) {
throw new OntopiaRuntimeException("TypedIF had null type: " + object);
}
startElement("type", topicRef(topic));
endElement("type");
}
private void write(String value) {
if (value == null)
throw new OntopiaRuntimeException("Object had null value");
startElement("value", EMPTY);
out.characters(value.toCharArray(), 0, value.length());
endElement("value");
}
private void write(LocatorIF uri, String element) {
startElement(element, EMPTY);
String value = uri.getAddress();
out.characters(value.toCharArray(), 0, value.length());
endElement(element);
}
private void write(LocatorIF locator) {
String address = normaliseLocatorReference(locator.getAddress());
startElement("locator", EMPTY);
out.characters(address.toCharArray(), 0, address.length());
endElement("locator");
}
private void writeLocators(Collection locators, String elementName) {
Object locs[] = locators.toArray();
Arrays.sort(locs, locatorComparator);
if (locs.length > 0) {
startElement(elementName, EMPTY);
for (int i = 0; i < locs.length; i++) {
LocatorIF loc = (LocatorIF) locs[i];
write(loc);
}
endElement(elementName);
}
}
// --- XML handling
private void startElement(String element, Attributes atts) {
out.startElement("", "", element, atts);
if (startNewlineElem.contains(element))
writeln();
}
private void endElement(String element) {
out.endElement("", "", element);
writeln();
}
private void writeln() {
out.characters(LINEBREAK, 0, 1);
}
// --- Helpers
private AttributesImpl reifier(ReifiableIF reified) {
TopicIF reifier = reified.getReifier();
if (reifier == null)
return EMPTY;
AttributesImpl atts = new AttributesImpl();
atts.addAttribute("", "", "reifier", null,
String.valueOf(tmIndex.get(reifier)));
return atts;
}
/**
* @return an attribute list with a reference to a given topic.
*/
private Attributes topicRef(TopicIF topic) {
AttributesImpl atts = new AttributesImpl();
atts.addAttribute("", "", "topicref", null, "" + tmIndex.get(topic));
return atts;
}
/**
* @return an array with all the topics of a given topic map.
*/
private Object[] getTopics(TopicMapIF topicmap) {
Collection topics = new ArrayList(topicmap.getTopics().size() + 4);
topics.addAll(topicmap.getTopics());
// add the type-instance PSI topics, if necessary
ClassInstanceIndexIF index = (ClassInstanceIndexIF) topicmap
.getIndex("net.ontopia.topicmaps.core.index.ClassInstanceIndexIF");
if (!index.getTopicTypes().isEmpty()) {
typeInstance = getTopic(topicmap, PSI.getSAMTypeInstance(), topics);
instance = getTopic(topicmap, PSI.getSAMInstance(), topics);
type = getTopic(topicmap, PSI.getSAMType(), topics);
}
return topics.toArray();
}
/**
* @return an array with all the associations in a given topic map.
*/
private Object[] getAssociations(TopicMapIF topicmap) {
ClassInstanceIndexIF index = (ClassInstanceIndexIF) topicmap
.getIndex("net.ontopia.topicmaps.core.index.ClassInstanceIndexIF");
if (index.getTopicTypes().isEmpty())
return topicmap.getAssociations().toArray();
Collection assocs = new ArrayList(topicmap.getAssociations());
Iterator it = index.getTopicTypes().iterator();
while (it.hasNext()) {
TopicIF thetype = (TopicIF) it.next();
Iterator it2 = index.getTopics(thetype).iterator();
while (it2.hasNext()) {
TopicIF theinstance = (TopicIF) it2.next();
AssociationIF assoc = new FakeAssociation(thetype, theinstance);
recordRole(thetype, assoc.getRolesByType(type));
recordRole(theinstance, assoc.getRolesByType(instance));
assocs.add(assoc);
}
}
return assocs.toArray();
}
private void recordRole(TopicIF topic, Collection roles) {
Collection extra = (Collection) extraRoles.get(topic);
if (extra == null) {
extra = new ArrayList();
extraRoles.put(topic, extra);
}
extra.addAll(roles);
}
/**
* Return the topic with a given PSI. Create a fake topic for it if it
* does not exist, and add it to the topics collection.
*/
private TopicIF getTopic(TopicMapIF tm, LocatorIF indicator,
Collection topics) {
TopicIF topic = tm.getTopicBySubjectIdentifier(indicator);
if (topic == null) {
topic = new FakeTopic(indicator, tmForFake);
topics.add(topic);
}
return topic;
}
// --- Datatype normalisation
private String normalizeNumber(String number) {
if (number.indexOf('.') > -1)
return normalizeDecimal(number);
else
return normalizeInteger(number);
}
// NOTE: The following two methods are copied from tinyTiM, donated
// by <NAME>
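// Worked examples, traced from the two methods below (illustrative only,
// not quoted from the CXTM spec):
//   normalizeInteger("+007")   -> "7"
//   normalizeInteger("-0")     -> "0"
//   normalizeDecimal("-0.500") -> "-0.5"
//   normalizeDecimal("00.0")   -> "0.0"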
private static String normalizeInteger(final String value) {
final String val = value.trim();
int len = val.length();
if (len == 0)
throw new IllegalArgumentException("Illegal integer value: " + value);
int idx = 0;
boolean negative = false;
switch (val.charAt(idx)) {
case '-':
idx++;
negative = true;
break;
case '+':
idx++;
break;
}
// Skip leading zeros if any
while (idx < len && val.charAt(idx) == '0') {
idx++;
}
if (idx == len) {
return "0";
}
final String normalized = val.substring(idx);
len = normalized.length();
// Check if everything is a digit
for (int i = 0; i < len; i++) {
if (!Character.isDigit(normalized.charAt(i))) {
throw new IllegalArgumentException("Illegal integer value: " + value);
}
}
return negative && normalized.charAt(0) != '0' ? '-' + normalized : normalized;
}
private static String normalizeDecimal(final String value) {
final String val = value.trim();
int len = val.length();
if (len == 0)
throw new IllegalArgumentException("Illegal decimal value: " + value);
int idx = 0;
boolean negative = false;
switch (val.charAt(idx)) {
case '-':
idx++;
negative = true;
break;
case '+':
idx++;
break;
}
// Skip leading zeros if any
while (idx < len && val.charAt(idx) == '0') {
idx++;
}
if (idx == len) {
return "0.0";
}
StringBuilder normalized = new StringBuilder(len);
if (val.charAt(idx) == '.') {
normalized.append('0');
}
else {
while (idx < len && val.charAt(idx) != '.') {
char c = val.charAt(idx);
if (!Character.isDigit(c)) {
throw new IllegalArgumentException("Illegal decimal value: " + value);
}
normalized.append(c);
idx++;
}
}
normalized.append('.');
len--;
while (len >= idx && val.charAt(len) == '0') {
len--;
}
if (len <= idx) {
normalized.append('0');
if (normalized.charAt(0) == '0') {
return "0.0";
}
}
else {
// idx points to the '.', increment it
idx++;
while (idx <= len) {
char c = val.charAt(idx);
if (!Character.isDigit(c)) {
throw new IllegalArgumentException("Illegal decimal value: " + value);
}
normalized.append(c);
idx++;
}
}
return negative ? '-' + normalized.toString() : normalized.toString();
}
/**
* Normalise a given locator reference according to CXTM spec.
*/
private String normaliseLocatorReference(String reference) {
String retVal = reference.substring(longestCommonPath(reference,
strippedBase).length());
if (retVal.startsWith("/"))
retVal = retVal.substring(1);
return retVal;
}
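// Example: with a (hypothetical) strippedBase of "http://example.org/tm",
// "http://example.org/tm/doc.xtm" normalises to "doc.xtm".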
/**
* Returns the longest common path of two Strings.
* The longest common path is the longest common prefix that ends with a '/'.
* If one string is a prefix of the other, the longest common path is
* the shorter one (i.e. the one that is a prefix of the other).
*/
private String longestCommonPath(String source1, String source2) {
String retVal = "";
if (source1.startsWith(source2))
retVal = source2;
else if (source2.startsWith(source1))
retVal = source1;
else {
int i = 0;
int lastSlashIndex = -1;
while (i < source1.length() && i < source2.length()
&& source1.charAt(i) == source2.charAt(i)) {
if (source1.charAt(i) == '/')
lastSlashIndex = i;
i++;
}
if (lastSlashIndex == -1)
retVal = "";
else
retVal = source1.substring(0, lastSlashIndex);
}
return retVal;
}
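// Example: longestCommonPath("http://a/b/c", "http://a/b/d") returns
// "http://a/b".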
/**
* Remove the fragment- and query-parts of a given locatorString.
* @param locatorString The string from which to remove parts.
* @return The string with those parts removed.
*/
private String stripLocator(String locatorString) {
String retVal = locatorString;
int queryIndex = retVal.indexOf('?');
if (queryIndex > 0)
retVal = retVal.substring(0, queryIndex);
int hashIndex = retVal.indexOf('#');
if (hashIndex > 0)
retVal = retVal.substring(0, hashIndex);
return retVal;
}
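// Example: stripLocator("http://x/y?q=1#frag") returns "http://x/y".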
/**
* CanonicalXTMWriter has no additional properties.
* @param properties ignored, as this writer accepts no additional properties.
*/
@Override
public void setAdditionalProperties(Map<String, Object> properties) {
// no-op
}
// --- Comparators
abstract class AbstractComparator implements Comparator {
protected int compareLocatorSet(Collection c1, Collection c2) {
if (c1.size() < c2.size())
return -1;
if (c1.size() > c2.size())
return 1;
// INV: locator sets must now be of equal size.
Object locators1[] = c1.toArray();
Object locators2[] = c2.toArray();
Arrays.sort(locators1, locatorComparator);
Arrays.sort(locators2, locatorComparator);
for (int i = 0; i < locators1.length; i++) {
int currentCmp = compareLocator((LocatorIF) locators1[i],
(LocatorIF) locators2[i]);
if (currentCmp != 0)
return currentCmp;
}
return 0;
}
protected int compareTopicSet(Collection c1, Collection c2) {
int cmp = c1.size() - c2.size();
Iterator it1 = c1.iterator();
Iterator it2 = c2.iterator();
while (cmp == 0 && it1.hasNext()) {
TopicIF t1 = (TopicIF) it1.next();
TopicIF t2 = (TopicIF) it2.next();
cmp = compareTopic(t1, t2);
}
return cmp;
}
protected int compareSet(Collection c1, Collection c2, Comparator comp) {
int cmp = c1.size() - c2.size();
Iterator it1 = c1.iterator();
Iterator it2 = c2.iterator();
while (cmp == 0 && it1.hasNext()) {
cmp = comp.compare(it1.next(), it2.next());
}
return cmp;
}
protected int compareString(String s1, String s2) {
if ((s1 == null) && (s2 == null)) return 0;
if (s1 == null) return -1;
if (s2 == null) return 1;
return s1.compareTo(s2);
}
protected int compareLocator(LocatorIF l1, LocatorIF l2) {
if (Objects.equals(l1, l2)) return 0;
if (l1 == null) return -1;
if (l2 == null) return 1;
int cmp = normaliseLocatorReference(l1.getAddress())
.compareTo(normaliseLocatorReference(l2.getAddress()));
if (cmp == 0)
cmp = l1.getNotation().compareTo(l2.getNotation());
return cmp;
}
protected int compareTopic(TopicIF t1, TopicIF t2) {
if (Objects.equals(t1, t2)) return 0;
if (t1 == null) return -1;
if (t2 == null) return 1;
int pos1 = ((Integer)tmIndex.get(t1)).intValue();
int pos2 = ((Integer)tmIndex.get(t2)).intValue();
return pos1 - pos2;
}
protected int compareAssociation(AssociationIF a1, AssociationIF a2) {
if (Objects.equals(a1, a2)) return 0;
if (a1 == null) return -1;
if (a2 == null) return 1;
int pos1 = ((Integer)tmIndex.get(a1)).intValue();
int pos2 = ((Integer)tmIndex.get(a2)).intValue();
return pos1 - pos2;
}
}
public static class IndexComparator implements Comparator {
private Map indexMap;
public IndexComparator(Map indexMap) {
this.indexMap = indexMap;
}
@Override
public int compare(Object o1, Object o2) {
Integer index1 = (Integer)indexMap.get(o1);
Integer index2 = (Integer)indexMap.get(o2);
if (index1 == null) {
if (index2 == null)
return 0;
return -1;
}
if (index2 == null)
return 1;
return index1.intValue() - index2.intValue();
}
}
class LocatorComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
return compareLocator((LocatorIF)o1, (LocatorIF)o2);
}
}
class TopicComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
TopicIF t1 = (TopicIF) o1;
TopicIF t2 = (TopicIF) o2;
int cmp = compareLocatorSet(t1.getSubjectIdentifiers(),
t2.getSubjectIdentifiers());
if (cmp == 0)
cmp = compareLocatorSet(t1.getSubjectLocators(),
t2.getSubjectLocators());
if (cmp == 0)
cmp = compareLocatorSet(t1.getItemIdentifiers(),
t2.getItemIdentifiers());
return cmp;
}
}
class NameComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
TopicNameIF bn1 = (TopicNameIF) o1;
TopicNameIF bn2 = (TopicNameIF) o2;
int cmp = compareString(bn1.getValue(), bn2.getValue());
// FIXME: Compare by type here when we can!
if (cmp == 0)
cmp = compareTopicSet(bn1.getScope(), bn2.getScope());
return cmp;
}
}
class SetComparator extends AbstractComparator {
private Comparator elementComparator;
public SetComparator(Comparator elementComparator) {
this.elementComparator = elementComparator;
}
@Override
public int compare(Object o1, Object o2) {
Collection c1 = (Collection) o1;
Collection c2 = (Collection) o2;
int cmp = c1.size() - c2.size();
Iterator it1 = c1.iterator();
Iterator it2 = c2.iterator();
while (cmp == 0 && it1.hasNext()) {
cmp = elementComparator.compare(it1.next(), it2.next());
}
return cmp;
}
}
class VariantComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
VariantNameIF vn1 = (VariantNameIF) o1;
VariantNameIF vn2 = (VariantNameIF) o2;
int cmp = compareString(vn1.getValue(), vn2.getValue());
if (cmp == 0)
cmp = compareLocator(vn1.getLocator(), vn2.getLocator());
if (cmp == 0)
cmp = compareTopicSet(vn1.getScope(), vn2.getScope());
return cmp;
}
}
class OccurrenceComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
OccurrenceIF occ1 = (OccurrenceIF) o1;
OccurrenceIF occ2 = (OccurrenceIF) o2;
int cmp = compareString(occ1.getValue(), occ2.getValue());
if (cmp == 0)
cmp = compareLocator(occ1.getDataType(), occ2.getDataType());
if (cmp == 0)
cmp = compareTopic(occ1.getType(), occ2.getType());
if (cmp == 0)
cmp = compareTopicSet(occ1.getScope(), occ2.getScope());
return cmp;
}
}
class AssociationComparator extends AbstractComparator {
private Comparator collectionComparator;
public AssociationComparator() {
collectionComparator = new CollectionSizeFirstComparator(
new RoleInAssociationComparator());
}
@Override
public int compare(Object o1, Object o2) {
AssociationIF assoc1 = (AssociationIF) o1;
AssociationIF assoc2 = (AssociationIF) o2;
int cmp = compareTopic(assoc1.getType(), assoc2.getType());
if (cmp == 0)
cmp = collectionComparator.compare(assoc1.getRoles(),
assoc2.getRoles());
if (cmp == 0)
cmp = compareTopicSet(assoc1.getScope(), assoc2.getScope());
return cmp;
}
}
class RoleInAssociationComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
AssociationRoleIF role1 = (AssociationRoleIF) o1;
AssociationRoleIF role2 = (AssociationRoleIF) o2;
int cmp = compareTopic(role1.getPlayer(), role2.getPlayer());
if (cmp == 0)
cmp = compareTopic(role1.getType(), role2.getType());
// No need to compare the parent associations since this comparator only
// compares roles within one association.
return cmp;
}
}
class AssociationRoleComparator extends AbstractComparator {
@Override
public int compare(Object o1, Object o2) {
AssociationRoleIF role1 = (AssociationRoleIF) o1;
AssociationRoleIF role2 = (AssociationRoleIF) o2;
int cmp = compareTopic(role1.getPlayer(), role2.getPlayer());
if (cmp == 0)
cmp = compareTopic(role1.getType(), role2.getType());
if (cmp == 0)
cmp = compareAssociation(role1.getAssociation(),
role2.getAssociation());
return cmp;
}
}
/**
* Comparator for Collections.
* Collections of fewer elements are ordered before Collections with more.
* Collections of equal size are sorted, and then compared element-wise.
*/
class CollectionSizeFirstComparator extends CollectionComparator {
public CollectionSizeFirstComparator (Comparator elementComparator) {
super(elementComparator);
}
public CollectionSizeFirstComparator (Comparator betweenComparator,
Comparator withinComparator) {
super(betweenComparator, withinComparator);
}
@Override
public int compare(Object o1, Object o2) {
if (Objects.equals(o1, o2)) return 0;
Collection c1 = (Collection)o1;
Collection c2 = (Collection)o2;
// Order Collection in increasing order by size.
if (c1.size() > c2.size())
return 1;
if (c1.size() < c2.size())
return -1;
return super.compare(c1, c2);
}
}
/**
* Comparator for Collections that first compares the elements, and then
* the size of the collection.
* The Collections are sorted, and then compared element-wise.
* If one collection is a prefix of the other, the one with fewer elements is
* ordered first.
*/
private class CollectionComparator implements Comparator {
// Compares elements between two collections.
private Comparator betweenComp;
// Compares elements within a collection.
private Comparator withinComp;
private IteratorComparator iteratorComparator; // Compares elements.
/**
* Constructs a CollectionComparator that uses elementComparator for
* comparison.
* @param elementComparator Compares individual elements, both within a
* collection and for elements in two different collections.
*/
public CollectionComparator (Comparator elementComparator) {
this(elementComparator, elementComparator);
}
/**
* Constructs a CollectionComparator that uses withinComparator and
* betweenComparator for comparison.
* @param withinComparator Compares individual elements within a
* collection.
* @param betweenComparator Compares individual elements between two
* collections.
*/
public CollectionComparator (Comparator betweenComparator,
Comparator withinComparator) {
this.betweenComp = betweenComparator;
this.withinComp = withinComparator;
iteratorComparator = new IteratorComparator(betweenComp);
}
@Override
public int compare(Object o1, Object o2) {
if (o1 == o2) return 0;
Collection c1 = (Collection)o1;
Collection c2 = (Collection)o2;
return iteratorComparator.compare(sort(c1, withinComp).iterator(),
sort(c2, withinComp).iterator());
}
}
/**
* Sort the given collection with the given comparator. Note that the result
* is backed by a TreeSet, so elements that compare as equal collapse to one.
*/
private SortedSet sort(Collection collection, Comparator comparator) {
SortedSet sorted = new TreeSet(comparator);
Iterator it = collection.iterator();
while (it.hasNext()) {
sorted.add(it.next());
}
return sorted;
}
// --- Fake wrappers
abstract class FakeScoped implements ScopedIF {
@Override
public Collection getScope() {
return Collections.EMPTY_SET;
}
@Override
public void addTheme(TopicIF theme) { /* no-op */ }
@Override
public void removeTheme(TopicIF theme) { /* no-op */ }
@Override
public String getObjectId() {
return null;
}
@Override
public boolean isReadOnly() {
return true;
}
@Override
public TopicMapIF getTopicMap() {
return null;
}
@Override
public Collection getItemIdentifiers() {
return Collections.EMPTY_SET;
}
@Override
public void addItemIdentifier(LocatorIF source_locator) { /* no-op */ }
@Override
public void removeItemIdentifier(LocatorIF source_locator) { /* no-op */ }
public Collection getTypes() {
return Collections.EMPTY_SET;
}
public void addType(TopicIF type) { /* no-op */ }
public void removeType(TopicIF type) { /* no-op */ }
@Override
public void remove() {}
}
class FakeTopic extends FakeScoped implements TopicIF {
private Collection indicator;
private TopicMapIF tmForFake;
public FakeTopic(LocatorIF indicator, TopicMapIF tmForFake) {
this.tmForFake = tmForFake;
this.indicator = Collections.singleton(indicator);
}
@Override
public Collection getSubjectIdentifiers() {
return indicator;
}
@Override
public Collection getSubjectLocators() {
return Collections.EMPTY_SET;
}
@Override
public Collection getTopicNames() {
return Collections.EMPTY_SET;
}
@Override
public Collection<TopicNameIF> getTopicNamesByType(TopicIF type) {
return Collections.EMPTY_SET;
}
@Override
public Collection getOccurrences() {
return Collections.EMPTY_SET;
}
@Override
public Collection<OccurrenceIF> getOccurrencesByType(TopicIF type) {
return Collections.EMPTY_SET;
}
@Override
public Collection getRoles() {
return Collections.EMPTY_SET;
}
@Override
public Collection getRolesByType(TopicIF roletype) {
return Collections.EMPTY_SET;
}
@Override
public Collection getRolesByType(TopicIF roletype, TopicIF assoc_type) {
return Collections.EMPTY_SET;
}
@Override
public Collection<AssociationIF> getAssociations() {
return Collections.EMPTY_SET;
}
@Override
public Collection<AssociationIF> getAssociationsByType(TopicIF type) {
return Collections.EMPTY_SET;
}
@Override
public TopicMapIF getTopicMap() {
return tmForFake;
}
@Override
public void addSubjectLocator(LocatorIF subject_locator) throws ConstraintViolationException { /* no-op */ }
@Override
public void removeSubjectLocator(LocatorIF subject_locator) { /* no-op */ }
@Override
public void addSubjectIdentifier(LocatorIF subject_indicator) { /* no-op */ }
@Override
public void removeSubjectIdentifier(LocatorIF subject_indicator) { /* no-op */ }
@Override
public void merge(TopicIF topic) { /* no-op */ }
@Override
public ReifiableIF getReified() {
return null;
}
}
class FakeAssociation extends FakeScoped implements AssociationIF {
private Collection roles;
public FakeAssociation(TopicIF t, TopicIF i) {
roles = new ArrayList(2);
roles.add(new FakeRole(this, type, t));
roles.add(new FakeRole(this, instance, i));
}
@Override
public Collection getRoles() {
return roles;
}
@Override
public Collection getRoleTypes() {
return null;
}
@Override
public Collection getRolesByType(TopicIF roletype) {
Collection rolesoftype = new ArrayList();
Iterator it = roles.iterator();
while (it.hasNext()) {
AssociationRoleIF role = (AssociationRoleIF) it.next();
if (roletype.equals(role.getType()))
rolesoftype.add(role);
}
return rolesoftype;
}
@Override
public TopicIF getType() {
return typeInstance;
}
@Override
public void setType(TopicIF type) { /* no-op */ }
@Override
public TopicIF getReifier() {
return null;
}
@Override
public void setReifier(TopicIF reifier) { /* no-op */ }
}
class FakeRole extends FakeScoped implements AssociationRoleIF {
private AssociationIF association;
private TopicIF type;
private TopicIF player;
public FakeRole(AssociationIF association, TopicIF type, TopicIF player) {
this.association = association;
this.type = type;
this.player = player;
}
@Override
public TopicIF getType() {
return type;
}
@Override
public AssociationIF getAssociation() {
return association;
}
@Override
public TopicIF getPlayer() {
return player;
}
@Override
public void setType(TopicIF type) { /* no-op */ }
@Override
public void setPlayer(TopicIF player) { /* no-op */ }
@Override
public TopicIF getReifier() {
return null;
}
@Override
public void setReifier(TopicIF reifier) { /* no-op */ }
}
// we need this class because occurrences are output ordered by normalized
// value, and not by the literal value
class FakeOccurrence implements OccurrenceIF {
private OccurrenceIF occ;
private String value;
public FakeOccurrence(OccurrenceIF occ) {
this.occ = occ;
LocatorIF datatype = occ.getDataType();
if (datatype.equals(DataTypes.TYPE_URI)) {
LocatorIF locator = occ.getLocator();
this.value = normaliseLocatorReference(locator.getAddress());
} else if (datatype.equals(DataTypes.TYPE_INTEGER) ||
datatype.equals(DataTypes.TYPE_DECIMAL))
this.value = normalizeNumber(occ.getValue());
else
this.value = occ.getValue();
}
@Override
public TopicIF getTopic() {
return occ.getTopic();
}
@Override
public LocatorIF getDataType() {
return occ.getDataType();
}
@Override
public String getValue() {
return value;
}
@Override
public Reader getReader() {
throw new UnsupportedOperationException();
}
@Override
public void setValue(String value) {
throw new UnsupportedOperationException();
}
@Override
public LocatorIF getLocator() {
throw new UnsupportedOperationException();
}
@Override
public void setLocator(LocatorIF locator) {
throw new UnsupportedOperationException();
}
@Override
public void setValue(String value, LocatorIF datatype) {
throw new UnsupportedOperationException();
}
@Override
public void setReader(Reader value, long length, LocatorIF datatype) {
throw new UnsupportedOperationException();
}
@Override
public long getLength() {
throw new UnsupportedOperationException();
}
@Override
public TopicIF getType() {
return occ.getType();
}
@Override
public void setType(TopicIF type) {
throw new UnsupportedOperationException();
}
@Override
public TopicIF getReifier() {
return occ.getReifier();
}
@Override
public void setReifier(TopicIF reifier) {
throw new UnsupportedOperationException();
}
@Override
public Collection getScope() {
return occ.getScope();
}
@Override
public void addTheme(TopicIF theme) {
throw new UnsupportedOperationException();
}
@Override
public void removeTheme(TopicIF theme) {
throw new UnsupportedOperationException();
}
@Override
public String getObjectId() {
return occ.getObjectId();
}
@Override
public boolean isReadOnly() {
return true;
}
@Override
public TopicMapIF getTopicMap() {
return occ.getTopicMap();
}
@Override
public Collection getItemIdentifiers() {
return occ.getItemIdentifiers();
}
@Override
public void addItemIdentifier(LocatorIF source_locator) {
throw new UnsupportedOperationException();
}
@Override
public void removeItemIdentifier(LocatorIF source_locator) {
throw new UnsupportedOperationException();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
}
|
Neuroscience: The New English Major? Once strictly the domain of medical and graduate education, neuroscience has made its way into the undergraduate curriculum with over 230 colleges and universities now offering a bachelor's degree in neuroscience. The disciplinary focus on the brain teaches students to apply science to the understanding of human behavior, human interactions, sensation, emotions, and decision making. In this article, we encourage new and existing undergraduate neuroscience programs to envision neuroscience as a broad discipline with the potential to develop competencies suitable for a variety of careers that reach well beyond research and medicine. This article describes our philosophy and illustrates a broad-based undergraduate degree in neuroscience implemented at a major state university, Virginia Tech. We highlight the fact that the research-centered Experimental Neuroscience major is the least popular of our four distinct majors, which underscores our philosophy that undergraduate neuroscience can cater to a different audience than traditionally thought. |
/*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.dmn.client.session;
import java.util.HashMap;
import java.util.Map;
import com.google.gwtmockito.GwtMockitoTestRunner;
import org.appformer.client.stateControl.registry.DefaultRegistry;
import org.appformer.client.stateControl.registry.Registry;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.kie.workbench.common.dmn.client.canvas.controls.inlineeditor.DMNCanvasInlineTextEditorControl;
import org.kie.workbench.common.dmn.client.canvas.controls.resize.DecisionServiceMoveDividerControl;
import org.kie.workbench.common.stunner.core.client.canvas.AbstractCanvasHandler;
import org.kie.workbench.common.stunner.core.client.canvas.controls.CanvasControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.ClipboardControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.ContainmentAcceptorControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.DockingAcceptorControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.LocationControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.ResizeControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.ToolboxControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.builder.EdgeBuilderControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.builder.ElementBuilderControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.builder.NodeBuilderControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.builder.impl.Observer;
import org.kie.workbench.common.stunner.core.client.canvas.controls.connection.ConnectionAcceptorControl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.keyboard.AbstractCanvasShortcutsControlImpl;
import org.kie.workbench.common.stunner.core.client.canvas.controls.keyboard.KeyboardControl;
import org.kie.workbench.common.stunner.core.client.canvas.event.registration.RegisterChangedEvent;
import org.kie.workbench.common.stunner.core.client.command.CanvasViolation;
import org.kie.workbench.common.stunner.core.client.command.SessionCommandManager;
import org.kie.workbench.common.stunner.core.command.Command;
import org.mockito.Mock;
import org.uberfire.mocks.EventSourceMock;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.verify;
@RunWith(GwtMockitoTestRunner.class)
public class DMNEditorSessionTest extends BaseDMNSessionTest<DMNEditorSession> {
@Mock
private SessionCommandManager<AbstractCanvasHandler> sessionCommandManager;
@Mock
private Registry<Command<AbstractCanvasHandler, CanvasViolation>> commandRegistry;
@Mock
private DefaultRegistry<Command<AbstractCanvasHandler, CanvasViolation>> redoCommandRegistry;
private EventSourceMock<RegisterChangedEvent> registerChangedEvent = new EventSourceMock<>();
@Mock
private ResizeControl resizeControl;
@Mock
private DecisionServiceMoveDividerControl decisionServiceMoveDividerControl;
@Mock
private ConnectionAcceptorControl connectionAcceptorControl;
@Mock
private ContainmentAcceptorControl containmentAcceptorControl;
@Mock
private DockingAcceptorControl dockingAcceptorControl;
@Mock
private DMNCanvasInlineTextEditorControl canvasInlineTextEditorControl;
@Mock
private LocationControl locationControl;
@Mock
private ToolboxControl toolboxControl;
@Mock
private ElementBuilderControl elementBuilderControl;
@Mock
private NodeBuilderControl nodeBuilderControl;
@Mock
private EdgeBuilderControl edgeBuilderControl;
@Mock
private KeyboardControl keyboardControl;
@Mock
private ClipboardControl clipboardControl;
@Mock
private AbstractCanvasShortcutsControlImpl canvasShortcutsControl;
@Before
@Override
@SuppressWarnings("unchecked")
public void setup() {
super.setup();
}
@Override
protected DMNEditorSession getSession() {
final DMNEditorSession session = new DMNEditorSession(managedSession,
canvasCommandManager,
sessionCommandManager,
commandRegistry,
redoCommandRegistry,
registerChangedEvent);
session.constructInstance();
return session;
}
@Override
protected Map<CanvasControl, Class> getCanvasControlRegistrations() {
final HashMap<CanvasControl, Class> canvasControls = new HashMap<>();
canvasControls.put(keyboardControl, KeyboardControl.class);
canvasControls.put(clipboardControl, ClipboardControl.class);
return canvasControls;
}
@Override
protected Map<CanvasControl, Class> getCanvasHandlerControlRegistrations() {
final HashMap<CanvasControl, Class> canvasHandlerControls = new HashMap<>();
canvasHandlerControls.put(resizeControl, ResizeControl.class);
canvasHandlerControls.put(decisionServiceMoveDividerControl, DecisionServiceMoveDividerControl.class);
canvasHandlerControls.put(connectionAcceptorControl, ConnectionAcceptorControl.class);
canvasHandlerControls.put(containmentAcceptorControl, ContainmentAcceptorControl.class);
canvasHandlerControls.put(dockingAcceptorControl, DockingAcceptorControl.class);
canvasHandlerControls.put(canvasInlineTextEditorControl, DMNCanvasInlineTextEditorControl.class);
canvasHandlerControls.put(locationControl, LocationControl.class);
canvasHandlerControls.put(toolboxControl, ToolboxControl.class);
canvasHandlerControls.put(elementBuilderControl, ElementBuilderControl.class);
canvasHandlerControls.put(nodeBuilderControl, NodeBuilderControl.class);
canvasHandlerControls.put(edgeBuilderControl, EdgeBuilderControl.class);
canvasHandlerControls.put(canvasShortcutsControl, DMNCanvasShortcutsControl.class);
return canvasHandlerControls;
}
@Override
protected void assertInitQualifiers() {
super.assertInitQualifiers();
verify(managedSession).registerCanvasHandlerControl(eq(DMNCanvasInlineTextEditorControl.class));
verify(managedSession).registerCanvasHandlerControl(eq(ElementBuilderControl.class), eq(Observer.class));
verify(managedSession).registerCanvasHandlerControl(eq(DMNCanvasShortcutsControl.class));
}
}
|
<filename>test/core/test_base.py
from tempfile import TemporaryDirectory
from pathlib import Path
from numpy import array, allclose
from skdh.base import BaseProcess
class TestBaseProcess:
def test_str_repr(self):
bp = BaseProcess(kw1=1, kw2="2")
assert str(bp) == "BaseProcess"
assert repr(bp) == "BaseProcess(kw1=1, kw2='2')"
def test_eq(self, testprocess, testprocess2):
tp1_a = testprocess(kw1=1)
tp1_b = testprocess(kw1=2)
tp2_a = testprocess2(kwa=1)
tp2_b = testprocess2(kwa=2)
assert tp1_a == tp1_a
assert all([tp1_a != i for i in [tp1_b, tp2_a, tp2_b]])
assert tp1_b == tp1_b
assert all([tp1_b != i for i in [tp1_a, tp2_a, tp2_b]])
assert tp2_a == tp2_a
assert all([tp2_a != i for i in [tp1_a, tp1_b, tp2_b]])
assert tp2_b == tp2_b
assert all([tp2_b != i for i in [tp1_a, tp1_b, tp2_a]])
@staticmethod
def setup_lgr():
class Lgr:
msgs = []
def info(self, msg):
self.msgs.append(msg)
return Lgr()
def test__check_if_idx_none(self):
bp = BaseProcess()
bp.logger = self.setup_lgr() # overwrite the logger
x = array([[0, 10], [15, 20]])
s, e = bp._check_if_idx_none(x, "none msg", None, None)
assert allclose(s, [0, 15])
assert allclose(e, [10, 20])
s, e = bp._check_if_idx_none(None, "none msg", 0, 10)
sn, en = bp._check_if_idx_none(None, "none msg", None, 10)
assert "none msg" in bp.logger.msgs
assert s == 0
assert e == 10
assert sn is None
assert en is None
def test_predict(self):
bp = BaseProcess()
bp.logger = self.setup_lgr()
bp.predict(
expect_days=True, expect_wear=True, accel=array([[1, 2, 3], [4, 5, 6]])
)
assert bp._file_name == ""
assert (
"Entering BaseProcess processing with call BaseProcess()" in bp.logger.msgs
)
assert (
"[BaseProcess] Day indices [(-1, -1)] not found. No day split used."
in bp.logger.msgs
)
def test_save_results(self):
bp = BaseProcess()
bp.predict(expect_wear=False, expect_days=False, file="test_file.infile")
with TemporaryDirectory() as tdir:
tdir = Path(tdir)
fname = tdir / "{file}__{name}.out"
bp.save_results({"a": [1, 2, 3]}, str(fname))
files = [i.name for i in tdir.glob("*")]
assert "test_file__BaseProcess.out" in files
|
Headliner shows are held in the Kodak Hall at Eastman Theatre (60 Gibbs Street). These are ticketed shows; a club pass doesn't work here. Tickets, which range from $40 to $105 (plus service charges), are available through the Jazz Festival website (rochesterjazz.com) or by calling 585-454-2060. Note that some headliner shows are already sold out, including Steve Martin & The Steep Canyon Rangers featuring Edie Brickell, and Earth, Wind and Fire. Kodak Hall also hosts the free Gerry Niewood Jazz Scholarships Performance on Monday, June 23.
State Street Bar & Grill (Rochester Plaza Hotel, 70 State St.): Free jazz jams starting nightly at 10:30 p.m.
The Xerox Rochester International Jazz Festival's official website is rochesterjazz.com. You can also visit the Jazz Ticket Shop & Info Center on the corner of East Avenue and Gibbs Street, open June 17, 10 a.m. to 6 p.m. and daily during the festival, 10 a.m. to 11 p.m. The XRIJF also has a mobile-phone app for both Apple and Android phones. It includes a full schedule, venue and artist lists, maps, and an option to buy tickets online. Find the download button on the Jazz Fest website's homepage. |
<filename>src/main/java/com/mercadopago/resources/Disbursement.java
package com.mercadopago.resources;
import com.mercadopago.core.MPBase;
import com.mercadopago.core.MPRequestOptions;
import com.mercadopago.core.annotations.rest.POST;
import com.mercadopago.exceptions.MPException;
import com.mercadopago.resources.datastructures.advancedpayment.AdditionalInfo;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
public class Disbursement extends MPBase {
private Integer id;
private Float amount;
private String externalReference;
private String collectorId;
private Float applicationFee;
private Float moneyReleaseDays;
private AdditionalInfo additionalInfo;
private Date moneyReleaseDate = null;
public Date getMoneyReleaseDate() {
return moneyReleaseDate;
}
public Disbursement setMoneyReleaseDate(Date moneyReleaseDate) {
this.moneyReleaseDate = moneyReleaseDate;
return this;
}
public Integer getId() {
return id;
}
public Disbursement setId(Integer id) {
this.id = id;
return this;
}
public Float getAmount() {
return amount;
}
public Disbursement setAmount(Float amount) {
this.amount = amount;
return this;
}
public String getExternalReference() {
return externalReference;
}
public Disbursement setExternalReference(String externalReference) {
this.externalReference = externalReference;
return this;
}
public String getCollectorId() {
return collectorId;
}
public Disbursement setCollectorId(String collectorId) {
this.collectorId = collectorId;
return this;
}
public Float getApplicationFee() {
return applicationFee;
}
public Disbursement setApplicationFee(Float applicationFee) {
this.applicationFee = applicationFee;
return this;
}
public Float getMoneyReleaseDays() {
return moneyReleaseDays;
}
public Disbursement setMoneyReleaseDays(Float moneyReleaseDays) {
this.moneyReleaseDays = moneyReleaseDays;
return this;
}
public AdditionalInfo getAdditionalInfo() {
return additionalInfo;
}
public Disbursement setAdditionalInfo(AdditionalInfo additionalInfo) {
this.additionalInfo = additionalInfo;
return this;
}
public static boolean updateReleaseDate(Long advancedPaymentId, Long disbursementId, Date releaseDate) throws MPException {
return updateReleaseDate(advancedPaymentId, disbursementId, releaseDate, MPRequestOptions.createDefault());
}
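// Hypothetical usage (the IDs below are placeholders, not real resources):
//   boolean ok = Disbursement.updateReleaseDate(1L, 2L, new Date());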
@POST(path="/v1/advanced_payments/:advanced_payment_id/disbursements/:disbursement_id/disburses")
public static boolean updateReleaseDate(Long advancedPaymentId, Long disbursementId, Date releaseDate, MPRequestOptions requestOptions) throws MPException {
Disbursement disbursement = new Disbursement()
.setMoneyReleaseDate(releaseDate);
Map<String, String> queryParams = new HashMap<String, String>();
queryParams.put("advanced_payment_id", advancedPaymentId.toString());
queryParams.put("disbursement_id", disbursementId.toString());
Disbursement response = processMethod(Disbursement.class, disbursement, "updateReleaseDate", queryParams, WITHOUT_CACHE, requestOptions);
return response.getLastApiResponse().getStatusCode() >= 200
&& response.getLastApiResponse().getStatusCode() < 300;
}
}
|
package webl.lang.builtins;
import webl.lang.*;
import webl.lang.expr.*;
import webl.page.*;
import java.util.*;
public class NameFun extends AbstractFunExpr
{
public String toString() {
return "<Name>";
}
public Expr Apply(Context c, Vector args, Expr callsite) throws WebLException {
CheckArgCount(c, args, callsite, 1);
Expr p = ((Expr)(args.elementAt(0))).eval(c);
if (p instanceof Piece) {
String name = ((Piece)p).name;
if (name == null) name = "";
return Program.Str(name);
} else
throw new WebLException(c, callsite, "ArgumentError", toString() + " function expects a piece as first argument");
}
} |
package com.easywf.wf.util.service;
import com.easywf.wf.util.context.ProcessContext;
public interface ProcessRejectService {
void reject(ProcessContext pc);
boolean support(ProcessContext pc);
}
|
<reponame>ArturAquino/TesteZup
package br.mg.com.zup.comandos;
import br.mg.com.zup.robo.Robo;
public interface ICmd{
public void inicia(final Robo robo);
} |
use std::fs;
use std::str::FromStr;
struct Instruction {
command: String,
offset: u32,
}
impl FromStr for Instruction {
type Err = std::num::ParseIntError;
fn from_str (line: &str) -> Result<Self, Self::Err> {
let x: Vec<&str> = line.split_whitespace().collect();
let command: String = x[0].to_string();
let offset: u32 = u32::from_str(x[1])?;
Ok(Instruction { command, offset })
}
}
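// Each input line holds a command word and a numeric offset, e.g. "forward 5"
// or "down 3"; commands other than forward/up/down hit the unreachable!() arm.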
fn main() {
let input = fs::read_to_string("input-1")
.expect("fail input");
let mut hpos = 0;
let mut dpos = 0;
for line in input.lines() {
let x = Instruction::from_str(line).expect("Error parsing instruction");
match x.command.as_str() {
"forward" => hpos += x.offset,
"up" => dpos -= x.offset,
"down" => dpos += x.offset,
_ => unreachable!(),
}
}
println!("{}", hpos * dpos);
}
|
<reponame>AlexSafatli/NootBot
package net.dirtydeeds.discordsoundboard.listeners;
import net.dirtydeeds.discordsoundboard.moderation.ModerationRules;
import net.dirtydeeds.discordsoundboard.service.SoundboardBot;
import net.dirtydeeds.discordsoundboard.utils.StyledEmbedMessage;
import net.dv8tion.jda.api.entities.Guild;
import net.dv8tion.jda.api.entities.Member;
import net.dv8tion.jda.api.entities.TextChannel;
import net.dv8tion.jda.api.events.guild.member.GuildMemberJoinEvent;
import net.dv8tion.jda.api.requests.RestAction;
import java.util.Map;
public class GuildUserListener extends AbstractListener {
private Map<Guild, ModerationRules> modRules;
public GuildUserListener(SoundboardBot bot, Map<Guild, ModerationRules> rules) {
this.bot = bot;
this.modRules = rules;
}
public void onGuildMemberJoin(GuildMemberJoinEvent event) {
Member member = event.getMember();
verifyRole(member);
}
private void verifyRole(Member member) {
ModerationRules rules = modRules.get(member.getGuild());
if (rules != null) {
RestAction<Void> assign = rules.giveDefaultRole(member);
if (assign != null) {
assign.queue(
x -> bot.sendMessageToUser(
"Added " + member.getEffectiveName() + " to role " +
rules.getDefaultRole().getName(), bot.getOwner()));
}
}
}
private void mentionMember(TextChannel channel, Member member, String title, String desc) {
if (channel != null) {
StyledEmbedMessage em = StyledEmbedMessage.forMember(bot, member, title, desc);
embed(channel, em);
}
}
}
|
<filename>NSudoSDK/M2WindowsDownlevelHelpers.h
/*
* PROJECT: M2-Team Common Library
* FILE: M2WindowsDownlevelHelpers.h
* PURPOSE: Definition for the Windows downlevel helper functions
*
* LICENSE: The MIT License
*
* DEVELOPER: Mouri_Naruto (Mouri_Naruto AT Outlook.com)
*/
#pragma once
#ifndef _M2_WINDOWS_DOWNLEVEL_HELPERS_
#define _M2_WINDOWS_DOWNLEVEL_HELPERS_
#include <Windows.h>
/**
* Retrieves the calling thread's last-error code value. The last-error code is
* maintained on a per-thread basis. Multiple threads do not overwrite each
* other's last-error code.
*
* @param IsLastFunctionCallSucceeded Set this parameter TRUE if you can be
* sure that the last call succeeded.
* Otherwise, set this parameter FALSE.
* @param UseLastErrorWhenSucceeded Set this parameter TRUE if you want to use
* the last-error code even if the last call
* succeeded. Otherwise, set this parameter
* FALSE.
* @return The calling thread's last-error code.
*/
DWORD M2GetLastWin32Error(
_In_ BOOL IsLastFunctionCallSucceeded = FALSE,
_In_ BOOL UseLastErrorWhenSucceeded = FALSE);
/**
* Retrieves the calling thread's last-error code value and converts it to an
* HRESULT. The last-error code is maintained on a per-thread basis. Multiple
* threads do not overwrite each other's last-error code.
*
* @param IsLastFunctionCallSucceeded Set this parameter TRUE if you can be
* sure that the last call succeeded.
* Otherwise, set this parameter FALSE.
* @param UseLastErrorWhenSucceeded Set this parameter TRUE if you want to use
* the last-error code even if the last call
* succeeded. Otherwise, set this parameter
* FALSE.
* @return The calling thread's last-error code, converted to an HRESULT value.
*/
HRESULT M2GetLastHResultError(
_In_ BOOL IsLastFunctionCallSucceeded = FALSE,
_In_ BOOL UseLastErrorWhenSucceeded = FALSE);
/**
* Allocates a block of memory from a heap. The allocated memory is not
* movable.
*
* @param lpNewMem A pointer to the allocated memory block.
* @param hHeap A handle to the heap from which the memory will be allocated.
* @param dwFlags The heap allocation options.
* @param dwBytes The number of bytes to be allocated.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see HeapAlloc.
*/
HRESULT M2HeapAlloc(
_Out_ PVOID* lpNewMem,
_In_ HANDLE hHeap,
_In_ DWORD dwFlags,
_In_ SIZE_T dwBytes);
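// A minimal usage sketch for the heap helpers (error handling elided;
// illustrative only, not part of the header's API surface):
//
//   PVOID Block = nullptr;
//   if (SUCCEEDED(M2HeapAlloc(&Block, GetProcessHeap(), 0, 256)))
//   {
//       /* ... use the 256-byte block ... */
//       M2HeapFree(GetProcessHeap(), 0, Block);
//   }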
/**
* Reallocates a block of memory from a heap. This function enables you to
* resize a memory block and change other memory block properties. The
* allocated memory is not movable.
*
* @param lpNewMem A pointer to the allocated memory block.
* @param hHeap A handle to the heap from which the memory is to be
* reallocated.
* @param dwFlags The heap reallocation options.
* @param lpMem A pointer to the block of memory that the function reallocates.
* @param dwBytes The new size of the memory block, in bytes.
* @return HRESULT. If the function succeeds, the return value is S_OK. If the
* function fails, the original memory is not freed, and the original
* handle and pointer are still valid.
* @remark For more information, see HeapReAlloc.
*/
HRESULT M2HeapReAlloc(
_Out_ PVOID* lpNewMem,
_Inout_ HANDLE hHeap,
_In_ DWORD dwFlags,
_In_ LPVOID lpMem,
_In_ SIZE_T dwBytes);
/**
* Frees a memory block allocated from a heap by the M2HeapAlloc and
* M2HeapReAlloc function.
*
* @param hHeap A handle to the heap whose memory block is to be freed.
* @param dwFlags The heap free options.
* @param lpMem A pointer to the memory block to be freed.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see HeapFree.
*/
HRESULT M2HeapFree(
_Inout_ HANDLE hHeap,
_In_ DWORD dwFlags,
_In_ LPVOID lpMem);
/**
* Closes an open object handle.
*
* @param hObject A valid handle to an open object.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see CloseHandle.
*/
HRESULT M2CloseHandle(
_In_ HANDLE hObject);
/**
* Creates a thread to execute within the virtual address space of the calling
* process.
*
* @param lpThreadHandle The address of the returned handle to the new thread.
* @param lpThreadAttributes A pointer to a SECURITY_ATTRIBUTES structure that
* determines whether the returned handle can be
* inherited by child processes.
* @param dwStackSize The initial size of the stack, in bytes.
* @param lpStartAddress A pointer to the application-defined function to be
* executed by the thread.
* @param lpParameter A pointer to a variable to be passed to the thread.
* @param dwCreationFlags The flags that control the creation of the thread.
* @param lpThreadId A pointer to a variable that receives the thread
* identifier.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see CreateThread.
*/
HRESULT M2CreateThread(
_Out_ PHANDLE lpThreadHandle,
_In_opt_ LPSECURITY_ATTRIBUTES lpThreadAttributes,
_In_ SIZE_T dwStackSize,
_In_ LPTHREAD_START_ROUTINE lpStartAddress,
_In_opt_ LPVOID lpParameter,
_In_ DWORD dwCreationFlags,
_Out_opt_ LPDWORD lpThreadId);
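// Sketch of a typical call sequence (ThreadProc is a hypothetical
// LPTHREAD_START_ROUTINE supplied by the caller):
//
//   HANDLE Thread = nullptr;
//   if (SUCCEEDED(M2CreateThread(&Thread, nullptr, 0, ThreadProc,
//                                nullptr, 0, nullptr)))
//   {
//       WaitForSingleObject(Thread, INFINITE);
//       M2CloseHandle(Thread);
//   }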
/**
* Retrieves the number of logical processors in the current group.
*
* @return The number of logical processors in the current group.
*/
DWORD M2GetNumberOfHardwareThreads();
/**
* Retrieves the number of milliseconds that have elapsed since the system was
* started.
*
* @return The number of milliseconds.
*/
ULONGLONG M2GetTickCount();
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
/**
* Creates or opens a file or I/O device. The most commonly used I/O devices
* are as follows: file, file stream, directory, physical disk, volume, console
* buffer, tape drive, communications resource, mailslot, and pipe. The
* function returns a handle that can be used to access the file or device for
* various types of I/O depending on the file or device and the flags and
* attributes specified.
*
* @param lpFileHandle The address of the returned handle to the specified
* file.
* @param lpFileName The name of the file or device to be created or opened.
* You may use either forward slashes (/) or backslashes (\)
* in this name.
* @param dwDesiredAccess The requested access to the file or device, which can
* be summarized as read, write, both or neither zero).
* @param dwShareMode The requested sharing mode of the file or device, which
* can be read, write, both, delete, all of these, or none.
* Access requests to
* attributes or extended attributes are not affected by
* this flag.
* @param lpSecurityAttributes A pointer to a SECURITY_ATTRIBUTES structure
* that contains two separate but related data
* members: an optional security descriptor, and a
* Boolean value that determines whether the
* returned handle can be inherited by child
* processes. This parameter can be NULL.
* @param dwCreationDisposition An action to take on a file or device that
* exists or does not exist.
* @param dwFlagsAndAttributes The file or device attributes and flags,
* FILE_ATTRIBUTE_NORMAL being the most common
* default value for files.
* @param hTemplateFile A valid handle to a template file with the GENERIC_READ
* access right. The template file supplies file
* attributes and extended attributes for the file that is
* being created. This parameter can be NULL.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see CreateFileW.
*/
HRESULT M2CreateFile(
_Out_ PHANDLE lpFileHandle,
_In_ LPCWSTR lpFileName,
_In_ DWORD dwDesiredAccess,
_In_ DWORD dwShareMode,
_In_opt_ LPSECURITY_ATTRIBUTES lpSecurityAttributes,
_In_ DWORD dwCreationDisposition,
_In_ DWORD dwFlagsAndAttributes,
_In_opt_ HANDLE hTemplateFile);
#endif
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
/**
* Closes a handle to the specified registry key.
*
* @param hKey A handle to the open key to be closed.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see RegCloseKey.
*/
HRESULT M2RegCloseKey(
_In_ HKEY hKey);
/**
* Creates the specified registry key. If the key already exists, the function
* opens it. Note that key names are not case sensitive.
*
* @param hKey A handle to an open registry key.
* @param lpSubKey The name of a subkey that this function opens or creates
* @param Reserved This parameter is reserved and must be zero.
* @param lpClass The user-defined class type of this key.
* @param dwOptions This parameter can be one of the following values:
* REG_OPTION_BACKUP_RESTORE, REG_OPTION_CREATE_LINK,
* REG_OPTION_NON_VOLATILE, REG_OPTION_VOLATILE.
* @param samDesired A mask that specifies the access rights for the key to be
* created.
* @param lpSecurityAttributes A pointer to a SECURITY_ATTRIBUTES structure
* that determines whether the returned handle can
* be inherited by child processes.
* @param phkResult A pointer to a variable that receives a handle to the
* opened or created key.
* @param lpdwDisposition A pointer to a variable that receives the disposition
* value, either REG_CREATED_NEW_KEY or
* REG_OPENED_EXISTING_KEY.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see RegCreateKeyEx.
*/
HRESULT M2RegCreateKey(
_In_ HKEY hKey,
_In_ LPCWSTR lpSubKey,
_Reserved_ DWORD Reserved,
_In_opt_ LPWSTR lpClass,
_In_ DWORD dwOptions,
_In_ REGSAM samDesired,
_In_opt_ CONST LPSECURITY_ATTRIBUTES lpSecurityAttributes,
_Out_ PHKEY phkResult,
_Out_opt_ LPDWORD lpdwDisposition);
/**
* Retrieves the type and data for the specified value name associated with an
* open registry key.
*
* @param hKey A handle to an open registry key.
* @param lpValueName The name of the registry value.
* @param lpReserved This parameter is reserved and must be NULL.
* @param lpType A pointer to a variable that receives a code indicating the
* type of data stored in the specified value.
* @param lpData A pointer to a buffer that receives the value's data.
* @param lpcbData A pointer to a variable that specifies the size of the
* buffer pointed to by the lpData parameter, in bytes.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see RegQueryValueEx.
*/
HRESULT M2RegQueryValue(
_In_ HKEY hKey,
_In_opt_ LPCWSTR lpValueName,
_Reserved_ LPDWORD lpReserved,
_Out_opt_ LPDWORD lpType,
_Out_opt_ LPBYTE lpData,
_Inout_opt_ LPDWORD lpcbData);
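// Sketch of the usual two-call pattern for reading a value (the key handle
// and value name are placeholders):
//
//   DWORD cbData = 0;
//   M2RegQueryValue(hKey, L"ExampleValue", nullptr, nullptr, nullptr, &cbData);
//   // ...allocate a cbData-byte buffer, then call again to fetch the data...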
/**
* Sets the data and type for the specified value name associated with an
* open registry key.
*
* @param hKey A handle to an open registry key.
* @param lpValueName The name of the value to be set.
* @param Reserved This parameter is reserved and must be zero.
* @param dwType The type of data pointed to by the lpData parameter.
* @param lpData The data to be stored.
* @param cbData The size of the information pointed to by the lpData
* parameter, in bytes.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see RegSetValueEx.
*/
HRESULT M2RegSetValue(
_In_ HKEY hKey,
_In_opt_ LPCWSTR lpValueName,
_Reserved_ DWORD Reserved,
_In_ DWORD dwType,
_In_opt_ CONST BYTE* lpData,
_In_ DWORD cbData);
#endif
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
/**
* Loads the specified module with the optimization of the mitigation of DLL
* preloading attacks into the address space of the calling process safely. The
* specified module may cause other modules to be loaded.
*
* @param phLibModule A handle to the loaded module.
* @param lpLibFileName A string that specifies the file name of the module to
* load.
* @param hFile This parameter is reserved for future use. It must be NULL.
* @param dwFlags The action to be taken when loading the module.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see LoadLibraryEx.
*/
HRESULT M2LoadLibrary(
_Out_ HMODULE* phLibModule,
_In_ LPCWSTR lpLibFileName,
_Reserved_ HANDLE hFile,
_In_ DWORD dwFlags);
#endif
/**
* Frees the loaded dynamic-link library (DLL) module and, if necessary,
* decrements its reference count. When the reference count reaches zero, the
* module is unloaded from the address space of the calling process and the
* handle is no longer valid.
*
* @param hLibModule A handle to the loaded library module.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see FreeLibrary.
*/
HRESULT M2FreeLibrary(
_In_ HMODULE hLibModule);
/**
* Retrieves the address of an exported function or variable from the specified
* dynamic-link library (DLL).
*
* @param lpProcAddress The address of the exported function or variable.
* @param hModule A handle to the DLL module that contains the function or
* variable. The LoadLibrary, LoadLibraryEx, LoadPackagedLibrary
* or GetModuleHandle function returns this handle. This
* function does not retrieve addresses from modules that were
* loaded using the LOAD_LIBRARY_AS_DATAFILE flag. For more
* information, see LoadLibraryEx.
* @param lpProcName The function or variable name, or the function's ordinal
* value. If this parameter is an ordinal value, it must be
* in the low-order word; the high-order word must be zero.
* @return HRESULT. If the function succeeds, the return value is S_OK.
*/
HRESULT M2GetProcAddress(
_Out_ FARPROC* lpProcAddress,
_In_ HMODULE hModule,
_In_ LPCSTR lpProcName);
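// Sketch: load a module and resolve an export (module and procedure names
// are placeholders):
//
//   HMODULE Module = nullptr;
//   FARPROC Proc = nullptr;
//   if (SUCCEEDED(M2LoadLibrary(&Module, L"example.dll", nullptr,
//                               LOAD_LIBRARY_SEARCH_SYSTEM32)))
//   {
//       M2GetProcAddress(&Proc, Module, "ExampleProcedure");
//       M2FreeLibrary(Module);
//   }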
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP | WINAPI_PARTITION_SYSTEM)
/**
* Sends a control code directly to a specified device driver, causing the
* corresponding device to perform the corresponding operation.
*
* @param hDevice A handle to the device on which the operation is to be
* performed.
* @param dwIoControlCode The control code for the operation.
* @param lpInBuffer A pointer to the input buffer that contains the data
* required to perform the operation. This parameter can be
* NULL if dwIoControlCode specifies an operation that does
* not require input data.
* @param nInBufferSize The size of the input buffer, in bytes.
* @param lpOutBuffer A pointer to the output buffer that is to receive the
* data returned by the operation. This parameter can be
* NULL if dwIoControlCode specifies an operation that does
* not return data.
* @param nOutBufferSize The size of the output buffer, in bytes.
* @param lpBytesReturned A pointer to a variable that receives the size of
* the data stored in the output buffer, in bytes.
* @param lpOverlapped A pointer to an OVERLAPPED structure.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see DeviceIoControl.
*/
HRESULT M2DeviceIoControl(
_In_ HANDLE hDevice,
_In_ DWORD dwIoControlCode,
_In_opt_ LPVOID lpInBuffer,
_In_ DWORD nInBufferSize,
_Out_opt_ LPVOID lpOutBuffer,
_In_ DWORD nOutBufferSize,
_Out_opt_ LPDWORD lpBytesReturned,
_Inout_opt_ LPOVERLAPPED lpOverlapped);
#endif
/**
* Retrieves file information for the specified file.
*
* @param hFile A handle to the file that contains the information to be
* retrieved. This handle should not be a pipe handle.
* @param FileInformationClass A FILE_INFO_BY_HANDLE_CLASS enumeration value
* that specifies the type of information to be
* retrieved.
* @param lpFileInformation A pointer to the buffer that receives the requested
* file information.
* @param dwBufferSize The size of the lpFileInformation buffer, in bytes.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see GetFileInformationByHandleEx.
*/
HRESULT M2GetFileInformation(
_In_ HANDLE hFile,
_In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass,
_Out_ LPVOID lpFileInformation,
_In_ DWORD dwBufferSize);
/**
* Sets the file information for the specified file.
*
* @param hFile A handle to the file for which to change information. This
* handle should not be a pipe handle.
* @param FileInformationClass A FILE_INFO_BY_HANDLE_CLASS enumeration value
* that specifies the type of information to be
* changed.
* @param lpFileInformation A pointer to the buffer that contains the
* information to change for the specified file
* information class.
* @param dwBufferSize The size of the lpFileInformation buffer, in bytes.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see SetFileInformationByHandle.
*/
HRESULT M2SetFileInformation(
_In_ HANDLE hFile,
_In_ FILE_INFO_BY_HANDLE_CLASS FileInformationClass,
_In_ LPVOID lpFileInformation,
_In_ DWORD dwBufferSize);
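/*
 * Usage sketch (illustrative only; assumes hFile was opened with DELETE
 * access. This marks the file for deletion when its last handle closes):
 *
 *     FILE_DISPOSITION_INFO DispositionInfo = { 0 };
 *     DispositionInfo.DeleteFile = TRUE;
 *     HRESULT hr = M2SetFileInformation(
 *         hFile, FileDispositionInfo,
 *         &DispositionInfo, sizeof(DispositionInfo));
 */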
/**
* Creates a single uninitialized object of the class associated with a
* specified CLSID.
*
* @param lpszCLSID The string representation of the CLSID.
* @param pUnkOuter If NULL, indicates that the object is not being created as
* part of an aggregate. If non-NULL, pointer to the aggregate
* object's IUnknown interface (the controlling IUnknown).
* @param dwClsContext Context in which the code that manages the newly created
* object will run. The values are taken from the
* enumeration CLSCTX.
* @param lpszIID A pointer to the string representation of the IID.
* @param ppv Address of pointer variable that receives the interface pointer
 *            requested in lpszIID. Upon successful return, *ppv contains the
* requested interface pointer. Upon failure, *ppv contains NULL.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see CoCreateInstance.
*/
HRESULT M2CoCreateInstance(
_In_ LPCWSTR lpszCLSID,
_In_opt_ LPUNKNOWN pUnkOuter,
_In_ DWORD dwClsContext,
_In_ LPCWSTR lpszIID,
_Out_ LPVOID* ppv);
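/*
 * Usage sketch (illustrative only; assumes COM has been initialized on the
 * calling thread, e.g. via CoInitializeEx, and that <shobjidl.h> is
 * included for ITaskbarList. The GUID strings are the well-known
 * CLSID_TaskbarList and IID_ITaskbarList):
 *
 *     ITaskbarList* pTaskbarList = nullptr;
 *     HRESULT hr = M2CoCreateInstance(
 *         L"{56FDF344-FD6D-11d0-958A-006097C9A090}", // CLSID_TaskbarList
 *         nullptr,
 *         CLSCTX_INPROC_SERVER,
 *         L"{56FDF342-FD6D-11d0-958A-006097C9A090}", // IID_ITaskbarList
 *         reinterpret_cast<LPVOID*>(&pTaskbarList));
 *     if (SUCCEEDED(hr))
 *     {
 *         pTaskbarList->HrInit();
 *         pTaskbarList->Release();
 *     }
 */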
/**
* Enables or disables privileges in the specified access token. Enabling or
* disabling privileges in an access token requires TOKEN_ADJUST_PRIVILEGES
* access.
*
* @param TokenHandle A handle to the access token that contains the privileges
* to be modified. The handle must have
* TOKEN_ADJUST_PRIVILEGES access to the token. If the
* PreviousState parameter is not NULL, the handle must also
* have TOKEN_QUERY access.
* @param DisableAllPrivileges Specifies whether the function disables all of
* the token's privileges. If this value is TRUE,
* the function disables all privileges and ignores
* the NewState parameter. If it is FALSE, the
* function modifies privileges based on the
* information pointed to by the NewState
* parameter.
* @param NewState A pointer to a TOKEN_PRIVILEGES structure that specifies an
* array of privileges and their attributes. If
* DisableAllPrivileges is TRUE, the function ignores this
* parameter.
* @param BufferLength Specifies the size, in bytes, of the buffer pointed to
* by the PreviousState parameter. This parameter can be
* zero if the PreviousState parameter is NULL.
* @param PreviousState A pointer to a buffer that the function fills with a
* TOKEN_PRIVILEGES structure that contains the previous
* state of any privileges that the function modifies.
* This parameter can be NULL.
* @param ReturnLength A pointer to a variable that receives the required size,
* in bytes, of the buffer pointed to by the PreviousState
* parameter. This parameter can be NULL if PreviousState
* is NULL.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see AdjustTokenPrivileges.
*/
HRESULT M2AdjustTokenPrivileges(
_In_ HANDLE TokenHandle,
_In_ BOOL DisableAllPrivileges,
_In_opt_ PTOKEN_PRIVILEGES NewState,
_In_ DWORD BufferLength,
_Out_opt_ PTOKEN_PRIVILEGES PreviousState,
_Out_opt_ PDWORD ReturnLength);
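/*
 * Usage sketch (illustrative only; assumes TokenHandle was opened with
 * TOKEN_ADJUST_PRIVILEGES access and that the token actually holds
 * SeDebugPrivilege, otherwise enabling it fails):
 *
 *     TOKEN_PRIVILEGES NewState = { 0 };
 *     NewState.PrivilegeCount = 1;
 *     NewState.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
 *     if (::LookupPrivilegeValueW(
 *         nullptr, L"SeDebugPrivilege", &NewState.Privileges[0].Luid))
 *     {
 *         HRESULT hr = M2AdjustTokenPrivileges(
 *             TokenHandle, FALSE, &NewState, 0, nullptr, nullptr);
 *     }
 */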
/**
* Retrieves a specified type of information about an access token. The calling
* process must have appropriate access rights to obtain the information.
*
* @param TokenHandle A handle to an access token from which information is
* retrieved.
* @param TokenInformationClass Specifies a value from the
* TOKEN_INFORMATION_CLASS enumerated type to
* identify the type of information the function
* retrieves.
* @param TokenInformation A pointer to a buffer the function fills with the
* requested information.
* @param TokenInformationLength Specifies the size, in bytes, of the buffer
* pointed to by the TokenInformation parameter.
* @param ReturnLength A pointer to a variable that receives the number of
* bytes needed for the buffer pointed to by the
* TokenInformation parameter.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see GetTokenInformation.
*/
HRESULT M2GetTokenInformation(
_In_ HANDLE TokenHandle,
_In_ TOKEN_INFORMATION_CLASS TokenInformationClass,
_Out_opt_ LPVOID TokenInformation,
_In_ DWORD TokenInformationLength,
_Out_ PDWORD ReturnLength);
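/*
 * Usage sketch (illustrative only; TOKEN_ELEVATION has a fixed size, so a
 * single call suffices and no buffer-growing loop is needed):
 *
 *     HANDLE TokenHandle = nullptr;
 *     if (::OpenProcessToken(
 *         ::GetCurrentProcess(), TOKEN_QUERY, &TokenHandle))
 *     {
 *         TOKEN_ELEVATION Elevation = { 0 };
 *         DWORD ReturnLength = 0;
 *         HRESULT hr = M2GetTokenInformation(
 *             TokenHandle, TokenElevation,
 *             &Elevation, sizeof(Elevation), &ReturnLength);
 *         // On success, Elevation.TokenIsElevated is nonzero for an
 *         // elevated token.
 *         ::CloseHandle(TokenHandle);
 *     }
 */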
/**
 * Sets various types of information for a specified access token. The
 * information that this function sets replaces existing information. The
 * calling process must have appropriate access rights to set the information.
*
* @param TokenHandle A handle to the access token for which information is to
* be set.
* @param TokenInformationClass A value from the TOKEN_INFORMATION_CLASS
* enumerated type that identifies the type of
* information the function sets. The valid values
* from TOKEN_INFORMATION_CLASS are described in
* the TokenInformation parameter.
* @param TokenInformation A pointer to a buffer that contains the information
* set in the access token.
* @param TokenInformationLength Specifies the length, in bytes, of the buffer
* pointed to by TokenInformation.
* @return HRESULT. If the function succeeds, the return value is S_OK.
* @remark For more information, see SetTokenInformation.
*/
HRESULT M2SetTokenInformation(
_In_ HANDLE TokenHandle,
_In_ TOKEN_INFORMATION_CLASS TokenInformationClass,
_In_ LPVOID TokenInformation,
_In_ DWORD TokenInformationLength);
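/*
 * Usage sketch (illustrative only; assumes TokenHandle was opened with the
 * access right the chosen information class requires, TOKEN_ADJUST_DEFAULT
 * for TokenVirtualizationEnabled):
 *
 *     DWORD IsVirtualizationEnabled = 1;
 *     HRESULT hr = M2SetTokenInformation(
 *         TokenHandle, TokenVirtualizationEnabled,
 *         &IsVirtualizationEnabled, sizeof(IsVirtualizationEnabled));
 */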
#endif // !_M2_WINDOWS_DOWNLEVEL_HELPERS_