TEXT MINING WITH LUCENE AND HADOOP: DOCUMENT CLUSTERING WITH FEATURE EXTRACTION In this work a new method for document clustering has been adapted using non-negative matrix factorization (NMF). The key idea is to cluster the documents after measuring the proximity of the documents to the extracted features. The extracted features are treated as the final cluster labels, and clustering is done using cosine similarity, which is equivalent to k-means with a single iteration. An application, named 'Swami', was developed using Apache Lucene for indexing documents, and the MapReduce framework of the Apache Hadoop project was used for a parallel implementation of the k-means algorithm from the Apache Mahout project. The performance of the proposed technique was evaluated on news from the 20 Newsgroups dataset, and the accuracy was found to be above 80% for 2 clusters and above 75% for 3 clusters. Since the experiments were carried out on only one Hadoop cluster, a significant reduction in time from the MapReduce implementation was obtained only when the cluster size exceeded 9, i.e. 40 documents averaging 1.5 kilobytes. Thus it is concluded that the features extracted using NMF can be used to cluster documents by treating them as final cluster labels, as in k-means, and that for large-scale document collections the parallel implementation using MapReduce can reduce computational time. ACKNOWLEDGEMENTS I would like to thank my advisor, Professor Andoh Tomoharu, for giving me the opportunity to work on this project and for his valuable guidance. I would also like to thank the ex-president of this university, Maruyama Fujio, for building my interest in MapReduce, and Professor Numata Yasuhide for his valuable help in understanding mathematical formulas. I am grateful to Bishnu Prasad Gautam for being my supervisor, and to friends in the Apache Mahout and Apache Lucene projects for their help in using the libraries. My thanks go to Bishal Acharya for all his support. Finally, I am very indebted to Swami for always being with me, and to my sister Kripa and my parents for their constant support and encouragement. |
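A minimal sketch of the clustering step described in the abstract above, assuming scikit-learn, a tiny in-memory corpus and a TF-IDF term-document matrix (the thesis itself used Lucene for indexing and Mahout/Hadoop for the parallel run, not this code): the NMF feature vectors are treated as the cluster labels and each document is assigned to the most cosine-similar feature, i.e. a single k-means-style assignment pass.

# Illustrative sketch only: NMF feature extraction + one cosine-similarity assignment pass.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF

docs = ["hadoop mapreduce cluster", "lucene index search",
        "mapreduce parallel kmeans", "document index lucene"]   # toy corpus (assumption)
k = 2                                                           # number of clusters

A = TfidfVectorizer().fit_transform(docs)        # term-weight matrix, shape (n_docs, n_terms)
nmf = NMF(n_components=k, init="nndsvd", random_state=0)
W = nmf.fit_transform(A)                         # document-feature weights
H = nmf.components_                              # feature-term vectors: the "cluster labels"

# Assign each document to the feature vector with the highest cosine similarity
# (equivalent to one k-means assignment step under cosine distance).
X = A.toarray()
Xn = X / (np.linalg.norm(X, axis=1, keepdims=True) + 1e-12)
Hn = H / (np.linalg.norm(H, axis=1, keepdims=True) + 1e-12)
labels = (Xn @ Hn.T).argmax(axis=1)
print(labels)    # cluster index per document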
package client
import (
"bytes"
"context"
"fmt"
"tcli"
"tcli/utils"
"github.com/c4pt0r/log"
"github.com/tikv/client-go/v2/tikv"
pd "github.com/tikv/pd/client"
)
func newTxnKVClient(pdAddr []string) *txnkvClient {
client, err := tikv.NewTxnClient(pdAddr)
if err != nil {
log.F(err)
}
return &txnkvClient{
txnClient: client,
pdAddr: pdAddr,
}
}
type txnkvClient struct {
txnClient *tikv.KVStore
pdAddr []string
}
func (c *txnkvClient) Close() {
if c.txnClient != nil {
c.txnClient.Close()
}
}
func (c *txnkvClient) GetClientMode() TiKV_MODE {
return TXN_CLIENT
}
func (c *txnkvClient) GetClusterID() string {
return fmt.Sprintf("%d", c.txnClient.GetPDClient().GetClusterID(context.TODO()))
}
func (c *txnkvClient) GetStores() ([]StoreInfo, error) {
var ret []StoreInfo
stores, err := c.txnClient.GetPDClient().GetAllStores(context.TODO())
if err != nil {
return nil, err
}
for _, store := range stores {
ret = append(ret, StoreInfo{
ID: fmt.Sprintf("%d", store.GetId()),
Version: store.GetVersion(),
Addr: store.GetAddress(),
State: store.GetState().String(),
StatusAddress: store.GetStatusAddress(),
})
}
return ret, nil
}
func (c *txnkvClient) GetPDClient() pd.Client {
return c.txnClient.GetPDClient()
}
func (c *txnkvClient) Put(ctx context.Context, kv KV) error {
	tx, err := c.txnClient.Begin()
	if err != nil {
		return err
	}
	if err := tx.Set(kv.K, kv.V); err != nil {
		return err
	}
	if err := tx.Commit(context.TODO()); err != nil {
		// A failed commit must be reported, not swallowed; roll back first.
		if rbErr := tx.Rollback(); rbErr != nil {
			return rbErr
		}
		return err
	}
	return nil
}
func (c *txnkvClient) Scan(ctx context.Context, startKey []byte) (KVS, int, error) {
scanOpts := utils.PropFromContext(ctx)
tx, err := c.txnClient.Begin()
if err != nil {
return nil, 0, err
}
strictPrefix := scanOpts.GetBool(tcli.ScanOptStrictPrefix, false)
countOnly := scanOpts.GetBool(tcli.ScanOptCountOnly, false)
keyOnly := scanOpts.GetBool(tcli.ScanOptKeyOnly, false)
if keyOnly || countOnly {
tx.GetSnapshot().SetKeyOnly(keyOnly)
}
// count only mode will ignore this
limit := scanOpts.GetInt(tcli.ScanOptLimit, 100)
it, err := tx.Iter(startKey, nil)
if err != nil {
return nil, 0, err
}
defer it.Close()
var ret []KV
var lastKey KV
count := 0
for it.Valid() {
if !countOnly && limit == 0 {
break
}
if strictPrefix && !bytes.HasPrefix(it.Key(), startKey) {
break
}
// count only will not use limit
if !countOnly {
ret = append(ret, KV{K: it.Key()[:], V: it.Value()[:]})
limit--
}
count++
lastKey.K = it.Key()[:]
it.Next()
}
if countOnly {
ret = append(ret, KV{K: []byte("Count"), V: []byte(fmt.Sprintf("%d", count))})
ret = append(ret, KV{K: []byte("Last Key"), V: []byte(lastKey.K)})
}
return ret, count, nil
}
func (c *txnkvClient) BatchPut(ctx context.Context, kvs []KV) error {
tx, err := c.txnClient.Begin()
if err != nil {
return err
}
for _, kv := range kvs {
err := tx.Set(kv.K[:], kv.V[:])
if err != nil {
return err
}
}
return tx.Commit(context.Background())
}
func (c *txnkvClient) Get(ctx context.Context, k Key) (KV, error) {
tx, err := c.txnClient.Begin()
if err != nil {
return KV{}, err
}
v, err := tx.Get(context.TODO(), k)
if err != nil {
return KV{}, err
}
return KV{K: k, V: v}, nil
}
func (c *txnkvClient) Delete(ctx context.Context, k Key) error {
tx, err := c.txnClient.Begin()
if err != nil {
return err
}
tx.Delete(k)
return tx.Commit(context.Background())
}
// return lastKey, delete count, error
func (c *txnkvClient) DeletePrefix(ctx context.Context, prefix Key, limit int) (Key, int, error) {
tx, err := c.txnClient.Begin()
if err != nil {
return nil, 0, err
}
tx.GetSnapshot().SetKeyOnly(true)
it, err := tx.Iter(prefix, nil)
if err != nil {
return nil, 0, err
}
defer it.Close()
var lastKey KV
count := 0
var batch []KV
for it.Valid() && limit > 0 {
if !bytes.HasPrefix(it.Key(), prefix) {
break
}
lastKey.K = it.Key()[:]
batch = append(batch, KV{K: it.Key()[:]})
		// TODO batch size should not be fixed
if len(batch) == 1000 {
// do delete
if err := c.BatchDelete(ctx, batch); err != nil {
return lastKey.K, count, err
}
count += len(batch)
// reset batch
batch = nil
}
limit--
it.Next()
}
if len(batch) > 0 {
if err := c.BatchDelete(ctx, batch); err != nil {
return nil, count, err
}
count += len(batch)
}
return lastKey.K, count, nil
}
func (c *txnkvClient) BatchDelete(ctx context.Context, kvs []KV) error {
tx, err := c.txnClient.Begin()
if err != nil {
return err
}
for _, kv := range kvs {
err := tx.Delete(kv.K)
if err != nil {
return err
}
}
return tx.Commit(context.Background())
}
|
/**
 * This class is the entry point of the GUI.
* @author tangshulan
*
*/
public class MainApp extends Application {
private Stage primaryStage;
private BorderPane rootLayout;
private ObservableList<Car> carData;
private Simulator sim;
private ConnectionDB db;
/**
* initialize simulator, database instance and car list
*/
public MainApp() {
sim = Simulator.getInstance();
db = ConnectionDB.getInstance();
loadRecords();
carData = sim.getCars();
}
/**
* Returns the data as an observable list of cars.
 * @return the list of cars
*/
public ObservableList<Car> getcarData() {
return carData;
}
@Override
public void start(Stage primaryStage) {
this.primaryStage = primaryStage;
this.primaryStage.setTitle("Connected Car App");
this.primaryStage.getIcons().add(new Image("/img/car.png"));
initRootLayout();
showCarOverview();
}
/**
* Initializes the root layout.
*/
public void initRootLayout() {
try {
// Load root layout from fxml file.
FXMLLoader loader = new FXMLLoader();
loader.setLocation(MainApp.class.getResource("../view/RootLayout.fxml"));
rootLayout = (BorderPane) loader.load();
// Show the scene containing the root layout.
Scene scene = new Scene(rootLayout);
primaryStage.setScene(scene);
primaryStage.show();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Returns the main stage.
* @return primary stage
*/
public Stage getPrimaryStage() {
return primaryStage;
}
/**
* Shows the car overview scene.
*/
public void showCarOverview() {
try {
// Load the fxml file and set into the center of the main layout
FXMLLoader loader = new FXMLLoader(MainApp.class.getResource("../view/CarOverview.fxml"));
AnchorPane overviewPage = (AnchorPane) loader.load();
rootLayout.setCenter(overviewPage);
CarOverviewController controller = loader.getController();
controller.setMainApp(this);
controller.setImges();
} catch (IOException e) {
// Exception gets thrown if the fxml file could not be loaded
e.printStackTrace();
}
}
/**
 * Opens a dialog to add a new device for the specified car. If the user
* clicks OK, the device is created and true
* is returned.
*
* @return true if the user clicked OK, false otherwise.
*/
public boolean showDeviceNewDialog() {
try {
// Load the fxml file and create a new stage for the popup dialog.
FXMLLoader loader = new FXMLLoader();
loader.setLocation(MainApp.class.getResource("../view/DeviceEditDialog.fxml"));
AnchorPane page = (AnchorPane) loader.load();
// Create the dialog Stage.
Stage dialogStage = new Stage();
dialogStage.setTitle("New Device");
dialogStage.getIcons().add(new Image("/img/device.png"));
dialogStage.initModality(Modality.WINDOW_MODAL);
dialogStage.initOwner(primaryStage);
Scene scene = new Scene(page);
dialogStage.setScene(scene);
// Set the device into the controller.
DeviceEditDialogController controller = loader.getController();
controller.setMainApp(this);
controller.setDialogStage(dialogStage);
controller.setCarIdBox();
controller.setRadioButton();
// Show the dialog and wait until the user closes it
dialogStage.showAndWait();
return controller.isOkClicked();
} catch (IOException e) {
e.printStackTrace();
return false;
}
}
/**
 * Opens a dialog to add a new property for the specified car device.
 * @param selectedDevice the device to which the property will be added
*/
public void showPropertyNewDialog(Device selectedDevice) {
try {
System.out.println("show dialog");
// Load the fxml file and create a new stage for the popup dialog.
FXMLLoader loader = new FXMLLoader();
loader.setLocation(MainApp.class.getResource("../view/PropertyEditDialog.fxml"));
AnchorPane page = (AnchorPane) loader.load();
// Create the dialog Stage.
Stage dialogStage = new Stage();
dialogStage.setTitle("New Property");
dialogStage.initModality(Modality.WINDOW_MODAL);
dialogStage.initOwner(primaryStage);
dialogStage.getIcons().add(new Image("/img/property.png"));
Scene scene = new Scene(page);
dialogStage.setScene(scene);
// Set the device into the controller.
PropertyEditDialogController controller = loader.getController();
controller.setMainApp(this);
controller.setDevice(selectedDevice);
controller.setDialogStage(dialogStage);
// Show the dialog and wait until the user closes it
dialogStage.showAndWait();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
 * Opens a dialog to update a selected property.
 * @param selectedDevice the device the property belongs to
 * @param selectedProperty the property to be edited
* @return true if the user clicked OK, false otherwise.
*/
public boolean showPropertyEditDialog(Device selectedDevice, Property selectedProperty) {
try {
System.out.println("show dialog");
// Load the fxml file and create a new stage for the popup dialog.
FXMLLoader loader = new FXMLLoader();
loader.setLocation(MainApp.class.getResource("../view/PropertyEditDialog.fxml"));
AnchorPane page = (AnchorPane) loader.load();
// Create the dialog Stage.
Stage dialogStage = new Stage();
dialogStage.setTitle("Update Property");
dialogStage.initModality(Modality.WINDOW_MODAL);
dialogStage.initOwner(primaryStage);
dialogStage.getIcons().add(new Image("/img/property.png"));
Scene scene = new Scene(page);
dialogStage.setScene(scene);
// Set the device into the controller.
PropertyEditDialogController controller = loader.getController();
controller.setMainApp(this);
controller.setDevice(selectedDevice);
controller.setProperty(selectedProperty);
controller.setDialogStage(dialogStage);
// Show the dialog and wait until the user closes it
dialogStage.showAndWait();
return controller.isOkClicked();
} catch (IOException e) {
e.printStackTrace();
}
return false;
}
/**
 * Loads car records from the database; moved from the previous main method in the
 * FileMonitor class.
*/
private void loadRecords() {
List<String> resultID = db.getID();
// create car devices
if(db.getID().size()!=0) {
for(int i=0; i<resultID.size(); i++) {
sim.addDevice(resultID.get(i)+"&"+
db.getState(resultID.get(i)).get(0)+"&"+
db.getValue(resultID.get(i)));
}
}
//create and add properties
List<String> resultProperty = db.getProperty();
if(db.getProperty().size() != 0) {
for(int j = 0; j < resultProperty.size(); j++) {
sim.addProperty(resultProperty.get(j));
}
}
}
public static void main(String[] args) {
launch(args);
}
} |
A test strategy for time-to-digital converters using dynamic element matching and dithering This work presents a cost-effective test structure that is applicable to built-in self-test of time-to-digital converters (TDCs). The proposed structure uses deterministic dynamic element matching and dithering to generate linear time interval excitations for precision TDC test. Transition time points of a TDC can be measured with picosecond accuracy by using the proposed strategy, which enables the test and calibration of TDCs used in jitter characterization of communications systems with multigigabit-per-second data rates. |
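As a rough, self-contained illustration of the dithering idea in the abstract above (an ideal-quantizer model with assumed numbers, not the paper's DEM-based hardware), the sketch below shows how adding a known, uniformly distributed dither of one LSB width to a fixed input interval lets a TDC code transition be located with sub-LSB, i.e. picosecond-level, resolution by simple averaging of the output codes.

# Illustrative sketch: locating a TDC code transition below one LSB using dithering.
# The TDC is modeled as an ideal quantizer; LSB, nominal interval and sample count are assumptions.
import numpy as np

LSB = 10e-12         # assumed TDC resolution: 10 ps per code
t_nominal = 52e-12   # assumed input interval, near the 50 ps code transition

def tdc(t):
    return np.floor(t / LSB).astype(int)   # ideal quantizer: code = floor(t / LSB)

rng = np.random.default_rng(0)
dither = rng.uniform(-LSB / 2, LSB / 2, size=100_000)   # known, zero-mean dither spanning one LSB
codes = tdc(t_nominal + dither)

# With dither spanning exactly one LSB, the fraction of hits in the upper code tells
# where t_nominal sits inside the LSB, so the transition time can be estimated as:
frac_upper = np.mean(codes == codes.max())
transition_estimate = t_nominal - (frac_upper - 0.5) * LSB
print(f"estimated transition: {transition_estimate * 1e12:.2f} ps (true value here: 50 ps)")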
/**
 * This file contains a minimal example of a Linear "OpMode". An OpMode is a 'program' that runs in either
 * the autonomous or the teleop period of an FTC match. The names of OpModes appear on the menu
 * of the FTC Driver Station. When a selection is made from the menu, the corresponding OpMode
 * class is instantiated on the Robot Controller and executed.
 *
 * This particular OpMode drives the robot forward a set distance using the motor encoders.
 * It includes all the skeletal structure that all linear OpModes contain.
 *
 * Use Android Studio to copy this class, and paste it into your team's code folder with a new name.
 * Remove or comment out the @Disabled line to add this OpMode to the Driver Station OpMode list.
 */
@Autonomous(name="encoderTest", group="Linear Opmode") // @TeleOp(...) is the other common choice
public class encoderTest extends LinearOpMode {
static final double COUNTS_PER_MOTOR_REV = 1440;
static final double WHEEL_DIAMETER_INCHES = 4.0;
double target;
static final double COUNTS_PER_INCH = (COUNTS_PER_MOTOR_REV) / (WHEEL_DIAMETER_INCHES * 3.1415);
/* Declare OpMode members. */
private ElapsedTime runtime = new ElapsedTime();
DcMotor leftFront = null;
DcMotor rightFront = null;
DcMotor leftBack = null;
DcMotor rightBack = null;
@Override
public void runOpMode() throws InterruptedException {
telemetry.addData("Status", "Initialized");
telemetry.update();
leftFront = hardwareMap.dcMotor.get("leftFront");
rightFront = hardwareMap.dcMotor.get("rightFront");
leftBack = hardwareMap.dcMotor.get("leftBack");
rightBack = hardwareMap.dcMotor.get("rightBack");
leftFront.setDirection(DcMotorSimple.Direction.REVERSE);
leftBack.setDirection(DcMotorSimple.Direction.REVERSE);
// Wait for the game to start (driver presses PLAY)
waitForStart();
runtime.reset();
encoderForward(0.5, 24, 5);
// run until the end of the match (driver presses STOP)
while (opModeIsActive()) {
telemtry();
idle(); // Always call idle() at the bottom of your while(opModeIsActive()) loop
}
}
public void encoderForward(double speed, double distance, double timeOut){
int leftFrontpos, rightBackpos;
target = distance * COUNTS_PER_INCH;
leftFront.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
rightBack.setMode(DcMotor.RunMode.STOP_AND_RESET_ENCODER);
leftFront.setTargetPosition((int) target);
rightBack.setTargetPosition((int) target);
leftFront.setPower(speed);
rightBack.setPower(speed);
leftFront.setMode(DcMotor.RunMode.RUN_TO_POSITION);
rightBack.setMode(DcMotor.RunMode.RUN_TO_POSITION);
telemetry.addData("Loop:", "Starting Loop");
/* while(runtime.seconds() < timeOut){
leftFrontpos = leftFront.getCurrentPosition();
rightBackpos = rightBack.getCurrentPosition();
if(leftFrontpos < target){
leftFront.setPower(speed);
leftBack.setPower(speed);
}
else{
leftFront.setPower(0);
leftBack.setPower(0);
}
if(rightBackpos < target){
rightFront.setPower(speed);
rightBack.setPower(speed);
}
else{
rightFront.setPower(0);
rightBack.setPower(0);
}
telemtry();
}*/
rightFront.setPower(0);
rightBack.setPower(0);
leftFront.setPower(0);
leftBack.setPower(0);
telemtry();
telemetry.addData("target", "targetReached");
}
public void telemtry(){
DbgLog.msg( "Run Time: " + runtime.toString());
DbgLog.msg("leftFrontEncoder" + leftFront.getCurrentPosition());
DbgLog.msg("rightBackEncoder" + rightBack.getCurrentPosition());
DbgLog.msg("leftFront" + leftFront.getPower());
DbgLog.msg("leftBack" + leftBack.getPower());
DbgLog.msg("rightFront" + rightFront.getPower());
DbgLog.msg("rightBack" + rightBack.getPower());
telemetry.addData("Target", target);
}
} |
/*!
Module configuration for the server side of the module.
See client [ToDoGinModule](${basePath}/java/com/todomvc/client/ToDoGinModule.java.html).
*/
public class ToDoServerModule extends AbstractModule {
@Override
protected void configure() {
bind(CommandExecutor.class).to(ServerCommandExecutor.class).in(Singleton.class);
bind(CommandSerialization.Serializer.class).to(CommandSerializer.class).in(Singleton.class);
bind(CommandService.class).to(CommandServiceImpl.class).in(Singleton.class);
bind(ToDoListCommandExecutor.class).to(ServerToDoListCommandExecutor.class).in(Singleton.class);
bind(ToDoCommandExecutor.class).to(ServerToDoCommandExecutor.class).in(Singleton.class);
bind(ToDoService.class).to(ToDoServiceImpl.class).in(Singleton.class);
}
@Provides
public ChannelService channelService() {
return ChannelServiceFactory.getChannelService();
}
} |
Post-delivery complications and treatment-seeking behaviour: Scenario among women in India After the ICPD conference in 1994, more attention has been paid to reducing maternal deaths caused by complications during the antenatal, natal and post-natal periods. This paper examines post-delivery complications and treatment-seeking behaviour among 195,031 currently married women with selected socio-economic and demographic characteristics in India, using data from the District Level Household Survey (DLHS-2), 2002-04. The results show that 31.4 per cent of women were suffering from some post-delivery complication and, among them, 50.3 per cent had sought treatment. Place of residence, educational and economic level, place of delivery, and full ANC check-ups have a significant effect on post-delivery complications, and women with high education, high age at marriage, and urban residence are more likely to seek treatment among those who had complications. |
n, m = map(int, input().split())
w = [input() for _ in range(n)]
inf = 1000000
let = [inf] * n   # cheapest cyclic shift bringing a letter to position 0 of each string
num = [inf] * n   # same, for a digit
spec = [inf] * n  # same, for a special character (*, #, &)
# get(i, c): minimum cyclic shift (right by l, or left by m - r wrapping around)
# that brings character c to position 0 of string i; inf if c does not occur.
def get(i, c):
l = w[i].find(c)
r = w[i].rfind(c)
if l == -1:
return inf
return min(l, m - r)
for i in range(n):
for j in 'qwertyuiopasdfghjklzxcvbnm':
let[i] = min(let[i], get(i, j))
for j in '0123456789':
num[i] = min(num[i], get(i, j))
for j in '*#&':
spec[i] = min(spec[i], get(i, j))
ans = inf
# choose three distinct strings: one contributing a letter, one a digit, one a special character
for i in range(n):
for j in range(n):
if i == j:
continue
for k in range(n):
if i == k or j == k:
continue
ans = min(ans, let[i] + num[j] + spec[k])
print(ans) |
A primal-dual interior-point algorithm for symmetric optimization based on a new method for finding search directions Abstract We introduce an interior-point method for symmetric optimization based on a new method for determining search directions. In order to accomplish this, we use a new equivalent algebraic transformation on the centring equation of the system which characterizes the central path. In this way, we obtain a new class of directions. We analyse a special case of this class, which leads to the new interior-point algorithm mentioned before. Another way to find the search directions is using barriers derived from kernel functions. We show that in our case the corresponding direction cannot be deduced from a usual kernel function. In spite of this fact, we prove the polynomial complexity of the proposed algorithm. |
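For background, in standard linear-optimization notation (the paper itself works in the more general symmetric-cone setting, and its specific transformation is not reproduced here), the central path is the solution set of the system below for $\mu > 0$; the family of search directions the abstract refers to arises from applying an algebraic transformation $\psi$ to the centring equation before taking a Newton step:
$$ Ax = b,\ x \ge 0, \qquad A^{\mathsf T}y + s = c,\ s \ge 0, \qquad \psi(xs) = \psi(\mu e), $$
where $xs$ denotes the componentwise product and $e$ the all-ones vector. Each choice of $\psi$ yields a different Newton system and hence a different search direction; the identity $\psi(t) = t$ recovers the classical primal-dual direction.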
<filename>src/main/java/vazkii/quark/management/entity/EntityChestPassenger.java
package vazkii.quark.management.entity;
import net.minecraft.entity.Entity;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.init.Blocks;
import net.minecraft.inventory.IInventory;
import net.minecraft.inventory.InventoryHelper;
import net.minecraft.inventory.ItemStackHelper;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraft.network.datasync.DataParameter;
import net.minecraft.network.datasync.DataSerializers;
import net.minecraft.network.datasync.EntityDataManager;
import net.minecraft.util.NonNullList;
import net.minecraft.world.World;
public class EntityChestPassenger extends Entity implements IInventory {
private NonNullList<ItemStack> items = NonNullList.<ItemStack>withSize(27, ItemStack.EMPTY);
private static final DataParameter<ItemStack> CHEST_TYPE = EntityDataManager.<ItemStack>createKey(EntityChestPassenger.class, DataSerializers.ITEM_STACK);
private static final String TAG_CHEST_TYPE = "chestType";
public EntityChestPassenger(World worldIn) {
super(worldIn);
}
public EntityChestPassenger(World worldIn, ItemStack stack) {
this(worldIn);
ItemStack newStack = stack.copy();
newStack.setCount(1);
dataManager.set(CHEST_TYPE, newStack);
}
@Override
protected void entityInit() {
noClip = true;
dataManager.register(CHEST_TYPE, new ItemStack(Blocks.CHEST));
}
@Override
public void onUpdate() {
super.onUpdate();
if(isDead)
return;
if(!isRiding()) {
if(!world.isRemote)
setDead();
return;
}
Entity riding = getRidingEntity();
rotationYaw = riding.prevRotationYaw;
rotationPitch = 0F;
}
@Override
public int getSizeInventory() {
return items.size();
}
@Override
public boolean isEmpty() {
for(ItemStack itemstack : items)
if(!itemstack.isEmpty())
return false;
return true;
}
@Override
public ItemStack getStackInSlot(int index) {
return items.get(index);
}
@Override
public ItemStack decrStackSize(int index, int count) {
return ItemStackHelper.getAndSplit(items, index, count);
}
@Override
public ItemStack removeStackFromSlot(int index) {
ItemStack itemstack = items.get(index);
if(itemstack.isEmpty())
return ItemStack.EMPTY;
else {
items.set(index, ItemStack.EMPTY);
return itemstack;
}
}
@Override
public void setInventorySlotContents(int index, ItemStack stack) {
items.set(index, stack);
}
@Override
public int getInventoryStackLimit() {
return 64;
}
@Override
public void markDirty() {
// NO-OP
}
@Override
public boolean isUsableByPlayer(EntityPlayer player) {
return !isDead && player.getDistanceSq(this) <= 64;
}
@Override
public void openInventory(EntityPlayer player) {
// NO-OP
}
@Override
public void closeInventory(EntityPlayer player) {
// NO-OP
}
@Override
public boolean isItemValidForSlot(int index, ItemStack stack) {
return true;
}
@Override
public int getField(int id) {
return 0;
}
@Override
public void setField(int id, int value) {
// NO-OP
}
@Override
public int getFieldCount() {
return 0;
}
@Override
public void clear() {
items.clear();
}
@Override
public void setDead() {
if(!world.isRemote) {
InventoryHelper.dropInventoryItems(world, this, this);
InventoryHelper.spawnItemStack(world, posX, posY, posZ, getChestType());
}
super.setDead();
}
@Override
protected void writeEntityToNBT(NBTTagCompound compound) {
ItemStackHelper.saveAllItems(compound, items);
NBTTagCompound itemCmp = new NBTTagCompound();
dataManager.get(CHEST_TYPE).writeToNBT(itemCmp);
compound.setTag(TAG_CHEST_TYPE, itemCmp);
}
@Override
protected void readEntityFromNBT(NBTTagCompound compound) {
ItemStackHelper.loadAllItems(compound, items);
NBTTagCompound itemCmp = compound.getCompoundTag(TAG_CHEST_TYPE);
ItemStack stack = new ItemStack(itemCmp);
if(!stack.isEmpty())
dataManager.set(CHEST_TYPE, stack);
}
public ItemStack getChestType() {
return dataManager.get(CHEST_TYPE);
}
}
|
/**
* Author: Crownstone Team
* Copyright: Crownstone (https://crownstone.rocks)
* Date: 28 May, 2019
* Triple-license: LGPLv3+, Apache License, and/or MIT
*/
#pragma once
/**
* Use this config file to overwrite values in sdk_config.h.
*
* The sdk_config.h is a copy of SDK_15-3/examples/dfu/secure_bootloader/pca10040_ble_debug/config/sdk_config.h
*/
#define NRF_BL_DFU_ENTER_METHOD_BUTTON 0
#define NRF_BL_DFU_ENTER_METHOD_PINRESET 0
#define NRF_BL_DFU_ENTER_METHOD_GPREGRET 1
#define NRF_BL_DFU_ENTER_METHOD_BUTTONLESS 0
#define NRF_DFU_IN_APP 0
#define NRF_DFU_SAVE_PROGRESS_IN_FLASH 0
#define NRF_DFU_APP_DOWNGRADE_PREVENTION 0
#define NRF_DFU_APP_ACCEPT_SAME_VERSION 1
#define NRF_DFU_REQUIRE_SIGNED_APP_UPDATE 0
/**
 * Reserved number of flash pages used for app data:
 * 4 for FDS (FDS_PHY_PAGES),
 * 1 for mesh access (ACCESS_FLASH_PAGE_COUNT),
 * 1 for mesh dsm (DSM_FLASH_PAGE_COUNT),
 * 1 for mesh defrag/garbage collection,
 * 1 for mesh recovery(?).
 * See how_to_nordicSDK.md and https://devzone.nordicsemi.com/f/nordic-q-a/42632/mesh-sdk---nrf52840-flash-page-issue
*/
#define NRF_DFU_APP_DATA_AREA_SIZE ((4+4)*4096)
//! Device information service.
#define BLE_DIS_ENABLED 1
/**
* By default, the bootloader changes MAC address when in bootloader mode.
 * This prevents connection issues where services are cached, etc.
 * But, since iOS devices don't get to see the MAC address, they don't know which device is in bootloader mode.
 * So we adjusted the code so that, with this define, the bootloader does not change its MAC address, making it easy to find.
*/
#define CS_DFU_CHANGE_MAC_ADDRESS 0
/**
* For iOS, the absence of the service changed characteristic makes it always discover services.
*/
#define NRF_SDH_BLE_SERVICE_CHANGED 1
#if CS_SERIAL_NRF_LOG_ENABLED > 0
#define NRF_LOG_ENABLED 1
#else
#define NRF_LOG_ENABLED 0
#endif
//! Log data is buffered and can be processed in idle
#define NRF_LOG_DEFERRED 1
// <0=> Off
// <1=> Error
// <2=> Warning
// <3=> Info
// <4=> Debug
#define NRF_LOG_DEFAULT_LEVEL 3
#define NRF_SDH_SOC_LOG_LEVEL 3
#define NRF_SDH_BLE_LOG_LEVEL 3
#define NRF_SDH_LOG_LEVEL 3
#define NRF_LOG_USES_COLORS 1
#define NRF_LOG_WARNING_COLOR 4
#define NRF_LOG_USES_TIMESTAMP 0
#define NRF_FPRINTF_ENABLED 1
#define NRF_FPRINTF_FLAG_AUTOMATIC_CR_ON_LF_ENABLED 1
#if CS_SERIAL_NRF_LOG_ENABLED == 1
#define NRF_LOG_BACKEND_RTT_ENABLED 1
#else
#define NRF_LOG_BACKEND_RTT_ENABLED 0
#endif
#if CS_SERIAL_NRF_LOG_ENABLED == 2
#define NRF_LOG_BACKEND_UART_ENABLED 1
#else
#define NRF_LOG_BACKEND_UART_ENABLED 0
#endif
#define NRF_LOG_BACKEND_UART_TX_PIN CS_SERIAL_NRF_LOG_PIN_TX
#if CS_SERIAL_NRF_LOG_ENABLED == 2
// UARTE_ENABLED is overwritten by apply_old_config.h
#define UARTE_ENABLED 1
#define UART0_ENABLED 1
#define UART_ENABLED 1
// It wouldn't compile when using UARTE, so use normal UART instead.
#define UART_LEGACY_SUPPORT 1
#define UART_EASY_DMA_SUPPORT 0
#define NRFX_UARTE_DEFAULT_CONFIG_HWFC 0
#define NRFX_UARTE_DEFAULT_CONFIG_PARITY 0
#endif
// <323584=> 1200 baud
// <643072=> 2400 baud
// <1290240=> 4800 baud
// <2576384=> 9600 baud
// <3862528=> 14400 baud
// <5152768=> 19200 baud
// <7716864=> 28800 baud
// <10289152=> 38400 baud
// <15400960=> 57600 baud
// <20615168=> 76800 baud
// <30801920=> 115200 baud
// <61865984=> 230400 baud
// <67108864=> 250000 baud
// <121634816=> 460800 baud
// <251658240=> 921600 baud
// <268435456=> 1000000 baud
#define NRF_LOG_BACKEND_UART_BAUDRATE 61865984
/**
* Settings below were missing from the sdk_config.h
* They're copied from some example sdk_config.h
*/
// <o> NRF_LOG_BACKEND_UART_TEMP_BUFFER_SIZE - Size of buffer for partially processed strings.
// <i> Size of the buffer is a trade-off between RAM usage and processing.
// <i> if buffer is smaller then strings will often be fragmented.
// <i> It is recommended to use size which will fit typical log and only the
// <i> longer one will be fragmented.
#ifndef NRF_LOG_BACKEND_UART_TEMP_BUFFER_SIZE
#define NRF_LOG_BACKEND_UART_TEMP_BUFFER_SIZE 64
#endif
// <o> UART_DEFAULT_CONFIG_HWFC - Hardware Flow Control
// <0=> Disabled
// <1=> Enabled
#ifndef UART_DEFAULT_CONFIG_HWFC
#define UART_DEFAULT_CONFIG_HWFC 0
#endif
// <o> UART_DEFAULT_CONFIG_PARITY - Parity
// <0=> Excluded
// <14=> Included
#ifndef UART_DEFAULT_CONFIG_PARITY
#define UART_DEFAULT_CONFIG_PARITY 0
#endif
// <o> UART_DEFAULT_CONFIG_BAUDRATE - Default Baudrate
// <323584=> 1200 baud
// <643072=> 2400 baud
// <1290240=> 4800 baud
// <2576384=> 9600 baud
// <3862528=> 14400 baud
// <5152768=> 19200 baud
// <7716864=> 28800 baud
// <10289152=> 38400 baud
// <15400960=> 57600 baud
// <20615168=> 76800 baud
// <30801920=> 115200 baud
// <61865984=> 230400 baud
// <67108864=> 250000 baud
// <121634816=> 460800 baud
// <251658240=> 921600 baud
// <268435456=> 1000000 baud
#ifndef UART_DEFAULT_CONFIG_BAUDRATE
#define UART_DEFAULT_CONFIG_BAUDRATE 30801920
#endif
// <i> Priorities 0,2 (nRF51) and 0,1,4,5 (nRF52) are reserved for SoftDevice
// <0=> 0 (highest)
// <1=> 1
// <2=> 2
// <3=> 3
// <4=> 4
// <5=> 5
// <6=> 6
// <7=> 7
#ifndef UART_DEFAULT_CONFIG_IRQ_PRIORITY
#define UART_DEFAULT_CONFIG_IRQ_PRIORITY 6
#endif
|
/* $Id: aty128.h,v 1.1 1999/10/12 11:00:40 geert Exp $
* linux/drivers/video/aty128.h
* Register definitions for ATI Rage128 boards
*
* Anthony Tong <atong@uiuc.edu>, 1999
* Brad Douglas <brad@neruo.com>, 2000
*/
#ifndef REG_RAGE128_H
#define REG_RAGE128_H
#define CLOCK_CNTL_INDEX 0x0008
#define CLOCK_CNTL_DATA 0x000c
#define BIOS_0_SCRATCH 0x0010
#define BUS_CNTL 0x0030
#define BUS_CNTL1 0x0034
#define GEN_INT_CNTL 0x0040
#define CRTC_GEN_CNTL 0x0050
#define CRTC_EXT_CNTL 0x0054
#define DAC_CNTL 0x0058
#define I2C_CNTL_1 0x0094
#define PALETTE_INDEX 0x00b0
#define PALETTE_DATA 0x00b4
#define CONFIG_CNTL 0x00e0
#define GEN_RESET_CNTL 0x00f0
#define CONFIG_MEMSIZE 0x00f8
#define MEM_CNTL 0x0140
#define MEM_POWER_MISC 0x015c
#define AGP_BASE 0x0170
#define AGP_CNTL 0x0174
#define AGP_APER_OFFSET 0x0178
#define PCI_GART_PAGE 0x017c
#define PC_NGUI_MODE 0x0180
#define PC_NGUI_CTLSTAT 0x0184
#define MPP_TB_CONFIG 0x01C0
#define MPP_GP_CONFIG 0x01C8
#define VIPH_CONTROL 0x01D0
#define CRTC_H_TOTAL_DISP 0x0200
#define CRTC_H_SYNC_STRT_WID 0x0204
#define CRTC_V_TOTAL_DISP 0x0208
#define CRTC_V_SYNC_STRT_WID 0x020c
#define CRTC_VLINE_CRNT_VLINE 0x0210
#define CRTC_CRNT_FRAME 0x0214
#define CRTC_GUI_TRIG_VLINE 0x0218
#define CRTC_OFFSET 0x0224
#define CRTC_OFFSET_CNTL 0x0228
#define CRTC_PITCH 0x022c
#define OVR_CLR 0x0230
#define OVR_WID_LEFT_RIGHT 0x0234
#define OVR_WID_TOP_BOTTOM 0x0238
#define LVDS_GEN_CNTL 0x02d0
#define DDA_CONFIG 0x02e0
#define DDA_ON_OFF 0x02e4
#define VGA_DDA_CONFIG 0x02e8
#define VGA_DDA_ON_OFF 0x02ec
#define CRTC2_H_TOTAL_DISP 0x0300
#define CRTC2_H_SYNC_STRT_WID 0x0304
#define CRTC2_V_TOTAL_DISP 0x0308
#define CRTC2_V_SYNC_STRT_WID 0x030c
#define CRTC2_VLINE_CRNT_VLINE 0x0310
#define CRTC2_CRNT_FRAME 0x0314
#define CRTC2_GUI_TRIG_VLINE 0x0318
#define CRTC2_OFFSET 0x0324
#define CRTC2_OFFSET_CNTL 0x0328
#define CRTC2_PITCH 0x032c
#define DDA2_CONFIG 0x03e0
#define DDA2_ON_OFF 0x03e4
#define CRTC2_GEN_CNTL 0x03f8
#define CRTC2_STATUS 0x03fc
#define OV0_SCALE_CNTL 0x0420
#define SUBPIC_CNTL 0x0540
#define PM4_BUFFER_OFFSET 0x0700
#define PM4_BUFFER_CNTL 0x0704
#define PM4_BUFFER_WM_CNTL 0x0708
#define PM4_BUFFER_DL_RPTR_ADDR 0x070c
#define PM4_BUFFER_DL_RPTR 0x0710
#define PM4_BUFFER_DL_WPTR 0x0714
#define PM4_VC_FPU_SETUP 0x071c
#define PM4_FPU_CNTL 0x0720
#define PM4_VC_FORMAT 0x0724
#define PM4_VC_CNTL 0x0728
#define PM4_VC_I01 0x072c
#define PM4_VC_VLOFF 0x0730
#define PM4_VC_VLSIZE 0x0734
#define PM4_IW_INDOFF 0x0738
#define PM4_IW_INDSIZE 0x073c
#define PM4_FPU_FPX0 0x0740
#define PM4_FPU_FPY0 0x0744
#define PM4_FPU_FPX1 0x0748
#define PM4_FPU_FPY1 0x074c
#define PM4_FPU_FPX2 0x0750
#define PM4_FPU_FPY2 0x0754
#define PM4_FPU_FPY3 0x0758
#define PM4_FPU_FPY4 0x075c
#define PM4_FPU_FPY5 0x0760
#define PM4_FPU_FPY6 0x0764
#define PM4_FPU_FPR 0x0768
#define PM4_FPU_FPG 0x076c
#define PM4_FPU_FPB 0x0770
#define PM4_FPU_FPA 0x0774
#define PM4_FPU_INTXY0 0x0780
#define PM4_FPU_INTXY1 0x0784
#define PM4_FPU_INTXY2 0x0788
#define PM4_FPU_INTARGB 0x078c
#define PM4_FPU_FPTWICEAREA 0x0790
#define PM4_FPU_DMAJOR01 0x0794
#define PM4_FPU_DMAJOR12 0x0798
#define PM4_FPU_DMAJOR02 0x079c
#define PM4_FPU_STAT 0x07a0
#define PM4_STAT 0x07b8
#define PM4_TEST_CNTL 0x07d0
#define PM4_MICROCODE_ADDR 0x07d4
#define PM4_MICROCODE_RADDR 0x07d8
#define PM4_MICROCODE_DATAH 0x07dc
#define PM4_MICROCODE_DATAL 0x07e0
#define PM4_CMDFIFO_ADDR 0x07e4
#define PM4_CMDFIFO_DATAH 0x07e8
#define PM4_CMDFIFO_DATAL 0x07ec
#define PM4_BUFFER_ADDR 0x07f0
#define PM4_BUFFER_DATAH 0x07f4
#define PM4_BUFFER_DATAL 0x07f8
#define PM4_MICRO_CNTL 0x07fc
#define CAP0_TRIG_CNTL 0x0950
#define CAP1_TRIG_CNTL 0x09c0
/******************************************************************************
* GUI Block Memory Mapped Registers *
* These registers are FIFOed. *
*****************************************************************************/
#define PM4_FIFO_DATA_EVEN 0x1000
#define PM4_FIFO_DATA_ODD 0x1004
#define DST_OFFSET 0x1404
#define DST_PITCH 0x1408
#define DST_WIDTH 0x140c
#define DST_HEIGHT 0x1410
#define SRC_X 0x1414
#define SRC_Y 0x1418
#define DST_X 0x141c
#define DST_Y 0x1420
#define SRC_PITCH_OFFSET 0x1428
#define DST_PITCH_OFFSET 0x142c
#define SRC_Y_X 0x1434
#define DST_Y_X 0x1438
#define DST_HEIGHT_WIDTH 0x143c
#define DP_GUI_MASTER_CNTL 0x146c
#define BRUSH_SCALE 0x1470
#define BRUSH_Y_X 0x1474
#define DP_BRUSH_BKGD_CLR 0x1478
#define DP_BRUSH_FRGD_CLR 0x147c
#define DST_WIDTH_X 0x1588
#define DST_HEIGHT_WIDTH_8 0x158c
#define SRC_X_Y 0x1590
#define DST_X_Y 0x1594
#define DST_WIDTH_HEIGHT 0x1598
#define DST_WIDTH_X_INCY 0x159c
#define DST_HEIGHT_Y 0x15a0
#define DST_X_SUB 0x15a4
#define DST_Y_SUB 0x15a8
#define SRC_OFFSET 0x15ac
#define SRC_PITCH 0x15b0
#define DST_HEIGHT_WIDTH_BW 0x15b4
#define CLR_CMP_CNTL 0x15c0
#define CLR_CMP_CLR_SRC 0x15c4
#define CLR_CMP_CLR_DST 0x15c8
#define CLR_CMP_MASK 0x15cc
#define DP_SRC_FRGD_CLR 0x15d8
#define DP_SRC_BKGD_CLR 0x15dc
#define DST_BRES_ERR 0x1628
#define DST_BRES_INC 0x162c
#define DST_BRES_DEC 0x1630
#define DST_BRES_LNTH 0x1634
#define DST_BRES_LNTH_SUB 0x1638
#define SC_LEFT 0x1640
#define SC_RIGHT 0x1644
#define SC_TOP 0x1648
#define SC_BOTTOM 0x164c
#define SRC_SC_RIGHT 0x1654
#define SRC_SC_BOTTOM 0x165c
#define GUI_DEBUG0 0x16a0
#define GUI_DEBUG1 0x16a4
#define GUI_TIMEOUT 0x16b0
#define GUI_TIMEOUT0 0x16b4
#define GUI_TIMEOUT1 0x16b8
#define GUI_PROBE 0x16bc
#define DP_CNTL 0x16c0
#define DP_DATATYPE 0x16c4
#define DP_MIX 0x16c8
#define DP_WRITE_MASK 0x16cc
#define DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
#define DEFAULT_OFFSET 0x16e0
#define DEFAULT_PITCH 0x16e4
#define DEFAULT_SC_BOTTOM_RIGHT 0x16e8
#define SC_TOP_LEFT 0x16ec
#define SC_BOTTOM_RIGHT 0x16f0
#define SRC_SC_BOTTOM_RIGHT 0x16f4
#define WAIT_UNTIL 0x1720
#define CACHE_CNTL 0x1724
#define GUI_STAT 0x1740
#define PC_GUI_MODE 0x1744
#define PC_GUI_CTLSTAT 0x1748
#define PC_DEBUG_MODE 0x1760
#define BRES_DST_ERR_DEC 0x1780
#define TRAIL_BRES_T12_ERR_DEC 0x1784
#define TRAIL_BRES_T12_INC 0x1788
#define DP_T12_CNTL 0x178c
#define DST_BRES_T1_LNTH 0x1790
#define DST_BRES_T2_LNTH 0x1794
#define SCALE_SRC_HEIGHT_WIDTH 0x1994
#define SCALE_OFFSET_0 0x1998
#define SCALE_PITCH 0x199c
#define SCALE_X_INC 0x19a0
#define SCALE_Y_INC 0x19a4
#define SCALE_HACC 0x19a8
#define SCALE_VACC 0x19ac
#define SCALE_DST_X_Y 0x19b0
#define SCALE_DST_HEIGHT_WIDTH 0x19b4
#define SCALE_3D_CNTL 0x1a00
#define SCALE_3D_DATATYPE 0x1a20
#define SETUP_CNTL 0x1bc4
#define SOLID_COLOR 0x1bc8
#define WINDOW_XY_OFFSET 0x1bcc
#define DRAW_LINE_POINT 0x1bd0
#define SETUP_CNTL_PM4 0x1bd4
#define DST_PITCH_OFFSET_C 0x1c80
#define DP_GUI_MASTER_CNTL_C 0x1c84
#define SC_TOP_LEFT_C 0x1c88
#define SC_BOTTOM_RIGHT_C 0x1c8c
#define CLR_CMP_MASK_3D 0x1A28
#define MISC_3D_STATE_CNTL_REG 0x1CA0
#define MC_SRC1_CNTL 0x19D8
#define TEX_CNTL 0x1800
/* CONSTANTS */
#define GUI_ACTIVE 0x80000000
#define ENGINE_IDLE 0x0
#define PLL_WR_EN 0x00000080
#define CLK_PIN_CNTL 0x0001
#define PPLL_CNTL 0x0002
#define PPLL_REF_DIV 0x0003
#define PPLL_DIV_0 0x0004
#define PPLL_DIV_1 0x0005
#define PPLL_DIV_2 0x0006
#define PPLL_DIV_3 0x0007
#define VCLK_ECP_CNTL 0x0008
#define HTOTAL_CNTL 0x0009
#define X_MPLL_REF_FB_DIV 0x000a
#define XPLL_CNTL 0x000b
#define XDLL_CNTL 0x000c
#define XCLK_CNTL 0x000d
#define MPLL_CNTL 0x000e
#define MCLK_CNTL 0x000f
#define AGP_PLL_CNTL 0x0010
#define FCP_CNTL 0x0012
#define PLL_TEST_CNTL 0x0013
#define P2PLL_CNTL 0x002a
#define P2PLL_REF_DIV 0x002b
#define P2PLL_DIV_0 0x002b
#define POWER_MANAGEMENT 0x002f
#define PPLL_RESET 0x01
#define PPLL_ATOMIC_UPDATE_EN 0x10000
#define PPLL_VGA_ATOMIC_UPDATE_EN 0x20000
#define PPLL_REF_DIV_MASK 0x3FF
#define PPLL_FB3_DIV_MASK 0x7FF
#define PPLL_POST3_DIV_MASK 0x70000
#define PPLL_ATOMIC_UPDATE_R 0x8000
#define PPLL_ATOMIC_UPDATE_W 0x8000
#define MEM_CFG_TYPE_MASK 0x3
#define XCLK_SRC_SEL_MASK 0x7
#define XPLL_FB_DIV_MASK 0xFF00
#define X_MPLL_REF_DIV_MASK 0xFF
/* CRTC control values (CRTC_GEN_CNTL) */
#define CRTC_CSYNC_EN 0x00000010
#define CRTC2_DBL_SCAN_EN 0x00000001
#define CRTC2_DISPLAY_DIS 0x00800000
#define CRTC2_FIFO_EXTSENSE 0x00200000
#define CRTC2_ICON_EN 0x00100000
#define CRTC2_CUR_EN 0x00010000
#define CRTC2_EN 0x02000000
#define CRTC2_DISP_REQ_EN_B 0x04000000
#define CRTC_PIX_WIDTH_MASK 0x00000700
#define CRTC_PIX_WIDTH_4BPP 0x00000100
#define CRTC_PIX_WIDTH_8BPP 0x00000200
#define CRTC_PIX_WIDTH_15BPP 0x00000300
#define CRTC_PIX_WIDTH_16BPP 0x00000400
#define CRTC_PIX_WIDTH_24BPP 0x00000500
#define CRTC_PIX_WIDTH_32BPP 0x00000600
/* DAC_CNTL bit constants */
#define DAC_8BIT_EN 0x00000100
#define DAC_MASK 0xFF000000
#define DAC_BLANKING 0x00000004
#define DAC_RANGE_CNTL 0x00000003
#define DAC_CLK_SEL 0x00000010
#define DAC_PALETTE_ACCESS_CNTL 0x00000020
#define DAC_PALETTE2_SNOOP_EN 0x00000040
#define DAC_PDWN 0x00008000
/* CRTC_EXT_CNTL */
#define CRT_CRTC_ON 0x00008000
/* GEN_RESET_CNTL bit constants */
#define SOFT_RESET_GUI 0x00000001
#define SOFT_RESET_VCLK 0x00000100
#define SOFT_RESET_PCLK 0x00000200
#define SOFT_RESET_ECP 0x00000400
#define SOFT_RESET_DISPENG_XCLK 0x00000800
/* PC_GUI_CTLSTAT bit constants */
#define PC_BUSY_INIT 0x10000000
#define PC_BUSY_GUI 0x20000000
#define PC_BUSY_NGUI 0x40000000
#define PC_BUSY 0x80000000
#define BUS_MASTER_DIS 0x00000040
#define PM4_BUFFER_CNTL_NONPM4 0x00000000
/* DP_DATATYPE bit constants */
#define DST_8BPP 0x00000002
#define DST_15BPP 0x00000003
#define DST_16BPP 0x00000004
#define DST_24BPP 0x00000005
#define DST_32BPP 0x00000006
#define BRUSH_SOLIDCOLOR 0x00000d00
/* DP_GUI_MASTER_CNTL bit constants */
#define GMC_SRC_PITCH_OFFSET_DEFAULT 0x00000000
#define GMC_DST_PITCH_OFFSET_DEFAULT 0x00000000
#define GMC_SRC_CLIP_DEFAULT 0x00000000
#define GMC_DST_CLIP_DEFAULT 0x00000000
#define GMC_BRUSH_SOLIDCOLOR 0x000000d0
#define GMC_SRC_DSTCOLOR 0x00003000
#define GMC_BYTE_ORDER_MSB_TO_LSB 0x00000000
#define GMC_DP_SRC_RECT 0x02000000
#define GMC_3D_FCN_EN_CLR 0x00000000
#define GMC_AUX_CLIP_CLEAR 0x20000000
#define GMC_DST_CLR_CMP_FCN_CLEAR 0x10000000
#define GMC_WRITE_MASK_SET 0x40000000
#define GMC_DP_CONVERSION_TEMP_6500 0x00000000
/* DP_GUI_MASTER_CNTL ROP3 named constants */
#define ROP3_PATCOPY 0x00f00000
#define ROP3_SRCCOPY 0x00cc0000
#define SRC_DSTCOLOR 0x00030000
/* DP_CNTL bit constants */
#define DST_X_RIGHT_TO_LEFT 0x00000000
#define DST_X_LEFT_TO_RIGHT 0x00000001
#define DST_Y_BOTTOM_TO_TOP 0x00000000
#define DST_Y_TOP_TO_BOTTOM 0x00000002
#define DST_X_MAJOR 0x00000000
#define DST_Y_MAJOR 0x00000004
#define DST_X_TILE 0x00000008
#define DST_Y_TILE 0x00000010
#define DST_LAST_PEL 0x00000020
#define DST_TRAIL_X_RIGHT_TO_LEFT 0x00000000
#define DST_TRAIL_X_LEFT_TO_RIGHT 0x00000040
#define DST_TRAP_FILL_RIGHT_TO_LEFT 0x00000000
#define DST_TRAP_FILL_LEFT_TO_RIGHT 0x00000080
#define DST_BRES_SIGN 0x00000100
#define DST_HOST_BIG_ENDIAN_EN 0x00000200
#define DST_POLYLINE_NONLAST 0x00008000
#define DST_RASTER_STALL 0x00010000
#define DST_POLY_EDGE 0x00040000
/* DP_MIX bit constants */
#define DP_SRC_RECT 0x00000200
#define DP_SRC_HOST 0x00000300
#define DP_SRC_HOST_BYTEALIGN 0x00000400
/* LVDS_GEN_CNTL constants */
#define LVDS_BL_MOD_LEVEL_MASK 0x0000ff00
#define LVDS_BL_MOD_LEVEL_SHIFT 8
#define LVDS_BL_MOD_EN 0x00010000
#define LVDS_DIGION 0x00040000
#define LVDS_BLON 0x00080000
#define LVDS_ON 0x00000001
#define LVDS_DISPLAY_DIS 0x00000002
#define LVDS_PANEL_TYPE_2PIX_PER_CLK 0x00000004
#define LVDS_PANEL_24BITS_TFT 0x00000008
#define LVDS_FRAME_MOD_NO 0x00000000
#define LVDS_FRAME_MOD_2_LEVELS 0x00000010
#define LVDS_FRAME_MOD_4_LEVELS 0x00000020
#define LVDS_RST_FM 0x00000040
#define LVDS_EN 0x00000080
/* CRTC2_GEN_CNTL constants */
#define CRTC2_EN 0x02000000
/* POWER_MANAGEMENT constants */
#define PWR_MGT_ON 0x00000001
#define PWR_MGT_MODE_MASK 0x00000006
#define PWR_MGT_MODE_PIN 0x00000000
#define PWR_MGT_MODE_REGISTER 0x00000002
#define PWR_MGT_MODE_TIMER 0x00000004
#define PWR_MGT_MODE_PCI 0x00000006
#define PWR_MGT_AUTO_PWR_UP_EN 0x00000008
#define PWR_MGT_ACTIVITY_PIN_ON 0x00000010
#define PWR_MGT_STANDBY_POL 0x00000020
#define PWR_MGT_SUSPEND_POL 0x00000040
#define PWR_MGT_SELF_REFRESH 0x00000080
#define PWR_MGT_ACTIVITY_PIN_EN 0x00000100
#define PWR_MGT_KEYBD_SNOOP 0x00000200
#define PWR_MGT_TRISTATE_MEM_EN 0x00000800
#define PWR_MGT_SELW4MS 0x00001000
#define PWR_MGT_SLOWDOWN_MCLK 0x00002000
#define PMI_PMSCR_REG 0x60
/* used by ATI bug fix for hardware ROM */
#define RAGE128_MPP_TB_CONFIG 0x01c0
#endif /* REG_RAGE128_H */
|
<filename>buildSrc/src/main/java/org/zaproxy/gradle/tasks/DownloadWebDriver.java
/*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2019 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.gradle.tasks;
import io.github.bonigarcia.wdm.WebDriverManager;
import io.github.bonigarcia.wdm.config.Architecture;
import io.github.bonigarcia.wdm.config.DriverManagerType;
import io.github.bonigarcia.wdm.config.OperatingSystem;
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Locale;
import javax.inject.Inject;
import org.gradle.api.DefaultTask;
import org.gradle.api.file.ConfigurableFileCollection;
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.TaskAction;
import org.gradle.workers.WorkAction;
import org.gradle.workers.WorkParameters;
import org.gradle.workers.WorkQueue;
import org.gradle.workers.WorkerExecutor;
public abstract class DownloadWebDriver extends DefaultTask {
public enum Browser {
CHROME,
FIREFOX
}
public enum OS {
LINUX,
MAC,
WIN
}
public enum Arch {
X32,
X64,
ARM64
}
@Input
public abstract Property<Browser> getBrowser();
@Input
public abstract Property<String> getVersion();
@Input
public abstract Property<OS> getOs();
@Input
public abstract Property<Arch> getArch();
@OutputFile
public abstract RegularFileProperty getOutputFile();
@Inject
public abstract WorkerExecutor getWorkerExecutor();
@Classpath
public abstract ConfigurableFileCollection getWebdriverClasspath();
@TaskAction
public void download() {
WorkQueue workQueue =
getWorkerExecutor()
.classLoaderIsolation(
workerSpec ->
workerSpec.getClasspath().from(getWebdriverClasspath()));
workQueue.submit(
Download.class,
params -> {
params.getBrowser().set(DriverManagerType.valueOf(toUpperCase(getBrowser())));
params.getWdVersion().set(getVersion().get());
params.getOs().set(OperatingSystem.valueOf(toUpperCase(getOs())));
params.getArch().set(Architecture.valueOf(toUpperCase(getArch())));
params.getOutputFile().set(getOutputFile());
});
}
private static String toUpperCase(Property<? extends Enum<?>> property) {
return property.get().name().toUpperCase(Locale.ROOT);
}
public interface DownloadWorkParameters extends WorkParameters {
Property<DriverManagerType> getBrowser();
Property<String> getWdVersion();
Property<OperatingSystem> getOs();
Property<Architecture> getArch();
RegularFileProperty getOutputFile();
}
public abstract static class Download implements WorkAction<DownloadWorkParameters> {
@Override
public void execute() {
WebDriverManager wdm = getInstance(getParameters().getBrowser().get());
wdm.driverVersion(getParameters().getWdVersion().get())
.operatingSystem(getParameters().getOs().get())
.architecture(getParameters().getArch().get())
.setup();
File outputFile = getParameters().getOutputFile().get().getAsFile();
try {
Files.copy(
Paths.get(wdm.getDownloadedDriverPath()),
outputFile.toPath(),
StandardCopyOption.REPLACE_EXISTING);
} catch (IOException e) {
throw new UncheckedIOException(
"Failed to copy the WebDriver from "
+ wdm.getDownloadedDriverPath()
+ " to "
+ outputFile,
e);
}
}
private static WebDriverManager getInstance(DriverManagerType browser) {
switch (browser) {
case CHROME:
return WebDriverManager.chromedriver();
case FIREFOX:
return WebDriverManager.firefoxdriver();
default:
throw new UnsupportedOperationException(
"Only Chrome and Firefox are currently supported.");
}
}
}
}
|
Gastroenteropancreatic neuroendocrine tumors. A histochemical and immunohistochemical study of epithelial (keratin proteins, carcinoembryonic antigen) and neuroendocrine (neuron-specific enolase, bombesin and chromogranin) markers in foregut, midgut, and hindgut tumors. Thirty-four gastroenteropancreatic (GEP) neuroendocrine tumors were evaluated for expression of epithelial (keratin, carcinoembryonic antigen) and neuroendocrine (neuron-specific enolase, chromogranin, bombesin) markers, and the results were correlated with histologic patterns and histochemical staining. Tumors of mixed pattern (insular or trabecular with glandular areas) predominated. CEA localization corresponded to staining for mucin, with polarized apical or lumenal staining in glandular areas. Four trabecular midgut carcinoids, however, revealed diffuse cytoplasmic staining for CEA. Staining for keratin proteins was present in 68% of tumors. Bombesin immunoreactivity was demonstrated in 60% of GEP neuroendocrine tumors, indicating that bombesin-positive metastatic tumors may not be predominantly of pulmonary origin, as previously suggested. Chromogranin was a sensitive marker for identifying normal gastrointestinal neuroendocrine cells that were not demonstrated by staining for neuron-specific enolase. Chromogranin was present in most neuroendocrine tumors, but was absent from three of five rectal carcinoids, in keeping with the distinctive profile of hormonal and silver staining in these tumors. All GEP neuroendocrine neoplasms expressed both neuroendocrine and epithelial markers, supporting their derivation from endodermal epithelium. |
<reponame>paskalev2001/NerdChan
import wolframalpha
import speech_recognition as sr
import wikipedia
import pyttsx3
import PySimpleGUI as sg
import sys
def readConfig(name):
configFile = open("./config/"+name,'r')
configLines = configFile.readlines()
config = {}
for line in configLines:
line = line.replace('\n','')
kvp = line.split('=')
config[kvp[0]] = kvp[1]
if(config['apiKey']==''):
sg.PopupNonBlocking("Warning: Wolfram api key is not set therefore NerdChan will only return results from wikipedia. \n Visit https://developer.wolframalpha.com/portal/myapps/ to get one and write it in the config file.")
return config
def voiceRec():
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("Say something!")
audio = r.listen(source)
# recognize speech using Sphinx
try:
voice_out = r.recognize_sphinx(audio)
print("Sphinx thinks you said " + voice_out)
return voice_out
except sr.UnknownValueError:
print("Sphinx could not understand audio")
except sr.RequestError as e:
print("Sphinx error; {0}".format(e))
def wikiWolf(currentConfig):
while True:
event, values = window.read()
if event in (None, 'Close'):
break
if event == 'Voice':
values[0] = voiceRec()
try:
wiki_res = wikipedia.summary(values[0], sentences=currentConfig["wikiLenght"])
wolfram_res = next(client.query(values[0]).results).text
engine.say(wolfram_res)
sg.PopupNonBlocking("Wolfram Result: "+wolfram_res,"Wikipedia Result: "+wiki_res)
except wikipedia.exceptions.DisambiguationError:
wolfram_res = next(client.query(values[0]).results).text
engine.say(wolfram_res)
sg.PopupNonBlocking(wolfram_res)
except wikipedia.exceptions.PageError:
wolfram_res = next(client.query(values[0]).results).text
engine.say(wolfram_res)
sg.PopupNonBlocking(wolfram_res)
except:
wiki_res = wikipedia.summary(values[0], sentences=currentConfig["wikiLenght"])
engine.say(wiki_res)
sg.PopupNonBlocking(wiki_res)
engine.runAndWait()
#print (values[0])
window.close()
currentConfig = {}
if (len(sys.argv) == 1):
currentConfig = readConfig("Default.config")
elif (len(sys.argv) == 2):
currentConfig = readConfig(sys.argv[1])
else:
print("Usage: python3 NerdChan.py SomeFile.config (Optional)")
sys.exit()
client = wolframalpha.Client(currentConfig["apiKey"])
sg.theme(currentConfig["theme"])
layout =[
[sg.Text('Ask me something :)'), sg.InputText()],
[sg.Button('Ask'), sg.Button('Close'),sg.Button('Voice')]
]
window = sg.Window('NerdChan', layout)
engine = pyttsx3.init()
engine.setProperty('rate', currentConfig["rate"])
engine.setProperty('voice', currentConfig["voice"])
wikiWolf(currentConfig)
|
/*
* This Java file has been generated by smidump 0.4.5. Do not edit!
* It is intended to be used within a Java AgentX sub-agent environment.
*
* $Id: SerialConnectionEntry.java 1458 2006-05-29 16:21:11Z strauss $
*/
/**
This class represents a Java AgentX (JAX) implementation of
the table row serialConnectionEntry defined in RMON2-MIB.
@version 1
@author smidump 0.4.5
@see AgentXTable, AgentXEntry
*/
import jax.AgentXOID;
import jax.AgentXSetPhase;
import jax.AgentXResponsePDU;
import jax.AgentXEntry;
public class SerialConnectionEntry extends AgentXEntry
{
protected int serialConnectIndex = 0;
protected byte[] serialConnectDestIpAddress = new byte[4];
protected byte[] undo_serialConnectDestIpAddress = new byte[4];
protected int serialConnectType = 0;
protected int undo_serialConnectType = 0;
protected byte[] serialConnectDialString = new byte[0];
protected byte[] undo_serialConnectDialString = new byte[0];
protected byte[] serialConnectSwitchConnectSeq = new byte[0];
protected byte[] undo_serialConnectSwitchConnectSeq = new byte[0];
protected byte[] serialConnectSwitchDisconnectSeq = new byte[0];
protected byte[] undo_serialConnectSwitchDisconnectSeq = new byte[0];
protected byte[] serialConnectSwitchResetSeq = new byte[0];
protected byte[] undo_serialConnectSwitchResetSeq = new byte[0];
protected byte[] serialConnectOwner = new byte[0];
protected byte[] undo_serialConnectOwner = new byte[0];
protected int serialConnectStatus = 0;
protected int undo_serialConnectStatus = 0;
public SerialConnectionEntry(int serialConnectIndex)
{
this.serialConnectIndex = serialConnectIndex;
instance.append(serialConnectIndex);
}
public int get_serialConnectIndex()
{
return serialConnectIndex;
}
public byte[] get_serialConnectDestIpAddress()
{
return serialConnectDestIpAddress;
}
public int set_serialConnectDestIpAddress(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectDestIpAddress = serialConnectDestIpAddress;
serialConnectDestIpAddress = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectDestIpAddress[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectDestIpAddress = undo_serialConnectDestIpAddress;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectDestIpAddress = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public int get_serialConnectType()
{
return serialConnectType;
}
public int set_serialConnectType(AgentXSetPhase phase, int value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectType = serialConnectType;
serialConnectType = value;
break;
case AgentXSetPhase.UNDO:
serialConnectType = undo_serialConnectType;
break;
case AgentXSetPhase.CLEANUP:
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public byte[] get_serialConnectDialString()
{
return serialConnectDialString;
}
public int set_serialConnectDialString(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectDialString = serialConnectDialString;
serialConnectDialString = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectDialString[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectDialString = undo_serialConnectDialString;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectDialString = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public byte[] get_serialConnectSwitchConnectSeq()
{
return serialConnectSwitchConnectSeq;
}
public int set_serialConnectSwitchConnectSeq(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectSwitchConnectSeq = serialConnectSwitchConnectSeq;
serialConnectSwitchConnectSeq = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectSwitchConnectSeq[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectSwitchConnectSeq = undo_serialConnectSwitchConnectSeq;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectSwitchConnectSeq = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public byte[] get_serialConnectSwitchDisconnectSeq()
{
return serialConnectSwitchDisconnectSeq;
}
public int set_serialConnectSwitchDisconnectSeq(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectSwitchDisconnectSeq = serialConnectSwitchDisconnectSeq;
serialConnectSwitchDisconnectSeq = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectSwitchDisconnectSeq[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectSwitchDisconnectSeq = undo_serialConnectSwitchDisconnectSeq;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectSwitchDisconnectSeq = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public byte[] get_serialConnectSwitchResetSeq()
{
return serialConnectSwitchResetSeq;
}
public int set_serialConnectSwitchResetSeq(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectSwitchResetSeq = serialConnectSwitchResetSeq;
serialConnectSwitchResetSeq = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectSwitchResetSeq[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectSwitchResetSeq = undo_serialConnectSwitchResetSeq;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectSwitchResetSeq = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public byte[] get_serialConnectOwner()
{
return serialConnectOwner;
}
public int set_serialConnectOwner(AgentXSetPhase phase, byte[] value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectOwner = serialConnectOwner;
serialConnectOwner = new byte[value.length];
for(int i = 0; i < value.length; i++)
serialConnectOwner[i] = value[i];
break;
case AgentXSetPhase.UNDO:
serialConnectOwner = undo_serialConnectOwner;
break;
case AgentXSetPhase.CLEANUP:
undo_serialConnectOwner = null;
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
public int get_serialConnectStatus()
{
return serialConnectStatus;
}
public int set_serialConnectStatus(AgentXSetPhase phase, int value)
{
switch (phase.getPhase()) {
case AgentXSetPhase.TEST_SET:
break;
case AgentXSetPhase.COMMIT:
undo_serialConnectStatus = serialConnectStatus;
serialConnectStatus = value;
break;
case AgentXSetPhase.UNDO:
serialConnectStatus = undo_serialConnectStatus;
break;
case AgentXSetPhase.CLEANUP:
break;
default:
return AgentXResponsePDU.PROCESSING_ERROR;
}
return AgentXResponsePDU.NO_ERROR;
}
}
|
Soil fertility classification for sugarcane in supply areas of a sugar mill Objective. To prepare the fertility classification for the sugarcane-cultivated soils in the Pujiltic Sugarcane Mill (PSM) supply area in order to improve decision-making. Design/methodology/approach. The soils were classified according to their fertility (FCC), using a system based on the quantifiable parameters of the upper soil layer and some characteristics of the subsoil directly linked to the growth of sugarcane. Results. Six factors limited the agricultural potential of the PSM soils: alkalinity, water excess or deficit, clay content, erosion, nutritional deficiencies, and low CEC, which alone or in groups act in detriment of soil fertility. Limitations/implications. Solving these problems requires a comprehensive analysis that considers crop type, planting season, and technology availability. Findings/conclusions. The soil fertility classification system enabled the classification of 11 soil subunits of the PSM area.
INTRODUCTION The Pujiltic Sugarcane Mill is the most important of its kind in the State of Chiapas. It has a cultivated surface of 17,100 ha and uses auxiliary irrigation, with a yield of 90.60 t ha−1 of sugarcane and a factory yield of 12.01%, according to the Comité Nacional para el Desarrollo Sustentable de la Caña de Azúcar. The agricultural production of sugarcane is affected by droughts and diminished soil fertility. As an alternative, production can be boosted through an increase of the cultivable surface and, according to studies about the yields of sugarcane, through adequate soil fertility management practices and the use of improved varieties (). Local experiments have shown that the integration of soil fertility and nutrient management is an advanced approach that serves as a resource to increase or maintain soil fertility throughout time (;;). However, a diagnosis is needed in order to identify limitations. Buol et al. developed a system to classify soils according to their fertility (Fertility Capability Classification or FCC), with the aim of closing the gap between the classification and soil fertility subdisciplines. As a technical soil classification system, the FCC has a specific use, derived from natural classification systems such as soil taxonomy (Soil Survey Staff, 2014) or the World Reference Base for Soil Resources (IUSS Working Group WRB, 2014). The FCC's categories indicate the main limitations of the soils according to their fertility, which can be interpreted in relation to the crops of interest. Since its publication in 1975, the FCC has been assessed and applied in several countries. As a result, the definitions of several modifiers have changed and new ones have been included to improve the system (). The system is a good starting point to study the suitability of tropical soils. In the Mexican tropics, particularly in the State of Tabasco, this system has only been applied in three regions, resulting in good agronomic management recommendations (Salgado and Palma, 2002; Salgado and Obrador, 2012;). Knowing the FCC's classes allows us to identify the fertility limitations and, given the importance of sugarcane in the State of Chiapas, generating this information is necessary. Moreover, a soil study of 33,974.7 ha is available for the Pujiltic Sugar Mill (). Therefore, the objective of this article was to develop the fertility classification of the sugarcane-cultivated soils in the PSM supply area.
MATERIALS AND METHODS The study area covered a surface of 33,974.7 ha, divided into 11 soil subunits (Figure 1), all of which are cultivated with sugarcane (Figure 2). The physical and chemical properties data of the first two horizons of each soil subunit in the PSM supply area were taken from the soil study conducted by Salgado-García et al.. The system to classify soils according to their fertility (FCC) was used. This system comprises three categories which, in turn, comprise different classes. The combination of these classes makes up the FCC units: Type. The texture of the plow layer or the top 20 cm, whichever is shallower. S: Sandy topsoil: loamy sands and sands (Soil Survey Staff, 2014). L: Loamy topsoil: <35% clay, but without loamy sand or sand. C: Clayey topsoil: >35% clay. O: Organic soils: >30% organic matter (OM) up to a depth of 50 cm or more. Substrata type (subsoil texture). This is only used when there is a marked textural change relative to the surface or if a hard layer hinders root growth within the first 50 cm of the soil. S: Sandy subsoil: same as in type; L: Loamy subsoil: texture similar to type; C: Clayey subsoil: texture similar to type; R: Rock or another hard layer hindering root development. Modifiers. When more than one criterion is indicated for each modifier, only one needs to be known. Ideally, the first criterion mentioned should be used if data is available. The following criteria are presented for those cases in which identifying the first one is impossible (): g (gley), d (dry), e (low cation exchange capacity), a (aluminum toxicity), h (acid), i (high P-fixation by iron), x (short-range-order minerals), v (vertisol), k (low K reserves), b (basic reaction), s (salinity), n (natric), c (cat clay), ' and '' (gravel), % (slope). Procedure. Soils are classified according to these parameters by determining whether the characteristic is present or not. Most quantitative limits are criteria found in the Soil Taxonomy and the World Reference Base for Soil Resources (Soil Survey Staff, 2014;). FCC units list the type of texture and substrata (if they differ) in capital letters, the modifiers in lowercase, the gravel modifier with a prime symbol ('), and the slope in parentheses, if so wished. Table 1 presents the chemical and physical properties of the first two horizons of the soil subunits and Figure 1 shows the representative profiles. The FCC classification for each soil subunit is presented below.
RESULTS AND DISCUSSION Soil Study Cb. Chernic Chernozem (CHch). This soil subunit has a mollic horizon; it is deep and well structured, has a high base saturation (80% or more), a high content of organic matter (2.5% or more), and a high biological activity. These soils have clayey textures in most horizons, with a moderate permeability. The irrigation availability allows yields of up to 116 t ha−1 (). Lb (12%). Hypocalcic Calcisol (CLccw). This subunit presents a medium infiltration index and a medium water retention capacity; it has secondary carbonate concentrations up to a depth of 100 cm from the soil surface. Its rapid permeability allows good drainage. The pH is generally considered moderately alkaline; the electric conductivity (EC) is 0.20 dS m−1, which indicates that no salinity problems are present; the OM is very rich in the plow layer and poor in the last horizons (5.33-1.24%). Given the calcareous nature of these soils, the use of fertilizers derived from phosphate rock or other non-water-soluble phosphates must be avoided.
Their iron, boron, and zinc deficiencies must be supplemented through chemical or organic fertilization (). Cbv (<5%). Vertic Calcisol (CLvr). This subunit has a vertic horizon up to a depth of 100 cm from the soil surface. These soils present a subsurface clayey horizon, as a result of expansion and contraction. Their slickensides or structural aggregates have 30% more clay throughout their thickness. Most of this soil profile shows 60% clay contents; however, these clay contents and the bulk density (BD) (1 g cm−3) do not cause compaction problems, likely as a result of the high organic matter and calcium contents. Based on field observations, these soils have cutans as a result of the accumulation of carbonates, which react strongly to HCl. Moreover, they have soft, small, cream-colored CaCO3 nodules; the profile presents good drainage, although permeability goes from moderate to slow, which is attributed to a high clay content (). Cb (<1%). Mollic Cambisol (CMmo). This subunit has a low infiltration index and good water retention capacity. Its soils have a high base saturation (50%) and a high OM content. They have a moderately alkaline pH and no salinity problems (EC < 1 dS m−1). Cbeg (5%). Calcaric Fluvisol (FLca). This subunit has a low infiltration index and good water retention capacity. It is a calcareous soil at least up to a 20-50 cm depth from the soil surface. Based on field observations, the water table was found at a depth of 150 cm; the gleyic processes at this depth give the soil a grey color. These soils have an average BD of 1 g cm−3 and have no compaction problems. They have a moderately alkaline pH and no salinity problems, because their EC ranges from 0.13 to 0.47 dS m−1. The OM has an irregularly arranged profile: it is rich in the first horizon, decreases as the depth increases, and increases again at a still lower depth (3.2-0.4-3.1%), as a result of the continuous alluviation processes. The subunit has a low cation-exchange capacity (CEC), which favors lixiviation, especially in the cases of K, Ca, and Mg. Therefore, we recommend using 10 t ha−1 of compost in order not to limit crop development (). This soil presented a lower K content than Vertisols, contrary to the results reported by Bolio et al. for the sugarcane soils in Chontalpa, Tabasco. Cbg (1%). Mollic Gleysol (GLmo). This subunit's soils have high nutrient and organic matter contents; they present loamy textures in the surface that overlie silty clay textures (C horizon). Occasionally, the bottom of the profile may present sandy textures. These soils are deep, although most of the year the water table is found near the surface, causing the sugarcane rooting depth to be less than 60 cm. These soils are predominantly characterized by a clayey texture, grey colors due to gleyzation processes, and poor drainage. Therefore, drainage is necessary to avoid altering the crop's physiology and to allow it to ripen in optimal conditions (). The 1 g cm−3 BD does not indicate compaction problems and most horizons maintain a strong reaction to HCl. These soils have a moderately alkaline pH and the EC does not indicate salinity problems (3 dS m−1). Most of the profile has a very high CEC and, in order to improve the OM content, vinasse and compost must be applied in 150 m3 and 10 t ha−1 doses, respectively (). Cb (0.1%). Rendzic Leptosol (LPrz).
The main characteristics of these soils are related to their low depth and calcareous rock origins, which provide them with very significant properties such as good OM contents, good nutrient contents, good infiltration and permeability, and good soil structural development and structural stability. However, they have significant problems related to the slope, including a low root volume, a tendency to erosion, difficult accessibility, and Ca and Mg saturation. Phosphorus fixation phenomena and iron deficiencies take place when the said saturation reacts to calcium, during the early development stages of the sugarcane crop. The pH is alkaline and the EC does not indicate salinity problems (0.12 dS m−1). The CEC is very high (52.2 cmol(+) kg−1) and the water availability allows the sugarcane crop to achieve yields in excess of 90 t ha−1. Lbe. Calcaric Regosol (RGca). These Regosols have calcareous properties at least between 20 and 50 cm from the soil surface. They have good permeability and drainage. They show a strong reaction to HCl in all their horizons and are also very rocky soils, with pebbles and gravel throughout the profile. The BD does not reflect compaction problems (1 g cm−3); these soils have a moderately alkaline pH and there are no salinity-related effects (average EC of 0.2 dS m−1). The superficial horizon of these soils is rich in OM, which diminishes along with the depth of the profile (6.6-1.1%). These Regosols present a low CEC, which favors lixiviation, particularly in the cases of K, Ca, and Mg. Cbev. Eutric Vertisol (VReu). This subunit has a >35% clay layer which covers the whole profile. It has a high water table and remains flooded during the rainy season, causing the stems to die as a consequence of the saturation of the soil pores. Since these soils present denitrification problems due to an anaerobic subsoil, superficial drainage is recommended. It has a moderately alkaline pH and no salinity problems, as a result of its EC of 2 dS m−1. These soils have a low CEC that favors lixiviation, particularly in the cases of K, Ca, and Mg. During sugarcane cultivation, the soil did not accumulate potassium, as indicated by Bolio et al. for Vertisols in Chontalpa, Tabasco. Low K contents, compared with the high Ca and Mg contents (Table 1), account for the foliar deficiency of K (1.0% in leaf 4) in the sugarcane crops of the Pujiltic region. This phenomenon took place in spite of the application of the fertilizer dose recommended by SIRDF and/or 10 t ha−1 of compost, which allows a yield of 60-101.8 t ha−1 ().
CONCLUSIONS AND RECOMMENDATIONS Six factors limit the potential production rate of soils in the area where sugarcane used in the PSM is cultivated: soil alkalinity, water excess or deficit, clay content, erosion, nutrient deficiencies, and a low cation exchange capacity. These factors, alone or grouped together, act in detriment of soil fertility. Knowledge about the relation between soils, plants, and atmosphere allows us to consider agricultural drainage, irrigation, and fertilization with macro- and micronutrients as agronomic practices that would improve the conditions of the Gleysol, Vertisol, Fluvisol, and Cambisol units. |
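As a compact illustration of how the FCC unit symbols used in the results above (for example "Cbv" or "Lbe") are assembled from a type letter, an optional substrata letter, and lowercase modifiers, the short sketch below applies the topsoil thresholds quoted in the methods section. It is a reading aid only, not part of the original study: the function names are invented, and the cut-offs assume the usual FCC limits of 35% clay and 30% organic matter.

def fcc_type(clay_pct, om_pct, usda_texture):
    """Assign the FCC type letter for the plow layer (illustrative only)."""
    if om_pct >= 30:
        return "O"                          # organic topsoil
    if usda_texture in ("sand", "loamy sand"):
        return "S"                          # sandy topsoil
    return "C" if clay_pct >= 35 else "L"   # clayey vs. loamy topsoil

def fcc_unit(type_letter, substrata="", modifiers=()):
    """Compose a unit string: capital type/substrata letters, lowercase modifiers."""
    return type_letter + substrata + "".join(modifiers)

# A clayey topsoil with the 'b' (basic reaction) and 'v' (vertisol) modifiers:
print(fcc_unit(fcc_type(60, 3, "clay"), modifiers=("b", "v")))   # -> Cbv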
Brain lesions located outside the language area may be associated with non-aphasic disorders of language. The difficulties encountered by these patients concern one or several of the following levels: incitement to communicate, initiation and maintenance of verbal activity, internal organization and adequation of the discourse to the context, mastery of the lexico-semantic code. In every case, the elementary aspects of language are untouched, but verbal communication is severely perturbed. |
Residual stress field analysis of Al/steel butt joint using laser welding-brazing ABSTRACT The objective of this study is to analyse the residual stress of an Al/steel butt joint produced by laser welding-brazing. The welding parameters were a laser power of 1200 W and a welding speed of 600 mm min−1, with sheet dimensions of 150 × 150 × 2 mm and 150 × 150 × 1 mm, respectively. The residual stress was measured by the hole-drilling method. Then, a finite-element model of the welding process was established and verified by experiments. The results show that the calculated results are in conformity with the experimental results. The longitudinal residual stress on the galvanised steel (329 MPa) is larger than that on the aluminium alloy (293 MPa). At the location of the fixture, the longitudinal residual stress is substantially zero. |
Fate of weed seeds in spent mushroom compost following commercial mushroom production Abstract Commercial mushroom producers grow several varieties of mushrooms on compost. Upon completion of the growing cycle, the spent mushroom compost is often sold as a soil amendment for both agricultural and homeowner use. Mushroom compost ingredients often come from fields infested with weeds, and in turn compost may spread unwanted weed seed. We conducted studies to assess the viability of weed seed following specific stages of the commercial mushroom production process. Weed seed was more likely to survive if the entire production process was not completed. However, no viable hairy vetch, Italian ryegrass, ivyleaf morningglory, Palmer amaranth, or velvetleaf remained at the end of the study. Although the seeds of most species were eliminated earlier in the composting process, ivyleaf morningglory required the complete process to eliminate 100% of the seed. These results indicate that spent mushroom compost is free of many weed species upon removal from mushroom houses and is unlikely to spread weed seed. Nomenclature: Hairy vetch, Vicia villosa Roth; Italian ryegrass, Lolium multiflorum Lam.; ivyleaf morningglory, Ipomoea hederacea (L.) Jacq.; Palmer amaranth, Amaranthus palmeri S. Wats.; velvetleaf, Abutilon theophrasti Medik.; mushroom, Agaricus bisporus |
The disclosure Wednesday that there will be no charges laid under B.C.’s environmental laws for Imperial Metals’ Mount Polley tailings dam failure in 2014 has environmentalists questioning whether the province’s laws are strong enough.
There remains the possibility of federal charges under the Fisheries Act, but the B.C. Conservation Officer Service has said a B.C.-federal investigation will not be complete by Friday — when the three-year time limit to lay charges under B.C.’s Environmental Management Act ends.
The B.C. conservation service-led investigation — involving a dedicated team of officers and several federal investigators — started almost immediately after the Aug. 4, 2014, failure of the earth-and-rock dam at the gold-and-copper mine northeast of Williams Lake.
That failure spilled millions of cubic metres of effluent and finely ground rock containing potentially toxic metals called tailings. The release scoured a nine-kilometre creek, home to trout and spawning coho salmon, and dumped tailings into Quesnel Lake, the migration route for more than one million sockeye salmon.
Andrew Gage, a lawyer with West Coast Environmental Law, said the fact there will be no charges laid under B.C. laws for one of Canada’s largest mining spills is remarkable and highlights a general trend, he believes, of decreased successful convictions of environmental violations.
Gage pointed to data West Coast Environmental Law has put together showing that since 1990 there has been a significant decline in the number of convictions under the province’s pollution statutes.
A Ministry of Environment online database lists 47 court convictions under the Environmental Management Act since 2006, only one involving a mining company, the coal giant Teck Resources.
Teck had convictions in 2013 and 2016, the latest resulting in $3.4 million in fines related to releases of water with elevated levels of metals and chemicals.
“I hope the new NDP government will ask tough questions,” said Gage, of the failure of the B.C.-led investigation to reach a conclusion by the three-year time limit.
Calvin Sandborn, legal director of the University of Victoria’s Environmental Law Centre, said it is significant that one of the biggest global mining “disasters” has not resulted in charges in B.C.
He believes it underscores a benign neglect in regulation of mining, highlighted in a B.C. auditor general report last year that concluded compliance and enforcement in the mining sector are not adequate to protect the environment.
“Lack of enforcement, or lack of laws to enforce, shows the need for a royal commission of inquiry,” said Sandborn, repeating a call made earlier for an inquiry into the regulation and oversight of mining in the province.
However, Robin Junger, a lawyer with McMillan LLP in Vancouver, said it is not uncommon for both provincial and federal agencies to investigate an incident and for one or the other not to lay charges.
Generally — as he could not speak to the specifics of this case — there could also be reason for not pursuing charges including a defence of due diligence, said Junger, a former deputy minister of mines in B.C., as well as a former head of the province’s Environmental Assessment Office.
Ugo Lapointe, program co-ordinator for Mining Watch Canada, said the lack of charges under B.C. laws says to him that the province’s laws are weak.
“It needs to be fixed and fixed quickly,” he said.
Lapointe said Mining Watch believes B.C. laws were violated as a result of the dam failure, including potentially under the Environmental Management Act, Mining Act and Water Sustainability Act.
He said Mining Watch has its legal team examining if it’s possible to lay private charges before the three-year time limit ends on Friday.
ghoekstra@postmedia.com
twitter.com/gordon_hoekstra |
Step inside Mazda's Hiroshima factory and museum, where the company started and where it still makes many of its cars.
From the bestselling sports car of all time, to the weird and wacky Wankel, Mazda has had a long history with interesting and often unique cars. Throughout it all, its Hiroshima factory has churned out thousands of cars every year.
Today, the plant straddles a lower branch of the river Ōta, connected by one of the largest privately owned bridges in the world. Miatas and CX-5s roll off its assembly line, having been assembled from mere parts and pieces in a matter of hours.
Upstairs in one of the buildings, however, is a collection of Mazda's most famous cars. Some you certainly know, many others you might not. They're all arranged and immaculately maintained -- and, unlike most car museums, Mazda lets you walk right up and touch them.
We begin at Mazda's corporate headquarters, a nondescript building. It's just a short walk from Mukainada station, about 30 minutes outside of downtown Hiroshima.
The lobby is festooned with the current line of Mazda vehicles, all shiny and the same shade of red. You don't stay here long, though. It's a short bus ride to the main plant, and along the way you'll cross what used to be the longest privately owned bridge in the world.
In the museum itself you're treated to a short intro to Mazda itself with a look back at 85 years of history. Then you head upstairs, where the tour starts in earnest.
It's probably not surprising that the first car you see is not Mazda's first car ever. Instead it's the Cosmo Sport: Mazda's first car to sport its beloved Wankel rotary engine, and the second car ever built with one.
The rotary is a proud part of Mazda's history, and it's a fascinating engine. It's composed of what's essentially a triangle rotating inside a pinched donut and lauded for its light weight, high horsepower, and smooth operation. But it's also derided for its low torque and high emissions.
References, examples and displays about the rotary are all over the museum, but so are Mazda's other cars. Across from the Cosmo is the Type TCS trike, Mazda's first vehicle, and its early small cars such as the R360 sit nearby, polished to a good-as-new shine.
Unlike most car museums, there are no barriers. You can go right up to the cars and peek inside -- you'll find that the interiors are just as immaculate as the exteriors.
Around the bend you'll see more modern cars such as the RX-7 and Eunos Cosmo. There's even a pristine Autozam AZ-1, known to most only through its digital version in video games such as Gran Turismo.
The 787B has its own section. It's the only car from a Japanese manufacturer to win the 24 Hours of Le Mans, and the only one to win without a piston engine.
The museum finishes with a detailed look at the design and manufacturing process. From clay models to the metal stamping process to a layout of the extensive wiring under the dash of every modern car. That's seriously frightening. Like, end of "Superman III" frightening.
Then you're treated to a quick tour of the factory, where you can peer down and watch the cars being assembled. It's a fascinating sight, a blend of humans and machines. Most surprising to me was that it was a mixed assembly line, with Miatas alternating with CX-5s. Mazda claims it's more efficient, which I'm sure it is, but it's not what I expected.
The Mazda Museum is free, all you need to do is book ahead of time. It's a quick tour and you don't get to spend much time with the cars before you're hurried along to the next area. But it is a great look at the cars, classic and new. Zoom zoom, as they say. |
Interferon and ribavirin vs interferon alone in the re-treatment of chronic hepatitis C previously nonresponsive to interferon: A meta-analysis of randomized trials. CONTEXT Hepatitis C is the leading cause of chronic liver disease in the United States. Several trials have found that interferon and ribavirin combination therapy is more efficacious than interferon monotherapy for previously untreated patients and those who relapsed after prior interferon monotherapy, but its effectiveness for nonresponders to prior interferon monotherapy is unclear. OBJECTIVE To assess the efficacy and safety of interferon and ribavirin vs interferon alone for treatment of patients with chronic hepatitis C who previously did not respond to interferon monotherapy. DATA SOURCES A systematic search was performed using MEDLINE and the Science Citation Index for publications from 1966 to December 1999. A manual reference search and a manual review of relevant specialty journals also were performed, and input from clinical hepatology experts was sought. STUDY SELECTION Included studies were randomized, controlled clinical trials comparing interferon and ribavirin with interferon alone and reporting virological and biochemical outcomes after a follow-up period. Of 50 identified studies, 12 trials (941 patients) were included in the analysis. DATA EXTRACTION Two investigators reviewed trials independently for methods, inclusion and exclusion criteria, and outcomes. Disagreements were resolved by discussion. Abstracted data included study and patient characteristics and virological, biochemical, and histological outcomes. A quality evaluation questionnaire was used to score studies. DATA SYNTHESIS The pooled virological response rate for combination therapy was 14% (95% confidence interval [CI], 11%-17%), with a risk difference favoring combination therapy of 7% (95% CI, 2%-13%). Use of interferon alfa-2a/2b and ribavirin, 1000 to 1200 mg/d, was associated with a pooled virological response rate of 18% and a risk difference of 16% (95% CI, 11%-21%). When interferon alfa-n/n3 and a lower dosage of ribavirin (600-800 mg/d) were used, the risk difference was 0% (95% CI, -7% to 7%). Combination therapy was associated with more adverse effects and an increased rate of discontinuation of treatment compared with interferon monotherapy. CONCLUSIONS For chronic hepatitis C that is nonresponsive to prior interferon monotherapy, combination therapy is more effective than re-treatment with interferon alone. Response rates remain less than 20% even in the most responsive subgroups, demonstrating a need for better therapeutic options. |
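As a side note on the statistics quoted above, the sketch below shows how a risk difference and a Wald-type 95% confidence interval are computed for a single hypothetical two-arm comparison. The arm sizes and responder counts are invented, and this is not the pooling procedure used in the meta-analysis itself, which combined such differences across the twelve trials.

from math import sqrt

def risk_difference_ci(events_a, n_a, events_b, n_b, z=1.96):
    """Wald 95% CI for the difference in response rates between two arms."""
    p_a = events_a / n_a
    p_b = events_b / n_b
    rd = p_a - p_b
    se = sqrt(p_a * (1 - p_a) / n_a + p_b * (1 - p_b) / n_b)
    return rd, (rd - z * se, rd + z * se)

# Invented counts: 18/100 responders on combination therapy vs 5/100 on interferon alone.
rd, (lo, hi) = risk_difference_ci(18, 100, 5, 100)
print(f"risk difference {rd:.0%} (95% CI {lo:.0%} to {hi:.0%})")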
// @target: es6
// @module: commonjs
class SomeClass {
static get someProp(): number {
return 0;
}
static set someProp(value: number) {}
static set someProp(value: number) {}
}
export = SomeClass; |
/*!
 * Generates the 'about' information of the application
*/
import * as gitRepoInfo from 'git-repo-info';
import * as fs from 'fs';
import * as path from 'path';
import { promisify } from 'util';
const writeFile = promisify(fs.writeFile);
const readFile = promisify(fs.readFile);
const exists = promisify(fs.exists);
const mkDir = promisify(fs.mkdir);
const readJson = async <T>(filename: string): Promise<T | null> => {
const value = await readFile(filename, 'utf8');
try {
return JSON.parse(value);
} catch (e) {
console.error('%s =>', filename, e);
return null;
}
}
/**
* Generate the deployment information for the **about** use case
*/
( async () => {
const pkg = await readJson<any>('./package.json');
const git = gitRepoInfo();
const now = new Date().toISOString();
const deploy = {
name: pkg.name,
title: pkg.title,
version: pkg.version,
description: pkg.description,
author: `${pkg.author.name} <${pkg.author.email}>`,
commit: git.sha,
commitDate: git.committerDate || git.authorDate || now,
branch: git.branch,
buildDate: now,
};
console.info('About\n%s\n', JSON.stringify(deploy, null, 2));
const dataPath = path.join(__dirname, 'data');
if (!await exists(dataPath)) {
await mkDir(dataPath, {recursive: true});
}
await writeFile(path.join(dataPath, 'about.json'), JSON.stringify(deploy, null, 2));
})();
|
Tofu Product Branding for Culinary Tourism of Sumedang, Indonesia Setting standards for the management of tofu products as culinary tourism can contribute to the branding of the city of Sumedang, Indonesia. Given the challenges of the Industry 4.0 era, the culinary industry, including that of Sumedang, is expected to build its branding through digital technology. This is the background that motivates this study. The research design uses an ethnographic public relations study with a qualitative approach. Data collection involved 4 producers and 4 consumers through interview and observation techniques. This research found differentiation to be the strategy used for managing tofu products. The differentiation strategy includes the use of digital promotional media in the form of websites and social media, where producers previously only used conventional media, such as banners. Differentiation can also be found in packaging using boxes made of woven bamboo. Another differentiation lies in the variation of flavors, adding sambal kecap (chili soy sauce), sambal oncom (chili sauce mixed with a fermented cake made of peanut press cake), and sambal hejo (green chili sauce) in addition to the original accompaniment of whole chilies. However, these changes do not alter the tofu price of around 500 rupiahs per piece. Differentiation as an effort to build the branding of Sumedang city is what drives the local government to position tofu products as culinary tourism. This study offers a concept of culinary tourism as cultural tourism in the city of Sumedang. This concept of tourism has not been much researched, although culinary culture is one of the intellectual properties that must be developed. Product development through product differentiation and the convergence of Internet-based media is a product communication strategy for managing Sumedang tofu products. |
// Copyright 2018, Baidu Inc. All rights reserved.
// Author: <NAME>
#include "thread_pool.h"
#include <thread>
#include <algorithm>
#include <numeric>
#include "boost/lexical_cast.hpp"
// #include "gtest/gtest.h"
#include <cassert>
#include <iostream> // for std::cout / std::endl used in main()
#define TEST(a, b) void b()
#define ASSERT_EQ(a, b) assert(a == b)
namespace simple_thread_pool {
TEST(ThreadPoolTestSuite, test_post_future) {
ThreadPool pool(5);
int n = 1;
std::future<int> r1 = pool.post([&]{
return n;
});
std::this_thread::sleep_for(std::chrono::microseconds(100));
std::future<int> r2 = pool.post([&] {
std::this_thread::sleep_for(std::chrono::microseconds(100));
return n;
});
n = 2;
ASSERT_EQ(1, r1.get());
ASSERT_EQ(2, r2.get());
}
TEST(ThreadPoolTestSuite, test_transform) {
ThreadPool pool(5);
std::vector<int> nums = {1, 2, 3, 4, 5, 6, 7, 8};
std::vector<std::string> expect;
std::vector<std::string> real;
auto to_str = [](int n) -> std::string { return boost::lexical_cast<std::string>(n); };
std::transform(nums.begin(), nums.end(), std::back_inserter(expect), to_str);
pool.transform(nums.begin(), nums.end(), std::back_inserter(real), to_str);
ASSERT_EQ(expect, real);
}
TEST(ThreadPoolTestSuite, test_for_each) {
ThreadPool pool(5);
std::vector<int> expect = {1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int> real = {1, 2, 3, 4, 5, 6, 7, 8};
auto incr = [](int& input) {
++input;
};
std::for_each(expect.begin(), expect.end(), incr);
pool.for_each(real.begin(), real.end(), incr);
ASSERT_EQ(expect, real);
}
TEST(ThreadPoolTestSuite, test_nested_pools) {
ThreadPool pool1(2);
ThreadPool pool2(10);
std::vector<int> nums1 = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int> nums2 = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<int> expect;
std::vector<int> real;
auto product_num2_and_accumulate_by_std = [&](int n) {
std::vector<int> tmp;
std::transform(nums2.begin(), nums2.end(), std::back_inserter(tmp), [&](int m) {
return m * n;
});
return std::accumulate(tmp.begin(), tmp.end(), 0);
};
std::transform(nums1.begin(), nums1.end(), std::back_inserter(expect),
product_num2_and_accumulate_by_std);
auto product_num2_and_accumulate_by_pool = [&](int n) {
std::vector<int> tmp;
pool2.transform(nums2.begin(), nums2.end(), std::back_inserter(tmp), [&](int m) {
return m * n;
});
return std::accumulate(tmp.begin(), tmp.end(), 0);
};
pool1.transform(nums1.begin(), nums1.end(), std::back_inserter(real),
product_num2_and_accumulate_by_pool);
ASSERT_EQ(expect, real);
}
} //namespace simple_thread_pool
int main(int argc, char **argv)
{
simple_thread_pool::test_post_future();
simple_thread_pool::test_transform();
simple_thread_pool::test_for_each();
simple_thread_pool::test_nested_pools();
simple_thread_pool::ThreadPool pool(4);
std::vector<int> nums = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
pool.for_each(nums.begin(), nums.end(), [](int n) {
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cout << n << std::endl;
});
}
|
def rs(self, props: PropsDict) -> NDArrayOrFloat:
rs, _ = self._get_rsbo(100.0, props["pres"])
return rs |
package com.tiernebre.zone_blitz.user.dto;
import lombok.Builder;
import lombok.Value;
import lombok.With;
import javax.validation.constraints.*;
import java.util.List;
import static com.tiernebre.zone_blitz.user.validator.UserValidationConstants.*;
@Value
@Builder
public class CreateUserRequest {
@NotBlank
@Email
String email;
@NotBlank
@Size(min = MINIMUM_PASSWORD_LENGTH, max = MAXIMUM_PASSWORD_LENGTH)
@With
String password;
String confirmationPassword;
@NotEmpty
@Size(
min = NUMBER_OF_ALLOWED_SECURITY_QUESTION,
max = NUMBER_OF_ALLOWED_SECURITY_QUESTION,
message = NUMBER_OF_SECURITY_QUESTION_VALIDATION_MESSAGE
)
@With
List<@NotNull(message = NULL_SECURITY_QUESTION_ENTRIES_VALIDATION_MESSAGE) CreateUserSecurityQuestionRequest> securityQuestions;
}
|
package org.junit.tests.experimental.rules;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
/**
* <tt>TemporaryFolderUsageTest</tt> provides tests for API usage correctness
* and ensure implementation symmetry of public methods against a root folder.
*/
public class TemporaryFolderUsageTest {
private TemporaryFolder tempFolder;
@Rule
public final ExpectedException thrown = ExpectedException.none();
@Before
public void setUp() {
tempFolder = new TemporaryFolder();
}
@After
public void tearDown() {
tempFolder.delete();
}
@Test(expected = IllegalStateException.class)
public void getRootShouldThrowIllegalStateExceptionIfCreateWasNotInvoked() {
new TemporaryFolder().getRoot();
}
@Test(expected = IllegalStateException.class)
public void newFileThrowsIllegalStateExceptionIfCreateWasNotInvoked()
throws IOException {
new TemporaryFolder().newFile();
}
@Test(expected = IllegalStateException.class)
public void newFileWithGivenNameThrowsIllegalStateExceptionIfCreateWasNotInvoked()
throws IOException {
new TemporaryFolder().newFile("MyFile.txt");
}
@Test
public void newFileWithGivenFilenameThrowsIllegalArgumentExceptionIfFileExists() throws IOException {
tempFolder.create();
tempFolder.newFile("MyFile.txt");
thrown.expect(IOException.class);
thrown.expectMessage("a file with the name 'MyFile.txt' already exists in the test folder");
tempFolder.newFile("MyFile.txt");
}
@Test(expected = IllegalStateException.class)
public void newFolderThrowsIllegalStateExceptionIfCreateWasNotInvoked()
throws IOException {
new TemporaryFolder().newFolder();
}
@Test(expected = IllegalStateException.class)
public void newFolderWithGivenPathThrowsIllegalStateExceptionIfCreateWasNotInvoked() throws IOException {
new TemporaryFolder().newFolder("level1", "level2", "level3");
}
@Test
public void newFolderWithGivenFolderThrowsIllegalArgumentExceptionIfFolderExists() throws IOException {
tempFolder.create();
tempFolder.newFolder("level1");
thrown.expect(IOException.class);
thrown.expectMessage("a folder with the name 'level1' already exists");
tempFolder.newFolder("level1");
}
@Test
public void newFolderWithGivenFolderThrowsIOExceptionIfFolderNameConsistsOfMultiplePathComponents()
throws IOException {
tempFolder.create();
thrown.expect(IOException.class);
thrown.expectMessage("name cannot consist of multiple path components");
tempFolder.newFolder("temp1/temp2");
}
@Test
public void newFolderWithGivenPathThrowsIllegalArgumentExceptionIfPathExists() throws IOException {
tempFolder.create();
tempFolder.newFolder("level1", "level2", "level3");
thrown.expect(IOException.class);
thrown.expectMessage("a folder with the name 'level3' already exists");
tempFolder.newFolder("level1", "level2", "level3");
}
@Test
public void newFolderWithGivenPathThrowsIOExceptionIfFolderNamesConsistOfMultiplePathComponents()
throws IOException {
tempFolder.create();
thrown.expect(IOException.class);
thrown.expectMessage("name cannot consist of multiple path components");
tempFolder.newFolder("temp1", "temp2", "temp3/temp4");
}
@Test
public void createInitializesRootFolder() throws IOException {
tempFolder.create();
assertFileExists(tempFolder.getRoot());
}
@Test
public void deleteShouldDoNothingIfRootFolderWasNotInitialized() {
tempFolder.delete();
}
@Test
public void deleteRemovesRootFolder() throws IOException {
tempFolder.create();
tempFolder.delete();
assertFileDoesNotExist(tempFolder.getRoot());
}
@Test
public void newRandomFileIsCreatedUnderRootFolder() throws IOException {
tempFolder.create();
File f = tempFolder.newFile();
assertFileExists(f);
assertFileCreatedUnderRootFolder("Random file", f);
}
@Test
public void newNamedFileIsCreatedUnderRootFolder() throws IOException {
final String fileName = "SampleFile.txt";
tempFolder.create();
File f = tempFolder.newFile(fileName);
assertFileExists(f);
assertFileCreatedUnderRootFolder("Named file", f);
assertThat("file name", f.getName(), equalTo(fileName));
}
@Test
public void newRandomFolderIsCreatedUnderRootFolder() throws IOException {
tempFolder.create();
File f = tempFolder.newFolder();
assertFileExists(f);
assertFileCreatedUnderRootFolder("Random folder", f);
}
@Test
public void newNestedFoldersCreatedUnderRootFolder() throws IOException {
tempFolder.create();
File f = tempFolder.newFolder("top", "middle", "bottom");
assertFileExists(f);
assertParentFolderForFileIs(f, new File(tempFolder.getRoot(),
"top/middle"));
assertParentFolderForFileIs(f.getParentFile(),
new File(tempFolder.getRoot(), "top"));
assertFileCreatedUnderRootFolder("top", f.getParentFile()
.getParentFile());
}
@Test
public void canSetTheBaseFileForATemporaryFolder() throws IOException {
File tempDir = createTemporaryFolder();
TemporaryFolder folder = new TemporaryFolder(tempDir);
folder.create();
assertThat(tempDir, is(folder.getRoot().getParentFile()));
}
private File createTemporaryFolder() throws IOException {
File tempDir = File.createTempFile("junit", "tempFolder");
assertTrue("Unable to delete temporary file", tempDir.delete());
assertTrue("Unable to create temp directory", tempDir.mkdir());
return tempDir;
}
private void assertFileDoesNotExist(File file) {
checkFileExists("exists", file, false);
}
private void checkFileExists(String msg, File file, boolean exists) {
assertThat("File is null", file, is(notNullValue()));
assertThat("File '" + file.getAbsolutePath() + "' " + msg,
file.exists(), is(exists));
}
private void assertFileExists(File file) {
checkFileExists("does not exist", file, true);
}
private void assertFileCreatedUnderRootFolder(String msg, File f) {
assertParentFolderForFileIs(f, tempFolder.getRoot());
}
private void assertParentFolderForFileIs(File f, File parentFolder) {
assertThat("'" + f.getAbsolutePath() + "': not under root",
f.getParentFile(), is(parentFolder));
}
}
|
1979 Italian Grand Prix
Background
Monza was revamped for 1979, with the track re-surfaced and run-off areas added to the Curva Grande and the Lesmo curves.
The entry list was enlarged by the return of the Alfa Romeo team, which had participated in the Belgian and French Grands Prix earlier in the season. Alfa Romeo fielded two cars: a new 179 chassis for Bruno Giacomelli, and the old 177 for Vittorio Brambilla, back in action for the first time since the crash in the previous year's race at Monza that had claimed the life of Ronnie Peterson. Meanwhile, Mexican Héctor Rebaque had his HR100 chassis ready for the first time, while Switzerland's Marc Surer, having won the Formula Two championship the previous month, made his first Formula One appearance as Ensign took him on in place of Patrick Gaillard.
Qualifying
The turbo-powered Renaults were quick in qualifying and filled the front row of the grid, with Jean-Pierre Jabouille ahead of René Arnoux. It was Jabouille's fourth pole position of the season, and Renault's sixth. Scheckter and Alan Jones in the Williams made up the second row, while on the third were their respective teammates, Gilles Villeneuve and Clay Regazzoni. The top ten was completed by Jacques Laffite in the Ligier, the Brabhams of Nelson Piquet and Niki Lauda, and Mario Andretti in the Lotus.
Race
The Renaults were slow off the start line and so Scheckter took the lead, with Arnoux holding on to second. Jones also made a poor start and dropped to the back of the field, putting Villeneuve third and the fast-starting Laffite fourth. On lap 2, Arnoux passed Scheckter for the lead, while Piquet crashed out after tangling with Regazzoni.
For the next eleven laps Arnoux, Scheckter, Villeneuve, Laffite and Jabouille ran nose-to-tail, with Regazzoni a distant sixth. Then, on lap 13, Arnoux's engine began to misfire, leading to his retirement. Scheckter thus regained the lead, with Villeneuve dutifully following him. Later in the race, Laffite and Jabouille also suffered engine failures, promoting Regazzoni to third.
Scheckter eventually took the chequered flag half a second ahead of Villeneuve and, with it, the Drivers' Championship. This one-two finish for Ferrari in their home race also secured them the Constructors' Championship. Regazzoni finished four seconds behind Villeneuve and 50 seconds ahead of Lauda, with the final points going to Andretti and Jean-Pierre Jarier in the Tyrrell. |
// tools/Peek.ts
import fs from "fs"
import { Repo } from "../src"
const raf: Function = require("random-access-file")
const path = process.argv[2]
const id = process.argv[3]
if (path === undefined || id === undefined) {
console.log("Usage: peek REPO_DIR DOC_ID")
process.exit()
}
if (!fs.existsSync(path + "/ledger")) {
console.log("No repo found: " + path)
process.exit()
}
if (!fs.existsSync(path + "/" + id)) {
console.log("No doc found in repo: " + id)
process.exit()
}
setTimeout(() => {}, 50000)
const repo = new Repo({ path, storage: raf })
repo.doc(id).then((doc) => {
console.log(doc)
process.exit()
})
|
package org.duckdns.owly.quarkus.entities;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
@Entity
public class Genre {
@Id
@GeneratedValue
public long id;
public String name;
}
|
The Numerical Solution of Boundary Integral Equations Much of the research on the numerical analysis of Fredholm type integral equations during the past ten years has centered on the solution of boundary integral equations (BIE). A great deal of this research has been on the numerical solution of BIE on simple closed boundary curves S for planar regions. When a BIE is defined on a smooth curve S, there are many numerical methods for solving the equation. The numerical analysis of most such problems is now well-understood, for both BIE of the first and second kind, with many people having contributed to the area. For the case with the BIE defined on a curve S which is only piecewise smooth, new numerical methods have been developed during the past decade. Such methods for BIE of the second kind were developed in the mid to late 80s; and more recently, high order collocation methods have been given and analyzed for BIE of the first kind. The numerical analysis of BIE on surfaces S in R³ has become more active during the past decade, and we review some of the important results. The convergence theory for Galerkin methods for BIE is well-understood in the case that S is a smooth surface, for BIE of both the first and second kind. For BIE of the second kind on piecewise smooth surfaces, important analyses have been given more recently for both Galerkin and collocation methods. In contrast, almost nothing is understood about collocation methods for solving BIE of the first kind, regardless of the smoothness of S. Numerical methods for BIE on surfaces S in R³ lead to computationally expensive procedures, and a great deal of the research for such BIE has looked at the efficient numerical evaluation of integrals, the use of iterative methods for solving the associated linear systems, and the use of "fast matrix-vector calculations" for use in iteration procedures. |
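To make the flavour of these methods concrete, here is a minimal Nystrom (quadrature) discretization of a second-kind equation u(t) - integral over [0, 2*pi] of k(t, s) u(s) ds = f(t) on a parameterized smooth closed curve, using the periodic trapezoid rule. It is a generic sketch, not taken from the paper: the kernel is an artificial smooth kernel chosen so the exact solution is known, not the single- or double-layer kernel of any particular boundary value problem.

import numpy as np

def nystrom_second_kind(kernel, rhs, n=32):
    """Solve u(t) - integral_0^{2 pi} k(t, s) u(s) ds = f(t) by the Nystrom method.

    Uses the n-point periodic trapezoid rule, which is highly accurate for
    smooth 2*pi-periodic kernels on a smooth closed curve.
    """
    t = 2.0 * np.pi * np.arange(n) / n            # quadrature nodes in the parameter
    h = 2.0 * np.pi / n                           # quadrature weight
    A = np.eye(n) - h * kernel(t[:, None], t[None, :])
    return t, np.linalg.solve(A, rhs(t))

# Artificial smooth kernel; with exact solution u(t) = cos(t) the right-hand
# side works out to f(t) = 0.75 * cos(t), so the error can be checked directly.
kernel = lambda t, s: np.cos(t - s) / (4.0 * np.pi)
rhs = lambda t: 0.75 * np.cos(t)
t, u = nystrom_second_kind(kernel, rhs)
print("max nodal error:", np.abs(u - np.cos(t)).max())   # close to machine precision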
// systemstubs/src/main/java/com/android/internal/content/ReferrerIntent.java
package com.android.internal.content;
import android.content.Intent;
public class ReferrerIntent {
public ReferrerIntent(Intent baseIntent, String referrer) {
throw new RuntimeException("Stub!");
}
}
|
Factors Associated With Clinical Deterioration Among Patients Hospitalized on the Wards at a Tertiary Cancer Hospital. PURPOSE Patients hospitalized outside the intensive care unit (ICU) frequently experience clinical deterioration. Little has been done to describe the landscape of clinical deterioration among inpatients with cancer. We aimed to describe the frequency of clinical deterioration among patients with cancer hospitalized on the wards at a major academic hospital and to identify independent risk factors for clinical deterioration among these patients. METHODS This was a retrospective cohort study at a 1,300-bed urban academic hospital with a 138-bed inpatient cancer center. We included consecutive admissions to the oncology wards between January 1, 2014, and June 30, 2017. We defined clinical deterioration as the composite of ward death and transfer to the ICU. RESULTS We evaluated 21,219 admissions from 9,058 patients. The composite outcome occurred during 1,945 admissions (9.2%): 1,365 (6.4%) had at least one ICU transfer, and 580 (2.7%) involved ward death. Logistic regression identified several independent risk factors for clinical deterioration, including the following: age (odds ratio [OR], 1.33 per decade; 95% CI, 1.07 to 1.67), male sex (OR, 1.15; 95% CI, 1.05 to 1.33), comorbidities, illness severity (OR, 1.11; 95% CI, 1.10 to 1.13), emergency admission (OR, 1.45; 95% CI, 1.26 to 1.67), hospitalization on particular wards (OR, 1.525; 95% CI, 1.326 to 1.67), bacteremia (OR, 1.24; 95% CI, 1.01 to 1.52), fungemia (OR, 3.76; 95% CI, 1.90 to 7.41), tumor lysis syndrome (OR, 3.01; 95% CI, 2.41 to 3.76), and receipt of antimicrobials (OR, 2.04; 95% CI, 1.72 to 2.42) and transfusions (OR, 1.65; 95% CI, 1.42 to 1.92). CONCLUSION Clinical deterioration was common; it occurred in more than 9% of admissions. Factors independently associated with deterioration included comorbidities, admission source, infections, and blood product transfusion. |
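The odds ratios and confidence intervals quoted above come from a multivariable logistic regression. The sketch below shows, on synthetic data with invented predictor names and effect sizes, how such estimates are typically extracted from a fitted model with statsmodels; it does not reproduce the study's actual model or data.

import numpy as np
import statsmodels.api as sm

# Synthetic stand-in data: age in decades, male sex, emergency admission.
rng = np.random.default_rng(0)
n = 5000
X = np.column_stack([
    rng.normal(6.5, 1.5, n),     # age in decades
    rng.integers(0, 2, n),       # male sex (0/1)
    rng.integers(0, 2, n),       # emergency admission (0/1)
]).astype(float)
logit = -4.0 + 0.29 * X[:, 0] + 0.14 * X[:, 1] + 0.37 * X[:, 2]
y = (rng.random(n) < 1.0 / (1.0 + np.exp(-logit))).astype(float)

result = sm.Logit(y, sm.add_constant(X)).fit(disp=False)
odds_ratios = np.exp(result.params[1:])
conf_int = np.exp(result.conf_int()[1:])
for name, oratio, (lo, hi) in zip(["age per decade", "male sex", "emergency admission"],
                                  odds_ratios, conf_int):
    print(f"{name}: OR {oratio:.2f} (95% CI {lo:.2f} to {hi:.2f})")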
Experimental study for improving behavior of castellated steel beam using steel rings Castellated beams are made from a hot rolled steel I-section in a few steps. Firstly, the web of the parent I-section is cut in a particular zigzag pattern and the two halves are reconnected by welding to form a castellated beam with hexagonal openings. In other cases, a spacer plate is placed between the two halves to produce octagonal openings, which increase the section depth. Increasing the depth by adding spacer plates leads to web-post buckling. This study focuses on improving the castellated beam to obtain high strength with relatively low cost by placing a steel ring inside the octagonal openings to strengthen the weakest part, which is its web. The results show that the steel ring is effective in strengthening the web-post. |
SCIENTIFIC DISCUSSION 1 Introduction The pathophysiology of Type 2 diabetes mellitus (T2DM) is characterised by deficient insulin activity arising from decreased insulin secretion secondary to beta cell failure, and/or compromised insulin action in peripheral target tissues (insulin resistance). This abnormal metabolic state is exacerbated by excess hepatic glucose production and altered metabolism of proteins and lipids, which, along with hyperglycaemia, contribute to microvascular and macrovascular complications. T2DM accounts for approximately 85% to 95% of diabetes cases in developed regions like the European Union. Age and weight are established risk factors for T2DM. The majority of patients with T2DM are overweight or obese. Diet modification and exercise are the first line of treatment for T2DM. Pharmacologic intervention with one oral antidiabetic drug (OAD) is usually the next step in treatment. After 3 to 9 years of OAD monotherapy, patients typically require an additional intervention. The recommended first line treatment is metformin, which restrains hepatic glucose production and decreases peripheral insulin resistance. Sulphonylureas, which are insulin secretagogues, may be used as an alternative for patients intolerant to metformin, or as an addition to metformin. Other second line oral treatment alternatives include alpha-glucosidase inhibitors, meglitinides and thiazolidinediones. Although efficient in attenuating hyperglycaemia, all of these treatment alternatives have more or less serious side effects, and there is a need for development of efficient drugs without metabolic or other side effects. |
import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs';
import { mergeMap, tap } from 'rxjs/operators';
import { Customer } from '../model/customer.model';
import { Contract } from '../model/contract.model';
@Injectable({
providedIn: 'root'
})
export class CustomerService {
constructor(private http: HttpClient) { }
getCustomers(): Observable<Customer[]> {
return this.http.get<Contract[]>('/api/contracts').pipe(
mergeMap(contracts => this.http.get<Customer[]>('/api/customers').pipe(
tap(customers => customers.forEach(c => c.contracts = contracts))
))
);
}
getCustomer(id: string): Observable<Customer> {
// Use the id so the request targets a single customer resource
// (endpoint path assumed here; adjust to match the actual backend route).
return this.http.get<Customer>(`/api/customers/${id}`);
}
}
|
import { promises as dns } from 'dns'
import { timeout } from 'extra-promise'
export { TimeoutError } from 'extra-promise'
/**
* @throws {NodeJS.ErrnoException}
* @throws {TimeoutError}
*/
export async function resolveA(
resolver: dns.Resolver
, hostname: string
, timeoutMsecs?: number
): Promise<string[]> {
if (timeoutMsecs) {
return await Promise.race([
resolver.resolve4(hostname)
, timeout(timeoutMsecs)
])
} else {
return await resolver.resolve4(hostname)
}
}
|
package br.com.colbert.base.ui;
import java.awt.*;
import br.com.colbert.consolidador.infraestrutura.swing.SwingUtils;
/**
 * A type of {@link View} that is represented by a window which can, among other operations, be opened and closed.
 *
 * @author <NAME>
 * @since 02/02/2015
 */
public interface WindowView extends View {
/**
 * Makes the window visible.
 */
default void show() {
SwingUtils.invokeLater(() -> getContainer().setVisible(true));
}
/**
 * Closes the window.
 */
default void close() {
SwingUtils.invokeLater(() -> getContainer().setVisible(false));
}
/**
 * Checks whether the window is visible.
 *
 * @return <code>true</code>/<code>false</code>
 */
default boolean isVisible() {
return getContainer().isVisible();
}
/**
 * Changes the window title.
 *
 * @param titulo
 *            the new title
 */
default void setTitulo(String titulo) {
Container container = getContainer();
if (container instanceof Frame) {
((Frame) container).setTitle(titulo);
} else if (container instanceof Dialog) {
((Dialog) container).setTitle(titulo);
}
}
}
|
In many fields there is a need to join tubes together at right angles to their longitudinal axes--such joints are known as saddle joints and, by way of example, one is shown schematically in FIG. 1. In order to join a first tube end on to the side of a second tube in this way it is necessary to cut the end of the first tube so that it matches the exterior shape of the second tube allowing it to be joined to the second tube in end-to-edge abutment. In even the simplest cylindrical case, involving circular-cylindrical tubes, where a first tube joins a second tube at a right angle it is a relatively complex operation to determine exactly the shape that the end of the first tube needs to be cut to in order to fit the second tube.
The conventional method for reproducing the necessary shape of the end of the first pipe in order for it to fit to the second pipe involves the use of relatively complex Euclidean geometry calculations to arrive at a curve which can be drawn on a piece of paper. The paper is wrapped around the tube and used as a guide-line along which a cut is made.
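For the simplest case mentioned above, two circular-cylindrical tubes meeting at a right angle, the curve drawn on the paper template can be tabulated directly from the two radii. The sketch below is only an illustration of that geometry (the function name and the example radii are invented, and it assumes the branch tube is centred on, and no larger than, the main tube): it lists, for positions around the branch tube's circumference, how far down from the tube end the cut line falls once the template is unrolled flat.

import math

def coping_template(branch_radius, main_radius, points=12):
    """Tabulate the unrolled 'saddle' cut line for two perpendicular cylinders.

    Returns (arc_length_around_branch, cut_depth_from_tube_end) pairs for a
    flat paper template wrapped around the branch tube.  Assumes circular
    tubes, a 90 degree joint, and branch_radius <= main_radius.
    """
    template = []
    for k in range(points + 1):
        theta = 2.0 * math.pi * k / points              # position around the branch
        offset = branch_radius * math.sin(theta)        # distance from the main tube's axial plane
        depth = main_radius - math.sqrt(main_radius**2 - offset**2)
        template.append((branch_radius * theta, depth))
    return template

# Example: a 50 mm radius branch joining a 100 mm radius main tube.
for s, d in coping_template(50.0, 100.0):
    print(f"around branch: {s:6.1f} mm   cut down: {d:5.1f} mm")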
A skilled and experienced technician is generally required to carry out these calculations and the process is a protracted one.
Furthermore, in such a lengthy and complex process there is always a significant risk that an error may be made, which can result in an expensive mistake.
Evidently there is a need for a simpler method of reproducing the shape to which a first tube must be cut when joining it in end-to-edge abutment to a second tube. |
# Read n sequences; report (1-based) the indices of those that contain
# at least one value smaller than m.
n, m = input().split(" ")
n = int(n)
m = int(m)
c = []
e = []
for i in range(n):
    c.append([])
    a = input().split(" ")
    # a[0] is the length of the sequence; the values follow it.
    for j in range(int(a[0]) + 1):
        c[i].append(int(a[j]))
for i in range(n):
    d = 0
    for j in range(1, len(c[i])):
        if c[i][j] < m:
            d += 1
            break
    if d == 1:
        e.append(i + 1)
print(len(e))
for i in range(len(e)):
    print(e[i], end=' ')
The aim of the study was to assess the reliability of the indicators of dose and effect in the health monitoring of asbestos-exposed workers. In 49 cases out of 158 studied workers (31%), asbestos-related diseases were diagnosed following ATS criteria. Using nonparametric statistical methods (permutation tests), 6 variables were analyzed with respect to asbestos-related diseases and working sectors, demonstrating a difference in the concentration of amphiboles (p < 0.01), greater in patients with asbestosis and workers involved in asbestos removal from railway carriages. There was no correlation between mesothelin and amphibole, chrysotile, and total fiber concentrations (Spearman test). |
The independent living centre. The Association for the Physically Disabled Western Cape (APD-WC) and its 20 registered branches, as private registered welfare organisations, render a variety of services to persons with disabilities in the Western Cape Province. The Association operates as a Provincial Association of the National Council for Persons with Physical Disabilities in S.A., a member of the South African Federal Council on Disability. |
# Greedy digit-by-digit count of banknotes: for each digit (starting from the
# least significant) either pay it exactly, or pay (10 - digit) and pass a
# carry of one note to the next digit, whichever needs fewer notes.
n = list(map(int, input()))
keta = len(n)          # number of digits
satsu = 0              # running total of banknotes
for i in range(keta):
    tmp = n.pop()      # current (least significant remaining) digit
    if i == keta - 1:
        n.append(0)    # sentinel so a final carry has a digit to land on
    if tmp == 10:      # digit overflowed because of an earlier carry
        tmp = 0
        n[-1] = n[-1] + 1
    if tmp > 5 or (tmp == 5 and n[-1] >= 5):
        satsu += 10 - tmp      # overpay and take change
        n[-1] = n[-1] + 1      # carry one note to the next digit
    else:
        satsu += tmp           # pay the digit exactly
print(satsu + n[0])            # n[0] holds any carry left at the top
|
from .losses import DistillationLoss
from .imnet_module import ImagenetDataModule
from .deit_module import DeiT
|
This will be the first new station added to the Western Line in nearly 50 years. The city's western suburban railway line will get a new station, its first in nearly fifty years, by the end of June this year.
The BMC has promised to complete work on a flyover connecting NSC exhibition centre in Goregaon on the western side to SV Road in the east by April end, allowing the Western Railway to throw open the long-delayed Oshiwara railway station. The flyover will replace a level crossing used by thousands of motorists and pedestrians every day to cross over from one side of the station to the other. Since the BMC could not meet the deadline for completing its part of the flyover and the residents did not allow the level crossing to be shut in the absence of an alternative, the station's opening has missed several deadlines and has been delayed by nearly five years.
While the station has been ready for a few years now, the Western Railway on Friday told Mumbai Mirror that it will take only a month to give it the finishing touches once the flyover is ready. With the BMC promising to complete work by May, if all goes well, the station should open to commuters by June end.
Oshiwara will become the 37th station on the Western Railway's 120-km Mumbai suburban section that starts at Churchgate in south and ends at Dahanu in the north. The last new station added to the Western line was Naigaon in the 1960s. Oshiwara is one of the most densely populated neighbourhoods on the city's western flank. Home to several industrial estates and almost the entire television industry, Oshiwara station will help decongest Jogeshwari and Goregaon stations to its south and north respectively.
While the Western Railway has completed work on the part of the flyover on railway land, the construction work on the rest of the flyover on both sides was stuck because of encroachments. The issue was discussed at the railway's 14th co-ordination committee meeting and the BMC officials confirmed that all stumbling blocks had been cleared and that the flyover would be ready in three months. While residents are not sure if the BMC will meet the new deadline it has set for itself, they are happy to learn that there is some movement on the long-delayed project. "Once the station opens, it will be the closest link to places like Lokhandwala and NSC grounds in Goregaon. The bridge too will be of immense help in decongesting traffic in the area," Sulaiman Bhimani, a resident, said.
When the flyover is ready and the level crossing is shut, the station's platforms will be extended to the point where the level crossing is located currently. "Once the BMC completes its work, we can throw open the station in a matter of few months," said Sharat Chandrayan, Western Railway's chief spokesperson. Western Railway Mumbai Divisional Manager Shailendra Kumar confirmed that the issues that were delaying the completion of the flyover have been resolved.
The BMC is confident it will not miss the deadline this time. "The approach to the flyover on the western side was cleared of all encroachments some time ago, but around 80 metres on the eastern side was held up because of encroachments. But the issue has now been resolved out of court and work on the incomplete portion shall begin soon," said a senior BMC official who did not wish to be identified. |
1. Preheat oven to 350 degrees. In a mixing bowl, whisk together flour, oats, salt and baking soda for 30 seconds. Add in 1/2 cup granulated sugar and 1/2 cup brown sugar and mix until no clumps remain. Combine melted butter and vanilla and add to mixture, then stir with a spoon until evenly moistened. Gently press half of the mixture into a greased 8 by 8-inch baking dish (a 9 by 9-inch baking dish would be great too, just reduce the baking time slightly as needed) and bake in preheated oven 15 minutes.
2. Meanwhile in a mixing bowl, whisk together 1/4 cup granulated sugar, 1/4 cup brown sugar, cinnamon, nutmeg, ginger, cloves and salt. Add in egg, egg yolk and vanilla and stir until blended. Mix in pumpkin then milk. Pour mixture over baked cookie portion and return to oven to bake 15 minutes, then remove from oven and sprinkle remaining cookie crumb mixture over top while breaking the crumb into small bits. Transfer oven rack closer to the top-center (not directly beneath but a few levels below) and return to oven to bake about 20 - 25 minutes longer until golden on top and center only jiggles slightly. Remove from oven and allow to cool 1 hour at room temperature, then transfer to refrigerator to cool 1 hour longer (or serve warm from the oven like a crumble with ice cream). Cut into squares and serve with sweetened whipped cream and a light dusting of cinnamon if desired. |
Equipment health management through information fusion for reliability A generic framework for estimating the reliability of equipment through information fusion of its failure history, predictive maintenance data and domain expert's knowledge is proposed and demonstrated. The framework uses a Degree of Certainty, arrived at using fuzzy sets and Belief & Plausibility measures, to arrive at a decision on the effectiveness of predictive maintenance. Uncertainty, randomness, imprecision and ambiguity/conflicts inherent in the information/data are logically synthesized. The information/data is grouped into information blocks and the framework connects these blocks, synthesizing the information to arrive at logical conclusions. The essence of the proposed framework is to provide information on missing links between quantitative data and qualitative expert domain knowledge, which are ignored while dealing with reliability metrics such as MTBF, MTTR etc. This framework is demonstrated with a case study using the maintenance information/data of large industrial motors. The case study highlights the advantages of the framework used to fuse the maintenance information/data drawn from various sources to draw inferences on the failure processes of the motors. This in turn leads to a revision of the maintenance strategy of the motors and can lead to large tangible/intangible benefits in reducing the failures. A simple desktop application program can be developed based on this framework to suit individual plant operations.
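As an aside, the Belief and Plausibility measures mentioned above come from Dempster-Shafer evidence theory. The short Python sketch below only illustrates how such measures are computed from a basic probability assignment; the frame of discernment, the hypothesis names and the mass values are invented for the example and are not taken from the case study.

# Hypothetical frame of discernment for a motor's condition (names invented).
frame = frozenset({"healthy", "degraded", "failed"})

# Hypothetical basic probability assignment (mass function); masses sum to 1.
mass = {
    frozenset({"healthy"}): 0.5,
    frozenset({"degraded"}): 0.2,
    frozenset({"degraded", "failed"}): 0.2,
    frame: 0.1,  # mass left on the whole frame represents ignorance
}

def belief(hypothesis):
    # Bel(A): total mass of focal elements fully contained in A.
    return sum(m for s, m in mass.items() if s <= hypothesis)

def plausibility(hypothesis):
    # Pl(A): total mass of focal elements that intersect A.
    return sum(m for s, m in mass.items() if s & hypothesis)

A = frozenset({"degraded", "failed"})
print(belief(A), plausibility(A))  # 0.4 and 0.5 for these example masses, so Bel(A) <= Pl(A)
|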
The Influence of Energy Certification on Housing Sales Prices in the Province of Alicante (Spain): This work examines the implementation of energy labelling by the residential real estate sector. First, it considers the interest by real estate sellers in not publishing energy certification information, and then, it quantifies the impact of the housing's energy certification on the asking price. The results are compared with those obtained from other studies conducted in distinct European countries. The study's final sample was collected, including information from 52,939 multi-family homes placed on the real estate market in the province of Alicante (Spain). One-way analysis of variance (ANOVA) was used, as well as an ordinary least squares regression model. This study highlights the fact that, in the current market, owners and sellers have no incentive to reveal the energy certification, since this permits them to sell homes with low energy ratings at prices similar to those of more energy efficient homes. In addition, it was found that homes with better energy ratings (letters A and B) are not sold at higher prices than homes with other rating letters, unlike the case of other European countries that were examined.

The European directives establish a mandatory certification system, the so-called "ABCDEFG qualification", which rates buildings based on their energy efficiency, similar to the classification used for household appliances. In addition, these directives require the publication of an energy performance certificate (EPC), to be included in the documentation supplied by owners to purchasers or renters. These policies are an attempt to offer increased transparency and information to consumers, to assist in decision-making related to property purchase or rental. In Spain, these directives result from the enforcement of a series of decrees requiring energy certification and the presentation of an energy efficiency label in properties placed on the rental or sales market. On the other hand, the level of compliance with the directive varies depending on the specific case. In the case of real estate sales, compliance is significant, given that certification documentation is required by the notary public when formalizing a deed of sale. In the case of publishing energy ...

In terms of energy and climate (Figure 2), Spain's climatic diversity should be considered (CTE-HE 2013), since it has always, and continues to, significantly condition the energy characteristics of the building stock. In Spain, within the same region, it is possible to find mild climates in Mediterranean coastal areas and continental climates with more extreme temperatures, as occurring in the Alicante province. This characteristic may even be found in cases in which the separation between climate zones is barely 70 km, as occurs in the coast and the interior of the province, where mountain chains exceeding 1000 m in height are found. This climatic diversity leads to a distinct energy allocation of the buildings. Therefore, electricity is frequently used to heat and cool buildings on the Mediterranean coast, via heating pumps, whereas in the interior, it is more common to find buildings that are heated with community or individual heating systems using natural gas. As for the heating system used in the Valencian community, centralized or collective systems represent only 4.6% (10.56% on a national level), while heating in the entire housing represents only 27.4% (as compared to 46.30% across Spain). In this province, it is common to use small heating devices for single rooms (54.2%) or even, to have no heating devices at all (13.8%) (see Table A1 of the Appendix A).

The document is organized as follows: In the second section, the materials and methods are described, detailing the sources used and the database generated. The third section offers the results. The fourth section contains the discussion and finally, a summary of the conclusions obtained is presented.
Materials and Methods

First, an analysis of variance (ANOVA) was proposed to determine whether or not differences exist in the offered prices, based on the published energy qualification. To examine the economic premiums of the housing based on energy qualification, an ordinary least squares regression model has been proposed. For this, various estimates have been made, based on the reference used in the housing's energy qualification (letter or group of letters). Hedonic regression models have been used since the "New Approach to Consumer Theory" created by Lancaster. Ridker and Henning used ordinary least squares (OLS) for the first time in the context of the housing market. Authors such as Zietz, et al. indicate that hedonic regression analysis is normally used to identify the marginal effect of a set of characteristics on the housing price. For the case of heterogeneous goods such as housing, hedonic methodology permits the estimation of the contribution of each characteristic to the price. Currently, this methodology is the most commonly used to determine the economic premium generated by distinct characteristics. In Table A2 of the Appendix A, the variables that are most commonly used by other authors to determine housing price are shown.

Population and Sample

The database consists of multi-family housing placed on the market in the province of Alicante (Valencian Community, Spain), see Figure 3. The interest and selection criteria were based on the significant activity of the construction sector in this area, which is the third province in the country in terms of having the largest number of property transactions (purchases), after Barcelona and Madrid. On the other hand, it is fourth in terms of the number of unsubsidized housing sales initiated in 2017.

The study sample consists of housing properties that were placed on the market via the idealista.com real estate portal between June 2017 and May 2018. During this period, information was collected on 97,279 properties placed on the market, extracting data on the characteristics of the housing and buildings. Subsequently, via GIS, information was provided on the location, neighborhood and market, obtained from other information sources. The final database was subject to a univariate analysis of outliers, discarding properties that differed by more or less than three standard deviations in their respective variables (Z scores). This process was performed on the following variables: natural log of the property price, age, height in stories, constructed surface area, number of bedrooms and bathrooms. To identify the multivariate atypical cases, the regression model was calibrated by calculating the Mahalanobis distance (MD) and its statistical significance, discarding any files in which the significance was less than 0.001, in accordance with Hair, et al. Finally, those properties having missing data on any of the variables that were the subject of the analysis were discarded, obtaining a final sample of 52,939 observations, of which 9194 included information on energy qualification.

The sample's representativeness was verified via the equation n = (zα/2)² · p(1 − p) / E², designed for large or infinite populations when the exact size of the units making it up is unknown, where: zα/2 is the Z score corresponding to the selected level of confidence, p is the probability that event p takes place (when not having sufficient information the least favorable value is assigned, p = 0.50), and E is the maximum admissible error or the maximum error that is committed in the sample. Using a 95% confidence level (zα/2 = 1.96), a probability of p = 0.50 and sample sizes of n = 52,939 and n = 9194, clearing E, a maximum estimated error of 0.4% (0.004) and 1.0% (0.010) were obtained, respectively, ensuring the high statistical precision of the sample.

Sources of Information

The main source of information is the real estate portal idealista.com, which publishes the asking prices along with the characteristics of the housing and the building in which it is located. Other studies have also considered real estate portals with the same objective, given the lack of official information available, with the real estate asking prices being a suitable substitute for the transaction prices.
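As an illustrative aside (not part of the original paper), the margin-of-error figures quoted above can be checked numerically with a few lines of Python, assuming the usual large-population expression E = zα/2 · sqrt(p(1 − p)/n):

import math

def margin_of_error(n, z=1.96, p=0.5):
    # E = z * sqrt(p*(1-p)/n) for a large or infinite population.
    return z * math.sqrt(p * (1 - p) / n)

print(margin_of_error(52939))  # ~0.0043, i.e. about 0.4%
print(margin_of_error(9194))   # ~0.0102, i.e. about 1.0%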
In Figure 4, the distribution of energy certificates in the Alicante province is shown, as well as details on the province's two largest cities (Alicante and Elche).

Based on the alphanumeric and vectorial information from the DGC, a raster map was created to estimate the age of the housing (Figure 5a) and the ratio of the constructed surface area in the proximity of each building (150 m around the same). With population census and INE housing data and IGN mapping, the type of occupation was collected for each census tract (vacant, main and secondary), as well as the type of tenancy regime (rented, mortgaged and owned) and the population's sociodemographic characteristics (dependency, ageing, foreign population and education level). With the information from the CECD and the Health Department (DGOEAPCS), distances between the housing and the public services or the following points of interest were calculated: hospitals, health centers, pharmacies, schools (Figure 5b), as well as proximity to the coast. Distances have been calculated by network, that is, based on the length of origin and destination using a layout of pre-established streets and intersections, simulating the reality of the urban network. The Basic Document on Energy Saving from the Technical Code for Buildings (CTE-DB-HE) is used to determine the climatic zone of the town where the property is located (climatic severity of summer and winter seasons).

Data

Variables were selected based on a literature review (see Table A2 of the Appendix A). Based on the information received, 63 variables were obtained, as summarized in Table 1. The variables are ordered based on five categories: Housing characteristics (A), Building characteristics (B), Location characteristics (C), Neighborhood characteristics (D), and Market characteristics (E). The unit with which each variable has been measured is also indicated, as well as a brief description of the same and verification as to whether or not it was used in model estimation.

Descriptive Statistics

The descriptive statistics of the variables are shown in Table 2.

Methodology

The analysis of variance allows for the contrasting of the null hypothesis that the means of K populations (K > 2) are equal (H0: μ1 = μ2 = ... = μK) with the alternative hypothesis that at least one of the populations differs from the others in terms of its expected value. The one-way analysis of variance consists of three parts. The first part of the analysis permits contrasting of the null hypothesis of equality of means in the groups through the F statistic. The second contrasts the equality of the variances of the dependent variable in the groups using Levene's test. The third and final part of the analysis determines which of the distinct levels of the factor differ from the others, based on different post hoc tests.
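As an illustrative aside, the three-part ANOVA procedure just described (equality of variances, equality of means, post hoc comparisons) can be sketched in Python with SciPy. The asking-price samples per qualification letter below are randomly generated placeholders, not the study's data, and the robust Welch and Brown-Forsythe variants used later in the paper would need a dedicated implementation or package.

import numpy as np
from scipy import stats

# Placeholder asking-price samples per energy-qualification letter (invented data).
rng = np.random.default_rng(0)
groups = {letter: rng.lognormal(mean=12.0, sigma=0.4, size=200) for letter in "ABCDEFG"}
samples = list(groups.values())

# 1) Levene's test for homogeneity of variances across the groups.
lev_stat, lev_p = stats.levene(*samples)

# 2) Classical one-way ANOVA F test of equality of means.
f_stat, f_p = stats.f_oneway(*samples)

print(f"Levene F={lev_stat:.2f} (p={lev_p:.3f}); ANOVA F={f_stat:.2f} (p={f_p:.3f})")

# 3) Post hoc pairwise comparisons (e.g. Scheffe or Tukey) would then identify
#    which letters form homogeneous subsets.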
The regression model is estimated using ordinary least squares (OLS), and its specification is semilogarithmic, based on the following expression: ln(Pi) = β0 + Σj βj·Xij + Σk δk·Dik + εi, where ln(Pi) is the natural logarithm of the advertised asking price for housing "i"; β0 is the fixed component, which does not depend on the market; βj is the parameter to estimate related to the characteristic "j"; Xij is the continuous variable that collects the characteristic "j" of the observation "i"; δk is the parameter to estimate related to the characteristic "k"; Dik is the dummy variable that collects the characteristic "k" of the observation "i"; and εi is the error term in the observation "i". The semilogarithmic functional form was selected, since according to, this form offers certain advantages. First, it facilitates the interpretation of the coefficients. That is, for each unit increase in the explanatory variable (Xj and Dk), the dependent variable (P), in this case the asking price, varies on average by approximately (100·β)%. And second, it minimizes the problem of heteroscedasticity, improving the goodness of fit of the estimates. The model is estimated on distinct occasions, based on the energy qualification characteristic (Table 3), such that the results obtained may be compared with other studies. For this analysis, the SPSS statistics package for Windows, version 24 was used, based on the method of "excluding cases listwise". This leads to the elimination of observations with missing data.

One-Way Analysis of Variance (ANOVA)

In the database created, from a sample of 52,939 homes, only 9194 published their energy qualifications (17.4%), despite the fact that Royal Decree 235/2013 requires the publication of the energy rating of homes that are being sold or rented. This low percentage leads us to believe that the failure to publish an energy qualification may have some sort of advantage for real estate sellers. In order to examine this supposition, a statistical test was created for a one-way analysis of variance (ANOVA), graphically revealing the data in Figure 6. By evaluating the homogeneity of the variance of each group using Levene's test (F = 68.8, p = 0.000), the variance of the groups is found to differ. This result supports the use of robust tests of equality of means, specifically, those by Welch (F = 314.2, p = 0.000) and Brown-Forsythe (F = 237.6; p = 0.000), which confirm that the mean asking prices differ between energy qualification letters.
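As an illustrative aside, the semilogarithmic hedonic specification given earlier in this section can be estimated in a few lines of Python with statsmodels; the tiny synthetic data frame and variable names below are invented and stand in for the study's 63-variable database.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Invented sample: asking price, surface area, bathrooms and an EPC-letter-A dummy.
rng = np.random.default_rng(1)
df = pd.DataFrame({
    "price":   rng.lognormal(12.0, 0.3, 500),
    "surface": rng.normal(90, 20, 500),
    "baths":   rng.integers(1, 4, 500),
    "epc_A":   rng.integers(0, 2, 500),   # 1 if the home is rated A, 0 otherwise
})

# ln(P_i) = b0 + b1*surface + b2*baths + d1*epc_A + e_i
model = smf.ols("np.log(price) ~ surface + baths + epc_A", data=df).fit()
print(model.params)

# For a dummy variable the exact percentage premium is 100*(exp(d1) - 1),
# which is close to 100*d1 only when the coefficient is small.
print(100 * (np.exp(model.params["epc_A"]) - 1))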
To identify the relationships between the groups, a "post hoc" test was performed with Scheffé's method and a classification of the groupings (or homogeneous subgroupings) based on the value of the means. For each subset, a test was carried out on the equality of means hypothesis, with significances of over 0.05 (no difference in means), in accordance with that observed in Table 4. Subset 1 is formed by housing with letters G and F, whose means do not differ significantly (p = 0.055). Subset 2 includes housing with letters F and E, whose means do not differ significantly (p = 0.123). Subset 3 is made up of homes with letters NT, A, D and B, whose means do not differ significantly (p = 0.053), and subset 4 consists of only those homes with letter C, which, obviously, do not differ from themselves (p = 1.0).

Regression Analysis

Upon introducing the variables into the regression model, problems of correlation were observed between some of them. Therefore, a total of eight variables have been discarded. Three correspond to the climate area (Zone_B4, Zone_C3, and Zone_D3), one refers to the percentage of the population with primary and secondary school educations (D_students), two refer to the percentages of vacant and main homes (E_vacant_dw and E_main_dw), and two more refer to the percentages of mortgaged homes and properties with homeowners (E_mortgaged_dw and E_homeownership). Figure A1 in the Appendix A shows a graph with the most relevant correlations. In order to determine if the estimates achieved suitable quality criteria, the following were examined: the normality of the population, the lack of problems of specification in the estimates (no multicollinearity, heteroscedasticity or autocorrelation), the statistical significance of the estimates, and finally, that the proportion of the estimated variance was high (R²). The normality of the population is verified through a histogram (Figure 7a,d) and a graph of normality of the residuals (Figure 7b,e), revealing that the sample has a normal distribution. The multicollinearity was verified via the VIF statistic (Variance Inflation Factor), with various authors suggesting that there are collinearity problems if any VIF exceeds 10. In the new estimations made, the majority of the VIF values are between 1 and 4.6, therefore it is considered that there are no problems arising from multicollinearity. The heteroscedasticity was analyzed with a residual dispersion plot (Figure 7c,f) and there was no evidence of serious problems of heteroscedasticity, given the random distribution of the residuals. The existence of autocorrelation was verified using the Durbin-Watson statistic, obtaining values close to two in all of the estimations, which suggests the absence of autocorrelation in the residuals. The significance of each estimation is measured with Snedecor's F-test, being found to be statistically significant. The coefficient of determination (adjusted R²) of the estimates is indicated in Table 5 and all of these have an explanatory power approaching 71%. In summary, the estimations have a sufficient level of robustness and significance, making them acceptable for purposes of inference making.
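As an illustrative aside, the VIF screen described above can be reproduced with statsmodels' variance_inflation_factor; the design matrix below is invented, not the study's.

import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor

rng = np.random.default_rng(2)
X = pd.DataFrame({
    "surface": rng.normal(90, 20, 300),
    "baths":   rng.integers(1, 4, 300),
    "age":     rng.integers(0, 80, 300),
})
X = sm.add_constant(X)  # VIFs are computed on the design matrix including the intercept

# Print the VIF of every regressor; values above ~10 would flag collinearity problems.
for i, col in enumerate(X.columns):
    print(col, round(variance_inflation_factor(X.values, i), 2))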
To control for the fixed effects due to the spatial location of the data, the comarcas location variables are used. A positive spatial autocorrelation is detected with the Moran's I test (residuals from estimation 3, I = 0.274, z = 99.89, p < 0.001; inverse distance squared, bandwidth 500 m), a common result in global regression models.

Currently, the studies carried out have been based on distinct scenarios and the literature has revealed a certain diversity in terms of determining the letter (or set of letters) of reference for measuring and comparing the impact of energy qualifications on housing prices (Figure 8). This circumstance hinders comparisons of the premium resulting from going from one value to another within the ABCDEFG qualification scale. To facilitate comparison between studies, some authors have recommended that letters not be grouped and that D be considered the letter of reference, since it is in the middle of the scale.

Based on this background, the results of the model obtained for estimate 4 are presented, since it is the one that complies with the recommendations of using letter D as a reference. As for the characteristics of the housing, the model estimates that for each additional year (in terms of age of the housing), the asking price will be reduced a mean of 0.29%. As for size, the estimated impact implies that an increase of one square meter in surface area results in a 0.58% price increase, whereas the addition of another bedroom leads to a reduction of 2.09%. However, an additional bathroom represents a mean price increase of 22.44%. If the property has extras, such as a built-in wardrobe, air conditioning or a terrace, the mean impact on prices estimated by the model is 0.25%, 8.42% and 2.81%, respectively. The results reveal that a home situated on an additional story has a price increase of 0.02%. Using as a reference a second-hand home in good state, the model estimates a mean discount in the asking price of 9.49% for a second-hand home that needs renovation. On the other hand, if the home is a new construction, the results reveal a price increase of 20.78%. Within the typology of homes and using apartments as the reference, a duplex or attic apartment has a price increase of 1.30% and 9.45%, respectively, whereas studio apartments have a discount of 23.56%. The values obtained in the estimation of the parameters related to the building characteristics, such as having an elevator, garage, storage space or swimming pool, imply a mean price increase of 19.48%, 10.18%, 4.88% and 9.38%, respectively.
On the other hand, having a garden has a contrary effect, leading to a mean price reduction of 0.16% (not significant). As for characteristics related to location, for properties situated in neighborhoods with a higher gross development (those in which there are more homes per sector surface area), the model estimates a mean reduction in price of approximately 1.80%. As for the geographic distances, they are all statistically significant, except for the distance to pharmacies. The results reveal that for each kilometer that the housing is distanced from pharmacies or level 2 schools (secondary and high schools), the price decreases by 0.50% and 1.18%. The opposite occurs when the housing is distanced from health centers, hospitals and level 1 schools (infant and primary schools). Homes that are in coastal towns have a price increase of 16.57%. Finally, the estimated impact on prices of homes situated in the Marina Baja district is an increase of 1.81% with regard to the reference district (Alicante). As for the rest of the districts, the effect that is estimated by the model implies a reduction in asking prices, reaching discounts of between 15% and 20% in interior and southern districts of the province. As for the neighborhood characteristics, an increase of 1% was found for dependency and ageing, implying an increase in sales price of 0.16% and 0.01%, respectively. As for the percentage of foreigners or the percentage of individuals with university studies, an increase of 1% for these variables implies a 0.12% and 0.85% price increase, respectively. On the other hand, with a 1% increase in the percentage of the population without an education, there is a price reduction of 0.54%.
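As an illustrative aside, Moran's I, which the paper uses above to check the spatial autocorrelation of the residuals, can be computed directly from its definition. The coordinates and residuals below are invented placeholders; the weight matrix mimics the inverse-distance-squared scheme with a 500 m bandwidth mentioned earlier.

import numpy as np

def morans_i(values, coords, bandwidth=500.0):
    # Inverse-distance-squared spatial weights within the bandwidth, zero diagonal.
    d = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
    w = np.where((d > 0) & (d <= bandwidth), 1.0 / d**2, 0.0)
    z = values - values.mean()
    n, s0 = len(values), w.sum()
    return (n / s0) * (z @ w @ z) / (z @ z)

rng = np.random.default_rng(3)
coords = rng.uniform(0, 2000, size=(400, 2))   # invented point locations in metres
residuals = rng.normal(0.0, 1.0, 400)          # placeholder regression residuals
print(round(morans_i(residuals, coords), 3))   # close to 0 for spatially random residuals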
As for market characteristics, the model estimations reveal increases in prices in areas having a higher percentage of homes in rent and secondary homes, at 0.32% and 0.21%, respectively. The sale of homes indicates that when the properties are sold by professionals or banks, the price is reduced by a mean of 0.63% and 0.62%, respectively, with these values not being statistically significant.

As for the characteristics having a greater impact on asking prices, the five variables from the estimates having the greatest explanatory power, according to the standardized beta coefficients (not included due to problems of extension), are: (A) housing characteristics: constructed surface area and number of bathrooms; (B) building characteristics: having an elevator; and (C) location characteristics: percentage of individuals with university studies and being situated in a coastal town.

As for the energy qualification, the results for the entire sample are summarized in Figure 9, where it is observed that the housing with any qualification type (ABCDEFG grouping) and homes with high energy qualifications (letter A) had lower prices, respectively, 3.22% and 0.30% lower. Estimate 3 reveals that housing with high qualifications (letters A and B) do not have better economic premiums than other homes with lower qualifications or those that have not published their qualifications. This suggests that by not publishing the energy qualification, sellers may ask for higher prices than those asked for other homes with lower qualifications (E, F or G).

Estimations 3 to 9 are carried out with the sample of homes that published their energy qualifications (Figures 9 and 10). If letter D is used as the reference (estimations 4, 5 and 6), it is observed that letters A, B (and the AB grouping) do not have better premiums than those of letters C and D. In the case of homes qualified as E, F or G, they have very similar negative premiums, a decrease of approximately 8%. If adopting the letter G as a reference (estimations 7, 8 and 9), the positive price premiums for letters C and D are of special note, as well as the similarity of prices for the lower qualifications (E, F and G).

Discussion

The results of the one-way analysis of variance (ANOVA) support the first hypothesis (H1), since it reveals that the mean of the asking prices for the properties that do not publish their energy qualification (NT) are similar to those of homes with higher ratings, letters A, B or D (subset 3). Therefore, the sellers of these properties have no interest in publishing the qualification letter. These results are in line with other works. It is very likely that homes hiding the energy qualification have letters E, F or G, since this segment represents 86% of the labelled building stock of the autonomous community. The results of the regression model obtained from estimations 1, 2 and 3 are contradictory to the second hypothesis (H2) proposed in this document. Homes with an energy qualification (ABCDEFG grouping, estimation 1) or a high qualification (letter A, estimation 2) have a discount in price of 3.22% or 0.30%, respectively, with respect to the homes that do not publish their energy qualification (NT).
In addition, if comparing estimations 1 and 2 with the results obtained by Cespedes-Lopez, et al., it is observed that having an energy qualification, as compared to not having one, does not have a positive effect on the asking price, as it does in Europe in general (2.32%). In estimation 3, it is observed that homes that do not publish their energy qualifications have higher prices than those qualified with letters B, E, F and G. This estimation may be compared with (see Table 6), where a positive impact was found on prices for the high qualifications (A and B), and a decrease of between 0.8 and 1.6% for the poorer qualified (D, E, F and G). Note: * indicates that the coefficient is statistically significant.

Results of the regression model for estimations 4 to 9 (Figure 10) are contrary to the third hypothesis (H3). For estimations 4, 5 and 6 (ref. D), a negative sign was anticipated for letters below the reference letter, and a positive impact for letters above the reference letter (Figure 10a). Estimations 7, 8 and 9 (ref. G) anticipated that the premium of letter A would be positive and that the sign of the subsequent letters would also be positive, with a decreasing impact on prices until reaching the reference letter (Figure 10e). However, upon comparing the expected and obtained results, it can be seen that they do not comply with the initially proposed hypothesis (H3), since homes with better qualifications do not have better premiums than those with poorer qualifications. For housing with high qualifications (letters A, B and the AB and ABC groupings), a discount was obtained with respect to the reference housing, letter D (estimations 4, 5 and 6). For estimations 7, 8 and 9, it is seen that housing qualified as C and D are the best valued of the market segment, with the highest premiums as compared to the reference G. If comparing these results with those obtained from other studies (Table 6), it may be observed that, in general, housing with higher qualifications have higher sales prices. For example, estimation 6 from this study may be compared to the results of, where high qualifications (the AB grouping) obtained a positive price premium, and for the housing with low qualifications, the premiums are negative and decrease as the qualification decreases.

Conclusions

This work seeks to examine the effect of energy qualifications on the asking price of housing located in the Alicante (Spain) real estate market. To do so, a database was constructed based on 52,939 observations, of which 9194 offered information on energy qualifications (17.4%). The information contained in the database has permitted the creation of 63 variables that are used to estimate the regression model. In order to compare the results of this work with those of other studies, the model has been estimated 9 times. The first objective attempts to determine if an interest exists in not publishing energy qualification information for homes being sold. Two initial hypotheses are proposed, H1 and H2. The one-way analysis of variance (ANOVA) reveals that the H1 hypothesis is supported, since hiding the qualification may lead to higher asking prices. The second proposed hypothesis, H2, attempts to contrast whether or not energy qualification is a determining characteristic of the asking price.
Estimations 1 and 2 reveal that this hypothesis is rejected, since homes with an energy qualification (ABCDEFG grouping) or those having a high qualification (letter A), as compared to those that did not publish their rating (NT), have a negative premium of 3.2% (significant) and of 0.3% (not significant), respectively. The second objective proposes quantifying the economic impact of energy qualification on the asking price, offering the hypothesis H3. Estimations 4 to 9 suggest the contrary, finding that housing qualified with letters C and D have higher premiums than housing with higher qualifications (A or B). This study reveals that, in multi-family housing sold in the Alicante (Spain) province, a positive relationship does not exist between the energy certification system and the housing's asking price. This is due to a variety of reasons.
- First, real estate sellers and owners who do not publish energy qualifications offer their homes at prices that are similar to those having high qualifications.
- Second, there is the lack of sanctions placed by the public administration on companies, owners and real estate portals that do not publish the energy qualifications of the housing that is for sale or rent, motivating owners to not publish the letter and generating distorted asking prices for the housing. Therefore, it is important for the administration to closely supervise compliance with regulations and assign the necessary resources to local authorities to ensure said compliance, and if needed, to impose sanctions.
- Third, owners are not interested in improving energy qualifications, since, according to, there is no compensation for the additional investment needed to improve this qualification.
- Fourth, the current regulations for housing only require that these homes obtain an energy qualification if they are going to be sold, rented or advertised. However, there is no obligation to obtain a minimum qualification, so the improved energy performance of the homes is not encouraged.
- Fifth and finally, society's perception of EPC is negative, as revealed by several studies relying on surveys completed by professional real estate agents or energy certifiers. Regardless, these studies suggest that the main criteria used to select a home are price and location.
Currently, both nationally and regionally, economic incentives exist in order to offer value to housing with higher qualifications and to promote renovation. On a national level, the PAREER II program was financed with 204 million euros. Regionally speaking (Valencian community), there are distinct plans such as RENHATA, which intends to offer 4.95 million euros between 2020-2021 to improve the preservation of housing, accessibility and energy rehabilitation. Given that in Spain there are over 9.5 million buildings, it is unlikely that a country's building stock will be renewed thanks to public budgets. Therefore, it will be necessary to rely on private initiatives, based on market incentives (higher sales prices, higher rents) that encourage investments in the energy renewal of buildings. In this way, not only would property owners directly benefit from these renewals, the entire population would also receive benefits. This would ensure a more sustainable and environmentally respectful building stock, helping to create cleaner cities and an improved quality of life.
Table A2. Variables used by other authors for the determination of the price of housing. Own elaboration from .
Table A2 (cont.): Price (in all studies this is the dependent variable); Use of the dwelling; Housing tenure.
Figure A1. Correlation between the characteristics of the properties (independent variables) and the asking prices. Only correlations greater than 0.35 (in absolute value) are shown. |
Dental restorative biomaterials induce glutathione depletion in cultured human gingival fibroblast: protective effect of N-acetyl cysteine. Eight biomaterials eluted from four different types of dental restorative biomaterials, that is, from glass-ionomer cement (GIC: Ketac-fil and Fuji II), resin-modified glass ionomer cement (RM-GIC: Fuji II LC and Photac-fil), composite (Z100 MP and Tetric-flow), and compomer (Compoglass F and F-2000), were studied for their cytotoxic properties in relation to glutathione (GSH) content in cultured human gingival fibroblasts. Z100 MP, Tetric-flow, and Compoglass F were less cytotoxic than the others, with a toxic concentration of 50% (TC50) > 24% (of eluate), as determined by the MTT test. F-2000, Tetric-flow, and the other biomaterials were relatively more cytotoxic (TC50 = 9-16%). With the exception of Z100 MP, all the biomaterials induced a depletion of cellular glutathione (GSH) that was variable depending upon the biomaterial eluates. The strongest GSH depletion was with F-2000, Fuji II, and Photac-fil. GSH depletion with Compoglass and F-2000 was rapid (detectable after one h of cell treatment and complete within 3 h), whereas a longer period of incubation was required for the other biomaterials. Interestingly, the cytotoxic effects induced by all the biomaterials were prevented by cell treatment with the antioxidant N-acetylcysteine (NAC). This study provides evidence that the cytotoxic property of dental restorative biomaterials is associated with depletion of the glutathione level in gingival fibroblasts. While the molecular mechanisms of this phenomenon require further investigations, our data suggest that NAC may be useful in preventing the cellular damage induced by dental restorative biomaterials. |
import {
ActionReducer,
ActionReducerMap,
createFeatureSelector,
createSelector,
MetaReducer
} from '@ngrx/store';
import { environment } from '../../environments/environment';
import * as fromApp from './app.reducer';
import * as fromArticle from './article.reducer';
export interface State {
app: fromApp.State;
article: fromArticle.State;
}
export const reducers: ActionReducerMap<State> = {
app: fromApp.reducer,
article: fromArticle.reducer,
};
export const metaReducers: MetaReducer<State>[] = !environment.production ? [] : [];
/**
* Selectors
*/
export const getAppState = (state: State) => state.app;
export const getTitle = createSelector(getAppState, fromApp.getTitle);
export const getArticleEntitiyState = (state: State) => state.article;
export const getArticles = createSelector(getArticleEntitiyState, fromArticle.selectAll);
export const getLoading = createSelector(getArticleEntitiyState, fromArticle.getLoading);
export const getArticle = createSelector(getArticleEntitiyState, fromArticle.getArticle);
export const getCount = createSelector(getArticleEntitiyState, fromArticle.getCount);
|
// JianMerchant/src/main/java/com/woniukeji/jianmerchant/partjob/PartJobManagerActivity.java
package com.woniukeji.jianmerchant.partjob;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentManager;
import android.support.v4.app.FragmentStatePagerAdapter;
import android.support.v4.view.ViewPager;
import android.view.View;
import android.widget.ImageView;
import android.widget.TextView;
import com.flyco.tablayout.CommonTabLayout;
import com.flyco.tablayout.listener.CustomTabEntity;
import com.flyco.tablayout.listener.OnTabSelectListener;
import com.woniukeji.jianmerchant.R;
import com.woniukeji.jianmerchant.base.BaseActivity;
import com.woniukeji.jianmerchant.base.FragmentText;
import com.woniukeji.jianmerchant.entity.TabEntity;
import com.woniukeji.jianmerchant.utils.ActivityManager;
import java.util.ArrayList;
import butterknife.ButterKnife;
import butterknife.InjectView;
public class PartJobManagerActivity extends BaseActivity {
@InjectView(R.id.tl_6) CommonTabLayout tl6;
@InjectView(R.id.mainPager) ViewPager mainPager;
@InjectView(R.id.img_back) ImageView imgBack;
@InjectView(R.id.tv_title) TextView tvTitle;
private ArrayList<CustomTabEntity> mTabEntities = new ArrayList<>();
private String[] mTitles = {"录取", "完成"}; // tab titles: "录取" = accepted, "完成" = completed
private int[] mIconUnselectIds = {
R.mipmap.tab_guo_talk_unselect,
R.mipmap.tab_about_me_unselect};
private int[] mIconSelectIds = {
R.mipmap.tab_guo_talk_select,
R.mipmap.tab_about_me_select};
private ViewPagerAdapter adapter;
public int mType=1; // distinguishes the "accepted" vs. "completed" fragment (the same fragment class is instantiated twice, so a flag is needed)
@Override
public void setContentView() {
setContentView(R.layout.activity_part_job_manager);
ButterKnife.inject(this);
}
@Override
public void initViews() {
tvTitle.setText("管理兼职");
adapter = new ViewPagerAdapter(getSupportFragmentManager());
mainPager.setAdapter(adapter);
for (int i = 0; i < mTitles.length; i++) {
mTabEntities.add(new TabEntity(mTitles[i], mIconSelectIds[i], mIconUnselectIds[i]));
}
tl6.setTabData(mTabEntities);
tl6.setOnTabSelectListener(new OnTabSelectListener() {
@Override
public void onTabSelect(int position) {
mainPager.setCurrentItem(position);
}
@Override
public void onTabReselect(int position) {
// mainPager.setCurrentItem(position);
}
});
mainPager.addOnPageChangeListener(new ViewPager.OnPageChangeListener() {
@Override
public void onPageScrolled(int position, float positionOffset, int positionOffsetPixels) {
}
@Override
public void onPageSelected(int position) {
tl6.setCurrentTab(position);
// if (position==2){
// tl6.hideMsg(2);
// }
}
@Override
public void onPageScrollStateChanged(int state) {
}
});
}
@Override
public void initListeners() {
imgBack.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
finish();
}
});
}
@Override
public void initData() {
}
@Override
public void addActivity() {
ActivityManager.getActivityManager().addActivity(this);
}
@Override
public void onClick(View view) {
}
public int getmType() {
return mType;
}
public void setmType(int mType) {
this.mType = mType;
}
private class ViewPagerAdapter extends FragmentStatePagerAdapter {
public ViewPagerAdapter(FragmentManager fm) {
super(fm);
}
@Override
public Fragment getItem(int position) {
switch (position) {
case 0:
                    return PartJobManagerFragment.newInstance(1); // accepted
case 1:
                    return PartJobManagerFragment.newInstance(0); // completed
}
return new FragmentText();
}
@Override
public int getCount() {
return 2;
}
}
}
|
#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>
#include "librace.h"
#define RW_LOCK_BIAS 0x00100000
#define WRITE_LOCK_CMP RW_LOCK_BIAS
/** Example implementation of linux rw lock along with 2 thread test
* driver... */
typedef union {
atomic_int lock;
} rwlock_t;
static inline int read_can_lock(rwlock_t *lock)
{
return atomic_load_explicit(__LINE__, &lock->lock, memory_order_relaxed) > 0;
}
static inline int write_can_lock(rwlock_t *lock)
{
return atomic_load_explicit(__LINE__, &lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}
static inline void read_lock(rwlock_t *rw)
{
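	/* Reserve a reader slot by decrementing the counter; a non-positive prior value means a writer holds the lock, so undo the decrement and spin until readers are admitted again. */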
int priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, 1, memory_order_acquire);
while (priorvalue <= 0) {
atomic_fetch_add_explicit(__LINE__, &rw->lock, 1, memory_order_relaxed);
do {
priorvalue = atomic_load_explicit(__LINE__, &rw->lock, memory_order_relaxed);
} while (priorvalue <= 0);
priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, 1, memory_order_acquire);
}
}
static inline void write_lock(rwlock_t *rw)
{
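	/* Claim exclusive access by subtracting the full bias; any prior value other than RW_LOCK_BIAS means readers or another writer are active, so undo the subtraction and spin. */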
int priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_acquire);
while (priorvalue != RW_LOCK_BIAS) {
atomic_fetch_add_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
do {
priorvalue = atomic_load_explicit(__LINE__, &rw->lock, memory_order_relaxed);
} while (priorvalue != RW_LOCK_BIAS);
priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_acquire);
}
}
static inline int read_trylock(rwlock_t *rw)
{
int priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, 1, memory_order_acquire);
if (priorvalue > 0)
return 1;
atomic_fetch_add_explicit(__LINE__, &rw->lock, 1, memory_order_relaxed);
return 0;
}
static inline int write_trylock(rwlock_t *rw)
{
int priorvalue = atomic_fetch_sub_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_acquire);
if (priorvalue == RW_LOCK_BIAS)
return 1;
atomic_fetch_add_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
return 0;
}
static inline void read_unlock(rwlock_t *rw)
{
atomic_fetch_add_explicit(__LINE__, &rw->lock, 1, memory_order_release);
}
static inline void write_unlock(rwlock_t *rw)
{
atomic_fetch_add_explicit(__LINE__, &rw->lock, RW_LOCK_BIAS, memory_order_release);
}
rwlock_t mylock;
int shareddata;
static void a(void *obj)
{
int i;
for(i = 0; i < 2; i++) {
if ((i % 2) == 0) {
read_lock(&mylock);
load_32(&shareddata);
read_unlock(&mylock);
} else {
write_lock(&mylock);
store_32(&shareddata,(unsigned int)i);
write_unlock(&mylock);
}
}
}
int user_main(int argc, char **argv)
{
thrd_t t1, t2;
atomic_init(&mylock.lock, RW_LOCK_BIAS);
thrd_create(&t1, (thrd_start_t)&a, NULL);
thrd_create(&t2, (thrd_start_t)&a, NULL);
thrd_join(t1);
thrd_join(t2);
return 0;
}
|
// src/main/java/me/mrdaniel/adventuremmo/catalogtypes/tools/ToolTypeRegistryModule.java
package me.mrdaniel.adventuremmo.catalogtypes.tools;
import java.util.Collection;
import java.util.Optional;
import javax.annotation.Nonnull;
import org.spongepowered.api.registry.CatalogRegistryModule;
public class ToolTypeRegistryModule implements CatalogRegistryModule<ToolType> {
@Override
public Optional<ToolType> getById(@Nonnull final String id) {
return ToolTypes.of(id);
}
@Override
public Collection<ToolType> getAll() {
return ToolTypes.VALUES;
}
} |
Pneumolysin stimulates production of tumor necrosis factor alpha and interleukin-1 beta by human mononuclear phagocytes Human peripheral blood monocytes and a human monocyte cell line were exposed to the toxin pneumolysin. Pneumolysin-exposed cells produced significantly larger amounts of tumor necrosis factor alpha and interleukin-1 beta than cells not exposed to the toxin. The viability of cells was not affected by the concentrations of pneumolysin used in the experiments. |
Reliability and validity of the structured interview for personality disorders in adolescents. The Structured Interview for the DSM-III Personality Disorders was administered to 23 currently affectively ill adolescents and their parents. Interviews were videotaped and rerated; interrater agreement was moderate (weighted K = 0.49; unweighted K = 0.59). Moreover, there was evidence of convergent validity for Cluster II traits and disorders (borderline, histrionic, narcissistic), insofar as these diagnoses were associated with higher scores on the novelty-seeking subscale of the Tridimensional Personality Questionnaire as predicted. Cluster II patients tended to have higher rates of attention deficit disorder and bipolar disorder, and higher rates of suicidal gestures among second-degree relatives. Some difficulty was encountered differentiating symptoms of affective illness from those of personality disorder and in deciding when personality traits were impairing enough to call them disorders. Reliability may be improved by interviewing patients when they are out of an affective episode and by using standardized functional impairment criteria for differentiating personality style from disorder. Additional work is advocated to learn if personality disorders are precursors, epiphenomena, or the consequences of affective disorder. |
#include<bits/stdc++.h>
using namespace std;
typedef long long LL;
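// Build a shortest array whose bitwise XOR equals u and whose sum equals v; print its length and elements, or -1 if no such array exists.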
int main()
{
LL u,v;
cin>>u>>v;
if(u==v)
{
if(u==0)printf("0\n");
else printf("1\n%lld\n",u);
}
else if(u>v)printf("-1\n");
else
{
LL t1=u%2;
LL t2=v%2;
if(t1==t2)
{
LL a,b,c;
a=u;
b=v-u;
c=b/2;
LL d=(a+c)^c;
if(d==u)
{
printf("2\n");
printf("%lld %lld\n",a+c,c);
}
else
{
printf("3\n");
printf("%lld %lld %lld\n",u,c,c);
}
}
else printf("-1\n");
}
}
|
Synthesis of Oligo-β-Alanine-Based Surfactant via Cobalt-Catalyzed Carbonylation and Surface Activity Study Synthesis of a novel surfactant with an oligo-β-alanine hydrophilic headgroup was achieved via the carbonylative oligomerization of aziridine followed by coupling with n-octylamine in one pot. The chemical structure of the surfactant was confirmed by NMR and MALDI MS. Preliminary studies on its surface properties, including surface tension measurement and its adsorption on polystyrene latex particles, are reported. |
Five years ago Monday, when there was an explosion on the Deepwater Horizon, an oil rig in the Gulf of Mexico, the first news reports said that nearly a dozen people had been killed by the blast.
Before long, however, it was clear that the impact would continue to be felt, and by many more people. The oil spill that began that day and continued into the summer would end up being the worst such accident in U.S. history, spilling millions of gallons of crude into the fragile waterway. How it would be cleaned up remained a mystery, one that is still being answered today.
Investigators are still exploring exactly what went wrong on the Deepwater Horizon, but the catastrophe seems to have been the result of a cascading series of failures–and too little oversight. Rigs are equipped with blowout preventers, 40-ft.-high (12 m) stacks of machinery with multiple hydraulic valves that are designed to seal a well should anything go wrong. Crew members on the Horizon couldn’t activate the blowout preventer, and a deadman’s switch that should have kicked in when control of the rig was lost failed as well. One safety feature the Horizon did not have is an acoustic switch, an additional backup that can activate the blowout preventer remotely. Regulators don’t mandate them in the U.S., though they are effectively required in nations like Brazil and Norway. |
Abstract
Instant messaging (IM) is an increasingly popular mode of communication on the Internet. Although it is used for personal and private conversations, it is not at all a private medium. Not only are all of the messages unencrypted and unauthenticated, but they are all routed through a central server, forming a convenient interception point for an attacker. Users would benefit from being able to have truly private conversations over IM, combining the features of encryption, authentication, deniability, and forward secrecy, while working within their existing IM infrastructure. In this talk, I will discuss "Off-the-Record Messaging" (OTR), a widely used software tool for secure and private instant messaging. I will outline the properties of Useful Security and Privacy Technologies that motivated OTR's design, compare it to other IM security mechanisms, and talk about its ongoing development directions.
Bio
Ian Goldberg is an Assistant Professor of Computer Science at the University of Waterloo, where he is a founding member of the Cryptography, Security, and Privacy (CrySP) research group. He holds a Ph.D. from the University of California, Berkeley, where he discovered serious weaknesses in a number of widely deployed security systems, including those used by cellular phones and wireless networks. He also studied systems for protecting the personal privacy of Internet users, which led to his role as Chief Scientist at Zero-Knowledge Systems (now known as Radialpoint), where he commercialized his research as the Freedom Network.
Please consider donating to the Computer Science Club to help offset the costs of bringing you our talks. |
package v1
import (
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/def"
"github.com/huaweicloud/huaweicloud-sdk-go-v3/services/dis/v1/model"
"net/http"
)
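// Each GenReqDefFor* function below builds the request definition for one DIS REST operation: HTTP method, path template, response model, content type, and the location (path, query or body) of every request and response field.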
func GenReqDefForCreatePoliciesV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v3/{project_id}/streams/{stream_name}/policies").
WithResponse(new(model.CreatePoliciesV3Response)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCreateStream() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v2/{project_id}/streams").
WithResponse(new(model.CreateStreamResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCreateTransferTask() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v2/{project_id}/streams/{stream_name}/transfer-tasks").
WithResponse(new(model.CreateTransferTaskResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCreateTransferTaskV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v3/{project_id}/streams/{stream_name}/transfer-tasks").
WithResponse(new(model.CreateTransferTaskV3Response)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteStream() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v2/{project_id}/streams/{stream_name}").
WithResponse(new(model.DeleteStreamResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteStreamV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v3/{project_id}/streams/{stream_name}").
WithResponse(new(model.DeleteStreamV3Response)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteTransferTask() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v2/{project_id}/streams/{stream_name}/transfer-tasks/{task_name}").
WithResponse(new(model.DeleteTransferTaskResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("TaskName").
WithJsonTag("task_name").
WithLocationType(def.Path))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteTransferTaskV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v3/{project_id}/streams/{stream_name}/transfer-tasks/{task_name}").
WithResponse(new(model.DeleteTransferTaskV3Response)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("TaskName").
WithJsonTag("task_name").
WithLocationType(def.Path))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDescribeStream() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/streams/{stream_name}").
WithResponse(new(model.DescribeStreamResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StartPartitionId").
WithJsonTag("start_partitionId").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("LimitPartitions").
WithJsonTag("limit_partitions").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForGetCursor() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/cursors").
WithResponse(new(model.GetCursorResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream-name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("PartitionId").
WithJsonTag("partition-id").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("CursorType").
WithJsonTag("cursor-type").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StartingSequenceNumber").
WithJsonTag("starting-sequence-number").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Timestamp").
WithJsonTag("timestamp").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForGetRecords() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/records").
WithResponse(new(model.GetRecordsResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("PartitionCursor").
WithJsonTag("partition-cursor").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("MaxFetchBytes").
WithJsonTag("max_fetch_bytes").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForListPoliciesV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v3/{project_id}/streams/{stream_name}/policies").
WithResponse(new(model.ListPoliciesV3Response)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForListStreams() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/streams").
WithResponse(new(model.ListStreamsResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Limit").
WithJsonTag("limit").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StartStreamName").
WithJsonTag("start_stream_name").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForListTransferTasksV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v3/{project_id}/streams/{stream_name}/transfer-tasks").
WithResponse(new(model.ListTransferTasksV3Response)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForPutRecords() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v2/{project_id}/records").
WithResponse(new(model.PutRecordsResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForUpdatePartitionCount() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPut).
WithPath("/v2/{project_id}/streams/{stream_name}").
WithResponse(new(model.UpdatePartitionCountResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForUpdateStreamV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPut).
WithPath("/v3/{project_id}/streams/{stream_name}").
WithResponse(new(model.UpdateStreamV3Response)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Path))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCreateApp() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v2/{project_id}/apps").
WithResponse(new(model.CreateAppResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCreateAppV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v3/{project_id}/apps").
WithResponse(new(model.CreateAppV3Response)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteApp() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v2/{project_id}/apps/{app_name}").
WithResponse(new(model.DeleteAppResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("AppName").
WithJsonTag("app_name").
WithLocationType(def.Path))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDescribeApp() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/apps/{app_name}").
WithResponse(new(model.DescribeAppResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("AppName").
WithJsonTag("app_name").
WithLocationType(def.Path))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForListApp() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/apps").
WithResponse(new(model.ListAppResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Limit").
WithJsonTag("limit").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StartAppName").
WithJsonTag("start_app_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForListAppV3() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v3/{project_id}/apps").
WithResponse(new(model.ListAppV3Response)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Limit").
WithJsonTag("limit").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StartAppName").
WithJsonTag("start_app_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForCommitCheckpoint() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodPost).
WithPath("/v2/{project_id}/checkpoints").
WithResponse(new(model.CommitCheckpointResponse)).
WithContentType("application/json;charset=UTF-8")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForDeleteCheckpoint() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodDelete).
WithPath("/v2/{project_id}/checkpoints").
WithResponse(new(model.DeleteCheckpointResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("AppName").
WithJsonTag("app_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("CheckpointType").
WithJsonTag("checkpoint_type").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("PartitionId").
WithJsonTag("partition_id").
WithLocationType(def.Query))
reqDefBuilder.WithResponseField(def.NewFieldDef().
WithName("Body").
WithLocationType(def.Body))
requestDef := reqDefBuilder.Build()
return requestDef
}
func GenReqDefForGetCheckpoint() *def.HttpRequestDef {
reqDefBuilder := def.NewHttpRequestDefBuilder().
WithMethod(http.MethodGet).
WithPath("/v2/{project_id}/checkpoints").
WithResponse(new(model.GetCheckpointResponse)).
WithContentType("application/json")
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("StreamName").
WithJsonTag("stream_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("PartitionId").
WithJsonTag("partition_id").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("AppName").
WithJsonTag("app_name").
WithLocationType(def.Query))
reqDefBuilder.WithRequestField(def.NewFieldDef().
WithName("CheckpointType").
WithJsonTag("checkpoint_type").
WithLocationType(def.Query))
requestDef := reqDefBuilder.Build()
return requestDef
}
|
The pressure of deep-ocean sound waves could be used to stop tsunamis in their tracks, researchers have found, by dissipating their energy across wider areas and reducing the height and speed of these monster waves before they reach land.
Tsunamis - which can be caused by earthquakes, landslides, or any sudden release of energy underwater - are capable of devastating coastal regions when they hit land, and right now, there's not much we can do to stop them.
But mathematician Usama Kadri from the University of Cardiff in the UK thinks acoustic-gravity waves (AGWs) could be the solution.
Acoustic-gravity waves occur naturally in the oceans, cutting through the water at the speed of sound, and Kadri says controlling these waves could give us a way of reducing a tsunami's momentum.
"Up until now, little attention has been paid to trying to mitigate tsunamis and the potential of acoustic-gravity waves remains largely unexplored," says Kadri.
AGWs can stretch for hundreds of kilometres, and travel many thousands of metres, and it's thought that plankton (which can't swim themselves) rely on these waves to move around and find food.
[Figure caption: How AGWs could warn us about and stop tsunamis. Credit: Usama Kadri/Heliyon]
Kadri has previously suggested that these AGWs could act as early-warning systems for tsunamis, as they often precede these massive waves.
According to Kadri, the power in these sound waves is also enough to dilute the strength of an onrushing tsunami, so most of its energy would be used up before it reaches land.
The hypothesis is based on calculations of how energy could be transferred and dispersed underwater, and draws on Kadri's previously published work on the physics of these AGWs.
Now, all we need is a way to engineer and control these sound waves - something Kadri hasn't covered in his sums.
The best way to tackle this could be to somehow harness the AGWs created naturally by tsunamis, Kadri says. Essentially, we need to figure out how to fire some of the energy created by a natural disaster back in the other direction.
For the time being, Kadri's calculations are just a proof-of-concept, but if we can get the idea to work, there's the potential to save many lives, and lessen the risk of large-scale chaos.
Take the 2004 Indian Ocean earthquake and tsunami, for example, which was estimated to be responsible for the deaths of more than 200,000 people, as well as widespread damage to local communities and ecosystems.
It might be a while before we have anti-tsunami stations dotted around the coast, but this research suggests that they could be feasible, and potentially adjusted to suit each incident as well.
"One could adapt the mechanisms presented here to account for other violent geophysical processes in the ocean such as landslides, volcanic eruptions, underwater explosions, and falling meteorites," explains Kadri.
"While the scales involved may differ in each process, the underlying physical processes involved are similar."
The findings have been published in Heliyon. |
import os
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
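# DB_URL is expected to be set in the environment (e.g. "mysql://user:pass@host/db"); the scheme is rewritten below so SQLAlchemy uses the PyMySQL driver.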
db_url = os.getenv('DB_URL')
db_url = db_url.replace('mysql://', 'mysql+pymysql://')
engine = create_engine(db_url, pool_recycle=3600)
SessionFactory = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = SessionFactory()
try:
yield session
session.commit()
except Exception as e:
print(e)
session.rollback()
raise
finally:
session.close() |
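# Usage sketch (hypothetical; assumes a SQLAlchemy-mapped model class named Article exists):
# with session_scope() as session:
#     session.add(Article(title="example"))
#     articles = session.query(Article).all()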
Lycopene in tomatoes and tomato products Abstract This article focused on the content of lycopene in fresh and dried tomatoes and tomato pomace, as well as in tomato paste at different harvest times (harvest 1, August, and harvest 2, September). The lycopene content of tomatoes and tomato products was evaluated by using high-performance liquid chromatography (HPLC). The results showed that the highest content of lycopene was estimated in the tomato paste independent of the time of harvest (211.73 mg/100 g dm in August and 184.29 mg/100 g dm in September) and the lowest content in fresh pomace (20.45 and 16.11 mg/100 g dm in August and September, respectively). Good sources of lycopene are tomato by-products, in particular dried tomato pomace (25.11 mg/100 g dm for harvest 1 and 19.30 mg/100 g dm for harvest 2). This study showed that tomato waste can be considered as a promising source of lycopene for the production of functional foods. |
THE IMPACT OF THE DIGITAL ECONOMY ON THE DEVELOPMENT OF THE WORLD ECONOMY This article analyzes the impact of the digital economy on the development of the world economy and shows how the concept of the digital economy has been introduced into the economy and developed. The article also analyzes the advantages of the digital economy and the factors influencing it and, drawing on existing research, develops proposals and conclusions for the further development of the digital economy. |
#include "Load0.h"
#include "../Data.h"
#include "../Ensemble.h"
#include "../Member.h"
VariableLoad0::VariableLoad0() : Variable("Load0") {
}
float VariableLoad0::computeCore(const Data& iData,
int iDate,
int iInit,
float iOffset,
const Location& iLocation,
const Member& iMember,
Input::Type iType) const {
// Use observed load at the 0 forecast horizon
if(iOffset == iInit) {
return iData.getValue(iDate, iInit, iOffset, iLocation, iMember, "Load", Input::typeObservation);
}
// Find nearest temperature location
std::vector<Location> nearest;
iData.getInput("T", Input::typeForecast)->getSurroundingLocations(iLocation, nearest);
std::vector<float> values;
// Hour of day: 1.. 24
int timeOfDay = (int) iOffset;
timeOfDay += 1;
timeOfDay = timeOfDay % 24 + 1;
//if(timeOfDay == 0) timeOfDay = 24;
//std::cout << "Time of day: " << timeOfDay << " " << iOffset << std::endl;
// Previous load
Ensemble ensLoad;
float prevOffset = fmod(iOffset-1,24);
float prevLoad = iData.getValue(iDate, iInit, prevOffset, iLocation, iMember, "Load0", Input::typeForecast);
// Month
int month = Global::getMonth(iDate);
// Current temperature
Ensemble ensT;
iData.getEnsemble(iDate, iInit, iOffset, nearest[0], "T", Input::typeForecast, ensT);
float T = Global::mean(ensT.getValues()) ;
// Previous temperature
Ensemble ensT0;
iData.getEnsemble(iDate, iInit, iOffset-1, nearest[0], "T", Input::typeForecast, ensT0);
float Tprev = Global::mean(ensT0.getValues());
values.push_back(timeOfDay);
values.push_back(prevLoad);
values.push_back(month);
values.push_back(T);
values.push_back(Tprev);
for(int i = 0; i < (int) values.size(); i++) {
if(!Global::isValid(values[i])) {
//std::cout << "Missing" << std::endl;
std::stringstream ss;
ss << "VariableLoad0: D" << iDate << " O" << iOffset << " L" << iLocation.getId() << " M" << iMember.getId() << " Missing ";
ss << i << " " << std::endl;
Global::logger->write(ss.str(), Logger::warning);
return Global::MV;
}
}
float load = gepModelL(values);
return load;
}
float VariableLoad0::gepModelL(const std::vector<float>& iValues) {
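    // Hard-coded regression formula mapping [hour of day, previous-hour load, month, current temperature, previous-hour temperature] to a load estimate; the G*C* constants appear to be coefficients produced by a gene expression programming (GEP) fit.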
float G1C0 = 7.15625;
float G1C1 = 2.938508;
float G2C0 = 4.288299;
float G2C1 = 6.900544;
float G3C0 = -9.998749;
//float G3C1 = -4.421844;
float G4C0 = 9.162597;
float G4C1 = 9.376007;
float G5C0 = 2.04776;
float G5C1 = -2.143921;
long unsigned int HH = 0;
long unsigned int L1h = 1;
long unsigned int MM = 2;
long unsigned int T = 3;
long unsigned int T1h = 4;
double dblTemp = 0.0;
dblTemp = (pow(log(pow(((G1C1+G1C0) > (G1C0 > iValues[HH] ? iValues[T] : iValues[HH]) ? iValues[HH] : G1C1),3)),3)-G1C0);
dblTemp += pow((cos((sin((G2C1 < iValues[HH] ? iValues[HH] : G2C1))*(iValues[HH]+G2C0)))-iValues[HH]),2);
dblTemp += ((((G3C0-iValues[HH])+iValues[L1h])-((iValues[HH]+iValues[HH])+iValues[HH]))-iValues[T]);
dblTemp += ((G4C0*sqrt((iValues[HH]*G4C1)))*cos(pow((iValues[HH]/G4C1),3)));
dblTemp += (iValues[T1h]-pow((((G5C0 > G5C0 ? iValues[T] : iValues[HH])+pow(G5C1,3))*(G5C0 > iValues[MM] ? G5C1 : G5C0)),2));
return dblTemp;
}
|
// primer/ch03/exercise/ex45.cc
#include <cstddef>
#include <iostream>
using std::size_t;
using std::cout;
using std::endl;
int main() {
int ia[3][4] = {
{ 0, 1, 2, 3 },
{ 4, 5, 6, 7 },
{ 8, 9, 10, 11 }
};
// 1. range for
cout << "ia[3][4]: " << endl;
for (const auto &row : ia) {
for (int col : row) {
cout << col << " ";
}
cout << endl;
}
cout << endl;
// 2. for loop using subscripts
cout << "ia[3][4]: " << endl;
for (size_t i = 0; i < 3; ++i) {
for (size_t j = 0; j < 4; ++j) {
cout << ia[i][j] << " ";
}
cout << endl;
}
cout << endl;
// 3. for loop using pointer
cout << "ia[3][4]: " << endl;
for (auto *row = ia; row != ia + 3; ++row) {
for (int *col = *row; col != *row + 4; ++col) {
cout << *col << " ";
}
cout << endl;
}
cout << endl;
return 0;
}
|
/*
* Copyright (c) 2013 <NAME>
* All rights reserved.
*/
package colobot.editor.opengl;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.lwjgl.opengl.GL11;
import org.lwjgl.opengl.GL15;
public final class VBOModel extends CompiledModel
{
private int id = 0;
public VBOModel(Model model)
{
super(model);
}
@Override
public void create()
{
if(id != 0) return;
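        // Capacity: triangles * 3 vertices * 4 bytes per float * 8 floats per vertex (2 UV + 3 normal + 3 position), matching the GL_T2F_N3F_V3F interleaved layout used in render().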
ByteBuffer buffer = ByteBuffer.allocateDirect(model.size() * 3 * 4 * (3 + 3 + 2));
buffer.order(ByteOrder.nativeOrder());
for(Triangle triangle : model)
{
for(int i=0; i<3; i++)
{
Vertex vertex = triangle.getVertex(i);
// puts UV
buffer.putFloat(vertex.getU());
buffer.putFloat(vertex.getV());
// puts normal
buffer.putFloat(vertex.getNormalX());
buffer.putFloat(vertex.getNormalY());
buffer.putFloat(vertex.getNormalZ());
// puts position
buffer.putFloat(vertex.getX());
buffer.putFloat(vertex.getY());
buffer.putFloat(vertex.getZ());
// */
}
}
buffer.flip();
id = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, id);
GL15.glBufferData(GL15.GL_ARRAY_BUFFER, buffer, GL15.GL_STATIC_DRAW);
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
}
@Override
public void destroy()
{
if(id == 0) return;
GL15.glDeleteBuffers(id);
id = 0;
}
public void render()
{
if(id == 0) return;
GL11.glBindTexture(GL11.GL_TEXTURE_2D, model.getTexture().getID());
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, id);
GL11.glInterleavedArrays(GL11.GL_T2F_N3F_V3F, 0, 0);
GL11.glDrawArrays(GL11.GL_TRIANGLES, 0, 3 * model.size());
GL11.glBindTexture(GL11.GL_TEXTURE_2D, 0);
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
}
}
|
Democratic officials are second-guessing the party’s obsession with attacking the Koch brothers, saying it bears some of the blame for last year’s devastating election losses as the focus on the conservative billionaires diluted a party message already struggling for clarity.
Doubts about the relentless attacks on the Koch brothers surfaced as the Democratic National Committee held its annual meeting Thursday in Washington, where state party officials from across the country mulled what went wrong in 2014.
Led by then-Senate Majority Leader Harry Reid, Democrats repeatedly shifted attention during the 2014 election cycle to Charles G. Koch and David H. Koch, who spent more than $100 million supporting conservative candidates through their various political organizations, most notably Americans for Prosperity and Freedom Partners Action Fund.
Mr. Reid, Nevada Democrat, accused them of “trying to buy America” and cited the brothers by name hundreds of times in speeches on the Senate floor. Democratic campaigns, meanwhile, begged supporters for donations to combat the Kochs’ money.
But some Democratic officials at the DNC meeting said the message doesn’t resonate with voters.
“It raises money for sure. But is it good to motivate a voter? No,” said a state party executive director who said he didn’t want to publicly criticize the national party leaders.
And Ken Martin, chairman of the Minnesota Democratic-Farmer-Labor Party, said campaign finance issues were “inside baseball.”
“Americans are focused on bread and butter issues and could care less about who is funding the campaigns,” he said.
Mr. Martin, who leads the campaign finance reform committee for the Association of State Democratic Chairs, said that the effect of money in politics was an important issue but “people don’t care.”
Sharon Stroschein, national committee woman for South Dakota, defended the attacks on the Koch brothers but acknowledged that the party’s message, such as touting the success of Obamacare, was “getting lost in the shuffle.”
“I’m not happy with how the message is getting out,” she said. “It’s frustrating that we can’t do a better job.”
The Koch brothers and Freedom Partners Chamber of Commerce have announced plans to spend $889 million in the 2016 presidential and congressional races. But they have also linked arms with liberal advocates this year in a $5 million campaign for criminal justice reform.
The Coalition for Public Safety includes the liberal Center for American Progress and American Civil Liberties Union, as well as Koch Industries, the tea party group Freedom Works and the conservative Americans for Tax Reform, according to the New York Times.
Democrats’ losses in the 2014 elections extended across the board: they lost nine seats in the Senate, giving up control after eight years in the majority, and also yielded seats in the House, ceded several governorships and gave up seats in statehouses across the country.
Blame for the disaster has fallen on everyone from President Obama to bad candidates who Democrats said ran as “Republican-light.”
For her part, Mrs. Stroschein said that the time had come for DNC Chairman Debbie Wasserman Schultz, who has held the post since 2011, to step aside.
“She is doing a good job but it’s time for a change in leadership. We need to have someone new with new ideas,” said Mrs. Stroschein.
Mrs. Wasserman Schultz made a splash Thursday by rebutting former New York Republican Mayor Rudolph W. Giuliani, who said this week that he does “not believe that the president loves America.” She denounced the remarks and challenged potential Republican presidential candidates and other GOP leaders to disavow the remarks.
Quoting Mr. Giuliani, Mrs. Wasserman Schultz told the DNC crowd they should be outraged.
“Let that sink in a moment. A leader of the Republican Party said the president doesn’t love us and doesn’t love the country,” she said. “Is this what it has come to? Really?”
She noted that Wisconsin Gov. Scott Walker, who is eyeing a 2016 White House run, was at the event with Mr. Giuliani and didn’t repudiate the comment. She also called out several other potential Republican presidential candidates, including Louisiana Gov. Bobby Jindal.
“Stand up and say, ‘Enough!’” she demanded. “They need to start leading.”
Mr. Walker said on CNBC that “the mayor can speak for himself. I’m not going to comment on what the president thinks or not. He can speak for himself.”
Mr. Jindal said in a statement that he questioned Mr. Giuliani’s “phraseology” but not his sentiment.
“The gist of what Mayor Giuliani said — that the president has shown himself to be completely unable to speak the truth about the nature of the threats from these ISIS terrorists — is true,” he said.
|
Exactly initialized recursive least squares In this paper, we present three order-recursive formulas for the Moore-Penrose pseudoinverses of matrices which are the improved and extended Greville formulas. These new versions not only reduce almost half memory locations of Greville formula at each recursion, but also are very useful to derive recursive formulas for the optimization solutions involving the pseudoinverses of matrices. As applications, using the new formulas, we derive Recursive Least Squares (RLS) procedures which coincide exactly with the batch LS solutions to the problems of unconstrained LS, LS with linear equality constraints, and weighted LS, respectively, including their simple and exact initializations. In comparison with previous results of Albert and Sittler, not only the derivation of the recursive formulas are much easier, but also the formulas themselves are clearer and simpler. In particular, the linear equality constrained RLS can be of the same version of RLS without constraint except the initial values, which has important practical applications. |
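For reference, the sketch below shows the standard textbook RLS recursion in its inverse-correlation form. It is only an illustrative baseline, not the paper's Greville-based, exactly initialized variant; the large-diagonal initialization of P noted in the final comment is the conventional approximation that exact initialization avoids.
import numpy as np

def rls_update(theta, P, x, y, lam=1.0):
    # One recursive least squares step.
    # theta: current estimate (n x 1), P: inverse correlation matrix (n x n),
    # x: new regressor (n x 1), y: new scalar observation, lam: forgetting factor.
    Px = P @ x
    k = Px / (lam + x.T @ Px)              # gain vector
    theta = theta + k * (y - x.T @ theta)  # correct the estimate with the new innovation
    P = (P - k @ Px.T) / lam               # rank-one update of the inverse correlation matrix
    return theta, P

# Conventional (approximate) initialization: theta = zeros((n, 1)), P = large_constant * eye(n).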
import { isDefined } from '~/is-defined'
export function concat<T>(...items: ReadonlyArray<ConcatArray<T> | null | undefined>): T[] {
return [].concat(...items.filter(isDefined))
}
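// e.g. concat([1, 2], null, [3], undefined) yields [1, 2, 3]; nullish segments are dropped by isDefined before concatenation.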
|
Impact of macrolide therapy in patients hospitalized with Pseudomonas aeruginosa community-acquired pneumonia. BACKGROUND Several studies have described a clinical benefit of macrolides due to their immunomodulatory properties in various respiratory diseases. We aimed to assess the effect of macrolide therapy on mortality in patients hospitalized for Pseudomonas aeruginosa community-acquired pneumonia (CAP). METHODS We performed a retrospective population-based study of > 150 hospitals in the US Veterans Health Administration. Patients were included if they had a diagnosis of CAP and P aeruginosa was identified as the causative pathogen. Patients with health-care-associated pneumonia and immunosuppression were excluded. Macrolide therapy was considered when administered within the first 48 h of admission. Univariate and multivariable analyses were performed using 30-day mortality as the dependent measure. RESULTS We included 402 patients with P aeruginosa CAP, of whom 171 (42.5%) received a macrolide during the first 48 h of admission. These patients were older and white. Macrolide use was not associated with lower 30-day mortality (hazard ratio, 1.14; 95% CI, 0.70-1.83; P =.5). In addition, patients treated with macrolides had no differences in ICU admission, use of mechanical ventilation, use of vasopressors, and length of stay (LOS) compared with patients not treated with macrolides. A subgroup analysis among patients with P aeruginosa CAP in the ICU showed no differences in baseline characteristics and outcomes. CONCLUSIONS Macrolide therapy in the first 48 h of admission is not associated with decreased 30-day mortality, ICU admission, need for mechanical ventilation, and LOS in hospitalized patients with P aeruginosa CAP. Larger cohort studies should address the benefit of macrolides as immunomodulators in patients with P aeruginosa CAP. |
// src/JournalFilter.h
/*
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef JOURNAL_FILTER_H
#define JOURNAL_FILTER_H
#include <map>
#include <string>
#include <vector>
#include "Journal.h"
#include "JournalBookBase.h"
#include "memory"
using namespace std;
/**
* @brief
* 缓存日记的stamp和level以及tags,用于加快排序和筛选的速度。
* 相关信息不会在创建filter全部缓存,而是在进行相关排序和筛选的时候临时处理。
*
 * Caches the stamp, level and tags of each journal entry in order to speed up sorting and filtering.
 * The data is not all cached when the filter is constructed; it is cached on demand just before
 * filtering and sorting.
*/
typedef struct guidance_s
{
size_t order; //order in book
time_t stamp;
int32_t level;
vector<string> tags;
} guidance;
class JournalFilter
{
private:
shared_ptr<JournalBookBase> mBook;
vector <guidance> mGuidance;
uint32_t mCacheFlag;
void tryCache(uint32_t cacheFlag);
static bool orderAscCmp(const guidance &d1,const guidance &d2);
static bool orderDescCmp(const guidance &d1,const guidance &d2);
static bool stampAscCmp(const guidance &d1,const guidance &d2);
static bool stampDescCmp(const guidance &d1,const guidance &d2);
static bool levelAscCmp(const guidance &d1,const guidance &d2);
static bool levelDescCmp(const guidance &d1,const guidance &d2);
public:
JournalFilter(shared_ptr<JournalBookBase> book);
~JournalFilter() = default;
vector<size_t> getJournalOrder();
void sortByOrder(bool asc = true);
void sortByStamp(bool asc = true);
void sortByLevel(bool asc = true);
void stampFilter(time_t stamp, bool uperrLimit);
void levelFilter(int32_t level, bool uperrLimit);
void withTagsFilter(const vector<string> &tags);
map<string, size_t> tagsCount();
};
#endif |
Mesiodistal tooth width in a Saudi population sample comparing right and left sides. Part 2. The objective of the present study was to investigate whether there was a significant difference or not in mesiodistal tooth width between the right and left sides in a Saudi sample. The measurements were obtained from 720 teeth of pretreatment orthodontic study casts. The results showed no significant statistical difference between right and left sides, which is in agreement with the literature. Thus, measurement of one side could be regarded as truly representative when the corresponding measurement on the other side was unobtainable. |
Canterbury–York dispute
The Canterbury–York dispute was a long-running conflict between the archdioceses of Canterbury and York in medieval England. It began shortly after the Norman Conquest of England and dragged on for many years. The main point of the dispute was over whether Canterbury would have jurisdiction, or primacy, over York. A number of archbishops of Canterbury attempted to secure professions of obedience from successive archbishops of York, but in the end they were unsuccessful. York fought the primacy by appealing to the kings of England as well as the papacy. In 1127, the dispute over the primacy was settled mainly in York's favour, for they did not have to submit to Canterbury. Later aspects of the dispute dealt with concerns over status and prestige.
Nature of the dispute
The main locus of the dispute was the attempt by post-Norman Conquest Archbishops of Canterbury to assert their primacy, or right to rule, over the province of York. Canterbury used texts to back up their claims, including Bede's major historical work the Historia ecclesiastica gentis Anglorum, which sometimes had the Canterbury archbishops claiming primacy over not just York, but the entire ecclesiastical hierarchy of the British Isles. It began under Lanfranc, the first Norman Archbishop of Canterbury, and ended up becoming a never-ending dispute between the two sees over prestige and status. The historian David Carpenter says Lanfranc's actions "sucked his successors into a quagmire, and actually weakened rather than strengthened church discipline and the unity of the kingdom." Carpenter further argues that "it became impossible in later centuries, thanks to disputes over status, for the two archbishops to appear in each other's presence."
Feeding into the dispute were the two cathedral chapters, who encouraged their respective archbishops to continue the struggle. An additional element was the fact that Canterbury had a monastic chapter, while York had secular clergy in the form of canons, interjecting a note of secular and monastic clerical rivalries into the dispute. Another problem that intertwined with the dispute was the Investiture controversy in England, which was concurrent with the dispute and involved most of the same protagonists. The kings of England, who might have forced a decision, were more concerned with other matters, and were ambivalent about Canterbury's claims, which removed a potential way to resolve the dispute. At times, kings supported Canterbury's claims in order to keep the north of England from revolting, but this was balanced by the times that the kings were in quarrels with Canterbury.
The popes, who were often called upon to decide the issue, had their own concerns with granting a primacy, and did not wish to actually rule in Canterbury's favour. But the main driving forces behind the Canterbury position were Lanfranc and Anselm of Canterbury, both of whom enjoyed immense prestige in the church and thus it was not easy for the papacy to rule against them or their position. Once Anselm was out of office, however, the popes began to side more often with York, and generally strived to avoid making any final judgement.
Under Lanfranc
The dispute began under Lanfranc, who demanded oaths of obedience from not just the traditional suffragan bishops of Canterbury but also from the archbishop of York. This happened shortly after Lanfranc's own consecration, when King William I of England then proposed that Lanfranc consecrate the new archbishop of York, Thomas of Bayeux. Lanfranc demanded that Thomas swear to obey Lanfranc as Thomas' primate before the consecration could take place, and Thomas refused, but he eventually gave way, and made a profession. However, the exact form that this oath took was disputed, with Canterbury claiming it was without conditions, and York claiming that it was only a personal submission to Lanfranc, and did not involve the actual offices of Canterbury and York. When both Thomas and Lanfranc visited Rome in 1071, Thomas brought up the primacy issue again, and for good measure tacked on a claim to three of Canterbury's suffragan dioceses, Lichfield, Dorchester, and Worcester. Pope Alexander II sent the issue back to England, to be settled at a council convened by the papal legate. This council took place at Winchester in April 1072, and Lanfranc was victorious on both the primacy issue as well as the dioceses. The victory was drawn up in the Accord of Winchester, to which those present affixed their names. However, papal confirmation of the decision did not extend to Lanfranc's successors, and in fact was never a complete confirmation of the rulings of the council. Lanfranc enjoyed the support of King William I at this council. Thomas was compensated with authority over the Scottish bishops, which was an attempt to give York enough suffragans to allow the archbishops of York to be consecrated without the help of Canterbury. An archiepiscopal consecration required three bishops, and after York's claims to Lichfield, Dorchester, and Worcester were denied, York only had one suffragan, the Diocese of Durham.
Why exactly Lanfranc decided to press forward claims to a judicial primacy over York is unclear. Some historians, including Frank Barlow have speculated that it was because Thomas was a disciple of Odo of Bayeux, one of Lanfranc's rivals in the English church. Another possibility was that Lanfranc desired to assert authority over the northern province of Britain in order to aid the reforming efforts Lanfranc was attempting. Lanfranc was surely influenced by his cathedral chapter at Canterbury, who may have desired to recover their honours after the problems encountered in Lanfranc's predecessor Stigand's archiepiscopate. York had never had a primacy, and based its arguments on the general principle that primacies were erroneous. While Canterbury in the Anglo-Saxon era had been more prestigious than York, it had never in fact had a judicial primacy. Another influence was probably Lanfranc's monastic background, with Lanfranc feeling that the ecclesiastical structure should mirror the monastic absolute obedience to a superior. However, a main influence was probably the so-called False Decretals, a collection of decrees and canons from the ninth century, which mentioned primates as the equivalent of patriarchs and placed them between the pope and the metropolitan bishops in the hierarchy.
When Lanfranc attempted to find documentary proof to rebut York's refusal, it was discovered that no explicit statement of such a primacy existed. This involved the use of letters of Pope Gregory the Great, which were repeated in Bede's Historia, but a complication was that Gregory's plan for the Gregorian mission had specified that the southern province would be based at London, not Canterbury. There was documentary evidence from the papacy that stated that Canterbury had a primacy over the island, but these dated from before York had been raised to an archbishopric. During the council of Winchester in 1072, papal letters were produced which may or may not have been forgeries. A biographer of Lanfranc, Margaret Gibson, argues that they already existed before Lanfranc used them. Another historian, Richard Southern, holds that the statements relating to primacy were inserted into legitimate papal letters after Lanfranc's day. Most historians agree that Lanfranc did not have anything to do with the forgeries, however they came about.
King William I supported Lanfranc in this dispute, probably because he felt that it was important that his kingdom be represented by one ecclesiastical province, and this would best be accomplished by supporting the primacy of Canterbury. Before conquering England, William had ruled the duchy of Normandy, which corresponded to the archdiocese of Rouen, and the simplicity of control which this allowed the dukes of Normandy probably was a strong factor in William's support of Canterbury's claims. Another concern was that in 1070–1072, the north of England, where York was located, was still imperfectly pacified, and allowing York independence might lead to York crowning another king.
Thomas claimed that when Lanfranc died in 1089, Thomas' profession lapsed, and during the long vacancy at Canterbury that followed on Lanfranc's death, Thomas performed most of the archiepiscopal functions in England.
Under Anselm
When Anselm was appointed to Canterbury, after a long vacancy that lasted from 1089 to 1093, the only flareup of the dispute was a disagreement at Anselm's consecration on 4 December 1093 over the exact title that would be employed in the ceremony. The dispute centered on the title that would be conferred on Anselm, and although it was settled quickly, the exact title used is unknown, as the two main sources of information differ. Eadmer, Anselm's biographer and a Canterbury partisan, proclaims that the title agreed upon was "Primate of all Britain". Hugh the Chanter, a chronicler from York and a partisan of York, claims the title used was "Metropolitan of Canterbury". Until the accession of King Henry I in 1100, Anselm was much more occupied with other disputes with King William II.
It was during Anselm's archiepiscopate that the primacy dispute became central to Anselm's plans. Eadmer made the dispute central to his work, the Historia Novorum. Likewise, Hugh the Chanter, made the primacy dispute one of the central themes of his work History of the Church of York.
In 1102, Pope Paschal II, in the midst of the Investiture controversy, tried to smooth over the problems about investiture by granting Anselm a primacy, but only to Anselm himself, not to his successors. Nor did the grant explicitly mention York as being subject to Canterbury. Anselm then held a council in September 1102 at Westminster, which was attended by Gerard, the new archbishop of York. According to Hugh the Chanter, when the seats for the bishops were arranged, Anselm's was set higher than Gerard's, which led Gerard to kick over chairs and refuse to be seated until his own chair was exactly as high as Anselm's. Late in 1102, the pope wrote to Gerard, admonishing him and ordering him to make the oath to Anselm.
Gerard died in May 1108, and his successor was nominated within six days. Thomas, however, delayed going to Canterbury to be consecrated, under pressure from his cathedral chapter and knowing that, since Anselm was in poor health, he might be able to outlast him. Thomas told Anselm that his cathedral chapter had forbidden him to make any oath of obedience, and this was confirmed by the canons themselves, who wrote to Anselm confirming Thomas' account. Although Anselm died before Thomas had submitted, one of the last letters Anselm wrote ordered Thomas not to seek consecration until he had made the required profession. After Anselm's death, the king then pressured Thomas to submit a written profession, which he eventually did. The actual document has disappeared, and as always, Eadmer and Hugh the Chanter disagree on the exact wording, with Eadmer claiming it was made to Canterbury and any successor archbishops, and Hugh claiming that Thomas qualified the oath by making it clear that it could not impede the rights of the Church of York.
Dispute under Thurstan
During the archbishopric of Thurstan, the Archbishop of York between 1114 and 1140, the dispute flared up and Thurstan appealed to the papacy over the issue, with Canterbury under Ralph d'Escures countering with information from Bede as well as forged documents. The papacy did not necessarily believe the forgeries, but the dispute rumbled on for a number of years. Shortly after Thurstan's election in 1114, Ralph refused to consecrate Thurstan unless he received a written, not just oral, profession of obedience. Thurstan refused to do so, and assured his cathedral chapter that he would not submit to Canterbury. York based its claim on the fact that no metropolitan bishop or archbishop could swear allegiance to anyone but the pope, a position guaranteed to gain support from the papacy. King Henry, however, refused permission for Thurstan to appeal to the papacy, which left the dispute in limbo for two years. Henry does not seem to have cared who won the dispute, and he may have delayed in the hope that Ralph and Thurstan would reach a compromise that would keep him from having to alienate either of them.
Pressure mounted, however, and Henry called a council in the spring of 1116, ordering that when Thurstan arrived at the council he must swear to obey Canterbury. If Thurstan would not do so, Henry threatened to depose him from office. But on his way to the council, Thurstan received a letter from the pope, ordering Thurstan's consecration without any profession. Although Thurstan did not reveal that the pope had ordered his consecration, he continued to refuse to make a profession, and resigned his see in the presence of the king and the council. But the papacy, the York cathedral chapter, and even King Henry still considered Thurstan the archbishop-elect. In 1117, Ralph attempted to visit Pope Paschal II about the dispute, but was unable to actually meet the pope, securing only a vague letter confirming Canterbury's past privileges; since the exact privileges were not specified, the letter was useless.
Both Ralph and Thurstan attended the Council of Reims in 1119, convened by Pope Calixtus II in October. Although Canterbury sources state that Thurstan promised King Henry he would refuse consecration while at the council, Yorkish sources deny that any such promise was made. Calixtus promptly consecrated Thurstan at the start of the council, which angered Henry and led the king to exile Thurstan from England and Normandy. Although the pope and king met and negotiated Thurstan's status in November 1119, nothing came of this, and Calixtus in March 1120 gave Thurstan two papal bulls, one an exemption for York from Canterbury's claims, titled Caritatis Bonun, and the other a threat of interdict on England if Thurstan was not allowed to return to York. After some diplomatic efforts, Thurstan was allowed back into the king's favour and his office returned to him. Calixtus' bulls also allowed any future Archbishops of York to be consecrated by their suffragans if the Archbishop of Canterbury refused.
In 1123, William of Corbeil, recently elected Archbishop of Canterbury, refused consecration by Thurstan unless Thurstan would incorporate into the ceremony an admission that Canterbury was primate of Britain. When Thurstan refused, William was consecrated by three of his own bishops. William then traveled to Rome to secure confirmation of his election, which was disputed. Thurstan also traveled to Rome, as both archbishops had been summoned to attend a papal council, which both arrived too late to attend. Thurstan arrived shortly before William. While there, William and his advisors presented documents to the papal curia which they insisted proved Canterbury's primacy. However, the cardinals and the curia found the documents to be forgeries. What persuaded the cardinals was the absence of papal bulls from the nine documents produced, which the Canterbury delegation tried to explain away by saying the bulls had "wasted away or were lost". Hugh the Chanter, a medieval chronicler of York, stated that when the cardinals heard that explanation, they laughed and ridiculed the documents "saying how miraculous it was that lead should waste away or be lost and parchment should survive". Hugh goes on to record that the attempts by the Canterbury party to secure their objective by bribery likewise failed.
Pope Honorius II made a judgment in York's favour in 1126, having found the documents and case presented by Canterbury to be unconvincing. In the winter of 1126–1127, an attempt at compromise was made, with Canterbury agreeing to give jurisdiction over the sees of Chester, Bangor and St Asaph to York in return for the submission of York to Canterbury. This foundered when William of Corbeil arrived at Rome and told the pope that he had not agreed to the surrender of St Asaph. This was the last attempt by William to secure an oath from Thurstan, for a compromise in the primacy dispute was made, with William of Corbeil receiving a papal legateship, which effectively gave him the powers of the primacy without the papacy actually having to concede a primacy to Canterbury. This legateship covered not only England, but Scotland as well.
A small flare-up in 1127 happened when William of Corbeil objected to Thurstan having his episcopal cross carried in processions in front of Thurstan while Thurstan was in Canterbury's province. William also objected to Thurstan participating in the ceremonial crownings of the king at the royal court. Thurstan appealed to Rome, and Honorius wrote a scathing letter to William declaring that if the reports from Thurstan were true, William would be punished for his actions. Thurstan then traveled to Rome, where he secured new rulings from the papacy. One gave the seniority between the two British archbishops to whichever had been consecrated first. Another ruling allowed the Archbishops of York to have their crosses carried in Canterbury's province.
Legacy of the first dispute
The main import of the first dispute was the increase in appeals to the papacy to solve the problem. This was part of a general trend to seek support and resolution at the papacy instead of in the royal courts, a trend that grew through the reigns of William II and Henry I. Also important was the impetus that the disputes gave to efforts by both York and Canterbury to assert their jurisdiction over Scotland, Wales and Ireland. After the settlement of the profession issue, the dispute turned to other, lesser matters such as how the respective chairs of the two archbishops would be arranged when they were together and the right of either to carry their episcopal cross in the others' province.
Under Stephen
Under Stephen, the dispute arose briefly at the Council of Reims of 1148. Theobald of Bec, who was Archbishop of Canterbury for most of Stephen's reign, attended the council, and when Henry Murdac, just recently elected to York, did not arrive, Theobald claimed the primacy over York at one of the early council sessions. However, as Murdac was a Cistercian, as was Pope Eugene III, who had called the council, nothing further was done about Canterbury's claim. Eugene postponed any decision until Murdac was established in his see.
Most of the time, however, Theobald was not concerned with reopening the dispute, as demonstrated when he consecrated Roger de Pont L'Evêque, newly elected to York in 1154. Theobald, at Roger's request, performed the consecration as papal legate, and not as archbishop, thus side-stepping the question of a profession of obedience.
Disputes under Henry II
During Thomas Becket's archiepiscopate, the dispute flared up again, with the added complication of an attempt by Gilbert Foliot, the Bishop of London, to have his see raised to an archbishopric, basing his case on the old Gregorian plan for London to be the seat of the southern province. Foliot was an opponent of Becket's, and this fed into the dispute, as did Becket's legateships, which specifically excluded York. When Roger de Pont L'Evêque, the Archbishop of York, crowned Henry the Young King in 1170, this further inflamed the dispute, as it was Canterbury's privilege to crown the kings of England.
The first sign of the revival of the dispute was at the Council of Tours, called in 1163 by Pope Alexander III. While there, Roger and Becket disputed over the placement of their seats in the council. Roger argued that, based on Gregory the Great's plan that primacy should go to the archbishop who had been consecrated first, he had the right to the more honourable placement at the council. Eventually, Alexander placed them both on equal terms, but not before the council spent three days listening to the claims and counter-claims, as well as Roger relating the whole history of the dispute. In 1164 Alexander gave Roger a papal legateship, but excluded Becket from its jurisdiction. The pope did, however, decline to declare that Canterbury had a primacy in England. Alexander on 8 April 1166 confirmed Canterbury's primacy, but this became less important than the grant of a legateship on 24 April to Becket. This grant, though, did not cover the diocese of York, which was specifically excluded.
During the reign of Henry II, the dispute took a new form, concerning the right of either archbishop to carry their archiepiscopal cross throughout the kingdom, not just in their own province. During the vacancy between the death of Theobald of Bec and the appointment of Becket, Roger had secured papal permission to carry his cross anywhere in England. As the Becket controversy grew, however, Alexander asked Roger to forbear from doing so, in order to stop the wrangling that the practice had caused. Later, Alexander revoked the privilege, claiming it had been given in error. The dispute continued between Hubert Walter and Geoffrey, respectively Archbishop of Canterbury and Archbishop of York, during King Richard I's reign, when both archbishops had their archiepiscopal crosses carried before themselves in the other's diocese, prompting angry recriminations. Eventually, both prelates attempted to secure a settlement from Richard in their favour, but Richard declined, stating that this was an issue that needed to be settled by the papacy. However, no firm settlement was made until the 14th century.
The papacy, while continuing to grant legateships to the archbishops of Canterbury, began after 1162 to specifically exclude the legateships from covering the province of York. The only exception from the latter half of the 12th century was the legateship of Hubert Walter in 1195, which covered all of England. This exception, however, was due more to Pope Celestine III's dislike of Geoffrey, the archbishop of York at the time.
Longest Streak with H≤1, R=0 From 1914 to 2014: Results
Rk Strk Start End Games W L AB R H 2B 3B HR RBI SO BB SB CS BA OBP SLG OPS Opp 1 CLE 2014-06-29 2014-06-30 2 0 2 54 0 2 0 0 0 0 16 4 1 0 .037 .103 .037 .140 SEA,LAD 2 BSN 1916-09-28 1916-09-30 2 0 2 55 0 2 0 0 0 0 9 2 0 0 .036 .070 .036 .107 NYG 3 SEA 2014-06-28 2014-06-28 1 0 1 28 0 1 0 0 0 0 11 0 0 0 .036 .036 .036 .071 CLE 4 SDP 2014-06-25 2014-06-25 1 0 1 27 0 0 0 0 0 0 6 1 0 0 .000 .036 .000 .036 SFG 5 HOU 2014-06-21 2014-06-21 1 0 1 28 0 1 0 0 0 0 11 2 0 0 .036 .100 .036 .136 TBR 6 COL 2014-06-18 2014-06-18 1 0 1 28 0 0 0 0 0 0 15 0 0 0 .000 .000 .000 .000 LAD 7 PHI 2014-05-25 2014-05-25 1 0 1 27 0 0 0 0 0 0 6 3 0 0 .000 .100 .000 .100 LAD 8 ARI 2014-05-20 2014-05-20 1 0 1 28 0 1 1 0 0 0 9 0 0 0 .036 .036 .071 .107 STL 9 NYM 2014-04-18 2014-04-18 1 0 1 27 0 1 0 0 0 0 8 6 0 0 .037 .212 .037 .249 ATL 10 DET 2014-04-11 2014-04-11 1 0 1 28 0 1 0 0 0 0 11 2 2 0 .036 .100 .036 .136 SDP 11 SEA 2014-04-09 2014-04-09 1 0 1 29 0 1 0 0 0 0 9 4 0 0 .034 .152 .034 .186 LAA 12 DET 2013-09-29 2013-09-29 1 0 1 28 0 0 0 0 0 0 4 1 0 0 .000 .067 .000 .067 MIA 13 WSN 2013-09-24 2013-09-24 1 0 1 28 0 1 0 0 0 0 9 2 0 0 .036 .100 .036 .136 STL 14 PIT 2013-09-16 2013-09-16 1 0 1 27 0 1 0 0 0 0 7 0 0 0 .037 .037 .037 .074 SDP 15 NYM 2013-09-09 2013-09-09 1 0 1 28 0 1 0 0 0 0 8 2 0 0 .036 .100 .036 .136 WSN 16 ARI 2013-09-06 2013-09-06 1 0 1 28 0 1 0 0 0 0 7 0 0 0 .036 .036 .036 .071 SFG 17 SDP 2013-07-13 2013-07-13 1 0 1 27 0 0 0 0 0 0 13 4 1 0 .000 .156 .000 .156 SFG 18 SFG 2013-07-02 2013-07-02 1 0 1 27 0 0 0 0 0 0 9 1 0 0 .000 .036 .000 .036 CIN 19 PIT 2013-06-05 2013-06-05 1 0 1 27 0 1 0 0 0 0 13 2 0 0 .037 .161 .037 .198 ATL 20 PIT 2013-05-31 2013-05-31 1 0 1 27 0 1 0 0 0 0 7 1 0 0 .037 .103 .037 .140 CIN 21 MIN 2013-05-24 2013-05-24 1 0 1 28 0 1 0 0 0 0 12 3 0 0 .036 .129 .036 .165 DET 22 LAA 2013-05-12 2013-05-12 1 0 1 28 0 1 0 0 0 0 7 0 1 0 .036 .036 .036 .071 CHW 23 COL 2013-05-10 2013-05-10 1 0 1 28 0 1 0 0 0 0 13 0 1 0 .036 .036 .036 .071 STL 24 TOR 2013-05-10 2013-05-10 1 0 1 28 0 1 1 0 0 0 5 0 0 0 .036 .036 .071 .107 BOS 25 CHW 2013-05-07 2013-05-07 1 0 1 31 0 1 0 0 0 0 14 0 0 0 .032 .032 .032 .065 NYM Rk Strk Start End Games W L AB R H 2B 3B HR RBI SO BB SB CS BA OBP SLG OPS Opp 26 PHI 2013-05-04 2013-05-04 1 0 1 28 0 1 0 0 0 0 12 2 0 0 .036 .100 .036 .136 MIA 27 CIN 2013-04-26 2013-04-26 1 0 1 29 0 1 0 0 0 0 4 1 0 0 .034 .067 .034 .101 WSN 28 STL 2013-04-17 2013-04-17 1 0 1 28 0 1 1 0 0 0 11 0 0 0 .036 .069 .071 .140 PIT 29 PIT 2012-09-28 2012-09-28 1 0 1 27 0 0 0 0 0 0 10 1 1 1 .000 .036 .000 .036 CIN 30 TBR 2012-08-15 2012-08-15 1 0 1 27 0 0 0 0 0 0 12 0 0 0 .000 .000 .000 .000 SEA 31 CHC 2012-07-31 2012-07-31 1 0 1 28 0 1 0 0 0 0 8 2 2 0 .036 .129 .036 .165 PIT 32 HOU 2012-07-19 2012-07-19 1 0 1 27 0 1 0 0 0 0 5 3 0 1 .037 .133 .037 .170 SDP 33 CIN 2012-06-28 2012-06-28 1 0 1 26 0 1 0 0 0 0 8 2 1 1 .038 .107 .038 .146 SFG 34 BAL 2012-06-18 2012-06-18 1 0 1 28 0 1 0 0 0 0 13 2 0 0 .036 .100 .036 .136 NYM 35 ARI 2012-06-16 2012-06-16 1 0 1 28 0 1 0 0 0 0 5 1 0 0 .036 .069 .036 .105 LAA 36 ATL 2012-06-16 2012-06-16 1 0 1 28 0 1 0 0 0 0 8 2 0 0 .036 .100 .036 .136 BAL 37 MIA 2012-06-15 2012-06-15 1 0 1 28 0 1 0 0 0 0 12 4 1 0 .036 .156 .036 .192 TBR 38 HOU 2012-06-13 2012-06-13 1 0 1 27 0 0 0 0 0 0 14 0 0 0 .000 .000 .000 .000 SFG 39 LAD 2012-06-08 2012-06-08 1 0 1 26 0 0 0 0 0 0 9 3 0 0 .000 .103 .000 .103 SEA 40 STL 2012-06-01 2012-06-01 1 0 1 27 0 0 0 0 0 0 8 5 0 0 .000 .156 .000 .156 NYM 41 OAK 2012-05-22 2012-05-22 1 0 1 28 0 1 0 0 0 0 9 2 0 0 .036 .100 .036 .136 LAA 42 OAK 2012-05-19 
2012-05-19 1 0 1 26 0 1 0 0 0 0 7 2 0 0 .038 .107 .038 .146 SFG 43 PIT 2012-05-18 2012-05-18 1 0 1 28 0 1 0 0 0 0 12 2 0 0 .036 .100 .036 .136 DET 44 MIN 2012-05-05 2012-05-05 1 0 1 27 0 1 0 0 0 0 10 2 0 0 .037 .103 .037 .140 SEA 45 MIN 2012-05-02 2012-05-02 1 0 1 28 0 0 0 0 0 0 9 1 0 0 .000 .034 .000 .034 LAA 46 SEA 2012-04-21 2012-04-21 1 0 1 27 0 0 0 0 0 0 9 0 0 0 .000 .000 .000 .000 CHW 47 PIT 2012-04-13 2012-04-13 1 0 1 28 0 1 0 0 0 0 11 0 0 0 .036 .036 .036 .071 SFG 48 OAK 2012-04-10 2012-04-10 1 0 1 20 0 1 1 0 0 0 10 4 0 0 .050 .208 .100 .308 KCR 49 PIT 2011-09-10 2011-09-10 1 0 1 28 0 1 1 0 0 0 11 3 0 0 .036 .129 .071 .200 FLA 50 MIN 2011-09-05 2011-09-05 1 0 1 28 0 1 1 0 0 0 9 0 0 0 .036 .036 .071 .107 CHW Rk Strk Start End Games W L AB R H 2B 3B HR RBI SO BB SB CS BA OBP SLG OPS Opp 51 OAK 2011-08-21 2011-08-21 1 0 1 27 0 1 0 0 0 0 9 3 0 0 .037 .133 .037 .170 TOR 52 SDP 2011-07-09 2011-07-09 1 0 1 28 0 1 0 0 0 0 10 4 0 0 .036 .156 .036 .192 LAD 53 BAL 2011-07-01 2011-07-01 1 0 1 28 0 1 0 0 0 0 8 1 1 0 .036 .069 .036 .105 ATL 54 FLA 2011-06-28 2011-06-28 1 0 1 28 0 1 0 0 0 0 12 3 0 0 .036 .129 .036 .165 OAK 55 TBR 2011-06-15 2011-06-15 1 0 1 28 0 1 0 0 0 0 6 0 0 0 .036 .036 .036 .071 BOS 56 LAA 2011-05-28 2011-05-28 1 0 1 31 0 1 1 0 0 0 5 2 0 0 .032 .091 .065 .155 MIN 57 STL 2011-05-07 2011-05-07 1 0 1 27 0 1 0 0 0 0 8 4 0 0 .037 .161 .037 .198 MIL 58 TOR 2011-05-07 2011-05-07 1 0 1 26 0 0 0 0 0 0 4 1 0 0 .000 .037 .000 .037 DET 59 MIL 2011-05-04 2011-05-04 1 0 1 28 0 1 1 0 0 0 6 1 0 0 .036 .069 .071 .140 ATL 60 CHW 2011-05-03 2011-05-03 1 0 1 24 0 0 0 0 0 0 2 6 1 0 .000 .200 .000 .200 MIN 61 CLE 2011-04-12 2011-04-12 1 0 1 28 0 1 0 0 0 0 8 2 0 1 .036 .100 .036 .136 LAA 62 TEX 2010-09-23 2010-09-23 1 0 1 28 0 1 0 0 0 0 9 3 0 0 .036 .156 .036 .192 OAK 63 ATL 2010-09-22 2010-09-22 1 0 1 28 0 1 1 0 0 0 8 2 1 0 .036 .100 .071 .171 PHI 64 OAK 2010-09-02 2010-09-02 1 0 1 28 0 1 0 0 0 0 5 3 0 0 .036 .182 .036 .218 NYY 65 PHI 2010-08-30 2010-08-30 1 0 1 27 0 1 0 0 0 0 7 2 0 0 .037 .133 .037 .170 LAD 66 MIN 2010-08-23 2010-08-23 1 0 1 26 0 1 0 0 0 0 9 6 0 0 .038 .219 .038 .257 TEX 67 PHI 2010-08-13 2010-08-13 1 0 1 28 0 1 0 0 0 0 7 1 0 0 .036 .069 .036 .105 NYM 68 TBR 2010-08-08 2010-08-08 1 0 1 29 0 1 0 0 0 0 17 2 0 0 .034 .097 .034 .131 TOR 69 SFG 2010-07-29 2010-07-29 1 0 1 28 0 1 0 0 0 0 8 1 0 0 .036 .069 .036 .105 FLA 70 DET 2010-07-26 2010-07-26 1 0 1 26 0 0 0 0 0 0 6 1 0 0 .000 .037 .000 .037 TBR 71 STL 2010-07-22 2010-07-22 1 0 1 33 0 1 0 0 0 0 9 2 1 0 .030 .086 .030 .116 PHI 72 PIT 2010-07-08 2010-07-08 1 0 1 28 0 1 0 0 0 0 8 2 0 0 .036 .129 .036 .165 HOU 73 TBR 2010-06-25 2010-06-25 1 0 1 27 0 0 0 0 0 0 6 8 2 1 .000 .250 .000 .250 ARI 74 CHW 2010-06-13 2010-06-13 1 0 1 27 0 1 0 0 0 0 4 3 0 0 .037 .188 .037 .225 CHC 75 SDP 2010-06-10 2010-06-10 1 0 1 28 0 1 1 0 0 0 6 0 0 0 .036 .036 .071 .107 NYM Rk Strk Start End Games W L AB R H 2B 3B HR RBI SO BB SB CS BA OBP SLG OPS Opp 76 CLE 2010-06-02 2010-06-02 1 0 1 28 0 1 0 0 0 0 3 0 0 0 .036 .036 .036 .071 DET 77 FLA 2010-05-29 2010-05-29 1 0 1 27 0 0 0 0 0 0 11 0 0 0 .000 .000 .000 .000 PHI 78 ARI 2010-05-28 2010-05-28 1 0 1 28 0 1 1 0 0 0 9 0 0 0 .036 .069 .071 .140 SFG 79 TBR 2010-05-25 2010-05-25 1 0 1 28 0 1 0 0 0 0 12 6 0 0 .036 .206 .036 .242 BOS 80 PHI 2010-05-22 2010-05-22 1 0 1 26 0 1 0 0 0 0 5 4 1 0 .038 .167 .038 .205 BOS 81 SFG 2010-05-13 2010-05-13 1 0 1 28 0 1 0 0 0 0 6 0 0 0 .036 .036 .036 .071 SDP 82 PIT 2010-05-11 2010-05-11 1 0 1 27 0 1 0 0 0 0 8 0 0 1 .037 .071 .037 .108 CIN 83 TBR 2010-05-09 2010-05-09 1 0 1 27 0 0 
0 0 0 0 6 0 0 0 .000 .000 .000 .000 OAK 84 ATL 2010-04-17 2010-04-17 1 0 1 25 0 0 0 0 0 0 7 6 1 0 .000 .194 .000 .194 COL 85 LAD 2009-09-30 2009-09-30 1 0 1 27 0 1 0 0 0 0 7 5 0 0 .037 .188 .037 .225 SDP 86 TEX 2009-09-30 2009-09-30 1 0 1 25 0 1 0 0 0 0 7 5 0 1 .040 .200 .040 .240 LAA 87 TEX 2009-09-16 2009-09-16 1 0 1 28 0 1 0 0 0 0 10 2 0 0 .036 .100 .036 .136 OAK 88 MIL 2009-09-07 2009-09-07 1 0 1 27 0 1 1 0 0 0 10 2 0 0 .037 .103 .074 .178 STL 89 NYY 2009-09-04 2009-09-04 1 0 1 28 0 1 1 0 0 0 9 3 0 0 .036 .129 .071 .200 TOR 90 SEA 2009-08-30 2009-08-30 1 0 1 28 0 1 0 0 0 0 5 1 0 0 .036 .069 .036 .105 KCR 91 CHW 2009-08-29 2009-08-29 1 0 1 28 0 1 1 0 0 0 6 2 0 0 .036 .100 .071 .171 NYY 92 PIT 2009-08-04 2009-08-04 1 0 1 29 0 1 0 0 0 0 6 3 1 0 .034 .125 .034 .159 ARI 93 TBR 2009-07-23 2009-07-23 1 0 1 27 0 0 0 0 0 0 6 0 0 0 .000 .000 .000 .000 CHW 94 FLA 2009-07-16 2009-07-16 1 0 1 28 0 1 0 0 0 0 5 1 0 0 .036 .069 .036 .105 PHI 95 SDP 2009-07-10 2009-07-10 1 0 1 28 0 0 0 0 0 0 11 0 0 0 .000 .000 .000 .000 SFG 96 BAL 2009-07-06 2009-07-06 1 0 1 28 0 1 0 0 0 0 3 0 0 0 .036 .036 .036 .071 SEA 97 TEX 2009-06-28 2009-06-28 1 0 1 28 0 1 0 0 0 0 10 3 0 0 .036 .129 .036 .165 SDP 98 NYM 2009-06-27 2009-06-27 1 0 1 28 0 1 0 0 0 0 10 3 0 0 .036 .129 .036 .165 NYY 99 TEX 2009-05-19 2009-05-19 1 0 1 28 0 1 1 0 0 0 8 2 0 0 .036 .100 .071 .171 DET 100 BOS 2009-04-30 2009-04-30 1 0 1 27 0 1 0 0 0 0 11 2 0 0 .037 .103 .037 .140 TBR View Play Index Tool Used
Generated 7/1/2014. Provided by Baseball-Reference.com. |
/**
 * Iterate over the interaction keypoints and return the index of the first one lying within one tenth of the grid radius of the given point; returns -1 if none is close enough.
*/
int Grid3D::getKeyPointIndex(cv::Point p) const
{
for (size_t i = 0; i < _interactionPoints.size(); ++i)
{
if (cv::norm(_center + _interactionPoints[i] - p) < (_radius / 10) )
return static_cast<int>(i);
}
return -1;
} |
// Geometry/HGCalCommonData/interface/HGCalParameters.h
#ifndef Geometry_HGCalCommonData_HGCalParameters_h
#define Geometry_HGCalCommonData_HGCalParameters_h
#include "CondFormats/Serialization/interface/Serializable.h"
#include "DataFormats/GeometryVector/interface/GlobalPoint.h"
#include "Geometry/HGCalCommonData/interface/HGCalGeometryMode.h"
#include <CLHEP/Geometry/Transform3D.h>
#include <string>
#include <vector>
#include <iostream>
#include <cstdint>
#include <unordered_map>
class HGCalParameters {
public:
typedef std::vector<std::unordered_map<int32_t,int32_t> > layer_map;
typedef std::unordered_map<int32_t, int32_t> wafer_map;
static constexpr double k_ScaleFromDDD = 0.1;
static constexpr double k_ScaleToDDD = 10.0;
static constexpr uint32_t k_CornerSize = 6;
struct hgtrap {
int lay;
float bl, tl, h, dz, alpha, cellSize;
};
struct hgtrform {
int zp, lay, sec, subsec;
CLHEP::Hep3Vector h3v;
CLHEP::HepRotation hr;
};
HGCalParameters(const std::string& nam);
~HGCalParameters( void );
void fillModule(const hgtrap& mytr, bool reco);
hgtrap getModule(unsigned int k, bool reco) const;
void fillTrForm(const hgtrform& mytr);
hgtrform getTrForm(unsigned int k) const;
void addTrForm(const CLHEP::Hep3Vector& h3v);
void scaleTrForm(double);
std::array<int,4> getID(unsigned int k) const;
std::string name_;
int nCells_;
int nSectors_;
std::vector<double> cellSize_;
std::vector<int> moduleLayS_;
std::vector<double> moduleBlS_;
std::vector<double> moduleTlS_;
std::vector<double> moduleHS_;
std::vector<double> moduleDzS_;
std::vector<double> moduleAlphaS_;
std::vector<double> moduleCellS_;
std::vector<int> moduleLayR_;
std::vector<double> moduleBlR_;
std::vector<double> moduleTlR_;
std::vector<double> moduleHR_;
std::vector<double> moduleDzR_;
std::vector<double> moduleAlphaR_;
std::vector<double> moduleCellR_;
std::vector<uint32_t> trformIndex_;
std::vector<double> trformTranX_;
std::vector<double> trformTranY_;
std::vector<double> trformTranZ_;
std::vector<double> trformRotXX_;
std::vector<double> trformRotYX_;
std::vector<double> trformRotZX_;
std::vector<double> trformRotXY_;
std::vector<double> trformRotYY_;
std::vector<double> trformRotZY_;
std::vector<double> trformRotXZ_;
std::vector<double> trformRotYZ_;
std::vector<double> trformRotZZ_;
std::vector<int> layer_;
std::vector<int> layerIndex_;
std::vector<int> layerGroup_;
std::vector<int> cellFactor_;
std::vector<int> depth_;
std::vector<int> depthIndex_;
std::vector<int> depthLayerF_;
std::vector<double> zLayerHex_;
std::vector<double> rMinLayHex_;
std::vector<double> rMaxLayHex_;
std::vector<int> waferCopy_;
std::vector<int> waferTypeL_;
std::vector<int> waferTypeT_;
std::vector<double> waferPosX_;
std::vector<double> waferPosY_;
std::vector<double> cellFineX_;
std::vector<double> cellFineY_;
wafer_map cellFineIndex_;
std::vector<bool> cellFineHalf_;
std::vector<double> cellCoarseX_;
std::vector<double> cellCoarseY_;
wafer_map cellCoarseIndex_;
std::vector<bool> cellCoarseHalf_;
std::vector<int> layerGroupM_;
std::vector<int> layerGroupO_;
std::vector<double> boundR_;
std::vector<double> rLimit_;
std::vector<int> cellFine_;
std::vector<int> cellCoarse_;
double waferR_;
std::vector<int> levelT_;
HGCalGeometryMode::GeometryMode mode_;
double slopeMin_;
layer_map copiesInLayers_;
int nCellsFine_;
int nCellsCoarse_;
double waferSize_;
double waferThick_;
double sensorSeparation_;
double mouseBite_;
int waferUVMax_;
std::vector<int> waferUVMaxLayer_;
bool defineFull_;
std::vector<double> cellThickness_;
std::vector<double> radius100to200_;
std::vector<double> radius200to300_;
int choiceType_;
int nCornerCut_;
double fracAreaMin_;
double zMinForRad_;
std::vector<double> radiusMixBoundary_;
std::vector<int> nPhiBinBH_;
std::vector<double> dPhiEtaBH_;
std::vector<int> firstModule_;
std::vector<int> lastModule_;
std::vector<double> slopeTop_;
std::vector<double> zFront_;
std::vector<double> rMaxFront_;
std::vector<double> zRanges_;
double etaMinBH_;
std::vector<int> iEtaMinBH_;
int firstLayer_;
wafer_map wafersInLayers_;
wafer_map typesInLayers_;
COND_SERIALIZABLE;
private:
const int kMaskZside = 0x1;
const int kMaskLayer = 0x7F;
const int kMaskSector = 0x3FF;
const int kMaskSubSec = 0x1;
const int kShiftZside = 19;
const int kShiftLayer = 12;
const int kShiftSector = 1;
const int kShiftSubSec = 0;
};
#endif
|
package com.Revature.compare;
public class Student implements Comparable<Student>{
private int studentID;
private String label;
private double gpa;
public Student() {}
public Student(int studentID,String label,double gpa) {
this.studentID=studentID;
this.label=label;
this.gpa=gpa;
}
public int getStudentID() {
return studentID;
}
public void setStudentID(int studentID) {
this.studentID = studentID;
}
public String getLabel() {
return label;
}
public void setLabel(String label) {
this.label = label;
}
public double getGpa() {
return gpa;
}
public void setGpa(double gpa) {
this.gpa = gpa;
}
@Override
public String toString() {
return "ID: "+studentID;
}
@Override
public int compareTo(Student other) {
// Order students by ascending studentID; Integer.compare avoids the
// integer-overflow risk of returning a raw subtraction.
return Integer.compare(this.getStudentID(), other.getStudentID());
}
}
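// --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
// Shows how the Comparable implementation above lets a collection of Students
// be sorted by studentID. The demo class name and the sample values are
// assumptions made purely for this example.
class StudentSortDemo {
	public static void main(String[] args) {
		java.util.List<Student> roster = new java.util.ArrayList<>();
		roster.add(new Student(42, "Ada", 3.9));
		roster.add(new Student(7, "Linus", 3.4));
		roster.add(new Student(19, "Grace", 3.8));
		java.util.Collections.sort(roster); // uses Student.compareTo, ordering by ID
		System.out.println(roster);         // prints [ID: 7, ID: 19, ID: 42]
	}
}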
|
Edinson Vólquez
Early life
Vólquez grew up in the Dominican Republic and started playing baseball when he was 9 or 10 with the support of his parents. "It was good for me because my mom and dad always took care of me... The only thing I did was go to school and play baseball."
Texas Rangers
Vólquez was signed as an amateur free agent by the Texas Rangers in the Dominican Republic in 2001. Together with John Danks and Thomas Diamond, Vólquez was one third of the "DVD" trio of Rangers pitching prospects.
After spending four years in the Rangers' minor league system, Vólquez made his Major League debut on August 30, 2005 in a start against the Chicago White Sox. He lost all three Major League games he started that season, as well as one of the three games in which he appeared as a reliever, and posted a 14.21 ERA. He spent the first five months of the 2006 season with the Oklahoma RedHawks of the Class AAA Pacific Coast League until he was recalled to the majors in September. This time, he fared better, winning one of his eight starts and posting a 7.29 ERA.
The Rangers were dissatisfied with the results shown by one of their top pitching prospects, so in 2007 they tried an unconventional tactic. Vólquez was demoted to the Bakersfield Blaze of the Class A-Advanced California League, to work on his control. As Vólquez progressed, he was slowly promoted up through the minor league system until he reached the big leagues in September. This tactic had been used by Mark Connor, the Rangers' pitching coach, once before. Vólquez showed much improvement in his big league performance that season, posting a 2-1 record and 4.50 ERA in six starts. Vólquez later said about the time in the minors, "At the time, I didn't understand, because if I play in the Big Leagues, why do I have to go all the way back to Single-A?... It made me better. It made me a better person."
Cincinnati Reds
On December 21, 2007, the Rangers traded Vólquez to the Cincinnati Reds, along with Daniel Ray Herrera, in a deal for Josh Hamilton. Vólquez made his Reds' debut on April 6, 2008 in a game against the Philadelphia Phillies in Cincinnati. In 5⅓ innings of work, he allowed only five hits, one earned run and two walks while striking out eight batters in an 8–2 victory.
Vólquez started 2008 with a 7–1 record and a 1.33 ERA in nine starts, and allowed no more than one earned run in all but one of these starts (in which he allowed two). He became the only Reds pitcher to accomplish this since 1912. On May 18, 2008, Vólquez participated in a pitching matchup with the Cleveland Indians' Cliff Lee, who at that point led the American League with an ERA of 0.67. It was the third time in MLB history that the ERA leaders of each league had faced each other. Vólquez won the contest by a score of 6–4, improving to 7–1. Lee's loss, his first of the season, left him with a 6–1 record.
Vólquez was selected to represent the National League in the 2008 MLB All-Star Game. By the All-Star Break, Vólquez had a 12–3 record with a 2.29 ERA and 126 strikeouts. Vólquez finished the season with a 17–6 record and an earned run average of 3.21 in 196 innings, 8th-best in the National League. His 206 strikeouts tied for second-most in the National League with Johan Santana and Dan Haren, behind the Cy Young winner Tim Lincecum, and his 9.46 K/9 rate was also second in the league behind Lincecum. Vólquez threw changeups 31.9% of the time in 2008, more than any other starter.
After the season, the Baseball Writers' Association of America put Vólquez on the ballot for National League Rookie of the Year Award voting, an award for which he was not eligible. He subsequently received three second place votes for the award, which went to Geovany Soto.
Vólquez did not follow up his 2008 All-Star campaign with the same success. In 2009 with Cincinnati, Vólquez posted a 4–2 record with a 4.35 ERA through June 1. He was placed on the 15-day DL with elbow pain on June 2, and then eventually moved to the 60-day DL in preparation for Tommy John surgery, which ended his season.
On April 20, 2010, he received a 50-game suspension for use of performance-enhancing drugs. Vólquez made his 2010 debut with the Reds on July 17, 2010, against the Colorado Rockies in an 8-1 win, holding the Rockies to one earned run and three hits in six innings with 9 strikeouts and 2 walks. However, his next several starts were unimpressive, and for the second time in his career, he was demoted straight to single-A (the Dayton Dragons). He was recalled on September 7 and finished the season with a 4-3 record and 4.31 ERA over 62.2 innings. He started Game 1 of the 2010 National League Division Series against the Philadelphia Phillies and lost, allowing four earned runs over 1.2 innings as Roy Halladay threw a no-hitter.
Vólquez was the Reds' Opening Day starter in 2011 and finished the season 5-7 with a 5.71 ERA in 20 starts for Cincinnati. He also spent time in the minor leagues, going 4-2 with a 2.37 ERA for Triple-A Louisville.
San Diego Padres
On December 17, 2011, Vólquez, Yonder Alonso, Yasmani Grandal, and Brad Boxberger were traded by the Reds to the San Diego Padres for Mat Latos. Vólquez was the Padres' Opening Day starter for the 2012 season, losing 5–3 to the Los Angeles Dodgers. Vólquez, along with Clayton Richard, was a mainstay of the Padres 2012 rotation, making 32 starts and pitching 182 ²⁄₃ innings. His highlight game of the season came on July 19 when he pitched a one-hit shutout at home against the Houston Astros. Vólquez finished the season 11-11 with a 4.14 ERA. He collected 174 strikeouts, but issued a league-leading 105 walks.
Vólquez was again the Padres' Opening Day starter in 2013. On June 2, Vólquez hit his first career home run, a 3-run homer off Toronto Blue Jays pitcher Ramón Ortiz. Despite his home run, the Padres lost the game 7-4. The Padres designated Vólquez for assignment on August 24, a day after he gave up six runs while only recording two outs in a start against the Chicago Cubs. At the time, Vólquez led the NL with 95 earned runs. He was released three days later. In 27 starts for the Padres in 2013, Vólquez went 9-10 with a 6.01 ERA.
Los Angeles Dodgers
On August 30, 2013, Vólquez signed an agreement with the Los Angeles Dodgers on a Major League contract. Vólquez appeared for the Dodgers that night, pitching one scoreless inning in relief against his former team, the Padres. He joined the Dodgers rotation soon after and made 5 starts in September for them. He was 0-2 with a 4.18 ERA for the Dodgers in 2013.
Pittsburgh Pirates
After the 2013 season, Vólquez signed a one-year deal worth $5 million with the Pittsburgh Pirates. Vólquez experienced a career rebirth with the Pirates, going 13-7 with a 3.04 ERA and 140 strikeouts in 32 games (31 starts), pitching 192 ²⁄₃ innings. On October 1, 2014, Vólquez started the 2014 National League Wild Card Game for the Pirates against the San Francisco Giants. Vólquez did not come through, however, giving up five earned runs, including a grand slam to Giants shortstop Brandon Crawford, in five innings pitched. The Pirates went on to lose 8-0, eliminating them from the playoffs. The Wild Card Game proved to be Vólquez's last game as a Pirate, as he became a free agent after the 2014 season.
Kansas City Royals
On December 29, 2014, the Kansas City Royals announced that they had signed Vólquez to a 2-year, $20 million contract. During a game against the Chicago White Sox on April 23, 2015, Vólquez was ejected for his role in a brawl between the two teams. Two days later, he was suspended five games. He had the option to appeal the suspension but dropped it on April 27, 2015, making the suspension effective immediately. After finishing the regular season 13-9 with a 3.55 ERA in a career-high 200 ¹⁄₃ innings, Vólquez started Game 1 of the 2015 World Series, giving up three runs in six innings and receiving a no-decision. The Royals went on to win, 5-4, in 14 innings. Vólquez pitched the game unaware that his father had died; his wife found out before the game and instructed the coaching staff and the press not to tell him until he was done pitching for the night. Vólquez got the nod to start in Game 5 against Matt Harvey, where he gave up 2 earned runs on only 2 hits in 6 innings with a no-decision. The Royals again forced the game into extra innings before defeating the Mets to win the World Series.
On June 24, 2016, Vólquez experienced one of the worst starts of his career as he allowed 12 runs (11 earned) in the first inning. He only lasted one inning as the Royals lost to the Houston Astros 13-4. On November 4, Vólquez declined his option to remain with the Royals and became a free agent.
Miami Marlins
On November 28, 2016, Vólquez agreed to a two-year, $22 million contract with the Miami Marlins. The contract became official on December 1. On March 24, 2017, Vólquez was named the Marlins' Opening Day starter for the 2017 season. He was credited with 7 losses and no wins in his first nine starts. He recorded his first win on May 29 in a 4–1 decision against the Philadelphia Phillies. On June 3, Vólquez threw his first no-hitter against the Arizona Diamondbacks at Miami's Marlins Park in a 3–0 decision. Vólquez threw 98 pitches, struck out 10 and walked two batters (who were both retired on double plays), facing the minimum 27 batters. It was the sixth no-hitter in Marlins history and the first since Henderson Alvarez's in 2013. Before the game, he dedicated it to his late Royals teammate and friend Yordano Ventura, as well as the late Marlins ace Jose Fernandez. The game took place on what would have been Ventura's 26th birthday. He was later named the NL Player of the Week for the first time in his career, pitching 15 innings while allowing one run (a 0.60 ERA) and four hits with 14 strikeouts in two starts. On August 4, 2017, Vólquez underwent Tommy John surgery, sidelining him for the remainder of the season. Vólquez was released by the Marlins on December 13, 2017.
Second stint with the Rangers
On February 16, 2018, Vólquez signed a two-year minor league contract with the Texas Rangers, with the 2018 season spent recovering from Tommy John surgery.
On November 20, 2018, Vólquez was added to the Texas Rangers 40-man major league roster for the 2019 season. On April 5, 2019, Vólquez was placed on the 10-day injured list with a sprained right elbow. On July 27, Vólquez announced he was retiring after the 2019 season.
International career
Vólquez pitched in the 2009 World Baseball Classic. He took the loss in the opening game for the Dominican Republic against the Netherlands, giving up three runs (unearned), two hits, two walks and three strikeouts in three innings pitched.
Vólquez again pitched for the eventual champion Dominican Republic in the 2013 World Baseball Classic, starting the first game in each of the three tournament rounds. He allowed 5 runs in 10 ¹⁄₃ innings and picked up the win in the semifinal game against the Netherlands.
Scouting report
Vólquez throws 4 pitches: a low to mid-90s fastball, a two-seam fastball that clocks also in the low to mid-90s, a mid-80s changeup and a high-70s curveball. Throughout his career, Vólquez has struggled with command of his pitches.
Personal life
Vólquez still makes a home in the Dominican Republic, where he spends four months during the off-season. Vólquez's father died on October 27, 2015, the same day he started Game 1 of the World Series. His wife requested that he not be informed mid-game of his father's death, so he had no knowledge of his death during his start. After pitching six innings, Vólquez exited the game and learned about the death in the clubhouse surrounded by his family.
Vólquez's brother, Brandy, was stabbed to death in the Dominican Republic on January 17, 2017.
Name issues
When he was signed by the Rangers in 2001 at age 17, he went by the name Julio Reyes, but his name was revealed to be Edison Vólquez after an immigration crackdown in 2003. In 2007, he asked the Rangers to add an "n" to his name after checking his birth certificate to find he was born Edinson. |
Nov. 13 (UPI) — A North Korean soldier was shot by Pyongyang forces on Monday while trying to escape through the de-militarized zone between North and South Korea, officials said.
The soldier left his post in the Joint Security Area and ran through the heavily fortified, 2.5-mile neutral zone, a statement from the South Korean military said.
The escaping soldier was found, bleeding, near Freedom House, the reception building on the southern side.
The soldier was hit in the shoulder and arm and was flown to a hospital in a United Nations helicopter after he was found. The United Nations operates the southern side of the DMZ.
The two sides did not exchange gunfire, although the alert level along the DMZ was raised and South Korean troops entered into full readiness mode, Stars and Stripes reported.
Though some 30,000 North Koreans have escaped to the South over the years, it is rare for Pyongyang troops to leave their posts in the Joint Security Area. The last such incident occurred in 2007, the South Korean Unification Ministry, which oversees defectors, said.
In 1984, a gun battle erupted as a tourist from the Soviet Union attempted to run across the demarcation line between the two countries.
The shooting occurred on the same day that South Korean President Moon Jae-in arrived in the Philippines for the ASEAN summit of Pacific Rim countries. |
A local state chair in Texas has infuriated his party with his bizarre tirade against the Stonewall Democrats, a group of Gay activists he compares to “termites.”
Via Wonk Room:
In an interview with the Current today, [Bexar County Democratic Chair Dan] Ramos blamed homosexuals in the party for both undermining his authority and for the poor election results in Bexar County in 2010. “They are all connected to the gay Democratic Party, the so-called Stonewall Democrats. Just like termites they managed to get some of their people in key positions,” he said. [...] Ramos said he opposes homosexuality on religious grounds and doesn’t believe gay-friendly Democrats like Stonewall reflect the values of Bexar County voters. “I liken them to the Tea Party — the Tea Party and the fucking Nazi Party — because they’re 90 percent white, blue-eyed, and Anglo, and I don’t give a fuck who knows that. Just like the blacks … they’re American, but you can’t get your way just because you’re black.”
Ramos, who is continuing to refuse to apologize for his comments, is now being asked by numerous leaders in the state’s Democratic Party to step down from his role as chair.
Via Burnt Orange Report:
County chairs and organizations across the state, including Harris County, have also denounced Ramos’ statements and actions. The Northeast Bexar County Democrats, a local Democratic organization, has created an online petition people can sign calling on his resignation.
The Dallas Voice reported that State Chairman Boyd Richie felt that Ramos possibly was in need of mental health services. “If this had only happened one time and he had made a sincere apology, then I might feel differently. But after having had the opportunity to do that, he’s only exacerbated the situation and made it worse. In my humble opinion, Mr. Ramos is in desperate need of mental health services,” said Richie.
Photo by Makaristos (Own work) [Public domain], via Wikimedia Commons |
A grassroots approach for greener education: An example of a medical student-driven planetary health curriculum
Given the widespread impacts of climate change and environmental degradation on human health, medical schools have been under increasing pressure to provide comprehensive planetary health education to their students. However, the logistics of integrating such a wide-ranging and multi-faceted topic into existing medical curricula can be daunting. In this article, we present the Warren Alpert Medical School of Brown University as an example of a student-driven, bottom-up approach to the development of a planetary health education program. In 2020, student advocacy led to the creation of a Planetary Health Task Force composed of medical students, faculty, and administrators as well as Brown Environmental Sciences faculty. Since that time, the task force has orchestrated a wide range of planetary health initiatives, including interventions targeted to the entire student body as well as opportunities catering to a subset of highly interested students who wish to engage more deeply with planetary health. The success of the task force stems from several factors, including the framing of planetary health learning objectives as concordant with the established educational priorities of the Medical School's competency-based curriculum known as the Nine Abilities, respecting limitations on curricular space, and making planetary health education relevant to local environmental and hospital issues.
Introduction
With the rising urgency of climate change, medical schools cannot ignore the impact of environmental problems on students and patients. In recent years, increased attention has been focused on how to prepare future doctors to address the health impacts of climate change, biodiversity loss, and environmental degradation. However, the medical education community has not settled on a unified approach to planetary health (PH) education. Implementing a PH curriculum is a particularly daunting challenge given the novelty of the field for many doctors and the overwhelming number of topics encompassed within. The magnitude of this challenge is significant, as revealed by surveys completed by the International Federation of Medical Student Associations in 2019-20 that found that only 14.7% of medical schools globally included climate change and health within the curriculum and only 11% incorporated education about the health impacts of air pollution. Multiple groups have proposed overarching principles to guide the creation of PH curricula from a top-down perspective. For instance, in 2019 the Planetary Health Alliance convened a task force to develop a framework for PH education intended "to move beyond a prescriptive list of competencies". The task force proposed five foundational domains for PH education (equity and social justice, interconnection within nature, movement building and systems change, systems thinking and complexity, and the Anthropocene and health) that they conceptualized as being embedded within learning priorities guided by local and global conditions.
Alternatively, Maxwell and Blashki proposed using a triad of outcomes to guide curriculum development: climate change preparedness (involving clinical management of climate-related illness and knowledge of how to provide healthcare sustainably), depth of education (using climate change as an illustrative example to deepen the existing knowledge and skills of medical graduates), and breadth of education (public and eco-health literacy). Separately, an international workshop used a collaborative approach to identify five domains meant to provide an overarching framework for the development of specific learning objectives. The domains included eco-medical literacy and clinical preparedness, proficiency in promoting eco-health literacy both among patients and at the community level, education in the delivery of sustainable systems, and incorporating sustainability as an element of medical professionalism. In other cases, PH education has evolved spontaneously in response to student advocacy and concerns. Students at the Florida International University Herbert Wertheim College of Medicine built on the impacts of environmental degradation that they observed during community service to develop a series of slides about planetary health topics that were inserted into existing lectures. At Emory University, two students began by organizing a lunch panel discussion on climate and health and, after the panel garnered a surprising amount of interest, harnessed the enthusiasm of the student body to develop a proposal for incorporating planetary health topics into the medical school curriculum. Their proposal was accepted by Emory's Executive Curriculum Committee with plans to implement the changes for the class of 2024. The creation of the Planetary Health Report Card by students at the University of California, San Francisco School of Medicine and the subsequent Planetary Health Report Card Conference held online in October 2021 provided an important venue for medical students at different schools to learn from each other's experiences and gain advocacy skills. Here we present the Warren Alpert Medical School of Brown University as an example of a student-driven, bottom-up approach that has led to the development of a longitudinal PH education program integrated into existing pedagogical priorities. The earliest efforts to expand PH education at the Medical School were disparate initiatives organized and led by students. In 2020, the efforts gained significant momentum when the administration sanctioned the creation of a task force dedicated to improving PH education. Composed of medical students, faculty, and administrators, the Planetary Health Task Force (PH-TF) has focused on both educating the entire student body about PH as well as creating opportunities for highly interested students to engage more deeply with these issues. In this article, we describe the student PH advocacy that led up to the creation of the task force, the curricular changes implemented to date, and the task force's ongoing work to improve PH education (Figure 1). Importantly, we focus on the role of the PH-TF in establishing student-identified PH educational priorities within the Medical School's existing pedagogical framework, known as the Nine Abilities. To our knowledge, the PH-TF has led to one of the broadest and most successful efforts to date to integrate PH education across all 4 years of medical school in a variety of forms.
Although we recognize that each medical school will need to individualize its own PH curriculum, we hope our experiences can be a template for how to incorporate PH into medical education.
Frameworks
The Medical School's PH curriculum was developed within the existing framework of the Medical School's pedagogical priorities. Since 1996, and most recently revised in 2021, the Warren Alpert Medical School has followed a competency-based curriculum which seeks to define the qualities, abilities, and knowledge that all students should have upon graduating, known as the Nine Abilities. These abilities are: effective communication, basic clinical skills, using basic science in the practice of medicine, diagnosis, prevention, and treatment, lifelong learning, professionalism, health equity and racial justice, moral reasoning and clinical ethics, and clinical decision making. The competencies set prior to the 2021 revision have been described previously. The PH-TF served as the key vehicle for enacting PH curricular change within the guidelines set by the Nine Abilities. The priorities for PH curricular change were selected in part based on the pre-existing student-driven initiatives that led to the creation of the PH-TF as well as the expertise of PH-TF faculty members in specific content areas. To ensure sufficient scope, the PH-TF consulted previously published frameworks, including the PH learning objectives from the "Climate and Health Key Competencies for Health Professions Students" from the Global Consortium on Climate and Health Education (GCCHE). This approach has enabled the PH-TF's efforts to emphasize the local priorities and interests of our community while avoiding unintentional omissions of crucial PH topics identified from national or international viewpoints.
Origins of the planetary health task force: Student-driven initiatives and the planetary health report card
Prior to the formation of the PH-TF, medical students led a variety of sporadic sustainability initiatives. On the medical school campus, student advocacy in 2019 resulted in the addition of composting and single-stream recycling. The Student Senate began requiring student group leaders to view a presentation on sustainable event-hosting and commit to following the guidelines. Given the intrinsic connection of PH to community impacts, interested students also frequently engaged with local environmental justice organizations in Rhode Island (RI). These included the Brown Agriculture Nutrition and Community Health program, a collaboration between the Brown Department of Family Medicine and a local elementary school that seeks to address disparities in access to green spaces, nutrition, and health education, as well as political advocacy with grassroots organizations such as Renew RI and Sunrise PVD, tours of the local landfill, and volunteer trips to harvest leftover crops for food banks. In 2018, a student-led needs assessment drew attention to gaps in the Medical School curriculum regarding PH. The survey, developed by a medical student after consultation with Medical School faculty and Rhode Island Department of Health experts, was sent to all 1st-year medical students (n = 144) and achieved a response rate of 50.7% (n = 73).
These were adopted from the "Climate and Health Key Competencies for Health Professions Students" from the Global Consortium on Climate and Health Education (GCCHE) to fit the Medical School curriculum. One of the primary strategies for tailoring the GCCHE competencies to the curriculum was to situate the PH competencies within the pre-existing framework of the Medical School's Nine Abilities. To demonstrate why PH education should be incorporated into the medical school curriculum, the PH-TF emphasized how the objectives of the PH curriculum furthered seven of The Nine Abilities (Table 1). Although different medical schools have different educational priorities, framing the goals of a PH education within preexisting curricular objectives emphasizes the relevance and importance of PH knowledge for future clinicians. It also highlights how PH can be efficiently woven into existing curricula rather than requiring the addition of a new and distinct subject area. Learning environment, objectives, and format In line with the PH curriculum's core competencies, the interventions undertaken by the PH-TF span a diverse range of learning environments and formats from traditional core classroom education to an elective course, student-directed research, and extracurricular community engagement opportunities. These diverse initiatives can be categorized into two groups: first, the interventions targeted to include the entire student body and second, the programs designed for the subset of students who desire deeper engagement with planetary health. The format and learning objectives of the specific initiatives are detailed here. Student body-wide interventions undertaken by the planetary health task force Integrating planetary health into pre-clerkship material Prior to the formation of the PH-TF, isolated lectures on PH topics existed in the curriculum. The first-semester Health Systems Sciences (HSS) course included lectures on environmental justice, lead poisoning, and occupational health, but PH themes disappeared from the curriculum after the conclusion of the HSS course. During early discussions with the PH-TF, the main concern from the Office of Medical Education about improving PH education was the limited time available to cover new topics. To solve this problem, the curriculum working group proposed to integrate PH longitudinally within the existing curriculum. Similar concerns about limited curricular space are widely shared across health professions schools and several other medical institutions have adopted a similar integrative approach to PH education designed to minimally disrupt existing curricula. Student members of the PH-TF first reviewed the course objectives for every pre-clerkship course and identified topics that were amenable to being viewed through a PH lens. For each of these topics, students proposed specific PH-related learning objectives that could be addressed within existing lectures. Each proposed learning objective was connected to the relevant PH core competency. A selection of the learning objectives are shown in Table 2. The planetary health core competencies adopted from the "Climate and Health Key Competencies for Health Professions Students" from the Global Consortium on Climate and Health Education were able to be aligned with seven of the "Nine Abilities" that guide the Medical School curriculum. 
After the administration accepted this proposal in May 2021, the PH Curriculum Integration Committee (PHCIC) consisting of students, curriculum deans, and faculty experts in PH was formed within the curriculum working group to implement the proposal. The PHCIC's approach to implementation is guided by three principles: efficacy, sustainability, and minimizing disruptions to the existing curriculum. The PHCIC is currently refining the PH learning objectives and planning to incorporate them into courses using an "integration toolbox" which will provide a set format for integration of PH material. Importantly, the PHCIC is focusing on integrating material into case-based small group discussions, as teaching PH through active learning methods has been found to be critical for other institutions' success. The PHCIC meets individually with course leaders to introduce the project and solicit feedback, thereby engaging the faculty as stakeholders in this initiative with the goal of promoting sustainability of the initiatives. The PHCIC will also oversee designing an evaluation program to assess impact and enable refinement of the curriculum changes. While this is a multi-year process, the PHCIC plans to target a pilot set of the organ systems-based courses for this year's incoming medical students. Education in environmental exposure screening and counseling Several of the PH core competencies detailed in Table 1 require teaching students to directly address PH issues with affected patients. The PH-TF chose to situate this component of PH education within the school's Doctoring course, a 2-year clinical skills course on interviewing and physical examination skills taught during the pre-clerkship years. To this end, in fall 2021 additional environmental exposure screening questions and context were incorporated into the social history component of the patient interview checklist taught to 1st-year medical students. These questions are shown in Figure 2. In future years, the PH-TF plans to develop a dedicated class on environmental exposure screening and counseling that will be taught to all medical students during Doctoring. The session will include case simulations so students can practice environmental exposure counseling. To provide longitudinal reinforcement of these skills, the PH-TF is also currently in discussions with the family medicine clerkship director about integrating PH screening and counseling within the existing family medicine clerkship curriculum for all 3rd-year medical students. Mandatory waste training for clinical medical students Prompted by a student-led waste audit at a local hospital, PH-TF students created a mandatory waste training for students entering clerkships. The training is designed to further Ability 8 on moral reasoning and clinical ethics by addressing healthcare impacts on the environment (Table 1), given that the healthcare system produces 10% of US greenhouse gases and generates four billion pounds of waste each year. This training consists of an hour-long session for all 3rd-year medical students during pre-clerkship clinical skills training and has now been taught to two classes of students in 2021 and 2022. It teaches students about proper healthcare waste disposal and decreasing red bag waste, including hospital waste regulations, practice scenarios, and instruction in counseling patients on medical waste disposal at home. Figure 3 shows two practice scenarios from the training.
FIGURE 2 Environmental exposure screening questions incorporated into the social history section of the patient interview checklist taught to first-year medical students. Additional context was provided in the Student Companion to the Medical Interview, a medical interview guide available to all students.
FIGURE 3 Examples of practice scenarios from the waste disposal training delivered to all medical students entering clerkships.
Planetary health elective courses Although much of the PH curriculum at the Medical School has been designed to reach all medical students, the PH-TF has also created multiple opportunities for students with strong interests in PH to develop additional knowledge and skills. One such opportunity comes via PH elective courses. At the Warren Alpert Medical School of Brown University, pre-clerkship electives are offered to 1st- and 2nd-year students. Pre-clerkship electives may be organized by students and either taught by faculty or led by students alongside a faculty advisor. The Medical School also offers electives for clinical students in their third and 4th years, which are typically organized and led by faculty. The PH-TF created a pre-clerkship PH elective that was taught in 2021 and a clinical elective is being developed for the 2022-23 academic year. Prior to the formation of the PH-TF, a pre-clerkship elective on "Climate Change and Health" was offered to 1st- and 2nd-year medical students in fall 2019. The course exposed students to heat-related morbidity and mortality, changing infectious disease patterns, and the impacts of extreme weather events on human health through lectures, a final project, and community service. Although the COVID-19 pandemic disrupted this elective, the PH-TF revived the elective in fall 2021. Titled Planetary Health: Global Environmental Change and Emerging Infectious Disease, this version of the course took a more focused approach to one facet of PH. Over eight sessions, clinicians, ecologists, and public health experts introduced students to the dynamics of infectious disease emergence resulting from climate change, land-use change, and increased human interaction with wildlife. For a final project, students wrote op-eds about the effects of climate change on human health and were offered guidance to publish their work. The elective garnered significant interest in fall 2021 and has been renewed for fall 2022. PH-TF members are currently crafting a clinical PH elective for 3rd- and 4th-year medical students. This elective's goal is to build on the foundation of pre-clerkship PH knowledge to develop student leaders in PH. This elective will follow an asynchronous curriculum that explores PH education, policy, and clinical impacts. The asynchronous nature of the elective will allow students the independence to pursue a specific topic of interest related to education, advocacy, or research within the field of PH under the mentorship of faculty. Facilitating student research in planetary health Another goal of the PH-TF is to encourage student PH-related research in order to fulfill the PH competencies within Abilities 3 and 7 (see Table 1). Prior to the creation of the PH-TF, small groups of driven medical students had already found and created opportunities to engage in PH research. These efforts have resulted in several publications including waste audits in local hospital emergency rooms and a retrospective study on the impact of summer temperatures on Emergency Medical Services (EMS) utilization.
Ongoing student PH research projects include surveys to quantify the carbon footprint of residency interview travel and of Rhode Island EMS and to assess hospital food waste, as well as a retrospective study on the impact of temperatures at discharge on surgical patient readmissions. However, a significant challenge for students interested in PH research is the lack of a centralized program devoted to PH research at the Medical School and its affiliated healthcare systems. Medical students currently rely on word-of-mouth to find research mentors with expertise in PH. Compared to other research areas such as sepsis or aging, the Medical School and its affiliated hospital systems have fewer principal investigators engaged in PH research. Recently, the decision of the Rhode Island Medical Journal to dedicate an issue to climate change and health helped draw the attention of local researchers and physicians to these issues. While hiring new faculty or creating a centralized research initiative dedicated to PH is beyond the scope of the PH-TF, the Medical School can still make progress by directly assisting students interested in PH research as well as drawing the attention of the student body to the potential for scholarship in this area. To this end, the Medical School administration recently announced a new opportunity for a rising 3rd- or 4th-year medical student to spend a fully funded gap year focused on PH research. The selected student will have the opportunity to sit on the PH-TF and contribute to the task force's initiatives. In addition, the PH-TF plans to create a new Scholarly Concentration (SC) in Planetary Health. The SC program is a longitudinal commitment to a rigorous independent scholarly project across all 4 years of medical school. Projects are undertaken under the mentorship of a Brown faculty member and further educational and mentorship opportunities are provided by the program directors of each SC. Students choose to participate in the SC program on an elective basis and undergo a competitive application process, with ∼25% of the student body selected to participate. As of 2022, there are 12 Scholarly Concentrations at the Medical School, ranging from traditional biomedical research in the Translational Research in Medicine SC to more socially oriented domains such as the Caring for Underserved Communities SC or the Medical Humanities and Ethics SC. An SC in Planetary Health would serve as a vehicle to consolidate available research opportunities and connect students to relevant faculty. The PH-TF plans to design the SC to help medical students take advantage of resources available in the broader Brown University community by including researchers at Brown's School of Public Health and the Institute at Brown for the Environment and Society. By providing dedicated training in research methods relevant to PH, the SC would enable medical school graduates to advance scholarship in this field throughout their careers. Engaging with and learning from the community Community engagement is both an important part of medical education and an intrinsic part of the PH movement. While the COVID-19 pandemic negatively impacted many local environmental organizations, the PH-TF's community engagement working group plans to deepen existing partnerships between medical students and the Community Engagement Core of the Brown Superfund Research Program, an initiative that focuses on academic-government-community partnerships to address PH and remediation issues in RI.
Results to date The creation of the PH-TF resulted in a more comprehensive, cohesive, and longitudinal PH curriculum. To date a variety of initiatives have been successfully enacted including: various efforts to integrate PH longitudinally within the existing pre-clinical medical curriculum including additions to the medical interview checklist in the Doctoring course and, through the establishment of the PHCIC and resulting interest from course leaders, the addition of a dedicated lecture on air pollution in the 2nd-year Pulmonary course; mandatory waste training for clerkship students to address healthcare impacts on the environment and discuss moral reasoning and clinical ethics; an elective to introduce pre-clerkship students to infectious disease emergence in relation to global environmental change; and the opportunity for a clerkship student to take a fully funded gap year involving PH research and PH-TF initiatives. The work of the PH-TF is ongoing and the following changes are currently in the process of being implemented: finalization of an integration "toolbox" by the PHCIC with plans to pilot changes for current first-year medical students when they start the organ systems-based courses in spring 2023; creation of a Doctoring session on environmental exposure screening; and a clerkship elective focused on PH policy and clinical impacts. In future years, the PH-TF plans to tackle additional projects including the creation of a PH scholarly concentration for research endeavors and expanding the work of the PH community engagement working group. While no formal assessments of the impact of these initiatives have been completed to date, the PHCIC plans to repeat the PH education needs assessment and use structured surveys to assess the effect of the curriculum changes that will be implemented in the 2022-23 curriculum. Discussion of lessons learned and limitations The approach to PH education at the Warren Alpert Medical School of Brown University has evolved over the past 5 years from a set of sporadic student initiatives into a cohesive structured task force capable of sustainably enacting significant changes over a multi-year time frame. Framing PH learning objectives within the school's established educational priorities, the Nine Abilities, was central to our success because it demonstrated that PH was integral to the mission of the Medical School. Other important factors that led to the success of this initiative include respecting limitations on curricular space by addressing PH topics at their intersection with existing material, creating connections to local environmental and hospital issues, and providing a range of opportunities for both the entire student body as well as a subset of highly interested students. The willingness of the Medical School administration to listen and respond to student concerns has been essential throughout this process. While we appreciate that each medical school will need to tailor their approaches, we believe that some of the strategies that worked in our context will likely be generalizable to other institutions. Including students, faculty, and administrative members on the PH-TF has substantially hastened the speed with which realistic proposals can be generated and implemented because it enables all parties to communicate directly with each other from the start of the process. 
The strategy of inserting PH topics within the existing curriculum has succeeded for us and for students at several other medical schools because it does not require significant schedule changes. It also does not overwhelm students with additional lectures nor does it require faculty members to be content experts in PH. The Planetary Health Report Card also served as a helpful starting point for student advocacy efforts that effectively caught the attention of our administration. Finally, ensuring that elective opportunities are available for the subset of students most passionate about PH has helped build relationships between PH-interested students across class years and has effectively created a pipeline for recruiting new student members to the PH-TF, reducing the difficulties associated with student body turnover. While the curriculum working group of the PH-TF has significantly improved the quality and scope of PH education, the task force structure has its limitations. While the involvement of faculty and administration on the PH-TF has helped provide the continuity required to enact multi-year initiatives, it has still been difficult to build institutional memory about prior PH initiatives given the constant turnover of medical students and the changing schedules of students between pre-clerkship, clerkship, and post-clerkship years. The community engagement working group of the PH-TF has had difficulty making progress because the COVID-19 pandemic disrupted the activities of local environmental organizations for so many months that previous ties between medical students and those organizations were effectively severed when those medical students graduated and moved to residencies without the chance to pass down those connections to the subsequent medical student classes. In addition, the PH-TF's capacity to enact change has been somewhat limited by the sparsity of medical faculty members with expertise in planetary health education. The lack of a centralized program for planetary health research at the Medical School and its teaching hospitals has also made it difficult to find and recruit new faculty members. While significant research and scholarship related to planetary health occur at the Brown School of Public Health and in the environmental sciences department, the PH-TF has had difficulty coordinating with other parts of the university, in part due to the irregular and sometimes unpredictable hours required during the clinical years of medical school. Regarding the scope of the PH core competencies, although many of the GCCHE competencies were amenable to being adapted within the framework of the Nine Abilities, some could not be included. For instance, the GCCHE competency "explain the role of subnational, national and global policy frameworks and governance structures to address health risks associated with climate change" was not included because it did not fit easily within the primarily clinical focus of the Nine Abilities. However, while the PH-TF might have difficulty arguing that such topics need to be taught to the entire student body, the elective opportunities created for students with the greatest interest in PH provide flexibility for individual students to pursue such topics if interested.
Finally, while the PH-TF plans to repeat the needs assessment and undertake surveys to assess the impact of PH education initiatives, our conclusions about the effect of the new PH education initiatives on the student body will remain speculative until these structured assessments are completed. Despite these limitations, we hope our experiences can serve as a useful example for other medical schools interested in implementing their own PH education programs. Data availability statement The original contributions presented in the study are included in the article/supplementary material, further inquiries can be directed to the corresponding author. |
SANTA CLARA — Nate Montana was on the 49ers’ practice field Wednesday trying to make a name for himself.
Well, a first name, anyway. That last name is pretty well established.
“It’s like a blessing and a curse. You try to ignore, ‘Oh, that’s Joe Montana’s son,’” the quarterback said after the 49ers’ workout for local pro prospects. “You just try to work and show them that you’re a different player than your dad.”
Nate Montana, 23, does not expect to be selected during the NFL Draft next week, figuring he’s more likely to catch on as an undrafted free agent. The 6-foot-4, 215-pounder is also open to the possibility of playing in the Arena Football League or in Canada. He just wants a shot.
Nate’s younger brother, Nick, is a well-regarded quarterback prospect who signed with Tulane and will be a junior in the fall after transferring from Mt. San Antonio College.
Nate Montana spent his senior season at West Virginia Wesleyan, the last of his four college stops. Because he attended De La Salle High in Concord, he was eligible to attend the 49ers’ local pro day along with about 50 other draft hopefuls.
This was his first workout for an NFL team. It was also the first-ever visit to the Santa Clara facility for the son of the Hall of Fame quarterback.
“I love the Bay Area. It’s a great, great place. I lived here and grew up here my whole life. In that aspect, it’s awesome to be local,” he said. “Coming back to where my dad played, you just have to put your head down and try to make your own name.”
The Bay Area represents the closest thing to roots for a player coming off a nomadic collegiate career. Nate Montana was a backup at Pasadena City College, Notre Dame and the University of Montana before starting nine games during his senior season at West Virginia Wesleyan.
Nate finally got a chance to play at that Division II school, where he led the conference in passing yards (2,480) and touchdown passes (19). He was an honorable mention all-conference selection.
Jonas Jackson, the Bobcats head coach last season, said in a phone interview that Nate Montana was not the type to ride on his father’s coattails.
“One of the first things I want to say is: That kid was an extremely hard worker. One day, he’s going to be a great coach,” Jackson said. “He watched a ton of film and asked a ton of questions. What he’s doing, it’s not because of his dad. He had his own love of the game.”
Montana attempted at least 50 passes in seven of his games, completing 51.6 percent. His biggest game was against West Virginia State, when he threw for 432 yards and four touchdowns.
“He makes all the right reads. He’s so fast at reading the field that he’s on it before it happens,” Jackson said. “A guy that works that hard only needs an opportunity.”
In evaluating his performance, Montana said: “I’m just trying to come out here and compete with the other guys and show a team I can play.”
Nate Montana understands that his NFL prospects are bleak. He was not among the 16 quarterbacks invited to the Scouting Combine and his performance at the NFL Regional Combine last week at Cowboys Stadium drew mixed reviews.
Nate said his dad has been most helpful in trying to prepare him for the challenging road ahead.
“He’s been really supportive. It’s been great having him. He’s been through the process,” Nate said. “He opened my eyes to how cutthroat the business is. I know a little of what to expect.”
Follow Daniel Brown on Twitter at twitter.com/mercbrownie. |
Seismic radial anisotropy in Central-Western Mediterranean and Italian peninsula from ambient noise recordings
The dynamics of crustal extension and the crust-mantle interaction in the Central-Western Mediterranean and Italian peninsula (i.e. Liguro-Provençal and Tyrrhenian Basin), and plate convergence (i.e. Alpine and Apennines chains) are key for the understanding of the current geodynamic setting and its evolution in the region. However, open questions such as the style, depth and extent of the deformation still exist despite the wealth of seismological and non-seismological data acquired in the past decades. In this context, it is necessary to provide improved subsurface models in terms of seismic velocities, from which better constraints on the geodynamic models can be derived.
We use seismic ambient noise for retrieving phase velocities of Rayleigh and Love waves in the 4-35 s period range, using private (LiSard network in Sardinia island) and publicly available continuous recordings from more than 500 seismic stations. Considering the excellent coverage and the short period of recovered phase velocities, our study aims to provide an unprecedented, high-resolution image of the shallow crust and uppermost mantle.
We employ a Bayesian trans-dimensional, Monte Carlo Markov chain inversion approach that requires no a priori model nor a fixed parametrization. In addition to the (isotropic) shear wave velocity structure, we also recover the values of radial anisotropy (ξ = (VSH/VSV)²) as a function of depth, thanks to the joint inversion of both Rayleigh and Love phase velocities.
Focusing on radial anisotropy, this appears clearly uncoupled with respect to the shear wave velocity structure. The largest negative anisotropy anomalies (VSH < VSV, ξ < 0.9) are found in the Liguro-Provençal and western Tyrrhenian basins in the top 10-15 km, suggesting a common structural imprint inherited during the extensional phases of such basins. Conversely, the eastern Tyrrhenian basin shows positive radial anisotropy (VSH > VSV, ξ > 1.1) within the same depth range. This evidence, combined with the observed shear wave velocities typical of the uppermost mantle, corroborates the presence of a sub-horizontal asthenospheric flow driving the current extension and oceanization of the eastern Tyrrhenian basins.
Moving towards the Italian mainland, a strong anomaly of negative anisotropy appears in the eastern portion of the Apennines chain. We relate such an anisotropic signal with the ongoing compressive regime affecting the area. Here, the high-angle thrust faults and folds, which accommodate the horizontal shortening, obliterate the horizontal layering of the sedimentary deposits, currently constituting the flanks of the fold system.
Our results suggest that the combination of radial anisotropy and shear wave velocities can unravel key characteristics of the crust and uppermost mantle, such as inherited or currently active structures resulting from past or ongoing geodynamic processes.
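As a quick illustrative aside (not part of the abstract above), the radial anisotropy parameter ξ can be computed directly from the two shear wave speeds. The class name and the numeric velocities below are made-up examples, chosen only so the results fall on either side of the ξ < 0.9 and ξ > 1.1 thresholds mentioned in the text.

public class RadialAnisotropyExample {
    // xi = (Vsh / Vsv)^2: xi < 1 means Vsv is faster (negative radial anisotropy),
    // xi > 1 means Vsh is faster (positive radial anisotropy).
    static double xi(double vsh, double vsv) {
        return (vsh / vsv) * (vsh / vsv);
    }

    public static void main(String[] args) {
        System.out.printf("xi = %.2f%n", xi(3.4, 3.6)); // ~0.89, i.e. xi < 0.9 (hypothetical speeds in km/s)
        System.out.printf("xi = %.2f%n", xi(3.8, 3.6)); // ~1.11, i.e. xi > 1.1 (hypothetical speeds in km/s)
    }
}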
package Spectankular.game.GameObjects.PowerUps;
import Spectankular.game.GameObjects.GameObject;
import Spectankular.game.TankClasses.Tank;
public abstract class PowerUp extends GameObject {
/**
* Applies this power up's intended effect to the given tank,
* based on the concrete power up type.
*/
public abstract void poweredUp(Tank tank);
@Override
public void update() {} // no per-frame behaviour is needed for power ups
}
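No concrete subclass appears in this file; purely as an illustrative sketch, one could look like the example below. The HealthPowerUp class and the Tank accessors it calls (getHealth and setHealth) are assumptions made for the example and are not part of the repository code shown above.

// Hypothetical example only: the Tank methods used here are assumed for illustration.
package Spectankular.game.GameObjects.PowerUps;

import Spectankular.game.TankClasses.Tank;

public class HealthPowerUp extends PowerUp {
    private static final int HEAL_AMOUNT = 25;

    @Override
    public void poweredUp(Tank tank) {
        // Restore a fixed amount of the collecting tank's health.
        tank.setHealth(tank.getHealth() + HEAL_AMOUNT);
    }
}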
|
Ita Buttrose has announced she's leaving Studio 10, a show that's already suffered several desertions in the past year. And other media tidbits of the day.
While an independently run Sky News under Disney would at first seem to diminish any Murdoch influence, it pays to remember that the family is playing the long game.
New Zealand's Sky Network Television has seen a massive share price fall in the last few days as it scrambles to stake its claim in the streaming ring alongside former News Corp comrades Foxtel and Sky plc.
The Murdochs are not in a position to say no to a fresh offer on Sky.
Monopolies aren’t what they used to be, writes journalist and media-watcher Christopher Warren.
Mayne: are the Murdochs (including Rupert) really ‘fit and proper’?
The UK government will have to decide if the Murdochs are "fit and proper" to hold a UK broadcasting licence. James and Lachlan have made the case that they are -- but what about dear old dad?
Mayne: will the Brits show some spine and resist Murdoch rent-seeking?
The Murdochs are masterful rent-seekers when it comes to politically vulnerable Western governments. |
Rorschach finally joins the Before Watchmen fun.
Poll a group of Watchmen fans on who their favorite character is, and chances are the majority will answer "Rorschach." There's something endlessly appealing about the dark, twisted vigilante who sets out to solve the world's ills with his fists. No doubt many writers would have jumped at the chance to pen the further adventures of Rorschach, but DC opted for the writer perhaps best suited for his bloody, grimy, nihilistic world -- Brian Azzarello. Unsurprisingly, Azzarello quickly and comfortably settles in with the character in Before Watchmen: Rorschach #1.
Too many of the Before Watchmen books are guilty of retreading material we've already seen in the original series. But not unlike his Comedian series, Azzarello instead opts to explore an unknown period in Rorschach's career. Issue #1 picks up in 1977 and introduces a serial killer known as The Bard with a penchant for carving messages into the corpses of his victims. A perfect candidate for Rorschach's brand of justice, in other words.
If you're hoping for Azzarello to provide a radically new take on the lead character or further mine his psychological depths, you may be a little disappointed in this issue. It isn't overly creative or ambitious. That said, it is well executed and very faithful to the tone of Rorschach's scenes in the original series. Azzarello captures Kovacs' harsh, disjointed voice well in both his dialogue and the obligatory "Rorschach's Journal" narration. The narration is slightly more verbose than what we saw in the original series, but perhaps that's for the best. 20 pages of sentence fragments could easily grow tiresome.
And again, while Azzarello doesn't reinvent the wheel, his script does show some thematic ambition as it explores Rorschach's troubled past, his relationship with his mother, and his insistence on living among the scum and filth of a city he professes to hate so much. We know by now why Rorschach does what he does, but that doesn't mean there isn't compelling material to be had in attacking the character from a slightly new angle and in a new time period. My only real complaint about the first issue's script is that it feels a little brief. Rorschach barely embarks on his manhunt before running into a wall and regrouping for issue #2. With the series being only four issues long, I was hoping for a slightly meatier debut.
The series also has the distinction of reuniting Azzarello with artist Lee Bermejo. Unlike books like Luthor and Joker, Bermejo sticks entirely to his more cinematic, textured style rather than switching between styles. The downside to the hyper-realistic approach is that the storytelling flow isn't as strong as in some of the other Before Watchmen books. While Bermejo seems to make some adjustments to his style in terms of page layouts, in general the book is more about big, static images. On the plus side, Bermejo expertly captures the grit and general sense of hopelessness in this world. The book is almost pretty in its ugliness. In particular, Bermejo's depiction of the shadowy Bard and his victims really helps sell the danger of the villain in a way the sparse writing alone wouldn't have.
The lettering deserves special mention as well. Rob Leigh resurrects the old-school typewriter approach for Rorschach's captions. The intentional spelling errors and other mistakes go that extra little bit towards convincing the reader they're peering into an undiscovered portion of Rorschach's personal history. It's a shame the same effect couldn't be achieved with the lettering in the Crimson Corsair backup feature (which is as unremarkable this month as it's been since the beginning).
Before Watchmen: Rorschach delivers exactly what you'd expect when this creative team pairs up for this particular character. It doesn't break new storytelling ground, but it offers an enjoyable read that doesn't rely overly much on material we've read before. If any Before Watchmen book could be read and enjoyed with no knowledge of the original Watchmen, it's probably this one.
Jesse is a writer for IGN Comics and IGN Movies. He can't wait until he's old enough to feel ways about stuff. Follow Jesse on Twitter, or find him on IGN. |
Utility and pitfalls of the electrocardiogram in the evaluation of cardiac amyloidosis Abstract Background Cardiac amyloidosis is a protein misfolding disorder involving deposition of amyloid fibril proteins in the heart. The associated fibrosis of the conduction tissue results in conduction abnormalities and arrhythmias. Classical electrocardiogram (ECG) findings in cardiac amyloidosis include low voltage complexes with increased left ventricular wall thickness on echocardiography. However, this classical finding is neither sensitive nor specific. As cardiac amyloidosis is associated with a generally poor prognosis, the need for early recognition of this disease is important given the availability of new treatment options. In this review, we highlight 3 cases of patients with cardiac amyloidosis. Although presenting with typical clinical signs and symptoms, ECG for all 3 patients was not consistent with the classical findings described. They underwent further diagnostic tests which clinched the diagnosis of cardiac amyloidosis, allowing patients to receive targeted treatment. Through the review of the literature, we will highlight the different ECG patterns in patients with different types of cardiac amyloidosis and clinical scenarios, as well as the pitfalls of using ECG to identify the condition. Lastly, we also emphasize the current paradigms in diagnosing cardiac amyloidosis through the noninvasive methods of echocardiography, cardiac magnetic resonance imaging, and nuclear technetium-pyrophosphate imaging. Conclusions Electrocardiogram is often the first investigation used in evaluating many cardiac disorders, including cardiac amyloidosis. However, classical features of cardiac amyloidosis on ECG are often not present. A keen understanding of the ECG features of cardiac amyloidosis and knowledge of the diagnostic workflow is important to diagnose this condition. | INTRODUCTION Cardiac amyloidosis (CA) is a disorder of protein misfolding, with the end result of amyloid fibril deposition in the heart resulting in systolic and diastolic dysfunction. The commonest cause of CA is AL (immunoglobulin light chain amyloid) type, in which amyloid fibrils are derived from monoclonal immunoglobulin light chains. The other most common amyloidosis affecting the heart would be transthyretin (TTR)-related amyloidosis (ATTR). ATTR can result either from slow deposition of amyloid fibrils derived from wild-type (non-mutant) TTR in the elderly (senile systemic amyloidosis) or when mutations in the TTR gene encode variant protein, decreasing the stability of the TTR tetramer and promoting misfolding into amyloid fibrils. Amyloid deposition in the heart is associated with conduction tissue fibrosis, resulting in conduction abnormalities and arrhythmias. CA is often under-diagnosed in the general population and is associated with a variable but generally poor prognosis. Timely diagnosis of CA is challenging because the clinical presentation is similar to other infiltrative cardiomyopathies, storage disorders, and hypertrophic cardiomyopathy. Classical teaching for CA electrocardiography (ECG) includes low voltage complexes (defined by total height of the QRS complex in the limb leads <5 mm and <10 mm in the precordial leads), a pseudoinfarction pattern (pathological Q waves or QS waves on two consecutive leads in the absence of previous ischemic heart disease, left bundle branch block, or LV wall motion abnormalities), conduction abnormalities, and arrhythmias.
However, this classical pattern may not always be diagnostic and the clinician often needs to consider other investigations, especially when diagnosing ATTR CA. We present a case series of three patients with ATTR CA, their presenting ECGs, and investigations that they underwent to diagnose their condition. | CASE 1 A 50-year-old gentleman presented with an episode of heart failure. He had complaints of orthopnea and lower limb swelling. He had a family history of heart failure and autonomic neuropathy, present in his mother and maternal uncle. His ECG showed sinus rhythm and QRS size on the limb leads was <5 mm. Average QRS size on the precordial leads was <10 mm. This was in keeping with low voltage QRS complexes. PR interval was 168 ms, QRS duration was 76 ms, and the corrected QT interval (QTc) was 445 ms. There were Q waves in leads II and III. (Figure 1a) These ECG findings were in keeping with the classical findings in CA. Cardiovascular magnetic resonance (CMR) revealed diffuse myocardial thickening with diffuse interstitial fibrosis involving the entire left ventricle, which was suggestive of an infiltrative cardiomyopathy. His myeloma screen was negative for monoclonal light chains, and a bone marrow examination was unremarkable. Nuclear technetium-pyrophosphate (PYP) imaging showed a Grade III myocardial PYP-uptake. Heart-to-contralateral ratio at 1 hour was 1.89. These findings were diagnostic for ATTR CA. (Figure 1b) He was counseled for genetic testing and was found to have the c.277A>G (p.Ile93Val) pathogenic variant for ATTR. He was started on tafamidis and has been clinically well. | CASE 2 A 68-year-old gentleman presented with symptoms of heart failure. He had complaints of exertional shortness of breath, orthopnea, and decreased exercise tolerance with lower limb edema. He has a past medical history of hypertension, which was well-controlled. His heart-to-contralateral ratio was 2.3, which was diagnostic for ATTR CA. He was treated with doxycycline and ursodeoxycholic acid, and has been clinically well. | CASE 3 A 59-year-old gentleman presented with symptoms of bilateral numbness of the extremities and dysautonomia symptoms of postural dizziness. He also had orthopnea and lower limb swelling. He was found to have ATTR and familial amyloid polyneuropathy (FAP) that was diagnosed on sural nerve biopsy. This was on a background of known family history of FAP (father and paternal aunts). His ECG showed sinus rhythm, borderline LVH, and P pulmonale. Average QRS size on precordial leads was 9 mm, and the average QRS size on the limb leads was 20 mm. PR interval was 164 ms, QRS duration was 98 ms, and QTc was prolonged at 505 ms. (Figure 3a) CMR revealed LVH, mildly impaired left ventricular ejection fraction, and diffuse subendocardial enhancement of bilateral atria and ventricles, which was in keeping with the diagnosis of CA. (Figure 3b). He underwent genetic testing, which came back positive for pathogenic variant Arg54Thr. He is being treated with tafamidis, frusemide, and spironolactone. He remains well on therapy. | ECG pattern differences between AL and ATTR CA Low voltage complexes on ECG coupled with increased ventricular wall thickness on echocardiography are the classical teaching of CA. This was first described by Carroll et al. utilizing limb and precordial voltage indices. Nevertheless, low voltage complexes are not a universal finding in CA.
FIGURE 1 (a) Electrocardiogram of patient showing sinus rhythm with low voltage complexes in the chest leads and pseudoinfarction pattern in leads II and III. (b) Technetium-pyrophosphate imaging of patient showing diffuse pattern of moderately intense radiotracer uptake seen in the left ventricular myocardium compared with the bony rib cage. Heart-to-contralateral lung ratio is 1.89 at 1 h. Semiquantitative interpretation in relation to rib uptake reveals increased myocardial uptake as compared to the rib tracer uptake, score 3. The scintigraphic features are strongly suggestive of transthyretin amyloid cardiomyopathy.
There is much variation in the reported prevalence of low voltage complexes in the literature, ranging from 46% to 70%. In another analysis of ECG findings in patients with CA, low limb voltage was found in approximately 35% of patients, while 60% had low Sokolow-Lyon voltage ≤15 mm. Low voltage complexes appear to be more prevalent in the AL CA group than in the ATTR CA group. Mussinelli et al. showed that low peripheral QRS amplitude (defined as QRS amplitude ≤5 mm in each peripheral lead) or low Sokolow-Lyon index (≤15 mm) may represent a useful electrocardiographic clue in the diagnosis of cardiac involvement in a population of patients with AL CA. The corresponding prevalence of AL CA using the low peripheral QRS amplitude criteria was 66.4% and 84.0% when using the low Sokolow-Lyon index criteria. However, when focusing on patients with ATTR CA in the literature, low voltages were only present in about 25% of the study population. A possible reason for this difference in findings is that ATTR CA behaves as a progressive cardiomyopathy characterized by slow amyloid deposition within the cardiac chambers and conduction system. AL CA, however, resembles an acute myocarditis with early symptom onset and rapid disease progression to end-stage heart failure, despite lesser degrees of infiltration, due to the toxic effects of AL chains. In a study by Dungu et al. looking at a population of patients with ATTR CA, the investigators found low voltage complexes in the precordial leads in 30 patients (49.2%). However, the investigators did notice that voltage size at initial presentation and during follow-up was negatively correlated with the duration of symptoms. Thus, a reduction in voltage size is likely to reflect increasing accumulation of amyloid protein over time. This finding of low voltages in patients with advanced ATTR CA disease was also noted in the study by Cyrille et al. As such, low voltage complexes on ECG are a relatively late finding of ATTR CA and may not be useful for early identification. Apart from differences in low voltage complexes between AL and ATTR CA, arrhythmias like atrial fibrillation (AF) tend to be more prevalent in ATTR CA compared with AL CA. Prevalence of AF in ATTR CA was found to be 4 to 6 times higher than in AL CA in a study by Cappelli et al. Another study by Cyrille et al. also showed similar findings, where more than 50% of the patients in the study with ATTR CA had AF as compared to just 6% of the patients with AL CA. In terms of conduction defects, patients with ATTR CA also present with a higher prevalence of conduction defects including AV block grade 1 or higher and intraventricular delay. This finding was also seen in the study by Cyrille et al., where more than 50% of the patients with ATTR CA had either primary or secondary AV block as compared to just 26% of patients with AL CA.
In both studies, the proportion of ATTR CA patients requiring pacing was much higher than that of those with AL CA. A pseudoinfarction pattern on ECG, defined by pathological Q waves (1/4 of the R amplitude) or QS waves on 2 consecutive leads in the absence of previous ischemic heart disease, left bundle branch block, or left ventricular wall motion abnormalities, is a common ECG finding seen in both AL and ATTR CA. In patients with AL CA, presence of pseudoinfarction was associated with lower systolic blood pressure, higher N-terminal pro-brain natriuretic peptide levels, and advanced New York Heart Association (NYHA) heart failure classifications. Patients with pseudoinfarction pattern were also associated with higher prevalence of peripheral low voltage ECG findings, and survival was significantly shorter in the pseudoinfarction group than in the group without pseudoinfarction pattern. A possible explanation for the presence of pathological Q waves on ECG in patients without overt epicardial coronary obstruction could be the deposition of amyloid in the microcirculation and smaller intramyocardial arteries, which is associated with worse outcomes. The study by González-López et al., which looked at a population of patients with ATTR CA, showed that the prevalence of pseudoinfarction pattern was found to be 60% and was mainly present in the anterior leads. Presence of pseudoinfarction patterns in ATTR CA patients progressively increased with increasing thickness of the interventricular septum and was most common in patients with the thickest hearts. | QRS complex size relative to degree of LVH The study by Carroll et al. showed that voltage-mass ratio was a good indicator for CA. Voltage-mass ratio is defined as Sokolow-Lyon index divided by the cross-sectional area of the left ventricular wall, and a ratio of <1.5 was suggested to be indicative of CA. This has since led to the recommendation to consider CA in patients who have LVH on echocardiogram yet have small QRS complexes. Studies in both AL and ATTR CA populations have demonstrated the presence of low voltage-mass ratio. However, when comparing AL CA to ATTR CA, the mean voltage-to-mass ratio was higher in patients in ATTR CA. The study by Rapezzi et al. looked at the difference in voltage-mass ratio between AL CA and ATTR CA. ATTR CA patients less often displayed low voltage-to-mass ratio (1.1 ± 0.5 in ATTR CA versus 0.9 ± 0.5 in AL CA; p < .0001). The finding of a higher mean voltage-mass ratio in ATTR CA patients is in keeping with the lower prevalence of low QRS complexes in patients with ATTR CA. Another consideration is the use of the total QRS score (defined by the sum of all QRS voltages in all leads) divided by the left ventricular mass index. This measure was found to be rather sensitive (81%-87%) and specific (79%-82%) in differentiating CA from other conditions that result in LVH, such as hypertensive heart disease and hypertrophic cardiomyopathy. This finding was also seen in another study by Perlini et al. | ECG in presence of background chronic pressure overload It would be interesting to study the ECG patterns of patients with CA who have a concomitant background of chronic pressure overload. There is growing recognition of an increasing prevalence of patients with ATTR CA in patients diagnosed with aortic stenosis (AS). Some studies report a prevalence of 25% in patients with AS, especially in older patients, and this is also associated with worse outcomes. In the study by Castano et al.,
patients with AS diagnosed with ATTR CA, compared with those with isolated AS, had longer QRS duration (127 vs. 110 ms, p = .017) and higher prevalence of right bundle branch block (37.5% vs. 15.8%, p = 0.023). The most common arrhythmia in a reported series was AF, present in 41.7%-67% of patients with concomitant AS and ATTR CA (Sperry, Jones, et al., 2016). Even though AF is a frequent finding in patients with AS, when studying patients with both ATTR CA and AS, the prevalence of atrial arrhythmias was significantly higher: 67% in the ATTR CA group vs. 20.2% in the isolated AS group, p = .006. Another study showed that voltage-mass ratio was lower in patients with CA and AS than in those with isolated AS (0.9 × 10⁻² mV/g/m² (0.6-1.6) vs. 1.6 × 10⁻² mV/g/m² (1.1-2.3); p = .001). The classical ECG findings of patients with hypertension include an LVH pattern. Criteria to establish LVH in the literature are numerous, and the Cornell criteria and the Sokolow-Lyon criteria are commonly used to define LVH on ECG. CA leads to increased left ventricular wall thickness due to the abnormal deposition of amyloid fibrils in the ventricular wall. However, the presence of LVH on ECG in patients with CA is low, especially in patients with AL CA as they have a higher prevalence of low voltage complexes. With regard to the ATTR CA population, the majority of patients do not have an LVH pattern on their ECGs, but some studies do show that there is a small proportion of patients (12%-16%) who may have an LVH pattern. | Additional investigations to aid in diagnosis of CA Electrocardiogram is not a sensitive modality for diagnosing CA and its subtypes. Other non-invasive imaging modalities are available to aid clinicians in diagnosing ATTR CA. Echocardiography plays a major role in non-invasive diagnosis of cardiac amyloidosis due to its ability to assess the structure and function of the heart. As described in our case series, the presence of apical sparing on strain imaging is a useful tool to aid in the diagnosis of CA. Strain imaging has the ability to refine the non-invasive recognition of cardiac amyloidosis by quantitating longitudinal systolic function. Patients with CA demonstrate a typical pattern of distribution in which basal LV segments are severely impaired while apical segments are relatively spared ("Cherry-on-the-top" sign on longitudinal strain bullseye map). Therefore, strain imaging should be performed in patients with unexplained LVH. Cardiovascular magnetic resonance has a central role in the non-invasive diagnosis of CA due to its ability to provide tissue characterization in addition to high-resolution morphologic and functional assessment. The common findings on CMR in patients with CA are global subendocardial and transmural late gadolinium enhancement (LGE). Both patterns are present in AL and ATTR CA, but to different extents, with subendocardial LGE being more prevalent in AL and transmural LGE more prevalent in ATTR CA. Other findings of CA include nulling of the myocardium before or at the same inversion time as the blood pool, LGE of the left atrial wall, and extensive extracellular volume expansion, which are combined with structural findings of increased wall thickness and myocardial mass. CMR, however, is unable to definitively distinguish AL from ATTR CA and should be combined with electrocardiographic, clinical, biomarker, and other imaging findings to maximize diagnostic accuracy.
Radionuclide imaging plays a unique role in the non-invasive diagnosis of CA. A variety of 99m Tc-labeled diphosphonate and PYP (bone-avid) compounds diagnose ATTR CA with high sensitivity and specificity. With a negative myeloma screen (absence of monoclonal protein using urine and serum, with serum-free light chain assay and immunofixation electrophoresis) and a positive finding of grade 2 or 3 myocardial radiotracer uptake on bone scintigraphy, a positive predictive value for ATTR CA of 100% (95% CI, 98.0%-100%) is achieved, thus obviating the need for an endomyocardial biopsy. The study by Bokhari et al. also showed that a semiquantitative heart-to-contralateral ratio of ≥1.5 is another accurate marker to diagnose ATTR CA if AL has been excluded through a negative myeloma screen. | CONCLUSION With the recent introduction of novel strategies to inhibit amyloid fibril formation (such as tafamidis for ATTR CA), the need for early diagnosis of ATTR CA through non-invasive methods is ever more important. Using our case examples, we showed that using the "classical finding" of low ECG voltage complexes as a diagnostic tool for CA is not ideal. Large-voltage complexes should not prevent further investigation for infiltrative cardiomyopathy either. If clinical suspicion for CA is high, there are other non-invasive methods such as strain imaging on echocardiography, CMR and nuclear scintigraphy that can aid us in earlier detection of this sinister disease. CONFLICT OF INTEREST There is no conflict of interest for any author. AUTHORS' CONTRIBUTIONS Perryn Lin Fei Ng contributed to the conception of the article, acquisition of data, data analysis, data interpretation, and manuscript drafting and revisions. Weiqin Lin contributed to the acquisition of data, data analysis, data interpretation, as well as manuscript drafting and revisions. Yoke Ching Lim, Lauren Kay Mance Evangelista, Raymond Ching Chiew Wong, Ping Chai, Ching Hui Sia, Hoi Yin Loi and Tiong Cheng Yeo contributed to the data interpretation as well as manuscript drafting and revisions. DATA AVAILABILITY STATEMENT The data that support the findings of this study are available on request from the corresponding author. The data are not publicly available due to privacy or ethical restrictions. ETHICS APPROVAL AND CONSENT TO PARTICIPATE Ethics approval is waived as per institutional guidelines. CONSENT FOR PUBLICATION Written informed consent was obtained from the patient for publication of this case report and any accompanying images. A copy of the written consent is available for review by the Editor of this journal. DATA AVAILABILITY STATEMENT Deidentified data can be made available by the corresponding author, upon reasonable request.
package sunggyu.algorism.condigTest1.implement;
import java.io.*;
import java.util.*;
//https://www.acmicpc.net/problem/16927
//Rotate Array 2
/*
Pseudocode
1. Extract, in clockwise order, the list of values along each border ring of the rectangle.
2. Shift each list by r (add r to the index) and write the values back into rectangle.
*/
public class Implement3{
static int n;
static int m;
static int r;
static int[][] rectangle;
//direction vectors: east, south, west, north
static int[][] directions = {{0,1},{1,0},{0,-1},{-1,0}};
public static void main(String[] args) throws Exception {
BufferedReader bf = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(System.out));
String[] nmr = bf.readLine().split(" ");
n = Integer.parseInt(nmr[0]);
m = Integer.parseInt(nmr[1]);
r = Integer.parseInt(nmr[2]);
rectangle = new int[n][m];
for(int i = 0; i < n; i++){
String[] split = bf.readLine().split(" ");
for(int j = 0; j < m; j++){
rectangle[i][j] = Integer.parseInt(split[j]);
}
}
rotate();
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
bw.write(rectangle[i][j] + " ");
}
bw.newLine();
}
bw.flush();
bw.close();
}
public static void rotate(){
List<List<Integer>> sidesOfRectangle = getSidesOfRectangle();
Scope scope = new Scope(0, 0, n, m);
for(List<Integer> sides : sidesOfRectangle){
swap(scope, sides);
scope.next();
}
}
public static void swap(Scope scope, List<Integer> sides){
int nowX = scope.startX;
int nowY = scope.startY;
int index = r;
for(int[] direct : directions){
//tricky part: walk along this side of the ring, writing the shifted values, until the next step would leave the ring
while(true){
int nx = nowX + direct[0];
int ny = nowY + direct[1];
if(!scope.canVisit(nx, ny)) break;
rectangle[nowX][nowY] = sides.get(index % sides.size());
index++;
nowX = nx;
nowY = ny;
}
}
}
public static List<List<Integer>> getSidesOfRectangle(){
List<List<Integer>> getSidesOfRectangle = new ArrayList<>();
Scope scope = new Scope(0, 0, n, m);
while(true){
if(!scope.isValid()) break;
getSidesOfRectangle.add(getSides(scope));
scope.next();
}
return getSidesOfRectangle;
}
public static List<Integer> getSides(Scope scope){
List<Integer> sides = new ArrayList<>();
int nowX = scope.startX;
int nowY = scope.startY;
for(int[] direct : directions){
while(true){
int nx = nowX + direct[0];
int ny = nowY + direct[1];
if(!scope.canVisit(nx, ny)) break;
sides.add(rectangle[nowX][nowY]);
nowX = nx;
nowY = ny;
}
}
return sides;
}
public static class Scope{
int startX;
int startY;
int endX;
int endY;
public Scope(int startX, int startY, int endX, int endY){
this.startX = startX;
this.startY = startY;
this.endX = endX;
this.endY = endY;
}
public void next(){
startX++;
startY++;
endX--;
endY--;
}
public boolean isValid(){
if(startX >= endX || startY >= endY) return false;
return true;
}
public boolean canVisit(int x, int y){
if(x < startX || x >= endX || y < startY || y >= endY) return false;
return true;
}
}
}
|
Police shot co-owner of home, not burglar
Say he confronted them with gun
EVERETT -- The owner of an Everett house where a 31-year-old man was shot and killed by police early Saturday says the man was a co-owner of the house and lived there.
"It's devastating. He didn't deserve to die," Bear Whalen told The Everett Herald.
Whalen described the slain man as his friend who was a good person who cared about his community and was a volunteer.
"He never even got a ticket. He respected the law," Whalen told the newspaper.
Three Everett police officers responding to a report of a burglary shot and killed Whalen's roommate at the home. Investigators found a shotgun next to the man's body, officials reported Sunday.
Snohomish County sheriff's spokeswoman Rebecca Hover said someone in the neighborhood called police just before 2 a.m. Saturday to report that someone was breaking windows and kicking in the door of a nearby house. Hover said three officers arrived and said they were confronted by a man with a gun, standing in the doorway of the home.
Officers said they repeatedly ordered the man to drop his weapon but he refused. The three officers fired multiple shots at the man. Hover said he died at the scene.
The Snohomish County Medical Examiner's Office had not released the slain man's name.
The officers involved in the shooting have been placed on paid administrative leave, while a team of detectives from throughout the county investigates. The officers were a 24-year-old woman who has been with the Everett Police Department for 2 1/2 years, a 33-year-old man who has been with the department for 2 years and a 29-year-old man who has been with the department for 1 1/2 years, Hover said.
Whalen, 28, said he went with the man who was slain with some friends to a bar Friday night.
Whalen said his roommate went home about 1 a.m., while he said he spent the night at a friend's house.
Gunnar Nelson, 26, a neighbor, told The Herald: "I just wish my friend was back. I don't know how long it's going to take to get over this." |
package com.example.chatapplication;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.view.View;
public class MainActivity extends AppCompatActivity {
private final int registerNewUser = 205;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
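// Immediately launch the registration screen; its result (the new user's ID)
// is delivered back to onActivityResult below via the registerNewUser request code.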
Intent intent = new Intent(this, RegisterActivity.class);
startActivityForResult(intent, registerNewUser);
}
/*
public void registerNewUser(View view) {
Intent intent = new Intent(this, UserDatabase.class);
Bundle extras = new Bundle();
extras.putInt("requestCode", getSpecificUserCode);
extras.putString("email", "<EMAIL>");
intent.putExtras(extras);
startActivityForResult(intent, getSpecificUserCode);
}
*/
public void returnToPrevious(String userID) {
Intent result = new Intent(); //create an instance of an Intent object.
result.setData(Uri.parse(userID)); //set the value/data to pass back
setResult(RESULT_OK, result); //set a result code, It is either RESULT_OK or RESULT_CANCELLED
finish(); //Close the activity
}
// When activity exits after being called from startActivityForResult(intent, requestCode).
// it returns here, and by matching the requestCodes we can figure out where & what the result is
public void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == registerNewUser) { // Specific user
if (resultCode == RESULT_OK) {
String dataString = data.getData().toString();
}
}
}
} |
/**
* Test to update container storage usage monthly base.
*/
@Test
public void testFetchMonthlyStorageUsage() throws Exception {
MockTime mockTime = new MockTime(SystemTime.getInstance().milliseconds());
MySqlStorageUsageRefresher.time = mockTime;
try {
String currentMonth = MySqlStorageUsageRefresher.getCurrentMonth();
Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
accountStatsMySqlStore.storeAggregatedAccountStats(snapshot);
accountStatsMySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(currentMonth);
properties.remove(StorageQuotaConfig.REFRESHER_POLLING_INTERVAL_MS);
StorageQuotaConfig storageQuotaConfig = new StorageQuotaConfig(new VerifiableProperties(properties));
AccountStatsMySqlStore newAccountStatsMySqlStore = createAccountStatsMySqlStore();
MySqlStorageUsageRefresher refresher =
new MySqlStorageUsageRefresher(newAccountStatsMySqlStore, scheduler, storageQuotaConfig, metrics);
refresher.fetchStorageUsageMonthlyBase();
assertEquals(containerStorageUsages, refresher.getContainerStorageUsageMonthlyBase());
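// Store a new set of usages under a month that is not the current one; the refresher should ignore it and keep the old base.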
String notCurrentMonth = "1970-01";
Map<String, Map<String, Long>> newContainerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(newContainerStorageUsages);
accountStatsMySqlStore.storeAggregatedAccountStats(snapshot);
accountStatsMySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(notCurrentMonth);
refresher.fetchStorageUsageMonthlyBase();
assertEquals(containerStorageUsages, refresher.getContainerStorageUsageMonthlyBase());
// Change the snapshot month back to the current month; after the retry backoff the refresher should pick up the new usages.
accountStatsMySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(currentMonth);
Thread.sleep(MYSQL_RETRY_BACKOFF_MS * 2);
assertEquals(newContainerStorageUsages, refresher.getContainerStorageUsageMonthlyBase());
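// Advance the mock time past the next monthly tick so the refresher fetches the base for the new month.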
mockTime.sleep((MySqlStorageUsageRefresher.secondsToNextMonthTick(currentMonth,
storageQuotaConfig.mysqlMonthlyBaseFetchOffsetSec) + 10) * 1000);
String nextMonth = MySqlStorageUsageRefresher.getCurrentMonth();
Function<String, Integer> stringMonthToInteger = (monthInStr) -> {
String[] parts = monthInStr.split("-");
int year = Integer.parseInt(parts[0]);
int month = Integer.parseInt(parts[1]);
return year * 12 + month;
};
assertEquals(stringMonthToInteger.apply(currentMonth) + 1, (int) stringMonthToInteger.apply(nextMonth));
Map<String, Map<String, Long>> nextContainerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(nextContainerStorageUsages);
accountStatsMySqlStore.storeAggregatedAccountStats(snapshot);
accountStatsMySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(nextMonth);
refresher.fetchStorageUsageMonthlyBase();
assertEquals(nextContainerStorageUsages, refresher.getContainerStorageUsageMonthlyBase());
assertEquals(nextContainerStorageUsages, refresher.getBackupFileManager().getBackupFileContent(nextMonth));
} finally {
MySqlStorageUsageRefresher.time = SystemTime.getInstance();
}
} |
Nelder-Mead Simplex Search Method - A Study. In n dimensions the Nelder-Mead method maintains a set of n + 1 test points, called a simplex, where n is the number of input dimensions. At each iteration the objective function is evaluated at the test points, a new test point is generated, and one of the old points is replaced by it, so the simplex gradually moves toward the minimum. Nelder-Mead is one of the most popular derivative-free methods: it uses only the values of f and progresses solely by reflecting, expanding and contracting the simplex of n + 1 points in promising directions. Strictly speaking, Nelder-Mead is not a truly global optimization algorithm, but in practice it works reasonably well for many problems. It belongs to the family of direct-search methods, which solve optimization problems without any gradient information about the objective function; pattern-search algorithms of this kind compute a sequence of points that approaches an optimal point. The presence of local optima is a key factor in the difficulty of global optimization, because it is relatively easy to improve a solution locally and relatively difficult to escape a local optimum. Gradient descent (GD), by contrast, is a first-order optimization algorithm commonly used to train machine-learning (ML) and deep-learning (DL) models, including neural networks: it repeatedly updates the parameters in the direction that decreases a cost/loss function, measuring progress at each update, until a satisfactory solution is found. With the advent of computers, optimization has also become part of computer-aided design activities. Problems in which derivatives are unavailable or unreliable are referred to as derivative-free optimization, and algorithms that use neither derivatives nor finite-difference approximations of them are called derivative-free algorithms. A minimal sketch of the Nelder-Mead update loop is given below.
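The following Python sketch illustrates one way to implement the reflect/expand/contract/shrink loop described above. It is only a sketch under stated assumptions: the coefficients alpha, gamma, rho and sigma, the stopping rule, the dependence on numpy and the Rosenbrock test function are illustrative choices, not details taken from the study.
import numpy as np

def nelder_mead(f, x0, step=0.5, max_iter=500, tol=1e-8,
                alpha=1.0, gamma=2.0, rho=0.5, sigma=0.5):
    # Initial simplex: x0 plus one extra point per coordinate, offset by `step`.
    n = len(x0)
    simplex = [np.array(x0, dtype=float)]
    for i in range(n):
        p = np.array(x0, dtype=float)
        p[i] += step
        simplex.append(p)
    for _ in range(max_iter):
        simplex.sort(key=f)                       # best vertex first
        best, worst = simplex[0], simplex[-1]
        if abs(f(worst) - f(best)) < tol:
            break
        centroid = np.mean(simplex[:-1], axis=0)  # centroid of all vertices except the worst
        reflected = centroid + alpha * (centroid - worst)
        if f(best) <= f(reflected) < f(simplex[-2]):
            simplex[-1] = reflected               # accept the reflection
        elif f(reflected) < f(best):
            expanded = centroid + gamma * (reflected - centroid)
            simplex[-1] = expanded if f(expanded) < f(reflected) else reflected
        else:
            contracted = centroid + rho * (worst - centroid)
            if f(contracted) < f(worst):
                simplex[-1] = contracted          # accept the contraction
            else:
                # Shrink the whole simplex toward the best vertex.
                simplex = [best + sigma * (p - best) for p in simplex]
    return min(simplex, key=f)

# Example: minimize the Rosenbrock function, whose minimizer is (1, 1).
rosenbrock = lambda x: (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2
print(nelder_mead(rosenbrock, [-1.2, 1.0]))
Running the sketch should print a point near the minimizer (1, 1) of the Rosenbrock function.
|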
# For each test case, count the minimum number of moves (dividing the larger of
# a and b by 8, 4 or 2) needed to make the two numbers equal; print -1 if impossible.
testy = int(input())
results = []
for i in range(testy):
a, b = map(int, input().split())
if a == b:
results.append(0)
elif a > b:
wyk = 0
tmp = a
while tmp > b:
if tmp % 2 == 0:
tmp = tmp // 2
wyk += 1
else:
break
op = [0, 0, 0]
op[0] = wyk // 3
op[1] = (wyk - 3 * op[0]) // 2
op[2] = wyk - 3 * op[0] - 2 * op[1]
if tmp == b:
results.append(sum(op))
else:
results.append(-1)
elif b > a:
wyk = 0
tmp = b
while tmp > a:
if tmp % 2 == 0:
tmp = tmp // 2
wyk += 1
else:
break
op = [0, 0, 0]
op[0] = wyk // 3
op[1] = (wyk - 3 * op[0]) // 2
op[2] = wyk - 3 * op[0] - 2 * op[1]
if tmp == a:
results.append(sum(op))
else:
results.append(-1)
for result in results:
print(result)
|
X = int(input())
if X >= 2100:
    print(1)
else:
    # n is the number of full hundreds in X; print 1 only if the leftover
    # (X - n * 100) is at most 5 per hundred.
    n = X // 100
    if X - n * 100 <= n * 5:
        print(1)
    else:
        print(0)
|
Compass: What do you do in your time off at home in Tel Aviv?
Yonatan Gat: We're actually writing new songs right now. We just rented this nice space for ourselves in the center of the city. There's a nice roof, we sit there all day, and write songs in our studio downstairs. Otherwise Ami and Haggai have more of a family life, I go out a lot, see my friends and family. Try to take it easy until the next tour or recordings.
Compass: You played the Harvest of Hope Festival last year. How was that?
Y.G.: Was just a fun show. I remember we drove overnight from Orlando and couldn't find a hotel cuz a lot of bikers were staying in the area. I called the promoters up at 4 a.m. asking them to find us a hotel. Good chance to apologize! Hey, promoters! Sorry for that. Show was really fun. It was sunny, there was a lot of people and they seemed to be going crazy and really into what we're doing.
Compass: You're banned from half the venues in Israel. Are they warming up to you?
Y.G.: More people seem to like us in Israel now that they read about us from media outlets outside of Israel, and I guess venues are opening up to crazier and louder rock 'n' roll over there, but we don't really know because we don't really play shows in Israel as much.
Monotonix performs with Surfer Blood and BLORR at 8:30 p.m. Jan. 25 in Café Eleven, 501 A1A Beach Blvd. Tickets are $8. Call 460-9311 or go to cafeeleven.com. |
Sub-word Modeling for Automatic Speech Recognition Modern automatic speech recognition systems handle large vocabularies of words, making it infeasible to collect enough repetitions of each word to train individual word models. Instead, large-vocabulary recognizers represent each word in terms of sub-word units. Typically the sub-word unit is the phone, a basic speech sound such as a single consonant or vowel. Each word is then represented as a sequence, or several alternative sequences, of phones specified in a pronunciation dictionary. Other choices of sub-word units have been studied as well. The choice of sub-word units, and the way in which the recognizer represents words in terms of combinations of those units, is the problem of sub-word modeling. Different sub-word models may be preferable in different settings, such as high-variability conversational speech, high-noise conditions, low-resource settings, or multilingual speech recognition. This article reviews past, present, and emerging approaches to sub-word modeling. In order to make clean comparisons between many approaches, the review uses the unifying language of graphical models. |