content
stringlengths 7
2.61M
|
---|
<filename>Source/Shared/WinRT/AppConfiguration_WinRT.cpp
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#include "pch.h"
#include "AppConfiguration_WinRT.h"
using namespace Concurrency;
using namespace Platform;
using namespace std;
NAMESPACE_MICROSOFT_XBOX_SERVICES_BEGIN
XboxLiveAppConfiguration^ XboxLiveAppConfiguration::SingletonInstance::get()
{
    // Wrap the shared native app-config singleton in a fresh WinRT projection
    // on every access; the projection holds its own shared_ptr reference.
    auto nativeConfig = xbox::services::xbox_live_app_config::get_app_config_singleton();
    return ref new XboxLiveAppConfiguration(std::move(nativeConfig));
}
// Constructs the WinRT wrapper around the shared native app-config object.
// The shared_ptr is taken by value and moved into the member, so the wrapper
// participates in the native object's shared ownership.
XboxLiveAppConfiguration::XboxLiveAppConfiguration(
    _In_ std::shared_ptr<xbox::services::xbox_live_app_config> cppObj
) :
    m_cppObj(std::move(cppObj))
{
}
NAMESPACE_MICROSOFT_XBOX_SERVICES_END
|
<gh_stars>1-10
// Shared UI constants (single source of truth for theme values).
const constants = {
  // Default page background colour (dark navy).
  bgColor: '#171922',
};
export default constants;
|
package cloud.mobe.utils.excel;
import static cloud.mobe.utils.CheckEmptyUtil.isEmpty;
import static cloud.mobe.utils.CheckEmptyUtil.isNotEmpty;
import static com.google.common.collect.Lists.newArrayListWithExpectedSize;
import static com.google.common.collect.Maps.newHashMapWithExpectedSize;
import static java.util.Collections.EMPTY_LIST;
import cloud.mobe.utils.excel.annotation.ExcelColumn;
import cloud.mobe.utils.excel.annotation.ExcelFile;
import cloud.mobe.utils.excel.annotation.ExcelHeader;
import cloud.mobe.utils.excel.dto.ExcelItemAnnotationInfo;
import cloud.mobe.utils.excel.dto.ExcelStructureInfo;
import cloud.mobe.utils.excel.dto.ExcelStructureInfo.DataField;
import cloud.mobe.utils.excel.dto.ExcelStructureInfo.HeaderField;
import cloud.mobe.utils.exception.MobeServiceException;
import java.beans.IntrospectionException;
import java.beans.PropertyDescriptor;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.poifs.filesystem.OfficeXmlFileException;
import org.apache.poi.ss.usermodel.BorderStyle;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.CellType;
import org.apache.poi.ss.usermodel.CellValue;
import org.apache.poi.ss.usermodel.DateUtil;
import org.apache.poi.ss.usermodel.FillPatternType;
import org.apache.poi.ss.usermodel.FormulaEvaluator;
import org.apache.poi.ss.usermodel.IndexedColors;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.ss.util.CellRangeAddress;
import org.apache.poi.ss.util.CellUtil;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.springframework.web.multipart.MultipartFile;
/**
* Excel文档工具.
*
* @author <EMAIL>
* @since 2020/4/13 22:45
*/
@Slf4j
public class ExcelUtil {

    /**
     * Builds an {@link ExcelDetail} (workbook + mapping metadata) from an uploaded file.
     *
     * @param file            the uploaded file; when null an empty workbook is created
     * @param definitionClass entity class mapped to the file's columns
     * @param <T>             concrete excel definition type
     * @return the excel detail holding the workbook, evaluator and mapping metadata
     * @throws IOException if the file's input stream cannot be read
     */
    public static <T extends AbstractExcelDefinition> ExcelDetail<T> build(
            MultipartFile file, Class<T> definitionClass) throws IOException {
        ExcelDetail<T> excelDetail = build(definitionClass);
        Workbook workbook;
        if (file != null) {
            String fileName = file.getOriginalFilename();
            fileName = isEmpty(fileName) ? "" : fileName.trim();
            excelDetail.setFileName(fileName);
            String fileNameLower = fileName.toLowerCase();
            try {
                if (fileNameLower.endsWith(".xlsx")) {
                    workbook = new XSSFWorkbook(file.getInputStream());
                } else if (fileNameLower.endsWith(".xls")) {
                    workbook = new HSSFWorkbook(file.getInputStream());
                } else {
                    // Unknown extension: fall back to an empty XLSX workbook.
                    workbook = new XSSFWorkbook();
                    log.warn("未知的文件类型,文件名 - {}", fileName);
                }
            } catch (OfficeXmlFileException e) {
                // Content does not match the extension (e.g. an .xls renamed to .xlsx).
                log.warn("文件类型错误 - {}", e.getMessage(), e);
                throw new MobeServiceException("文件后缀和内容不统一");
            }
        } else {
            workbook = new XSSFWorkbook();
            excelDetail.setFileName("");
            log.warn("读入文件流为空");
        }
        excelDetail.setWorkbook(workbook);
        excelDetail.setEvaluator(workbook.getCreationHelper().createFormulaEvaluator());
        return excelDetail;
    }

    /**
     * Builds the excel mapping metadata from the definition class alone.
     *
     * @param definitionClass annotated definition class; may be null
     * @param <T>             concrete excel definition type
     * @return the excel detail (empty when definitionClass is null)
     */
    private static <T extends AbstractExcelDefinition> ExcelDetail<T> build(
            Class<T> definitionClass) {
        ExcelDetail<T> excelDetail = new ExcelDetail<>();
        if (definitionClass == null) {
            return excelDetail;
        }
        excelDetail.setDefinitionClass(definitionClass);
        ExcelFile excelFile = definitionClass.getAnnotation(ExcelFile.class);
        if (excelFile == null) {
            // Fail fast with a clear message instead of a NullPointerException below.
            throw new MobeServiceException(
                    "Missing @ExcelFile annotation on " + definitionClass.getName());
        }
        excelDetail.setFileName(excelFile.value() + "-示例.xlsx");
        excelDetail.setSheetName(excelFile.sheet());
        // Number of rows occupied by the header (1-based header row index).
        int rowNum = excelFile.headerIndex();
        if (rowNum < 1) {
            throw new MobeServiceException("[ExcelFile.headerIndex()]表格头位置不能小于1");
        }
        excelDetail.setRowNum(rowNum);
        Field[] declaredFields = definitionClass.getDeclaredFields();
        List<Object[]> annotationFields = newArrayListWithExpectedSize(declaredFields.length);
        for (Field declaredField : declaredFields) {
            ExcelColumn fieldAnnotation = declaredField.getAnnotation(ExcelColumn.class);
            if (fieldAnnotation == null) {
                continue;
            }
            try {
                PropertyDescriptor propertyDescriptor = new PropertyDescriptor(
                        declaredField.getName(), definitionClass);
                // [column index, setter, column name, field type]
                annotationFields.add(new Object[]{fieldAnnotation.index(),
                        propertyDescriptor.getWriteMethod(), fieldAnnotation.name(),
                        declaredField.getType()});
            } catch (IntrospectionException e) {
                throw new MobeServiceException("未找到字段", e);
            }
        }
        excelDetail.setAnnotationFields(annotationFields);
        return excelDetail;
    }

    /**
     * Reads a cell value and converts it to the requested entity field type.
     *
     * @param evaluator formula evaluator used for FORMULA cells
     * @param cell      the cell to read
     * @param clz       target type declared on the entity field
     * @return the converted value, or null when the cell holds no usable value
     */
    public static Object getCellValue(
            FormulaEvaluator evaluator, Cell cell, Class<?> clz) {
        Object cellValue = null;
        try {
            CellType cellType = cell.getCellType();
            if (log.isDebugEnabled()) {
                log.debug("cell type - {}", cellType);
            }
            if (cellType == CellType.NUMERIC) {
                if (DateUtil.isCellDateFormatted(cell)) {
                    cellValue = cell.getDateCellValue();
                } else {
                    // Read numbers through their string form; decimal values are
                    // normalised via Double, integral values kept as-is.
                    cell.setCellType(CellType.STRING);
                    String tempCellValue = cell.getStringCellValue();
                    if (tempCellValue.contains(".")) {
                        cellValue = String.valueOf(Double.valueOf(tempCellValue)).trim();
                    } else {
                        cellValue = tempCellValue.trim();
                    }
                }
            } else if (cellType == CellType.STRING) {
                cellValue = cell.getStringCellValue();
            } else if (cellType == CellType.FORMULA) {
                CellValue formulaCellValue = evaluator.evaluate(cell);
                boolean isNumeric = formulaCellValue.getCellType() == CellType.NUMERIC;
                cellValue = isNumeric
                        ? formulaCellValue.getNumberValue()
                        : formulaCellValue.getStringValue();
                // When evaluation reports 0.0, fall back to the cell's cached
                // numeric value (kept from the original logic).
                if (isNumeric && cellValue.toString().equals("0.0")) {
                    cellValue = cell.getNumericCellValue();
                }
            } else if (cellType == CellType.BOOLEAN) {
                cellValue = cell.getBooleanCellValue();
            } else if (cellType == CellType.ERROR) {
                cellValue = cell.getErrorCellValue();
            }
            if (cellValue == null) {
                return null;
            }
            if (log.isDebugEnabled()) {
                log.debug("cell value - {} - {}", cellValue, cellValue.getClass());
            }
            // Convert to the entity field's declared type.
            if (clz == String.class) {
                cellValue = cellValue.toString();
            } else if (clz == Integer.class) {
                cellValue = Double.valueOf(cellValue.toString()).intValue();
            } else if (clz == Long.class) {
                cellValue = Double.valueOf(cellValue.toString()).longValue();
            } else if (clz == Double.class) {
                cellValue = Double.valueOf(cellValue.toString());
            } else if (clz == Float.class) {
                cellValue = Float.valueOf(cellValue.toString());
            } else if (clz == BigDecimal.class) {
                cellValue = new BigDecimal(cellValue.toString());
            }
            return cellValue;
        } catch (NumberFormatException | IllegalStateException e) {
            log.warn("数据格式错误:第{}行,第{}列",
                    (cell.getRowIndex() + 1), (cell.getColumnIndex() + 1), e);
            throw new MobeServiceException(String.format(
                    "数据格式错误:第%s行,第%s列",
                    (cell.getRowIndex() + 1), (cell.getColumnIndex() + 1)));
        }
    }

    /**
     * Reads the excel structure (header rows and data columns) declared by
     * annotations on the given class.
     *
     * @param clz annotated excel definition class
     * @return the structure info
     * @throws IntrospectionException if a property accessor cannot be resolved
     */
    public static ExcelStructureInfo getExcelStructureInfo(Class<?> clz)
            throws IntrospectionException {
        ExcelStructureInfo structureInfo = new ExcelStructureInfo();
        // Sheet header rows (@ExcelHeader fields).
        List<ExcelItemAnnotationInfo<ExcelHeader>> headerList =
                ExcelUtil.getAnnotationInfo(clz, ExcelHeader.class);
        if (isNotEmpty(headerList)) {
            List<HeaderField> headerFields = newArrayListWithExpectedSize(headerList.size());
            for (ExcelItemAnnotationInfo<ExcelHeader> headerAnnotationInfo : headerList) {
                HeaderField headerField = new HeaderField();
                ExcelHeader excelHeader = headerAnnotationInfo.getAnnotation();
                headerField.setIndex(excelHeader.index());
                // The header field's type carries nested @ExcelColumn metadata.
                List<ExcelItemAnnotationInfo<ExcelColumn>> columnList =
                        ExcelUtil.getAnnotationInfo(
                                headerAnnotationInfo.getFieldType(), ExcelColumn.class);
                if (isNotEmpty(columnList)) {
                    List<DataField> dataFields = newArrayListWithExpectedSize(columnList.size());
                    for (ExcelItemAnnotationInfo<ExcelColumn> columnAnnotationInfo : columnList) {
                        ExcelColumn excelColumn = columnAnnotationInfo.getAnnotation();
                        DataField dataField = new DataField();
                        dataField.setIndex(excelColumn.index());
                        dataField.setName(excelColumn.name());
                        dataField.setColspan(excelColumn.colspan());
                        dataField.setRowspan(excelColumn.rowspan());
                        dataField.setFontColor(excelColumn.fontColor());
                        // CellUtil currently exposes 19 style keys; size map for 20.
                        Map<String, Object> headerCellStyle = newHashMapWithExpectedSize(20);
                        headerCommonCellStyle(headerCellStyle);
                        headerCellStyle.put(CellUtil.FILL_FOREGROUND_COLOR,
                                excelColumn.bgColor().getIndex());
                        headerCellStyle.put(CellUtil.VERTICAL_ALIGNMENT, excelColumn.vertical());
                        headerCellStyle.put(CellUtil.ALIGNMENT, excelColumn.horizontal());
                        dataField.setHeaderCellStyle(headerCellStyle);
                        dataFields.add(dataField);
                    }
                    headerField.setColumnFields(dataFields);
                }
                headerFields.add(headerField);
            }
            structureInfo.setHeaderFields(headerFields);
        }
        // Data columns (@ExcelColumn fields declared on the class itself).
        List<ExcelItemAnnotationInfo<ExcelColumn>> columnList =
                ExcelUtil.getAnnotationInfo(clz, ExcelColumn.class);
        if (isNotEmpty(columnList)) {
            List<DataField> dataFields = newArrayListWithExpectedSize(columnList.size());
            for (ExcelItemAnnotationInfo<ExcelColumn> columnAnnotationInfo : columnList) {
                PropertyDescriptor propertyDescriptor = new PropertyDescriptor(
                        columnAnnotationInfo.getFieldName(), clz);
                ExcelColumn excelColumn = columnAnnotationInfo.getAnnotation();
                DataField dataField = new DataField();
                dataField.setIndex(excelColumn.index());
                dataField.setWidth(excelColumn.width());
                dataField.setReadMethod(propertyDescriptor.getReadMethod());
                dataField.setName(excelColumn.name());
                dataField.setFontColor(excelColumn.fontColor());
                // Header style for this column.
                Map<String, Object> headerCellStyle = newHashMapWithExpectedSize(20);
                headerCommonCellStyle(headerCellStyle);
                headerCellStyle.put(CellUtil.ALIGNMENT, excelColumn.horizontal());
                headerCellStyle.put(CellUtil.VERTICAL_ALIGNMENT, excelColumn.vertical());
                headerCellStyle.put(CellUtil.FILL_FOREGROUND_COLOR,
                        excelColumn.bgColor().getIndex());
                headerCellStyle.put(CellUtil.FILL_PATTERN, FillPatternType.SOLID_FOREGROUND);
                dataField.setHeaderCellStyle(headerCellStyle);
                // Data-cell style for this column.
                Map<String, Object> dataCellStyle = newHashMapWithExpectedSize(20);
                dataCellStyle.put(CellUtil.ALIGNMENT, excelColumn.horizontal());
                dataCellStyle.put(CellUtil.VERTICAL_ALIGNMENT, excelColumn.vertical());
                dataCellStyle.put(CellUtil.FILL_FOREGROUND_COLOR, IndexedColors.WHITE.getIndex());
                dataCellStyle.put(CellUtil.FILL_PATTERN, FillPatternType.SOLID_FOREGROUND);
                dataCellStyle.put(CellUtil.WRAP_TEXT, true);
                commonCellStyle(dataCellStyle);
                dataField.setDataCellStyle(dataCellStyle);
                dataFields.add(dataField);
            }
            structureInfo.setDataFields(dataFields);
        }
        return structureInfo;
    }

    /**
     * Collects fields of a class annotated with the given annotation.
     *
     * @param clz        class to inspect (declared fields only, no inherited fields)
     * @param annotation annotation type to look for
     * @param <A>        annotation type
     * @return annotation info per annotated field; the shared empty list when none
     */
    public static <A extends Annotation> List<ExcelItemAnnotationInfo<A>> getAnnotationInfo(
            Class<?> clz, Class<A> annotation) {
        Field[] declaredFields = clz.getDeclaredFields();
        if (isEmpty(declaredFields)) {
            return EMPTY_LIST;
        }
        if (log.isDebugEnabled()) {
            log.debug("declaredFields - {}, length - {}", clz, declaredFields.length);
        }
        List<ExcelItemAnnotationInfo<A>> annotationFieldList = newArrayListWithExpectedSize(
                declaredFields.length);
        for (Field declaredField : declaredFields) {
            if (declaredField.isAnnotationPresent(annotation)) {
                ExcelItemAnnotationInfo<A> itemAnnotationInfo = new ExcelItemAnnotationInfo<>();
                // Field name, declared type and the annotation instance itself.
                itemAnnotationInfo.setFieldName(declaredField.getName());
                itemAnnotationInfo.setFieldType(declaredField.getType());
                itemAnnotationInfo.setAnnotation(declaredField.getAnnotation(annotation));
                annotationFieldList.add(itemAnnotationInfo);
            }
        }
        if (isEmpty(annotationFieldList)) {
            return EMPTY_LIST;
        }
        return annotationFieldList;
    }

    /**
     * Common header style: shared borders plus solid fill and wrapped text.
     *
     * @param headerCellStyle style map to populate
     */
    private static void headerCommonCellStyle(Map<String, Object> headerCellStyle) {
        commonCellStyle(headerCellStyle);
        headerCellStyle.put(CellUtil.FILL_PATTERN, FillPatternType.SOLID_FOREGROUND);
        headerCellStyle.put(CellUtil.WRAP_TEXT, true);
    }

    /**
     * Common cell style: thin black borders on all four sides.
     *
     * @param cellStyle style map to populate
     */
    private static void commonCellStyle(Map<String, Object> cellStyle) {
        cellStyle.put(CellUtil.BORDER_LEFT, BorderStyle.THIN);
        cellStyle.put(CellUtil.BORDER_RIGHT, BorderStyle.THIN);
        cellStyle.put(CellUtil.BORDER_TOP, BorderStyle.THIN);
        cellStyle.put(CellUtil.BORDER_BOTTOM, BorderStyle.THIN);
        cellStyle.put(CellUtil.LEFT_BORDER_COLOR, IndexedColors.BLACK.getIndex());
        cellStyle.put(CellUtil.RIGHT_BORDER_COLOR, IndexedColors.BLACK.getIndex());
        cellStyle.put(CellUtil.TOP_BORDER_COLOR, IndexedColors.BLACK.getIndex());
        cellStyle.put(CellUtil.BOTTOM_BORDER_COLOR, IndexedColors.BLACK.getIndex());
    }

    /**
     * Applies a border style and colour around a merged cell region.
     *
     * @param sheet       sheet containing the region
     * @param address     merged region address
     * @param borderStyle border style to apply
     * @param color       indexed border colour
     */
    public static void setRegionBorder(
            Sheet sheet, CellRangeAddress address, BorderStyle borderStyle, short color) {
        int firstRow = address.getFirstRow();
        int lastRow = address.getLastRow();
        int firstColumn = address.getFirstColumn();
        int lastColumn = address.getLastColumn();
        // Left and right edges, one row at a time.
        for (int i = firstRow; i <= lastRow; i++) {
            Cell firstCell = CellUtil.getCell(CellUtil.getRow(i, sheet), firstColumn);
            Cell lastCell = CellUtil.getCell(CellUtil.getRow(i, sheet), lastColumn);
            CellUtil.setCellStyleProperty(firstCell, CellUtil.BORDER_LEFT, borderStyle);
            CellUtil.setCellStyleProperty(firstCell, CellUtil.LEFT_BORDER_COLOR, color);
            CellUtil.setCellStyleProperty(lastCell, CellUtil.BORDER_RIGHT, borderStyle);
            CellUtil.setCellStyleProperty(lastCell, CellUtil.RIGHT_BORDER_COLOR, color);
        }
        // Top and bottom edges, one column at a time.
        for (int i = firstColumn; i <= lastColumn; i++) {
            Cell firstCell = CellUtil.getCell(CellUtil.getRow(firstRow, sheet), i);
            Cell lastCell = CellUtil.getCell(CellUtil.getRow(lastRow, sheet), i);
            CellUtil.setCellStyleProperty(firstCell, CellUtil.BORDER_TOP, borderStyle);
            CellUtil.setCellStyleProperty(firstCell, CellUtil.TOP_BORDER_COLOR, color);
            CellUtil.setCellStyleProperty(lastCell, CellUtil.BORDER_BOTTOM, borderStyle);
            CellUtil.setCellStyleProperty(lastCell, CellUtil.BOTTOM_BORDER_COLOR, color);
        }
    }
}
|
// IsOperatorInstalled tells if a OLM CSV or a Subscription is already installed in the namespace.
func IsOperatorInstalled(ctx context.Context, client client.Client, namespace string, global bool, options Options) (bool, error) {
options, err := fillDefaults(options, client)
if err != nil {
return false, err
}
if csv, err := findCSV(ctx, client, namespace, options); err != nil {
return false, err
} else if csv != nil {
return true, nil
}
if sub, err := findSubscription(ctx, client, namespace, global, options); err != nil {
return false, err
} else if sub != nil {
return true, nil
}
return false, nil
} |
import numpy as np
import pytest
import vtk
from vtk.util import numpy_support as ns
from pytestvtk.assert_vtk import assert_vtk
@pytest.fixture
def vtk_array():
    """Cell array with three cells: 2 points, 3 points and 4 points.

    The flat layout is [count, id, id, ...] per cell.
    """
    connectivity = [
        2, 1, 2,
        3, 3, 4, 5,
        4, 5, 6, 7, 8,
    ]
    ids = vtk.vtkIdTypeArray()
    for value in connectivity:
        ids.InsertNextValue(value)
    cell_array = vtk.vtkCellArray()
    cell_array.SetNumberOfCells(3)
    cell_array.SetCells(3, ids)
    return cell_array
@pytest.fixture
def vtk_array_mod():
    """Deliberately different cell array, used as the unequal comparand.

    NOTE(review): SetNumberOfCells(3) vs SetCells(2, ...) mismatch is kept
    as in the original — presumably part of what the comparison detects.
    """
    connectivity = [
        2, 1, 2,
        2, 3, 4,
    ]
    ids = vtk.vtkIdTypeArray()
    for value in connectivity:
        ids.InsertNextValue(value)
    cell_array = vtk.vtkCellArray()
    cell_array.SetNumberOfCells(3)
    cell_array.SetCells(2, ids)
    return cell_array
def test_compare_vtkCellArray(vtk_array, vtk_array_mod):
    # Each array must compare equal to itself.
    assert_vtk(vtk_array, vtk_array)
    assert_vtk(vtk_array_mod, vtk_array_mod)
    # Comparing the two different arrays must make assert_vtk fail.
    with pytest.raises(pytest.fail.Exception) as excinfo:
        assert_vtk(vtk_array, vtk_array_mod)
package com.aspose.email.examples.pop3;
import com.aspose.email.MailMessage;
import com.aspose.email.Pop3Client;
import com.aspose.email.Pop3MessageInfo;
import com.aspose.email.Pop3MessageInfoCollection;
import com.aspose.email.SaveOptions;
import com.aspose.email.SecurityOptions;
public class RetrieveEmailMessages {

    public static void main(String[] args) {
        retrieveMessagesUsingSequenceNumber();
        retrieveMessagesUsingMessageUniqueURI();
        retrieveAndSaveDirectlyToDisc();
    }

    /** Fetches each message by its 1-based sequence number and saves it as EML and MSG. */
    public static void retrieveMessagesUsingSequenceNumber() {
        Pop3Client client = new Pop3Client();
        client.setHost("pop.aspose.com");
        client.setUsername("username");
        client.setPassword("password");
        client.setSecurityOptions(SecurityOptions.Auto);
        int iMessageCount = client.getMessageCount();
        System.out.println("Total Messages: " + iMessageCount);
        // POP3 sequence numbers run 1..count inclusive.
        for (int i = 1; i <= iMessageCount; i++) {
            MailMessage eml = client.fetchMessage(i);
            System.out.println(eml.getSubject());
            //Save to disc in EML format to disc
            eml.save(i + ".eml", SaveOptions.getDefaultEml());
            //Save to disc in Outlook MSG format to disc
            eml.save(i + ".msg", SaveOptions.getDefaultMsgUnicode());
        }
    }

    /** Fetches each message by its unique URI and saves it named after the subject. */
    public static void retrieveMessagesUsingMessageUniqueURI() {
        Pop3Client client = new Pop3Client();
        client.setHost("Pop.domain.com");
        client.setUsername("username");
        client.setPassword("password");
        client.setSecurityOptions(SecurityOptions.Auto);
        Pop3MessageInfoCollection coll = client.listMessages();
        for (Pop3MessageInfo msgInfo : coll) {
            MailMessage eml = client.fetchMessage(msgInfo.getUniqueId());
            // Strip ':' which is illegal in file names on Windows.
            //Save to disc in EML format to disc
            eml.save(eml.getSubject().replace(":", "") + ".eml", SaveOptions.getDefaultEml());
            //Save to disc in Outlook MSG format to disc
            eml.save(eml.getSubject().replace(":", "") + ".msg", SaveOptions.getDefaultMsgUnicode());
        }
    }

    /** Saves each message straight to disc without materialising a MailMessage. */
    public static void retrieveAndSaveDirectlyToDisc() {
        Pop3Client client = new Pop3Client();
        client.setHost("Pop.domain.com");
        client.setUsername("username");
        client.setPassword("password");
        client.setSecurityOptions(SecurityOptions.Auto);
        int iMessageCount = client.getMessageCount();
        // Fix: the previous bound `i < iMessageCount` skipped the last message;
        // sequence numbers are 1..count inclusive (see the sibling method above).
        for (int i = 1; i <= iMessageCount; i++) {
            client.saveMessage(i, i + ".eml");
        }
    }
}
|
package com.ggp.player_evaluators.savers;
import com.ggp.player_evaluators.EvaluatorEntry;
import com.ggp.player_evaluators.IPlayerEvaluationSaver;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import java.io.IOException;
import java.io.Writer;
public class CsvSaver implements IPlayerEvaluationSaver {
    /** CSV printer wrapping the output writer; null once the saver is closed. */
    private CSVPrinter csvOut;

    /**
     * Creates a saver that writes CSV rows (Excel dialect, with header) to the writer.
     *
     * @param output destination writer
     * @throws IOException if the header row cannot be written
     */
    public CsvSaver(Writer output) throws IOException {
        csvOut = new CSVPrinter(output,
                CSVFormat.EXCEL.withHeader("intended_time", "time", "intended_init_time", "init_time", "states", "init_states", "path_states", "path_states_min", "path_states_max", "exp", "first_act_exp"));
    }

    /**
     * Appends one evaluation entry as a CSV record and flushes immediately.
     *
     * @param e              evaluator entry providing timing and state counts
     * @param exploitability measured exploitability
     * @param firstActExp    exploitability after the first action
     * @throws IOException      if writing the record fails
     * @throws RuntimeException if the saver has already been closed
     */
    public void add(EvaluatorEntry e, double exploitability, double firstActExp) throws IOException {
        if (csvOut == null) throw new RuntimeException("Cannot add entry to closed saver!");
        csvOut.printRecord(e.getIntendedActTimeMs(), e.getEntryTimeMs(), e.getIntendedInitTimeMs(), e.getAvgInitTimeMs(),
                e.getAvgVisitedStates(), e.getAvgInitVisitedStates(), e.getPathStatesAvg(), e.getPathStatesMin(),
                e.getPathStatesMax(), exploitability, firstActExp);
        csvOut.flush();
    }

    /**
     * Closes the underlying printer. Idempotent: a second call is a no-op
     * (previously it threw a NullPointerException).
     *
     * @throws IOException if closing the printer fails
     */
    public void close() throws IOException {
        if (csvOut != null) {
            csvOut.close();
            csvOut = null;
        }
    }
}
|
<gh_stars>1-10
package com.android.ceehack.trust.cards;
import it.gmariotti.cardslib.library.internal.Card;
import java.text.SimpleDateFormat;
import android.content.Context;
import android.graphics.Color;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import com.android.ceehack.trust.R;
public class FulfilledCard extends Card {
    // Formats dates as dd.MM.yyyy in the device's default locale.
    private SimpleDateFormat sdf = new SimpleDateFormat("dd.MM.yyyy",
            java.util.Locale.getDefault());
    protected ImageView indicatorYes;
    protected ImageView indicatorNo;

    /** Creates the card with the default "fulfilled" inner layout. */
    public FulfilledCard(Context context) {
        this(context, R.layout.cards_fulfiled_main);
    }

    /** Creates the card with a custom inner layout. */
    public FulfilledCard(Context context, int innerLayout) {
        super(context, innerLayout);
        init();
    }

    private void init() {
        // No header.
        /*
         * //Set a OnClickListener listener setOnClickListener(new
         * OnCardClickListener() {
         *
         * @Override public void onClick(Card card, View view) {
         * Toast.makeText(getContext(), "Click Listener card=",
         * Toast.LENGTH_LONG).show(); } });
         */
    }

    @Override
    public void setupInnerViewElements(ViewGroup parent, View view) {
        indicatorYes = (ImageView) view.findViewById(R.id.imageViewYes);
        indicatorNo = (ImageView) view.findViewById(R.id.imageViewNo);
        // Guard each view separately: previously a non-null indicatorYes with a
        // null indicatorNo caused a NullPointerException.
        if (indicatorYes != null) {
            indicatorYes.setBackgroundColor(Color.parseColor("#30B643"));
        }
        if (indicatorNo != null) {
            indicatorNo.setBackgroundColor(Color.parseColor("#EF4604"));
        }
    }
}
|
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
    """Import dotted path ``target`` and return the final object it names."""
    head, *rest = target.split('.')
    obj = __import__(head)
    path = head
    for part in rest:
        path = path + "." + part
        obj = _dot_lookup(obj, part, path)
    return obj
def rsplit(a_str, sep, howmany):
    """Split ``a_str`` from the right, keeping at most ``howmany`` rightmost pieces.

    The leftover left-hand pieces are re-joined with ``sep`` into the first
    element. A string without ``sep`` is returned as a single-element list.
    """
    pieces = a_str.split(sep)
    if len(pieces) == 1:
        return pieces
    cut = len(pieces) - howmany
    head, tail = pieces[:cut], pieces[cut:]
    tail.insert(0, sep.join(head))
    return tail
def _get_target(target):
    """Split a dotted target into a lazy owner-importer and the leaf attribute name."""
    try:
        target, attribute = rsplit(target, '.', 1)
    except (TypeError, ValueError):
        raise TypeError("Need a valid target to patch. You supplied: %r" %
                        (target,))

    def getter():
        # Deferred so the owner is (re-)imported at patch time, not parse time.
        return _importer(target)

    return getter, attribute
class Patch:
    # Lightweight stand-in for mock.patch: replaces the attribute named by a
    # dotted path for the duration of start()/stop().
    def __init__(self, target, new, return_value):
        # Dotted path, e.g. "package.module.attribute".
        self.target = target
        # Replacement object; when falsy, return_value is used instead.
        self.new = new
        self.return_value = return_value
        # getter() lazily resolves the owning object; attribute is the leaf name.
        self.getter, self.attribute = _get_target(target)
        # Original value, captured by start() so stop() can restore it.
        self.backup = None
    def get_original(self):
        """Return the current value of the patched attribute, or None if absent."""
        target = self.getter()
        name = self.attribute
        try:
            # Prefer the owner's own __dict__ so descriptors are not invoked
            # and inherited attributes are not mistaken for local ones.
            original = target.__dict__[name]
        except (AttributeError, KeyError):
            original = getattr(target, name, None)
        return original
    def start(self):
        """Install the replacement attribute, remembering the original."""
        self.backup = self.get_original()
        # NOTE(review): truthiness test means a falsy `new` (0, "", False) is
        # silently ignored in favour of return_value — confirm this is intended.
        if self.new:
            new_attr = self.new
        else:
            new_attr = self.return_value
        setattr(self.getter(), self.attribute, new_attr)
    def stop(self):
        """Restore the attribute captured by start()."""
        setattr(self.getter(), self.attribute, self.backup)
        if self.target == 'sys.modules':
            # Special case: also undo the nested sys.modules['sys'].modules
            # reference so the interpreter's module table is consistent.
            self.getter().modules['sys'].modules = self.backup
def pass_through(target, new=None, return_value=None):
    """Create a Patch for `target`, replacing it with `new` (or `return_value`)."""
    return Patch(target, new, return_value)
# mock.patch-compatible aliases. NOTE(review): patch.dict is the same plain
# pass-through — it does not implement mock.patch.dict's dict-merging
# semantics; verify callers only need attribute replacement.
patch = pass_through
patch.dict = pass_through
|
1. Field of the Invention
The present invention relates to an intake device of an internal combustion engine.
2. Description of the Related Art
In an attempt to improve volumetric efficiency when the engine is operating under a heavy load and to create a strong swirl motion in the combustion chamber when the engine is operating under a light load, an internal combustion engine is known in which a common intake passage is provided for a first intake valve and a second intake valve, which valves are arranged adjacent to each other (see Japanese Unexamined Utility Model Publication No. 60-92733). In this engine, the common intake passage is divided into a first intake passage and a second intake passage by a dividing wall which extends to a point upstream of the common intake passage from a position between the first intake valve and the second intake valve. A separating wall projecting downward from the upper wall of the intake port is formed in the intake passage. This separating wall extends along the axis of the first intake passage from a position around the valve stem of the first intake valve to the central portion of the common intake passage beyond the upstream end of the dividing wall, i.e., upstream of the intake passage. An intake control valve, which is open when the engine is operating under a heavy load, is arranged between the upstream end of the separating wall and the side wall of the common intake passage, which is located on the second intake passage side. One of the passage portions of the first intake passage, which are formed by the separating wall, has a helical shape.
In this engine, when the engine is operating under a light load, the intake control valve is closed, and at this time, a large part of the air flows into the combustion chamber via the helically-shaped passage, and thus a strong swirl motion is created in the combustion chamber.
Another engine is known in which a separating wall projecting downward from the upper wall of the intake passage and extending along the axis of the intake passage is formed in the intake passage upstream of the intake valve (see Japanese Unexamined Utility Model Publication No. 59-154826). An intake control valve is arranged in one of passage portions of the intake passage, which are formed by the separating wall.
In this engine, when the engine is operating under a light load, the intake control valve is closed. At this time, air flows within the other passage portion of the intake passage, in which the intake control valve is not arranged. This air is caused to swirl along the curved circumferential wall formed around the valve stem of the intake passage, and thus a strong swirl motion is created in the combustion chamber.
However, in the engine disclosed in the above-mentioned publication No. 60-92733, one of the passage portions of the first intake passage, which are formed by the separating wall, is a helically shaped passage having a large flow resistance, in order to create a strong swirl motion within the first intake passage. In addition, since the separating wall extends approximately over the entire length of the first intake passage, the intake passage has a large flow resistance. Consequently, in this engine, a problem occurs in that a high volumetric efficiency cannot be obtained when the engine is operating under a heavy load at a high speed.
Conversely, in the engine disclosed in the above-mentioned publication No. 59-154826, although only one intake valve is provided for each cylinder, the separating wall has a short length, and one of the passage portions of the intake passage, which are formed by the separating wall, is not a helically-shaped passage having a large flow resistance. Consequently, in this engine, a high volumetric efficiency can be obtained when the engine is operating under a heavy load at a high speed.
However, since this engine is provided with only one intake valve, it is not provided with a dividing wall, as disclosed in the above-mentioned publication No. 60-92733. Consequently, even if such a separating wall, as disclosed in the publication No. 60-92733, is applied to the construction of the intake passage disclosed in the above-mentioned publication No. 59-154826, a problem arises in that a high volumetric efficiency will not be obtained if the positional relationship between the dividing wall and the separating wall is not precise. |
// This file has been generated by Py++.
#ifndef PrimList_hpp__pyplusplus_wrapper
#define PrimList_hpp__pyplusplus_wrapper
// Registers the Py++-generated PrimList binding class with the Python
// extension module. Implementation is generated; do not edit by hand.
void register_PrimList_class();
#endif//PrimList_hpp__pyplusplus_wrapper
|
<filename>kernel/api/src/main/java/org/sakaiproject/util/ComponentMap.java
/**********************************************************************************
* $URL$
* $Id$
***********************************************************************************
*
* Copyright (c) 2003, 2004, 2005, 2006, 2007, 2008 Sakai Foundation
*
* Licensed under the Educational Community License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.opensource.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************/
package org.sakaiproject.util;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
/**
* <p>
* ComponentMap exposes the registered components as a map - the component id is mapped to the component implementation.
* </p>
*/
public class ComponentMap implements Map
{
	/**
	 * Not supported: the component registry cannot be counted through this facade.
	 * Always returns 0, even though components may exist.
	 */
	public int size()
	{
		// TODO Auto-generated method stub
		return 0;
	}
	/**
	 * Always reports non-empty; callers should use containsKey/get instead.
	 * NOTE(review): inconsistent with size() returning 0 — presumably deliberate
	 * for this read-only facade; confirm before changing.
	 */
	public boolean isEmpty()
	{
		return false;
	}
	/**
	 * Delegates to the ComponentManager: true if a component with the given
	 * id (key, cast to String) is registered.
	 */
	public boolean containsKey(Object arg0)
	{
		return org.sakaiproject.component.cover.ComponentManager.contains((String) arg0);
	}
	/**
	 * Not supported: component implementations cannot be searched by value.
	 * Always returns false.
	 */
	public boolean containsValue(Object arg0)
	{
		return false;
	}
	/**
	 * Delegates to the ComponentManager: resolves the component registered
	 * under the given id (key, cast to String), or null if none.
	 */
	public Object get(Object arg0)
	{
		return org.sakaiproject.component.cover.ComponentManager.get((String) arg0);
	}
	/**
	 * Not supported: the map is read-only. Silently ignores the call and
	 * returns null.
	 */
	public Object put(Object arg0, Object arg1)
	{
		return null;
	}
	/**
	 * Not supported: the map is read-only. Silently ignores the call and
	 * returns null.
	 */
	public Object remove(Object arg0)
	{
		return null;
	}
	/**
	 * Not supported: the map is read-only. Silently ignores the call.
	 */
	public void putAll(Map arg0)
	{
	}
	/**
	 * Not supported: the map is read-only. Silently ignores the call.
	 */
	public void clear()
	{
	}
	/**
	 * Not supported: component ids cannot be enumerated here. Returns null
	 * (not an empty set) — callers must guard against it.
	 */
	public Set keySet()
	{
		return null;
	}
	/**
	 * Not supported: component implementations cannot be enumerated here.
	 * Returns null (not an empty collection) — callers must guard against it.
	 */
	public Collection values()
	{
		return null;
	}
	/**
	 * Not supported: entries cannot be enumerated here. Returns null
	 * (not an empty set) — callers must guard against it.
	 */
	public Set entrySet()
	{
		return null;
	}
}
|
Taylor Swift has said touring the world "is not that hard".
Speaking to Radio 1 Breakfast Show host Nick Grimshaw ahead of her show at Radio 1's Big Weekend in Norwich she said: "It's not that hard, I'm telling you now it's really not.
"Sometimes I really don't think it's that hard at all."
The singer is currently on her 1989 world tour, and flew to Norwich from America especially for her performance on the main stage.
She said she feels like she "really worked for something" when she comes off stage.
"That's the best part of this [touring]. When you walk off stage and you're drenched in sweat and you're tired and your legs hurt... it feels like you're really working for the job.
"It's nice to feel tired after a show. I'm like 'Oh my God, I'm finally doing something'.
"I really like the feeling of: 'You know what? I've been busy. I deserve this watching two hours of TV at the end of the night'.
"I like Friends because I've seen every episode, so it's like switching my brain off. I know exactly what they're about to say."
She admitted she was nervous on the first night of the 1989 tour, which runs until December, but started in May at a 55,000 seat stadium in Tokyo.
"We usually start in a smaller arena. It was just like - stadium two in a row, let's go.
"I do get nervous about things like that because it does matter."
Taylor added that she's become used to leaving home to go on tour.
"When I was starting out I'd go on tour for three months and I was gone for three months. Now almost 10 years in I thank God we get to play bigger shows so I don't have to play as many.
"If we're playing stadiums, I'll do two or three a week, not five shows a week, then I get to fly home in between. It's not that bad.
"It's not that hard. It's made out to be harder than it is."
"I do have to leave the cats behind when I leave the country, which is the hardest part."
Taylor said that her fame makes her "sometimes" miss the normality of being able to hang out with friends but that she's had to get used to "the abnormality of my life".
However, she added: "That's my life and I chose this and I can't then complain about it because then I'm a jerk, if you work that hard to get somewhere and then you get there and you're like: 'I hate this'."
And the essential item on a world tour? Caffeine. Or to be more precise: "Any kind of coffee I can get.
"I'm that sort of person that needs coffee every day or I feel like something's a little weird."
For the latest photos, videos and social media reaction from Radio 1's Big Weekend visit bbc.co.uk/bigweekend
Follow @BBCNewsbeat on Twitter, BBCNewsbeat on Instagram, Radio1Newsbeat on YouTube and you can now follow BBC_Newsbeat on Snapchat |
<filename>api/models/app.go
package models
import (
"errors"
"fmt"
)
// Apps is a list of pointers to App.
type Apps []*App

// Sentinel errors returned by the apps API and datastore operations.
var (
	ErrAppsAlreadyExists    = errors.New("App already exists")
	ErrAppsCreate           = errors.New("Could not create app")
	ErrAppsGet              = errors.New("Could not get app from datastore")
	ErrAppsList             = errors.New("Could not list apps from datastore")
	ErrAppsMissingNew       = errors.New("Missing new application")
	ErrAppsNameImmutable    = errors.New("Could not update app - name is immutable")
	ErrAppsNotFound         = errors.New("App not found")
	ErrAppsNothingToUpdate  = errors.New("Nothing to update")
	ErrAppsRemoving         = errors.New("Could not remove app from datastore")
	ErrAppsUpdate           = errors.New("Could not update app")
	ErrDeleteAppsWithRoutes = errors.New("Cannot remove apps with routes")
	ErrUsableImage          = errors.New("Image not found")
)
// App is an application: a named collection of routes plus its
// configuration key/value pairs.
type App struct {
	Name   string `json:"name"`
	Routes Routes `json:"routes,omitempty"`
	// Config is embedded; its key/value pairs serialize under "config".
	Config `json:"config"`
}

// maxAppName is the maximum allowed length of an app name, in bytes.
const (
	maxAppName = 30
)

// Validation errors returned by (*App).Validate.
var (
	ErrAppsValidationMissingName = errors.New("Missing app name")
	ErrAppsValidationTooLongName = fmt.Errorf("App name must be %v characters or less", maxAppName)
	ErrAppsValidationInvalidName = errors.New("Invalid app name")
)
// Validate checks that the app name is present, no longer than maxAppName
// bytes, and contains only allowed characters: ASCII digits, ASCII letters,
// '_' and '-'. It returns the first applicable validation error, or nil.
func (a *App) Validate() error {
	if a.Name == "" {
		return ErrAppsValidationMissingName
	}
	if len(a.Name) > maxAppName {
		return ErrAppsValidationTooLongName
	}
	for _, c := range a.Name {
		// A rune is invalid when it is outside all of the allowed ranges.
		// NOTE: the uppercase check previously read ('Z' > c), which rejected
		// every uppercase letter except 'Z'; fixed to ('Z' < c).
		if (c < '0' || '9' < c) && (c < 'A' || 'Z' < c) && (c < 'a' || 'z' < c) && c != '_' && c != '-' {
			return ErrAppsValidationInvalidName
		}
	}
	return nil
}
// Clone returns a deep copy of the app: the name is copied, each route is
// cloned, and the config map (when present) is duplicated key by key.
func (a *App) Clone() *App {
	clone := App{Name: a.Name}
	for _, route := range a.Routes {
		clone.Routes = append(clone.Routes, route.Clone())
	}
	if a.Config != nil {
		clone.Config = make(Config)
		for key, value := range a.Config {
			clone.Config[key] = value
		}
	}
	return &clone
}
// UpdateConfig merges patch into a.Config: keys with non-empty values are
// set (overwriting existing entries), keys with empty values are removed.
// A nil patch is a no-op.
func (a *App) UpdateConfig(patch Config) {
	if patch == nil {
		return
	}
	if a.Config == nil {
		a.Config = make(Config)
	}
	for key, value := range patch {
		if value == "" {
			delete(a.Config, key)
			continue
		}
		a.Config[key] = value
	}
}
// AppFilter is the set of filters supported when listing apps from the
// datastore.
type AppFilter struct {
	// An SQL LIKE query. Empty does not filter.
	Name string
}
|
<reponame>changqing98/kafka
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.server.authorizer.AclDeleteResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Response to a {@code DeleteAcls} request. Carries one filter result per
 * filter in the request; each filter result lists the ACL bindings that
 * matched (and were deleted) together with any per-binding error.
 */
public class DeleteAclsResponse extends AbstractResponse {
    public static final Logger log = LoggerFactory.getLogger(DeleteAclsResponse.class);

    private final DeleteAclsResponseData data;

    public DeleteAclsResponse(DeleteAclsResponseData data) {
        this.data = data;
    }

    public DeleteAclsResponse(Struct struct, short version) {
        data = new DeleteAclsResponseData(struct, version);
    }

    @Override
    protected Struct toStruct(short version) {
        // Reject data that cannot be represented at this protocol version
        // before serializing.
        validate(version);
        return data.toStruct(version);
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    public List<DeleteAclsResponseData.DeleteAclsFilterResult> filterResults() {
        return data.filterResults();
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(filterResults().stream().map(r -> Errors.forCode(r.errorCode())).collect(Collectors.toList()));
    }

    public static DeleteAclsResponse parse(ByteBuffer buffer, short version) {
        return new DeleteAclsResponse(ApiKeys.DELETE_ACLS.parseResponse(version, buffer), version);
    }

    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }

    /**
     * Rejects responses that cannot be represented at the given version:
     * version 0 only supports LITERAL resource pattern types, and UNKNOWN
     * enum values are never serializable.
     */
    private void validate(short version) {
        if (version == 0) {
            final boolean unsupported = filterResults().stream()
                .flatMap(r -> r.matchingAcls().stream())
                .anyMatch(matchingAcl -> matchingAcl.patternType() != PatternType.LITERAL.code());
            if (unsupported)
                throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
        }

        final boolean unknown = filterResults().stream()
            .flatMap(r -> r.matchingAcls().stream())
            .anyMatch(matchingAcl -> matchingAcl.patternType() == PatternType.UNKNOWN.code()
                || matchingAcl.resourceType() == ResourceType.UNKNOWN.code()
                || matchingAcl.permissionType() == AclPermissionType.UNKNOWN.code()
                || matchingAcl.operation() == AclOperation.UNKNOWN.code());
        if (unknown)
            throw new IllegalArgumentException("DeleteAclsMatchingAcls contain UNKNOWN elements");
    }

    /** Converts an authorizer {@link AclDeleteResult} into a filter result. */
    public static DeleteAclsFilterResult filterResult(AclDeleteResult result) {
        ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE);
        List<DeleteAclsMatchingAcl> matchingAcls = result.aclBindingDeleteResults().stream()
            .map(DeleteAclsResponse::matchingAcl)
            .collect(Collectors.toList());
        return new DeleteAclsFilterResult()
            .setErrorCode(error.error().code())
            .setErrorMessage(error.message())
            .setMatchingAcls(matchingAcls);
    }

    private static DeleteAclsMatchingAcl matchingAcl(AclDeleteResult.AclBindingDeleteResult result) {
        ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE);
        AclBinding acl = result.aclBinding();
        return matchingAcl(acl, error);
    }

    // Visible for testing
    public static DeleteAclsMatchingAcl matchingAcl(AclBinding acl, ApiError error) {
        return new DeleteAclsMatchingAcl()
            .setErrorCode(error.error().code())
            .setErrorMessage(error.message())
            .setResourceName(acl.pattern().name())
            .setResourceType(acl.pattern().resourceType().code())
            .setPatternType(acl.pattern().patternType().code())
            .setHost(acl.entry().host())
            .setOperation(acl.entry().operation().code())
            .setPermissionType(acl.entry().permissionType().code())
            .setPrincipal(acl.entry().principal());
    }

    /** Reconstructs the {@link AclBinding} described by a matching-ACL entry. */
    public static AclBinding aclBinding(DeleteAclsMatchingAcl matchingAcl) {
        ResourcePattern resourcePattern = new ResourcePattern(ResourceType.fromCode(matchingAcl.resourceType()),
            matchingAcl.resourceName(), PatternType.fromCode(matchingAcl.patternType()));
        AccessControlEntry accessControlEntry = new AccessControlEntry(matchingAcl.principal(), matchingAcl.host(),
            AclOperation.fromCode(matchingAcl.operation()), AclPermissionType.fromCode(matchingAcl.permissionType()));
        return new AclBinding(resourcePattern, accessControlEntry);
    }
}
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: <NAME>
# @author: <NAME>
__copyright__ = "Copyright 2012, Locaweb IDC"
import redis
from hashlib import sha1
from functools import wraps
from bottle import request, response
from netl2api.server.utils import RedisClient
from netl2api.lib.config import get_netl2server_cfg, setup_netl2server_logger
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
__all__ = ["cached", "invalidate_cache"]

# Module-wide configuration and logger.
cfg    = get_netl2server_cfg()
logger = setup_netl2server_logger(cfg)

# Caching can be switched off via the "cache" section of the configuration;
# redis_cli provides connections to the redis cache database.
cache_enable = cfg.get("cache", "enabled") == "true"
redis_cli = RedisClient()
def cached(ttl=600):
    """Decorator that caches a handler's pickled return value in redis.

    The cache key is "<METHOD>:<PATH>" plus a SHA1 of the request's query
    and form parameters (the "ticket" parameter is excluded).  When caching
    is disabled, or the cache database is unreachable, the wrapped handler
    is called directly.

    ttl -- cache entry lifetime in seconds (default 600).
    """
    def proxy(f):
        @wraps(f)
        def caching(*args, **kwargs):
            if cache_enable is False:
                return f(*args, **kwargs)
            try:
                cache_db = redis_cli.get_connection()
            except Exception, e:
                # Cache backend unavailable -- serve uncached rather than fail.
                logger.exception("Error in redis_cli connection (cache database)")
                return f(*args, **kwargs)
            # Key identifies the endpoint; subkey identifies the parameters.
            cache_key = "%s:%s" % (request.environ.get("REQUEST_METHOD"), request.environ.get("PATH_INFO"))
            cache_subkey = ";".join(["%s=%s" % (k,v) for k,v in request.query.iteritems() \
                                        if k != "ticket"])
            # NOTE(review): no separator is inserted between the query part and
            # the form part of the subkey, so different query/form splits could
            # hash identically -- confirm whether that collision matters here.
            cache_subkey += ";".join(["%s=%s" % (k,v) for k,v in request.forms.iteritems() \
                                        if k != "ticket"])
            cache_rkey = "cache:%s:%s" % (cache_key, sha1(cache_subkey).hexdigest())
            cached_r = cache_db.get(cache_rkey)
            if cached_r is not None:
                # Cache hit: return the unpickled value; expose the remaining
                # TTL to the client via Cache-Control.
                logger.info("Cache HIT -- context: %s" % request["context"])
                response.set_header("X-Cached", "True")
                response.set_header("Cache-Control", "max-age=%s, must-revalidate" % int(cache_db.ttl(cache_rkey) or 0))
                return loads(cached_r)
            #logger.debug("Cache MISS (calling %s()) -- context %s" % (f_name, context))
            # Cache miss: call the handler, then store the pickled result.
            r = f(*args, **kwargs)
            response.set_header("X-Cached", "False")
            response.set_header("Cache-Control", "max-age=%s, must-revalidate" % ttl)
            cache_db.setex(cache_rkey, dumps(r), ttl)
            return r
        return caching
    return proxy
def invalidate_cache(key=None):
    """Delete cached entries matching the given key pattern.

    key -- fragment matched against stored keys via "cache:*:<key>*".
    Connection errors are logged and ignored; a ResponseError from redis
    (raised e.g. when delete() is called with no matching keys) is ignored.
    """
    try:
        cache_db = redis_cli.get_connection()
    except Exception, e:
        logger.exception("Error in redis_cli connection (cache database)")
        return
    try:
        cache_db.delete(*cache_db.keys("cache:*:%s*" % key))
    except redis.exceptions.ResponseError:
        # No keys matched (delete() called with zero arguments).
        pass
|
Mechanical design of social robot Nancy This paper presents the mechanical design of a social robot, Nancy, developed by the Social Robotics Laboratory, Interactive and Digital Media Institute, National University of Singapore. The objective of our project is to introduce a social robot capable of interacting with people, especially the elderly, based on fundamental, functional and social tasks. We developed Nancy with a multi-module structure and 32 degrees of freedom (DOFs), including 24 and 8 DOFs in the upper-body and lower-body, respectively. 8 DOFs are considered in each arm to make Nancy able to imitate human limbs' motion. Nancy's head involves eye and eyelid motions with 3 DOFs and sits on top of a 2-DOFs neck. The wheeled base is designed in such a way as to facilitate multi-directional movements for Nancy. One of our main objectives in Nancy's mechanical design is to obtain powerful and smooth motion, light-weight joints and simple kinematics. These are achieved by a Bowden cable-pulley mechanism driven by actuators located in the lower-body. Accordingly, all joints are designed to have intersected axes. Most joints are driven by brushless D.C. motors embedded with an electrical brake system to stabilize the robot, especially in case of power loss. |
Are We Doing Enough? The obligation to tackle biases and injustices rooted in the publishing ecosystem has reached the academic communities' consciousness (Royal Society, 2022; APA, 2021; Nature, 2022). Similarly, the IS community is institutionalizing governance mechanisms to demonstrate its commitment to Diversity, Equity and Inclusion (DEI) (Burton-Jones & Sarker, 2021; Association for Information Systems, 2021), in hopes of reversing the unwanted effects of explicit and implicit biases in communicating and publishing IS scholarship. |
Numerical modelling of complex turbulent free-surface flows with the SPH method: an overview The gridless smoothed particle hydrodynamics (SPH) method is now commonly used in computational fluid dynamics (CFD) and appears to be promising in predicting complex free-surface flows. However, increasing flow complexity requires appropriate approaches for taking account of turbulent effects, whereas some authors are still working without any turbulence closure in SPH. A review of recently developed turbulence models adapted to the SPH method is presented herein, from the simplistic point of view of a one-equation model involving mixing length to more sophisticated (and thus realistic) models like explicit algebraic Reynolds stress models (EARSM) or large eddy simulation (LES). Each proposed model is tested and validated on the basis of schematic cases for which laboratory data, theoretical or numerical solutions are available in the general field of turbulent free-surface incompressible flows (e.g. open-channel flow and schematic dam break). They give satisfactory results, even though some progress should be made in the future in terms of free-surface influence and wall conditions. Recommendations are given to SPH users to apply this method to the modelling of complex free-surface turbulent flows. Copyright © 2006 John Wiley & Sons, Ltd. |
<gh_stars>10-100
#!/usr/bin/env node
import { existsSync } from 'fs'
import arg from 'next/dist/compiled/arg/index.js'
import * as Log from '../build/output/log'
import { cliCommand } from '../bin/next'
import build from '../build'
import { printAndExit } from '../server/lib/utils'
import isError from '../lib/is-error'
import { getProjectDir } from '../lib/get-project-dir'
// CLI entry point for `next build`: parses flags, resolves the project
// directory and invokes the production build.
const nextBuild: cliCommand = (argv) => {
  // Recognized flags and their single-letter aliases.
  const validArgs: arg.Spec = {
    // Types
    '--help': Boolean,
    '--profile': Boolean,
    '--debug': Boolean,
    '--no-lint': Boolean,

    // Aliases
    '-h': '--help',
    '-d': '--debug',
  }

  let args: arg.Result<arg.Spec>
  try {
    args = arg(validArgs, { argv })
  } catch (error) {
    // Unknown flags become a usage error; anything else is re-thrown.
    if (isError(error) && error.code === 'ARG_UNKNOWN_OPTION') {
      return printAndExit(error.message, 1)
    }
    throw error
  }

  // --help prints usage and exits with status 0.
  if (args['--help']) {
    printAndExit(
      `
      Description
        Compiles the application for production deployment

      Usage
        $ next build <dir>

      <dir> represents the directory of the Next.js application.
      If no directory is provided, the current directory will be used.

      Options
      --profile                Can be used to enable React Production Profiling
      --no-lint                Disable linting
    `,
      0
    )
  }

  if (args['--profile']) {
    Log.warn('Profiling is enabled. Note: This may affect performance')
  }

  if (args['--no-lint']) {
    Log.warn('Linting is disabled')
  }

  // The first positional argument is the project directory (defaults to
  // the current working directory inside getProjectDir).
  const dir = getProjectDir(args._[0])

  // Check if the provided directory exists
  if (!existsSync(dir)) {
    printAndExit(`> No such directory exists as the project root: ${dir}`)
  }

  // Run the build; any rejection is reported and the process exits.
  return build(
    dir,
    null,
    args['--profile'],
    args['--debug'],
    !args['--no-lint']
  ).catch((err) => {
    console.error('')
    console.error('> Build error occurred')
    printAndExit(err)
  })
}
export { nextBuild }
|
Role of Polysulfides in Self-Healing Lithium-Sulfur Batteries In this work, a novel electrolyte was developed to solve the cycling instability and inefficiency that are inherent to the Li-S battery, which otherwise is a very promising energy storage technology. The sulfur cathode in the current Li-S battery offers superior theoretical capacity (1672 mAh g⁻¹) compared to all Li-ion battery cathodes (300 mAh g⁻¹ maximum). This high capacity is the result of the redox process in the Li-S cell. Therefore, a full Li-S battery could, in theory, deliver an energy density of 2500 Wh kg⁻¹, which is more than twice that of any secondary battery. Moreover, a Li-S battery has the advantages of using abundant, nontoxic and low-cost cathode materials, as well as having a wide operating temperature range. Despite these advantages, much work is required to address several performance-related issues, which prevent the development of practical Li-S batteries. These issues are rapid capacity fading and low coulombic efficiency, which are believed to be linked to the dissolution of lithium polysulfides (Li₂Sₓ, x = 4 to 8) from the sulfur electrode into the electrolyte. The occurrence of these intermediate species favors fast electrochemical kinetics toward the formation of the final product Li₂S during the discharge and provides intrinsic protection against overcharge in Li-S cells. However, the dissolved polysulfides give rise to a redox shuttle phenomenon inside the cell due to their spontaneous reduction and oxidation and facile diffusion in the electrolyte between the anode and cathode. The reduction of the polysulfides may form an insoluble Li₂S₂ and/or a Li₂S film on the metallic lithium, which can create a barrier to Li⁺-ion diffusion and, thus, degrade overall cell performance. To solve these issues, researchers have pursued many different approaches.
The most advanced consists of tailoring the cathode architecture to confine the polysulfide species by either impregnating sulfur into porous carbon matrixes, |
// Get the timezone polygons from the db.
//
// Queries the tz_world table for all polygons intersecting the given
// bounding box and returns them keyed by timezone index. The query uses the
// SpatialIndex virtual table as a prefilter before the exact ST_Intersects
// test. Returns an empty map when the db handle is null, when the statement
// fails to prepare, or when nothing intersects. Rows whose TZID is unknown
// to the timezone db (index 0) are skipped.
std::unordered_map<uint32_t, multi_polygon_type> GetTimeZones(sqlite3* db_handle,
                                                              const AABB2<PointLL>& aabb) {
  std::unordered_map<uint32_t, multi_polygon_type> polys;
  if (!db_handle)
    return polys;

  // Build the MBR expression once; it is used both in the exact intersection
  // test and in the spatial index prefilter.
  const std::string mbr = "BuildMBR(" + std::to_string(aabb.minx()) + "," +
                          std::to_string(aabb.miny()) + ", " + std::to_string(aabb.maxx()) + "," +
                          std::to_string(aabb.maxy()) + ")";
  std::string sql = "select TZID, st_astext(geom) from tz_world where ";
  sql += "ST_Intersects(geom, " + mbr + ") ";
  sql += "and rowid IN (SELECT rowid FROM SpatialIndex WHERE f_table_name = ";
  sql += "'tz_world' AND search_frame = " + mbr + ");";

  sqlite3_stmt* stmt = nullptr;
  uint32_t ret = sqlite3_prepare_v2(db_handle, sql.c_str(), sql.length(), &stmt, 0);
  if (ret == SQLITE_OK) {
    uint32_t result = sqlite3_step(stmt);
    while (result == SQLITE_ROW) {
      std::string tz_id;
      std::string geom;
      if (sqlite3_column_type(stmt, 0) == SQLITE_TEXT)
        tz_id = (char*)sqlite3_column_text(stmt, 0);
      if (sqlite3_column_type(stmt, 1) == SQLITE_TEXT)
        geom = (char*)sqlite3_column_text(stmt, 1);

      // Skip timezone names that the timezone database does not know about.
      uint32_t idx = DateTime::get_tz_db().to_index(tz_id);
      if (idx == 0) {
        result = sqlite3_step(stmt);
        continue;
      }

      // Parse the WKT geometry into a boost multi-polygon.
      multi_polygon_type multi_poly;
      boost::geometry::read_wkt(geom, multi_poly);
      polys.emplace(idx, multi_poly);
      result = sqlite3_step(stmt);
    }
  }
  if (stmt) {
    sqlite3_finalize(stmt);
    stmt = 0;
  }
  return polys;
}
Lani Minella
Career
After college, Minella started on Morning Drive radio for an alternative station in the late 80s, but after she was heard doing her celebrity impersonations, she was called and referred to GTE Interactive Media to imitate voices from the movie FernGully: The Last Rainforest, which was being pitched for a LaserDisc presentation to Magnavox and Philips. She was then referred by the company to their CD-ROM division, where she worked on more voices and assisted with script writing on games for children. Minella asked the company if there were others doing similar work, and the company suggested that she go to trade shows, which led her to attend showcases like the Consumer Electronics Show in Las Vegas, the Game Developers Conference in San Jose, and the Electronic Entertainment Expo in Los Angeles, where she tried to hand out her business card but failed to get any offers, which led her to start her own acting agency, AudioGodz, a production company specializing in all aspects of voiceovers from talent to casting, directing, writing and production, in 1992.
In 1996, Minella worked on a major first-person shooter video game, Duke Nukem 3D, as voice director and actress, voicing most of the female characters in the series. Lani helped cast the voice actor of Duke Nukem, Jon St. John. She met Jon when she was cast as the voice for a commercial he was producing in San Diego. After being impressed by her voices, Jon imitated the voices back at Lani, who asked if he had ever done voice acting for video games; she shortly got him a telephone interview with the game's creator, George Broussard, and he eventually got the part as Duke.
Lani's first major voice role in a video game was for Her Interactive's adventure-mystery series Nancy Drew, starring as the titular character Nancy Drew, debuting with 1998's Secrets Can Kill. Lani reprised her role as Nancy Drew in 31 other video games, with her last being Sea of Darkness in 2015, the decision was made by the CEO due to the dwindled popularity of the series and decided to find a voice actress who is local in the Seattle area, as Lani lives in San Diego.
Since starting her career and becoming well-known for her unique 4-octave vocal range, Lani has provided voices for many other characters in a variety of video games, including the Sea Emperor Leviathan in Subnautica, Rouge the Bat in Sonic Adventure 2, Ivy Valentine from the Soulcalibur series, Lucas in the Super Smash Bros. series, various characters in The Land Before Time games, Luke in the Professor Layton series, Sindel and Sheeva in the 2011 Mortal Kombat, and Mia in Fire Emblem Heroes.
Personal life
Minella is half Italian. In her spare time, she does vocal coaching, gardening, and taking care of her rescue pets when she's home. |
# Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to the end flag
import atexit
import weakref
import asyncio
import itertools
import threading
import concurrent.futures.thread as _base
from .future import Future
from .. import _get_parallel_context
from ..asyncio import is_running_in_event_loop
# Registry of worker tasks -> their work queues; used to drain all
# executors when the interpreter exits.
_tasks_queues = weakref.WeakKeyDictionary()

# Set to True once the interpreter is shutting down; submissions are
# rejected after that.
_shutdown = False


def _python_exit():
    """atexit hook: wake every worker with the stop sentinel and wait
    for all worker tasks to finish."""
    global _shutdown
    _shutdown = True
    # FIXME: debug leaked
    #import sys
    #import gc
    #for k in (list(_tasks_queues.keys())):
    #    print(f"{k} {sys.getrefcount(k)} {gc.get_referrers(k)}")
    for work_queue in _tasks_queues.values():
        if work_queue._loop.is_running():
            # None is the sentinel that tells a worker to exit.
            asyncio.run_coroutine_threadsafe(work_queue.put(None), loop=work_queue._loop)
    for task in _tasks_queues.keys():
        task.result()

atexit.register(_python_exit)
class _AsyncWorkItem(_base._WorkItem):
    """Work item that supports plain callables as well as coroutine
    functions, run on the executor's event loop.

    Reuses concurrent.futures.thread._WorkItem's storage (future, fn,
    args, kwargs).
    """
    async def run(self):
        # Honor a cancellation requested before the item started running.
        if not self.future.set_running_or_notify_cancel():
            return
        try:
            result = self.fn(*self.args, **self.kwargs)
            # If fn returned a coroutine, await it to get the real result.
            if asyncio.iscoroutine(result):
                result = await result
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break the reference cycle exc -> traceback -> frame -> self.
            self = None
        else:
            self.future.set_result(result)
async def _worker(executor_weakref, work_queue):
    """Worker coroutine: runs queued work items until told to stop.

    A None item is the shutdown sentinel; on receiving it (and when the
    executor is gone or shutting down) the sentinel is re-queued so that
    sibling workers also wake up and exit.
    """
    while True:
        work_item = await work_queue.get()
        try:
            if work_item is not None:
                await work_item.run()
                # Drop the reference promptly so results/exceptions can be freed.
                del work_item
                continue
        finally:
            work_queue.task_done()
        # Reached only for the None sentinel: decide whether to exit.
        executor = executor_weakref()
        try:
            if _shutdown or executor is None or executor._shutdown:
                if executor is not None:
                    executor._shutdown = True
                # Propagate the sentinel to the next worker.
                await work_queue.put(None)
                return
        finally:
            # Do not keep the executor alive through this frame.
            del executor
async def _loop_send_stop_event(stop_event):
"""Set stop event to stop event loop.
"""
stop_event.set()
async def _loop_main(stop_event):
"""Event loop main function
that blocks until stop event is set.
"""
await stop_event.wait()
def _async_loop_thread(loop, stop_event):
"""Async loop thread.
"""
asyncio.set_event_loop(loop)
loop.run_until_complete(_loop_main(stop_event))
class AsyncPoolExecutor(_base._base.Executor):
    """Async pool executor.

    Runs submitted callables (or coroutine functions) on worker tasks that
    execute inside a dedicated asyncio event loop, which itself runs in a
    background daemon thread.
    """
    # Class-wide counter used to build unique default task name prefixes.
    _counter = itertools.count().__next__

    def __init__(self, max_workers=1024, task_name_prefix="",
            loop=None, _check_max_workers=True):
        # max_workers -- maximum number of concurrent worker tasks.
        # task_name_prefix -- prefix used when naming worker tasks.
        # loop -- event loop to use (a new loop is created by default).
        # _check_max_workers -- internal flag; subclasses may allow 0 workers.
        if _check_max_workers and int(max_workers) <= 0:
            raise ValueError("max_workers must be greater than 0")
        self._open = False
        self._max_workers = max_workers
        self._loop = loop or asyncio.new_event_loop()
        # Event used to stop the loop thread on shutdown.
        self._loop_stop_event = asyncio.Event(loop=self._loop)
        self._work_queue = asyncio.Queue(loop=self._loop)
        # Futures of the spawned worker tasks.
        self._tasks = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._task_name_prefix = (task_name_prefix or
            ("AsyncPoolExecutor-%d" % self._counter()))
        self._async_loop_thread = None

    @property
    def open(self):
        # True once the pool's event loop thread has been started.
        return bool(self._open)

    def __enter__(self):
        """Start the background event loop thread (idempotent)."""
        with self._shutdown_lock:
            if not self._open:
                self._async_loop_thread = threading.Thread(target=_async_loop_thread,
                    kwargs={"loop": self._loop, "stop_event": self._loop_stop_event},
                    daemon=True)
                self._async_loop_thread.start()
                self._open = True
        return self

    def submit(self, fn, args=None, kwargs=None, block=True):
        """Schedule fn(*args, **kwargs) on the pool and return its future.

        When a worker is idle (or block is True) the item is queued for the
        workers; otherwise the item is run directly on the event loop and
        awaited before returning.  When called from inside a running event
        loop the returned future is wrapped with asyncio.wrap_future.
        """
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        with self._shutdown_lock:
            if not self._open:
                raise RuntimeError("cannot schedule new futures before pool is opened")
            if self._shutdown:
                raise RuntimeError("cannot schedule new futures after shutdown")
            if _shutdown:
                raise RuntimeError("cannot schedule new futures after interpreter shutdown")
            future = Future()
            # Run the work item inside the submitter's parallel context.
            ctx = _get_parallel_context()
            args = fn, *args
            work_item = _AsyncWorkItem(future, ctx.run, args, kwargs)
            idle_workers = self._adjust_task_count()
            if (idle_workers or block) and self._max_workers > 0:
                # Submitting from the pool's own loop would block it forever.
                if is_running_in_event_loop() and asyncio.get_event_loop() is self._loop:
                    raise RuntimeError("deadlock detected")
                asyncio.run_coroutine_threadsafe(self._work_queue.put(work_item), loop=self._loop).result()
            if (not block and not idle_workers) or self._max_workers < 1:
                # No idle worker and non-blocking mode (or a zero-worker
                # pool): run the item inline on the loop and wait for it.
                if is_running_in_event_loop() and asyncio.get_event_loop() is self._loop:
                    raise RuntimeError("deadlock detected")
                asyncio.run_coroutine_threadsafe(work_item.run(), loop=self._loop).result()
            if is_running_in_event_loop():
                return asyncio.wrap_future(future)
            return future

    def _adjust_task_count(self):
        """Increase worker count up to max_workers if needed.
        Returns `True` if worker is immediately available
        to handle the work item or `False` otherwise.
        """
        # Idle workers = spawned workers minus unfinished queue items.
        if len(self._tasks) - self._work_queue._unfinished_tasks > 0:
            return True

        def weakref_cb(_, work_queue=self._work_queue):
            # When the executor is garbage collected, wake the workers so
            # they can exit.
            asyncio.run_coroutine_threadsafe(work_queue.put(None), loop=self._loop).result()

        num_tasks = len(self._tasks)
        if num_tasks < self._max_workers:
            # NOTE(review): task_name is computed but currently unused.
            task_name = "%s_%d" % (self._task_name_prefix or self, num_tasks)
            task = asyncio.run_coroutine_threadsafe(_worker(weakref.ref(self, weakref_cb), self._work_queue), loop=self._loop)
            self._tasks.add(task)
            _tasks_queues[task] = self._work_queue
            return True
        return False

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.shutdown(wait=True)
        return False

    def shutdown(self, wait=True):
        """Stop accepting work and (optionally) wait for workers to finish.

        Re-raises the last exception raised by a worker task, if any, and
        finally stops the event loop thread and joins it.
        """
        with self._shutdown_lock:
            if self._shutdown:
                return
            self._shutdown = True
            if not self._loop.is_running():
                return
            # Wake the workers with the shutdown sentinel.
            asyncio.run_coroutine_threadsafe(
                self._work_queue.put(None), loop=self._loop).result()
        if wait:
            exc = None
            for task in self._tasks:
                try:
                    task.result()
                except BaseException as e:
                    exc = e
            try:
                if exc is not None:
                    raise exc
            finally:
                # Stop the loop's main coroutine and join the loop thread.
                asyncio.run_coroutine_threadsafe(
                    _loop_send_stop_event(self._loop_stop_event), loop=self._loop
                ).result()
                if self._async_loop_thread is not None:
                    self._async_loop_thread.join()
class SharedAsyncPoolExecutor(AsyncPoolExecutor):
    """Shared async pool executor.

    Variant of AsyncPoolExecutor intended to be shared: it passes
    max_workers-1 to the base class (zero workers allowed) and submits in
    non-blocking mode by default, so items run inline when no worker is idle.
    """
    def __init__(self, max_workers, task_name_prefix=""):
        if int(max_workers) < 0:
            raise ValueError("max_workers must be positive or 0")
        super(SharedAsyncPoolExecutor, self).__init__(
            max_workers=max_workers-1, task_name_prefix=task_name_prefix, _check_max_workers=False)

    def submit(self, fn, args=None, kwargs=None, block=False):
        # Same as AsyncPoolExecutor.submit, but block defaults to False.
        return super(SharedAsyncPoolExecutor, self).submit(fn=fn, args=args, kwargs=kwargs, block=block)

# Module-wide alias for the shared pool class.
GlobalAsyncPoolExecutor = SharedAsyncPoolExecutor
|
Robust Parametric Fault Estimation in a Hopper System Abstract The ability to diagnose possible faults is a necessity for satellite launch vehicles during their mission. In this paper, a structural analysis method is employed to divide the complex propulsion system into simpler subsystems for fault diagnosis filter design. A robust fault diagnosis method, which is an optimization-based approach, is applied to the subsystems of the propulsion system. The optimization problem has been solved with two different tools and the results are compared with those of two other optimization-based approaches. The turbo-pump system is used to illustrate the employed methods and the obtained results. |
To understand what suburbs will look like as the 21st century unfolds, we need to consider what suburbs look like as the 21st century begins.
Despite signs of urban revitalization, particularly in cities like Boston and Providence, the 2000 US Census confirms that suburbs continue to dominate our country’s economic, social, and political landscape.
Clearly, the decentralization of economic and residential life remains the dominant growth pattern in the United States.
Yet suburbs are no longer just bedroom communities for workers commuting to traditional downtowns. Rather, many are now strong employment centers serving a variety of economic functions in their regions.
The American economy is becoming an exit-ramp economy, with office, commercial, and retail facilities increasingly located along suburban freeways.
This is particularly true in hot tech markets like Washington, D.C., Austin, and Boston, where firms like America Online, Dell, and Raytheon have built large exurban campuses far from the city center.
Suburbs are also becoming more racially and ethnically diverse.
For example, a recent study of the Greater Washington, D.C., area, the fifth-largest magnet for immigrants in the 1990s, showed that 87 percent of new arrivals settled in suburban communities. An incredible 46 percent of new arrivals, particularly from Asia, settled outside the area’s Beltway, traditionally the demarcation between older, urbanized communities and newer, suburban communities.
Finally, suburbs are becoming more economically and physically diverse.
At one end of the continuum lie suburbs built in the early or mid-part of the 20th century that are experiencing central city-like challenges – aging infrastructure, deteriorating schools and commercial corridors, and inadequate housing.
Like cities, these older communities require reinvestment and redevelopment. In some cases, like the southern suburbs of Philadelphia, Seattle, Atlanta, and Chicago, they also require broader responses to the issues presented by populations that contain disproportionate numbers of working poor families and aging homeowners.
At the other end of the suburban continuum lie the newest ring of suburbs emerging at the fringe of metropolitan areas. These places – Loudoun County in Northern Virginia, Douglas County outside Denver, and the Route 495 corridor around Boston – are growing at a feverish pace. Yet it is a particular kind of growth – sprawling, low-density, and auto-dependent.
For residents in these communities, suburban prosperity has come with the heavy, unanticipated price of traffic congestion, overcrowded schools, disappearing open space, and diminished quality of life.
In many metropolitan areas, the changing face of suburbia is fueling an intense debate about the quality, pace, and shape of growth. An odd assortment of constituencies – employers, environmentalists, conservationists, religious and political leaders, and regular citizens – are taking action to address the related challenges of different kinds of suburbs.
In many metropolitan areas, frustration with this type of suburban growth is leading to important change in government regulations and programs that in the past have tended to drive development outward.
States like Maryland and New Jersey are experimenting with new ”smart growth” policies that target spending on roads, infrastructure, and schools in older suburban communities.
States like Minnesota and Georgia are creating new kinds of regional authorities to govern issues like transportation and affordable housing.
In the 1998 and 2000 elections, voters throughout the country approved ballot referendums that devote substantial resources to the preservation of open space and the redevelopment of polluted urban land.
If these reforms in states across the country continue and expand, and if localities follow through with their own land and development changes, the look and function of many American suburbs may change markedly in coming decades. Some may embrace thoughtful design and planning to revitalize compact town centers. Others may attempt to better integrate industrial and office parks into residential and commercial life.
In general, there may be a lot less sprawl and a lot more attention paid to the positive attributes of traditional communities.
In the end, the suburbs of the next century will offer Americans a greater range of community choices.
As in other aspects of our consumer lives, suburbs will be remade (or newly constructed) to meet a variety of demands, needs and desires. As America changes as we grow older, as we grow more diverse, as our economy transforms, our suburbs will change with us. |
The assessment of flow-mediated dilation (FMD) of the brachial artery Brachial artery flow-mediated dilation (FMD) is a measure of endothelial dysfunction which is used to evaluate cardiovascular risk. A stand-alone video processing system based on a DSP board was developed in our lab to assess the brachial artery FMD from ultrasound images. In this paper the system is introduced and compared with the main methods and devices illustrated in literature. The available systems were analysed and catalogued according to their main features: input data, accuracy, real-time capability, and complexity. Our system was tested both on synthetic ultrasound images and in in-vivo FMD examinations so that its performances could be compared with those of the other methods. |
<gh_stars>1-10
import { missingNumber } from './missing-number';
describe('268. Missing Number', () => {
it('missingNumber', () => {
expect(missingNumber([3, 0, 1])).toEqual(2);
expect(missingNumber([9, 6, 4, 2, 3, 5, 7, 0, 1])).toEqual(8);
});
});
|
<gh_stars>0
#pragma once
#include <RichOle.h>
// RichTextBox: a thin CWnd-based wrapper around the RichEdit common control.
// The large middle section re-exposes the CRichEditCtrl API surface verbatim
// (marked "Copy from CRichEditCtrl"); the trailing section adds OLE-object
// helpers for embedding and hit-testing inline OLE objects.
class RichTextBox : public CWnd
{
public:
	RichTextBox();
	~RichTextBox();

	// Creates the underlying RichEdit window as a child of pParentWnd.
	virtual BOOL Create(DWORD dwStyle,
		const RECT &rect, CWnd *pParentWnd, LPCTSTR lpszName);

	//<<Begin Copy from CRichEditCtrl
public:
	// Attributes
	BOOL CanUndo() const;
	BOOL CanRedo() const;
	UNDONAMEID GetUndoName() const;
	UNDONAMEID GetRedoName() const;
	int GetLineCount() const;
	BOOL GetModify() const;
	void SetModify(__in BOOL bModified = TRUE);
	BOOL SetTextMode(__in UINT fMode);
	UINT GetTextMode() const;
	void GetRect(__out LPRECT lpRect) const;
	CPoint GetCharPos(__in long lChar) const;
	UINT GetOptions() const;
	void SetOptions(__in WORD wOp, __in DWORD dwFlags);
	BOOL SetAutoURLDetect(__in BOOL bEnable = TRUE);
	UINT GetWordWrapMode() const;
	UINT SetWordWrapMode(__in UINT uFlags) const;
	BOOL GetPunctuation(__in UINT fType, __out PUNCTUATION *lpPunc) const;
	BOOL SetPunctuation(__in UINT fType, __in PUNCTUATION *lpPunc);

	// NOTE: first word in lpszBuffer must contain the size of the buffer!
	// NOTE: Copied line does not contain null character!
	int GetLine(__in int nIndex, LPTSTR lpszBuffer) const;
	// NOTE: Copied line does not contain null character!
	int GetLine(__in int nIndex, LPTSTR lpszBuffer, __in int nMaxLength) const;
	BOOL CanPaste(__in UINT nFormat = 0) const;

	// Selection: character-index pair or CHARRANGE forms
	void GetSel(__out long &nStartChar, __out long &nEndChar) const;
	void GetSel(__out CHARRANGE &cr) const;
	void LimitText(__in long nChars = 0);
	long LineFromChar(__in long nIndex) const;
	CPoint PosFromChar(__in UINT nChar) const;
	int CharFromPos(__in CPoint pt) const;
	void SetSel(__in long nStartChar, __in long nEndChar);
	void SetSel(__in CHARRANGE &cr);

	// Character/paragraph formatting (CHARFORMAT2/PARAFORMAT2 are the
	// RichEdit 2.0+ extended variants)
	DWORD GetDefaultCharFormat(__out CHARFORMAT &cf) const;
	DWORD GetDefaultCharFormat(__out CHARFORMAT2 &cf) const;
	DWORD GetSelectionCharFormat(__out CHARFORMAT &cf) const;
	DWORD GetSelectionCharFormat(__out CHARFORMAT2 &cf) const;
	long GetEventMask() const;
	long GetLimitText() const;
	DWORD GetParaFormat(__out PARAFORMAT &pf) const;
	DWORD GetParaFormat(__out PARAFORMAT2 &pf) const;
	// #if(_MSC_VER > 1310) // VC2003
	// 	// richedit EM_GETSELTEXT is ANSI
	// 	_AFX_INSECURE_DEPRECATE("GetSelText(char *) is unsafe. Instead, use GetSelText(void) returning CString")
	// #endif
	// 	long GetSelText(__out LPSTR lpBuf) const;
	int GetTextRange(__in int nFirst, __in int nLast, __out CString &refString) const;
	CString GetSelText() const;
	WORD GetSelectionType() const;
	COLORREF SetBackgroundColor(__in BOOL bSysColor, __in COLORREF cr);
	BOOL SetDefaultCharFormat(__in CHARFORMAT &cf);
	BOOL SetDefaultCharFormat(__in CHARFORMAT2 &cf);
	BOOL SetSelectionCharFormat(__in CHARFORMAT &cf);
	BOOL SetSelectionCharFormat(__in CHARFORMAT2 &cf);
	BOOL SetWordCharFormat(__in CHARFORMAT &cf);
	BOOL SetWordCharFormat(__in CHARFORMAT2 &cf);
	DWORD SetEventMask(__in DWORD dwEventMask);
	BOOL SetParaFormat(__in PARAFORMAT &pf);
	BOOL SetParaFormat(__in PARAFORMAT2 &pf);
	BOOL SetTargetDevice(__in HDC hDC, __in long lLineWidth);
	//BOOL SetTargetDevice(__in CDC &dc, __in long lLineWidth);
	long GetTextLength() const;
	long GetTextLengthEx(__in DWORD dwFlags, __in UINT uCodePage = -1) const;
	BOOL SetReadOnly(__in BOOL bReadOnly = TRUE);
	int GetFirstVisibleLine() const;

	// Operations
	void EmptyUndoBuffer();
	void StopGroupTyping();
	UINT SetUndoLimit(__in UINT nLimit);
	// Gets the character index of the first character of a specified line in a multiline edit control
	int LineIndex(__in int nLine = -1) const;
	int LineLength(__in int nLine = -1) const;
	void LineScroll(__in int nLines, __in int nChars = 0);
	void ReplaceSel(LPCTSTR lpszNewText, __in BOOL bCanUndo = FALSE);
	void SetRect(__in LPCRECT lpRect);
	BOOL DisplayBand(__in LPRECT pDisplayRect);
	long FindText(__in DWORD dwFlags, __out FINDTEXTEX *pFindText) const;
	DWORD FindWordBreak(__in UINT nCode, __in DWORD nStart) const;
	long FormatRange(__in FORMATRANGE *pfr, __in BOOL bDisplay = TRUE);
	void HideSelection(__in BOOL bHide, __in BOOL bPerm);
	void PasteSpecial(__in UINT nClipFormat, __in DWORD dvAspect = 0, __in HMETAFILE hMF = 0);
	void RequestResize();
	long StreamIn(__in int nFormat, EDITSTREAM &es);
	long StreamOut(__in int nFormat, EDITSTREAM &es);

	// Clipboard operations
	BOOL Undo();
	BOOL Redo();
	void Clear();
	void Copy();
	void Cut();
	void Paste();

	// OLE support
	IRichEditOle *GetIRichEditOle() const;
	BOOL SetOLECallback(IRichEditOleCallback *pCallback);
	//<< End Copy from CRichEditCtrl

public:
	// Embeds pOleObject at character position cp, sized nWidth x nHeight.
	int InsertOle(IOleObject *pOleObject, LONG cp, int nWidth, int nHeight);
	// Fills pObj with the first OLE object currently scrolled into view.
	int GetFirstVisibleOleOjbect(__out REOBJECT *pObj);
	// Returns the OLE object under the given client-area point, if any.
	IOleObject *HitTest(LPPOINT lpPt);

	virtual LRESULT WindowProc(UINT message, WPARAM wParam, LPARAM lParam);
};
|
#include "lvTlsSocket.h"
#include "lvTlsCallbackBase.h"
#include <vector>
#include <thread>
#include <chrono>
#include <queue>
#include <mutex>
#include <functional>
#include <istream>
#include <ostream>
#include <condition_variable>
using namespace boost::asio;
namespace lvasynctls {
// Binds the socket to the engine's io_service and the given SSL context.
// All async handlers are serialized through socketStrand. streamSize bounds
// the input streambuf; outputQueueSize bounds how many pending write chunks
// startWrite() will accept before reporting "no space".
lvTlsSocketBase::lvTlsSocketBase(std::shared_ptr<lvAsyncEngine> engineContext, boost::asio::ssl::context& sslContext, size_t streamSize, size_t outputQueueSize) :
	engineOwner{ engineContext },
	socketStrand{ *(engineContext->getIOService()) },
	socket{ *(engineContext->getIOService()), sslContext },
	socketErr{ 0 },
	iSLock{},
	inputRunning{ false },
	inQueue{},
	inputStreamBuffer{ streamSize },
	inputStream{ &inputStreamBuffer },
	oQLock{},
	outputRunning{ false },
	outQueue{},
	oQMaxSize{ outputQueueSize }
{

}
// Tears the socket down exactly once: flips the sticky socketErr flag to the
// abort value, drains the write and read sides, frees queued chunks (firing
// their callbacks with an error), then forces the SSL shutdown handshake.
// Safe to call more than once - only the first caller does the work.
void lvTlsSocketBase::shutdown()
{
	//we are somewhat protected by the mutex in lvland, but this seems to be whats needed to avoid crashes and (more commonly) hangs
	//https://stackoverflow.com/questions/32046034/what-is-the-proper-way-to-securely-disconnect-an-asio-ssl-socket
	//https://stackoverflow.com/questions/22575315/how-to-gracefully-shutdown-a-boost-asio-ssl-client
	//https://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error/25703699#25703699
	int noErr = 0;
	int abortSock = LVTLSSOCKETERRFLAGABORT;
	//set socket error flag if it wasn't set already
	if (socketErr.compare_exchange_strong(noErr, abortSock)) {
		//true means it was set to noErr before so we need to kill everything
		bool runningCpy = false;
		boost::system::error_code ec;
		int32_t iter = 0;

		//stop all ongoing write operations, retry up to 2 second (see wait at bottom)
		//NOTE(review): this loop waits for the *write* side to go idle but calls
		//shutdown_receive, same as the read loop below; shutdown_send may have
		//been intended here - confirm against the protocol requirements.
		while (iter < 2000) {
			iter++;
			try {
				//cancel existing operations
				socket.lowest_layer().cancel(ec);
				socket.lowest_layer().shutdown(boost::asio::ip::tcp::socket::shutdown_receive, ec);
			}
			catch (...) {
				//assumption is that something in socket.lowest_layer is dead, so we should probably stop running.
				iter = INT32_MAX;
			}
			{
				std::lock_guard<std::mutex> lg(oQLock);
				runningCpy = outputRunning;
			}
			if (!runningCpy) {
				//writer has stopped; INT32_MAX exits the loop on the next test
				iter = INT32_MAX;
			}
			else {
				std::this_thread::sleep_for(std::chrono::milliseconds(1)); //yield
			}
		}

		iter = 0;
		//stop all ongoing read operations, retry up to 2 second (see wait at bottom)
		while (iter < 2000) {
			iter++;
			try {
				//cancel existing operations
				socket.lowest_layer().cancel(ec);
				socket.lowest_layer().shutdown(boost::asio::ip::tcp::socket::shutdown_receive, ec);
			}
			catch (...) {
				//assumption is that something in socket.lowest_layer is dead, so we should probably stop running.
				iter = INT32_MAX;
			}
			{
				std::lock_guard<std::mutex> lg(iSLock);
				runningCpy = inputRunning;
			}
			//NOTE(review): this tests the member inputRunning directly rather than
			//the runningCpy snapshot taken under the lock - likely a copy/paste slip,
			//though the effect is only a slightly racier (benign) read.
			if (!inputRunning) {
				iter = INT32_MAX;
			}
			else {
				std::this_thread::sleep_for(std::chrono::milliseconds(1)); //yield
			}
		}

		//clear queues: every queued chunk is freed and its callback (if any) is
		//fired with error 1000 so LabVIEW-side waiters are released
		{
			std::lock_guard<std::mutex> lg(oQLock);
			while (!outQueue.empty()) {
				//properly clean up queue elements
				auto temp = outQueue.front();
				delete temp.chunkdata;
				if (temp.optcallback) {
					temp.optcallback->setErrorCondition(1000, "socket shut down");
					temp.optcallback->execute();
					delete temp.optcallback;
				}
				outQueue.pop();
			}
		}
		{
			std::lock_guard<std::mutex> lg(iSLock);
			while (!inQueue.empty()) {
				//properly clean up queue elements
				auto temp = inQueue.front();
				if (temp.optcallback) {
					temp.optcallback->setErrorCondition(1000, "socket shut down");
					temp.optcallback->execute();
					delete temp.optcallback;
				}
				inQueue.pop();
			}
		}

		try {
			std::atomic_bool done{ false };
			//start shutdown process, this hack brought to you by stack overflow. Async shutdown blocks until data moves...
			socket.async_shutdown(socketStrand.wrap(
				[&done](const boost::system::error_code& ec) {
					//we want this shutdown process to still be synchronous, we just want to force it a bit
					//the queue lets us keep things synchronous
					done = true;
				}
			));

			//so then we move some data with an async write
			const char buffer[] = "\0";
			boost::system::error_code wec; //NOTE(review): unused
			//this write should immediately fail, allowing shutdown to complete
			async_write(socket, boost::asio::buffer(buffer), socketStrand.wrap(
				[](...) {
					//ignore anything that happens here
					return;
				}
			));

			//poll for the shutdown handler, bounded at 500 * 5 ms = 2.5 s
			for (size_t i = 0; i < 500; i++)
			{
				if (done) {
					break;
				}
				std::this_thread::sleep_for(std::chrono::milliseconds(5)); //yield
			}
		}
		catch (...) {
		}

		try {
			socket.lowest_layer().close();
		}
		catch (...) {
		}
	}
}
// Detach from the owning engine, then run the (idempotent) shutdown sequence.
lvTlsSocketBase::~lvTlsSocketBase()
{
	// deregister first so the engine cannot hand this socket out mid-teardown
	engineOwner->unregisterSocket(this);
	shutdown();
}
// WRITE
// Completion handler for each async_write. Pops the chunk that just finished,
// frees it, runs its callback, and - if the socket is healthy and more chunks
// are queued - chains the next async_write. Runs on the strand.
// bytes_transferred is accepted for the asio handler signature but unused.
void lvTlsSocketBase::writeCB(const boost::system::error_code & error, std::size_t bytes_transferred) {
	std::vector<unsigned char> * lastData = nullptr;
	lvasynctls::lvTlsCallback * lastCallback = nullptr;
	outputChunk next{ nullptr, nullptr };
	size_t sz = 0;
	int err = socketErr;
	bool more = false;

	{
		//ok, now we've started the write action lets pop
		std::lock_guard<std::mutex> lg(oQLock);
		sz = outQueue.size();
		if (sz > 0) {
			auto wd = outQueue.front();
			lastData = wd.chunkdata;
			lastCallback = wd.optcallback;
			outQueue.pop();
		}
		if ((sz > 1) && !error && !err && engineOwner) {
			//more data to write
			next = outQueue.front();
			more = true;
		}
		else {
			//no more data, set output running back to false
			outputRunning = false;
		}
	}

	//free the finished chunk outside the lock
	delete lastData;
	if (lastCallback) {
		//translate the asio error (if any) onto the callback before running it
		errorCheck(error, lastCallback);
		lastCallback->execute();
		delete lastCallback;
		lastCallback = nullptr;
	}

	if (more) {
		//socket is still OK, this request succeeded, and there is more work to do
		try
		{
			async_write(socket, buffer(*(next.chunkdata)), socketStrand.wrap(boost::bind(&lvTlsSocketBase::writeCB, this, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred)));
		}
		catch (...)
		{
			//mark the socket broken with a generic error code and stop the writer
			int noErr = 0;
			int eVal = 42;
			socketErr.compare_exchange_strong(noErr, eVal);
			{
				std::lock_guard<std::mutex> lg(oQLock);
				outputRunning = false;
			}
		}
	}
	else {
		{
			//redundant with the assignment made under the first lock above, but harmless
			std::lock_guard<std::mutex> lg(oQLock);
			outputRunning = false;
		}
		if (error) {
			int noErr = 0;
			int code = error.value();
			//set socket error flag, eventually killing all operations on the socket.
			socketErr.compare_exchange_strong(noErr, code);
		}
	}
}
// Posted onto the io_service by startWrite(): peeks the head of the output
// queue (without popping - writeCB pops on completion) and launches the first
// async_write of a write chain. If the socket is errored or the queue is
// empty, clears outputRunning so the next startWrite() restarts the chain.
void lvTlsSocketBase::writeAction() {
	outputChunk next{ nullptr, nullptr };
	bool ready = false;
	{
		std::lock_guard<std::mutex> lg(oQLock);
		if ((0 == socketErr) && (outQueue.size() >= 1)) {
			//preview the queue, this lets any other enqueuers know they don't need to start a write action
			next = outQueue.front();
			ready = true;
		}
	}
	if (ready) {
		async_write(socket, buffer(*(next.chunkdata)), socketStrand.wrap(boost::bind(&lvTlsSocketBase::writeCB, this, boost::asio::placeholders::error,boost::asio::placeholders::bytes_transferred)));
	}
	else {
		{
			std::lock_guard<std::mutex> lg(oQLock);
			outputRunning = false;
		}
	}
	return;
}
//take ownership of data buffer if return > 0 and of callback, we will dispose
//return: (-2, socket dead), (-1, arg err), (0, no space), (1, success, is enqueued), (2, success, will run next)
// Enqueues a write chunk. If no write chain is currently running, posts
// writeAction() to start one; otherwise the running chain (writeCB) will
// pick the chunk up. Backpressure: rejects with 0 when the queue is full
// AND a writer is active.
int lvTlsSocketBase::startWrite(std::vector<unsigned char> * data, lvasynctls::lvTlsCallback * callback)
{
	//general gist of problem and solution is here: https://stackoverflow.com/questions/7754695/boost-asio-async-write-how-to-not-interleaving-async-write-calls
	if (data) {
		outputChunk wd{ data, callback };
		bool start = false;
		int err = 0;
		{
			std::lock_guard<std::mutex> lg(oQLock);
			err = socketErr;
			if ((outQueue.size() >= oQMaxSize) && outputRunning) {
				//too many elements, dont' enqueue; if not running, go ahead and add but start the run process.
				return 0;
			}
			if (err != 0) {
				//socket should be dead
				return -2;
			}

			outQueue.push(wd);
			if (!outputRunning) {
				//we won the race to start the writer; flag it before dropping the lock
				start = true;
				outputRunning = true;
			}
		}
		if (start) {
			try
			{
				(engineOwner->getIOService())->post([this]() mutable { writeAction(); });
			}
			catch (const std::exception&)
			{
				//post failed: mark the socket broken and clear the running flag we set
				int noErr = 0;
				int eVal = 42;
				socketErr.compare_exchange_strong(noErr, eVal);
				{
					std::lock_guard<std::mutex> lg(oQLock);
					outputRunning = false;
				}
				return -2;
			}
			return 2;
		}
		else if (0 == err) {
			//someone else is taking care of it
			return 1;
		}
		else {
			return -2;
		}
	}
	//only get here if data was null
	return -1;
}
// READ
// Queues a read that completes when the terminator character appears in the
// input stream. NOTE(review): max and callback are captured by the lambda but
// not used inside it (the callback is carried by the inputChunk; max is not
// enforced by this async_read_until overload) - confirm whether a length cap
// was intended.
int lvTlsSocketBase::startStreamReadUntilTermChar(unsigned char term, size_t max, lvasynctls::lvTlsCallback * callback)
{
	inputChunk ind {
		//read operation lambda
		[this, term, max, callback](readOperationFinalize finalizer) {
			async_read_until(socket, inputStreamBuffer, term, finalizer);
		},
		callback
	};
	return startReadCore(ind);
}
// Queues a read that completes when the terminator string appears in the
// input stream. NOTE(review): as with the char variant, max and callback are
// captured but unused inside the lambda - confirm whether max was meant to
// bound the read.
int lvTlsSocketBase::startStreamReadUntilTermString(std::string term, size_t max, lvasynctls::lvTlsCallback * callback)
{
	inputChunk ind{
		//read operation lambda
		[this, term, max, callback](readOperationFinalize finalizer) {
			async_read_until(socket, inputStreamBuffer, term, finalizer);
		},
		callback
	};
	return startReadCore(ind);
}
// Queues a fixed-length read of up to len bytes, clamped to the free space
// left in the input streambuf. The inner lambda is asio's completion
// condition: it returns the number of bytes still wanted (0 terminates).
int lvTlsSocketBase::startStreamRead(size_t len, lvasynctls::lvTlsCallback * callback)
{
	inputChunk ind{
		//read operation lambda
		[this, len, callback](readOperationFinalize finalizer) {
			//clamp the request to the remaining streambuf capacity
			auto maxRead = inputStreamBuffer.max_size() - inputStreamBuffer.size();
			auto readsize = (len > maxRead) ? maxRead : len;
			async_read(socket, inputStreamBuffer,
				[this, readsize](const boost::system::error_code & err, std::size_t bytes_transferred) -> std::size_t
				{
					//stop on error, once readsize is satisfied, or when the buffer is full
					if (err || (bytes_transferred > readsize) || inputStreamBuffer.size() == inputStreamBuffer.max_size()) {
						return 0;
					}
					else {
						auto remain = readsize - bytes_transferred;
						auto space = inputStreamBuffer.max_size() - inputStreamBuffer.size();
						return ((remain > space) ? space : remain);
					}
					return 0; //unreachable: both branches above return
				},
				finalizer);
		},
		callback
	};
	return startReadCore(ind);
}
// Posted onto the io_service by startReadCore(): peeks the head of the input
// queue (pop happens in the completion lambda) and runs its stored read
// operation, passing a finalizer that pops, chains the next read, and fires
// the user callback.
void lvasynctls::lvTlsSocketBase::readAction()
{
	bool success = false;
	inputChunk ind{ nullptr, nullptr };
	{
		std::lock_guard<std::mutex> lg(iSLock);
		if ((0 == socketErr) && (inQueue.size() >= 1)) {
			//preview the queue to make sure something is avail
			ind = inQueue.front();
			success = true;
		}
	}
	if (success) {
		ind.op(
			//finalizer: runs when the async read completes
			[this](const boost::system::error_code & error, std::size_t bytes_transferred) mutable
			{
				lvasynctls::lvTlsCallback* cb = nullptr;
				size_t sz = 0;
				{
					//ok, now we've finished the read action lets pop
					std::lock_guard<std::mutex> lg(iSLock);
					sz = inQueue.size();
					if (sz > 0) {
						//note: this inner 'ind' shadows the outer one
						auto ind = inQueue.front();
						cb = ind.optcallback;
						inQueue.pop();
					}
					if (!error && (0 == socketErr) && (sz > 1)) {
						//if there is still work to do and an error did not occur at this time
						try
						{
							(engineOwner->getIOService())->post([this]() mutable { readAction(); });
						}
						catch (...)
						{
							int noErr = 0;
							int eVal = 42;
							socketErr.compare_exchange_strong(noErr, eVal);
							inputRunning = false;
						}
					}
					else {
						//we're going to stop working, so set inputRunning to false to signal that the next read should start it up
						inputRunning = false;
						if (error) {
							//set error flag so other operations quit
							int noErr = 0;
							int eVal = error.value();
							socketErr.compare_exchange_strong(noErr, eVal);
						}
					}
				}
				//run final user callback if it wasn't already killed by socket closing
				if (cb) {
					errorCheck(error, cb);
					cb->execute();
					delete cb;
					cb = nullptr;
				}
			}
		);
	}
	else {
		//queue empty or socket errored: let the next startReadCore restart us
		inputRunning = false;
	}
	return;
}
//core function for all reads, although considering how many lambdas are involved it might not be worth it...
//return: (-2, socket dead), (-1, no data-arg err), (0, no space), (1, success, is enqueued), (2, success, will run next)
// Shared enqueue path for the three startStreamRead* entry points: pushes the
// chunk and, if no read chain is active, posts readAction() to start one.
int lvasynctls::lvTlsSocketBase::startReadCore(inputChunk inChunk)
{
	//general gist of problem and solution is here: https://stackoverflow.com/questions/7754695/boost-asio-async-write-how-to-not-interleaving-async-write-calls
	{
		std::lock_guard<std::mutex> lg(iSLock);
		if (0 == socketErr) {
			inQueue.push(inChunk);
			if (!inputRunning) {
				//when we got the lock the input wasnt running, so we need to start the read operation
				inputRunning = true;
				try
				{
					(engineOwner->getIOService())->post([this]() mutable { readAction(); });
				}
				catch (...)
				{
					//post failed: flag the socket dead and clear the running flag we set
					int noErr = 0;
					int eVal = 42;
					socketErr.compare_exchange_strong(noErr, eVal);
					inputRunning = false;
					return -2;
				}
				return 2;
			}
			else {
				//someone else is taking care of it
				return 1;
			}
		}
		else {
			return -2;
		}
	}
	return 0; //unreachable: every path in the block above returns
}
//streams
// Number of bytes currently buffered and available in the input stream.
int64_t lvTlsSocketBase::getInputStreamSize()
{
	const auto buffered = inputStreamBuffer.size();
	return buffered;
}
// Non-blocking read: copies up to len already-buffered bytes into buffer and
// returns how many were actually copied (possibly 0).
int64_t lvTlsSocketBase::inputStreamReadSome(char* buffer, int64_t len)
{
	const auto copied = inputStream.readsome(buffer, len);
	return copied;
}
// Reads exactly len bytes from the buffered input stream into buffer.
// Returns len on full success, or the partial byte count if the stream hit
// EOF/failure first (the stream state is cleared so later reads still work).
int64_t lvTlsSocketBase::inputStreamReadN(char * buffer, int64_t len)
{
	inputStream.read(buffer, len);
	const bool streamOk = !(inputStream.fail() || inputStream.bad() || inputStream.eof());
	if (streamOk) {
		return len;
	}
	//reset the error flags, then report how many bytes were actually extracted
	inputStream.clear();
	return inputStream.gcount();
}
// Extracts characters into buffer until the delimiter, end of data, or len-1
// characters. Returns len on a clean full read, otherwise the number of
// characters actually extracted (stream state is cleared on failure/EOF).
int64_t lvTlsSocketBase::inputStreamGetLine(char * buffer, int64_t len, char delimiter)
{
	inputStream.getline(buffer, len, delimiter);
	const bool streamOk = !(inputStream.fail() || inputStream.bad() || inputStream.eof());
	if (streamOk) {
		return len;
	}
	//reset the error flags, then report the partial extraction count
	inputStream.clear();
	return inputStream.gcount();
}
// Exposes the underlying SSL stream (e.g. for handshake setup by the owner).
boost::asio::ssl::stream<boost::asio::ip::tcp::socket>& lvTlsSocketBase::getStream()
{
	return socket;
}
// Snapshot of the sticky socket error flag (0 == healthy).
int lvTlsSocketBase::getError()
{
	return socketErr;
}
}
|
<reponame>TheKodeToad/Sol-Client
package io.github.solclient.client.ui.component.impl;
import java.util.function.Consumer;
import io.github.solclient.client.mod.impl.SolClientMod;
import io.github.solclient.client.ui.component.Component;
import io.github.solclient.client.ui.component.ComponentRenderInfo;
import io.github.solclient.client.ui.component.controller.AnimatedColourController;
import io.github.solclient.client.util.Utils;
import io.github.solclient.client.util.data.Colour;
/**
 * A 16x16 tickbox (checkbox) component. Click and hover handling is delegated
 * to a separate "hover controller" component so that clicking anywhere in the
 * controller's area (e.g. a whole settings row) toggles the box.
 */
public class TickboxComponent extends ScaledIconComponent {

	// current checked state
	private boolean value;
	// notified with the new state after every toggle
	private final Consumer<Boolean> booleanConsumer;
	// component whose hover/click area drives this tickbox
	private final Component hoverController;

	public TickboxComponent(boolean value, Consumer<Boolean> booleanConsumer, Component hoverController) {
		// outer box: hover colour when the controller is hovered, normal otherwise
		super("sol_client_tickbox", 16, 16,
				new AnimatedColourController((component, defaultColour) -> component.isHovered() ? SolClientMod.instance.uiHover
						: SolClientMod.instance.uiColour));
		this.value = value;
		this.booleanConsumer = booleanConsumer;
		this.hoverController = hoverController;

		// forward left-clicks on the controller to this component, unless the
		// click already landed on the tickbox itself (avoids double toggling)
		hoverController.onClick((info, button) -> {
			if(!super.isHovered() && button == 0) {
				mouseClicked(info, button);
			}

			return true;
		});

		// inner tick icon: coloured when checked, fully transparent when not
		add(new ScaledIconComponent("sol_client_small_tick", 16, 16,
				new AnimatedColourController((component, defaultColour) -> this.value
						? (isHovered() ? SolClientMod.instance.uiHover : SolClientMod.instance.uiColour)
						: Colour.TRANSPARENT)),
				(component, defaultBounds) -> defaultBounds);
	}

	// hover state mirrors the controller, not this component's own bounds
	@Override
	public boolean isHovered() {
		return hoverController.isHovered();
	}

	// toggles on left click, plays a click sound, and notifies the consumer
	@Override
	public boolean mouseClicked(ComponentRenderInfo info, int button) {
		if(button != 0) {
			return false;
		}

		Utils.playClickSound(true);

		value = !value;
		booleanConsumer.accept(value);
		return true;
	}

	@Override
	public boolean useFallback() {
		return true;
	}

	// fallback rendering (no texture): draw a simple outlined box
	@Override
	public void renderFallback(ComponentRenderInfo info) {
		Utils.drawOutline(getRelativeBounds(), getColour());
	}

}
|
<reponame>jlucktay/f1-go-scrape
package main
import (
"net/url"
"testing"
"github.com/jlucktay/f1-go-scrape/examples/triplebyte/crawler"
"github.com/jlucktay/f1-go-scrape/examples/triplebyte/htmlhelp"
)
// TestHtmlhelp verifies that htmlhelp.Neighbors extracts every anchor href
// from a document, resolving relative links against the root URL while
// leaving non-HTTP schemes (javascript:) untouched, in document order.
func TestHtmlhelp(t *testing.T) {
	root, err := url.Parse("http://example.com")
	if err != nil {
		t.Fatal("Broken test, can't parse root url")
	}
	doc := `
<!DOCTYPE html>
<html>
<body>
<h1>Test Case 1</h1>
<p>I am a paragraph! <a href="javascript:doThing">blah</a></p>
<p>Sometimes I am <a href="./cynical.html">overly cynical</a>, but sometimes I am
<a href="./page2.html">overly naïve.</a></p>
</body>
</html>
`
	ns, errors := htmlhelp.Neighbors(doc, *root)
	if len(errors) > 0 {
		// BUGFIX: report the errors returned by Neighbors; the previous code
		// printed err, which is always nil here (url.Parse succeeded above).
		t.Errorf("unexpected errors: %v", errors)
	}
	expect := []string{"javascript:doThing", "http://example.com/cynical.html", "http://example.com/page2.html"}
	if len(ns) != len(expect) {
		t.Errorf("unexpected neighbors: %v", ns)
		return
	}
	for i, u := range ns {
		if u.String() != expect[i] {
			t.Errorf("neighbor mismatch: %s %s", u.String(), expect[i])
		}
	}
}
// TestCrawler crawls the known fixture site and checks the crawl status and
// HTTP code recorded for a handful of expected pages.
func TestCrawler(t *testing.T) {
	cr := crawler.Crawler{
		Threads: 100,
		Log:     crawler.Silent(),
	}
	graph, err := cr.Crawl("http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/", "")
	if err != nil {
		t.Fatalf("can't crawl: %v", err)
	}

	cases := []struct {
		u      string
		status crawler.NodeStatus
		code   int
	}{
		{"http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2", crawler.SUCCESS, 200},
		{"http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-real", crawler.SUCCESS, 200},
		{"http://triplebyte.github.io/web-crawler-test-site/already-passing-tests/page2-fake", crawler.SUCCESS, 404},
	}

	for _, tt := range cases {
		parsed, err := url.Parse(tt.u)
		if err != nil {
			t.Fatalf("Broken test: %q not a URL", tt.u)
		}
		node, ok := graph.Nodes[*parsed]
		switch {
		case !ok:
			t.Errorf("Crawl(%q): not found", parsed)
		case node.Status != tt.status:
			t.Errorf("Crawl(%q).Status: '%d' '%d'", parsed, tt.status, node.Status)
		case node.Code != tt.code:
			t.Errorf("Crawl(%q).Code: '%d' '%d'", parsed, tt.code, node.Code)
		}
	}
}
|
Results of Fifty Years of Faunistic Survey on Indian Isopods The study of Crustaceans has been carried on continuously in the Zoological Survey of India for well over 50 years. Workers like WoodMason, Alcock, Annandale, Kemp, Sewell and Chopra made a critical study of different orders of this group and have made the Zoological Survey of India a centre of research on Indian Carcinology. In recent years, this trend has been maintained by Tiwari, Ramakrishna, Daniel, Reddiah, Ummerkutty, Biswas, and others. As a result of the work carried out in the Zoological Survey of India for over fifty years, the Department possesses a very rich collection of these animals. These collections have been accumulated from various sources, but most of the material has been collected by the officers of the Indian Museum prior to 1916 and the staff of the Zoological Survey of India, since then. The various military and political expeditions, on which zoological specimens were obtained, have enriched very considerably the collections of the Survey. Another important source from which Crustacea collections have been coming in the past is the R.I.M.S. "Investigator" and the Surgeon Naturalists that have successively worked on board the ship. Among the names of Surgeon Naturalists, Alcock and Sewell stand out significantly as having enriched the Crustacea collections of the Survey. Two important surveys undertaken for the collection of zoological specimens with special reference to crustaceans are enumerated, in brief, below. |
<reponame>Itera/mad-learning-web
import { useState, useEffect } from 'react';
import { useAsyncData } from 'src/hooks/async';
// Props for LoadableContent<D>:
// - resolveContent: async producer of the data to render
// - renderLoading / renderSuccess / renderError: render callbacks for the
//   three states; success and error receive a refresh() trigger
// - loaderDelay: ms to wait before showing the loading UI (avoids flicker)
type LoadableContentProps<D> = {
  resolveContent: () => Promise<D>;
  renderLoading: () => JSX.Element;
  renderSuccess: (data: D, refresh: () => void) => JSX.Element;
  renderError: (e: unknown, refresh: () => void) => JSX.Element;
  loaderDelay: number;
};
// Resolves async content and renders loading / error / success states.
// The loading UI is only shown after loaderDelay ms so fast loads don't flash.
function LoadableContent<D>({
  resolveContent,
  renderLoading,
  renderSuccess,
  renderError,
  loaderDelay,
}: LoadableContentProps<D>) {
  const [data, error, isLoading, refresh] = useAsyncData(resolveContent, [
    resolveContent,
  ]);
  const [showLoader, setShowLoader] = useState(false);

  useEffect(() => {
    // BUGFIX: this effect previously had no dependency list, so it re-ran on
    // every render, cancelling and re-arming the timer each time; frequent
    // re-renders could postpone the loader indefinitely. Arm the timer once
    // (re-arming only if loaderDelay changes).
    const timer = setTimeout(() => setShowLoader(true), loaderDelay);
    return () => clearTimeout(timer);
  }, [loaderDelay]);

  if (isLoading) {
    if (showLoader) {
      return renderLoading();
    }
    return null;
  }

  if (error != null) {
    return renderError(error, refresh);
  }

  return data && renderSuccess(data, refresh);
}
// Wait 300 ms before showing the loader so fast loads never flash a spinner.
LoadableContent.defaultProps = {
  loaderDelay: 300,
};
export default LoadableContent;
|
It is known to use X-ray sources to obtain computerized tomography images. In many prior art applications (and particularly in the field of medical applications) the object being scanned lies fully within the X-ray source fan angle. There are situations, however, when computerized tomography images for relatively larger objects are desired (for objects such as, but not limited to, automobiles and/or larger automobile components, rocket motors, cargo containers, and so forth); i.e., objects that are larger than the available X-ray source fan angle. Also in many prior art applications, the object (such as a human body) comprises a relatively soft or less opaque object, thus requiring smaller amounts of X-ray energy to obtain useful images. There are again situations, however, when computerized tomography images for relatively larger, harder, more opaque objects are desired (again for objects such as, but not limited to, automobiles and their components, rocket motors, cargo containers, and so forth). Considerably higher amounts of X-ray energy are typically required to obtain useful images with such objects.
The challenges become particularly acute when these two problems coincide; that is, when a relatively large object also comprises a relatively opaque object. It becomes quickly impractical (economically and/or physically) to provide a large enough X-ray source to provide both sufficient power and a sufficiently large X-ray source fan angle to accommodate such objects. By one prior art approach, larger objects are scanned using translate/rotate geometries in conjunction with high energy X-ray sources. Such systems often employ detector designs having relatively small apertures as compared to their corresponding detector pitch (often known as small aperture/large pitch detectors) in order to attempt to provide high spatial resolution and good image quality. Unfortunately, such an approach occasions numerous problems including, but not limited to, large and complex mechanical configurations, alignment and accuracy challenges, relatively slow scanning speed, difficulty with or an inability to scan an object in an optimum position for that object, and non-flow through system operation to name a few.
Rotate only geometries have also been proposed (particularly for tangential scanning modes when scanning rocket motors). This approach fails to provide accurate whole object images and typically only accommodates circularly symmetrical objects. Also, this approach only provides accurate images of circumferential details in the outer annulus of the object; radial details and inner ring details are incorrectly imaged.
Skilled artisans will appreciate that elements in the figures are illustrated for simplicity and clarity and have not necessarily been drawn to scale. For example, the dimensions and/or relative positioning of some of the elements in the figures may be exaggerated relative to other elements to help to improve understanding of various embodiments of the present invention. Also, common but well-understood elements that are useful or necessary in a commercially feasible embodiment are often not depicted in order to facilitate a less obstructed view of these various embodiments of the present invention. It will further be appreciated that certain actions and/or steps may be described or depicted in a particular order of occurrence while those skilled in the arts will understand that such specificity with respect to sequence is not actually required. It will also be understood that the terms and expressions used herein have the ordinary meaning as is accorded to such terms and expressions with respect to their corresponding respective areas of inquiry and study except where specific meanings have otherwise been set forth herein. |
<filename>ReactNativeFrontend/ios/Pods/Headers/Public/Flipper-Folly/folly/synchronization/AtomicNotification.h
../../../../../Flipper-Folly/folly/synchronization/AtomicNotification.h |
Comparison of Mechanical and Electrical Characteristics of Various Polymers Blended with Ground Tire Rubber (GTR) and Applications The massive manufacture of tires and the difficulty of reducing the stocks of used tires is a serious environmental problem. There are several methods used for recycling waste tires, one of which is mechanical crushing, in which vulcanized rubber is separated from steel and fibers, resulting in a ground tire rubber (GTR). This can be used in applications such as insulation for work footwear. The aim of the present investigation is to evaluate the use of the GTR when it is mixed with several types of polymer matrix by means of measuring the dielectric and mechanical properties of the resulting composites (polymer + GTR). The analysis is carried out using seven polymeric matrices mixed with different GTR concentrations. With the present study, it is intended to propose a way to reuse the tires out of use as insulation for industrial work footwear, by demonstrating the feasibility of the properties analyzed. |
def compose_contact_sheet(
        media_info,
        frames,
        args):
    """Compose the final contact-sheet image from the captured frames.

    Lays the frames out on an ``args.grid`` (x columns by y rows) grid,
    optionally draws a metadata header above or below the grid and a
    timestamp over each frame, then composites everything onto the
    background color.

    Args:
        media_info: source media description; used for thumbnail sizing,
            the metadata header text and pretty-printing timestamps.
        frames: captured frames; each item exposes ``filename`` (path of
            the capture image) and ``timestamp``.
        args: parsed command-line options controlling grid geometry,
            fonts, colors, spacing and timestamp rendering.

    Returns:
        The composited RGBA ``Image``.
    """
    desired_size = grid_desired_size(
        args.grid,
        media_info,
        width=args.vcs_width,
        horizontal_margin=args.grid_horizontal_spacing)

    # Total grid size; the spacing after the last column/row is not
    # wanted, hence the trailing subtraction.
    width = args.grid.x * (desired_size[0] + args.grid_horizontal_spacing) - args.grid_horizontal_spacing
    height = args.grid.y * (desired_size[1] + args.grid_vertical_spacing) - args.grid_vertical_spacing

    header_font = load_font(args, args.metadata_font, args.metadata_font_size, DEFAULT_METADATA_FONT)
    timestamp_font = load_font(args, args.timestamp_font, args.timestamp_font_size, DEFAULT_TIMESTAMP_FONT)

    header_lines = prepare_metadata_text_lines(
        media_info,
        header_font,
        args.metadata_horizontal_margin,
        width,
        template_path=args.metadata_template_path)

    line_spacing_coefficient = 1.2
    header_line_height = int(args.metadata_font_size * line_spacing_coefficient)
    header_height = 2 * args.metadata_margin + len(header_lines) * header_line_height

    # No header area is reserved when the metadata is hidden.
    if args.metadata_position == "hidden":
        header_height = 0

    final_image_width = width
    final_image_height = height + header_height
    transparent = (255, 255, 255, 0)

    # Build each visual element on its own transparent layer so that
    # captures, header text, timestamp backgrounds and timestamp text can
    # be composited in a fixed order at the end.
    image = Image.new("RGBA", (final_image_width, final_image_height), args.background_color)
    image_capture_layer = Image.new("RGBA", (final_image_width, final_image_height), transparent)
    image_header_text_layer = Image.new("RGBA", (final_image_width, final_image_height), transparent)
    image_timestamp_layer = Image.new("RGBA", (final_image_width, final_image_height), transparent)
    image_timestamp_text_layer = Image.new("RGBA", (final_image_width, final_image_height), transparent)

    draw_header_text_layer = ImageDraw.Draw(image_header_text_layer)
    draw_timestamp_layer = ImageDraw.Draw(image_timestamp_layer)
    draw_timestamp_text_layer = ImageDraw.Draw(image_timestamp_text_layer)
    h = 0

    def draw_metadata_helper():
        """Draw the metadata header at the current vertical offset ``h``."""
        return draw_metadata(
            draw_header_text_layer,
            args,
            header_line_height=header_line_height,
            header_lines=header_lines,
            header_font=header_font,
            header_font_color=args.metadata_font_color,
            start_height=h)

    # Header above the grid, when requested.
    if args.metadata_position == "top":
        h = draw_metadata_helper()

    # Paste the captures left-to-right, top-to-bottom, in timestamp order.
    w = 0
    frames = sorted(frames, key=lambda x: x.timestamp)
    for i, frame in enumerate(frames):
        f = Image.open(frame.filename)
        f.putalpha(args.capture_alpha)
        image_capture_layer.paste(f, (w, h))

        if args.show_timestamp:
            pretty_timestamp = MediaInfo.pretty_duration(frame.timestamp, show_centis=True)
            text_size = timestamp_font.getsize(pretty_timestamp)

            rectangle_hpadding = args.timestamp_horizontal_padding
            rectangle_vpadding = args.timestamp_vertical_padding

            upper_left, bottom_right = compute_timestamp_position(args, w, h, text_size, desired_size,
                                                                  rectangle_hpadding, rectangle_vpadding)

            if not args.timestamp_border_mode:
                # Solid background rectangle behind the timestamp text.
                draw_timestamp_layer.rectangle(
                    [upper_left, bottom_right],
                    fill=args.timestamp_background_color
                )
            else:
                # Border mode: draw the text repeatedly at small offsets in
                # the border color to create an outline; the real text is
                # drawn on top afterwards.
                offset_factor = args.timestamp_border_size
                offsets = [
                    (1, 0),
                    (-1, 0),
                    (0, 1),
                    (0, -1),
                    (1, 1),
                    (1, -1),
                    (-1, 1),
                    (-1, -1)
                ]

                final_offsets = []
                for offset_counter in range(1, offset_factor + 1):
                    final_offsets += [(x[0] * offset_counter, x[1] * offset_counter) for x in offsets]

                for offset in final_offsets:
                    draw_timestamp_text_layer.text(
                        (
                            upper_left[0] + rectangle_hpadding + offset[0],
                            upper_left[1] + rectangle_vpadding + offset[1]
                        ),
                        pretty_timestamp,
                        font=timestamp_font,
                        fill=args.timestamp_border_color
                    )

            draw_timestamp_text_layer.text(
                (
                    upper_left[0] + rectangle_hpadding,
                    upper_left[1] + rectangle_vpadding
                ),
                pretty_timestamp,
                font=timestamp_font,
                fill=args.timestamp_font_color
            )

        # Advance to the next cell; wrap to the start of the next row
        # after the last column. (The two identical conditions from the
        # original were merged into one.)
        w += desired_size[0] + args.grid_horizontal_spacing
        if (i + 1) % args.grid.x == 0:
            h += desired_size[1] + args.grid_vertical_spacing
            w = 0

    # Header below the grid, when requested; undo the trailing row spacing
    # added by the last wrap so the header sits flush with the grid.
    if args.metadata_position == "bottom":
        h -= args.grid_vertical_spacing
        h = draw_metadata_helper()

    # Composite the layers bottom-up onto the background.
    out_image = Image.alpha_composite(image, image_capture_layer)
    out_image = Image.alpha_composite(out_image, image_header_text_layer)
    out_image = Image.alpha_composite(out_image, image_timestamp_layer)
    out_image = Image.alpha_composite(out_image, image_timestamp_text_layer)

    return out_image
PURPOSE The purpose of this study was to investigate the stages of change in smoking cessation after a Coronary Artery Bypass Graft (CABG) and to identify the related factors. METHODS The subjects (n=157) were patients who underwent a CABG in a university hospital from March 1998 to October 2005 and were smokers before the CABG. Data was collected via chart review and a telephone interview, and analyzed with descriptive statistics, the chi-square test, one-way ANOVA, and the Kruskal-Wallis procedure by the SPSS/PC win 12.0 program. RESULTS The subjects smoked for an average of 34 years (21 cigarettes per day) before surgery. Eleven percent of the subjects were in pre-contemplation, 6.4% in contemplation, 13.5% in preparation, 4.5% in action, and 64.5% in the maintenance stage. Nicotine dependence and self-efficacy were different among the groups with different stages of change in smoking cessation. Nicotine dependence was the lowest (p=0.00) and self-efficacy was the highest (p=0.00) in the maintenance stage. The number of subjects in pre-contemplation and contemplation significantly increased 6 years after surgery (p=0.05). CONCLUSIONS To implement effective smoking cessation interventions for CABG patients, the intervention should be developed to accommodate individual readiness for smoking cessation, especially so for those who had a CABG more than 6 years previously.
<reponame>rknop/amuse
#include "octgrav.h"
|
Habits of highly effective dentists. While stress is pervasive in the world today, and particularly in the dental office, coping strategies can counteract it. This paper discusses the personality features of the hardy dentist; the fact that stress and its effects depend largely on individual perception of stressors; and five habits to develop for stress reduction: seeking information, taking direct action, inhibiting action, engaging intrapsychic efforts and calling on others. |
# q1. Students = ['jack', 'jill', 'david', 'silva', 'ronaldo']
#     Marks = [55, 56, 57, 66, 76]
# Make a dictionary using the lists above and delete the key-value
# (student: mark) pairs with the lowest marks.
Marks = [55, 56, 57, 66, 76]
Students = ['jack', 'jill', 'david', 'silva', 'ronaldo']

# Pair each student with the mark at the same index.
d = dict(zip(Students, Marks))

# Remove every entry whose mark equals the minimum (handles ties too).
# The original code only built the dictionary and never performed the
# deletion the exercise asks for.
lowest = min(d.values())
d = {name: mark for name, mark in d.items() if mark != lowest}

print(d)
|
In a bizarre case, doctors of a city hospital found 12 gold biscuits each weighing 33g inside the abdomen of a 63-year-old businessman.
Advertising
The businessman had come to the hospital for surgery, stating that he had swallowed a water bottle cap and wanted to get it out of his body.
“He approached us on April 7 seeking surgery to remove a water bottle cap which he claimed he had accidentally swallowed. We got an X-ray done and it didn’t appear to be a cap. As the gold bars got stacked one behind the other it appeared to be a metal,” said Dr C S Ramachandran, senior consulting surgeon at Sir Ganga Ram hospital.
A team of doctors operated upon him on Apr 9 to get through the abdominal cavity and remove the foreign metal. “We were shocked to find not one but 12 gold biscuits in his abdomen. When we asked him he was not ready to speak. We immediately sealed them in a container and handed it over to the medical superintendent,” said Dr Ramachandran, adding that the patient was discharged on Apr 15.
The hospital authorities later informed the police and subsequently custom department officials were informed. Dr Ramachandran said that the patient, a businessman from Chandni Chowk was a known patient and he had operated upon him thrice in the past since 1989.
“He was operated for gall bladder removal, appendicitis and incisional hernia. He had diabetes. We were shocked when gold biscuits came out of his abdomen during the operation.
“But I am at least happy that I could save his life. If it would have stayed inside for couple of more days, it would have led to severe bleeding and rupture of the intestine and septicemia. Moreover, he had severe diabetes,” said the doctor.
According to hospital sources, the man had swallowed the 12 gold biscuits, worth approximately Rs 12 lakhs in all, to smuggle them into the country from Singapore 10 days ago.
Advertising
He landed in trouble when he failed to get it out through his stool. It eventually started hurting him, which is why he approached the doctors.
<gh_stars>1-10
package dbsf.floodFill;
/* *****************************************************************************
733. 图像渲染
有一幅以二维整数数组表示的图画,每一个整数表示该图画的像素值大小,数值在 0 到 65535 之间。
给你一个坐标 (sr, sc) 表示图像渲染开始的像素值(行 ,列)和一个新的颜色值 newColor,让你重新上色这幅图像。
为了完成上色工作,从初始坐标开始,记录初始坐标的上下左右四个方向上像素值与初始坐标相同的相连像素点,接着再记录这四个方向上符合条件的像素点与他们对应四个方
向上像素值与初始坐标相同的相连像素点,……,重复该过程。将所有有记录的像素点的颜色值改为新的颜色值。
最后返回经过上色渲染后的图像。
示例 1:
输入:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
输出: [[2,2,2],[2,2,0],[2,0,1]]
解析:
在图像的正中间,(坐标(sr,sc)=(1,1)),
在路径上所有符合条件的像素点的颜色都被更改成2。
注意,右下角的像素没有更改为2,
因为它不是在上下左右四个方向上与初始点相连的像素点。
注意:
image 和 image[0] 的长度在范围 [1, 50] 内。
给出的初始点将满足 0 <= sr < image.length 和 0 <= sc < image[0].length。
image[i][j] 和 newColor 表示的颜色值在范围 [0, 65535]内。
*******************************************************************************/
/* *****************************************************************************
题解:
深度优先搜索
复杂度分析:
时间复杂度:O(m * n)
空间复杂度:O(m * n) 栈空间
*******************************************************************************/
import java.util.LinkedList;
import java.util.Queue;
/**
* @author <NAME>
*/
public class FloodFill {

    /** Row/column offsets of the four von Neumann neighbours (up, down, left, right). */
    private static final int[][] DIRECTIONS = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}};

    /**
     * Flood-fills the image starting at (sr, sc) with newColor, using a
     * recursive depth-first search. The image is modified in place and
     * also returned.
     */
    public int[][] floodFillDFS(int[][] image, int sr, int sc, int newColor) {
        fill(image, sr, sc, image[sr][sc], newColor);
        image[sr][sc] = newColor;
        return image;
    }

    /**
     * Recursively repaints every cell reachable from (r, c) whose value
     * equals oldColor. The entry check also guards against infinite
     * recursion when oldColor == newColor.
     */
    private void fill(int[][] image, int r, int c, int oldColor, int newColor) {
        // Already repainted (or oldColor == newColor): nothing to do.
        if (image[r][c] == newColor) {
            return;
        }
        image[r][c] = newColor;
        for (int[] dir : DIRECTIONS) {
            int nr = r + dir[0];
            int nc = c + dir[1];
            if (nr >= 0 && nr < image.length && nc >= 0 && nc < image[0].length
                    && image[nr][nc] == oldColor) {
                fill(image, nr, nc, oldColor, newColor);
            }
        }
    }

    /**
     * Flood-fills the image starting at (sr, sc) with newColor, using an
     * iterative breadth-first search. The image is modified in place and
     * also returned.
     */
    public int[][] floodFillBFS(int[][] image, int sr, int sc, int newColor) {
        int startColor = image[sr][sc];
        // Nothing to do when the target color equals the start color.
        if (startColor == newColor) {
            return image;
        }
        int rows = image.length;
        int cols = image[0].length;
        Queue<int[]> pending = new LinkedList<>();
        pending.offer(new int[]{sr, sc});
        while (!pending.isEmpty()) {
            int[] cell = pending.poll();
            image[cell[0]][cell[1]] = newColor;
            for (int[] dir : DIRECTIONS) {
                int nr = cell[0] + dir[0];
                int nc = cell[1] + dir[1];
                if (nr >= 0 && nr < rows && nc >= 0 && nc < cols
                        && image[nr][nc] == startColor) {
                    pending.offer(new int[]{nr, nc});
                }
            }
        }
        return image;
    }
}
|
WOODWARD, Okla., Oct. 21 (UPI) -- An Oklahoma motorcyclist's helmet camera was recording when she ran out into a busy intersection to rescue a kitten that appeared to fall from a car.
The video, posted to YouTube by user E511, shows the motorcyclist's point of view as she waits at a Woodward intersection and a kitten appears to fall from a red car traveling across the road.
The woman rode her motorcycle into the middle of the intersection, throwing her hands up to signal other drivers to stop, before getting off her bike and running to grab the kitten.
The motorcyclist gave the kitten to a passer-by on the side of the road before going back for her motorcycle.
The biker said in a Reddit post she has dubbed the kitten Skidmark. She said she contacted the owner of the red car to see if they were the owner of the kitten, but she has yet to hear back. |
/*****************************************************
*
* Checks for any previous update, and re-runs it.
*
*****************************************************/
private void checkNotifyState()
{
if ( KiteSDK.DEBUG_RETAINED_FRAGMENT ) Log.d( LOG_TAG, "checkNotifyState() mStateNotifier = " + mStateNotifier );
if ( mStateNotifier != null )
{
Object callbackActivity = getCallbackActivity();
if ( KiteSDK.DEBUG_RETAINED_FRAGMENT ) Log.d( LOG_TAG, " callbackActivity = " + callbackActivity );
if ( callbackActivity != null ) mStateNotifier.notify( callbackActivity );
Object callbackFragment = getCallbackFragment();
if ( KiteSDK.DEBUG_RETAINED_FRAGMENT ) Log.d( LOG_TAG, " callbackFragment = " + callbackFragment );
if ( callbackFragment != null ) mStateNotifier.notify( callbackFragment );
}
} |
/**
* A ReferenceChoiceSet contains references to Objects. Often these are
* CDOMObjects, but that is not strictly required.
*
* The contents of a ReferenceChoiceSet is defined at construction of the
* ReferenceChoiceSet. The contents of a ReferenceChoiceSet is fixed, and will
* not vary by the PlayerCharacter used to resolve the ReferenceChoiceSet.
*
* @param <T>
* The class of object this ReferenceChoiceSet contains.
*/
public class ReferenceChoiceSet<T> implements PrimitiveChoiceSet<T>
{
/**
* The underlying Set of CDOMReferences that contain the objects in this
* ReferenceChoiceSet
*/
private final Collection<CDOMReference<T>> refCollection;
/**
* Constructs a new ReferenceChoiceSet which contains the Set of objects
* contained within the given CDOMReferences. The CDOMReferences do not need
* to be resolved at the time of construction of the ReferenceChoiceSet.
*
* This constructor is reference-semantic and value-semantic. Ownership of
* the Collection provided to this constructor is not transferred.
* Modification of the Collection (after this constructor completes) does
* not result in modifying the ReferenceChoiceSet, and the
* ReferenceChoiceSet will not modify the given Collection. However, this
* ReferenceChoiceSet will maintain strong references to the CDOMReference
* objects contained within the given Collection.
*
* @param col
* A Collection of CDOMReferences which define the Set of objects
* contained within the ReferenceChoiceSet
* @throws IllegalArgumentException
* if the given Collection is null or empty.
*/
public ReferenceChoiceSet(Collection<? extends CDOMReference<T>> col)
{
super();
if (col == null)
{
throw new IllegalArgumentException(
"Choice Collection cannot be null");
}
if (col.isEmpty())
{
throw new IllegalArgumentException(
"Choice Collection cannot be empty");
}
refCollection = new WeightedCollection<CDOMReference<T>>(col);
}
/**
* Returns a representation of this ReferenceChoiceSet, suitable for storing
* in an LST file.
*
* @param useAny
* use "ANY" for the global "ALL" reference when creating the LST
* format
* @return A representation of this ReferenceChoiceSet, suitable for storing
* in an LST file.
*/
@Override
public String getLSTformat(boolean useAny)
{
WeightedCollection<CDOMReference<?>> sortedSet = new WeightedCollection<CDOMReference<?>>(
ReferenceUtilities.REFERENCE_SORTER);
sortedSet.addAll(refCollection);
return ReferenceUtilities.joinLstFormat(sortedSet, Constants.COMMA,
useAny);
}
/**
* The class of object this ReferenceChoiceSet contains.
*
* The behavior of this method is undefined if the CDOMReference objects
* provided during the construction of this ReferenceChoiceSet are not yet
* resolved.
*
* @return The class of object this ReferenceChoiceSet contains.
*/
@Override
public Class<T> getChoiceClass()
{
return refCollection == null ? null : refCollection.iterator().next()
.getReferenceClass();
}
/**
* Returns a Set containing the Objects which this ReferenceChoiceSet
* contains. The contents of a ReferenceChoiceSet is fixed, and will not
* vary by the PlayerCharacter used to resolve the ReferenceChoiceSet.
*
* The behavior of this method is undefined if the CDOMReference objects
* provided during the construction of this ReferenceChoiceSet are not yet
* resolved.
*
* Ownership of the Set returned by this method will be transferred to the
* calling object. Modification of the returned Set should not result in
* modifying the ReferenceChoiceSet, and modifying the ReferenceChoiceSet
* after the Set is returned should not modify the Set. However,
* modification of the underlying objects contained within the Set will
* result in modification of the object contained in this
* ReferenceChoiceSet.
*
* @param pc
* The PlayerCharacter for which the choices in this
* ReferenceChoiceSet should be returned.
* @return A Set containing the Objects which this ReferenceChoiceSet
* contains.
*/
@Override
public Set<T> getSet(PlayerCharacter pc)
{
Set<T> returnSet = new HashSet<T>();
for (CDOMReference<T> ref : refCollection)
{
returnSet.addAll(ref.getContainedObjects());
}
return returnSet;
}
/**
* Returns the consistent-with-equals hashCode for this ReferenceChoiceSet
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode()
{
return refCollection.size();
}
/**
* Returns true if this ReferenceChoiceSet is equal to the given Object.
* Equality is defined as being another ReferenceChoiceSet object with equal
* underlying contents.
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj)
{
if (obj == this)
{
return true;
}
if (obj instanceof ReferenceChoiceSet)
{
ReferenceChoiceSet<?> other = (ReferenceChoiceSet<?>) obj;
return refCollection.equals(other.refCollection);
}
return false;
}
/**
* Returns the GroupingState for this ReferenceChoiceSet. The GroupingState
* indicates how this ReferenceChoiceSet can be combined with other
* PrimitiveChoiceSets.
*
* @return The GroupingState for this ReferenceChoiceSet.
*/
@Override
public GroupingState getGroupingState()
{
GroupingState state = GroupingState.EMPTY;
for (CDOMReference<T> ref : refCollection)
{
state = ref.getGroupingState().add(state);
}
return state.compound(GroupingState.ALLOWS_UNION);
}
} |
Proteomic changes induced by podophyllotoxin in human cervical carcinoma HeLa cells. Podophyllotoxin, a kind of lignan extracted from the Podophyllum plant, has been shown to inhibit the growth of various carcinoma cells. However, the molecular mechanism remains unclear. In this study, the inhibition of cell growth and changes in protein expression induced by podophyllotoxin were investigated in human cervical carcinoma HeLa cells. Our results demonstrate that Podophyllotoxin inhibits HeLa cell growth and induces apoptosis. By using proteomic techniques, seven proteins were found to be significantly regulated by podophyllotoxin compared to the untreated control; among them, four were down-regulated and three were up-regulated. All of the seven proteins were identified with peptide mass fingerprinting using matrix-assisted laser desorption/ionization time-of-flight mass spectrometry (MALDI-TOF-MS) after in-gel trypsin digestion. Five of these proteins are involved in protein metabolism, and the other two play roles in cell communication and signaling transduction pathways. It is suggested that the effect of podophyllotoxin on the growth of tumor cells is significantly related to the metabolism-associated proteins. |
Ritonavir's role in reducing fentanyl clearance and prolonging its half-life. BACKGROUND The human immunodeficiency virus protease inhibitor ritonavir is a potent inhibitor of the cytochrome P450 3A4 enzyme, and ritonavir's concomitant administration with the substrates of this enzyme may lead to dangerous drug interactions. METHODS The authors investigated possible interactions between ritonavir and intravenously administered fentanyl in a double-blind, placebo-controlled, cross-over study in two phases. Twelve healthy volunteers received orally ritonavir or placebo for 3 days; the dose of ritonavir was 200 mg three times on the first day and 300 mg three times on the second. The last dose of ritonavir 300 mg or placebo was given on the morning of the third day. On the second day, 2 h after the afternoon pretreatment dose, fentanyl 5 microg/kg was injected intravenously in 2 min with naloxone to moderate its effects, and 15 timed venous blood samples were collected for 18 h. RESULTS Ritonavir reduced the clearance of fentanyl by 67% from 15.6+/-8.2 to 5.2+/-2.0 ml x min(-1) x kg(-1) (P<0.01). The area under the fentanyl plasma concentration-time curve from 0 to 18 h was increased from 4.8+/-2.7 to 8.8+/-2.3 ng x ml(-1) x h(-1) by ritonavir (P<0.01). Ritonavir did not affect the initial concentrations and the steady-state volume of distribution of fentanyl. One subject discontinued participation before fentanyl administration because of severe side effects, and during the study 8 of the remaining 11 subjects reported nausea. CONCLUSIONS Ritonavir can inhibit the metabolism of fentanyl significantly, so caution should be exercised if fentanyl is given to patients receiving ritonavir medication. |
Media playback is not supported on this device Fresh doubt surrounds Bahrain GP
Formula 1's return to Bahrain in 2012 is being called into question following continuing unrest in the country.
This year's race was called off following a clampdown on pro-democracy protests in the Gulf kingdom.
Red Bull team principal Christian Horner said F1 bosses would discuss the issue in the coming weeks.
"It's always concerning with the media reports that you hear," said Horner. "But [we] trust in the promoter and FIA to deal with it accordingly."
He said Bahrain would "inevitably" be discussed at the next meeting of the governing body the FIA's World Council, F1's decision-making body.
Earlier this month, the Gulf kingdom faced international criticism after medical staff who treated protestors were given prison sentences of up to 15 years.
Last month, the FIA published the calendar for next year with Bahrain pencilled in for 22 April.
BAHRAIN GP TIMELINE 21 February - Civil unrest forces cancellation of Bahrain GP
- Civil unrest forces cancellation of Bahrain GP 3 June - Bahrain GP rescheduled for 30 October
- Bahrain GP rescheduled for 30 October 15 June - FIA confirms no Bahrain race in 2011
FIA confirms no Bahrain race in 2011 1 September - FIA confirms return of Bahrain race in 2012
In February this year, the Bahrain pre-season test and race were called off following anti-government protests in the Gulf Kingdom in which more than 30 people lost their lives.
The FIA said the race could still be rescheduled and in June it announced that it would take place on 30 October, with the inaugural Indian Grand Prix moving to a date in December.
The decision proved highly controversial and outraged human rights campaigners, with nearly half a million people signing an online petition demanding a boycott.
F1 teams made it clear they were opposed to the rescheduling of the race. They protested on grounds of logistics, but were known to have concerns about going to Bahrain in the circumstances.
Two weeks later, the sport's governing body confirmed the race would not be part of the 2011 programme.
The teams are understood to still have concerns about going to Bahrain in the context of the political situation in the country. |
Recognition of nonverbal communication of emotion after traumatic brain injury. BACKGROUND Individuals who have had a traumatic brain injury (TBI) often have difficulty processing nonverbal communication. The published research in this area has focused on a TBI patient's ability to recognize facial expression, vocal intonation, and postural expression (Croker, 2005; Hopkins, Dywan & Segalowitz, 2002). OBJECTIVE This study compared the non-verbal processing skills of brain-injured patients versus non-injured controls in all three domains. METHODS The stimuli were photographs of facial and postural expressions and audio recordings of intonational expressions. RESULTS The results indicated that persons with TBI have particular difficulty recognizing non-verbal communication resulting from vocal intonations. CONCLUSIONS The TBI patients had difficulty processing tonality; therefore, it is reasonable to suggest that clinicians, friends, and family members should emphasize the explicit verbal content of spoken language when speaking to a person with TBI.
import { ListItemText, Typography, withStyles, ListItemIcon, MenuItem } from '@material-ui/core';
import * as React from 'react';
// Props accepted by AltinnMenuItem.
export interface IAltinnMenuItemProps {
  // Visible label text of the menu entry.
  text: string;
  // CSS class of the icon rendered before the label.
  iconClass: string;
  // Click handler invoked when the item is selected.
  onClick: (event: React.SyntheticEvent) => void;
  // Disables the item when true.
  disabled?: boolean;
  // DOM id of the rendered menu item.
  id: string;
}

// Material-UI MenuItem with the vertical padding removed.
const StyledMenuItem = withStyles({
  root: {
    paddingTop: '0px',
    paddingBottom: '0px',
  },
})(MenuItem);

// ListItemIcon with a narrower minimum width so the icon sits close to the text.
const AltinnMenuItemIcon = withStyles({
  root: {
    minWidth: '3.0rem',
  },
})(ListItemIcon);
/**
 * Renders a single menu entry: an icon followed by a caption-styled label,
 * wired to the supplied click handler.
 */
export function AltinnMenuItem(props: IAltinnMenuItemProps) {
  const { text, iconClass, onClick, disabled, id } = props;
  return (
    <StyledMenuItem
      id={id}
      disabled={disabled}
      onClick={onClick}
    >
      <AltinnMenuItemIcon>
        <i className={iconClass} />
      </AltinnMenuItemIcon>
      <ListItemText disableTypography={true}>
        <Typography variant='caption'>
          {text}
        </Typography>
      </ListItemText>
    </StyledMenuItem>
  );
}
export default AltinnMenuItem;
|
import java.util.Scanner;
public class Main {
    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        int length = in.nextInt();
        int[] values = new int[length + 1];  // 1-indexed input values
        for (int i = 1; i <= length; i++) {
            values[i] = in.nextInt();
        }

        // dp[i][s]: minimum total cost for the first i elements when the
        // i-th element is handled under option s (0..4); each option may
        // only follow options with an index <= its own.
        long[][] dp = new long[length + 1][5];
        for (int i = 1; i <= length; i++) {
            // Cost of making the value odd: free if already odd, 1 step if
            // even and non-zero, 2 steps if it is zero.
            int toOdd = (values[i] % 2 == 0) ? 0 : 1;
            if (values[i] == 0) {
                toOdd = 2;
            }
            // Cost of making the value even: free if already even.
            int toEven = (values[i] % 2 == 1) ? 0 : 1;

            dp[i][0] = dp[i - 1][0] + values[i];
            dp[i][1] = min(dp[i - 1][0], dp[i - 1][1]) + toOdd;
            dp[i][2] = min(dp[i - 1][0], dp[i - 1][1], dp[i - 1][2]) + toEven;
            dp[i][3] = min(dp[i - 1][0], dp[i - 1][1], dp[i - 1][2], dp[i - 1][3]) + toOdd;
            dp[i][4] = min(dp[i - 1][0], dp[i - 1][1], dp[i - 1][2], dp[i - 1][3], dp[i - 1][4]) + values[i];
        }

        // Answer is the cheapest final state; the whole dp row can be fed
        // to the varargs helper directly.
        System.out.println(min(dp[length]));
    }

    /** Minimum of one or more long values (Long.MAX_VALUE when given none). */
    private static long min(long... candidates) {
        long best = Long.MAX_VALUE;
        for (long candidate : candidates) {
            best = Math.min(best, candidate);
        }
        return best;
    }
}
|
import { IGoal, GoalLevelEnum, IKeyResult, IOrganizationTeam, IEmployee } from '@gauzy/contracts';
import { Entity, Column, OneToMany, ManyToOne, Index, RelationId } from 'typeorm';
import { ApiProperty } from '@nestjs/swagger';
import { IsOptional, IsEnum, IsString } from 'class-validator';
import {
Employee,
KeyResult,
OrganizationTeam,
TenantOrganizationBaseEntity
} from '../core/entities/internal';
// TypeORM entity for the "goal" table (OKR-style goal tracking).
@Entity('goal')
export class Goal extends TenantOrganizationBaseEntity implements IGoal {
	// Title of the goal.
	@ApiProperty({ type: () => String })
	@Column()
	name: string;
	// Optional longer description.
	// NOTE(review): the column is not declared nullable even though the
	// field is optional — confirm this is intentional.
	@ApiProperty({ type: () => String })
	@Column()
	@IsOptional()
	description?: string;
	// Deadline, stored as a string (format not visible here — confirm with callers).
	@ApiProperty({ type: () => String })
	@Column()
	deadline: string;
	// Scope of the goal; validated against GoalLevelEnum.
	@ApiProperty({ type: () => String, enum: GoalLevelEnum })
	@IsEnum(GoalLevelEnum)
	@Column()
	level: string;
	// Numeric progress value (presumably a percentage — confirm range with callers).
	@ApiProperty({ type: () => Number })
	@Column()
	progress: number;
	/*
    |--------------------------------------------------------------------------
    | @ManyToOne 
    |--------------------------------------------------------------------------
    */
	/**
	 * OrganizationTeam
	 */
	// Team owning this goal; goals are deleted when the team is (CASCADE).
	@ApiProperty({ type: () => OrganizationTeam })
	@ManyToOne(() => OrganizationTeam, (team) => team.goals, {
		onDelete: 'CASCADE'
	})
	ownerTeam?: IOrganizationTeam;
	// Foreign-key column for ownerTeam.
	@ApiProperty({ type: () => String })
	@RelationId((it: Goal) => it.ownerTeam)
	@IsString()
	@IsOptional()
	@Index()
	@Column({ nullable: true })
	ownerTeamId?: string;
	/**
	 * Owner Employee
	 */
	// Employee owning this goal; goals are deleted when the employee is (CASCADE).
	@ApiProperty({ type: () => Employee })
	@ManyToOne(() => Employee, (employee) => employee.goals, {
		onDelete: 'CASCADE'
	})
	ownerEmployee?: IEmployee;
	// Foreign-key column for ownerEmployee.
	@ApiProperty({ type: () => String })
	@RelationId((it: Goal) => it.ownerEmployee)
	@IsString()
	@IsOptional()
	@Index()
	@Column({ nullable: true })
	ownerEmployeeId?: string;
	/**
	 * Lead Employee
	 */
	// Employee leading this goal; cleaned up with the employee (CASCADE).
	@ApiProperty({ type: () => Employee })
	@ManyToOne(() => Employee, (employee) => employee.leads, {
		onDelete: 'CASCADE'
	})
	lead?: IEmployee;
	// Foreign-key column for lead.
	@ApiProperty({ type: () => String })
	@RelationId((it: Goal) => it.lead)
	@IsString()
	@IsOptional()
	@Index()
	@Column({ nullable: true })
	leadId?: string;
	/**
	 * KeyResult
	 */
	// Key result this goal is aligned to, if any.
	@ApiProperty({ type: () => KeyResult })
	@ManyToOne(() => KeyResult, (keyResult) => keyResult.id)
	alignedKeyResult?: IKeyResult;
	// Foreign-key column for alignedKeyResult.
	@ApiProperty({ type: () => String })
	@RelationId((it: Goal) => it.alignedKeyResult)
	@IsString()
	@IsOptional()
	@Index()
	@Column({ nullable: true })
	alignedKeyResultId?: string;
	/*
    |--------------------------------------------------------------------------
    | @OneToMany 
    |--------------------------------------------------------------------------
    */
	/**
	 * KeyResult
	 */
	// Key results belonging to this goal; persisted together with it (cascade).
	@ApiProperty({ type: () => KeyResult, isArray: true })
	@OneToMany(() => KeyResult, (keyResult) => keyResult.goal, {
		cascade: true
	})
	keyResults?: IKeyResult[];
}
|
<filename>Analysis/AnalysisOrg/IO/TrackProgress.cpp
/* Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved */
#include "TrackProgress.h"

#include <string>
// Starts the wall-clock timers used for elapsed-time reporting.
// The log file is not opened here; InitFPLog() must be called first.
TrackProgress::TrackProgress(){
  fpLog = NULL;
  time(&analysis_start_time);
  time(&analysis_current_time);
}
// Reports the completion time on stdout and closes the log file.
TrackProgress::~TrackProgress(){
  fprintf (stdout, "Completion Time = %s\n", ctime (&analysis_current_time));
  fflush (stdout);
  // fclose(NULL) is undefined behavior: guard against InitFPLog() never
  // having been called (the constructor initializes fpLog to NULL).
  if (fpLog != NULL)
    fclose (fpLog);
}
// Prints an elapsed-time checkpoint named my_state to stdout and to the
// process-parameters log.
void TrackProgress::ReportState(const char *my_state){
  time(&analysis_current_time);
  // Compute the elapsed time once so stdout and the log report the same
  // value (the original evaluated difftime twice).
  double elapsed_minutes = difftime(analysis_current_time, analysis_start_time) / 60;
  fprintf(stdout, "\n%s: Elapsed: %.1lf minutes\n\n", my_state, elapsed_minutes);
  // Guard: InitFPLog() may not have been called (fpLog would be NULL).
  if (fpLog != NULL)
    fprintf(fpLog, "%s = %.1lf minutes\n", my_state, elapsed_minutes);
  fflush(NULL);
}
// Opens (in append mode) the processParameters.txt log inside the results
// folder, exiting with errno on failure.
void TrackProgress::InitFPLog (CommandLineOpts &inception_state)
{
  // std::string handles the path concatenation; this replaces the previous
  // manual malloc/sprintf/free sequence, which risked leaking on early exit.
  std::string fileName = std::string ( inception_state.sys_context.results_folder ) + "/processParameters.txt";
  fopen_s ( &fpLog, fileName.c_str(), "a" );
  if ( !fpLog ) {
    perror ( fileName.c_str() );
    exit ( errno );
  }
}
// Dumps the full set of processing parameters (run identifiers, flow and
// key configuration, chip/region geometry, build version, ...) to the
// processParameters.txt log. Requires InitFPLog() to have opened fpLog.
void TrackProgress::WriteProcessParameters (CommandLineOpts &inception_state)
{
  // Dump the processing parameters to a file
  fprintf ( fpLog, "[global]\n" );
  fprintf ( fpLog, "dataDirectory = %s\n", inception_state.sys_context.dat_source_directory );
  fprintf ( fpLog, "Smoothing File = %s\n", inception_state.img_control.tikSmoothingFile );
  fprintf ( fpLog, "runId = %s\n", inception_state.sys_context.runId );
  fprintf ( fpLog, "flowOrder = %s\n", inception_state.flow_context.flowOrder );
  fprintf ( fpLog, "washFlow = %d\n", inception_state.img_control.has_wash_flow );
  fprintf ( fpLog, "libraryKey = %s\n", inception_state.key_context.libKey );
  fprintf ( fpLog, "tfKey = %s\n", inception_state.key_context.tfKey );
  fprintf ( fpLog, "minNumKeyFlows = %d\n", inception_state.key_context.minNumKeyFlows );
  fprintf ( fpLog, "maxNumKeyFlows = %d\n", inception_state.key_context.maxNumKeyFlows );
  fprintf ( fpLog, "nokey = %s\n", (inception_state.bkg_control.nokey ? "true":"false" ) );
  fprintf ( fpLog, "numFlows = %d\n", inception_state.flow_context.numTotalFlows );
  fprintf ( fpLog, "cyclesProcessed = %d\n", inception_state.flow_context.numTotalFlows/4 ); // @TODO: may conflict with PGM now
  fprintf ( fpLog, "framesProcessed = %d\n", inception_state.img_control.maxFrames );
  fprintf ( fpLog, "framesInData = %d\n", inception_state.img_control.totalFrames );
  fprintf ( fpLog, "bkgModelUsed = %s\n", "yes" );
  fprintf ( fpLog, "nucTraceCorrectionUsed = %s\n", ( inception_state.no_control.NUC_TRACE_CORRECT ? "true":"false" ) );
  fprintf ( fpLog, "nearest-neighborParameters = Inner: (%d,%d) Outer: (%d,%d)\n", inception_state.img_control.NNinnerx, inception_state.img_control.NNinnery, inception_state.img_control.NNouterx, inception_state.img_control.NNoutery );
  fprintf ( fpLog, "Advanced beadfind = %s\n", inception_state.bfd_control.BF_ADVANCED ? "enabled":"disabled" );
  fprintf ( fpLog, "use pinned wells = %s\n", inception_state.no_control.USE_PINNED ? "true":"false" );
  fprintf ( fpLog, "use exclusion mask = %s\n", inception_state.loc_context.exclusionMaskSet ? "true":"false" );
  fprintf ( fpLog, "Version = %s\n", IonVersion::GetVersion().c_str() );
  fprintf ( fpLog, "Build = %s\n", IonVersion::GetBuildNum().c_str() );
  fprintf ( fpLog, "GitHash = %s\n", IonVersion::GetGitHash().c_str() );
  // Chip and region geometry.
  fprintf ( fpLog, "Chip = %d,%d\n", inception_state.loc_context.chip_len_x,inception_state.loc_context.chip_len_y );
  fprintf ( fpLog, "Block = %d,%d,%d,%d\n", inception_state.loc_context.chip_offset_x, inception_state.loc_context.chip_offset_y, inception_state.loc_context.cols, inception_state.loc_context.rows );
  for ( int q=0;q<inception_state.loc_context.numCropRegions;q++ )
    fprintf ( fpLog, "Cropped Region = %d,%d,%d,%d\n", inception_state.loc_context.cropRegions[q].col, inception_state.loc_context.cropRegions[q].row, inception_state.loc_context.cropRegions[q].w, inception_state.loc_context.cropRegions[q].h );
  fprintf ( fpLog, "Analysis Region = %d,%d,%d,%d\n", inception_state.loc_context.chipRegion.col, inception_state.loc_context.chipRegion.row, inception_state.loc_context.chipRegion.col+inception_state.loc_context.chipRegion.w, inception_state.loc_context.chipRegion.row+inception_state.loc_context.chipRegion.h );
  fprintf ( fpLog, "numRegions = %d\n", inception_state.loc_context.numRegions );
  fprintf ( fpLog, "regionRows = %d\nregionCols = %d\n", inception_state.loc_context.regionsY, inception_state.loc_context.regionsX );
  fprintf ( fpLog, "regionSize = %dx%d\n", inception_state.loc_context.regionXSize, inception_state.loc_context.regionYSize );
  //fprintf (fpLog, "\tRow Column Height Width\n");
  //for (int i=0;i<numRegions;i++)
  //  fprintf (fpLog, "[%3d] %5d %5d %5d %5d\n", i, regions[i].row, regions[i].col,regions[i].h,regions[i].w);
  fflush ( NULL );
}
|
Researchers in Toronto and British Columbia have discovered something that many cyclists have found out the hard way: streetcar tracks can be a hazard to your health.
The study, by researchers at Ryerson University and the University of British Columbia, found that about a third of all serious bike crashes that took place in downtown Toronto were caused by streetcar tracks.
"We're hoping that putting some numbers [against this] will help set priorities," says Anne Harris, one of the study's co-authors. Speaking to Here and Now's Gill Deacon, Harris explained that while there are some measures cyclists can take to protect themselves, infrastructure changes — and especially building separated bike lanes — is the best remedy.
The study, published last week in the journal BMC Public Health, looked at 276 bike crashes that happened in Toronto's downtown core between May 2008 and November 2009 and were serious enough to require hospitalization. Researchers found that 87 of those spills happened after the cyclists' tires either became caught in the tracks, or skidded across them.
They said the crashes happened most often on streets that contained parked cars and which had no bike lanes.
'A real spaghetti of tracks'
"Training and attention, these are all good things to advocate for, but one good thing that really offers protection is physical protection ... a separated bike lane," Harris says.
The study's authors say they hope the findings will spur urban designers to create more of those lanes, as well as light rail networks with their own rights-of-way, which also reduce the number of accidents.
Bike lanes are good, but ones that are physically separated from the rest of the roadway are even better, say the study's authors. (Matt Galloway/CBC)
"Cyclists who are injured on these tracks reported circumstances like having to maneuver around other road users," Harris explains. That maneuvering means that bike riders often get caught in the flangeways — the openings through which streetcars roll through the tracks.
Cyclists turning left were also at a high risk of streetcar track-related accidents, the study found. Intersections can be "a real spaghetti of tracks," says Harris, "which can make it really difficult to cross at the recommended 90 degree angle."
Fatter tires might not help
Aside from building more separated bike lanes and rail systems with their own rights-of-way, the researchers recommend that cyclists be encouraged to use bikes with wider tires. Most bikes on the road today have tires that are "still narrower than the narrowest point on that flangeway," Harris says.
A TTC worker attends the scene after a cyclist was struck by a streetcar in a 2015 accident. (CBC)
But that may not be enough, according to Ross Lyle, head mechanic at Toronto's Bikes on Wheels. "People think a big fat tire is going to be okay. A streetcar track is way wider than you think and it'll accept anything," he told CBC's Ali Chiasson. "So there's really no bike that's better for it."
His safety advice? "It's really just about paying attention." |
Charms and potions are unlikely to feature on the curriculum any time soon, but the staff of Hogwarts could still find a friendly welcome in any muggle school.
Three teachers from the Harry Potter books – Albus Dumbledore, Minerva McGonagall and Severus Snape – have been named among the top 10 favourite fictional teachers, according to school staff.
Professor Dumbledore, Harry's mentor through much of the series, took first place, whilst the tough-but-fair Professor McGonagall was third.
In second place was the beloved Miss Honey from the Roald Dahl classic Matilda.
Professor Snape – seen as a bad guy for much of the Potter saga until his redemption in the final book – was seventh, in the Times Educational Supplement's (TES) survey of 1,200 muggle (non-magical) educators.
Proving that teachers admire a rule-breaker, also included in the top 10 were unorthodox English teacher Mr Keating from Dead Poets Society (fourth), quick-tempered Mr Gilbert from The Inbetweeners (ninth) and crystal-meth making chemistry teacher Walter White from Breaking Bad (tenth).
Philip Nel, a professor of English at Kansas State University and a Harry Potter expert, suggested that the inclusion of three Hogwarts teachers is down to not just the popularity of the series, but also author J K Rowling's understanding of the teaching profession.
He told the TES: "In many ways, she uses her books to highlight what does and doesn't work in the classroom.
"Each time you're looking at a teacher in the Harry Potter books, you're also looking at Rowling's sense of what teaching is, and what good and bad teaching looks like."
Beth Marshall, associate professor of education at Simon Fraser University in Canada, suggested that teachers who do not follow the accepted norms for teacher behaviour can be inspirational.
"Schooling is mostly learning about how to follow rules, and a lot of these teachers that we love break those rules – Mr Keating, Whoopi Goldberg in Sister Act II," she told the TES.
"These teachers who break the rules, who ask us to do something extraordinary within a school setting, they stick with us." |
Intravenous left ventriculography utilizing digital subtraction technique. To detect the left ventricular boundary in the intravenous ventriculography, we used a subtraction technique for background suppression. Images containing contrast medium and reference mask images were transferred to a computer through a flying spot scanner and stored on the digital disc. Stored reference mask images were subtracted from the digitized contrast images. The resulting images were then electronically enhanced to extract the left ventricular (LV) image. The LV boundary was delineated with an algorithm we have developed and the volume of the LV cavity was calculated automatically. The validity of this method was compared with data obtained from conventional left ventriculogram (LVG). In 11 patients, values for end-diastolic volume (EDV), end-systolic volume (ESV) and ejection fraction (EF) calculated from the intravenous LVG were correlated closely with those from the conventional LVG (128 +/- 38 (SD) vs 133 +/- 39 ml, r = 0.95; 50 +/- 28 vs 53 +/- 30 ml, r = 0.98; 63 +/- 10 vs 62 +/- 12%, r = 0.96, respectively). Nine patients with valvular regurgitation were followed up serially after valve replacement. EDV index fell significantly after corrective surgery (145 +/- 50 to 81 +/- 33 ml/m2, p less than 0.02), whereas, EF was affected variably depending upon the preoperative state (58 +/- 13 to 61 +/- 11%, not significant). Thus, this method is less invasive than conventional LVG and has successfully allowed for sequential determination of ventricular function on an outpatient basis. |
/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil; -*- */
/*
* Copyright (c) 2010 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __HIO_REQUEST_HTTP_H__
#define __HIO_REQUEST_HTTP_H__
#include <glib-object.h>
#include <hrt/hrt-buffer.h>
#include <hio/hio-incoming.h>
#include <hio/hio-response-http.h>
G_BEGIN_DECLS
/* Instance and class typedefs for the HTTP request object. */
typedef struct HioRequestHttp HioRequestHttp;
typedef struct HioRequestHttpClass HioRequestHttpClass;
/* An incoming HTTP request, derived from HioIncoming.  Holds the parsed
 * request-line fields and the HioResponseHttp paired with this request.
 * NOTE(review): ownership of the string members is presumably held by the
 * instance (freed at finalize) — confirm in the .c implementation.
 */
struct HioRequestHttp {
HioIncoming parent_instance;
char *method;         /* request method string, e.g. "GET" */
guint16 major;        /* HTTP protocol major version (see accessor below) */
guint16 minor;        /* HTTP protocol minor version */
char *path;           /* request path component */
char *query_string;   /* query string component (may be NULL — verify) */
HioResponseHttp *response;  /* response paired via hio_request_http_set_response() */
};
/* Class structure.  Subtypes override add_header to receive each parsed
 * header as a (name, value) pair of HrtBuffers; invoked through
 * hio_request_http_add_header().
 */
struct HioRequestHttpClass {
HioIncomingClass parent_class;
/* vfunc: called once per HTTP header parsed from the request */
void (* add_header) (HioRequestHttp *request,
HrtBuffer *name,
HrtBuffer *value);
};
/* Standard GObject boilerplate: GType getter plus checked cast and
 * type-check macros for HioRequestHttp instances and classes.
 */
#define HIO_TYPE_REQUEST_HTTP (hio_request_http_get_type ())
#define HIO_REQUEST_HTTP(object) (G_TYPE_CHECK_INSTANCE_CAST ((object), HIO_TYPE_REQUEST_HTTP, HioRequestHttp))
#define HIO_REQUEST_HTTP_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST ((klass), HIO_TYPE_REQUEST_HTTP, HioRequestHttpClass))
#define HIO_IS_REQUEST_HTTP(object) (G_TYPE_CHECK_INSTANCE_TYPE ((object), HIO_TYPE_REQUEST_HTTP))
#define HIO_IS_REQUEST_HTTP_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE ((klass), HIO_TYPE_REQUEST_HTTP))
#define HIO_REQUEST_HTTP_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), HIO_TYPE_REQUEST_HTTP, HioRequestHttpClass))
/* Returns the GType for HioRequestHttp (registered on first call). */
GType hio_request_http_get_type (void) G_GNUC_CONST;
/* Constructs a request of the given HioRequestHttp subtype from the parsed
 * request line: method, HTTP version (major.minor), path and query string.
 */
HioRequestHttp* hio_request_http_new (GType subtype,
const char *method,
int major,
int minor,
const char *path,
const char *query_string);
/* Accessors for the parsed request-line fields stored on the instance. */
int hio_request_http_get_major_version (HioRequestHttp *request);
int hio_request_http_get_minor_version (HioRequestHttp *request);
const char* hio_request_http_get_method (HioRequestHttp *request);
const char* hio_request_http_get_path (HioRequestHttp *request);
const char* hio_request_http_get_query_string (HioRequestHttp *request);
/* Dispatches to the class add_header vfunc with one parsed header pair. */
void hio_request_http_add_header (HioRequestHttp *request,
HrtBuffer *name,
HrtBuffer *value);
/* Associates / retrieves the response object paired with this request. */
void hio_request_http_set_response (HioRequestHttp *request,
HioResponseHttp *response);
HioResponseHttp* hio_request_http_get_response (HioRequestHttp *request);
G_END_DECLS
#endif /* __HIO_REQUEST_HTTP_H__ */
|
A Randomized Trial of 12-Week Interventions for the Treatment of Developmental Phonological Disorder in Francophone Children. PURPOSE This study was designed to test the relative efficacy of different combinations of intervention approaches when targeting speech production accuracy and phonological awareness skills. All children received individual speech therapy, a home program, and a small-group phonological awareness intervention. METHOD Sixty-five 4-year-olds with a developmental phonological disorder received these intervention components in different combinations over 12 weeks, resulting in 4 groups: output-oriented individual intervention and articulation practice home program, output-oriented individual intervention and dialogic reading home program, input-oriented individual intervention and articulation practice home program, and input-oriented individual intervention and dialogic reading home program. RESULTS A significant interaction of the individual treatment condition and the home program condition was observed for 2 outcome measures: targeted feature match (which reflected changes in speech production accuracy for features and word shape structures that were targeted in therapy) and explicit phonological awareness skills. CONCLUSION In this context, in which the children received a brief period of direct therapy and a home program component provided sequentially, the most effective strategy was to teach the parents to use treatment procedures at home that were congruent with the direct therapy component. |
import { Expose } from 'class-transformer'
export default class User {
constructor (data: User = {} as User) {
this.id = data.id
this.name = data.name
this.password = <PASSWORD>
this.email = data.email
}
@Expose()
id: number | null;
@Expose()
name: string;
@Expose()
password: string;
@Expose()
email: string;
}
|
Early life influences on cognitive impairment among oldest old Chinese. OBJECTIVES This article examines the effects of early life socioeconomic conditions on the risk of cognitive impairment among oldest old persons in China. We also examine whether adult socioeconomic status mediates the association between early life socioeconomic status and cognitive impairment in old age. METHODS Data derived from two waves of the Chinese Longitudinal Healthy Longevity Survey. We estimated logistic and multinomial regression models of cognitive impairment for a nationwide sample of people aged 80 to 105 (N = 8,444). RESULTS Among both men and women, urban residence in early life as well as education was associated with lower odds of cognitive impairment at baseline. We found modest support for a protective effect of advantaged childhood background on the odds of cognitive impairment onset during the 2-year follow-up, especially among women. DISCUSSION Our findings suggest that socioeconomic environment throughout the life course, early life in particular, can influence the risk of cognitive impairment in old age. Not only can public policy that targets illiteracy, hunger, and poverty improve the lives of tens of thousands of children, but ultimately such investments will pay significant dividends many decades later in enhancing the cognitive well-being of older persons. |
Washington/Beijing: US President Donald Trump said on Monday the UN Security Council must be prepared to impose new sanctions on North Korea as concerns mount that it may test a sixth nuclear bomb as early as Tuesday. "The status quo in North Korea is also unacceptable," Trump told a meeting with the 15 UN Security Council ambassadors, including China and Russia, at the White House. "The council must be prepared to impose additional and stronger sanctions on North Korean nuclear and ballistic missile programs."
"This is a real threat to the world, whether we want to talk about it or not. North Korea is a big world problem and it's a problem that we have to finally solve," he said. "People put blindfolds on for decades and now it's time to solve the problem."
The State Department said US Secretary of State Rex Tillerson would chair a special ministerial meeting of the Security Council on North Korea on Friday to discuss ways to maximise the impact of existing sanctions and show "resolve to respond to further provocations with appropriate new measures".
Tillerson and US Secretary of Defense Jim Mattis, Director of National Intelligence Dan Coats and General Joseph Dunford, chairman of the Joint Chiefs of Staff, will also hold a rare briefing on North Korea at the White House on Wednesday for the entire US Senate, Senate aides said.
Administration officials routinely travel to Congress to address lawmakers but it is unusual for the entire 100-member Senate to go to such an event at the White House, and for those four top officials to be involved.
The White House said Trump and German Chancellor Angela Merkel discussed the "urgent security challenge" posed by North Korea in a phone call on Monday.
In an earlier phone conversation with Trump, Chinese President Xi Jinping called for all sides to exercise restraint, as Japan conducted exercises with a US aircraft carrier strike group headed for Korean waters.
Two Japanese destroyers have joined the US carrier group for exercises and South Korea said it was in talks about holding joint naval drills.
China, North Korea's sole major ally, has been angered by its nuclear and missile programmes and its belligerence.
The White House said Trump and Xi "reaffirmed the urgency of the threat posed by North Korea's missile and nuclear programmes, and committed to strengthen coordination in achieving the denuclearisation of the Korean peninsula."
Xi told Trump China "hopes that all relevant sides exercise restraint, and avoid doing anything to worsen the tense situation", China's foreign ministry said in a statement.
It said the call was the latest manifestation of the close communication between the presidents, which was good for their countries and the world.
US ambassador to the United Nations Nikki Haley said Washington and the international community were maintaining pressure on North Korean leader Kim Jong Un but "not trying to pick a fight with him".
Asked whether a pre-emptive strike was under consideration, she told NBC's "Today" programme: "We are not going to do anything unless he gives us reason to do something."
Trump also spoke by phone with Japanese Prime Minister Shinzo Abe.
"We agreed to strongly demand that North Korea, which is repeating its provocation, show restraint," Abe told reporters. "We will maintain close contact with the United States, keep a high level of vigilance and respond firmly."
The US government has not specified where the carrier strike group is but US Vice President Mike Pence said on Saturday it would arrive "within days".
Adding to the tension, North Korea detained a US citizen on Saturday as he attempted to leave the country. |
Drosophila uses a tripod gait across all walking speeds, and the geometry of the tripod is important for speed control Changes in walking speed are characterized by changes in both the animals gait and the mechanics of its interaction with the ground. Here we study these changes in walking Drosophila. We measured the flys center of mass movement with high spatial resolution and the position of its footprints. Flies predominantly employ a modified tripod gait that only changes marginally with speed. The mechanics of a tripod gait can be approximated with a simple model angular and radial spring-loaded inverted pendulum (ARSLIP) which is characterized by two springs of an effective leg that become stiffer as the speed increases. Surprisingly, the change in the stiffness of the spring is mediated by the change in tripod shape rather than a change in stiffness of individual legs. The effect of tripod shape on mechanics can also explain the large variation in kinematics among insects, and ARSLIP can model these variations. Introduction Behavior, including locomotion, results from interactions between the nervous system, the body, and the environment (Chiel and Beer, 1997;Full and Koditschek, 1999). Despite a history of research in both neurobiology (;Cruse, 1990;Delcomyn, 1985;;Graham, 1985) and biomechanics (Full and Koditschek, 1999;Full and Tu, 1990), a complete integration of neural and mechanical systems for legged locomotion remains elusive. Recent developments in both methods for assessing neural activity (;;) and the vast and ever-improving genetic toolkit have made Drosophila a vital model system for the study of neural control of behavior. In contrast, the mechanics of legged locomotion in flies remains understudied. 
In this study, we will focus on changes in speed during walking: we will first describe interleg coordination (used interchangeably with gait in this article), a necessary first step toward understanding mechanics, and then the mechanics of body-environment interaction that accompany changes in speed. In insects, changes in interleg coordination with change in speed are strikingly different from mammals: mammals undergo transition from walking to other coordination patterns such as run, trot, or gallop at precise speeds. Moreover, in mammals, gait transitions measured in terms of speeds relative to their size defined as Froude number (Fr) occur at specific Fr. They walk below Fr of 0.3 while choosing other gaits at higher Fr. In contrast, insects employ a tripod gait at a wide range of Fr from 0.001 in flies (), Fr of 0.25 in ants (), and Fr > 1 in cockroaches. Insects do change their gaits ; when insects change gait, the gait selection in insects appears to be probabilistic, that is, different gaits can be employed at the same speed. Regardless, tripod is the most common gait in insects and why a tripod coordination can support a large range of speed is not well understood. Changes in speed are also accompanied by changes in mechanics and are particularly well understood in mammalian locomotion. In particular, the mechanics of the center of mass (CoM) during locomotion are relatively simple, and models to explain CoM mechanics have provided many insights (;Full and Koditschek, 1999). During mammalian walking, the CoM is at its highest position at mid-stance, and the horizontal speed of the CoM is lowest at mid-stance ( Figure 1A). Running in humans or galloping in quadrupeds displays different kinematics from walking that is characterized by a minimum in CoM height ( Figure 1A). Both the walking and running CoM kinematics can be explained by a simple mechanical model called the spring-loaded inverted pendulum (SLIP). 
In the SLIP model, the mass of the animal is concentrated into a point mass, which is supported by a single, massless effective leg ( Figure 1A). During the first half of stance, the spring is compressed as the body moves through the stance phase, converting kinetic energy into elastic energy stored by leg muscles and tendons. During the second half of stance, the stored elastic energy is converted back into kinetic energy. Thus, the kinetic energy, and therefore the speed, In this model (spring-loaded inverted pendulum ), the mass of the animal is concentrated into a point that is supported by a single massless spring. The arrow represents the direction of locomotion. This model can capture the basic features of the CoM movement during walking (in humans) and running (in both humans and cockroaches). (B) SLIP cannot describe the fly's CoM movement, which has a mid-stance maxima in speed. (C) A fly walking on three legs can be described by a springy tripod. The sagittal plane mechanics is governed by the sagittal plane projection of the springy tripod (see rectangle). (D) A springy tripod will produce angular restorative forces. Any movement away from the mid-stance position will produce restorative forces (represented by thin arrows). The thick arrows represent forces from front and back legs; thickness of the arrow indicates the magnitude of the force. Top: before mid-stance, the front leg is stretched and the back leg is compressed, leading to larger forces from the back leg. Bottom: after mid-stance, the front leg is compressed and exerts larger tangential forces. Net result is restorative forces. (E) The sagittal projection of a springy tripod can be modeled as the angular and radial spring-loaded inverted pendulum (ARSLIP) model. The angular springs expand as the CoM moves away from the mid-stance position and thereby generate restorative forces. 
The restorative forces can produce the mid-stance maximum in speed observed in flies in addition to the CoM movement pattern in human walking and running. reaches its lowest value at mid-stance, as does the height in most cases. These mid-stance minimum in speed and height are also observed during running in many different mammals (Blickhan, 1989;Blickhan and Full, 1993;;McMahon, 1984;McMahon and Cheng, 1990), making SLIP an effective model for running. More recently, it has been appreciated that SLIP can also serve as a model for walking by producing a speed minimum and height maximum at midstance ( Figure 1A) when the spring is stiff (). That SLIP can serve as a model for both walking and running has proven useful as a unifying model for mammalian locomotion. SLIP can also serve as a model for running in cockroaches. An elegant series of studies on running cockroaches has shown a striking similarity to mammalian running; in both cases, the CoM reaches a minimum in speed and height at mid-stance Tu, 1990, Full andTu, 1991) and can be modeled by SLIP. The three legs of a tripod can be replaced by a single spring-loaded effective leg. However, by its very nature, SLIP cannot generate the CoM kinematics of many insects including Drosophila ( Figure 1B) because a fly's horizontal speed during walking is at its maximum at midstance (Graham, 1972;). Therefore, a mechanical framework consistent with both the CoM kinematics in flies and cockroaches is necessary. A qualitative consideration of the mechanics of an animal walking on three legs shows that SLIP might be an oversimplified model: an animal walking with a tripod gait can be approximated as a point mass supported by three massless springs or a springy tripod ( Figure 1C). The sagittal plane projection of the springy tripod shown in the red box in Figure 1C is the mechanical system that governs the CoM movement in the sagittal plane. 
A springy tripod cannot be approximated by SLIP because the springy tripod is stable while SLIP is unstable. An animal supported by a single SLIP-like leg will fall. As the CoM moves away from the vertical (say toward the front of the fly), the front leg compresses and tends to push the fly backward ( Figure 1D). Similarly, if the CoM moves back, the hind leg will push it forward. These restorative forces cannot be modeled by SLIP but can be modeled by a simple extension to SLIP through the addition of an angular spring to model restorative forces. In other words, the three legs of a tripod act like a single leg whose behavior is described by a new biomechanical model -angular and radial spring-loaded pendulum (ARSLIP; Figure 1E). This model would enable the modeling of both the cockroach-like and fly-like kinematic patterns. The mechanics of a springy tripod is not only affected by the stiffness of individual legs but also by its geometry (or where the legs are positioned on the ground). Changes in geometry can be a mechanism to accommodate large variation in speed supported by the tripod gait and have the potential to explain why tripod gaits can support a large range of speeds. The effect of geometry on mechanics can also be modeled by the ARSLIP model through the differential effects of the geometry on the two spring constants -the radial and angular spring constants ( Figure 1E) -which describe the ARSLIP model. In this study, we created an automated method for measuring the movement of a fly's CoM in all three dimensions while also tracking the position of the fly's stance legs. Using this method, we analyzed a fly's gait over >500 steps during which the fly is always walking straight. Flies employ a modified tripod (M-tripod) gait throughout their entire speed range with only a small dependence on speed. 
The proposed ARSLIP model can explain how tripod geometry affects the nature of forces that act on the fly, and ultimately defines its dynamics and can provide an elegant explanation for why insects do not change their gait over a wide speed range. Results An automated method for obtaining a fly's walking kinematics with high spatial resolution We designed an automated data acquisition system that generates a large positional dataset with high spatial resolution to investigate the fly's gait and CoM kinematics. Similar to an approach employed previously (Nye and Ritzmann, 1992;), we recorded the side and the bottom (reflected off a mirror) view of a fly walking in a clear, closed cuboid chamber ( Figure 2A). We extracted all the steps during which a fly walked straight for more than one step. The fly's CoM was extracted using the Kanade-Lucas-Tomasi (KLT) (Tomasi and Kanade, 1991) algorithm and produced low-noise estimates of the CoM position; the vertical resolution being 20 mm (see 'Materials and methods', 'Tracking CoM and foothold positions', Video 1), which makes the rhythmic up-and-down movement of the CoM apparent ( Figure 2B). The positions of the leg tips during stance were extracted using a custom algorithm (see 'Materials and methods', 'Tracking CoM and foothold positions', Figure 2-figure supplement 1). The legs were labeled according to an established convention ( Figure 2C), and the gait map ( Figure 2C) was put together such that the legs that constitute a tripod -right prothoracic (R1), left mesothoracic (L2), and right metathoracic (R3) -are plotted on consecutive rows (orange); and those of the other tripod (L1-R2-L3) are plotted in another set of consecutive rows (marked in blue), to allow a direct assessment of the presence or absence of the tripod gait. As a means of corroborating previous findings, we plotted stance and swing duration as a function of speed ( Figure 2D). 
Consistent with previous studies (Graham, 1972;;Pearson, 1976;Strauss and Heisenberg, 1990;Wilson, 1966), the stance duration is inversely proportional to speed. The swing duration also changes with speed but to a smaller extent than the changes in stance duration. Flies employ interleg coordination close to tripod across speeds We used two methods to characterize the speed-dependent change in coordination between legs: first, to facilitate comparison with previous work, interleg coordination was defined based on delays between the times at which the legs start either a swing or a stance phase. To visualize a fly's gait, the times that a fly's legs start the stance phase in relation to the time that the right front leg (R1) entered the stance phase were plotted. Legs that form the first set (R1-L2-R3) of tripod legs enter stance phase with a short interleg delay ( Figure 3A). The legs that form the other tripod (L1-R2-L3) enter the stance phase with a short interleg delay with each other but out of phase with the first set. The coordination pattern did not change noticeably as a function of speed ( Figure 3A). This raw gait map ( Figure 3A) suggests that the flies predominantly employ a tripod gait across all speeds. This trend (in Figure 3A) was quantified by calculating the delays relative to the cycle period (the time it takes a leg to complete both a swing and a stance, Figure 3B) or normalized delay. The normalized delays between the legs of the same tripod were small throughout the entire speed range; the within tripod delays became even smaller with speed. The prothoracic leg led the other legs in its tripod with a small but significant negative delay consistent with previous observations in cockroaches (;Delcomyn, 1971). On the other hand, the normalized delays across legs in the opposing tripods were 0.5 ( Figure 3B). 
These analyses suggest that the gait -as defined by phase differences between legsemployed by flies during forward walking across the entire range of speeds is close to a tripod. The normalized delays between different legs are consistent with that of a single gait that is close to a tripod but in which the front leg of the tripod is ahead of the middle leg, which in turn is ahead of the rear leg; we will refer to this gait as M-tripod ( Figure 3C). The delays between legs within a tripod do decrease slightly with speed ( Figure 3C). The small dependence means that there is no qualitative change in gait. This small dependence on speed is consistent with the continuum of coordination patterns observed in a recent study (). A second method to quantify leg coordination is to use instantaneous phase lags between legs ( Figure 3D) averaged over a gait cycle (;Revzen and Guckenheimer, 2008). Although time delays are easier to visualize and phase lag more abstract, the latter provides a more accurate measure of coordination because it takes the entire step into account instead of only the beginning of stance (see 'Materials and methods' 'Gait analysis based on leg phases'). As in the case of stance start times, the distribution of phase lags between the reference leg (R1) and the other legs show small phase differences between the legs within a tripod and large phase differences between the legs in the opposing tripod ( Figure 3E). The phase plots also reveal that the front leg of the tripod leads the middle and back legs. As the speed increases, the phase difference between the tripod leg decreases, and the spread of the phase difference also becomes smaller ( Figure 3E). The analysis using instantaneous phases is consistent with a single gait -M-tripodacross the entire range of speed; the exact speed dependence of M-tripod gait using phase difference ( Figure 3F) is slightly different from the speed dependence calculated from stance start times ( Figure 3C). 
A small percentage (about 4%) of steps at very low speeds did not conform to any gait, and a few steps had a tetrapod coordination pattern, but an overwhelming majority of the steps have a tripodal coordination. The rest of the study will focus on steps that have a tripodal coordination. Kinematic changes associated with changes in speed Given that flies can walk over their entire speed range using a M-tripod gait implies that a change in gait is not essential for a change in speed. To better understand the mechanism underlying change in speed, we focused on the tripodal steps and asked how the movement of the fly's CoM over the tripod gait cycle changed with speed. Because the tripod legs are not perfectly in sync, we defined the tripod start as the halfway time point between the time that the very first foot of the current tripod lands and the last foot of the preceding tripod is lifted ( Figure 4A). Similarly, we set the tripod end as the halfway point between the very first foot landing time of the following tripod and the last lift-off time of the tripod of interest (dotted blue lines in Figure 4A mark the start and end of each tripod). We will refer to the tripod stance as a step. Figure 4A shows the speed profile during a fast step. As previously reported for stick insects and Drosophila (Graham, 1972;), the CoM typically reached a maximum horizontal speed at mid-stance ( Figure 4A). Figure 4B shows the speed for a slower step. A slow step is characterized by both a lower initial speed and a smaller speed increase. It is important to note that there is a mid-stance maximum in the height of the fly and that the flies are more erect when they are walking faster ( Figure 4A, B). We will show later that this change in height is partially responsible for the increase in speed. The change in speed within a step increased with the average speed during the step. ( Figure 4C). At low walking speed, much of the change in speed was due to the increased initial speed. 
At higher walking speed, the mid-stance increases in speed made a greater contribution ( Figure 4D). Normalized time delays of stance start times between legs within a tripod (R1 and L2, L2 and R3) and legs in the opposing tripod (R1 and L1). The time delays were normalized by cycle duration. R1-L2 and L2-R3 delays are small at low speeds and become even smaller as the walking speed increases (Wilcoxon sign-rank test). R1-L1 delays are unchanged. (C) The phase difference between legs is consistent with a single gait, which is a modified version of a tripod (M-tripod) in which the front leg of the tripod leads the middle leg that in turn leads the back leg. The delay between the legs has a small dependence on speed (v). (D) Definition of leg phase angles. Stances start and end at 0 and p, respectively; swings start and end at -p and 0, respectively. (E) The leg phases relative to R1 show that interleg coordinations at different speeds all consistent with M-tripod. The delays between tripod legs do become smaller with speed (Wilcoxon sign-rank test) while the delays between R1 and L1 leg remain unchanged. (F) The M-tripod based on phase lag (v=speed). The distance traveled over the tripod gait cycle also increases with speed ( Figure 4E), and the duration of the tripod gait cycle decreases ( Figure 4F). Thus, the increase in speed is due to both the greater distance travelled during the step, and faster steps. The longer and faster steps result both from a faster speed at the beginning of the step and a greater increase in speed during the step. In the rest of the article, we will describe a simple mechanical model that not only describes the mid-stance maximum in speed during a step but also describes the changes in mechanics underlying changes in speed. A new mechanical model for locomotion in insects As described in the 'Introduction', an animal walking with a tripod coordination can be modeled as a springy tripod where a point mass is supported by three legs. 
For symmetry, in our model, these legs were of equal natural length, and the body's movement within a step can be described as an arc about the middle leg. The body's position at any instant is described by, the angle the body makes with the vertical, and r is the length of the middle leg. The behavior of this mechanical system can be described by its elastic potential energy. As the body moves through its stance phase, this elastic potential energy changes as some legs stretch and others compress. The total elastic potential energy of a springy tripod ( Figure 5) is simply the sum of the potential energies due to the three legs. Angular and Radial Spring-loaded Inverted Pendulum Leg 1 (front leg) Leg 2 (middle leg) Leg 3 (back leg) A Figure 5. The angular and radial spring-loaded inverted pendulum (ARSLIP) model is equivalent to the springy tripod model. (A) The sagittal plane mechanics is governed by the sagittal plane projection of the springy tripod. The arrow denotes the direction of motion. The parameters that define the springy tripod model are shown. The overall stiffness of the springy tripod is determined by the spring constant of individual legs, k, the height of the tripod (r m ), and the distance between the front and back legs (2L). See Table 1 as well. The behavior of the springy tripod is described by how the coordinates of the point mass -r and -change with time. (B) The sagittal plane projection of a springy tripod can be modeled as the ARSLIP model. The parameters that describe the ARSLP model are shown. Just like the springy tripod, ARSLIP is described by how the coordinates of the point massr and -change with time. The potential energy of the tripod can be derived as a sum of the elastic energies of the three legs. The ARSLIP potential energy can be derived by summing radial and angular potential energies. 
The equivalence of the two models is shown by finding a parameter set for ARSLIP where the potential energies as a function of $r$ and $\theta$ are similar when $\theta$ is small and changes in $r$ are small (derived in 'Materials and methods'), where $R_{tri}$ is the natural length of the springy tripod; $r$ is the length of the middle leg; $\theta$ is the angle that it makes with the vertical axis, which are also identified with the radial and angular coordinates of ARSLIP; $2L$ is the spread of the tripod or the distance between the prothoracic and metathoracic legs of the tripod in the direction of walking; and $k$ is the stiffness of each leg. The variables are also enumerated in Table 1 and shown in Figure 5. We can show through a formal analysis using a Taylor series expansion of the Lagrangian for the springy tripod (see 'Materials and methods' 'Derivation of the formula relating tripod model to ARSLIP') that the springy tripod approximately reduces to the ARSLIP model ( Figure 5B). As a reminder, in the ARSLIP model, the three legs of the springy tripod are replaced by a single effective leg with a radial and an angular spring. Specifically, $V_{tri}$ is equivalent to the ARSLIP potential energy, $V$ (Equation 2), for evolution that is close to the mid-stance position ($r \approx r_m$ and $\theta \approx 0$). The $(r - R)^2$ term corresponds to potential energy due to the radial spring aligned along the effective leg connecting the middle tripod leg to the CoM, and the $\theta^2$ term corresponds to the potential energy from an angular spring capturing the tangential restorative forces exerted by the front and back legs. In this model, the mechanics of an animal is controlled by the two spring constants, $k_s$ and $k_a$, which describe the stiffness of the radial and the angular spring, respectively, and the natural spring length, $R$. In essence, the mechanics of the fly walking on a springy tripod can be described by the ARSLIP model.
An important point that we will elaborate on later is that the springy tripod is a simplification of the actual configuration of the fly while it is walking, but the ARSLIP model is a more general Figure 7A) Mass of the fly Mass was kept fixed; average mass of flies of a particular sex and genotype was measured ARSLIP Angular and radial spring-loaded inverted pendulum model and can serve as an accurate model even without making the assumptions of the springy tripod. The approach above is based on potential energy. The distinction between SLIP and ARSLIP is clearer when considering forces modeled in the two cases. In SLIP, forces are always along the leg. The ARSLIP model provides a mechanism by which tangential forces can also be transmitted to the body. Importantly, the angular spring forces switch direction at mid-stance, which means that they aid forward progression during the first half of the stance and oppose forward progression during the second half of the stance. This pattern is exactly opposite to the pattern created by SLIP. Depending on whether the leg spring dominates, or the angular spring, one can get a cockroachlike speed minimum or fly-like speed maximum at mid-stance. ARSLIP models the kinematics of a fly's CoM during walking We evaluated the performance of the SLIP and ARSLIP models by fitting them to the fly's CoM kinematics. Because the stance times of two consecutive tripods can overlap substantially, a complete model would involve two effective legs, each of which functions as either SLIP or ARSLIP; this complete model with two effective legs would have too many parameters and might obscure many of the insights that we obtain from modeling. Therefore, we modeled the CoM kinematics of a tripod stance (as defined in Figure 4A) using a single effective leg. The model parameters approximate the control exerted by the fly at each step. 
In the ARSLIP model, the fly chooses as its initial condition the angle of attack (a), angular speed (W), leg length (r), and radial speed (r _ ) at the beginning of the step. The evolution of the CoM depends on the angular spring constant (k a ), leg spring constant (k s ), and the natural leg length of the effective leg (R). The only difference between ARSLIP and SLIP is the absence of the angular spring, and hence there is no k a in SLIP. We minimized the root mean squared error (RMSE) between the SLIP and ARSLIP-predicted position of the CoM, and the experimentally measured position using an optimization algorithm (see 'Materials and methods', 'System of ordinary differential equations for SLIP and ARSLIP and details regarding fitting ordinary differential equations to individual steps'). SLIP can model the small increase in the vertical position of the CoM, which results from two competing effects: height increase due to the progression of the CoM from its extremum to the vertical mid-stance position and a height decrease due to the compression of the leg spring ( Figure 6A). However, as reported previously (), SLIP fails to describe the horizontal progression of the CoM. This failure is clear from a comparison of the experimental horizontal speed profile and the theoretical speed profile ( Figure 6A, bottom panel). In contrast, ARSLIP can describe both the horizontal and vertical progression of the CoM ( Figure 6B). In ARSLIP, the angular spring accelerates the CoM during the first half of the stance phase. It can, therefore, compensate for, or even overcome, the decelerating effects of the radial spring and gravity, and can model the mid-stance maximum in speed. ARSLIP presented significantly smaller RMSEs for both horizontal and vertical CoM displacements than SLIP ( Figure 6C). 
That the ARSLIP model describes the CoM kinematics well means that two linear springs — defined by their spring constants $k_a$ and $k_s$ — are sufficient to describe the fly's CoM kinematics during a step. The range of parameter values for all the fitted steps in our dataset is shown in Figure 6-figure supplement 1. The median $k_s$ was 0.009 N/m. This spring constant implies that to support its mass of 1 mg, or 10 μN weight, the fly compresses this effective spring by about 1 mm or approximately 50% of its length. During a step, the spring is always compressed such that its length is close to the fixed point of the spring (length at which the spring forces cancel gravitational forces) and oscillates about this fixed point without reaching its natural length. The magnitude of these oscillations is small and reaches a maximum of 10% (of its length at fixed point) about the fixed point. The nondimensional radial spring constant $\gamma_s$ (see 'Materials and methods' for definition) is ~2 compared to >10 for humans (). The median $k_a$ was $1.1 \times 10^{-8}$ Nm/radian. In nondimensional terms, the angular spring constant $\gamma_a$ was ~0.5, which is similar to the values obtained in humans. Therefore, compared to humans, the relative role of the angular spring in flies is much larger.
As the walking speed increases and the stance duration decreases, the vertical oscillations must occur faster by making the effective leg stiffer (because the time needed for vertical oscillation decreases with increases in spring stiffness). This increase in stiffness has indeed been observed in humans (). We found a similar increase in stiffness in flies ( Figure 6D). As the fly's walking speed increases, $\gamma_s$, the nondimensionalized version of $k_s$, increases. The nondimensional angular spring constant, $\gamma_a$, increases as well; this increase accounts for the greater within-step increases in speed observed at higher speed (see Figure 4C). In the next section, we will show that the mechanism underlying the change in spring constant is surprisingly a result of the change in the geometry of the tripod with speed rather than the change in spring constant of the individual legs. Change in tripod geometry increases spring stiffness necessary for change in speed In bipedal walkers, the only mechanism for increasing the overall stiffness of the system is to increase the stiffness of each leg. In the case of polypedal walkers such as insects, including flies, the geometry of the tripod itself is a parameter that can be adjusted to alter the stiffness of the overall system. To test the extent to which the geometry of the tripod on a given step influences its kinematics on that step, we derived the equivalent ARSLIP model that displays the same dynamics as the springy tripod around its mid-stance position using the empirically obtained tripod geometry (determined by the tripod spread, L; and mid-stance height, $r_m$, in Figure 7A) and mass of the fly (see 'Materials and methods' for details).
Specifically, using the following equations: We can relate the ARSLIP spring constants $k_a$ and $k_s$ and R (the natural length of the ARSLIP spring) to k, the spring constant of individual legs, $R_{tri}$, the natural length of individual legs, and the tripod geometry (determined by L and $r_m$ in Figure 7A). If the spring constant of individual legs remains the same, the springy tripod model predicts that $\gamma_a$ and $\gamma_s$ (nondimensionalized versions of $k_a$ and $k_s$) will both vary over a twofold range due to the variation in the observed geometry of the tripod. Specifically, in the range of values observed in flies, as the $L/r_m$ ratio decreases or the tripod becomes narrow and tall, both $\gamma_a$ and $\gamma_s$ increase ( Figure 7B). We can exploit this dependence of $\gamma_a$ and $\gamma_s$ on the tripod geometry to examine how well the change in tripod geometry from one step to the next predicts the best fit $\gamma_a$ and $\gamma_s$ values from Figure 6. To this end, we determined a single k and $R_{tri}$ for each fly, which best satisfies Equations 3-5 for all the steps fitted with the ARSLIP model for that fly. To compare across flies, we converted the $k_a$ and $k_s$ values to their nondimensionalized versions $\gamma_a$ and $\gamma_s$: We found that, despite all the simplifying assumptions, the predicted $\gamma_a$ and $\gamma_s$ derived from the tripod geometry were close to the optimal $\gamma_a$ and $\gamma_s$ obtained from the best fit to the CoM kinematics ( Figure 7C). The similarity between predicted and fitted spring constants is particularly significant because the prediction for all the steps of a given fly was made with a single parameter set while fits were optimized for each step representing a large decrease in the number of parameters. These results show that the tripod geometry plays a critical role in governing the spring constants. The strong correlation between spring constants predicted from the geometry and those from optimization suggests that changes in tripod geometry are employed by the fly to change speed.
Since $\gamma_a$ and $\gamma_s$ increase with speed (replotted in Figure 7D), we anticipate that the increase in speed is usually reflected as a change in the $L/r_m$ ratio ( Figure 7E), implying that the change in tripod geometry is an important mechanism for the control of speed during walking. Discussion There are four main findings in this study: (1) Flies use an M-tripod gait across all speeds. (2) Faster steps are accompanied by higher initial speed and larger increases in speed during the step, resulting in the fly covering longer distances in a shorter time. The kinematic pattern during a step and its changes with speed are explained by a new model — ARSLIP — within which the dynamics are described by two spring constants. (3) An increase in speed is accompanied by an increase in the spring constants that characterize the ARSLIP model. This increase in stiffness is an important biomechanical adaptation necessary for change in speed. (4) The increased stiffness is not a result of each leg becoming stiff but results from a change in the geometry of the tripod and the height of the fly: flies locomote with a narrower and higher posture, resulting in increased stiffness at a higher speed. To our knowledge, the effect of tripod geometry on insect locomotion has not been investigated. Because the tripod geometry varies widely between insects, the tripod geometry might be an important determinant of an insect's walking kinematics and has the potential to explain many features of insect locomotion (see last section of 'Discussion') including the fact that insects can walk with a tripod gait across a large range of speeds. These findings are discussed below. Flies employ a tripod coordination during forward walking Flies appear to predominantly employ a gait close to a tripod gait -M-tripod -across their entire range of speeds. These results are consistent with other studies in flies (;Strauss and Heisenberg, 1990).
Similar observations have been made during free walking in other insects such as cockroaches (Delcomyn, 1985;Hughes, 1952;Spirito and Mushrush, 1979), ants (Reinhardt and Blickhan, 2014;;Zollikofer, 1994), and locusts. The M-tripod gait itself is not fixed but has a small dependence on speed. This small dependence on speed as well as the increase in duty factor as the speed decreases implies that the average number of legs on the ground at any given time will decrease with speed. This increase in the number of legs has been shown to be important for stability (). The M-tripod gait only applies to forward walking at a fixed speed. The entire complement of gaits that the fly employs to turn, accelerate, and decelerate remains to be determined. It is important to note too that our findings do not imply that the flies are only capable of a fixed gait. There is evidence that flies change their gait upon amputation (). In our dataset as well, there is clear evidence for tetrapod gait; however, the fraction of steps during which flies adopt a tetrapod gait is very small. The geometry of the tripod is an important determinant of walking speed An unexpected result was the extent to which the shape of the tripod formed by the three tripod legs -particularly the ratio of the height of the tripod to its anterior-posterior spread -can explain a fly's CoM kinematics during a step. Previous studies have shown that neither the swing duration nor the swing amplitude (the distance that a leg travels during the swing) changes much as the fly's walking speed changes (;Strauss and Heisenberg, 1990;), a result that is confirmed in this study. Much of the change in speed results from a decrease in stance duration (;Strauss and Heisenberg, 1990;), another result consistent with this study. In other words, increase in walking speed results from an increase in the angular speed of the body about its stance legs. 
This increase requires two elements: a neural element whereby increasing the drive into the central pattern generators would cause them to cycle faster, as has been demonstrated in stick insects (). A biomechanical element: moving faster also requires larger forces from the ground, and a mechanically stiffer system (in this case, the mechanical system consists of the fly and the legs that support the fly) would be able to transmit more forces from the ground to the body. There are two mechanisms by which the system can become mechanically stiffer. Either a fly could make each leg stiffer just as has been shown in humans (;Kim and Park, 2011) or it could change the geometry of the tripod to make the overall system stiffer. The data in Figure 7 is consistent with the second idea that the changes in the geometry of the tripod are the dominant component by which flies control the stiffness of $\gamma_a$ and $\gamma_s$ and thereby change their walking speed. Changes in $\gamma_s$ allow the fly to adjust the stiffness of its mechanical system to the stance duration; a stiffer $\gamma_s$ means a shorter time period of oscillation. Changing $\gamma_s$ through changes in geometry would also change $\gamma_a$. We regard the changes in $\gamma_a$ as an inevitable consequence of the changes in $\gamma_s$; nonetheless, the increase does provide a parsimonious explanation for the greater mid-stance maximum in speed observed when the fly walks faster. To our knowledge, the control of speed through tripod geometry has never been explored in any insect. One reason for this deficiency is methodological. Researchers usually collate their data across steps, trials, and individuals. This process is bound to obscure any trends in tripod shape; analysis at the level of single steps is necessary, and trends within an individual must be compared. Another methodological issue is that the height of the animal during locomotion is rarely measured.
The large variation in tripod geometry can explain the broad range of kinematics observed among insects. In flies, the $L/r_m$ ratio varies between 1 and 2. How about other insects? We could obtain a rough estimation of the $L/r_m$ ratio for a few insects by piecing together information from some manuscripts or by measuring these ratios from the figures in the papers: for three species of stick insects, the ratio ranges between 2 and 3 (), wood ants have a $L/r_m$ ratio of closer to 1 (Reinhardt and Blickhan, 2014), and cockroaches have a $L/r_m$ ratio closer to 6 (). This large variation in the $L/r_m$ ratio will have a large effect on the CoM kinematics within a step. Given a leg stiffness $\gamma$, at low $L/r_m$ ratio, that is, when the legs are almost vertical, $\gamma_s$ is large in comparison to $\gamma_a$; therefore, the deceleration due to $\gamma_s$ as the fly approaches mid-stance cannot be compensated by the acceleration due to $\gamma_a$. As the $L/r_m$ ratio increases, the effects due to $\gamma_a$ and $\gamma_s$ are comparable for a range of $L/r_m$ values. At very large $L/r_m$ ratios, $\gamma_s$ again dominates. These ideas can be formalized by deriving conditions for which the gait is cockroach-like versus fly-like (Appendix): whether the gait is cockroach-like or fly-like depends on the interplay between $\gamma_a$ and $\gamma_s$, which in turn depends on the stiffness of individual legs and the geometry of the tripod. For a given $L/r_m$, there is a leg stiffness $\gamma$ above which the kinematics change from a cockroach-like gait to a fly-like gait ( Figure 8A). As expected, the data points for each of the fitted steps for the fly lie above the function that demarcates the two kinematic types. Importantly, the $L/r_m$ ratio for flies and several other insects places them in a regime in which the leg stiffness required for a fly-like gait is at a minimum ( Figure 8A). Indeed, ants, stick insects, and flies all have fly-like kinematics.
On the other hand, cockroaches have $L/r_m$ values that predispose them toward a mid-stance minimum velocity profile. Another important insight from this analysis comes from the dependence of $\gamma_s$ on the $L/r_m$ ratio ( Figure 8B). At the $L/r_m$ ratios observed in flies and other insects such as ants and stick insects, small changes in $L/r_m$ will produce a corresponding change in $\gamma_s$ ( Figure 8B). On the other hand, at large $L/r_m$ ratio, such as those employed by a cockroach, the $\gamma_s$ is constant and does not depend on the $L/r_m$ ratio. This difference makes sense. At the large Fr numbers employed by cockroaches, the CoM kinematics is dominated by the angular speed with which the body moves about its leg (), and it is rather insensitive to the magnitude of $\gamma_s$. On the other hand, and as demonstrated for flies in this study, at lower speeds the mechanics is dominated by the spring constants $\gamma_a$ and $\gamma_s$. In sum, the strong dependence of mechanics on the $L/r_m$ ratio or tripod geometry can explain both why tripod gait can be employed across a range of speeds observed among different insects and that a given insect can control its speed in part by changing the geometry of its tripod. ARSLIP as a general model for multilegged locomotion The finding that the SLIP model employed in mammalian locomotion is also adequate as a model for cockroach running led to the idea that SLIP is a general model for locomotion regardless of how many legs are on the ground. However, it has been well known before the recent studies in flies that CoM kinematics for other insects are dramatically different from cockroach (Graham, 1985;Reinhardt and Blickhan, 2014), and cannot be explained by the SLIP model. To our knowledge, there have not been many attempts at arriving at a single model that can serve as a conceptual model that explains the diverse kinematics observed across the insect world.
The development of a general model will be aided by two important insights in this study: first, we show that the geometry of the tripod plays an important role in determining the mechanics of the CoM. It appears that cockroaches do walk with a particularly wide and low tripod where the mechanics are closest to being described by the SLIP model. But the wide and low tripod observed for cockroaches appear to be an exception rather than a rule. The tripod geometries of other insects are different, and as shown in Figure 8, at least some of the kinematic variations observed in insects result from these differing tripod geometries. Second, the effect of the different tripod geometries can be captured through a simple extension of the SLIP model -the ARSLIP model. The ARSLIP model is simple enough that it retains much of the elegant simplicity that makes SLIP such a powerful model while being better equipped to capture the greater diversity of kinematics observed in insects. Future experiments aimed at exploiting the natural diversity of the insect world to rigorously test the relationship between the geometry of the stance legs and CoM kinematics, and the ability of ARSLIP to describe this relationship will prove to be a powerful toolkit for developing a general model for hexapedal locomotion. It is obvious but still important to note that the springy tripod presented here is a simplification for the actual dynamics of locomotion in insects. In the case of fly, the springy tripod is a decent model of the fly's walking as shown by its ability to predict the optimized ARSLIP spring constants from geometry (Figure 7). However, the ARSLIP model itself is more general and can model other known features of insect legged locomotion that we have not considered here: First, here we have modeled each leg as a linear spring, the angular spring results from a combined action of the three legs. 
In the most general case, each leg itself can function as both linear and angular spring. The resulting model will still be the ARSLIP model; however, the expressions relating $k_a$ and $k_s$ to tripod geometry and stiffness of a given leg will be different from what we have derived here. Second, and like the first point above, apart from spring forces, insect legs can produce attachment forces (). Once again, attachment forces do not invalidate the utility of ARSLIP as a model, but will affect the values of $k_a$ and $k_s$ differentially and represent an important mechanism that can explain the difference in kinematics for different insects. Third, for many insects, the hind leg is much longer than the other legs. The longer length of the hind legs might make a third term in the Taylor series expansion necessary (the first two terms being the leg spring and the angular spring, respectively). This third term may act as an asymmetric propulsive force. Finally, ARSLIP does not necessarily need three legs. The ARSLIP model can also model an insect employing more legs on the ground. In fact, one important insight in this study is that whenever there is more than one leg on the ground, SLIP is unlikely to work as a model. This is because SLIP assumes that the net forces on the CoM act along the single effective leg. When there are more than one leg on the ground, this constraint — forces only along the leg — severely limits the ability of a model to describe locomotion. ARSLIP removes this constraint and allows the description of forces perpendicular to the leg. Thus, the ARSLIP model is the more natural take-off point for efforts to obtain a truly general model for locomotion not only in insects but in multilegged animals in general. Materials and methods Flies The flies were reared at 25 °C, and 12 hr:12 hr light:dark cycle. Ten minutes before the experiment the flies were removed from a vial and placed under CO₂ anesthesia, and their wings were detached using forceps.
We experimented with different wild-type strains to record steps at a range of walking speeds and to ensure that any general principle we discover is indeed general (at least across a range of inbred strains). These wild-type strains were w1118, Berlin K, and Oregon-R-C (or Oregon C) (Bloomington stock numbers: 5905, 8522, and 5, respectively). Table 2 shows all of the flies in our dataset and the data each fly contributed to the analyses in each figure. Data used for Figure 2D were stance and swing durations for each of the six legs. Data used for Figure 3A,B and E were derived from all complete gait cycles that include at least one frame of the last leg's stance phase. Data for Figure 3A had an additional constraint that required a cycle data to have complete observation of the last leg's stance phase from start to end. Data used for Figure 4 were derived from all tripod stance phase. Data used for Figure 6 and 7 were derived from tripod stance phases in which the single support phase constitutes at least 25% of stance. However, additional constraints were added to Figure 6D and 7: data with erroneous leg position tracks were eliminated, and then flies with less than six steps were eliminated. Data acquisition and processing Our experimental data consisted of the CoM position of the fly in all three dimensions and the position of the fly's footholds in the horizontal plane. This section describes the acquisition and processing procedures that yield this dataset. Recording chamber The chamber side walls and ceiling (inner L W H: 21 7 17 mm) were built from microscope slides and held together using an instant adhesive (Loctite 495). A hole was drilled in one of the side walls 10 mm from the floor to provide an air jet nozzle for the initiation of walking. 
A 0.13–0.17-mm-thick coverslip was used as the chamber floor to minimize the distance between the side view and bottom view, and therefore the frame size; the frame size was kept to a minimum to increase the frame rate. After a fly was placed inside the chamber, the chamber was secured on the coverslip using a tape. The chamber-coverslip assembly was then held horizontally using clamps. Below the assembly a mirror was tilted at 45° to the coverslip. The mirror reflected the bottom view of the chamber to the camera (see Figure 2 for schematic). The bottom and the side of the chamber were lit with infrared light. Data acquisition Data acquisition and processing were fully automated, except for manual screening of raw videos before the processing step. A USB 3.0 camera Basler acA1920-150 um (380 Hz at 1024 × 779) and a telecentric lens (Edmund Optics, Barrington, NJ 0.40x SilverTL, part number 56-677) were used to record the video at 380 fps at 1024 × 779 resolution. Exposure was set at 2.5 ms. This setup had a modulation transfer function of 10% in the vertical direction and 6% in the horizontal direction at 25.39 line pairs/mm. The camera monitored the chamber at 30 Hz in real time until any motion within the field of view triggered acquisition at 380 Hz for 1.2 s. The motion was detected by measuring the change in intensity between the total pixel intensity values of the two most recent frames. After each acquisition, the recorded video was saved to disk if and only if the fly walked more than 5 mm across the floor. This automated procedure could monitor and record a single fly for more than 10 hr. Tracking CoM and foothold positions The fly's CoM was estimated by using the most prominent features of the fly as fiducial markers. The features were extracted on the first frame by using the minimum eigenvalue algorithm and following the extracted points throughout the video using KLT (KLT feature tracker in MATLAB).
An estimated affine transformation matrix between the sets of feature points of consecutive frames was multiplied to the CoM position in the previous frame to evaluate the CoM position in the current frame (Figure 2-figure supplement 1). Next, between every pair of consecutive frames, CoM was backtracked one step. The distance between original and backtracked CoMs is a reliable measurement of the so-called forward–backward tracking error (). The error was small enough that we could evaluate the SLIP and ARSLIP models. The errors were also much smaller than the change in CoM position during a step (Figure 2-figure supplement 2). Therefore, the noise of the estimated CoM trajectories was small, so numerical derivations of the trajectories returned velocity trends with a small noise (Figure 2-figure supplement 2). (Video 2: The output of each processing step is plotted. https://elifesciences.org/articles/65878#video2) Foothold location was automatically detected using a series of image processing algorithms detailed in Figure 2-figure supplement 1. The basic idea was to binarize the bottom view and thin the resulting image to yield a skeleton. The end points of the resulting skeleton returned points, including the actual footholds, along with other noisy or random points. The actual footholds were robustly detected by filtering out the noisy points and extracting points that are located the furthest away from the CoM. The noise filtering was performed by removing small objects composed of fewer than 100 pixels. The legs were labeled based on the mean of each foothold trajectory in the CoM frame (see details in Figure 2-figure supplement 1). All the processing steps are shown in Video 2. Gait analysis We performed gait analysis using either the stance start times or the instantaneous phase. Gait analysis based on stance start times For quantifying gait based on stance start times, the time at which the R1 leg starts its stance denoted the beginning of the cycle.
The cycle lasted until the R1 leg entered the next stance. The time between R1 entering two consecutive stances was the cycle period. The time delay between R1 and other legs was calculated by taking the time difference between the stance times of the other legs, allowing for the fact that some of the legs would start their stance before the R1 leg. To normalize the time delays, these delays were divided by the cycle period. Gait analysis based on leg phases The position of legs, $y_i(t)$, was measured in a body coordinate system, and the positive y-axis points toward the anterior part of the body. Because we only knew leg position during stance, we performed a linear interpolation of $y_i(t)$ during swing. The instantaneous phase angles of the legs were obtained by applying Hilbert transform on $y_i(t)$ (Figure 3; ;Revzen and Guckenheimer, 2008;). Hilbert transform turns a real-valued signal into a complex-valued analytic signal, which provides accurate instantaneous magnitude and frequency of the real-valued signal. The time-dependent angle of complex-valued analytical signal is the instantaneous phase angle. The phases were between $-\pi$ and $\pi$ (see Figure 3D for definition). For the phase delay analyses ( Figure 3D-F), the phase delay was normalized to lie between $-0.5$ and $0.5$ by dividing the instantaneous delays by $2\pi$. This normalized phase delay for each leg relative to R1 was averaged over the entire stance phase of the R1 leg (touch-down to lift-off). Definition of M-tripod Median values of delays between mesothoracic and contralateral metathoracic legs $\Delta_{meta,meso}$ and delays between prothoracic and contralateral mesothoracic legs $\Delta_{meso,pro}$ are used to determine delays within tripod legs of a synthetic M-tripod. The calculation for change in height and velocity A time series of height or speed over a tripod stance was detrended by a line that connected the values at the beginning and end of the stance phase.
Finally, the maximum and minimum values of the detrended data were summed to calculate height or speed changes (Figure 4-figure supplement 1). Nondimensional parameters We chose to nondimensionalize a unit of mass by an animal's body mass (m), length by an animal's natural leg length (R), and acceleration by gravitational acceleration at the surface of the earth (g). Following this rule, we could nondimensionalize speed and spring constants as shown below: $Fr \equiv \frac{v^2}{Rg}$, $\gamma_s \equiv \frac{k_s R}{mg}$, $\gamma_a \equiv \frac{k_a}{mgR}$. System of ordinary differential equations for SLIP and ARSLIP and details regarding fitting ordinary differential equations to individual steps The following system of ordinary differential equations (ODEs) (Equations 6 and 7) is derived using Euler-Lagrange equations to describe ARSLIP. A polar coordinate system was chosen for simplicity. where $r(t)$ is the length of the single effective leg used in SLIP and ARSLIP models, $\theta(t)$ is the leg angle from the vertical axis, $k_s$ is the leg spring constant, $k_a$ is an angular spring constant, m is the total mass of a fly, and R is the natural leg length. Dot denotes time derivative. A detailed derivation of the ODE is presented in the supplementary section of. The best fit of ARSLIP to a given experimental trajectory was found using the Global Search algorithm (MATLAB Global Optimization Toolbox). The RMSEs for height and distance were individually evaluated, and the sum was used as the objective function. $k_s$ and $k_a$ were searched within the range below (Equations 8 and 9): $0 < k_a < 50 \times 10^{-9}$ N·m. These bounds were chosen empirically. R and m were experimentally measured for the various strains ( Table 3). The length of the middle leg was measured from the still frames of the fly. Multiple measurements of a fly were averaged ($R_{real}$), and then based on the average value optimal R was estimated by searching within ±10% boundary of $R_{real}$. Mass (m) was measured by averaging the mass of 10 individuals from the same genotype and gender.
Because a model with two effective legs would have too many parameters and would obscure many of the clear insights gained from modeling, we chose steps for which the duration of support by a single tripod was at least 25% of the tripod stance. This criterion does not mean that 75% of the step is spent with both tripods on the ground. Because the tripod legs are not synchronized, much of the time spent with both tripods on the substrate is the time it takes for legs from the second tripod to leave the ground. The experimentally measured initial conditions of $\dot{\theta}$ and $\dot{r}$ were used to constrain the optimal initial condition. The optimal initial condition was constrained to be within ±10% of the measurements. Because we set the foothold location of ARSLIP as the middle of the front and hind leg foothold positions, initial conditions of $\theta$ and $r$ could be determined from experimental data. The SLIP model was fitted using a similar method under the same parameter conditions except for the absence of $k_a$ due to the lack of angular spring in SLIP. The system of ODEs for SLIP is given by For both models, the gravitational constant g had a value of 9.807 m/s². Derivation of the formula relating tripod model to ARSLIP The total elastic potential energy of a springy tripod ( Figure 5) is given by The variables are also enumerated in Table 1 and shown in Figure 5. In this equation, $R_{tri}$ is the natural length of the springy tripod that is optimized for each fly; r is the height of the middle leg; $\theta$ is the angle that it makes with the vertical axis, which is identified with the radial and angular coordinate of ARSLIP; L is the measured spread of the tripod ( Figure 7A); and k is the stiffness of a given leg. The total elastic potential energy is simply a sum of the potential energies due to each individual leg. We claim that $V_{tri}$ is equivalent to the ARSLIP potential energy, V (Equation 12), for evolution that is close to the midpoint ($r = r_m$ and $\theta = 0$).
Because $|\delta r/r_m| \sim 0.1$ and $|\theta| \sim 0.2$, it should be sufficient to show that the two potential energies agree with each other up to the quadratic order in fluctuations around the fixed point. This means that the first and second derivatives with respect to $r$ and $\theta$ at $r = r_m$ and $\theta = 0$ are the same for both the potential energies. Specifically, are the same for both the potential energies. The relations involving the first derivative of $\theta$ (Equation 15) and cross-double derivative involving both $\theta$ and $r$ are automatically satisfied because of symmetry (Equation 18), the latter demonstrating the independence of the radial and angular springy forces that are assumed in the ARSLIP model. We are then left with three equations, including the first derivative of $r$ (Equation 14), and the two double derivatives w.r.t. $r$ and $\theta$ (Equations 16 and 17). The effective ARSLIP potential energy involves the parameters R, $k_s$, and $k_a$. Consequently, it is possible to relate ARSLIP and springy tripod using the following equations: Thus, the parameters, R, $k_s$, and $k_a$ can be determined from the tripod potential energy parameters, k and $R_{tri}$, and the geometric quantities $r_m$, L (Equations 19-21). We obtained $r_m$ and L from the geometric data for each step. We assumed that a given fly has the same $R_{tri}$ and k; and fit $k_s$ and $k_a$ for all the steps of the given fly. This assumption led to a best fit value of k and $R_{tri}$.
Identification of Novel microRNAs in Post-Transcriptional Control of Nrf2 Expression and Redox Homeostasis in Neuronal, SH-SY5Y Cells Nuclear factor-erythroid 2-related factor 2 (Nrf2/NFE2L2), a redox-sensitive transcription factor plays a critical role in adaptation to cellular stress and affords cellular defense by initiating transcription of antioxidative and detoxification genes. While a protein can be regulated at multiple levels, control of Nrf2 has been largely studied at post-translational regulation points by Keap1. Importantly, post-transcriptional/translational based regulation of Nrf2 is less understood and to date there are no reports on such mechanisms in neuronal systems. In this context, studies involving the role of microRNAs (miRs) which are normally considered as fine tuning regulators of protein production through translation repression and/or post-transcriptional alterations, are in place. In the current study, based on in-silico analysis followed by immunoblotting and real time analysis, we have identified and validated for the first time that human NFE2L2 could be targeted by miR153/miR27a/miR142-5p/miR144 in neuronal, SH-SY5Y cells. Co-transfection studies with individual miR mimics along with either WT 3′ UTR of human Nrf2 or mutated miRNA targeting seed sequence within Nrf2 3′ UTR, demonstrated that Nrf2 is a direct regulatory target of these miRs. In addition, ectopic expression of miR153/miR27a/miR142-5p/miR144 affected Nrf2 mRNA abundance and nucleo-cytoplasmic concentration of Nrf2 in a Keap1 independent manner resulting in inefficient transactivating ability of Nrf2. Furthermore, forced expression of miRs diminished GCLC and GSR expression resulting in alteration of Nrf2 dependent redox homeostasis.
Finally, bioinformatics based miRNA-disease network analysis (MDN) along with extended computational network analysis of Nrf2 associated pathologic processes suggests that if in a particular cellular scenario where any of these miR153/miR27a/miR142-5p/miR144 either individually or as a group is altered, it could affect Nrf2 thus triggering and/or determining the fate of wide range of disease outcomes. Introduction NF-E2 related factor 2 (NFE2L2/Nrf2), a redox sensitive transcription factor responds to oxidative insult by regulating a battery of cytoprotective genes including those involved in glutathione metabolism. Basal homeostatic levels of Nrf2 in any cellular system are predominantly maintained by quenching the interaction of Nrf2 in cytosol with Keap1, a Cullin 3-dependent substrate adaptor protein. Upon exposure to electrophilic stress stimuli, Nrf2 dissociates from Keap1 thus eluding proteasomal degradation. It is shuttled to nucleus where it binds to critical cis-acting antioxidant response element (ARE) and triggers the transactivation of its targets. Nrf2 is considered as one of the chief ARE binding transactivators and since its discovery as a protein controlling the expression of β-globin gene, numerous reports have documented beneficial role of Nrf2 in affording antioxidant cytoprotection in various disease settings. Both clinical and experimental evidences (cell and animal models) have demonstrated a strong correlation between dysregulation in Nrf2 pathway and various neurological diseases. In addition to widely studied Keap1 based Nrf2 control, there have been reports addressing transcriptional, translational and phosphorylation based posttranslational control of Nrf2 expression [2]. Yet, the mechanisms elucidating the posttranscriptional control of Nrf2 are scant.
Recently, a novel class of posttranscriptional regulators, microRNAs (miRs) which are short non-coding RNAs of ~21–23 nucleotides in length have been shown to effect translation repression or degradation of a target mRNA or both in a sequence-specific manner. Of great interest in emerging field of miRs, it is to be noted that miravirsen (antagomiR for 122) have been successfully tested in Phase 2a trials for treating naïve patients with chronic HCV genotype 1 infection. Research exploring the importance of miRs in brain gene regulation in health and disease has gained considerable momentum as is evident from the reports in postmortem brain samples of most common neurodegenerative disorders such as Alzheimer's (AD), Parkinson's disease (PD), amyotrophic lateral sclerosis (ALS), Huntington's disease (HD), Frontotemporal lobar degeneration (FTLD). So far, there are only few studies in non-neuronal models that has validated Nrf2 silencing by miR144, miR28 and miR34a. Given the strong causal relationship between oxidative stress and various pathologies of CNS, it is of paramount importance to thoroughly understand the complex regulation of ''master redox switch'', Nrf2 in neuronal models. Thus, identification of miR based dysregulation of Nrf2 in neuronal models will generate an understanding of how neuron-based cytoprotection machinery can be perturbed in pathologic settings and enable development of potential Nrf2-dependent neuroprotection strategies. In this context, our current study has demonstrated a model of posttranscriptional repression of Nrf2 and its associated redox homeostasis by novel miRNAs 153/27a/142-5p/144 in a SH-SY5Y neuronal cellular model. However, future studies will need to determine the importance of association of Nrf2 deficiency with respect to these miRs and its implication in oxidative stress dependent neurodegeneration.
Transient Transfection SH-SY5Y cells were transiently transfected with 100 nM of indicated pre-miRs and 200 ng of plasmid reporter constructs. For the experiments involving transfection of miR alone, siPort amine was used as transfection agent and Fugene HD was used in the experiments involving co-transfection of plasmid constructs and miRNA. Briefly, miRs (or) vector constructs and respective transfection reagents were appropriately diluted in OPTI-MEM I medium separately and incubated for 5 min. The mixed reagents were incubated at room temperature for 20 min allowing the formation of transfection complex. Transfection was performed in serum free, antibiotic free media and 1.5 h post-transfection media containing serum, antibiotics was added and the plates were returned to incubator. After 48 h, cells were processed for various downstream applications. Immunoblotting Following experimental treatments, SH-SY5Y cells were gently washed with cold PBS and lysed in ice-cold RIPA lysis buffer. The lysates were sonicated and supernatants were collected by centrifuging at 13,000 rpm for 15 min at 4uC. Equal amounts of protein lysates from different treatment samples were separated by homemade SDS-PAGE gels and transferred onto PVDF membrane. The membrane was then blocked with 5% non-fat dry milk for 1 h at room temperature and probed against specific primary antibody for Nrf2, GCLC, GSR, PTEN, Actin, GAPDH, tubulin. After washing the membranes were incubated for 1 h with corresponding peroxidase-conjugated secondary antibody. Washed blots were immunodetected using Supersignal West Pico Chemiluminescent substrate kit (ThermoScientific, Rockford, IL). GAPDH, Actin or tubulin expression was used to normalize loading. The immunoreactive signals were quantified by densitometry using Image J software. 
Cytosolic and Nuclear Extraction Cytosolic and nuclear fractions of SH-SY5Y cells were extracted using a commercially available NE-PER Nuclear and Cytoplasmic Extraction kit (ThermoScientific, Rockford, IL) according to the manufacturer's instructions. RNA Extraction and Taqman Based Real-time q-RT PCR Total cellular RNA was extracted using Trizol reagent (Invitrogen, Carlsbad, CA). 1.5 µg of RNA was incubated with gDNA wipe out buffer (Qiagen, Valencia, CA) at 42 °C for 2 min to remove any genomic DNA contamination. Following gDNA elimination, the RNA was reverse transcribed to cDNA. For real time PCR analysis of Nrf2, NQO1, GCLC, endogenous Nrf2 3′ UTR and GAPDH mRNA expression, 1/10th of cDNAs prepared as above was used. Taqman gene expression assays consisting of predesigned primer and probe sets specific for human NFE2L2 (Hs00232352_ml); human NQO1 (Hs02512143_s1); human GCLC (Hs00155249_m1); human GAPDH (Hs03929097_g1) and custom designed for human endogenous Nrf2 3′ UTR (AJGJPXK) were from Applied Biosystems (Bedford, MA). Real time PCR amplification was performed in 384-well optical plates in a final volume of 20 µL containing 10 µL of TaqMan Universal Mastermix (Applied biosystems), 20 pmol of respective primers and 1/10th of reverse transcribed RNA. Real time PCR was conducted on a Biorad CFX384 Real time system (Biorad). The thermal cycling conditions used consisted of 50 °C/2 min; 95 °C/10 min followed by 40 PCR cycles at 95 °C/15 sec and 60 °C/1 min. Fold change in the mRNA expression was calculated on the basis of cycle threshold (Ct) value and GAPDH was used for normalization. Relative quantitation of transcript levels were plotted as fold difference as compared with untreated samples and was calculated by the formula: 2^(−ΔΔCT), where the ΔCT value of the sample was arrived at by subtracting the Ct value of respective genes from the Ct value of GAPDH and the ΔΔCT value was determined by subtracting the ΔCT value of the treated condition from the ΔCT of the untreated condition.
miRNA detection by real time analysis involved reverse transcription of cDNA using a small RNA specific stem-loop RT primer (hsa-miR144: RT-002676; hsa-miR153: RT-000476; hsa-miR27a: RT-002445; hsa-miR142-5p: RT-002248; U6 snRNA: RT-001973). Once specific cDNA was generated, individual miRNA was detected using Taqman small RNA assay Real time PCR analysis (hsa-miR144: TM-002676; hsa-miR153: TM-000476; hsa-miR27a: TM-002445; hsa-miR142-5p: TM-002248; U6 snRNA: TM-001973). Results were normalized to small nuclear RNA U6 that served as control and the data was expressed as Log 2 fold change in respective miRs/U6 snRNA levels. Luciferase Assays SH-SY5Y cells were transfected with various constructs as indicated in appropriate figures. 48 h post transfection, cellular extracts were collected after lysing with passive lysis buffer (Promega, Madison, WI) and centrifuging at 15,000 rpm for 20 min. Firefly and Renilla luciferase activities were determined from lysates using dual luciferase assay system (Promega, Madison, WI) in a Glomax (20/20) luminometer. As suggested by the manufacturer, firefly luciferase activity was normalized either to Renilla luciferase activity or protein concentration obtained from the corresponding samples. Site-directed Mutagenesis using Overlapping Primers Luciferase reporter constructs containing mutation of miR153, miR27a and miR142-5p target sites of Nrf2 39 UTR were generated using partial overlapping primer based PCR according to Zheng et al.. Primer pairs used in this strategy were designed to possess complementarities to each other at 59 terminus (bases underlined in Table 1). Briefly, PCR amplifications were performed using PrimeSTAR Max DNA polymerase, a unique high-performance DNA polymerase with wild type Nrf2 39 UTR reporter construct (pGL3 WT Nrf2 39 UTR) as template. The PCR parameters were initiated by pre-heating the reaction components to 94uC for 3 min. 
Cycling parameters for amplifying mutant miR153 include 30 cycles of 98uC/15 sec, 60uC/15 sec, and 68uC/1 min; followed by extension at 68uC/7 min. Individual mutants of 27a and 142-5p were PCR amplified using 25 cycles of 98uC/15 sec, 65uC/15 sec, and 72uC/45 sec; followed by extension at 72uC for 7 min. PCR products carrying the appropriate mutations were gel electrophoresed, purified, DpnI digested at 37uC for 1.5 h and inactivated at 80uC for 20 min. An aliquot was transformed into NEB10-beta competent E.coli cells and a total of 7 colonies from each were selected and their plasmids were isolated by PureYield Plasmid miniprep kit (Promega, Madison, WI). All mutants were sequence verified for the presence of desired mutations and absence of any other mutations in the Nrf2 39 UTR sequence (Genewiz, South Plainfield, NJ). GSH/GSSG Glo Assay Luminescence based GSH/GSSG Glo assay was used to measure the GSH/GSSG levels. SH-SY5Y cells transfected with indicated miRs for 48 h and processed for total glutathione (GSH) and oxidized glutathione (GSSG) in parallel reactions according to manufacturer's instructions (Promega, Madison, WI) with minor modifications. Following treatment, the cells were scraped in GSH buffer and halved into two fractions equally. Immediately one half of the lysate was added to a tube containing water and the other half of the lysate was added to N-Ethylmaleimide (NEM) containing tube that blocks glutathione in its reduced state. Intact GSSG that is left unblocked by NEM was then reduced to GSH followed by addition of luciferin generation reagent containing DTT and glutathione-S-transferase. The resultant luciferin formed in a GST-coupled firefly luciferase reaction was measured in terms of luminescent signal in a Glomax Multi detection system (Promega, Madison, WI). Both GSH and GSSG levels were determined against standard GSH according to manufacturer's instructions. 
GSH and GSSG levels were normalized to protein concentrations and the data were plotted as GSH/GSSG ratio. For imaging, cells were treated with 5 µM of DCF-DA at the end of the experiment in dark and returned to incubator. After 30 min the cells were gently washed with PBS to remove excess fluorophore. Cells were imaged immediately on an Olympus IX71 microscope with a 40× objective. DCF was excited at 488 nm using a multi-Argon laser and the emission collected through 510 nm barrier filter. The laser intensity output used was attenuated to minimize photobleaching and phototoxicity to the cells during XY scanning by adjusting PMT, gain and offset. For reproducibility and comparison purposes, all experimental conditions as well as microscope settings were kept identical. Random fields were imaged and a representative photomicrograph was presented. Statistical Analysis Results are expressed as means ± s.e.m. For experiments involving more than two groups, statistical evaluation of the data was performed using one-way ANOVA followed by Newman-Keuls as post-test analysis. Student's t-test was performed in case of experiments with two groups. A value of P<0.05 was considered as statistically significant. Results and Discussion In-silico Prediction of Target miRNAs against Human Nrf2 and Experimental Validation of Nrf2 Downregulation by Forced Expression of miR144, miR153, miR27a and miR142-5p Nrf2, an essential transcription factor for regulating both basal and inducible expression of diverse cytoprotective genes has been recently demonstrated to be regulated by miR144 and miR28 in non-neuronal models. It is well known that multiple miRs target a single gene and a single miR can regulate many genes and hence to obtain a deeper understanding of complex gene networks a comprehensive knowledge of a target-miRNA interaction is in place. Taking these findings into account we sought to examine the miR based Nrf2 regulation in neuronal cellular model.
To test this hypothesis, we primarily employed bioinformatic analysis of human Nrf2/NFE2L2 39 UTR for miRNA seed sequences using the ''predicted targets component of miRecords''. Nrf2 transcript harbors a 39 UTR of,428 bp in length and hence many potential miRNA binding sites are possible. As expected the search resulted in overall prediction of 408 plausible miRNA that could target human Nrf2. Typically the accuracy of any individual prediction program has been calculated to range between 30-76%. This necessitates selection of the right database before experimental testing and to this end the first level of filter we applied was choosing ''miRecords'' which integrates data from 11 different established miRNA target prediction programs. The second level of stringency applied was selecting only the top and common miRs which remained prominent at the intersection (overlapping part) of at least 6 individual databases listed in miRecords. Based on these criteria, we narrowed down to a list of 4 different miRs (hsa-miR27a, hsa-miR153, hsa-miR142-5p including the already reported hsa-miR144) (Fig. 1A). Having selected the putative miRs that could target Nrf2, we next assessed for the conservancy of those seed sequences of miRs in Nrf2 39 UTR among different organisms by ''TargetScan''. It is clear that the different miRNA target sequences on Nrf2 39 UTR display a high phylogenic conservation by,90% among various mammals (Fig. 1B). This likely indicates a critical role for these miRNA binding sites on Nrf2 39 UTR to regulate Nrf2 that is conserved during a highly dynamic evolutionary process. To validate whether the computationally predicted miRNAs could target Nrf2 in neuronal system, we chose human neuroblastoma SH-SY5Y, a neuronal-like subline of SK-N-SH cells and overexpressed with each of these miRs (miR144, miR153, miR27a and miR142-5p) individually. 
These cells possess the phenotype of sympathetic ganglion neurons and have been extensively utilized to study gene expression changes against neurotoxicants and various other neuro-related phenomena such as oxidative cytotoxicity, neuronal apoptosis, neurodegeneration, neurotransmitter release and calcium homeostasis. Primarily, 48 h post-transfection, the transfection efficiency of these miRs was evaluated by Taqman based qRT-PCR (Fig. S1A-S1D). Endogenous level of miR144 and miR142-5p in SH-SY-5Y cells were observed to be negligible. Thus, in order to calculate the transfection efficiency of these miRs, a widely accepted method of imputing a Ct threshold value of 40 was adopted. Results were analyzed and expressed as natural logarithm (ln) of relative quantity of miR144 and miR142-5p, normalized to U6 snRNA from a calibrator sample (scramble control miR). Transfection of miR144 and miR142-5p duplex increased the endogenous level of these miRs by log 3.4 fold and 5.2 fold respectively ( Fig. S1A; S1D). The level of miR153 and miR27a was relatively high when compared with endogenous levels of miR144 and miR142-5p. Ectopic expression of miR153 and miR27a resulted in log 8.5 fold and 8.8 fold increase in levels of these miRs respectively ( Fig. S1B; S1C). Having confirmed the efficiency of miR overexpression, we next performed Western blot analysis for Nrf2 using anti-Nrf2 (C-20; sc-722). It is to be noted luciferase was measured 48 h post-transfection and results are plotted as percentage change over respective control. (C) Cells were co-transfected with both the reporter gene (either wild type or miR 153 site mutated Nrf2 39 UTR) constructs and scramble miR mimic/miR153 mimic. 48 h following transfection, firefly luciferase activity normalized to renilla activity was determined. The results are represented as percentage change over respective controls. 
(D) 100 nM of pre-miR scramble control (or) pre-miR27a was transfected into SH-SY5Y cells together with a wild-type or miR27a binding site mutated Nrf2 3′ UTR construct. Cells were lysed 48 h post-transfection and luciferase signal was measured. Firefly luciferase signals were normalized to renilla luciferase signal and percentage change over corresponding controls was represented. (E) Luciferase reporters containing wild-type or miR142-5p site mutant of human Nrf2 3′ UTR were co-transfected with scramble control miRNA or miR142-5p precursors into SH-SY5Y cells. 48 h after transfection, dual luciferase activity was measured. After normalization for renilla luciferase activity, the results were plotted as percentage change and compared to the corresponding controls. In ( RNA was isolated and retro-transcribed from cells treated as in 6A, and Taqman based realtime qPCR was carried out using GCLC and GAPDH specific primers. Data is expressed as relative change in GCLC mRNA levels normalized to GAPDH (n = 4). (C) Western blotting was performed on total cellular that the actual predicted molecular mass of Nrf2 protein is 68 kDa and previous studies indicate that due to its inherent high acidic charges, Nrf2 anomalously migrates at ~100 kDa in SDS-PAGE. Other reports also suggest that the ~100 kDa form of Nrf2 is a result of dimer formation with actin or ubiquitination of Nrf2. A recent report from Lau et al., concluded ~95–110 kDa as the appropriate molecular weight of Nrf2. However, there are many independent reports including a study wherein bacterially expressed recombinant TAT-Nrf2 protein was purified and identified to migrate as 72 kDa protein in SDS-PAGE (~69 kDa Nrf2 + ~3 kDa TAT tag). In our experiments, overexpression of all of the aforementioned miRs individually decreased both the expression of predicted (68 kDa) and apparent Nrf2 protein (that runs at ~100 kDa) significantly by ~50% (P<0.05) when compared to scramble miR transfected SH-SY5Y cells ( Fig. 1C; 1D).
Both 100 kDA and 68 kDA Nrf2 bands were normalized to GAPDH level following Image J quantification. We confirmed the specificity of these miRs by overexpressing SH-SY5Y cells with a non-Nrf2 targeting micro-RNA, miR21 (predicted by miRecords). As expected, miR21 overexpression did not result in any change in Nrf2 protein expression (Fig. S2A). miR21 transfection was validated by significant decrease in protein levels of one of its bonafide targets, PTEN (Fig. S2B). Overall, this data suggests that Nrf2 is translationally repressed by miR144, miR153, miR27a, miR142-5p in a specific manner. Also, at this point, the possibility of these miRs altering the abundance of available Nrf2 mRNA pool for translation is not ruled out as miRs have been shown to regulate gene expression by mRNA deadenylation and decay. miR144, miR153, miR27a and miR142-5p Represses Nrf2 39 UTR and Endogenous Nrf2 mRNA MicroRNAs repress protein production by preferentially interacting with complementary sequence motifs in the 3' untranslated region (UTR) of target mRNAs. It has been previously demonstrated that human Nrf2 39 UTR possess 2 potential miR144 binding sites at 265-271 and 370-377. According to our prediction analysis using TargetScan, we observed evolutionarily conserved binding sites for miR27a, miR142-5p, miR153 between 62-68, 83-90, 98-105 respectively in the human Nrf2 39 UTR (Fig. 1B). Thus, to test whether the forced expression of selected individual miRNA candidates (miR144, miR153, miR27a and miR142-5p) have any repressing effect on Nrf2 39 UTR, we used a reporter construct that was cloned with 428 bp of human Nrf2 39 UTR downstream of luciferase gene. Typically, a decreased luminescence output in this assay indicates that miRNAs had effectively bound to and targeted the 39 UTR. It was observed that forced expression of all the tested miRs significantly repressed Nrf2 39 UTR reporter activity (p,0.05). 
The results show that individual overexpression of miR144, miR153, miR142-5p effected a,42% repression and a maximal repression by about,68% was shown by miR27a ( Fig. 2A). Though ectopically expressed 39 UTR transcriptional reporter construct is used to estimate the expression pattern of a gene that is regulated by miRs, it may not reflect the precise regulation that occur in endogenous cellular mileu. The cloned UTR may lack several flanking cis-regulatory elements (or) the intrinsic Nrf2 coding sequence that dictates RNA fold necessary for miRNA interaction as opposed to the endogenous set. To eliminate such experimental bias, Bartuma et al. employed RT-PCR based detection of endogenous 39 UTR of HMGA2 in various fetal tissues, adipocytic tumors and amniocytic cell cultures. We adopted this strategy to assess the effect of overexpression of indicated miRNA candidates on endogenous expression of Nrf2 39 UTR mRNA using Nrf2 39 UTR specific primers by real time qRT-PCR analysis. As shown in Figure 2B, overexpression of different precursor miRs showed significant reduction in endogenous expression of Nrf2 39 UTR mRNA when compared to that of scramble miR (p,0.05). To further confirm that Nrf2 39 UTR regulation by miR144, miR153, miR27a, miR142-5p indeed impact the expression of Nrf2 mRNA, we determined the levels of Nrf2 message in SH-SY5Y cells transfected with and without the aforementioned miRs using quantitative real time PCR for Nrf2. The cells transfected with precursor miR144, miR153, miR27a, miR142-5p displayed significant reduction in Nrf2 transcript levels when compared with scramble miR transfected cells (p,0.05) (Fig. 2C). Though, the endogenous level of Nrf2 39 UTR and Nrf2 mRNA was significantly repressed by ectopically expressing miRs, the magnitude of reduction was not of similar extent (Fig. 2C vs 2B). 
The probable reason is that at a given time, the cellular mRNA pool will include both species of Nrf2 mRNA namely, complete Nrf2 (with coding sequence+UTR intact) and Nrf2 (with coding sequence+partial UTR: in the process of decay). Thus, the primers used in Nrf2 mRNA detection by real time analysis would measure all the species of Nrf2 mRNA accounting for actual fold in reduction. However, the primers used in Nrf2 39 UTR analysis would detect only the species with intact UTR sequence, in otherwords it would not detect the pool of mRNA where UTR sequence is lost. In addition, the significant reduction in Nrf2 mRNA enforced by miRs is not precisely reflected in the magnitude of Nrf2 protein reduction (compare,50% in protein repression; Fig. 1D vs,25% in mRNA levels; Fig. 2C). Though, several studies show miRNA induced protein repression strongly correlates to mRNA levels, the possibility of miRs inducing translational repression even before mRNA deadenylation and decay have been very recently reported. Thus, future studies should assess the relative timing and involvement of various closely linked events such as translation repression, mRNA deadenylation and decay in miR144/miR153/miR27a/miR142-5p induced silencing of Nrf2. Altogether, our results strongly suggest that miR144/153/27a/142-5p could suppress Nrf2 gene expression through 39 UTR binding and down-modulating Nrf2 mRNA in SH-SY5Y neuronal cells. Enforced Expression of miR(s) Affects Nrf2 Localization and its Transactivation To test whether our findings of decreased Nrf2 protein in whole cell homogenates would affect its localization and function we carried out cytosolic and nuclear fractionation and analyzed for lysates from the indicated experimental group with GCLC antibody. The corresponding blot was stripped, reprobed with GAPDH antibody and a representative data are shown. Densitometric analysis of GCLC bands normalized to GAPDH bands are shown in the bottom panel (n = 5). 
(D) Whole cell lysates (similar to 6C) were analyzed for the expression of GSR and GAPDH. A representative Western blot image is shown in the top panel. Graph of densitometric scans of GSR immunoblot normalized to that of GAPDH is presented in the bottom panel (n = 4). (E) Different miR mimics were overexpressed as indicated. Cellular ROS production was measured using ROS-sensitive probe, DCF-DA and fluorescence signal of DCF formed was recorded by Nrf2 expression by immunoblotting. Nrf2 corresponding to,100 kDA and 68 kDA was found to be decreased in both the cytosolic and nuclear fractions of tested miRNA mimics transfected cells when compared to scramble control pre-miR transfected cells (Fig. 3A & 3B). Blots were stripped and reprobed for GAPDH and laminb1 that served as loading controls. Further the fraction purity was ascertained by reverse probing the cytosolic blot for laminb1 and nuclear blot for GAPDH. Generally, a low level of nuclear Nrf2 results in declined basal or induced transactivation of its target genes. We therefore tested whether the miRNA induced downregulation of Nrf2 protein results in decreased mRNA expression of one of the ''Nrf2 regulons'', NQO1. Taqman based real time qRT-PCR analysis with NQO1 gene specific primers demonstrated that overexpression of miRs resulted in significant reduction in expression of NQO1 mRNA (p,0.05) (Fig. 3C). To confirm if the expression of NQO1 mRNA decreased by overexpression of miRs was due to dysregulated transcriptional activation, we performed a luciferase based reporter assay by co-transfection of plasmid containing NQO-1 ARE enhancer element along with the indicated miRs. Overexpression of miRs resulted in a profound decrease by about 7 fold in NQO1 ARE -driven luciferase activity (Fig. 3D) indicating the effective suppression of Nrf2-mediated transactivity. Though we observed a remarkable downregulation of NQO1-ARE activity (Fig. 
3D), a comparable level of reduction in nuclear Nrf2 levels was not noted (Fig. 3B). The evident disparity could be argued for the fact that luciferase assay depends on the degree of binding of active transcription factors (TFs) (herein, active Nrf2, but not total Nrf2 level) to its consensus element. While, Western output is dependent on the level of TFs rather than the state of TFs (active or inactive) and its degree of binding to its target. Further, strict correlation among ARE reporter activity and ARE containing endogenous target expression (mRNA levels of GCLC, NQO1) was also not observed. It is understood that 46 NQO1-ARE reporter plasmid is engineered with only Nrf2 binding elements in which case the activity of reporter plasmid is solely dependent on Nrf2. However, in the endogenous cellular setting, target gene expression could be dependent on the enhancers and suppressors other than Nrf2 which could bind to non-ARE DNA regions. Therefore, the luciferase based transcriptional activity measurement of NQO1-ARE construct would be very sensitive to Nrf2 levels as opposed to endogenous transcriptional activity measurement (mRNA levels by real time) and thus, connecting these two different measurement strategies based on exact magnitude of changes would be practically difficult. Nevertheless, our findings demonstrate that in neuronal SH-SY5Y cells, miR144/153/27a/142-5p induced Nrf2 downregulation affects the nucleo-cytoplasmic concentration of Nrf2 which is reflected in its inefficient transactivating ability. miR144/miR153/miR27a/miR142-5p Mediated Repression of Nrf2 is Keap1-independent Keap1 mediated control of Nrf2 is a spatiotemporally regulated process and cytosolic Keap1 is believed to inversely control the nucleo-cytoplasmic shuttling of Nrf2 and latter's access to its targets in nucleus. Notably it has been demonstrated that miRs can also induce translation upregulation of target mRNAs on certain instances. 
Thus, we raised a critical question as to whether overexpression of the indicated miRs has any possible auxiliary impact in upregulating Keap1 thereby indirectly repressing Nrf2. To assess this, we first performed a bio-informatic analysis using ''miRecords'' for Keap1. Unlike Nrf2 analysis which was performed with a high stringency of mapping the top miRs from atleast 6 individual databases (Fig. 1A), miR:Keap1 prediction was carried out with less stringency and selected those miRs that were populated at least in 2 different databases. 124 miRNA was predicted to target Keap1 (NM_012289) and a representation of miR-Keap1 network generated by ''Cytosca-pe_v2.8.2'' is shown (Fig. S3). It is clear from the list that even with less stringent filtering, Keap1 is not predicted to be a regulatory target of any of the 4 miRs tested herein. This suggests that Nrf2 regulation by miR144/153/27a/142-5p appears to be a tightly controlled event with the likelihood of circumventing any redundancies involving Keap1. Having scanned the in-silico analysis of Keap-1:miR network, we next validated the bioinformatic prediction using immunoblotting for Keap1 in total cellular lysates. Overexpression of the miR mimics did not affect Keap1 protein levels relative to that of scramble control miR mimic (Fig. 4). Recently, Keap1 independent regulation of Nrf2 by miR28 has been reported in breast cancer cells. However, till date there is no report on miR based Nrf2 regulation in neuronal system. Thus our study is the first to demonstrate that Nrf2 protein could be subjected to translation repression by miR144/miR153/ miR27a/miR142-5p in a Keap1 independent manner in neuronal cellular system. Nrf2 is a Direct Target of miR144, miR153, miR27a and miR142-5p As we showed that the miRs can downregulate Nrf2 39 UTR expression thereby Nrf2 protein production in a Keap1 independent manner (Fig. 2 & Fig. 
4), we next sought to determine whether this is indeed due to a direct effect of these miRs via binding to its respective complementary sequence on the 39 UTR. To address this specificity of these interactions, we generated mutation reporter constructs bearing a 3-nucleotide change in the individual miR target sequences on Nrf2 39 UTR by site directed mutagenesis and compared its activity against the WT Nrf2 39 UTR. The schematic representation of binding sites and the individual mutants for miR144 (site-1 & site-2), miR153, miR27a and miR142-5p in the human Nrf2 39 UTR was shown in Fig. 5A and the successful incorporation of mutagenized bases was confirmed by sequencing of the individual mutant constructs. list was used as an input file and a network was generated using another bioinformatic program, ''Cytoscape_v2.8.20. NFE2L2, which is at the intersection of 4 miRs is represented by a bold colored lines. While all the other target genes which are at the intersection of 4 miRs are represented by independent colored lines (miR144-brown; miR153-red; miR27a-black; miR142-5p-purple). (B) KEGG pathway mapping identified Prion disease as the predominant pathway at the intersection of miR144, miR153, miR27a, miR142-5p involving NFE2L2, one of the genes that is predicted to be regulated with a high -ln(p-value) of 20.79 by ''DIANA-mirPath''. (C) Using the concept of network biology, we constructed miRNA-associated disease network (MDN) to visualize the relationship of miRs of our interest with multiple pathologic/pathogenic conditions. The network was generated by compiling the data manually from Pubmed (till 05/07/2012) along with the information from two other databases, Human miRNA Disease Database (HMDD -http://202.38.126.151/hmdd/mirna/md/) and miR2Disease (http://www.mir2disease.org/). This compiled data was used in ''Cytosca-pe_v2.8.20 to establish connections between miRs and associated diseases. 
Different diseases associated with indicated miRs are represented as light green colored nodes. Diseases associated with individual miRs are connected by specific colored edges (lines) and any two different miRs that share one common disease are connected by red colored edge. Neuro specific pathologies are represented in red font. doi:10.1371/journal.pone.0051111.g007 We focused on miR144 (site 2) in the current study as Sangokoya et al. previously demonstrated that miR144 binding site 2 (position 370-377) is only involved in miR144 mediated repression of Nrf2 in erythrocytes. All the constructs either wild type or indicated mutants of Nrf2 39 UTR were co-transfected into cultured SH-SY5Y cells along with individual miR mimics and a renilla luciferase transfection control vector. After 48 h of transfection, both firefly and renilla luciferase levels were assayed by luminometry. Transfection of WT Nrf2 39 UTR construct along with individual miRs significantly repressed the luciferase activity (p,0.05) (Fig. 5B-5E; compare lane 1 vs lane 2). In each case, mutation of miR144 (or) miR153 (or) miR27a (or) miR142-5p binding sites on Nrf2 39 UTR failed to downregulate the luciferase activity as opposed to those observed in WT type reporter construct (Fig. 5B -5E; compare lane 3 vs lane 4). These experiments demonstrate that in order for these miRs to repress Nrf2 activity, the respective miR binding sites on Nrf2 39 UTR must be intact. In other words, mutation of individual miR binding sites viz. miR144, miR153, miR27a and miR142-5p abrogates the interaction and binding of corresponding miRs to human Nrf2 39 UTR, thus indicating Nrf2 as a direct regulatory target of these miRs. Therefore, we explored whether overexpression of these miRs could have any role in modifying GSH/GSSG ratio, a monitor of cellular antioxidant status. Importantly, a decrease in GSH/GSSG ratio can also be used as an indicator to distinguish the oxidatively stressed cells from non-stressed cells. 
Luminescence based GSH/GSSG glo assay revealed that exogenous expression of miRs resulted in,2 fold decrease in the ratio of GSH/GSSG (p,0.05) (Fig. 6A). We next assessed whether changes in GCLC, a rate limiting enzyme involved in de novo synthesis of GSH is the contributing factor for reduced GSH/GSSG ratio. GCLC transcript expression by real time qRT-PCR analysis indicated a 1.5 to 2 fold decrease in different tested miRs as compared to scramble control miR (p,0.05) (Fig. 6B). Consistent with changes observed in GCLC message, protein levels of GCLC was also significantly altered (p,0.05) (Fig. 6C). In general, glutathione reductase (GSR) is an enzyme that maintains glutathione pool in the reduced form in cytosol. Thus any impairment in GSR coupled with oxidative stress would be expected to favor accumulation of oxidized glutathione (GSSG). Hence, in our experimental conditions we assessed whether expression of GSR is altered and indeed we observed that overexpression of all the tested miRs significantly repressed GSR protein by 1.5-2 fold from that of scramble miRs (p,0.05) (Fig. 6D). It was previously reported that Nrf2 dysregulation significantly altered the expression of GCLC and GSR affecting GSH homeostasis in Nrf2 knockout mice and mammalian cells. In our present study, alterations in Nrf2-dependent de novo pathway (involving GCLC) and regeneration pathway (involving GSR) both are likely to contribute to diminution of GSH/GSSG ratio. Nrf2 deficiency has been reported to increase accumulation of oxidized form of glutathione as well as intracellular ROS levels. Given that all the 4 tested miRs reduced Nrf2 levels and affected GSH homeostasis, we next tested whether this could be related to increased ROS levels by DCF-DA based imaging and fluorescence assay. DCF-DA is a non-polar, cell-permeable, sensitive fluro-phore that is widely used to detect several ROS. 
DFC-DA is converted to DCF, a fluorescent product, only in presence of ROS and the intensity of DCF fluorescence reflect the levels of ROS products in cellular system. Overexpression of various test miR mimics resulted in a moderate, yet significant (p,0.05) increase in intracellular DCF fluorescence ranging from,20% to 37% compared with scramble control miR (Fig. 6E). Further, image analysis of ROS also yielded an identical result as that of semi-quantitative fluorescence assay (Fig. S4). Thus, in view of preserving redox potential that is key to a normal cellular physiology, our results suggest that Nrf2 dependent redox homeostasis could be controlled in this neuronal system by regulation of levels of the following miRs: miR144/miR153/ miR27a/miR142-5p. Nrf2 at the Intersection of 4 Different miRs. Construction of miR-disease Network (MDN) with Respect to miR144/ miR153/miR27a/miR142-5p As one to many miR:target relationships are likely, we next analyzed the possible strong candidates including Nrf2 that could be mapped at the intersecting points of miR144, miR153, miR27a and miR142-5p using ''mirDIP (microRNA: Data Integration Portal)''. Besides the default settings in mirDIP, we applied a secondary level of filter by assigning a standardized score of minimum ''50.0'' and searched for targets of chosen 4 miRs with microRNA.org as database criteria and intersecting IDs. Totally 28 genes were computed to be at the intersection of 4 different miRs (hsa-miR144/hsa-miR153/hsa-miR27a/hsa-miR142-5p). This list was used as an input file and a network of targets:(144>153>27a>142-5p) was generated by bioinformatic interaction network generating software ''Cytoscape_v2.8.2'' (Fig. 7A). In order to visualize the collective effect of co-expressed miRs in modulating specific pathways, we performed simultaneous enrichment analysis using ''DIANA-mirPath'' that considers multiple miR:target relationships alongside comparing each set of miRNA targets to all known KEGG pathways. 
This software has an integrated chi-square testing component and following the analysis results are displayed as a negative natural logarithm of the enrichment p-value which is distinct for each pathway. Higher the p value, stronger the association of a particular gene with an indicated KEGG pathway. It is to be noted that DIANA-mirPath analysis populated NFE2L2 as the principal gene at the intersection of miR144, miR153, miR27a, miR142-5p with a highest -ln(p-value) of 20.79 that is mapped to involve in Prion disease by KEGG pathway (Fig. 7B). The computationally generated intersection dataset which gives an overview of the cooperative downregulation of Nrf2 by these miRs was confirmed by overexpressing combination of these 4miRs at 1/10 th of the concentration that were tested individually. Interestingly, we observed a striking and similar level of downregulation of Nrf2 on combining these miRs even at a low dose (40 nM-10 nM each) (Fig. S5A) as opposed to 100 nM used in individual miR transfection experiments. Further, 40 nM of combined miRs (10 nM each) resulted in significant downregulation of GSH/ GSSG ratio which is almost comparable to that of cells overexpressed with 100 nM concentration of individual miRs (Fig. S5B). In general, these miRs (miR144, miR153, miR27a, miR142-5p) when individually present can regulate numerous targets/ pathways and the effect of particularly targeting Nrf2 may vary from moderate to high depending on the cellular/stressor setting. In order to facilitate the comparison of different biological processes that are altered in the aforementioned miRNA datasets, enrichment p-value analysis of the Union dataset with all these miRs was also performed and the results are presented in a bar plot graph (Fig. S6). It is clear that in most of the top targeted pathways, the Union dataset -lnP (Red bars) are higher than the -lnP values obtained for each single miRNA (Yellow, Green and Blue bars) (Fig. 
S6) indicating that co-ordinated dysregulation of most of the Nrf2 dependent biological pathways might be directly proportional to co-expression of these miRs. The results of Fig. S5 and S6 suggests that in a given cellular context, when these candidate miRs: miR144, miR153, miR27a and miR142-5p coexist even at low levels, each would bind to Nrf2 via multiple, distinct binding sites and may perhaps increase the robustness and likelihood of targeting Nrf2 and its associated functions. In general, identification of disease-related miRs is vital for understanding the pathogenesis of diseases at the molecular level, and to this end, computational analysis of miRNA-disease associations prioritizes and narrows down the candidate miRs with respect to a selective target for further experimental evaluation. Thus, we have presented a comprehensive miRNA:disease linkage network with respect to miRs144/153/27a/142-5p (Fig. 7C). This extended approach is generated based on the published reports gained from experimental evaluations rather than just prediction analysis which clearly suggests that the candidate miRs are strongly associated with heterogenous disease phenotypes with some of the miRs sharing a common disease among them (Fig. 7C). Notably, these miRs either individually or as a combination have been implicated in many neuroabnormalities (Red and Bold letters; Fig. 7C). Given a strong association for oxidative stress in these neurological disorders, our results could suggest a plausible role for these miRs-Nrf2 pathway interactions. Conclusions In conclusion, the results described in this report suggest a role for novel miR candidates viz. miR153, miR27a, miR142-5p and miR144 in regulating Nrf2 expression in SH-SY5Y neuronal cells. Further, miRs tested in the current study impair Nrf2 dependent redox homeostasis in neuronal cellular model which suggests a possible role for these miRs in ''redox sensing'' and neuropathologies that are ROS dependent. 
Notably in several stress conditions Nrf2 is activated as a counter-attack response. However this Nrf2 dependent adaptive response is not persistent whilst it becomes dampened even when the cellular system defies a particular stress or inducer. Though the apparent mechanism for such shifting responses during prolonged stress is obscure at this point, our current study suggests a potential mechanism that can temporally restrain Nrf2 by context specific upregulation of either one and/or combination of miRNAs. Future studies are warranted to identify whether this miRNA signature (miR144/153/27a/142-5p) is characteristic of severe oxidative stress related neuro-pathologies which will open new perspectives for antagomiR based therapy to counteract ROS damaging effects. Figure S1 Transfection efficiency of individual miRs assessed by Taqman based real time q-PCR analysis. SH-SY5Y cells were transfected with 100 nM of indicated miRs for 48 h. At the end of the experiment, miRNA was isolated miRVana kit (Ambion Inc., Austin, TX). The expression of each miRs was asssessed by Taqman miRNA predesigned assay from Applied Biosystems. Briefly, cDNA was reverse transcribed using a small RNA specific stem-loop RT primer (for each miRNA tested). Real time PCR analysis was performed using Taqman small RNA assay with specific cDNAs as template. Results were normalized to small nuclear RNA U6 that served as control (Panel A-D). The analyzed data was expressed as Log 2 fold change in respective miRs/U6 snRNA levels. * -indicate significant (p,0.05) when compared with scramble control miRNA as assessed by Student's t-test (n = 4). (TIF) Figure S2 Effect of overexpression of miR21, an Nrf2 non-targeting miRNA on Nrf2 protein expression. SH-SY5Y cells were transfected with 100 nM of either a scramble miRNA or a non-Nrf2 targeting miRNA, miR21 for 48 h. 
Whole cell protein lysates were collected and processed for Western analysis with anti-Nrf2 (Panel A) and anti-PTEN (a bonafide target of miR21) (Panel B). Anti-actin immunoblotting served as loading control. Lower panel to the corresponding Western indicate the densitometric analysis of target protein expression. Student's t-test was performed to assess the statistical significance. * -indicate significant (p,0.05) and ns indicate -not significant when compared with scramble control transfected cells (n = 4). (TIF) Figure S3 Schema of predicted Keap1 targeting miRs. Analysis of miRNAs that are predicted to target human Keap1 by ''miRecords'' and only the intersecting miR IDs from at least 2 individual databases are considered to generate Keap1:miR map using a network generating software ''Cytoscape_v2.8.2''. (TIF) Figure S4 Effect of overexpression of different miRs on endogenous ROS levels. SH-SY5Y cells were transfected with either 100 nM of scramble miRNA or individual miRs for 48 h. At the end of the treatment, cells were treated with 5 mM of DCF-DA in dark and returned to incubator. After 30 min the cells were washed gently with PBS to remove excess fluorophore. Photomicrographs were generated using Olympus IX71 microscope. (TIF) Figure S5 Effect of 10 nM each of combination of miR144, miR153, miR27a and miR142-5p on Nrf2 protein expression. SH-SY5Y cells were transfected with either 40 nM of scramble miRNA or combination of miRs (10 nM each of miR144, miR153, miR27a, miR142-5p) for 48 h. (A) Nrf2 immunoblotting was performed in whole cell protein lysates with anti-GAPDH serving as loading control. (B) As described in Materials and methods, GSH/GSSG Glo assay was performed using GSH as standard (Promega). GSH and GSSG levels were normalized to protein and results were expressed as GSH/GSSG ratio. * -indicate significant (p,0.05) and when compared with scramble control transfected cells (n = 4). 
(TIF) Figure S6 Integration analysis of multiple miRNAs (miR144/miR153/miR27a/miR142-5p) to various human pathways by DIANA mirPath. The effect of coexpression of this miRNA signature is visible in the bar plot graph of the -lnP values. In most of the top targeted pathways, the Union dataset -lnPs (Red bars) are higher than the -lnP values obtained for each single miRNA (Yellow, Green and Blue bars). (TIF) |
/**
* This function calculates how many numbers are even or odd
*
* @param p_arrayNumbers Array that saves the numbers
* @param p_limit Integer that represent the limit of Array
*/
public static void classificationNumbers(int[] p_arrayNumbers, int p_limit) {
int counterEven = 0, counterOdd = 0;
for (int i = 0; i < p_limit; i++) {
if (p_arrayNumbers[i] % 2 == 0) {
counterEven++;
} else {
counterOdd++;
}
}
System.out.println("The quantity of even numbers is: " + counterEven);
System.out.println("The quantity of odd numbers is: " + counterOdd);
} |
<gh_stars>0
package com.stylefeng.guns.rest.modular.pay.param;
import java.io.Serializable;
import java.math.BigDecimal;
public class WxPayJSAPI implements Serializable {
private String orderNum;
private String orderDesc;
private BigDecimal orderMoney;
private String openId;
@Override
public String toString() {
return "WxPayJSAPI{" +
"orderNum='" + orderNum + '\'' +
", orderDesc='" + orderDesc + '\'' +
", orderMoney=" + orderMoney +
", openId='" + openId + '\'' +
'}';
}
public WxPayJSAPI() {
}
public WxPayJSAPI(String orderNum, String orderDesc, BigDecimal orderMoney, String openId) {
this.orderNum = orderNum;
this.orderDesc = orderDesc;
this.orderMoney = orderMoney;
this.openId = openId;
}
public String getOrderNum() {
return orderNum;
}
public void setOrderNum(String orderNum) {
this.orderNum = orderNum;
}
public String getOrderDesc() {
return orderDesc;
}
public void setOrderDesc(String orderDesc) {
this.orderDesc = orderDesc;
}
public BigDecimal getOrderMoney() {
return orderMoney;
}
public void setOrderMoney(BigDecimal orderMoney) {
this.orderMoney = orderMoney;
}
public String getOpenId() {
return openId;
}
public void setOpenId(String openId) {
this.openId = openId;
}
}
|
package misrraimsp.uned.pfg.firstmarket.core.controller;
import com.google.gson.JsonSyntaxException;
import com.stripe.Stripe;
import com.stripe.exception.SignatureVerificationException;
import com.stripe.exception.StripeException;
import com.stripe.model.Event;
import com.stripe.model.EventDataObjectDeserializer;
import com.stripe.model.PaymentIntent;
import com.stripe.model.StripeObject;
import com.stripe.net.Webhook;
import misrraimsp.uned.pfg.firstmarket.config.propertyHolder.PaymentProperties;
import misrraimsp.uned.pfg.firstmarket.config.staticParameter.OrderStatus;
import misrraimsp.uned.pfg.firstmarket.config.staticParameter.PageSize;
import misrraimsp.uned.pfg.firstmarket.config.staticParameter.sort.OrderSortCriteria;
import misrraimsp.uned.pfg.firstmarket.core.model.Order;
import misrraimsp.uned.pfg.firstmarket.core.model.User;
import misrraimsp.uned.pfg.firstmarket.core.service.*;
import misrraimsp.uned.pfg.firstmarket.util.event.OnCartCommittedEvent;
import misrraimsp.uned.pfg.firstmarket.util.event.OnPaymentCancellationEvent;
import misrraimsp.uned.pfg.firstmarket.util.event.OnPaymentSuccessEvent;
import misrraimsp.uned.pfg.firstmarket.util.exception.EntityNotFoundByIdException;
import misrraimsp.uned.pfg.firstmarket.util.exception.ItemsAvailabilityException;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.MessageSource;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.security.core.annotation.AuthenticationPrincipal;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.List;
import java.util.Optional;
/**
 * MVC controller for order management and payment flow:
 * <ul>
 *   <li>listing orders (paged, sorted; admins see all, users see their own),</li>
 *   <li>admin updates of order status,</li>
 *   <li>cart checkout (commits the cart and publishes a cart-committed event),</li>
 *   <li>the Stripe webhook listener that reacts to PaymentIntent events.</li>
 * </ul>
 * Fix over previous revision: the {@code payment_intent.canceled} branch used to
 * log "payment_intent.succeeded event published" (copy-paste error); it now logs
 * the canceled event.
 */
@Controller
public class OrderController extends BasicController {

    private final ApplicationEventPublisher applicationEventPublisher;
    private final CartServer cartServer;
    private final PaymentProperties paymentProperties;

    public OrderController(UserServer userServer,
                           BookServer bookServer,
                           CatServer catServer,
                           ImageServer imageServer,
                           MessageSource messageSource,
                           OrderServer orderServer,
                           ApplicationEventPublisher applicationEventPublisher,
                           CartServer cartServer,
                           PaymentProperties paymentProperties) {
        super(userServer, bookServer, catServer, imageServer, messageSource, orderServer);
        this.applicationEventPublisher = applicationEventPublisher;
        this.cartServer = cartServer;
        this.paymentProperties = paymentProperties;
    }

    /**
     * Shows a paged list of orders. Anonymous visitors are redirected to login.
     *
     * @param pageNo   zero-based page index (default from fm.pagination properties)
     * @param pageSize page size option
     * @param sort     sort criterion for orders
     * @param model    view model to populate
     * @param authUser authenticated principal, or null when anonymous
     * @return the "orders" view, or a redirect to /login for anonymous users
     */
    @GetMapping("/orders")
    public String showOrders(@RequestParam(defaultValue = "${fm.pagination.default-index}") int pageNo,
                             @RequestParam(defaultValue = "${fm.pagination.default-size-index.order}") PageSize pageSize,
                             @RequestParam(defaultValue = "${fm.pagination.default-sort-index.order}") OrderSortCriteria sort,
                             Model model,
                             @AuthenticationPrincipal User authUser) {
        if (authUser == null) {
            LOGGER.warn("Anonymous user trying to access order info");
            return "redirect:/login";
        }
        populateModel(model.asMap(), authUser);
        populateModelToOrder(model, pageNo, pageSize, sort, authUser);
        return "orders";
    }

    /**
     * Loads the requested page of orders into the model. Admins get every order,
     * other users only their own. If the requested page index is past the last
     * page, the last page is fetched instead so the view never renders empty.
     */
    private void populateModelToOrder(Model model,
                                      int pageNo,
                                      PageSize pageSize,
                                      OrderSortCriteria sort,
                                      User authUser) {
        Pageable pageable = PageRequest.of(pageNo, pageSize.getSize(), sort.getDirection(), sort.getProperty());
        Page<Order> orderPage = (userServer.hasRole(authUser, "ROLE_ADMIN")) ? orderServer.findAll(pageable) : orderServer.getOrdersByUser(authUser, pageable);
        int lastPageNo = orderPage.getTotalPages() - 1;
        if (lastPageNo > 0 && lastPageNo < pageNo) {
            // Requested page is beyond the end: clamp to the last page and re-query.
            pageable = PageRequest.of(lastPageNo, pageSize.getSize(), sort.getDirection(), sort.getProperty());
            orderPage = (userServer.hasRole(authUser, "ROLE_ADMIN")) ? orderServer.findAll(pageable) : orderServer.getOrdersByUser(authUser, pageable);
        }
        model.addAttribute("pageOfEntities", orderPage);
        model.addAttribute("sort", sort);
        model.addAttribute("pageSize", pageSize);
    }

    /**
     * Admin action: sets a new status on an order, then redirects back to the
     * order list preserving pagination/sort query parameters when present.
     */
    @PostMapping("/admin/setOrderStatus")
    public ModelAndView processSetOrderStatus(ModelAndView modelAndView,
                                              @RequestParam Long orderId,
                                              @RequestParam OrderStatus orderStatus,
                                              @RequestParam(name = "pageNo") Optional<String> optPageNo,
                                              @RequestParam(name = "pageSize") Optional<String> optPageSize,
                                              @RequestParam(name = "sort") Optional<String> optSort) {
        orderServer.setStatus(orderId, orderStatus);
        modelAndView.setViewName("redirect:/orders");
        optPageNo.ifPresent(pageNo -> modelAndView.addObject("pageNo", pageNo));
        optPageSize.ifPresent(pageSize -> modelAndView.addObject("pageSize", pageSize));
        optSort.ifPresent(sort -> modelAndView.addObject("sort", sort));
        return modelAndView;
    }

    /**
     * Post-payment success page: renders the generic "info" view with the
     * localized success title and message.
     */
    @GetMapping("/user/success")
    public String showSuccess(Model model,
                              @AuthenticationPrincipal User authUser) throws InterruptedException {
        populateModel(model.asMap(), authUser);
        populateModelToInfo(
                model.asMap(),
                "Success",
                messageSource.getMessage("success.title", null, null),
                List.of(messageSource.getMessage("success.message", null, null)),
                true);
        return "info";
    }

    /**
     * Checkout entry point. Commits the user's cart (creating/associating a
     * Stripe PaymentIntent via CartServer) unless it is already committed, and
     * publishes a cart-committed event. If some cart items are disabled or out
     * of stock, the cart is pruned and the user is sent back to the cart view
     * with the availability problems listed.
     *
     * @throws StripeException propagated from the Stripe SDK
     */
    @GetMapping("/user/checkout")
    public String checkout(Model model,
                           @AuthenticationPrincipal User authUser) throws StripeException {
        Stripe.apiKey = paymentProperties.getKey().get("private");
        User user = userServer.findById(authUser.getId());
        if (user.getCart().isCommitted()) {
            LOGGER.debug("User(id={}) cart(id={}) is already committed (pi id={})", user.getId(), user.getCart().getId(), user.getCart().getStripePaymentIntentId());
        }
        else {
            try {
                cartServer.commitCart(user);
                applicationEventPublisher.publishEvent(new OnCartCommittedEvent(user));
                LOGGER.debug("cart-committed event published (userId={}, cartId={})", user.getId(), user.getCart().getId());
            }
            catch (ItemsAvailabilityException e) {
                // Some items can no longer be purchased: show the cart again with
                // the problems highlighted and drop the disabled items.
                populateModel(model.asMap(), authUser);
                model.addAttribute("itemsDisabled", e.getItemsDisabled());
                model.addAttribute("itemsOutOfStock", e.getItemsOutOfStock());
                model.addAttribute("cartBookRegistry", bookServer.getCartBookRegistry());
                cartServer.pruneCart(user.getCart(), e.getItemsDisabled());
                e.getItemsOutOfStock().forEach(item ->
                        LOGGER.debug("Book(id={}) run out of stock (Item: id={}, quantity={})", item.getBook().getId(), item.getId(), item.getQuantity())
                );
                return "cart";
            }
        }
        populateModel(model.asMap(), authUser);
        return "checkoutForm";
    }

    /**
     * Stripe webhook endpoint. Requests are accepted only from the configured
     * Stripe IP addresses. In local development ("local-dev" signature header)
     * the payload is "eventType-paymentIntentId"; otherwise the payload is a
     * signed Stripe event that is verified and deserialized. Succeeded and
     * canceled PaymentIntents publish the corresponding application events for
     * the user referenced in the PaymentIntent metadata.
     *
     * @param payload   raw request body (Stripe event JSON, or dev shorthand)
     * @param sigHeader Stripe-Signature header used to verify authenticity
     */
    @PostMapping("/listener")
    public void processStripeEvent(@RequestBody String payload,
                                   @RequestHeader("Stripe-Signature") String sigHeader,
                                   HttpServletRequest request,
                                   HttpServletResponse response) {
        LOGGER.debug("POST at /listener from {}", request.getRemoteAddr());
        if (paymentProperties.getIps().stream().noneMatch(ip -> ip.equals(request.getRemoteAddr()))) {
            LOGGER.warn("Trying to POST at /listener from an unknown ip address {}", request.getRemoteAddr());
            response.setStatus(403);
            return;
        }
        String eventType;
        PaymentIntent paymentIntent;
        // dev-localhost
        if (sigHeader.equals("local-dev")) {
            String[] parts = payload.split("-");
            eventType = parts[0];
            try {
                paymentIntent = PaymentIntent.retrieve(parts[1]);
            }
            catch (StripeException e) {
                LOGGER.warn("Stripe - Some exception occurred", e);
                response.setStatus(500);
                return;
            }
        }
        // web deployed
        else {
            // Getting the event
            Event event;
            try {
                event = Webhook.constructEvent(payload, sigHeader, paymentProperties.getKey().get("webhook"));
            }
            catch (JsonSyntaxException e) {
                // Invalid payload
                LOGGER.warn("Stripe - Invalid payload", e);
                response.setStatus(400);
                return;
            }
            catch (SignatureVerificationException e) {
                // Invalid signature
                LOGGER.warn("Stripe - Invalid signature", e);
                response.setStatus(400);
                return;
            }
            // Deserialize the nested object inside the event
            assert event != null;
            EventDataObjectDeserializer deserializer = event.getDataObjectDeserializer();
            StripeObject stripeObject;
            if (deserializer.getObject().isPresent()) {
                stripeObject = deserializer.getObject().get();
            }
            else {
                LOGGER.warn("Stripe - Event deserialization failed, probably due to an API version mismatch.");
                response.setStatus(200);
                return;
            }
            // Getting the payment-intent
            if (stripeObject instanceof PaymentIntent) {
                paymentIntent = (PaymentIntent) stripeObject;
            }
            else {
                // for now other objects other than PaymentIntent are not accepted
                LOGGER.debug("Stripe - An object other than PaymentIntent has been sent to the listener");
                response.setStatus(200);
                return;
            }
            eventType = event.getType();
        }
        // Retrieve user
        String piUserId = paymentIntent.getMetadata().get("user-id");
        if (piUserId == null || piUserId.isBlank()) {
            LOGGER.error("Stripe - No user identification within PaymentIntent(id={}) sent to the stripe listener", paymentIntent.getId());
            return;
        }
        try {
            User user = userServer.findById(Long.parseLong(piUserId));
            switch (eventType) {
                case "payment_intent.succeeded":
                    LOGGER.debug("Stripe - user(id={}) PaymentIntent(id={}) SUCCEEDED", user.getId(), paymentIntent.getId());
                    applicationEventPublisher.publishEvent(new OnPaymentSuccessEvent(user));
                    LOGGER.debug("payment_intent.succeeded event published (userId={})", user.getId());
                    break;
                case "payment_intent.canceled":
                    LOGGER.debug("Stripe - user(id={}) PaymentIntent(id={}) CANCELED", user.getId(), paymentIntent.getId());
                    applicationEventPublisher.publishEvent(new OnPaymentCancellationEvent(user));
                    // Fixed: previously logged "payment_intent.succeeded" here (copy-paste error).
                    LOGGER.debug("payment_intent.canceled event published (userId={})", user.getId());
                    break;
                case "payment_intent.payment_failed":
                    LOGGER.debug("Stripe - user(id={}) PaymentIntent(id={}) FAILED", user.getId(), paymentIntent.getId());
                    break;
                case "payment_intent.created":
                case "payment_intent.processing":
                case "payment_intent.amount_capturable_updated":
                    // unhandled event type
                    LOGGER.debug("Stripe - UNHANDLED event (type={}) received related with user(id={}) PaymentIntent(id={})", eventType, user.getId(), paymentIntent.getId());
                    break;
                default:
                    // Unexpected event type
                    LOGGER.error("Stripe - UNEXPECTED event (type={}) received related with user(id={}) PaymentIntent(id={})", eventType, user.getId(), paymentIntent.getId());
            }
        }
        catch (EntityNotFoundByIdException e) {
            if (e.getClassName().equals(User.class.getSimpleName())) {
                LOGGER.error("Stripe - No user(id={}) found with PaymentIntent(id={}) info sent to the stripe listener", piUserId, paymentIntent.getId());
            }
            throw e;
        }
        response.setStatus(200);
    }
}
|
Potential use of Lupinus exaltatus Zucc. (Leguminosae) as green manure in soils of Jalisco, Mexico The environmental impact generated by agriculture with excessive use of fertilizers has led to the search for alternatives to improve soil fertility. This study aimed to evaluate the potential of Lupinus exaltatus in terms of decomposition and mineralization of nitrogen (N) when incorporated into the soil as green manure (GM) and its effect on the growth of Triticum aestivum L. seedlings. Litter bags were used, with a total of 216 nylon bags (10 5 cm), in each bag were placed 5 g dry base of GM in the vegetative stage and flowering. Subsequently, the GM bags were placed separately Vertisol and Regosol soil at a depth of 5 cm; and every three weeks until the end of the incubation, three bags were recovered per treatment. For evaluation of the effect GM on T. aestivum growth experiment was established in pots with soil Regosol, it consisted of incorporating 50 and 34 g dry base of the GM (equivalent to 10 and 15 t.ha-1). The GM in the vegetative stage lost an average of 83,52 % of its initial weight, while in flowering the loss was 76,49 %, the mineralized N was higher in Regosol soil than in Vertisol with 74,02 % and 70,58 % respectively. The wheat seedlings presented 30 % more dry matter and N with GM than the control treatment. L. exaltatus had a rapid decomposition and mineralization of N in the first stages of incubation. |
Brain Tumor Detection Using Supervised Learning: A Survey With the advancement in technology, artificial intelligence and computer vision are being used extensively in the health care sector. Specifically, there's a lot of research happening in brain tumor detection and classification. A brain tumor can be defined as a chronic disease in which the brain tissues start to grow in an uncontrollable manner. There are very few technologies currently in use to detect brain tumors, such as CT scans and MRIs. Such technologies require expert diagnosis of the type and location of the tumor, and such tasks are time-consuming. This is the reason there is a need for an automatic brain tumor detection system that can make the diagnosis faster. The survey paper will review the supervised machine learning algorithms and supervised neural network algorithms that can be employed to detect the tumor in 2D brain images. The experiments were carried out using SVM and other deep neural network approaches like ANN, CNN, VGG-16, ResNet, and InceptionNet. The dataset was downloaded from Kaggle. The average testing accuracy achieved was 97.76%
package org.processwarp.android;
/**
 * Concrete {@code WorkerService} with no behavior of its own; everything is
 * inherited from the base class.
 *
 * NOTE(review): the numeric suffix suggests this is one of several identical
 * subclasses (WorkerService0, WorkerService1, ...) declared so that each can
 * be registered as a separate service entry (e.g. to run workers in distinct
 * processes) — TODO confirm against AndroidManifest.xml and sibling classes.
 */
public class WorkerService0 extends WorkerService {
}
|
Myoepithelial cells: good fences make good neighbors The mammary gland consists of an extensively branched ductal network contained within a distinctive basement membrane and encompassed by a stromal compartment. During lactation, production of milk depends on the action of the two epithelial cell types that make up the ductal network: luminal cells, which secrete the milk components into the ductal lumen; and myoepithelial cells, which contract to aid in the ejection of milk. There is increasing evidence that the myoepithelial cells also play a key role in the organizational development of the mammary gland, and that the loss and/or change of myoepithelial cell function is a key step in the development of breast cancer. In this review we briefly address the characteristics of breast myoepithelial cells from human breast and mouse mammary gland, how they function in normal mammary gland development, and their recently appreciated role in tumor suppression. Introduction The mammary ductal tree is a bilayered structure that consists of an iterative repetition of basic functional elements. However, when comparing the mouse and human mammary glands, differences emerge. In the mouse the mammary epithelial cells are encased by a periductal stroma that is surrounded by fat tissue, whereas human breast epithelial cells are directly encompassed by highly vascularized intralobular loose connective tissue, and are separated from the adipose tissue by dense interlobular fibrous connective tissue. Moreover, in the mouse, branching ducts terminate in end buds that differentiate during pregnancy and lactation into lobular acini (for review ), whereas the human breast exhibits a higher level of differentiation, with terminal ductal lobular units present in the resting state; these lobular acini differentiate further during pregnancy and lactation to secrete milk (for review ). 
The ductal network in both mouse and human is comprised of two epithelial cell types: luminal epithelial and myoepithelial cells. Ductal myoepithelial cells are spindle shaped and oriented parallel to the long axis such that they form a continuous layer around the luminal cells, especially in the ducts (Fig. 1); upon contraction the myoepithelial cells decrease the length and increase the diameter of the ducts to eject the milk. In contrast, acinar myoepithelial cells are stellate shaped, forming a discontinuous basket-like network around the luminal cells, although during pregnancy and lactation the myoepithelial cell body and processes extend to fully encompass the expanded alveolar epithelial cells. Functionally, myoepithelial cells are a hybrid of both smooth muscle ('myo') and epithelial cells (Table 1). Like muscle cells, myoepithelial cells express filamentous smooth muscle actin and smooth muscle myosin, and exhibit contractile properties; like epithelial cells, myoepithelial cells express intermediate filaments (the epithelial keratins) and have cadherin-mediated cell-cell junctions. Structurally, myoepithelial cells form distinct desmosomes with both luminal cells and other myoepithelial cells, generate gap junctions and cadherin-cadherin interactions with other myoepithelial cells, and adhere to the basement membrane (BM) via hemidesmosomes. The structural and functional elements of myoepithelial cells are inextricably linked. During lactation, myoepithelial cells contract in response to oxytocin and move milk into the ducts (for review ), and gap junctions and cadherin-based interactions connecting myoepithelial cells function to coordinate the ejection of milk smoothly (for review ). During development, myoepithelial cells also act to induce luminal cell polarity and to regulate ductal morphogenesis ; here, connection to the BM and the desmosomal interactions with the luminal epithelial cells facilitate paracrine regulatory mechanisms. 
Proper coordination of all of these activities is necessary to maintain normal breast function; accordingly, it is unsurprising that the loss of myoepithelial function is almost universally associated with breast cancer. Myoepithelial function in normal breast The functional interactions that define the bilayered acinus have been explored using three-dimensional culture systems. When phenotypically normal human or rodent luminal cells are grown in laminin-rich extracellular matrix (lrECM) gels, they recreate the structure and function of the acinus found in vivo even in the absence of myoepithelial cells. We believe that this is possible, in part, because cultured luminal cells express a number of proteins that are characteristic of myoepithelial cells in vivo (e.g. β4 integrin, epidermal growth factor receptor, vimentin, maspin, and others; for review ). It may be that luminal cells can form acinar structures in culture because of this ability to become luminal/myoepithelial 'hybrids'. The possibility that expression of specific myoepithelial proteins confers distinctive signaling cues that promote cell survival and proper apicobasal polarity is an active area of investigation in our laboratory and those of our collaborators. Of the molecules produced by myoepithelial cells to regulate luminal cell function, laminin-1 and desmosomal proteins have emerged as key mediators. Laminin-1 is a heterotrimer of α1, β1 and γ1 chains, and is a major component of BM (for review ). Embryos derived from murine embryonic stem cells null for the laminin-1 β1 and γ1 chains are embryonically lethal at day 5.5 and lack BM. Interestingly, embryos derived from murine embryonic stem cells null for the laminin-1 α1 chain or the α1 LG4-5 domains are also embryonically lethal; however, these null embryos do form an embryonic BM, possibly because of compensation by the α5 chain from laminin-10 (α5β1γ1).
Cell/laminin-1 interactions were previously implicated in tissue morphogenesis and maintenance of polarity in kidney, salivary gland, and intestine and mammary epithelial cells, and we showed that interactions with laminin-1 are important for the functional mammary cell differentiation to produce the milk protein β-casein. Disruption of signaling by β1 integrin inhibitory antibodies or by the E3 fragment of laminin-1 inhibits the expression of β-casein, and subsequent experiments have suggested that organized polymerization of laminin-1 is required for functional mammary differentiation. We previously showed that human breast luminal cells, when grown in three-dimensional type I collagen as opposed to laminin-rich gels, form structures with altered integrins that have reversed polarity and lack central lumina; however, if these same cells are cocultured with myoepithelial cells in collagen I gels, correct polarity is restored. Parallel studies by others using a rotary culture system have suggested an alternative solution in which cell-cell adhesion may be the ultimate regulator for establishment of the acinar structure (Fig. 2). Runswick and coworkers found that inhibition of the myoepithelial-specific desmosomal cadherins, desmocollin 3 (Dsc 3) and desmoglein 3 (Dsg 3), prevented morphogenesis of the bilayered acinus structure and disrupted the basal positioning of myoepithelial cells. These experiments suggested that functional desmosomes between adjacent myoepithelial cells and epithelial cells are involved in the formation of acinar-like structures. It remains to be shown whether laminin or desmosomal proteins are sufficient for polarity or whether both are required; this question is under investigation in our laboratory. Several transgenic mouse models have provided further insight into the role played by myoepithelial cells during mammary gland morphogenesis.
The cell adhesion receptor P-cadherin is localized to myoepithelial cells; among mice that are homozygous null for P-cadherin, virgin mice exhibit precocious mammary gland development similar to the differentiation that is normally present in early pregnant animals. These findings suggest that myoepithelial expression of P-cadherin may provide an inhibitory signal for luminal cell growth. The parathyroid hormone-related peptide (PTHrP) has been implicated in epithelial-stromal interactions during mammary gland development. In the K14-PTHrP transgenic model, overexpression of the peptide hormone PTHrP in myoepithelial cells inhibits side branching, and ductal elongation is stunted compared with wild-type mice, suggesting that perturbing myoepithelial-stromal interactions affects growth and differentiation of luminal cells. These studies provide insight into specific processes by which myoepithelial cells transmit information for apicobasal polarity and branching morphogenesis; future studies will need to focus on the molecular mechanisms by which these factors interact to establish the acinar structure and the hierarchical nature of their activities. Paracrine regulator during morphogenesis Ductal elongation requires the production and organization of new BM, and myoepithelial cells play a key role in these processes as well. Myoepithelial cells synthesize BM components such as collagen IV, laminin-1, laminin-5, and fibronectin that regulate ductal growth and facilitate its organization. [Figure caption: Acinar structures form, albeit at a smaller size compared with the three-dimensional method. Runswick and coworkers showed that blocking desmosome adhesion via blocking peptides inhibited acinar formation.] Myoepithelial cells also express the heparin-binding growth factor pleiotrophin (also known as HARP), which is active during growth and development, and epimorphin, a morphogen that is required for mouse mammary gland branching in three-dimensional culture assays.
Over-expression of epimorphin disrupts the organization of the ductal tree in transgenic mice. Furthermore, myoepithelial cells synthesize and secrete basic fibroblast growth factor (bFGF) and hepatocyte growth factor (HGF/SF), which function during tubular morphogenesis. (In culture assays HGF is believed to be sufficient to mediate branching; however, we previously showed that it does so only if epimorphin is also expressed.) Also, myoepithelial cells may modulate HGF-stimulated branching by expression of activin βA, a member of the transforming growth factor-β superfamily. Sophisticated branching morphogenesis assays utilizing isolated luminal and myoepithelial cells will be necessary to dissect how these interactions control mammary gland branching. Myoepithelial cells act in tumor suppression The majority of breast cancer studies have focused on luminal cells, because these are known to be the source of most carcinomas of the breast (for review ). However, progression to carcinoma involves alteration of the entire organized structure of the breast; depending on tumor grade, the changes can include the loss of apicobasal polarity, collapse of the glandular structure, disappearance of normal myoepithelial cells, and disruption of the BM at the epithelial-stromal junction. The mechanisms responsible for the loss of the myoepithelial layer and BM in invasive cancer are unknown. Man and Sang proposed that loss of myoepithelial cells in cancer is due to localized death of these cells; however, this is not proven, and the potential factors responsible for selective cell death are not known. How myoepithelial cells may act to suppress tumor progression in vivo and how these functions are compromised during cancer development remain major unanswered questions. It is generally believed that myoepithelial cells rarely become malignant (for review ).
Recently, Angele and coworkers found that human luminal and myoepithelial cells differ in their DNA repair capacity, and this may contribute to the lower rate of transformation in myoepithelial cells. Additionally, when they do undergo transformation, they usually form benign or low-grade neoplasms. Myoepithelial cells express many ECM structural proteins, proteinase inhibitors and angiogenic inhibitors, and accumulate ECM rather than degrade it, which may explain in part why these lesions are not invasive. In addition, myoepithelial cells express a number of type II tumor suppressor genes, defined as factors that affect phenotype through changes in expression rather than through genetic mutation (Table 2). Barsky and coworkers were the first to use functional assays to show that myoepithelial cells exhibit many antitumorigenic properties, such as the ability to inhibit tumor cell invasion and angiogenesis. Subsequent studies revealed that myoepithelial-conditioned media inhibited the growth of breast cancer cell lines and induced a G2/M cell cycle arrest. The ability of myoepithelial cells to inhibit breast cancer cell growth and invasion may in part be attributed to their expression of maspin, a member of the serpin family of serine protease inhibitors. Over-expression of maspin in the breast cancer cell line MDA-MB-435 resulted in inhibition of tumor functions such as growth, angiogenesis, and invasion. In addition, Jones and coworkers showed that myoepithelial cells inhibit invasion through downregulation of MMP expression by tumor cells and fibroblasts. These data suggest that normal myoepithelial cells inhibit tumor cell function through a combined suppression of tumor cell growth, invasion, and angiogenesis. Do cancer myoepithelial cells have altered function?
The myoepithelial layer appears to remain intact in ductal carcinoma in situ (DCIS); despite this, the myoepithelial cells appear to be aberrant because they differ from normal myoepithelial cells in gene expression, and secrete many chemokines and other factors. This indicates that although myoepithelial cells are present, they no longer send the correct signals to luminal cells. This observation raises the question of whether there are differences between normal myoepithelial cells and those myoepithelial cells that are present in DCIS. Gudjonsson and coworkers found that 20% of carcinomas in which myoepithelial cells were present expressed little or no laminin-1, and that purified cancer myoepithelial cells were unable, for the most part, to 'polarize' luminal cells in three-dimensional collagen assays. These data suggested that cancer myoepithelial cells might be unable to transmit the necessary cues to induce correct luminal cell polarity, at least in part due to their inability to produce laminin-1. In another study, Allinen and coworkers used SAGE (serial analysis of gene expression) to identify gene expression differences between myoepithelial cells isolated from normal and DCIS samples (Fig. 3). Moreover, those investigators found that cancer myoepithelial cells exhibited the greatest changes in gene expression, and that the chemokine CXCL14 was expressed at higher levels in the DCIS myoepithelial cells than in normal myoepithelial cells. Recently, the chemokine CXCL12/SDF-1 and its receptor CXCR4 were implicated in the induction of tumor cell growth and metastasis. Thus, cancer myoepithelial cells, rather than being tumor suppressors, may act to induce growth, migration, and invasion of breast cancer cells, and to undermine the integrity of BM. Partial myoepithelial differentiation in invasive cancer Myoepithelial cells and myoepithelial differentiation are largely absent in breast cancer (for review ), although there are exceptions to this rule.
In the microarray analysis performed by Perou and coworkers, the 15% (6/40 cases) of tested breast cancer cases that exhibited partial myoepithelial differentiation were also estrogen receptor (ER) negative, and Keese-Adu and colleagues found that 29% (22/77 cases) of tested ER-negative breast cancer samples also exhibited a partial myoepithelial phenotype. These observations suggest a relationship between the loss of ER expression and acquisition of myoepithelial characteristics in breast cancer cells. The expression of the myoepithelial proteins keratin 14, α6β4 integrin, and Dsg 3 in breast cancer cell lines has been shown to correlate with a more aggressive phenotype in cell culture assays. Conclusion A key unknown player is the nature of the myoepithelial precursor cell, identification of which may help to define the pathways that stimulate myoepithelial differentiation and how these pathways are disrupted during tumorigenesis. [Table: Relaxin — hormone regulation, cell growth; Activin — hormone regulation. Adapted from Bissell and Radisky. Figure 3: DCIS myoepithelial cells exhibit an altered gene expression. In the normal breast, myoepithelial cells (MEPs) are located between the luminal cells and the basement membrane. By their location they might act as a barrier to tumor invasion. In ductal carcinoma in situ (DCIS) the myoepithelial layer is still present; however, Allinen and coworkers recently showed that there appear to be molecular differences between MEPs present in normal breast versus DCIS lesions.] We and others have shown that a bipotential progenitor cell may reside in the luminal cell compartment; in cell culture suprabasal luminal cells (MUC1-/ESA+) are able to generate both luminal and myoepithelial cells. If myoepithelial cells are derived from a bipotential cell, then what pathways stimulate myoepithelial fate?
Using the mammosphere culture system, Dontu and coworkers showed that Notch signaling stimulates multipotential progenitor cells to adopt a myoepithelial lineage-specific commitment. The Wnt signaling pathway has also been implicated in myoepithelial differentiation. Li and coworkers found that mammary gland hyperplasias and tumors from Wnt-1 transgenic mice contained a population of cells that expressed the progenitor cell markers Keratin-6 and Sca-1. Interestingly, the Wnt-1 tumors stained positive for both luminal and myoepithelial cell markers, and similar results were found with the MMTV-β-catenin and MMTV-c-myc transgenic mouse models. Loss of heterozygosity for PTEN was detected in both the luminal and myoepithelial cells, suggesting a common origin. Clearly, the function of myoepithelial cells in the breast is more than just contractility, and myoepithelial cells are more than a fence between the milk-producing luminal cells and the surrounding stroma. It is clear that much remains to be learned about the physiological role of these cells in the normal breast and the functional differences between normal and cancer myoepithelial cells.
Treasury Secretary Timothy Geithner's first plan only mentioned using the bailout funds to support the private-public partnership, and he was vague on the details of how that program would work.
WASHINGTON — Treasury Secretary Timothy Geithner could announce as soon as Monday his much-anticipated plan to get toxic assets off the books of the country's struggling banks, administration and industry officials said.
The plan will use the Federal Reserve and the Federal Deposit Insurance Corp. to make the resources of the government's $700 billion financial rescue fund go further, the officials said Friday.
Geithner is being forced to tap the Fed and the FDIC for support because the prospects for getting additional money from Congress for the bailout effort have dimmed significantly given the recent uproar over millions of dollars in bonuses provided to troubled insurance giant American International Group Inc., the largest recipient of government support.
One program will use the bailout fund to create a public-private partnership to back purchases of bad assets by private investors.
A second portion of the plan will expand a recently launched program being run by the Federal Reserve called the Term Asset-Backed Securities Loan Facility, or TALF. That program is providing loans for investors to buy assets backed by consumer debt in an effort to make it easier for consumers to get auto, student and credit card loans. Under Geithner's proposal, this program would be expanded to support investors' purchases of banks' toxic assets.
The third part of the Geithner plan would utilize the resources of the FDIC, the agency that guarantees bank deposits, to purchase toxic assets.
When Geithner announced the administration's overhaul of the financial rescue program on Feb. 10, he only mentioned using the bailout funds to support the private-public partnership, and he was vague on the details of how that program would work.
The initial proposal was widely panned by investors, who were disappointed in a lack of specifics. The Dow Jones industrial average tumbled 380 points on the day the original program was announced.
Geithner's new plan would tap the resources of the Fed and the FDIC to attack what many analysts see as the major failing of the bank rescue effort so far, the failure to rid banks' of more than $1 trillion in bad loans and other toxic assets weighing down their books. As a result, banks have been unable to shake off the effects of the worst financial crisis to hit the country in seven decades.
While the administration included a placeholder in its budget request last month for as much as an additional $750 billion in rescue funds, more than doubling the current commitment, the uproar over the AIG bonuses has underscored the dim prospects that Congress would vote to bolster the size of the current fund.
The effort to deal with toxic assets is the latest in a string of initiatives the administration has put forward to deal with the financial crisis that had made it hard for consumers and businesses to get loans and has deepened the current recession, already the longest in a quarter-century.
The administration has put forward new programs to deal with mortgage foreclosures, expanded efforts to bolster lending to small businesses, launched with the Fed the TALF to unfreeze markets that support credit card, auto and student loans and also begun a so-called stress test of the country's 19 largest banks to make sure they have sufficient resources to withstand an even more severe recession.
A key unknown is whether the new plan to deal with toxic assets will succeed in attracting private investors to start buying the bad assets. Investors have fled these markets, scared off by the billions of dollars of losses that have already incurred on everything from mortgage-backed securities to consumer and business loans.
Hedge funds and other big investors may be even more leery of accepting the government's enticements to purchase these assets for fear of the imposition of tighter government restraints in such areas as executive compensation in the wake of the uproar over AIG.
In addition to unveiling his plan for toxic assets, Geithner is also expected to put forward next week the administration's proposals to overhaul the government's current financial regulatory structure.
President Barack Obama said this week that this plan will include a proposal to give the administration expanded authority to take control of major troubled institutions that are deemed too big to fail because their collapse would pose a risk to the entire financial system. |
import { createStore, produce } from "solid-js/store";
/** Configuration for the menu open/close animation. */
type TAnimation = {
  /** Whether the animation is enabled. */
  enable: boolean;
  /** Animation duration (presumably milliseconds — confirm with consumers). */
  duration: number;
};

/** Shape of the global UI settings held in the settings store. */
type TSettings = {
  /** Animation-related options. */
  animation: TAnimation;
  /** Whether re-clicking the menu button closes the menu. */
  closeMenuBtnReclick: boolean;
};
/** Initial values for every setting. */
const defaultSettings: TSettings = {
  animation: {
    enable: false,
    duration: 300,
  },
  closeMenuBtnReclick: true,
};

// Reactive store backing the settings. The raw setter stays module-private;
// all mutations go through the exported `setSettings` helper.
const [settings, _setSettings] = createStore<TSettings>(defaultSettings);

export default settings;
/**
 * Mutate the settings store with a recipe callback: `cb` receives a draft
 * of the current settings and may modify it in place (solid-js `produce`,
 * Immer-style). Fine-grained updates are derived from the mutations.
 */
export function setSettings(cb: (s: TSettings) => void) {
  return _setSettings(produce<TSettings>(cb));
}
|
Q:
How do I stop my daughter from hitting another child?
Recently my daughter started to hit another girl with whom she is currently sharing her nanny. My daughter is 2 years and 2 months now, the other child is 1 year 3 months. The nanny was our daughter's only nanny until 5 months ago, when we started sharing with the other family. We had a few trial days and it was perfect, but after a few proper days it became clear that our daughter is not very happy with the arrangement - probably she was jealous.
A few weeks ago the problems grew more serious, and our daughter started hitting the other girl much more often. Part of the reason might be that the younger girl is interested in everything around her, and in particular in the toy that our daughter is playing with (natural at this age I guess), so our daughter was fighting back. But now she has become so aggressive that the nanny and all the parents involved are very worried: often she will attack the little girl when she just passes by or waves her hand.
We are trying to figure out how to improve their relationship and how to make our daughter stop violence. Gentle explanations don't seem to work - I believe she understands she does the wrong thing, she says sorry, kisses the smaller child etc., but 3 mins later the same thing happens again.
Could anyone recommend anything we could do about it? And what could make it worse? The real question is should we try to punish our daughter in any way (e.g. do time-outs: stop the play and take her to the other end of the room for a short while), or is it going to have the opposite effect?
A:
It sounds like the situation (and your daughter) are suffering for lack of consistent, appropriate discipline and an understanding of the cause of your daughter's behavior.
First, get a discipline plan in place. You, your spouse, the nanny, and anyone else involved in your child's upbringing must stick to it or it won't work. It should go something like this:
Warning -- in a firm voice tell her to stop doing X or she will go to time out. (In the case of hitting, the warning comes before she hits, when she shows signs of getting ready to hit.)
Time out -- If the behavior happens after the warning, get on eye level with her, tell her that she is going into time-out because she did X, then pick her up and put her in the time-out spot. She stays there for two minutes (1 minute per year of age) and if she leaves time-out you put her back without a word (this is important -- DO NOT engage her) and restart the timer. At first, this will be a marathon event. Once she's figured out that you won't back down no matter how long she fights it, she'll stay put. Don't give up.
Apology -- When time out is over, get on her eye level again. Ask her why she was in time-out and if she can't or won't explain, remind her firmly. Then have her apologize, to you and to the child she hurt. After that she gets hugs and kisses and life goes on.
It is extremely important that this is done with unfailing consistency, and not just for hitting. Make a list of house rules and make sure they are followed. While less severe behaviors (such as running in the house) will garner a warning the first time the behavior is done, not before, the rest of the process stays the same. The best way to prevent severe misbehavior is to stop it before it gets severe.
The second part of this process is to figure out why your child was behaving this way in the first place. A failure of discipline certainly led to this becoming a regular habit, but it started for a reason. Is your daughter reacting out of jealousy when the other child gets attention? Is the other child misbehaving in a way that makes your daughter feel she must defend herself?
Jealousy usually fades with time, as long as the children are both getting appropriate amounts of attention, and the other child isn't allowed to act inappropriately toward your daughter. For example, even though the 15mo's desire to play with whatever your daughter is playing with is normal, that does not make it appropriate. Kids learn appropriate behavior only when it is modeled by their role models as consistently as bad behavior must be punished. Neither the younger child's age, nor your daughter's inappropriate behavior may be treated as an excuse for inappropriate behavior on the younger child's part, or the situation won't get better. Rules are rules for everyone, and your daughter won't value rules unless they protect her (say, from the other child taking her toys away) as well as limit her.
Discipline for a 15 month old is, of course, different from that for a 2yo. The 15mo should be given a warning when she misbehaves, and then removed from the play area immediately upon the second infraction. Stick her in a playpen or similar confinement without toys while the adult present gives attention to your daughter for a few minutes (and pretends the 15mo isn't there without regard to any amount of screaming or other protest). Do not take her out until a couple of minutes have passed AND she is quietly behaving. She'll start to learn that bad behavior is no fun.
Both of the children will resist the change at first -- they will probably increase the bad behavior in the hope that it will go back to getting the reaction it used to -- but within a couple of weeks you will see a great improvement in their behavior. However, any inconsistency, or giving attention in time-out, etc. will undo your efforts. Additionally, reacting 5 or even 3 minutes after a bad behavior is too late for a child this young to connect the behavior with the punishment. Your or the nanny's response must be immediate, or the punishment will seem random to her rather than the consequence of a particular behavior.
A:
I'm not sure that at your daughter's age, she is totally capable of controlling herself or even understanding what an apology is. She does know or can feel the intense emotions that come during and after hitting. I would let her know that hitting isn't allowed and that you won't let her hurt another child. Move her away if need be and watch for signs that she might be getting ready to hit so you can stop it. The nanny can keep your daughter close to her if need be. Keep the tone firm, but neutral (what I've noticed with my son is that the stronger my reaction, the stronger he comes right back at me and meets that energy level). But also see if you can help her to verbalize her feelings and find other ways to express her anger - stomping feet, shaking fists, etc. Get on her level and say what you're seeing..."you really wanted that...and you can't have it....," etc. Don't lecture, try to rationalize, or shame. When she gets older, you can encourage her to use her words instead of hitting. She should be allowed her feelings though.
A:
Absolutely you should punish your daughter!
Hitting, pushing, and other aggressive behavior is not permitted. Some negative consequence must be applied when she acts out in this way. I suggest that you put her in timeout, and if the conflict is about something, take the something away from your daughter and give it to the other child.
As you have demonstrated and seem to have figured out, persuasion and negotiating do not work with toddlers. She isn't sorry for hitting; she has no idea what the word "sorry" means. She just knows that "sorry" is something she has to say to shut you up after she hits that irritating baby.
Note #1: Don't worry about "improving the relationship", just worry about improving the behavior. They may become fast friends, or they may not really like each other. It doesn't matter .. in either case your daughter must behave properly.
Note #2: Violence is hardly the right word for this sort of thing .. it is overly dramatic, not particularly descriptive, and not helpful to resolving the situation. It also trivializes real violence. Accurately characterizing bad behavior is the first step to properly responding to it. "Hitting" or "pushing" are better terms for this kind of thing. |
Genetic basis of human complement C8 beta deficiency. The eighth component of human complement (C8) is a serum protein consisting of three chains (alpha, beta, and gamma) and encoded by three different genes, C8A, C8B, and C8G. C8A and C8B are closely linked on chromosome 1p, whereas C8G is located on chromosome 9q. In the serum the beta subunit is non-covalently bound to the disulfide-linked alpha-gamma subunit. Patients with C8 beta deficiency suffer from recurrent neisserial infections such as meningitis. Exon-specific polymerase chain reaction (PCR) amplification with primer pairs from the flanking intron sequences was used to amplify all 12 C8B exons separately. No difference regarding the exon sizes was observed in a C8 beta-deficient patient compared with a normal person. Therefore, direct sequence analysis of all exon-specific PCR products from normal and C8 beta-deficient individuals was carried out. As a cause for C8 beta deficiency, we found a single C-T exchange in exon 9 leading to a stop codon. An allele-specific PCR system was designed to detect the normal and the deficiency allele simultaneously. Using this approach as well as PCR typing of the TaqI polymorphism located in intron 11, five families with 7 C8 beta-deficient members were investigated. The mutation was not found to be restricted to one of the two TaqI RFLP alleles. The mutant allele was observed in all families investigated and can therefore be regarded as a major cause of C8 beta deficiency in the Caucasian population. In addition, two C8 beta-deficient patients were found to be heterozygous for the C-T exchange. The molecular basis of the alleles without this point mutation also causing deficiency has not yet been defined.
When it comes to restaurants, never underestimate the role of location.
For Plates, a casual American eatery located in the Downtown Athletic Club and surrounded by midtown office buildings, the location means a busy lunch but not much of a dinner rush.
Sometimes the best time to visit a restaurant is in its off hours. At Plates, we found good service, well-executed, familiar food and a quiet — though sometimes slightly odd — atmosphere, right in the center of the city.
Scene & Decor During our visit, on a rainy Thursday night, Plates was nearly empty, though we did see a handful of diners grabbing a drink or a bite after the gym.
In itself, a location next to a gym is no big deal. Restaurants have neighbors. However, most restaurants don't share glass walls with said neighbors; Plates does. That visual link between the two spaces left us with the erroneous impression that Plates was somehow part of the gym.
The two businesses have different entrances, though, and Plates' entryway is peculiar. Diners walk through a passage that must have originally been a storage room or exit hallway. A few decorations try to spruce up the spot but don't disguise it enough to erase the feeling that you're somehow behind the scenes.
But the gym window and strange entrance are small things. Inside, Plates' decor has the vaguely urban-industrial vibe of a West Elm catalog — think metal tables and rough wood accents. It feels a little generic but is appealing enough, especially because Plates' food works.
Appetizers Our first course was a home run all the way around. A cup of crab and corn chowder ($4.99) was creamy and thick, with chunks of crab. The soup was well seasoned with just a hint of underlying sweetness, thanks to the corn and crab.
A large plateful of fried calamari dunked in Buffalo sauce and served with blue cheese ($8.99) was fantastic. The calamari was cooked nicely — it was tender with just enough breading to balance the spicy sauce. Buffalo sauce and squid are a good pair.
Entrees Like the calamari, the lamb cheese steak ($10.99) put a simple but unexpected spin on a familiar dish. Replacing the beef with provolone-covered lamb made the sandwich a crossover between a gyro and the Philadelphia classic. Somehow, it ended up tasting more sophisticated than either of those sandwiches do on their own.
Fries, on the side, were just as good. Thick enough to be hearty but still crispy on the outside, they hit all the right notes.
Two oversized mahi mahi and shrimp tacos ($10.99) were the only disappointment of the evening. The flavors worked — the pico de gallo was fresh and we especially liked the smoky chipotle aioli — but the fish itself, chopped into small bites, was overcooked. Rubbery shrimp didn't ruin the tacos, but we found ourselves focusing more on the vegetables and a smoking hot side of Spanish rice while avoiding the fish.
Drinks Plates doesn't offer any beers on tap but they do make a mean glass of sangria ($8). Fruity, but not too sweet, the drink was fun and also a good match for the food.
A glass of Finca El Origen malbec ($8), a smooth and peppery red from Argentina, was equally drinkable — especially with the rich, savory crab and corn chowder.
Dessert Dessert was a comically large slice of sweet potato cheesecake ($4.99), made in-house. When we saw the size of the slice, we were sure we'd leave half of it on the plate. But after a few bites of the creamy, tangy-sweet filling, we were hooked. We ate the whole thing.
Service Eating at Plates outside of office hours meant we had nearly the whole place to ourselves — and the dedicated service of our waitress. She was as friendly as could be, offering recommendations and making sure our drinks stayed full.
By the time we left, we were the only people in the place. That can be uncomfortable, but at Plates, the lack of company felt more private than weird, like we'd chosen someplace off the beaten dinner path.
Between the happy service and approachable and sometimes surprising menu, it felt like a smart choice, too.
Back story: Opened in mid-2012 by Baltimore native and restaurant veteran Valanti Koliofotis, Plates adds a likable casual dining option to the midtown restaurant scene.
Signature dish: The lamb cheese steak is a fun twist on the familiar Philly sandwich. Instead of beef, shaved lamb joins melted provolone, caramelized onions, lettuce, tomato and mayonnaise on a crusty roll. The lamb's slight gaminess makes the sandwich more sophisticated — even edgier — than traditional cheese steaks. |
/**
* Tests for {@link Negative}, {@link NegativeOrZero}, {@link Positive} and {@link PositiveOrZero} built-in constraints.
*
* @author Guillaume Smet
*/
@SpecVersion(spec = "beanvalidation", version = "3.0.0")
public class NegativePositiveConstraintsTest extends AbstractTCKTest {
@Deployment
public static WebArchive createTestArchive() {
return webArchiveBuilder()
.withTestClass( NegativePositiveConstraintsTest.class )
.build();
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_NEGATIVE, id = "a")
public void testNegativeConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
NegativeEntity dummy = new NegativeEntity();
Set<ConstraintViolation<NegativeEntity>> constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Negative.class ).withProperty( "bytePrimitive" ),
violationOf( Negative.class ).withProperty( "intPrimitive" ),
violationOf( Negative.class ).withProperty( "longPrimitive" ),
violationOf( Negative.class ).withProperty( "shortPrimitive" ),
violationOf( Negative.class ).withProperty( "doublePrimitive" ),
violationOf( Negative.class ).withProperty( "floatPrimitive" )
);
dummy.intPrimitive = 101;
dummy.longPrimitive = 1001;
dummy.bytePrimitive = 111;
dummy.shortPrimitive = 142;
dummy.doublePrimitive = 123.34d;
dummy.floatPrimitive = 456.34f;
dummy.intObject = Integer.valueOf( 100 );
dummy.longObject = Long.valueOf( 15678l );
dummy.byteObject = Byte.valueOf( (byte) 50 );
dummy.shortObject = Short.valueOf( (short) 3 );
dummy.doubleObject = Double.valueOf( 123.34d );
dummy.floatObject = Float.valueOf( 5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( 100.9 );
dummy.bigInteger = BigInteger.valueOf( 100 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Negative.class ).withProperty( "bytePrimitive" ),
violationOf( Negative.class ).withProperty( "intPrimitive" ),
violationOf( Negative.class ).withProperty( "longPrimitive" ),
violationOf( Negative.class ).withProperty( "shortPrimitive" ),
violationOf( Negative.class ).withProperty( "doublePrimitive" ),
violationOf( Negative.class ).withProperty( "floatPrimitive" ),
violationOf( Negative.class ).withProperty( "byteObject" ),
violationOf( Negative.class ).withProperty( "intObject" ),
violationOf( Negative.class ).withProperty( "longObject" ),
violationOf( Negative.class ).withProperty( "shortObject" ),
violationOf( Negative.class ).withProperty( "doubleObject" ),
violationOf( Negative.class ).withProperty( "floatObject" ),
violationOf( Negative.class ).withProperty( "bigDecimal" ),
violationOf( Negative.class ).withProperty( "bigInteger" )
);
dummy.intPrimitive = 0;
dummy.longPrimitive = 0;
dummy.bytePrimitive = 0;
dummy.shortPrimitive = 0;
dummy.doublePrimitive = 0;
dummy.floatPrimitive = 0;
dummy.intObject = Integer.valueOf( 0 );
dummy.longObject = Long.valueOf( 0 );
dummy.byteObject = Byte.valueOf( (byte) 0 );
dummy.shortObject = Short.valueOf( (short) 0 );
dummy.doubleObject = Double.valueOf( 0 );
dummy.floatObject = Float.valueOf( 0 );
dummy.bigDecimal = BigDecimal.valueOf( 0 );
dummy.bigInteger = BigInteger.valueOf( 0 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Negative.class ).withProperty( "bytePrimitive" ),
violationOf( Negative.class ).withProperty( "intPrimitive" ),
violationOf( Negative.class ).withProperty( "longPrimitive" ),
violationOf( Negative.class ).withProperty( "shortPrimitive" ),
violationOf( Negative.class ).withProperty( "doublePrimitive" ),
violationOf( Negative.class ).withProperty( "floatPrimitive" ),
violationOf( Negative.class ).withProperty( "byteObject" ),
violationOf( Negative.class ).withProperty( "intObject" ),
violationOf( Negative.class ).withProperty( "longObject" ),
violationOf( Negative.class ).withProperty( "shortObject" ),
violationOf( Negative.class ).withProperty( "doubleObject" ),
violationOf( Negative.class ).withProperty( "floatObject" ),
violationOf( Negative.class ).withProperty( "bigDecimal" ),
violationOf( Negative.class ).withProperty( "bigInteger" )
);
dummy.intPrimitive = -101;
dummy.longPrimitive = -1001;
dummy.bytePrimitive = -111;
dummy.shortPrimitive = -142;
dummy.doublePrimitive = -123.34d;
dummy.floatPrimitive = -456.34f;
dummy.intObject = Integer.valueOf( -100 );
dummy.longObject = Long.valueOf( -15678l );
dummy.byteObject = Byte.valueOf( (byte) -50 );
dummy.shortObject = Short.valueOf( (short) -3 );
dummy.doubleObject = Double.valueOf( -123.34d );
dummy.floatObject = Float.valueOf( -5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( -100.9 );
dummy.bigInteger = BigInteger.valueOf( -100 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_NEGATIVE, id = "a")
public void testNegativeConstraintInfinityAndNaN() {
Validator validator = TestUtil.getValidatorUnderTest();
NegativeEntity dummy = new NegativeEntity();
dummy.intPrimitive = -1;
dummy.longPrimitive = -1;
dummy.bytePrimitive = -1;
dummy.shortPrimitive = -1;
dummy.doublePrimitive = -1;
dummy.floatPrimitive = -1;
dummy.floatObject = Float.NEGATIVE_INFINITY;
dummy.doubleObject = Double.NEGATIVE_INFINITY;
Set<ConstraintViolation<NegativeEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.floatObject = Float.POSITIVE_INFINITY;
dummy.doubleObject = Double.POSITIVE_INFINITY;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Negative.class ).withProperty( "doubleObject" ),
violationOf( Negative.class ).withProperty( "floatObject" )
);
dummy.floatObject = Float.NaN;
dummy.doubleObject = Double.NaN;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Negative.class ).withProperty( "doubleObject" ),
violationOf( Negative.class ).withProperty( "floatObject" )
);
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_NEGATIVEORZERO, id = "a")
public void testNegativeOrZeroConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
NegativeOrZeroEntity dummy = new NegativeOrZeroEntity();
Set<ConstraintViolation<NegativeOrZeroEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = 101;
dummy.longPrimitive = 1001;
dummy.bytePrimitive = 111;
dummy.shortPrimitive = 142;
dummy.doublePrimitive = 123.34d;
dummy.floatPrimitive = 456.34f;
dummy.intObject = Integer.valueOf( 100 );
dummy.longObject = Long.valueOf( 15678l );
dummy.byteObject = Byte.valueOf( (byte) 50 );
dummy.shortObject = Short.valueOf( (short) 3 );
dummy.doubleObject = Double.valueOf( 123.34d );
dummy.floatObject = Float.valueOf( 5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( 100.9 );
dummy.bigInteger = BigInteger.valueOf( 100 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( NegativeOrZero.class ).withProperty( "bytePrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "intPrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "longPrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "shortPrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "doublePrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "floatPrimitive" ),
violationOf( NegativeOrZero.class ).withProperty( "byteObject" ),
violationOf( NegativeOrZero.class ).withProperty( "intObject" ),
violationOf( NegativeOrZero.class ).withProperty( "longObject" ),
violationOf( NegativeOrZero.class ).withProperty( "shortObject" ),
violationOf( NegativeOrZero.class ).withProperty( "doubleObject" ),
violationOf( NegativeOrZero.class ).withProperty( "floatObject" ),
violationOf( NegativeOrZero.class ).withProperty( "bigDecimal" ),
violationOf( NegativeOrZero.class ).withProperty( "bigInteger" )
);
dummy.intPrimitive = 0;
dummy.longPrimitive = 0;
dummy.bytePrimitive = 0;
dummy.shortPrimitive = 0;
dummy.doublePrimitive = 0;
dummy.floatPrimitive = 0;
dummy.intObject = Integer.valueOf( 0 );
dummy.longObject = Long.valueOf( 0 );
dummy.byteObject = Byte.valueOf( (byte) 0 );
dummy.shortObject = Short.valueOf( (short) 0 );
dummy.doubleObject = Double.valueOf( 0 );
dummy.floatObject = Float.valueOf( 0 );
dummy.bigDecimal = BigDecimal.valueOf( 0 );
dummy.bigInteger = BigInteger.valueOf( 0 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = -101;
dummy.longPrimitive = -1001;
dummy.bytePrimitive = -111;
dummy.shortPrimitive = -142;
dummy.doublePrimitive = -123.34d;
dummy.floatPrimitive = -456.34f;
dummy.intObject = Integer.valueOf( -100 );
dummy.longObject = Long.valueOf( -15678l );
dummy.byteObject = Byte.valueOf( (byte) -50 );
dummy.shortObject = Short.valueOf( (short) -3 );
dummy.doubleObject = Double.valueOf( -123.34d );
dummy.floatObject = Float.valueOf( -5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( -100.9 );
dummy.bigInteger = BigInteger.valueOf( -100 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_NEGATIVEORZERO, id = "a")
public void testNegativeOrZeroConstraintInfinityAndNaN() {
Validator validator = TestUtil.getValidatorUnderTest();
NegativeOrZeroEntity dummy = new NegativeOrZeroEntity();
dummy.floatObject = Float.NEGATIVE_INFINITY;
dummy.doubleObject = Double.NEGATIVE_INFINITY;
Set<ConstraintViolation<NegativeOrZeroEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.floatObject = Float.POSITIVE_INFINITY;
dummy.doubleObject = Double.POSITIVE_INFINITY;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( NegativeOrZero.class ).withProperty( "doubleObject" ),
violationOf( NegativeOrZero.class ).withProperty( "floatObject" )
);
dummy.floatObject = Float.NaN;
dummy.doubleObject = Double.NaN;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( NegativeOrZero.class ).withProperty( "doubleObject" ),
violationOf( NegativeOrZero.class ).withProperty( "floatObject" )
);
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_POSITIVE, id = "a")
public void testPositiveConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
PositiveEntity dummy = new PositiveEntity();
Set<ConstraintViolation<PositiveEntity>> constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Positive.class ).withProperty( "bytePrimitive" ),
violationOf( Positive.class ).withProperty( "intPrimitive" ),
violationOf( Positive.class ).withProperty( "longPrimitive" ),
violationOf( Positive.class ).withProperty( "shortPrimitive" ),
violationOf( Positive.class ).withProperty( "doublePrimitive" ),
violationOf( Positive.class ).withProperty( "floatPrimitive" )
);
dummy.intPrimitive = 101;
dummy.longPrimitive = 1001;
dummy.bytePrimitive = 111;
dummy.shortPrimitive = 142;
dummy.doublePrimitive = 123.34d;
dummy.floatPrimitive = 456.34f;
dummy.intObject = Integer.valueOf( 100 );
dummy.longObject = Long.valueOf( 15678l );
dummy.byteObject = Byte.valueOf( (byte) 50 );
dummy.shortObject = Short.valueOf( (short) 3 );
dummy.doubleObject = Double.valueOf( 123.34d );
dummy.floatObject = Float.valueOf( 5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( 100.9 );
dummy.bigInteger = BigInteger.valueOf( 100 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = 0;
dummy.longPrimitive = 0;
dummy.bytePrimitive = 0;
dummy.shortPrimitive = 0;
dummy.doublePrimitive = 0;
dummy.floatPrimitive = 0;
dummy.intObject = Integer.valueOf( 0 );
dummy.longObject = Long.valueOf( 0 );
dummy.byteObject = Byte.valueOf( (byte) 0 );
dummy.shortObject = Short.valueOf( (short) 0 );
dummy.doubleObject = Double.valueOf( 0 );
dummy.floatObject = Float.valueOf( 0 );
dummy.bigDecimal = BigDecimal.valueOf( 0 );
dummy.bigInteger = BigInteger.valueOf( 0 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Positive.class ).withProperty( "bytePrimitive" ),
violationOf( Positive.class ).withProperty( "intPrimitive" ),
violationOf( Positive.class ).withProperty( "longPrimitive" ),
violationOf( Positive.class ).withProperty( "shortPrimitive" ),
violationOf( Positive.class ).withProperty( "doublePrimitive" ),
violationOf( Positive.class ).withProperty( "floatPrimitive" ),
violationOf( Positive.class ).withProperty( "byteObject" ),
violationOf( Positive.class ).withProperty( "intObject" ),
violationOf( Positive.class ).withProperty( "longObject" ),
violationOf( Positive.class ).withProperty( "shortObject" ),
violationOf( Positive.class ).withProperty( "doubleObject" ),
violationOf( Positive.class ).withProperty( "floatObject" ),
violationOf( Positive.class ).withProperty( "bigDecimal" ),
violationOf( Positive.class ).withProperty( "bigInteger" )
);
dummy.intPrimitive = -101;
dummy.longPrimitive = -1001;
dummy.bytePrimitive = -111;
dummy.shortPrimitive = -142;
dummy.doublePrimitive = -123.34d;
dummy.floatPrimitive = -456.34f;
dummy.intObject = Integer.valueOf( -100 );
dummy.longObject = Long.valueOf( -15678l );
dummy.byteObject = Byte.valueOf( (byte) -50 );
dummy.shortObject = Short.valueOf( (short) -3 );
dummy.doubleObject = Double.valueOf( -123.34d );
dummy.floatObject = Float.valueOf( -5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( -100.9 );
dummy.bigInteger = BigInteger.valueOf( -100 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Positive.class ).withProperty( "bytePrimitive" ),
violationOf( Positive.class ).withProperty( "intPrimitive" ),
violationOf( Positive.class ).withProperty( "longPrimitive" ),
violationOf( Positive.class ).withProperty( "shortPrimitive" ),
violationOf( Positive.class ).withProperty( "doublePrimitive" ),
violationOf( Positive.class ).withProperty( "floatPrimitive" ),
violationOf( Positive.class ).withProperty( "byteObject" ),
violationOf( Positive.class ).withProperty( "intObject" ),
violationOf( Positive.class ).withProperty( "longObject" ),
violationOf( Positive.class ).withProperty( "shortObject" ),
violationOf( Positive.class ).withProperty( "doubleObject" ),
violationOf( Positive.class ).withProperty( "floatObject" ),
violationOf( Positive.class ).withProperty( "bigDecimal" ),
violationOf( Positive.class ).withProperty( "bigInteger" )
);
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_POSITIVEORZERO, id = "a")
public void testPositiveConstraintInfinityAndNaN() {
Validator validator = TestUtil.getValidatorUnderTest();
PositiveEntity dummy = new PositiveEntity();
dummy.intPrimitive = 1;
dummy.longPrimitive = 1;
dummy.bytePrimitive = 1;
dummy.shortPrimitive = 1;
dummy.doublePrimitive = 1;
dummy.floatPrimitive = 1;
dummy.floatObject = Float.POSITIVE_INFINITY;
dummy.doubleObject = Double.POSITIVE_INFINITY;
Set<ConstraintViolation<PositiveEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.floatObject = Float.NEGATIVE_INFINITY;
dummy.doubleObject = Double.NEGATIVE_INFINITY;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Positive.class ).withProperty( "doubleObject" ),
violationOf( Positive.class ).withProperty( "floatObject" )
);
dummy.floatObject = Float.NaN;
dummy.doubleObject = Double.NaN;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( Positive.class ).withProperty( "doubleObject" ),
violationOf( Positive.class ).withProperty( "floatObject" )
);
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_POSITIVEORZERO, id = "a")
public void testPositiveOrZeroConstraint() {
Validator validator = TestUtil.getValidatorUnderTest();
PositiveOrZeroEntity dummy = new PositiveOrZeroEntity();
Set<ConstraintViolation<PositiveOrZeroEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = 101;
dummy.longPrimitive = 1001;
dummy.bytePrimitive = 111;
dummy.shortPrimitive = 142;
dummy.doublePrimitive = 123.34d;
dummy.floatPrimitive = 456.34f;
dummy.intObject = Integer.valueOf( 100 );
dummy.longObject = Long.valueOf( 15678l );
dummy.byteObject = Byte.valueOf( (byte) 50 );
dummy.shortObject = Short.valueOf( (short) 3 );
dummy.doubleObject = Double.valueOf( 123.34d );
dummy.floatObject = Float.valueOf( 5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( 100.9 );
dummy.bigInteger = BigInteger.valueOf( 100 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = 0;
dummy.longPrimitive = 0;
dummy.bytePrimitive = 0;
dummy.shortPrimitive = 0;
dummy.doublePrimitive = 0;
dummy.floatPrimitive = 0;
dummy.intObject = Integer.valueOf( 0 );
dummy.longObject = Long.valueOf( 0 );
dummy.byteObject = Byte.valueOf( (byte) 0 );
dummy.shortObject = Short.valueOf( (short) 0 );
dummy.doubleObject = Double.valueOf( 0 );
dummy.floatObject = Float.valueOf( 0 );
dummy.bigDecimal = BigDecimal.valueOf( 0 );
dummy.bigInteger = BigInteger.valueOf( 0 );
constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.intPrimitive = -101;
dummy.longPrimitive = -1001;
dummy.bytePrimitive = -111;
dummy.shortPrimitive = -142;
dummy.doublePrimitive = -123.34d;
dummy.floatPrimitive = -456.34f;
dummy.intObject = Integer.valueOf( -100 );
dummy.longObject = Long.valueOf( -15678l );
dummy.byteObject = Byte.valueOf( (byte) -50 );
dummy.shortObject = Short.valueOf( (short) -3 );
dummy.doubleObject = Double.valueOf( -123.34d );
dummy.floatObject = Float.valueOf( -5678.56f );
dummy.bigDecimal = BigDecimal.valueOf( -100.9 );
dummy.bigInteger = BigInteger.valueOf( -100 );
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( PositiveOrZero.class ).withProperty( "bytePrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "intPrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "longPrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "shortPrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "doublePrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "floatPrimitive" ),
violationOf( PositiveOrZero.class ).withProperty( "byteObject" ),
violationOf( PositiveOrZero.class ).withProperty( "intObject" ),
violationOf( PositiveOrZero.class ).withProperty( "longObject" ),
violationOf( PositiveOrZero.class ).withProperty( "shortObject" ),
violationOf( PositiveOrZero.class ).withProperty( "doubleObject" ),
violationOf( PositiveOrZero.class ).withProperty( "floatObject" ),
violationOf( PositiveOrZero.class ).withProperty( "bigDecimal" ),
violationOf( PositiveOrZero.class ).withProperty( "bigInteger" )
);
}
@Test
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS, id = "a")
@SpecAssertion(section = Sections.BUILTINCONSTRAINTS_POSITIVE, id = "a")
public void testPositiveOrZeroConstraintInfinityAndNaN() {
Validator validator = TestUtil.getValidatorUnderTest();
PositiveOrZeroEntity dummy = new PositiveOrZeroEntity();
dummy.floatObject = Float.POSITIVE_INFINITY;
dummy.doubleObject = Double.POSITIVE_INFINITY;
Set<ConstraintViolation<PositiveOrZeroEntity>> constraintViolations = validator.validate( dummy );
assertNoViolations( constraintViolations );
dummy.floatObject = Float.NEGATIVE_INFINITY;
dummy.doubleObject = Double.NEGATIVE_INFINITY;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( PositiveOrZero.class ).withProperty( "doubleObject" ),
violationOf( PositiveOrZero.class ).withProperty( "floatObject" )
);
dummy.floatObject = Float.NaN;
dummy.doubleObject = Double.NaN;
constraintViolations = validator.validate( dummy );
assertThat( constraintViolations ).containsOnlyViolations(
violationOf( PositiveOrZero.class ).withProperty( "doubleObject" ),
violationOf( PositiveOrZero.class ).withProperty( "floatObject" )
);
}
private static class NegativeEntity {
@Negative
private BigDecimal bigDecimal;
@Negative
private BigInteger bigInteger;
@Negative
private byte bytePrimitive;
@Negative
private short shortPrimitive;
@Negative
private int intPrimitive;
@Negative
private long longPrimitive;
@Negative
private double doublePrimitive;
@Negative
private float floatPrimitive;
@Negative
private Byte byteObject;
@Negative
private Short shortObject;
@Negative
private Integer intObject;
@Negative
private Long longObject;
@Negative
private Double doubleObject;
@Negative
private Float floatObject;
}
private static class NegativeOrZeroEntity {
@NegativeOrZero
private BigDecimal bigDecimal;
@NegativeOrZero
private BigInteger bigInteger;
@NegativeOrZero
private byte bytePrimitive;
@NegativeOrZero
private short shortPrimitive;
@NegativeOrZero
private int intPrimitive;
@NegativeOrZero
private long longPrimitive;
@NegativeOrZero
private double doublePrimitive;
@NegativeOrZero
private float floatPrimitive;
@NegativeOrZero
private Byte byteObject;
@NegativeOrZero
private Short shortObject;
@NegativeOrZero
private Integer intObject;
@NegativeOrZero
private Long longObject;
@NegativeOrZero
private Double doubleObject;
@NegativeOrZero
private Float floatObject;
}
private static class PositiveEntity {
@Positive
private BigDecimal bigDecimal;
@Positive
private BigInteger bigInteger;
@Positive
private byte bytePrimitive;
@Positive
private short shortPrimitive;
@Positive
private int intPrimitive;
@Positive
private long longPrimitive;
@Positive
private double doublePrimitive;
@Positive
private float floatPrimitive;
@Positive
private Byte byteObject;
@Positive
private Short shortObject;
@Positive
private Integer intObject;
@Positive
private Long longObject;
@Positive
private Double doubleObject;
@Positive
private Float floatObject;
}
private static class PositiveOrZeroEntity {
@PositiveOrZero
private BigDecimal bigDecimal;
@PositiveOrZero
private BigInteger bigInteger;
@PositiveOrZero
private byte bytePrimitive;
@PositiveOrZero
private short shortPrimitive;
@PositiveOrZero
private int intPrimitive;
@PositiveOrZero
private long longPrimitive;
@PositiveOrZero
private double doublePrimitive;
@PositiveOrZero
private float floatPrimitive;
@PositiveOrZero
private Byte byteObject;
@PositiveOrZero
private Short shortObject;
@PositiveOrZero
private Integer intObject;
@PositiveOrZero
private Long longObject;
@PositiveOrZero
private Double doubleObject;
@PositiveOrZero
private Float floatObject;
}
} |
Isolation and characterization of culturable actinobacteria associated with Polytrichum strictum (Galindez Island, the maritime Antarctic) The main objective of the study is the evaluation of the diversity of actinobacteria associated with Polytrichum strictum dominant species of widespread Antarctic all moss turf subformation and their characteristics as the producers of biologically active compounds. The actinobacterial isolates were isolated by direct inoculation, phenol pretreatment, and heated treatment. The cultural properties of the isolates were evaluated using diagnostic media. The antimicrobial activity of the isolates was determined by the point inoculations method. The phylogenetic analysis was based on sequence analysis of the 16S rRNA gene. The biosynthetic genes screening was performed using polymerase chain reaction. A total of 23 actinobacterial isolates associated with P. strictum were isolated, the four identified genera being Streptomyces (7 isolates), Micromonospora (14 isolates), Kribbella (1 isolate), and Micrococcus (1 isolate). Eight psychrotrophic strains of all identified genera were identified. The optimal pH values for all isolates were in the range 6–10. Four isolates grew on the medium with 7.5% NaCl. A significant number of the isolates showed a wide range of enzymatic activities. Antagonists of a wide range of pathogenic microorganisms were found, including against multidrug-resistant strain of Candida albicans and Methicillin-resistant Staphylococcus aureus. Some strains were active against phytopathogenic bacteria, namely three strains against Erwinia amylovora, one strain against Agrobacterium tumefaciens, and one strain against Pectobacterium carotovorum. More than half of the isolates showed antifungal activity against Fusarium oxysporum and Aspergillus niger. The biosynthetic genes involved in synthesizing a wide range of bioactive compounds were found in more than 80% of isolates. 
Antarctic actinobacteria isolated in this study demonstrate potential as the producers of a wide range of biologically active compounds. Further studies of these actinobacteria may lead to the identification of previously unknown biologically active compounds. |
Tamako in Moratorium
Plot
Tamako is an unemployed university graduate living with her divorced father, who runs a sports equipment shop. Tamako spends her time sleeping, eating, watching TV, reading manga, and playing video games. She is disdainful of her father, who is fond of her, but wants her to find a job. He often comes home drunk, full of affection, and buys her expensive gifts, which she demands he return.
With the help of a local boy, Tamako has an amateur photoshoot and secretly applies for an idol group. Her father finds out, embarrassing her.
Tamako learns that her father is dating a local teacher. To learn more about the woman, Tamako attends her accessory-making class. They strike up a conversation, and the teacher realises who Tamako is and tries to befriend her. After Tamako complains to her about her father, the woman tells her she is mean.
Reception
It was ranked number nine in the top ten best Japanese films of 2013 by Kinema Junpo. The Japanese Professional Movie Awards ranked it at 6th place in its Best 10 2013 rankings. |
package main
import (
"fmt"
)
// main demonstrates pointer basics and the ways a map can (and cannot) be declared.
func main() {
	b := 255
	var a = &b // a is *int, pointing at b
	fmt.Printf("Type of a is %T\n", a)
	fmt.Println("address of b is", a)

	// Map initialized with a composite literal.
	s := map[string]int{"age": 24}

	// Correct: a var declaration without an initializer yields a nil map
	// (safe to read from, but writing to it panics).
	var t map[string]string

	// FIX: `t1 := map[string]string` does not compile — a short variable
	// declaration needs a value, so use an (empty) composite literal.
	t1 := map[string]string{}

	fmt.Println(s)
	fmt.Println(t)
	fmt.Println(t1)
}
|
"""Tests for AmazonRow."""
from datetime import datetime
import pytest
from tests.testlibraries.instance_resource import InstanceResource
from zaimcsvconverter.inputcsvformats.amazon_201911 import (
Amazon201911DiscountRow,
Amazon201911PaymentRow,
Amazon201911RowData,
Amazon201911RowFactory,
Amazon201911RowToSkip,
Amazon201911ShippingHandlingRow,
)
from zaimcsvconverter.models import Item, Store
class TestAmazon201911RowData:
    """Tests for AmazonRowData."""

    # Reason: Testing different version of row data is better to be separated code.
    # noinspection DuplicatedCode
    @staticmethod
    # pylint: disable=too-many-locals
    def test_init_and_property():
        """
        Property date should return datetime object.
        Property store_date should return used_store.
        """
        # Keys double as attribute names; insertion order matches the
        # positional constructor argument order of Amazon201911RowData.
        field_values = {
            "ordered_date": "2018/10/23",
            "order_id": "123-4567890-1234567",
            "item_name": "Echo Dot (エコードット) 第2世代 - スマートスピーカー with Alexa、ホワイト",
            "note": "販売: Amazon Japan G.K. コンディション: 新品",
            "price": "4980",
            "number": "1",
            "subtotal_price_item": "6276",
            "total_order": "6390",
            "destination": "ローソン桜塚",
            "status": "2018年10月23日に発送済み",
            "billing_address": "テストアカウント",
            "billing_amount": "5952",
            "credit_card_billing_date": "2018/10/23",
            "credit_card_billing_amount": "5952",
            "credit_card_identity": "Visa(下4けたが1234)",
            "url_order_summary": "https://www.amazon.co.jp/gp/css/summary/edit.html?ie=UTF8&orderID=123-4567890-1234567",
            "url_receipt": (
                "https://www.amazon.co.jp/gp/css/summary/print.html/ref=oh_aui_ajax_dpi?ie=UTF8&orderID=123-4567890-1234567"
            ),
            "url_item": "https://www.amazon.co.jp/gp/product/B06ZYTTC4P/ref=od_aui_detailpages01?ie=UTF8&psc=1",
        }
        row_data = Amazon201911RowData(*field_values.values())
        # These fields are exposed verbatim as strings.
        for attribute in (
            "order_id",
            "note",
            "destination",
            "status",
            "billing_address",
            "billing_amount",
            "credit_card_billing_date",
            "credit_card_billing_amount",
            "credit_card_identity",
            "url_order_summary",
            "url_receipt",
            "url_item",
            "item_name",
        ):
            assert getattr(row_data, attribute) == field_values[attribute]
        # These fields are converted to int / datetime by the row data class.
        assert row_data.price == 4980
        assert row_data.number == 1
        assert row_data.subtotal_price_item == 6276
        assert row_data.total_order == 6390
        assert row_data.date == datetime(2018, 10, 23, 0, 0)
class TestAmazon201911DiscountRow:
    """Tests for Amazon201911DiscountRow."""

    # pylint: disable=unused-argument
    @staticmethod
    def test_init(yaml_config_load, database_session_item):
        """Arguments should set into properties."""
        discount_row = Amazon201911DiscountRow(InstanceResource.ROW_DATA_AMAZON_201911_AMAZON_POINT)
        assert discount_row.date == datetime(2019, 11, 9, 0, 0, 0)
        # Store / item lookups should resolve to the persisted models.
        assert isinstance(discount_row.store, Store)
        assert discount_row.store.name_zaim == "Amazon Japan G.K."
        assert isinstance(discount_row.item, Item)
        assert discount_row.item.name == "(Amazon ポイント)"

    @staticmethod
    def test_total_order_fail():
        """Property should raise ValueError when value is None."""
        with pytest.raises(ValueError) as error:
            # pylint: disable=expression-not-assigned
            # noinspection PyStatementEffect
            Amazon201911DiscountRow(InstanceResource.ROW_DATA_AMAZON_201911_HUMMING_FINE).total_order
        assert str(error.value) == "Total order on discount row is not allowed empty."
class TestAmazon201911ShippingHandlingRow:
    """Tests for Amazon201911ShippingHandlingRow."""

    @staticmethod
    def test_subtotal_price_item_fail():
        """Property should raise ValueError when value is None."""
        with pytest.raises(ValueError) as error:
            # pylint: disable=expression-not-assigned
            # noinspection PyStatementEffect
            Amazon201911ShippingHandlingRow(InstanceResource.ROW_DATA_AMAZON_201911_HUMMING_FINE).subtotal_price_item
        assert str(error.value) == "Subtotal price item on shipping handling row is not allowed empty."
class TestAmazon201911PaymentRow:
    """Tests for Amazon201911PaymentRow."""

    # Reason: Testing different version of row data is better to be separated code.
    # noinspection DuplicatedCode
    # pylint: disable=unused-argument
    @staticmethod
    def test_init(yaml_config_load, database_session_item):
        """Arguments should set into properties."""
        payment_row = Amazon201911PaymentRow(InstanceResource.ROW_DATA_AMAZON_201911_ECHO_DOT)
        assert payment_row.price == 4980
        assert payment_row.number == 1
        assert payment_row.date == datetime(2019, 11, 9, 0, 0, 0)
        # Store / item lookups should resolve to the persisted models.
        assert isinstance(payment_row.store, Store)
        assert payment_row.store.name_zaim == "Amazon Japan G.K."
        assert isinstance(payment_row.item, Item)
        assert payment_row.item.name == "Echo Dot (エコードット) 第2世代 - スマートスピーカー with Alexa、ホワイト"

    @staticmethod
    def test_price_fail():
        """Property should raise ValueError when value is None."""
        with pytest.raises(ValueError) as error:
            # pylint: disable=expression-not-assigned
            # noinspection PyStatementEffect
            Amazon201911PaymentRow(InstanceResource.ROW_DATA_AMAZON_201911_AMAZON_POINT).price
        assert str(error.value) == "Price on payment row is not allowed empty."

    @staticmethod
    def test_number_fail():
        """Property should raise ValueError when value is None."""
        with pytest.raises(ValueError) as error:
            # pylint: disable=expression-not-assigned
            # noinspection PyStatementEffect
            Amazon201911PaymentRow(InstanceResource.ROW_DATA_AMAZON_201911_AMAZON_POINT).number
        assert str(error.value) == "Number on payment row is not allowed empty."
class TestAmazon201911RowFactory:
    """Tests for AmazonRowFactory."""

    # pylint: disable=unused-argument
    @staticmethod
    @pytest.mark.parametrize(
        "argument, expected",
        [
            (InstanceResource.ROW_DATA_AMAZON_201911_ECHO_DOT, Amazon201911PaymentRow),
            (InstanceResource.ROW_DATA_AMAZON_201911_AMAZON_POINT, Amazon201911DiscountRow),
            (InstanceResource.ROW_DATA_AMAZON_201911_SHIPPING_HANDLING, Amazon201911ShippingHandlingRow),
            (InstanceResource.ROW_DATA_AMAZON_201911_MS_Learn_IN_MANGA, Amazon201911RowToSkip),
        ],
    )
    def test_create(argument: Amazon201911RowData, expected, yaml_config_load, database_session_item):
        """Factory should create the row class matching the kind of row data.

        Fixes from review: the local was previously named
        ``gold_point_card_plus_row`` (a copy/paste leftover from the
        GOLD POINT CARD+ tests) and the docstring described an unrelated
        behavior; no protected member is accessed, so the stale pylint
        pragma was removed as well.
        """
        amazon_row = Amazon201911RowFactory().create(argument)
        assert isinstance(amazon_row, expected)
|
<reponame>Illation/GLFramework
#include "stdafx.hpp"
#include "LightComponent.hpp"
#include "../SceneGraph/Entity.hpp"
#include "../GraphicsHelper/LightVolume.hpp"
#include "../GraphicsHelper/ShadowRenderer.hpp"
// Takes ownership of the given light; it is released in the destructor.
LightComponent::LightComponent(Light* light):
	m_Light(light)
{
}

LightComponent::~LightComponent()
{
	SafeDelete(m_Light);
}
// Component interface overrides: lights need no per-frame setup or update,
// and they do not render in the regular draw passes — presumably they are
// rendered via DrawVolume during the lighting pass instead (see below).
void LightComponent::Initialize()
{
}

void LightComponent::Update()
{
}

void LightComponent::Draw(){}
void LightComponent::DrawForward(){}
// Renders this light's bounding volume using the component's transform.
void LightComponent::DrawVolume()
{
	m_Light->DrawVolume(GetTransform());
}

// Delegates shadow map generation to the light, passing the transform.
void LightComponent::GenerateShadow()
{
	m_Light->GenerateShadow(GetTransform());
}
// Sends this light's uniforms to the given shader program, but only when
// the component moved or the light itself was flagged as changed.
void LightComponent::UploadVariables(GLuint shaderProgram, unsigned index)
{
	if (m_PositionUpdated || m_Light->m_Update)
	{
		m_Light->UploadVariables(shaderProgram, GetTransform(), index);
		// Clear both dirty flags until the next change.
		m_Light->m_Update = false;
		m_PositionUpdated = false;
	}
}
// Uploads position, brightness-scaled color, and radius for the point light
// at the given array index in the shader's "pointLights" uniform array.
void PointLight::UploadVariables(GLuint program, TransformComponent* comp, unsigned index)
{
	// Build the common uniform name prefix once, e.g. "pointLights[3].".
	string prefix = "pointLights[" + to_string(index) + "].";
	vec3 position = comp->GetPosition();
	vec3 scaledColor = color*brightness;
	glUniform3f(glGetUniformLocation(program, (prefix + "Position").c_str()),
		position.x, position.y, position.z);
	glUniform3f(glGetUniformLocation(program, (prefix + "Color").c_str()),
		scaledColor.x, scaledColor.y, scaledColor.z);
	glUniform1f(glGetUniformLocation(program, (prefix + "Radius").c_str()), radius);
}
// Renders the spherical light volume tinted with the brightness-scaled color.
void PointLight::DrawVolume(TransformComponent* pTransform)
{
	PointLightVolume::GetInstance()->Draw(pTransform->GetPosition(), radius, color*brightness);
}
// Uploads direction and brightness-scaled color for the directional light
// at the given array index in the shader's "dirLights" uniform array.
void DirectionalLight::UploadVariables(GLuint program, TransformComponent* comp, unsigned index)
{
	// Build the common uniform name prefix once, e.g. "dirLights[0].".
	string prefix = "dirLights[" + to_string(index) + "].";
	vec3 forward = comp->GetForward();
	vec3 scaledColor = color*brightness;
	glUniform3f(glGetUniformLocation(program, (prefix + "Direction").c_str()),
		forward.x, forward.y, forward.z);
	glUniform3f(glGetUniformLocation(program, (prefix + "Color").c_str()),
		scaledColor.x, scaledColor.y, scaledColor.z);
}
// Renders the full-screen directional light volume; shadowed lights take the
// shadow-map path, unshadowed ones the plain path.
void DirectionalLight::DrawVolume(TransformComponent* pTransform)
{
	vec3 litColor = color*brightness;
	vec3 forward = pTransform->GetForward();
	if (!IsShadowEnabled())
	{
		DirectLightVolume::GetInstance()->Draw(forward, litColor);
		return;
	}
	DirectLightVolume::GetInstance()->DrawShadowed(forward, litColor, m_pShadowData);
}
// Enables or disables shadow casting by allocating / releasing shadow data.
void DirectionalLight::SetShadowEnabled(bool enabled)
{
	// Disabling always releases the shadow data.
	if (!enabled)
	{
		SafeDelete(m_pShadowData);
		m_pShadowData = nullptr;
		return;
	}
	// Enabling is a no-op when shadow data already exists.
	if (IsShadowEnabled())
		return;
	m_pShadowData = new DirectionalShadowData(glm::ivec2(1024, 1024)*8);
}
// Renders the directional shadow map for this light, if shadows are enabled.
void DirectionalLight::GenerateShadow(TransformComponent* pTransform)
{
	if(IsShadowEnabled())ShadowRenderer::GetInstance()->MapDirectional(pTransform, m_pShadowData);
}
On Thursday, April 10, U.S. equity markets sold off sharply. The Dow was down by 1.62%, the S&P 500 declined by 2.09%, and the NASDAQ fell by 3.10%. Because the price of gold went up by 1.12% on the day, the declines were even greater in “real” (i.e., gold) terms: -2.71%, -3.17%, and -4.17%, respectively.
What happened that day? Well, among other things, 218 Republican congressmen signed their names at the bottom of a political suicide note drafted by Paul Ryan, by voting to pass his “Fiscal Year 2015 Budget Resolution.”
This vote, along with the Budget Resolution itself, dealt a major blow to Republicans' hopes for big gains in the November elections. From March 31, the day before Ryan's budget plan was announced, to April 16, the advantage held by the Democrats in the "Generic Congressional Vote" opinion polls widened from 1.2 percentage points to 2.5 percentage points.
Right now, the Democrats are falling all over themselves to grab the lifeline that Ryan has thrown them. During the fall campaign, rather than being forced to try to defend the indefensible (i.e., Obamacare), the Democrats can focus attention on the Republicans’ budget plan, which amounts to performing surgery without anesthesia (big spending cuts without strong economic growth).
The Republican Party is known as “the stupid party,” and Ryan’s budget provides strong evidence for this appellation. His plan is strategically, conceptually, economically, and politically stupid.
A plan lays out a series of actions that are intended to achieve a goal. The goal is the most fundamental part of a plan. It must be logical, worthy, important, and correct. Ryan’s goal, balancing the budget, is none of these things. While Ryan calls his plan “The Path to Prosperity,” a better name would be: “The Path to Austerity;” or perhaps, “The Path to Blowing the 2014 Elections.”
Ryan’s plan fails strategically in its first sentence: “Washington owes the American people a responsible, balanced budget.” Ryan’s plan assumes that the most important goal of a budget plan is to eliminate the deficit. It is not. The most important objective is to maximize America’s prosperity.
Prosperity is about jobs and economic growth. Tellingly, while Ryan’s budget plan package includes 7 summary tables and 12 graphs, none of them mentions either jobs or economic growth. Instead, they focus remorselessly on spending, deficits, and debt.
The truth is that balancing the budget is much less important than maximizing our rate of economic growth (which will also maximize both total employment and wage growth). Republicans should always be willing to accept higher near-term deficits (e.g., by cutting taxes) in order to obtain faster economic growth. Here’s why this is fiscally, as well as economically and politically, sound.
A balance sheet has two sides, a left side for assets, and a right side for liabilities. Every business knows that assets are more important than liabilities. A company will gladly add debt, if the assets that it can acquire with the capital raised are worth significantly more than the debt incurred. No successful, growing company focuses on paying off its debt.
Ryan’s plan seems to assume that there is only one side to the federal government’s balance sheet, the right (liabilities) side. However, when asked to buy Treasury bonds, the financial markets look at both sides. And, American strength and prosperity is much more a function of the left (assets) side of the federal balance sheet than the right.
So, what is on the left side of the federal government’s balance sheet? Against what assets are the financial markets lending the U.S. money?
While our government owns some physical things (mainly land) that could be sold for cash, the major asset of the federal government is the present value of its future revenues. This, in turn, is equal to the present value of future GDP times the “tax take,” which is the percent of GDP that the government captures via taxation.
While it pays lip service to economic growth, Ryan’s plan is overwhelmingly about spending, deficits, and debt. This is the wrong focus. Achieving the fastest possible rate of economic growth should be the paramount objective of government budget policy.
This is because:
The present value of GDP is exquisitely sensitive to the long-term rate of real economic growth. While the federal government may capture 18% of GDP, the states, the cities, and the people get to keep the remaining 82%. Both higher personal incomes and higher state and local tax revenues translate into less political pressure for federal spending. Even if your goal is only to cut spending, you need fast economic growth to create an environment where it is politically possible to cut spending. Growth must come first. Ronald Reagan understood this, even if present day Republican “leaders” do not.
How sensitive is the present value of GDP (and therefore federal revenues) to the rate of economic growth? Very, very, very sensitive. Consider the following:
Increasing our long-term real GDP growth rate by just 0.08 percentage points (from the CBO baseline of 2.15%) would do as much for federal finances as cutting federal spending immediately and permanently by 3.0% of GDP. (This would be equivalent to reducing this year’s outlays by more than $500 billion.)
Increasing economic growth by 0.5 percentage points (to 2.65%) would more than quadruple the present value of future GDP. At this higher growth rate, we could cut taxes in half, and still more than double the present value of future federal revenues vs. the CBO baseline.
Increasing our growth rate to 4.0%, which the U.S. averaged under the Bretton Woods gold standard (1947 – 1973), would increase the present value of future GDP by a factor of 125,000. This is not a misprint—we’re talking about 125,000 times more wealth, as the financial markets judge it.
OK, now let’s return to the Ryan budget.
They say that a picture is worth a thousand words. Below is the one graph that Ryan presents in the body of his plan document.
Now, if a private company published a plan with a graph like this, investors would assume that it was going out of business. No one would think that the company’s goal was faster growth.
The trajectory of U.S. debt held by the public in Ryan’s “Path to Prosperity” is no more viable or desirable than that of the “Current Path.” Both paths would lead to economic disaster, just a different kind of economic disaster.
The U.S. dollar is the currency of international trade, and the world monetary system runs on U.S. Treasury securities. Ryan’s plan calls for ripping the foundation out of the world’s financial structure, for the sake of, in essence, investing tax dollars in low-yielding U.S. Treasury bonds.
But wait! Chairman Ryan says that he does care about economic growth. Sorry, but this assertion is not supported by his plan document. For one thing, while it takes credit for producing higher growth, Ryan’s plan does not bother to present any GDP numbers. For this, one has to look to the CBO’s analysis of Ryan’s budget, “Budgetary and Economic Outcomes Under Paths for Federal Revenues and Noninterest Spending Specified by Chairman Ryan, April 2014.”
Based upon worksheet “13. Nominal GDP” of “45211-RyanSuppData-2.xlsx” (published by the CBO along with their report), Ryan’s budget plan would increase nominal GDP growth by 0.13 percentage points through FY2024. Because the CBO report does not suggest that Ryan’s plan would impact inflation, we must assume that the CBO believes that the Ryan plan would increase real GDP growth by the same amount.
So, Ryan’s plan would put the country through the fiscal wringer (and put many Republican candidates in danger of losing the November election) for the sake of squeezing out an additional 0.13 percentage points of annual growth between now and 2024. To present a plan that would increase economic growth by only 0.13 percentage points above the anemic CBO base case, when we are mired in the weakest economic recovery in American history, is to suggest that you don’t understand the problem.
Actually, Chairman Ryan doesn’t just suggest that he doesn’t understand the problem; he declares that he doesn’t understand the problem. The title of one of Ryan’s (unnumbered) “FY 2015 Budget Charts” (published along with his main document) reads, “Spending is the Problem.” No, Congressman, spending isn’t the problem. Slow economic growth is the problem.
During President Clinton’s two terms, real GDP growth averaged 3.89%, which was very close to the 3.86% that the U.S. averaged from 1790 to 2000. In contrast, from 2000 to 2013, real economic growth averaged only 1.76%. This was the slowest growth rate for any 13-year period since the Great Depression. Under Bush 43 and Obama, America moved 14.7 million jobs away from full employment.
If real GDP had grown at 3.89% for the past 13 years, the federal government would have run a $0.6 trillion surplus in 2013 (instead of a $0.6 trillion deficit), despite Obama’s wildly extravagant spending. And, of course, if President Bush had not made the policy errors that caused the slow growth (including standing idly by while the Federal Reserve trashed the dollar and the economy), there would never have been a President Obama, or an $847 billion “stimulus” program to run up the deficit.
Even more important than its effect upon the federal budget is the impact that the faster economic growth would have had upon family budgets. With 3.89% growth, 2013 real GDP would have been 31% higher ($22.0 trillion rather than $16.8 trillion). The nation would have been at effective full employment during the entire 13-year period. Spending on various welfare state benefits would have been much, much lower in 2013 than it actually was.
Given that, for most people, prosperity requires a decent paying job, it is odd that Ryan calls his plan the “Path to Prosperity.” It is hard to imagine that the meager 2.58% real growth rate that Ryan’s plan would produce through 2024 would do much for either employment or family incomes.
During the first 4.5 years of Obama’s so-called “economic recovery,” real GDP increased at a rate of 2.35%, while America actually lost ground (by 1.3 million FTE jobs) with respect to full employment. While Ryan’s promised 2.58% growth rate is higher than this, it is hard to see how it could put much of a dent in our employment shortfall, which was 14.9 million FTE jobs as of the end of March.
What Ryan is offering is a far cry from the 4.57% growth rate of the first 18 quarters of the Reagan recovery, which produced strong growth in both jobs and family incomes.
Perhaps even more alarming than Ryan’s pathetically low growth target is how clueless he seems to be about what actually produces economic growth—or even what it would take to actually cut spending.
Strange things happen when people go to Congress. Men that were previously strong Christians begin worshipping the CBO as if it were some kind of number-spewing golden calf. This seems to be what has happened to Chairman Ryan.
Federal spending is a negative for economic growth, but, in terms of impact, this factor runs a distant fourth behind monetary policy, tax rates (especially on capital), and regulations. However, because the CBO seems to give growth credit only for deficit reduction, Ryan’s plan focuses on that as a source of economic expansion. Unfortunately, the CBO’s model provides very small growth increments in return for large (and politically unacceptable) spending cuts.
The Ryan budget does mention tax reform as a potential source of higher growth, but the plan itself does not propose a tax reform program, and does not take credit for any increase in growth as a result of tax changes.
Again genuflecting before the CBO, Ryan talks of “…revenue-neutral fundamental tax reform.” “Revenue neutral” means, “revenue neutral under the CBO’s static scoring methodology.” As a practical matter, this means, “tax reform that could never actually be enacted.”
Static-revenue-neutral tax reform will produce losers as well as winners, and the losers will always band together to defeat the proposal. “Pro-growth” tax reform can only happen if Republicans are willing to defy the CBO, and propose a net tax cut. Ryan’s budget plan seems to be saying he is not willing to take on the CBO in this way.
However, as important as tax reform is, it is not as important as monetary reform, which would force the Federal Reserve to stabilize the dollar in terms of something real, and stop them from manipulating interest rates.
America has now suffered through more than 40 years of monetary chaos. The Fed’s efforts at economic central planning via a discretionary monetary policy have severely impacted U.S. (and world) economic growth. If the U.S. economy had grown as fast during the past 40 years as it had over the 183 years prior to that, GDP would be 63% greater today, and we wouldn’t be spending our time talking about deficits, debt, and inequality.
So, Ryan’s budget plan does not even mention the nation’s single largest obstacle to prosperity, the out-of-control Federal Reserve. Other than that, it’s great.
The Ryan budget plan is all about cutting spending, but even in this arena, it fails—both economically and politically. The Medicare reforms he proposes will spawn another round of Democratic campaign ads showing Republicans throwing old people off cliffs.
There are two points that the Republicans have to understand if they want the voters to give them full control of government again:
At growth rates that are normal for America (3.5%+), all of the financial problems of Social Security and Medicare disappear, with no tax increases and no benefit cuts.
Only scientific progress can “bend the cost curve” for Medicare. We must mobilize to find cures for Alzheimer’s, cancer, and diabetes.
Unfortunately, the Ryan budget plan doesn’t mention either of these points. Instead, it surrenders to the Washington “conspiracy against economic growth,” and then proposes tackling our entitlement problems via various “cost shifting” exercises.
There is a streak of righteous fiscal masochism in today’s Republican Party. The belief seems to be that we have (fiscally) sinned, and we must do (austerity) penance. This translates into Republican candidates calling for huge (although always unspecified) spending cuts, as well as balanced budget amendments. While this approach may play well in some Republican primaries, it will prove deadly in all too many general election campaigns.
Paul Ryan’s budget plan represents a suicide note for the Republican Party, because it offers austerity rather than growth. This November, the Republicans will either present themselves as the party of economic growth, or they will lose a number of winnable races. |
<reponame>bitfield/kg-generics
package dupes_test
import (
"dupes"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestDupesIsTrueWhenInputContainsNonConsecutiveDuplicates verifies that
// Dupes reports true even when the repeated value (1 here) is separated by
// other elements, not adjacent.
func TestDupesIsTrueWhenInputContainsNonConsecutiveDuplicates(t *testing.T) {
	t.Parallel()
	s := []int{1, 2, 3, 1, 5}
	want := true
	got := dupes.Dupes(s)
	if !cmp.Equal(want, got) {
		t.Error(cmp.Diff(want, got))
	}
}
// TestDupesIsFalseWhenInputContainsNoDuplicates verifies that Dupes reports
// false for a slice with no repeats — here a single-element string slice,
// which also exercises the generic Dupes with a second element type.
func TestDupesIsFalseWhenInputContainsNoDuplicates(t *testing.T) {
	t.Parallel()
	s := []string{"a"}
	want := false
	got := dupes.Dupes(s)
	if !cmp.Equal(want, got) {
		t.Error(cmp.Diff(want, got))
	}
}
|
def _read_square():
    """Read a square like ``a4`` from stdin and return (file, rank) as ints.

    Files ``a``..``h`` map to 1..8.  Like the original code, this raises if
    the input line is not exactly two characters.
    """
    file_char, rank_char = input()  # original used a redundant map(str, ...)
    return ord(file_char) - 96, int(rank_char)


def king_moves(start, target):
    """Return the list of king moves (e.g. ``['RU', 'R']``) from start to target.

    ``start`` and ``target`` are (file, rank) int pairs.  Each step combines a
    horizontal letter (L/R) and/or a vertical letter (D/U), so the number of
    steps equals the Chebyshev distance between the squares.
    """
    file_delta = start[0] - target[0]
    rank_delta = start[1] - target[1]
    moves = []
    while file_delta != 0 or rank_delta != 0:
        step = ''
        if file_delta > 0:
            step += 'L'
            file_delta -= 1
        elif file_delta < 0:
            step += 'R'
            file_delta += 1
        if rank_delta > 0:
            step += 'D'
            rank_delta -= 1
        elif rank_delta < 0:
            step += 'U'
            rank_delta += 1
        moves.append(step)
    return moves


def main():
    """Print the minimal number of king moves, then one move per line."""
    start = _read_square()
    target = _read_square()
    moves = king_moves(start, target)
    print(len(moves))
    for step in moves:
        print(step)


if __name__ == '__main__':
    main()
<gh_stars>1-10
#pragma once
#include "glm/gtc/noise.hpp"
// Returns a tileable Worley (cellular) noise value in the range [0, 1].
// `point` is a 3D sample position with components in [0, 1];
// `cell_count` presumably sets the number of cells per axis (controls
// feature frequency) — confirm against the implementation.
[[nodiscard]] auto worley(glm::vec3 point, float cell_count) noexcept -> float;

// Returns a tileable Perlin noise value in [0, 1].
// `p` is a 3D sample position in range [0, 1]; `frequency` is the base
// frequency and `octave_count` the number of octaves accumulated.
[[nodiscard]] auto perlin(glm::vec3 p, float frequency, int octave_count) noexcept -> float;
|
Planet Cake
Planet Cake is a reality television series that follows the daily operations of one of Australia's most renowned cake businesses under the tight rein of Paris Cutler, known to her eclectic team of designers and decorators as The Cake Queen.
Paris Cutler gave up her career in the corporate world and bought Planet Cake in 2003 as an existing small store with only one staff member; she had a dream to put decorated cakes on the map both in Australia and globally and over the next 12 years made Planet Cake an internationally recognised brand.
Planet Cake has created over 12000 couture cakes, including cakes for A-list celebrities such as Nicole Kidman, Celine Dion, Rihanna, Katy Perry, Keith Urban, John Travolta and Lady Gaga to name a few, as well as for television and many magazines. They have also created a host of ‘stunt’ cakes, the most famous being a realistic replica of the Sydney Opera House for Australia Day 2011, which weighed over 1.3 tons and required 32 cake decorators to make.
In 2012, Paris, along with the Planet Cake team, had their own TV show, Planet Cake, on Foxtel's Lifestyle Food network; the show became one of the network's highest-rated shows for the year and has now been shown in over 30 countries around the world. As a result, Planet Cake was awarded an Astra Award for Best Lifestyle Program in 2012. Paris has written four books — Planet Cake: a Guide for Beginners, Planet Cake Cupcakes, Planet Cake Celebrate and Planet Cake Kids — all published by Murdoch Books and now translated into 7 different languages.
The Planet Cake School was launched in 2004 and now has the title of being Australia’s largest cake decorating school. They teach in eight locations in NSW, Australia and they also run classes in Doha, Qatar. Planet Cake have a unique curriculum which is continually updated and improved upon and their education program has trained over 6000 students as well as educating many cake decorators who now operate their own businesses and have become well known in their own right. |
#include "RealPlayer.hpp"
#include <iostream>
using namespace std;
/*
RealPlayer::RealPlayer ( float width, float height )
{
init( width, height);
_number = 0;
}
*/
// Processes one GUI event for this player and, on frame events, applies the
// current movement to the paddle position.  Player 1 is driven by the
// Up/Down arrow keys; player 2 movement is not implemented yet (see TODO).
// Returns false when the pending move would leave the 160x90 playing field
// (position is then not applied) — NOTE(review): confirm how the caller
// interprets the return value.
bool RealPlayer::update( const osgGA::GUIEventAdapter& ea, osg::Group* root )
{
	bool accelBall = false;
	osg::Vec3 pos = getMatrix().getTrans();
	// Half extents of the paddle, used for field-boundary checks below.
	float halfW = width() * 0.5f, halfH = height() * 0.5f;
	switch ( _number )
	{
	// TODO
	case 1:
		if ( ea.getEventType()==osgGA::GUIEventAdapter::KEYDOWN )
		{
			switch ( ea.getKey() )
			{
			// TODO: 0.2 should not be hardcoded, but a property of the game class
			case osgGA::GUIEventAdapter::KEY_Up:
				_speedVec = osg::Vec3(0.0f, 0.2f, 0.0f);
				break;
			case osgGA::GUIEventAdapter::KEY_Down:
				_speedVec = osg::Vec3(0.0f, -0.2f, 0.0f);
				break;
			// TODO: implement accelBall?
			case osgGA::GUIEventAdapter::KEY_Space:
				accelBall = true;
				break;
			default: break;
			}
		}
		else if ( ea.getEventType()==osgGA::GUIEventAdapter::KEYUP )
			// Key released: stop moving.
			_speedVec = osg::Vec3();
		// Reject moves that would push the paddle outside the field.
		if ( pos.x()+ _speedVec.x() <halfW || pos.x()+ _speedVec.x() >160-halfW ) // TODO: remove hardcoded 160
		{
			return false;
		}
		if ( pos.y()+ _speedVec.y() <halfH || pos.y()+ _speedVec.y() >90-halfH ) // TODO: remove hardcoded 90
		{
			return false;
		}
		break;
	// TODO: implement Player2 movement
	case 2:
		break;
	default: break;
	}
	// only apply the movement on new frame events
	if ( ea.getEventType() !=osgGA::GUIEventAdapter::FRAME )
		return true;
	pos += _speedVec;
	setMatrix( osg::Matrix::translate(pos) );
	return true;
}
// Current score of this player (delegates to the PongScore member).
const int RealPlayer::getScore()
{
	return _score.getScore();
}

// Increments this player's score (delegates to the PongScore member).
void RealPlayer::incrScore()
{
	_score.incrScore();
}

// Resets this player's score (delegates to the PongScore member).
void RealPlayer::resetScore()
{
	_score.resetScore();
}
// Assigns the player number; the commented-out code suggests 1/2 mapped to
// the left/right score display sides.
// NOTE(review): the disabled validation below used to return false for
// numbers other than 1 or 2; the function currently accepts any value and
// always returns true — confirm this is intentional.
bool RealPlayer::setPlayerNumber ( const int number )
{
	_number = number;
/*	if ( _number == 1 )
	{
		_scoreText.setScoreSide(PongScore::LEFT);
		return true;
	}
	else if ( _number == 2 )
	{
		_scoreText.setScoreSide(PongScore::RIGHT);
		return true;
	}
	else
		return false;*/
	return true;
}
|
Utility of Nutritional Screening in Predicting Short-Term Prognosis of Heart Failure Patients Summary Controlling nutritional status (CONUT) uses 2 biochemical parameters (serum albumin and cholesterol level), and 1 immune parameter (total lymphocyte count) to assess nutritional status. This study examined if CONUT could predict the short-term prognosis of heart failure (HF) patients. A total of 482 (57.5%) HF patients from the Ibaraki Cardiovascular Assessment Study-HF ( n = 838) were enrolled (298 men, 71.7 ± 13.6 years). Blood samples were collected at admission, and nutritional status was assessed using CONUT. CONUT scores were defined as follows: 0-1, normal; 2-4, light; 5-8, moderate; and 9-12, severe degree of undernutrition. Accordingly, 352 (73%) patients had light-to-severe nutritional disturbances. The logarithmically transformed plasma brain natriuretic peptide (log BNP) concentration was significantly higher in the moderate-severe nutritional disturbance group (2.92 ± 0.42) compared to the normal group (2.72 ± 0.45, P < 0.01). CONUT scores were significantly higher in the in-hospital death patients compared with patients who were discharged following symptom alleviation . With the exception of transferred HF patients ( n = 22), logistic regression analysis that incorporated the CONUT score and the log BNP, showed that a higher CONUT score ( P = 0.019) and higher log BNP ( P = 0.009) were predictors of in-hospital death, and the median duration of hospital stay was 20 days. Our results demonstrate the usefulness of CONUT scores as predictors of short-term prognosis in hospitalized HF patients. (Int Heart J 2018; 59: 354-360) simple, well-defined tool to identify patients at risk of developing nutrition-related complications. Hospitalized patients with advanced HF are at a high risk of undernutrition and death. Although Suzuki, et al. 
have reported slightly longer hospital stays among HF patients with higher CONUT scores, the relationship between a higher CONUT score and in-hospital death has not been elucidated. The aim of this study was to assess the usefulness of CONUT in predicting the short-term prognosis of hospitalized patients with HF symptoms. with in-hospital death. Our results showed that a higher CONUT score was a significant predictor of in-Int T he prevalence of cardiovascular disorders in Japan has increased markedly due to a rapidly aging society and the westernization of lifestyle, as both increase the risk of developing coronary artery disease and other diseases. In an epidemiological study conducted in Japan, the number of heart failure (HF) patients was predicted to reach 1,300,000 by the year 2030. 1) Angiotensin-converting enzyme inhibitors and blockers are essential components of the treatment regimen for patients with chronic HF. Recently, nonpharmacological therapies, such as cardiac resynchronization therapy (CRT), 2) exercise therapy, 3) and Waon therapy 4) have also been used for the treatment of HF patients. Similarly, various other HF therapy regimens have been developed, and the field is dynamic and progressive. How-ever, although therapies have significantly improved, or may improve, patient survival, HF is still associated with high morbidity and mortality. Editorial p.245 According to some registry studies conducted in Japan, the 1-year HF mortality rate is 7-9%, while the rate of hospital readmission due to an HF exacerbation within 1-year of hospital discharge is 15-40%. 5) These reports indicate that the currently employed HF treatment regimen is insufficient. Undernutrition is one of the most important determinants of worse clinical outcomes in HF patients. 
Intestinal CONUT SCORE TO PREDICT SHORT-TERM PROGNOSIS CONUT indicates controlling nutritional status edema or anorexia-induced low nutritional intake, liver dysfunction, cytokine-induced hypercatabolism, insulin resistance, and other mechanisms may all result in HFrelated undernutrition. 6) HF patients with undernutrition enter a vicious cycle of inflammation, catabolic drive, and undernutrition, that further exacerbate HF. 7) Thus, HF therapy and cardiac rehabilitation cannot advance smoothly in patients with undernutrition, and undernutrition further increases the rate of HF-related mortality and readmission. A substantial number of studies have examined the relationship between undernutrition and HF prognosis, and on the basis of their results, it is apparent, that undernutrition is an independent disease prognosticator in HF patients. 8) Therefore, nutritional screening to differentiate between malnourished and non-malnourished patients is the first step in the successful nutritional management of HF patients. Controlling nutritional status (CONUT) uses 2 biochemical parameters (serum albumin and cholesterol level), and 1 immune parameter (total lymphocyte count) to assess the nutritional status. CONUT is a simple, welldefined tool to identify patients at risk of developing nutrition-related complications. Hospitalized patients with advanced HF are at a high risk of undernutrition and death. Although Suzuki,et al. 9) have reported slightly longer hospital stays among HF patients with higher CO-NUT scores, the relationship between a higher CONUT score and in-hospital death has not been elucidated. The aim of this study was to assess the usefulness of CONUT in predicting the short-term prognosis of hospitalized patients with HF symptoms. Study population: A total of 838 patients with HF symptoms were hospitalized between June 2012 and March 2015 and were enrolled in the Ibaraki Cardiovascular Assessment Study-HF (ICAS-HF) registry. 
The ICAS-HF is a multicenter registry study involving 11 hospitals in Ibaraki Prefecture, Japan. The ICAS-HF registry inclusion criteria were patient age ≥ 20 years and fulfillment of the Framingham criteria for HF. 10) The registry exclusion criteria were patients aged < 20 years, patients who did not provide informed consent to the attending physician, patients with a limited life expectancy due to a malignant neoplasm, patients in whom a two-year observation was deemed to be impossible, and patients who were medically judged as inappropriate by the attending physician. Written informed consent was obtained from all patients, and data collection for this study was approved by the institutional review boards of the 11 participating hospitals. Additionally, the ICAS-HF registry study was conducted in accordance with the ethical principles of the Declaration of Helsinki. Data from the ICAS-HF registry were retrospectively analyzed. Three parameters are used to calculate the CONUT score: serum albumin level, total cholesterol level, and total lymphocyte count (Table I). Among the 838 patients enrolled in the registry, serum albumin level was unavailable for 25 patients, total cholesterol level was unavailable for 146 patients, and total lymphocyte count was unavailable for 267 patients. Registry patients for whom CONUT scores could not be estimated were excluded (n = 356); thus, a total of 482 patients with CONUT scores were ultimately enrolled in this study. Table II summarizes the clinical characteristics of the excluded patients. The patient characteristics of the excluded patients were comparable to those of the enrolled patients, and most study variables were similar, with the exception of the serum albumin level and therapeutic agents prescribed (Table II). Data collection: Baseline clinical data were collected for each patient.
All patient-related information collected at enrollment, including medical history, laboratory test results, and echocardiographic findings, was recorded in a computer database. Essentially, blood sampling and echocardiographic examinations were performed within 72 hours of admission. Blood tests were performed to determine total lymphocyte counts, hemoglobin, albumin, total cholesterol, serum creatinine, C-reactive protein, and plasma brain natriuretic peptide (BNP) levels. The estimated glomerular filtration rate (eGFR) was calculated using the following formula: eGFR = 194 × (serum creatinine)^(−1.094) × (age in years)^(−0.287) for male patients. The adjusted eGFR value for female patients was calculated using the following formula: eGFR (female) = eGFR × 0.739. 11) As edema is known to significantly affect patient body weight at admission, we measured body weight after the condition of the patient had stabilized. The body mass index (BMI) was calculated as body weight in kilograms divided by the square of the height in meters. In the present study, we defined a reduced left ventricular ejection fraction (LVEF) as a visual LVEF < 40% at admission. Assessment of nutritional status using CONUT scores: The CONUT was developed by Ignacio de Ulbarri, et al. 12) as a screening tool for undernutrition utilizing a hospital population. The CONUT score is a sum of 3 parameters: the serum albumin level (g/dL), total cholesterol level (mg/dL), and the total lymphocyte count (count/L) (Table I). The serum albumin level serves as an indicator of the protein reserves, while the total cholesterol level is an indicator of caloric depletion. The total lymphocyte count is used as an indicator of undernutrition-mediated impaired immune defense.
Patients with CONUT scores of 0-1 have a normal nutritional status, those with CO-NUT scores of 2-4 have a light degree of undernutrition, those with CONUT scores of 5-8 have a moderate degree of undernutrition, and those with CONUT scores of 9-12 have a severe degree of undernutrition (Table I). Correlation between BNP level and nutritional status: Results are expressed as mean ± standard deviation or as median (inter-quartile range). BMI indicates body mass index; BNP, brain natriuretic peptide; CONUT, controlling nutritional status; GFR, glomerular filtration rate; HF, heart failure; LVEF, left ventricular ejection fraction; n, number of patients; and NYHA, New York Heart Association. Data were missing for the following characteristics: BMI for 19 patients in the without CONUT score group, and 15 in the with CONUT score group; Hemoglobin for 1 patient in the without CONUT score group; BNP for 59 patients in the without CONUT score group, and 62 in the with CONUT score group; Albumin for 25 patients in the without CONUT score group; C-reactive protein for 7 patients in the without CONUT score group, and 1 in the with CONUT score group; and LVEF for 42 patients in the without CONUT score group, and 24 in the with CONUT score group. Since the number of patients with severe nutritional disturbance was only 15, the logarithmically transformed plasma BNP (log BNP) level was compared between the normal group (n = 130), the light nutritional disturbance group (n = 222), and the moderate-severe nutritional disturbance group (n = 130), after combining the moderate and severe nutritional groups. Assessment of short-term prognosis by CONUT score: We segregated the study patients into 3 groups: HF patients with in-hospital death, HF patients who were discharged after alleviation of symptoms, and HF patients who were transferred elsewhere for continued medical care. 
We examined whether the nutritional status of the patients, as assessed by the CONUT scores, was associated with in-hospital death. Cardiovascular death was defined as a death attributable to cardiovascular origin, and a noncardiovascular death was defined as a death attributable to reasons of non-cardiovascular origin (e.g., respiratory, gastrointestinal, renal, cancer-related, or infectious). With the exception of transferred HF patients, the median duration of hospital stay was 20 days (25th percentile, 14.5 days; 75th percentile, 30 days) for all other patients. The criterion of longer hospital stays was defined as more than or equal to 30 days (75th percentile duration of hospital stay). We compared the CONUT scores of HF patients with longer (!30 days) and shorter hospital stays (< 30 days). Statistical analysis: Continuous variables are expressed as the mean ± standard deviation if normally distributed and as the median (inter-quartile range) if non-normally distributed. Differences between 2 groups were compared using the unpaired Student's t-test or Mann-Whitney U-test, as appropriate. The chi-square test was used to compare categorical variables. Continuous variables were compared between the 3 study groups using a one-factor ANOVA test. The Games-Howell test was used to identify variables that differed significantly between the study groups. The Kruskal-Wallis test was used for non-normally distributed data. Logistic regression analysis was performed to deter- mine the significant predictors of in-hospital death in all HF patients except those transferred for continued medical care. A P value < 0.05 was considered statistically significant. All statistical analyses were performed using Stat View 5.0 for Windows and SPSS 21.0 for Windows. Results Baseline characteristics of study patients: Table II shows the baseline characteristics of the patients with the CONUT scores. 
The mean age was 71.7 ± 13.6 years, and male patients accounted for 61.8% (n = 298) of the study population. At the time of admission, based on the New York Heart Association functional classification, 50 patients were classified as class II, 174 patients as class III, and 253 patients were classified as class IV. The median plasma BNP level of the study population was 741.5 (387.0-1257.8) pg/mL, and as the distribution of the BNP level was highly skewed, we normalized the data through a logarithmic transformation (log BNP). The mean visual LVEF, as measured by echocardiography, was 40.5 ± 15.2%. The median CONUT score of the study population was 3. Of the 482 enrolled HF patients for whom CONUT scores could be calculated, 352 (73%) had nutritional disturbances (light, 46.1%; moderate, 23.9%; severe, 3.1%). The log BNP level was significantly higher in the moderate-severe nutritional disturbance group (2.92 ± 0.42) than in the normal group (2.72 ± 0.45, P < 0.01). In-hospital death occurred in 14 patients, of which 10 deaths (71.4%) were due to a cardiovascular origin (8 HF, 1 ventricular fibrillation due to myocardial ischemic attack, and 1 septic shock due to infective endocarditis), and 4 deaths (28.6%) were non-cardiovascular in origin (1 gastrointestinal bleeding, 1 sepsis due to gastrointestinal perforation, 1 pneumonia, and 1 bladder cancer). Evaluation of short-term prognosis by CONUT assessment: The 482 patients in the study population were categorized as follows: patients who suffered in-hospital death (n = 14), patients who were discharged after alleviation of HF symptoms (n = 446), and patients who were transferred elsewhere for continued medical care (n = 22). The clinical characteristics of the enrolled patients are shown in Table III. 
BMI, systolic blood pressure, hypertension history, plasma BNP level, serum albumin level, Creactive protein, CONUT score, and the use of positively inotropic agents or phosphodiesterase inhibitors differed significantly between the 3 groups. However, age, sex, heart rate, smoking status, HFrelated admission history, population of HF patients with ischemic etiology, population of patients with reduced LVEF, dyslipidemia, hemoglobin, percentages of lymphocytes, eGFR, total cholesterol, and visual LVEF and the use of carperitide, -blockers, and statins did not differ significantly between the 3 groups. Impact of nutritional screening using CONUT scores on in-hospital death events: With the exception of the transferred HF patients, logistic regression analysis was performed on data from all other patients to identify the significant predictors of in-hospital death. The CONUT score was associated with increased risk of in-hospital death in the unadjusted model (model 1) and in the age, sex, and log BNP adjusted model, respectively (model 2) (Table IV). In model 2, a higher CONUT score (P = 0.019) and higher log BNP (P = 0.009) were identified as significant predictors of in-hospital death. Association between hospitalization duration and CO-NUT scores: With the exception of the transferred HF patients, the CONUT scores were higher in patients with longer hospital stays (!30 days, n = 122) than in those with shorter hospital stays (< 30 days, n = 338) . The median hospitalization duration in the in-hospital death group was 29.5 days. Discussion In the present study, we examined whether patient nutritional status, assessed using CONUT scores, was associated with in-hospital death. Our results showed that a higher CONUT score was a significant predictor of in-Nishi, ET AL Results are expressed as mean ± standard deviation or as median (inter-quartile range). *P < 0.01; P < 0.05 versus patients discharged following symptom alleviation. 
P < 0.01 versus patients transferred for continued medical care. Data are missing for the following characteristics: BMI for 1 patient in the in-hospital death group, 12 patients in the discharged group, and 2 in the transferred group; BNP for 1 patient in the in-hospital death group, 60 patients in the discharged group, and 1 patient in the transferred group; C-reactive protein for 1 patient in the discharged group; LVEF for 1 patient in the in-hospital death group, 21 patients in the discharged group, and 2 patients in the transferred group. Abbreviations as in Table II. hospital death in hospitalized HF patients. Evidence for this association was that a higher CONUT score was associated with increased risk of in-hospital death in the unadjusted model (model 1) and in the age, sex, and log BNP adjusted model, respectively (model 2) (Table IV). Suzuki,et al. 9) observed that HF patients with higher CONUT scores tended to have longer hospital stays. Nochioka, et al. 13) reported that poor nutritional status was associated with increased incidence of death in HF patients classified as stage B in the AHA/ACC guidelines. Furthermore, Narumi,et al. 14) reported that the CONUT score was independently associated with the occurrence of cardiovascular events in chronic HF patients. These studies 9,13,14) support our observation that a higher CONUT score was a significant predictor of in-hospital death in hospitalized HF patients. In addition, we noted that undernutrition occurred frequently in hospitalized HF patients, and that 73% of HF patients in our study population had nutritional disturbances. Suzuki,et al. 9) identified nutritional disturbances in 95% of their HF-affected study population. Narumi, et al. 14) noted a malnutrition prevalence of 60-69% in chronic HF patients. Additionally, according to a recent review by Lin, et al. 6), the prevalence of malnutrition is higher in advanced HF, and in acute decompensated HF (75-90%). 
The undernutrition prevalence in our study population is comparable to those reported in the above-mentioned studies. 6,9,14) Therefore, supportive therapies should be promptly administered on the assumption that hospitalized HF patients often have undernutrition. Furthermore, as HF and undernutrition can each influence the other, once patients develop severe HF, their nutritional status deteriorates further. HF patients with undernutrition thus enter a vicious cycle of inflammation, catabolic drive, undernutrition, and HF exacerbation. 7) Strategies to improve nutritional status in the early stages of HF are thus a crucial component of HF management and are important in preventing HF exacerbation and improving patient prognosis. Nutritional screening should thus be performed at the earliest possible time to identify malnourished and nonmalnourished patients. In a recently published review by Lin, et al. 6), the use of various nutritional assessment and screening tools was compared in studies conducted on HF patients. The study compared the usage frequency of 2 nutritional assessment tools , 7 nutritional screening tools , and 2 other tools. The review noted that the most commonly used tool was the MNA (used in 5 studies), followed by GNRI (4 studies), NRI (3 studies), and the MNA-SF (2 studies). The meta-analysis based review 6) also noted that compared with the scores obtained from other nutritional assessment and screening tools, MNA scores were the strongest predictors of mortality in HF patients. However, as only 1 study on the CONUT score was included in the review by Lin, et al., 6) its value as a predictor of HF-related mortality may have been underestimated. As the MNA includes subjective data evaluated by medical staff, this index cannot be used conveniently in a routine clinical setting. 
Moreover, in HF patients who experience progressive loss of body weight due to incremental doses of diuretics, or in HF patients who experience weight gain due to fluid retention, the changes in body weight cannot be used reliably for nutritional assessment. Hypoalbuminemia results in pulmonary congestion and pleural effusion and is an aggravating factor for HF. A patient's albumin level can thus serve as a useful prognostic indicator. 7) Bonilla-Palomas, et al. have demonstrated an association between hypoalbuminemia (albumin !3.4 g/dL) and poor outcomes in patients with acute HF. 15) However, hypoalbuminemia in HF patients often does not resolve despite sufficient energy and protein supplementation. 7) In such patients, the albumin levels may not correctly assess the appropriateness of the ongoing nutritional therapy. 7) The levels of total cholesterol, hemoglobin, and lymphocyte counts are used to assess nutritional status. However, these indices alone cannot provide a comprehensive and accurate indication of a patient's nutritional status. The CONUT score is a unique index that focuses on protein metabolism, lipid metabolism, and immune parameters. As the CONUT score utilizes albumin levels, the score may represent not only the nutritional status but also the severity of HF. Our study shows that a higher CO-NUT score and a higher BNP level are significant predictors of in-hospital death. Therefore, we recommend an initial nutritional screening of HF patients using a simple CONUT screening. A more detailed examination using for example computerized tomography (CT) images, bioelectrical impedance analysis (BIA), or dual-energy X-ray absorption (DXA) may be performed for patients in whom the CONUT scores indicate undernutrition. Despite an abundance of evidence on the relation between undernutrition and immunologic dysfunction, the exact mechanism of interaction remains unclear. 
In general, severe undernutrition causes atrophy of all lymphoid tissues, including the thymus, tonsils, and lymph nodes. 16) Cell-mediated immunity is diminished more than antibody production. 16) Normal humoral immunity requires adequate function of B-lymphocytes under proper regulation of Thelper lymphocytes. 17) Law, et al. have reported that both the T and B systems were impaired in adults with moderate protein calorie malnutrition, and it was restored by nutritional repletion. 18) Study limitations: The total number of patients enrolled in the ICAS-HF registry and the number of in-hospital death events were not large. Therefore, the number of indices that could be incorporated into the logistic analysis models was small. Additionally, as the CONUT score is a relatively new index of nutritional status measurement, further validation and the establishment of reference scores are required. Ignacio de Ulbarri, et al. demonstrated an agreement between the CONUT score and 2 other classical nutritional assessment methods, the Subjective Global Assessment and the Full Nutritional Assessment. 12) In our study, however, we did not examine the extent of agreement between the CONUT scores and other screening tools such as CT, BIA, and DXA. Moreover, we did not exclude comorbid diseases such as nephrotic syndrome, statin use, the presence of infectious diseases, and blood disorders, which can affect the levels of albumin and cholesterol, and also the lymphocyte count. |
/**
* Process an event that was published by the Asset Consumer OMAS.
*
* @param event event object - call getEventType to find out what type of event.
*/
public void processEvent(AssetConsumerEvent event)
{
if (event.getEventType() == AssetConsumerEventType.NEW_ASSET_EVENT)
{
NewAssetEvent assetEvent = (NewAssetEvent)event;
System.out.println("EVENT: " + assetEvent.getEventType().getEventTypeName() + " - for asset " + assetEvent.getAsset().getGUID());
}
else if (event.getEventType() == AssetConsumerEventType.UPDATED_ASSET_EVENT)
{
UpdatedAssetEvent assetEvent = (UpdatedAssetEvent)event;
System.out.println("EVENT: " + assetEvent.getEventType().getEventTypeName() + " - for asset " + assetEvent.getAsset().getGUID() + " - at " + assetEvent.getUpdateTime());
}
} |
<reponame>huxuan0307/Reactor-network-library<gh_stars>0
#ifndef _EVENTLOOP_H_
#define _EVENTLOOP_H_
#include "noncopyable.h"
#include <functional>
#include <thread>
#include <vector>
#include <memory>
using namespace std::this_thread;
using std::enable_shared_from_this;
class Channel;
class Poller;
/// Reactor-pattern event loop bound to the thread that constructs it.
/// Owns a Poller and a list of channels reported ready by the last poll.
/// NOTE(review): the header-scope `using namespace std::this_thread;` above is
/// not needed by this declaration (get_id() is fully qualified below) — confirm
/// whether any includer relies on it before removing.
class EventLoop:public enable_shared_from_this<EventLoop>
{
NONCOPYABLE(EventLoop)  // the loop is tied to one thread; copying makes no sense
public:
/// Callback type for work items executed by the loop.
using event_t = std::function<void()>;
EventLoop();
~EventLoop();
/// Run the loop; presumably blocks polling/dispatching until quit() — see EventLoop.cpp.
void loop();
/// Request the loop to stop (implementation in EventLoop.cpp; presumably sets quit_).
void quit();
/// Hard precondition check: terminates via abortNotInLoopThread() when
/// called from any thread other than the one that created this loop.
void assertInLoopThread(){
if(!isInLoopThread()){
abortNotInLoopThread();
}
}
/// True when the calling thread is the owning thread recorded at construction.
bool isInLoopThread() const{return threadId_ == std::this_thread::get_id();}
/// Loop associated with the current thread, if any (defined in EventLoop.cpp).
EventLoop* getEventLoopOfCurrentThread();
/// Register/refresh a channel with the loop — presumably forwarded to poller_; confirm in .cpp.
void updateChannel(std::weak_ptr<Channel>);
private:
// Non-owning views of channels; lifetime is managed elsewhere (weak_ptr).
using ChannelList = std::vector<std::weak_ptr<Channel>>;
void abortNotInLoopThread();  // cross-thread misuse handler; name implies it aborts
bool looping_;  // presumably true while loop() is running — confirm in .cpp
bool quit_;     // presumably the flag quit() sets to break the loop — confirm in .cpp
const std::thread::id threadId_;  // owning thread id, fixed at construction (const)
std::unique_ptr<Poller> poller_;  // owned I/O multiplexer
ChannelList activeChannels_;      // channels ready after the latest poll
static const int kPOllerTime_ms;  // poll timeout in ms (note: "POller" typo kept — renaming would touch the .cpp)
};
#endif |
Overview of lasers Laser surgery of skin conditions having cosmetic implications has revealed the profound psychological benefits which are unmatched by any other modality of treatment either with or without a knife. An increasingly sophisticated understanding of the biophysics of laser-tissue interactions has led to a more efficient utilization of the present technology on the clinical side and at the same time is helping the physicists to add more and more highly selective laser systems to the armamentarium of aesthetic laser surgeons. This article provides a general overview of lasers in skin and cosmetology and discusses their current clinical applications from a plastic surgeon's point of view.
<gh_stars>0
package org.diverproject.scarlet.util;
import static org.diverproject.scarlet.util.language.NumberUtilsLanguage.FLOAT_PARSER;
import static org.diverproject.scarlet.util.language.NumberUtilsLanguage.FLOAT_PARSER_PARSE;
import static org.diverproject.scarlet.util.language.NumberUtilsLanguage.FLOAT_PARSER_PATTERN;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.diverproject.scarlet.util.exceptions.NumberUtilsRuntimeException;
public class FloatUtils extends NumberUtils
{
private FloatUtils() { }
public static boolean isSafeFloat(String str)
{
if (!isFloatFormat(str))
return false;
return getPattern().matcher(str).find();
}
public static boolean isAllSafeFloat(String[] array)
{
for (String str : array)
if (!isSafeFloat(str))
return false;
return true;
}
public static float parseFloat(String str)
{
return parseFloat(str, null);
}
public static float parseFloat(String str, Float failValue)
{
try {
return Float.parseFloat(str);
} catch (NumberFormatException e) {
if (failValue != null)
return failValue;
throw new NumberUtilsRuntimeException(e, FLOAT_PARSER, str);
}
}
public static float parseFloatObject(String str)
{
return parseFloatObject(str, null);
}
public static float parseFloatObject(String str, Float failValue)
{
return StringUtils.isEmpty(str) ? null : parseFloat(str, failValue);
}
public static float parseFloat(String str, int floatType)
{
return parseFloat(str, floatType, null);
}
public static float parseFloat(String str, int floatType, FloatParser floatParser)
{
String raw = str;
Pattern pattern = (
BitwiseUtils.has(floatType, FloatUtils.DECIMAL_DOT_TYPE) &&
BitwiseUtils.has(floatType, FloatUtils.DECIMAL_COMMA_TYPE) ? (PATTERN_ANY) : (
BitwiseUtils.has(floatType, FloatUtils.DECIMAL_DOT_TYPE) ? (PATTERN_DOT) : (
BitwiseUtils.has(floatType, FloatUtils.DECIMAL_COMMA_TYPE) ? (PATTERN_COMMA) :
null
)
)
);
if (pattern == null)
throw new NumberUtilsRuntimeException(FLOAT_PARSER_PATTERN, floatType);
final Matcher matcher = pattern.matcher(str);
if (!matcher.find())
throw new NumberUtilsRuntimeException(FLOAT_PARSER_PARSE, str);
if (floatParser != null)
{
String signal = matcher.group("signal");
floatParser.setRaw(raw);
floatParser.setPositive(signal.isEmpty() || signal.equals("+"));
floatParser.setExpression(!matcher.group("expoent").isEmpty());
floatParser.setValue(matcher.group("value"));
if (floatParser.isExpression())
{
String expoentSignal = matcher.group("expoentSignal");
floatParser.setExpoentPositive(expoentSignal.isEmpty() || expoentSignal.equals("+"));
floatParser.setExpoent(Integer.parseInt(matcher.group("expoentValue")));
}
}
if (BitwiseUtils.has(floatType, FloatUtils.DECIMAL_COMMA_TYPE))
raw = raw.replace(",", ".");
return Float.parseFloat(raw);
}
public static float capMin(float value, float minValue)
{
return value < minValue ? minValue : value;
}
public static float capMax(float value, float maxValue)
{
return value > maxValue ? maxValue : value;
}
public static float cap(float value, float minValue, float maxValue)
{
return capMin(capMax(value, maxValue), minValue);
}
public static boolean hasMin(float value, float min)
{
return value >= min;
}
public static boolean hasMax(float value, float maxValue)
{
return value <= maxValue;
}
/**
 * Range check: true when {@code value} lies inside the closed interval
 * [{@code minValue}, {@code maxValue}] (bound checks inlined).
 */
public static boolean hasBetween(float value, float minValue, float maxValue)
{
    return (value >= minValue) && (value <= maxValue);
}
}
|
Breed Related Hospital Prevalence of Theileria annulata infection in Cattle-calves Confirmed by Nested Polymerase Chain Reaction in Bikaner District of Rajasthan, India Tropical theileriosis, also known as Mediterranean coast fever, is an extremely fatal and debilitating tick-transmitted disease infecting cattle (). This hemoparasitic infection is caused by Theileria annulata and is responsible for substantial production losses (). About 250 million cattle are at risk to Tropical theileriosis worldwide (). This intracellular infection inflicts economic burden on cattle breeders in terms of mortality and morbidity as well as expenses spent on prophylactic measures against disease and treatment (). Theileria annulata was described in Transcaucasian cattle in 1904 and was first named Piroplasma annulatum. It was reclassified as T. annulata after identification of schizont stage in its lifecycle. Introduction Tropical theileriosis, also known as Mediterranean coast fever, is an extremely fatal and debilitating tick-transmitted disease infecting cattle (). This hemoparasitic infection is caused by Theileria annulata and is responsible for substantial production losses (). About 250 million cattle are at risk to Tropical theileriosis worldwide (). This intracellular infection inflicts economic burden on cattle breeders in terms of mortality and morbidity as well as expenses spent on prophylactic measures against disease and treatment (). Theileria annulata was described in Transcaucasian cattle in 1904 and was first named Piroplasma annulatum. It was reclassified as T. annulata after identification of schizont stage in its lifecycle. Theileria parasites enter the bovine host during tick feeding as sporozoites, which rapidly invade mononuclear leukocytes. Here, they mature into macroschizonts and induce proliferation of the host cell. Macroschizonts develop further into microschizonts and ultimately into merozoites, which are released from the leukocyte. 
The merozoites invade erythrocytes and develop into piroplasms. Tropical theileriosis is a lymphoproliferative disease in its early phases and is accompanied by enlargement of lymph nodes. On development of pyrexia, a lymphodestructive phase which is associated with a pronounced leukopenia is initiated. The disease is further characterized by a marked anemia (Tait and Hall, 1990). T. annulata infection is characterized by high fever, weakness, weight loss, inappropriate appetite, conjunctival petechia, enlarged lymph nodes and anaemia. Lateral recumbency, diarrhea and dysentery are also associated with later stages of infection (). Bos indicus (Zebu cattle) is naturally more resistant to tick infestation as compared to Bos taurus. This tick resistance in Zebu cattle may be due to presence of significantly higher serum complement level in their blood as compared to cross-bred cattle (). The ability of indigenous cattle to resist tropical theileriosis, coupled with the fact that cell line vaccination is successful in protecting otherwise susceptible stock, demonstrates that the bovine immune system is capable of mounting an effective response to both initial and subsequent infection. These results suggest that resistant breeds of cattle may possess a degree of innate immunity, while vaccinated exotic stock rely on an acquired response following either vaccination or primary challenge. However, evidence to date indicates that the innate and adaptive bovine immune responses act against both primary and secondary challenge. This contrasts with the situation in T. parva, where immunity is afforded principally by the adaptive response of cytotoxic T-cells. Young calves below two months of age were found to be most susceptible to the disease. Cases mainly occurred between the months of March to October every year, coinciding with the period of high activity of the vector ticks (). 
Lower prevalence and milder form of theileriosis in indigenous as compared to cross-bred cattle was believed to be associated with the lower acute phase protein (APP) responses controlled by macrophage cytokines in these animals. There was prolonged elevation of the proinflammatory cytokine dependent APP, 1glycoprotein (AGP) in exotic and cross-bred cattle, which in contrast was only slightly elevated in the indigenous breed (Glass and Coussens, 2005). Materials and Methods One hundred cattle-calves, out of which 37 cross bred and 63 indigenous, were screened for Theileria annulata infection during October-2015 to September-2016 brought to Teaching Veterinary Clinical Complex, College of Veterinary and Animal Science, Bikaner. Blood samples were collected in ethylene diamine tetra acetic acid (EDTA @1mg/ml) vacutainers from all the suspected cattle-calves for genomic DNA isolation. Genomic DNA were isolated using QIAamp® DNA blood mini kit (QIAGEN, GmbH, Germany) as per Procedure. Aliquots of extracted DNA were kept at -20°C and subjected for confirmatory diagnosis by nested polymerase chain reaction (nPCR). Results and Discussion In the present study, prevalence of Theileria annulata infection was higher in cross-bred cattle-calves (51.35 per cent) as compared to indigenous breeds (34.92 per cent). Similar findings were also reported by Beniwal et al., ; Glass and Coussens and Tuli et al.,. Low prevalence of disease in indigenous as compared to exotic and cross-bred cattle could be due to low acute phase protein (APP) responses controlled by macrophage cytokines in these animals. There is prolonged elevation of the pro-inflammatory cytokine dependent APP, 1 -glycoprotein (AGP) in exotic and cross-bred cattle, which in contrast is only slightly elevated in the indigenous breed (Glass and Coussens, 2005). 
Raised levels of α1-glycoprotein are associated with chronic inflammatory conditions () and thus indicative of high systemic levels of pro-inflammatory cytokines in the susceptible breeds. It is believed that APP responses are induced by pro-inflammatory cytokines such as IL-1, IL-6 and TNF (Gabay and Kushner, 1999). Thus it seems likely that Theileria annulata infection will lead to their production. Prevalence is also influenced by cattle breed as cattle usually differ in tick resistance and innate susceptibility to infection (). Bos indicus (Zebu cattle) is naturally more resistant to tick infestation as compared to Bos taurus/cross-bred cattle. This may be due to presence of significantly higher complement level in their blood as compared to cross-bred cattle (). Breed-wise prevalence of Theileria annulata infection in cattle-calves is presented in Table 1 and Figure 1. Biotechnology and Dr. G.C. Gahlot, Professor and Head, Department of Animal Genetics and Breeding, College of Veterinary and Animal Science, Bikaner, Rajasthan for providing necessary facilities to carry out the present investigation. This study is financially supported by the Rajasthan University of Veterinary and Animal Sciences, Bikaner, Rajasthan.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:log
Author:jasonhaven
date:2018/4/17
-------------------------------------------------
Change Activity:2018/4/17:
-------------------------------------------------
"""
import logging
import os
import datetime
import shutil
# Directory that receives all log files.
_logs = "logs"
# strftime pattern producing one log file per day, e.g. logs/2018-04-17.log.
_filefmt = os.path.join(_logs, "%Y-%m-%d.log")
class LoggerHandler(logging.Handler):
    """Logging handler that appends each record to a date-stamped file.

    The target path is produced by formatting ``filefmt`` with
    ``datetime.strftime`` (default: ``logs/%Y-%m-%d.log``), so the output
    file rolls over naturally when the date changes.
    """

    def __init__(self, filefmt=None):
        # Fall back to the module-level default pattern when none is given.
        self.filefmt = filefmt
        if filefmt is None:
            self.filefmt = _filefmt
        logging.Handler.__init__(self)

    def emit(self, record):
        """Format ``record`` and append it to today's log file.

        Failures are reported to stdout rather than raised, mirroring the
        original best-effort behaviour.
        """
        # NOTE(review): the emitted line names this module's own file, not
        # the caller's (record.pathname would give the caller) -- kept as-is.
        msg = "{} {} {} : {}".format(datetime.datetime.now().strftime("%H:%M:%S"),
                                     os.path.abspath(__file__).split(os.sep)[-1], record.levelname,
                                     record.getMessage())
        _filePath = datetime.datetime.now().strftime(self.filefmt)
        _dir = os.path.dirname(_filePath)
        try:
            # exist_ok avoids the check-then-create race of the original;
            # the guard skips makedirs('') when the path has no directory part.
            if _dir:
                os.makedirs(_dir, exist_ok=True)
        except Exception:
            print("can not make dirs")
            print("filepath is " + _filePath)
        try:
            # 'with' guarantees the handle is closed even when a write fails
            # (the original leaked the open handle on a failed write).
            with open(_filePath, 'a', encoding='utf-8') as fobj:
                fobj.write(msg)
                fobj.write("\n")
        except Exception:
            print("can not write to file")
            print("filepath is " + _filePath)
class Logger():
    """Small facade that wires a ``LoggerHandler`` into stdlib logging."""

    def __init__(self, isclean=False):
        '''
        :param isclean: when True, wipe the contents of the log directory
                        before any logging starts
        '''
        self.logging_format = '[%(levelname)s %(asctime)s %(module)s:%(lineno)d]: %(message)s'
        self.date_format = '%y-%m-%d %H:%M:%S'
        self.filehandler = LoggerHandler()
        if isclean:
            if os.path.exists(_logs):
                self.clean_logs()

    def get_logger(self):
        """Return the shared 'logger' instance with the file handler attached."""
        logging.basicConfig(
            level=logging.INFO,
            format=self.logging_format,
            datefmt=self.date_format
        )
        logger = logging.getLogger("logger")
        # BUGFIX: only attach the handler once. The original appended a new
        # handler on every get_logger() call, so each call duplicated every
        # subsequent line in the log file.
        if self.filehandler not in logger.handlers:
            logger.addHandler(self.filehandler)
        return logger

    def clean_logs(self):
        """Delete every file and subdirectory inside the log directory."""
        delDir = _logs
        delList = os.listdir(delDir)
        for f in delList:
            filePath = os.path.join(delDir, f)
            if filePath and os.path.isfile(filePath):
                os.remove(filePath)
                print(filePath + " was removed!")
            elif filePath and os.path.isdir(filePath):
                # Second arg ignore_errors=True: best-effort removal.
                shutil.rmtree(filePath, True)
                print("Directory: " + filePath + " was removed!")
if __name__ == '__main__':
    # Smoke test: wipe old logs, then emit one line at each level.
    demo_logger = Logger(True).get_logger()
    demo_logger.info("info")
    demo_logger.error("error")
|
. Renal transplantation is hampered worldwide by the continuing lack of cadaveric organs. The discrepancy between the number of patients on the waiting list and the number of organs available is further compounded by the still unresolved problem of chronic transplant failure. Against this background, the arguments for increasing acceptance of the use of kidneys from living donors, both related and unrelated, are discussed. Initial reports on appreciably improved transplant survival rates of organs from unrelated living donors (85% survival after 3 years) have since been confirmed by more recent studies. Our own results, in part obtained during a prospective study involving 103 patients (53 related, 50 unrelated) done between October 1994 and April 1999, with strict psychological care/evaluation prior to and after transplantation, revealed a four-year transplant survival rate of 98% in both groups. So far, the higher rejection rate of 34% in unrelated, vs. 13.2% in related, donors has not led to any earlier chronic dysfunction of the transplant. The expanded use of living kidney donors is not only ethically justifiable, but also improves the outcome.
Posterior reversible encephalopathy syndrome after kidney transplantation in pediatric recipients: Two cases PRES is a neuroclinical and radiological syndrome that can result as a consequence of several different conditions including hypertension, fluid overload, and immunosuppressive treatment. Herein, we report two children who received kidney and combined liverkidney transplantation as treatment for renal hypodysplasia associated with bilateral vesicoureteral reflux and methylmalonic acidemia, respectively. Early after surgery (seven and 10 days), both patients presented with hypertension and seizures. The patients' immunosuppressive regimen included steroid and calcineurin inhibitors (tacrolimus and cyclosporine, respectively) and basiliximab and one with antiIL2 receptor. In both cases, the imaging strongly supported the diagnosis of PRES. In details, the CT scan showed hypodensities in the posterior areas of the brain, and brain MRI demonstrated parietooccipital alterations indicative of vasogenic edema. Treatment with calcineurin inhibitors was temporally discontinued and restarted at lower dosage; arterial hypertension was treated with Cachannel blockers. Both children fully recovered without any neurological sequels. In conclusion, in children undergoing solid organ transplantation, who develop neurological symptoms PRES, should be carefully considered in the differential diagnosis and once the diagnosis is ruled in, we recommend strict arterial blood pressure control and adjustment or withholding of calcineurin inhibitor therapy should be considered based upon blood levels. |
The present application relates generally to power systems and, more particularly, to power distribution systems and methods of operating a power distribution system.
Known electrical distribution systems include a plurality of switchgear lineups including circuit breakers that are each coupled to one or more loads. The circuit breakers typically include a trip unit that controls the circuit breakers based upon sensed current flowing through the circuit breakers. More specifically, the trip unit causes current flowing through the circuit breaker to be interrupted if the current is outside of acceptable conditions defined by current and time thresholds.
For example, at least some known circuit breakers are programmed with one or more current thresholds (also known as “pickup” thresholds) that identify undesired current levels for the circuit breaker. If a fault draws current in excess of one or more current thresholds for a predetermined amount of time, for example, the trip unit typically activates the associated circuit breaker to stop current from flowing through the circuit breaker or initiates a timing sequence that eventually activates the circuit breaker if the threshold is exceeded for a predetermined amount of time. However, in power distribution systems that include a plurality of circuit breakers, a typical arrangement uses a hierarchy of circuit breakers. Large circuit breakers (i.e., circuit breakers with a high current rating) that are positioned closer to a power source than a plurality of lower current feeder circuit breakers feed the lower current feeder circuit breakers. Each feeder circuit breaker may feed a plurality of other circuit breakers, which connect to loads or other distribution equipment.
A fault may occur anywhere in the circuit breaker hierarchy. When a fault occurs, each circuit breaker that has the same fault current flowing through it may measure the same fault current differently as a result of varying sensor and circuit tolerances. When the fault occurs, the circuit breaker closest to the fault should trip to stop current from flowing through the circuit breaker. If a circuit breaker higher in the hierarchy, that is, closer to the source than the circuit breaker closest to the fault, trips, multiple circuits or loads will unnecessarily lose service.
To accommodate for the varying tolerances and to ensure that multiple circuit breakers do not unnecessarily trip based on the same fault current due to measurement variance, the current thresholds of at least some known circuit breakers are nested with each other to avoid overlapping fault current thresholds. For example, thresholds for circuit breakers at upper levels of the hierarchy typically are higher than the thresholds for circuit breakers at lower levels of the hierarchy to avoid overlapping thresholds. The nested fault current thresholds cause circuit breakers at higher tiers or levels of the hierarchy to have increasingly higher current thresholds. Accordingly, circuit breakers at higher tiers may not be able to detect fault currents that lower tier circuit breakers may detect. In this way, the circuit breaker closest to the fault will operate in response to the fault and will have a lower fault current threshold than upper level circuit breakers. If a fault occurs at a higher level in the hierarchy, for example, between a feeder and a branch or between a main breaker and a feeder, the system may have a reduced fault detection sensitivity because the circuit breakers at the higher levels of the hierarchy have higher fault current thresholds that may not detect a damaging fault current within the higher levels. |
<reponame>KazutakaYamanouchi/bachelor-study
# 標準モジュール
import argparse
import csv
from datetime import datetime
from logging import (
getLogger, basicConfig,
DEBUG, INFO, WARNING
)
from pathlib import Path
import random
import sys
from time import perf_counter
# 追加モジュール
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from tqdm import tqdm
# 自作モジュール
from models.generator import Generator
from models.discriminator import Discriminator
import utils.dwt as dwt
import utils.dct as dct
import utils.fft as fft
# コマンドライン引数を取得するパーサー
parser = argparse.ArgumentParser(
prog='PyTorch Generative Adversarial Network',
description='PyTorchを用いてGANの画像生成を行います。'
)
# 訓練に関する引数
parser.add_argument(
'-b', '--batch-size', help='バッチサイズを指定します。',
type=int, default=100, metavar='B'
)
parser.add_argument(
'-e', '--num-epochs', help='学習エポック数を指定します。',
type=int, default=50, metavar='E'
)
parser.add_argument(
'--lr-scale', help='初期学習率のスケーリング係数を指定します。'
'lr = default_lr * lr_scale / batch_size',
type=int, default=0
)
parser.add_argument(
'-np', '--num_progress', help='何層分周波数分解するかを指定します。(5~0)',
type=int, default=5
)
parser.add_argument(
'--dataset', help='データセットを指定します。',
type=str, default='cifar10',
choices=['mnist', 'fashion_mnist', 'cifar10', 'stl10', 'imagenet2012']
)
parser.add_argument(
'--data-path', help='データセットのパスを指定します。',
type=str, default='~/.datasets/vision'
)
parser.add_argument(
'--seed', help='乱数生成器のシード値を指定します。',
type=int, default=999
)
# 入力に関するコマンドライン引数
parser.add_argument(
'-id', '--input_dir', help='入力ディレクトリの名前を指定します。',
type=str, default=None,
)
# 出力に関するコマンドライン引数
parser.add_argument(
'--dir-name', help='出力ディレクトリの名前を指定します。',
type=str, default=None,
)
parser.add_argument(
'--nz', help='潜在空間の次元を指定します。',
type=int, default=256
)
# 画像生成
parser.add_argument(
'--num-samples', help='結果を見るための1クラス当たりのサンプル数を指定します。',
type=int, default=49
)
parser.add_argument(
'--sample-interval', help='生成画像の保存間隔をエポック数で指定します。',
type=int, default=10,
)
# モデルの保存
parser.add_argument(
'--dct', help='dctにて周波数分解を実行します。',
action='store_true'
)
parser.add_argument(
'--fft', help='fftにて周波数分解を実行します。',
action='store_true'
)
parser.add_argument(
'--save', help='訓練したモデルを保存します。',
action='store_true'
)
parser.add_argument(
'--lossy', help='可逆モードで実行します。',
action='store_true'
)
parser.add_argument(
'--lg', help='指定したパスのGeneratorのセーブファイルを読み込みます。',
action='store_true'
)
parser.add_argument(
'--ld', help='指定したパスのDiscriminatorのセーブファイルを読み込みます。',
action='store_true'
)
parser.add_argument(
'--info', help='ログ表示レベルをINFOに設定し、詳細なログを表示します。',
action='store_true'
)
parser.add_argument(
'--debug', help='ログ表示レベルをDEBUGに設定し、より詳細なログを表示します。',
action='store_true'
)
# コマンドライン引数をパースする
args = parser.parse_args()
# 結果を出力するために起動日時を保持する
LAUNCH_DATETIME = datetime.now()
# ロギングの設定
basicConfig(
format='%(asctime)s %(name)s %(funcName)s %(levelname)s: %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=DEBUG if args.debug else INFO if args.info else WARNING,
)
# 名前を指定してロガーを取得する
logger = getLogger('main')
batch_size = args.batch_size
num_progress = args.num_progress
num_epochs = args.num_epochs
workers = 2
nc = 3
lr_scale = args.lr_scale
nz = args.nz
lr_g = 0.001
ngf = 64
lr_d = 0.001
ndf = 64
if args.input_dir is not None:
INPUT_DIR = Path(
f'./outputs/{args.dataset}/{args.input_dir}/models/{num_progress+1}')
if args.lg:
load_generator = INPUT_DIR.joinpath('generator.pt')
if args.ld:
load_discriminator = INPUT_DIR.joinpath('discriminator.pt')
# 出力に関する定数
if args.dir_name is None:
OUTPUT_DIR = Path(
LAUNCH_DATETIME.strftime(
f'./outputs/{args.dataset}/%Y%m%d%H%M%S'))
else:
OUTPUT_DIR = Path(f'./outputs/{args.dataset}/{args.dir_name}')
OUTPUT_DIR.mkdir(parents=True)
logger.info(f'結果出力用のディレクトリ({OUTPUT_DIR})を作成しました。')
f_outputs = open(
OUTPUT_DIR.joinpath('outputs.txt'), mode='w', encoding='utf-8')
f_outputs.write(' '.join(sys.argv) + '\n')
OUTPUT_SAMPLE_DIR = OUTPUT_DIR.joinpath('samples')
OUTPUT_SAMPLE_DIR.mkdir(parents=True)
logger.info(f'画像用のディレクトリ({OUTPUT_SAMPLE_DIR})を作成しました。')
if args.save:
OUTPUT_MODEL_DIR = OUTPUT_DIR.joinpath('models')
OUTPUT_MODEL_DIR.mkdir(parents=True)
logger.info(f'モデル用のディレクトリ({OUTPUT_MODEL_DIR})を作成しました。')
# 乱数生成器のシード値の設定
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# TODO: 完成したらコメントを外す
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
logger.info('乱数生成器のシード値を設定しました。')
# Select the compute device with a CPU fallback. BUGFIX: the original
# hard-coded 'cuda', which crashes at the first .to(device) on machines
# without a GPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
logger.info(f'メインデバイスとして〈{device}〉が選択されました。')
logger.info('画像に適用する変換のリストを定義します。')
data_transforms = []
to_tensor = transforms.ToTensor()
data_transforms.append(to_tensor)
# Map pixels from [0, 1] to [-1, 1] -- presumably matching the generator's
# output range; TODO confirm against models/generator.py.
normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
data_transforms.append(normalize)
logger.info('変換リストに正規化を追加しました。')
# dataset = dset.STL10(
#     root=args.data_path, split='train',
#     transform=transforms.Compose(data_transforms), download=True)
dataset = dset.CIFAR10(
    root=args.data_path, train=True,
    transform=transforms.Compose(data_transforms), download=True)
# Infer channel count and spatial size from the dataset's first sample.
nc, h, w = dataset[0][0].size()  # dataset[0][0].size() = (C, H, W)
dataloader = torch.utils.data.DataLoader(
    dataset, batch_size=batch_size,
    shuffle=True, drop_last=True, num_workers=workers)
logger.info('データローダを生成しました。')
print(len(dataset))
# =========================================================================== #
# Model definitions
# =========================================================================== #
model_g = Generator(
    nz=nz, nc=nc
).to(device)
print(model_g)
# Optionally restore Generator parameters from --input_dir.
if args.lg:
    checkpoint = torch.load(load_generator)
    state_dict = checkpoint['model_state_dict']
    model_g.load_state_dict(state_dict)
    logger.info('Generatorのパラメータをロードしました。')
model_d = Discriminator(
    nc=nc
).to(device)
print(model_d)
# Optionally restore Discriminator parameters from --input_dir.
if args.ld:
    checkpoint = torch.load(load_discriminator)
    state_dict = checkpoint['model_state_dict']
    model_d.load_state_dict(state_dict)
    logger.info('Discriminatorのパラメータをロードしました。')
# =========================================================================== #
# Optimizer definitions (Adam, beta1=0.5 as is common for GANs)
# =========================================================================== #
optim_g = torch.optim.Adam(
    model_g.parameters(),
    lr=lr_g,
    betas=[0.5, 0.999]
)
optim_d = torch.optim.Adam(
    model_d.parameters(),
    lr=lr_d,
    betas=[0.5, 0.999]
)
# Fixed latent vectors reused every sampling epoch, so the sample grids are
# comparable across epochs.
sample_z = torch.randn(args.num_samples, nz, device=device)
# Per-epoch metrics CSV.
f_results = open(
    OUTPUT_DIR.joinpath('results.csv'), mode='w', encoding='utf-8')
csv_writer = csv.writer(f_results, lineterminator='\n')
result_items = [
    'Epoch',
    'Generator Loss Mean', 'Discriminator Loss Mean',
    'Train Elapsed Time'
]
csv_writer.writerow(result_items)
# Column-name -> column-index lookup used when filling each row.
csv_idx = {item: i for i, item in enumerate(result_items)}
# Frequency-decomposition operator: DCT, FFT, or (default) DWT.
if args.dct:
    wavelet = dct.DCT(num_progress)
elif args.fft:
    wavelet = fft.FFT()
else:
    wavelet = dwt.DWT(args.lossy)
wavelet = wavelet.to(device)
# =========================================================================== #
# 訓練
# =========================================================================== #
for epoch in range(num_epochs):
    # One CSV row per epoch, filled by column name via csv_idx.
    results = ['' for _ in range(len(csv_idx))]
    results[csv_idx['Epoch']] = f'{epoch + 1}'
    log_loss_g, log_loss_d = [], []
    pbar = tqdm(
        enumerate(dataloader),
        desc=f'[{epoch+1}/{num_epochs}] 訓練開始',
        total=len(dataset)//batch_size,
        leave=False)
    model_g.train()  # switch Generator to training mode
    model_d.train()  # switch Discriminator to training mode
    begin_time = perf_counter()  # start timing the epoch
    for i, (real_images, _) in pbar:
        real_images = real_images.to(device)
        # Frequency-decompose the real batch down to the current level.
        real_images = wavelet(real_images, num_progress)
        z = torch.randn(batch_size, nz, device=device)
        fake_images = model_g(z)
        #######################################################################
        # Discriminator update (hinge loss)
        #######################################################################
        model_d.zero_grad()
        # Real branch: penalise scores below +1.
        pred_d_real = model_d(real_images)
        loss_d_real = F.relu(1.0 - pred_d_real).mean()
        # Fake branch (detached so no gradient reaches G here).
        pred_d_fake = model_d(fake_images, detach=True)
        loss_d_fake = F.relu(1.0 + pred_d_fake).mean()
        loss_d = loss_d_real + loss_d_fake
        loss_d.backward()
        log_loss_d.append(loss_d.item())
        optim_d.step()
        #######################################################################
        # Generator update (maximise D's score on fakes)
        #######################################################################
        model_g.zero_grad()
        pred_g = model_d(fake_images)
        loss_g = -pred_g.mean()
        loss_g.backward()
        log_loss_g.append(loss_g.item())
        optim_g.step()
        # Refresh the progress-bar caption with the latest losses.
        pbar.set_description_str(
            f'[{epoch+1}/{num_epochs}] 訓練中... '
            f'<損失: (G={loss_g.item():.016f}, D={loss_d.item():.016f})>')
    end_time = perf_counter()  # stop timing the epoch
    pbar.close()
    loss_g_mean = np.mean(log_loss_g)
    loss_d_mean = np.mean(log_loss_d)
    results[csv_idx['Generator Loss Mean']] = f'{loss_g_mean:.016f}'
    results[csv_idx['Discriminator Loss Mean']] = f'{loss_d_mean:.016f}'
    train_elapsed_time = end_time - begin_time
    results[csv_idx['Train Elapsed Time']] = f'{train_elapsed_time:.07f}'
    print(
        f'[{epoch+1}/{num_epochs}] 訓練完了. '
        f'<エポック処理時間: {train_elapsed_time:.07f}[s/epoch]'
        f', 平均損失: (G={loss_g_mean:.016f}, D={loss_d_mean:.016f})>')
    model_g.eval()
    model_d.eval()
    # Save a sample grid on the first epoch, every --sample-interval epochs,
    # and on the last epoch.
    if (
        epoch == 0
        or (epoch + 1) % args.sample_interval == 0
        or epoch == num_epochs - 1
    ):
        sample_dir = OUTPUT_SAMPLE_DIR.joinpath(f'{epoch + 1}')
        sample_dir.mkdir()
        with torch.no_grad():
            sample_images = model_g(sample_z).cpu()
        # NOTE(review): file name uses the 0-based epoch while the directory
        # is 1-based; 'range' was renamed 'value_range' in torchvision>=0.10.
        vutils.save_image(
            sample_images,
            sample_dir.joinpath(f'{epoch}.png'),
            nrow=int(np.sqrt(args.num_samples)),
            range=(-1.0, 1.0),
            normalize=True
        )
        logger.info('画像を生成しました。')
    csv_writer.writerow(results)
    f_results.flush()
    # BUGFIX: the original re-assigned OUTPUT_MODEL_DIR unconditionally on
    # every epoch, which (a) raised NameError when --save was not given
    # (the name is only defined under args.save) and (b) nested the path as
    # models/<np>/<np>/... so the final mkdir/save failed. The per-level
    # directory is now derived once, inside the save branch.
    if args.save and (epoch == num_epochs - 1):
        save_dir = OUTPUT_MODEL_DIR.joinpath(f'{num_progress}')
        save_dir.mkdir(parents=True, exist_ok=True)  # per-level model directory
        torch.save(  # save the Generator checkpoint
            {
                'model_state_dict': model_g.state_dict(),
                'optimizer_state_dict': optim_g.state_dict(),
                # NOTE(review): stores the raw lr float under
                # 'lrs_state_dict' -- confirm downstream loaders expect this.
                'lrs_state_dict': lr_g,
                'last_epoch': epoch,
                'batch_size': batch_size,
                'dataset': args.dataset,
                'nz': nz,
                'nc': nc,
                'lossy': args.lossy
            },
            save_dir.joinpath('generator.pt')
        )
        torch.save(  # save the Discriminator checkpoint
            {
                'model_state_dict': model_d.state_dict(),
                'optimizer_state_dict': optim_d.state_dict(),
                'lrs_state_dict': lr_d,
                'last_epoch': epoch,
                'batch_size': batch_size,
                'dataset': args.dataset,
                'nc': nc,
                'lossy': args.lossy
            },
            save_dir.joinpath('discriminator.pt')
        )
# Close the bookkeeping files now that training has finished.
f_results.close()
f_outputs.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.